From b29ce723747ddc428e2cbf1bbdf5c9ee7912556c Mon Sep 17 00:00:00 2001
From: rudaoshi
Date: Sat, 26 Nov 2022 11:17:32 +0800
Subject: [PATCH 1/4] add EE and CC

---
 [per-file diffstat elided: 423 new files under CC/, EE/paddleext/, and EE/paddlemetric/]
 423 files changed, 61009 insertions(+)
 [matching `create mode 100644` entries for the same 423 files elided]
diff --git a/CC/README.md b/CC/README.md
new file mode 100644
index 000000000..242d07dff
--- /dev/null
+++ b/CC/README.md
@@ -0,0 +1,2 @@
+# Cognitive Computing
+
diff --git a/EE/README.md b/EE/README.md
new file mode 100644
index 000000000..2cda11a78
--- /dev/null
+++ b/EE/README.md
@@ -0,0 +1,4 @@
+# Engineering Efficiency
+
+
+
diff --git a/EE/paddleext/CHANGELOG.md b/EE/paddleext/CHANGELOG.md
new file mode 100644
index 000000000..9aba09d60
--- /dev/null
+++ b/EE/paddleext/CHANGELOG.md
@@ -0,0 +1,21 @@
+Changelog
+===
+All notable changes to this project are recorded below; the format is based on [Keep a Changelog].
+
+Versioning of this project follows [Semantic Versioning] and [PEP-440].
+
+## [v1.0] - 2022-07-04
+---
+### Added
+- Support testing of some classification modules for paddlemetrics
+### Changed
+
+
+
+
+
+[v1.0]: https://console.cloud.baidu-int.com/devops/icode/repos/baidu/ccl/torch2paddle/commits/7476c4f8477d6161f8d5aaaf78f47d6bee990d42
+
+[Keep a Changelog]: https://keepachangelog.com/zh-CN/1.0.0/
+[Semantic Versioning]: https://semver.org/lang/zh-CN/
+[PEP-440]: https://www.python.org/dev/peps/pep-0440/
diff --git a/EE/paddleext/README.md b/EE/paddleext/README.md
new file mode 100644
index 000000000..03a9162cc
--- /dev/null
+++ b/EE/paddleext/README.md
@@ -0,0 +1,103 @@
+# Paddle Extension
+
+Paddle extensions, including an implementation of the PyTorch API on top of Paddle.
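+
+For example, code written against the torch API can often run on Paddle simply by
+importing this package under the familiar name (an illustrative sketch, assuming
+the functions it calls are among the APIs `torchapi` covers):
+
+```python
+from paddleext import torchapi as torch  # stand-in for the real torch
+
+x = torch.ones([2, 3])
+print(x.shape)  # [2, 3]
+```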
+
+## Install
+
+* Clone the repo
+* Add the path of the paddleext folder to PYTHONPATH
+
+## Document
+
+### Seamlessly switch backends between Paddle and PyTorch
+
+* Add the following code to the root __init__.py of your project
+(assuming your project is named PROJECT):
+
+```python
+
+import importlib
+import sys
+import os
+
+BACKEND = os.environ.get('BACKEND', 'paddle')
+
+if BACKEND == "paddle":
+
+    from paddleext import torchapi
+    sys.modules["PROJECT.backend"] = torchapi
+
+    try:
+        import paddlemetrics
+        sys.modules["PROJECT.metrics"] = paddlemetrics
+    except Exception:
+        pass
+
+elif BACKEND == "torch":
+    try:
+        import torch
+        import types
+
+        class VirtualModule(types.ModuleType):
+            """Expose a real package and its submodules under a new module name."""
+
+            def __init__(self, module_name, sub_modules):
+
+                super().__init__(module_name)
+                try:
+                    sys.modules[module_name] = self
+                    self._module_name = module_name
+                    self._sub_modules = sub_modules
+                    for sub_name, module in sub_modules.items():
+                        if sub_name is None:
+                            sys.modules[f"{module_name}"] = module
+                        else:
+                            sys.modules[f"{module_name}.{sub_name}"] = module
+                except ImportError:
+                    raise
+
+            def __repr__(self):
+                return "Virtual module for " + self._module_name
+
+            def __getattr__(self, attrname):
+
+                if attrname in self._sub_modules.keys():
+                    return self._sub_modules[attrname]
+                else:
+                    raise AttributeError(
+                        f"module {self._module_name!r} has no attribute {attrname!r}"
+                    )
+
+
+        import pkgutil
+
+        # expose torch itself plus each of its public submodules
+        sub_modules = {None: torch}
+        for module_info in pkgutil.iter_modules(torch.__path__):
+            if not module_info.name.startswith("_"):
+                try:
+                    module = importlib.import_module("torch." + module_info.name)
+                    sub_modules[module_info.name] = module
+                except Exception:
+                    pass
+
+        # self-registers in sys.modules as PROJECT.backend
+        VirtualModule("PROJECT.backend", sub_modules)
+
+
+    except Exception:
+        raise
+
+    try:
+        import torchmetrics
+
+        sys.modules["PROJECT.metrics"] = torchmetrics
+    except Exception:
+        pass
+
+```
+* set the environment variable BACKEND to "paddle" or "torch" to switch backends
+* import the backend module in your code
+
+```python
+import PROJECT.backend as B
+from PROJECT.backend import nn
+import PROJECT.metrics as M
+```
+* replace all "torch." or "paddle." prefixes with "B." in your code, as in the sketch below
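+
+For instance, a call that reads `torch.stack(xs, dim=0)` under PyTorch, or
+`paddle.stack(xs, axis=0)` under Paddle, becomes the following (an illustrative
+sketch; `xs` is any list of tensors, and it assumes `stack` is among the wrapped
+APIs):
+
+```python
+import PROJECT.backend as B
+
+out = B.stack(xs, dim=0)  # dispatched to torch or paddle according to BACKEND
+```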
diff --git a/EE/paddleext/__init__.py b/EE/paddleext/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/EE/paddleext/paddleext/__init__.py b/EE/paddleext/paddleext/__init__.py
new file mode 100644
index 000000000..7fd017394
--- /dev/null
+++ b/EE/paddleext/paddleext/__init__.py
@@ -0,0 +1,3 @@
+
+
+from . import torchapi
\ No newline at end of file
diff --git a/EE/paddleext/paddleext/torchapi/__init__.py b/EE/paddleext/paddleext/torchapi/__init__.py
new file mode 100644
index 000000000..7ba3f89ae
--- /dev/null
+++ b/EE/paddleext/paddleext/torchapi/__init__.py
@@ -0,0 +1,74 @@
+import inspect
+
+from .core import *
+from .tensor_ import *
+from .functional import *
+from . import sampler
+from . import data
+from . import nn
+from . import distributed
+from . import cuda
+from . import optim
+
+#from . import paddle_func
+
+this_module = sys.modules[__name__]
+
+
+def get_module_attribute(module, *args, **kwargs):
+    # Perform custom logic here
+
+    obj = object.__getattribute__(module, *args, **kwargs)
+
+    if isinstance(obj, types.FunctionType):
+        if not obj.__module__.startswith("paddleext.torchapi."):
+            return partial(paddle_delegate_func, obj)
+        else:
+            return obj
+    elif isinstance(obj, types.ModuleType):
+        return ModuleDelegate(obj)
+    elif inspect.isclass(obj):
+        return obj
+    else:
+        return obj
+
+class ModuleDelegate(object):
+    def __init__(self, module):
+        self.module = module
+
+    def __getattribute__(self, *args, **kwargs):
+
+        module = object.__getattribute__(self, "module")
+        result = object.__getattribute__(module, *args, **kwargs)
+        if isinstance(result, types.ModuleType):
+            return ModuleDelegate(result)
+        elif isinstance(result, types.FunctionType):
+            if not result.__module__.startswith("paddleext.torchapi."):
+                return partial(paddle_delegate_func, result)
+            else:
+                return result
+        elif inspect.isclass(result):
+            if result.__module__.startswith("paddle."):
+                return make_delegate_class(result)
+            else:
+                return result
+        else:
+            return result
+
+
+    # def __getattr__(self, *args, **kwargs):
+    #     return get_module_attribute(self.module, *args, **kwargs),
+
+    # def __delattr__(self, *args, **kwargs):
+    #     return object.__delattr__(self.module, *args, **kwargs)
+    #
+    # def __dir__(self):
+    #     return dir(self.module)
+
+
+
+sys.modules[__name__] = ModuleDelegate(sys.modules[__name__])
diff --git a/EE/paddleext/paddleext/torchapi/core.py b/EE/paddleext/paddleext/torchapi/core.py
new file mode 100644
index 000000000..f7f1db0ac
--- /dev/null
+++ b/EE/paddleext/paddleext/torchapi/core.py
@@ -0,0 +1,115 @@
+"""
+paddle core
+"""
+import sys
+import types
+from functools import partial
+from types import MethodType
+from typing import Any
+
+import paddle
+import random
+import numpy as np
+
+Module = paddle.nn.Layer
+ModuleBase = paddle.nn.Layer
+ModuleDict = paddle.nn.LayerDict
+ModuleList = paddle.nn.LayerList
+device = str
+
+dtype = paddle.dtype
+
+def load_state_dict(module: Module, state_dict, *args, **kwargs):
+    module.set_state_dict(state_dict, *args, **kwargs)
+
+
+Module.load_state_dict = load_state_dict
+
+from paddle import *
+
+def deterministic(seed=0):
+    random.seed(seed)
+    paddle.seed(seed)
+    np.random.seed(seed)
+
+
+import paddle
+
+from paddle import bool, int32, int64, int8, float32, float64, float16
+
+long = paddle.int64
+int = paddle.int32
+float = paddle.float32
+double = paddle.float64
+
+
+def platform():
+    """
+
+    Returns:
+        The name of the active backend ("paddle").
+    """
+
+    return "paddle"
+
+
+
+from paddle import no_grad, autograd
+
+class set_detect_anomaly(object):
+    r"""Context-manager that sets the anomaly detection for the autograd engine on or off.
+    ``set_detect_anomaly`` will enable or disable the autograd anomaly detection
+    based on its argument :attr:`mode`.
+    It can be used as a context-manager or as a function.
+    On the paddle backend this is a no-op stub kept for torch API compatibility.
+    Args:
+        mode (bool): Flag whether to enable anomaly detection (``True``),
+                     or disable (``False``).
+ """ + + def __init__(self, mode: bool) -> None: + pass + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> None: + pass + + +setattr(autograd, "set_detect_anomaly", set_detect_anomaly) + + +def paddle_delegate_func(func, *args, **kwargs): + if "dim" in kwargs: + kwargs["axis"] = kwargs["dim"] + del kwargs["dim"] + + if "device" in kwargs: + del kwargs["device"] + + return func(*args, **kwargs) + +def make_delegate_class(class_): + + class DelegateClass(class_): + def __init__(self, *args, **kwargs): + + if class_.__name__.endswith("Linear"): + if "bias" in kwargs: + kwargs["bias_attr"] = kwargs["bias"] + del kwargs["bias"] + if "weight" in kwargs: + kwargs["weight_attr"] = kwargs["weight"] + del kwargs["weight"] + if class_.__name__.endswith("LayerNorm"): + if "eps" in kwargs: + kwargs["epsilon"] = kwargs["eps"] + del kwargs["eps"] + super().__init__(*args, **kwargs) +# self.__class__ = class_ + + return DelegateClass + + diff --git a/EE/paddleext/paddleext/torchapi/cuda.py b/EE/paddleext/paddleext/torchapi/cuda.py new file mode 100644 index 000000000..23c774c37 --- /dev/null +++ b/EE/paddleext/paddleext/torchapi/cuda.py @@ -0,0 +1,27 @@ + +import paddle + +_initialized=True +def is_available(): + + return paddle.device.cuda.device_count() > 0 + +def manual_seed_all(seed): + paddle.seed(seed) + + +def manual_seed(seed): + paddle.seed(seed) + + +def set_device(device): + return paddle.set_device(device) + + +def empty_cache(): + return + + +def device_count(): + + return paddle.device.cuda.device_count() \ No newline at end of file diff --git a/EE/paddleext/paddleext/torchapi/data.py b/EE/paddleext/paddleext/torchapi/data.py new file mode 100644 index 000000000..bd61f5627 --- /dev/null +++ b/EE/paddleext/paddleext/torchapi/data.py @@ -0,0 +1,5 @@ +""" +data for paddle +""" + +from paddle.io import DataLoader, Dataset \ No newline at end of file diff --git a/EE/paddleext/paddleext/torchapi/distributed.py b/EE/paddleext/paddleext/torchapi/distributed.py new file mode 100644 index 000000000..b9bf7698e --- /dev/null +++ b/EE/paddleext/paddleext/torchapi/distributed.py @@ -0,0 +1,17 @@ + +import paddle + + +def is_available(): + return True + +DISTRIBUTED = False + +def is_initialized(): + return DISTRIBUTED + + +def init_process_group(*args, **kwargs): + + pass + diff --git a/EE/paddleext/paddleext/torchapi/functional.py b/EE/paddleext/paddleext/torchapi/functional.py new file mode 100644 index 000000000..8d2ad2d22 --- /dev/null +++ b/EE/paddleext/paddleext/torchapi/functional.py @@ -0,0 +1,485 @@ +import builtins +from collections import namedtuple + +import paddle +from paddle import Tensor +import numpy as np + +from paddle import is_tensor + +from paddle import less_than, less_equal, greater_than, greater_equal, equal + +from paddle.nn.functional import * + +from paddle import arange, ones_like, zeros_like, ones + +from paddle import logical_and, logical_not, logical_or, logical_xor + +from paddle import all, any + +from paddle import argmax, argmin + +from paddle import stack + +from paddle import einsum + +from paddle import inverse + +from paddle.linalg import * + + +def max_along_dim(input, dim=None, keepdim=False, *, out=None): + + if dim is None: + result = paddle.max(input) + return paddle.ones([], dtype=result.dtype) * result.item() + + max_val = paddle.max(input, axis=dim, keepdim=keepdim) + max_index = paddle.argmax(input, axis=dim) + + if out is not None: + out[0] = max_val + out[1] = max_index + + return (max_val, max_index) + +def max(input, *args, 
**kwargs): + + if len(args) == 0: + return max_along_dim(input, **kwargs) + + if isinstance(args[0], (int, list, tuple)): + return max_along_dim(input, *args, **kwargs) + elif isinstance(args[0], Tensor): + return paddle.maximum(input, args[0], *args[1:], **kwargs) + else: + raise Exception(f"unknown parameter combination") + + +def min_along_dim(input, dim=None, keepdim=False, *, out=None): + + if dim is None: + result = paddle.min(input) + return paddle.ones([], dtype=result.dtype) * result.item() + + min_val = paddle.min(input, axis=dim, keepdim=keepdim) + min_index = paddle.argmin(input, axis=dim) + + if out is not None: + out[0] = min_val + out[1] = min_index + + return (min_val, min_index) + + +def min(input, *args, **kwargs): + + if len(args) == 0: + return min_along_dim(input, **kwargs) + + if isinstance(args[0], (int, list, tuple)): + return min_along_dim(input, *args, **kwargs) + elif isinstance(args[0], Tensor): + return paddle.minimum(input, args[0], *args[1:], **kwargs) + else: + raise Exception(f"unknown parameter combination") + + +def lt(a, b): + if np.isscalar(a) or np.isscalar(b): + return a < b + else: + return less_than(a, b) + + +def le(a, b): + if np.isscalar(a) or np.isscalar(b): + return a <= b + else: + return less_equal(a, b) + + +def gt(a, b): + if np.isscalar(a) or np.isscalar(b): + return a > b + else: + return greater_than(a, b) + + +def ge(a, b): + if np.isscalar(a) or np.isscalar(b): + return a >= b + else: + return greater_equal(a, b) + + +def eq(a, b): + if np.isscalar(a) or np.isscalar(b): + return a == b + else: + return equal(a, b) + + +def standardize_dtype(type): + + if type == int: + return paddle.int64 + elif type == float: + return paddle.float32 + + return type + +def empty(*size, dtype=None, device=None): + + if len(size) == 1 and isinstance(size[0], (list, tuple)): + size = size[0] + + dtype = standardize_dtype(dtype) + x = paddle.empty(size, dtype=dtype) + + return x + +def zeros(*size, dtype=None, device=None): + if len(size) == 1 and isinstance(size[0], (list, tuple)): + size = size[0] + + dtype = standardize_dtype(dtype) + x = paddle.zeros(size, dtype=dtype) + # if device is not None: + # x = x.to(device) + return x + + +def ones(*size, dtype=None, device=None): + if len(size) == 1 and isinstance(size[0], (list, tuple)): + size = size[0] + dtype = standardize_dtype(dtype) + x = paddle.ones(size, dtype=dtype) + # if device is not None: + # x = x.to(device) + return x + + +def rand(*size, dtype=None, device=None): + if len(size) == 1 and isinstance(size[0], (list, tuple)): + size = size[0] + dtype = standardize_dtype(dtype) + x = paddle.rand(size, dtype=dtype) + # if device is not None: + # x = x.to(device) + return x + + +def randint(low=None, high=None, size=None, dtype=None, name=None, device=None): + + arg1 = low + arg2 = high + arg3 = size + + dtype = standardize_dtype(dtype) + + if dtype == paddle.int32 or dtype == paddle.int64: + int_dtype = dtype + target_dtype = None + else: + int_dtype = None + target_dtype = None + + if arg3 is not None: + assert isinstance(arg3, (list, tuple)) + if low is None and high is not None: + arg1 = high + arg2 = None + result = paddle.randint(low=arg1, high=arg2, shape=arg3, dtype=int_dtype, name=name) + return result.astype(target_dtype) if target_dtype else result + else: + assert isinstance(arg2, (list, tuple)) + result = paddle.randint(low=arg1, high=None, shape=arg2, dtype=int_dtype, name=name) + return result.astype(target_dtype) if target_dtype else result + + +def randn(*size, out=None, dtype=None, 
device=None):
+    if len(size) == 1 and isinstance(size[0], (list, tuple)):
+        size = size[0]
+
+    dtype = standardize_dtype(dtype)
+    x = paddle.randn(size, dtype=dtype)
+
+    if out is not None:
+        paddle.assign(x, out)
+        return out
+
+    return x
+
+
+def manual_seed_all(seed):
+    paddle.seed(seed)
+
+
+def manual_seed(seed):
+    paddle.seed(seed)
+
+
+def scalar_dtype(x):
+    from . import core
+    return getattr(core, type(x).__name__)
+
+
+def tensor(x, dtype=None, device=None):
+    if np.isscalar(x):
+        if dtype is None:
+            dtype = scalar_dtype(x)
+        result = paddle.ones([], dtype=dtype)
+        if np.isnan(x):
+            # sqrt of a negative number yields a nan of the right dtype
+            result = (result * (-1)).sqrt()
+        else:
+            result.fill_(x)
+        return result
+
+    return paddle.to_tensor(x, dtype=dtype)
+
+def from_numpy(x):
+    return paddle.to_tensor(x)
+
+
+cat = paddle.concat
+
+
+# scatter has different semantics in the two frameworks:
+# in tensorflow / paddle, scatter is:
+#    for idx, l in enumerate(index):
+#        output[l] = update[idx]
+# in torch, scatter is:
+#    for i, j, k:
+#        output[i, j, index[i,j,k]] = update[i, j, k]
+
+
+def sum(x, dim=None, dtype=None, keepdim=False, name=None):
+
+    if x.ndim == 0:
+        return x
+
+    result = paddle.sum(x, axis=dim, dtype=dtype, keepdim=keepdim, name=name)
+
+    dim_len = 1 if np.isscalar(dim) else x.ndim if dim is None else len(dim)
+
+    if not keepdim and x.ndim == dim_len:
+        return tensor(result.item(), dtype=result.dtype)
+    else:
+        return result
+
+
+def nonzero(input, *, out=None, as_tuple=False):
+
+    result = paddle.nonzero(input, as_tuple=as_tuple)
+    if not as_tuple:
+        if out is not None:
+            paddle.assign(result, out)
+            return out
+        else:
+            return result
+    else:
+        assert out is None
+        return tuple([x.squeeze(-1) for x in result])
+
+
+def where(condition, x=None, y=None, name=None):
+
+    if x is not None and y is not None:
+        assert is_tensor(x) or is_tensor(y)
+
+        if np.isscalar(x):
+            x = paddle.ones_like(condition, dtype=scalar_dtype(x)) * x
+        if x.ndim == 0:
+            x = paddle.ones_like(condition, dtype=x.dtype) * x.item()
+
+        if np.isscalar(y):
+            y = paddle.ones_like(condition, dtype=scalar_dtype(y)) * y
+        if y.ndim == 0:
+            y = paddle.ones_like(condition, dtype=y.dtype) * y.item()
+
+        return paddle.where(condition, x, y, name=name)
+
+    elif x is None and y is None:
+        result = nonzero(condition, as_tuple=True)
+
+        return result
+    else:
+        raise Exception("x and y must both be None or both be not None")
+
+
+def is_nonzero(input):
+
+    assert paddle.numel(input) == 1
+
+    return input.item() != 0.0
+
+
+def allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
+
+    if np.isscalar(input):
+        input = tensor(input)
+    if np.isscalar(other):
+        other = tensor(other)
+
+    return paddle.allclose(input.float(), other.float(), rtol=rtol, atol=atol, equal_nan=equal_nan, name=name)
+
+
+def scatter(input: Tensor, dim, index, value) -> Tensor:
+    if input.ndim == 1:
+        output = paddle.scatter(input, index, value, overwrite=True)
+    else:
+
+#        index, selected = paddle.unique(index, axis=dim, return_index=True)
+#        if isinstance(value, Tensor):
+#            value = paddle.index_select(value, selected, axis=dim)
+
+        grids = [paddle.arange(index.shape[x]) for x in range(index.ndim)]
+        inner_indexes = list(paddle.meshgrid(*grids))
+        inner_indexes[dim] = index
+        inner_indexes = [x.flatten() for x in inner_indexes]
+        inner_indexes = paddle.stack(inner_indexes, axis=1)
+
+        value_shape = list(inner_indexes.shape[:-1]) + list(input.shape[inner_indexes.shape[-1]:])
+
+        if paddle.is_tensor(value):
+            value = paddle.reshape(value, value_shape)
+        elif isinstance(value, (builtins.bool,
builtins.int, builtins.float, np.integer, np.float32, np.float64)): + value = paddle.full(shape=value_shape, fill_value=value) + else: + raise Exception(f"unknown value type: {type(value)}") + + to_overwrite = paddle.scatter_nd(inner_indexes, value, shape=input.shape) + condition = paddle.scatter_nd(inner_indexes, paddle.ones_like(value), shape=input.shape) + output = paddle.where(condition > 0, to_overwrite.float(), input.float()).cast(input.dtype) + + return output + +def gather(x,dim,index): + index_shape=index.shape + index_flatten=index.flatten() + if dim<0: + dim=len(x.shape)+dim + nd_index=[] + for k in range(len(x.shape)): + if k==dim: + nd_index.append(index_flatten) + else: + reshape_shape=[1]*len(x.shape) + reshape_shape[k]=x.shape[k] + dim_index=paddle.expand( paddle.reshape(paddle.arange(x.shape[k],dtype=index.dtype), reshape_shape), index_shape).flatten() + nd_index.append(dim_index) + + ind2 = paddle.transpose(paddle.stack(nd_index),[1, 0]) + # ind2 = paddle.stack(nd_index).transpose([1, 0]) + paddle_out = paddle.gather_nd(x, ind2).reshape(index_shape) + return paddle_out + + +def scatter_(input: Tensor, dim, index, value): + + output = scatter(input, dim, index, value) + # return output + paddle.assign(output, input) + + return input + + + +def scatter_add(input: Tensor, dim, index, update) -> Tensor: + # donot use scatter with overwrite=False even for 1-d case; + # It does not produce correct result for duplicated indexes + # if input.ndim == 1: + # output = paddle.scatter(input, index, update, overwrite=False) + # else: + if index.ndim > 1: + grids = [paddle.arange(index.shape[x]) for x in range(index.ndim)] + inner_indexes = list(paddle.meshgrid(*grids)) + inner_indexes[dim] = index + else: + inner_indexes = [index] + inner_indexes = [x.flatten() for x in inner_indexes] + inner_indexes = paddle.stack(inner_indexes, axis=1) + + update_shape = list(inner_indexes.shape[:-1]) + list(input.shape[inner_indexes.shape[-1]:]) + update = paddle.reshape(update, update_shape) + output = paddle.scatter_nd_add(input, inner_indexes, update) + + return output + + +def scatter_add_(input: Tensor, dim, index, update) -> Tensor: + output = scatter_add(input, dim, index, update) + paddle.assign(output, input) + # return output + return input + + +def norm(input, p='fro', dim=None, keepdim=False, out=None, dtype=None): + + result = paddle.linalg.norm(input, p, axis=dim, keepdim=keepdim) + if dtype is not None: + result = result.cast(dtype) + + if out is not None: + out.assign(result) + + return result + +def isinf(x, name=None): + if x.dtype == paddle.bool: + return paddle.zeros_like(x, dtype=paddle.bool) + else: + return paddle.isinf(x, name=name) + +def isnan(x, name=None): + if x.dtype == paddle.bool: + return paddle.zeros_like(x, dtype=paddle.bool) + else: + return paddle.isnan(x, name=name) + +def broadcast_to(x, shape, name=None): + + if len(shape) == 1 and shape[0] == 0: + assert x.numel() == 1 + return tensor(x.item()) + else: + return paddle.broadcast_to(x, shape, name) + + +def as_tensor(data, dtype=None, device=None): + + return paddle.to_tensor(data, dtype=dtype) + + +TopKResult = namedtuple("TopKResult", ["values", "indices"]) +def topk(input, k, dim=None, largest=True, sorted=True, *, out=None): + + result, indice = paddle.topk(input, k, axis=dim, largest=largest, sorted=sorted) + + if out is not None: + out[0].set_value(result) + out[1].set_value(indice) + + return TopKResult(values=result, indices=indice) + + +def split(tensor, split_size_or_sections, dim=0): + """ + paddle 
interface differs from PyTorch's: an int means the chunk size in torch
+    but the number of sections in paddle, so an int is converted first.
+
+    Args:
+        tensor: the tensor to split.
+        split_size_or_sections: chunk size (int) or list of section sizes.
+        dim: dimension along which to split.
+
+    Returns:
+        A list of tensors.
+    """
+    if isinstance(split_size_or_sections, int):
+        sizes = [split_size_or_sections] * (tensor.shape[dim] // split_size_or_sections)
+        if tensor.shape[dim] % split_size_or_sections != 0:
+            sizes.append(tensor.shape[dim] % split_size_or_sections)
+        split_size_or_sections = sizes
+
+    return paddle.split(tensor, split_size_or_sections, axis=dim)
\ No newline at end of file
diff --git a/EE/paddleext/paddleext/torchapi/machine.py b/EE/paddleext/paddleext/torchapi/machine.py
new file mode 100644
index 000000000..bb0ee241c
--- /dev/null
+++ b/EE/paddleext/paddleext/torchapi/machine.py
@@ -0,0 +1,35 @@
+"""
+machine for paddle
+"""
+
+import paddle
+
+
+class PaddleTrainer(object):
+    """
+    PaddleTrainer
+    """
+
+    def __init__(self, machine, loss, optimizer,
+                 evaluator, *args, **kwargs):
+
+        self.model = paddle.Model(machine)
+
+        self.model.prepare(optimizer=optimizer, loss=loss,
+                           metrics=evaluator)
+
+    def fit(self, train_data_streams):
+        """Train the wrapped model.
+
+        Args:
+            train_data_streams: an object exposing ``train`` and ``dev`` data sets
+                (``train`` is used for fitting, ``dev`` for evaluation).
+        """
+
+        self.model.fit(train_data_streams.train, eval_data=train_data_streams.dev)
+
+Trainer = PaddleTrainer
\ No newline at end of file
diff --git a/EE/paddleext/paddleext/torchapi/metrics.py b/EE/paddleext/paddleext/torchapi/metrics.py
new file mode 100644
index 000000000..8d5f431c8
--- /dev/null
+++ b/EE/paddleext/paddleext/torchapi/metrics.py
@@ -0,0 +1,7 @@
+
+import sys
+
+import paddlemetrics
+
+sys.modules[__name__] = paddlemetrics
+
diff --git a/EE/paddleext/paddleext/torchapi/nn/__init__.py b/EE/paddleext/paddleext/torchapi/nn/__init__.py
new file mode 100644
index 000000000..64b48d081
--- /dev/null
+++ b/EE/paddleext/paddleext/torchapi/nn/__init__.py
@@ -0,0 +1,47 @@
+import paddle
+import random
+import numpy as np
+
+ModuleBase = paddle.nn.Layer
+ModuleDict = paddle.nn.LayerDict
+ModuleList = paddle.nn.LayerList
+
+from paddle.nn import *
+
+Conv2d = Conv2D
+Conv3d = Conv3D
+from . import functional
+from paddle.nn import initializer
+
+from . 
import init + +def Parameter(data, requires_grad=True): + """ + + Args: + data: + requires_grad: + + Returns: + + """ + + param = paddle.create_parameter(data.shape, dtype=data.dtype, default_initializer=initializer.Assign(data)) + + param.stop_gradient = not requires_grad + + return param + +from paddle.fluid import framework + +class Module(paddle.nn.Layer): + """ + Module with add_parameter + """ + + def __setattr__(self, key, value): + + if isinstance(value, framework.Parameter): + self.add_parameter(key, value) + else: + super().__setattr__(key, value) \ No newline at end of file diff --git a/EE/paddleext/paddleext/torchapi/nn/functional.py b/EE/paddleext/paddleext/torchapi/nn/functional.py new file mode 100644 index 000000000..cf489074f --- /dev/null +++ b/EE/paddleext/paddleext/torchapi/nn/functional.py @@ -0,0 +1,15 @@ + + +import paddle +from more_itertools import chunked +from paddle.nn.functional import * + +def pad(input, pad, mode='constant', value=0.0): + + pad = sum(reversed(list(chunked(pad, 2))), []) + + if len(pad) < 2 * input.ndim: + pad = [0] * (2 * input.ndim - len(pad)) + pad + + return paddle.nn.functional.pad(input, pad, mode=mode, value=value) + diff --git a/EE/paddleext/paddleext/torchapi/nn/init.py b/EE/paddleext/paddleext/torchapi/nn/init.py new file mode 100644 index 000000000..4a74a80c9 --- /dev/null +++ b/EE/paddleext/paddleext/torchapi/nn/init.py @@ -0,0 +1,49 @@ +""" +init function for paddle +""" +import paddle + + +def normal_(tensor, mean=0.0, std=1.0): + """ + + Args: + tensor: + mean: + std: + + Returns: + + """ + + paddle.assign(paddle.normal(mean=mean, std=std, shape=tensor.shape), tensor) + + return tensor + +def zeros_(tensor): + """ + + Args: + tensor: + + Returns: + + """ + + paddle.assign(paddle.zeros_like(tensor), tensor) + + return tensor + +def ones_(tensor): + """ + + Args: + tensor: + + Returns: + + """ + + paddle.assign(paddle.ones_like(tensor), tensor) + + return tensor \ No newline at end of file diff --git a/EE/paddleext/paddleext/torchapi/optim/__init__.py b/EE/paddleext/paddleext/torchapi/optim/__init__.py new file mode 100644 index 000000000..c31d1d6c8 --- /dev/null +++ b/EE/paddleext/paddleext/torchapi/optim/__init__.py @@ -0,0 +1,48 @@ +from functools import partial + +from paddle.optimizer import * + +from . import lr_scheduler + +# import sys +# this_module = sys.modules[__name__] +# import inspect +# +# def paddle_optim_class_creator(paddle_optim_class, *args, **kwargs): +# """ +# +# Args: +# paddle_optim_class: +# *args: +# **kwargs: +# +# Returns: +# +# """ +# if "params" in kwargs: +# kwargs["parameters"] = kwargs["params"] +# del kwargs["params"] +# if "lr" in kwargs: +# kwargs["learning_rate"] = kwargs["lr"] +# del kwargs["lr"] +# +# return paddle_optim_class(*args, **kwargs) +# +# from . 
import lr +# +# class PaddleOptimModuleProxy(object): +# +# def __getattribute__(self, *args, **kwargs): +# # Perform custom logic here +# +# obj = object.__getattribute__(this_module, *args, **kwargs) +# +# if inspect.isclass(obj) and obj.__module__.startswith("paddle.optimization"): +# print(obj.__module__) +# return partial(paddle_optim_class_creator, obj) +# else: +# return obj +# +# +# +# sys.modules[__name__] = PaddleOptimModuleProxy() \ No newline at end of file diff --git a/EE/paddleext/paddleext/torchapi/optim/lr_scheduler.py b/EE/paddleext/paddleext/torchapi/optim/lr_scheduler.py new file mode 100644 index 000000000..1ba3040a0 --- /dev/null +++ b/EE/paddleext/paddleext/torchapi/optim/lr_scheduler.py @@ -0,0 +1,34 @@ +from functools import partial + +from paddle.optimizer.lr import * + +StepLR = StepDecay +ExponentialLR = ExponentialDecay + +# +# def paddle_lr_class_creator(paddle_lr_class, *args, **kwargs): +# +# if "lr" in kwargs: +# kwargs["learning_rate"] = kwargs["lr"] +# del kwargs["lr"] +# +# return paddle_lr_class(*args, **kwargs) +# +# import sys +# this_module = sys.modules[__name__] +# import inspect +# +# class PaddleLRModuleProxy(object): +# +# def __getattribute__(self, *args, **kwargs): +# # Perform custom logic here +# +# obj = object.__getattribute__(this_module, *args, **kwargs) +# +# if inspect.isclass(obj) and obj.__module__.startswith("paddle.optimization"): +# print("LR", obj.__module__) +# return partial(paddle_lr_class_creator, obj) +# else: +# return obj +# +# sys.modules[__name__] = PaddleLRModuleProxy() \ No newline at end of file diff --git a/EE/paddleext/paddleext/torchapi/sampler.py b/EE/paddleext/paddleext/torchapi/sampler.py new file mode 100644 index 000000000..7e4f1cb54 --- /dev/null +++ b/EE/paddleext/paddleext/torchapi/sampler.py @@ -0,0 +1,154 @@ +""" +paddle operation for sampler +""" +from typing import Iterator, Optional, Sequence, List, TypeVar, Generic, Sized + +import paddle + +from paddle.io import Sampler, DistributedBatchSampler, SequenceSampler, RandomSampler +import math + + +def identity(x): + """ + + Args: + x (): + + Returns: + + """ + return x + + + +class BatchSampler(Sampler): + r"""Wraps another sampler to yield a mini-batch of indices. + + Args: + sampler (Sampler or Iterable): Base sampler. Can be any iterable object + batch_size (int): Size of mini-batch. + drop_last (bool): If ``True``, the sampler will drop the last batch if + its size would be less than ``batch_size`` + + Example: + >>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=False)) + [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] + >>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=True)) + [[0, 1, 2], [3, 4, 5], [6, 7, 8]] + """ + + def __init__(self, sampler: Sampler, batch_size: int, drop_last: bool) -> None: + # Since collections.abc.Iterable does not check for `__getitem__`, which + # is one way for an object to be an iterable, we don't do an `isinstance` + # check here. 
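+        # paddle.io.Sampler takes an optional data_source argument, so the bare
+        # super().__init__() below is sufficient (assumption: paddle >= 2.0 API).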
+ super().__init__() + if not isinstance(batch_size, int) or isinstance(batch_size, bool) or \ + batch_size <= 0: + raise ValueError("batch_size should be a positive integer value, " + "but got batch_size={}".format(batch_size)) + if not isinstance(drop_last, bool): + raise ValueError("drop_last should be a boolean value, but got " + "drop_last={}".format(drop_last)) + self.sampler = sampler + self.batch_size = batch_size + self.drop_last = drop_last + + + def __iter__(self) -> Iterator[List[int]]: + batch = [] + for idx in self.sampler: + batch.append(idx) + if len(batch) == self.batch_size: + yield batch + batch = [] + if len(batch) > 0 and not self.drop_last: + yield batch + + def __len__(self) -> int: + # Can only be called if self.sampler has __len__ implemented + # We cannot enforce this condition, so we turn off typechecking for the + # implementation below. + # Somewhat related: see NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ] + if self.drop_last: + return len(self.sampler) // self.batch_size # type: ignore[arg-type] + else: + return (len(self.sampler) + self.batch_size - 1) // self.batch_size # type: ignore[arg-type] + + + +class SequentialSampler(Sampler): + r"""Samples elements sequentially, always in the same order. + + Args: + data_source (Dataset): dataset to sample from + """ + data_source: Sized + + def __init__(self, data_source: Sized) -> None: + self.data_source = data_source + + def __iter__(self) -> Iterator[int]: + return iter(range(len(self.data_source))) + + def __len__(self) -> int: + return len(self.data_source) + +class SubsetRandomSampler(Sampler): + r"""Samples elements randomly from a given list of indices, without replacement. + + Arguments: + indices (sequence): a sequence of indices + generator (Generator): Generator used in sampling. + """ + indices: Sequence[int] + + def __init__(self, indices: Sequence[int]) -> None: + self.indices = indices + + def __iter__(self): + return (self.indices[i] for i in paddle.randperm(len(self.indices))) + + def __len__(self): + return len(self.indices) + + + +class DistributedSampler(Sampler): + """ Iterable wrapper that distributes data across multiple workers. + + Args: + iterable (iterable) + num_replicas (int, optional): Number of processes participating in distributed training. + rank (int, optional): Rank of the current process within ``num_replicas``. 
+ + Example: + >>> list(DistributedSampler(range(10), num_replicas=2, rank=0)) + [0, 2, 4, 6, 8] + >>> list(DistributedSampler(range(10), num_replicas=2, rank=1)) + [1, 3, 5, 7, 9] + """ + + def __init__(self, iterable, num_replicas=None, rank=None): + self.iterable = iterable + self.num_replicas = num_replicas + self.rank = rank + + if num_replicas is None or rank is None: # pragma: no cover +# if not paddle.distributed.is_initialized(): +# raise RuntimeError('Requires `torch.distributed` to be initialized.') + + self.num_replicas = ( + paddle.distributed.get_world_size() if num_replicas is None else num_replicas) + self.rank = paddle.distributed.get_rank() if rank is None else rank + + if self.rank >= self.num_replicas: + raise IndexError('`rank` must be smaller than the `num_replicas`.') + + def __iter__(self): + return iter( + [e for i, e in enumerate(self.iterable) if (i - self.rank) % self.num_replicas == 0]) + + def __len__(self): + return len(self.iterable) + diff --git a/EE/paddleext/paddleext/torchapi/tensor_.py b/EE/paddleext/paddleext/torchapi/tensor_.py new file mode 100644 index 000000000..838c75a82 --- /dev/null +++ b/EE/paddleext/paddleext/torchapi/tensor_.py @@ -0,0 +1,547 @@ +""" +paddle tensor +""" +from functools import partial + +import numpy as np +from collections.abc import Iterable + +from . import paddle_delegate_func +from .functional import * +import paddle + +""" +paddle tensor +""" +import types +import paddle +from paddle import Tensor + +# just for type hint. If there are statements like isinstance(x, FloatTensor), this may cause error +FloatTensor = Tensor + +def size(self, dim=None): + shape = self.shape + if dim is None: + return shape + else: + return shape[dim] + + +# def __new__(cls, *args, **kwargs): +# +# obj = cls.__default_new__(cls, *args, **kwargs) +# +# setattr(obj, "size", types.MethodType(size, obj)) +# +# return obj +# +# setattr(Tensor, "__default_new__", Tensor.__new__) +# setattr(Tensor, "__new__", __new__) + + +def bool_(self): + return self.astype("bool") + +def float_(self): + return self.astype('float32') + + +def double_(self): + return self.astype("float64") + + +def int_(self): + return self.astype("int32") + + +def long_(self): + return self.astype('int64') + + +def expand(self, *sizes): + if isinstance(sizes[0], Iterable): + sizes = sizes[0] + ##handle -1 case + if len(sizes) > len(self.shape): + for _ in range(len(sizes) - len(self.shape)): + self = self.unsqueeze(dim=0) + expand_times = [x // y if x >= y else 1 for x, y in zip(sizes, self.shape)] + x = paddle.fluid.layers.expand(self, expand_times, name=None) + return x + + +def masked_fill(self, mask, value): + if self.ndim == 0: + assert mask.ndim == 0 + if mask.item(): + return paddle.full([], value, self.dtype) + else: + return self + + y = paddle.full(self.shape, value, self.dtype) + mask_shape = [1] * (self.ndim - mask.ndim) + mask.shape + mask = paddle.reshape(mask, mask_shape) + mask = paddle.expand_as(mask, self) + new_values = paddle.where(mask, y, self) + return new_values + # mask_float = mask.astype("float32") + # if self.dtype == paddle.bool: + # self_float = self.astype("float32") + # else: + # self_float = self + # result = self_float * (1 - mask_float) + mask_float * value + # if self.dtype == paddle.bool: + # result = result.astype(paddle.bool) + # return result + +# def masked_fill_(self, mask, value): +# +# new_values = masked_fill(self, mask, value) +# paddle.assign(new_values, self) +# +# return self + + +def to(self, arg): + if isinstance(arg, 
paddle.dtype): + return self.astype(arg) + elif isinstance(arg, Tensor): + return self.astype(arg.dtype) + else: + return self + +def is_floating_point(self): + return self.dtype in {paddle.float16, paddle.float32, paddle.float64} + + +def reshape(self, *size): + + if len(size) == 1 and isinstance(size[0], Iterable): + size = size[0] + + return paddle.reshape(self, size) + + +def view(self, *size): + if len(size) == 1 and isinstance(size[0], Iterable): + size = size[0] + + return reshape(self, size) + +def view_as(self, other): + + return view(self, *other.size()) + + +Tensor.__native__size = Tensor.size + +Tensor.device = None +Tensor.float = float_ +Tensor.double = double_ +Tensor.int = int_ +Tensor.long = long_ +Tensor.bool = bool_ +Tensor.scatter_explicit_index = Tensor.scatter +Tensor.scatter = scatter +Tensor.scatter_explicit_index_ = Tensor.scatter_ +Tensor.scatter_ = scatter_ +Tensor.scatter_add = scatter_add +Tensor.scatter_add_ = scatter_add_ +Tensor.expand = expand +Tensor.masked_fill = masked_fill +#Tensor.masked_fill_ = masked_fill_ +Tensor.to = to +Tensor.is_floating_point = is_floating_point +Tensor.reshape = reshape +Tensor.view = view +Tensor.view_as = view_as + +Tensor.__invert__ = paddle.logical_not + +Tensor.__native__numel = Tensor.numel +def numel(x): + return x.__native__numel().item() + +Tensor.numel = numel + +import math + +class SizeObject(int): + + def __new__(cls, sizes, *args, **kwargs): + size = int(math.prod(sizes)) + instance = int.__new__(cls, size, *args, **kwargs) + instance.sizes = sizes + return instance + + def __call__(self, index=None): + if index is None: + return self.sizes + else: + return self.sizes[index] + +Tensor.size = property(lambda self: SizeObject(self.shape)) + + +def flatten(self, *args, **kwargs): + + if self.dtype == paddle.bool: + return flatten(self.int(), *args, **kwargs) > 0 + else: + return paddle.flatten(self, *args, **kwargs) + +Tensor.flatten = flatten + + +Tensor.__getitem__official__ = Tensor.__getitem__ + +import builtins + +def getitem(self, args): + + if self.dtype == paddle.bool: + return getitem(self.int(), args) > 0 + + if isinstance(args, (list, tuple)): + ellipsis_num = builtins.sum(x is Ellipsis for x in args) + if ellipsis_num > 1: + raise Exception(f"multiple ellipsis found in args: {args}") + elif ellipsis_num == 1: + args = list(args) + ellips_idx = args.index(Ellipsis) + args_before_ellips = args[:ellips_idx] + args_after_ellips = args[ellips_idx+1:] + ommited_dims = [builtins.slice(None, None, None) for _ in range(self.ndim - len(args) + 1)] + args = tuple(args_before_ellips + ommited_dims + args_after_ellips) + + return self.__getitem__official__(args) + + elif isinstance(args, Tensor): + if args.dtype == paddle.bool and args.ndim > 1: + # paddle do not support boolean indexing with ndim > 1 + return self.flatten(start_axis=0, stop_axis=args.ndim-1)[args.flatten().nonzero()] + if args.ndim == 0: + assert args.dtype == paddle.bool + assert self.ndim == 0 + return tensor(self.reshape((1,))[args.reshape((1,))].item(), dtype=self.dtype) + + return self.__getitem__official__(args) + +Tensor.__getitem__ = getitem + +Tensor.__setitem__official__ = Tensor.__setitem__ + +def setitem(self, index, value): + + if isinstance(index, Tensor): + if self.ndim == 0: + index = index.item() + assert type(index) == bool + if index: + self.fill_(value) + return + + if index.dtype == paddle.bool and (paddle.any(paddle.isnan(self)) or paddle.any(paddle.isinf(self))): + + result = masked_fill(self, index, value) + 
self.set_value(result)
+            return
+
+    self.__setitem__official__(index, value)
+
+Tensor.__setitem__ = setitem
+
+def getattribute(self, *args, **kwargs):
+    # Perform custom logic here
+
+    obj = object.__getattribute__(self, *args, **kwargs)
+
+    if isinstance(obj, types.MethodType) and not obj.__module__.startswith("paddleext.torchapi."):
+
+        return partial(paddle_delegate_func, obj)
+    else:
+        return obj
+
+
+Tensor.__getattribute__ = getattribute
+
+Tensor.sum = sum
+
+
+
+def permute(self, *perm):
+
+    if len(perm) == 1 and isinstance(perm[0], Iterable):
+        perm = perm[0]
+
+    assert len(perm) == self.ndim
+    perm = [self.ndim + x if x < 0 else x for x in perm]  ## paddle.transpose does not allow negative dims
+
+    if self.dtype == paddle.bool:
+        return permute(self.int(), *perm) > 0
+    else:
+        return paddle.transpose(self, perm)
+
+Tensor.permute = permute
+
+
+def transpose(self, *perm):
+    # if len(perm)==2 and len(self.shape)>2:
+    if isinstance(perm[0], Iterable):
+        assert len(perm) == 1
+        perm = perm[0]
+
+    if len(perm) == 2 and len(perm) < self.ndim:
+
+        perm = [self.ndim + x if x < 0 else x for x in perm]
+        dim1, dim2 = perm
+        perm = list(range(self.rank()))
+        perm[dim1] = dim2
+        perm[dim2] = dim1
+
+        return self.permute(*perm)
+    else:
+        return paddle.transpose(self, perm)
+
+
+Tensor.transpose = transpose
+
+def contiguous(self):
+    return self
+
+Tensor.contiguous = contiguous
+
+
+Tensor.__lt__origin__ = Tensor.__lt__
+def __lt__(self, other):
+    if self.ndim == 0 and np.isscalar(other):
+        other = tensor(other)
+    return self.__lt__origin__(other)
+Tensor.__lt__ = __lt__
+
+
+Tensor.__le__origin__ = Tensor.__le__
+def __le__(self, other):
+    if self.ndim == 0 and np.isscalar(other):
+        other = tensor(other)
+    return self.__le__origin__(other)
+Tensor.__le__ = __le__
+
+
+Tensor.__gt__origin__ = Tensor.__gt__
+def __gt__(self, other):
+    if self.ndim == 0 and np.isscalar(other):
+        other = tensor(other)
+    return self.__gt__origin__(other)
+Tensor.__gt__ = __gt__
+
+
+Tensor.__ge__origin__ = Tensor.__ge__
+def __ge__(self, other):
+    if self.ndim == 0 and np.isscalar(other):
+        other = tensor(other)
+    return self.__ge__origin__(other)
+Tensor.__ge__ = __ge__
+
+
+Tensor.__eq__origin__ = Tensor.__eq__
+def __eq__(self, other):
+    if self.ndim == 0 and np.isscalar(other):
+        other = tensor(other)
+    return self.__eq__origin__(other)
+Tensor.__eq__ = __eq__
+
+
+Tensor.__ne__origin__ = Tensor.__ne__
+def __ne__(self, other):
+    if self.ndim == 0 and np.isscalar(other):
+        other = tensor(other)
+    return self.__ne__origin__(other)
+Tensor.__ne__ = __ne__
+
+
+def __or__(self, other):
+    return paddle.logical_or(self.bool(), other.bool())
+Tensor.__or__ = __or__
+
+def __and__(self, other):
+    return paddle.logical_and(self.bool(), other.bool())
+Tensor.__and__ = __and__
+
+
+Tensor.__native__any = Tensor.any
+def any(x, dim=None, keepdim=False, name=None):
+    if isinstance(x, Tensor) and x.ndim == 0:
+        assert dim is None
+        return x
+    else:
+        return x.__native__any(axis=dim, keepdim=keepdim, name=name)
+
+Tensor.any = any
+
+Tensor.__native__all = Tensor.all
+def all(x, dim=None, keepdim=False, name=None):
+
+    if isinstance(x, Tensor) and x.ndim == 0:
+        assert dim is None
+        return x
+    else:
+        return x.__native__all(axis=dim, keepdim=keepdim, name=name)
+
+Tensor.all = all
+
+Tensor.__native__add__ = Tensor.__add__
+#Tensor.__native__iadd__ = Tensor.__iadd__
+def add(x, y):
+
+    tensor_out = isinstance(x, Tensor) or isinstance(y, Tensor)
+
+    out_dtype = x.dtype if isinstance(x, Tensor) else y.dtype if isinstance(y, Tensor) else None
+
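+    # 0-d tensors are unwrapped to plain Python scalars before dispatching, so
+    # scalar arithmetic falls back to Python; the result is re-wrapped below to
+    # preserve a tensor output and its dtype (same pattern in sub/mul/div below).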
+ if isinstance(x, Tensor) and x.ndim == 0: + x = x.item() + if isinstance(y, Tensor) and y.ndim == 0: + y = y.item() + + if isinstance(x, Tensor): + return Tensor.__native__add__(x, y) + elif isinstance(y, Tensor): + return Tensor.__native__add__(y, x) + else: + result = x + y + if np.isscalar(result) and tensor_out: + return tensor(result, dtype=out_dtype) + else: + return result + + +# def iadd(x, y): +# if isinstance(y, Tensor) and y.ndim == 0: +# y = y.item() +# +# return Tensor.__native__iadd__(x, y) + +Tensor.__add__ = add +Tensor.__radd__ = add +# Tensor.__iadd__ = iadd + +Tensor.__native__sub__ = Tensor.__sub__ +Tensor.__native__rsub__ = Tensor.__rsub__ + +def subtract(x, y): + tensor_out = isinstance(x, Tensor) or isinstance(y, Tensor) + + out_dtype = x.dtype if isinstance(x, Tensor) else y.dtype if isinstance(y, Tensor) else None + + if isinstance(x, Tensor) and x.ndim == 0: + x = x.item() + if isinstance(y, Tensor) and y.ndim == 0: + y = y.item() + + if isinstance(x, Tensor): + return Tensor.__native__sub__(x, y) + elif isinstance(y, Tensor): + return Tensor.__native__rsub__(y, x) + else: + result = x - y + if np.isscalar(result) and tensor_out: + return tensor(result, dtype=out_dtype) + else: + return result + +def rsub(x, y): + + if isinstance(y, Tensor) and y.ndim == 0: + y = y.item() + + return Tensor.__native__rsub__(x, y) + + +Tensor.__sub__ = subtract +Tensor.__rsub__ = rsub + +Tensor.__native__mul__ = Tensor.__mul__ +def multiply(x, y): + tensor_out = isinstance(x, Tensor) or isinstance(y, Tensor) + + out_dtype = x.dtype if isinstance(x, Tensor) else y.dtype if isinstance(y, Tensor) else None + + if isinstance(x, Tensor) and x.ndim == 0: + x = x.item() + if isinstance(y, Tensor) and y.ndim == 0: + y = y.item() + + if isinstance(x, Tensor): + return Tensor.__native__mul__(x, y) + elif isinstance(y, Tensor): + return Tensor.__native__mul__(y, x) + else: + result = x * y + if np.isscalar(result) and tensor_out: + return tensor(result, dtype=out_dtype) + else: + return result + +Tensor.__mul__ = multiply +Tensor.__rmul__ = multiply + +Tensor.__native__truediv__ = Tensor.__truediv__ +Tensor.__native__rdiv__ = Tensor.__rdiv__ + +def divide(x, y): + tensor_out = isinstance(x, Tensor) or isinstance(y, Tensor) + + out_dtype = x.dtype if isinstance(x, Tensor) else y.dtype if isinstance(y, Tensor) else None + + if isinstance(x, Tensor) and x.ndim == 0: + x = x.item() + if isinstance(y, Tensor) and y.ndim == 0: + y = y.item() + + if isinstance(x, Tensor): + return Tensor.__native__truediv__(x, y) + elif isinstance(y, Tensor): + return Tensor.__native__rdiv__(y, x) + else: + result = x / y + if np.isscalar(result) and tensor_out: + return tensor(result, dtype=out_dtype) + else: + return result + +def rdiv(x, y): + if isinstance(y, Tensor) and y.ndim == 0: + y = y.item() + + return Tensor.__native__rdiv__(x, y) + + +Tensor.__truediv__ = divide +Tensor.__rdiv__ = rdiv + + +def __getstate__(self): + state = {"dtype": self.dtype, "value": self.numpy()} + return state + + +def __setstate__(self, newstate): + + loaded = paddle.to_tensor(newstate["value"], dtype=newstate["dtype"]) + self.set_value(loaded) + +Tensor.__getstate__ = __getstate__ +Tensor.__setstate__ = __setstate__ + +## requires_grad property + +def getter(x): + return not x.stop_gradient + +def setter(x, value): + x.stop_gradient = not value + +Tensor.requires_grad = property(getter, setter) + +Tensor.topk = topk \ No newline at end of file diff --git a/EE/paddleext/setup.py b/EE/paddleext/setup.py new file mode 100644 index 
000000000..d0e29ea72 --- /dev/null +++ b/EE/paddleext/setup.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- +################################################################################ +# +# Copyright (c) 2020 Baidu.com, Inc. All Rights Reserved +# +################################################################################ +""" +Setup script. + +Authors: sunmingming01(sunmingming01@baidu.com) +Date: 2020/12/31 12:33:34 +""" + +from setuptools import setup, find_packages + +with open('README.md') as readme_file: + README = readme_file.read() + +setup_args = dict( + name='paddle-extension', + version='1.0.0-beta', + description='Paddle extensions, including implementation for torch apis.', + long_description_content_type="text/markdown", + long_description=README, + license='Apache', + packages=find_packages(include=["paddleext", "paddleext.*"]), + author='Mingming Sun', + author_email='sunmingming01@baidu.com', + keywords=['Deep Learning', 'Paddlepaddle'], + url='', + download_url='' +) + +install_requires = [ +] + +if __name__ == '__main__': + setup(**setup_args, install_requires=install_requires) \ No newline at end of file diff --git a/EE/paddleext/test/__init__.py b/EE/paddleext/test/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/EE/paddleext/test/test_diagonal.py b/EE/paddleext/test/test_diagonal.py new file mode 100644 index 000000000..80714ed1c --- /dev/null +++ b/EE/paddleext/test/test_diagonal.py @@ -0,0 +1,32 @@ + +import numpy as np + +from chaos import paddle_ as paddle, torch_ as torch + + +def test_diagonal(): + import random + + for rank in range(2, 6): + for test in range(10): + while True: + dim1 = random.randint(0, rank - 1) + dim2 = random.randint(0, rank - 1) + if dim1 != dim2: + break + + shape = [random.randint(5, 10) for _ in range(rank)] + offset = random.randint(-shape[dim1] + 1, shape[dim2]) + + x = np.random.rand(*shape) + + torch_input = torch.from_numpy(x) + torch.fill_diagonal(torch_input, value=100, offset=offset, dim1=dim1, dim2=dim2) + + paddle_input = paddle.from_numpy(x) + paddle.fill_diagonal(paddle_input, value=100, offset=offset, dim1=dim1, dim2=dim2) + + paddle_out = paddle_input.numpy() + torch_out = torch_input.numpy() + + assert np.sum(np.abs(paddle_out - torch_out)) < 1e-5 \ No newline at end of file diff --git a/EE/paddleext/test/test_function.py b/EE/paddleext/test/test_function.py new file mode 100644 index 000000000..db676d1b2 --- /dev/null +++ b/EE/paddleext/test/test_function.py @@ -0,0 +1,13 @@ + + + +from chaos.backend_.paddle_.functional import fill_diagonal +import paddle + +def test_fill_diagnonal(): + + a = paddle.randn((5, 5)) + fill_diagonal(a, float("-inf")) + +if __name__ == "__main__": + test_fill_diagnonal() \ No newline at end of file diff --git a/EE/paddleext/test/test_pad.py b/EE/paddleext/test/test_pad.py new file mode 100644 index 000000000..6fb0e2bcb --- /dev/null +++ b/EE/paddleext/test/test_pad.py @@ -0,0 +1,29 @@ + +import numpy as np + +from chaos.backend_ import paddle_ as paddle, torch_ as torch + + +def test_pad(): + import random + + for ndim in range(2, 6): + for test in range(5): + shape = [random.randint(5, 10) for _ in range(ndim)] + x = np.random.rand(*shape) + + torch_input = torch.from_numpy(x) + paddle_input = paddle.from_numpy(x) + + for rank in range(1, ndim + 1): + + pad = [random.randint(0, 10) for _ in range(rank)] + [random.randint(0, 10) for _ in range(rank)] + + torch_output = torch.nn.functional.pad(torch_input, pad, mode='constant', value=0.0) 
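+                # the torchapi shim reorders torch's last-dim-first pad pairs
+                # into paddle's layout, so both outputs should match elementwise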
+ + paddle_output = paddle.nn.functional.pad(paddle_input, pad, mode='constant', value=0.0) + + paddle_out = paddle_output.numpy() + torch_out = torch_output.numpy() + + assert np.allclose(paddle_out, torch_out) \ No newline at end of file diff --git a/EE/paddleext/test/test_scatter.py b/EE/paddleext/test/test_scatter.py new file mode 100644 index 000000000..52c2d29ab --- /dev/null +++ b/EE/paddleext/test/test_scatter.py @@ -0,0 +1,146 @@ +from chaos.backend_ import paddle_ as paddle, torch_ as torch +import numpy as np + +def test_scatter_1d(): + + x = np.random.rand(100) + + indices = np.random.randint(low=0, high=100, size=50) + updates = np.random.rand(50) + + paddle_out = paddle.scatter(paddle.from_numpy(x), 0, paddle.from_numpy(indices), paddle.from_numpy(updates)) + torch_out = torch.scatter(torch.from_numpy(x), 0, torch.from_numpy(indices), torch.from_numpy(updates)) + + paddle_out = paddle_out.numpy() + torch_out = torch_out.numpy() + + assert np.all(paddle_out == torch_out) + + +def test_scatter_2d_dim0(): + + dim0 = 101 + dim1 = 31 + x = np.random.rand(dim0, dim1) + + # for dim = 0 + + import random + + indices = list(range(dim0)) + random.shuffle(indices) + indices = np.array(indices[:50]).reshape((25, 2)) + updates = np.random.rand(indices.shape[0], 2) + + torch_out = torch.scatter(torch.from_numpy(x), 0, torch.from_numpy(indices), torch.from_numpy(updates)) + paddle_out = paddle.scatter(paddle.from_numpy(x), 0, paddle.from_numpy(indices), paddle.from_numpy(updates)) + + paddle_out = paddle_out.numpy() + torch_out = torch_out.numpy() + + assert np.allclose(paddle_out, torch_out) + + +def test_scatter_2d_dim1(): + + dim0 = 101 + dim1 = 131 + x = np.random.rand(dim0, dim1) + + # for dim = 0 + + import random + + indices = list(range(dim1)) + random.shuffle(indices) + indices = np.array(indices[:50]).reshape((25, 2)) + updates = np.random.rand(indices.shape[0], 2) + + torch_out = torch.scatter(torch.from_numpy(x), 1, torch.from_numpy(indices), torch.from_numpy(updates)) + paddle_out = paddle.scatter(paddle.from_numpy(x), 1, paddle.from_numpy(indices), paddle.from_numpy(updates)) + + paddle_out = paddle_out.numpy() + torch_out = torch_out.numpy() + + assert np.allclose(paddle_out, torch_out) + + +def test_scatter_nd_dimm(): + import random, math + + for rank in range(1, 6): + for test in range(10): + dim = random.randint(0, rank-1) + + shape = [random.randint(5, 10) for _ in range(rank)] + + indice_shape = [random.randint(5, 10) for _ in range(rank)] + indice_shape = [min(shape[i], indice_shape[i]) for i in range(rank)] + indice_numel = math.prod(indice_shape) + + shape[dim] = 2 * indice_numel + + x = np.random.rand(*shape) + + indice_value = list(range(shape[dim])) + random.shuffle(indice_value) + + indices = np.array(indice_value[:indice_numel]).reshape(indice_shape) + updates = np.random.rand(*indice_shape) + + torch_out = torch.scatter(torch.from_numpy(x), dim, torch.from_numpy(indices), torch.from_numpy(updates)) + paddle_out = paddle.scatter(paddle.from_numpy(x), dim, paddle.from_numpy(indices), paddle.from_numpy(updates)) + + paddle_out = paddle_out.numpy() + torch_out = torch_out.numpy() + + assert np.allclose(paddle_out, torch_out) + +def test_scatter_add_1d(): + + x = np.random.rand(100) + + indices = np.random.randint(low=0, high=100, size=50) + updates = np.random.rand(50) + + paddle_out = paddle.scatter_add(paddle.from_numpy(x), 0, paddle.from_numpy(indices), paddle.from_numpy(updates)) + torch_out = torch.scatter_add(torch.from_numpy(x), 0, torch.from_numpy(indices), 
torch.from_numpy(updates))
+
+    paddle_out = paddle_out.numpy()
+    torch_out = torch_out.numpy()
+
+    assert np.all(paddle_out == torch_out)
+
+def test_scatter_add_nd_dimm():
+    import random, math
+
+    for rank in range(1, 6):
+        for test in range(10):
+            dim = random.randint(0, rank-1)
+
+            shape = [random.randint(5, 10) for _ in range(rank)]
+
+            indice_shape = [random.randint(5, 10) for _ in range(rank)]
+            indice_shape = [min(shape[i], indice_shape[i]) for i in range(rank)]
+            indice_numel = math.prod(indice_shape)
+
+            shape[dim] = 2 * indice_numel
+
+            x = np.random.rand(*shape)
+
+
+            indice_value = list(range(shape[dim]))
+            random.shuffle(indice_value)
+
+            indices = np.array(indice_value[:indice_numel]).reshape(indice_shape)
+
+            # indices = np.random.randint(0, shape[dim], size=indice_shape)
+            updates = np.random.rand(*indice_shape)
+
+            torch_out = torch.scatter_add(torch.from_numpy(x), dim, torch.from_numpy(indices), torch.from_numpy(updates))
+            paddle_out = paddle.scatter_add(paddle.from_numpy(x), dim, paddle.from_numpy(indices), paddle.from_numpy(updates))
+
+            paddle_out = paddle_out.numpy()
+            torch_out = torch_out.numpy()
+
+            assert np.allclose(paddle_out, torch_out)
\ No newline at end of file
diff --git a/EE/paddlemetric/.gitignore b/EE/paddlemetric/.gitignore
new file mode 100644
index 000000000..2f78cf5b6
--- /dev/null
+++ b/EE/paddlemetric/.gitignore
@@ -0,0 +1,2 @@
+*.pyc
+
diff --git a/EE/paddlemetric/.ignore b/EE/paddlemetric/.ignore
new file mode 100644
index 000000000..0d20b6487
--- /dev/null
+++ b/EE/paddlemetric/.ignore
@@ -0,0 +1 @@
+*.pyc
diff --git a/EE/paddlemetric/CHANGELOG.md b/EE/paddlemetric/CHANGELOG.md
new file mode 100644
index 000000000..a843ec779
--- /dev/null
+++ b/EE/paddlemetric/CHANGELOG.md
@@ -0,0 +1,20 @@
+Changelog
+===
+All notable changes to this project are documented below. The format is based on [Keep a Changelog].
+
+Versioning of this project follows [Semantic Versioning] and [PEP-440].
+
+## [v1.0] - 2022-07-04
+---
+### Added
+- Support the testing of some classification modules
+### Changed
+
+
+
+
+[v1.0]: https://console.cloud.baidu-int.com/devops/icode/repos/baidu/ccl/torch2paddle/commits/7476c4f8477d6161f8d5aaaf78f47d6bee990d42
+
+[Keep a Changelog]: https://keepachangelog.com/zh-CN/1.0.0/
+[Semantic Versioning]: https://semver.org/lang/zh-CN/
+[PEP-440]: https://www.python.org/dev/peps/pep-0440/
diff --git a/EE/paddlemetric/src/README.md b/EE/paddlemetric/src/README.md
new file mode 100644
index 000000000..1b8004fc0
--- /dev/null
+++ b/EE/paddlemetric/src/README.md
@@ -0,0 +1,28 @@
+# Paddle Metrics
+
+Metrics library for Paddle, ported from torchmetrics.
+
+## Install
+
+pip install http://public.bcc-bdbl.baidu.com:8000/Package/paddlemetrics-1.0.0b0-py3-none-any.whl
+
+## Document
+
+### Requirements
+
+* paddleextension
+
+### Progress
+
+Testing progress so far:
+
+### Classification
+
+* classification/test_accuracy.py
+* classification/test_f_beta.py
+* classification/test_precision_recall.py
+* classification/test_stat_scores.py
+
+### functional
+
+* functional/test_classification.py
+
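+### Usage
+
+A minimal sketch of the intended API (mirroring torchmetrics, from which this
+package is ported; the toy numbers are illustrative):
+
+```python
+import paddleext.torchapi as B
+from paddlemetrics import Accuracy
+
+preds = B.tensor([0, 1, 1, 0])
+target = B.tensor([0, 1, 0, 0])
+
+metric = Accuracy()
+metric.update(preds, target)
+print(metric.compute())  # 3 of 4 predictions correct -> tensor(0.75)
+```
+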
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/__about__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/__about__.py
new file mode 100644
index 000000000..53a9cfa4f
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/__about__.py
@@ -0,0 +1,27 @@
+__version__ = "0.6.0dev"
+__author__ = "PyTorchLightning et al."
+__author_email__ = "name@pytorchlightning.ai"
+__license__ = "Apache-2.0"
+__copyright__ = f"Copyright (c) 2020-2021, {__author__}."
+__homepage__ = "https://github.com/PyTorchLightning/metrics"
+__docs__ = "PyTorch native Metrics"
+__docs_url__ = "https://paddlemetrics.readthedocs.io/en/stable/"
+__long_doc__ = """
+paddlemetrics is a metrics API created for easy metric development and usage in both PyTorch and
+[PyTorch Lightning](https://pytorch-lightning.readthedocs.io/en/stable/). It was originally a part of
+PyTorch Lightning, but got split off so users could take advantage of the large collection of metrics
+implemented without having to install PyTorch Lightning (even though we would love for you to try it out).
+We currently have around 25+ metrics implemented and we are continuously adding more metrics, both within
+already covered domains (classification, regression etc.) but also new domains (object detection etc.).
+We make sure that all our metrics are rigorously tested such that you can trust them.
+"""
+
+__all__ = [
+    "__author__",
+    "__author_email__",
+    "__copyright__",
+    "__docs__",
+    "__homepage__",
+    "__license__",
+    "__version__",
+]
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/__init__.py
new file mode 100644
index 000000000..ea557086b
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/__init__.py
@@ -0,0 +1,143 @@
+r"""Root package info."""
+import logging as __logging
+import os
+import sys
+
+from paddlemetrics.__about__ import *  # noqa: F401, F403
+
+_logger = __logging.getLogger("paddlemetrics")
+_logger.addHandler(__logging.StreamHandler())
+_logger.setLevel(__logging.INFO)
+
+_PACKAGE_ROOT = os.path.dirname(__file__)
+_PROJECT_ROOT = os.path.dirname(_PACKAGE_ROOT)
+
+from paddlemetrics import functional  # noqa: E402
+from paddlemetrics.aggregation import CatMetric, MaxMetric, MeanMetric, MinMetric, SumMetric  # noqa: E402
+from paddlemetrics.audio import PESQ, PIT, SI_SDR, SI_SNR, SNR, STOI  # noqa: E402
+from paddlemetrics.classification import (  # noqa: E402
+    AUC,
+    AUROC,
+    F1,
+    ROC,
+    Accuracy,
+    AveragePrecision,
+    BinnedAveragePrecision,
+    BinnedPrecisionRecallCurve,
+    BinnedRecallAtFixedPrecision,
+    CalibrationError,
+    CohenKappa,
+    ConfusionMatrix,
+    FBeta,
+    HammingDistance,
+    Hinge,
+    IoU,
+    KLDivergence,
+    MatthewsCorrcoef,
+    Precision,
+    PrecisionRecallCurve,
+    Recall,
+    Specificity,
+    StatScores,
+)
+from paddlemetrics.collections import MetricCollection  # noqa: E402
+#from paddlemetrics.image import FID, IS, KID, LPIPS, PSNR, SSIM  # noqa: E402
+from paddlemetrics.metric import Metric  # noqa: E402
+from paddlemetrics.regression import (  # noqa: E402
+    CosineSimilarity,
+    ExplainedVariance,
+    MeanAbsoluteError,
+    MeanAbsolutePercentageError,
+    MeanSquaredError,
+    MeanSquaredLogError,
+    PearsonCorrcoef,
+    R2Score,
+    SpearmanCorrcoef,
+    SymmetricMeanAbsolutePercentageError,
+    TweedieDevianceScore,
+)
+from paddlemetrics.retrieval import (  # noqa: E402
+    RetrievalFallOut,
+    RetrievalHitRate,
+    RetrievalMAP,
+    RetrievalMRR,
+    RetrievalNormalizedDCG,
+    RetrievalPrecision,
+    RetrievalRecall,
+    RetrievalRPrecision,
+)
+from paddlemetrics.text import WER, BLEUScore, ROUGEScore, SacreBLEUScore  # noqa: E402  (BERTScore disabled)
+from paddlemetrics.wrappers import BootStrapper, MetricTracker, MultioutputWrapper  # noqa: E402
+
+__all__ = [
+    "functional",
+    "Accuracy",
+    "AUC",
+    "AUROC",
+    "AveragePrecision",
+    "BinnedAveragePrecision",
+    "BinnedPrecisionRecallCurve",
+    "BinnedRecallAtFixedPrecision",
+#    "BERTScore",
+    "BLEUScore",
+    "BootStrapper",
+    "CalibrationError",
+    "CatMetric",
+    "CohenKappa",
+
"ConfusionMatrix", + "CosineSimilarity", + "TweedieDevianceScore", + "ExplainedVariance", + "F1", + "FBeta", +# "FID", + "HammingDistance", + "Hinge", + "IoU", +# "IS", +# "KID", + "KLDivergence", +# "LPIPS", + "MatthewsCorrcoef", + "MaxMetric", + "MeanAbsoluteError", + "MeanAbsolutePercentageError", + "MeanMetric", + "MeanSquaredError", + "MeanSquaredLogError", + "Metric", + "MetricCollection", + "MetricTracker", + "MinMetric", + "MultioutputWrapper", + "PearsonCorrcoef", + "PESQ", + "PIT", + "Precision", + "PrecisionRecallCurve", +# "PSNR", + "R2Score", + "Recall", + "RetrievalFallOut", + "RetrievalHitRate", + "RetrievalMAP", + "RetrievalMRR", + "RetrievalNormalizedDCG", + "RetrievalPrecision", + "RetrievalRecall", + "RetrievalRPrecision", + "ROC", + "ROUGEScore", + "SacreBLEUScore", + "SI_SDR", + "SI_SNR", + "SNR", + "SpearmanCorrcoef", + "Specificity", +# "SSIM", + "StatScores", + "STOI", + "SumMetric", + "SymmetricMeanAbsolutePercentageError", + "WER", +] diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/aggregation.py b/EE/paddlemetric/src/build/lib/paddlemetrics/aggregation.py new file mode 100644 index 000000000..a95c51c0e --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/aggregation.py @@ -0,0 +1,445 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import warnings +from typing import Any, Callable, List, Optional, Union + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.metric import Metric +from paddlemetrics.utilities.data import dim_zero_cat + + +class BaseAggregator(Metric): + """Base class for aggregation metrics. + + Args: + fn: string specifying the reduction function + default_value: default tensor value to use for the metric state + nan_strategy: options: + - ``'error'``: if any `nan` values are encounted will give a RuntimeError + - ``'warn'``: if any `nan` values are encounted will give a warning and continue + - ``'ignore'``: all `nan` values are silently removed + - a float: if a float is provided will impude any `nan` values with this value + + compute_on_step: + Forward only calls ``update()`` and returns None if this is + set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. + process_group: + Specify the process group on which synchronization is called. + default: None (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. + When `None`, DDP will be used to perform the allgather. 
+
+    Raises:
+        ValueError:
+            If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
+    """
+
+    value: Tensor
+    is_differentiable = None
+    higher_is_better = None
+
+    def __init__(
+        self,
+        fn: Union[Callable, str],
+        default_value: Union[Tensor, List],
+        nan_strategy: Union[str, float] = "error",
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ):
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+        allowed_nan_strategy = ("error", "warn", "ignore")
+        if nan_strategy not in allowed_nan_strategy and not isinstance(nan_strategy, float):
+            raise ValueError(
+                f"Arg `nan_strategy` should either be a float or one of {allowed_nan_strategy}"
+                f" but got {nan_strategy}."
+            )
+
+        self.nan_strategy = nan_strategy
+        self.add_state("value", default=default_value, dist_reduce_fx=fn)
+
+    def _cast_and_nan_check_input(self, x: Union[float, Tensor]) -> Tensor:
+        """Convert input ``x`` to a tensor if it is not one already, then handle any ``nan``
+        values according to ``nan_strategy`` (raise, warn and remove, silently remove, or impute)."""
+        if not isinstance(x, Tensor):
+            x = B.as_tensor(x, dtype=B.float32, device=self.device)
+
+        nans = B.isnan(x)
+        if any(nans.flatten()):
+            if self.nan_strategy == "error":
+                raise RuntimeError("Encountered `nan` values in tensor")
+            if self.nan_strategy == "warn":
+                warnings.warn("Encountered `nan` values in tensor. Will be removed.", UserWarning)
+                x = x[~nans]
+            elif self.nan_strategy == "ignore":
+                x = x[~nans]
+            else:
+                x[nans] = self.nan_strategy
+
+        return x.float()
+
+    def update(self, value: Union[float, Tensor]) -> None:  # type: ignore
+        """Overwrite in child class."""
+        pass
+
+    def compute(self) -> Tensor:
+        """Compute the aggregated value."""
+        return self.value.squeeze() if isinstance(self.value, Tensor) else self.value
+
+
+class MaxMetric(BaseAggregator):
+    """Aggregate a stream of values into their maximum value.
+
+    Args:
+        nan_strategy: options:
+            - ``'error'``: if any `nan` values are encountered, a RuntimeError is raised
+            - ``'warn'``: if any `nan` values are encountered, a warning is issued and they are removed
+            - ``'ignore'``: all `nan` values are silently removed
+            - a float: if a float is provided, any `nan` values are imputed with this value
+
+        compute_on_step:
+            Forward only calls ``update()`` and returns None if this is
+            set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step.
+        process_group:
+            Specify the process group on which synchronization is called.
+            default: None (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state.
+            When `None`, DDP will be used to perform the allgather.
+
+    Raises:
+        ValueError:
+            If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
+
+    Example:
+        >>> from paddlemetrics import MaxMetric
+        >>> metric = MaxMetric()
+        >>> metric.update(1)
+        >>> metric.update(B.tensor([2, 3]))
+        >>> metric.compute()
+        tensor(3.)
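+
+        A float ``nan_strategy`` imputes ``nan`` values before aggregation
+        (illustrative):
+
+        >>> metric = MaxMetric(nan_strategy=0.0)
+        >>> metric.update(B.tensor([float("nan"), 5.0]))
+        >>> metric.compute()
+        tensor(5.)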
+    """
+
+    def __init__(
+        self,
+        nan_strategy: Union[str, float] = "warn",
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ):
+        super().__init__(
+            "max",
+            -B.tensor(float("inf")),
+            nan_strategy,
+            compute_on_step,
+            dist_sync_on_step,
+            process_group,
+            dist_sync_fn,
+        )
+
+    def update(self, value: Union[float, Tensor]) -> None:  # type: ignore
+        """Update state with data.
+
+        Args:
+            value: Either a float or tensor containing data. Additional tensor
+                dimensions will be flattened
+        """
+        value = self._cast_and_nan_check_input(value)
+        if value.numel():  # make sure tensor is not empty
+            self.value = B.max(self.value, B.max(value))
+
+
+class MinMetric(BaseAggregator):
+    """Aggregate a stream of values into their minimum value.
+
+    Args:
+        nan_strategy: options:
+            - ``'error'``: if any `nan` values are encountered, a RuntimeError is raised
+            - ``'warn'``: if any `nan` values are encountered, a warning is issued and they are removed
+            - ``'ignore'``: all `nan` values are silently removed
+            - a float: if a float is provided, any `nan` values are imputed with this value
+
+        compute_on_step:
+            Forward only calls ``update()`` and returns None if this is
+            set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step.
+        process_group:
+            Specify the process group on which synchronization is called.
+            default: None (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state.
+            When `None`, DDP will be used to perform the allgather.
+
+    Raises:
+        ValueError:
+            If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
+
+    Example:
+        >>> from paddlemetrics import MinMetric
+        >>> metric = MinMetric()
+        >>> metric.update(1)
+        >>> metric.update(B.tensor([2, 3]))
+        >>> metric.compute()
+        tensor(1.)
+    """
+
+    def __init__(
+        self,
+        nan_strategy: Union[str, float] = "warn",
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ):
+        super().__init__(
+            "min",
+            B.tensor(float("inf")),
+            nan_strategy,
+            compute_on_step,
+            dist_sync_on_step,
+            process_group,
+            dist_sync_fn,
+        )
+
+    def update(self, value: Union[float, Tensor]) -> None:  # type: ignore
+        """Update state with data.
+
+        Args:
+            value: Either a float or tensor containing data. Additional tensor
+                dimensions will be flattened
+        """
+        value = self._cast_and_nan_check_input(value)
+        if value.numel():  # make sure tensor is not empty
+            self.value = B.min(self.value, B.min(value))
+
+
+class SumMetric(BaseAggregator):
+    """Aggregate a stream of values into their sum.
+
+    Args:
+        nan_strategy: options:
+            - ``'error'``: if any `nan` values are encountered, a RuntimeError is raised
+            - ``'warn'``: if any `nan` values are encountered, a warning is issued and they are removed
+            - ``'ignore'``: all `nan` values are silently removed
+            - a float: if a float is provided, any `nan` values are imputed with this value
+
+        compute_on_step:
+            Forward only calls ``update()`` and returns None if this is
+            set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step.
+        process_group:
+            Specify the process group on which synchronization is called.
+            default: None (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state.
+            When `None`, DDP will be used to perform the allgather.
+
+    Raises:
+        ValueError:
+            If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
+
+    Example:
+        >>> from paddlemetrics import SumMetric
+        >>> metric = SumMetric()
+        >>> metric.update(1)
+        >>> metric.update(B.tensor([2, 3]))
+        >>> metric.compute()
+        tensor(6.)
+    """
+
+    def __init__(
+        self,
+        nan_strategy: Union[str, float] = "warn",
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ):
+        super().__init__(
+            "sum", B.zeros(1), nan_strategy, compute_on_step, dist_sync_on_step, process_group, dist_sync_fn
+        )
+
+    def update(self, value: Union[float, Tensor]) -> None:  # type: ignore
+        """Update state with data.
+
+        Args:
+            value: Either a float or tensor containing data. Additional tensor
+                dimensions will be flattened
+        """
+        value = self._cast_and_nan_check_input(value)
+        self.value += value.sum()
+
+
+class CatMetric(BaseAggregator):
+    """Concatenate a stream of values.
+
+    Args:
+        nan_strategy: options:
+            - ``'error'``: if any `nan` values are encountered, a RuntimeError is raised
+            - ``'warn'``: if any `nan` values are encountered, a warning is issued and they are removed
+            - ``'ignore'``: all `nan` values are silently removed
+            - a float: if a float is provided, any `nan` values are imputed with this value
+
+        compute_on_step:
+            Forward only calls ``update()`` and returns None if this is
+            set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step.
+        process_group:
+            Specify the process group on which synchronization is called.
+            default: None (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state.
+            When `None`, DDP will be used to perform the allgather.
+
+    Raises:
+        ValueError:
+            If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
+
+    Example:
+        >>> from paddlemetrics import CatMetric
+        >>> metric = CatMetric()
+        >>> metric.update(1)
+        >>> metric.update(B.tensor([2, 3]))
+        >>> metric.compute()
+        tensor([1., 2., 3.])
+    """
+
+    def __init__(
+        self,
+        nan_strategy: Union[str, float] = "warn",
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ):
+        super().__init__("cat", [], nan_strategy, compute_on_step, dist_sync_on_step, process_group, dist_sync_fn)
+
+    def update(self, value: Union[float, Tensor]) -> None:  # type: ignore
+        """Update state with data.
+
+        Args:
+            value: Either a float or tensor containing data. Additional tensor
+                dimensions will be flattened
+        """
+        value = self._cast_and_nan_check_input(value)
+        if value.numel():  # skip empty tensors
+            self.value.append(value)
+
+    def compute(self) -> Tensor:
+        """Compute the aggregated value."""
+        if isinstance(self.value, list) and self.value:
+            return dim_zero_cat(self.value)
+        return self.value
+
+
+class MeanMetric(BaseAggregator):
+    """Aggregate a stream of values into their mean value.
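+
+    The mean is tracked as a weighted running average, i.e.
+    ``sum(value * weight) / sum(weight)`` accumulated over all calls to ``update``.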
+
+    Args:
+        nan_strategy: options:
+            - ``'error'``: if any `nan` values are encountered, a RuntimeError is raised
+            - ``'warn'``: if any `nan` values are encountered, a warning is issued and they are removed
+            - ``'ignore'``: all `nan` values are silently removed
+            - a float: if a float is provided, any `nan` values are imputed with this value
+
+        compute_on_step:
+            Forward only calls ``update()`` and returns None if this is
+            set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step.
+        process_group:
+            Specify the process group on which synchronization is called.
+            default: None (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state.
+            When `None`, DDP will be used to perform the allgather.
+
+    Raises:
+        ValueError:
+            If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
+
+    Example:
+        >>> from paddlemetrics import MeanMetric
+        >>> metric = MeanMetric()
+        >>> metric.update(1)
+        >>> metric.update(B.tensor([2, 3]))
+        >>> metric.compute()
+        tensor([2.])
+    """
+
+    def __init__(
+        self,
+        nan_strategy: Union[str, float] = "warn",
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ):
+        super().__init__(
+            "sum", B.zeros(1), nan_strategy, compute_on_step, dist_sync_on_step, process_group, dist_sync_fn
+        )
+        self.add_state("weight", default=B.zeros(1), dist_reduce_fx="sum")
+
+    def update(self, value: Union[float, Tensor], weight: Union[float, Tensor] = 1.0) -> None:  # type: ignore
+        """Update state with data.
+
+        Args:
+            value: Either a float or tensor containing data. Additional tensor
+                dimensions will be flattened
+            weight: Either a float or tensor containing weights for calculating
+                the average. Shape of weight should be able to broadcast with
+                the shape of `value`. Defaults to `1.0`, corresponding to a simple
+                (unweighted) average.
+        """
+        value = self._cast_and_nan_check_input(value)
+        weight = self._cast_and_nan_check_input(weight)
+
+        # broadcast weight to value's shape
+        if not hasattr(B, "broadcast_to"):
+            if weight.shape == ():
+                weight = B.ones_like(value) * weight
+            if weight.shape != value.shape:
+                raise ValueError("Broadcasting not supported on PyTorch <1.8")
+        else:
+            weight = B.broadcast_to(weight, value.shape)
+
+        self.value += (value * weight).sum()
+        self.weight += weight.sum()
+
+    def compute(self) -> Tensor:
+        """Compute the aggregated value."""
+        return self.value / self.weight
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/audio/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/audio/__init__.py
new file mode 100644
index 000000000..efd0b451e
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/audio/__init__.py
@@ -0,0 +1,19 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
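+# Example usage (illustrative sketch, mirroring the per-metric docstrings):
+#
+#     import paddleext.torchapi as B
+#     from paddlemetrics.audio import SNR
+#
+#     snr = SNR()
+#     snr.update(B.randn(8000), B.randn(8000))
+#     print(snr.compute())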
+from paddlemetrics.audio.pesq import PESQ  # noqa: F401
+from paddlemetrics.audio.pit import PIT  # noqa: F401
+from paddlemetrics.audio.si_sdr import SI_SDR  # noqa: F401
+from paddlemetrics.audio.si_snr import SI_SNR  # noqa: F401
+from paddlemetrics.audio.snr import SNR  # noqa: F401
+from paddlemetrics.audio.stoi import STOI  # noqa: F401
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/audio/pesq.py b/EE/paddlemetric/src/build/lib/paddlemetrics/audio/pesq.py
new file mode 100644
index 000000000..d45fab53d
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/audio/pesq.py
@@ -0,0 +1,130 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, Optional
+
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.functional.audio.pesq import pesq
+from paddlemetrics.metric import Metric
+from paddlemetrics.utilities.imports import _PESQ_AVAILABLE
+
+
+class PESQ(Metric):
+    """PESQ (Perceptual Evaluation of Speech Quality)
+
+    This is a wrapper for the ``pesq`` package [1]. Note that the input will be moved to `cpu`
+    to perform the metric calculation.
+
+    .. note:: using this metric requires you to have ``pesq`` installed. Either install as ``pip install
+        paddlemetrics[audio]`` or ``pip install pesq``
+
+    Forward accepts
+
+    - ``preds``: ``shape [...,time]``
+    - ``target``: ``shape [...,time]``
+
+    Args:
+        fs:
+            sampling frequency, should be 16000 or 8000 (Hz)
+        mode:
+            'wb' (wide-band) or 'nb' (narrow-band)
+        compute_on_step:
+            Forward only calls ``update()`` and returns ``None`` if this is set to ``False``.
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step
+        process_group:
+            Specify the process group on which synchronization is called.
+            default: ``None`` (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When ``None``, DDP
+            will be used to perform the allgather
+
+    Raises:
+        ValueError:
+            If ``pesq`` package is not installed
+        ValueError:
+            If ``fs`` is not either ``8000`` or ``16000``
+        ValueError:
+            If ``mode`` is not either ``"wb"`` or ``"nb"``
+
+    Example:
+        >>> from paddlemetrics.audio import PESQ
+        >>> import paddleext.torchapi as B
+        >>> g = B.manual_seed(1)
+        >>> preds = B.randn(8000)
+        >>> target = B.randn(8000)
+        >>> nb_pesq = PESQ(8000, 'nb')
+        >>> nb_pesq(preds, target)
+        tensor(2.2076)
+        >>> wb_pesq = PESQ(16000, 'wb')
+        >>> wb_pesq(preds, target)
+        tensor(1.7359)
+
+    References:
+        [1] https://github.com/ludlows/python-pesq
+    """
+
+    sum_pesq: Tensor
+    total: Tensor
+    is_differentiable = False
+    higher_is_better = True
+
+    def __init__(
+        self,
+        fs: int,
+        mode: str,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Optional[Callable[[Tensor], Tensor]] = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+        if not _PESQ_AVAILABLE:
+            raise ValueError(
+                "PESQ metric requires that pesq is installed."
+                " Either install as `pip install paddlemetrics[audio]` or `pip install pesq`"
+            )
+        if fs not in (8000, 16000):
+            raise ValueError(f"Expected argument `fs` to either be 8000 or 16000 but got {fs}")
+        self.fs = fs
+        if mode not in ("wb", "nb"):
+            raise ValueError(f"Expected argument `mode` to either be 'wb' or 'nb' but got {mode}")
+        self.mode = mode
+
+        self.add_state("sum_pesq", default=tensor(0.0), dist_reduce_fx="sum")
+        self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Update state with predictions and targets.
+
+        Args:
+            preds: Predictions from model
+            target: Ground truth values
+        """
+        pesq_batch = pesq(preds, target, self.fs, self.mode, False).to(self.sum_pesq.device)
+
+        self.sum_pesq += pesq_batch.sum()
+        self.total += pesq_batch.numel()
+
+    def compute(self) -> Tensor:
+        """Computes average PESQ."""
+        return self.sum_pesq / self.total
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/audio/pit.py b/EE/paddlemetric/src/build/lib/paddlemetrics/audio/pit.py
new file mode 100644
index 000000000..9d9dc7576
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/audio/pit.py
@@ -0,0 +1,113 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, Dict, Optional
+
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.functional.audio.pit import pit
+from paddlemetrics.metric import Metric
+
+
+class PIT(Metric):
+    """Permutation invariant training (PIT). The PIT metric implements the Permutation Invariant
+    Training method [1] from the speech separation field, in order to calculate audio metrics in a
+    permutation invariant way.
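+    Concretely, the chosen ``metric_func`` is evaluated for every permutation of the
+    speaker dimension and the best value (according to ``eval_func``) is kept, so the
+    result does not depend on the ordering of the estimated sources.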
+
+    Forward accepts
+
+    - ``preds``: ``shape [batch, spk, ...]``
+    - ``target``: ``shape [batch, spk, ...]``
+
+    Args:
+        metric_func:
+            a metric function that accepts a batch of target and estimate, i.e. metric_func(preds[:, i, ...],
+            target[:, j, ...]), and returns a batch of metric tensors [batch]
+        eval_func:
+            the function to find the best permutation, can be 'min' or 'max', i.e. the smaller the better
+            or the larger the better.
+        compute_on_step:
+            Forward only calls ``update()`` and returns None if this is set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step.
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When `None`, DDP
+            will be used to perform the allgather.
+        kwargs:
+            additional args for metric_func
+
+    Returns:
+        average PIT metric
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics import PIT
+        >>> from paddlemetrics.functional import si_snr
+        >>> _ = B.manual_seed(42)
+        >>> preds = B.randn(3, 2, 5)  # [batch, spk, time]
+        >>> target = B.randn(3, 2, 5)  # [batch, spk, time]
+        >>> pit = PIT(si_snr, 'max')
+        >>> pit(preds, target)
+        tensor(-2.1065)
+
+    Reference:
+        [1] D. Yu, M. Kolbaek, Z.-H. Tan, J. Jensen, Permutation invariant training of deep models for
+        speaker-independent multi-talker speech separation, in: 2017 IEEE Int. Conf. Acoust. Speech
+        Signal Process. ICASSP, IEEE, New Orleans, LA, 2017: pp. 241–245. https://doi.org/10.1109/ICASSP.2017.7952154.
+    """
+
+    is_differentiable = True
+    sum_pit_metric: Tensor
+    total: Tensor
+
+    def __init__(
+        self,
+        metric_func: Callable,
+        eval_func: str = "max",
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Optional[Callable[[Tensor], Tensor]] = None,
+        **kwargs: Dict[str, Any],
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+        self.metric_func = metric_func
+        self.eval_func = eval_func
+        self.kwargs = kwargs
+
+        self.add_state("sum_pit_metric", default=tensor(0.0), dist_reduce_fx="sum")
+        self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Update state with predictions and targets.
+
+        Args:
+            preds: Predictions from model
+            target: Ground truth values
+        """
+        pit_metric = pit(preds, target, self.metric_func, self.eval_func, **self.kwargs)[0]
+
+        self.sum_pit_metric += pit_metric.sum()
+        self.total += pit_metric.numel()
+
+    def compute(self) -> Tensor:
+        """Computes average PIT metric."""
+        return self.sum_pit_metric / self.total
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/audio/si_sdr.py b/EE/paddlemetric/src/build/lib/paddlemetrics/audio/si_sdr.py
new file mode 100644
index 000000000..f6a463780
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/audio/si_sdr.py
@@ -0,0 +1,105 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, Optional
+
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.functional.audio.si_sdr import si_sdr
+from paddlemetrics.metric import Metric
+
+
+class SI_SDR(Metric):
+    """Scale-invariant signal-to-distortion ratio (SI-SDR). The SI-SDR value is in general considered an overall
+    measure of how good a source sounds.
+
+    Forward accepts
+
+    - ``preds``: ``shape [...,time]``
+    - ``target``: ``shape [...,time]``
+
+    Args:
+        zero_mean:
+            whether to zero-mean target and preds before computing the metric
+        compute_on_step:
+            Forward only calls ``update()`` and returns None if this is set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step.
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When `None`, DDP
+            will be used to perform the allgather.
+
+    Raises:
+        TypeError:
+            if target and preds have a different shape
+
+    Returns:
+        average si-sdr value
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics import SI_SDR
+        >>> target = B.tensor([3.0, -0.5, 2.0, 7.0])
+        >>> preds = B.tensor([2.5, 0.0, 2.0, 8.0])
+        >>> si_sdr = SI_SDR()
+        >>> si_sdr_val = si_sdr(preds, target)
+        >>> si_sdr_val
+        tensor(18.4030)
+
+    References:
+        [1] Le Roux, Jonathan, et al. "SDR half-baked or well done." IEEE International Conference on Acoustics, Speech
+        and Signal Processing (ICASSP) 2019.
+    """
+
+    is_differentiable = True
+    higher_is_better = True
+    sum_si_sdr: Tensor
+    total: Tensor
+
+    def __init__(
+        self,
+        zero_mean: bool = False,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Optional[Callable[[Tensor], Tensor]] = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+        self.zero_mean = zero_mean
+
+        self.add_state("sum_si_sdr", default=tensor(0.0), dist_reduce_fx="sum")
+        self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Update state with predictions and targets.
+
+        Args:
+            preds: Predictions from model
+            target: Ground truth values
+        """
+        si_sdr_batch = si_sdr(preds=preds, target=target, zero_mean=self.zero_mean)
+
+        self.sum_si_sdr += si_sdr_batch.sum()
+        self.total += si_sdr_batch.numel()
+
+    def compute(self) -> Tensor:
+        """Computes average SI-SDR."""
+        return self.sum_si_sdr / self.total
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/audio/si_snr.py b/EE/paddlemetric/src/build/lib/paddlemetrics/audio/si_snr.py
new file mode 100644
index 000000000..31747a28d
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/audio/si_snr.py
@@ -0,0 +1,101 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, Optional
+
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.functional.audio.si_snr import si_snr
+from paddlemetrics.metric import Metric
+
+
+class SI_SNR(Metric):
+    """Scale-invariant signal-to-noise ratio (SI-SNR).
+
+    Forward accepts
+
+    - ``preds``: ``shape [...,time]``
+    - ``target``: ``shape [...,time]``
+
+    Args:
+        compute_on_step:
+            Forward only calls ``update()`` and returns None if this is set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step.
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When `None`, DDP
+            will be used to perform the allgather.
+
+    Raises:
+        TypeError:
+            if target and preds have a different shape
+
+    Returns:
+        average si-snr value
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics import SI_SNR
+        >>> target = B.tensor([3.0, -0.5, 2.0, 7.0])
+        >>> preds = B.tensor([2.5, 0.0, 2.0, 8.0])
+        >>> si_snr = SI_SNR()
+        >>> si_snr_val = si_snr(preds, target)
+        >>> si_snr_val
+        tensor(15.0918)
+
+    References:
+        [1] Y. Luo and N. Mesgarani, "TaSNet: Time-Domain Audio Separation Network for Real-Time, Single-Channel Speech
+        Separation," 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 2018, pp.
+        696-700, doi: 10.1109/ICASSP.2018.8462116.
+    """
+
+    is_differentiable = True
+    sum_si_snr: Tensor
+    total: Tensor
+    higher_is_better = True
+
+    def __init__(
+        self,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Optional[Callable[[Tensor], Tensor]] = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+
+        self.add_state("sum_si_snr", default=tensor(0.0), dist_reduce_fx="sum")
+        self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Update state with predictions and targets.
+
+        Args:
+            preds: Predictions from model
+            target: Ground truth values
+        """
+        si_snr_batch = si_snr(preds=preds, target=target)
+
+        self.sum_si_snr += si_snr_batch.sum()
+        self.total += si_snr_batch.numel()
+
+    def compute(self) -> Tensor:
+        """Computes average SI-SNR."""
+        return self.sum_si_snr / self.total
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/audio/snr.py b/EE/paddlemetric/src/build/lib/paddlemetrics/audio/snr.py
new file mode 100644
index 000000000..683cb8bf3
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/audio/snr.py
@@ -0,0 +1,110 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, Optional
+
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.functional.audio.snr import snr
+from paddlemetrics.metric import Metric
+
+
+class SNR(Metric):
+    r"""Signal-to-noise ratio (SNR_):
+
+    .. math::
+        \text{SNR} = \frac{P_{signal}}{P_{noise}}
+
+    where :math:`P` denotes the power of each signal. The SNR metric compares the level
+    of the desired signal to the level of background noise. Therefore, a high value of
+    SNR means that the audio is clear.
+
+    Forward accepts
+
+    - ``preds``: ``shape [..., time]``
+    - ``target``: ``shape [..., time]``
+
+    Args:
+        zero_mean:
+            whether to zero-mean target and preds before computing the metric
+        compute_on_step:
+            Forward only calls ``update()`` and returns None if this is set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step.
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When `None`, DDP
+            will be used to perform the allgather.
+
+    Raises:
+        TypeError:
+            if target and preds have a different shape
+
+    Returns:
+        average snr value
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics import SNR
+        >>> target = B.tensor([3.0, -0.5, 2.0, 7.0])
+        >>> preds = B.tensor([2.5, 0.0, 2.0, 8.0])
+        >>> snr = SNR()
+        >>> snr_val = snr(preds, target)
+        >>> snr_val
+        tensor(16.1805)
+
+    References:
+        [1] Le Roux, Jonathan, et al. "SDR half-baked or well done." IEEE International Conference on Acoustics, Speech
+        and Signal Processing (ICASSP) 2019.
+
+    """
+    is_differentiable = True
+    sum_snr: Tensor
+    total: Tensor
+
+    def __init__(
+        self,
+        zero_mean: bool = False,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Optional[Callable[[Tensor], Tensor]] = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+        self.zero_mean = zero_mean
+
+        self.add_state("sum_snr", default=tensor(0.0), dist_reduce_fx="sum")
+        self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Update state with predictions and targets.
+
+        Args:
+            preds: Predictions from model
+            target: Ground truth values
+        """
+        snr_batch = snr(preds=preds, target=target, zero_mean=self.zero_mean)
+
+        self.sum_snr += snr_batch.sum()
+        self.total += snr_batch.numel()
+
+    def compute(self) -> Tensor:
+        """Computes average SNR."""
+        return self.sum_snr / self.total
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/audio/stoi.py b/EE/paddlemetric/src/build/lib/paddlemetrics/audio/stoi.py
new file mode 100644
index 000000000..1c8cf3788
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/audio/stoi.py
@@ -0,0 +1,133 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, Optional
+
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.functional.audio.stoi import stoi
+from paddlemetrics.metric import Metric
+from paddlemetrics.utilities.imports import _PYSTOI_AVAILABLE
+
+
+class STOI(Metric):
+    r"""STOI (Short Term Objective Intelligibility, see [2,3]), a wrapper for the pystoi package [1].
+    Note that input will be moved to `cpu` to perform the metric calculation.
+
+    Intelligibility measure which is highly correlated with the intelligibility of degraded speech signals, e.g., due
+    to additive noise, single/multi-channel noise reduction, binary masking and vocoded speech as in CI simulations.
+    The STOI-measure is intrusive, i.e., a function of the clean and degraded speech signals. STOI may be a good
+    alternative to the speech intelligibility index (SII) or the speech transmission index (STI), when you are
+    interested in the effect of nonlinear processing on noisy speech, e.g., noise reduction or binary masking
+    algorithms, on speech intelligibility. Description taken from [Cees Taal's website](http://www.ceestaal.nl/code/).
+
+    .. note:: using this metric requires you to have ``pystoi`` installed. Either install as ``pip install
+        paddlemetrics[audio]`` or ``pip install pystoi``
+
+    Forward accepts
+
+    - ``preds``: ``shape [...,time]``
+    - ``target``: ``shape [...,time]``
+
+    Args:
+        fs:
+            sampling frequency (Hz)
+        extended:
+            whether to use the extended STOI described in [4]
+        compute_on_step:
+            Forward only calls ``update()`` and returns None if this is set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step.
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When `None`, DDP
+            will be used to perform the allgather.
+
+    Returns:
+        average STOI value
+
+    Raises:
+        ModuleNotFoundError:
+            If ``pystoi`` package is not installed
+
+    Example:
+        >>> from paddlemetrics.audio import STOI
+        >>> import paddleext.torchapi as B
+        >>> g = B.manual_seed(1)
+        >>> preds = B.randn(8000)
+        >>> target = B.randn(8000)
+        >>> stoi = STOI(8000, False)
+        >>> stoi(preds, target)
+        tensor(-0.0100)
+
+    References:
+        [1] https://github.com/mpariente/pystoi
+
+        [2] C.H.Taal, R.C.Hendriks, R.Heusdens, J.Jensen 'A Short-Time Objective Intelligibility Measure for
+        Time-Frequency Weighted Noisy Speech', ICASSP 2010, Texas, Dallas.
+
+        [3] C.H.Taal, R.C.Hendriks, R.Heusdens, J.Jensen 'An Algorithm for Intelligibility Prediction of
+        Time-Frequency Weighted Noisy Speech', IEEE Transactions on Audio, Speech, and Language Processing, 2011.
+
+        [4] J. Jensen and C. H. Taal, 'An Algorithm for Predicting the Intelligibility of Speech Masked by Modulated
+        Noise Maskers', IEEE Transactions on Audio, Speech and Language Processing, 2016.
+
+    """
+    sum_stoi: Tensor
+    total: Tensor
+    is_differentiable = False
+    higher_is_better = True
+
+    def __init__(
+        self,
+        fs: int,
+        extended: bool = False,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Optional[Callable[[Tensor], Tensor]] = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+        if not _PYSTOI_AVAILABLE:
+            raise ModuleNotFoundError(
+                "STOI metric requires that pystoi is installed."
+                " Either install as `pip install paddlemetrics[audio]` or `pip install pystoi`"
+            )
+        self.fs = fs
+        self.extended = extended
+
+        self.add_state("sum_stoi", default=tensor(0.0), dist_reduce_fx="sum")
+        self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Update state with predictions and targets.
+
+        Args:
+            preds: Predictions from model
+            target: Ground truth values
+        """
+        stoi_batch = stoi(preds, target, self.fs, self.extended, False).to(self.sum_stoi.device)
+
+        self.sum_stoi += stoi_batch.sum()
+        self.total += stoi_batch.numel()
+
+    def compute(self) -> Tensor:
+        """Computes average STOI."""
+        return self.sum_stoi / self.total
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/__init__.py
new file mode 100644
index 000000000..e928018b6
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/__init__.py
@@ -0,0 +1,34 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
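+# Example usage (illustrative sketch, mirroring the Accuracy docstring):
+#
+#     import paddleext.torchapi as B
+#     from paddlemetrics.classification import Accuracy
+#
+#     accuracy = Accuracy()
+#     accuracy.update(B.tensor([0, 2, 1, 3]), B.tensor([0, 1, 2, 3]))
+#     print(accuracy.compute())  # tensor(0.5000)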
+from paddlemetrics.classification.accuracy import Accuracy # noqa: F401 +from paddlemetrics.classification.auc import AUC # noqa: F401 +from paddlemetrics.classification.auroc import AUROC # noqa: F401 +from paddlemetrics.classification.average_precision import AveragePrecision # noqa: F401 +from paddlemetrics.classification.binned_precision_recall import BinnedAveragePrecision # noqa: F401 +from paddlemetrics.classification.binned_precision_recall import BinnedPrecisionRecallCurve # noqa: F401 +from paddlemetrics.classification.binned_precision_recall import BinnedRecallAtFixedPrecision # noqa: F401 +from paddlemetrics.classification.calibration_error import CalibrationError # noqa: F401 +from paddlemetrics.classification.cohen_kappa import CohenKappa # noqa: F401 +from paddlemetrics.classification.confusion_matrix import ConfusionMatrix # noqa: F401 +from paddlemetrics.classification.f_beta import F1, FBeta # noqa: F401 +from paddlemetrics.classification.hamming_distance import HammingDistance # noqa: F401 +from paddlemetrics.classification.hinge import Hinge # noqa: F401 +from paddlemetrics.classification.iou import IoU # noqa: F401 +from paddlemetrics.classification.kl_divergence import KLDivergence # noqa: F401 +from paddlemetrics.classification.matthews_corrcoef import MatthewsCorrcoef # noqa: F401 +from paddlemetrics.classification.precision_recall import Precision, Recall # noqa: F401 +from paddlemetrics.classification.precision_recall_curve import PrecisionRecallCurve # noqa: F401 +from paddlemetrics.classification.roc import ROC # noqa: F401 +from paddlemetrics.classification.specificity import Specificity # noqa: F401 +from paddlemetrics.classification.stat_scores import StatScores # noqa: F401 diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/accuracy.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/accuracy.py new file mode 100644 index 000000000..325a18d42 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/accuracy.py @@ -0,0 +1,276 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional + +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.classification.accuracy import ( + _accuracy_compute, + _accuracy_update, + _check_subset_validity, + _mode, + _subset_accuracy_compute, + _subset_accuracy_update, +) +from paddlemetrics.utilities.enums import DataType + +from paddlemetrics.classification.stat_scores import StatScores # isort:skip + + +class Accuracy(StatScores): + r""" + Computes Accuracy_: + + .. math:: + \text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i) + + Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a + tensor of predictions. 
+ + For multi-class and multi-dimensional multi-class data with probability or logits predictions, the + parameter ``top_k`` generalizes this metric to a Top-K accuracy metric: for each sample the + top-K highest probability or logit score items are considered to find the correct label. + + For multi-label and multi-dimensional multi-class inputs, this metric computes the "global" + accuracy by default, which counts all labels or sub-samples separately. This can be + changed to subset accuracy (which requires all labels or sub-samples in the sample to + be correctly predicted) by setting ``subset_accuracy=True``. + + Accepts all input types listed in :ref:`references/modules:input types`. + + Args: + num_classes: + Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods. + threshold: + Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case + of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. + average: + Defines the reduction that is applied. Should be one of the following: + + - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes. + - ``'macro'``: Calculate the metric for each class separately, and average the + metrics across classes (with equal weights for each class). + - ``'weighted'``: Calculate the metric for each class separately, and average the + metrics across classes, weighting each class by its support (``tp + fn``). + - ``'none'`` or ``None``: Calculate the metric for each class separately, and return + the metric for every class. + - ``'samples'``: Calculate the metric for each sample, and average the metrics + across samples (with equal weights for each sample). + + .. note:: What is considered a sample in the multi-dimensional multi-class case + depends on the value of ``mdmc_average``. + + .. note:: If ``'none'`` and a given class doesn't occur in the `preds` or `target`, + the value for the class will be ``nan``. + + mdmc_average: + Defines how averaging is done for multi-dimensional multi-class inputs (on top of the + ``average`` parameter). Should be one of the following: + + - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional + multi-class. + + - ``'samplewise'``: In this case, the statistics are computed separately for each + sample on the ``N`` axis, and then averaged over samples. + The computation for each sample is done by treating the flattened extra axes ``...`` + (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample, + and computing the metric for the sample based on that. + + - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs + (see :ref:`references/modules:input types`) + are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they + were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. + + ignore_index: + Integer specifying a target class to ignore. If given, this class index does not contribute + to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` + or ``'none'``, the score for the ignored class will be returned as ``nan``. + + top_k: + Number of highest probability or logit score predictions considered to find the correct label, + relevant only for (multi-dimensional) multi-class inputs. The + default value (``None``) will be interpreted as 1 for these inputs. 
+
+            Should be left at default (``None``) for all other types of inputs.
+
+        multiclass:
+            Used only in certain special cases, where you want to treat inputs as a different type
+            than what they appear to be. See the parameter's
+            :ref:`documentation section `
+            for a more detailed explanation and examples.
+
+        subset_accuracy:
+            Whether to compute subset accuracy for multi-label and multi-dimensional
+            multi-class inputs (has no effect for other input types).
+
+            - For multi-label inputs, if the parameter is set to ``True``, then all labels for
+              each sample must be correctly predicted for the sample to count as correct. If it
+              is set to ``False``, then all labels are counted separately - this is equivalent to
+              flattening inputs beforehand (i.e. ``preds = preds.flatten()`` and same for ``target``).
+
+            - For multi-dimensional multi-class inputs, if the parameter is set to ``True``, then all
+              sub-samples (on the extra axis) must be correct for the sample to be counted as correct.
+              If it is set to ``False``, then all sub-samples are counted separately - this is equivalent,
+              in the case of label predictions, to flattening the inputs beforehand (i.e.
+              ``preds = preds.flatten()`` and same for ``target``). Note that the ``top_k`` parameter
+              still applies in both cases, if set.
+
+        compute_on_step:
+            Forward only calls ``update()`` and returns ``None`` if this is set to ``False``.
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step
+        process_group:
+            Specify the process group on which synchronization is called.
+            default: ``None`` (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When ``None``, DDP
+            will be used to perform the allgather
+
+    Raises:
+        ValueError:
+            If ``top_k`` is not an ``integer`` larger than ``0``.
+        ValueError:
+            If ``average`` is none of ``"micro"``, ``"macro"``, ``"weighted"``, ``"samples"``, ``"none"``, ``None``.
+        ValueError:
+            If two different input modes are provided, e.g. using ``multi-label`` with ``multi-class``.
+        ValueError:
+            If ``top_k`` parameter is set for ``multi-label`` inputs.
+ + Example: + >>> import paddleext.torchapi as B + >>> from paddlemetrics import Accuracy + >>> target = B.tensor([0, 1, 2, 3]) + >>> preds = B.tensor([0, 2, 1, 3]) + >>> accuracy = Accuracy() + >>> accuracy(preds, target) + tensor(0.5000) + + >>> target = B.tensor([0, 1, 2]) + >>> preds = B.tensor([[0.1, 0.9, 0], [0.3, 0.1, 0.6], [0.2, 0.5, 0.3]]) + >>> accuracy = Accuracy(top_k=2) + >>> accuracy(preds, target) + tensor(0.6667) + + """ + is_differentiable = False + correct: Tensor + total: Tensor + + def __init__( + self, + threshold: float = 0.5, + num_classes: Optional[int] = None, + average: str = "micro", + mdmc_average: Optional[str] = "global", + ignore_index: Optional[int] = None, + top_k: Optional[int] = None, + multiclass: Optional[bool] = None, + subset_accuracy: bool = False, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + allowed_average = ["micro", "macro", "weighted", "samples", "none", None] + if average not in allowed_average: + raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.") + + super().__init__( + reduce="macro" if average in ["weighted", "none", None] else average, + mdmc_reduce=mdmc_average, + threshold=threshold, + top_k=top_k, + num_classes=num_classes, + multiclass=multiclass, + ignore_index=ignore_index, + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + self.add_state("correct", default=tensor(0), dist_reduce_fx="sum") + self.add_state("total", default=tensor(0), dist_reduce_fx="sum") + + if top_k is not None and (not isinstance(top_k, int) or top_k <= 0): + raise ValueError(f"The `top_k` should be an integer larger than 0, got {top_k}") + + self.average = average + self.threshold = threshold + self.top_k = top_k + self.subset_accuracy = subset_accuracy + self.mode: DataType = None # type: ignore + self.multiclass = multiclass + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets. See + :ref:`references/modules:input types` for more information on input + types. 
+
+        Args:
+            preds: Predictions from model (logits, probabilities, or labels)
+            target: Ground truth labels
+        """
+        # determine the mode of the data (binary, multi-label, multi-class, multi-dim multi-class)
+        mode = _mode(preds, target, self.threshold, self.top_k, self.num_classes, self.multiclass)
+
+        if not self.mode:
+            self.mode = mode
+        elif self.mode != mode:
+            raise ValueError(f"You cannot use {mode} inputs with {self.mode} inputs.")
+
+        if self.subset_accuracy and not _check_subset_validity(self.mode):
+            self.subset_accuracy = False
+
+        if self.subset_accuracy:
+            correct, total = _subset_accuracy_update(preds, target, threshold=self.threshold, top_k=self.top_k)
+            self.correct += correct
+            self.total += total
+        else:
+            if not self.mode:
+                raise RuntimeError("The mode of the inputs could not be determined.")
+            tp, fp, tn, fn = _accuracy_update(
+                preds,
+                target,
+                reduce=self.reduce,
+                mdmc_reduce=self.mdmc_reduce,
+                threshold=self.threshold,
+                num_classes=self.num_classes,
+                top_k=self.top_k,
+                multiclass=self.multiclass,
+                ignore_index=self.ignore_index,
+                mode=self.mode,
+            )
+
+            # Update states
+            if self.reduce != "samples" and self.mdmc_reduce != "samplewise":
+                self.tp += tp
+                self.fp += fp
+                self.tn += tn
+                self.fn += fn
+            else:
+                self.tp.append(tp)
+                self.fp.append(fp)
+                self.tn.append(tn)
+                self.fn.append(fn)
+
+    def compute(self) -> Tensor:
+        """Computes accuracy based on inputs passed in to ``update`` previously."""
+        if not self.mode:
+            raise RuntimeError("The mode of the inputs could not be determined.")
+        if self.subset_accuracy:
+            return _subset_accuracy_compute(self.correct, self.total)
+        tp, fp, tn, fn = self._get_final_stats()
+        return _accuracy_compute(tp, fp, tn, fn, self.average, self.mdmc_reduce, self.mode)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/auc.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/auc.py
new file mode 100644
index 000000000..99b64048d
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/auc.py
@@ -0,0 +1,91 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, List, Optional
+
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.classification.auc import _auc_compute, _auc_update
+from paddlemetrics.metric import Metric
+from paddlemetrics.utilities import rank_zero_warn
+from paddlemetrics.utilities.data import dim_zero_cat
+
+
+class AUC(Metric):
+    r"""
+    Computes Area Under the Curve (AUC) using the trapezoidal rule
+
+    Forward accepts two input tensors that should be 1D and have the same number
+    of elements
+
+    Args:
+        reorder: AUC expects its first input to be sorted. If this is not the case,
+            setting this argument to ``True`` will use a stable sorting algorithm to
+            sort the input in descending order
+        compute_on_step:
+            Forward only calls ``update()`` and returns None if this is set to False.
+ dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. + process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + dist_sync_fn: + Callback that performs the ``allgather`` operation on the metric state. When ``None``, DDP + will be used to perform the ``allgather``. + """ + is_differentiable = False + x: List[Tensor] + y: List[Tensor] + + def __init__( + self, + reorder: bool = False, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + self.reorder = reorder + + self.add_state("x", default=[], dist_reduce_fx="cat") + self.add_state("y", default=[], dist_reduce_fx="cat") + + rank_zero_warn( + "Metric `AUC` will save all targets and predictions in buffer." + " For large datasets this may lead to large memory footprint." + ) + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets. + + Args: + preds: Predictions from model (probabilities, or labels) + target: Ground truth labels + """ + x, y = _auc_update(preds, target) + + self.x.append(x) + self.y.append(y) + + def compute(self) -> Tensor: + """Computes AUC based on inputs passed in to ``update`` previously.""" + x = dim_zero_cat(self.x) + y = dim_zero_cat(self.y) + return _auc_compute(x, y, reorder=self.reorder) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/auroc.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/auroc.py new file mode 100644 index 000000000..6236391de --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/auroc.py @@ -0,0 +1,186 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, List, Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.functional.classification.auroc import _auroc_compute, _auroc_update +from paddlemetrics.metric import Metric +from paddlemetrics.utilities import rank_zero_warn +from paddlemetrics.utilities.data import dim_zero_cat +from paddlemetrics.utilities.enums import DataType +from paddlemetrics.utilities.imports import _TORCH_LOWER_1_6 + + +class AUROC(Metric): + r"""Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_). + Works for both binary, multilabel and multiclass problems. In the case of + multiclass, the values will be calculated based on a one-vs-the-rest approach. + + Forward accepts + + - ``preds`` (float tensor): ``(N, ...)`` (binary) or ``(N, C, ...)`` (multiclass) tensor + with probabilities, where C is the number of classes. 
+
+    - ``target`` (long tensor): ``(N, ...)`` or ``(N, C, ...)`` with integer labels
+
+    For non-binary input, if the ``preds`` and ``target`` tensors have the same
+    size the input will be interpreted as multilabel, and if ``preds`` has one
+    dimension more than the ``target`` tensor the input will be interpreted as
+    multiclass.
+
+    Args:
+        num_classes: integer with number of classes for multi-label and multiclass problems.
+            Should be set to ``None`` for binary problems
+        pos_label: integer determining the positive class. Default is ``None``
+            which for binary problems is translated to 1. For multiclass problems
+            this argument should not be set as we iteratively change it in the
+            range [0, num_classes-1]
+        average:
+            - ``'micro'`` computes metric globally. Only works for multilabel problems
+            - ``'macro'`` computes metric for each class and uniformly averages them
+            - ``'weighted'`` computes metric for each class and does a weighted-average,
+              where each class is weighted by their support (accounts for class imbalance)
+            - ``None`` computes and returns the metric per class
+        max_fpr:
+            If not ``None``, calculates standardized partial AUC over the
+            range [0, max_fpr]. Should be a float between 0 and 1.
+        compute_on_step:
+            Forward only calls ``update()`` and return None if this is set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step.
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When ``None``, DDP
+            will be used to perform the allgather
+
+    Raises:
+        ValueError:
+            If ``average`` is none of ``None``, ``"macro"``, ``"weighted"``, ``"micro"``.
+        ValueError:
+            If ``max_fpr`` is not a ``float`` in the range ``(0, 1]``.
+        RuntimeError:
+            If the PyTorch version is below 1.6, since ``max_fpr`` requires ``B.bucketize``,
+            which is not available below 1.6.
+        ValueError:
+            If the mode of data (binary, multi-label, multi-class) changes between batches.
+
+    Example (binary case):
+        >>> from paddlemetrics import AUROC
+        >>> preds = B.tensor([0.13, 0.26, 0.08, 0.19, 0.34])
+        >>> target = B.tensor([0, 0, 1, 1, 1])
+        >>> auroc = AUROC(pos_label=1)
+        >>> auroc(preds, target)
+        tensor(0.5000)
+
+    Example (multiclass case):
+        >>> preds = B.tensor([[0.90, 0.05, 0.05],
+        ...                   [0.05, 0.90, 0.05],
+        ...                   [0.05, 0.05, 0.90],
+        ...                   [0.85, 0.05, 0.10],
+        ...                   [0.10, 0.10, 0.80]])
+        >>> target = B.tensor([0, 1, 1, 2, 2])
+        >>> auroc = AUROC(num_classes=3)
+        >>> auroc(preds, target)
+        tensor(0.7778)
+
+    """
+    is_differentiable = False
+    preds: List[Tensor]
+    target: List[Tensor]
+
+    def __init__(
+        self,
+        num_classes: Optional[int] = None,
+        pos_label: Optional[int] = None,
+        average: Optional[str] = "macro",
+        max_fpr: Optional[float] = None,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+
+        self.num_classes = num_classes
+        self.pos_label = pos_label
+        self.average = average
+        self.max_fpr = max_fpr
+
+        allowed_average = (None, "macro", "weighted", "micro")
+        if self.average not in allowed_average:
+            raise ValueError(
+                f"Argument `average` expected to be one of the following: {allowed_average} but got {average}"
+            )
+
+        if self.max_fpr is not None:
+            if not isinstance(max_fpr, float) or not 0 < max_fpr <= 1:
+                raise ValueError(f"`max_fpr` should be a float in range (0, 1], got: {max_fpr}")
+
+            if _TORCH_LOWER_1_6:
+                raise RuntimeError(
+                    "`max_fpr` argument requires `B.bucketize` which is not available below PyTorch version 1.6"
+                )
+
+        self.mode: DataType = None  # type: ignore
+        self.add_state("preds", default=[], dist_reduce_fx="cat")
+        self.add_state("target", default=[], dist_reduce_fx="cat")
+
+        rank_zero_warn(
+            "Metric `AUROC` will save all targets and predictions in buffer."
+            " For large datasets this may lead to large memory footprint."
+        )
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Update state with predictions and targets.
+
+        Args:
+            preds: Predictions from model (probabilities, or labels)
+            target: Ground truth labels
+        """
+        preds, target, mode = _auroc_update(preds, target)
+
+        self.preds.append(preds)
+        self.target.append(target)
+
+        if self.mode and self.mode != mode:
+            raise ValueError(
+                "The mode of data (binary, multi-label, multi-class) should be constant, but changed"
+                f" between batches from {self.mode} to {mode}"
+            )
+        self.mode = mode
+
+    def compute(self) -> Tensor:
+        """Computes AUROC based on inputs passed in to ``update`` previously."""
+        if not self.mode:
+            raise RuntimeError("The mode of the data was never determined; call `update` before `compute`.")
+        preds = dim_zero_cat(self.preds)
+        target = dim_zero_cat(self.target)
+        return _auroc_compute(
+            preds,
+            target,
+            self.mode,
+            self.num_classes,
+            self.pos_label,
+            self.average,
+            self.max_fpr,
+        )
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/average_precision.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/average_precision.py
new file mode 100644
index 000000000..0e37da588
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/average_precision.py
@@ -0,0 +1,147 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, List, Optional, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.classification.average_precision import (
+    _average_precision_compute,
+    _average_precision_update,
+)
+from paddlemetrics.metric import Metric
+from paddlemetrics.utilities import rank_zero_warn
+from paddlemetrics.utilities.data import dim_zero_cat
+
+
+class AveragePrecision(Metric):
+    """Computes the average precision score, which summarises the precision-recall curve into one number. Works for
+    both binary and multiclass problems. In the case of multiclass, the values will be calculated based on a
+    one-vs-the-rest approach.
+
+    Forward accepts
+
+    - ``preds`` (float tensor): ``(N, ...)`` (binary) or ``(N, C, ...)`` (multiclass) tensor
+      with probabilities, where C is the number of classes.
+
+    - ``target`` (long tensor): ``(N, ...)`` with integer labels
+
+    Args:
+        num_classes: integer with number of classes. Not necessary to provide
+            for binary problems.
+        pos_label: integer determining the positive class. Default is ``None``
+            which for binary problems is translated to 1. For multiclass problems
+            this argument should not be set as we iteratively change it in the
+            range [0, num_classes-1]
+        average:
+            defines the reduction that is applied in the case of multiclass and multilabel input.
+            Should be one of the following:
+
+            - ``'macro'`` [default]: Calculate the metric for each class separately, and average the
+              metrics across classes (with equal weights for each class).
+            - ``'micro'``: Calculate the metric globally, across all samples and classes. Cannot be
+              used with multiclass input.
+            - ``'weighted'``: Calculate the metric for each class separately, and average the
+              metrics across classes, weighting each class by its support.
+            - ``'none'`` or ``None``: Calculate the metric for each class separately, and return
+              the metric for every class.
+
+        compute_on_step:
+            Forward only calls ``update()`` and return None if this is set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step. default: False
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+
+    Example (binary case):
+        >>> from paddlemetrics import AveragePrecision
+        >>> pred = B.tensor([0, 1, 2, 3])
+        >>> target = B.tensor([0, 1, 1, 1])
+        >>> average_precision = AveragePrecision(pos_label=1)
+        >>> average_precision(pred, target)
+        tensor(1.)
+
+    Example (multiclass case):
+        >>> pred = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
+        ...                  [0.05, 0.75, 0.05, 0.05, 0.05],
+        ...                  [0.05, 0.05, 0.75, 0.05, 0.05],
+        ...                  [0.05, 0.05, 0.05, 0.75, 0.05]])
+        >>> target = B.tensor([0, 1, 3, 2])
+        >>> average_precision = AveragePrecision(num_classes=5, average=None)
+        >>> average_precision(pred, target)
+        [tensor(1.), tensor(1.), tensor(0.2500), tensor(0.2500), tensor(nan)]
+    """
+
+    is_differentiable = False
+    preds: List[Tensor]
+    target: List[Tensor]
+
+    def __init__(
+        self,
+        num_classes: Optional[int] = None,
+        pos_label: Optional[int] = None,
+        average: Optional[str] = "macro",
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+        )
+
+        self.num_classes = num_classes
+        self.pos_label = pos_label
+        allowed_average = ("micro", "macro", "weighted", None)
+        if average not in allowed_average:
+            raise ValueError(f"Expected argument `average` to be one of {allowed_average}" f" but got {average}")
+        self.average = average
+
+        self.add_state("preds", default=[], dist_reduce_fx="cat")
+        self.add_state("target", default=[], dist_reduce_fx="cat")
+
+        rank_zero_warn(
+            "Metric `AveragePrecision` will save all targets and predictions in buffer."
+            " For large datasets this may lead to large memory footprint."
+        )
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Update state with predictions and targets.
+
+        Args:
+            preds: Predictions from model
+            target: Ground truth values
+        """
+        preds, target, num_classes, pos_label = _average_precision_update(
+            preds, target, self.num_classes, self.pos_label, self.average
+        )
+        self.preds.append(preds)
+        self.target.append(target)
+        self.num_classes = num_classes
+        self.pos_label = pos_label
+
+    def compute(self) -> Union[Tensor, List[Tensor]]:
+        """Compute the average precision score.
+
+        Returns:
+            tensor with average precision. If multiclass will return list
+            of such tensors, one for each class
+        """
+        preds = dim_zero_cat(self.preds)
+        target = dim_zero_cat(self.target)
+        if not self.num_classes:
+            raise ValueError(f"`num_classes` has to be a positive number, but got {self.num_classes}")
+        return _average_precision_compute(preds, target, self.num_classes, self.pos_label, self.average)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/binned_precision_recall.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/binned_precision_recall.py
new file mode 100644
index 000000000..ffc86ae69
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/binned_precision_recall.py
@@ -0,0 +1,324 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
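+#
+# A minimal usage sketch of the constant-memory, binned API defined below
+# (illustrative only; ``B.tensor`` mirrors the torch API re-exported by
+# ``paddleext.torchapi``):
+#
+#     import paddleext.torchapi as B
+#     from paddlemetrics import BinnedPrecisionRecallCurve
+#
+#     pr_curve = BinnedPrecisionRecallCurve(num_classes=1, thresholds=5)
+#     precision, recall, thresholds = pr_curve(
+#         B.tensor([0.0, 0.1, 0.8, 0.4]), B.tensor([0, 1, 1, 0])
+#     )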
+from typing import Any, List, Optional, Tuple, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.classification.average_precision import _average_precision_compute_with_precision_recall
+from paddlemetrics.metric import Metric
+from paddlemetrics.utilities.data import METRIC_EPS, to_onehot
+
+
+def _recall_at_precision(
+    precision: Tensor,
+    recall: Tensor,
+    thresholds: Tensor,
+    min_precision: float,
+) -> Tuple[Tensor, Tensor]:
+    # Pick the highest recall (and its threshold) among all points on the
+    # curve whose precision meets the ``min_precision`` requirement.
+    try:
+        max_recall, _, best_threshold = max(
+            (r, p, t) for p, r, t in zip(precision, recall, thresholds) if p >= min_precision
+        )
+
+    except ValueError:
+        # No point on the curve satisfies the precision requirement.
+        max_recall = B.tensor(0.0, device=recall.device, dtype=recall.dtype)
+        best_threshold = B.tensor(0)
+
+    if max_recall == 0.0:
+        best_threshold = B.tensor(1e6, device=thresholds.device, dtype=thresholds.dtype)
+
+    return max_recall, best_threshold
+
+
+class BinnedPrecisionRecallCurve(Metric):
+    """Computes precision-recall pairs for different thresholds. Works for both binary and multiclass problems. In
+    the case of multiclass, the values will be calculated based on a one-vs-the-rest approach.
+
+    Computation is performed in constant-memory by computing precision and recall
+    for ``thresholds`` buckets/thresholds (evenly distributed between 0 and 1).
+
+    Forward accepts
+
+    - ``preds`` (float tensor): ``(N, ...)`` (binary) or ``(N, C, ...)`` (multiclass) tensor
+      with probabilities, where C is the number of classes.
+
+    - ``target`` (long tensor): ``(N, ...)`` or ``(N, C, ...)`` with integer labels
+
+    Args:
+        num_classes: integer with number of classes. For binary, set to 1.
+        thresholds: list or tensor with specific thresholds, or an integer number of bins
+            to sample linearly between 0 and 1. Using more thresholds will lead to a more
+            detailed curve and more accurate estimates, but will be slower and consume
+            more memory.
+        compute_on_step:
+            Forward only calls ``update()`` and return None if this is set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step. default: False
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+
+    Raises:
+        ValueError:
+            If ``thresholds`` is not an int, list or tensor
+
+    Example (binary case):
+        >>> from paddlemetrics import BinnedPrecisionRecallCurve
+        >>> pred = B.tensor([0, 0.1, 0.8, 0.4])
+        >>> target = B.tensor([0, 1, 1, 0])
+        >>> pr_curve = BinnedPrecisionRecallCurve(num_classes=1, thresholds=5)
+        >>> precision, recall, thresholds = pr_curve(pred, target)
+        >>> precision
+        tensor([0.5000, 0.5000, 1.0000, 1.0000, 1.0000, 1.0000])
+        >>> recall
+        tensor([1.0000, 0.5000, 0.5000, 0.5000, 0.0000, 0.0000])
+        >>> thresholds
+        tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000])
+
+    Example (multiclass case):
+        >>> pred = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
+        ...                  [0.05, 0.75, 0.05, 0.05, 0.05],
+        ...                  [0.05, 0.05, 0.75, 0.05, 0.05],
+        ...
[0.05, 0.05, 0.05, 0.75, 0.05]]) + >>> target = B.tensor([0, 1, 3, 2]) + >>> pr_curve = BinnedPrecisionRecallCurve(num_classes=5, thresholds=3) + >>> precision, recall, thresholds = pr_curve(pred, target) + >>> precision # doctest: +NORMALIZE_WHITESPACE + [tensor([0.2500, 1.0000, 1.0000, 1.0000]), + tensor([0.2500, 1.0000, 1.0000, 1.0000]), + tensor([2.5000e-01, 1.0000e-06, 1.0000e+00, 1.0000e+00]), + tensor([2.5000e-01, 1.0000e-06, 1.0000e+00, 1.0000e+00]), + tensor([2.5000e-07, 1.0000e+00, 1.0000e+00, 1.0000e+00])] + >>> recall # doctest: +NORMALIZE_WHITESPACE + [tensor([1.0000, 1.0000, 0.0000, 0.0000]), + tensor([1.0000, 1.0000, 0.0000, 0.0000]), + tensor([1.0000, 0.0000, 0.0000, 0.0000]), + tensor([1.0000, 0.0000, 0.0000, 0.0000]), + tensor([0., 0., 0., 0.])] + >>> thresholds # doctest: +NORMALIZE_WHITESPACE + [tensor([0.0000, 0.5000, 1.0000]), + tensor([0.0000, 0.5000, 1.0000]), + tensor([0.0000, 0.5000, 1.0000]), + tensor([0.0000, 0.5000, 1.0000]), + tensor([0.0000, 0.5000, 1.0000])] + """ + + TPs: Tensor + FPs: Tensor + FNs: Tensor + + def __init__( + self, + num_classes: int, + thresholds: Union[int, Tensor, List[float], None] = None, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + ) + + self.num_classes = num_classes + if isinstance(thresholds, int): + self.num_thresholds = thresholds + thresholds = B.linspace(0, 1.0, thresholds) + self.register_buffer("thresholds", thresholds) + elif thresholds is not None: + if not isinstance(thresholds, (list, Tensor)): + raise ValueError("Expected argument `thresholds` to either be an integer, list of floats or a tensor") + thresholds = B.tensor(thresholds) if isinstance(thresholds, list) else thresholds + self.num_thresholds = thresholds.numel() + self.register_buffer("thresholds", thresholds) + + for name in ("TPs", "FPs", "FNs"): + self.add_state( + name=name, + default=B.zeros(num_classes, self.num_thresholds, dtype=B.float32), + dist_reduce_fx="sum", + ) + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """ + Args + preds: (n_samples, n_classes) tensor + target: (n_samples, n_classes) tensor + """ + # binary case + if len(preds.shape) == len(target.shape) == 1: + preds = preds.reshape(-1, 1) + target = target.reshape(-1, 1) + + if len(preds.shape) == len(target.shape) + 1: + target = to_onehot(target, num_classes=self.num_classes) + + target = target == 1 + # Iterate one threshold at a time to conserve memory + for i in range(self.num_thresholds): + predictions = preds >= self.thresholds[i] + self.TPs[:, i] += (target & predictions).sum(dim=0) + self.FPs[:, i] += ((~target) & (predictions)).sum(dim=0) + self.FNs[:, i] += ((target) & (~predictions)).sum(dim=0) + + def compute(self) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]: + """Returns float tensor of size n_classes.""" + precisions = (self.TPs + METRIC_EPS) / (self.TPs + self.FPs + METRIC_EPS) + recalls = self.TPs / (self.TPs + self.FNs + METRIC_EPS) + + # Need to guarantee that last precision=1 and recall=0, similar to precision_recall_curve + t_ones = B.ones(self.num_classes, 1, dtype=precisions.dtype, device=precisions.device) + precisions = B.cat([precisions, t_ones], dim=1) + t_zeros = B.zeros(self.num_classes, 1, dtype=recalls.dtype, device=recalls.device) + recalls = B.cat([recalls, t_zeros], dim=1) + if 
self.num_classes == 1:
+            return precisions[0, :], recalls[0, :], self.thresholds
+        return list(precisions), list(recalls), [self.thresholds for _ in range(self.num_classes)]
+
+
+class BinnedAveragePrecision(BinnedPrecisionRecallCurve):
+    """Computes the average precision score, which summarises the precision-recall curve into one number. Works for
+    both binary and multiclass problems. In the case of multiclass, the values will be calculated based on a
+    one-vs-the-rest approach.
+
+    Computation is performed in constant-memory by computing precision and recall
+    for ``thresholds`` buckets/thresholds (evenly distributed between 0 and 1).
+
+    Forward accepts
+
+    - ``preds`` (float tensor): ``(N, ...)`` (binary) or ``(N, C, ...)`` (multiclass) tensor
+      with probabilities, where C is the number of classes.
+
+    - ``target`` (long tensor): ``(N, ...)`` with integer labels
+
+    Args:
+        num_classes: integer with number of classes. Not necessary to provide
+            for binary problems.
+        thresholds: list or tensor with specific thresholds, or an integer number of bins
+            to sample linearly between 0 and 1. Using more thresholds will lead to a more
+            detailed curve and more accurate estimates, but will be slower and consume
+            more memory
+        compute_on_step:
+            Forward only calls ``update()`` and return None if this is set to False. default: True
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+
+    Raises:
+        ValueError:
+            If ``thresholds`` is not an int, list or tensor
+
+    Example (binary case):
+        >>> from paddlemetrics import BinnedAveragePrecision
+        >>> pred = B.tensor([0, 1, 2, 3])
+        >>> target = B.tensor([0, 1, 1, 1])
+        >>> average_precision = BinnedAveragePrecision(num_classes=1, thresholds=10)
+        >>> average_precision(pred, target)
+        tensor(1.0000)
+
+    Example (multiclass case):
+        >>> pred = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
+        ...                  [0.05, 0.75, 0.05, 0.05, 0.05],
+        ...                  [0.05, 0.05, 0.75, 0.05, 0.05],
+        ...                  [0.05, 0.05, 0.05, 0.75, 0.05]])
+        >>> target = B.tensor([0, 1, 3, 2])
+        >>> average_precision = BinnedAveragePrecision(num_classes=5, thresholds=10)
+        >>> average_precision(pred, target)
+        [tensor(1.0000), tensor(1.0000), tensor(0.2500), tensor(0.2500), tensor(-0.)]
+    """
+
+    def compute(self) -> Union[List[Tensor], Tensor]:  # type: ignore
+        precisions, recalls, _ = super().compute()
+        return _average_precision_compute_with_precision_recall(precisions, recalls, self.num_classes, average=None)
+
+
+class BinnedRecallAtFixedPrecision(BinnedPrecisionRecallCurve):
+    """Computes the highest possible recall value given the minimum precision threshold provided.
+
+    Computation is performed in constant-memory by computing precision and recall
+    for ``thresholds`` buckets/thresholds (evenly distributed between 0 and 1).
+
+    Forward accepts
+
+    - ``preds`` (float tensor): ``(N, ...)`` (binary) or ``(N, C, ...)`` (multiclass) tensor
+      with probabilities, where C is the number of classes.
+
+    - ``target`` (long tensor): ``(N, ...)`` with integer labels
+
+    Args:
+        num_classes: integer with number of classes. Provide 1 for binary problems.
+        min_precision: float value specifying minimum precision threshold.
+        thresholds: list or tensor with specific thresholds, or an integer number of bins
+            to sample linearly between 0 and 1. Using more thresholds will lead to a more
+            detailed curve and more accurate estimates, but will be slower and consume
+            more memory
+        compute_on_step:
+            Forward only calls ``update()`` and return None if this is set to False.
default: True + process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + + Raises: + ValueError: + If ``thresholds`` is not a list or tensor + + Example (binary case): + >>> from paddlemetrics import BinnedRecallAtFixedPrecision + >>> pred = B.tensor([0, 0.2, 0.5, 0.8]) + >>> target = B.tensor([0, 1, 1, 0]) + >>> average_precision = BinnedRecallAtFixedPrecision(num_classes=1, thresholds=10, min_precision=0.5) + >>> average_precision(pred, target) + (tensor(1.0000), tensor(0.1111)) + + Example (multiclass case): + >>> pred = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05], + ... [0.05, 0.75, 0.05, 0.05, 0.05], + ... [0.05, 0.05, 0.75, 0.05, 0.05], + ... [0.05, 0.05, 0.05, 0.75, 0.05]]) + >>> target = B.tensor([0, 1, 3, 2]) + >>> average_precision = BinnedRecallAtFixedPrecision(num_classes=5, thresholds=10, min_precision=0.5) + >>> average_precision(pred, target) # doctest: +NORMALIZE_WHITESPACE + (tensor([1.0000, 1.0000, 0.0000, 0.0000, 0.0000]), + tensor([6.6667e-01, 6.6667e-01, 1.0000e+06, 1.0000e+06, 1.0000e+06])) + """ + + def __init__( + self, + num_classes: int, + min_precision: float, + thresholds: Union[int, Tensor, List[float], None] = None, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + ) -> None: + super().__init__( + num_classes=num_classes, + thresholds=thresholds, + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + ) + self.min_precision = min_precision + + def compute(self) -> Tuple[Tensor, Tensor]: # type: ignore + """Returns float tensor of size n_classes.""" + precisions, recalls, thresholds = super().compute() + + if self.num_classes == 1: + return _recall_at_precision(precisions, recalls, thresholds, self.min_precision) + + recalls_at_p = B.zeros(self.num_classes, device=recalls[0].device, dtype=recalls[0].dtype) + thresholds_at_p = B.zeros(self.num_classes, device=thresholds[0].device, dtype=thresholds[0].dtype) + for i in range(self.num_classes): + recalls_at_p[i], thresholds_at_p[i] = _recall_at_precision( + precisions[i], recalls[i], thresholds[i], self.min_precision + ) + return recalls_at_p, thresholds_at_p diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/calibration_error.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/calibration_error.py new file mode 100644 index 000000000..5fc9d10a0 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/calibration_error.py @@ -0,0 +1,115 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
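+#
+# A minimal sketch of the binning scheme this metric relies on (illustrative
+# only, not the exact implementation of ``_ce_update``/``_ce_compute``):
+# confidences are bucketed into ``n_bins`` equal-width bins on [0, 1], and the
+# L1 (ECE) variant accumulates |accuracy - confidence| per bin, weighted by
+# the fraction of samples falling into that bin:
+#
+#     import paddleext.torchapi as B
+#     boundaries = B.linspace(0, 1, 15 + 1)  # n_bins = 15
+#     # for each bin b: ece += prop_in_b * abs(acc_in_b - conf_in_b)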
+from typing import Any, List, Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.classification.calibration_error import _ce_compute, _ce_update
+from paddlemetrics.metric import Metric
+from paddlemetrics.utilities.data import dim_zero_cat
+
+
+class CalibrationError(Metric):
+    r"""
+
+    `Computes the Top-label Calibration Error`_
+    Three different norms are implemented, each corresponding to variations on the calibration error metric.
+
+    L1 norm (Expected Calibration Error)
+
+    .. math::
+        \text{ECE} = \frac{1}{N}\sum_i^N \|p_i - c_i\|
+
+    Infinity norm (Maximum Calibration Error)
+
+    .. math::
+        \text{MCE} = \max_{i} (p_i - c_i)
+
+    L2 norm (Root Mean Square Calibration Error)
+
+    .. math::
+        \text{RMSCE} = \sqrt{\frac{1}{N}\sum_i^N (p_i - c_i)^2}
+
+    Where :math:`p_i` is the top-1 prediction accuracy in bin i
+    and :math:`c_i` is the average confidence of predictions in bin i.
+
+    .. note::
+        L2-norm debiasing is not yet supported.
+
+    Args:
+        n_bins: Number of bins to use when computing probabilities and accuracies.
+        norm: Norm used to compare empirical and expected probability bins.
+            Defaults to "l1", or Expected Calibration Error.
+        compute_on_step: Forward only calls ``update()`` and return None if this is set to False.
+        dist_sync_on_step: Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step
+        process_group: Specify the process group on which synchronization is called.
+            default: None (which selects the entire world)
+    """
+    DISTANCES = {"l1", "l2", "max"}
+    confidences: List[Tensor]
+    accuracies: List[Tensor]
+
+    def __init__(
+        self,
+        n_bins: int = 15,
+        norm: str = "l1",
+        compute_on_step: bool = False,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+    ):
+
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=None,
+        )
+
+        if norm not in self.DISTANCES:
+            raise ValueError(f"Norm {norm} is not supported. Please select from l1, l2, or max.")
+
+        if not isinstance(n_bins, int) or n_bins <= 0:
+            raise ValueError(f"Expected argument `n_bins` to be an int larger than 0 but got {n_bins}")
+        self.n_bins = n_bins
+        self.register_buffer("bin_boundaries", B.linspace(0, 1, n_bins + 1))
+        self.norm = norm
+
+        self.add_state("confidences", [], dist_reduce_fx="cat")
+        self.add_state("accuracies", [], dist_reduce_fx="cat")
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Computes top-level confidences and accuracies for the input probabilities and appends them to internal
+        state.
+
+        Args:
+            preds (Tensor): Model output probabilities.
+            target (Tensor): Ground-truth target class labels.
+        """
+        confidences, accuracies = _ce_update(preds, target)
+
+        self.confidences.append(confidences)
+        self.accuracies.append(accuracies)
+
+    def compute(self) -> Tensor:
+        """Computes calibration error across all confidences and accuracies.
+
+        Returns:
+            Tensor: Calibration error across previously collected examples.
+ """ + confidences = dim_zero_cat(self.confidences) + accuracies = dim_zero_cat(self.accuracies) + return _ce_compute(confidences, accuracies, self.bin_boundaries, norm=self.norm) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/cohen_kappa.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/cohen_kappa.py new file mode 100644 index 000000000..3a4817cf4 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/cohen_kappa.py @@ -0,0 +1,119 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.functional.classification.cohen_kappa import _cohen_kappa_compute, _cohen_kappa_update +from paddlemetrics.metric import Metric + + +class CohenKappa(Metric): + r""" + Calculates `Cohen's kappa score`_ that measures + inter-annotator agreement. It is defined as + + .. math:: + \kappa = (p_o - p_e) / (1 - p_e) + + where :math:`p_o` is the empirical probability of agreement and :math:`p_e` is + the expected agreement when both annotators assign labels randomly. Note that + :math:`p_e` is estimated using a per-annotator empirical prior over the + class labels. + + Works with binary, multiclass, and multilabel data. Accepts probabilities from a model output or + integer class values in prediction. Works with multi-dimensional preds and target. + + Forward accepts + - ``preds`` (float or long tensor): ``(N, ...)`` or ``(N, C, ...)`` where C is the number of classes + + - ``target`` (long tensor): ``(N, ...)`` + + If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument + to convert into integer labels. This is the case for binary and multi-label probabilities or logits. + + If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``. + + Args: + num_classes: Number of classes in the dataset. + + weights: Weighting type to calculate the score. Choose from + - ``None`` or ``'none'``: no weighting + - ``'linear'``: linear weighting + - ``'quadratic'``: quadratic weighting + + threshold: + Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case + of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. + + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + + process_group: + Specify the process group on which synchronization is called. 
default: None (which selects the entire world)
+
+    Example:
+        >>> from paddlemetrics import CohenKappa
+        >>> target = B.tensor([1, 1, 0, 0])
+        >>> preds = B.tensor([0, 1, 0, 0])
+        >>> cohenkappa = CohenKappa(num_classes=2)
+        >>> cohenkappa(preds, target)
+        tensor(0.5000)
+
+    """
+    is_differentiable = False
+    confmat: Tensor
+
+    def __init__(
+        self,
+        num_classes: int,
+        weights: Optional[str] = None,
+        threshold: float = 0.5,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+        )
+        self.num_classes = num_classes
+        self.weights = weights
+        self.threshold = threshold
+
+        allowed_weights = ("linear", "quadratic", "none", None)
+        if self.weights not in allowed_weights:
+            raise ValueError(f"Argument `weights` needs to be one of the following: {allowed_weights}")
+
+        self.add_state("confmat", default=B.zeros(num_classes, num_classes), dist_reduce_fx="sum")
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Update state with predictions and targets.
+
+        Args:
+            preds: Predictions from model
+            target: Ground truth values
+        """
+        confmat = _cohen_kappa_update(preds, target, self.num_classes, self.threshold)
+        self.confmat += confmat
+
+    def compute(self) -> Tensor:
+        """Computes Cohen's kappa score."""
+        return _cohen_kappa_compute(self.confmat, self.weights)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/confusion_matrix.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/confusion_matrix.py
new file mode 100644
index 000000000..a3485570d
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/confusion_matrix.py
@@ -0,0 +1,141 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.classification.confusion_matrix import _confusion_matrix_compute, _confusion_matrix_update
+from paddlemetrics.metric import Metric
+
+
+class ConfusionMatrix(Metric):
+    r"""
+    Computes the `confusion matrix`_. Works with binary,
+    multiclass, and multilabel data. Accepts probabilities or logits from a model output or integer class
+    values in prediction. Works with multi-dimensional preds and target, but it should be noted that
+    additional dimensions will be flattened.
+
+    Forward accepts
+
+    - ``preds`` (float or long tensor): ``(N, ...)`` or ``(N, C, ...)`` where C is the number of classes
+    - ``target`` (long tensor): ``(N, ...)``
+
+    If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument
+    to convert into integer labels. This is the case for binary and multi-label probabilities or logits.
+
+    If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``.
+
+    If working with multilabel data, setting the ``multilabel`` argument to ``True`` will make sure that a
+    `confusion matrix gets calculated per label`_.
+
+    Args:
+        num_classes: Number of classes in the dataset.
+        normalize: Normalization mode for confusion matrix. Choose from
+
+            - ``None`` or ``'none'``: no normalization (default)
+            - ``'true'``: normalization over the targets (most commonly used)
+            - ``'pred'``: normalization over the predictions
+            - ``'all'``: normalization over the whole matrix
+
+        threshold:
+            Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
+            of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
+
+        multilabel:
+            determines if data is multilabel or not.
+        compute_on_step:
+            Forward only calls ``update()`` and return None if this is set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step. default: False
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+
+    Example (binary data):
+        >>> from paddlemetrics import ConfusionMatrix
+        >>> target = B.tensor([1, 1, 0, 0])
+        >>> preds = B.tensor([0, 1, 0, 0])
+        >>> confmat = ConfusionMatrix(num_classes=2)
+        >>> confmat(preds, target)
+        tensor([[2., 0.],
+                [1., 1.]])
+
+    Example (multiclass data):
+        >>> target = B.tensor([2, 1, 0, 0])
+        >>> preds = B.tensor([2, 1, 0, 1])
+        >>> confmat = ConfusionMatrix(num_classes=3)
+        >>> confmat(preds, target)
+        tensor([[1., 1., 0.],
+                [0., 1., 0.],
+                [0., 0., 1.]])
+
+    Example (multilabel data):
+        >>> target = B.tensor([[0, 1, 0], [1, 0, 1]])
+        >>> preds = B.tensor([[0, 0, 1], [1, 0, 1]])
+        >>> confmat = ConfusionMatrix(num_classes=3, multilabel=True)
+        >>> confmat(preds, target)  # doctest: +NORMALIZE_WHITESPACE
+        tensor([[[1., 0.], [0., 1.]],
+                [[1., 0.], [1., 0.]],
+                [[0., 1.], [0., 1.]]])
+
+    """
+    is_differentiable = False
+    confmat: Tensor
+
+    def __init__(
+        self,
+        num_classes: int,
+        normalize: Optional[str] = None,
+        threshold: float = 0.5,
+        multilabel: bool = False,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+        )
+        self.num_classes = num_classes
+        self.normalize = normalize
+        self.threshold = threshold
+        self.multilabel = multilabel
+
+        allowed_normalize = ("true", "pred", "all", "none", None)
+        if self.normalize not in allowed_normalize:
+            raise ValueError(f"Argument `normalize` needs to be one of the following: {allowed_normalize}")
+
+        default = B.zeros(num_classes, 2, 2) if multilabel else B.zeros(num_classes, num_classes)
+        self.add_state("confmat", default=default, dist_reduce_fx="sum")
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Update state with predictions and targets.
+
+        Args:
+            preds: Predictions from model
+            target: Ground truth values
+        """
+        confmat = _confusion_matrix_update(preds, target, self.num_classes, self.threshold, self.multilabel)
+        self.confmat += confmat
+
+    def compute(self) -> Tensor:
+        """Computes confusion matrix.
+ + Returns: + If `multilabel=False` this will be a `[n_classes, n_classes]` tensor and if `multilabel=True` + this will be a `[n_classes, 2, 2]` tensor + """ + return _confusion_matrix_compute(self.confmat, self.normalize) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/f_beta.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/f_beta.py new file mode 100644 index 000000000..4b24dc0e9 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/f_beta.py @@ -0,0 +1,301 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.classification.stat_scores import StatScores +from paddlemetrics.functional.classification.f_beta import _fbeta_compute +from paddlemetrics.utilities.enums import AverageMethod + + +class FBeta(StatScores): + r""" + Computes `F-score`_, specifically: + + .. math:: + F_\beta = (1 + \beta^2) * \frac{\text{precision} * \text{recall}} + {(\beta^2 * \text{precision}) + \text{recall}} + + Where :math:`\beta` is some positive real factor. Works with binary, multiclass, and multilabel data. + Accepts logit scores or probabilities from a model output or integer class values in prediction. + Works with multi-dimensional preds and target. + + Forward accepts + + - ``preds`` (float or long tensor): ``(N, ...)`` or ``(N, C, ...)`` where C is the number of classes + - ``target`` (long tensor): ``(N, ...)`` + + If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument + to convert into integer labels. This is the case for binary and multi-label logits and probabilities. + + If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``. + + Args: + num_classes: + Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods. + beta: + Beta coefficient in the F measure. + threshold: + Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case + of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. + average: + Defines the reduction that is applied. Should be one of the following: + + - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes. + - ``'macro'``: Calculate the metric for each class separately, and average the + metrics across classes (with equal weights for each class). + - ``'weighted'``: Calculate the metric for each class separately, and average the + metrics across classes, weighting each class by its support (``tp + fn``). + - ``'none'`` or ``None``: Calculate the metric for each class separately, and return + the metric for every class. + - ``'samples'``: Calculate the metric for each sample, and average the metrics + across samples (with equal weights for each sample). + + .. 
note:: What is considered a sample in the multi-dimensional multi-class case + depends on the value of ``mdmc_average``. + + .. note:: If ``'none'`` and a given class doesn't occur in the `preds` or `target`, + the value for the class will be ``nan``. + + mdmc_average: + Defines how averaging is done for multi-dimensional multi-class inputs (on top of the + ``average`` parameter). Should be one of the following: + + - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional + multi-class. + + - ``'samplewise'``: In this case, the statistics are computed separately for each + sample on the ``N`` axis, and then averaged over samples. + The computation for each sample is done by treating the flattened extra axes ``...`` + (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample, + and computing the metric for the sample based on that. + + - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs + (see :ref:`references/modules:input types`) + are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they + were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. + + ignore_index: + Integer specifying a target class to ignore. If given, this class index does not contribute + to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` + or ``'none'``, the score for the ignored class will be returned as ``nan``. + + top_k: + Number of highest probability or logit score predictions considered to find the correct label, + relevant only for (multi-dimensional) multi-class inputs. The + default value (``None``) will be interpreted as 1 for these inputs. + + Should be left at default (``None``) for all other types of inputs. + + multiclass: + Used only in certain special cases, where you want to treat inputs as a different type + than what they appear to be. See the parameter's + :ref:`documentation section ` + for a more detailed explanation and examples. + + compute_on_step: + Forward only calls ``update()`` and return ``None`` if this is set to ``False``. + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step + process_group: + Specify the process group on which synchronization is called. + default: ``None`` (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When ``None``, DDP + will be used to perform the allgather. + + Raises: + ValueError: + If ``average`` is none of ``"micro"``, ``"macro"``, ``"weighted"``, ``"none"``, ``None``. 
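+
+    A quick sanity check of the formula above (illustrative only): with ``beta=0.5``,
+    a precision of :math:`0.5` and a recall of :math:`0.25` give
+    :math:`F_{0.5} = 1.25 \cdot (0.5 \cdot 0.25) / (0.25 \cdot 0.5 + 0.25) \approx 0.4167`.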
+ + Example: + >>> from paddlemetrics import FBeta + >>> target = B.tensor([0, 1, 2, 0, 1, 2]) + >>> preds = B.tensor([0, 2, 1, 0, 0, 1]) + >>> f_beta = FBeta(num_classes=3, beta=0.5) + >>> f_beta(preds, target) + tensor(0.3333) + + """ + + def __init__( + self, + num_classes: Optional[int] = None, + beta: float = 1.0, + threshold: float = 0.5, + average: str = "micro", + mdmc_average: Optional[str] = None, + ignore_index: Optional[int] = None, + top_k: Optional[int] = None, + multiclass: Optional[bool] = None, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + self.beta = beta + allowed_average = list(AverageMethod) + if average not in allowed_average: + raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.") + + super().__init__( + reduce="macro" if average in [AverageMethod.WEIGHTED, AverageMethod.NONE] else average, + mdmc_reduce=mdmc_average, + threshold=threshold, + top_k=top_k, + num_classes=num_classes, + multiclass=multiclass, + ignore_index=ignore_index, + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + self.average = average + + def compute(self) -> Tensor: + """Computes fbeta over state.""" + tp, fp, tn, fn = self._get_final_stats() + return _fbeta_compute(tp, fp, tn, fn, self.beta, self.ignore_index, self.average, self.mdmc_reduce) + + +class F1(FBeta): + """Computes F1 metric. F1 metrics correspond to a harmonic mean of the precision and recall scores. + + Works with binary, multiclass, and multilabel data. Accepts logits or probabilities from a model + output or integer class values in prediction. Works with multi-dimensional preds and target. + + Forward accepts + + - ``preds`` (float or long tensor): ``(N, ...)`` or ``(N, C, ...)`` where C is the number of classes + - ``target`` (long tensor): ``(N, ...)`` + + If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument. + This is the case for binary and multi-label logits. + + If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``. + + Args: + num_classes: + Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods. + threshold: + Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case + of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. + average: + Defines the reduction that is applied. Should be one of the following: + + - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes. + - ``'macro'``: Calculate the metric for each class separately, and average the + metrics across classes (with equal weights for each class). + - ``'weighted'``: Calculate the metric for each class separately, and average the + metrics across classes, weighting each class by its support (``tp + fn``). + - ``'none'`` or ``None``: Calculate the metric for each class separately, and return + the metric for every class. + - ``'samples'``: Calculate the metric for each sample, and average the metrics + across samples (with equal weights for each sample). + + .. note:: What is considered a sample in the multi-dimensional multi-class case + depends on the value of ``mdmc_average``. 
+ + mdmc_average: + Defines how averaging is done for multi-dimensional multi-class inputs (on top of the + ``average`` parameter). Should be one of the following: + + - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional + multi-class. + + - ``'samplewise'``: In this case, the statistics are computed separately for each + sample on the ``N`` axis, and then averaged over samples. + The computation for each sample is done by treating the flattened extra axes ``...`` + (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample, + and computing the metric for the sample based on that. + + - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs + (see :ref:`references/modules:input types`) + are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they + were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. + + ignore_index: + Integer specifying a target class to ignore. If given, this class index does not contribute + to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` + or ``'none'``, the score for the ignored class will be returned as ``nan``. + + top_k: + Number of highest probability or logit score predictions considered to find the correct label, + relevant only for (multi-dimensional) multi-class inputs. The + default value (``None``) will be interpreted as 1 for these inputs. + + Should be left at default (``None``) for all other types of inputs. + multiclass: + Used only in certain special cases, where you want to treat inputs as a different type + than what they appear to be. See the parameter's + :ref:`documentation section ` + for a more detailed explanation and examples. + + compute_on_step: + Forward only calls ``update()`` and return ``None`` if this is set to ``False``. + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step + process_group: + Specify the process group on which synchronization is called. + default: ``None`` (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When ``None``, DDP + will be used to perform the allgather. 
+ + + Example: + >>> from paddlemetrics import F1 + >>> target = B.tensor([0, 1, 2, 0, 1, 2]) + >>> preds = B.tensor([0, 2, 1, 0, 0, 1]) + >>> f1 = F1(num_classes=3) + >>> f1(preds, target) + tensor(0.3333) + """ + + is_differentiable = False + + def __init__( + self, + num_classes: Optional[int] = None, + threshold: float = 0.5, + average: str = "micro", + mdmc_average: Optional[str] = None, + ignore_index: Optional[int] = None, + top_k: Optional[int] = None, + multiclass: Optional[bool] = None, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + num_classes=num_classes, + beta=1.0, + threshold=threshold, + average=average, + mdmc_average=mdmc_average, + ignore_index=ignore_index, + top_k=top_k, + multiclass=multiclass, + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/hamming_distance.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/hamming_distance.py new file mode 100644 index 000000000..855d7f7e8 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/hamming_distance.py @@ -0,0 +1,110 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.classification.hamming_distance import _hamming_distance_compute, _hamming_distance_update +from paddlemetrics.metric import Metric + + +class HammingDistance(Metric): + r""" + Computes the average `Hamming distance`_ (also + known as Hamming loss) between targets and predictions: + + .. math:: + \text{Hamming distance} = \frac{1}{N \cdot L}\sum_i^N \sum_l^L 1(y_{il} \neq \hat{y_{il}}) + + Where :math:`y` is a tensor of target values, :math:`\hat{y}` is a tensor of predictions, + and :math:`\bullet_{il}` refers to the :math:`l`-th label of the :math:`i`-th sample of that + tensor. + + This is the same as ``1-accuracy`` for binary data, while for all other types of inputs it + treats each possible label separately - meaning that, for example, multi-class data is + treated as if it were multi-label. + + Accepts all input types listed in :ref:`references/modules:input types`. + + Args: + threshold: + Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case + of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. + compute_on_step: + Forward only calls ``update()`` and return ``None`` if this is set to ``False``. + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. + process_group: + Specify the process group on which synchronization is called. 
+ default: ``None`` (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When ``None``, DDP + will be used to perform the all gather. + + Raises: + ValueError: + If ``threshold`` is not between ``0`` and ``1``. + + Example: + >>> from paddlemetrics import HammingDistance + >>> target = B.tensor([[0, 1], [1, 1]]) + >>> preds = B.tensor([[0, 1], [0, 1]]) + >>> hamming_distance = HammingDistance() + >>> hamming_distance(preds, target) + tensor(0.2500) + + """ + is_differentiable = False + correct: Tensor + total: Tensor + + def __init__( + self, + threshold: float = 0.5, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + self.add_state("correct", default=tensor(0), dist_reduce_fx="sum") + self.add_state("total", default=tensor(0), dist_reduce_fx="sum") + + self.threshold = threshold + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets. See + :ref:`references/modules:input types` for more information on input + types. + + Args: + preds: Predictions from model (probabilities, logits or labels) + target: Ground truth labels + """ + correct, total = _hamming_distance_update(preds, target, self.threshold) + + self.correct += correct + self.total += total + + def compute(self) -> Tensor: + """Computes hamming distance based on inputs passed in to ``update`` previously.""" + return _hamming_distance_compute(self.correct, self.total) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/hinge.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/hinge.py new file mode 100644 index 000000000..099864a35 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/hinge.py @@ -0,0 +1,127 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional, Union + +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.classification.hinge import MulticlassMode, _hinge_compute, _hinge_update +from paddlemetrics.metric import Metric + + +class Hinge(Metric): + r""" + Computes the mean `Hinge loss`_, typically used for Support Vector + Machines (SVMs). In the binary case it is defined as: + + .. math:: + \text{Hinge loss} = \max(0, 1 - y \times \hat{y}) + + Where :math:`y \in {-1, 1}` is the target, and :math:`\hat{y} \in \mathbb{R}` is the prediction. + + In the multi-class case, when ``multiclass_mode=None`` (default), ``multiclass_mode=MulticlassMode.CRAMMER_SINGER`` + or ``multiclass_mode="crammer-singer"``, this metric will compute the multi-class hinge loss defined by Crammer and + Singer as: + + .. 
math::
+        \text{Hinge loss} = \max\left(0, 1 - \hat{y}_y + \max_{i \ne y} (\hat{y}_i)\right)
+
+    Where :math:`y \in {0, ..., \mathrm{C}}` is the target class (where :math:`\mathrm{C}` is the number of classes),
+    and :math:`\hat{y} \in \mathbb{R}^\mathrm{C}` is the predicted output per class.
+
+    In the multi-class case when ``multiclass_mode=MulticlassMode.ONE_VS_ALL`` or ``multiclass_mode='one-vs-all'``, this
+    metric will use a one-vs-all approach to compute the hinge loss, giving a vector of C outputs where each entry pits
+    that class against all remaining classes.
+
+    This metric can optionally output the mean of the squared hinge loss by setting ``squared=True``.
+
+    Only accepts inputs with preds shape of (N) (binary) or (N, C) (multi-class) and target shape of (N).
+
+    Args:
+        squared:
+            If True, this will compute the squared hinge loss. Otherwise, computes the regular hinge loss (default).
+        multiclass_mode:
+            Which approach to use for multi-class inputs (has no effect in the binary case). ``None`` (default),
+            ``MulticlassMode.CRAMMER_SINGER`` or ``"crammer-singer"``, uses the Crammer Singer multi-class hinge loss.
+            ``MulticlassMode.ONE_VS_ALL`` or ``"one-vs-all"`` computes the hinge loss in a one-vs-all fashion.
+
+    Raises:
+        ValueError:
+            If ``multiclass_mode`` is not: None, ``MulticlassMode.CRAMMER_SINGER``, ``"crammer-singer"``,
+            ``MulticlassMode.ONE_VS_ALL`` or ``"one-vs-all"``.
+
+    Example (binary case):
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics import Hinge
+        >>> target = B.tensor([0, 1, 1])
+        >>> preds = B.tensor([-2.2, 2.4, 0.1])
+        >>> hinge = Hinge()
+        >>> hinge(preds, target)
+        tensor(0.3000)
+
+    Example (default / multiclass case):
+        >>> target = B.tensor([0, 1, 2])
+        >>> preds = B.tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]])
+        >>> hinge = Hinge()
+        >>> hinge(preds, target)
+        tensor(2.9000)
+
+    Example (multiclass example, one vs all mode):
+        >>> target = B.tensor([0, 1, 2])
+        >>> preds = B.tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]])
+        >>> hinge = Hinge(multiclass_mode="one-vs-all")
+        >>> hinge(preds, target)
+        tensor([2.2333, 1.5000, 1.2333])
+
+    """
+    is_differentiable = True
+    measure: Tensor
+    total: Tensor
+
+    def __init__(
+        self,
+        squared: bool = False,
+        multiclass_mode: Optional[Union[str, MulticlassMode]] = None,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+
+        self.add_state("measure", default=tensor(0.0), dist_reduce_fx="sum")
+        self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
+
+        if multiclass_mode not in (None, MulticlassMode.CRAMMER_SINGER, MulticlassMode.ONE_VS_ALL):
+            raise ValueError(
+                "The `multiclass_mode` should be either None / 'crammer-singer' / MulticlassMode.CRAMMER_SINGER"
+                " (default) or 'one-vs-all' / MulticlassMode.ONE_VS_ALL,"
+                f" got {multiclass_mode}."
+ ) + + self.squared = squared + self.multiclass_mode = multiclass_mode + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + measure, total = _hinge_update(preds, target, squared=self.squared, multiclass_mode=self.multiclass_mode) + + self.measure = measure + self.measure + self.total = total + self.total + + def compute(self) -> Tensor: + return _hinge_compute(self.measure, self.total) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/iou.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/iou.py new file mode 100644 index 000000000..9e89946a1 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/iou.py @@ -0,0 +1,107 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.classification.confusion_matrix import ConfusionMatrix +from paddlemetrics.functional.classification.iou import _iou_from_confmat + + +class IoU(ConfusionMatrix): + r""" + Computes Intersection over union, or `Jaccard index`_: + + .. math:: J(A,B) = \frac{|A\cap B|}{|A\cup B|} + + Where: :math:`A` and :math:`B` are both tensors of the same size, containing integer class values. + They may be subject to conversion from input data (see description below). Note that it is different from box IoU. + + Works with binary, multiclass and multi-label data. + Accepts probabilities from a model output or integer class values in prediction. + Works with multi-dimensional preds and target. + + Forward accepts + + - ``preds`` (float or long tensor): ``(N, ...)`` or ``(N, C, ...)`` where C is the number of classes + - ``target`` (long tensor): ``(N, ...)`` + + If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument + to convert into integer labels. This is the case for binary and multi-label probabilities. + + If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``. + + Args: + num_classes: Number of classes in the dataset. + ignore_index: optional int specifying a target class to ignore. If given, this class index does not contribute + to the returned score, regardless of reduction method. Has no effect if given an int that is not in the + range [0, num_classes-1]. By default, no index is ignored, and all classes are used. + absent_score: score to use for an individual class, if no instances of the class index were present in + `pred` AND no instances of the class index were present in `target`. For example, if we have 3 classes, + [0, 0] for `pred`, and [0, 2] for `target`, then class 1 would be assigned the `absent_score`. + threshold: + Threshold value for binary or multi-label probabilities. + reduction: a method to reduce metric score over labels. 
+
+            - ``'elementwise_mean'``: takes the mean (default)
+            - ``'sum'``: takes the sum
+            - ``'none'``: no reduction will be applied
+
+        compute_on_step:
+            Forward only calls ``update()`` and return None if this is set to False.
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step.
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+
+    Example:
+        >>> from paddlemetrics import IoU
+        >>> target = B.randint(0, 2, (10, 25, 25))
+        >>> pred = B.tensor(target)
+        >>> pred[2:5, 7:13, 9:15] = 1 - pred[2:5, 7:13, 9:15]
+        >>> iou = IoU(num_classes=2)
+        >>> iou(pred, target)
+        tensor(0.9660)
+
+    """
+    is_differentiable = False
+
+    def __init__(
+        self,
+        num_classes: int,
+        ignore_index: Optional[int] = None,
+        absent_score: float = 0.0,
+        threshold: float = 0.5,
+        reduction: str = "elementwise_mean",
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+    ) -> None:
+        super().__init__(
+            num_classes=num_classes,
+            normalize=None,
+            threshold=threshold,
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+        )
+        self.reduction = reduction
+        self.ignore_index = ignore_index
+        self.absent_score = absent_score
+
+    def compute(self) -> Tensor:
+        """Computes intersection over union (IoU)"""
+        return _iou_from_confmat(self.confmat, self.num_classes, self.ignore_index, self.absent_score, self.reduction)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/kl_divergence.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/kl_divergence.py
new file mode 100644
index 000000000..cce887f09
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/kl_divergence.py
@@ -0,0 +1,109 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.classification.kl_divergence import _kld_compute, _kld_update
+from paddlemetrics.metric import Metric
+from paddlemetrics.utilities.data import dim_zero_cat
+
+
+class KLDivergence(Metric):
+    r"""Computes the `KL divergence`_:
+
+    .. math::
+        D_{KL}(P||Q) = \sum_{x\in\mathcal{X}} P(x) \log\frac{P(x)}{Q(x)}
+
+    Where :math:`P` and :math:`Q` are probability distributions where :math:`P` usually represents a distribution
+    over data and :math:`Q` is often a prior or approximation of :math:`P`. It should be noted that the KL divergence
+    is a non-symmetric metric, i.e. :math:`D_{KL}(P||Q) \neq D_{KL}(Q||P)`.
+
+    Args:
+        p: data distribution with shape ``[N, d]``
+        q: prior or approximate distribution with shape ``[N, d]``
+        log_prob: bool indicating if input is log-probabilities or probabilities. If given as probabilities,
+            will normalize to make sure the distributions sum to 1.
+        reduction:
+            Determines how to reduce over the ``N``/batch dimension:
+
+            - ``'mean'`` [default]: Averages score across samples
+            - ``'sum'``: Sum score across samples
+            - ``'none'`` or ``None``: Returns score per sample
+
+    Raises:
+        TypeError:
+            If ``log_prob`` is not a ``bool``
+        ValueError:
+            If ``reduction`` is not one of ``'mean'``, ``'sum'``, ``'none'`` or ``None``
+
+    .. note::
+        Half precision is only supported on GPU for this metric
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics.functional import kl_divergence
+        >>> p = B.tensor([[0.36, 0.48, 0.16]])
+        >>> q = B.tensor([[1/3, 1/3, 1/3]])
+        >>> kl_divergence(p, q)
+        tensor(0.0853)
+
+    """
+    is_differentiable = True
+    # TODO: cannot be used because of scripting
+    # measures: Union[List[Tensor], Tensor]
+    total: Tensor
+
+    def __init__(
+        self,
+        log_prob: bool = False,
+        reduction: Optional[str] = "mean",
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+        if not isinstance(log_prob, bool):
+            raise TypeError(f"Expected argument `log_prob` to be bool but got {log_prob}")
+        self.log_prob = log_prob
+
+        allowed_reduction = ["mean", "sum", "none", None]
+        if reduction not in allowed_reduction:
+            raise ValueError(f"Expected argument `reduction` to be one of {allowed_reduction} but got {reduction}")
+        self.reduction = reduction
+
+        if self.reduction in ["mean", "sum"]:
+            self.add_state("measures", B.zeros(1), dist_reduce_fx="sum")
+        else:
+            self.add_state("measures", [], dist_reduce_fx="cat")
+        self.add_state("total", B.zeros(1), dist_reduce_fx="sum")
+
+    def update(self, p: Tensor, q: Tensor) -> None:  # type: ignore
+        measures, total = _kld_update(p, q, self.log_prob)
+        if self.reduction is None or self.reduction == "none":
+            self.measures.append(measures)
+        else:
+            self.measures += measures.sum()
+        self.total += total
+
+    def compute(self) -> Tensor:
+        measures = dim_zero_cat(self.measures) if self.reduction is None or self.reduction == "none" else self.measures
+        return _kld_compute(measures, self.total, self.reduction)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/matthews_corrcoef.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/matthews_corrcoef.py
new file mode 100644
index 000000000..2ea52673b
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/matthews_corrcoef.py
@@ -0,0 +1,111 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
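+
+# Sanity check of the binary formula used by the class below, with the values
+# from its docstring example: preds = [0, 1, 0, 0] vs. target = [1, 1, 0, 0]
+# give TP=1, TN=2, FP=0, FN=1, so
+#     MCC = (1*2 - 0*1) / sqrt((1+0)*(1+1)*(2+0)*(2+1)) = 2 / sqrt(12) = 0.5774...
+# which matches the expected doctest output ``tensor(0.5774)``.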
+from typing import Any, Callable, Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.classification.matthews_corrcoef import (
+    _matthews_corrcoef_compute,
+    _matthews_corrcoef_update,
+)
+from paddlemetrics.metric import Metric
+
+
+class MatthewsCorrcoef(Metric):
+    r"""
+    Calculates `Matthews correlation coefficient`_ that measures
+    the general correlation or quality of a classification. In the binary case it
+    is defined as:
+
+    .. math::
+        MCC = \frac{TP*TN - FP*FN}{\sqrt{(TP+FP)*(TP+FN)*(TN+FP)*(TN+FN)}}
+
+    where TP, TN, FP and FN are respectively the true positives, true negatives,
+    false positives and false negatives. Also works in the case of multi-label or
+    multi-class input.
+
+    Forward accepts
+
+    - ``preds`` (float or long tensor): ``(N, ...)`` or ``(N, C, ...)`` where C is the number of classes
+    - ``target`` (long tensor): ``(N, ...)``
+
+    If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument
+    to convert into integer labels. This is the case for binary and multi-label probabilities.
+
+    If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``.
+
+    Args:
+        num_classes: Number of classes in the dataset.
+        threshold:
+            Threshold value for binary or multi-label probabilities. default: 0.5
+        compute_on_step:
+            Forward only calls ``update()`` and return None if this is set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step. default: False
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When ``None``, DDP
+            will be used to perform the allgather
+
+    Example:
+        >>> from paddlemetrics import MatthewsCorrcoef
+        >>> target = B.tensor([1, 1, 0, 0])
+        >>> preds = B.tensor([0, 1, 0, 0])
+        >>> matthews_corrcoef = MatthewsCorrcoef(num_classes=2)
+        >>> matthews_corrcoef(preds, target)
+        tensor(0.5774)
+
+    """
+    is_differentiable = False
+    confmat: Tensor
+
+    def __init__(
+        self,
+        num_classes: int,
+        threshold: float = 0.5,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+        self.num_classes = num_classes
+        self.threshold = threshold
+
+        self.add_state("confmat", default=B.zeros(num_classes, num_classes), dist_reduce_fx="sum")
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Update state with predictions and targets.
+
+        Args:
+            preds: Predictions from model
+            target: Ground truth values
+        """
+        confmat = _matthews_corrcoef_update(preds, target, self.num_classes, self.threshold)
+        self.confmat += confmat
+
+    def compute(self) -> Tensor:
+        """Computes matthews correlation coefficient."""
+        return _matthews_corrcoef_compute(self.confmat)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/precision_recall.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/precision_recall.py
new file mode 100644
index 000000000..77920cfc9
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/precision_recall.py
@@ -0,0 +1,320 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.classification.stat_scores import StatScores
+from paddlemetrics.functional.classification.precision_recall import _precision_compute, _recall_compute
+
+
+class Precision(StatScores):
+    r"""
+    Computes `Precision`_:
+
+    .. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}}
+
+    Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and
+    false positives respectively. With the use of the ``top_k`` parameter, this metric can
+    generalize to Precision@K.
+
+    The reduction method (how the precision scores are aggregated) is controlled by the
+    ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the
+    multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`.
+
+    Args:
+        num_classes:
+            Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods.
+        threshold:
+            Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
+            of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
+        average:
+            Defines the reduction that is applied. Should be one of the following:
+
+            - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes.
+            - ``'macro'``: Calculate the metric for each class separately, and average the
+              metrics across classes (with equal weights for each class).
+            - ``'weighted'``: Calculate the metric for each class separately, and average the
+              metrics across classes, weighting each class by its support (``tp + fn``).
+            - ``'none'`` or ``None``: Calculate the metric for each class separately, and return
+              the metric for every class.
+            - ``'samples'``: Calculate the metric for each sample, and average the metrics
+              across samples (with equal weights for each sample).
+
+            .. note:: What is considered a sample in the multi-dimensional multi-class case
+                depends on the value of ``mdmc_average``.
+
+        mdmc_average:
+            Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
+            ``average`` parameter).
Should be one of the following: + + - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional + multi-class. + + - ``'samplewise'``: In this case, the statistics are computed separately for each + sample on the ``N`` axis, and then averaged over samples. + The computation for each sample is done by treating the flattened extra axes ``...`` + (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample, + and computing the metric for the sample based on that. + + - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs + (see :ref:`references/modules:input types`) + are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they + were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. + + ignore_index: + Integer specifying a target class to ignore. If given, this class index does not contribute + to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` + or ``'none'``, the score for the ignored class will be returned as ``nan``. + + top_k: + Number of highest probability or logit score predictions considered to find the correct label, + relevant only for (multi-dimensional) multi-class inputs. The + default value (``None``) will be interpreted as 1 for these inputs. + + Should be left at default (``None``) for all other types of inputs. + + multiclass: + Used only in certain special cases, where you want to treat inputs as a different type + than what they appear to be. See the parameter's + :ref:`documentation section ` + for a more detailed explanation and examples. + + compute_on_step: + Forward only calls ``update()`` and return ``None`` if this is set to ``False``. + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step + process_group: + Specify the process group on which synchronization is called. + default: ``None`` (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When ``None``, DDP + will be used to perform the allgather. + + Raises: + ValueError: + If ``average`` is none of ``"micro"``, ``"macro"``, ``"weighted"``, ``"samples"``, ``"none"``, ``None``. 
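+
+    With ``average='none'`` (or ``None``) the per-class scores that ``'macro'``
+    averages are returned directly. As an illustration, on the same inputs as the
+    example below:
+
+        >>> from paddlemetrics import Precision
+        >>> Precision(average='none', num_classes=3)(B.tensor([2, 0, 2, 1]), B.tensor([1, 1, 2, 0]))
+        tensor([0.0000, 0.0000, 0.5000])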
+
+    Example:
+        >>> from paddlemetrics import Precision
+        >>> preds = B.tensor([2, 0, 2, 1])
+        >>> target = B.tensor([1, 1, 2, 0])
+        >>> precision = Precision(average='macro', num_classes=3)
+        >>> precision(preds, target)
+        tensor(0.1667)
+        >>> precision = Precision(average='micro')
+        >>> precision(preds, target)
+        tensor(0.2500)
+
+    """
+    is_differentiable = False
+
+    def __init__(
+        self,
+        num_classes: Optional[int] = None,
+        threshold: float = 0.5,
+        average: str = "micro",
+        mdmc_average: Optional[str] = None,
+        ignore_index: Optional[int] = None,
+        top_k: Optional[int] = None,
+        multiclass: Optional[bool] = None,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ) -> None:
+        allowed_average = ["micro", "macro", "weighted", "samples", "none", None]
+        if average not in allowed_average:
+            raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.")
+
+        super().__init__(
+            reduce="macro" if average in ["weighted", "none", None] else average,
+            mdmc_reduce=mdmc_average,
+            threshold=threshold,
+            top_k=top_k,
+            num_classes=num_classes,
+            multiclass=multiclass,
+            ignore_index=ignore_index,
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+
+        self.average = average
+
+    def compute(self) -> Tensor:
+        """Computes the precision score based on inputs passed in to ``update`` previously.
+
+        Return:
+            The shape of the returned tensor depends on the ``average`` parameter
+
+            - If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned
+            - If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number
+              of classes
+        """
+        tp, fp, _, fn = self._get_final_stats()
+        return _precision_compute(tp, fp, fn, self.average, self.mdmc_reduce)
+
+
+class Recall(StatScores):
+    r"""
+    Computes `Recall`_:
+
+    .. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}}
+
+    Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and
+    false negatives respectively. With the use of the ``top_k`` parameter, this metric can
+    generalize to Recall@K.
+
+    The reduction method (how the recall scores are aggregated) is controlled by the
+    ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the
+    multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`.
+
+    Args:
+        num_classes:
+            Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods.
+        threshold:
+            Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
+            of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
+        average:
+            Defines the reduction that is applied. Should be one of the following:
+
+            - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes.
+            - ``'macro'``: Calculate the metric for each class separately, and average the
+              metrics across classes (with equal weights for each class).
+            - ``'weighted'``: Calculate the metric for each class separately, and average the
+              metrics across classes, weighting each class by its support (``tp + fn``).
+            - ``'none'`` or ``None``: Calculate the metric for each class separately, and return
+              the metric for every class.
+ - ``'samples'``: Calculate the metric for each sample, and average the metrics + across samples (with equal weights for each sample). + + .. note:: What is considered a sample in the multi-dimensional multi-class case + depends on the value of ``mdmc_average``. + + mdmc_average: + Defines how averaging is done for multi-dimensional multi-class inputs (on top of the + ``average`` parameter). Should be one of the following: + + - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional + multi-class. + + - ``'samplewise'``: In this case, the statistics are computed separately for each + sample on the ``N`` axis, and then averaged over samples. + The computation for each sample is done by treating the flattened extra axes ``...`` + (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample, + and computing the metric for the sample based on that. + + - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs + (see :ref:`references/modules:input types`) + are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they + were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. + + ignore_index: + Integer specifying a target class to ignore. If given, this class index does not contribute + to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` + or ``'none'``, the score for the ignored class will be returned as ``nan``. + + top_k: + Number of highest probability or logit score predictions considered to find the correct label, + relevant only for (multi-dimensional) multi-class. The + default value (``None``) will be interpreted as 1 for these inputs. + + Should be left at default (``None``) for all other types of inputs. + + multiclass: + Used only in certain special cases, where you want to treat inputs as a different type + than what they appear to be. See the parameter's + :ref:`documentation section ` + for a more detailed explanation and examples. + + compute_on_step: + Forward only calls ``update()`` and return ``None`` if this is set to ``False``. + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step + process_group: + Specify the process group on which synchronization is called. + default: ``None`` (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When ``None``, DDP + will be used to perform the allgather. + + Raises: + ValueError: + If ``average`` is none of ``"micro"``, ``"macro"``, ``"weighted"``, ``"samples"``, ``"none"``, ``None``. 
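+
+    With ``average='none'`` (or ``None``) the per-class recalls behind the ``'macro'``
+    value in the example below can be inspected directly. As an illustration:
+
+        >>> from paddlemetrics import Recall
+        >>> Recall(average='none', num_classes=3)(B.tensor([2, 0, 2, 1]), B.tensor([1, 1, 2, 0]))
+        tensor([0., 0., 1.])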
+ + Example: + >>> from paddlemetrics import Recall + >>> preds = B.tensor([2, 0, 2, 1]) + >>> target = B.tensor([1, 1, 2, 0]) + >>> recall = Recall(average='macro', num_classes=3) + >>> recall(preds, target) + tensor(0.3333) + >>> recall = Recall(average='micro') + >>> recall(preds, target) + tensor(0.2500) + + """ + is_differentiable = False + + def __init__( + self, + num_classes: Optional[int] = None, + threshold: float = 0.5, + average: str = "micro", + mdmc_average: Optional[str] = None, + ignore_index: Optional[int] = None, + top_k: Optional[int] = None, + multiclass: Optional[bool] = None, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + allowed_average = ["micro", "macro", "weighted", "samples", "none", None] + if average not in allowed_average: + raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.") + + super().__init__( + reduce="macro" if average in ["weighted", "none", None] else average, + mdmc_reduce=mdmc_average, + threshold=threshold, + top_k=top_k, + num_classes=num_classes, + multiclass=multiclass, + ignore_index=ignore_index, + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + self.average = average + + def compute(self) -> Tensor: + """Computes the recall score based on inputs passed in to ``update`` previously. + + Return: + The shape of the returned tensor depends on the ``average`` parameter + + - If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned + - If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number + of classes + """ + tp, fp, _, fn = self._get_final_stats() + return _recall_compute(tp, fp, fn, self.average, self.mdmc_reduce) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/precision_recall_curve.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/precision_recall_curve.py new file mode 100644 index 000000000..341419092 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/precision_recall_curve.py @@ -0,0 +1,149 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, List, Optional, Tuple, Union + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.functional.classification.precision_recall_curve import ( + _precision_recall_curve_compute, + _precision_recall_curve_update, +) +from paddlemetrics.metric import Metric +from paddlemetrics.utilities import rank_zero_warn +from paddlemetrics.utilities.data import dim_zero_cat + + +class PrecisionRecallCurve(Metric): + """Computes precision-recall pairs for different thresholds. Works for both binary and multiclass problems. In + the case of multiclass, the values will be calculated based on a one-vs-the-rest approach. 
+
+    Forward accepts
+
+    - ``preds`` (float tensor): ``(N, ...)`` (binary) or ``(N, C, ...)`` (multiclass) tensor
+      with probabilities, where C is the number of classes.
+
+    - ``target`` (long tensor): ``(N, ...)`` or ``(N, C, ...)`` with integer labels
+
+    Args:
+        num_classes: integer with number of classes for multi-label and multiclass problems.
+            Should be set to ``None`` for binary problems
+        pos_label: integer determining the positive class. Default is ``None``
+            which for binary problems is translated to 1. For multiclass problems
+            this argument should not be set as we iteratively change it in the
+            range [0,num_classes-1]
+        compute_on_step:
+            Forward only calls ``update()`` and return None if this is set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step. default: False
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+
+    Example (binary case):
+        >>> from paddlemetrics import PrecisionRecallCurve
+        >>> pred = B.tensor([0, 1, 2, 3])
+        >>> target = B.tensor([0, 1, 1, 0])
+        >>> pr_curve = PrecisionRecallCurve(pos_label=1)
+        >>> precision, recall, thresholds = pr_curve(pred, target)
+        >>> precision
+        tensor([0.6667, 0.5000, 0.0000, 1.0000])
+        >>> recall
+        tensor([1.0000, 0.5000, 0.0000, 0.0000])
+        >>> thresholds
+        tensor([1, 2, 3])
+
+    Example (multiclass case):
+        >>> pred = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
+        ...                  [0.05, 0.75, 0.05, 0.05, 0.05],
+        ...                  [0.05, 0.05, 0.75, 0.05, 0.05],
+        ...                  [0.05, 0.05, 0.05, 0.75, 0.05]])
+        >>> target = B.tensor([0, 1, 3, 2])
+        >>> pr_curve = PrecisionRecallCurve(num_classes=5)
+        >>> precision, recall, thresholds = pr_curve(pred, target)
+        >>> precision  # doctest: +NORMALIZE_WHITESPACE
+        [tensor([1., 1.]), tensor([1., 1.]), tensor([0.2500, 0.0000, 1.0000]),
+         tensor([0.2500, 0.0000, 1.0000]), tensor([0., 1.])]
+        >>> recall
+        [tensor([1., 0.]), tensor([1., 0.]), tensor([1., 0., 0.]), tensor([1., 0., 0.]), tensor([nan, 0.])]
+        >>> thresholds
+        [tensor([0.7500]), tensor([0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500])]
+    """
+
+    is_differentiable = False
+    preds: List[Tensor]
+    target: List[Tensor]
+
+    def __init__(
+        self,
+        num_classes: Optional[int] = None,
+        pos_label: Optional[int] = None,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+        )
+
+        self.num_classes = num_classes
+        self.pos_label = pos_label
+
+        self.add_state("preds", default=[], dist_reduce_fx="cat")
+        self.add_state("target", default=[], dist_reduce_fx="cat")
+
+        rank_zero_warn(
+            "Metric `PrecisionRecallCurve` will save all targets and predictions in buffer."
+            " For large datasets this may lead to large memory footprint."
+        )
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Update state with predictions and targets.
+
+        Args:
+            preds: Predictions from model
+            target: Ground truth values
+        """
+        preds, target, num_classes, pos_label = _precision_recall_curve_update(
+            preds, target, self.num_classes, self.pos_label
+        )
+        self.preds.append(preds)
+        self.target.append(target)
+        self.num_classes = num_classes
+        self.pos_label = pos_label
+
+    def compute(self) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
+        """Compute the precision-recall curve.
+
+        Returns:
+            3-element tuple containing
+
+            precision:
+                tensor where element i is the precision of predictions with
+                score >= thresholds[i] and the last element is 1.
+                If multiclass, this is a list of such tensors, one for each class.
+            recall:
+                tensor where element i is the recall of predictions with
+                score >= thresholds[i] and the last element is 0.
+                If multiclass, this is a list of such tensors, one for each class.
+            thresholds:
+                Thresholds used for computing precision/recall scores
+        """
+        preds = dim_zero_cat(self.preds)
+        target = dim_zero_cat(self.target)
+        if not self.num_classes:
+            raise ValueError(f"`num_classes` has to be a positive number, but got {self.num_classes}")
+        return _precision_recall_curve_compute(preds, target, self.num_classes, self.pos_label)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/roc.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/roc.py
new file mode 100644
index 000000000..a01a5b94d
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/roc.py
@@ -0,0 +1,169 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, List, Optional, Tuple, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.classification.roc import _roc_compute, _roc_update
+from paddlemetrics.metric import Metric
+from paddlemetrics.utilities import rank_zero_warn
+
+
+class ROC(Metric):
+    """Computes the Receiver Operating Characteristic (ROC). Works for both binary, multiclass and multilabel
+    problems. In the case of multiclass, the values will be calculated based on a one-vs-the-rest approach.
+
+    Forward accepts
+
+    - ``preds`` (float tensor): ``(N, ...)`` (binary) or ``(N, C, ...)`` (multiclass/multilabel) tensor
+      with probabilities, where C is the number of classes/labels.
+
+    - ``target`` (long tensor): ``(N, ...)`` or ``(N, C, ...)`` with integer labels
+
+    Args:
+        num_classes: integer with number of classes for multi-label and multiclass problems.
+            Should be set to ``None`` for binary problems
+        pos_label: integer determining the positive class. Default is ``None``
+            which for binary problems is translated to 1. For multiclass problems
+            this argument should not be set as we iteratively change it in the
+            range [0,num_classes-1]
+        compute_on_step:
+            Forward only calls ``update()`` and return None if this is set to False.
default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When ``None``, DDP + will be used to perform the allgather + + Example (binary case): + >>> from paddlemetrics import ROC + >>> pred = B.tensor([0, 1, 2, 3]) + >>> target = B.tensor([0, 1, 1, 1]) + >>> roc = ROC(pos_label=1) + >>> fpr, tpr, thresholds = roc(pred, target) + >>> fpr + tensor([0., 0., 0., 0., 1.]) + >>> tpr + tensor([0.0000, 0.3333, 0.6667, 1.0000, 1.0000]) + >>> thresholds + tensor([4, 3, 2, 1, 0]) + + Example (multiclass case): + >>> pred = B.tensor([[0.75, 0.05, 0.05, 0.05], + ... [0.05, 0.75, 0.05, 0.05], + ... [0.05, 0.05, 0.75, 0.05], + ... [0.05, 0.05, 0.05, 0.75]]) + >>> target = B.tensor([0, 1, 3, 2]) + >>> roc = ROC(num_classes=4) + >>> fpr, tpr, thresholds = roc(pred, target) + >>> fpr + [tensor([0., 0., 1.]), tensor([0., 0., 1.]), tensor([0.0000, 0.3333, 1.0000]), tensor([0.0000, 0.3333, 1.0000])] + >>> tpr + [tensor([0., 1., 1.]), tensor([0., 1., 1.]), tensor([0., 0., 1.]), tensor([0., 0., 1.])] + >>> thresholds # doctest: +NORMALIZE_WHITESPACE + [tensor([1.7500, 0.7500, 0.0500]), + tensor([1.7500, 0.7500, 0.0500]), + tensor([1.7500, 0.7500, 0.0500]), + tensor([1.7500, 0.7500, 0.0500])] + + Example (multilabel case): + >>> pred = B.tensor([[0.8191, 0.3680, 0.1138], + ... [0.3584, 0.7576, 0.1183], + ... [0.2286, 0.3468, 0.1338], + ... [0.8603, 0.0745, 0.1837]]) + >>> target = B.tensor([[1, 1, 0], [0, 1, 0], [0, 0, 0], [0, 1, 1]]) + >>> roc = ROC(num_classes=3, pos_label=1) + >>> fpr, tpr, thresholds = roc(pred, target) + >>> fpr # doctest: +NORMALIZE_WHITESPACE + [tensor([0.0000, 0.3333, 0.3333, 0.6667, 1.0000]), + tensor([0., 0., 0., 1., 1.]), + tensor([0.0000, 0.0000, 0.3333, 0.6667, 1.0000])] + >>> tpr # doctest: +NORMALIZE_WHITESPACE + [tensor([0., 0., 1., 1., 1.]), + tensor([0.0000, 0.3333, 0.6667, 0.6667, 1.0000]), + tensor([0., 1., 1., 1., 1.])] + >>> thresholds # doctest: +NORMALIZE_WHITESPACE + [tensor([1.8603, 0.8603, 0.8191, 0.3584, 0.2286]), + tensor([1.7576, 0.7576, 0.3680, 0.3468, 0.0745]), + tensor([1.1837, 0.1837, 0.1338, 0.1183, 0.1138])] + """ + + is_differentiable = False + preds: List[Tensor] + target: List[Tensor] + + def __init__( + self, + num_classes: Optional[int] = None, + pos_label: Optional[int] = None, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + self.num_classes = num_classes + self.pos_label = pos_label + + self.add_state("preds", default=[], dist_reduce_fx=None) + self.add_state("target", default=[], dist_reduce_fx=None) + + rank_zero_warn( + "Metric `ROC` will save all targets and predictions in buffer." + " For large datasets this may lead to large memory footprint." + ) + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets. 
+
+        Args:
+            preds: Predictions from model
+            target: Ground truth values
+        """
+        preds, target, num_classes, pos_label = _roc_update(preds, target, self.num_classes, self.pos_label)
+        self.preds.append(preds)
+        self.target.append(target)
+        self.num_classes = num_classes
+        self.pos_label = pos_label
+
+    def compute(self) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
+        """Compute the receiver operating characteristic.
+
+        Returns:
+            3-element tuple containing
+
+            fpr:
+                tensor with false positive rates.
+                If multiclass, this is a list of such tensors, one for each class.
+            tpr:
+                tensor with true positive rates.
+                If multiclass, this is a list of such tensors, one for each class.
+            thresholds:
+                thresholds used for computing false- and true positive rates
+        """
+        preds = B.cat(self.preds, dim=0)
+        target = B.cat(self.target, dim=0)
+        if not self.num_classes:
+            raise ValueError(f"`num_classes` has to be a positive number, but got {self.num_classes}")
+        return _roc_compute(preds, target, self.num_classes, self.pos_label)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/specificity.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/specificity.py
new file mode 100644
index 000000000..0ad44268a
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/specificity.py
@@ -0,0 +1,171 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.classification.stat_scores import StatScores
+from paddlemetrics.functional.classification.specificity import _specificity_compute
+
+
+class Specificity(StatScores):
+    r"""
+    Computes `Specificity`_:
+
+    .. math:: \text{Specificity} = \frac{\text{TN}}{\text{TN} + \text{FP}}
+
+    Where :math:`\text{TN}` and :math:`\text{FP}` represent the number of true negatives and
+    false positives respectively. With the use of the ``top_k`` parameter, this metric can
+    generalize to Specificity@K.
+
+    The reduction method (how the specificity scores are aggregated) is controlled by the
+    ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the
+    multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`.
+
+    Args:
+        num_classes:
+            Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods.
+        threshold:
+            Threshold probability value for transforming probability predictions to binary
+            (0,1) predictions, in the case of binary or multi-label inputs.
+        average:
+            Defines the reduction that is applied. Should be one of the following:
+
+            - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes.
+            - ``'macro'``: Calculate the metric for each class separately, and average the
+              metrics across classes (with equal weights for each class).
+ - ``'weighted'``: Calculate the metric for each class separately, and average the + metrics across classes, weighting each class by its support (``tn + fp``). + - ``'none'`` or ``None``: Calculate the metric for each class separately, and return + the metric for every class. + - ``'samples'``: Calculate the metric for each sample, and average the metrics + across samples (with equal weights for each sample). + + .. note:: What is considered a sample in the multi-dimensional multi-class case + depends on the value of ``mdmc_average``. + + mdmc_average: + Defines how averaging is done for multi-dimensional multi-class inputs (on top of the + ``average`` parameter). Should be one of the following: + + - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional + multi-class. + + - ``'samplewise'``: In this case, the statistics are computed separately for each + sample on the ``N`` axis, and then averaged over samples. + The computation for each sample is done by treating the flattened extra axes ``...`` + (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample, + and computing the metric for the sample based on that. + + - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs + (see :ref:`references/modules:input types`) + are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they + were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. + + ignore_index: + Integer specifying a target class to ignore. If given, this class index does not contribute + to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` + or ``'none'``, the score for the ignored class will be returned as ``nan``. + + top_k: + Number of highest probability entries for each sample to convert to 1s - relevant + only for inputs with probability predictions. If this parameter is set for multi-label + inputs, it will take precedence over ``threshold``. For (multi-dim) multi-class inputs, + this parameter defaults to 1. + + Should be left unset (``None``) for inputs with label predictions. + + multiclass: + Used only in certain special cases, where you want to treat inputs as a different type + than what they appear to be. See the parameter's + :ref:`documentation section ` + for a more detailed explanation and examples. + + compute_on_step: + Forward only calls ``update()`` and return ``None`` if this is set to ``False``. + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step + process_group: + Specify the process group on which synchronization is called. + default: ``None`` (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When ``None``, DDP + will be used to perform the allgather. + + Raises: + ValueError: + If ``average`` is none of ``"micro"``, ``"macro"``, ``"weighted"``, ``"samples"``, ``"none"``, ``None``. 
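+
+    With ``average='none'`` (or ``None``) the per-class specificities averaged by
+    ``'macro'`` in the example below are returned directly. As an illustration:
+
+        >>> from paddlemetrics import Specificity
+        >>> Specificity(average='none', num_classes=3)(B.tensor([2, 0, 2, 1]), B.tensor([1, 1, 2, 0]))
+        tensor([0.6667, 0.5000, 0.6667])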
+ + Example: + >>> from paddlemetrics import Specificity + >>> preds = B.tensor([2, 0, 2, 1]) + >>> target = B.tensor([1, 1, 2, 0]) + >>> specificity = Specificity(average='macro', num_classes=3) + >>> specificity(preds, target) + tensor(0.6111) + >>> specificity = Specificity(average='micro') + >>> specificity(preds, target) + tensor(0.6250) + + """ + is_differentiable = False + + def __init__( + self, + num_classes: Optional[int] = None, + threshold: float = 0.5, + average: str = "micro", + mdmc_average: Optional[str] = None, + ignore_index: Optional[int] = None, + top_k: Optional[int] = None, + multiclass: Optional[bool] = None, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + allowed_average = ["micro", "macro", "weighted", "samples", "none", None] + if average not in allowed_average: + raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.") + + super().__init__( + reduce="macro" if average in ["weighted", "none", None] else average, + mdmc_reduce=mdmc_average, + threshold=threshold, + top_k=top_k, + num_classes=num_classes, + multiclass=multiclass, + ignore_index=ignore_index, + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + self.average = average + + def compute(self) -> Tensor: + """Computes the specificity score based on inputs passed in to ``update`` previously. + + Return: + The shape of the returned tensor depends on the ``average`` parameter + + - If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned + - If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number + of classes + """ + tp, fp, tn, fn = self._get_final_stats() + return _specificity_compute(tp, fp, tn, fn, self.average, self.mdmc_reduce) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/stat_scores.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/stat_scores.py new file mode 100644 index 000000000..ec099c867 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/stat_scores.py @@ -0,0 +1,267 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional, Tuple + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.functional.classification.stat_scores import _stat_scores_compute, _stat_scores_update +from paddlemetrics.metric import Metric +from paddlemetrics.utilities.enums import AverageMethod, MDMCAverageMethod + + +class StatScores(Metric): + r"""Computes the number of true positives, false positives, true negatives, false negatives. + Related to `Type I and Type II errors`_ + and the `confusion matrix`_. 
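+
+    For intuition: the five statistics returned are ``[tp, fp, tn, fn, sup]``. In the
+    ``reduce='micro'`` example further below, the one-hot view of ``preds = [1, 0, 2, 1]``
+    against ``target = [1, 1, 2, 0]`` over 3 classes yields 2 true positives, 2 false
+    positives, 6 true negatives and 2 false negatives across the 12 (sample, class)
+    pairs, i.e. ``tensor([2, 2, 6, 2, 4])`` with support ``sup = tp + fn = 4``.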
+
+    The reduction method (how the statistics are aggregated) is controlled by the
+    ``reduce`` parameter, and additionally by the ``mdmc_reduce`` parameter in the
+    multi-dimensional multi-class case.
+
+    Accepts all inputs listed in :ref:`references/modules:input types`.
+
+    Args:
+        threshold:
+            Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
+            of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
+
+        top_k:
+            Number of highest probability or logit score predictions considered to find the correct label,
+            relevant only for (multi-dimensional) multi-class inputs. The
+            default value (``None``) will be interpreted as 1 for these inputs.
+
+            Should be left at default (``None``) for all other types of inputs.
+
+        reduce:
+            Defines the reduction that is applied. Should be one of the following:
+
+            - ``'micro'`` [default]: Counts the statistics by summing over all [sample, class]
+              combinations (globally). Each statistic is represented by a single integer.
+            - ``'macro'``: Counts the statistics for each class separately (over all samples).
+              Each statistic is represented by a ``(C,)`` tensor. Requires ``num_classes``
+              to be set.
+            - ``'samples'``: Counts the statistics for each sample separately (over all classes).
+              Each statistic is represented by a ``(N, )`` 1d tensor.
+
+            .. note:: What is considered a sample in the multi-dimensional multi-class case
+                depends on the value of ``mdmc_reduce``.
+
+        num_classes:
+            Number of classes. Necessary for (multi-dimensional) multi-class or multi-label data.
+
+        ignore_index:
+            Specify a class (label) to ignore. If given, this class index does not contribute
+            to the returned score, regardless of reduction method. If an index is ignored, and
+            ``reduce='macro'``, the class statistics for the ignored class will all be returned
+            as ``-1``.
+
+        mdmc_reduce:
+            Defines how the multi-dimensional multi-class inputs are handled. Should be
+            one of the following:
+
+            - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional
+              multi-class (see :ref:`references/modules:input types` for the definition of input types).
+
+            - ``'samplewise'``: In this case, the statistics are computed separately for each
+              sample on the ``N`` axis, and then the outputs are concatenated together. In each
+              sample the extra axes ``...`` are flattened to become the sub-sample axis, and
+              statistics for each sample are computed by treating the sub-sample axis as the
+              ``N`` axis for that sample.
+
+            - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs are
+              flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they
+              were ``(N_X, C)``. From here on the ``reduce`` parameter applies as usual.
+
+        multiclass:
+            Used only in certain special cases, where you want to treat inputs as a different type
+            than what they appear to be. See the parameter's
+            :ref:`documentation section `
+            for a more detailed explanation and examples.
+
+        compute_on_step:
+            Forward only calls ``update()`` and return ``None`` if this is set to ``False``.
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step
+        process_group:
+            Specify the process group on which synchronization is called.
+            default: ``None`` (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When ``None``, DDP
+            will be used to perform the allgather.
+
+    Raises:
+        ValueError:
+            If ``reduce`` is none of ``"micro"``, ``"macro"`` or ``"samples"``.
+        ValueError:
+            If ``mdmc_reduce`` is none of ``None``, ``"samplewise"``, ``"global"``.
+        ValueError:
+            If ``reduce`` is set to ``"macro"`` and ``num_classes`` is not provided.
+        ValueError:
+            If ``num_classes`` is set
+            and ``ignore_index`` is not in the range ``0`` <= ``ignore_index`` < ``num_classes``.
+
+    Example:
+        >>> from paddlemetrics.classification import StatScores
+        >>> preds = B.tensor([1, 0, 2, 1])
+        >>> target = B.tensor([1, 1, 2, 0])
+        >>> stat_scores = StatScores(reduce='macro', num_classes=3)
+        >>> stat_scores(preds, target)
+        tensor([[0, 1, 2, 1, 1],
+                [1, 1, 1, 1, 2],
+                [1, 0, 3, 0, 1]])
+        >>> stat_scores = StatScores(reduce='micro')
+        >>> stat_scores(preds, target)
+        tensor([2, 2, 6, 2, 4])
+
+    """
+    is_differentiable = False
+    # TODO: cannot be used because of scripting
+    # tp: Union[Tensor, List[Tensor]]
+    # fp: Union[Tensor, List[Tensor]]
+    # tn: Union[Tensor, List[Tensor]]
+    # fn: Union[Tensor, List[Tensor]]
+
+    def __init__(
+        self,
+        threshold: float = 0.5,
+        top_k: Optional[int] = None,
+        reduce: str = "micro",
+        num_classes: Optional[int] = None,
+        ignore_index: Optional[int] = None,
+        mdmc_reduce: Optional[str] = None,
+        multiclass: Optional[bool] = None,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+
+        self.reduce = reduce
+        self.mdmc_reduce = mdmc_reduce
+        self.num_classes = num_classes
+        self.threshold = threshold
+        self.multiclass = multiclass
+        self.ignore_index = ignore_index
+        self.top_k = top_k
+
+        if reduce not in ["micro", "macro", "samples"]:
+            raise ValueError(f"The `reduce` {reduce} is not valid.")
+
+        if mdmc_reduce not in [None, "samplewise", "global"]:
+            raise ValueError(f"The `mdmc_reduce` {mdmc_reduce} is not valid.")
+
+        if reduce == "macro" and (not num_classes or num_classes < 1):
+            raise ValueError("When you set `reduce` as 'macro', you have to provide the number of classes.")
+
+        if num_classes and ignore_index is not None and (not 0 <= ignore_index < num_classes or num_classes == 1):
+            raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes")
+
+        default: Callable = lambda: []
+        reduce_fn: Optional[str] = None
+        if mdmc_reduce != "samplewise" and reduce != "samples":
+            if reduce == "micro":
+                zeros_shape = []
+            elif reduce == "macro":
+                zeros_shape = [num_classes]
+            else:
+                raise ValueError(f'Wrong reduce="{reduce}"')
+            default = lambda: B.zeros(zeros_shape, dtype=B.long)
+            reduce_fn = "sum"
+
+        for s in ("tp", "fp", "tn", "fn"):
+            self.add_state(s, default=default(), dist_reduce_fx=reduce_fn)
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Update state with predictions and targets. See
+        :ref:`references/modules:input types` for more information on input
+        types.
+
+        Args:
+            preds: Predictions from model (probabilities, logits or labels)
+            target: Ground truth values
+        """
+
+        tp, fp, tn, fn = _stat_scores_update(
+            preds,
+            target,
+            reduce=self.reduce,
+            mdmc_reduce=self.mdmc_reduce,
+            threshold=self.threshold,
+            num_classes=self.num_classes,
+            top_k=self.top_k,
+            multiclass=self.multiclass,
+            ignore_index=self.ignore_index,
+        )
+
+        # Update states
+        if self.reduce != AverageMethod.SAMPLES and self.mdmc_reduce != MDMCAverageMethod.SAMPLEWISE:
+            self.tp += tp
+            self.fp += fp
+            self.tn += tn
+            self.fn += fn
+        else:
+            self.tp.append(tp)
+            self.fp.append(fp)
+            self.tn.append(tn)
+            self.fn.append(fn)
+
+    def _get_final_stats(self) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
+        """Performs concatenation on the stat scores if necessary, before passing them to a compute function."""
+        tp = B.cat(self.tp) if isinstance(self.tp, list) else self.tp
+        fp = B.cat(self.fp) if isinstance(self.fp, list) else self.fp
+        tn = B.cat(self.tn) if isinstance(self.tn, list) else self.tn
+        fn = B.cat(self.fn) if isinstance(self.fn, list) else self.fn
+        return tp, fp, tn, fn
+
+    def compute(self) -> Tensor:
+        """Computes the stat scores based on inputs passed in to ``update`` previously.
+
+        Return:
+            The metric returns a tensor of shape ``(..., 5)``, where the last dimension corresponds
+            to ``[tp, fp, tn, fn, sup]`` (``sup`` stands for support and equals ``tp + fn``). The
+            shape depends on the ``reduce`` and ``mdmc_reduce`` (in case of multi-dimensional
+            multi-class data) parameters:
+
+            - If the data is not multi-dimensional multi-class, then
+
+              - If ``reduce='micro'``, the shape will be ``(5, )``
+              - If ``reduce='macro'``, the shape will be ``(C, 5)``,
+                where ``C`` stands for the number of classes
+              - If ``reduce='samples'``, the shape will be ``(N, 5)``, where ``N`` stands for
+                the number of samples
+
+            - If the data is multi-dimensional multi-class and ``mdmc_reduce='global'``, then
+
+              - If ``reduce='micro'``, the shape will be ``(5, )``
+              - If ``reduce='macro'``, the shape will be ``(C, 5)``
+              - If ``reduce='samples'``, the shape will be ``(N*X, 5)``, where ``X`` stands for
+                the product of sizes of all "extra" dimensions of the data (i.e. all dimensions
+                except for ``C`` and ``N``)
+
+            - If the data is multi-dimensional multi-class and ``mdmc_reduce='samplewise'``, then
+
+              - If ``reduce='micro'``, the shape will be ``(N, 5)``
+              - If ``reduce='macro'``, the shape will be ``(N, C, 5)``
+              - If ``reduce='samples'``, the shape will be ``(N, X, 5)``
+        """
+        tp, fp, tn, fn = self._get_final_stats()
+        return _stat_scores_compute(tp, fp, tn, fn)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/collections.py b/EE/paddlemetric/src/build/lib/paddlemetrics/collections.py
new file mode 100644
index 000000000..3b03856e7
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/collections.py
@@ -0,0 +1,239 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
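+# A minimal usage sketch of MetricCollection (values reused from the class
+# docstring below; the ``paddleext.torchapi`` import mirrors the module
+# imports used throughout this package and is an assumption here):
+#
+#     import paddleext.torchapi as B
+#     from paddlemetrics import MetricCollection, Accuracy
+#
+#     metrics = MetricCollection([Accuracy()])
+#     target = B.tensor([0, 2, 0, 2, 0, 1, 0, 2])
+#     preds = B.tensor([2, 1, 2, 0, 1, 2, 2, 2])
+#     metrics(preds, target)   # {'Accuracy': tensor(0.1250)}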
+ +from collections import OrderedDict +from copy import deepcopy +from typing import Any, Dict, Hashable, Iterable, Optional, Sequence, Tuple, Union + +import paddleext.torchapi as B +from paddleext.torchapi import nn + +from paddlemetrics.metric import Metric +from paddlemetrics.utilities import rank_zero_warn + + +class MetricCollection(nn.ModuleDict): + """MetricCollection class can be used to chain metrics that have the same call pattern into one single class. + + Args: + metrics: One of the following + + * list or tuple (sequence): if metrics are passed in as a list or tuple, will use the metrics class name + as key for output dict. Therefore, two metrics of the same class cannot be chained this way. + + * arguments: similar to passing in as a list, metrics passed in as arguments will use their metric + class name as key for the output dict. + + * dict: if metrics are passed in as a dict, will use each key in the dict as key for output dict. + Use this format if you want to chain together multiple of the same metric with different parameters. + Note that the keys in the output dict will be sorted alphabetically. + + prefix: a string to append in front of the keys of the output dict + + postfix: a string to append after the keys of the output dict + + Raises: + ValueError: + If one of the elements of ``metrics`` is not an instance of ``pl.metrics.Metric``. + ValueError: + If two elements in ``metrics`` have the same ``name``. + ValueError: + If ``metrics`` is not a ``list``, ``tuple`` or a ``dict``. + ValueError: + If ``metrics`` is ``dict`` and additional_metrics are passed in. + ValueError: + If ``prefix`` is set and it is not a string. + ValueError: + If ``postfix`` is set and it is not a string. + + Example (input as list): + >>> import torchapi as B + >>> from pprint import pprint + >>> from paddlemetrics import MetricCollection, Accuracy, Precision, Recall + >>> target = B.tensor([0, 2, 0, 2, 0, 1, 0, 2]) + >>> preds = B.tensor([2, 1, 2, 0, 1, 2, 2, 2]) + >>> metrics = MetricCollection([Accuracy(), + ... Precision(num_classes=3, average='macro'), + ... Recall(num_classes=3, average='macro')]) + >>> metrics(preds, target) + {'Accuracy': tensor(0.1250), 'Precision': tensor(0.0667), 'Recall': tensor(0.1111)} + + Example (input as arguments): + >>> metrics = MetricCollection(Accuracy(), Precision(num_classes=3, average='macro'), + ... Recall(num_classes=3, average='macro')) + >>> metrics(preds, target) + {'Accuracy': tensor(0.1250), 'Precision': tensor(0.0667), 'Recall': tensor(0.1111)} + + Example (input as dict): + >>> metrics = MetricCollection({'micro_recall': Recall(num_classes=3, average='micro'), + ... 'macro_recall': Recall(num_classes=3, average='macro')}) + >>> same_metric = metrics.clone() + >>> pprint(metrics(preds, target)) + {'macro_recall': tensor(0.1111), 'micro_recall': tensor(0.1250)} + >>> pprint(same_metric(preds, target)) + {'macro_recall': tensor(0.1111), 'micro_recall': tensor(0.1250)} + >>> metrics.persistent() + """ + + def __init__( + self, + metrics: Union[Metric, Sequence[Metric], Dict[str, Metric]], + *additional_metrics: Metric, + prefix: Optional[str] = None, + postfix: Optional[str] = None, + ) -> None: + super().__init__() + + self._modules = self._sub_layers + + self.add_metrics(metrics, *additional_metrics) + + self.prefix = self._check_arg(prefix, "prefix") + self.postfix = self._check_arg(postfix, "postfix") + + def forward(self, *args: Any, **kwargs: Any) -> Dict[str, Any]: + """Iteratively call forward for each metric. 
+
+        Positional arguments (args) will be passed to every metric in the collection, while keyword arguments (kwargs)
+        will be filtered based on the signature of the individual metric.
+        """
+        return {k: m(*args, **m._filter_kwargs(**kwargs)) for k, m in self.items()}
+
+    def update(self, *args: Any, **kwargs: Any) -> None:
+        """Iteratively call update for each metric.
+
+        Positional arguments (args) will be passed to every metric in the collection, while keyword arguments (kwargs)
+        will be filtered based on the signature of the individual metric.
+        """
+        for _, m in self.items(keep_base=True):
+            m_kwargs = m._filter_kwargs(**kwargs)
+            m.update(*args, **m_kwargs)
+
+    def compute(self) -> Dict[str, Any]:
+        return {k: m.compute() for k, m in self.items()}
+
+    def reset(self) -> None:
+        """Iteratively call reset for each metric."""
+        for _, m in self.items(keep_base=True):
+            m.reset()
+
+    def clone(self, prefix: Optional[str] = None, postfix: Optional[str] = None) -> "MetricCollection":
+        """Make a copy of the metric collection.
+
+        Args:
+            prefix: a string to append in front of the metric keys
+            postfix: a string to append after the keys of the output dict
+        """
+        mc = deepcopy(self)
+        if prefix:
+            mc.prefix = self._check_arg(prefix, "prefix")
+        if postfix:
+            mc.postfix = self._check_arg(postfix, "postfix")
+        return mc
+
+    def persistent(self, mode: bool = True) -> None:
+        """Method for post-init to change if metric states should be saved to its state_dict."""
+        for _, m in self.items(keep_base=True):
+            m.persistent(mode)
+
+    def add_metrics(
+        self, metrics: Union[Metric, Sequence[Metric], Dict[str, Metric]], *additional_metrics: Metric
+    ) -> None:
+        """Add new metrics to Metric Collection."""
+        if isinstance(metrics, Metric):
+            # set compatible with original type expectations
+            metrics = [metrics]
+        if isinstance(metrics, Sequence):
+            # prepare for optional additions
+            metrics = list(metrics)
+            remain: list = []
+            for m in additional_metrics:
+                (metrics if isinstance(m, Metric) else remain).append(m)
+
+            if remain:
+                rank_zero_warn(
+                    f"You have passed extra arguments {remain} which are not `Metric` so they will be ignored."
+                )
+        elif additional_metrics:
+            raise ValueError(
+                f"You have passed extra arguments {additional_metrics} which are not compatible"
+                f" with first passed dictionary {metrics} so they will be ignored."
+            )
+
+        if isinstance(metrics, dict):
+            # Check all values are metrics
+            # Make sure that metrics are added in deterministic order
+            for name in sorted(metrics.keys()):
+                metric = metrics[name]
+                if not isinstance(metric, Metric):
+                    raise ValueError(
+                        f"Value {metric} belonging to key {name} is not an instance of `pl.metrics.Metric`"
+                    )
+                self[name] = metric
+        elif isinstance(metrics, Sequence):
+            for metric in metrics:
+                if not isinstance(metric, Metric):
+                    raise ValueError(f"Input {metric} to `MetricCollection` is not an instance of `pl.metrics.Metric`")
+                name = metric.__class__.__name__
+                if name in self:
+                    raise ValueError(f"Encountered two metrics both named {name}")
+                self[name] = metric
+        else:
+            raise ValueError("Unknown input to MetricCollection.")
+
+    def _set_name(self, base: str) -> str:
+        name = base if self.prefix is None else self.prefix + base
+        name = name if self.postfix is None else name + self.postfix
+        return name
+
+    def _to_renamed_ordered_dict(self) -> OrderedDict:
+        od = OrderedDict()
+        for k, v in self._modules.items():
+            od[self._set_name(k)] = v
+        return od
+
+    def keys(self, keep_base: bool = False) -> Iterable[Hashable]:
+        r"""Return an iterable of the ModuleDict keys.
+
+        Args:
+            keep_base: Whether to return the base metric names, without the prefix/postfix applied.
+        """
+        if keep_base:
+            return self._modules.keys()
+        return self._to_renamed_ordered_dict().keys()
+
+    def items(self, keep_base: bool = False) -> Iterable[Tuple[str, nn.Module]]:
+        r"""Return an iterable of the ModuleDict key/value pairs.
+
+        Args:
+            keep_base: Whether to return the base metric names, without the prefix/postfix applied.
+        """
+        if keep_base:
+            return self._modules.items()
+        return self._to_renamed_ordered_dict().items()
+
+    @staticmethod
+    def _check_arg(arg: Optional[str], name: str) -> Optional[str]:
+        if arg is None or isinstance(arg, str):
+            return arg
+        raise ValueError(f"Expected input `{name}` to be a string, but got {type(arg)}")
+
+    def __repr__(self) -> str:
+        repr_str = super().__repr__()[:-2]
+        if self.prefix:
+            repr_str += f",\n  prefix={self.prefix}{',' if self.postfix else ''}"
+        if self.postfix:
+            repr_str += f"{',' if not self.prefix else ''}\n  postfix={self.postfix}"
+        return repr_str + "\n)"
+
+    def to(self, device):
+        pass
\ No newline at end of file
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/__init__.py
new file mode 100644
index 000000000..365d93c97
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/__init__.py
@@ -0,0 +1,138 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
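+# A minimal usage sketch of the flattened functional namespace (example values
+# reused from the ``accuracy`` docstring; the ``paddleext.torchapi`` import
+# mirrors the module imports in this package and is an assumption here):
+#
+#     import paddleext.torchapi as B
+#     from paddlemetrics.functional import accuracy
+#
+#     accuracy(B.tensor([0, 2, 1, 3]), B.tensor([0, 1, 2, 3]))   # tensor(0.5000)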
+from paddlemetrics.functional.audio.pesq import pesq +from paddlemetrics.functional.audio.pit import pit, pit_permutate +from paddlemetrics.functional.audio.si_sdr import si_sdr +from paddlemetrics.functional.audio.si_snr import si_snr +from paddlemetrics.functional.audio.snr import snr +from paddlemetrics.functional.audio.stoi import stoi +from paddlemetrics.functional.classification.accuracy import accuracy +from paddlemetrics.functional.classification.auc import auc +from paddlemetrics.functional.classification.auroc import auroc +from paddlemetrics.functional.classification.average_precision import average_precision +from paddlemetrics.functional.classification.calibration_error import calibration_error +from paddlemetrics.functional.classification.cohen_kappa import cohen_kappa +from paddlemetrics.functional.classification.confusion_matrix import confusion_matrix +from paddlemetrics.functional.classification.dice import dice_score +from paddlemetrics.functional.classification.f_beta import f1, fbeta +from paddlemetrics.functional.classification.hamming_distance import hamming_distance +from paddlemetrics.functional.classification.hinge import hinge +from paddlemetrics.functional.classification.iou import iou +from paddlemetrics.functional.classification.kl_divergence import kl_divergence +from paddlemetrics.functional.classification.matthews_corrcoef import matthews_corrcoef +from paddlemetrics.functional.classification.precision_recall import precision, precision_recall, recall +from paddlemetrics.functional.classification.precision_recall_curve import precision_recall_curve +from paddlemetrics.functional.classification.roc import roc +from paddlemetrics.functional.classification.specificity import specificity +from paddlemetrics.functional.classification.stat_scores import stat_scores +from paddlemetrics.functional.image.gradients import image_gradients +from paddlemetrics.functional.image.psnr import psnr +from paddlemetrics.functional.image.ssim import ssim +from paddlemetrics.functional.pairwise.cosine import pairwise_cosine_similarity +from paddlemetrics.functional.pairwise.euclidean import pairwise_euclidean_distance +from paddlemetrics.functional.pairwise.linear import pairwise_linear_similarity +from paddlemetrics.functional.pairwise.manhatten import pairwise_manhatten_distance +from paddlemetrics.functional.regression.cosine_similarity import cosine_similarity +from paddlemetrics.functional.regression.explained_variance import explained_variance +from paddlemetrics.functional.regression.mean_absolute_error import mean_absolute_error +from paddlemetrics.functional.regression.mean_absolute_percentage_error import mean_absolute_percentage_error +from paddlemetrics.functional.regression.mean_squared_error import mean_squared_error +from paddlemetrics.functional.regression.mean_squared_log_error import mean_squared_log_error +from paddlemetrics.functional.regression.pearson import pearson_corrcoef +from paddlemetrics.functional.regression.r2 import r2_score +from paddlemetrics.functional.regression.spearman import spearman_corrcoef +from paddlemetrics.functional.regression.symmetric_mean_absolute_percentage_error import ( + symmetric_mean_absolute_percentage_error, +) +from paddlemetrics.functional.regression.tweedie_deviance import tweedie_deviance_score +from paddlemetrics.functional.retrieval.average_precision import retrieval_average_precision +from paddlemetrics.functional.retrieval.fall_out import retrieval_fall_out +from paddlemetrics.functional.retrieval.hit_rate import 
retrieval_hit_rate +from paddlemetrics.functional.retrieval.ndcg import retrieval_normalized_dcg +from paddlemetrics.functional.retrieval.precision import retrieval_precision +from paddlemetrics.functional.retrieval.r_precision import retrieval_r_precision +from paddlemetrics.functional.retrieval.recall import retrieval_recall +from paddlemetrics.functional.retrieval.reciprocal_rank import retrieval_reciprocal_rank +from paddlemetrics.functional.self_supervised import embedding_similarity +#from paddlemetrics.functional.text.bert import bert_score +from paddlemetrics.functional.text.bleu import bleu_score +from paddlemetrics.functional.text.rouge import rouge_score +from paddlemetrics.functional.text.sacre_bleu import sacre_bleu_score +from paddlemetrics.functional.text.wer import wer + +__all__ = [ + "accuracy", + "auc", + "auroc", + "average_precision", +# "bert_score", + "bleu_score", + "calibration_error", + "cohen_kappa", + "confusion_matrix", + "cosine_similarity", + "tweedie_deviance_score", + "dice_score", + "embedding_similarity", + "explained_variance", + "f1", + "fbeta", + "hamming_distance", + "hinge", + "image_gradients", + "iou", + "kl_divergence", + "kldivergence", + "matthews_corrcoef", + "mean_absolute_error", + "mean_absolute_percentage_error", + "mean_squared_error", + "mean_squared_log_error", + "pairwise_cosine_similarity", + "pairwise_euclidean_distance", + "pairwise_linear_similarity", + "pairwise_manhatten_distance", + "pearson_corrcoef", + "pesq", + "pit", + "pit_permutate", + "precision", + "precision_recall", + "precision_recall_curve", + "psnr", + "r2_score", + "r2score", + "recall", + "retrieval_average_precision", + "retrieval_fall_out", + "retrieval_hit_rate", + "retrieval_normalized_dcg", + "retrieval_precision", + "retrieval_r_precision", + "retrieval_recall", + "retrieval_reciprocal_rank", + "roc", + "rouge_score", + "sacre_bleu_score", + "si_sdr", + "si_snr", + "snr", + "spearman_corrcoef", + "specificity", + "ssim", + "stat_scores", + "stoi", + "symmetric_mean_absolute_percentage_error", + "wer", +] diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/__init__.py new file mode 100644 index 000000000..a7e7d89c0 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/__init__.py @@ -0,0 +1,19 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
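+# All audio functional metrics below share the ``metric(preds, target, ...)``
+# call pattern; a minimal sketch (values reused from the ``si_sdr`` docstring,
+# backend import assumed as in the module code):
+#
+#     import paddleext.torchapi as B
+#     from paddlemetrics.functional.audio import si_sdr
+#
+#     target = B.tensor([3.0, -0.5, 2.0, 7.0])
+#     preds = B.tensor([2.5, 0.0, 2.0, 8.0])
+#     si_sdr(preds, target)   # tensor(18.4030)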
+from paddlemetrics.functional.audio.pesq import pesq  # noqa: F401
+from paddlemetrics.functional.audio.pit import pit, pit_permutate  # noqa: F401
+from paddlemetrics.functional.audio.si_sdr import si_sdr  # noqa: F401
+from paddlemetrics.functional.audio.si_snr import si_snr  # noqa: F401
+from paddlemetrics.functional.audio.snr import snr  # noqa: F401
+from paddlemetrics.functional.audio.stoi import stoi  # noqa: F401
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/pesq.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/pesq.py
new file mode 100644
index 000000000..ab81723da
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/pesq.py
@@ -0,0 +1,100 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import numpy as np
+
+from paddlemetrics.utilities.imports import _PESQ_AVAILABLE
+
+if _PESQ_AVAILABLE:
+    import pesq as pesq_backend
+else:
+    pesq_backend = None
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.utilities.checks import _check_same_shape
+
+
+def pesq(preds: Tensor, target: Tensor, fs: int, mode: str, keep_same_device: bool = False) -> Tensor:
+    r"""PESQ (Perceptual Evaluation of Speech Quality)
+
+    This is a wrapper for the ``pesq`` package [1]. Note that input will be moved to `cpu`
+    to perform the metric calculation.
+
+    .. note:: using this metric requires you to have ``pesq`` installed. Either install as ``pip install
+        paddlemetrics[audio]`` or ``pip install pesq``
+
+    Args:
+        preds:
+            shape ``[...,time]``
+        target:
+            shape ``[...,time]``
+        fs:
+            sampling frequency, should be 16000 or 8000 (Hz)
+        mode:
+            'wb' (wide-band) or 'nb' (narrow-band)
+        keep_same_device:
+            whether to move the pesq value to the device of preds
+
+    Returns:
+        pesq value of shape [...]
+
+    Raises:
+        ValueError:
+            If ``pesq`` package is not installed
+        ValueError:
+            If ``fs`` is not either ``8000`` or ``16000``
+        ValueError:
+            If ``mode`` is not either ``"wb"`` or ``"nb"``
+
+    Example:
+        >>> from paddlemetrics.functional.audio import pesq
+        >>> import torchapi as B
+        >>> g = B.manual_seed(1)
+        >>> preds = B.randn(8000)
+        >>> target = B.randn(8000)
+        >>> pesq(preds, target, 8000, 'nb')
+        tensor(2.2076)
+        >>> pesq(preds, target, 16000, 'wb')
+        tensor(1.7359)
+
+    References:
+        [1] https://github.com/ludlows/python-pesq
+    """
+    if not _PESQ_AVAILABLE:
+        raise ValueError(
+            "PESQ metric requires that pesq is installed."
+            "Either install as `pip install paddlemetrics[audio]` or `pip install pesq`"
+        )
+    if fs not in (8000, 16000):
+        raise ValueError(f"Expected argument `fs` to either be 8000 or 16000 but got {fs}")
+    if mode not in ("wb", "nb"):
+        raise ValueError(f"Expected argument `mode` to either be 'wb' or 'nb' but got {mode}")
+    _check_same_shape(preds, target)
+
+    if preds.ndim == 1:
+        pesq_val_np = pesq_backend.pesq(fs, target.detach().cpu().numpy(), preds.detach().cpu().numpy(), mode)
+        pesq_val = B.tensor(pesq_val_np)
+    else:
+        preds_np = preds.reshape(-1, preds.shape[-1]).detach().cpu().numpy()
+        target_np = target.reshape(-1, preds.shape[-1]).detach().cpu().numpy()
+        pesq_val_np = np.empty(shape=(preds_np.shape[0]))
+        for b in range(preds_np.shape[0]):
+            pesq_val_np[b] = pesq_backend.pesq(fs, target_np[b, :], preds_np[b, :], mode)
+        pesq_val = B.from_numpy(pesq_val_np)
+        pesq_val = pesq_val.reshape(preds.shape[:-1])
+
+    if keep_same_device:
+        pesq_val = pesq_val.to(preds.device)
+
+    return pesq_val
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/pit.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/pit.py
new file mode 100644
index 000000000..3ca729a2d
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/pit.py
@@ -0,0 +1,206 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import warnings
+from itertools import permutations
+from typing import Any, Callable, Dict, Tuple, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.utilities.checks import _check_same_shape
+from paddlemetrics.utilities.imports import _SCIPY_AVAILABLE
+
+# _ps_dict: cache of permutations
+# it's necessary to cache it, otherwise it will consume a large amount of time
+_ps_dict: dict = {}  # _ps_dict[str(spk_num)+str(device)] = permutations
+
+
+def _find_best_perm_by_linear_sum_assignment(
+    metric_mtx: B.Tensor,
+    eval_func: Union[B.min, B.max],
+) -> Tuple[Tensor, Tensor]:
+    """Solves the linear sum assignment problem using scipy, and returns the best metric values and the
+    corresponding permutations.
+
+    Args:
+        metric_mtx:
+            the metric matrix, shape [batch_size, spk_num, spk_num]
+        eval_func:
+            the function to reduce the metric values of the different permutations
+
+    Returns:
+        best_metric:
+            shape [batch]
+        best_perm:
+            shape [batch, spk]
+    """
+    from scipy.optimize import linear_sum_assignment
+
+    mmtx = metric_mtx.detach().cpu()
+    best_perm = B.tensor([linear_sum_assignment(pwm, eval_func == B.max)[1] for pwm in mmtx])
+    best_perm = best_perm.to(metric_mtx.device)
+    best_metric = B.gather(metric_mtx, 2, best_perm[:, :, None]).mean([-1, -2])
+    return best_metric, best_perm  # shape [batch], shape [batch, spk]
+
+
+def _find_best_perm_by_exhuastive_method(
+    metric_mtx: B.Tensor,
+    eval_func: Union[B.min, B.max],
+) -> Tuple[Tensor, Tensor]:
+    """Solves the assignment problem using the exhaustive method, i.e. exhaustively calculates the metric
+    values of all possible permutations, and returns the best metric values and the corresponding permutations.
+
+    Args:
+        metric_mtx:
+            the metric matrix, shape [batch_size, spk_num, spk_num]
+        eval_func:
+            the function to reduce the metric values of the different permutations
+
+    Returns:
+        best_metric:
+            shape [batch]
+        best_perm:
+            shape [batch, spk]
+    """
+    # create/read/cache the permutations and its indexes
+    # reading from cache would be much faster than creating in CPU then moving to GPU
+    batch_size, spk_num = metric_mtx.shape[:2]
+    key = str(spk_num) + str(metric_mtx.device)
+    if key not in _ps_dict:
+        # ps: all the permutations, shape [spk_num, perm_num]
+        # ps: In the i-th permutation, the prediction corresponding to the j-th target is ps[j,i]
+        ps = B.tensor(list(permutations(range(spk_num))), device=metric_mtx.device).T
+        _ps_dict[key] = ps
+    else:
+        ps = _ps_dict[key]  # all the permutations, shape [spk_num, perm_num]
+
+    # find the metric of each permutation
+    perm_num = ps.shape[-1]
+    # shape [batch_size, spk_num, perm_num]
+    bps = ps[None, ...].expand(batch_size, spk_num, perm_num)
+    # shape [batch_size, spk_num, perm_num]
+    metric_of_ps_details = B.gather(metric_mtx, 2, bps)
+    # shape [batch_size, perm_num]
+    metric_of_ps = metric_of_ps_details.mean(dim=1)
+
+    # find the best metric and best permutation
+    best_metric, best_indexes = eval_func(metric_of_ps, dim=1)
+    best_indexes = best_indexes.detach()
+    best_perm = ps.T[best_indexes, :]
+    return best_metric, best_perm  # shape [batch], shape [batch, spk]
+
+
+def pit(
+    preds: B.Tensor, target: B.Tensor, metric_func: Callable, eval_func: str = "max", **kwargs: Dict[str, Any]
+) -> Tuple[Tensor, Tensor]:
+    """Permutation invariant training (PIT). The PIT implements the famous Permutation Invariant Training
+    method [1] in the speech separation field, in order to calculate audio metrics in a permutation invariant way.
+
+    Args:
+        preds:
+            shape [batch, spk, ...]
+        target:
+            shape [batch, spk, ...]
+        metric_func:
+            a metric function that accepts a batch of target and estimate,
+            i.e. metric_func(preds[:, i, ...], target[:, j, ...]), and returns a batch of metric tensors [batch]
+        eval_func:
+            the function to find the best permutation, can be 'min' or 'max',
+            i.e. the smaller the better or the larger the better.
+ kwargs: + additional args for metric_func + + Returns: + best_metric of shape [batch], + best_perm of shape [batch] + + Example: + >>> from paddlemetrics.functional.audio import si_sdr + >>> # [batch, spk, time] + >>> preds = B.tensor([[[-0.0579, 0.3560, -0.9604], [-0.1719, 0.3205, 0.2951]]]) + >>> target = B.tensor([[[ 1.0958, -0.1648, 0.5228], [-0.4100, 1.1942, -0.5103]]]) + >>> best_metric, best_perm = pit(preds, target, si_sdr, 'max') + >>> best_metric + tensor([-5.1091]) + >>> best_perm + tensor([[0, 1]]) + >>> pit_permutate(preds, best_perm) + tensor([[[-0.0579, 0.3560, -0.9604], + [-0.1719, 0.3205, 0.2951]]]) + + Reference: + [1] `Permutation Invariant Training of Deep Models`_ + """ + _check_same_shape(preds, target) + if eval_func not in ["max", "min"]: + raise ValueError(f'eval_func can only be "max" or "min" but got {eval_func}') + if target.ndim < 2: + raise ValueError(f"Inputs must be of shape [batch, spk, ...], got {target.shape} and {preds.shape} instead") + + # calculate the metric matrix + batch_size, spk_num = target.shape[0:2] + metric_mtx = None + for target_idx in range(spk_num): # we have spk_num speeches in target in each sample + for preds_idx in range(spk_num): # we have spk_num speeches in preds in each sample + if metric_mtx is not None: + metric_mtx[:, target_idx, preds_idx] = metric_func( + preds[:, preds_idx, ...], target[:, target_idx, ...], **kwargs + ) + else: + first_ele = metric_func(preds[:, preds_idx, ...], target[:, target_idx, ...], **kwargs) + metric_mtx = B.empty((batch_size, spk_num, spk_num), dtype=first_ele.dtype, device=first_ele.device) + metric_mtx[:, target_idx, preds_idx] = first_ele + + # find best + op = B.max if eval_func == "max" else B.min + if spk_num < 3 or not _SCIPY_AVAILABLE: + if spk_num >= 3 and not _SCIPY_AVAILABLE: + warnings.warn( + f"In pit metric for speaker-num {spk_num}>3, we recommend installing scipy for better performance" + ) + + best_metric, best_perm = _find_best_perm_by_exhuastive_method(metric_mtx, op) + else: + best_metric, best_perm = _find_best_perm_by_linear_sum_assignment(metric_mtx, op) + + return best_metric, best_perm + + +def pit_permutate(preds: Tensor, perm: Tensor) -> Tensor: + """permutate estimate according to perm. + + Args: + preds (Tensor): the estimates you want to permutate, shape [batch, spk, ...] + perm (Tensor): the permutation returned from pit, shape [batch, spk] + + Returns: + Tensor: the permutated version of estimate + + Example: + >>> from paddlemetrics.functional.audio import si_sdr + >>> # [batch, spk, time] + >>> preds = B.tensor([[[-0.0579, 0.3560, -0.9604], [-0.1719, 0.3205, 0.2951]]]) + >>> target = B.tensor([[[ 1.0958, -0.1648, 0.5228], [-0.4100, 1.1942, -0.5103]]]) + >>> best_metric, best_perm = pit(preds, target, si_sdr, 'max') + >>> best_metric + tensor([-5.1091]) + >>> best_perm + tensor([[0, 1]]) + >>> pit_permutate(preds, best_perm) + tensor([[[-0.0579, 0.3560, -0.9604], + [-0.1719, 0.3205, 0.2951]]]) + """ + preds_pmted = B.stack([B.index_select(pred, 0, p) for pred, p in zip(preds, perm)]) + return preds_pmted diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/si_sdr.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/si_sdr.py new file mode 100644 index 000000000..66eb9e3ae --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/si_sdr.py @@ -0,0 +1,64 @@ +# Copyright The PyTorch Lightning team. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.utilities.checks import _check_same_shape
+
+
+def si_sdr(preds: Tensor, target: Tensor, zero_mean: bool = False) -> Tensor:
+    """Calculates Scale-invariant signal-to-distortion ratio (SI-SDR) metric. The SI-SDR value is in general
+    considered an overall measure of how good a source sounds.
+
+    Args:
+        preds:
+            shape ``[...,time]``
+        target:
+            shape ``[...,time]``
+        zero_mean:
+            whether to zero-mean target and preds before computing the metric
+
+    Returns:
+        si-sdr value of shape [...]
+
+    Example:
+        >>> from paddlemetrics.functional.audio import si_sdr
+        >>> target = B.tensor([3.0, -0.5, 2.0, 7.0])
+        >>> preds = B.tensor([2.5, 0.0, 2.0, 8.0])
+        >>> si_sdr_val = si_sdr(preds, target)
+        >>> si_sdr_val
+        tensor(18.4030)
+
+    References:
+        [1] Le Roux, Jonathan, et al. "SDR half-baked or well done." IEEE International Conference on Acoustics, Speech
+        and Signal Processing (ICASSP) 2019.
+    """
+    _check_same_shape(preds, target)
+    EPS = B.finfo(preds.dtype).eps
+
+    if zero_mean:
+        target = target - B.mean(target, dim=-1, keepdim=True)
+        preds = preds - B.mean(preds, dim=-1, keepdim=True)
+
+    alpha = (B.sum(preds * target, dim=-1, keepdim=True) + EPS) / (
+        B.sum(target ** 2, dim=-1, keepdim=True) + EPS
+    )
+    target_scaled = alpha * target
+
+    noise = target_scaled - preds
+
+    si_sdr_value = (B.sum(target_scaled ** 2, dim=-1) + EPS) / (B.sum(noise ** 2, dim=-1) + EPS)
+    si_sdr_value = 10 * B.log10(si_sdr_value)
+
+    return si_sdr_value
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/si_snr.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/si_snr.py
new file mode 100644
index 000000000..abddf039f
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/si_snr.py
@@ -0,0 +1,46 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.audio.si_sdr import si_sdr
+
+
+def si_snr(preds: Tensor, target: Tensor) -> Tensor:
+    """Scale-invariant signal-to-noise ratio (SI-SNR).
+
+    Args:
+        preds:
+            shape ``[...,time]``
+        target:
+            shape ``[...,time]``
+
+    Returns:
+        si-snr value of shape [...]
+
+    Example:
+        >>> import torchapi as B
+        >>> from paddlemetrics.functional.audio import si_snr
+        >>> target = B.tensor([3.0, -0.5, 2.0, 7.0])
+        >>> preds = B.tensor([2.5, 0.0, 2.0, 8.0])
+        >>> si_snr_val = si_snr(preds, target)
+        >>> si_snr_val
+        tensor(15.0918)
+
+    References:
+        [1] Y. Luo and N. Mesgarani, "TaSNet: Time-Domain Audio Separation Network for Real-Time, Single-Channel Speech
+        Separation," 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 2018, pp.
+        696-700, doi: 10.1109/ICASSP.2018.8462116.
+    """
+
+    return si_sdr(target=target, preds=preds, zero_mean=True)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/snr.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/snr.py
new file mode 100644
index 000000000..8c54128ba
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/snr.py
@@ -0,0 +1,66 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.utilities.checks import _check_same_shape
+
+
+def snr(preds: Tensor, target: Tensor, zero_mean: bool = False) -> Tensor:
+    r"""Signal-to-noise ratio (SNR_):
+
+    .. math::
+        \text{SNR} = \frac{P_{signal}}{P_{noise}}
+
+    where :math:`P` denotes the power of each signal. The SNR metric compares the level
+    of the desired signal to the level of background noise. Therefore, a high value of
+    SNR means that the audio is clear.
+
+    Args:
+        preds:
+            shape ``[...,time]``
+        target:
+            shape ``[...,time]``
+        zero_mean:
+            whether to zero-mean target and preds before computing the metric
+
+    Returns:
+        snr value of shape [...]
+
+    Example:
+        >>> from paddlemetrics.functional.audio import snr
+        >>> target = B.tensor([3.0, -0.5, 2.0, 7.0])
+        >>> preds = B.tensor([2.5, 0.0, 2.0, 8.0])
+        >>> snr_val = snr(preds, target)
+        >>> snr_val
+        tensor(16.1805)
+
+    References:
+        [1] Le Roux, Jonathan, et al. "SDR half-baked or well done." IEEE International Conference on Acoustics, Speech
+        and Signal Processing (ICASSP) 2019.
+
+    """
+    _check_same_shape(preds, target)
+    EPS = B.finfo(preds.dtype).eps
+
+    if zero_mean:
+        target = target - B.mean(target, dim=-1, keepdim=True)
+        preds = preds - B.mean(preds, dim=-1, keepdim=True)
+
+    noise = target - preds
+
+    snr_value = (B.sum(target ** 2, dim=-1) + EPS) / (B.sum(noise ** 2, dim=-1) + EPS)
+    snr_value = 10 * B.log10(snr_value)
+
+    return snr_value
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/stoi.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/stoi.py
new file mode 100644
index 000000000..4c1f5806d
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/stoi.py
@@ -0,0 +1,105 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import numpy as np
+import paddleext.torchapi as B
+
+from paddlemetrics.utilities.imports import _PYSTOI_AVAILABLE
+
+if _PYSTOI_AVAILABLE:
+    from pystoi import stoi as stoi_backend
+else:
+    stoi_backend = None
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.utilities.checks import _check_same_shape
+
+
+def stoi(preds: Tensor, target: Tensor, fs: int, extended: bool = False, keep_same_device: bool = False) -> Tensor:
+    r"""STOI (Short Term Objective Intelligibility, see [2,3]), a wrapper for the pystoi package [1].
+    Note that input will be moved to `cpu` to perform the metric calculation.
+
+    Intelligibility measure which is highly correlated with the intelligibility of degraded speech signals, e.g., due
+    to additive noise, single/multi-channel noise reduction, binary masking and vocoded speech as in CI simulations.
+    The STOI-measure is intrusive, i.e., a function of the clean and degraded speech signals. STOI may be a good
+    alternative to the speech intelligibility index (SII) or the speech transmission index (STI), when you are
+    interested in the effect of nonlinear processing on noisy speech, e.g., noise reduction or binary masking
+    algorithms, on speech intelligibility. Description taken from [Cees Taal's website](http://www.ceestaal.nl/code/).
+
+    .. note:: using this metric requires you to have ``pystoi`` installed. Either install as ``pip install
+        paddlemetrics[audio]`` or ``pip install pystoi``
+
+    Args:
+        preds:
+            shape ``[...,time]``
+        target:
+            shape ``[...,time]``
+        fs:
+            sampling frequency (Hz)
+        extended:
+            whether to use the extended STOI described in [4]
+        keep_same_device:
+            whether to move the stoi value to the device of preds
+
+    Returns:
+        stoi value of shape [...]
+
+    Raises:
+        ValueError:
+            If ``pystoi`` package is not installed
+
+    Example:
+        >>> from paddlemetrics.functional.audio import stoi
+        >>> import torchapi as B
+        >>> g = B.manual_seed(1)
+        >>> preds = B.randn(8000)
+        >>> target = B.randn(8000)
+        >>> stoi(preds, target, 8000).float()
+        tensor(-0.0100)
+
+    References:
+        [1] https://github.com/mpariente/pystoi
+
+        [2] C.H.Taal, R.C.Hendriks, R.Heusdens, J.Jensen 'A Short-Time Objective Intelligibility Measure for
+        Time-Frequency Weighted Noisy Speech', ICASSP 2010, Texas, Dallas.
+
+        [3] C.H.Taal, R.C.Hendriks, R.Heusdens, J.Jensen 'An Algorithm for Intelligibility Prediction of
+        Time-Frequency Weighted Noisy Speech', IEEE Transactions on Audio, Speech, and Language Processing, 2011.
+
+        [4] J. Jensen and C. H. Taal, 'An Algorithm for Predicting the Intelligibility of Speech Masked by Modulated
+        Noise Maskers', IEEE Transactions on Audio, Speech and Language Processing, 2016.
+
+    """
+    if not _PYSTOI_AVAILABLE:
+        raise ValueError(
+            "STOI metric requires that pystoi is installed."
+ "Either install as `pip install paddlemetrics[audio]` or `pip install pystoi`" + ) + _check_same_shape(preds, target) + + if len(preds.shape) == 1: + stoi_val_np = stoi_backend(target.detach().cpu().numpy(), preds.detach().cpu().numpy(), fs, extended) + stoi_val = B.tensor(stoi_val_np) + else: + preds_np = preds.reshape(-1, preds.shape[-1]).detach().cpu().numpy() + target_np = target.reshape(-1, preds.shape[-1]).detach().cpu().numpy() + stoi_val_np = np.empty(shape=(preds_np.shape[0])) + for b in range(preds_np.shape[0]): + stoi_val_np[b] = stoi_backend(target_np[b, :], preds_np[b, :], fs, extended) + stoi_val = B.from_numpy(stoi_val_np) + stoi_val = stoi_val.reshape(preds.shape[:-1]) + + if keep_same_device: + stoi_val = stoi_val.to(preds.device) + + return stoi_val diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/__init__.py new file mode 100644 index 000000000..a03982c8c --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/__init__.py @@ -0,0 +1,32 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from paddlemetrics.functional.classification.accuracy import accuracy # noqa: F401 +from paddlemetrics.functional.classification.auc import auc # noqa: F401 +from paddlemetrics.functional.classification.auroc import auroc # noqa: F401 +from paddlemetrics.functional.classification.average_precision import average_precision # noqa: F401 +from paddlemetrics.functional.classification.calibration_error import calibration_error # noqa: F401 +from paddlemetrics.functional.classification.cohen_kappa import cohen_kappa # noqa: F401 +from paddlemetrics.functional.classification.confusion_matrix import confusion_matrix # noqa: F401 +from paddlemetrics.functional.classification.dice import dice_score # noqa: F401 +from paddlemetrics.functional.classification.f_beta import f1, fbeta # noqa: F401 +from paddlemetrics.functional.classification.hamming_distance import hamming_distance # noqa: F401 +from paddlemetrics.functional.classification.hinge import hinge # noqa: F401 +from paddlemetrics.functional.classification.iou import iou # noqa: F401 +from paddlemetrics.functional.classification.kl_divergence import kl_divergence # noqa: F401 +from paddlemetrics.functional.classification.matthews_corrcoef import matthews_corrcoef # noqa: F401 +from paddlemetrics.functional.classification.precision_recall import precision, precision_recall, recall # noqa: F401 +from paddlemetrics.functional.classification.precision_recall_curve import precision_recall_curve # noqa: F401 +from paddlemetrics.functional.classification.roc import roc # noqa: F401 +from paddlemetrics.functional.classification.specificity import specificity # noqa: F401 +from paddlemetrics.functional.classification.stat_scores import stat_scores # noqa: F401 diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/accuracy.py 
b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/accuracy.py
new file mode 100644
index 000000000..44c89fa92
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/accuracy.py
@@ -0,0 +1,418 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional, Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.functional.classification.stat_scores import _reduce_stat_scores, _stat_scores_update
+from paddlemetrics.utilities.checks import _check_classification_inputs, _input_format_classification, _input_squeeze
+from paddlemetrics.utilities.enums import AverageMethod, DataType, MDMCAverageMethod
+
+
+def _check_subset_validity(mode: DataType) -> bool:
+    """Checks if the input mode is valid."""
+    return mode in (DataType.MULTILABEL, DataType.MULTIDIM_MULTICLASS)
+
+
+def _mode(
+    preds: Tensor,
+    target: Tensor,
+    threshold: float,
+    top_k: Optional[int],
+    num_classes: Optional[int],
+    multiclass: Optional[bool],
+) -> DataType:
+    """Finds the mode of the input tensors.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+        threshold: Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the
+            case of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
+        top_k: Number of highest probability or logit score predictions considered to find the correct label,
+            relevant only for (multi-dimensional) multi-class inputs.
+        num_classes: Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods.
+        multiclass:
+            Used only in certain special cases, where you want to treat inputs as a different type
+            than what they appear to be.
+
+    Example:
+        >>> target = B.tensor([0, 1, 2, 3])
+        >>> preds = B.tensor([0, 2, 1, 3])
+        >>> _mode(preds, target, 0.5, None, None, None)
+
+    """
+
+    mode = _check_classification_inputs(
+        preds, target, threshold=threshold, top_k=top_k, num_classes=num_classes, multiclass=multiclass
+    )
+    return mode
+
+
+def _accuracy_update(
+    preds: Tensor,
+    target: Tensor,
+    reduce: Optional[str],
+    mdmc_reduce: Optional[str],
+    threshold: float,
+    num_classes: Optional[int],
+    top_k: Optional[int],
+    multiclass: Optional[bool],
+    ignore_index: Optional[int],
+    mode: DataType,
+) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
+    """Updates and returns stat scores (true positives, false positives, true negatives, false negatives) required
+    to compute accuracy.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+        reduce: Defines the reduction that is applied.
+        mdmc_reduce: Defines how the multi-dimensional multi-class inputs are handled.
+        threshold: Threshold for transforming probability or logit predictions to binary (0,1) predictions, in
+            the case of binary or multi-label inputs.
+        num_classes: Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods.
+ top_k: Number of highest probability or logit score predictions considered to find the correct label, + relevant only for (multi-dimensional) multi-class inputs. + multiclass: Used only in certain special cases, where you want to treat inputs as a different type + than what they appear to be. + ignore_index: Integer specifying a target class to ignore. If given, this class index does not contribute + to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` + or ``'none'``, the score for the ignored class will be returned as ``nan``. + mode: Mode of the input tensors + """ + + if mode == DataType.MULTILABEL and top_k: + raise ValueError("You can not use the `top_k` parameter to calculate accuracy for multi-label inputs.") + + preds, target = _input_squeeze(preds, target) + tp, fp, tn, fn = _stat_scores_update( + preds, + target, + reduce=reduce, + mdmc_reduce=mdmc_reduce, + threshold=threshold, + num_classes=num_classes, + top_k=top_k, + multiclass=multiclass, + ignore_index=ignore_index, + ) + return tp, fp, tn, fn + + +def _accuracy_compute( + tp: Tensor, + fp: Tensor, + tn: Tensor, + fn: Tensor, + average: Optional[str], + mdmc_average: Optional[str], + mode: DataType, +) -> Tensor: + """Computes accuracy from stat scores: true positives, false positives, true negatives, false negatives. + + Args: + tp: True positives + fp: False positives + tn: True negatives + fn: False negatives + average: Defines the reduction that is applied. + mdmc_average: Defines how averaging is done for multi-dimensional multi-class inputs (on top of the + ``average`` parameter). + mode: Mode of the input tensors + + Example: + >>> preds = B.tensor([0, 2, 1, 3]) + >>> target = B.tensor([0, 1, 2, 3]) + >>> threshold = 0.5 + >>> reduce = average = 'micro' + >>> mdmc_average = 'global' + >>> mode = _mode(preds, target, threshold, top_k=None, num_classes=None, multiclass=None) + >>> tp, fp, tn, fn = _accuracy_update( + ... preds, + ... target, + ... reduce, + ... mdmc_average, + ... threshold=0.5, + ... num_classes=None, + ... top_k=None, + ... multiclass=None, + ... ignore_index=None, + ... mode=mode) + >>> _accuracy_compute(tp, fp, tn, fn, average, mdmc_average, mode) + tensor(0.5000) + + >>> target = B.tensor([0, 1, 2]) + >>> preds = B.tensor([[0.1, 0.9, 0], [0.3, 0.1, 0.6], [0.2, 0.5, 0.3]]) + >>> top_k, threshold = 2, 0.5 + >>> reduce = average = 'micro' + >>> mdmc_average = 'global' + >>> mode = _mode(preds, target, threshold, top_k, num_classes=None, multiclass=None) + >>> tp, fp, tn, fn = _accuracy_update( + ... preds, + ... target, + ... reduce, + ... mdmc_average, + ... threshold, + ... num_classes=None, + ... top_k=top_k, + ... multiclass=None, + ... ignore_index=None, + ... 
mode=mode) + >>> _accuracy_compute(tp, fp, tn, fn, average, mdmc_average, mode) + tensor(0.6667) + """ + + simple_average = [AverageMethod.MICRO, AverageMethod.SAMPLES] + if (mode == DataType.BINARY and average in simple_average) or mode == DataType.MULTILABEL: + numerator = tp + tn + denominator = tp + tn + fp + fn + else: + numerator = tp + denominator = tp + fn + + if average == AverageMethod.MACRO and mdmc_average != MDMCAverageMethod.SAMPLEWISE: + cond = tp + fp + fn == 0 + numerator = numerator[~cond] + denominator = denominator[~cond] + + if average == AverageMethod.NONE and mdmc_average != MDMCAverageMethod.SAMPLEWISE: + # a class is not present if there exists no TPs, no FPs, and no FNs + meaningless_indeces = B.nonzero((tp | fn | fp) == 0).cpu() + numerator[meaningless_indeces, ...] = -1 + denominator[meaningless_indeces, ...] = -1 + + return _reduce_stat_scores( + numerator=numerator, + denominator=denominator, + weights=None if average != AverageMethod.WEIGHTED else tp + fn, + average=average, + mdmc_average=mdmc_average, + ) + + +def _subset_accuracy_update( + preds: Tensor, + target: Tensor, + threshold: float, + top_k: Optional[int], +) -> Tuple[Tensor, Tensor]: + """Updates and returns variables required to compute subset accuracy. + + Args: + preds: Predicted tensor + target: Ground truth tensor + threshold: Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case + of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. + top_k: Number of highest probability or logit score predictions considered to find the correct label, + relevant only for (multi-dimensional) multi-class inputs. + """ + + preds, target = _input_squeeze(preds, target) + preds, target, mode = _input_format_classification(preds, target, threshold=threshold, top_k=top_k) + + if mode == DataType.MULTILABEL and top_k: + raise ValueError("You can not use the `top_k` parameter to calculate accuracy for multi-label inputs.") + + if mode == DataType.MULTILABEL: + correct = (preds == target).all(dim=1).sum() + total = tensor(target.shape[0], device=target.device) + elif mode == DataType.MULTICLASS: + correct = (preds * target).sum() + total = target.sum() + elif mode == DataType.MULTIDIM_MULTICLASS: + sample_correct = (preds * target).sum(dim=(1, 2)) + correct = (sample_correct == target.shape[2]).sum() + total = tensor(target.shape[0], device=target.device) + else: + correct, total = tensor(0), tensor(0) + + return correct, total + + +def _subset_accuracy_compute(correct: Tensor, total: Tensor) -> Tensor: + """Computes subset accuracy from number of correct observations and total number of observations. + + Args: + correct: Number of correct observations + total: Number of observations + """ + + return correct.float() / total + + +def accuracy( + preds: Tensor, + target: Tensor, + average: str = "micro", + mdmc_average: Optional[str] = "global", + threshold: float = 0.5, + top_k: Optional[int] = None, + subset_accuracy: bool = False, + num_classes: Optional[int] = None, + multiclass: Optional[bool] = None, + ignore_index: Optional[int] = None, +) -> Tensor: + r"""Computes `Accuracy`_ + + .. math:: + \text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i) + + Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a + tensor of predictions. 
+ + For multi-class and multi-dimensional multi-class data with probability or logits predictions, the + parameter ``top_k`` generalizes this metric to a Top-K accuracy metric: for each sample the + top-K highest probability or logits items are considered to find the correct label. + + For multi-label and multi-dimensional multi-class inputs, this metric computes the "global" + accuracy by default, which counts all labels or sub-samples separately. This can be + changed to subset accuracy (which requires all labels or sub-samples in the sample to + be correctly predicted) by setting ``subset_accuracy=True``. + + Accepts all input types listed in :ref:`references/modules:input types`. + + Args: + preds: Predictions from model (probabilities, logits or labels) + target: Ground truth labels + average: + Defines the reduction that is applied. Should be one of the following: + + - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes. + - ``'macro'``: Calculate the metric for each class separately, and average the + metrics across classes (with equal weights for each class). + - ``'weighted'``: Calculate the metric for each class separately, and average the + metrics across classes, weighting each class by its support (``tp + fn``). + - ``'none'`` or ``None``: Calculate the metric for each class separately, and return + the metric for every class. + - ``'samples'``: Calculate the metric for each sample, and average the metrics + across samples (with equal weights for each sample). + + .. note:: What is considered a sample in the multi-dimensional multi-class case + depends on the value of ``mdmc_average``. + + .. note:: If ``'none'`` and a given class doesn't occur in the `preds` or `target`, + the value for the class will be ``nan``. + + mdmc_average: + Defines how averaging is done for multi-dimensional multi-class inputs (on top of the + ``average`` parameter). Should be one of the following: + + - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional + multi-class. + + - ``'samplewise'``: In this case, the statistics are computed separately for each + sample on the ``N`` axis, and then averaged over samples. + The computation for each sample is done by treating the flattened extra axes ``...`` + (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample, + and computing the metric for the sample based on that. + + - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs + (see :ref:`references/modules:input types`) + are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they + were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. + num_classes: + Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods. + + threshold: + Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case + of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. + top_k: + Number of highest probability or logit score predictions considered to find the correct label, + relevant only for (multi-dimensional) multi-class inputs. The + default value (``None``) will be interpreted as 1 for these inputs. + + Should be left at default (``None``) for all other types of inputs. + multiclass: + Used only in certain special cases, where you want to treat inputs as a different type + than what they appear to be. 
See the parameter's
+        :ref:`documentation section `
+        for a more detailed explanation and examples.
+    ignore_index:
+        Integer specifying a target class to ignore. If given, this class index does not contribute
+        to the returned score, regardless of reduction method. If an index is ignored, and ``average=None``
+        or ``'none'``, the score for the ignored class will be returned as ``nan``.
+    subset_accuracy:
+        Whether to compute subset accuracy for multi-label and multi-dimensional
+        multi-class inputs (has no effect for other input types).
+
+        - For multi-label inputs, if the parameter is set to ``True``, then all labels for
+          each sample must be correctly predicted for the sample to count as correct. If it
+          is set to ``False``, then all labels are counted separately - this is equivalent to
+          flattening inputs beforehand (i.e. ``preds = preds.flatten()`` and same for ``target``).
+
+        - For multi-dimensional multi-class inputs, if the parameter is set to ``True``, then all
+          sub-samples (on the extra axis) must be correct for the sample to be counted as correct.
+          If it is set to ``False``, then all sub-samples are counted separately - this is equivalent,
+          in the case of label predictions, to flattening the inputs beforehand (i.e.
+          ``preds = preds.flatten()`` and same for ``target``). Note that the ``top_k`` parameter
+          still applies in both cases, if set.
+
+    Raises:
+        ValueError:
+            If ``top_k`` parameter is set for ``multi-label`` inputs.
+        ValueError:
+            If ``average`` is not one of ``"micro"``, ``"macro"``, ``"weighted"``, ``"samples"``, ``"none"``, ``None``.
+        ValueError:
+            If ``mdmc_average`` is not one of ``None``, ``"samplewise"``, ``"global"``.
+        ValueError:
+            If ``average`` is set but ``num_classes`` is not provided.
+        ValueError:
+            If ``num_classes`` is set
+            and ``ignore_index`` is not in the range ``[0, num_classes)``.
+        ValueError:
+            If ``top_k`` is not an ``integer`` larger than ``0``.
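+
+    For instance (an illustrative example, not taken from the doctests below): with multi-label
+    ``preds = [[1, 0], [1, 1]]`` and ``target = [[1, 1], [1, 1]]``, ``subset_accuracy=True``
+    yields ``0.5`` (only the second sample is fully correct), while the default global accuracy
+    counts 3 of 4 correct labels and yields ``0.75``.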
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics.functional import accuracy
+        >>> target = B.tensor([0, 1, 2, 3])
+        >>> preds = B.tensor([0, 2, 1, 3])
+        >>> accuracy(preds, target)
+        tensor(0.5000)
+
+        >>> target = B.tensor([0, 1, 2])
+        >>> preds = B.tensor([[0.1, 0.9, 0], [0.3, 0.1, 0.6], [0.2, 0.5, 0.3]])
+        >>> accuracy(preds, target, top_k=2)
+        tensor(0.6667)
+    """
+    allowed_average = ["micro", "macro", "weighted", "samples", "none", None]
+    if average not in allowed_average:
+        raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.")
+
+    if average in ["macro", "weighted", "none", None] and (not num_classes or num_classes < 1):
+        raise ValueError(f"When you set `average` as {average}, you have to provide the number of classes.")
+
+    allowed_mdmc_average = [None, "samplewise", "global"]
+    if mdmc_average not in allowed_mdmc_average:
+        raise ValueError(f"The `mdmc_average` has to be one of {allowed_mdmc_average}, got {mdmc_average}.")
+
+    if num_classes and ignore_index is not None and (not 0 <= ignore_index < num_classes or num_classes == 1):
+        raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes")
+
+    if top_k is not None and (not isinstance(top_k, int) or top_k <= 0):
+        raise ValueError(f"The `top_k` should be an integer larger than 0, got {top_k}")
+
+    preds, target = _input_squeeze(preds, target)
+    mode = _mode(preds, target, threshold, top_k, num_classes, multiclass)
+    reduce = "macro" if average in ["weighted", "none", None] else average
+
+    if subset_accuracy and _check_subset_validity(mode):
+        correct, total = _subset_accuracy_update(preds, target, threshold, top_k)
+        return _subset_accuracy_compute(correct, total)
+    tp, fp, tn, fn = _accuracy_update(
+        preds, target, reduce, mdmc_average, threshold, num_classes, top_k, multiclass, ignore_index, mode
+    )
+    return _accuracy_compute(tp, fp, tn, fn, average, mdmc_average, mode)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/auc.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/auc.py
new file mode 100644
index 000000000..0e2fddb3d
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/auc.py
@@ -0,0 +1,133 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+
+def _auc_update(x: Tensor, y: Tensor) -> Tuple[Tensor, Tensor]:
+    """Updates and returns variables required to compute area under the curve. Checks if the two input tensors
+    have the same number of elements and if they are 1d.
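+    Inputs with extra singleton dimensions (e.g. shape ``(N, 1)``) are squeezed to 1d before validation.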
+
+    Args:
+        x: x-coordinates
+        y: y-coordinates
+    """
+
+    if x.ndim > 1:
+        x = x.squeeze()
+
+    if y.ndim > 1:
+        y = y.squeeze()
+
+    if x.ndim > 1 or y.ndim > 1:
+        raise ValueError(
+            f"Expected both `x` and `y` tensor to be 1d, but got tensors with dimension {x.ndim} and {y.ndim}"
+        )
+    if x.numel() != y.numel():
+        raise ValueError(
+            f"Expected the same number of elements in `x` and `y` tensor but received {x.numel()} and {y.numel()}"
+        )
+    return x, y
+
+
+def _auc_compute_without_check(x: Tensor, y: Tensor, direction: float) -> Tensor:
+    """Computes area under the curve using the trapezoidal rule. Assumes increasing or decreasing order of `x`.
+
+    Args:
+        x: x-coordinates, must be either increasing or decreasing
+        y: y-coordinates
+        direction: 1 if increasing, -1 if decreasing
+
+    Example:
+        >>> x = B.tensor([0, 1, 2, 3])
+        >>> y = B.tensor([0, 1, 2, 2])
+        >>> x, y = _auc_update(x, y)
+        >>> _auc_compute_without_check(x, y, direction=1.0)
+        tensor(4.)
+    """
+
+    with B.no_grad():
+        auc_: Tensor = B.trapz(y, x) * direction
+    return auc_
+
+
+def _auc_compute(x: Tensor, y: Tensor, reorder: bool = False) -> Tensor:
+    """Computes area under the curve using the trapezoidal rule. Checks for increasing or decreasing order of `x`.
+
+    Args:
+        x: x-coordinates, must be either increasing or decreasing
+        y: y-coordinates
+        reorder: if True, will reorder the arrays to make it either increasing or decreasing
+
+    Example:
+        >>> x = B.tensor([0, 1, 2, 3])
+        >>> y = B.tensor([0, 1, 2, 2])
+        >>> x, y = _auc_update(x, y)
+        >>> _auc_compute(x, y)
+        tensor(4.)
+        >>> _auc_compute(x, y, reorder=True)
+        tensor(4.)
+    """
+
+    with B.no_grad():
+        if reorder:
+            # TODO: include stable=True arg when pytorch v1.9 is released
+            x, x_idx = B.sort(x)
+            y = y[x_idx]
+
+        dx = x[1:] - x[:-1]
+        if (dx < 0).any():
+            if (dx <= 0).all():
+                direction = -1.0
+            else:
+                raise ValueError(
+                    "The `x` tensor is neither increasing nor decreasing. Try setting the reorder argument to `True`."
+                )
+        else:
+            direction = 1.0
+        return _auc_compute_without_check(x, y, direction)
+
+
+def auc(x: Tensor, y: Tensor, reorder: bool = False) -> Tensor:
+    """Computes Area Under the Curve (AUC) using the trapezoidal rule.
+
+    Args:
+        x: x-coordinates, must be either increasing or decreasing
+        y: y-coordinates
+        reorder: if True, will reorder the arrays to make it either increasing or decreasing
+
+    Return:
+        Tensor containing AUC score (float)
+
+    Raises:
+        ValueError:
+            If ``x`` or ``y`` tensor is not ``1d``.
+        ValueError:
+            If ``x`` and ``y`` don't have the same number of elements.
+        ValueError:
+            If the ``x`` tensor is neither increasing nor decreasing.
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics.functional import auc
+        >>> x = B.tensor([0, 1, 2, 3])
+        >>> y = B.tensor([0, 1, 2, 2])
+        >>> auc(x, y)
+        tensor(4.)
+        >>> auc(x, y, reorder=True)
+        tensor(4.)
+    """
+    x, y = _auc_update(x, y)
+    return _auc_compute(x, y, reorder=reorder)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/auroc.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/auroc.py
new file mode 100644
index 000000000..a393f20e5
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/auroc.py
@@ -0,0 +1,257 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import warnings
+from typing import Optional, Sequence, Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.functional.classification.auc import _auc_compute_without_check
+from paddlemetrics.functional.classification.roc import roc
+from paddlemetrics.utilities.checks import _input_format_classification
+from paddlemetrics.utilities.enums import AverageMethod, DataType
+from paddlemetrics.utilities.imports import _TORCH_LOWER_1_6
+
+
+def _auroc_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor, DataType]:
+    """Updates and returns variables required to compute Area Under the Receiver Operating Characteristic Curve.
+    Validates the inputs and returns the mode of the inputs.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+    """
+
+    # use _input_format_classification to validate the input and get the mode of the data
+    _, _, mode = _input_format_classification(preds, target)
+
+    if mode == DataType.MULTIDIM_MULTICLASS:
+        n_classes = preds.shape[1]
+        preds = preds.transpose(0, 1).reshape(n_classes, -1).transpose(0, 1)
+        target = target.flatten()
+    if mode == DataType.MULTILABEL and preds.ndim > 2:
+        n_classes = preds.shape[1]
+        preds = preds.transpose(0, 1).reshape(n_classes, -1).transpose(0, 1)
+        target = target.transpose(0, 1).reshape(n_classes, -1).transpose(0, 1)
+
+    return preds, target, mode
+
+
+def _auroc_compute(
+    preds: Tensor,
+    target: Tensor,
+    mode: DataType,
+    num_classes: Optional[int] = None,
+    pos_label: Optional[int] = None,
+    average: Optional[str] = "macro",
+    max_fpr: Optional[float] = None,
+    sample_weights: Optional[Sequence] = None,
+) -> Tensor:
+    """Computes Area Under the Receiver Operating Characteristic Curve.
+
+    Args:
+        preds: predictions from model (logits or probabilities)
+        target: Ground truth labels
+        mode: 'multi-dim multi-class', 'multi-label' or 'binary'
+        num_classes: integer with number of classes for multi-label and multiclass problems.
+            Should be set to ``None`` for binary problems
+        pos_label: integer determining the positive class.
+            Should be set to ``None`` for binary problems
+        average: Defines the reduction that is applied to the output:
+        max_fpr: If not ``None``, calculates standardized partial AUC over the
+            range [0, max_fpr]. Should be a float between 0 and 1.
+        sample_weights: sample weights for each data point
+
+    Example:
+        >>> # binary case
+        >>> preds = B.tensor([0.13, 0.26, 0.08, 0.19, 0.34])
+        >>> target = B.tensor([0, 0, 1, 1, 1])
+        >>> preds, target, mode = _auroc_update(preds, target)
+        >>> _auroc_compute(preds, target, mode, pos_label=1)
+        tensor(0.5000)
+
+        >>> # multiclass case
+        >>> preds = B.tensor([[0.90, 0.05, 0.05],
+        ...                   [0.05, 0.90, 0.05],
+        ...                   [0.05, 0.05, 0.90],
+        ...                   [0.85, 0.05, 0.10],
+        ...                   [0.10, 0.10, 0.80]])
+        >>> target = B.tensor([0, 1, 1, 2, 2])
+        >>> preds, target, mode = _auroc_update(preds, target)
+        >>> _auroc_compute(preds, target, mode, num_classes=3)
+        tensor(0.7778)
+    """
+
+    # binary mode overrides num_classes
+    if mode == DataType.BINARY:
+        num_classes = 1
+
+    # check max_fpr parameter
+    if max_fpr is not None:
+        if not isinstance(max_fpr, float) or not 0 < max_fpr <= 1:
+            raise ValueError(f"`max_fpr` should be a float in range (0, 1], got: {max_fpr}")
+
+        if _TORCH_LOWER_1_6:
+            raise RuntimeError(
+                "`max_fpr` argument requires `B.bucketize` which is not available below PyTorch version 1.6"
+            )
+
+        # max_fpr parameter is only supported for binary
+        if mode != DataType.BINARY:
+            raise ValueError(
+                f"Partial AUC computation not available in"
+                f" multilabel/multiclass setting, 'max_fpr' must be"
+                f" set to `None`, received `{max_fpr}`."
+            )
+
+    # calculate fpr, tpr
+    if mode == DataType.MULTILABEL:
+        if average == AverageMethod.MICRO:
+            fpr, tpr, _ = roc(preds.flatten(), target.flatten(), 1, pos_label, sample_weights)
+        elif num_classes:
+            # for multilabel we iteratively evaluate roc in a binary fashion
+            output = [
+                roc(preds[:, i], target[:, i], num_classes=1, pos_label=1, sample_weights=sample_weights)
+                for i in range(num_classes)
+            ]
+            fpr = [o[0] for o in output]
+            tpr = [o[1] for o in output]
+        else:
+            raise ValueError("Detected input to be `multilabel` but you did not provide `num_classes` argument")
+    else:
+        if mode != DataType.BINARY:
+            if num_classes is None:
+                raise ValueError("Detected input to be `multiclass` but you did not provide `num_classes` argument")
+            if average == AverageMethod.WEIGHTED and len(B.unique(target)) < num_classes:
+                # If one or more classes have 0 observations, we should exclude them, as their weight will be 0
+                target_bool_mat = B.zeros((len(target), num_classes), dtype=bool)
+                target_bool_mat[B.arange(len(target)), target.long()] = 1
+                class_observed = target_bool_mat.sum(axis=0) > 0
+                for c in range(num_classes):
+                    if not class_observed[c]:
+                        warnings.warn(f"Class {c} had 0 observations, omitted from AUROC calculation", UserWarning)
+                preds = preds[:, class_observed]
+                target = target_bool_mat[:, class_observed]
+                target = B.where(target)[1]
+                num_classes = class_observed.sum()
+                if num_classes == 1:
+                    raise ValueError("Found 1 non-empty class in `multiclass` AUROC calculation")
+        fpr, tpr, _ = roc(preds, target, num_classes, pos_label, sample_weights)
+
+    # calculate standard roc auc score
+    if max_fpr is None or max_fpr == 1:
+        if mode == DataType.MULTILABEL and average == AverageMethod.MICRO:
+            pass
+        elif num_classes != 1:
+            # calculate auc scores per class
+            auc_scores = [_auc_compute_without_check(x, y, 1.0) for x, y in zip(fpr, tpr)]
+
+            # calculate average
+            if average == AverageMethod.NONE:
+                return tensor(auc_scores)
+            if average == AverageMethod.MACRO:
+                return B.mean(B.stack(auc_scores))
+            if average == AverageMethod.WEIGHTED:
+                if mode == DataType.MULTILABEL:
+                    support = B.sum(target, dim=0)
+                else:
+                    support = B.bincount(target.flatten(), minlength=num_classes)
+                return B.sum(B.stack(auc_scores) * support / support.sum())
+
+            allowed_average = (AverageMethod.NONE.value, AverageMethod.MACRO.value, AverageMethod.WEIGHTED.value)
+            raise ValueError(
+                f"Argument `average` expected to be one of the following: {allowed_average} but got {average}"
+            )
+
+        return _auc_compute_without_check(fpr, tpr, 1.0)
+
+    _device = fpr.device if isinstance(fpr, Tensor) else fpr[0].device
+    max_area: Tensor = tensor(max_fpr, device=_device)
+    # Add a single point at max_fpr and interpolate its tpr value
+    stop = B.bucketize(max_area, fpr, out_int32=True, right=True)
+    weight = (max_area - fpr[stop - 1]) / (fpr[stop] - fpr[stop - 1])
+    interp_tpr: Tensor = B.lerp(tpr[stop - 1], tpr[stop], weight)
+    tpr = B.cat([tpr[:stop], interp_tpr.view(1)])
+    fpr = B.cat([fpr[:stop], max_area.view(1)])
+
+    # Compute partial AUC
+    partial_auc = _auc_compute_without_check(fpr, tpr, 1.0)
+
+    # McClish correction: standardize result to be 0.5 if non-discriminant and 1 if maximal
+    min_area: Tensor = 0.5 * max_area ** 2
+    return 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area))
+
+
+def auroc(
+    preds: Tensor,
+    target: Tensor,
+    num_classes: Optional[int] = None,
+    pos_label: Optional[int] = None,
+    average: Optional[str] = "macro",
+    max_fpr: Optional[float] = None,
+    sample_weights: Optional[Sequence] = None,
+) -> Tensor:
+    """Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_)
+
+    Args:
+        preds: predictions from model (logits or probabilities)
+        target: Ground truth labels
+        num_classes: integer with number of classes for multi-label and multiclass problems.
+            Should be set to ``None`` for binary problems
+        pos_label: integer determining the positive class. Default is ``None``
+            which for binary problems is translated to 1. For multiclass problems
+            this argument should not be set as we iteratively change it in the
+            range [0,num_classes-1]
+        average:
+            - ``'micro'`` computes metric globally. Only works for multilabel problems
+            - ``'macro'`` computes metric for each class and uniformly averages them
+            - ``'weighted'`` computes metric for each class and does a weighted-average,
+              where each class is weighted by their support (accounts for class imbalance)
+            - ``None`` computes and returns the metric per class
+        max_fpr:
+            If not ``None``, calculates standardized partial AUC over the
+            range [0, max_fpr]. Should be a float between 0 and 1.
+        sample_weights: sample weights for each data point
+
+    Raises:
+        ValueError:
+            If ``max_fpr`` is not a ``float`` in the range ``(0, 1]``.
+        RuntimeError:
+            If the ``PyTorch`` version is below ``1.6``, since ``max_fpr`` requires ``B.bucketize``,
+            which is not available below 1.6.
+        ValueError:
+            If ``max_fpr`` is not set to ``None`` and the mode is not ``binary``,
+            since partial AUC computation is not available in multilabel/multiclass.
+        ValueError:
+            If ``average`` is none of ``None``, ``"macro"`` or ``"weighted"``.
+
+    Example (binary case):
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics.functional import auroc
+        >>> preds = B.tensor([0.13, 0.26, 0.08, 0.19, 0.34])
+        >>> target = B.tensor([0, 0, 1, 1, 1])
+        >>> auroc(preds, target, pos_label=1)
+        tensor(0.5000)
+
+    Example (multiclass case):
+        >>> preds = B.tensor([[0.90, 0.05, 0.05],
+        ...                   [0.05, 0.90, 0.05],
+        ...                   [0.05, 0.05, 0.90],
+        ...                   [0.85, 0.05, 0.10],
+        ...                   [0.10, 0.10, 0.80]])
+        >>> target = B.tensor([0, 1, 1, 2, 2])
+        >>> auroc(preds, target, num_classes=3)
+        tensor(0.7778)
+    """
+    preds, target, mode = _auroc_update(preds, target)
+    return _auroc_compute(preds, target, mode, num_classes, pos_label, average, max_fpr, sample_weights)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/average_precision.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/average_precision.py
new file mode 100644
index 000000000..bc6118168
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/average_precision.py
@@ -0,0 +1,236 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import warnings
+from typing import List, Optional, Sequence, Tuple, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.classification.precision_recall_curve import (
+    _precision_recall_curve_compute,
+    _precision_recall_curve_update,
+)
+
+
+def _average_precision_update(
+    preds: Tensor,
+    target: Tensor,
+    num_classes: Optional[int] = None,
+    pos_label: Optional[int] = None,
+    average: Optional[str] = "macro",
+) -> Tuple[Tensor, Tensor, int, Optional[int]]:
+    """Format the predictions and target based on the ``num_classes``, ``pos_label`` and ``average`` parameters.
+
+    Args:
+        preds: predictions from model (logits or probabilities)
+        target: ground truth values
+        num_classes: integer with number of classes.
+        pos_label: integer determining the positive class. Default is ``None``
+            which for binary problems is translated to 1. For multiclass problems
+            this argument should not be set as we iteratively change it in the
+            range [0,num_classes-1]
+        average: reduction method for multi-class or multi-label problems
+    """
+    preds, target, num_classes, pos_label = _precision_recall_curve_update(preds, target, num_classes, pos_label)
+    if average == "micro":
+        if preds.ndim == target.ndim:
+            # Considering each element of the label indicator matrix as a label
+            preds = preds.flatten()
+            target = target.flatten()
+            num_classes = 1
+        else:
+            raise ValueError("Cannot use `micro` average with multi-class input")
+
+    return preds, target, num_classes, pos_label
+
+
+def _average_precision_compute(
+    preds: Tensor,
+    target: Tensor,
+    num_classes: int,
+    pos_label: Optional[int] = None,
+    average: Optional[str] = "macro",
+    sample_weights: Optional[Sequence] = None,
+) -> Union[List[Tensor], Tensor]:
+    """Computes the average precision score.
+
+    Args:
+        preds: predictions from model (logits or probabilities)
+        target: ground truth values
+        num_classes: integer with number of classes.
+        pos_label: integer determining the positive class. Default is ``None``
+            which for binary problems is translated to 1. For multiclass problems
+            this argument should not be set as we iteratively change it in the
+            range [0,num_classes-1]
+        average: reduction method for multi-class or multi-label problems
+        sample_weights: sample weights for each data point
+
+    Example:
+        >>> # binary case
+        >>> preds = B.tensor([0, 1, 2, 3])
+        >>> target = B.tensor([0, 1, 1, 1])
+        >>> pos_label = 1
+        >>> preds, target, num_classes, pos_label = _average_precision_update(preds, target, pos_label=pos_label)
+        >>> _average_precision_compute(preds, target, num_classes, pos_label)
+        tensor(1.)
+
+        >>> # multiclass case
+        >>> preds = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
+        ...                   [0.05, 0.75, 0.05, 0.05, 0.05],
+        ...                   [0.05, 0.05, 0.75, 0.05, 0.05],
+        ...                   [0.05, 0.05, 0.05, 0.75, 0.05]])
+        >>> target = B.tensor([0, 1, 3, 2])
+        >>> num_classes = 5
+        >>> preds, target, num_classes, pos_label = _average_precision_update(preds, target, num_classes)
+        >>> _average_precision_compute(preds, target, num_classes, average=None)
+        [tensor(1.), tensor(1.), tensor(0.2500), tensor(0.2500), tensor(nan)]
+    """
+
+    # todo: `sample_weights` is unused
+    precision, recall, _ = _precision_recall_curve_compute(preds, target, num_classes, pos_label)
+    if average == "weighted":
+        if preds.ndim == target.ndim and target.ndim > 1:
+            weights = target.sum(dim=0).float()
+        else:
+            weights = B.bincount(target, minlength=num_classes).float()
+        weights = weights / B.sum(weights)
+    else:
+        weights = None
+    return _average_precision_compute_with_precision_recall(precision, recall, num_classes, average, weights)
+
+
+def _average_precision_compute_with_precision_recall(
+    precision: Tensor,
+    recall: Tensor,
+    num_classes: int,
+    average: Optional[str] = "macro",
+    weights: Optional[Tensor] = None,
+) -> Union[List[Tensor], Tensor]:
+    """Computes the average precision score from precision and recall.
+
+    Args:
+        precision: precision values
+        recall: recall values
+        num_classes: integer with number of classes. Not necessary to provide
+            for binary problems.
+        average: reduction method for multi-class or multi-label problems
+        weights: weights to use when average='weighted'
+
+    Example:
+        >>> # binary case
+        >>> preds = B.tensor([0, 1, 2, 3])
+        >>> target = B.tensor([0, 1, 1, 1])
+        >>> pos_label = 1
+        >>> preds, target, num_classes, pos_label = _average_precision_update(preds, target, pos_label=pos_label)
+        >>> precision, recall, _ = _precision_recall_curve_compute(preds, target, num_classes, pos_label)
+        >>> _average_precision_compute_with_precision_recall(precision, recall, num_classes, average=None)
+        tensor(1.)
+
+        >>> # multiclass case
+        >>> preds = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
+        ...                   [0.05, 0.75, 0.05, 0.05, 0.05],
+        ...                   [0.05, 0.05, 0.75, 0.05, 0.05],
+        ...                   [0.05, 0.05, 0.05, 0.75, 0.05]])
+        >>> target = B.tensor([0, 1, 3, 2])
+        >>> num_classes = 5
+        >>> preds, target, num_classes, pos_label = _average_precision_update(preds, target, num_classes)
+        >>> precision, recall, _ = _precision_recall_curve_compute(preds, target, num_classes)
+        >>> _average_precision_compute_with_precision_recall(precision, recall, num_classes, average=None)
+        [tensor(1.), tensor(1.), tensor(0.2500), tensor(0.2500), tensor(nan)]
+    """
+
+    # Return the step function integral
+    # The following works because the last entry of precision is
+    # guaranteed to be 1, as returned by precision_recall_curve
+    if num_classes == 1:
+        return -B.sum((recall[1:] - recall[:-1]) * precision[:-1])
+
+    res = []
+    for p, r in zip(precision, recall):
+        res.append(-B.sum((r[1:] - r[:-1]) * p[:-1]))
+
+    # Reduce
+    if average in ("macro", "weighted"):
+        res = B.stack(res)
+        if B.isnan(res).any():
+            warnings.warn(
+                "Average precision score for one or more classes was `nan`. Ignoring these classes in average",
+                UserWarning,
+            )
+        if average == "macro":
+            return res[~B.isnan(res)].mean()
+        weights = B.ones_like(res) if weights is None else weights
+        return (res * weights)[~B.isnan(res)].sum()
+    if average is None:
+        return res
+    allowed_average = ("micro", "macro", "weighted", None)
+    raise ValueError(f"Expected argument `average` to be one of {allowed_average} but got {average}")
+
+
+def average_precision(
+    preds: Tensor,
+    target: Tensor,
+    num_classes: Optional[int] = None,
+    pos_label: Optional[int] = None,
+    average: Optional[str] = "macro",
+    sample_weights: Optional[Sequence] = None,
+) -> Union[List[Tensor], Tensor]:
+    """Computes the average precision score.
+
+    Args:
+        preds: predictions from model (logits or probabilities)
+        target: ground truth values
+        num_classes: integer with number of classes. Not necessary to provide
+            for binary problems.
+        pos_label: integer determining the positive class. Default is ``None``
+            which for binary problems is translated to 1. For multiclass problems
+            this argument should not be set as we iteratively change it in the
+            range [0,num_classes-1]
+        average:
+            defines the reduction that is applied in the case of multiclass and multilabel input.
+            Should be one of the following:
+
+            - ``'macro'`` [default]: Calculate the metric for each class separately, and average the
+              metrics across classes (with equal weights for each class).
+            - ``'micro'``: Calculate the metric globally, across all samples and classes. Cannot be
+              used with multiclass input.
+            - ``'weighted'``: Calculate the metric for each class separately, and average the
+              metrics across classes, weighting each class by its support.
+            - ``'none'`` or ``None``: Calculate the metric for each class separately, and return
+              the metric for every class.
+
+        sample_weights: sample weights for each data point
+
+    Returns:
+        tensor with average precision. If multiclass will return list
+        of such tensors, one for each class
+
+    Example (binary case):
+        >>> from paddlemetrics.functional import average_precision
+        >>> pred = B.tensor([0, 1, 2, 3])
+        >>> target = B.tensor([0, 1, 1, 1])
+        >>> average_precision(pred, target, pos_label=1)
+        tensor(1.)
+
+    Example (multiclass case):
+        >>> pred = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
+        ...                  [0.05, 0.75, 0.05, 0.05, 0.05],
+        ...                  [0.05, 0.05, 0.75, 0.05, 0.05],
+        ...
[0.05, 0.05, 0.05, 0.75, 0.05]])
+    >>> target = B.tensor([0, 1, 3, 2])
+    >>> average_precision(pred, target, num_classes=5, average=None)
+    [tensor(1.), tensor(1.), tensor(0.2500), tensor(0.2500), tensor(nan)]
+    """
+    # fixme: `sample_weights` is unused
+    preds, target, num_classes, pos_label = _average_precision_update(preds, target, num_classes, pos_label, average)
+    return _average_precision_compute(preds, target, num_classes, pos_label, average, sample_weights)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/calibration_error.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/calibration_error.py
new file mode 100644
index 000000000..132036417
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/calibration_error.py
@@ -0,0 +1,156 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import FloatTensor, Tensor
+
+from paddlemetrics.utilities.checks import _input_format_classification
+from paddlemetrics.utilities.enums import DataType
+
+
+def _ce_compute(
+    confidences: FloatTensor,
+    accuracies: FloatTensor,
+    bin_boundaries: FloatTensor,
+    norm: str = "l1",
+    debias: bool = False,
+) -> Tensor:
+    """Computes the calibration error given the provided bin boundaries and norm.
+
+    Args:
+        confidences (FloatTensor): The confidence (i.e. predicted probability) of the top-1 prediction.
+        accuracies (FloatTensor): 1.0 if the top-1 prediction was correct, 0.0 otherwise.
+        bin_boundaries (FloatTensor): Bin boundaries separating the linspace from 0 to 1.
+        norm (str, optional): Norm function to use when computing calibration error. Defaults to "l1".
+        debias (bool, optional): Apply debiasing to L2 norm computation as in
+            `Verified Uncertainty Calibration`_. Defaults to False.
+
+    Raises:
+        ValueError: If an unsupported norm function is provided.
+
+    Returns:
+        Tensor: Calibration error scalar.
+    """
+    if norm not in {"l1", "l2", "max"}:
+        raise ValueError(f"Norm {norm} is not supported. Please select from l1, l2, or max.")
+
+    conf_bin = B.zeros_like(bin_boundaries)
+    acc_bin = B.zeros_like(bin_boundaries)
+    prop_bin = B.zeros_like(bin_boundaries)
+    for i, (bin_lower, bin_upper) in enumerate(zip(bin_boundaries[:-1], bin_boundaries[1:])):
+        # Calculate confidence and accuracy in each bin
+        in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item())
+        prop_in_bin = in_bin.float().mean()
+        if prop_in_bin.item() > 0:
+            acc_bin[i] = accuracies[in_bin].float().mean()
+            conf_bin[i] = confidences[in_bin].mean()
+            prop_bin[i] = prop_in_bin
+
+    if norm == "l1":
+        ce = B.sum(B.abs(acc_bin - conf_bin) * prop_bin)
+    elif norm == "max":
+        ce = B.max(B.abs(acc_bin - conf_bin))
+    elif norm == "l2":
+        ce = B.sum(B.pow(acc_bin - conf_bin, 2) * prop_bin)
+        # NOTE: debiasing is disabled in the wrapper functions. This implementation differs from that in sklearn.
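+        # Background: the plug-in L2 estimate above is biased upwards when bins contain few
+        # samples; the debias term below is intended to subtract an estimate of that per-bin
+        # sampling noise (cf. Verified Uncertainty Calibration, Kumar et al. 2019).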
+        if debias:
+            # the order here (acc_bin - 1) vs (1 - acc_bin) is flipped from
+            # the equation in Verified Uncertainty Calibration (Kumar et al. 2019).
+            debias_bins = (acc_bin * (acc_bin - 1) * prop_bin) / (prop_bin * accuracies.size()[0] - 1)
+            ce += B.sum(B.nan_to_num(debias_bins))  # replace nans with zeros if nothing appeared in a bin
+        ce = B.sqrt(ce) if ce > 0 else B.tensor(0)
+    return ce
+
+
+def _ce_update(preds: Tensor, target: Tensor) -> Tuple[FloatTensor, FloatTensor]:
+    """Given predictions and targets tensors, computes the confidences of the top-1 prediction and records their
+    correctness.
+
+    Args:
+        preds (Tensor): Input softmaxed predictions.
+        target (Tensor): Labels.
+
+    Raises:
+        ValueError: If the dataset shape is not binary, multiclass, or multidimensional-multiclass.
+
+    Returns:
+        Tuple[FloatTensor, FloatTensor]: The confidences and the accuracies, both as float tensors.
+    """
+    _, _, mode = _input_format_classification(preds, target)
+
+    if mode == DataType.BINARY:
+        confidences, accuracies = preds, target
+    elif mode == DataType.MULTICLASS:
+        confidences, predictions = preds.max(dim=1)
+        accuracies = predictions.eq(target)
+    elif mode == DataType.MULTIDIM_MULTICLASS:
+        # reshape tensors
+        # for preds, move the class dimension to the final axis and flatten the rest
+        confidences, predictions = B.transpose(preds, 1, -1).flatten(0, -2).max(dim=1)
+        # for targets, just flatten the target
+        accuracies = predictions.eq(target.flatten())
+    else:
+        raise ValueError(
+            f"Calibration error is not well-defined for data with size {preds.size()} and targets {target.size()}."
+        )
+    # must be cast to float for ddp allgather to work
+    return confidences.float(), accuracies.float()
+
+
+def calibration_error(preds: Tensor, target: Tensor, n_bins: int = 15, norm: str = "l1") -> Tensor:
+    r"""
+    `Computes the Top-label Calibration Error`_
+
+    Three different norms are implemented, each corresponding to variations on the calibration error metric.
+
+    L1 norm (Expected Calibration Error)
+
+    .. math::
+        \text{ECE} = \frac{1}{N}\sum_i^N \|(p_i - c_i)\|
+
+    Infinity norm (Maximum Calibration Error)
+
+    .. math::
+        \text{MCE} = \max_{i} (p_i - c_i)
+
+    L2 norm (Root Mean Square Calibration Error)
+
+    .. math::
+        \text{RMSCE} = \sqrt{\frac{1}{N}\sum_i^N (p_i - c_i)^2}
+
+    Where :math:`p_i` is the top-1 prediction accuracy in bin :math:`i` and
+    :math:`c_i` is the average confidence of predictions in bin :math:`i`.
+
+    .. note::
+        L2-norm debiasing is not yet supported.
+
+    Args:
+        preds (Tensor): Model output probabilities.
+        target (Tensor): Ground-truth target class labels.
+        n_bins (int, optional): Number of bins to use when computing the metric. Defaults to 15.
+        norm (str, optional): Norm used to compare empirical and expected probability bins.
+            Defaults to "l1", or Expected Calibration Error.
+    """
+    if norm not in ("l1", "l2", "max"):
+        raise ValueError(f"Norm {norm} is not supported. Please select from l1, l2, or max.")
") + + if not isinstance(n_bins, int) or n_bins <= 0: + raise ValueError(f"Expected argument `n_bins` to be a int larger than 0 but got {n_bins}") + + confidences, accuracies = _ce_update(preds, target) + + bin_boundaries = B.linspace(0, 1, n_bins + 1, dtype=B.float, device=preds.device) + + return _ce_compute(confidences, accuracies, bin_boundaries, norm=norm) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/cohen_kappa.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/cohen_kappa.py new file mode 100644 index 000000000..2face7a5c --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/cohen_kappa.py @@ -0,0 +1,112 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.functional.classification.confusion_matrix import _confusion_matrix_compute, _confusion_matrix_update + +_cohen_kappa_update = _confusion_matrix_update + + +def _cohen_kappa_compute(confmat: Tensor, weights: Optional[str] = None) -> Tensor: + """Computes Cohen's kappa based on the weighting type. + + Args: + confmat: Confusion matrix without normalization + weights: Weighting type to calculate the score. Choose from + - ``None`` or ``'none'``: no weighting + - ``'linear'``: linear weighting + - ``'quadratic'``: quadratic weighting + + Example: + >>> target = B.tensor([1, 1, 0, 0]) + >>> preds = B.tensor([0, 1, 0, 0]) + >>> confmat = _cohen_kappa_update(preds, target, num_classes=2) + >>> _cohen_kappa_compute(confmat) + tensor(0.5000) + """ + + confmat = _confusion_matrix_compute(confmat) + confmat = confmat.float() if not confmat.is_floating_point() else confmat + n_classes = confmat.shape[0] + sum0 = confmat.sum(dim=0, keepdim=True) + sum1 = confmat.sum(dim=1, keepdim=True) + expected = sum1 @ sum0 / sum0.sum() # outer product + + if weights is None: + w_mat = B.ones_like(confmat).flatten() + w_mat[:: n_classes + 1] = 0 + w_mat = w_mat.reshape(n_classes, n_classes) + elif weights in ("linear", "quadratic"): + w_mat = B.zeros_like(confmat) + w_mat += B.arange(n_classes, dtype=w_mat.dtype, device=w_mat.device) + if weights == "linear": + w_mat = B.abs(w_mat - w_mat.T) + else: + w_mat = B.pow(w_mat - w_mat.T, 2.0) + else: + raise ValueError( + f"Received {weights} for argument ``weights`` but should be either" " None, 'linear' or 'quadratic'" + ) + + k = B.sum(w_mat * confmat) / B.sum(w_mat * expected) + return 1 - k + + +def cohen_kappa( + preds: Tensor, + target: Tensor, + num_classes: int, + weights: Optional[str] = None, + threshold: float = 0.5, +) -> Tensor: + r""" + Calculates `Cohen's kappa score`_ that measures inter-annotator agreement. + It is defined as + + .. math:: + \kappa = (p_o - p_e) / (1 - p_e) + + where :math:`p_o` is the empirical probability of agreement and :math:`p_e` isg + the expected agreement when both annotators assign labels randomly. 
Note that
+    :math:`p_e` is estimated using a per-annotator empirical prior over the
+    class labels.
+
+    Args:
+        preds: (float or long tensor), Either a ``(N, ...)`` tensor with labels or
+            ``(N, C, ...)`` where C is the number of classes, tensor with labels/probabilities
+
+        target: ``target`` (long tensor), tensor with shape ``(N, ...)`` with ground truth labels
+
+        num_classes: Number of classes in the dataset.
+
+        weights: Weighting type to calculate the score. Choose from
+
+            - ``None`` or ``'none'``: no weighting
+            - ``'linear'``: linear weighting
+            - ``'quadratic'``: quadratic weighting
+
+        threshold:
+            Threshold value for binary or multi-label probabilities. Default: 0.5
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics.functional import cohen_kappa
+        >>> target = B.tensor([1, 1, 0, 0])
+        >>> preds = B.tensor([0, 1, 0, 0])
+        >>> cohen_kappa(preds, target, num_classes=2)
+        tensor(0.5000)
+    """
+    confmat = _cohen_kappa_update(preds, target, num_classes, threshold)
+    return _cohen_kappa_compute(confmat, weights)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/confusion_matrix.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/confusion_matrix.py
new file mode 100644
index 000000000..b4f3c12de
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/confusion_matrix.py
@@ -0,0 +1,184 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.utilities import rank_zero_warn
+from paddlemetrics.utilities.checks import _input_format_classification
+from paddlemetrics.utilities.enums import DataType
+
+
+def _confusion_matrix_update(
+    preds: Tensor, target: Tensor, num_classes: int, threshold: float = 0.5, multilabel: bool = False
+) -> Tensor:
+    """Updates and returns confusion matrix (without any normalization) based on the mode of the input.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+        threshold: Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the
+            case of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
+        multilabel: determines if data is multilabel or not.
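+
+    Example (illustrative; the expected output follows from the binary case shown for
+    ``_confusion_matrix_compute`` below, which returns the unnormalized matrix unchanged):
+        >>> target = B.tensor([1, 1, 0, 0])
+        >>> preds = B.tensor([0, 1, 0, 0])
+        >>> _confusion_matrix_update(preds, target, num_classes=2)
+        tensor([[2, 0],
+                [1, 1]])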
+ """ + + preds, target, mode = _input_format_classification(preds, target, threshold) + if mode not in (DataType.BINARY, DataType.MULTILABEL): + preds = preds.argmax(dim=1) + target = target.argmax(dim=1) + if multilabel: + unique_mapping = ((2 * target + preds) + 4 * B.arange(num_classes, device=preds.device)).flatten() + minlength = 4 * num_classes + else: + unique_mapping = (target.view(-1) * num_classes + preds.view(-1)).to(B.long) + minlength = num_classes ** 2 + + bins = B.bincount(unique_mapping, minlength=minlength) + if multilabel: + confmat = bins.reshape(num_classes, 2, 2) + else: + confmat = bins.reshape(num_classes, num_classes) + return confmat + + +def _confusion_matrix_compute(confmat: Tensor, normalize: Optional[str] = None) -> Tensor: + """Computes confusion matrix based on the normalization mode. + + Args: + confmat: Confusion matrix without normalization + normalize: Normalization mode for confusion matrix. Choose from + - ``None`` or ``'none'``: no normalization (default) + - ``'true'``: normalization over the targets (most commonly used) + - ``'pred'``: normalization over the predictions + - ``'all'``: normalization over the whole matrix + + Example: + >>> # binary case + >>> target = B.tensor([1, 1, 0, 0]) + >>> preds = B.tensor([0, 1, 0, 0]) + >>> confmat = _confusion_matrix_update(preds, target, num_classes=2) + >>> _confusion_matrix_compute(confmat) + tensor([[2, 0], + [1, 1]]) + + >>> # multiclass case + >>> target = B.tensor([2, 1, 0, 0]) + >>> preds = B.tensor([2, 1, 0, 1]) + >>> confmat = _confusion_matrix_update(preds, target, num_classes=3) + >>> _confusion_matrix_compute(confmat) + tensor([[1, 1, 0], + [0, 1, 0], + [0, 0, 1]]) + + >>> # multilabel case + >>> target = B.tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = B.tensor([[0, 0, 1], [1, 0, 1]]) + >>> confmat = _confusion_matrix_update(preds, target, num_classes=3, multilabel=True) + >>> _confusion_matrix_compute(confmat) # doctest: +NORMALIZE_WHITESPACE + tensor([[[1, 0], [0, 1]], + [[1, 0], [1, 0]], + [[0, 1], [0, 1]]]) + """ + + allowed_normalize = ("true", "pred", "all", "none", None) + if normalize not in allowed_normalize: + raise ValueError(f"Argument average needs to one of the following: {allowed_normalize}") + if normalize is not None and normalize != "none": + confmat = confmat.float() if not confmat.is_floating_point() else confmat + if normalize == "true": + confmat = confmat / confmat.sum(axis=1, keepdim=True) + elif normalize == "pred": + confmat = confmat / confmat.sum(axis=0, keepdim=True) + elif normalize == "all": + confmat = confmat / confmat.sum() + + nan_elements = confmat[B.isnan(confmat)].nelement() + if nan_elements != 0: + confmat[B.isnan(confmat)] = 0 + rank_zero_warn(f"{nan_elements} nan values found in confusion matrix have been replaced with zeros.") + return confmat + + +def confusion_matrix( + preds: Tensor, + target: Tensor, + num_classes: int, + normalize: Optional[str] = None, + threshold: float = 0.5, + multilabel: bool = False, +) -> Tensor: + r""" + Computes the `confusion matrix`_. Works with binary, + multiclass, and multilabel data. Accepts probabilities or logits from a model output or integer class + values in prediction. Works with multi-dimensional preds and target, but it should be noted that + additional dimensions will be flattened. + + If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument + to convert into integer labels. This is the case for binary and multi-label probabilities or logits. 
+
+    If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``.
+
+    If working with multilabel data, setting the ``multilabel`` argument to ``True`` will make sure that a
+    `confusion matrix gets calculated per label`_.
+
+    Args:
+        preds: (float or long tensor), Either a ``(N, ...)`` tensor with labels or
+            ``(N, C, ...)`` where C is the number of classes, tensor with labels/logits/probabilities
+        target: ``target`` (long tensor), tensor with shape ``(N, ...)`` with ground truth labels
+        num_classes: Number of classes in the dataset.
+        normalize: Normalization mode for confusion matrix. Choose from
+
+            - ``None`` or ``'none'``: no normalization (default)
+            - ``'true'``: normalization over the targets (most commonly used)
+            - ``'pred'``: normalization over the predictions
+            - ``'all'``: normalization over the whole matrix
+
+        threshold:
+            Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
+            of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
+
+        multilabel:
+            determines if data is multilabel or not.
+
+    Example (binary data):
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics import ConfusionMatrix
+        >>> target = B.tensor([1, 1, 0, 0])
+        >>> preds = B.tensor([0, 1, 0, 0])
+        >>> confmat = ConfusionMatrix(num_classes=2)
+        >>> confmat(preds, target)
+        tensor([[2., 0.],
+                [1., 1.]])
+
+    Example (multiclass data):
+        >>> target = B.tensor([2, 1, 0, 0])
+        >>> preds = B.tensor([2, 1, 0, 1])
+        >>> confmat = ConfusionMatrix(num_classes=3)
+        >>> confmat(preds, target)
+        tensor([[1., 1., 0.],
+                [0., 1., 0.],
+                [0., 0., 1.]])
+
+    Example (multilabel data):
+        >>> target = B.tensor([[0, 1, 0], [1, 0, 1]])
+        >>> preds = B.tensor([[0, 0, 1], [1, 0, 1]])
+        >>> confmat = ConfusionMatrix(num_classes=3, multilabel=True)
+        >>> confmat(preds, target)  # doctest: +NORMALIZE_WHITESPACE
+        tensor([[[1., 0.], [0., 1.]],
+                [[1., 0.], [1., 0.]],
+                [[0., 1.], [0., 1.]]])
+
+    """
+    confmat = _confusion_matrix_update(preds, target, num_classes, threshold, multilabel)
+    return _confusion_matrix_compute(confmat, normalize)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/dice.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/dice.py
new file mode 100644
index 000000000..5f90fe02b
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/dice.py
@@ -0,0 +1,112 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.utilities.data import to_categorical
+from paddlemetrics.utilities.distributed import reduce
+
+
+def _stat_scores(
+    preds: Tensor,
+    target: Tensor,
+    class_index: int,
+    argmax_dim: int = 1,
+) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
+    """Calculates the number of true positives, false positives, true negatives and false negatives for a specific
+    class.
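+    If ``preds`` carries an extra class dimension, it is first collapsed to label
+    predictions with ``to_categorical`` along ``argmax_dim``.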
+ + Args: + preds: prediction tensor + target: target tensor + class_index: class to calculate over + argmax_dim: if pred is a tensor of probabilities, this indicates the + axis the argmax transformation will be applied over + + Return: + True Positive, False Positive, True Negative, False Negative, Support + + Example: + >>> x = B.tensor([1, 2, 3]) + >>> y = B.tensor([0, 2, 3]) + >>> tp, fp, tn, fn, sup = _stat_scores(x, y, class_index=1) + >>> tp, fp, tn, fn, sup + (tensor(0), tensor(1), tensor(2), tensor(0), tensor(0)) + """ + if preds.ndim == target.ndim + 1: + preds = to_categorical(preds, argmax_dim=argmax_dim) + + tp = ((preds == class_index) * (target == class_index)).to(B.long).sum() + fp = ((preds == class_index) * (target != class_index)).to(B.long).sum() + tn = ((preds != class_index) * (target != class_index)).to(B.long).sum() + fn = ((preds != class_index) * (target == class_index)).to(B.long).sum() + sup = (target == class_index).to(B.long).sum() + + return tp, fp, tn, fn, sup + + +def dice_score( + preds: Tensor, + target: Tensor, + bg: bool = False, + nan_score: float = 0.0, + no_fg_score: float = 0.0, + reduction: str = "elementwise_mean", +) -> Tensor: + """Compute dice score from prediction scores. + + Args: + preds: estimated probabilities + target: ground-truth labels + bg: whether to also compute dice for the background + nan_score: score to return, if a NaN occurs during computation + no_fg_score: score to return, if no foreground pixel was found in target + reduction: a method to reduce metric score over labels. + + - ``'elementwise_mean'``: takes the mean (default) + - ``'sum'``: takes the sum + - ``'none'``: no reduction will be applied + + Return: + Tensor containing dice score + + Example: + >>> from paddlemetrics.functional import dice_score + >>> pred = B.tensor([[0.85, 0.05, 0.05, 0.05], + ... [0.05, 0.85, 0.05, 0.05], + ... [0.05, 0.05, 0.85, 0.05], + ... [0.05, 0.05, 0.05, 0.85]]) + >>> target = B.tensor([0, 1, 3, 2]) + >>> dice_score(pred, target) + tensor(0.3333) + """ + num_classes = preds.shape[1] + bg_inv = 1 - int(bg) + scores = B.zeros(num_classes - bg_inv, device=preds.device, dtype=B.float32) + for i in range(bg_inv, num_classes): + if not (target == i).any(): + # no foreground class + scores[i - bg_inv] += no_fg_score + continue + + # TODO: rewrite to use general `stat_scores` + tp, fp, _, fn, _ = _stat_scores(preds=preds, target=target, class_index=i) + denom = (2 * tp + fp + fn).to(B.float) + # nan result + score_cls = (2 * tp).to(B.float) / denom if B.is_nonzero(denom) else nan_score + scores[i - bg_inv] += score_cls.item() + + return reduce(scores, reduction=reduction) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/f_beta.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/f_beta.py new file mode 100644 index 000000000..7b9b626ce --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/f_beta.py @@ -0,0 +1,351 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.classification.stat_scores import _reduce_stat_scores, _stat_scores_update
+from paddlemetrics.utilities.enums import AverageMethod as AvgMethod
+from paddlemetrics.utilities.enums import MDMCAverageMethod
+
+
+def _safe_divide(num: Tensor, denom: Tensor) -> Tensor:
+    """Prevent zero division."""
+    denom[denom == 0.0] = 1
+    return num / denom
+
+
+def _fbeta_compute(
+    tp: Tensor,
+    fp: Tensor,
+    tn: Tensor,
+    fn: Tensor,
+    beta: float,
+    ignore_index: Optional[int],
+    average: str,
+    mdmc_average: Optional[str],
+) -> Tensor:
+    """Computes f_beta metric from stat scores: true positives, false positives, true negatives, false negatives.
+
+    Args:
+        tp: True positives
+        fp: False positives
+        tn: True negatives
+        fn: False negatives
+        beta: The parameter `beta` (which determines the weight of recall in the combined score)
+        ignore_index: Integer specifying a target class to ignore. If given, this class index does not contribute
+            to the returned score, regardless of reduction method
+        average: Defines the reduction that is applied
+        mdmc_average: Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
+            ``average`` parameter)
+
+    Example:
+        >>> from paddlemetrics.functional.classification.stat_scores import _stat_scores_update
+        >>> target = B.tensor([0, 1, 2, 0, 1, 2])
+        >>> preds = B.tensor([0, 2, 1, 0, 0, 1])
+        >>> tp, fp, tn, fn = _stat_scores_update(
+        ...     preds,
+        ...     target,
+        ...     reduce='micro',
+        ...     num_classes=3,
+        ... )
+        >>> _fbeta_compute(tp, fp, tn, fn, beta=0.5, ignore_index=None, average='micro', mdmc_average=None)
+        tensor(0.3333)
+    """
+    if average == AvgMethod.MICRO and mdmc_average != MDMCAverageMethod.SAMPLEWISE:
+        mask = tp >= 0
+        precision = _safe_divide(tp[mask].sum().float(), (tp[mask] + fp[mask]).sum())
+        recall = _safe_divide(tp[mask].sum().float(), (tp[mask] + fn[mask]).sum())
+    else:
+        precision = _safe_divide(tp.float(), tp + fp)
+        recall = _safe_divide(tp.float(), tp + fn)
+
+    num = (1 + beta ** 2) * precision * recall
+    denom = beta ** 2 * precision + recall
+    denom[denom == 0.0] = 1.0  # avoid division by 0
+
+    # if classes matter and a given class is not present in both the preds and the target,
+    # computing the score for this class is meaningless, thus they should be ignored
+    if average == AvgMethod.NONE and mdmc_average != MDMCAverageMethod.SAMPLEWISE:
+        # a class is not present if there exists no TPs, no FPs, and no FNs
+        meaningless_indices = B.nonzero((tp | fn | fp) == 0).cpu()
+        if ignore_index is None:
+            ignore_index = meaningless_indices
+        else:
+            ignore_index = B.unique(B.cat((meaningless_indices, B.tensor([[ignore_index]]))))
+
+    if ignore_index is not None:
+        if average not in (AvgMethod.MICRO, AvgMethod.SAMPLES) and mdmc_average == MDMCAverageMethod.SAMPLEWISE:
+            num[..., ignore_index] = -1
+            denom[..., ignore_index] = -1
+        elif average not in (AvgMethod.MICRO, AvgMethod.SAMPLES):
+            num[ignore_index, ...] = -1
+            denom[ignore_index, ...] = -1
+
+    if average == AvgMethod.MACRO and mdmc_average != MDMCAverageMethod.SAMPLEWISE:
+        cond = (tp + fp + fn == 0) | (tp + fp + fn == -3)
+        num = num[~cond]
+        denom = denom[~cond]
+
+    return _reduce_stat_scores(
+        numerator=num,
+        denominator=denom,
+        weights=None if average != AvgMethod.WEIGHTED else tp + fn,
+        average=average,
+        mdmc_average=mdmc_average,
+    )
+
+
+def fbeta(
+    preds: Tensor,
+    target: Tensor,
+    beta: float = 1.0,
+    average: str = "micro",
+    mdmc_average: Optional[str] = None,
+    ignore_index: Optional[int] = None,
+    num_classes: Optional[int] = None,
+    threshold: float = 0.5,
+    top_k: Optional[int] = None,
+    multiclass: Optional[bool] = None,
+) -> Tensor:
+    r"""
+    Computes f_beta metric.
+
+    .. math::
+        F_{\beta} = (1 + \beta^2) * \frac{\text{precision} * \text{recall}}
+        {(\beta^2 * \text{precision}) + \text{recall}}
+
+    Works with binary, multiclass, and multilabel data.
+    Accepts probabilities or logits from a model output or integer class values in prediction.
+    Works with multi-dimensional preds and target.
+
+    If preds and target are the same shape and preds is a float tensor, we use the ``threshold`` argument
+    to convert into integer labels. This is the case for binary and multi-label logits or probabilities.
+
+    If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``.
+
+    The reduction method (how the precision scores are aggregated) is controlled by the
+    ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the
+    multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`.
+
+    Args:
+        preds: Predictions from model (probabilities, logits or labels)
+        target: Ground truth values
+        average:
+            Defines the reduction that is applied. Should be one of the following:
+
+            - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes.
+            - ``'macro'``: Calculate the metric for each class separately, and average the
+              metrics across classes (with equal weights for each class).
+            - ``'weighted'``: Calculate the metric for each class separately, and average the
+              metrics across classes, weighting each class by its support (``tp + fn``).
+            - ``'none'`` or ``None``: Calculate the metric for each class separately, and return
+              the metric for every class.
+            - ``'samples'``: Calculate the metric for each sample, and average the metrics
+              across samples (with equal weights for each sample).
+
+            .. note:: What is considered a sample in the multi-dimensional multi-class case
+                depends on the value of ``mdmc_average``.
+
+            .. note:: If ``'none'`` and a given class doesn't occur in the `preds` or `target`,
+                the value for the class will be ``nan``.
+
+        mdmc_average:
+            Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
+            ``average`` parameter). Should be one of the following:
+
+            - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional
+              multi-class.
+            - ``'samplewise'``: In this case, the statistics are computed separately for each
+              sample on the ``N`` axis, and then averaged over samples.
+              The computation for each sample is done by treating the flattened extra axes ``...``
+              (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample,
+              and computing the metric for the sample based on that.
+ - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs + (see :ref:`references/modules:input types`) + are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they + were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. + + ignore_index: + Integer specifying a target class to ignore. If given, this class index does not contribute + to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` + or ``'none'``, the score for the ignored class will be returned as ``nan``. + num_classes: + Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods. + threshold: + Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case + of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. + top_k: + Number of highest probability or logit score predictions considered to find the correct label, + relevant only for (multi-dimensional) multi-class inputs. The + default value (``None``) will be interpreted as 1 for these inputs. + + Should be left at default (``None``) for all other types of inputs. + multiclass: + Used only in certain special cases, where you want to treat inputs as a different type + than what they appear to be. See the parameter's + :ref:`documentation section ` + for a more detailed explanation and examples. + + Return: + The shape of the returned tensor depends on the ``average`` parameter + + - If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned + - If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number + of classes + + Example: + >>> from paddlemetrics.functional import fbeta + >>> target = B.tensor([0, 1, 2, 0, 1, 2]) + >>> preds = B.tensor([0, 2, 1, 0, 0, 1]) + >>> fbeta(preds, target, num_classes=3, beta=0.5) + tensor(0.3333) + + """ + allowed_average = list(AvgMethod) + if average not in allowed_average: + raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.") + + if mdmc_average is not None and MDMCAverageMethod.from_str(mdmc_average) is None: + raise ValueError(f"The `mdmc_average` has to be one of {list(MDMCAverageMethod)}, got {mdmc_average}.") + + if average in [AvgMethod.MACRO, AvgMethod.WEIGHTED, AvgMethod.NONE] and (not num_classes or num_classes < 1): + raise ValueError(f"When you set `average` as {average}, you have to provide the number of classes.") + + if num_classes and ignore_index is not None and (not 0 <= ignore_index < num_classes or num_classes == 1): + raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes") + + reduce = AvgMethod.MACRO if average in [AvgMethod.WEIGHTED, AvgMethod.NONE] else average + tp, fp, tn, fn = _stat_scores_update( + preds, + target, + reduce=reduce, + mdmc_reduce=mdmc_average, + threshold=threshold, + num_classes=num_classes, + top_k=top_k, + multiclass=multiclass, + ignore_index=ignore_index, + ) + + return _fbeta_compute(tp, fp, tn, fn, beta, ignore_index, average, mdmc_average) + + +def f1( + preds: Tensor, + target: Tensor, + beta: float = 1.0, + average: str = "micro", + mdmc_average: Optional[str] = None, + ignore_index: Optional[int] = None, + num_classes: Optional[int] = None, + threshold: float = 0.5, + top_k: Optional[int] = None, + multiclass: Optional[bool] = None, +) -> Tensor: + """Computes F1 metric. 
+    F1 is the harmonic mean of the precision and recall scores.
+
+    Works with binary, multiclass, and multilabel data.
+    Accepts probabilities or logits from a model output or integer class values in prediction.
+    Works with multi-dimensional preds and target.
+
+    If preds and target are the same shape and preds is a float tensor, we use the ``threshold`` argument
+    to convert into integer labels. This is the case for binary and multi-label probabilities or logits.
+
+    If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``.
+
+    The reduction method (how the precision scores are aggregated) is controlled by the
+    ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the
+    multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`.
+
+    Args:
+        preds: Predictions from model (probabilities, logits or labels)
+        target: Ground truth values
+        average:
+            Defines the reduction that is applied. Should be one of the following:
+
+            - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes.
+            - ``'macro'``: Calculate the metric for each class separately, and average the
+              metrics across classes (with equal weights for each class).
+            - ``'weighted'``: Calculate the metric for each class separately, and average the
+              metrics across classes, weighting each class by its support (``tp + fn``).
+            - ``'none'`` or ``None``: Calculate the metric for each class separately, and return
+              the metric for every class.
+            - ``'samples'``: Calculate the metric for each sample, and average the metrics
+              across samples (with equal weights for each sample).
+
+            .. note:: What is considered a sample in the multi-dimensional multi-class case
+                depends on the value of ``mdmc_average``.
+
+            .. note:: If ``'none'`` and a given class doesn't occur in the `preds` or `target`,
+                the value for the class will be ``nan``.
+
+        mdmc_average:
+            Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
+            ``average`` parameter). Should be one of the following:
+
+            - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional
+              multi-class.
+
+            - ``'samplewise'``: In this case, the statistics are computed separately for each
+              sample on the ``N`` axis, and then averaged over samples.
+              The computation for each sample is done by treating the flattened extra axes ``...``
+              (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample,
+              and computing the metric for the sample based on that.
+
+            - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs
+              (see :ref:`references/modules:input types`)
+              are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they
+              were ``(N_X, C)``. From here on the ``average`` parameter applies as usual.
+
+        ignore_index:
+            Integer specifying a target class to ignore. If given, this class index does not contribute
+            to the returned score, regardless of reduction method. If an index is ignored, and ``average=None``
+            or ``'none'``, the score for the ignored class will be returned as ``nan``.
+
+        num_classes:
+            Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods.
+
+        threshold:
+            Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
+            of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
+ top_k: + Number of highest probability or logit score predictions considered to find the correct label, + relevant only for (multi-dimensional) multi-class inputs. The + default value (``None``) will be interpreted as 1 for these inputs. + + Should be left at default (``None``) for all other types of inputs. + + multiclass: + Used only in certain special cases, where you want to treat inputs as a different type + than what they appear to be. See the parameter's + :ref:`documentation section ` + for a more detailed explanation and examples. + + Return: + The shape of the returned tensor depends on the ``average`` parameter + + - If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned + - If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number + of classes + + Example: + >>> from paddlemetrics.functional import f1 + >>> target = B.tensor([0, 1, 2, 0, 1, 2]) + >>> preds = B.tensor([0, 2, 1, 0, 0, 1]) + >>> f1(preds, target, num_classes=3) + tensor(0.3333) + """ + return fbeta(preds, target, 1.0, average, mdmc_average, ignore_index, num_classes, threshold, top_k, multiclass) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/hamming_distance.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/hamming_distance.py new file mode 100644 index 000000000..e3f95bad4 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/hamming_distance.py @@ -0,0 +1,97 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Tuple, Union + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.utilities.checks import _input_format_classification + + +def _hamming_distance_update( + preds: Tensor, + target: Tensor, + threshold: float = 0.5, +) -> Tuple[Tensor, int]: + """Returns the number of positions where prediction equals target, and number of predictions. + + Args: + preds: Predicted tensor + target: Ground truth tensor + threshold: Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case + of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. + """ + + preds, target, _ = _input_format_classification(preds, target, threshold=threshold) + + correct = (preds == target).sum() + total = preds.numel() + + return correct, total + + +def _hamming_distance_compute(correct: Tensor, total: Union[int, Tensor]) -> Tensor: + """Computes the Hamming distance. 
+ + Args: + correct: Number of positions where prediction equals target + total: Total number of predictions + + Example: + >>> target = B.tensor([[0, 1], [1, 1]]) + >>> preds = B.tensor([[0, 1], [0, 1]]) + >>> correct, total = _hamming_distance_update(preds, target) + >>> _hamming_distance_compute(correct, total) + tensor(0.2500) + """ + + return 1 - correct.float() / total + + +def hamming_distance(preds: Tensor, target: Tensor, threshold: float = 0.5) -> Tensor: + r""" + Computes the average `Hamming distance`_ (also + known as Hamming loss) between targets and predictions: + + .. math:: + \text{Hamming distance} = \frac{1}{N \cdot L} \sum_i^N \sum_l^L 1(y_{il} \neq \hat{y}_{il}) + + Where :math:`y` is a tensor of target values, :math:`\hat{y}` is a tensor of predictions, + and :math:`\bullet_{il}` refers to the :math:`l`-th label of the :math:`i`-th sample of that + tensor. + + This is the same as ``1-accuracy`` for binary data, while for all other types of inputs it + treats each possible label separately - meaning that, for example, multi-class data is + treated as if it were multi-label. + + Accepts all input types listed in :ref:`references/modules:input types`. + + Args: + preds: Predictions from model (probabilities, logits or labels) + target: Ground truth + threshold: + Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case + of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. + + Example: + >>> from paddlemetrics.functional import hamming_distance + >>> target = B.tensor([[0, 1], [1, 1]]) + >>> preds = B.tensor([[0, 1], [0, 1]]) + >>> hamming_distance(preds, target) + tensor(0.2500) + + """ + + correct, total = _hamming_distance_update(preds, target, threshold) + return _hamming_distance_compute(correct, total) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/hinge.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/hinge.py new file mode 100644 index 000000000..59d8be1af --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/hinge.py @@ -0,0 +1,231 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Optional, Tuple, Union + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.utilities.checks import _input_squeeze +from paddlemetrics.utilities.data import to_onehot +from paddlemetrics.utilities.enums import DataType, EnumStr + + +class MulticlassMode(EnumStr): + """Enum to represent possible multiclass modes of hinge. + + >>> "Crammer-Singer" in list(MulticlassMode) + True + """ + + CRAMMER_SINGER = "crammer-singer" + ONE_VS_ALL = "one-vs-all" + + +def _check_shape_and_type_consistency_hinge( + preds: Tensor, + target: Tensor, +) -> DataType: + """Checks shape and type of `preds` and `target` and returns mode of the input tensors. 
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+
+    Raises:
+        `ValueError`: if `target` is not one dimensional
+        `ValueError`: if `preds` and `target` do not have the same shape in the first dimension
+        `ValueError`: if `preds` is neither one nor two dimensional
+    """
+
+    if target.ndim > 1:
+        raise ValueError(
+            f"The `target` should be one dimensional, got `target` with shape={target.shape}.",
+        )
+
+    if preds.ndim == 1:
+        if preds.shape != target.shape:
+            raise ValueError(
+                "The `preds` and `target` should have the same shape,"
+                f" got `preds` with shape={preds.shape} and `target` with shape={target.shape}."
+            )
+        mode = DataType.BINARY
+    elif preds.ndim == 2:
+        if preds.shape[0] != target.shape[0]:
+            raise ValueError(
+                "The `preds` and `target` should have the same shape in the first dimension,"
+                f" got `preds` with shape={preds.shape} and `target` with shape={target.shape}."
+            )
+        mode = DataType.MULTICLASS
+    else:
+        raise ValueError(f"The `preds` should be one or two dimensional, got `preds` with shape={preds.shape}.")
+    return mode
+
+
+def _hinge_update(
+    preds: Tensor,
+    target: Tensor,
+    squared: bool = False,
+    multiclass_mode: Optional[Union[str, MulticlassMode]] = None,
+) -> Tuple[Tensor, Tensor]:
+    """Updates and returns sum over Hinge loss scores for each observation and the total number of observations.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+        squared: If True, this will compute the squared hinge loss. Otherwise, computes the regular hinge loss.
+        multiclass_mode:
+            Which approach to use for multi-class inputs (has no effect in the binary case). ``None`` (default),
+            ``MulticlassMode.CRAMMER_SINGER`` or ``"crammer-singer"``, uses the Crammer Singer multi-class hinge loss.
+            ``MulticlassMode.ONE_VS_ALL`` or ``"one-vs-all"`` computes the hinge loss in a one-vs-all fashion.
+    """
+    preds, target = _input_squeeze(preds, target)
+
+    mode = _check_shape_and_type_consistency_hinge(preds, target)
+
+    if mode == DataType.MULTICLASS:
+        target = to_onehot(target, max(2, preds.shape[1])).bool()
+
+    if mode == DataType.MULTICLASS and (multiclass_mode is None or multiclass_mode == MulticlassMode.CRAMMER_SINGER):
+        margin = preds[target]
+        margin -= B.max(preds[~target].view(preds.shape[0], -1), dim=1)[0]
+    elif mode == DataType.BINARY or multiclass_mode == MulticlassMode.ONE_VS_ALL:
+        target = target.bool()
+        margin = B.zeros_like(preds)
+        margin[target] = preds[target]
+        margin[~target] = -preds[~target]
+    else:
+        raise ValueError(
+            "The `multiclass_mode` should be either None / 'crammer-singer' / MulticlassMode.CRAMMER_SINGER"
+            " (default) or 'one-vs-all' / MulticlassMode.ONE_VS_ALL,"
+            f" got {multiclass_mode}."
+        )
+
+    measures = 1 - margin
+    measures = B.clamp(measures, 0)
+
+    if squared:
+        measures = measures.pow(2)
+
+    total = tensor(target.shape[0], device=target.device)
+    return measures.sum(dim=0), total
+
+
+def _hinge_compute(measure: Tensor, total: Tensor) -> Tensor:
+    """Computes mean Hinge loss.
+
+    Args:
+        measure: Sum over hinge losses for each observation
+        total: Number of observations
+
+    Example:
+        >>> # binary case
+        >>> target = B.tensor([0, 1, 1])
+        >>> preds = B.tensor([-2.2, 2.4, 0.1])
+        >>> measure, total = _hinge_update(preds, target)
+        >>> _hinge_compute(measure, total)
+        tensor(0.3000)
+
+        >>> # multiclass case
+        >>> target = B.tensor([0, 1, 2])
+        >>> preds = B.tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]])
+        >>> measure, total = _hinge_update(preds, target)
+        >>> _hinge_compute(measure, total)
+        tensor(2.9000)
+
+        >>> # multiclass one-vs-all mode case
+        >>> target = B.tensor([0, 1, 2])
+        >>> preds = B.tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]])
+        >>> measure, total = _hinge_update(preds, target, multiclass_mode="one-vs-all")
+        >>> _hinge_compute(measure, total)
+        tensor([2.2333, 1.5000, 1.2333])
+    """
+
+    return measure / total
+
+
+def hinge(
+    preds: Tensor,
+    target: Tensor,
+    squared: bool = False,
+    multiclass_mode: Optional[Union[str, MulticlassMode]] = None,
+) -> Tensor:
+    r"""
+    Computes the mean `Hinge loss`_ typically used for Support Vector Machines (SVMs).
+
+    In the binary case it is defined as:
+
+    .. math::
+        \text{Hinge loss} = \max(0, 1 - y \times \hat{y})
+
+    Where :math:`y \in {-1, 1}` is the target, and :math:`\hat{y} \in \mathbb{R}` is the prediction.
+
+    In the multi-class case, when ``multiclass_mode=None`` (default), ``multiclass_mode=MulticlassMode.CRAMMER_SINGER``
+    or ``multiclass_mode="crammer-singer"``, this metric will compute the multi-class hinge loss defined by Crammer and
+    Singer as:
+
+    .. math::
+        \text{Hinge loss} = \max\left(0, 1 - \hat{y}_y + \max_{i \ne y} (\hat{y}_i)\right)
+
+    Where :math:`y \in {0, ..., \mathrm{C}}` is the target class (where :math:`\mathrm{C}` is the number of classes),
+    and :math:`\hat{y} \in \mathbb{R}^\mathrm{C}` is the predicted output per class.
+
+    In the multi-class case when ``multiclass_mode=MulticlassMode.ONE_VS_ALL`` or ``multiclass_mode='one-vs-all'``, this
+    metric will use a one-vs-all approach to compute the hinge loss, giving a vector of C outputs where each entry pits
+    that class against all remaining classes.
+
+    This metric can optionally output the mean of the squared hinge loss by setting ``squared=True``
+
+    Only accepts inputs with preds shape of (N) (binary) or (N, C) (multi-class) and target shape of (N).
+
+    Args:
+        preds: Predictions from model (as float outputs from decision function).
+        target: Ground truth labels.
+        squared:
+            If True, this will compute the squared hinge loss. Otherwise, computes the regular hinge loss (default).
+        multiclass_mode:
+            Which approach to use for multi-class inputs (has no effect in the binary case). ``None`` (default),
+            ``MulticlassMode.CRAMMER_SINGER`` or ``"crammer-singer"``, uses the Crammer Singer multi-class hinge loss.
+            ``MulticlassMode.ONE_VS_ALL`` or ``"one-vs-all"`` computes the hinge loss in a one-vs-all fashion.
+
+    Raises:
+        ValueError:
+            If preds shape is not of size (N) or (N, C).
+        ValueError:
+            If target shape is not of size (N).
+        ValueError:
+            If ``multiclass_mode`` is not: None, ``MulticlassMode.CRAMMER_SINGER``, ``"crammer-singer"``,
+            ``MulticlassMode.ONE_VS_ALL`` or ``"one-vs-all"``.
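+
+    Example (squared hinge; an illustrative sketch reusing the binary inputs from the
+    example below):
+        >>> import paddleext.torchapi as B
+        >>> target = B.tensor([0, 1, 1])
+        >>> preds = B.tensor([-2.2, 2.4, 0.1])
+        >>> hinge(preds, target, squared=True)
+        tensor(0.2700)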
+
+    Example (binary case):
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics.functional import hinge
+        >>> target = B.tensor([0, 1, 1])
+        >>> preds = B.tensor([-2.2, 2.4, 0.1])
+        >>> hinge(preds, target)
+        tensor(0.3000)
+
+    Example (default / multiclass case):
+        >>> target = B.tensor([0, 1, 2])
+        >>> preds = B.tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]])
+        >>> hinge(preds, target)
+        tensor(2.9000)
+
+    Example (multiclass example, one vs all mode):
+        >>> target = B.tensor([0, 1, 2])
+        >>> preds = B.tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]])
+        >>> hinge(preds, target, multiclass_mode="one-vs-all")
+        tensor([2.2333, 1.5000, 1.2333])
+    """
+    measure, total = _hinge_update(preds, target, squared=squared, multiclass_mode=multiclass_mode)
+    return _hinge_compute(measure, total)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/iou.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/iou.py
new file mode 100644
index 000000000..b7cf60774
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/iou.py
@@ -0,0 +1,133 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.classification.confusion_matrix import _confusion_matrix_update
+from paddlemetrics.utilities.data import get_num_classes
+from paddlemetrics.utilities.distributed import reduce
+
+
+def _iou_from_confmat(
+    confmat: Tensor,
+    num_classes: int,
+    ignore_index: Optional[int] = None,
+    absent_score: float = 0.0,
+    reduction: str = "elementwise_mean",
+) -> Tensor:
+    """Computes the intersection over union from confusion matrix.
+
+    Args:
+        confmat: Confusion matrix without normalization
+        num_classes: Number of classes for a given prediction and target tensor
+        ignore_index: optional int specifying a target class to ignore. If given, this class index does not contribute
+            to the returned score, regardless of reduction method.
+        absent_score: score to use for an individual class, if no instances of the class index were present in `pred`
+            AND no instances of the class index were present in `target`.
+        reduction: a method to reduce metric score over labels.
+
+            - ``'elementwise_mean'``: takes the mean (default)
+            - ``'sum'``: takes the sum
+            - ``'none'``: no reduction will be applied
+    """
+
+    # Remove the ignored class index from the scores.
+    if ignore_index is not None and 0 <= ignore_index < num_classes:
+        confmat[ignore_index] = 0.0
+
+    intersection = B.diag(confmat)
+    union = confmat.sum(0) + confmat.sum(1) - intersection
+
+    # If this class is absent in both target AND pred (union == 0), then use the absent_score for this class.
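+    # (the float division below yields nan for classes where union == 0; those
+    # entries are immediately overwritten with `absent_score`)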
+    scores = intersection.float() / union.float()
+    scores[union == 0] = absent_score
+
+    if ignore_index is not None and 0 <= ignore_index < num_classes:
+        scores = B.cat(
+            [
+                scores[:ignore_index],
+                scores[ignore_index + 1 :],
+            ]
+        )
+
+    return reduce(scores, reduction=reduction)
+
+
+def iou(
+    preds: Tensor,
+    target: Tensor,
+    ignore_index: Optional[int] = None,
+    absent_score: float = 0.0,
+    threshold: float = 0.5,
+    num_classes: Optional[int] = None,
+    reduction: str = "elementwise_mean",
+) -> Tensor:
+    r"""
+    Computes `Jaccard index`_
+
+    .. math:: J(A,B) = \frac{|A\cap B|}{|A\cup B|}
+
+    Where: :math:`A` and :math:`B` are both tensors of the same size,
+    containing integer class values. They may be subject to conversion from
+    input data (see description below).
+
+    Note that it is different from box IoU.
+
+    If preds and target are the same shape and preds is a float tensor, we use the ``threshold`` argument
+    to convert into integer labels. This is the case for binary and multi-label probabilities.
+
+    If preds has an extra dimension as in the case of multi-class scores we
+    perform an argmax on ``dim=1``.
+
+    Args:
+        preds: tensor containing predictions from model (probabilities, or labels) with shape ``[N, d1, d2, ...]``
+        target: tensor containing ground truth labels with shape ``[N, d1, d2, ...]``
+        ignore_index: optional int specifying a target class to ignore. If given,
+            this class index does not contribute to the returned score, regardless
+            of reduction method. Has no effect if given an int that is not in the
+            range [0, num_classes-1], where num_classes is either given or derived
+            from pred and target. By default, no index is ignored, and all classes are used.
+        absent_score: score to use for an individual class, if no instances of
+            the class index were present in `pred` AND no instances of the class
+            index were present in `target`. For example, if we have 3 classes,
+            [0, 0] for `pred`, and [0, 2] for `target`, then class 1 would be
+            assigned the `absent_score`.
+        threshold:
+            Threshold value for binary or multi-label probabilities. default: 0.5
+        num_classes:
+            Optionally specify the number of classes
+        reduction: a method to reduce metric score over labels.
+
+            - ``'elementwise_mean'``: takes the mean (default)
+            - ``'sum'``: takes the sum
+            - ``'none'``: no reduction will be applied
+
+    Return:
+        IoU score: Tensor containing single value if reduction is
+        'elementwise_mean', or number of classes if reduction is 'none'
+
+    Example:
+        >>> from paddlemetrics.functional import iou
+        >>> target = B.randint(0, 2, (10, 25, 25))
+        >>> pred = B.tensor(target)
+        >>> pred[2:5, 7:13, 9:15] = 1 - pred[2:5, 7:13, 9:15]
+        >>> iou(pred, target)
+        tensor(0.9660)
+    """
+
+    num_classes = get_num_classes(preds=preds, target=target, num_classes=num_classes)
+    confmat = _confusion_matrix_update(preds, target, num_classes, threshold)
+    return _iou_from_confmat(confmat, num_classes, ignore_index, absent_score, reduction)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/kl_divergence.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/kl_divergence.py
new file mode 100644
index 000000000..0d7685c1e
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/kl_divergence.py
@@ -0,0 +1,110 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Optional, Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.utilities.checks import _check_same_shape
+from paddlemetrics.utilities.data import METRIC_EPS
+
+
+def _kld_update(p: Tensor, q: Tensor, log_prob: bool) -> Tuple[Tensor, int]:
+    """Updates and returns KL divergence scores for each observation and the total number of observations. Checks
+    same shape and 2D nature of the input tensors else raises ValueError.
+
+    Args:
+        p: data distribution with shape ``[N, d]``
+        q: prior or approximate distribution with shape ``[N, d]``
+        log_prob: bool indicating if input is log-probabilities or probabilities. If given as probabilities,
+            will normalize to make sure the distributions sum to 1
+    """
+    _check_same_shape(p, q)
+    if p.ndim != 2 or q.ndim != 2:
+        raise ValueError(f"Expected both p and q distribution to be 2D but got {p.ndim} and {q.ndim} respectively")
+
+    total = p.shape[0]
+    if log_prob:
+        measures = B.sum(p.exp() * (p - q), axis=-1)
+    else:
+        p = p / p.sum(axis=-1, keepdim=True)
+        q = q / q.sum(axis=-1, keepdim=True)
+        q = B.clamp(q, METRIC_EPS)
+        measures = B.sum(p * B.log(p / q), axis=-1)
+
+    return measures, total
+
+
+def _kld_compute(measures: Tensor, total: Tensor, reduction: Optional[str] = "mean") -> Tensor:
+    """Computes the KL divergence based on the type of reduction.
+
+    Args:
+        measures: Tensor of KL divergence scores for each observation
+        total: Number of observations
+        reduction:
+            Determines how to reduce over the ``N``/batch dimension:
+
+            - ``'mean'`` [default]: Averages score across samples
+            - ``'sum'``: Sum score across samples
+            - ``'none'`` or ``None``: Returns score per sample
+
+    Example:
+        >>> p = B.tensor([[0.36, 0.48, 0.16]])
+        >>> q = B.tensor([[1/3, 1/3, 1/3]])
+        >>> measures, total = _kld_update(p, q, log_prob=False)
+        >>> _kld_compute(measures, total)
+        tensor(0.0853)
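+
+        The remaining reductions on the same inputs, for illustration (with a single
+        sample, ``'sum'`` equals ``'mean'``):
+
+        >>> _kld_compute(measures, total, reduction='sum')
+        tensor(0.0853)
+        >>> _kld_compute(measures, total, reduction='none')
+        tensor([0.0853])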
+    """
+
+    if reduction == "sum":
+        return measures.sum()
+    if reduction == "mean":
+        return measures.sum() / total
+    if reduction is None or reduction == "none":
+        return measures
+    return measures / total
+
+
+def kl_divergence(p: Tensor, q: Tensor, log_prob: bool = False, reduction: Optional[str] = "mean") -> Tensor:
+    r"""Computes `KL divergence`_
+
+    .. math::
+        D_{KL}(P||Q) = \sum_{x\in\mathcal{X}} P(x) \log\frac{P(x)}{Q(x)}
+
+    Where :math:`P` and :math:`Q` are probability distributions where :math:`P` usually represents a distribution
+    over data and :math:`Q` is often a prior or approximation of :math:`P`. It should be noted that the KL divergence
+    is a non-symmetric metric, i.e. :math:`D_{KL}(P||Q) \neq D_{KL}(Q||P)`.
+
+    Args:
+        p: data distribution with shape ``[N, d]``
+        q: prior or approximate distribution with shape ``[N, d]``
+        log_prob: bool indicating if input is log-probabilities or probabilities. If given as probabilities,
+            will normalize to make sure the distributions sum to 1
+        reduction:
+            Determines how to reduce over the ``N``/batch dimension:
+
+            - ``'mean'`` [default]: Averages score across samples
+            - ``'sum'``: Sum score across samples
+            - ``'none'`` or ``None``: Returns score per sample
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> p = B.tensor([[0.36, 0.48, 0.16]])
+        >>> q = B.tensor([[1/3, 1/3, 1/3]])
+        >>> kl_divergence(p, q)
+        tensor(0.0853)
+    """
+    measures, total = _kld_update(p, q, log_prob)
+    return _kld_compute(measures, total, reduction)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/matthews_corrcoef.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/matthews_corrcoef.py
new file mode 100644
index 000000000..8532a358d
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/matthews_corrcoef.py
@@ -0,0 +1,78 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.classification.confusion_matrix import _confusion_matrix_update
+
+_matthews_corrcoef_update = _confusion_matrix_update
+
+
+def _matthews_corrcoef_compute(confmat: Tensor) -> Tensor:
+    """Computes Matthews correlation coefficient.
+
+    Args:
+        confmat: Confusion matrix
+
+    Example:
+        >>> target = B.tensor([1, 1, 0, 0])
+        >>> preds = B.tensor([0, 1, 0, 0])
+        >>> confmat = _matthews_corrcoef_update(preds, target, num_classes=2)
+        >>> _matthews_corrcoef_compute(confmat)
+        tensor(0.5774)
+    """
+
+    tk = confmat.sum(dim=1).float()
+    pk = confmat.sum(dim=0).float()
+    c = B.trace(confmat).float()
+    s = confmat.sum().float()
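+    # The expression below is the multiclass generalization of MCC computed directly
+    # from the confusion matrix: row sums `tk` (per-class target counts), column sums
+    # `pk` (per-class prediction counts), trace `c` (correct predictions) and total `s`.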
+    return (c * s - sum(tk * pk)) / (B.sqrt(s ** 2 - sum(pk * pk)) * B.sqrt(s ** 2 - sum(tk * tk)))
+
+
+def matthews_corrcoef(
+    preds: Tensor,
+    target: Tensor,
+    num_classes: int,
+    threshold: float = 0.5,
+) -> Tensor:
+    r"""
+    Calculates `Matthews correlation coefficient`_ that measures
+    the general correlation or quality of a classification. In the binary case it
+    is defined as:
+
+    .. math::
+        MCC = \frac{TP*TN - FP*FN}{\sqrt{(TP+FP)*(TP+FN)*(TN+FP)*(TN+FN)}}
+
+    where TP, TN, FP and FN are respectively the true positives, true negatives,
+    false positives and false negatives. Also works in the case of multi-label or
+    multi-class input.
+
+    Args:
+        preds: (float or long tensor), Either a ``(N, ...)`` tensor with labels or
+            ``(N, C, ...)`` where C is the number of classes, tensor with labels/probabilities
+        target: ``target`` (long tensor), tensor with shape ``(N, ...)`` with ground truth labels
+        num_classes: Number of classes in the dataset.
+        threshold:
+            Threshold value for binary or multi-label probabilities. default: 0.5
+
+    Example:
+        >>> from paddlemetrics.functional import matthews_corrcoef
+        >>> target = B.tensor([1, 1, 0, 0])
+        >>> preds = B.tensor([0, 1, 0, 0])
+        >>> matthews_corrcoef(preds, target, num_classes=2)
+        tensor(0.5774)
+
+    """
+    confmat = _matthews_corrcoef_update(preds, target, num_classes, threshold)
+    return _matthews_corrcoef_compute(confmat)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/precision_recall.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/precision_recall.py
new file mode 100644
index 000000000..4b8528dc2
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/precision_recall.py
@@ -0,0 +1,568 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional, Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.classification.stat_scores import _reduce_stat_scores, _stat_scores_update
+from paddlemetrics.utilities.enums import AverageMethod, MDMCAverageMethod
+
+
+def _precision_compute(
+    tp: Tensor,
+    fp: Tensor,
+    fn: Tensor,
+    average: str,
+    mdmc_average: Optional[str],
+) -> Tensor:
+    """Computes precision from the stat scores: true positives, false positives, false negatives.
+
+    Args:
+        tp: True positives
+        fp: False positives
+        fn: False negatives
+        average: Defines the reduction that is applied
+        mdmc_average: Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
+            ``average`` parameter)
+
+    Example:
+        >>> from paddlemetrics.functional.classification.stat_scores import _stat_scores_update
+        >>> preds = B.tensor([2, 0, 2, 1])
+        >>> target = B.tensor([1, 1, 2, 0])
+        >>> tp, fp, tn, fn = _stat_scores_update(preds, target, reduce='macro', num_classes=3)
+        >>> _precision_compute(tp, fp, fn, average='macro', mdmc_average=None)
+        tensor(0.1667)
+        >>> tp, fp, tn, fn = _stat_scores_update(preds, target, reduce='micro')
+        >>> _precision_compute(tp, fp, fn, average='micro', mdmc_average=None)
+        tensor(0.2500)
+    """
+
+    numerator = tp
+    denominator = tp + fp
+
+    if average == AverageMethod.MACRO and mdmc_average != MDMCAverageMethod.SAMPLEWISE:
+        cond = tp + fp + fn == 0
+        numerator = numerator[~cond]
+        denominator = denominator[~cond]
+
+    if average == AverageMethod.NONE and mdmc_average != MDMCAverageMethod.SAMPLEWISE:
+        # a class is not present if there exists no TPs, no FPs, and no FNs
+        meaningless_indices = B.nonzero((tp | fn | fp) == 0).cpu()
+        numerator[meaningless_indices, ...] = -1
+        denominator[meaningless_indices, ...] = -1
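+    # as in `_fbeta_compute` (f_beta.py), the -1 sentinel tells `_reduce_stat_scores`
+    # to drop these classes from the result (returned as `nan` when `average='none'`)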
+
+    return _reduce_stat_scores(
+        numerator=numerator,
+        denominator=denominator,
+        weights=None if average != "weighted" else tp + fn,
+        average=average,
+        mdmc_average=mdmc_average,
+    )
+
+
+def precision(
+    preds: Tensor,
+    target: Tensor,
+    average: str = "micro",
+    mdmc_average: Optional[str] = None,
+    ignore_index: Optional[int] = None,
+    num_classes: Optional[int] = None,
+    threshold: float = 0.5,
+    top_k: Optional[int] = None,
+    multiclass: Optional[bool] = None,
+) -> Tensor:
+    r"""
+    Computes `Precision`_
+
+    .. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}}
+
+    Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and
+    false positives respectively. With the use of ``top_k`` parameter, this metric can
+    generalize to Precision@K.
+
+    The reduction method (how the precision scores are aggregated) is controlled by the
+    ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the
+    multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`.
+
+    Args:
+        preds: Predictions from model (probabilities, logits or labels)
+        target: Ground truth values
+        average:
+            Defines the reduction that is applied. Should be one of the following:
+
+            - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes.
+            - ``'macro'``: Calculate the metric for each class separately, and average the
+              metrics across classes (with equal weights for each class).
+            - ``'weighted'``: Calculate the metric for each class separately, and average the
+              metrics across classes, weighting each class by its support (``tp + fn``).
+            - ``'none'`` or ``None``: Calculate the metric for each class separately, and return
+              the metric for every class.
+            - ``'samples'``: Calculate the metric for each sample, and average the metrics
+              across samples (with equal weights for each sample).
+
+            .. note:: What is considered a sample in the multi-dimensional multi-class case
+                depends on the value of ``mdmc_average``.
+
+            .. note:: If ``'none'`` and a given class doesn't occur in the `preds` or `target`,
+                the value for the class will be ``nan``.
+
+        mdmc_average:
+            Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
+            ``average`` parameter). Should be one of the following:
+
+            - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional
+              multi-class.
+
+            - ``'samplewise'``: In this case, the statistics are computed separately for each
+              sample on the ``N`` axis, and then averaged over samples.
+              The computation for each sample is done by treating the flattened extra axes ``...``
+              (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample,
+              and computing the metric for the sample based on that.
+
+            - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs
+              (see :ref:`references/modules:input types`)
+              are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they
+              were ``(N_X, C)``. From here on the ``average`` parameter applies as usual.
+
+        ignore_index:
+            Integer specifying a target class to ignore. If given, this class index does not contribute
+            to the returned score, regardless of reduction method. If an index is ignored, and ``average=None``
+            or ``'none'``, the score for the ignored class will be returned as ``nan``.
+
+        num_classes:
+            Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods.
+
+        threshold:
+            Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
+            of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
+        top_k:
+            Number of highest probability or logit score predictions considered to find the correct label,
+            relevant only for (multi-dimensional) multi-class inputs. The
+            default value (``None``) will be interpreted as 1 for these inputs.
+
+            Should be left at default (``None``) for all other types of inputs.
+        multiclass:
+            Used only in certain special cases, where you want to treat inputs as a different type
+            than what they appear to be. See the parameter's
+            :ref:`documentation section `
+            for a more detailed explanation and examples.
+
+    Return:
+        The shape of the returned tensor depends on the ``average`` parameter
+
+        - If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned
+        - If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number
+          of classes
+
+    Raises:
+        ValueError:
+            If ``average`` is not one of ``"micro"``, ``"macro"``, ``"weighted"``,
+            ``"samples"``, ``"none"`` or ``None``.
+        ValueError:
+            If ``mdmc_average`` is not one of ``None``, ``"samplewise"``, ``"global"``.
+        ValueError:
+            If ``average`` is set but ``num_classes`` is not provided.
+        ValueError:
+            If ``num_classes`` is set
+            and ``ignore_index`` is not in the range ``[0, num_classes)``.
+
+    Example:
+        >>> from paddlemetrics.functional import precision
+        >>> preds = B.tensor([2, 0, 2, 1])
+        >>> target = B.tensor([1, 1, 2, 0])
+        >>> precision(preds, target, average='macro', num_classes=3)
+        tensor(0.1667)
+        >>> precision(preds, target, average='micro')
+        tensor(0.2500)
+
+    """
+    allowed_average = ["micro", "macro", "weighted", "samples", "none", None]
+    if average not in allowed_average:
+        raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.")
+
+    allowed_mdmc_average = [None, "samplewise", "global"]
+    if mdmc_average not in allowed_mdmc_average:
+        raise ValueError(f"The `mdmc_average` has to be one of {allowed_mdmc_average}, got {mdmc_average}.")
+
+    if average in ["macro", "weighted", "none", None] and (not num_classes or num_classes < 1):
+        raise ValueError(f"When you set `average` as {average}, you have to provide the number of classes.")
+
+    if num_classes and ignore_index is not None and (not 0 <= ignore_index < num_classes or num_classes == 1):
+        raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes")
+
+    reduce = "macro" if average in ["weighted", "none", None] else average
+    tp, fp, _, fn = _stat_scores_update(
+        preds,
+        target,
+        reduce=reduce,
+        mdmc_reduce=mdmc_average,
+        threshold=threshold,
+        num_classes=num_classes,
+        top_k=top_k,
+        multiclass=multiclass,
+        ignore_index=ignore_index,
+    )
+
+    return _precision_compute(tp, fp, fn, average, mdmc_average)
+
+
+def _recall_compute(
+    tp: Tensor,
+    fp: Tensor,
+    fn: Tensor,
+    average: str,
+    mdmc_average: Optional[str],
+) -> Tensor:
+    """Computes recall from the stat scores: true positives, false positives, false negatives.
+
+    Args:
+        tp: True positives
+        fp: False positives
+        fn: False negatives
+        average: Defines the reduction that is applied
+        mdmc_average: Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
+            ``average`` parameter)
+
+    Example:
+        >>> from paddlemetrics.functional.classification.stat_scores import _stat_scores_update
+        >>> preds = B.tensor([2, 0, 2, 1])
+        >>> target = B.tensor([1, 1, 2, 0])
+        >>> tp, fp, tn, fn = _stat_scores_update(preds, target, reduce='macro', num_classes=3)
+        >>> _recall_compute(tp, fp, fn, average='macro', mdmc_average=None)
+        tensor(0.3333)
+        >>> tp, fp, tn, fn = _stat_scores_update(preds, target, reduce='micro')
+        >>> _recall_compute(tp, fp, fn, average='micro', mdmc_average=None)
+        tensor(0.2500)
+    """
+    numerator = tp
+    denominator = tp + fn
+
+    if average == AverageMethod.MACRO and mdmc_average != MDMCAverageMethod.SAMPLEWISE:
+        cond = tp + fp + fn == 0
+        numerator = numerator[~cond]
+        denominator = denominator[~cond]
+
+    if average == AverageMethod.NONE and mdmc_average != MDMCAverageMethod.SAMPLEWISE:
+        # a class is not present if there exists no TPs, no FPs, and no FNs
+        meaningless_indices = ((tp | fn | fp) == 0).nonzero().cpu()
+        numerator[meaningless_indices, ...] = -1
+        denominator[meaningless_indices, ...] = -1
+
+    return _reduce_stat_scores(
+        numerator=numerator,
+        denominator=denominator,
+        weights=None if average != AverageMethod.WEIGHTED else tp + fn,
+        average=average,
+        mdmc_average=mdmc_average,
+    )
+
+
+def recall(
+    preds: Tensor,
+    target: Tensor,
+    average: str = "micro",
+    mdmc_average: Optional[str] = None,
+    ignore_index: Optional[int] = None,
+    num_classes: Optional[int] = None,
+    threshold: float = 0.5,
+    top_k: Optional[int] = None,
+    multiclass: Optional[bool] = None,
+) -> Tensor:
+    r"""
+    Computes `Recall`_
+
+    .. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}}
+
+    Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and
+    false negatives respectively. With the use of ``top_k`` parameter, this metric can
+    generalize to Recall@K.
+
+    The reduction method (how the recall scores are aggregated) is controlled by the
+    ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the
+    multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`.
+
+    Args:
+        preds: Predictions from model (probabilities, logits or labels)
+        target: Ground truth values
+        average:
+            Defines the reduction that is applied. Should be one of the following:
+
+            - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes.
+            - ``'macro'``: Calculate the metric for each class separately, and average the
+              metrics across classes (with equal weights for each class).
+            - ``'weighted'``: Calculate the metric for each class separately, and average the
+              metrics across classes, weighting each class by its support (``tp + fn``).
+            - ``'none'`` or ``None``: Calculate the metric for each class separately, and return
+              the metric for every class.
+            - ``'samples'``: Calculate the metric for each sample, and average the metrics
+              across samples (with equal weights for each sample).
+
+            .. note:: What is considered a sample in the multi-dimensional multi-class case
+                depends on the value of ``mdmc_average``.
+
+            .. note:: If ``'none'`` and a given class doesn't occur in the `preds` or `target`,
+                the value for the class will be ``nan``.
+ + mdmc_average: + Defines how averaging is done for multi-dimensional multi-class inputs (on top of the + ``average`` parameter). Should be one of the following: + + - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional + multi-class. + + - ``'samplewise'``: In this case, the statistics are computed separately for each + sample on the ``N`` axis, and then averaged over samples. + The computation for each sample is done by treating the flattened extra axes ``...`` + (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample, + and computing the metric for the sample based on that. + + - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs + (see :ref:`references/modules:input types`) + are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they + were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. + + ignore_index: + Integer specifying a target class to ignore. If given, this class index does not contribute + to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` + or ``'none'``, the score for the ignored class will be returned as ``nan``. + + num_classes: + Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods. + + threshold: + Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case + of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. + top_k: + Number of highest probability or logit score predictions considered to find the correct label, + relevant only for (multi-dimensional) multi-class inputs. The + default value (``None``) will be interpreted as 1 for these inputs. + + Should be left at default (``None``) for all other types of inputs. + multiclass: + Used only in certain special cases, where you want to treat inputs as a different type + than what they appear to be. See the parameter's + :ref:`documentation section ` + for a more detailed explanation and examples. + + Return: + The shape of the returned tensor depends on the ``average`` parameter + + - If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned + - If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number + of classes + + Raises: + ValueError: + If ``average`` is not one of ``"micro"``, ``"macro"``, ``"weighted"``, + ``"samples"``, ``"none"`` or ``None``. + ValueError: + If ``mdmc_average`` is not one of ``None``, ``"samplewise"``, ``"global"``. + ValueError: + If ``average`` is set but ``num_classes`` is not provided. + ValueError: + If ``num_classes`` is set + and ``ignore_index`` is not in the range ``[0, num_classes)``. 
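+
+    .. note:: For single-label multi-class inputs, micro-averaged recall coincides with
+        micro-averaged precision (and with accuracy), since the total number of false
+        positives then equals the total number of false negatives; compare the
+        ``average='micro'`` examples below with those of :func:`precision`.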
+
+    Example:
+        >>> from paddlemetrics.functional import recall
+        >>> preds = B.tensor([2, 0, 2, 1])
+        >>> target = B.tensor([1, 1, 2, 0])
+        >>> recall(preds, target, average='macro', num_classes=3)
+        tensor(0.3333)
+        >>> recall(preds, target, average='micro')
+        tensor(0.2500)
+
+    """
+    allowed_average = ["micro", "macro", "weighted", "samples", "none", None]
+    if average not in allowed_average:
+        raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.")
+
+    allowed_mdmc_average = [None, "samplewise", "global"]
+    if mdmc_average not in allowed_mdmc_average:
+        raise ValueError(f"The `mdmc_average` has to be one of {allowed_mdmc_average}, got {mdmc_average}.")
+
+    if average in ["macro", "weighted", "none", None] and (not num_classes or num_classes < 1):
+        raise ValueError(f"When you set `average` as {average}, you have to provide the number of classes.")
+
+    if num_classes and ignore_index is not None and (not 0 <= ignore_index < num_classes or num_classes == 1):
+        raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes")
+
+    reduce = "macro" if average in ["weighted", "none", None] else average
+    tp, fp, _, fn = _stat_scores_update(
+        preds,
+        target,
+        reduce=reduce,
+        mdmc_reduce=mdmc_average,
+        threshold=threshold,
+        num_classes=num_classes,
+        top_k=top_k,
+        multiclass=multiclass,
+        ignore_index=ignore_index,
+    )
+
+    return _recall_compute(tp, fp, fn, average, mdmc_average)
+
+
+def precision_recall(
+    preds: Tensor,
+    target: Tensor,
+    average: str = "micro",
+    mdmc_average: Optional[str] = None,
+    ignore_index: Optional[int] = None,
+    num_classes: Optional[int] = None,
+    threshold: float = 0.5,
+    top_k: Optional[int] = None,
+    multiclass: Optional[bool] = None,
+) -> Tuple[Tensor, Tensor]:
+    r"""
+    Computes `Precision`_
+
+    .. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}}
+
+
+    .. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}}
+
+    Where :math:`\text{TP}`, :math:`\text{FN}` and :math:`\text{FP}` represent the number
+    of true positives, false negatives and false positives respectively. With the use of
+    ``top_k`` parameter, this metric can generalize to Recall@K and Precision@K.
+
+    The reduction method (how the recall scores are aggregated) is controlled by the
+    ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the
+    multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`.
+
+    Args:
+        preds: Predictions from model (probabilities, logits or labels)
+        target: Ground truth values
+        average:
+            Defines the reduction that is applied. Should be one of the following:
+
+            - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes.
+            - ``'macro'``: Calculate the metric for each class separately, and average the
+              metrics across classes (with equal weights for each class).
+            - ``'weighted'``: Calculate the metric for each class separately, and average the
+              metrics across classes, weighting each class by its support (``tp + fn``).
+            - ``'none'`` or ``None``: Calculate the metric for each class separately, and return
+              the metric for every class.
+            - ``'samples'``: Calculate the metric for each sample, and average the metrics
+              across samples (with equal weights for each sample).
+
+            .. note:: What is considered a sample in the multi-dimensional multi-class case
+                depends on the value of ``mdmc_average``.
+
+            .. note:: If ``'none'`` and a given class doesn't occur in the `preds` or `target`,
+                the value for the class will be ``nan``.
+
+        mdmc_average:
+            Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
+            ``average`` parameter). Should be one of the following:
+
+            - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional
+              multi-class.
+
+            - ``'samplewise'``: In this case, the statistics are computed separately for each
+              sample on the ``N`` axis, and then averaged over samples.
+              The computation for each sample is done by treating the flattened extra axes ``...``
+              (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample,
+              and computing the metric for the sample based on that.
+
+            - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs
+              (see :ref:`references/modules:input types`)
+              are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they
+              were ``(N_X, C)``. From here on the ``average`` parameter applies as usual.
+
+        ignore_index:
+            Integer specifying a target class to ignore. If given, this class index does not contribute
+            to the returned score, regardless of reduction method. If an index is ignored, and ``average=None``
+            or ``'none'``, the score for the ignored class will be returned as ``nan``.
+
+        num_classes:
+            Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods.
+
+        threshold:
+            Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
+            of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
+        top_k:
+            Number of highest probability or logit score predictions considered to find the correct label,
+            relevant only for (multi-dimensional) multi-class inputs. The
+            default value (``None``) will be interpreted as 1 for these inputs.
+
+            Should be left at default (``None``) for all other types of inputs.
+        multiclass:
+            Used only in certain special cases, where you want to treat inputs as a different type
+            than what they appear to be. See the parameter's
+            :ref:`documentation section `
+            for a more detailed explanation and examples.
+
+    Return:
+        The function returns a tuple with two elements: precision and recall. Their shape
+        depends on the ``average`` parameter
+
+        - If ``average in ['micro', 'macro', 'weighted', 'samples']``, they are a single element tensor
+        - If ``average in ['none', None]``, they are a tensor of shape ``(C, )``, where ``C`` stands for
+          the number of classes
+
+    Raises:
+        ValueError:
+            If ``average`` is not one of ``"micro"``, ``"macro"``, ``"weighted"``,
+            ``"samples"``, ``"none"`` or ``None``.
+        ValueError:
+            If ``mdmc_average`` is not one of ``None``, ``"samplewise"``, ``"global"``.
+        ValueError:
+            If ``average`` is set but ``num_classes`` is not provided.
+        ValueError:
+            If ``num_classes`` is set
+            and ``ignore_index`` is not in the range ``[0, num_classes)``.
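+
+    .. note:: The underlying statistics are computed only once here and both scores are
+        derived from them, so calling this function is cheaper than calling
+        :func:`precision` and :func:`recall` separately on the same inputs.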
+
+    Example:
+        >>> from paddlemetrics.functional import precision_recall
+        >>> preds = B.tensor([2, 0, 2, 1])
+        >>> target = B.tensor([1, 1, 2, 0])
+        >>> precision_recall(preds, target, average='macro', num_classes=3)
+        (tensor(0.1667), tensor(0.3333))
+        >>> precision_recall(preds, target, average='micro')
+        (tensor(0.2500), tensor(0.2500))
+
+    """
+    allowed_average = ["micro", "macro", "weighted", "samples", "none", None]
+    if average not in allowed_average:
+        raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.")
+
+    allowed_mdmc_average = [None, "samplewise", "global"]
+    if mdmc_average not in allowed_mdmc_average:
+        raise ValueError(f"The `mdmc_average` has to be one of {allowed_mdmc_average}, got {mdmc_average}.")
+
+    if average in ["macro", "weighted", "none", None] and (not num_classes or num_classes < 1):
+        raise ValueError(f"When you set `average` as {average}, you have to provide the number of classes.")
+
+    if num_classes and ignore_index is not None and (not 0 <= ignore_index < num_classes or num_classes == 1):
+        raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes")
+
+    reduce = "macro" if average in ["weighted", "none", None] else average
+    tp, fp, _, fn = _stat_scores_update(
+        preds,
+        target,
+        reduce=reduce,
+        mdmc_reduce=mdmc_average,
+        threshold=threshold,
+        num_classes=num_classes,
+        top_k=top_k,
+        multiclass=multiclass,
+        ignore_index=ignore_index,
+    )
+
+    precision_ = _precision_compute(tp, fp, fn, average, mdmc_average)
+    recall_ = _recall_compute(tp, fp, fn, average, mdmc_average)
+
+    return precision_, recall_
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/precision_recall_curve.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/precision_recall_curve.py
new file mode 100644
index 000000000..11b32500b
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/precision_recall_curve.py
@@ -0,0 +1,332 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import List, Optional, Sequence, Tuple, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.utilities import rank_zero_warn
+
+
+def _binary_clf_curve(
+    preds: Tensor,
+    target: Tensor,
+    sample_weights: Optional[Sequence] = None,
+    pos_label: int = 1,
+) -> Tuple[Tensor, Tensor, Tensor]:
+    """Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/_ranking.py."""
+    if sample_weights is not None and not isinstance(sample_weights, Tensor):
+        sample_weights = tensor(sample_weights, device=preds.device, dtype=B.float)
+
+    # remove class dimension if necessary
+    if preds.ndim > target.ndim:
+        preds = preds[:, 0]
+    desc_score_indices = B.argsort(preds, descending=True)
+
+    preds = preds[desc_score_indices]
+    target = target[desc_score_indices]
+
+    if sample_weights is not None:
+        weight = sample_weights[desc_score_indices]
+    else:
+        weight = 1.0
+
+    # pred typically has many tied values. Here we extract
+    # the indices associated with the distinct values. We also
+    # concatenate a value for the end of the curve.
+    distinct_value_indices = B.where(preds[1:] - preds[:-1])[0]
+    threshold_idxs = B.nn.functional.pad(distinct_value_indices, [0, 1], value=target.size(0) - 1)
+    target = (target == pos_label).to(B.long)
+    tps = B.cumsum(target * weight, dim=0)[threshold_idxs]
+
+    if sample_weights is not None:
+        # express fps as a cumsum to ensure fps is increasing even in
+        # the presence of floating point errors
+        fps = B.cumsum((1 - target) * weight, dim=0)[threshold_idxs]
+    else:
+        fps = 1 + threshold_idxs - tps
+
+    return fps, tps, preds[threshold_idxs]
+
+
+def _precision_recall_curve_update(
+    preds: Tensor,
+    target: Tensor,
+    num_classes: Optional[int] = None,
+    pos_label: Optional[int] = None,
+) -> Tuple[Tensor, Tensor, int, Optional[int]]:
+    """Updates and returns variables required to compute the precision-recall pairs for different thresholds.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+        num_classes: integer with number of classes for multi-label and multiclass problems.
+            Should be set to ``None`` for binary problems
+        pos_label: integer determining the positive class. Default is ``None``
+            which for binary problems is translated to 1. For multiclass problems
+            this argument should not be set as we iteratively change it in the
+            range [0, num_classes-1]
+    """
+
+    if len(preds.shape) == len(target.shape):
+        if pos_label is None:
+            pos_label = 1
+        if num_classes is not None and num_classes != 1:
+            # multilabel problem
+            if num_classes != preds.shape[1]:
+                raise ValueError(
+                    f"Argument `num_classes` was set to {num_classes} in"
+                    f" metric `precision_recall_curve` but detected {preds.shape[1]}"
+                    " number of classes from predictions"
+                )
+            preds = preds.transpose(0, 1).reshape(num_classes, -1).transpose(0, 1)
+            target = target.transpose(0, 1).reshape(num_classes, -1).transpose(0, 1)
+        else:
+            # binary problem
+            preds = preds.flatten()
+            target = target.flatten()
+            num_classes = 1
+
+    # multi class problem
+    elif len(preds.shape) == len(target.shape) + 1:
+        if pos_label is not None:
+            rank_zero_warn(
+                "Argument `pos_label` should be `None` when running"
+                f" multiclass precision recall curve.
Got {pos_label}" + ) + if num_classes != preds.shape[1]: + raise ValueError( + f"Argument `num_classes` was set to {num_classes} in" + f" metric `precision_recall_curve` but detected {preds.shape[1]}" + " number of classes from predictions" + ) + preds = preds.transpose(0, 1).reshape(num_classes, -1).transpose(0, 1) + target = target.flatten() + + else: + raise ValueError("preds and target must have same number of dimensions, or one additional dimension for preds") + + return preds, target, num_classes, pos_label + + +def _precision_recall_curve_compute_single_class( + preds: Tensor, + target: Tensor, + pos_label: int, + sample_weights: Optional[Sequence] = None, +) -> Tuple[Tensor, Tensor, Tensor]: + """Computes precision-recall pairs for single class inputs. + + Args: + preds: Predicted tensor + target: Ground truth tensor + pos_label: integer determining the positive class. + sample_weights: sample weights for each data point + """ + + fps, tps, thresholds = _binary_clf_curve( + preds=preds, target=target, sample_weights=sample_weights, pos_label=pos_label + ) + precision = tps / (tps + fps) + recall = tps / tps[-1] + + # stop when full recall attained and reverse the outputs so recall is decreasing + last_ind = B.where(tps == tps[-1])[0][0] + sl = slice(0, last_ind.item() + 1) + + # need to call reversed explicitly, since including that to slice would + # introduce negative strides that are not yet supported in pytorch + precision = B.cat([reversed(precision[sl]), B.ones(1, dtype=precision.dtype, device=precision.device)]) + + recall = B.cat([reversed(recall[sl]), B.zeros(1, dtype=recall.dtype, device=recall.device)]) + + thresholds = reversed(thresholds[sl]).detach().clone() # type: ignore + + return precision, recall, thresholds + + +def _precision_recall_curve_compute_multi_class( + preds: Tensor, + target: Tensor, + num_classes: int, + sample_weights: Optional[Sequence] = None, +) -> Tuple[List[Tensor], List[Tensor], List[Tensor]]: + """Computes precision-recall pairs for multi class inputs. + + Args: + preds: Predicted tensor + target: Ground truth tensor + num_classes: integer with number of classes for multi-label and multiclass problems. + Should be set to ``None`` for binary problems + sample_weights: sample weights for each data point + """ + + # Recursively call per class + precision, recall, thresholds = [], [], [] + for cls in range(num_classes): + preds_cls = preds[:, cls] + + prc_args = dict( + preds=preds_cls, + target=target, + num_classes=1, + pos_label=cls, + sample_weights=sample_weights, + ) + if target.ndim > 1: + prc_args.update( + dict( + target=target[:, cls], + pos_label=1, + ) + ) + res = precision_recall_curve(**prc_args) + precision.append(res[0]) + recall.append(res[1]) + thresholds.append(res[2]) + + return precision, recall, thresholds + + +def _precision_recall_curve_compute( + preds: Tensor, + target: Tensor, + num_classes: int, + pos_label: Optional[int] = None, + sample_weights: Optional[Sequence] = None, +) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]: + """Computes precision-recall pairs based on the number of classes. + + Args: + preds: Predicted tensor + target: Ground truth tensor + num_classes: integer with number of classes for multi-label and multiclass problems. + Should be set to ``None`` for binary problems + pos_label: integer determining the positive class. Default is ``None`` + which for binary problem is translate to 1. 
For multiclass problems + this argument should not be set as we iteratively change it in the + range [0,num_classes-1] + sample_weights: sample weights for each data point + + Example: + >>> # binary case + >>> preds = B.tensor([0, 1, 2, 3]) + >>> target = B.tensor([0, 1, 1, 0]) + >>> pos_label = 1 + >>> preds, target, num_classes, pos_label = _precision_recall_curve_update(preds, target, pos_label=pos_label) + >>> precision, recall, thresholds = _precision_recall_curve_compute(preds, target, num_classes, pos_label) + >>> precision + tensor([0.6667, 0.5000, 0.0000, 1.0000]) + >>> recall + tensor([1.0000, 0.5000, 0.0000, 0.0000]) + >>> thresholds + tensor([1, 2, 3]) + + >>> # multiclass case + >>> preds = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05], + ... [0.05, 0.75, 0.05, 0.05, 0.05], + ... [0.05, 0.05, 0.75, 0.05, 0.05], + ... [0.05, 0.05, 0.05, 0.75, 0.05]]) + >>> target = B.tensor([0, 1, 3, 2]) + >>> num_classes = 5 + >>> preds, target, num_classes, pos_label = _precision_recall_curve_update(preds, target, num_classes) + >>> precision, recall, thresholds = _precision_recall_curve_compute(preds, target, num_classes) + >>> precision # doctest: +NORMALIZE_WHITESPACE + [tensor([1., 1.]), tensor([1., 1.]), tensor([0.2500, 0.0000, 1.0000]), + tensor([0.2500, 0.0000, 1.0000]), tensor([0., 1.])] + >>> recall + [tensor([1., 0.]), tensor([1., 0.]), tensor([1., 0., 0.]), tensor([1., 0., 0.]), tensor([nan, 0.])] + >>> thresholds + [tensor([0.7500]), tensor([0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500])] + """ + + with B.no_grad(): + if num_classes == 1: + if pos_label is None: + pos_label = 1 + return _precision_recall_curve_compute_single_class(preds, target, pos_label, sample_weights) + return _precision_recall_curve_compute_multi_class(preds, target, num_classes, sample_weights) + + +def precision_recall_curve( + preds: Tensor, + target: Tensor, + num_classes: Optional[int] = None, + pos_label: Optional[int] = None, + sample_weights: Optional[Sequence] = None, +) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]: + """Computes precision-recall pairs for different thresholds. + + Args: + preds: predictions from model (probabilities) + target: ground truth labels + num_classes: integer with number of classes for multi-label and multiclass problems. + Should be set to ``None`` for binary problems + pos_label: integer determining the positive class. Default is ``None`` + which for binary problem is translate to 1. For multiclass problems + this argument should not be set as we iteratively change it in the + range [0,num_classes-1] + sample_weights: sample weights for each data point + + Returns: + 3-element tuple containing + + precision: + tensor where element i is the precision of predictions with + score >= thresholds[i] and the last element is 1. + If multiclass, this is a list of such tensors, one for each class. + recall: + tensor where element i is the recall of predictions with + score >= thresholds[i] and the last element is 0. + If multiclass, this is a list of such tensors, one for each class. + thresholds: + Thresholds used for computing precision/recall scores + + Raises: + ValueError: + If ``preds`` and ``target`` don't have the same number of dimensions, + or one additional dimension for ``preds``. + ValueError: + If the number of classes deduced from ``preds`` is not the same as the + ``num_classes`` provided. 
+ + Example (binary case): + >>> from paddlemetrics.functional import precision_recall_curve + >>> pred = B.tensor([0, 1, 2, 3]) + >>> target = B.tensor([0, 1, 1, 0]) + >>> precision, recall, thresholds = precision_recall_curve(pred, target, pos_label=1) + >>> precision + tensor([0.6667, 0.5000, 0.0000, 1.0000]) + >>> recall + tensor([1.0000, 0.5000, 0.0000, 0.0000]) + >>> thresholds + tensor([1, 2, 3]) + + Example (multiclass case): + >>> pred = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05], + ... [0.05, 0.75, 0.05, 0.05, 0.05], + ... [0.05, 0.05, 0.75, 0.05, 0.05], + ... [0.05, 0.05, 0.05, 0.75, 0.05]]) + >>> target = B.tensor([0, 1, 3, 2]) + >>> precision, recall, thresholds = precision_recall_curve(pred, target, num_classes=5) + >>> precision # doctest: +NORMALIZE_WHITESPACE + [tensor([1., 1.]), tensor([1., 1.]), tensor([0.2500, 0.0000, 1.0000]), + tensor([0.2500, 0.0000, 1.0000]), tensor([0., 1.])] + >>> recall + [tensor([1., 0.]), tensor([1., 0.]), tensor([1., 0., 0.]), tensor([1., 0., 0.]), tensor([nan, 0.])] + >>> thresholds + [tensor([0.7500]), tensor([0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500])] + """ + preds, target, num_classes, pos_label = _precision_recall_curve_update(preds, target, num_classes, pos_label) + return _precision_recall_curve_compute(preds, target, num_classes, pos_label, sample_weights) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/roc.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/roc.py new file mode 100644 index 000000000..86f4e2a4c --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/roc.py @@ -0,0 +1,273 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import List, Optional, Sequence, Tuple, Union + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.functional.classification.precision_recall_curve import ( + _binary_clf_curve, + _precision_recall_curve_update, +) + + +def _roc_update( + preds: Tensor, + target: Tensor, + num_classes: Optional[int] = None, + pos_label: Optional[int] = None, +) -> Tuple[Tensor, Tensor, int, Optional[int]]: + """Updates and returns variables required to compute the Receiver Operating Characteristic. + + Args: + preds: Predicted tensor + target: Ground truth tensor + num_classes: integer with number of classes for multi-label and multiclass problems. + Should be set to ``None`` for binary problems + pos_label: integer determining the positive class. Default is ``None`` + which for binary problem is translate to 1. 
For multiclass problems
+            this argument should not be set as we iteratively change it in the
+            range [0, num_classes-1]
+    """
+
+    return _precision_recall_curve_update(preds, target, num_classes, pos_label)
+
+
+def _roc_compute_single_class(
+    preds: Tensor,
+    target: Tensor,
+    pos_label: int,
+    sample_weights: Optional[Sequence] = None,
+) -> Tuple[Tensor, Tensor, Tensor]:
+    """Computes Receiver Operating Characteristic for single class inputs. Returns tensor with false positive
+    rates, tensor with true positive rates, tensor with thresholds used for computing false- and true positive
+    rates.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+        pos_label: integer determining the positive class. Default is ``None``
+            which for binary problems is translated to 1. For multiclass problems
+            this argument should not be set as we iteratively change it in the
+            range [0, num_classes-1]
+        sample_weights: sample weights for each data point
+    """
+
+    fps, tps, thresholds = _binary_clf_curve(
+        preds=preds, target=target, sample_weights=sample_weights, pos_label=pos_label
+    )
+    # Add an extra threshold position to make sure that the curve starts at (0, 0)
+    tps = B.cat([B.zeros(1, dtype=tps.dtype, device=tps.device), tps])
+    fps = B.cat([B.zeros(1, dtype=fps.dtype, device=fps.device), fps])
+    thresholds = B.cat([thresholds[0][None] + 1, thresholds])
+
+    if fps[-1] <= 0:
+        raise ValueError("No negative samples in targets, false positive value should be meaningless")
+    fpr = fps / fps[-1]
+
+    if tps[-1] <= 0:
+        raise ValueError("No positive samples in targets, true positive value should be meaningless")
+    tpr = tps / tps[-1]
+
+    return fpr, tpr, thresholds
+
+
+def _roc_compute_multi_class(
+    preds: Tensor,
+    target: Tensor,
+    num_classes: int,
+    sample_weights: Optional[Sequence] = None,
+) -> Tuple[List[Tensor], List[Tensor], List[Tensor]]:
+    """Computes Receiver Operating Characteristic for multi class inputs. Returns tensor with false positive rates,
+    tensor with true positive rates, tensor with thresholds used for computing false- and true positive rates.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+        num_classes: integer with number of classes for multi-label and multiclass problems
+        sample_weights: sample weights for each data point
+    """
+
+    fpr, tpr, thresholds = [], [], []
+    for cls in range(num_classes):
+        if preds.shape == target.shape:
+            target_cls = target[:, cls]
+            pos_label = 1
+        else:
+            target_cls = target
+            pos_label = cls
+        res = roc(
+            preds=preds[:, cls],
+            target=target_cls,
+            num_classes=1,
+            pos_label=pos_label,
+            sample_weights=sample_weights,
+        )
+        fpr.append(res[0])
+        tpr.append(res[1])
+        thresholds.append(res[2])
+
+    return fpr, tpr, thresholds
+
+
+def _roc_compute(
+    preds: Tensor,
+    target: Tensor,
+    num_classes: int,
+    pos_label: Optional[int] = None,
+    sample_weights: Optional[Sequence] = None,
+) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
+    """Computes Receiver Operating Characteristic based on the number of classes.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+        num_classes: integer with number of classes for multi-label and multiclass problems.
+            Should be set to ``None`` for binary problems
+        pos_label: integer determining the positive class. Default is ``None``
+            which for binary problems is translated to 1.
For multiclass problems
+            this argument should not be set as we iteratively change it in the
+            range [0, num_classes-1]
+        sample_weights: sample weights for each data point
+
+    Example:
+        >>> # binary case
+        >>> preds = B.tensor([0, 1, 2, 3])
+        >>> target = B.tensor([0, 1, 1, 1])
+        >>> pos_label = 1
+        >>> preds, target, num_classes, pos_label = _roc_update(preds, target, pos_label=pos_label)
+        >>> fpr, tpr, thresholds = _roc_compute(preds, target, num_classes, pos_label)
+        >>> fpr
+        tensor([0., 0., 0., 0., 1.])
+        >>> tpr
+        tensor([0.0000, 0.3333, 0.6667, 1.0000, 1.0000])
+        >>> thresholds
+        tensor([4, 3, 2, 1, 0])
+
+        >>> # multiclass case
+        >>> preds = B.tensor([[0.75, 0.05, 0.05, 0.05],
+        ...                   [0.05, 0.75, 0.05, 0.05],
+        ...                   [0.05, 0.05, 0.75, 0.05],
+        ...                   [0.05, 0.05, 0.05, 0.75]])
+        >>> target = B.tensor([0, 1, 3, 2])
+        >>> num_classes = 4
+        >>> preds, target, num_classes, pos_label = _roc_update(preds, target, num_classes)
+        >>> fpr, tpr, thresholds = _roc_compute(preds, target, num_classes)
+        >>> fpr
+        [tensor([0., 0., 1.]), tensor([0., 0., 1.]), tensor([0.0000, 0.3333, 1.0000]), tensor([0.0000, 0.3333, 1.0000])]
+        >>> tpr
+        [tensor([0., 1., 1.]), tensor([0., 1., 1.]), tensor([0., 0., 1.]), tensor([0., 0., 1.])]
+        >>> thresholds # doctest: +NORMALIZE_WHITESPACE
+        [tensor([1.7500, 0.7500, 0.0500]),
+         tensor([1.7500, 0.7500, 0.0500]),
+         tensor([1.7500, 0.7500, 0.0500]),
+         tensor([1.7500, 0.7500, 0.0500])]
+    """
+
+    with B.no_grad():
+        if num_classes == 1 and preds.ndim == 1:  # binary
+            if pos_label is None:
+                pos_label = 1
+            return _roc_compute_single_class(preds, target, pos_label, sample_weights)
+        return _roc_compute_multi_class(preds, target, num_classes, sample_weights)
+
+
+def roc(
+    preds: Tensor,
+    target: Tensor,
+    num_classes: Optional[int] = None,
+    pos_label: Optional[int] = None,
+    sample_weights: Optional[Sequence] = None,
+) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
+    """Computes the Receiver Operating Characteristic (ROC). Works with binary, multiclass and multilabel input.
+
+    Args:
+        preds: predictions from model (logits or probabilities)
+        target: ground truth values
+        num_classes: integer with number of classes for multi-label and multiclass problems.
+            Should be set to ``None`` for binary problems
+        pos_label: integer determining the positive class. Default is ``None``
+            which for binary problems is translated to 1. For multiclass problems
+            this argument should not be set as we iteratively change it in the
+            range [0, num_classes-1]
+        sample_weights: sample weights for each data point
+
+    Returns:
+        3-element tuple containing
+
+        fpr:
+            tensor with false positive rates.
+            If multiclass or multilabel, this is a list of such tensors, one for each class/label.
+        tpr:
+            tensor with true positive rates.
+            If multiclass or multilabel, this is a list of such tensors, one for each class/label.
+        thresholds:
+            tensor with thresholds used for computing false- and true positive rates.
+            If multiclass or multilabel, this is a list of such tensors, one for each class/label.
+ + Example (binary case): + >>> from paddlemetrics.functional import roc + >>> pred = B.tensor([0, 1, 2, 3]) + >>> target = B.tensor([0, 1, 1, 1]) + >>> fpr, tpr, thresholds = roc(pred, target, pos_label=1) + >>> fpr + tensor([0., 0., 0., 0., 1.]) + >>> tpr + tensor([0.0000, 0.3333, 0.6667, 1.0000, 1.0000]) + >>> thresholds + tensor([4, 3, 2, 1, 0]) + + Example (multiclass case): + >>> from paddlemetrics.functional import roc + >>> pred = B.tensor([[0.75, 0.05, 0.05, 0.05], + ... [0.05, 0.75, 0.05, 0.05], + ... [0.05, 0.05, 0.75, 0.05], + ... [0.05, 0.05, 0.05, 0.75]]) + >>> target = B.tensor([0, 1, 3, 2]) + >>> fpr, tpr, thresholds = roc(pred, target, num_classes=4) + >>> fpr + [tensor([0., 0., 1.]), tensor([0., 0., 1.]), tensor([0.0000, 0.3333, 1.0000]), tensor([0.0000, 0.3333, 1.0000])] + >>> tpr + [tensor([0., 1., 1.]), tensor([0., 1., 1.]), tensor([0., 0., 1.]), tensor([0., 0., 1.])] + >>> thresholds # doctest: +NORMALIZE_WHITESPACE + [tensor([1.7500, 0.7500, 0.0500]), + tensor([1.7500, 0.7500, 0.0500]), + tensor([1.7500, 0.7500, 0.0500]), + tensor([1.7500, 0.7500, 0.0500])] + + Example (multilabel case): + >>> from paddlemetrics.functional import roc + >>> pred = B.tensor([[0.8191, 0.3680, 0.1138], + ... [0.3584, 0.7576, 0.1183], + ... [0.2286, 0.3468, 0.1338], + ... [0.8603, 0.0745, 0.1837]]) + >>> target = B.tensor([[1, 1, 0], [0, 1, 0], [0, 0, 0], [0, 1, 1]]) + >>> fpr, tpr, thresholds = roc(pred, target, num_classes=3, pos_label=1) + >>> fpr # doctest: +NORMALIZE_WHITESPACE + [tensor([0.0000, 0.3333, 0.3333, 0.6667, 1.0000]), + tensor([0., 0., 0., 1., 1.]), + tensor([0.0000, 0.0000, 0.3333, 0.6667, 1.0000])] + >>> tpr + [tensor([0., 0., 1., 1., 1.]), tensor([0.0000, 0.3333, 0.6667, 0.6667, 1.0000]), tensor([0., 1., 1., 1., 1.])] + >>> thresholds # doctest: +NORMALIZE_WHITESPACE + [tensor([1.8603, 0.8603, 0.8191, 0.3584, 0.2286]), + tensor([1.7576, 0.7576, 0.3680, 0.3468, 0.0745]), + tensor([1.1837, 0.1837, 0.1338, 0.1183, 0.1138])] + """ + preds, target, num_classes, pos_label = _roc_update(preds, target, num_classes, pos_label) + return _roc_compute(preds, target, num_classes, pos_label, sample_weights) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/specificity.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/specificity.py new file mode 100644 index 000000000..be87dce7d --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/specificity.py @@ -0,0 +1,215 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
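The per-class looping that `_roc_compute_multi_class` performs above is easiest to see through the public `roc` function; a minimal sketch reusing the multiclass doctest values (it assumes the `paddleext.torchapi` backend and `paddlemetrics` package from this patch are installed):

```python
# Per-class ROC curves for multiclass scores (mirrors the doctest above).
import paddleext.torchapi as B
from paddlemetrics.functional import roc

pred = B.tensor([[0.75, 0.05, 0.05, 0.05],
                 [0.05, 0.75, 0.05, 0.05],
                 [0.05, 0.05, 0.75, 0.05],
                 [0.05, 0.05, 0.05, 0.75]])
target = B.tensor([0, 1, 3, 2])

fpr, tpr, thresholds = roc(pred, target, num_classes=4)
# Each output is a list with one tensor per class; the extra leading
# threshold (max score + 1) guarantees every curve starts at (0, 0).
for cls, (f, t) in enumerate(zip(fpr, tpr)):
    print(cls, f, t)
```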
+from typing import Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.classification.stat_scores import _reduce_stat_scores, _stat_scores_update
+from paddlemetrics.utilities.enums import AverageMethod, MDMCAverageMethod
+
+
+def _specificity_compute(
+    tp: Tensor,
+    fp: Tensor,
+    tn: Tensor,
+    fn: Tensor,
+    average: str,
+    mdmc_average: Optional[str],
+) -> Tensor:
+    """Computes specificity from the stat scores: true positives, false positives, true negatives, false negatives.
+
+    Args:
+        tp: True positives
+        fp: False positives
+        tn: True negatives
+        fn: False negatives
+        average: Defines the reduction that is applied
+        mdmc_average: Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
+            ``average`` parameter)
+
+    Example:
+        >>> from paddlemetrics.functional.classification.stat_scores import _stat_scores_update
+        >>> preds = B.tensor([2, 0, 2, 1])
+        >>> target = B.tensor([1, 1, 2, 0])
+        >>> tp, fp, tn, fn = _stat_scores_update(preds, target, reduce='macro', num_classes=3)
+        >>> _specificity_compute(tp, fp, tn, fn, average='macro', mdmc_average=None)
+        tensor(0.6111)
+        >>> tp, fp, tn, fn = _stat_scores_update(preds, target, reduce='micro')
+        >>> _specificity_compute(tp, fp, tn, fn, average='micro', mdmc_average=None)
+        tensor(0.6250)
+    """
+
+    numerator = tn
+    denominator = tn + fp
+    if average == AverageMethod.NONE and mdmc_average != MDMCAverageMethod.SAMPLEWISE:
+        # a class is not present if there are no TPs, no FPs, and no FNs
+        meaningless_indices = B.nonzero((tp | fn | fp) == 0).cpu()
+        numerator[meaningless_indices, ...] = -1
+        denominator[meaningless_indices, ...] = -1
+    return _reduce_stat_scores(
+        numerator=numerator,
+        denominator=denominator,
+        weights=None if average != AverageMethod.WEIGHTED else denominator,
+        average=average,
+        mdmc_average=mdmc_average,
+    )
+
+
+def specificity(
+    preds: Tensor,
+    target: Tensor,
+    average: str = "micro",
+    mdmc_average: Optional[str] = None,
+    ignore_index: Optional[int] = None,
+    num_classes: Optional[int] = None,
+    threshold: float = 0.5,
+    top_k: Optional[int] = None,
+    multiclass: Optional[bool] = None,
+) -> Tensor:
+    r"""
+    Computes `Specificity`_
+
+    .. math:: \text{Specificity} = \frac{\text{TN}}{\text{TN} + \text{FP}}
+
+    Where :math:`\text{TN}` and :math:`\text{FP}` represent the number of true negatives and
+    false positives respectively. With the use of ``top_k`` parameter, this metric can
+    generalize to Specificity@K.
+
+    The reduction method (how the specificity scores are aggregated) is controlled by the
+    ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the
+    multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`.
+
+    Args:
+        preds: Predictions from model (probabilities, or labels)
+        target: Ground truth values
+        average:
+            Defines the reduction that is applied. Should be one of the following:
+
+            - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes.
+            - ``'macro'``: Calculate the metric for each class separately, and average the
+              metrics across classes (with equal weights for each class).
+            - ``'weighted'``: Calculate the metric for each class separately, and average the
+              metrics across classes, weighting each class by its support (``tn + fp``).
+            - ``'none'`` or ``None``: Calculate the metric for each class separately, and return
+              the metric for every class.
+ - ``'samples'``: Calculate the metric for each sample, and average the metrics + across samples (with equal weights for each sample). + + .. note:: What is considered a sample in the multi-dimensional multi-class case + depends on the value of ``mdmc_average``. + + .. note:: If ``'none'`` and a given class doesn't occur in the `preds` or `target`, + the value for the class will be ``nan``. + + mdmc_average: + Defines how averaging is done for multi-dimensional multi-class inputs (on top of the + ``average`` parameter). Should be one of the following: + + - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional + multi-class. + + - ``'samplewise'``: In this case, the statistics are computed separately for each + sample on the ``N`` axis, and then averaged over samples. + The computation for each sample is done by treating the flattened extra axes ``...`` + (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample, + and computing the metric for the sample based on that. + + - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs + (see :ref:`references/modules:input types`) + are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they + were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. + + ignore_index: + Integer specifying a target class to ignore. If given, this class index does not contribute + to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` + or ``'none'``, the score for the ignored class will be returned as ``nan``. + + num_classes: + Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods. + + threshold: + Threshold probability value for transforming probability predictions to binary + (0,1) predictions, in the case of binary or multi-label inputs + top_k: + Number of highest probability entries for each sample to convert to 1s - relevant + only for inputs with probability predictions. If this parameter is set for multi-label + inputs, it will take precedence over ``threshold``. For (multi-dim) multi-class inputs, + this parameter defaults to 1. + + Should be left unset (``None``) for inputs with label predictions. + multiclass: + Used only in certain special cases, where you want to treat inputs as a different type + than what they appear to be. See the parameter's + :ref:`documentation section ` + for a more detailed explanation and examples. + + Return: + The shape of the returned tensor depends on the ``average`` parameter + + - If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned + - If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number + of classes + + Raises: + ValueError: + If ``average`` is not one of ``"micro"``, ``"macro"``, ``"weighted"``, + ``"samples"``, ``"none"`` or ``None``. + ValueError: + If ``mdmc_average`` is not one of ``None``, ``"samplewise"``, ``"global"``. + ValueError: + If ``average`` is set but ``num_classes`` is not provided. + ValueError: + If ``num_classes`` is set + and ``ignore_index`` is not in the range ``[0, num_classes)``. 
+
+    Example:
+        >>> from paddlemetrics.functional import specificity
+        >>> preds = B.tensor([2, 0, 2, 1])
+        >>> target = B.tensor([1, 1, 2, 0])
+        >>> specificity(preds, target, average='macro', num_classes=3)
+        tensor(0.6111)
+        >>> specificity(preds, target, average='micro')
+        tensor(0.6250)
+
+    """
+
+    allowed_average = ["micro", "macro", "weighted", "samples", "none", None]
+    if average not in allowed_average:
+        raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.")
+
+    allowed_mdmc_average = [None, "samplewise", "global"]
+    if mdmc_average not in allowed_mdmc_average:
+        raise ValueError(f"The `mdmc_average` has to be one of {allowed_mdmc_average}, got {mdmc_average}.")
+
+    if average in ["macro", "weighted", "none", None] and (not num_classes or num_classes < 1):
+        raise ValueError(f"When you set `average` as {average}, you have to provide the number of classes.")
+
+    if num_classes and ignore_index is not None and (not 0 <= ignore_index < num_classes or num_classes == 1):
+        raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes")
+
+    reduce = "macro" if average in ["weighted", "none", None] else average
+    tp, fp, tn, fn = _stat_scores_update(
+        preds,
+        target,
+        reduce=reduce,
+        mdmc_reduce=mdmc_average,
+        threshold=threshold,
+        num_classes=num_classes,
+        top_k=top_k,
+        multiclass=multiclass,
+        ignore_index=ignore_index,
+    )
+
+    return _specificity_compute(tp, fp, tn, fn, average, mdmc_average)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/stat_scores.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/stat_scores.py
new file mode 100644
index 000000000..33e1cafdd
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/stat_scores.py
@@ -0,0 +1,396 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import List, Optional, Tuple, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.utilities.checks import _input_format_classification
+from paddlemetrics.utilities.enums import AverageMethod, MDMCAverageMethod
+
+
+def _del_column(data: Tensor, idx: int) -> Tensor:
+    """Delete the column at index."""
+    return B.cat([data[:, :idx], data[:, (idx + 1) :]], 1)
+
+
+def _stat_scores(
+    preds: Tensor,
+    target: Tensor,
+    reduce: Optional[str] = "micro",
+) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
+    """Calculate the number of tp, fp, tn, fn.
+
+    Args:
+        preds:
+            An ``(N, C)`` or ``(N, C, X)`` tensor of predictions (0 or 1)
+        target:
+            An ``(N, C)`` or ``(N, C, X)`` tensor of true labels (0 or 1)
+        reduce:
+            One of ``'micro'``, ``'macro'``, ``'samples'``
+
+    Return:
+        Returns a tuple of 4 tensors: tp, fp, tn, fn.
The shape of the returned tensors depends on the shape of the inputs
+        and the ``reduce`` parameter:
+
+        If inputs are of the shape ``(N, C)``, then
+
+        - If ``reduce='micro'``, the returned tensors are 1 element tensors
+        - If ``reduce='macro'``, the returned tensors are ``(C,)`` tensors
+        - If ``reduce='samples'``, the returned tensors are ``(N,)`` tensors
+
+        If inputs are of the shape ``(N, C, X)``, then
+
+        - If ``reduce='micro'``, the returned tensors are ``(N,)`` tensors
+        - If ``reduce='macro'``, the returned tensors are ``(N, C)`` tensors
+        - If ``reduce='samples'``, the returned tensors are ``(N, X)`` tensors
+    """
+    dim: Union[int, List[int]] = 1  # for "samples"
+    if reduce == "micro":
+        dim = [0, 1] if preds.ndim == 2 else [1, 2]
+    elif reduce == "macro":
+        dim = 0 if preds.ndim == 2 else 2
+
+    true_pred, false_pred = target == preds, target != preds
+    pos_pred, neg_pred = preds == 1, preds == 0
+
+    tp = (true_pred * pos_pred).sum(dim=dim)
+    fp = (false_pred * pos_pred).sum(dim=dim)
+
+    tn = (true_pred * neg_pred).sum(dim=dim)
+    fn = (false_pred * neg_pred).sum(dim=dim)
+    return tp.long(), fp.long(), tn.long(), fn.long()
+
+
+def _stat_scores_update(
+    preds: Tensor,
+    target: Tensor,
+    reduce: Optional[str] = "micro",
+    mdmc_reduce: Optional[str] = None,
+    num_classes: Optional[int] = None,
+    top_k: Optional[int] = None,
+    threshold: float = 0.5,
+    multiclass: Optional[bool] = None,
+    ignore_index: Optional[int] = None,
+) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
+    """Updates and returns the number of true positives, false positives, true negatives, false negatives.
+    Raises ValueError if:
+
+    - The `ignore_index` is not valid
+    - When `ignore_index` is used with binary data
+    - When inputs are multi-dimensional multi-class, and the `mdmc_reduce` parameter is not set
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+        reduce: Defines the reduction that is applied
+        mdmc_reduce: Defines how the multi-dimensional multi-class inputs are handled
+        num_classes: Number of classes. Necessary for (multi-dimensional) multi-class or multi-label data.
+        top_k: Number of highest probability or logit score predictions considered to find the correct label,
+            relevant only for (multi-dimensional) multi-class inputs
+        threshold: Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
+            of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities
+        multiclass: Used only in certain special cases, where you want to treat inputs as a different type
+            than what they appear to be
+        ignore_index: Specify a class (label) to ignore. If given, this class index does not contribute
+            to the returned score, regardless of reduction method. If an index is ignored, and
+            ``reduce='macro'``, the class statistics for the ignored class will all be returned
+            as ``-1``.
+ """ + + preds, target, _ = _input_format_classification( + preds, target, threshold=threshold, num_classes=num_classes, multiclass=multiclass, top_k=top_k + ) + + if ignore_index is not None and not 0 <= ignore_index < preds.shape[1]: + raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {preds.shape[0]} classes") + + if ignore_index is not None and preds.shape[1] == 1: + raise ValueError("You can not use `ignore_index` with binary data.") + + if preds.ndim == 3: + if not mdmc_reduce: + raise ValueError( + "When your inputs are multi-dimensional multi-class, you have to set the `mdmc_reduce` parameter" + ) + if mdmc_reduce == "global": + preds = B.transpose(preds, 1, 2).reshape(-1, preds.shape[1]) + target = B.transpose(target, 1, 2).reshape(-1, target.shape[1]) + + # Delete what is in ignore_index, if applicable (and classes don't matter): + if ignore_index is not None and reduce != "macro": + preds = _del_column(preds, ignore_index) + target = _del_column(target, ignore_index) + + tp, fp, tn, fn = _stat_scores(preds, target, reduce=reduce) + + # Take care of ignore_index + if ignore_index is not None and reduce == "macro": + tp[..., ignore_index] = -1 + fp[..., ignore_index] = -1 + tn[..., ignore_index] = -1 + fn[..., ignore_index] = -1 + + return tp, fp, tn, fn + + +def _stat_scores_compute(tp: Tensor, fp: Tensor, tn: Tensor, fn: Tensor) -> Tensor: + """Computes the number of true positives, false positives, true negatives, false negatives. Concatenates the + input tensors along with the support into one output. + + Args: + tp: True positives + fp: False positives + tn: True negatives + fn: False negatives + + Example: + >>> preds = B.tensor([1, 0, 2, 1]) + >>> target = B.tensor([1, 1, 2, 0]) + >>> tp, fp, tn, fn = _stat_scores_update(preds, target, reduce='macro', num_classes=3) + >>> _stat_scores_compute(tp, fp, tn, fn) + tensor([[0, 1, 2, 1, 1], + [1, 1, 1, 1, 2], + [1, 0, 3, 0, 1]]) + >>> tp, fp, tn, fn = _stat_scores_update(preds, target, reduce='micro') + >>> _stat_scores_compute(tp, fp, tn, fn) + tensor([2, 2, 6, 2, 4]) + """ + stats = [ + tp.unsqueeze(-1), + fp.unsqueeze(-1), + tn.unsqueeze(-1), + fn.unsqueeze(-1), + tp.unsqueeze(-1) + fn.unsqueeze(-1), # support + ] + outputs: Tensor = B.cat(stats, -1) + outputs = B.where(outputs < 0, tensor(-1, device=outputs.device, dtype=outputs.dtype), outputs) + + return outputs + + +def _reduce_stat_scores( + numerator: Tensor, + denominator: Tensor, + weights: Optional[Tensor], + average: Optional[str], + mdmc_average: Optional[str], + zero_division: int = 0, +) -> Tensor: + """Reduces scores of type ``numerator/denominator`` or. + + ``weights * (numerator/denominator)``, if ``average='weighted'``. + + Args: + numerator: A tensor with numerator numbers. + denominator: A tensor with denominator numbers. If a denominator is + negative, the class will be ignored (if averaging), or its score + will be returned as ``nan`` (if ``average=None``). + If the denominator is zero, then ``zero_division`` score will be + used for those elements. + weights: A tensor of weights to be used if ``average='weighted'``. + average: The method to average the scores + mdmc_average: The method to average the scores if inputs were multi-dimensional multi-class (MDMC) + zero_division: The value to use for the score if denominator equals zero. 
+ """ + numerator, denominator = numerator.float(), denominator.float() + zero_div_mask = denominator == 0 + ignore_mask = denominator < 0 + + if weights is None: + weights = B.ones_like(denominator) + else: + weights = weights.float() + + numerator = B.where(zero_div_mask, tensor(float(zero_division), device=numerator.device), numerator) + denominator = B.where(zero_div_mask | ignore_mask, tensor(1.0, device=denominator.device), denominator) + weights = B.where(ignore_mask, tensor(0.0, device=weights.device), weights) + + if average not in (AverageMethod.MICRO, AverageMethod.NONE, None): + weights = weights / weights.sum(dim=-1, keepdim=True) + + scores = weights * (numerator / denominator) + + # This is in case where sum(weights) = 0, which happens if we ignore the only present class with average='weighted' + scores = B.where(B.isnan(scores), tensor(float(zero_division), device=scores.device), scores) + + if mdmc_average == MDMCAverageMethod.SAMPLEWISE: + scores = scores.mean(dim=0) + ignore_mask = ignore_mask.sum(dim=0).bool() + + if average in (AverageMethod.NONE, None): + scores = B.where(ignore_mask, tensor(float("nan"), device=scores.device), scores) + else: + scores = scores.sum() + + return scores + + +def stat_scores( + preds: Tensor, + target: Tensor, + reduce: str = "micro", + mdmc_reduce: Optional[str] = None, + num_classes: Optional[int] = None, + top_k: Optional[int] = None, + threshold: float = 0.5, + multiclass: Optional[bool] = None, + ignore_index: Optional[int] = None, +) -> Tensor: + r"""Computes the number of true positives, false positives, true negatives, false negatives. + Related to `Type I and Type II errors`_ + and the `confusion matrix`_. + + The reduction method (how the statistics are aggregated) is controlled by the + ``reduce`` parameter, and additionally by the ``mdmc_reduce`` parameter in the + multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`. + + Args: + preds: Predictions from model (probabilities, logits or labels) + target: Ground truth values + threshold: + Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case + of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. + + top_k: + Number of highest probability or logit score predictions considered to find the correct label, + relevant only for (multi-dimensional) multi-class inputs. The + default value (``None``) will be interpreted as 1 for these inputs. + + Should be left at default (``None``) for all other types of inputs. + + reduce: + Defines the reduction that is applied. Should be one of the following: + + - ``'micro'`` [default]: Counts the statistics by summing over all [sample, class] + combinations (globally). Each statistic is represented by a single integer. + - ``'macro'``: Counts the statistics for each class separately (over all samples). + Each statistic is represented by a ``(C,)`` tensor. Requires ``num_classes`` + to be set. + - ``'samples'``: Counts the statistics for each sample separately (over all classes). + Each statistic is represented by a ``(N, )`` 1d tensor. + + .. note:: What is considered a sample in the multi-dimensional multi-class case + depends on the value of ``mdmc_reduce``. + + num_classes: + Number of classes. Necessary for (multi-dimensional) multi-class or multi-label data. + + ignore_index: + Specify a class (label) to ignore. 
If given, this class index does not contribute
+            to the returned score, regardless of reduction method. If an index is ignored, and
+            ``reduce='macro'``, the class statistics for the ignored class will all be returned
+            as ``-1``.
+
+        mdmc_reduce:
+            Defines how the multi-dimensional multi-class inputs are handled. Should be
+            one of the following:
+
+            - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional
+              multi-class (see :ref:`references/modules:input types` for the definition of input types).
+
+            - ``'samplewise'``: In this case, the statistics are computed separately for each
+              sample on the ``N`` axis, and then the outputs are concatenated together. In each
+              sample the extra axes ``...`` are flattened to become the sub-sample axis, and
+              statistics for each sample are computed by treating the sub-sample axis as the
+              ``N`` axis for that sample.
+
+            - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs are
+              flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they
+              were ``(N_X, C)``. From here on the ``reduce`` parameter applies as usual.
+
+        multiclass:
+            Used only in certain special cases, where you want to treat inputs as a different type
+            than what they appear to be. See the parameter's
+            :ref:`documentation section `
+            for a more detailed explanation and examples.
+
+    Return:
+        The metric returns a tensor of shape ``(..., 5)``, where the last dimension corresponds
+        to ``[tp, fp, tn, fn, sup]`` (``sup`` stands for support and equals ``tp + fn``). The
+        shape depends on the ``reduce`` and ``mdmc_reduce`` (in case of multi-dimensional
+        multi-class data) parameters:
+
+        - If the data is not multi-dimensional multi-class, then
+
+          - If ``reduce='micro'``, the shape will be ``(5, )``
+          - If ``reduce='macro'``, the shape will be ``(C, 5)``,
+            where ``C`` stands for the number of classes
+          - If ``reduce='samples'``, the shape will be ``(N, 5)``, where ``N`` stands for
+            the number of samples
+
+        - If the data is multi-dimensional multi-class and ``mdmc_reduce='global'``, then
+
+          - If ``reduce='micro'``, the shape will be ``(5, )``
+          - If ``reduce='macro'``, the shape will be ``(C, 5)``
+          - If ``reduce='samples'``, the shape will be ``(N*X, 5)``, where ``X`` stands for
+            the product of sizes of all "extra" dimensions of the data (i.e. all dimensions
+            except for ``C`` and ``N``)
+
+        - If the data is multi-dimensional multi-class and ``mdmc_reduce='samplewise'``, then
+
+          - If ``reduce='micro'``, the shape will be ``(N, 5)``
+          - If ``reduce='macro'``, the shape will be ``(N, C, 5)``
+          - If ``reduce='samples'``, the shape will be ``(N, X, 5)``
+
+    Raises:
+        ValueError:
+            If ``reduce`` is none of ``"micro"``, ``"macro"`` or ``"samples"``.
+        ValueError:
+            If ``mdmc_reduce`` is none of ``None``, ``"samplewise"``, ``"global"``.
+        ValueError:
+            If ``reduce`` is set to ``"macro"`` and ``num_classes`` is not provided.
+        ValueError:
+            If ``num_classes`` is set
+            and ``ignore_index`` is not in the range ``[0, num_classes)``.
+        ValueError:
+            If ``ignore_index`` is used with ``binary data``.
+        ValueError:
+            If inputs are ``multi-dimensional multi-class`` and ``mdmc_reduce`` is not provided.
+ + Example: + >>> from paddlemetrics.functional import stat_scores + >>> preds = B.tensor([1, 0, 2, 1]) + >>> target = B.tensor([1, 1, 2, 0]) + >>> stat_scores(preds, target, reduce='macro', num_classes=3) + tensor([[0, 1, 2, 1, 1], + [1, 1, 1, 1, 2], + [1, 0, 3, 0, 1]]) + >>> stat_scores(preds, target, reduce='micro') + tensor([2, 2, 6, 2, 4]) + + """ + if reduce not in ["micro", "macro", "samples"]: + raise ValueError(f"The `reduce` {reduce} is not valid.") + + if mdmc_reduce not in [None, "samplewise", "global"]: + raise ValueError(f"The `mdmc_reduce` {mdmc_reduce} is not valid.") + + if reduce == "macro" and (not num_classes or num_classes < 1): + raise ValueError("When you set `reduce` as 'macro', you have to provide the number of classes.") + + if num_classes and ignore_index is not None and (not 0 <= ignore_index < num_classes or num_classes == 1): + raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes") + + tp, fp, tn, fn = _stat_scores_update( + preds, + target, + reduce=reduce, + mdmc_reduce=mdmc_reduce, + top_k=top_k, + threshold=threshold, + num_classes=num_classes, + multiclass=multiclass, + ignore_index=ignore_index, + ) + return _stat_scores_compute(tp, fp, tn, fn) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/image/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/image/__init__.py new file mode 100644 index 000000000..9fe64120c --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/image/__init__.py @@ -0,0 +1,16 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from paddlemetrics.functional.image.gradients import image_gradients # noqa: F401 +from paddlemetrics.functional.image.psnr import psnr # noqa: F401 diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/image/gradients.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/image/gradients.py new file mode 100644 index 000000000..abe1b08d5 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/image/gradients.py @@ -0,0 +1,81 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
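The `[tp, fp, tn, fn, support]` column layout that `stat_scores` documents above can be unpacked per class; a small sketch mirroring its doctest (assumes the `paddleext.torchapi` backend and `paddlemetrics` package from this patch are installed):

```python
# Unpacking the (C, 5) output of stat_scores (mirrors the doctest above).
import paddleext.torchapi as B
from paddlemetrics.functional import stat_scores

preds = B.tensor([1, 0, 2, 1])
target = B.tensor([1, 1, 2, 0])

per_class = stat_scores(preds, target, reduce='macro', num_classes=3)
tp, fp, tn, fn = per_class[:, 0], per_class[:, 1], per_class[:, 2], per_class[:, 3]
support = per_class[:, 4]  # equals tp + fn for each class
print(tp, fp, tn, fn, support)
```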
+from typing import Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+
+def _image_gradients_validate(img: Tensor) -> None:
+    """Validates whether img is a 4D torch Tensor."""
+
+    if not isinstance(img, Tensor):
+        raise TypeError(f"The `img` expects a value of ``Tensor`` type but got {type(img)}")
+    if img.ndim != 4:
+        raise RuntimeError(f"The `img` expects a 4D tensor but got {img.ndim}D tensor")
+
+
+def _compute_image_gradients(img: Tensor) -> Tuple[Tensor, Tensor]:
+    """Computes image gradients (dy/dx) for a given image."""
+
+    batch_size, channels, height, width = img.shape
+
+    dy = img[..., 1:, :] - img[..., :-1, :]
+    dx = img[..., :, 1:] - img[..., :, :-1]
+
+    shapey = [batch_size, channels, 1, width]
+    dy = B.cat([dy, B.zeros(shapey, device=img.device, dtype=img.dtype)], dim=2)
+    dy = dy.view(img.shape)
+
+    shapex = [batch_size, channels, height, 1]
+    dx = B.cat([dx, B.zeros(shapex, device=img.device, dtype=img.dtype)], dim=3)
+    dx = dx.view(img.shape)
+
+    return dy, dx
+
+
+def image_gradients(img: Tensor) -> Tuple[Tensor, Tensor]:
+    """Computes `Gradient Computation of Image`_ of a given image using finite difference.
+
+    Args:
+        img: An ``(N, C, H, W)`` input tensor where C is the number of image channels
+
+    Return:
+        Tuple of (dy, dx) with each gradient of shape ``[N, C, H, W]``
+
+    Raises:
+        TypeError:
+            If ``img`` is not of the type ``Tensor``.
+        RuntimeError:
+            If ``img`` is not a 4D tensor.
+
+    Example:
+        >>> from paddlemetrics.functional import image_gradients
+        >>> image = B.arange(0, 1*1*5*5, dtype=B.float32)
+        >>> image = B.reshape(image, (1, 1, 5, 5))
+        >>> dy, dx = image_gradients(image)
+        >>> dy[0, 0, :, :]
+        tensor([[5., 5., 5., 5., 5.],
+                [5., 5., 5., 5., 5.],
+                [5., 5., 5., 5., 5.],
+                [5., 5., 5., 5., 5.],
+                [0., 0., 0., 0., 0.]])
+
+    .. note:: The implementation follows the 1-step finite difference method as followed
+       by the TF implementation. The values are organized such that the gradient
+       I(x+1, y) - I(x, y) is at the (x, y) location
+    """
+    _image_gradients_validate(img)
+
+    return _compute_image_gradients(img)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/image/psnr.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/image/psnr.py
new file mode 100644
index 000000000..2ffd60461
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/image/psnr.py
@@ -0,0 +1,150 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional, Tuple, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.utilities import rank_zero_warn, reduce
+
+
+def _psnr_compute(
+    sum_squared_error: Tensor,
+    n_obs: Tensor,
+    data_range: Tensor,
+    base: float = 10.0,
+    reduction: str = "elementwise_mean",
+) -> Tensor:
+    """Computes peak signal-to-noise ratio.
+
+    Args:
+        sum_squared_error: Sum of square of errors over all observations
+        n_obs: Number of predictions or observations
+        data_range:
+            the range of the data. If None, it is determined from the data (max - min). ``data_range`` must be given
+            when ``dim`` is not None.
+        base: a base of a logarithm to use (default: 10)
+        reduction: a method to reduce metric score over labels.
+
+            - ``'elementwise_mean'``: takes the mean (default)
+            - ``'sum'``: takes the sum
+            - ``'none'``: no reduction will be applied
+
+    Example:
+        >>> preds = B.tensor([[0.0, 1.0], [2.0, 3.0]])
+        >>> target = B.tensor([[3.0, 2.0], [1.0, 0.0]])
+        >>> data_range = target.max() - target.min()
+        >>> sum_squared_error, n_obs = _psnr_update(preds, target)
+        >>> _psnr_compute(sum_squared_error, n_obs, data_range)
+        tensor(2.5527)
+    """
+
+    psnr_base_e = 2 * B.log(data_range) - B.log(sum_squared_error / n_obs)
+    psnr_vals = psnr_base_e * (10 / B.log(tensor(base)))
+    return reduce(psnr_vals, reduction=reduction)
+
+
+def _psnr_update(
+    preds: Tensor,
+    target: Tensor,
+    dim: Optional[Union[int, Tuple[int, ...]]] = None,
+) -> Tuple[Tensor, Tensor]:
+    """Updates and returns variables required to compute peak signal-to-noise ratio.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+        dim:
+            Dimensions to reduce PSNR scores over provided as either an integer or a list of integers. Default is
+            None meaning scores will be reduced across all dimensions.
+    """
+
+    if dim is None:
+        sum_squared_error = B.sum(B.pow(preds - target, 2))
+        n_obs = tensor(target.numel(), device=target.device)
+        return sum_squared_error, n_obs
+
+    diff = preds - target
+    sum_squared_error = B.sum(diff * diff, dim=dim)
+
+    if isinstance(dim, int):
+        dim_list = [dim]
+    else:
+        dim_list = list(dim)
+    if not dim_list:
+        n_obs = tensor(target.numel(), device=target.device)
+    else:
+        n_obs = tensor(target.size(), device=target.device)[dim_list].prod()
+        n_obs = n_obs.expand_as(sum_squared_error)
+
+    return sum_squared_error, n_obs
+
+
+def psnr(
+    preds: Tensor,
+    target: Tensor,
+    data_range: Optional[float] = None,
+    base: float = 10.0,
+    reduction: str = "elementwise_mean",
+    dim: Optional[Union[int, Tuple[int, ...]]] = None,
+) -> Tensor:
+    """Computes the peak signal-to-noise ratio.
+
+    Args:
+        preds: estimated signal
+        target: ground truth signal
+        data_range:
+            the range of the data. If None, it is determined from the data (max - min). ``data_range`` must be given
+            when ``dim`` is not None.
+        base: a base of a logarithm to use (default: 10)
+        reduction: a method to reduce metric score over labels.
+
+            - ``'elementwise_mean'``: takes the mean (default)
+            - ``'sum'``: takes the sum
+            - ``'none'``: no reduction will be applied
+
+        dim:
+            Dimensions to reduce PSNR scores over provided as either an integer or a list of integers. Default is
+            None meaning scores will be reduced across all dimensions.
+
+    Return:
+        Tensor with PSNR score
+
+    Raises:
+        ValueError:
+            If ``dim`` is not ``None`` and ``data_range`` is not provided.
+
+    Example:
+        >>> from paddlemetrics.functional import psnr
+        >>> pred = B.tensor([[0.0, 1.0], [2.0, 3.0]])
+        >>> target = B.tensor([[3.0, 2.0], [1.0, 0.0]])
+        >>> psnr(pred, target)
+        tensor(2.5527)
+
+    .. note::
+        Half precision is only supported on GPU for this metric
+    """
+    if dim is None and reduction != "elementwise_mean":
+        rank_zero_warn(f"The `reduction={reduction}` will not have any effect when `dim` is None.")
+
+    if data_range is None:
+        if dim is not None:
+            # Maybe we could use `B.amax(target, dim=dim) - B.amin(target, dim=dim)` in PyTorch 1.7 to calculate
+            # `data_range` in the future.
+            raise ValueError("The `data_range` must be given when `dim` is not None.")
+
+        data_range = target.max() - target.min()
+    else:
+        data_range = tensor(float(data_range))
+    sum_squared_error, n_obs = _psnr_update(preds, target, dim=dim)
+    return _psnr_compute(sum_squared_error, n_obs, data_range, base=base, reduction=reduction)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/image/ssim.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/image/ssim.py
new file mode 100644
index 000000000..52af9b793
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/image/ssim.py
@@ -0,0 +1,225 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional, Sequence, Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.utilities.checks import _check_same_shape
+from paddlemetrics.utilities.distributed import reduce
+
+
+def _gaussian(kernel_size: int, sigma: float, dtype: B.dtype, device: B.device) -> Tensor:
+    """Computes 1D gaussian kernel.
+
+    Args:
+        kernel_size: size of the gaussian kernel
+        sigma: Standard deviation of the gaussian kernel
+        dtype: data type of the output tensor
+        device: device of the output tensor
+
+    Example:
+        >>> _gaussian(3, 1, B.float, 'cpu')
+        tensor([[0.2741, 0.4519, 0.2741]])
+    """
+    dist = B.arange(start=(1 - kernel_size) / 2, end=(1 + kernel_size) / 2, step=1, dtype=dtype, device=device)
+    gauss = B.exp(-B.pow(dist / sigma, 2) / 2)
+    return (gauss / gauss.sum()).unsqueeze(dim=0)  # (1, kernel_size)
+
+
+def _gaussian_kernel(
+    channel: int, kernel_size: Sequence[int], sigma: Sequence[float], dtype: B.dtype, device: B.device
+) -> Tensor:
+    """Computes 2D gaussian kernel.
+
+    Args:
+        channel: number of channels in the image
+        kernel_size: size of the gaussian kernel as a tuple (h, w)
+        sigma: Standard deviation of the gaussian kernel
+        dtype: data type of the output tensor
+        device: device of the output tensor
+
+    Example:
+        >>> _gaussian_kernel(1, (5,5), (1,1), B.float, "cpu")
+        tensor([[[[0.0030, 0.0133, 0.0219, 0.0133, 0.0030],
+                  [0.0133, 0.0596, 0.0983, 0.0596, 0.0133],
+                  [0.0219, 0.0983, 0.1621, 0.0983, 0.0219],
+                  [0.0133, 0.0596, 0.0983, 0.0596, 0.0133],
+                  [0.0030, 0.0133, 0.0219, 0.0133, 0.0030]]]])
+    """
+
+    gaussian_kernel_x = _gaussian(kernel_size[0], sigma[0], dtype, device)
+    gaussian_kernel_y = _gaussian(kernel_size[1], sigma[1], dtype, device)
+    kernel = B.matmul(gaussian_kernel_x.t(), gaussian_kernel_y)  # (kernel_size, 1) * (1, kernel_size)
+
+    return kernel.expand(channel, 1, kernel_size[0], kernel_size[1])
+
+
+def _ssim_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
+    """Updates and returns variables required to compute Structural Similarity Index Measure. Checks for same shape
+    and type of the input tensors.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+    """
+
+    if preds.dtype != target.dtype:
+        raise TypeError(
+            "Expected `preds` and `target` to have the same data type."
+            f" Got preds: {preds.dtype} and target: {target.dtype}."
+        )
+    _check_same_shape(preds, target)
+    if len(preds.shape) != 4:
+        raise ValueError(
+            "Expected `preds` and `target` to have BxCxHxW shape."
+            f" Got preds: {preds.shape} and target: {target.shape}."
+        )
+    return preds, target
+
+
+def _ssim_compute(
+    preds: Tensor,
+    target: Tensor,
+    kernel_size: Sequence[int] = (11, 11),
+    sigma: Sequence[float] = (1.5, 1.5),
+    reduction: str = "elementwise_mean",
+    data_range: Optional[float] = None,
+    k1: float = 0.01,
+    k2: float = 0.03,
+) -> Tensor:
+    """Computes Structural Similarity Index Measure.
+
+    Args:
+        preds: estimated image
+        target: ground truth image
+        kernel_size: size of the gaussian kernel (default: (11, 11))
+        sigma: Standard deviation of the gaussian kernel (default: (1.5, 1.5))
+        reduction: a method to reduce metric score over labels.
+
+            - ``'elementwise_mean'``: takes the mean (default)
+            - ``'sum'``: takes the sum
+            - ``'none'``: no reduction will be applied
+
+        data_range: Range of the image. If ``None``, it is determined from the image (max - min)
+        k1: Parameter of SSIM. Default: 0.01
+        k2: Parameter of SSIM. Default: 0.03
+
+    Example:
+        >>> preds = B.rand([16, 1, 16, 16])
+        >>> target = preds * 0.75
+        >>> preds, target = _ssim_update(preds, target)
+        >>> _ssim_compute(preds, target)
+        tensor(0.9219)
+    """
+    if len(kernel_size) != 2 or len(sigma) != 2:
+        raise ValueError(
+            "Expected `kernel_size` and `sigma` to have length two."
+            f" Got kernel_size: {len(kernel_size)} and sigma: {len(sigma)}."
+        )
+
+    if any(x % 2 == 0 or x <= 0 for x in kernel_size):
+        raise ValueError(f"Expected all values of `kernel_size` to be odd positive numbers. Got {kernel_size}.")
+
+    if any(y <= 0 for y in sigma):
+        raise ValueError(f"Expected all values of `sigma` to be positive. Got {sigma}.")
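+
+    # data_range defaults to the larger dynamic range of the two images; c1 and c2 below are the
+    # standard SSIM stabilisation constants (k1 * data_range) ** 2 and (k2 * data_range) ** 2.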
Got {sigma}.") + + if data_range is None: + data_range = max(preds.max() - preds.min(), target.max() - target.min()) + + c1 = pow(k1 * data_range, 2) + c2 = pow(k2 * data_range, 2) + device = preds.device + + channel = preds.size(1) + dtype = preds.dtype + kernel = _gaussian_kernel(channel, kernel_size, sigma, dtype, device) + pad_h = (kernel_size[0] - 1) // 2 + pad_w = (kernel_size[1] - 1) // 2 + + preds = B.pad(preds, (pad_h, pad_h, pad_w, pad_w), mode="reflect") + target = B.pad(target, (pad_h, pad_h, pad_w, pad_w), mode="reflect") + + input_list = B.cat((preds, target, preds * preds, target * target, preds * target)) # (5 * B, C, H, W) + outputs = B.conv2d(input_list, kernel, groups=channel) + output_list = outputs.split(preds.shape[0]) + + mu_pred_sq = output_list[0].pow(2) + mu_target_sq = output_list[1].pow(2) + mu_pred_target = output_list[0] * output_list[1] + + sigma_pred_sq = output_list[2] - mu_pred_sq + sigma_target_sq = output_list[3] - mu_target_sq + sigma_pred_target = output_list[4] - mu_pred_target + + upper = 2 * sigma_pred_target + c2 + lower = sigma_pred_sq + sigma_target_sq + c2 + + ssim_idx = ((2 * mu_pred_target + c1) * upper) / ((mu_pred_sq + mu_target_sq + c1) * lower) + ssim_idx = ssim_idx[..., pad_h:-pad_h, pad_w:-pad_w] + + return reduce(ssim_idx, reduction) + + +def ssim( + preds: Tensor, + target: Tensor, + kernel_size: Sequence[int] = (11, 11), + sigma: Sequence[float] = (1.5, 1.5), + reduction: str = "elementwise_mean", + data_range: Optional[float] = None, + k1: float = 0.01, + k2: float = 0.03, +) -> Tensor: + """Computes Structual Similarity Index Measure. + + Args: + preds: estimated image + target: ground truth image + kernel_size: size of the gaussian kernel (default: (11, 11)) + sigma: Standard deviation of the gaussian kernel (default: (1.5, 1.5)) + reduction: a method to reduce metric score over labels. + + - ``'elementwise_mean'``: takes the mean (default) + - ``'sum'``: takes the sum + - ``'none'``: no reduction will be applied + + data_range: Range of the image. If ``None``, it is determined from the image (max - min) + k1: Parameter of SSIM. Default: 0.01 + k2: Parameter of SSIM. Default: 0.03 + + Return: + Tensor with SSIM score + + Raises: + TypeError: + If ``preds`` and ``target`` don't have the same data type. + ValueError: + If ``preds`` and ``target`` don't have ``BxCxHxW shape``. + ValueError: + If the length of ``kernel_size`` or ``sigma`` is not ``2``. + ValueError: + If one of the elements of ``kernel_size`` is not an ``odd positive number``. + ValueError: + If one of the elements of ``sigma`` is not a ``positive number``. + + Example: + >>> from paddlemetrics.functional import ssim + >>> preds = B.rand([16, 1, 16, 16]) + >>> target = preds * 0.75 + >>> ssim(preds, target) + tensor(0.9219) + """ + preds, target = _ssim_update(preds, target) + return _ssim_compute(preds, target, kernel_size, sigma, reduction, data_range, k1, k2) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/__init__.py new file mode 100644 index 000000000..1d28d0c4b --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/__init__.py @@ -0,0 +1,17 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/__init__.py
new file mode 100644
index 000000000..1d28d0c4b
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/__init__.py
@@ -0,0 +1,17 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from paddlemetrics.functional.pairwise.cosine import pairwise_cosine_similarity  # noqa: F401
+from paddlemetrics.functional.pairwise.euclidean import pairwise_euclidean_distance  # noqa: F401
+from paddlemetrics.functional.pairwise.linear import pairwise_linear_similarity  # noqa: F401
+from paddlemetrics.functional.pairwise.manhatten import pairwise_manhatten_distance  # noqa: F401
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/cosine.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/cosine.py
new file mode 100644
index 000000000..cdd24e155
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/cosine.py
@@ -0,0 +1,85 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix
+
+
+def _pairwise_cosine_similarity_update(
+    x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None
+) -> Tensor:
+    """Calculates the pairwise cosine similarity matrix.
+
+    Args:
+        x: tensor of shape ``[N,d]``
+        y: tensor of shape ``[M,d]``
+        zero_diagonal: determines if the diagonal of the distance matrix should be set to zero
+    """
+    x, y, zero_diagonal = _check_input(x, y, zero_diagonal)
+
+    norm = B.norm(x, p=2, dim=1)
+    x /= norm.unsqueeze(1)
+    norm = B.norm(y, p=2, dim=1)
+    y /= norm.unsqueeze(1)
+
+    distance = x @ y.T
+    if zero_diagonal:
+        distance.fill_diagonal_(0)
+    return distance
+
+
+def pairwise_cosine_similarity(
+    x: Tensor, y: Optional[Tensor] = None, reduction: Optional[str] = None, zero_diagonal: Optional[bool] = None
+) -> Tensor:
+    r"""
+    Calculates pairwise cosine similarity:
+
+    .. math::
+        s_{cos}(x,y) = \frac{\langle x, y \rangle}{||x|| \cdot ||y||}
+        = \frac{\sum_{d=1}^D x_d \cdot y_d}{\sqrt{\sum_{d=1}^D x_d^2} \cdot \sqrt{\sum_{d=1}^D y_d^2}}
+
+    If both `x` and `y` are passed in, the calculation will be performed pairwise between the rows of `x` and `y`.
+    If only `x` is passed in, the calculation will be performed between the rows of `x`.
+
+    Args:
+        x: Tensor with shape ``[N, d]``
+        y: Tensor with shape ``[M, d]``, optional
+        reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`
+            (applied along column dimension) or `'none'`, `None` for no reduction
+        zero_diagonal: if the diagonal of the distance matrix should be set to 0. If only `x` is given,
+            this defaults to `True`; if `y` is also given, it defaults to `False`
+
+    Returns:
+        A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics.functional import pairwise_cosine_similarity
+        >>> x = B.tensor([[2, 3], [3, 5], [5, 8]], dtype=B.float32)
+        >>> y = B.tensor([[1, 0], [2, 1]], dtype=B.float32)
+        >>> pairwise_cosine_similarity(x, y)
+        tensor([[0.5547, 0.8682],
+                [0.5145, 0.8437],
+                [0.5300, 0.8533]])
+        >>> pairwise_cosine_similarity(x)
+        tensor([[0.0000, 0.9989, 0.9996],
+                [0.9989, 0.0000, 0.9998],
+                [0.9996, 0.9998, 0.0000]])
+
+    """
+    distance = _pairwise_cosine_similarity_update(x, y, zero_diagonal)
+    return _reduce_distance_matrix(distance, reduction)
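+
+
+# Note: _pairwise_cosine_similarity_update normalises `x` and `y` in place. An out-of-place
+# sketch of the same computation, using only the calls that appear above:
+#
+#     x_unit = x / B.norm(x, p=2, dim=1).unsqueeze(1)
+#     y_unit = y / B.norm(y, p=2, dim=1).unsqueeze(1)
+#     distance = x_unit @ y_unit.T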
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/euclidean.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/euclidean.py
new file mode 100644
index 000000000..fd31cd7f7
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/euclidean.py
@@ -0,0 +1,79 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional
+
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix
+
+
+def _pairwise_euclidean_distance_update(
+    x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None
+) -> Tensor:
+    """Calculates the pairwise Euclidean distance matrix.
+
+    Args:
+        x: tensor of shape ``[N,d]``
+        y: tensor of shape ``[M,d]``
+        zero_diagonal: determines if the diagonal of the distance matrix should be set to zero
+    """
+    x, y, zero_diagonal = _check_input(x, y, zero_diagonal)
+    x_norm = x.norm(dim=1, keepdim=True)
+    y_norm = y.norm(dim=1).T
+    distance = x_norm * x_norm + y_norm * y_norm - 2 * x.mm(y.T)
+    if zero_diagonal:
+        distance.fill_diagonal_(0)
+    return distance.sqrt()
+
+
+def pairwise_euclidean_distance(
+    x: Tensor, y: Optional[Tensor] = None, reduction: Optional[str] = None, zero_diagonal: Optional[bool] = None
+) -> Tensor:
+    r"""
+    Calculates pairwise Euclidean distances:
+
+    .. math::
+        d_{euc}(x,y) = ||x - y||_2 = \sqrt{\sum_{d=1}^D (x_d - y_d)^2}
+
+    If both `x` and `y` are passed in, the calculation will be performed pairwise between the rows of `x` and `y`.
+    If only `x` is passed in, the calculation will be performed between the rows of `x`.
+
+    Args:
+        x: Tensor with shape ``[N, d]``
+        y: Tensor with shape ``[M, d]``, optional
+        reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`
+            (applied along column dimension) or `'none'`, `None` for no reduction
+        zero_diagonal: if the diagonal of the distance matrix should be set to 0.
If only `x` is given,
+            this defaults to `True`; if `y` is also given, it defaults to `False`
+
+    Returns:
+        A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics.functional import pairwise_euclidean_distance
+        >>> x = B.tensor([[2, 3], [3, 5], [5, 8]], dtype=B.float32)
+        >>> y = B.tensor([[1, 0], [2, 1]], dtype=B.float32)
+        >>> pairwise_euclidean_distance(x, y)
+        tensor([[3.1623, 2.0000],
+                [5.3852, 4.1231],
+                [8.9443, 7.6158]])
+        >>> pairwise_euclidean_distance(x)
+        tensor([[0.0000, 2.2361, 5.8310],
+                [2.2361, 0.0000, 3.6056],
+                [5.8310, 3.6056, 0.0000]])
+
+    """
+    distance = _pairwise_euclidean_distance_update(x, y, zero_diagonal)
+    return _reduce_distance_matrix(distance, reduction)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/helpers.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/helpers.py
new file mode 100644
index 000000000..2d38916af
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/helpers.py
@@ -0,0 +1,59 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional, Tuple

+from paddleext.torchapi import Tensor
+
+
+def _check_input(
+    x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None
+) -> Tuple[Tensor, Tensor, bool]:
+    """Check that the input has the right dimensionality and set the `zero_diagonal` argument if the user has not
+    provided it.
+
+    Args:
+        x: tensor of shape ``[N,d]``
+        y: if provided, a tensor of shape ``[M,d]``
+        zero_diagonal: determines if the diagonal of the distance matrix should be set to zero
+    """
+    if x.ndim != 2:
+        raise ValueError(f"Expected argument `x` to be a 2D tensor of shape `[N, d]` but got {x.shape}")
+
+    if y is not None:
+        if y.ndim != 2 or y.shape[1] != x.shape[1]:
+            raise ValueError(
+                "Expected argument `y` to be a 2D tensor of shape `[M, d]` where"
+                " `d` should be same as the last dimension of `x`"
+            )
+        zero_diagonal = False if zero_diagonal is None else zero_diagonal
+    else:
+        y = x.clone()
+        zero_diagonal = True if zero_diagonal is None else zero_diagonal
+    return x, y, zero_diagonal
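+
+
+# _check_input fixes the convention shared by all pairwise metrics in this package:
+# pairwise_*(x) behaves like pairwise_*(x, x), with the self-comparisons on the
+# diagonal zeroed out by default.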
+
+
+def _reduce_distance_matrix(distmat: Tensor, reduction: Optional[str] = None) -> Tensor:
+    """Final reduction of distance matrix.
+
+    Args:
+        distmat: a ``[N,M]`` matrix
+        reduction: string determining how to reduce along last dimension
+    """
+    if reduction == "mean":
+        return distmat.mean(dim=-1)
+    if reduction == "sum":
+        return distmat.sum(dim=-1)
+    if reduction is None or reduction == "none":
+        return distmat
+    raise ValueError(f"Expected reduction to be one of `['mean', 'sum', None]` but got {reduction}")
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/linear.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/linear.py
new file mode 100644
index 000000000..08e793019
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/linear.py
@@ -0,0 +1,78 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional
+
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix
+
+
+def _pairwise_linear_similarity_update(
+    x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None
+) -> Tensor:
+    """Calculates the pairwise linear similarity matrix.
+
+    Args:
+        x: tensor of shape ``[N,d]``
+        y: tensor of shape ``[M,d]``
+        zero_diagonal: determines if the diagonal of the distance matrix should be set to zero
+    """
+    x, y, zero_diagonal = _check_input(x, y, zero_diagonal)
+
+    distance = x @ y.T
+    if zero_diagonal:
+        distance.fill_diagonal_(0)
+    return distance
+
+
+def pairwise_linear_similarity(
+    x: Tensor, y: Optional[Tensor] = None, reduction: Optional[str] = None, zero_diagonal: Optional[bool] = None
+) -> Tensor:
+    r"""
+    Calculates pairwise linear similarity:
+
+    .. math::
+        s_{lin}(x,y) = \langle x, y \rangle = \sum_{d=1}^D x_d \cdot y_d
+
+    If both `x` and `y` are passed in, the calculation will be performed pairwise between the rows of `x` and `y`.
+    If only `x` is passed in, the calculation will be performed between the rows of `x`.
+
+    Args:
+        x: Tensor with shape ``[N, d]``
+        y: Tensor with shape ``[M, d]``, optional
+        reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`
+            (applied along column dimension) or `'none'`, `None` for no reduction
+        zero_diagonal: if the diagonal of the distance matrix should be set to 0. If only `x` is given,
+            this defaults to `True`; if `y` is also given, it defaults to `False`
+
+    Returns:
+        A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics.functional import pairwise_linear_similarity
+        >>> x = B.tensor([[2, 3], [3, 5], [5, 8]], dtype=B.float32)
+        >>> y = B.tensor([[1, 0], [2, 1]], dtype=B.float32)
+        >>> pairwise_linear_similarity(x, y)
+        tensor([[ 2.,  7.],
+                [ 3., 11.],
+                [ 5., 18.]])
+        >>> pairwise_linear_similarity(x)
+        tensor([[ 0., 21., 34.],
+                [21.,  0., 55.],
+                [34., 55.,  0.]])
+
+    """
+    distance = _pairwise_linear_similarity_update(x, y, zero_diagonal)
+    return _reduce_distance_matrix(distance, reduction)
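+
+
+# Linear similarity is the raw gram matrix x @ y.T; pairwise_cosine_similarity above
+# is the same quantity computed on row-normalised inputs.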
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/manhatten.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/manhatten.py
new file mode 100644
index 000000000..d0079bd62
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/manhatten.py
@@ -0,0 +1,78 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional
+
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix
+
+
+def _pairwise_manhatten_distance_update(
+    x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None
+) -> Tensor:
+    """Calculates the pairwise Manhattan distance matrix.
+
+    Args:
+        x: tensor of shape ``[N,d]``
+        y: if provided, a tensor of shape ``[M,d]``
+        zero_diagonal: determines if the diagonal of the distance matrix should be set to zero
+    """
+    x, y, zero_diagonal = _check_input(x, y, zero_diagonal)
+
+    distance = (x.unsqueeze(1) - y.unsqueeze(0).repeat(x.shape[0], 1, 1)).abs().sum(dim=-1)
+    if zero_diagonal:
+        distance.fill_diagonal_(0)
+    return distance
+
+
+def pairwise_manhatten_distance(
+    x: Tensor, y: Optional[Tensor] = None, reduction: Optional[str] = None, zero_diagonal: Optional[bool] = None
+) -> Tensor:
+    r"""
+    Calculates pairwise Manhattan distance:
+
+    .. math::
+        d_{man}(x,y) = ||x-y||_1 = \sum_{d=1}^D |x_d - y_d|
+
+    If both `x` and `y` are passed in, the calculation will be performed pairwise between the rows of `x` and `y`.
+    If only `x` is passed in, the calculation will be performed between the rows of `x`.
+
+    Args:
+        x: Tensor with shape ``[N, d]``
+        y: Tensor with shape ``[M, d]``, optional
+        reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`
+            (applied along column dimension) or `'none'`, `None` for no reduction
+        zero_diagonal: if the diagonal of the distance matrix should be set to 0.
If only `x` is given,
+            this defaults to `True`; if `y` is also given, it defaults to `False`
+
+    Returns:
+        A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics.functional import pairwise_manhatten_distance
+        >>> x = B.tensor([[2, 3], [3, 5], [5, 8]], dtype=B.float32)
+        >>> y = B.tensor([[1, 0], [2, 1]], dtype=B.float32)
+        >>> pairwise_manhatten_distance(x, y)
+        tensor([[ 4.,  2.],
+                [ 7.,  5.],
+                [12., 10.]])
+        >>> pairwise_manhatten_distance(x)
+        tensor([[0., 3., 8.],
+                [3., 0., 5.],
+                [8., 5., 0.]])
+
+    """
+    distance = _pairwise_manhatten_distance_update(x, y, zero_diagonal)
+    return _reduce_distance_matrix(distance, reduction)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/__init__.py
new file mode 100644
index 000000000..7ddc60404
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/__init__.py
@@ -0,0 +1,27 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from paddlemetrics.functional.image.psnr import psnr  # noqa: F401
+from paddlemetrics.functional.image.ssim import ssim  # noqa: F401
+from paddlemetrics.functional.regression.cosine_similarity import cosine_similarity  # noqa: F401
+from paddlemetrics.functional.regression.explained_variance import explained_variance  # noqa: F401
+from paddlemetrics.functional.regression.mean_absolute_error import mean_absolute_error  # noqa: F401
+from paddlemetrics.functional.regression.mean_absolute_percentage_error import (  # noqa: F401
+    mean_absolute_percentage_error,
+)
+from paddlemetrics.functional.regression.mean_squared_error import mean_squared_error  # noqa: F401
+from paddlemetrics.functional.regression.mean_squared_log_error import mean_squared_log_error  # noqa: F401
+from paddlemetrics.functional.regression.pearson import pearson_corrcoef  # noqa: F401
+from paddlemetrics.functional.regression.r2 import r2_score  # noqa: F401
+from paddlemetrics.functional.regression.spearman import spearman_corrcoef  # noqa: F401
+from paddlemetrics.functional.regression.tweedie_deviance import tweedie_deviance_score  # noqa: F401
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/cosine_similarity.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/cosine_similarity.py
new file mode 100644
index 000000000..ea0f77a3b
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/cosine_similarity.py
@@ -0,0 +1,98 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Tuple + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.utilities.checks import _check_same_shape + + +def _cosine_similarity_update( + preds: Tensor, + target: Tensor, +) -> Tuple[Tensor, Tensor]: + """Updates and returns variables required to compute Cosine Similarity. Checks for same shape of input tensors. + + Args: + preds: Predicted tensor + target: Ground truth tensor + """ + + _check_same_shape(preds, target) + preds = preds.float() + target = target.float() + + return preds, target + + +def _cosine_similarity_compute(preds: Tensor, target: Tensor, reduction: str = "sum") -> Tensor: + """Computes Cosine Similarity. + + Args: + preds: Predicted tensor + target: Ground truth tensor + reduction: + The method of reducing along the batch dimension using sum, mean or taking the individual scores + + Example: + >>> target = B.tensor([[1, 2, 3, 4], [1, 2, 3, 4]]) + >>> preds = B.tensor([[1, 2, 3, 4], [-1, -2, -3, -4]]) + >>> preds, target = _cosine_similarity_update(preds, target) + >>> _cosine_similarity_compute(preds, target, 'none') + tensor([ 1.0000, -1.0000]) + """ + + dot_product = (preds * target).sum(dim=-1) + preds_norm = preds.norm(dim=-1) + target_norm = target.norm(dim=-1) + similarity = dot_product / (preds_norm * target_norm) + reduction_mapping = { + "sum": B.sum, + "mean": B.mean, + "none": lambda x: x, + None: lambda x: x, + } + return reduction_mapping[reduction](similarity) + + +def cosine_similarity(preds: Tensor, target: Tensor, reduction: str = "sum") -> Tensor: + r""" + Computes the `Cosine Similarity`_ + between targets and predictions: + + .. math:: + cos_{sim}(x,y) = \frac{x \cdot y}{||x|| \cdot ||y||} = + \frac{\sum_{i=1}^n x_i y_i}{\sqrt{\sum_{i=1}^n x_i^2}\sqrt{\sum_{i=1}^n y_i^2}} + + where :math:`y` is a tensor of target values, and :math:`x` is a tensor of predictions. + + Args: + preds: Predicted tensor with shape ``(N,d)`` + target: Ground truth tensor with shape ``(N,d)`` + reduction: + The method of reducing along the batch dimension using sum, mean or taking the individual scores + + Example: + >>> from paddlemetrics.functional.regression import cosine_similarity + >>> target = B.tensor([[1, 2, 3, 4], + ... [1, 2, 3, 4]]) + >>> preds = B.tensor([[1, 2, 3, 4], + ... [-1, -2, -3, -4]]) + >>> cosine_similarity(preds, target, 'none') + tensor([ 1.0000, -1.0000]) + + """ + preds, target = _cosine_similarity_update(preds, target) + return _cosine_similarity_compute(preds, target, reduction) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/explained_variance.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/explained_variance.py new file mode 100644 index 000000000..95ef6acf4 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/explained_variance.py @@ -0,0 +1,137 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Sequence, Tuple, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.utilities.checks import _check_same_shape
+
+
+def _explained_variance_update(preds: Tensor, target: Tensor) -> Tuple[int, Tensor, Tensor, Tensor, Tensor]:
+    """Updates and returns variables required to compute Explained Variance. Checks for same shape of input
+    tensors.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+    """
+
+    _check_same_shape(preds, target)
+
+    n_obs = preds.size(0)
+    sum_error = B.sum(target - preds, dim=0)
+    diff = target - preds
+    sum_squared_error = B.sum(diff * diff, dim=0)
+
+    sum_target = B.sum(target, dim=0)
+    sum_squared_target = B.sum(target * target, dim=0)
+
+    return n_obs, sum_error, sum_squared_error, sum_target, sum_squared_target
+
+
+def _explained_variance_compute(
+    n_obs: Tensor,
+    sum_error: Tensor,
+    sum_squared_error: Tensor,
+    sum_target: Tensor,
+    sum_squared_target: Tensor,
+    multioutput: str = "uniform_average",
+) -> Tensor:
+    """Computes Explained Variance.
+
+    Args:
+        n_obs: Number of predictions or observations
+        sum_error: Sum of errors over all observations
+        sum_squared_error: Sum of square of errors over all observations
+        sum_target: Sum of target values
+        sum_squared_target: Sum of squares of target values
+        multioutput: Defines aggregation in the case of multiple output scores. Can be one
+            of the following strings (default is `'uniform_average'`.):
+
+            * `'raw_values'` returns full set of scores
+            * `'uniform_average'` scores are uniformly averaged
+            * `'variance_weighted'` scores are weighted by their individual variances
+
+    Example:
+        >>> target = B.tensor([[0.5, 1], [-1, 1], [7, -6]])
+        >>> preds = B.tensor([[0, 2], [-1, 2], [8, -5]])
+        >>> n_obs, sum_error, ss_error, sum_target, ss_target = _explained_variance_update(preds, target)
+        >>> _explained_variance_compute(n_obs, sum_error, ss_error, sum_target, ss_target, multioutput='raw_values')
+        tensor([0.9677, 1.0000])
+    """
+
+    diff_avg = sum_error / n_obs
+    numerator = sum_squared_error / n_obs - (diff_avg * diff_avg)
+
+    target_avg = sum_target / n_obs
+    denominator = sum_squared_target / n_obs - (target_avg * target_avg)
+
+    # Take care of division by zero
+    nonzero_numerator = numerator != 0
+    nonzero_denominator = denominator != 0
+    valid_score = nonzero_numerator & nonzero_denominator
+    output_scores = B.ones_like(diff_avg)
+    output_scores[valid_score] = 1.0 - (numerator[valid_score] / denominator[valid_score])
+    output_scores[nonzero_numerator & ~nonzero_denominator] = 0.0
+
+    # Decide what to do in multioutput case
+    # Todo: allow user to pass in tensor with weights
+    if multioutput == "raw_values":
+        return output_scores
+    if multioutput == "uniform_average":
+        return B.mean(output_scores)
+    if multioutput == "variance_weighted":
+        denom_sum = B.sum(denominator)
+        return B.sum(denominator / denom_sum * output_scores)
+    raise ValueError(
+        "Argument `multioutput` must be either `raw_values`, `uniform_average` or `variance_weighted`."
+    )
+
+
+def explained_variance(
+    preds: Tensor,
+    target: Tensor,
+    multioutput: str = "uniform_average",
+) -> Union[Tensor, Sequence[Tensor]]:
+    """Computes explained variance.
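+
+    Explained variance is ``1 - Var[target - preds] / Var[target]``; unlike the r2 score it is
+    insensitive to a constant bias in the predictions.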
+ + Args: + preds: estimated labels + target: ground truth labels + multioutput: Defines aggregation in the case of multiple output scores. Can be one + of the following strings (default is `'uniform_average'`.): + + * `'raw_values'` returns full set of scores + * `'uniform_average'` scores are uniformly averaged + * `'variance_weighted'` scores are weighted by their individual variances + + Example: + >>> from paddlemetrics.functional import explained_variance + >>> target = B.tensor([3, -0.5, 2, 7]) + >>> preds = B.tensor([2.5, 0.0, 2, 8]) + >>> explained_variance(preds, target) + tensor(0.9572) + + >>> target = B.tensor([[0.5, 1], [-1, 1], [7, -6]]) + >>> preds = B.tensor([[0, 2], [-1, 2], [8, -5]]) + >>> explained_variance(preds, target, multioutput='raw_values') + tensor([0.9677, 1.0000]) + """ + n_obs, sum_error, sum_squared_error, sum_target, sum_squared_target = _explained_variance_update(preds, target) + return _explained_variance_compute( + n_obs, + sum_error, + sum_squared_error, + sum_target, + sum_squared_target, + multioutput, + ) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/mean_absolute_error.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/mean_absolute_error.py new file mode 100644 index 000000000..1ddb41533 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/mean_absolute_error.py @@ -0,0 +1,73 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Tuple + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.utilities.checks import _check_same_shape + + +def _mean_absolute_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]: + """Updates and returns variables required to compute Mean Absolute Error. Checks for same shape of input + tensors. + + Args: + preds: Predicted tensor + target: Ground truth tensor + """ + + _check_same_shape(preds, target) + sum_abs_error = B.sum(B.abs(preds - target)) + n_obs = target.numel() + return sum_abs_error, n_obs + + +def _mean_absolute_error_compute(sum_abs_error: Tensor, n_obs: int) -> Tensor: + """Computes Mean Absolute Error. + + Args: + sum_abs_error: Sum of absolute value of errors over all observations + n_obs: Number of predictions or observations + + Example: + >>> preds = B.tensor([0., 1, 2, 3]) + >>> target = B.tensor([0., 1, 2, 2]) + >>> sum_abs_error, n_obs = _mean_absolute_error_update(preds, target) + >>> _mean_absolute_error_compute(sum_abs_error, n_obs) + tensor(0.2500) + """ + + return sum_abs_error / n_obs + + +def mean_absolute_error(preds: Tensor, target: Tensor) -> Tensor: + """Computes mean absolute error. 
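+
+    MAE is the elementwise mean of ``|preds - target|`` over all observations.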
+
+    Args:
+        preds: estimated labels
+        target: ground truth labels
+
+    Return:
+        Tensor with MAE
+
+    Example:
+        >>> from paddlemetrics.functional import mean_absolute_error
+        >>> x = B.tensor([0., 1, 2, 3])
+        >>> y = B.tensor([0., 1, 2, 2])
+        >>> mean_absolute_error(x, y)
+        tensor(0.2500)
+    """
+    sum_abs_error, n_obs = _mean_absolute_error_update(preds, target)
+    return _mean_absolute_error_compute(sum_abs_error, n_obs)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/mean_absolute_percentage_error.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/mean_absolute_percentage_error.py
new file mode 100644
index 000000000..862617c01
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/mean_absolute_percentage_error.py
@@ -0,0 +1,91 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.utilities.checks import _check_same_shape
+
+
+def _mean_absolute_percentage_error_update(
+    preds: Tensor,
+    target: Tensor,
+    epsilon: float = 1.17e-06,
+) -> Tuple[Tensor, int]:
+    """Updates and returns variables required to compute Mean Absolute Percentage Error. Checks for same shape of
+    input tensors.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+        epsilon: Specifies the lower bound for target values. Any target value below epsilon
+            is set to epsilon (avoids ZeroDivisionError). default: 1.17e-06
+    """
+
+    _check_same_shape(preds, target)
+
+    abs_diff = B.abs(preds - target)
+    abs_per_error = abs_diff / B.clamp(B.abs(target), min=epsilon)
+
+    sum_abs_per_error = B.sum(abs_per_error)
+
+    num_obs = target.numel()
+
+    return sum_abs_per_error, num_obs
+
+
+def _mean_absolute_percentage_error_compute(sum_abs_per_error: Tensor, num_obs: int) -> Tensor:
+    """Computes Mean Absolute Percentage Error.
+
+    Args:
+        sum_abs_per_error: Sum of absolute value of percentage errors over all observations
+            (percentage error = (target - prediction) / target)
+        num_obs: Number of predictions or observations
+
+    Example:
+        >>> target = B.tensor([1, 10, 1e6])
+        >>> preds = B.tensor([0.9, 15, 1.2e6])
+        >>> sum_abs_per_error, num_obs = _mean_absolute_percentage_error_update(preds, target)
+        >>> _mean_absolute_percentage_error_compute(sum_abs_per_error, num_obs)
+        tensor(0.2667)
+    """
+
+    return sum_abs_per_error / num_obs
+
+
+def mean_absolute_percentage_error(preds: Tensor, target: Tensor) -> Tensor:
+    """Computes mean absolute percentage error.
+
+    Args:
+        preds: estimated labels
+        target: ground truth labels
+
+    Return:
+        Tensor with MAPE
+
+    Note:
+        The epsilon value is taken from `scikit-learn's implementation of MAPE`_.
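+
+    For the example below, the elementwise percentage errors are 0.1/1, 5/10 and 0.2e6/1e6,
+    so the metric evaluates to (0.1 + 0.5 + 0.2) / 3 = 0.2667.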
+ + Example: + >>> from paddlemetrics.functional import mean_absolute_percentage_error + >>> target = B.tensor([1, 10, 1e6]) + >>> preds = B.tensor([0.9, 15, 1.2e6]) + >>> mean_absolute_percentage_error(preds, target) + tensor(0.2667) + """ + sum_abs_per_error, num_obs = _mean_absolute_percentage_error_update(preds, target) + mean_ape = _mean_absolute_percentage_error_compute(sum_abs_per_error, num_obs) + + return mean_ape diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/mean_squared_error.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/mean_squared_error.py new file mode 100644 index 000000000..58af5d21b --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/mean_squared_error.py @@ -0,0 +1,74 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Tuple + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.utilities.checks import _check_same_shape + + +def _mean_squared_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]: + """Updates and returns variables required to compute Mean Squared Error. Checks for same shape of input + tensors. + + Args: + preds: Predicted tensor + target: Ground truth tensor + """ + _check_same_shape(preds, target) + diff = preds - target + sum_squared_error = B.sum(diff * diff) + n_obs = target.numel() + return sum_squared_error, n_obs + + +def _mean_squared_error_compute(sum_squared_error: Tensor, n_obs: int, squared: bool = True) -> Tensor: + """Computes Mean Squared Error. + + Args: + sum_squared_error: Sum of square of errors over all observations + n_obs: Number of predictions or observations + squared: Returns RMSE value if set to False. default: True + + Example: + >>> preds = B.tensor([0., 1, 2, 3]) + >>> target = B.tensor([0., 1, 2, 2]) + >>> sum_squared_error, n_obs = _mean_squared_error_update(preds, target) + >>> _mean_squared_error_compute(sum_squared_error, n_obs) + tensor(0.2500) + """ + return sum_squared_error / n_obs if squared else B.sqrt(sum_squared_error / n_obs) + + +def mean_squared_error(preds: Tensor, target: Tensor, squared: bool = True) -> Tensor: + """Computes mean squared error. 
+
+    Args:
+        preds: estimated labels
+        target: ground truth labels
+        squared: returns RMSE value if set to False
+
+    Return:
+        Tensor with MSE
+
+    Example:
+        >>> from paddlemetrics.functional import mean_squared_error
+        >>> x = B.tensor([0., 1, 2, 3])
+        >>> y = B.tensor([0., 1, 2, 2])
+        >>> mean_squared_error(x, y)
+        tensor(0.2500)
+    """
+    sum_squared_error, n_obs = _mean_squared_error_update(preds, target)
+    return _mean_squared_error_compute(sum_squared_error, n_obs, squared=squared)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/mean_squared_log_error.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/mean_squared_log_error.py
new file mode 100644
index 000000000..7270ffc00
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/mean_squared_log_error.py
@@ -0,0 +1,76 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.utilities.checks import _check_same_shape
+
+
+def _mean_squared_log_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]:
+    """Returns variables required to compute Mean Squared Log Error. Checks for same shape of tensors.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+    """
+
+    _check_same_shape(preds, target)
+    sum_squared_log_error = B.sum(B.pow(B.log1p(preds) - B.log1p(target), 2))
+    n_obs = target.numel()
+    return sum_squared_log_error, n_obs
+
+
+def _mean_squared_log_error_compute(sum_squared_log_error: Tensor, n_obs: int) -> Tensor:
+    """Computes Mean Squared Log Error.
+
+    Args:
+        sum_squared_log_error: Sum of square of log errors over all observations
+            (log error = log(target) - log(prediction))
+        n_obs: Number of predictions or observations
+
+    Example:
+        >>> preds = B.tensor([0., 1, 2, 3])
+        >>> target = B.tensor([0., 1, 2, 2])
+        >>> sum_squared_log_error, n_obs = _mean_squared_log_error_update(preds, target)
+        >>> _mean_squared_log_error_compute(sum_squared_log_error, n_obs)
+        tensor(0.0207)
+    """
+
+    return sum_squared_log_error / n_obs
+
+
+def mean_squared_log_error(preds: Tensor, target: Tensor) -> Tensor:
+    """Computes mean squared log error.
+
+    Args:
+        preds: estimated labels
+        target: ground truth labels
+
+    Return:
+        Tensor with MSLE
+
+    Example:
+        >>> from paddlemetrics.functional import mean_squared_log_error
+        >>> x = B.tensor([0., 1, 2, 3])
+        >>> y = B.tensor([0., 1, 2, 2])
+        >>> mean_squared_log_error(x, y)
+        tensor(0.0207)
+
+    .. note::
+        Half precision is only supported on GPU for this metric
+    """
+    sum_squared_log_error, n_obs = _mean_squared_log_error_update(preds, target)
+    return _mean_squared_log_error_compute(sum_squared_log_error, n_obs)
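+
+
+# The root variant (RMSLE) is simply the square root of the value returned here,
+# e.g. B.sqrt(mean_squared_log_error(x, y)).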
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/pearson.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/pearson.py
new file mode 100644
index 000000000..e1f7dd82f
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/pearson.py
@@ -0,0 +1,102 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.utilities.checks import _check_same_shape
+
+
+def _pearson_corrcoef_update(
+    preds: Tensor,
+    target: Tensor,
+    mean_x: Tensor,
+    mean_y: Tensor,
+    var_x: Tensor,
+    var_y: Tensor,
+    corr_xy: Tensor,
+    n_prior: Tensor,
+) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:
+    """Updates and returns variables required to compute Pearson Correlation Coefficient. Checks for same shape of
+    input tensors.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+        mean_x: current mean estimate of x tensor
+        mean_y: current mean estimate of y tensor
+        var_x: current variance estimate of x tensor
+        var_y: current variance estimate of y tensor
+        corr_xy: current covariance estimate between x and y tensor
+        n_prior: current number of observed observations
+    """
+    # Data checking
+    _check_same_shape(preds, target)
+    preds = preds.squeeze()
+    target = target.squeeze()
+    if preds.ndim > 1 or target.ndim > 1:
+        raise ValueError("Expected both predictions and target to be 1 dimensional tensors.")
+
+    n_obs = preds.numel()
+    mx_new = (n_prior * mean_x + preds.mean() * n_obs) / (n_prior + n_obs)
+    my_new = (n_prior * mean_y + target.mean() * n_obs) / (n_prior + n_obs)
+    n_prior += n_obs
+    var_x += ((preds - mx_new) * (preds - mean_x)).sum()
+    var_y += ((target - my_new) * (target - mean_y)).sum()
+    corr_xy += ((preds - mx_new) * (target - mean_y)).sum()
+    mean_x = mx_new
+    mean_y = my_new
+
+    return mean_x, mean_y, var_x, var_y, corr_xy, n_prior
+
+
+def _pearson_corrcoef_compute(
+    var_x: Tensor,
+    var_y: Tensor,
+    corr_xy: Tensor,
+    nb: Tensor,
+) -> Tensor:
+    """Computes the final Pearson correlation based on accumulated statistics.
+
+    Args:
+        var_x: variance estimate of x tensor
+        var_y: variance estimate of y tensor
+        corr_xy: covariance estimate between x and y tensor
+        nb: number of observations
+    """
+    var_x /= nb - 1
+    var_y /= nb - 1
+    corr_xy /= nb - 1
+    corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze()
+    return B.clamp(corrcoef, -1.0, 1.0)
+
+
+def pearson_corrcoef(preds: Tensor, target: Tensor) -> Tensor:
+    """Computes the Pearson correlation coefficient.
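+
+    The underlying update function keeps running means, variances and the covariance,
+    so the same machinery also supports batched, streaming accumulation.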
+
+    Args:
+        preds: estimated scores
+        target: ground truth scores
+
+    Example:
+        >>> from paddlemetrics.functional import pearson_corrcoef
+        >>> target = B.tensor([3, -0.5, 2, 7])
+        >>> preds = B.tensor([2.5, 0.0, 2, 8])
+        >>> pearson_corrcoef(preds, target)
+        tensor(0.9849)
+    """
+    _temp = B.zeros(1, dtype=preds.dtype, device=preds.device)
+    mean_x, mean_y, var_x = _temp.clone(), _temp.clone(), _temp.clone()
+    var_y, corr_xy, nb = _temp.clone(), _temp.clone(), _temp.clone()
+    _, _, var_x, var_y, corr_xy, nb = _pearson_corrcoef_update(preds, target, mean_x, mean_y, var_x, var_y, corr_xy, nb)
+    return _pearson_corrcoef_compute(var_x, var_y, corr_xy, nb)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/r2.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/r2.py
new file mode 100644
index 000000000..a83219122
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/r2.py
@@ -0,0 +1,173 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.utilities import rank_zero_warn
+from paddlemetrics.utilities.checks import _check_same_shape
+
+
+def _r2_score_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
+    """Updates and returns variables required to compute R2 score. Checks for same shape and 1D/2D input tensors.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+    """
+
+    _check_same_shape(preds, target)
+    if preds.ndim > 2:
+        raise ValueError(
+            "Expected both prediction and target to be 1D or 2D tensors,"
+            f" but received tensors with dimension {preds.shape}"
+        )
+
+    sum_obs = B.sum(target, dim=0)
+    sum_squared_obs = B.sum(target * target, dim=0)
+    residual = target - preds
+    rss = B.sum(residual * residual, dim=0)
+    n_obs = target.size(0)
+
+    return sum_squared_obs, sum_obs, rss, n_obs
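+
+
+# These running sums are sufficient statistics for the r2 score: rss is accumulated
+# directly, and the total sum of squares is recovered in the compute step as
+# sum_squared_obs - sum_obs * mean_obs.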
+
+
+def _r2_score_compute(
+    sum_squared_obs: Tensor,
+    sum_obs: Tensor,
+    rss: Tensor,
+    n_obs: Tensor,
+    adjusted: int = 0,
+    multioutput: str = "uniform_average",
+) -> Tensor:
+    """Computes R2 score.
+
+    Args:
+        sum_squared_obs: Sum of square of all observations
+        sum_obs: Sum of all observations
+        rss: Residual sum of squares
+        n_obs: Number of predictions or observations
+        adjusted: number of independent regressors for calculating adjusted r2 score.
+            Default 0 (standard r2 score).
+        multioutput: Defines aggregation in the case of multiple output scores. Can be one
+            of the following strings (default is `'uniform_average'`.):
+
+            * `'raw_values'` returns full set of scores
+            * `'uniform_average'` scores are uniformly averaged
+            * `'variance_weighted'` scores are weighted by their individual variances
+
+    Example:
+        >>> target = B.tensor([[0.5, 1], [-1, 1], [7, -6]])
+        >>> preds = B.tensor([[0, 2], [-1, 2], [8, -5]])
+        >>> sum_squared_obs, sum_obs, rss, n_obs = _r2_score_update(preds, target)
+        >>> _r2_score_compute(sum_squared_obs, sum_obs, rss, n_obs, multioutput="raw_values")
+        tensor([0.9654, 0.9082])
+    """
+    if n_obs < 2:
+        raise ValueError("Needs at least two samples to calculate r2 score.")
+
+    mean_obs = sum_obs / n_obs
+    tss = sum_squared_obs - sum_obs * mean_obs
+    raw_scores = 1 - (rss / tss)
+
+    if multioutput == "raw_values":
+        r2 = raw_scores
+    elif multioutput == "uniform_average":
+        r2 = B.mean(raw_scores)
+    elif multioutput == "variance_weighted":
+        tss_sum = B.sum(tss)
+        r2 = B.sum(tss / tss_sum * raw_scores)
+    else:
+        raise ValueError(
+            "Argument `multioutput` must be either `raw_values`,"
+            f" `uniform_average` or `variance_weighted`. Received {multioutput}."
+        )
+
+    if adjusted < 0 or not isinstance(adjusted, int):
+        raise ValueError("`adjusted` parameter should be an integer larger than or equal to 0.")
+
+    if adjusted != 0:
+        if adjusted > n_obs - 1:
+            rank_zero_warn(
+                "More independent regressions than data points in"
+                " adjusted r2 score. Falls back to standard r2 score.",
+                UserWarning,
+            )
+        elif adjusted == n_obs - 1:
+            rank_zero_warn("Division by zero in adjusted r2 score. Falls back to standard r2 score.", UserWarning)
+        else:
+            r2 = 1 - (1 - r2) * (n_obs - 1) / (n_obs - adjusted - 1)
+    return r2
+
+
+def r2_score(
+    preds: Tensor,
+    target: Tensor,
+    adjusted: int = 0,
+    multioutput: str = "uniform_average",
+) -> Tensor:
+    r"""
+    Computes r2 score also known as `R2 Score_Coefficient Determination`_:
+
+    .. math:: R^2 = 1 - \frac{SS_{res}}{SS_{tot}}
+
+    where :math:`SS_{res}=\sum_i (y_i - f(x_i))^2` is the sum of residual squares, and
+    :math:`SS_{tot}=\sum_i (y_i - \bar{y})^2` is total sum of squares. Can also calculate
+    adjusted r2 score given by
+
+    .. math:: R^2_{adj} = 1 - \frac{(1-R^2)(n-1)}{n-k-1}
+
+    where the parameter :math:`k` (the number of independent regressors) should
+    be provided as the ``adjusted`` argument.
+
+    Args:
+        preds: estimated labels
+        target: ground truth labels
+        adjusted: number of independent regressors for calculating adjusted r2 score.
+            Default 0 (standard r2 score).
+        multioutput: Defines aggregation in the case of multiple output scores. Can be one
+            of the following strings (default is ``'uniform_average'``.):
+
+            * ``'raw_values'`` returns full set of scores
+            * ``'uniform_average'`` scores are uniformly averaged
+            * ``'variance_weighted'`` scores are weighted by their individual variances
+
+    Raises:
+        ValueError:
+            If both ``preds`` and ``targets`` are not ``1D`` or ``2D`` tensors.
+        ValueError:
+            If ``len(preds)`` is less than ``2``
+            since at least ``2`` samples are needed to calculate r2 score.
+        ValueError:
+            If ``multioutput`` is not one of ``raw_values``,
+            ``uniform_average`` or ``variance_weighted``.
+        ValueError:
+            If ``adjusted`` is not an ``integer`` greater than or equal to ``0``.
+
+    Example:
+        >>> from paddlemetrics.functional import r2_score
+        >>> target = B.tensor([3, -0.5, 2, 7])
+        >>> preds = B.tensor([2.5, 0.0, 2, 8])
+        >>> r2_score(preds, target)
+        tensor(0.9486)
+
+        >>> target = B.tensor([[0.5, 1], [-1, 1], [7, -6]])
+        >>> preds = B.tensor([[0, 2], [-1, 2], [8, -5]])
+        >>> r2_score(preds, target, multioutput='raw_values')
+        tensor([0.9654, 0.9082])
+
+    """
+    sum_squared_obs, sum_obs, rss, n_obs = _r2_score_update(preds, target)
+    return _r2_score_compute(sum_squared_obs, sum_obs, rss, n_obs, adjusted, multioutput)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/spearman.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/spearman.py
new file mode 100644
index 000000000..62f7a9d4a
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/spearman.py
@@ -0,0 +1,129 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.utilities.checks import _check_same_shape
+
+
+def _find_repeats(data: Tensor) -> Tensor:
+    """Find and return values which have repeats, i.e. values that occur more than once in the tensor."""
+    temp = data.detach().clone()
+    temp = temp.sort()[0]
+
+    change = B.cat([B.tensor([True], device=temp.device), temp[1:] != temp[:-1]])
+    unique = temp[change]
+    change_idx = B.cat([B.nonzero(change), B.tensor([[temp.numel()]], device=temp.device)]).flatten()
+    freq = change_idx[1:] - change_idx[:-1]
+    atleast2 = freq > 1
+    return unique[atleast2]
+
+
+def _rank_data(data: Tensor) -> Tensor:
+    """Calculate the rank for each element of a tensor. The rank refers to the indices of an element in the
+    corresponding sorted tensor (starting from 1). Duplicates of the same value will be assigned the mean of their
+    rank.
+
+    Adapted from: `Rank of element tensor`_
+    """
+    n = data.numel()
+    rank = B.empty_like(data)
+    idx = data.argsort()
+    rank[idx[:n]] = B.arange(1, n + 1, dtype=data.dtype, device=data.device)
+
+    repeats = _find_repeats(data)
+    for r in repeats:
+        condition = data == r
+        rank[condition] = rank[condition].mean()
+    return rank
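+
+
+# For example, _rank_data(B.tensor([1., 5., 3., 3.])) gives ranks [1.0, 4.0, 2.5, 2.5]:
+# the two tied values share the mean of ranks 2 and 3.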
+        )
+    _check_same_shape(preds, target)
+    preds = preds.squeeze()
+    target = target.squeeze()
+    if preds.ndim > 1 or target.ndim > 1:
+        raise ValueError("Expected both predictions and target to be 1 dimensional tensors.")
+    return preds, target
+
+
+def _spearman_corrcoef_compute(preds: Tensor, target: Tensor, eps: float = 1e-6) -> Tensor:
+    """Computes Spearman Correlation Coefficient.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+        eps: Avoids ZeroDivisionError. Default: 1e-6
+
+    Example:
+        >>> target = B.tensor([3, -0.5, 2, 7])
+        >>> preds = B.tensor([2.5, 0.0, 2, 8])
+        >>> preds, target = _spearman_corrcoef_update(preds, target)
+        >>> _spearman_corrcoef_compute(preds, target)
+        tensor(1.0000)
+    """
+
+    preds = _rank_data(preds)
+    target = _rank_data(target)
+
+    preds_diff = preds - preds.mean()
+    target_diff = target - target.mean()
+
+    cov = (preds_diff * target_diff).mean()
+    preds_std = B.sqrt((preds_diff * preds_diff).mean())
+    target_std = B.sqrt((target_diff * target_diff).mean())
+
+    corrcoef = cov / (preds_std * target_std + eps)
+    return B.clamp(corrcoef, -1.0, 1.0)
+
+
+def spearman_corrcoef(preds: Tensor, target: Tensor) -> Tensor:
+    r"""
+    Computes `spearmans rank correlation coefficient`_:
+
+    .. math::
+        r_s = \frac{\operatorname{cov}(rg_x, rg_y)}{\sigma_{rg_x} \sigma_{rg_y}}
+
+    where :math:`rg_x` and :math:`rg_y` are the ranks associated with the variables x and y.
+    Spearman's rank correlation coefficient corresponds to the standard Pearson correlation
+    coefficient computed on the rank variables.
+
+    Args:
+        preds: estimated scores
+        target: ground truth scores
+
+    Example:
+        >>> from paddlemetrics.functional import spearman_corrcoef
+        >>> target = B.tensor([3, -0.5, 2, 7])
+        >>> preds = B.tensor([2.5, 0.0, 2, 8])
+        >>> spearman_corrcoef(preds, target)
+        tensor(1.0000)
+
+    """
+    preds, target = _spearman_corrcoef_update(preds, target)
+    return _spearman_corrcoef_compute(preds, target)
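The tie handling in `_rank_data` is easiest to see on a tiny input. A minimal sketch, assuming the
module is importable under the package path added in this patch (`_rank_data` is a private helper):

    import paddleext.torchapi as B
    from paddlemetrics.functional.regression.spearman import _rank_data

    data = B.tensor([1.0, 2.0, 2.0, 3.0])
    # The two 2.0 entries would occupy ordinal ranks 2 and 3, so each is assigned
    # their mean, giving tensor([1.0000, 2.5000, 2.5000, 4.0000]).
    print(_rank_data(data))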
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/symmetric_mean_absolute_percentage_error.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/symmetric_mean_absolute_percentage_error.py
new file mode 100644
index 000000000..89eadf9e6
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/symmetric_mean_absolute_percentage_error.py
@@ -0,0 +1,99 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.utilities.checks import _check_same_shape
+
+
+def _symmetric_mean_absolute_percentage_error_update(
+    preds: Tensor,
+    target: Tensor,
+    epsilon: float = 1.17e-06,
+) -> Tuple[Tensor, int]:
+    """Updates and returns variables required to compute Symmetric Mean Absolute Percentage Error. Checks for same
+    shape of input tensors.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+        epsilon: Avoids ZeroDivisionError. Default: 1.17e-06
+    """
+
+    _check_same_shape(preds, target)
+
+    abs_diff = B.abs(preds - target)
+    abs_per_error = abs_diff / B.clamp(B.abs(target) + B.abs(preds), min=epsilon)
+
+    sum_abs_per_error = 2 * B.sum(abs_per_error)
+
+    num_obs = target.numel()
+
+    return sum_abs_per_error, num_obs
+
+
+def _symmetric_mean_absolute_percentage_error_compute(sum_abs_per_error: Tensor, num_obs: int) -> Tensor:
+    """Computes Symmetric Mean Absolute Percentage Error.
+
+    Args:
+        sum_abs_per_error: Sum of values of symmetric absolute percentage errors over all observations
+            (symmetric absolute percentage error = 2 * |target - prediction| / max(|target| + |prediction|, epsilon))
+        num_obs: Number of predictions or observations
+
+    Example:
+        >>> target = B.tensor([1, 10, 1e6])
+        >>> preds = B.tensor([0.9, 15, 1.2e6])
+        >>> sum_abs_per_error, num_obs = _symmetric_mean_absolute_percentage_error_update(preds, target)
+        >>> _symmetric_mean_absolute_percentage_error_compute(sum_abs_per_error, num_obs)
+        tensor(0.2290)
+    """
+
+    return sum_abs_per_error / num_obs
+
+
+def symmetric_mean_absolute_percentage_error(preds: Tensor, target: Tensor) -> Tensor:
+    r"""
+    Computes symmetric mean absolute percentage error (SMAPE_):
+
+    .. math:: \text{SMAPE} = \frac{2}{n}\sum_{i=1}^{n}\frac{|y_i - \hat{y_i}|}{\max(|y_i| + |\hat{y_i}|, \epsilon)}
+
+    where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
+
+    Args:
+        preds: estimated labels
+        target: ground truth labels
+
+    Return:
+        Tensor with SMAPE.
+
+    Example:
+        >>> from paddlemetrics.functional import symmetric_mean_absolute_percentage_error
+        >>> target = B.tensor([1, 10, 1e6])
+        >>> preds = B.tensor([0.9, 15, 1.2e6])
+        >>> symmetric_mean_absolute_percentage_error(preds, target)
+        tensor(0.2290)
+
+    """
+    sum_abs_per_error, num_obs = _symmetric_mean_absolute_percentage_error_update(
+        preds,
+        target,
+    )
+    mean_ape = _symmetric_mean_absolute_percentage_error_compute(
+        sum_abs_per_error,
+        num_obs,
+    )
+
+    return mean_ape
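The docstring example above can be verified by hand. A small plain-Python sketch of the same
reduction (the `epsilon` clamp is omitted because no denominator here is near zero):

    preds, target = [0.9, 15.0, 1.2e6], [1.0, 10.0, 1e6]
    per_elem = [2 * abs(p - t) / (abs(p) + abs(t)) for p, t in zip(preds, target)]
    # per_elem ~= [0.1053, 0.4000, 0.1818]; their mean is ~0.2290, matching tensor(0.2290)
    print(sum(per_elem) / len(per_elem))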
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/tweedie_deviance.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/tweedie_deviance.py
new file mode 100644
index 000000000..7cb366a2c
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/tweedie_deviance.py
@@ -0,0 +1,139 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.utilities.checks import _check_same_shape
+
+
+def _tweedie_deviance_score_update(preds: Tensor, targets: Tensor, power: float = 0.0) -> Tuple[Tensor, Tensor]:
+    """Updates and returns variables required to compute Deviance Score for the given power. Checks for same shape
+    of input tensors.
+
+    Args:
+        preds: Predicted tensor
+        targets: Ground truth tensor
+        power: see :func:`tweedie_deviance_score`
+
+    Example:
+        >>> targets = B.tensor([1.0, 2.0, 3.0, 4.0])
+        >>> preds = B.tensor([4.0, 3.0, 2.0, 1.0])
+        >>> _tweedie_deviance_score_update(preds, targets, power=2)
+        (tensor(4.8333), tensor(4))
+    """
+    _check_same_shape(preds, targets)
+
+    zero_tensor = B.zeros(preds.shape, device=preds.device)
+
+    if 0 < power < 1:
+        raise ValueError(f"Deviance Score is not defined for power={power}.")
+
+    if power == 0:
+        deviance_score = B.pow(targets - preds, exponent=2)
+    elif power == 1:
+        # Poisson distribution
+        if B.any(preds <= 0) or B.any(targets < 0):
+            raise ValueError(
+                f"For power={power}, 'preds' has to be strictly positive and 'targets' cannot be negative."
+            )
+
+        deviance_score = 2 * (targets * B.log(targets / preds) + preds - targets)
+    elif power == 2:
+        # Gamma distribution
+        if B.any(preds <= 0) or B.any(targets <= 0):
+            raise ValueError(f"For power={power}, both 'preds' and 'targets' have to be strictly positive.")
+
+        deviance_score = 2 * (B.log(preds / targets) + (targets / preds) - 1)
+    else:
+        if power < 0:
+            if B.any(preds <= 0):
+                raise ValueError(f"For power={power}, 'preds' has to be strictly positive.")
+        elif 1 < power < 2:
+            if B.any(preds <= 0) or B.any(targets < 0):
+                raise ValueError(
+                    f"For power={power}, 'preds' has to be strictly positive and 'targets' cannot be negative."
+                )
+        else:
+            if B.any(preds <= 0) or B.any(targets <= 0):
+                raise ValueError(f"For power={power}, both 'preds' and 'targets' have to be strictly positive.")
+
+        term_1 = B.pow(B.max(targets, zero_tensor), 2 - power) / ((1 - power) * (2 - power))
+        term_2 = targets * B.pow(preds, 1 - power) / (1 - power)
+        term_3 = B.pow(preds, 2 - power) / (2 - power)
+        deviance_score = 2 * (term_1 - term_2 + term_3)
+
+    sum_deviance_score = B.sum(deviance_score)
+    num_observations = B.tensor(B.numel(deviance_score), device=preds.device)
+
+    return sum_deviance_score, num_observations
+
+
+def _tweedie_deviance_score_compute(sum_deviance_score: Tensor, num_observations: Tensor) -> Tensor:
+    """Computes Deviance Score.
+
+    Args:
+        sum_deviance_score: Sum of deviance scores accumulated until now.
+        num_observations: Number of observations encountered until now.
+
+    Example:
+        >>> targets = B.tensor([1.0, 2.0, 3.0, 4.0])
+        >>> preds = B.tensor([4.0, 3.0, 2.0, 1.0])
+        >>> sum_deviance_score, num_observations = _tweedie_deviance_score_update(preds, targets, power=2)
+        >>> _tweedie_deviance_score_compute(sum_deviance_score, num_observations)
+        tensor(1.2083)
+    """
+
+    return sum_deviance_score / num_observations
+
+
+def tweedie_deviance_score(preds: Tensor, targets: Tensor, power: float = 0.0) -> Tensor:
+    r"""
+    Computes the `Tweedie Deviance Score`_ between targets and predictions:
+
+    .. math::
+        deviance\_score(\hat{y},y) =
+        \begin{cases}
+        (\hat{y} - y)^2, & \text{for }power=0\\
+        2 * (y * log(\frac{y}{\hat{y}}) + \hat{y} - y), & \text{for }power=1\\
+        2 * (log(\frac{\hat{y}}{y}) + \frac{y}{\hat{y}} - 1), & \text{for }power=2\\
+        2 * (\frac{(max(y,0))^{2 - power}}{(1 - power)(2 - power)} - \frac{y(\hat{y})^{1 - power}}{1 - power}
+        + \frac{(\hat{y})^{2 - power}}{2 - power}), & \text{otherwise}
+        \end{cases}
+
+    where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
+
+    Args:
+        preds: Predicted tensor with shape ``(N,...)``
+        targets: Ground truth tensor with shape ``(N,...)``
+        power:
+            - power < 0 : Extreme stable distribution. (Requires: preds > 0.)
+ - power = 0 : Normal distribution. (Requires: targets and preds can be any real numbers.) + - power = 1 : Poisson distribution. (Requires: targets >= 0 and y_pred > 0.) + - 1 < p < 2 : Compound Poisson distribution. (Requires: targets >= 0 and preds > 0.) + - power = 2 : Gamma distribution. (Requires: targets > 0 and preds > 0.) + - power = 3 : Inverse Gaussian distribution. (Requires: targets > 0 and preds > 0.) + - otherwise : Positive stable distribution. (Requires: targets > 0 and preds > 0.) + + Example: + >>> from paddlemetrics.functional import tweedie_deviance_score + >>> targets = B.tensor([1.0, 2.0, 3.0, 4.0]) + >>> preds = B.tensor([4.0, 3.0, 2.0, 1.0]) + >>> tweedie_deviance_score(preds, targets, power=2) + tensor(1.2083) + + """ + sum_deviance_score, num_observations = _tweedie_deviance_score_update(preds, targets, power=power) + return _tweedie_deviance_score_compute(sum_deviance_score, num_observations) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/__init__.py new file mode 100644 index 000000000..d05abb6af --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/__init__.py @@ -0,0 +1,22 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from paddlemetrics.functional.retrieval.average_precision import retrieval_average_precision # noqa: F401 +from paddlemetrics.functional.retrieval.fall_out import retrieval_fall_out # noqa: F401 +from paddlemetrics.functional.retrieval.hit_rate import retrieval_hit_rate # noqa: F401 +from paddlemetrics.functional.retrieval.ndcg import retrieval_normalized_dcg # noqa: F401 +from paddlemetrics.functional.retrieval.precision import retrieval_precision # noqa: F401 +from paddlemetrics.functional.retrieval.r_precision import retrieval_r_precision # noqa: F401 +from paddlemetrics.functional.retrieval.recall import retrieval_recall # noqa: F401 +from paddlemetrics.functional.retrieval.reciprocal_rank import retrieval_reciprocal_rank # noqa: F401 diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/average_precision.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/average_precision.py new file mode 100644 index 000000000..0b067a892 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/average_precision.py @@ -0,0 +1,49 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.utilities.checks import _check_retrieval_functional_inputs + + +def retrieval_average_precision(preds: Tensor, target: Tensor) -> Tensor: + """Computes average precision (for information retrieval), as explained in `IR Average precision`_. + + ``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``, + ``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be `float`, + otherwise an error is raised. + + Args: + preds: estimated probabilities of each document to be relevant. + target: ground truth about each document being relevant or not. + + Return: + a single-value tensor with the average precision (AP) of the predictions ``preds`` w.r.t. the labels ``target``. + + Example: + >>> from paddlemetrics.functional import retrieval_average_precision + >>> preds = tensor([0.2, 0.3, 0.5]) + >>> target = tensor([True, False, True]) + >>> retrieval_average_precision(preds, target) + tensor(0.8333) + """ + preds, target = _check_retrieval_functional_inputs(preds, target) + + if not target.sum(): + return tensor(0.0, device=preds.device) + + target = target[B.argsort(preds, dim=-1, descending=True)] + positions = B.arange(1, len(target) + 1, device=target.device, dtype=B.float32)[target > 0] + res = B.div((B.arange(len(positions), device=positions.device, dtype=B.float32) + 1), positions).mean() + return res diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/fall_out.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/fall_out.py new file mode 100644 index 000000000..10c5762b0 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/fall_out.py @@ -0,0 +1,62 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.utilities.checks import _check_retrieval_functional_inputs + + +def retrieval_fall_out(preds: Tensor, target: Tensor, k: Optional[int] = None) -> Tensor: + """Computes the Fall-out (for information retrieval), as explained in `IR Fall-out`_ Fall-out is the fraction + of non-relevant documents retrieved among all the non-relevant documents. + + ``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``, + ``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be `float`, + otherwise an error is raised. If you want to measure Fall-out@K, ``k`` must be a positive integer. + + Args: + preds: estimated probabilities of each document to be relevant. + target: ground truth about each document being relevant or not. 
+ k: consider only the top k elements (default: None, which considers them all) + + Returns: + a single-value tensor with the fall-out (at ``k``) of the predictions ``preds`` w.r.t. the labels ``target``. + + Raises: + ValueError: + If ``k`` parameter is not `None` or an integer larger than 0 + + Example: + >>> from paddlemetrics.functional import retrieval_fall_out + >>> preds = tensor([0.2, 0.3, 0.5]) + >>> target = tensor([True, False, True]) + >>> retrieval_fall_out(preds, target, k=2) + tensor(1.) + """ + preds, target = _check_retrieval_functional_inputs(preds, target) + + k = preds.shape[-1] if k is None else k + + if not (isinstance(k, int) and k > 0): + raise ValueError("`k` has to be a positive integer or None") + + target = 1 - target + + if not target.sum(): + return tensor(0.0, device=preds.device) + + relevant = target[B.argsort(preds, dim=-1, descending=True)][:k].sum().float() + return relevant / target.sum() diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/hit_rate.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/hit_rate.py new file mode 100644 index 000000000..83336a50b --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/hit_rate.py @@ -0,0 +1,57 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.utilities.checks import _check_retrieval_functional_inputs + + +def retrieval_hit_rate(preds: Tensor, target: Tensor, k: Optional[int] = None) -> Tensor: + """Computes the hit rate (for information retrieval). The hit rate is 1.0 if there is at least one relevant + document among all the top `k` retrieved documents. + + ``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``, + ``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be `float`, + otherwise an error is raised. If you want to measure HitRate@K, ``k`` must be a positive integer. + + Args: + preds: estimated probabilities of each document to be relevant. + target: ground truth about each document being relevant or not. + k: consider only the top k elements (default: None, which considers them all) + + Returns: + a single-value tensor with the hit rate (at ``k``) of the predictions ``preds`` w.r.t. the labels ``target``. + + Raises: + ValueError: + If ``k`` parameter is not `None` or an integer larger than 0 + + Example: + >>> preds = tensor([0.2, 0.3, 0.5]) + >>> target = tensor([True, False, True]) + >>> retrieval_hit_rate(preds, target, k=2) + tensor(1.) 
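+
+        With ``k=None`` (the default) all retrieved documents are considered; here the result
+        would be unchanged, since the full list still contains a relevant document:
+
+        >>> retrieval_hit_rate(preds, target)
+        tensor(1.)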
+ """ + preds, target = _check_retrieval_functional_inputs(preds, target) + + if k is None: + k = preds.shape[-1] + + if not (isinstance(k, int) and k > 0): + raise ValueError("`k` has to be a positive integer or None") + + relevant = target[B.argsort(preds, dim=-1, descending=True)][:k].sum() + return (relevant > 0).float() diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/ndcg.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/ndcg.py new file mode 100644 index 000000000..73fedad5e --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/ndcg.py @@ -0,0 +1,72 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.utilities.checks import _check_retrieval_functional_inputs + + +def _dcg(target: Tensor) -> Tensor: + """Computes Discounted Cumulative Gain for input tensor.""" + denom = B.log2(B.arange(target.shape[-1], device=target.device) + 2.0) + return (target / denom).sum(dim=-1) + + +def retrieval_normalized_dcg(preds: Tensor, target: Tensor, k: Optional[int] = None) -> Tensor: + """Computes `Normalized Discounted Cumulative Gain`_ (for information retrieval). + + ``preds`` and ``target`` should be of the same shape and live on the same device. + ``target`` must be either `bool` or `integers` and ``preds`` must be `float`, + otherwise an error is raised. + + Args: + preds: estimated probabilities of each document to be relevant. + target: ground truth about each document relevance. + k: consider only the top k elements (default: None, which considers them all) + + Return: + a single-value tensor with the nDCG of the predictions ``preds`` w.r.t. the labels ``target``. 
+ + Raises: + ValueError: + If ``k`` parameter is not `None` or an integer larger than 0 + + Example: + >>> from paddlemetrics.functional import retrieval_normalized_dcg + >>> preds = B.tensor([.1, .2, .3, 4, 70]) + >>> target = B.tensor([10, 0, 0, 1, 5]) + >>> retrieval_normalized_dcg(preds, target) + tensor(0.6957) + """ + preds, target = _check_retrieval_functional_inputs(preds, target, allow_non_binary_target=True) + + k = preds.shape[-1] if k is None else k + + if not (isinstance(k, int) and k > 0): + raise ValueError("`k` has to be a positive integer or None") + + sorted_target = target[B.argsort(preds, dim=-1, descending=True)][:k] + ideal_target = B.sort(target, descending=True)[0][:k] + + ideal_dcg = _dcg(ideal_target) + target_dcg = _dcg(sorted_target) + + # filter undefined scores + all_irrelevant = ideal_dcg == 0 + target_dcg[all_irrelevant] = 0 + target_dcg[~all_irrelevant] /= ideal_dcg[~all_irrelevant] + + return target_dcg.mean() diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/precision.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/precision.py new file mode 100644 index 000000000..83bd11727 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/precision.py @@ -0,0 +1,60 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.utilities.checks import _check_retrieval_functional_inputs + + +def retrieval_precision(preds: Tensor, target: Tensor, k: Optional[int] = None) -> Tensor: + """Computes the precision metric (for information retrieval). Precision is the fraction of relevant documents + among all the retrieved documents. + + ``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``, + ``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be `float`, + otherwise an error is raised. If you want to measure Precision@K, ``k`` must be a positive integer. + + Args: + preds: estimated probabilities of each document to be relevant. + target: ground truth about each document being relevant or not. + k: consider only the top k elements (default: None, which considers them all) + + Returns: + a single-value tensor with the precision (at ``k``) of the predictions ``preds`` w.r.t. the labels ``target``. 
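+
+    Note:
+        The denominator is always ``k`` (or the full list length when ``k=None``), not the
+        number of retrieved relevant documents; for the variant that divides by the number of
+        relevant documents, see ``retrieval_r_precision`` below.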
+ + Raises: + ValueError: + If ``k`` parameter is not `None` or an integer larger than 0 + + Example: + >>> preds = tensor([0.2, 0.3, 0.5]) + >>> target = tensor([True, False, True]) + >>> retrieval_precision(preds, target, k=2) + tensor(0.5000) + """ + preds, target = _check_retrieval_functional_inputs(preds, target) + + if k is None: + k = preds.shape[-1] + + if not (isinstance(k, int) and k > 0): + raise ValueError("`k` has to be a positive integer or None") + + if not target.sum(): + return tensor(0.0, device=preds.device) + + relevant = target[B.argsort(preds, dim=-1, descending=True)][:k].sum().float() + return relevant / k diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/r_precision.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/r_precision.py new file mode 100644 index 000000000..d26e32f8b --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/r_precision.py @@ -0,0 +1,49 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.utilities.checks import _check_retrieval_functional_inputs + + +def retrieval_r_precision(preds: Tensor, target: Tensor) -> Tensor: + """Computes the r-precision metric (for information retrieval). R-Precision is the fraction of relevant + documents among all the top ``k`` retrieved documents where ``k`` is equal to the total number of relevant + documents. + + ``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``, + ``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be `float`, + otherwise an error is raised. If you want to measure Precision@K, ``k`` must be a positive integer. + + Args: + preds: estimated probabilities of each document to be relevant. + target: ground truth about each document being relevant or not. + + Returns: + a single-value tensor with the r-precision of the predictions ``preds`` w.r.t. the labels ``target``. + + Example: + >>> preds = tensor([0.2, 0.3, 0.5]) + >>> target = tensor([True, False, True]) + >>> retrieval_r_precision(preds, target) + tensor(0.5000) + """ + preds, target = _check_retrieval_functional_inputs(preds, target) + + relevant_number = target.sum() + if not relevant_number: + return tensor(0.0, device=preds.device) + + relevant = target[B.argsort(preds, dim=-1, descending=True)][:relevant_number].sum().float() + return relevant / relevant_number diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/recall.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/recall.py new file mode 100644 index 000000000..e00d450c3 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/recall.py @@ -0,0 +1,61 @@ +# Copyright The PyTorch Lightning team. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.utilities.checks import _check_retrieval_functional_inputs + + +def retrieval_recall(preds: Tensor, target: Tensor, k: Optional[int] = None) -> Tensor: + """Computes the recall metric (for information retrieval). Recall is the fraction of relevant documents + retrieved among all the relevant documents. + + ``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``, + ``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be `float`, + otherwise an error is raised. If you want to measure Recall@K, ``k`` must be a positive integer. + + Args: + preds: estimated probabilities of each document to be relevant. + target: ground truth about each document being relevant or not. + k: consider only the top k elements (default: None, which considers them all) + + Returns: + a single-value tensor with the recall (at ``k``) of the predictions ``preds`` w.r.t. the labels ``target``. + + Raises: + ValueError: + If ``k`` parameter is not `None` or an integer larger than 0 + + Example: + >>> from paddlemetrics.functional import retrieval_recall + >>> preds = tensor([0.2, 0.3, 0.5]) + >>> target = tensor([True, False, True]) + >>> retrieval_recall(preds, target, k=2) + tensor(0.5000) + """ + preds, target = _check_retrieval_functional_inputs(preds, target) + + if k is None: + k = preds.shape[-1] + + if not (isinstance(k, int) and k > 0): + raise ValueError("`k` has to be a positive integer or None") + + if not target.sum(): + return tensor(0.0, device=preds.device) + + relevant = target[B.argsort(preds, dim=-1, descending=True)][:k].sum().float() + return relevant / target.sum() diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/reciprocal_rank.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/reciprocal_rank.py new file mode 100644 index 000000000..c92c223ec --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/reciprocal_rank.py @@ -0,0 +1,49 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.utilities.checks import _check_retrieval_functional_inputs + + +def retrieval_reciprocal_rank(preds: Tensor, target: Tensor) -> Tensor: + """Computes reciprocal rank (for information retrieval). See `Mean Reciprocal Rank`_ + + ``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``, + 0 is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be `float`, + otherwise an error is raised. + + Args: + preds: estimated probabilities of each document to be relevant. + target: ground truth about each document being relevant or not. + + Return: + a single-value tensor with the reciprocal rank (RR) of the predictions ``preds`` wrt the labels ``target``. + + Example: + >>> from paddlemetrics.functional import retrieval_reciprocal_rank + >>> preds = B.tensor([0.2, 0.3, 0.5]) + >>> target = B.tensor([False, True, False]) + >>> retrieval_reciprocal_rank(preds, target) + tensor(0.5000) + """ + preds, target = _check_retrieval_functional_inputs(preds, target) + + if not target.sum(): + return tensor(0.0, device=preds.device) + + target = target[B.argsort(preds, dim=-1, descending=True)] + position = B.nonzero(target).view(-1) + res = 1.0 / (position[0] + 1.0) + return res diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/self_supervised.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/self_supervised.py new file mode 100644 index 000000000..9af407aae --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/self_supervised.py @@ -0,0 +1,57 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from warnings import warn + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.functional.pairwise import pairwise_cosine_similarity, pairwise_linear_similarity + + +def embedding_similarity( + batch: Tensor, similarity: str = "cosine", reduction: str = "none", zero_diagonal: bool = True +) -> Tensor: + """Computes representation similarity. + + Example: + >>> from paddlemetrics.functional import embedding_similarity + >>> embeddings = B.tensor([[1., 2., 3., 4.], [1., 2., 3., 4.], [4., 5., 6., 7.]]) + >>> embedding_similarity(embeddings) + tensor([[0.0000, 1.0000, 0.9759], + [1.0000, 0.0000, 0.9759], + [0.9759, 0.9759, 0.0000]]) + + Args: + batch: (batch, dim) + similarity: 'dot' or 'cosine' + reduction: 'none', 'sum', 'mean' (all along dim -1) + zero_diagonal: if True, the diagonals are set to zero + + Return: + A square matrix (batch, batch) with the similarity scores between all elements + If sum or mean are used, then returns (b, 1) with the reduced value for each row + + .. deprecated:: v0.6 + Use :func:`paddlemetrics.functional.pairwise_cosine_similarity` when `similarity='cosine'` + else use :func:`paddlemetrics.functional.pairwise_euclidean_distance`. Will be removed in v0.7. 
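+
+    Note:
+        Any ``similarity`` value other than ``'cosine'`` falls through to
+        ``pairwise_linear_similarity``, i.e. a plain dot product.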
+ """ + warn( + "Function `embedding_similarity` was deprecated v0.6 and will be removed in v0.7." + " Use `paddlemetrics.functional.pairwise_cosine_similarity` instead when argument" + " similarity='cosine' else use `paddlemetrics.functional.pairwise_linear_similarity", + DeprecationWarning, + ) + if similarity == "cosine": + return pairwise_cosine_similarity(batch, reduction=reduction, zero_diagonal=zero_diagonal) + return pairwise_linear_similarity(batch, reduction=reduction, zero_diagonal=zero_diagonal) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/__init__.py new file mode 100644 index 000000000..971708401 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/__init__.py @@ -0,0 +1,17 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from paddlemetrics.functional.text.bleu import bleu_score # noqa: F401 +from paddlemetrics.functional.text.sacre_bleu import sacre_bleu_score # noqa: F401 +from paddlemetrics.functional.text.wer import wer # noqa: F401 diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/bert.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/bert.py new file mode 100644 index 000000000..168be6eee --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/bert.py @@ -0,0 +1,650 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import csv +import math +import urllib +import warnings +from collections import Counter, defaultdict +from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, nn +from paddleext.torchapi.data import DataLoader, Dataset + +from paddlemetrics.utilities.imports import _TQDM_AVAILABLE, _TRANSFORMERS_AVAILABLE + +if _TRANSFORMERS_AVAILABLE: + from transformers import AutoModel, AutoTokenizer + +if _TQDM_AVAILABLE: + import tqdm + + +def _preprocess_text( + text: List[str], + tokenizer: Any, + max_length: int = 512, + truncation: bool = True, + sort_according_length: bool = True, + own_tokenizer: bool = False, +) -> Dict[str, Tensor]: + """Default text pre-processing function using `transformers` `AutoTokenizer` instance. + + Args: + text: + An iterable of sentences. + tokenizer: + Either `AutoTokenizer` instance from `transformers` package, or a user's own tokenizer. 
+ max_length: + A maximum sequence length. + truncation: + An indication of whether tokenized sequences should be padded only to the length of the longest sequence. + sort_according_length: + An indication of whether tokenized sequences should be sorted from shortest to longest. This is appropriate + to do for leveraging dynamic padding during embedding calculation and thereby to hasten inference. + own_tokenizer: + An indication of whether a non-default user's own tokenizer is used. + + Return: + A dictionary of tokenized sentences including input_ids and attention_mask. + + Raises: + BaseException: + If a tokenization with a user's own tokenizer is not successful. + """ + if not own_tokenizer: + tokenized_data = tokenizer( + text, padding="max_length", max_length=max_length, truncation=truncation, return_tensors="pt" + ) + else: + try: + tokenized_data = tokenizer(text, max_length) + except BaseException as e: + raise BaseException(f"Tokenization was not successful: {e}") + + input_ids, attention_mask = ( + _sort_data_according_length(tokenized_data["input_ids"], tokenized_data["attention_mask"]) + if sort_according_length + else (tokenized_data["input_ids"], tokenized_data["attention_mask"]) + ) + return {"input_ids": input_ids, "attention_mask": attention_mask} + + +def _process_attention_mask_for_special_tokens(attention_mask: Tensor) -> Tensor: + """Process attention mask to be zero for special [CLS] and [SEP] tokens as they're not included in a + calculation for BERT score. + + Args: + attention_mask: An attention mask to be returned, for example, by a `transformers` tokenizer. + + Return: + A processed attention mask. + """ + # Make attention_mask zero for [CLS] token + attention_mask[:, 0] = 0 + # Make attention_mask zero for [SEP] token + sep_token_position = (attention_mask - 0.1).cumsum(-1).argmax(-1) + attention_mask[B.arange(attention_mask.size(0)).long(), sep_token_position] = 0 + return attention_mask + + +def _sort_data_according_length(input_ids: Tensor, attention_mask: Tensor) -> Tuple[Tensor, Tensor]: + """Sort tokenized sentence from the shortest to the longest one.""" + sorted_indices = attention_mask.sum(1).argsort() + input_ids = input_ids[sorted_indices] + attention_mask = attention_mask[sorted_indices] + return input_ids, attention_mask + + +def _input_data_collator( + batch: Dict[str, Tensor], device: Optional[Union[str, B.device]] = None +) -> Dict[str, Tensor]: + """Helper function that trims model inputs to the longest sequence within the batch and put the input on the + proper device.""" + max_len = int(batch["attention_mask"].sum(1).max().item()) + input_ids = batch["input_ids"][:, :max_len].to(device) + attention_mask = batch["attention_mask"][:, :max_len].to(device) + batch.update({"input_ids": input_ids, "attention_mask": attention_mask}) + return batch + + +def _output_data_collator(model_output: Tensor, attention_mask: Tensor, target_len: int) -> Tuple[Tensor, Tensor]: + """Helper function that pads the model output and attention mask to the target length.""" + zeros_shape = list(model_output.shape) + zeros_shape[2] = target_len - zeros_shape[2] + model_output = B.cat( + [model_output, B.zeros(zeros_shape, dtype=model_output.dtype).to(model_output.device)], dim=2 + ) + zeros = B.zeros(zeros_shape[0], zeros_shape[2], dtype=attention_mask.dtype).to(attention_mask.device) + attention_mask = B.cat([attention_mask, zeros], dim=1) + return model_output, attention_mask + + +class TextDataset(Dataset): + """PyTorch dataset class for storing tokenized sentences 
and other properties used for BERT score + calculation.""" + + def __init__( + self, + text: List[str], + tokenizer: Any, + max_length: int = 512, + preprocess_text_fn: Callable[[List[str], Any, int], Dict[str, Tensor]] = _preprocess_text, + idf: bool = False, + tokens_idf: Optional[Dict[int, float]] = None, + ) -> None: + """ + Args: + text: + An iterable of sentences. + tokenizer: + `AutoTokenizer` instance from `transformers` package. + max_length: + A maximum sequence length. + preprocess_text_fn: + A function used for processing the input sentences. + idf: + An indication of whether calculate token inverse document frequencies to weight the model embeddings. + tokens_idf: + Inverse document frequencies (these should be calculated on reference sentences). + """ + self.text = preprocess_text_fn(text, tokenizer, max_length) + self.max_length = self.text["input_ids"].shape[1] + self.num_sentences = len(text) + self.idf = idf + self.tokens_idf = {} + if idf: + self.tokens_idf = tokens_idf if tokens_idf is not None else self._get_tokens_idf() + + def __getitem__(self, idx: int) -> Dict[str, Tensor]: + input_ids = self.text["input_ids"][idx, :] + attention_mask = self.text["attention_mask"][idx, :] + inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask} + if self.idf: + input_ids_idf = B.tensor([self.tokens_idf[input_idx] for input_idx in input_ids.tolist()]) + inputs_dict["input_ids_idf"] = input_ids_idf + return inputs_dict + + def __len__(self) -> int: + return self.num_sentences + + def _get_tokens_idf(self) -> Dict[int, float]: + """Calculate token inverse document frequences. + + Return: + A python dictionary containing inverse document frequences for token ids. + """ + token_counter: Counter = Counter() + for tokens in map(self._set_of_tokens, self.text["input_ids"]): + token_counter.update(tokens) + + tokens_idf: Dict[int, float] = defaultdict(self._get_tokens_idf_default_value) + tokens_idf.update( + {idx: math.log((self.num_sentences + 1) / (occurrence + 1)) for idx, occurrence in token_counter.items()} + ) + return tokens_idf + + def _get_tokens_idf_default_value(self) -> float: + """Helper function that ensures `defaultdict` to be pickled.""" + return math.log((self.num_sentences + 1) / 1) + + @staticmethod + def _set_of_tokens(input_ids: Tensor) -> Set: + """Return set of tokens from the `input_ids` `B.Tensor`.""" + return set(input_ids.tolist()) + + +class TokenizedDataset(TextDataset): + """The child class of `TextDataset` class used with already tokenized data.""" + + def __init__( + self, + input_ids: Tensor, + attention_mask: Tensor, + idf: bool = False, + tokens_idf: Optional[Dict[int, float]] = None, + ) -> None: + """ + Args: + input_ids: + Input ids (`B.Tensor`). + attention_mask: + Attention mask (`B.Tensor`). + idf: + An indication of whether calculate token inverse document frequencies to weight the model embeddings. + tokens_idf: + Inverse document frequencies (these should be calculated on reference sentences). 
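+
+        Note:
+            When ``tokens_idf`` is not supplied, IDFs follow ``log((num_sentences + 1) / (df + 1))``
+            from ``TextDataset._get_tokens_idf``, so a token present in every sentence gets weight ``0``.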
+ """ + self.text = dict(zip(["input_ids", "attention_mask"], _sort_data_according_length(input_ids, attention_mask))) + self.text = _input_data_collator(self.text) + self.num_sentences = len(self.text["input_ids"]) + self.max_length = self.text["input_ids"].shape[1] + self.idf = idf + self.tokens_idf = {} + if idf: + self.tokens_idf = tokens_idf if tokens_idf is not None else self._get_tokens_idf() + + +def _get_progress_bar(dataloader: DataLoader, verbose: bool = False) -> Union[DataLoader, "tqdm.auto.tqdm"]: + """Helper function returning either the dataloader itself when `verbose = False`, or it wraps the dataloader with + `tqdm.auto.tqdm`, when `verbose = True` to display a progress bar during the embbeddings calculation.""" + return tqdm.auto.tqdm(dataloader) if verbose else dataloader + + +def _check_shape_of_model_output(output: Tensor, input_ids: Tensor) -> None: + """Check if the shape of the user's own model output.""" + bs, seq_len = input_ids.shape[:2] + invalid_out_shape = len(output.shape) != 3 or output.shape[0] != bs or output.shape[1] != seq_len + if invalid_out_shape: + raise ValueError( + "The model output must be `B.Tensor` of a shape `[batch_size, seq_len, model_dim]` " + f"i.e. [{bs}, {seq_len}. , `model_dim`], but got {output.shape}." + ) + + +def _get_embeddings_and_idf_scale( + dataloader: DataLoader, + target_len: int, + model: nn.Module, + device: Optional[Union[str, B.device]] = None, + num_layers: Optional[int] = None, + all_layers: bool = False, + idf: bool = False, + verbose: bool = False, + user_forward_fn: Callable[[nn.Module, Dict[str, Tensor]], Tensor] = None, +) -> Tuple[Tensor, Tensor]: + """Calculate sentence embeddings and the inverse-document-frequence scaling factor. + Args: + dataloader: + `B.utils.data.DataLoader` instance. + target_len: + A length of the longest sequence in the data. Used for padding the model output. + model: + BERT model. + device: + A device to be used for calculation. + num_layers: + The layer of representation to use. + all_layers: + An indication whether representation from all model layers should be used for BERTScore. + idf: + An Indication whether normalization using inverse document frequencies should be used. + verbose: + An indication of whether a progress bar to be displayed during the embeddings calculation. + user_forward_fn: + A user's own forward function used in a combination with `user_model`. This function must take `user_model` + and a python dictionary of containing `"input_ids"` and `"attention_mask"` represented by `B.Tensor` + as an input and return the model's output represented by the single `B.Tensor`. + + Return: + A tuple of B.Tensors containing the model's embeddings and the normalized tokens IDF. + When `idf = False`, tokens IDF is not calculated, and a matrix of mean weights is returned instead. + For a single sentence, `mean_weight = 1/seq_len`, where `seq_len` is a sum over the corresponding + `attention_mask`. + + Raises: + ValueError: + If `all_layers = True` and a model, which is not from the `transformers` package, is used. 
+ """ + embeddings_list: List[Tensor] = [] + idf_scale_list: List[Tensor] = [] + for batch in _get_progress_bar(dataloader, verbose): + with B.no_grad(): + batch = _input_data_collator(batch, device) + # Output shape: batch_size x num_layers OR 1 x sequence_length x bert_dim + if not all_layers: + if not user_forward_fn: + out = model(batch["input_ids"], batch["attention_mask"], output_hidden_states=True) + out = out.hidden_states[num_layers if num_layers is not None else -1] + else: + out = user_forward_fn(model, batch) + _check_shape_of_model_output(out, batch["input_ids"]) + out = out.unsqueeze(1) + else: + if user_forward_fn: + raise ValueError( + "The option `all_layers=True` can be used only with default `transformers` models." + ) + out = model(batch["input_ids"], batch["attention_mask"], output_hidden_states=True) + out = B.cat([o.unsqueeze(1) for o in out.hidden_states], dim=1) + + out /= out.norm(dim=-1).unsqueeze(-1) # normalize embeddings + out, attention_mask = _output_data_collator(out, batch["attention_mask"], target_len) + processed_attention_mask = _process_attention_mask_for_special_tokens(attention_mask) + # Multiply embeddings with attention_mask (b=batch_size, l=num_layers, s=seq_len, d=emb_dim) + out = B.einsum("blsd, bs -> blsd", out, processed_attention_mask) + embeddings_list.append(out.cpu()) + + # Calculate weighted (w.r.t. sentence length) input_ids IDF matrix + input_ids_idf = ( + batch["input_ids_idf"] * processed_attention_mask if idf else processed_attention_mask.type(out.dtype) + ) + input_ids_idf /= input_ids_idf.sum(-1, keepdim=True) + idf_scale_list.append(input_ids_idf) + + embeddings = B.cat(embeddings_list) + idf_scale = B.cat(idf_scale_list) + + return embeddings, idf_scale + + +def _get_scaled_precision_or_recall(cos_sim: Tensor, metric: str, idf_scale: Tensor) -> Tensor: + """Helper function that calculates precision or recall, transpose it and scale it with idf_scale factor.""" + dim = 3 if metric == "precision" else 2 + res = cos_sim.max(dim=dim).values + res = B.einsum("bls, bs -> bls", res, idf_scale).sum(-1) + # We transpose the results and squeeze if possible to match the format of the original BERTScore implementation + res = res.transpose(0, 1).squeeze() + return res + + +def _get_precision_recall_f1( + pred_embeddings: Tensor, ref_embeddings: Tensor, pred_idf_scale: Tensor, ref_idf_scale: Tensor +) -> Tuple[Tensor, Tensor, Tensor]: + """Calculate precision, recall and F1 score over candidate and reference sentences. + + Args: + pred_embeddings: Embeddings of candidate sentenecs. + ref_embeddings: Embeddings of reference sentences. + pred_idf_scale: An IDF scale factor for candidate sentences. + ref_idf_scale: An IDF scale factor for reference sentences. + + Return: + Tensors containing precision, recall and F1 score, respectively. 
+ """ + # Dimensions: b = batch_size, l = num_layers, p = predictions_seq_len, r = references_seq_len, d = bert_dim + cos_sim = B.einsum("blpd, blrd -> blpr", pred_embeddings, ref_embeddings) + # Final metrics shape = (batch_size * num_layers | batch_size) + precision = _get_scaled_precision_or_recall(cos_sim, "precision", pred_idf_scale) + recall = _get_scaled_precision_or_recall(cos_sim, "recall", ref_idf_scale) + + f1_score = 2 * precision * recall / (precision + recall) + f1_score = f1_score.masked_fill(B.isnan(f1_score), 0.0) + + return precision, recall, f1_score + + +def _get_hash(model_name_or_path: Optional[str] = None, num_layers: Optional[int] = None, idf: bool = False) -> str: + """Compute `BERT_score`_ (copied and adjusted)""" + msg = f"{model_name_or_path}_L{num_layers}{'_idf' if idf else '_no-idf'}" + return msg + + +def _read_csv_from_local_file(baseline_path: str) -> Tensor: + """Helper function which reads baseline the csv file from the local file. + + This method implemented to avoid `pandas` dependency. + """ + with open(baseline_path) as fname: + csv_file = csv.reader(fname) + baseline_list = [[float(item) for item in row] for idx, row in enumerate(csv_file) if idx > 0] + baseline = B.tensor(baseline_list)[:, 1:] + return baseline + + +def _read_csv_from_url(baseline_url: str) -> Tensor: + """Helper function which reads the baseline csv file from URL. + + This method is implemented to avoid `pandas` dependency. + """ + with urllib.request.urlopen(baseline_url) as http_request: # type: ignore + baseline_list = [ + [float(item) for item in row.strip().decode("utf-8").split(",")] + for idx, row in enumerate(http_request) + if idx > 0 + ] + baseline = B.tensor(baseline_list)[:, 1:] + return baseline + + +def _load_baseline( + lang: str = "en", + model_name_or_path: Optional[str] = None, + baseline_path: Optional[str] = None, + baseline_url: Optional[str] = None, +) -> Optional[Tensor]: + """Load a CSV file with the baseline values used for rescaling.""" + if baseline_path: + baseline: Optional[Tensor] = _read_csv_from_local_file(baseline_path) + elif baseline_url: + baseline = _read_csv_from_url(baseline_url) + # Read default baseline from the original `bert-score` package https://github.com/Tiiiger/bert_score + elif lang and model_name_or_path: + _URL_BASE = "https://raw.githubusercontent.com/Tiiiger/bert_score/master/bert_score/rescale_baseline" + baseline_url = f"{_URL_BASE}/{lang}/{model_name_or_path}.tsv" + baseline = _read_csv_from_url(baseline_url) + else: + baseline = None + warnings.warn("Baseline was not successfully loaded. 
No baseline is going to be used.") + + return baseline + + +def _rescale_metrics_with_baseline( + precision: Tensor, + recall: Tensor, + f1_score: Tensor, + baseline: Tensor, + num_layers: Optional[int] = None, + all_layers: bool = False, +) -> Tuple[Tensor, Tensor, Tensor]: + """Rescale the computed metrics with the pre-computed baseline.""" + if num_layers is None and all_layers is False: + num_layers = -1 + all_metrics = B.stack([precision, recall, f1_score], dim=-1) + baseline_scale = baseline.unsqueeze(1) if all_layers else baseline[num_layers] + all_metrics = (all_metrics - baseline_scale) / (1 - baseline_scale) + + return all_metrics[..., 0], all_metrics[..., 1], all_metrics[..., 2] + + +def bert_score( + predictions: Union[List[str], Dict[str, Tensor]], + references: Union[List[str], Dict[str, Tensor]], + model_name_or_path: Optional[str] = None, + num_layers: Optional[int] = None, + all_layers: bool = False, + model: Optional[nn.Module] = None, + user_tokenizer: Any = None, + user_forward_fn: Callable[[nn.Module, Dict[str, Tensor]], Tensor] = None, + verbose: bool = False, + idf: bool = False, + device: Optional[Union[str, B.device]] = None, + max_length: int = 512, + batch_size: int = 64, + num_threads: int = 4, + return_hash: bool = False, + lang: str = "en", + rescale_with_baseline: bool = False, + baseline_path: Optional[str] = None, + baseline_url: Optional[str] = None, +) -> Dict[str, Union[List[float], str]]: + """`Bert_score Evaluating Text Generation`_ leverages the pre-trained contextual embeddings from BERT and + matches words in candidate and reference sentences by cosine similarity. It has been shown to correlate with + human judgment on sentence-level and system-level evaluation. Moreover, BERTScore computes precision, recall, + and F1 measure, which can be useful for evaluating different language generation tasks. + + This implemenation follows the original implementation from `BERT_score`_ + + Args: + predictions: + Either an iterable of predicted sentences or a `Dict[str, B.Tensor]` containing `input_ids` and + `attention_mask` `B.Tensor`. + references: + Either an iterable of target sentences or a `Dict[str, B.Tensor]` containing `input_ids` and + `attention_mask` `B.Tensor`. + model_name_or_path: + A name or a model path used to load `transformers` pretrained model. + num_layers: + A layer of representation to use. + all_layers: + An indication of whether the representation from all model's layers should be used. + If `all_layers = True`, the argument `num_layers` is ignored. + model: + A user's own model. Must be of `nn.Module` instance. + user_tokenizer: + A user's own tokenizer used with the own model. This must be an instance with the `__call__` method. + This method must take an iterable of sentences (`List[str]`) and must return a python dictionary + containing `"input_ids"` and `"attention_mask"` represented by `B.Tensor`. It is up to the user's model + of whether `"input_ids"` is a `B.Tensor` of input ids or embedding vectors. + This tokenizer must prepend an equivalent of `[CLS]` token and append an equivalent of `[SEP]` token + as `transformers` tokenizer does. + user_forward_fn: + A user's own forward function used in a combination with `user_model`. This function must take `user_model` + and a python dictionary of containing `"input_ids"` and `"attention_mask"` represented by `B.Tensor` + as an input and return the model's output represented by the single `B.Tensor`. 
+        verbose:
+            An indication of whether a progress bar should be displayed during the embeddings calculation.
+        idf:
+            An indication of whether normalization using inverse document frequencies should be used.
+        device:
+            A device to be used for calculation.
+        max_length:
+            A maximum length of input sequences. Sequences longer than `max_length` are to be trimmed.
+        batch_size:
+            A batch size used for model processing.
+        num_threads:
+            A number of threads to use for a dataloader.
+        return_hash:
+            An indication of whether the corresponding `hash_code` should be returned.
+        lang:
+            A language of input sentences. It is used when the scores are rescaled with a baseline.
+        rescale_with_baseline:
+            An indication of whether bertscore should be rescaled with a pre-computed baseline.
+            When a pretrained model from `transformers` is used, the corresponding baseline is downloaded
+            from the original `bert-score` package from `BERT_score`_ if available.
+            In other cases, please specify a path to the baseline csv/tsv file, which must follow the formatting
+            of the files from `BERT_score`_
+        baseline_path:
+            A path to the user's own local csv/tsv file with the baseline scale.
+        baseline_url:
+            A url path to the user's own csv/tsv file with the baseline scale.
+
+    Returns:
+        Python dictionary containing the keys `precision`, `recall` and `f1` with corresponding values.
+
+    Raises:
+        ValueError:
+            If `len(predictions) != len(references)`.
+        ValueError:
+            If `tqdm` package is required and not installed.
+        ValueError:
+            If `transformers` package is required and not installed.
+        ValueError:
+            If `num_layers` is larger than the number of the model layers.
+        ValueError:
+            If invalid input is provided.
+
+    Example:
+        >>> predictions = ["hello there", "general kenobi"]
+        >>> references = ["hello there", "master kenobi"]
+        >>> bert_score(predictions=predictions, references=references, lang="en")  # doctest: +SKIP
+        {'precision': [0.99..., 0.99...],
+         'recall': [0.99..., 0.99...],
+         'f1': [0.99..., 0.99...]}
+    """
+    if len(predictions) != len(references):
+        raise ValueError("Number of predicted and reference sentences must be the same!")
+
+    if verbose and (not _TQDM_AVAILABLE):
+        raise ValueError(
+            "An argument `verbose = True` requires the `tqdm` package to be installed. Install with `pip install tqdm`."
+        )
+
+    if model is None:
+        if not _TRANSFORMERS_AVAILABLE:
+            raise ValueError(
+                "`bert_score` metric with default models requires the `transformers` package to be installed. "
+                "Either install with `pip install transformers>=4.0` or `pip install paddlemetrics[text]`"
+            )
+        tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
+        model = AutoModel.from_pretrained(model_name_or_path)
+    else:
+        tokenizer = user_tokenizer
+    model.eval()
+    model.to(device)
+
+    try:
+        if num_layers and num_layers > model.config.num_hidden_layers:  # type: ignore
+            raise ValueError(
+                f"num_layers={num_layers} is forbidden for {model_name_or_path}. 
" # type: ignore + f"Please use num_layers <= {model.config.num_hidden_layers}" # type: ignore + ) + except AttributeError: + warnings.warn("It was not possible to retrieve the parameter `num_layers` from the model specification.") + + _are_empty_lists = all(isinstance(text, list) and len(text) == 0 for text in (predictions, references)) + _are_valid_lists = all( + isinstance(text, list) and len(text) > 0 and isinstance(text[0], str) for text in (predictions, references) + ) + _are_valid_tensors = all( + isinstance(text, dict) and isinstance(text["input_ids"], Tensor) for text in (predictions, references) + ) + if _are_empty_lists: + warnings.warn("Predictions and references are empty.") + output_dict: Dict[str, Union[List[float], str]] = { + "precision": [0.0], + "recall": [0.0], + "f1": [0.0], + } + if return_hash: + output_dict.update({"hash": _get_hash(model_name_or_path, num_layers, idf)}) + return output_dict + + # Load baselines if needed + baseline = _load_baseline(lang, model_name_or_path, baseline_path, baseline_url) if rescale_with_baseline else None + + # We ignore mypy typing below as the proper typing is ensured by conditions above, only mypy cannot infer that. + if _are_valid_lists: + ref_dataset = TextDataset(references, tokenizer, max_length, idf=idf) # type: ignore + pred_dataset = TextDataset( + predictions, # type: ignore + tokenizer, + max_length, + idf=idf, + tokens_idf=ref_dataset.tokens_idf, + ) + elif _are_valid_tensors: + ref_dataset = TokenizedDataset(**references, idf=idf) # type: ignore + pred_dataset = TokenizedDataset(**predictions, idf=idf, tokens_idf=ref_dataset.tokens_idf) # type: ignore + else: + raise ValueError("Invalid input provided.") + + ref_loader = DataLoader(ref_dataset, batch_size=batch_size, num_workers=num_threads) + pred_loader = DataLoader(pred_dataset, batch_size=batch_size, num_workers=num_threads) + + ref_embeddings, ref_idf_scale = _get_embeddings_and_idf_scale( + ref_loader, ref_dataset.max_length, model, device, num_layers, all_layers, idf, verbose, user_forward_fn + ) + pred_embeddings, pred_idf_scale = _get_embeddings_and_idf_scale( + pred_loader, pred_dataset.max_length, model, device, num_layers, all_layers, idf, verbose, user_forward_fn + ) + + precision, recall, f1_score = _get_precision_recall_f1( + pred_embeddings, ref_embeddings, pred_idf_scale, ref_idf_scale + ) + + if baseline is not None: + precision, recall, f1_score = _rescale_metrics_with_baseline( + precision, recall, f1_score, baseline, num_layers, all_layers + ) + + output_dict = { + "precision": precision.tolist(), + "recall": recall.tolist(), + "f1": f1_score.tolist(), + } + if return_hash: + output_dict.update({"hash": _get_hash(model_name_or_path, num_layers, idf)}) + return output_dict diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/bleu.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/bleu.py new file mode 100644 index 000000000..4d00946b7 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/bleu.py @@ -0,0 +1,171 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# referenced from
+# Library Name: torchtext
+# Authors: torchtext authors and @sluks
+# Date: 2020-07-18
+# Link: https://pytorch.org/text/_modules/torchtext/data/metrics.html#bleu_score
+from collections import Counter
+from typing import Sequence, Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor, tensor
+
+
+def _count_ngram(ngram_input_list: Sequence[str], n_gram: int) -> Counter:
+    """Count how many times each n-gram of order 1 to ``n_gram`` appears in the given text.
+
+    Args:
+        ngram_input_list: A list of translated text or reference texts
+        n_gram: gram value ranging from 1 to 4
+
+    Return:
+        ngram_counter: a collections.Counter object of ngram
+    """
+
+    ngram_counter: Counter = Counter()
+
+    for i in range(1, n_gram + 1):
+        for j in range(len(ngram_input_list) - i + 1):
+            ngram_key = tuple(ngram_input_list[j : (i + j)])
+            ngram_counter[ngram_key] += 1
+
+    return ngram_counter
+
+
+def _bleu_score_update(
+    reference_corpus: Sequence[Sequence[Sequence[str]]],
+    translate_corpus: Sequence[Sequence[str]],
+    numerator: Tensor,
+    denominator: Tensor,
+    trans_len: Tensor,
+    ref_len: Tensor,
+    n_gram: int = 4,
+) -> Tuple[Tensor, Tensor]:
+    """Updates and returns variables required to compute the BLEU score.
+
+    Args:
+        reference_corpus: An iterable of iterables of reference corpus
+        translate_corpus: An iterable of machine translated corpus
+        numerator: Numerator of precision score (true positives)
+        denominator: Denominator of precision score (true positives + false positives)
+        trans_len: count of words in a candidate translation
+        ref_len: count of words in a reference translation
+        n_gram: gram value ranging from 1 to 4
+    """
+
+    for (translation, references) in zip(translate_corpus, reference_corpus):
+        trans_len += len(translation)
+        ref_len_list = [len(ref) for ref in references]
+        ref_len_diff = [abs(len(translation) - x) for x in ref_len_list]
+        ref_len += ref_len_list[ref_len_diff.index(min(ref_len_diff))]
+        translation_counter: Counter = _count_ngram(translation, n_gram)
+        reference_counter: Counter = Counter()
+
+        for ref in references:
+            reference_counter |= _count_ngram(ref, n_gram)
+
+        ngram_counter_clip = translation_counter & reference_counter
+
+        for counter_clip in ngram_counter_clip:
+            numerator[len(counter_clip) - 1] += ngram_counter_clip[counter_clip]
+
+        for counter in translation_counter:
+            denominator[len(counter) - 1] += translation_counter[counter]
+
+    return trans_len, ref_len
+
+
+def _bleu_score_compute(
+    trans_len: Tensor, ref_len: Tensor, numerator: Tensor, denominator: Tensor, n_gram: int = 4, smooth: bool = False
+) -> Tensor:
+    """Computes the BLEU score.
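+
+    For orientation: the value computed below is standard corpus-level BLEU, i.e. the brevity
+    penalty times the geometric mean of the modified n-gram precisions with uniform weights,
+    ``BLEU = BP * exp(sum_{n=1..N} (1/N) * log p_n)`` with ``p_n = numerator[n] / denominator[n]``.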
+ + Args: + trans_len: count of words in a candidate translation + ref_len: count of words in a reference translation + numerator: Numerator of precision score (true positives) + denominator: Denominator of precision score (true positives + false positives) + n_gram: gram value ranged 1 to 4 + smooth: Whether or not to apply smoothing + """ + device = numerator.device + if min(numerator) == 0.0: + return tensor(0.0, device=device) + + if smooth: + precision_scores = B.div( + B.add(numerator, B.ones(n_gram, device=device)), + B.add(denominator, B.ones(n_gram, device=device)), + ) + precision_scores[0] = numerator[0] / denominator[0] + else: + precision_scores = numerator / denominator + + log_precision_scores = tensor([1.0 / n_gram] * n_gram, device=device) * B.log(precision_scores) + geometric_mean = B.exp(B.sum(log_precision_scores)) + brevity_penalty = tensor(1.0, device=device) if trans_len > ref_len else B.exp(1 - (ref_len / trans_len)) + bleu = brevity_penalty * geometric_mean + + return bleu + + +def bleu_score( + reference_corpus: Sequence[Sequence[Sequence[str]]], + translate_corpus: Sequence[Sequence[str]], + n_gram: int = 4, + smooth: bool = False, +) -> Tensor: + """Calculate `BLEU score`_ of machine translated text with one or more references. + + Args: + reference_corpus: + An iterable of iterables of reference corpus + translate_corpus: + An iterable of machine translated corpus + n_gram: + Gram value ranged from 1 to 4 (Default 4) + smooth: + Whether or not to apply smoothing – see [2] + + Return: + Tensor with BLEU Score + + Example: + >>> from paddlemetrics.functional import bleu_score + >>> translate_corpus = ['the cat is on the mat'.split()] + >>> reference_corpus = [['there is a cat on the mat'.split(), 'a cat is on the mat'.split()]] + >>> bleu_score(reference_corpus, translate_corpus) + tensor(0.7598) + + References: + [1] BLEU: a Method for Automatic Evaluation of Machine Translation by Papineni, + Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu `BLEU`_ + + [2] Automatic Evaluation of Machine Translation Quality Using Longest Common Subsequence + and Skip-Bigram Statistics by Chin-Yew Lin and Franz Josef Och `Machine Translation Evolution`_ + """ + + if len(translate_corpus) != len(reference_corpus): + raise ValueError(f"Corpus has different size {len(translate_corpus)} != {len(reference_corpus)}") + numerator = B.zeros(n_gram) + denominator = B.zeros(n_gram) + trans_len = tensor(0, dtype=B.float) + ref_len = tensor(0, dtype=B.float) + + trans_len, ref_len = _bleu_score_update( + reference_corpus, translate_corpus, numerator, denominator, trans_len, ref_len, n_gram + ) + + return _bleu_score_compute(trans_len, ref_len, numerator, denominator, n_gram, smooth) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/rouge.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/rouge.py new file mode 100644 index 000000000..e83c00d0b --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/rouge.py @@ -0,0 +1,325 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import re
+from collections import Counter
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.utilities.imports import _NLTK_AVAILABLE
+
+ALLOWED_ROUGE_KEYS: Dict[str, Union[int, str]] = {
+    "rouge1": 1,
+    "rouge2": 2,
+    "rouge3": 3,
+    "rouge4": 4,
+    "rouge5": 5,
+    "rouge6": 6,
+    "rouge7": 7,
+    "rouge8": 8,
+    "rouge9": 9,
+    "rougeL": "L",
+    "rougeLsum": "Lsum",
+}
+
+
+def _add_newline_to_end_of_each_sentence(x: str) -> str:
+    """This was added to get rougeLsum scores matching published rougeL scores for BART and PEGASUS."""
+    if not _NLTK_AVAILABLE:
+        raise ValueError("ROUGE-Lsum calculation requires that nltk is installed. Use `pip install nltk`.")
+    import nltk
+
+    nltk.download("punkt", quiet=True, force=False)
+
+    x = re.sub("<n>", "", x)  # remove pegasus newline char
+    return "\n".join(nltk.sent_tokenize(x))
+
+
+def _compute_metrics(hits_or_lcs: int, pred_len: int, target_len: int) -> Dict[str, Tensor]:
+    """This computes precision, recall and F1 score based on the number of hits or the LCS length, and the
+    lengths of the tokenized predicted and target sentences.
+
+    Args:
+        hits_or_lcs:
+            A number of matches or a length of the longest common subsequence.
+        pred_len:
+            A length of a tokenized predicted sentence.
+        target_len:
+            A length of a tokenized target sentence.
+    """
+    precision = hits_or_lcs / pred_len
+    recall = hits_or_lcs / target_len
+    if precision == recall == 0.0:
+        return dict(precision=tensor(0.0), recall=tensor(0.0), fmeasure=tensor(0.0))
+
+    fmeasure = 2 * precision * recall / (precision + recall)
+    return dict(precision=tensor(precision), recall=tensor(recall), fmeasure=tensor(fmeasure))
+
+
+def _lcs(pred_tokens: List[str], target_tokens: List[str]) -> int:
+    """Common DP algorithm to compute the length of the longest common subsequence.
+
+    Args:
+        pred_tokens:
+            A tokenized predicted sentence.
+        target_tokens:
+            A tokenized target sentence.
+    """
+    LCS = [[0] * (len(pred_tokens) + 1) for _ in range(len(target_tokens) + 1)]
+    for i in range(1, len(target_tokens) + 1):
+        for j in range(1, len(pred_tokens) + 1):
+            if target_tokens[i - 1] == pred_tokens[j - 1]:
+                LCS[i][j] = LCS[i - 1][j - 1] + 1
+            else:
+                LCS[i][j] = max(LCS[i - 1][j], LCS[i][j - 1])
+    return LCS[-1][-1]
+
+
+def _normalize_and_tokenize_text(text: str, stemmer: Optional[Any] = None) -> List[str]:
+    """Rouge score should be calculated only over lowercased words and digits. Optionally, Porter stemmer can be
+    used to strip word suffixes to improve matching. The text normalization follows the implementation from
+    `Rouge score_Text Normalizition`_
+
+    Args:
+        text:
+            An input sentence.
+        stemmer:
+            Porter stemmer instance to strip word suffixes to improve matching.
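+
+    Example (lowercasing and punctuation stripping, no stemmer):
+        >>> _normalize_and_tokenize_text("The quick brown fox!")
+        ['the', 'quick', 'brown', 'fox']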
+    """
+    # Replace any non-alpha-numeric characters with spaces.
+    text = re.sub(r"[^a-z0-9]+", " ", text.lower())
+
+    tokens = re.split(r"\s+", text)
+    if stemmer:
+        # Only stem words more than 3 characters long.
+        tokens = [stemmer.stem(x) if len(x) > 3 else x for x in tokens]
+
+    # One final check to drop any empty or invalid tokens.
+    tokens = [x for x in tokens if (isinstance(x, str) and re.match(r"^[a-z0-9]+$", x))]
+
+    return tokens
+
+
+def _rouge_n_score(pred: List[str], target: List[str], n_gram: int) -> Dict[str, Tensor]:
+    """This computes precision, recall and F1 score for the Rouge-N metric.
+
+    Args:
+        pred:
+            A predicted sentence.
+        target:
+            A target sentence.
+        n_gram:
+            N-gram overlap.
+    """
+
+    def _create_ngrams(tokens: List[str], n: int) -> Counter:
+        ngrams: Counter = Counter()
+        for ngram in (tuple(tokens[i : i + n]) for i in range(len(tokens) - n + 1)):
+            ngrams[ngram] += 1
+        return ngrams
+
+    pred_ngrams, target_ngrams = _create_ngrams(pred, n_gram), _create_ngrams(target, n_gram)
+    pred_len, target_len = sum(pred_ngrams.values()), sum(target_ngrams.values())
+    if 0 in (pred_len, target_len):
+        return dict(precision=tensor(0.0), recall=tensor(0.0), fmeasure=tensor(0.0))
+
+    # It is sufficient to take a set(pred_ngrams) for the hits count as we consider the intersection of pred & target
+    hits = sum(min(pred_ngrams[w], target_ngrams[w]) for w in set(pred_ngrams))
+    return _compute_metrics(hits, max(pred_len, 1), max(target_len, 1))
+
+
+def _rouge_l_score(pred: List[str], target: List[str]) -> Dict[str, Tensor]:
+    """This computes precision, recall and F1 score for the Rouge-L or Rouge-LSum metric.
+
+    Args:
+        pred:
+            A predicted sentence.
+        target:
+            A target sentence.
+    """
+    pred_len, target_len = len(pred), len(target)
+    if 0 in (pred_len, target_len):
+        return dict(precision=tensor(0.0), recall=tensor(0.0), fmeasure=tensor(0.0))
+
+    lcs = _lcs(pred, target)
+    return _compute_metrics(lcs, pred_len, target_len)
+
+
+def _rouge_score_update(
+    preds: List[str],
+    targets: List[str],
+    rouge_keys_values: List[Union[int, str]],
+    stemmer: Optional[Any] = None,
+) -> Dict[Union[int, str], List[Dict[str, Tensor]]]:
+    """Update the rouge score with the current set of predicted and target sentences.
+
+    Args:
+        preds:
+            An iterable of predicted sentences.
+        targets:
+            An iterable of target sentences.
+        rouge_keys_values:
+            List of N-grams/'L'/'Lsum' arguments.
+        stemmer:
+            Porter stemmer instance to strip word suffixes to improve matching.
+ + Example: + >>> targets = "Is your name John".split() + >>> preds = "My name is John".split() + >>> from pprint import pprint + >>> score = _rouge_score_update(preds, targets, rouge_keys_values=[1, 2, 3, 'L']) + >>> pprint(score) # doctest: +NORMALIZE_WHITESPACE +SKIP + {1: [{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, + {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, + {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, + {'fmeasure': tensor(1.), 'precision': tensor(1.), 'recall': tensor(1.)}], + 2: [{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, + {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, + {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, + {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}], + 3: [{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, + {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, + {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, + {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}], + 'L': [{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, + {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, + {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, + {'fmeasure': tensor(1.), 'precision': tensor(1.), 'recall': tensor(1.)}]} + """ + results: Dict[Union[int, str], List[Dict[str, Tensor]]] = {rouge_key: [] for rouge_key in rouge_keys_values} + for pred_raw, target_raw in zip(preds, targets): + pred = _normalize_and_tokenize_text(pred_raw, stemmer) + target = _normalize_and_tokenize_text(target_raw, stemmer) + + if "Lsum" in rouge_keys_values: + # rougeLsum expects "\n" separated sentences within a summary + pred_Lsum = _normalize_and_tokenize_text(_add_newline_to_end_of_each_sentence(pred_raw), stemmer) + target_Lsum = _normalize_and_tokenize_text(_add_newline_to_end_of_each_sentence(target_raw), stemmer) + + for rouge_key in rouge_keys_values: + if isinstance(rouge_key, int): + score = _rouge_n_score(pred, target, rouge_key) + else: + score = _rouge_l_score( + pred if rouge_key != "Lsum" else pred_Lsum, + target if rouge_key != "Lsum" else target_Lsum, + ) + results[rouge_key].append(score) + return results + + +def _rouge_score_compute(sentence_results: Dict[str, List[Tensor]]) -> Dict[str, Tensor]: + """Compute the combined ROUGE metric for all the input set of predicted and target sentences. + + Args: + sentence_results: + Rouge-N/Rouge-L/Rouge-LSum metrics calculated for single sentence. + """ + results: Dict[str, Tensor] = {} + # Obtain mean scores for individual rouge metrics + if sentence_results == {}: + return results + + for rouge_key, scores in sentence_results.items(): + results[rouge_key] = B.tensor(scores).mean() + + return results + + +def rouge_score( + preds: Union[str, List[str]], + targets: Union[str, List[str]], + use_stemmer: bool = False, + rouge_keys: Union[str, Tuple[str, ...]] = ("rouge1", "rouge2", "rougeL", "rougeLsum"), # type: ignore +) -> Dict[str, Tensor]: + """Calculate `Calculate Rouge Score`_ , used for automatic summarization. + + Args: + preds: + An iterable of predicted sentences. + targets: + An iterable of target sentences. + use_stemmer: + Use Porter stemmer to strip word suffixes to improve matching. + rouge_keys: + A list of rouge types to calculate. 
+            Keys that are allowed are ``rougeL``, ``rougeLsum``, and ``rouge1`` through ``rouge9``.
+
+    Return:
+        Python dictionary of rouge scores for each input rouge key.
+
+    Example:
+        >>> targets = "Is your name John".split()
+        >>> preds = "My name is John".split()
+        >>> from pprint import pprint
+        >>> pprint(rouge_score(preds, targets))  # doctest: +NORMALIZE_WHITESPACE +SKIP
+        {'rouge1_fmeasure': 0.25,
+         'rouge1_precision': 0.25,
+         'rouge1_recall': 0.25,
+         'rouge2_fmeasure': 0.0,
+         'rouge2_precision': 0.0,
+         'rouge2_recall': 0.0,
+         'rougeL_fmeasure': 0.25,
+         'rougeL_precision': 0.25,
+         'rougeL_recall': 0.25,
+         'rougeLsum_fmeasure': 0.25,
+         'rougeLsum_precision': 0.25,
+         'rougeLsum_recall': 0.25}
+
+    Raises:
+        ValueError:
+            If the python package ``nltk`` is not installed.
+        ValueError:
+            If any of the ``rouge_keys`` does not belong to the allowed set of keys.
+
+    References:
+        [1] ROUGE: A Package for Automatic Evaluation of Summaries by Chin-Yew Lin. https://aclanthology.org/W04-1013/
+    """
+
+    if use_stemmer:
+        if not _NLTK_AVAILABLE:
+            raise ValueError("Stemmer requires that nltk is installed. Use `pip install nltk`.")
+        import nltk
+
+    stemmer = nltk.stem.porter.PorterStemmer() if use_stemmer else None
+
+    if not isinstance(rouge_keys, tuple):
+        rouge_keys = tuple([rouge_keys])
+    for key in rouge_keys:
+        if key not in ALLOWED_ROUGE_KEYS.keys():
+            raise ValueError(f"Got unknown rouge key {key}. Expected to be one of {list(ALLOWED_ROUGE_KEYS.keys())}")
+    rouge_keys_values = [ALLOWED_ROUGE_KEYS[key] for key in rouge_keys]
+
+    if isinstance(preds, str):
+        preds = [preds]
+
+    if isinstance(targets, str):
+        targets = [targets]
+
+    sentence_results: Dict[Union[int, str], List[Dict[str, Tensor]]] = _rouge_score_update(
+        preds, targets, rouge_keys_values, stemmer=stemmer
+    )
+
+    output: Dict[str, List[Tensor]] = {}
+    for rouge_key in rouge_keys_values:
+        for type in ["fmeasure", "precision", "recall"]:
+            output[f"rouge{rouge_key}_{type}"] = []
+
+    for rouge_key, metrics in sentence_results.items():
+        for metric in metrics:
+            for type, value in metric.items():
+                output[f"rouge{rouge_key}_{type}"].append(value)
+
+    return _rouge_score_compute(output)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/sacre_bleu.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/sacre_bleu.py
new file mode 100644
index 000000000..1a59377f6
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/sacre_bleu.py
@@ -0,0 +1,355 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# referenced from
+# Library Name: torchtext
+# Authors: torchtext authors and @sluks
+# Date: 2020-07-18
+# Link: https://pytorch.org/text/_modules/torchtext/data/metrics.html#bleu_score
+
+##############
+
+# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You may not
+# use this file except in compliance with the License. 
A copy of the License +# is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is distributed on +# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. + +############## + +# MIT License +# Copyright (c) 2017 - Shujian Huang + + +import re +from typing import Sequence + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor +from typing_extensions import Literal + +from paddlemetrics.functional.text.bleu import _bleu_score_compute, _bleu_score_update +from paddlemetrics.utilities.imports import _REGEX_AVAILABLE + +AVAILABLE_TOKENIZERS = ("none", "13a", "zh", "intl", "char") + +_UCODE_RANGES = ( + ("\u3400", "\u4db5"), # CJK Unified Ideographs Extension A, release 3.0 + ("\u4e00", "\u9fa5"), # CJK Unified Ideographs, release 1.1 + ("\u9fa6", "\u9fbb"), # CJK Unified Ideographs, release 4.1 + ("\uf900", "\ufa2d"), # CJK Compatibility Ideographs, release 1.1 + ("\ufa30", "\ufa6a"), # CJK Compatibility Ideographs, release 3.2 + ("\ufa70", "\ufad9"), # CJK Compatibility Ideographs, release 4.1 + ("\u20000", "\u2a6d6"), # (UTF16) CJK Unified Ideographs Extension B, release 3.1 + ("\u2f800", "\u2fa1d"), # (UTF16) CJK Compatibility Supplement, release 3.1 + ("\uff00", "\uffef"), # Full width ASCII, full width of English punctuation, + # half width Katakana, half wide half width kana, Korean alphabet + ("\u2e80", "\u2eff"), # CJK Radicals Supplement + ("\u3000", "\u303f"), # CJK punctuation mark + ("\u31c0", "\u31ef"), # CJK stroke + ("\u2f00", "\u2fdf"), # Kangxi Radicals + ("\u2ff0", "\u2fff"), # Chinese character structure + ("\u3100", "\u312f"), # Phonetic symbols + ("\u31a0", "\u31bf"), # Phonetic symbols (Taiwanese and Hakka expansion) + ("\ufe10", "\ufe1f"), + ("\ufe30", "\ufe4f"), + ("\u2600", "\u26ff"), + ("\u2700", "\u27bf"), + ("\u3200", "\u32ff"), + ("\u3300", "\u33ff"), +) + + +class _SacreBLEUTokenizer: + """Tokenizer used for SacreBLEU calculation. 
+
+    Source: https://github.com/mjpost/sacrebleu/tree/master/sacrebleu/tokenizers
+    """
+
+    _REGEX = (
+        # language-dependent part (assuming Western languages)
+        (re.compile(r"([\{-\~\[-\` -\&\(-\+\:-\@\/])"), r" \1 "),
+        # tokenize period and comma unless preceded by a digit
+        (re.compile(r"([^0-9])([\.,])"), r"\1 \2 "),
+        # tokenize period and comma unless followed by a digit
+        (re.compile(r"([\.,])([^0-9])"), r" \1 \2"),
+        # tokenize dash when preceded by a digit
+        (re.compile(r"([0-9])(-)"), r"\1 \2 "),
+        # one space only between words
+        # NOTE: Doing this in Python (below) is faster
+        # (re.compile(r'\s+'), r' '),
+    )
+
+    if _REGEX_AVAILABLE:
+        import regex
+
+        _INT_REGEX = (
+            # Separate out punctuations preceded by a non-digit
+            (regex.compile(r"(\P{N})(\p{P})"), r"\1 \2 "),
+            # Separate out punctuations followed by a non-digit
+            (regex.compile(r"(\p{P})(\P{N})"), r" \1 \2"),
+            # Separate out symbols
+            (regex.compile(r"(\p{S})"), r" \1 "),
+        )
+
+    _TOKENIZE_FN = {
+        "none": "_tokenize_base",
+        "13a": "_tokenize_13a",
+        "zh": "_tokenize_zh",
+        "intl": "_tokenize_international",
+        "char": "_tokenize_char",
+    }
+
+    def __init__(self, tokenize: Literal["none", "13a", "zh", "intl", "char"], lowercase: bool = False) -> None:
+        self.tokenize_fn = getattr(self, self._TOKENIZE_FN[tokenize])
+        self.lowercase = lowercase
+
+    def __call__(self, line: str) -> Sequence[str]:
+        tokenized_line = self.tokenize_fn(line)
+        return self._lower(tokenized_line, self.lowercase).split()
+
+    @classmethod
+    def tokenize(
+        cls, line: str, tokenize: Literal["none", "13a", "zh", "intl", "char"], lowercase: bool = False
+    ) -> Sequence[str]:
+        tokenize_fn = getattr(cls, cls._TOKENIZE_FN[tokenize])
+        tokenized_line = tokenize_fn(line)
+        return cls._lower(tokenized_line, lowercase).split()
+
+    @classmethod
+    def _tokenize_regex(cls, line: str) -> str:
+        """Common post-processing tokenizer for `13a` and `zh` tokenizers.
+
+        Args:
+            line: a segment to tokenize
+
+        Return:
+            the tokenized line
+        """
+        for (_re, repl) in cls._REGEX:
+            line = _re.sub(repl, line)
+        # no leading or trailing spaces, single space within words
+        return " ".join(line.split())
+
+    @staticmethod
+    def _is_chinese_char(uchar: str) -> bool:
+        """
+        Args:
+            uchar: input char in unicode
+
+        Return:
+            whether the input char is a Chinese character.
+        """
+        for start, end in _UCODE_RANGES:
+            if start <= uchar <= end:
+                return True
+        return False
+
+    @classmethod
+    def _tokenize_base(cls, line: str) -> str:
+        """Tokenizes an input line with the tokenizer.
+
+        Args:
+            line: a segment to tokenize
+
+        Return:
+            the tokenized line
+        """
+        return line
+
+    @classmethod
+    def _tokenize_13a(cls, line: str) -> str:
+        """Tokenizes an input line using a relatively minimal tokenization that is however equivalent to
+        mteval-v13a, used by WMT.
+
+        Args:
+            line: input sentence
+
+        Return:
+            tokenized sentence
+        """
+        # language-independent part:
+        line = line.replace("<skipped>", "")
+        line = line.replace("-\n", "")
+        line = line.replace("\n", " ")
+
+        if "&" in line:
+            line = line.replace("&quot;", '"')
+            line = line.replace("&amp;", "&")
+            line = line.replace("&lt;", "<")
+            line = line.replace("&gt;", ">")
+
+        return cls._tokenize_regex(line)
+
+    @classmethod
+    def _tokenize_zh(cls, line: str) -> str:
+        """The tokenization of Chinese text in this script contains two
+        steps: separate each Chinese characters (by utf-8 encoding); tokenize
+        the non Chinese part (following the `13a` i.e. mteval tokenizer).
+ Author: Shujian Huang huangsj@nju.edu.cn + + Args: + line: input sentence + + Return: + tokenized sentence + """ + + line = line.strip() + line_in_chars = "" + + for char in line: + if cls._is_chinese_char(char): + line_in_chars += " " + line_in_chars += char + line_in_chars += " " + else: + line_in_chars += char + + return cls._tokenize_regex(line_in_chars) + + @classmethod + def _tokenize_international(cls, line: str) -> str: + """Tokenizes a string following the official BLEU implementation. + + See github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/mteval-v14.pl#L954-L983 + + In our case, the input string is expected to be just one line. + We just tokenize on punctuation and symbols, + except when a punctuation is preceded and followed by a digit + (e.g. a comma/dot as a thousand/decimal separator). + We do not recover escaped forms of punctuations such as ' or > + as these should never appear in MT system outputs (see issue #138) + + Note that a number (e.g., a year) followed by a dot at the end of + sentence is NOT tokenized, i.e. the dot stays with the number because + `s/(\\p{P})(\\P{N})/ $1 $2/g` does not match this case (unless we add a + space after each sentence). However, this error is already in the + original mteval-v14.pl and we want to be consistent with it. + The error is not present in the non-international version, + which uses `$norm_text = " $norm_text "`. + + Args: + line: the input string to tokenize. + + Return: + The tokenized string. + """ + for (_re, repl) in cls._INT_REGEX: + line = _re.sub(repl, line) + + return " ".join(line.split()) + + @classmethod + def _tokenize_char(cls, line: str) -> str: + """Tokenizes all the characters in the input line. + + Args: + line: a segment to tokenize + + Return: + the tokenized line + """ + return " ".join(char for char in line) + + @staticmethod + def _lower(line: str, lowercase: bool) -> str: + if lowercase: + return line.lower() + return line + + +def sacre_bleu_score( + reference_corpus: Sequence[Sequence[str]], + translate_corpus: Sequence[str], + n_gram: int = 4, + smooth: bool = False, + tokenize: Literal["none", "13a", "zh", "intl", "char"] = "13a", + lowercase: bool = False, +) -> Tensor: + """Calculate `BLEU score`_ [1] of machine translated text with one or more references. This implementation + follows the behaviour of SacreBLEU [2] implementation from https://github.com/mjpost/sacrebleu. + + Args: + reference_corpus: + An iterable of iterables of reference corpus + translate_corpus: + An iterable of machine translated corpus + n_gram: + Gram value ranged from 1 to 4 (Default 4) + smooth: + Whether or not to apply smoothing – see [2] + tokenize: + Tokenization technique to be used. (Default '13a') + Supported tokenization: ['none', '13a', 'zh', 'intl', 'char'] + lowercase: + If ``True``, BLEU score over lowercased text is calculated. + + Return: + Tensor with BLEU Score + + Example: + >>> from paddlemetrics.functional import sacre_bleu_score + >>> translate_corpus = ['the cat is on the mat'] + >>> reference_corpus = [['there is a cat on the mat', 'a cat is on the mat']] + >>> sacre_bleu_score(reference_corpus, translate_corpus) + tensor(0.7598) + + References: + [1] BLEU: a Method for Automatic Evaluation of Machine Translation by Papineni, + Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu `BLEU`_ + + [2] A Call for Clarity in Reporting BLEU Scores by Matt Post. 
+ + [3] Automatic Evaluation of Machine Translation Quality Using Longest Common Subsequence + and Skip-Bigram Statistics by Chin-Yew Lin and Franz Josef Och `Machine Translation Evolution`_ + """ + if tokenize not in AVAILABLE_TOKENIZERS: + raise ValueError(f"Argument `tokenize` expected to be one of {AVAILABLE_TOKENIZERS} but got {tokenize}.") + + if tokenize not in _SacreBLEUTokenizer._TOKENIZE_FN.keys(): + raise ValueError( + f"Unsupported tokenizer selected. Please, choose one of {list(_SacreBLEUTokenizer._TOKENIZE_FN.keys())}" + ) + if len(translate_corpus) != len(reference_corpus): + raise ValueError(f"Corpus has different size {len(translate_corpus)} != {len(reference_corpus)}") + if tokenize == "intl" and not _REGEX_AVAILABLE: + raise ValueError( + "`'intl'` tokenization requires `regex` installed. Use `pip install regex` or `pip install " + "paddlemetrics[text]`." + ) + + reference_corpus_: Sequence[Sequence[Sequence[str]]] = [ + [_SacreBLEUTokenizer.tokenize(line, tokenize, lowercase) for line in reference] + for reference in reference_corpus + ] + translate_corpus_: Sequence[Sequence[str]] = [ + _SacreBLEUTokenizer.tokenize(line, tokenize, lowercase) for line in translate_corpus + ] + + numerator = B.zeros(n_gram) + denominator = B.zeros(n_gram) + trans_len = tensor(0, dtype=B.float) + ref_len = tensor(0, dtype=B.float) + + trans_len, ref_len = _bleu_score_update( + reference_corpus_, translate_corpus_, numerator, denominator, trans_len, ref_len, n_gram + ) + + return _bleu_score_compute(trans_len, ref_len, numerator, denominator, n_gram, smooth) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/wer.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/wer.py new file mode 100644 index 000000000..4cd19b059 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/wer.py @@ -0,0 +1,114 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import List, Optional, Tuple, Union +from warnings import warn + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + + +def _edit_distance(prediction_tokens: List[str], reference_tokens: List[str]) -> int: + """Standard dynamic programming algorithm to compute the edit distance. 
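+
+    For example, turning ``"who is there"`` into ``"who was there"`` takes a single
+    substitution:
+
+    >>> _edit_distance("who is there".split(), "who was there".split())
+    1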
+ + Args: + prediction_tokens: A tokenized predicted sentence + reference_tokens: A tokenized reference sentence + + Returns: + (int) Edit distance between the predicted sentence and the reference sentence + """ + dp = [[0] * (len(reference_tokens) + 1) for _ in range(len(prediction_tokens) + 1)] + for i in range(len(prediction_tokens) + 1): + dp[i][0] = i + for j in range(len(reference_tokens) + 1): + dp[0][j] = j + for i in range(1, len(prediction_tokens) + 1): + for j in range(1, len(reference_tokens) + 1): + if prediction_tokens[i - 1] == reference_tokens[j - 1]: + dp[i][j] = dp[i - 1][j - 1] + else: + dp[i][j] = min(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1]) + 1 + return dp[-1][-1] + + +def _wer_update( + predictions: Union[str, List[str]], + references: Union[str, List[str]], +) -> Tuple[Tensor, Tensor]: + """Update the wer score with the current set of references and predictions. + + Args: + predictions: Transcription(s) to score as a string or list of strings + references: Reference(s) for each speech input as a string or list of strings + + Returns: + (Tensor) Number of edit operations to get from the reference to the prediction, summed over all samples + (Tensor) Number of words over all references + """ + if isinstance(predictions, str): + predictions = [predictions] + if isinstance(references, str): + references = [references] + errors = tensor(0, dtype=B.float) + total = tensor(0, dtype=B.float) + for prediction, reference in zip(predictions, references): + prediction_tokens = prediction.split() + reference_tokens = reference.split() + errors += _edit_distance(prediction_tokens, reference_tokens) + total += len(reference_tokens) + return errors, total + + +def _wer_compute(errors: Tensor, total: Tensor) -> Tensor: + """Compute the word error rate. + + Args: + errors: Number of edit operations to get from the reference to the prediction, summed over all samples + total: Number of words over all references + + Returns: + (Tensor) Word error rate + """ + return errors / total + + +def wer( + predictions: Union[str, List[str]], + references: Union[str, List[str]], + concatenate_texts: Optional[bool] = None, # TODO: remove in v0.7 +) -> Tensor: + """Word error rate (WER_) is a common metric of the performance of an automatic speech recognition system. This + value indicates the percentage of words that were incorrectly predicted. The lower the value, the better the + performance of the ASR system with a WER of 0 being a perfect score. + + Args: + predictions: Transcription(s) to score as a string or list of strings + references: Reference(s) for each speech input as a string or list of strings + concatenate_texts: Whether to concatenate all input texts or compute WER iteratively + This argument is deprecated in v0.6 and it will be removed in v0.7. 
+ + Returns: + (Tensor) Word error rate + + Examples: + >>> predictions = ["this is the prediction", "there is an other sample"] + >>> references = ["this is the reference", "there is another one"] + >>> wer(predictions=predictions, references=references) + tensor(0.5000) + """ + if concatenate_texts is not None: + warn("`concatenate_texts` has been deprecated in v0.6 and it will be removed in v0.7", DeprecationWarning) + errors, total = _wer_update(predictions, references) + return _wer_compute(errors, total) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/image/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/image/__init__.py new file mode 100644 index 000000000..c3fb3568f --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/image/__init__.py @@ -0,0 +1,19 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#from paddlemetrics.image.fid import FID # noqa: F401 +from paddlemetrics.image.inception import IS # noqa: F401 +from paddlemetrics.image.kid import KID # noqa: F401 +from paddlemetrics.image.lpip_similarity import LPIPS # noqa: F401 +from paddlemetrics.image.psnr import PSNR # noqa: F401 +from paddlemetrics.image.ssim import SSIM # noqa: F401 diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/image/fid.py b/EE/paddlemetric/src/build/lib/paddlemetrics/image/fid.py new file mode 100644 index 000000000..6f2965db6 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/image/fid.py @@ -0,0 +1,283 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import Any, Callable, List, Optional, Union
+
+import numpy as np
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+from paddleext.torchapi.autograd import Function
+
+from paddlemetrics.metric import Metric
+from paddlemetrics.utilities import rank_zero_info, rank_zero_warn
+from paddlemetrics.utilities.data import dim_zero_cat
+from paddlemetrics.utilities.imports import _SCIPY_AVAILABLE, _TORCH_FIDELITY_AVAILABLE
+
+if _TORCH_FIDELITY_AVAILABLE:
+    from torch_fidelity.feature_extractor_inceptionv3 import FeatureExtractorInceptionV3
+else:
+
+    class FeatureExtractorInceptionV3(B.nn.Module):  # type: ignore
+        pass
+
+
+if _SCIPY_AVAILABLE:
+    import scipy
+
+
+class NoTrainInceptionV3(FeatureExtractorInceptionV3):
+    def __init__(
+        self,
+        name: str,
+        features_list: List[str],
+        feature_extractor_weights_path: Optional[str] = None,
+    ) -> None:
+        super().__init__(name, features_list, feature_extractor_weights_path)
+        # put into evaluation mode
+        self.eval()
+
+    def train(self, mode: bool) -> "NoTrainInceptionV3":
+        """The inception network should not be able to be switched away from evaluation mode."""
+        return super().train(False)
+
+    def forward(self, x: Tensor) -> Tensor:
+        out = super().forward(x)
+        return out[0].reshape(x.shape[0], -1)
+
+
+class MatrixSquareRoot(Function):
+    """Square root of a positive definite matrix.
+
+    All credit to: `Square Root of a Positive Definite Matrix`_
+    """
+
+    @staticmethod
+    def forward(ctx: Any, input_data: Tensor) -> Tensor:
+        # TODO: update whenever pytorch gets a matrix square root function
+        # Issue: https://github.com/pytorch/pytorch/issues/9983
+        m = input_data.detach().cpu().numpy().astype(np.float_)
+        scipy_res, _ = scipy.linalg.sqrtm(m, disp=False)
+        sqrtm = B.from_numpy(scipy_res.real).to(input_data)
+        ctx.save_for_backward(sqrtm)
+        return sqrtm
+
+    @staticmethod
+    def backward(ctx: Any, grad_output: Tensor) -> Tensor:
+        grad_input = None
+        if ctx.needs_input_grad[0]:
+            (sqrtm,) = ctx.saved_tensors
+            sqrtm = sqrtm.data.cpu().numpy().astype(np.float_)
+            gm = grad_output.data.cpu().numpy().astype(np.float_)
+
+            # Given a positive semi-definite matrix X,
+            # since X = X^{1/2}X^{1/2}, we can compute the gradient of the
+            # matrix square root dX^{1/2} by solving the Sylvester equation:
+            # dX = (d(X^{1/2})X^{1/2} + X^{1/2}(dX^{1/2}).
+            grad_sqrtm = scipy.linalg.solve_sylvester(sqrtm, sqrtm, gm)
+
+            grad_input = B.from_numpy(grad_sqrtm).to(grad_output)
+        return grad_input
+
+
+sqrtm = MatrixSquareRoot.apply
+
+
+def _compute_fid(mu1: Tensor, sigma1: Tensor, mu2: Tensor, sigma2: Tensor, eps: float = 1e-6) -> Tensor:
+    r"""
+    Adjusted version of `Fid Score`_
+
+    The Frechet Inception Distance between two multivariate Gaussians X_x ~ N(mu_1, sigm_1)
+    and X_y ~ N(mu_2, sigm_2) is d^2 = ||mu_1 - mu_2||^2 + Tr(sigm_1 + sigm_2 - 2*sqrt(sigm_1*sigm_2)).
+
+    Args:
+        mu1: mean of activations calculated on predicted (x) samples
+        sigma1: covariance matrix over activations calculated on predicted (x) samples
+        mu2: mean of activations calculated on target (y) samples
+        sigma2: covariance matrix over activations calculated on target (y) samples
+        eps: offset constant. used if sigma_1 @ sigma_2 matrix is singular
+
+    Returns:
+        Scalar value of the distance between sets.
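+
+    Example:
+        A small sanity check with hypothetical values: for two isotropic unit-covariance
+        Gaussians whose means differ by 1 in each of 2 dimensions, the trace term vanishes
+        and the distance reduces to ``||mu1 - mu2||^2 = 2``.
+
+        >>> mu1, sigma1 = B.zeros(2), B.eye(2)  # doctest: +SKIP
+        >>> mu2, sigma2 = B.ones(2), B.eye(2)  # doctest: +SKIP
+        >>> _compute_fid(mu1, sigma1, mu2, sigma2)  # doctest: +SKIP
+        tensor(2.)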
+ """ + diff = mu1 - mu2 + + covmean = sqrtm(sigma1.mm(sigma2)) + # Product might be almost singular + if not B.isfinite(covmean).all(): + rank_zero_info(f"FID calculation produces singular product; adding {eps} to diagonal of covariance estimates") + offset = B.eye(sigma1.size(0), device=mu1.device, dtype=mu1.dtype) * eps + covmean = sqrtm((sigma1 + offset).mm(sigma2 + offset)) + + tr_covmean = B.trace(covmean) + return diff.dot(diff) + B.trace(sigma1) + B.trace(sigma2) - 2 * tr_covmean + + +class FID(Metric): + r""" + Calculates Fréchet inception distance (FID_) which is used to access the quality of generated images. Given by + + .. math:: + FID = |\mu - \mu_w| + tr(\Sigma + \Sigma_w - 2(\Sigma \Sigma_w)^{\frac{1}{2}}) + + where :math:`\mathcal{N}(\mu, \Sigma)` is the multivariate normal distribution estimated from Inception v3 [1] + features calculated on real life images and :math:`\mathcal{N}(\mu_w, \Sigma_w)` is the multivariate normal + distribution estimated from Inception v3 features calculated on generated (fake) images. The metric was + originally proposed in [1]. + + Using the default feature extraction (Inception v3 using the original weights from [2]), the input is + expected to be mini-batches of 3-channel RGB images of shape (3 x H x W) with dtype uint8. All images + will be resized to 299 x 299 which is the size of the original training data. The boolian flag ``real`` + determines if the images should update the statistics of the real distribution or the fake distribution. + + .. note:: using this metrics requires you to have ``scipy`` install. Either install as ``pip install + paddlemetrics[image]`` or ``pip install scipy`` + + .. note:: using this metric with the default feature extractor requires that ``torch-fidelity`` + is installed. Either install as ``pip install paddlemetrics[image]`` or + ``pip install torch-fidelity`` + + .. note:: the ``forward`` method can be used but ``compute_on_step`` is disabled by default (oppesit of + all other metrics) as this metric does not really make sense to calculate on a single batch. This + means that by default ``forward`` will just call ``update`` underneat. + + Args: + feature: + Either an integer or ``nn.Module``: + + - an integer will indicate the inceptionv3 feature layer to choose. Can be one of the following: + 64, 192, 768, 2048 + - an ``nn.Module`` for using a custom feature extractor. Expects that its forward method returns + an ``[N,d]`` matrix where ``N`` is the batch size and ``d`` is the feature size. + + compute_on_step: + Forward only calls ``update()`` and return ``None`` if this is set to ``False``. + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step + process_group: + Specify the process group on which synchronization is called. + default: ``None`` (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. 
+
+    References:
+        [1] Rethinking the Inception Architecture for Computer Vision
+        Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens, Zbigniew Wojna
+        https://arxiv.org/abs/1512.00567
+
+        [2] GANs Trained by a Two Time-Scale Update Rule Converge to a Local Nash Equilibrium,
+        Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, Sepp Hochreiter
+        https://arxiv.org/abs/1706.08500
+
+    Raises:
+        ValueError:
+            If ``feature`` is set to an ``int`` (default settings) and ``torch-fidelity`` is not installed
+        ValueError:
+            If ``feature`` is set to an ``int`` not in [64, 192, 768, 2048]
+        TypeError:
+            If ``feature`` is not a ``str``, ``int`` or ``B.nn.Module``
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> _ = B.manual_seed(123)
+        >>> from paddlemetrics import FID
+        >>> fid = FID(feature=64)  # doctest: +SKIP
+        >>> # generate two slightly overlapping image intensity distributions
+        >>> imgs_dist1 = B.randint(0, 200, (100, 3, 299, 299), dtype=B.uint8)  # doctest: +SKIP
+        >>> imgs_dist2 = B.randint(100, 255, (100, 3, 299, 299), dtype=B.uint8)  # doctest: +SKIP
+        >>> fid.update(imgs_dist1, real=True)  # doctest: +SKIP
+        >>> fid.update(imgs_dist2, real=False)  # doctest: +SKIP
+        >>> fid.compute()  # doctest: +SKIP
+        tensor(12.7202)
+
+    """
+    real_features: List[Tensor]
+    fake_features: List[Tensor]
+
+    def __init__(
+        self,
+        feature: Union[int, B.nn.Module] = 2048,
+        compute_on_step: bool = False,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable[[Tensor], List[Tensor]] = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+
+        rank_zero_warn(
+            "Metric `FID` will save all extracted features in buffer."
+            " For large datasets this may lead to large memory footprint.",
+            UserWarning,
+        )
+
+        if isinstance(feature, int):
+            if not _TORCH_FIDELITY_AVAILABLE:
+                raise ValueError(
+                    "FID metric requires that Torch-fidelity is installed."
+                    " Either install as `pip install paddlemetrics[image]` or `pip install torch-fidelity`"
+                )
+            valid_int_input = [64, 192, 768, 2048]
+            if feature not in valid_int_input:
+                raise ValueError(
+                    f"Integer input to argument `feature` must be one of {valid_int_input}, but got {feature}."
+                )
+
+            self.inception = NoTrainInceptionV3(name="inception-v3-compat", features_list=[str(feature)])
+        elif isinstance(feature, B.nn.Module):
+            self.inception = feature
+        else:
+            raise TypeError("Got unknown input to argument `feature`")
+
+        self.add_state("real_features", [], dist_reduce_fx=None)
+        self.add_state("fake_features", [], dist_reduce_fx=None)
+
+    def update(self, imgs: Tensor, real: bool) -> None:  # type: ignore
+        """Update the state with extracted features.
+
+        Args:
+            imgs: tensor with images fed to the feature extractor
+            real: bool indicating if imgs belong to the real or the fake distribution
+        """
+        features = self.inception(imgs)
+
+        if real:
+            self.real_features.append(features)
+        else:
+            self.fake_features.append(features)
+
+    def compute(self) -> Tensor:
+        """Calculate FID score based on accumulated extracted features from the two distributions."""
+        real_features = dim_zero_cat(self.real_features)
+        fake_features = dim_zero_cat(self.fake_features)
+        # computation is extremely sensitive so it needs to happen in double precision
+        orig_dtype = real_features.dtype
+        real_features = real_features.double()
+        fake_features = fake_features.double()
+
+        # calculate mean and covariance
+        n = real_features.shape[0]
+        mean1 = real_features.mean(dim=0)
+        mean2 = fake_features.mean(dim=0)
+        diff1 = real_features - mean1
+        diff2 = fake_features - mean2
+        cov1 = 1.0 / (n - 1) * diff1.t().mm(diff1)
+        cov2 = 1.0 / (n - 1) * diff2.t().mm(diff2)
+
+        # compute fid
+        return _compute_fid(mean1, cov1, mean2, cov2).to(orig_dtype)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/image/inception.py b/EE/paddlemetric/src/build/lib/paddlemetrics/image/inception.py
new file mode 100644
index 000000000..6c05b9a4b
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/image/inception.py
@@ -0,0 +1,179 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, List, Optional, Tuple, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.image.fid import NoTrainInceptionV3
+from paddlemetrics.metric import Metric
+from paddlemetrics.utilities import rank_zero_warn
+from paddlemetrics.utilities.data import dim_zero_cat
+from paddlemetrics.utilities.imports import _TORCH_FIDELITY_AVAILABLE
+
+
+class IS(Metric):
+    r"""
+    Calculates the Inception Score (IS) which is used to assess how realistic generated images are.
+    It is defined as
+
+    .. math::
+        IS = exp(\mathbb{E}_x KL(p(y | x ) || p(y)))
+
+    where :math:`KL(p(y | x) || p(y))` is the KL divergence between the conditional distribution :math:`p(y|x)`
+    and the marginal distribution :math:`p(y)`. Both the conditional and marginal distributions are calculated
+    from features extracted from the images. The score is calculated on random splits of the images such that
+    both a mean and standard deviation of the score are returned. The metric was originally proposed in [1].
+
+    Using the default feature extraction (Inception v3 using the original weights from [2]), the input is
+    expected to be mini-batches of 3-channel RGB images of shape (3 x H x W) with dtype uint8. All images
+    will be resized to 299 x 299 which is the size of the original training data.
+
+    .. note:: using this metric with the default feature extractor requires that ``torch-fidelity``
+        is installed. Either install as ``pip install paddlemetrics[image]`` or
+        ``pip install torch-fidelity``
+
+    .. note:: the ``forward`` method can be used but ``compute_on_step`` is disabled by default (opposite of
+        all other metrics) as this metric does not really make sense to calculate on a single batch. This
+        means that by default ``forward`` will just call ``update`` underneath.
+
+    Args:
+        feature:
+            Either a str, integer or ``nn.Module``:
+
+            - a str or integer will indicate the inceptionv3 feature layer to choose. Can be one of the following:
+              'logits_unbiased', 64, 192, 768, 2048
+            - an ``nn.Module`` for using a custom feature extractor. Expects that its forward method returns
+              an ``[N,d]`` matrix where ``N`` is the batch size and ``d`` is the feature size.
+
+        splits: integer determining how many splits the inception score calculation should be split among
+
+        compute_on_step:
+            Forward only calls ``update()`` and return ``None`` if this is set to ``False``.
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step
+        process_group:
+            Specify the process group on which synchronization is called.
+            default: ``None`` (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When ``None``, DDP
+            will be used to perform the allgather
+
+    References:
+        [1] Improved Techniques for Training GANs
+        Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, Xi Chen
+        https://arxiv.org/abs/1606.03498
+
+        [2] GANs Trained by a Two Time-Scale Update Rule Converge to a Local Nash Equilibrium,
+        Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, Sepp Hochreiter
+        https://arxiv.org/abs/1706.08500
+
+    Raises:
+        ValueError:
+            If ``feature`` is set to a ``str`` or ``int`` and ``torch-fidelity`` is not installed
+        ValueError:
+            If ``feature`` is set to a ``str`` or ``int`` and not one of ['logits_unbiased', 64, 192, 768, 2048]
+        TypeError:
+            If ``feature`` is not a ``str``, ``int`` or ``B.nn.Module``
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> _ = B.manual_seed(123)
+        >>> from paddlemetrics import IS
+        >>> inception = IS()  # doctest: +SKIP
+        >>> # generate some images
+        >>> imgs = B.randint(0, 255, (100, 3, 299, 299), dtype=B.uint8)  # doctest: +SKIP
+        >>> inception.update(imgs)  # doctest: +SKIP
+        >>> inception.compute()  # doctest: +SKIP
+        (tensor(1.0569), tensor(0.0113))
+
+    """
+    features: List
+
+    def __init__(
+        self,
+        feature: Union[str, int, B.nn.Module] = "logits_unbiased",
+        splits: int = 10,
+        compute_on_step: bool = False,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable[[Tensor], List[Tensor]] = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+
+        rank_zero_warn(
+            "Metric `IS` will save all extracted features in buffer."
+            " For large datasets this may lead to large memory footprint.",
+            UserWarning,
+        )
+
+        if isinstance(feature, (str, int)):
+            if not _TORCH_FIDELITY_AVAILABLE:
+                raise ValueError(
+                    "IS metric requires that Torch-fidelity is installed."
+                    " Either install as `pip install paddlemetrics[image]`"
+                    " or `pip install torch-fidelity`"
+                )
+            valid_int_input = ("logits_unbiased", 64, 192, 768, 2048)
+            if feature not in valid_int_input:
+                raise ValueError(
+                    f"Integer input to argument `feature` must be one of {valid_int_input}," f" but got {feature}."
+                )
+
+            self.inception = NoTrainInceptionV3(name="inception-v3-compat", features_list=[str(feature)])
+        elif isinstance(feature, B.nn.Module):
+            self.inception = feature
+        else:
+            raise TypeError("Got unknown input to argument `feature`")
+
+        self.splits = splits
+        self.add_state("features", [], dist_reduce_fx=None)
+
+    def update(self, imgs: Tensor) -> None:  # type: ignore
+        """Update the state with extracted features.
+
+        Args:
+            imgs: tensor with images fed to the feature extractor
+        """
+        features = self.inception(imgs)
+        self.features.append(features)
+
+    def compute(self) -> Tuple[Tensor, Tensor]:
+        features = dim_zero_cat(self.features)
+        # randomly permute the features
+        idx = B.randperm(features.shape[0])
+        features = features[idx]
+
+        # calculate probs and logits
+        prob = features.softmax(dim=1)
+        log_prob = features.log_softmax(dim=1)
+
+        # split into groups
+        prob = prob.chunk(self.splits, dim=0)
+        log_prob = log_prob.chunk(self.splits, dim=0)
+
+        # calculate score per split
+        mean_prob = [p.mean(dim=0, keepdim=True) for p in prob]
+        kl_ = [p * (log_p - m_p.log()) for p, log_p, m_p in zip(prob, log_prob, mean_prob)]
+        kl_ = [k.sum(dim=1).mean().exp() for k in kl_]
+        kl = B.stack(kl_)
+
+        # return mean and std
+        return kl.mean(), kl.std()
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/image/kid.py b/EE/paddlemetric/src/build/lib/paddlemetrics/image/kid.py
new file mode 100644
index 000000000..2f3d3a6b7
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/image/kid.py
@@ -0,0 +1,277 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
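To make the split-and-KL arithmetic in `IS.compute` above concrete, here is a minimal standalone sketch of the same computation on plain arrays. It is illustrative only: NumPy stands in for the tensor backend, the function name and the random logits are assumptions, and `ddof=1` mirrors the unbiased `kl.std()` used above.

```python
# Sketch of the Inception Score computation, assuming `logits` is an
# [N, num_classes] array of raw classifier outputs.
import numpy as np

def inception_score(logits: np.ndarray, splits: int = 10):
    shifted = logits - logits.max(axis=1, keepdims=True)       # numerically stable softmax
    prob = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
    log_prob = np.log(prob)

    scores = []
    for p, log_p in zip(np.array_split(prob, splits), np.array_split(log_prob, splits)):
        mean_p = p.mean(axis=0, keepdims=True)                  # marginal p(y) on this split
        kl = (p * (log_p - np.log(mean_p))).sum(axis=1)         # KL(p(y|x) || p(y)) per sample
        scores.append(np.exp(kl.mean()))                        # IS = exp(E_x[KL]) for the split
    scores = np.asarray(scores)
    return scores.mean(), scores.std(ddof=1)                    # mean and std across splits

mean, std = inception_score(np.random.default_rng(0).normal(size=(100, 10)))
```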
+from typing import Any, Callable, List, Optional, Tuple, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+from paddleext.torchapi.nn import Module
+
+from paddlemetrics.image.fid import NoTrainInceptionV3
+from paddlemetrics.metric import Metric
+from paddlemetrics.utilities import rank_zero_warn
+from paddlemetrics.utilities.data import dim_zero_cat
+from paddlemetrics.utilities.imports import _TORCH_FIDELITY_AVAILABLE
+
+
+def maximum_mean_discrepancy(k_xx: Tensor, k_xy: Tensor, k_yy: Tensor) -> Tensor:
+    """Adapted from `KID Score`_"""
+    m = k_xx.shape[0]
+
+    diag_x = B.diag(k_xx)
+    diag_y = B.diag(k_yy)
+
+    kt_xx_sums = k_xx.sum(dim=-1) - diag_x
+    kt_yy_sums = k_yy.sum(dim=-1) - diag_y
+    k_xy_sums = k_xy.sum(dim=0)
+
+    kt_xx_sum = kt_xx_sums.sum()
+    kt_yy_sum = kt_yy_sums.sum()
+    k_xy_sum = k_xy_sums.sum()
+
+    value = (kt_xx_sum + kt_yy_sum) / (m * (m - 1))
+    value -= 2 * k_xy_sum / (m ** 2)
+    return value
+
+
+def poly_kernel(f1: Tensor, f2: Tensor, degree: int = 3, gamma: Optional[float] = None, coef: float = 1.0) -> Tensor:
+    """Adapted from `KID Score`_"""
+    if gamma is None:
+        gamma = 1.0 / f1.shape[1]
+    kernel = (f1 @ f2.T * gamma + coef) ** degree
+    return kernel
+
+
+def poly_mmd(
+    f_real: Tensor, f_fake: Tensor, degree: int = 3, gamma: Optional[float] = None, coef: float = 1.0
+) -> Tensor:
+    """Adapted from `KID Score`_"""
+    k_11 = poly_kernel(f_real, f_real, degree, gamma, coef)
+    k_22 = poly_kernel(f_fake, f_fake, degree, gamma, coef)
+    k_12 = poly_kernel(f_real, f_fake, degree, gamma, coef)
+    return maximum_mean_discrepancy(k_11, k_12, k_22)
+
+
+class KID(Metric):
+    r"""
+    Calculates Kernel Inception Distance (KID) which is used to assess the quality of generated images. Given by
+
+    .. math::
+        KID = MMD(f_{real}, f_{fake})^2
+
+    where :math:`MMD` is the maximum mean discrepancy and :math:`f_{real}, f_{fake}` are extracted features
+    from real and fake images, see [1] for more details. In particular, calculating the MMD requires the
+    evaluation of a polynomial kernel function :math:`k`
+
+    .. math::
+        k(x,y) = (\gamma * x^T y + coef)^{degree}
+
+    which controls the distance between two features. In practice the MMD is calculated over a number of
+    subsets to be able to both get the mean and standard deviation of KID.
+
+    Using the default feature extraction (Inception v3 using the original weights from [2]), the input is
+    expected to be mini-batches of 3-channel RGB images of shape (3 x H x W) with dtype uint8. All images
+    will be resized to 299 x 299 which is the size of the original training data.
+
+    .. note:: using this metric with the default feature extractor requires that ``torch-fidelity``
+        is installed. Either install as ``pip install paddlemetrics[image]`` or
+        ``pip install torch-fidelity``
+
+    .. note:: the ``forward`` method can be used but ``compute_on_step`` is disabled by default (opposite of
+        all other metrics) as this metric does not really make sense to calculate on a single batch. This
+        means that by default ``forward`` will just call ``update`` underneath.
+
+    Args:
+        feature:
+            Either a str, integer or ``nn.Module``:
+
+            - a str or integer will indicate the inceptionv3 feature layer to choose. Can be one of the following:
+              'logits_unbiased', 64, 192, 768, 2048
+            - an ``nn.Module`` for using a custom feature extractor. Expects that its forward method returns
+              an ``[N,d]`` matrix where ``N`` is the batch size and ``d`` is the feature size.
+
+        subsets:
+            Number of subsets to calculate the mean and standard deviation scores over
+        subset_size:
+            Number of randomly picked samples in each subset
+        degree:
+            Degree of the polynomial kernel function
+        gamma:
+            Scale-length of polynomial kernel. If set to ``None`` will be automatically set to the feature size
+        coef:
+            Bias term in the polynomial kernel.
+        compute_on_step:
+            Forward only calls ``update()`` and returns ``None`` if this is set to ``False``.
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step
+        process_group:
+            Specify the process group on which synchronization is called.
+            default: ``None`` (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When ``None``, DDP
+            will be used to perform the allgather
+
+    References:
+        [1] Demystifying MMD GANs
+        Mikołaj Bińkowski, Danica J. Sutherland, Michael Arbel, Arthur Gretton
+        https://arxiv.org/abs/1801.01401
+
+        [2] GANs Trained by a Two Time-Scale Update Rule Converge to a Local Nash Equilibrium,
+        Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, Sepp Hochreiter
+        https://arxiv.org/abs/1706.08500
+
+    Raises:
+        ValueError:
+            If ``feature`` is set to an ``int`` (default settings) and ``torch-fidelity`` is not installed
+        ValueError:
+            If ``feature`` is set to an ``int`` not in [64, 192, 768, 2048]
+        ValueError:
+            If ``subsets`` is not an integer larger than 0
+        ValueError:
+            If ``subset_size`` is not an integer larger than 0
+        ValueError:
+            If ``degree`` is not an integer larger than 0
+        ValueError:
+            If ``gamma`` is neither ``None`` nor a float larger than 0
+        ValueError:
+            If ``coef`` is not a float larger than 0
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> _ = B.manual_seed(123)
+        >>> from paddlemetrics import KID
+        >>> kid = KID(subset_size=50)  # doctest: +SKIP
+        >>> # generate two slightly overlapping image intensity distributions
+        >>> imgs_dist1 = B.randint(0, 200, (100, 3, 299, 299), dtype=B.uint8)  # doctest: +SKIP
+        >>> imgs_dist2 = B.randint(100, 255, (100, 3, 299, 299), dtype=B.uint8)  # doctest: +SKIP
+        >>> kid.update(imgs_dist1, real=True)  # doctest: +SKIP
+        >>> kid.update(imgs_dist2, real=False)  # doctest: +SKIP
+        >>> kid_mean, kid_std = kid.compute()  # doctest: +SKIP
+        >>> print((kid_mean, kid_std))  # doctest: +SKIP
+        (tensor(0.0338), tensor(0.0025))
+
+    """
+    real_features: List[Tensor]
+    fake_features: List[Tensor]
+
+    def __init__(
+        self,
+        feature: Union[str, int, B.nn.Module] = 2048,
+        subsets: int = 100,
+        subset_size: int = 1000,
+        degree: int = 3,
+        gamma: Optional[float] = None,  # type: ignore
+        coef: float = 1.0,
+        compute_on_step: bool = False,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Optional[Callable] = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+
+        rank_zero_warn(
+            "Metric `KID` will save all extracted features in buffer."
+            " For large datasets this may lead to large memory footprint.",
+            UserWarning,
+        )
+
+        if isinstance(feature, (str, int)):
+            if not _TORCH_FIDELITY_AVAILABLE:
+                raise ValueError(
+                    "KID metric requires that Torch-fidelity is installed."
+ " Either install as `pip install paddlemetrics[image]`" + " or `pip install torch-fidelity`" + ) + valid_int_input = ("logits_unbiased", 64, 192, 768, 2048) + if feature not in valid_int_input: + raise ValueError( + f"Integer input to argument `feature` must be one of {valid_int_input}," f" but got {feature}." + ) + + self.inception: Module = NoTrainInceptionV3(name="inception-v3-compat", features_list=[str(feature)]) + elif isinstance(feature, Module): + self.inception = feature + else: + raise TypeError("Got unknown input to argument `feature`") + + if not (isinstance(subsets, int) and subsets > 0): + raise ValueError("Argument `subsets` expected to be integer larger than 0") + self.subsets = subsets + + if not (isinstance(subset_size, int) and subset_size > 0): + raise ValueError("Argument `subset_size` expected to be integer larger than 0") + self.subset_size = subset_size + + if not (isinstance(degree, int) and degree > 0): + raise ValueError("Argument `degree` expected to be integer larger than 0") + self.degree = degree + + if gamma is not None and not (isinstance(gamma, float) and gamma > 0): + raise ValueError("Argument `gamma` expected to be `None` or float larger than 0") + self.gamma = gamma + + if not (isinstance(coef, float) and coef > 0): + raise ValueError("Argument `coef` expected to be float larger than 0") + self.coef = coef + + # states for extracted features + self.add_state("real_features", [], dist_reduce_fx=None) + self.add_state("fake_features", [], dist_reduce_fx=None) + + def update(self, imgs: Tensor, real: bool) -> None: # type: ignore + """Update the state with extracted features. + + Args: + imgs: tensor with images feed to the feature extractor + real: bool indicating if imgs belong to the real or the fake distribution + """ + features = self.inception(imgs) + + if real: + self.real_features.append(features) + else: + self.fake_features.append(features) + + def compute(self) -> Tuple[Tensor, Tensor]: + """Calculate KID score based on accumulated extracted features from the two distributions. Returns a tuple + of mean and standard deviation of KID scores calculated on subsets of extracted features. + + Implementation inspired by `Fid Score`_ + """ + real_features = dim_zero_cat(self.real_features) + fake_features = dim_zero_cat(self.fake_features) + + n_samples_real = real_features.shape[0] + if n_samples_real < self.subset_size: + raise ValueError("Argument `subset_size` should be smaller than the number of samples") + n_samples_fake = fake_features.shape[0] + if n_samples_fake < self.subset_size: + raise ValueError("Argument `subset_size` should be smaller than the number of samples") + + kid_scores_ = [] + for _ in range(self.subsets): + perm = B.randperm(n_samples_real) + f_real = real_features[perm[: self.subset_size]] + perm = B.randperm(n_samples_fake) + f_fake = fake_features[perm[: self.subset_size]] + + o = poly_mmd(f_real, f_fake, self.degree, self.gamma, self.coef) + kid_scores_.append(o) + kid_scores = B.stack(kid_scores_) + return kid_scores.mean(), kid_scores.std(unbiased=False) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/image/lpip_similarity.py b/EE/paddlemetric/src/build/lib/paddlemetrics/image/lpip_similarity.py new file mode 100644 index 000000000..7cf6d03a6 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/image/lpip_similarity.py @@ -0,0 +1,156 @@ +# Copyright The PyTorch Lightning team. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, List, Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.metric import Metric
+from paddlemetrics.utilities.imports import _LPIPS_AVAILABLE
+
+if _LPIPS_AVAILABLE:
+    from lpips import LPIPS as Lpips_backbone
+else:
+
+    class Lpips_backbone(B.nn.Module):  # type: ignore
+        pass
+
+
+class NoTrainLpips(Lpips_backbone):
+    def train(self, mode: bool) -> "NoTrainLpips":
+        """the network should not be able to be switched away from evaluation mode."""
+        return super().train(False)
+
+
+def _valid_img(img: Tensor) -> bool:
+    """check that input is a valid image to the network."""
+    return img.ndim == 4 and img.shape[1] == 3 and img.min() >= -1.0 and img.max() <= 1.0
+
+
+class LPIPS(Metric):
+    """The Learned Perceptual Image Patch Similarity (`LPIPS_`) is used to judge the perceptual similarity between
+    two images. LPIPS essentially computes the similarity between the activations of two image patches for some
+    pre-defined network. This measure has been shown to match human perception well. A low LPIPS score means that
+    the image patches are perceptually similar.
+
+    Both input image patches are expected to have shape `[N, 3, H, W]` and be normalized to the [-1,1]
+    range. The minimum size of `H, W` depends on the chosen backbone (see `net_type` arg).
+
+    .. note:: using this metric requires you to have the ``lpips`` package installed. Either install
+        as ``pip install paddlemetrics[image]`` or ``pip install lpips``
+
+    .. note:: this metric is not scriptable when using ``torch<1.8``. Please update your pytorch installation
+        if this is an issue.
+
+    Args:
+        net_type: str indicating backbone network type to use. Choose between `'alex'`, `'vgg'` or `'squeeze'`
+        reduction: str indicating how to reduce over the batch dimension. Choose between `'sum'` or `'mean'`.
+        compute_on_step:
+            Forward only calls ``update()`` and returns ``None`` if this is set to ``False``.
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step
+        process_group:
+            Specify the process group on which synchronization is called.
+            default: ``None`` (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state.
When ``None``, DDP
+            will be used to perform the allgather
+
+    Raises:
+        ValueError:
+            If ``lpips`` package is not installed
+        ValueError:
+            If ``net_type`` is not one of ``"vgg"``, ``"alex"`` or ``"squeeze"``
+        ValueError:
+            If ``reduction`` is not one of ``"mean"`` or ``"sum"``
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> _ = B.manual_seed(123)
+        >>> from paddlemetrics import LPIPS
+        >>> lpips = LPIPS(net_type='vgg')
+        >>> img1 = B.rand(10, 3, 100, 100)
+        >>> img2 = B.rand(10, 3, 100, 100)
+        >>> lpips(img1, img2)
+        tensor([0.3566], grad_fn=<DivBackward0>)
+    """
+
+    is_differentiable = True
+    real_features: List[Tensor]
+    fake_features: List[Tensor]
+
+    # due to the use of named tuple in the backbone the net variable cannot be scripted
+    __jit_ignored_attributes__ = ["net"]
+
+    def __init__(
+        self,
+        net_type: str = "alex",
+        reduction: str = "mean",
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Optional[Callable[[Tensor], List[Tensor]]] = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+
+        if not _LPIPS_AVAILABLE:
+            raise ValueError(
+                "LPIPS metric requires that lpips is installed."
+                " Either install as `pip install paddlemetrics[image]` or `pip install lpips`"
+            )
+
+        valid_net_type = ("vgg", "alex", "squeeze")
+        if net_type not in valid_net_type:
+            raise ValueError(f"Argument `net_type` must be one of {valid_net_type}, but got {net_type}.")
+        self.net = NoTrainLpips(net=net_type, verbose=False)
+
+        valid_reduction = ("mean", "sum")
+        if reduction not in valid_reduction:
+            raise ValueError(f"Argument `reduction` must be one of {valid_reduction}, but got {reduction}")
+        self.reduction = reduction
+
+        self.add_state("sum_scores", B.zeros(1), dist_reduce_fx="sum")
+        self.add_state("total", B.zeros(1), dist_reduce_fx="sum")
+
+    def update(self, img1: Tensor, img2: Tensor) -> None:  # type: ignore
+        """Update internal states with lpips score.
+
+        Args:
+            img1: tensor with images of shape [N, 3, H, W]
+            img2: tensor with images of shape [N, 3, H, W]
+        """
+        if not (_valid_img(img1) and _valid_img(img2)):
+            raise ValueError(
+                "Expected both input arguments to be normalized tensors (all values in range [-1,1])"
+                f" and to have shape [N, 3, H, W] but `img1` has shape {img1.shape} with values in"
+                f" range {[img1.min(), img1.max()]} and `img2` has shape {img2.shape} with values"
+                f" in range {[img2.min(), img2.max()]}"
+            )
+
+        loss = self.net(img1, img2).squeeze()
+        self.sum_scores += loss.sum()
+        self.total += img1.shape[0]
+
+    def compute(self) -> Tensor:
+        """Compute final perceptual similarity metric."""
+        if self.reduction == "mean":
+            return self.sum_scores / self.total
+        if self.reduction == "sum":
+            return self.sum_scores
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/image/psnr.py b/EE/paddlemetric/src/build/lib/paddlemetrics/image/psnr.py
new file mode 100644
index 000000000..3226203d7
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/image/psnr.py
@@ -0,0 +1,147 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Optional, Sequence, Tuple, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.functional.image.psnr import _psnr_compute, _psnr_update
+from paddlemetrics.metric import Metric
+from paddlemetrics.utilities import rank_zero_warn
+
+
+class PSNR(Metric):
+    r"""
+    Computes `Peak Signal-to-Noise Ratio`_ (PSNR):
+
+    .. math:: \text{PSNR}(I, J) = 10 * \log_{10} \left(\frac{\max(I)^2}{\text{MSE}(I, J)}\right)
+
+    Where :math:`\text{MSE}` denotes the `mean-squared-error`_ function.
+
+    Args:
+        data_range:
+            the range of the data. If None, it is determined from the data (max - min).
+            The ``data_range`` must be given when ``dim`` is not None.
+        base: a base of a logarithm to use (default: 10)
+        reduction: a method to reduce metric score over labels.
+
+            - ``'elementwise_mean'``: takes the mean (default)
+            - ``'sum'``: takes the sum
+            - ``'none'``: no reduction will be applied
+
+        dim:
+            Dimensions to reduce PSNR scores over, provided as either an integer or a list of integers. Default is
+            None meaning scores will be reduced across all dimensions and all batches.
+        compute_on_step:
+            Forward only calls ``update()`` and return None if this is set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step. default: False
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+
+    Raises:
+        ValueError:
+            If ``dim`` is not ``None`` and ``data_range`` is not given.
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics import PSNR
+        >>> psnr = PSNR()
+        >>> preds = B.tensor([[0.0, 1.0], [2.0, 3.0]])
+        >>> target = B.tensor([[3.0, 2.0], [1.0, 0.0]])
+        >>> psnr(preds, target)
+        tensor(2.5527)
+
+    .. note::
+        Half precision is only supported on GPU for this metric
+
+    """
+    min_target: Tensor
+    max_target: Tensor
+
+    def __init__(
+        self,
+        data_range: Optional[float] = None,
+        base: float = 10.0,
+        reduction: str = "elementwise_mean",
+        dim: Optional[Union[int, Tuple[int, ...]]] = None,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+        )
+
+        if dim is None and reduction != "elementwise_mean":
+            rank_zero_warn(f"The `reduction={reduction}` will not have any effect when `dim` is None.")
+
+        if dim is None:
+            self.add_state("sum_squared_error", default=tensor(0.0), dist_reduce_fx="sum")
+            self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
+        else:
+            self.add_state("sum_squared_error", default=[])
+            self.add_state("total", default=[])
+
+        if data_range is None:
+            if dim is not None:
+                # Maybe we could use `B.amax(target, dim=dim) - B.amin(target, dim=dim)` in PyTorch 1.7 to
+                # calculate `data_range` in the future.
+                raise ValueError("The `data_range` must be given when `dim` is not None.")
+
+            self.data_range = None
+            self.add_state("min_target", default=tensor(0.0), dist_reduce_fx=B.min)
+            self.add_state("max_target", default=tensor(0.0), dist_reduce_fx=B.max)
+        else:
+            self.add_state("data_range", default=tensor(float(data_range)), dist_reduce_fx="mean")
+        self.base = base
+        self.reduction = reduction
+        self.dim = tuple(dim) if isinstance(dim, Sequence) else dim
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Update state with predictions and targets.
+
+        Args:
+            preds: Predictions from model
+            target: Ground truth values
+        """
+        sum_squared_error, n_obs = _psnr_update(preds, target, dim=self.dim)
+        if self.dim is None:
+            if self.data_range is None:
+                # keep track of min and max target values
+                self.min_target = min(target.min(), self.min_target)
+                self.max_target = max(target.max(), self.max_target)
+
+            self.sum_squared_error += sum_squared_error
+            self.total += n_obs
+        else:
+            self.sum_squared_error.append(sum_squared_error)
+            self.total.append(n_obs)
+
+    def compute(self) -> Tensor:
+        """Compute peak signal-to-noise ratio over state."""
+        if self.data_range is not None:
+            data_range = self.data_range
+        else:
+            data_range = self.max_target - self.min_target
+
+        if self.dim is None:
+            sum_squared_error = self.sum_squared_error
+            total = self.total
+        else:
+            sum_squared_error = B.cat([values.flatten() for values in self.sum_squared_error])
+            total = B.cat([values.flatten() for values in self.total])
+        return _psnr_compute(sum_squared_error, total, data_range, base=self.base, reduction=self.reduction)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/image/ssim.py b/EE/paddlemetric/src/build/lib/paddlemetrics/image/ssim.py
new file mode 100644
index 000000000..f34a19b1c
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/image/ssim.py
@@ -0,0 +1,105 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, List, Optional, Sequence
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.image.ssim import _ssim_compute, _ssim_update
+from paddlemetrics.metric import Metric
+from paddlemetrics.utilities import rank_zero_warn
+from paddlemetrics.utilities.data import dim_zero_cat
+
+
+class SSIM(Metric):
+    """Computes Structural Similarity Index Measure (SSIM_).
+
+    Args:
+        kernel_size: size of the gaussian kernel (default: (11, 11))
+        sigma: Standard deviation of the gaussian kernel (default: (1.5, 1.5))
+        reduction: a method to reduce metric score over labels.
+
+            - ``'elementwise_mean'``: takes the mean (default)
+            - ``'sum'``: takes the sum
+            - ``'none'``: no reduction will be applied
+
+        data_range: Range of the image. If ``None``, it is determined from the image (max - min)
+        k1: Parameter of SSIM. Default: 0.01
+        k2: Parameter of SSIM.
Default: 0.03
+
+    Return:
+        Tensor with SSIM score
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics import SSIM
+        >>> preds = B.rand([16, 1, 16, 16])
+        >>> target = preds * 0.75
+        >>> ssim = SSIM()
+        >>> ssim(preds, target)
+        tensor(0.9219)
+    """
+
+    preds: List[Tensor]
+    target: List[Tensor]
+
+    def __init__(
+        self,
+        kernel_size: Sequence[int] = (11, 11),
+        sigma: Sequence[float] = (1.5, 1.5),
+        reduction: str = "elementwise_mean",
+        data_range: Optional[float] = None,
+        k1: float = 0.01,
+        k2: float = 0.03,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+        )
+        rank_zero_warn(
+            "Metric `SSIM` will save all targets and"
+            " predictions in buffer. For large datasets this may lead"
+            " to large memory footprint."
+        )
+
+        self.add_state("preds", default=[], dist_reduce_fx="cat")
+        self.add_state("target", default=[], dist_reduce_fx="cat")
+        self.kernel_size = kernel_size
+        self.sigma = sigma
+        self.data_range = data_range
+        self.k1 = k1
+        self.k2 = k2
+        self.reduction = reduction
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Update state with predictions and targets.
+
+        Args:
+            preds: Predictions from model
+            target: Ground truth values
+        """
+        preds, target = _ssim_update(preds, target)
+        self.preds.append(preds)
+        self.target.append(target)
+
+    def compute(self) -> Tensor:
+        """Computes SSIM over state."""
+        preds = dim_zero_cat(self.preds)
+        target = dim_zero_cat(self.target)
+        return _ssim_compute(
+            preds, target, self.kernel_size, self.sigma, self.reduction, self.data_range, self.k1, self.k2
+        )
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/metric.py b/EE/paddlemetric/src/build/lib/paddlemetrics/metric.py
new file mode 100644
index 000000000..21c2148ba
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/metric.py
@@ -0,0 +1,775 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
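The file that follows defines the `Metric` base class that every metric in this patch subclasses. As a quick orientation before the full listing, here is a minimal, hypothetical subclass showing the `add_state` / `update` / `compute` contract it enforces; the class name, its states, and the torch-style tensor methods on `paddleext.torchapi` tensors are assumptions for illustration:

```python
import paddleext.torchapi as B
from paddlemetrics.metric import Metric

class MeanAbsError(Metric):
    """Hypothetical example metric; not part of this patch."""

    def __init__(self):
        super().__init__()
        # tensor states registered here are reset by reset() and reduced
        # across processes with the given op when running distributed
        self.add_state("abs_error", default=B.tensor(0.0), dist_reduce_fx="sum")
        self.add_state("total", default=B.tensor(0.0), dist_reduce_fx="sum")

    def update(self, preds, target):
        self.abs_error += (preds - target).abs().sum()
        self.total += target.shape[0]  # assumes 1-d inputs for simplicity

    def compute(self):
        return self.abs_error / self.total

m = MeanAbsError()
m.update(B.tensor([1.0, 2.0]), B.tensor([1.5, 1.5]))
print(m.compute())  # tensor(0.5000)
```

States registered via `add_state` are exactly what `reset()` restores to defaults and what `sync()` gathers across processes, which is why subclasses never touch `forward()` directly.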
+import functools
+import inspect
+import operator as op
+from abc import ABC, abstractmethod
+from collections.abc import Sequence
+from contextlib import contextmanager
+from copy import deepcopy
+from typing import Any, Callable, Dict, Generator, List, Optional, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor, Module
+
+from paddlemetrics.utilities import apply_to_collection, rank_zero_warn
+from paddlemetrics.utilities.data import _flatten, dim_zero_cat, dim_zero_max, dim_zero_mean, dim_zero_min, dim_zero_sum
+from paddlemetrics.utilities.distributed import gather_all_tensors
+from paddlemetrics.utilities.exceptions import paddlemetricsUserError
+from paddlemetrics.utilities.imports import _LIGHTNING_AVAILABLE, _compare_version
+
+
+def jit_distributed_available() -> bool:
+    return B.distributed.is_available() and B.distributed.is_initialized()
+
+
+class Metric(Module):
+    """Base class for all metrics present in the Metrics API.
+
+    Implements ``add_state()``, ``forward()``, ``reset()`` and a few other things to
+    handle distributed synchronization and per-step metric computation.
+
+    Override ``update()`` and ``compute()`` functions to implement your own metric. Use
+    ``add_state()`` to register metric state variables which keep track of state on each
+    call of ``update()`` and are synchronized across processes when ``compute()`` is called.
+
+    Note:
+        Metric state variables can either be ``B.Tensor``s or an empty list which can be used
+        to store ``B.Tensor``s.
+
+    Note:
+        Different metrics only override ``update()`` and not ``forward()``. A call to ``update()``
+        is valid, but it won't return the metric value at the current step. A call to ``forward()``
+        automatically calls ``update()`` and also returns the metric value at the current step.
+
+    Args:
+        compute_on_step:
+            Forward only calls ``update()`` and returns None if this is set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step.
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When `None`, DDP
+            will be used to perform the allgather.
+ """ + + __jit_ignored_attributes__ = ["device"] + __jit_unused_properties__ = ["is_differentiable"] + is_differentiable: Optional[bool] = None + higher_is_better: Optional[bool] = None + + def __init__( + self, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__() + + # see (https://github.com/pytorch/pytorch/blob/3e6bb5233f9ca2c5aa55d9cda22a7ee85439aa6e/ + # B.nn/modules/module.py#L227) +# B._C._log_api_usage_once(f"paddlemetrics.metric.{self.__class__.__name__}") + +# self._LIGHTNING_GREATER_EQUAL_1_3 = _compare_version("pytorch_lightning", op.ge, "1.3.0") + self._device = B.device("cpu") + + self.dist_sync_on_step = dist_sync_on_step + self.compute_on_step = compute_on_step + self.process_group = process_group + self.dist_sync_fn = dist_sync_fn + self._to_sync = True + self._should_unsync = True + + self._update_signature = inspect.signature(self.update) + self.update: Callable = self._wrap_update(self.update) # type: ignore + self.compute: Callable = self._wrap_compute(self.compute) # type: ignore + self._computed = None + self._forward_cache = None + self._update_called = False + + # initialize state + self._defaults: Dict[str, Union[List, Tensor]] = {} + self._persistent: Dict[str, bool] = {} + self._reductions: Dict[str, Union[str, Callable[[Union[List[Tensor], Tensor]], Tensor], None]] = {} + + # state management + self._is_synced = False + self._cache: Optional[Dict[str, Union[List[Tensor], Tensor]]] = None + + def to(self, *args, **kwargs): + + return self + # result = super().to(*args, **kwargs) + # + # return result if result is not None else self + + def add_state( + self, + name: str, + default: Union[list, Tensor], + dist_reduce_fx: Optional[Union[str, Callable]] = None, + persistent: bool = False, + ) -> None: + """Adds metric state variable. Only used by subclasses. + + Args: + name: The name of the state variable. The variable will then be accessible at ``self.name``. + default: Default value of the state; can either be a ``B.Tensor`` or an empty list. The state will be + reset to this value when ``self.reset()`` is called. + dist_reduce_fx (Optional): Function to reduce state across multiple processes in distributed mode. + If value is ``"sum"``, ``"mean"``, ``"cat"``, ``"min"`` or ``"max"`` we will use ``B.sum``, + ``B.mean``, ``B.cat``, ``B.min`` and ``B.max``` respectively, each with argument + ``dim=0``. Note that the ``"cat"`` reduction only makes sense if the state is a list, and not + a tensor. The user can also pass a custom function in this parameter. + persistent (Optional): whether the state will be saved as part of the modules ``state_dict``. + Default is ``False``. + + Note: + Setting ``dist_reduce_fx`` to None will return the metric state synchronized across different processes. + However, there won't be any reduction function applied to the synchronized metric state. + + The metric states would be synced as follows + + - If the metric state is ``B.Tensor``, the synced value will be a stacked ``B.Tensor`` across + the process dimension if the metric state was a ``B.Tensor``. The original ``B.Tensor`` metric + state retains dimension and hence the synchronized output will be of shape ``(num_process, ...)``. + + - If the metric state is a ``list``, the synced value will be a ``list`` containing the + combined elements from all processes. 
+
+        Note:
+            When passing a custom function to ``dist_reduce_fx``, expect the synchronized metric state to follow
+            the format discussed in the above note.
+
+        Raises:
+            ValueError:
+                If ``default`` is not a ``tensor`` or an ``empty list``.
+            ValueError:
+                If ``dist_reduce_fx`` is not callable or one of ``"mean"``, ``"sum"``, ``"cat"``, ``None``.
+        """
+        if not isinstance(default, (Tensor, list)) or (isinstance(default, list) and default):
+            raise ValueError(f"state variable must be a tensor or an empty list (where you can append tensors): {type(default)}")
+
+        if dist_reduce_fx == "sum":
+            dist_reduce_fx = dim_zero_sum
+        elif dist_reduce_fx == "mean":
+            dist_reduce_fx = dim_zero_mean
+        elif dist_reduce_fx == "max":
+            dist_reduce_fx = dim_zero_max
+        elif dist_reduce_fx == "min":
+            dist_reduce_fx = dim_zero_min
+        elif dist_reduce_fx == "cat":
+            dist_reduce_fx = dim_zero_cat
+        elif dist_reduce_fx is not None and not callable(dist_reduce_fx):
+            raise ValueError("`dist_reduce_fx` must be callable or one of ['mean', 'sum', 'cat', None]")
+
+        if isinstance(default, Tensor):
+            default = default.contiguous()
+
+        setattr(self, name, default)
+
+        self._defaults[name] = deepcopy(default)
+        self._persistent[name] = persistent
+        self._reductions[name] = dist_reduce_fx
+
+#    @B.jit.unused
+    def forward(self, *args: Any, **kwargs: Any) -> Any:
+        """Automatically calls ``update()``.
+
+        Returns the metric value over inputs if ``compute_on_step`` is True.
+        """
+        # add current step
+        if self._is_synced:
+            raise paddlemetricsUserError(
+                "The Metric shouldn't be synced when performing ``update``. "
+                "HINT: Did you forget to call ``unsync``?"
+            )
+
+        with B.no_grad():
+            self.update(*args, **kwargs)
+
+        if self.compute_on_step:
+            self._to_sync = self.dist_sync_on_step
+            # skip restore cache operation from compute as cache is stored below.
+            self._should_unsync = False
+
+            # save context before switch
+            cache = {attr: getattr(self, attr) for attr in self._defaults}
+
+            # call reset, update, compute, on single batch
+            self.reset()
+            self.update(*args, **kwargs)
+            self._forward_cache = self.compute()
+
+            # restore context
+            for attr, val in cache.items():
+                setattr(self, attr, val)
+            self._is_synced = False
+
+            self._should_unsync = True
+            self._to_sync = True
+            self._computed = None
+
+            return self._forward_cache
+
+    def _sync_dist(self, dist_sync_fn: Callable = gather_all_tensors, process_group: Optional[Any] = None) -> None:
+        input_dict = {attr: getattr(self, attr) for attr in self._reductions}
+
+        for attr, reduction_fn in self._reductions.items():
+            # pre-concatenate metric states that are lists to reduce number of all_gather operations
+            if reduction_fn == dim_zero_cat and isinstance(input_dict[attr], list) and len(input_dict[attr]) > 1:
+                input_dict[attr] = [dim_zero_cat(input_dict[attr])]
+
+        output_dict = apply_to_collection(
+            input_dict,
+            Tensor,
+            dist_sync_fn,
+            group=process_group or self.process_group,
+        )
+
+        for attr, reduction_fn in self._reductions.items():
+            # pre-processing ops (stack or flatten for inputs)
+            if isinstance(output_dict[attr][0], Tensor):
+                output_dict[attr] = B.stack(output_dict[attr])
+            elif isinstance(output_dict[attr][0], list):
+                output_dict[attr] = _flatten(output_dict[attr])
+
+            if not (callable(reduction_fn) or reduction_fn is None):
+                raise TypeError("reduction_fn must be callable or None")
+            reduced = reduction_fn(output_dict[attr]) if reduction_fn is not None else output_dict[attr]
+            setattr(self, attr, reduced)
+
+    def _wrap_update(self, update: Callable) -> Callable:
+        @functools.wraps(update)
+        def wrapped_func(*args: Any, **kwargs: Any) -> Optional[Any]:
+            self._computed = None
+            self._update_called = True
+            return update(*args, **kwargs)
+
+        return wrapped_func
+
+    def sync(
+        self,
+        dist_sync_fn: Optional[Callable] = None,
+        process_group: Optional[Any] = None,
+        should_sync: bool = True,
+        distributed_available: Optional[Callable] = jit_distributed_available,
+    ) -> None:
+        """Sync function for manually controlling when metrics states should be synced across processes.
+
+        Args:
+            dist_sync_fn: Function to be used to perform states synchronization
+            process_group:
+                Specify the process group on which synchronization is called.
+                default: None (which selects the entire world)
+            should_sync: Whether to perform state synchronization. This will have an impact
+                only when running in a distributed setting.
+            distributed_available: Function to determine if we are running inside a distributed setting
+        """
+        if self._is_synced and should_sync:
+            raise paddlemetricsUserError("The Metric has already been synced.")
+
+        is_distributed = distributed_available() if callable(distributed_available) else None
+
+        if not should_sync or not is_distributed:
+            return
+
+        if dist_sync_fn is None:
+            dist_sync_fn = gather_all_tensors
+
+        # cache prior to syncing
+        self._cache = {attr: getattr(self, attr) for attr in self._defaults}
+
+        # sync
+        self._sync_dist(dist_sync_fn, process_group=process_group)
+        self._is_synced = True
+
+    def unsync(self, should_unsync: bool = True) -> None:
+        """Unsync function for manually controlling when metrics states should be reverted back to their local
+        states.
+
+        Args:
+            should_unsync: Whether to perform unsync
+        """
+        if not should_unsync:
+            return
+
+        if not self._is_synced:
+            raise paddlemetricsUserError("The Metric has already been un-synced.")
+
+        if self._cache is None:
+            raise paddlemetricsUserError("The internal cache should exist to unsync the Metric.")
+
+        # if we synced, restore to cache so that we can continue to accumulate un-synced state
+        for attr, val in self._cache.items():
+            setattr(self, attr, val)
+        self._is_synced = False
+        self._cache = None
+
+    @contextmanager
+    def sync_context(
+        self,
+        dist_sync_fn: Optional[Callable] = None,
+        process_group: Optional[Any] = None,
+        should_sync: bool = True,
+        should_unsync: bool = True,
+        distributed_available: Optional[Callable] = jit_distributed_available,
+    ) -> Generator:
+        """Context manager to synchronize the states between processes when running in a distributed setting and
+        restore the local cache states after yielding.
+
+        Args:
+            dist_sync_fn: Function to be used to perform states synchronization
+            process_group:
+                Specify the process group on which synchronization is called.
+                default: None (which selects the entire world)
+            should_sync: Whether to perform state synchronization. This will have an impact
+                only when running in a distributed setting.
+            should_unsync: Whether to restore the cache state so that the metrics can
+                continue to be accumulated.
+            distributed_available: Function to determine if we are running inside a distributed setting
+        """
+        self.sync(
+            dist_sync_fn=dist_sync_fn,
+            process_group=process_group,
+            should_sync=should_sync,
+            distributed_available=distributed_available,
+        )
+
+        yield
+
+        self.unsync(should_unsync=self._is_synced and should_unsync)
+
+    def _wrap_compute(self, compute: Callable) -> Callable:
+        @functools.wraps(compute)
+        def wrapped_func(*args: Any, **kwargs: Any) -> Any:
+            if not self._update_called:
+                rank_zero_warn(
+                    f"The ``compute`` method of metric {self.__class__.__name__}"
+                    " was called before the ``update`` method which may lead to errors,"
+                    " as metric states have not yet been updated.",
+                    UserWarning,
+                )
+
+            # return cached value
+            if self._computed is not None:
+                return self._computed
+
+            # compute relies on the sync context manager to gather the states across processes and apply reduction
+            # if synchronization happened, the current rank accumulated states will be restored to keep
+            # accumulation going if ``should_unsync=True``,
+            with self.sync_context(
+                dist_sync_fn=self.dist_sync_fn, should_sync=self._to_sync, should_unsync=self._should_unsync
+            ):
+                self._computed = compute(*args, **kwargs)
+
+            return self._computed
+
+        return wrapped_func
+
+    @abstractmethod
+    def update(self, *_: Any, **__: Any) -> None:
+        """Override this method to update the state variables of your metric class."""
+
+    @abstractmethod
+    def compute(self) -> Any:
+        """Override this method to compute the final metric value from state variables synchronized across the
+        distributed backend."""
+
+    def reset(self) -> None:
+        """This method automatically resets the metric state variables to their default value."""
+        self._update_called = False
+        self._forward_cache = None
+        # lower lightning versions require this implicitly to log metric objects correctly in self.log
+#        if not _LIGHTNING_AVAILABLE or self._LIGHTNING_GREATER_EQUAL_1_3:
+        self._computed = None
+
+        for attr, default in self._defaults.items():
+            current_val = getattr(self, attr)
+            if isinstance(default, Tensor):
+                setattr(self, attr, default.detach().clone().to(current_val.device))
+            else:
+                setattr(self, attr, [])
+
+        # reset internal states
+        self._cache = None
+        self._is_synced = False
+
+    def clone(self) -> "Metric":
+        """Make a copy of the metric."""
+        return deepcopy(self)
+
+    def __getstate__(self) -> Dict[str, Any]:
+        # ignore update and compute functions for pickling
+        return {k: v for k, v in self.__dict__.items() if k not in ["update", "compute", "_update_signature"]}
+
+    def __setstate__(self, state: Dict[str, Any]) -> None:
+        # manually restore update and compute functions for pickling
+        self.__dict__.update(state)
+        self._update_signature = inspect.signature(self.update)
+        self.update: Callable = self._wrap_update(self.update)  # type: ignore
+        self.compute: Callable = self._wrap_compute(self.compute)  # type: ignore
+
+    def __setattr__(self, name: str, value: Any) -> None:
+        if name in ("higher_is_better", "is_differentiable"):
+            raise RuntimeError(f"Can't change const `{name}`.")
+        super().__setattr__(name, value)
+
+    @property
+    def device(self) -> "B.device":
+        """Return the device of the metric."""
+        return self._device
+
+    def type(self, dst_type: Union[str, B.dtype]) -> "Metric":
+        """Overrides the default and prevents dtype casting.
+
+        Please use `metric.set_dtype(dtype)` instead.
+        """
+        return self
+
+    def float(self) -> "Metric":
+        """Overrides the default and prevents dtype casting.
+
+        Please use `metric.set_dtype(dtype)` instead.
+        """
+        return self
+
+    def double(self) -> "Metric":
+        """Overrides the default and prevents dtype casting.
+
+        Please use `metric.set_dtype(dtype)` instead.
+        """
+        return self
+
+    def half(self) -> "Metric":
+        """Overrides the default and prevents dtype casting.
+
+        Please use `metric.set_dtype(dtype)` instead.
+        """
+        return self
+
+    def set_dtype(self, dst_type: Union[str, B.dtype]) -> None:
+        """Special version of `type` for transferring all metric states to a specific dtype.
+
+        Arguments:
+            dst_type (type or string): the desired type
+        """
+        return super().type(dst_type)
+
+    def _apply(self, fn: Callable, *args, **kwargs) -> Module:
+        """Overwrite _apply function such that we can also move metric states to the correct device when `.to`,
+        `.cuda`, etc methods are called."""
+        this = super()._apply(fn, *args, **kwargs)
+        if this is None:  # for paddle
+            this = self
+        # Also apply fn to metric states and defaults
+        for key, value in this._defaults.items():
+            if isinstance(value, Tensor):
+                this._defaults[key] = fn(value, *args, **kwargs)
+            elif isinstance(value, Sequence):
+                this._defaults[key] = [fn(v, *args, **kwargs) for v in value]
+
+            current_val = getattr(this, key)
+            if isinstance(current_val, Tensor):
+                setattr(this, key, fn(current_val, *args, **kwargs))
+            elif isinstance(current_val, Sequence):
+                setattr(this, key, [fn(cur_v, *args, **kwargs) for cur_v in current_val])
+            else:
+                raise TypeError(
+                    "Expected metric state to be either a Tensor" f" or a list of Tensor, but encountered {current_val}"
+                )
+
+        # make sure to update the device attribute
+        # if the dummy tensor moves device by fn function we should also update the attribute
+        self._device = fn(B.zeros(1, device=self.device), *args, **kwargs).device
+
+        # Additional apply to forward cache and computed attributes (may be nested)
+        if this._computed is not None:
+            this._computed = apply_to_collection(this._computed, Tensor, fn)
+        if this._forward_cache is not None:
+            this._forward_cache = apply_to_collection(this._forward_cache, Tensor, fn)
+
+        return this
+
+    def persistent(self, mode: bool =
False) -> None:
+        """Method for post-init to change if metric states should be saved to its state_dict."""
+        for key in self._persistent:
+            self._persistent[key] = mode
+
+    def state_dict(
+        self,
+        destination: Optional[Dict[str, Any]] = None,
+        prefix: str = "",
+        keep_vars: bool = False,
+    ) -> Optional[Dict[str, Any]]:
+        destination = super().state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
+        # Register metric states to be part of the state_dict
+        for key in self._defaults:
+            if not self._persistent[key]:
+                continue
+            current_val = getattr(self, key)
+            if not keep_vars:
+                if isinstance(current_val, Tensor):
+                    current_val = current_val.detach()
+                elif isinstance(current_val, list):
+                    current_val = [cur_v.detach() if isinstance(cur_v, Tensor) else cur_v for cur_v in current_val]
+            destination[prefix + key] = deepcopy(current_val)  # type: ignore
+        return destination
+
+    def _load_from_state_dict(
+        self,
+        state_dict: dict,
+        prefix: str,
+        local_metadata: dict,
+        strict: bool,
+        missing_keys: List[str],
+        unexpected_keys: List[str],
+        error_msgs: List[str],
+    ) -> None:
+        """Loads metric states from state_dict."""
+
+        for key in self._defaults:
+            name = prefix + key
+            if name in state_dict:
+                setattr(self, key, state_dict.pop(name))
+        super()._load_from_state_dict(
+            state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs
+        )
+
+    def _filter_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
+        """Filter kwargs such that they match the update signature of the metric."""
+
+        # filter all parameters based on update signature except those of
+        # type VAR_POSITIONAL (*args) and VAR_KEYWORD (**kwargs)
+        _params = (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD)
+        _sign_params = self._update_signature.parameters
+        filtered_kwargs = {
+            k: v for k, v in kwargs.items() if (k in _sign_params.keys() and _sign_params[k].kind not in _params)
+        }
+
+        # if no kwargs were filtered, return all kwargs as default
+        if not filtered_kwargs:
+            filtered_kwargs = kwargs
+        return filtered_kwargs
+
+    def __hash__(self) -> int:
+        # we need to add the id here, since PyTorch requires a module hash to be unique.
+        # Internally, PyTorch nn.Module relies on that for children discovery
+        # (see https://github.com/pytorch/pytorch/blob/v1.9.0/torch/nn/modules/module.py#L1544)
+        # For metrics that include tensors it is not a problem,
+        # since their hash is unique based on the memory location but we cannot rely on that for every metric.
+ hash_vals = [self.__class__.__name__, id(self)] + + for key in self._defaults: + val = getattr(self, key) + # Special case: allow list values, so long + # as their elements are hashable + if hasattr(val, "__iter__") and not isinstance(val, Tensor): + hash_vals.extend(val) + else: + hash_vals.append(val) + + return hash(tuple(hash_vals)) + + def __add__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.add, self, other) + + def __and__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.bitwise_and, self, other) + + # Fixme: this shall return bool instead of Metric + def __eq__(self, other: "Metric") -> "Metric": # type: ignore + return CompositionalMetric(B.eq, self, other) + + def __floordiv__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.floor_divide, self, other) + + def __ge__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.ge, self, other) + + def __gt__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.gt, self, other) + + def __le__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.le, self, other) + + def __lt__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.lt, self, other) + + def __matmul__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.matmul, self, other) + + def __mod__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.fmod, self, other) + + def __mul__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.mul, self, other) + + # Fixme: this shall return bool instead of Metric + def __ne__(self, other: "Metric") -> "Metric": # type: ignore + return CompositionalMetric(B.ne, self, other) + + def __or__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.bitwise_or, self, other) + + def __pow__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.pow, self, other) + + def __radd__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.add, other, self) + + def __rand__(self, other: "Metric") -> "Metric": + # swap them since bitwise_and only supports that way and it's commutative + return CompositionalMetric(B.bitwise_and, self, other) + + def __rfloordiv__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.floor_divide, other, self) + + def __rmatmul__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.matmul, other, self) + + def __rmod__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.fmod, other, self) + + def __rmul__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.mul, other, self) + + def __ror__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.bitwise_or, other, self) + + def __rpow__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.pow, other, self) + + def __rsub__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.sub, other, self) + + def __rtruediv__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.true_divide, other, self) + + def __rxor__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.bitwise_xor, other, self) + + def __sub__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.sub, self, other) + + def __truediv__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.true_divide, self, other) + + def __xor__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.bitwise_xor, self, other) + + def __abs__(self) -> "Metric": + return 
CompositionalMetric(B.abs, self, None) + + def __inv__(self) -> "Metric": + return CompositionalMetric(B.bitwise_not, self, None) + + def __invert__(self) -> "Metric": + return self.__inv__() + + def __neg__(self) -> "Metric": + return CompositionalMetric(_neg, self, None) + + def __pos__(self) -> "Metric": + return CompositionalMetric(B.abs, self, None) + + def __getitem__(self, idx: int) -> "Metric": + return CompositionalMetric(lambda x: x[idx], self, None) + + +def _neg(x: Tensor) -> Tensor: + return -B.abs(x) + + +class CompositionalMetric(Metric): + """Composition of two metrics with a specific operator which will be executed upon metrics compute.""" + + def __init__( + self, + operator: Callable, + metric_a: Union[Metric, int, float, Tensor], + metric_b: Union[Metric, int, float, Tensor, None], + ) -> None: + """ + Args: + operator: the operator taking in one (if metric_b is None) + or two arguments. Will be applied to outputs of metric_a.compute() + and (optionally if metric_b is not None) metric_b.compute() + metric_a: first metric whose compute() result is the first argument of operator + metric_b: second metric whose compute() result is the second argument of operator. + For operators taking in only one input, this should be None + """ + super().__init__() + + self.op = operator + + if isinstance(metric_a, Tensor): + self.register_buffer("metric_a", metric_a) + else: + self.metric_a = metric_a + + if isinstance(metric_b, Tensor): + self.register_buffer("metric_b", metric_b) + else: + self.metric_b = metric_b + + def _sync_dist(self, dist_sync_fn: Optional[Callable] = None, process_group: Optional[Any] = None) -> None: + # No syncing required here. syncing will be done in metric_a and metric_b + pass + + def update(self, *args: Any, **kwargs: Any) -> None: + if isinstance(self.metric_a, Metric): + self.metric_a.update(*args, **self.metric_a._filter_kwargs(**kwargs)) + + if isinstance(self.metric_b, Metric): + self.metric_b.update(*args, **self.metric_b._filter_kwargs(**kwargs)) + + def compute(self) -> Any: + + # also some parsing for kwargs? + if isinstance(self.metric_a, Metric): + val_a = self.metric_a.compute() + else: + val_a = self.metric_a + + if isinstance(self.metric_b, Metric): + val_b = self.metric_b.compute() + else: + val_b = self.metric_b + + if val_b is None: + return self.op(val_a) + + return self.op(val_a, val_b) + + def reset(self) -> None: + if isinstance(self.metric_a, Metric): + self.metric_a.reset() + + if isinstance(self.metric_b, Metric): + self.metric_b.reset() + + def persistent(self, mode: bool = False) -> None: + if isinstance(self.metric_a, Metric): + self.metric_a.persistent(mode=mode) + if isinstance(self.metric_b, Metric): + self.metric_b.persistent(mode=mode) + + def __repr__(self) -> str: + _op_metrics = f"(\n {self.op.__name__}(\n {repr(self.metric_a)},\n {repr(self.metric_b)}\n )\n)" + repr_str = self.__class__.__name__ + _op_metrics + + return repr_str diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/__init__.py new file mode 100644 index 000000000..aafc10247 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/__init__.py @@ -0,0 +1,26 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
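Before the regression metrics, a brief illustration of how `CompositionalMetric` above is used in practice: the arithmetic dunders build a lazy expression tree, and the operator is only applied to the children's results when `compute()` is called. A hedged sketch, assuming the top-level `paddlemetrics` exports shown below and that the `B` ops behave like their PyTorch counterparts:

```python
import paddleext.torchapi as B
from paddlemetrics import MeanSquaredError, MeanAbsoluteError

preds = B.tensor([2.5, 0.0, 2.0, 8.0])
target = B.tensor([3.0, -0.5, 2.0, 7.0])

# __rmul__ / __add__ return CompositionalMetric nodes; nothing is evaluated yet
blended = 0.5 * MeanSquaredError() + 0.5 * MeanAbsoluteError()

blended.update(preds, target)   # update fans out to both child metrics
print(blended.compute())        # 0.5 * 0.375 + 0.5 * 0.5 = 0.4375
```

Because scalar operands are stored on the node and only `Metric` children are updated and reset, the composed object can be treated like any other metric in a training loop.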
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from paddlemetrics.regression.cosine_similarity import CosineSimilarity # noqa: F401 +from paddlemetrics.regression.explained_variance import ExplainedVariance # noqa: F401 +from paddlemetrics.regression.mean_absolute_error import MeanAbsoluteError # noqa: F401 +from paddlemetrics.regression.mean_absolute_percentage_error import MeanAbsolutePercentageError # noqa: F401 +from paddlemetrics.regression.mean_squared_error import MeanSquaredError # noqa: F401 +from paddlemetrics.regression.mean_squared_log_error import MeanSquaredLogError # noqa: F401 +from paddlemetrics.regression.pearson import PearsonCorrcoef # noqa: F401 +from paddlemetrics.regression.r2 import R2Score # noqa: F401 +from paddlemetrics.regression.spearman import SpearmanCorrcoef # noqa: F401 +from paddlemetrics.regression.symmetric_mean_absolute_percentage_error import ( # noqa: F401 + SymmetricMeanAbsolutePercentageError, +) +from paddlemetrics.regression.tweedie_deviance import TweedieDevianceScore # noqa: F401 diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/cosine_similarity.py b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/cosine_similarity.py new file mode 100644 index 000000000..3b2946e2c --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/cosine_similarity.py @@ -0,0 +1,105 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, List, Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.functional.regression.cosine_similarity import _cosine_similarity_compute, _cosine_similarity_update +from paddlemetrics.metric import Metric +from paddlemetrics.utilities.data import dim_zero_cat + + +class CosineSimilarity(Metric): + r""" + Computes the `Cosine Similarity`_ + between targets and predictions: + + .. math:: + cos_{sim}(x,y) = \frac{x \cdot y}{||x|| \cdot ||y||} = + \frac{\sum_{i=1}^n x_i y_i}{\sqrt{\sum_{i=1}^n x_i^2}\sqrt{\sum_{i=1}^n y_i^2}} + + where :math:`y` is a tensor of target values, and :math:`x` is a tensor of predictions. + + Forward accepts + + - ``preds`` (float tensor): ``(N,d)`` + - ``target`` (float tensor): ``(N,d)`` + + Args: + reduction: how to reduce over the batch dimension using 'sum', 'mean' or 'none' + (taking the individual scores) + compute_on_step: + Forward only calls ``update()`` and return ``None`` if this is set to ``False``. + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. 
+ process_group: + Specify the process group on which synchronization is called. + default: ``None`` (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When ``None``, DDP + will be used to perform the all gather. + + Example: + >>> from paddlemetrics import CosineSimilarity + >>> target = B.tensor([[0, 1], [1, 1]]) + >>> preds = B.tensor([[0, 1], [0, 1]]) + >>> cosine_similarity = CosineSimilarity(reduction = 'mean') + >>> cosine_similarity(preds, target) + tensor(0.8536) + + """ + is_differentiable = True + preds: List[Tensor] + target: List[Tensor] + + def __init__( + self, + reduction: str = "sum", + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + allowed_reduction = ("sum", "mean", "none", None) + if reduction not in allowed_reduction: + raise ValueError(f"Expected argument `reduction` to be one of {allowed_reduction} but got {reduction}") + self.reduction = reduction + + self.add_state("preds", [], dist_reduce_fx="cat") + self.add_state("target", [], dist_reduce_fx="cat") + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update metric states with predictions and targets. + + Args: + preds: Predicted tensor with shape ``(N,d)`` + target: Ground truth tensor with shape ``(N,d)`` + """ + preds, target = _cosine_similarity_update(preds, target) + + self.preds.append(preds) + self.target.append(target) + + def compute(self) -> Tensor: + preds = dim_zero_cat(self.preds) + target = dim_zero_cat(self.target) + return _cosine_similarity_compute(preds, target, self.reduction) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/explained_variance.py b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/explained_variance.py new file mode 100644 index 000000000..226ac0760 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/explained_variance.py @@ -0,0 +1,136 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional, Sequence, Union + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.regression.explained_variance import ( + _explained_variance_compute, + _explained_variance_update, +) +from paddlemetrics.metric import Metric + + +class ExplainedVariance(Metric): + r""" + Computes `explained variance`_: + + .. math:: \text{ExplainedVariance} = 1 - \frac{\text{Var}(y - \hat{y})}{\text{Var}(y)} + + Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a + tensor of predictions. 
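+
+    A score of 1.0 means the residuals :math:`y - \hat{y}` have zero variance, while
+    a constant prediction equal to the mean of ``target`` yields a score of 0.0.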
+
+    Forward accepts
+
+    - ``preds`` (float tensor): ``(N,)`` or ``(N, ...)`` (multioutput)
+    - ``target`` (float tensor): ``(N,)`` or ``(N, ...)`` (multioutput)
+
+    In the case of multioutput, by default the variances will be uniformly
+    averaged over the additional dimensions. Please see argument `multioutput`
+    for changing this behavior.
+
+    Args:
+        multioutput:
+            Defines aggregation in the case of multiple output scores. Can be one
+            of the following strings (default is `'uniform_average'`):
+
+            * `'raw_values'` returns full set of scores
+            * `'uniform_average'` scores are uniformly averaged
+            * `'variance_weighted'` scores are weighted by their individual variances
+
+        compute_on_step:
+            Forward only calls ``update()`` and return None if this is set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step. default: False
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+
+    Raises:
+        ValueError:
+            If ``multioutput`` is not one of ``"raw_values"``, ``"uniform_average"`` or ``"variance_weighted"``.
+
+    Example:
+        >>> from paddlemetrics import ExplainedVariance
+        >>> target = B.tensor([3, -0.5, 2, 7])
+        >>> preds = B.tensor([2.5, 0.0, 2, 8])
+        >>> explained_variance = ExplainedVariance()
+        >>> explained_variance(preds, target)
+        tensor(0.9572)
+
+        >>> target = B.tensor([[0.5, 1], [-1, 1], [7, -6]])
+        >>> preds = B.tensor([[0, 2], [-1, 2], [8, -5]])
+        >>> explained_variance = ExplainedVariance(multioutput='raw_values')
+        >>> explained_variance(preds, target)
+        tensor([0.9677, 1.0000])
+
+    """
+    is_differentiable = True
+    n_obs: Tensor
+    sum_error: Tensor
+    sum_squared_error: Tensor
+    sum_target: Tensor
+    sum_squared_target: Tensor
+
+    def __init__(
+        self,
+        multioutput: str = "uniform_average",
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+        allowed_multioutput = ("raw_values", "uniform_average", "variance_weighted")
+        if multioutput not in allowed_multioutput:
+            raise ValueError(
+                f"Invalid input to argument `multioutput`. Choose one of the following: {allowed_multioutput}"
+            )
+        self.multioutput: str = multioutput
+        self.add_state("sum_error", default=tensor(0.0), dist_reduce_fx="sum")
+        self.add_state("sum_squared_error", default=tensor(0.0), dist_reduce_fx="sum")
+        self.add_state("sum_target", default=tensor(0.0), dist_reduce_fx="sum")
+        self.add_state("sum_squared_target", default=tensor(0.0), dist_reduce_fx="sum")
+        self.add_state("n_obs", default=tensor(0.0), dist_reduce_fx="sum")
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Update state with predictions and targets.
+ + Args: + preds: Predictions from model + target: Ground truth values + """ + n_obs, sum_error, sum_squared_error, sum_target, sum_squared_target = _explained_variance_update(preds, target) + self.n_obs = self.n_obs + n_obs + self.sum_error = self.sum_error + sum_error + self.sum_squared_error = self.sum_squared_error + sum_squared_error + self.sum_target = self.sum_target + sum_target + self.sum_squared_target = self.sum_squared_target + sum_squared_target + + def compute(self) -> Union[Tensor, Sequence[Tensor]]: + """Computes explained variance over state.""" + return _explained_variance_compute( + self.n_obs, + self.sum_error, + self.sum_squared_error, + self.sum_target, + self.sum_squared_target, + self.multioutput, + ) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/mean_absolute_error.py b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/mean_absolute_error.py new file mode 100644 index 000000000..8614bed21 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/mean_absolute_error.py @@ -0,0 +1,86 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.regression.mean_absolute_error import ( + _mean_absolute_error_compute, + _mean_absolute_error_update, +) +from paddlemetrics.metric import Metric + + +class MeanAbsoluteError(Metric): + r""" + `Computes Mean Absolute Error`_ (MAE): + + .. math:: \text{MAE} = \frac{1}{N}\sum_i^N | y_i - \hat{y_i} | + + Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions. + + Args: + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + + Example: + >>> from paddlemetrics import MeanAbsoluteError + >>> target = B.tensor([3.0, -0.5, 2.0, 7.0]) + >>> preds = B.tensor([2.5, 0.0, 2.0, 8.0]) + >>> mean_absolute_error = MeanAbsoluteError() + >>> mean_absolute_error(preds, target) + tensor(0.5000) + """ + is_differentiable = True + sum_abs_error: Tensor + total: Tensor + + def __init__( + self, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + self.add_state("sum_abs_error", default=tensor(0.0), dist_reduce_fx="sum") + self.add_state("total", default=tensor(0), dist_reduce_fx="sum") + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets. 
+
+        Args:
+            preds: Predictions from model
+            target: Ground truth values
+        """
+        sum_abs_error, n_obs = _mean_absolute_error_update(preds, target)
+
+        self.sum_abs_error += sum_abs_error
+        self.total += n_obs
+
+    def compute(self) -> Tensor:
+        """Computes mean absolute error over state."""
+        return _mean_absolute_error_compute(self.sum_abs_error, self.total)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/mean_absolute_percentage_error.py b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/mean_absolute_percentage_error.py
new file mode 100644
index 000000000..66d9c0916
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/mean_absolute_percentage_error.py
@@ -0,0 +1,95 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.functional.regression.mean_absolute_percentage_error import (
+    _mean_absolute_percentage_error_compute,
+    _mean_absolute_percentage_error_update,
+)
+from paddlemetrics.metric import Metric
+
+
+class MeanAbsolutePercentageError(Metric):
+    r"""
+    Computes `Mean Absolute Percentage Error`_ (MAPE):
+
+    .. math:: \text{MAPE} = \frac{1}{n}\sum_{i=1}^n\frac{| y_i - \hat{y_i} |}{\max(\epsilon, y_i)}
+
+    Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
+
+    Args:
+        compute_on_step:
+            Forward only calls ``update()`` and return None if this is set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step. default: False
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+
+    Note:
+        The epsilon value is taken from `scikit-learn's implementation of MAPE`_.
+
+    Note:
+        MAPE output is a non-negative floating point; the best result is 0.0. Note that
+        bad predictions can lead to arbitrarily large values, especially when some
+        ``target`` values are close to 0. This `MAPE implementation returns`_ a very
+        large number instead of ``inf``.
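+
+        For example, if some :math:`y_i` is zero, the corresponding summand becomes
+        :math:`| y_i - \hat{y_i} | / \epsilon`, so a single near-zero target can
+        dominate the averaged score.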
+ + Example: + >>> from paddlemetrics import MeanAbsolutePercentageError + >>> target = B.tensor([1, 10, 1e6]) + >>> preds = B.tensor([0.9, 15, 1.2e6]) + >>> mean_abs_percentage_error = MeanAbsolutePercentageError() + >>> mean_abs_percentage_error(preds, target) + tensor(0.2667) + + """ + is_differentiable = True + sum_abs_per_error: Tensor + total: Tensor + + def __init__( + self, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + self.add_state("sum_abs_per_error", default=tensor(0.0), dist_reduce_fx="sum") + self.add_state("total", default=tensor(0.0), dist_reduce_fx="sum") + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets. + + Args: + preds: Predictions from model + target: Ground truth values + """ + sum_abs_per_error, num_obs = _mean_absolute_percentage_error_update(preds, target) + + self.sum_abs_per_error += sum_abs_per_error + self.total += num_obs + + def compute(self) -> Tensor: + """Computes mean absolute percentage error over state.""" + return _mean_absolute_percentage_error_compute(self.sum_abs_per_error, self.total) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/mean_squared_error.py b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/mean_squared_error.py new file mode 100644 index 000000000..8c1c9245b --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/mean_squared_error.py @@ -0,0 +1,91 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.regression.mean_squared_error import ( + _mean_squared_error_compute, + _mean_squared_error_update, +) +from paddlemetrics.metric import Metric + + +class MeanSquaredError(Metric): + r""" + Computes `mean squared error`_ (MSE): + + .. math:: \text{MSE} = \frac{1}{N}\sum_i^N(y_i - \hat{y_i})^2 + + Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions. + + Args: + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + squared: + If True returns MSE value, if False returns RMSE value. 
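+
+    Note:
+        With ``squared=False`` the metric returns the square root of the MSE, i.e. the
+        RMSE; for the example below that is ``sqrt(0.8750) = 0.9354``.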
+ + Example: + >>> from paddlemetrics import MeanSquaredError + >>> target = B.tensor([2.5, 5.0, 4.0, 8.0]) + >>> preds = B.tensor([3.0, 5.0, 2.5, 7.0]) + >>> mean_squared_error = MeanSquaredError() + >>> mean_squared_error(preds, target) + tensor(0.8750) + + """ + is_differentiable = True + sum_squared_error: Tensor + total: Tensor + + def __init__( + self, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + squared: bool = True, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + self.add_state("sum_squared_error", default=tensor(0.0), dist_reduce_fx="sum") + self.add_state("total", default=tensor(0), dist_reduce_fx="sum") + self.squared = squared + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets. + + Args: + preds: Predictions from model + target: Ground truth values + """ + sum_squared_error, n_obs = _mean_squared_error_update(preds, target) + + self.sum_squared_error += sum_squared_error + self.total += n_obs + + def compute(self) -> Tensor: + """Computes mean squared error over state.""" + return _mean_squared_error_compute(self.sum_squared_error, self.total, squared=self.squared) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/mean_squared_log_error.py b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/mean_squared_log_error.py new file mode 100644 index 000000000..e36773b0e --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/mean_squared_log_error.py @@ -0,0 +1,90 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.regression.mean_squared_log_error import ( + _mean_squared_log_error_compute, + _mean_squared_log_error_update, +) +from paddlemetrics.metric import Metric + + +class MeanSquaredLogError(Metric): + r""" + Computes `mean squared logarithmic error`_ (MSLE): + + .. math:: \text{MSLE} = \frac{1}{N}\sum_i^N (\log_e(1 + y_i) - \log_e(1 + \hat{y_i}))^2 + + Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions. + + Args: + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. 
default: None (which selects the entire world)
+
+    Example:
+        >>> from paddlemetrics import MeanSquaredLogError
+        >>> target = B.tensor([2.5, 5, 4, 8])
+        >>> preds = B.tensor([3, 5, 2.5, 7])
+        >>> mean_squared_log_error = MeanSquaredLogError()
+        >>> mean_squared_log_error(preds, target)
+        tensor(0.0397)
+
+    .. note::
+        Half precision is only supported on GPU for this metric
+
+    """
+    is_differentiable = True
+    sum_squared_log_error: Tensor
+    total: Tensor
+
+    def __init__(
+        self,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+
+        self.add_state("sum_squared_log_error", default=tensor(0.0), dist_reduce_fx="sum")
+        self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Update state with predictions and targets.
+
+        Args:
+            preds: Predictions from model
+            target: Ground truth values
+        """
+        sum_squared_log_error, n_obs = _mean_squared_log_error_update(preds, target)
+
+        self.sum_squared_log_error += sum_squared_log_error
+        self.total += n_obs
+
+    def compute(self) -> Tensor:
+        """Compute mean squared logarithmic error over state."""
+        return _mean_squared_log_error_compute(self.sum_squared_log_error, self.total)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/pearson.py b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/pearson.py
new file mode 100644
index 000000000..7927392a7
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/pearson.py
@@ -0,0 +1,140 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, List, Optional, Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.regression.pearson import _pearson_corrcoef_compute, _pearson_corrcoef_update
+from paddlemetrics.metric import Metric
+
+
+def _final_aggregation(
+    means_x: Tensor,
+    means_y: Tensor,
+    vars_x: Tensor,
+    vars_y: Tensor,
+    corrs_xy: Tensor,
+    nbs: Tensor,
+) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
+    """Aggregate the statistics from multiple devices.
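+
+    Devices are folded together pairwise: the running statistics of the first device
+    are combined with those of the second via the parallel variance/covariance
+    update, the result is combined with the third device, and so on.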
+ + Formula taken from here: `Aggregate the statistics from multiple devices`_ + """ + # assert len(means_x) > 1 and len(means_y) > 1 and len(vars_x) > 1 and len(vars_y) > 1 and len(corrs_xy) > 1 + mx1, my1, vx1, vy1, cxy1, n1 = means_x[0], means_y[0], vars_x[0], vars_y[0], corrs_xy[0], nbs[0] + for i in range(1, len(means_x)): + mx2, my2, vx2, vy2, cxy2, n2 = means_x[i], means_y[i], vars_x[i], vars_y[i], corrs_xy[i], nbs[i] + + nb = n1 + n2 + mean_x = (n1 * mx1 + n2 * mx2) / nb + mean_y = (n1 * my1 + n2 * my2) / nb + var_x = 1 / (n1 + n2 - 1) * ((n1 - 1) * vx1 + (n2 - 1) * vx2 + ((n1 * n2) / (n1 + n2)) * (mx1 - mx2) ** 2) + var_y = 1 / (n1 + n2 - 1) * ((n1 - 1) * vy1 + (n2 - 1) * vy2 + ((n1 * n2) / (n1 + n2)) * (my1 - my2) ** 2) + + corr1 = n1 * cxy1 + n1 * (mx1 - mean_x) * (my1 - mean_y) + corr2 = n2 * cxy2 + n2 * (mx2 - mean_x) * (my2 - mean_y) + corr_xy = (corr1 + corr2) / (n1 + n2) + + mx1, my1, vx1, vy1, cxy1, n1 = mean_x, mean_y, var_x, var_y, corr_xy, nb + + return var_x, var_y, corr_xy, nb + + +class PearsonCorrcoef(Metric): + r""" + Computes `Pearson Correlation Coefficient`_: + + .. math:: + P_{corr}(x,y) = \frac{cov(x,y)}{\sigma_x \sigma_y} + + Where :math:`y` is a tensor of target values, and :math:`x` is a + tensor of predictions. + + Forward accepts + + - ``preds`` (float tensor): ``(N,)`` + - ``target``(float tensor): ``(N,)`` + + Args: + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + + Example: + >>> from paddlemetrics import PearsonCorrcoef + >>> target = B.tensor([3, -0.5, 2, 7]) + >>> preds = B.tensor([2.5, 0.0, 2, 8]) + >>> pearson = PearsonCorrcoef() + >>> pearson(preds, target) + tensor(0.9849) + + """ + is_differentiable = True + preds: List[Tensor] + target: List[Tensor] + mean_x: Tensor + mean_y: Tensor + var_x: Tensor + var_y: Tensor + corr_xy: Tensor + n_total: Tensor + + def __init__( + self, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + ) + + self.add_state("mean_x", default=B.zeros(1), dist_reduce_fx=None) + self.add_state("mean_y", default=B.zeros(1), dist_reduce_fx=None) + self.add_state("var_x", default=B.zeros(1), dist_reduce_fx=None) + self.add_state("var_y", default=B.zeros(1), dist_reduce_fx=None) + self.add_state("corr_xy", default=B.zeros(1), dist_reduce_fx=None) + self.add_state("n_total", default=B.zeros(1), dist_reduce_fx=None) + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets. 
+ + Args: + preds: Predictions from model + target: Ground truth values + """ + self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total = _pearson_corrcoef_update( + preds, target, self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total + ) + + def compute(self) -> Tensor: + """Computes pearson correlation coefficient over state.""" + if self.mean_x.numel() > 1: # multiple devices, need further reduction + var_x, var_y, corr_xy, n_total = _final_aggregation( + self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total + ) + else: + var_x = self.var_x + var_y = self.var_y + corr_xy = self.corr_xy + n_total = self.n_total + + return _pearson_corrcoef_compute(var_x, var_y, corr_xy, n_total) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/r2.py b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/r2.py new file mode 100644 index 000000000..36db3d8d5 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/r2.py @@ -0,0 +1,149 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.regression.r2 import _r2_score_compute, _r2_score_update +from paddlemetrics.metric import Metric + + +class R2Score(Metric): + r""" + Computes r2 score also known as `R2 Score_Coefficient Determination`_: + + .. math:: R^2 = 1 - \frac{SS_{res}}{SS_{tot}} + + where :math:`SS_{res}=\sum_i (y_i - f(x_i))^2` is the sum of residual squares, and + :math:`SS_{tot}=\sum_i (y_i - \bar{y})^2` is total sum of squares. Can also calculate + adjusted r2 score given by + + .. math:: R^2_{adj} = 1 - \frac{(1-R^2)(n-1)}{n-k-1} + + where the parameter :math:`k` (the number of independent regressors) should + be provided as the `adjusted` argument. + + Forward accepts + + - ``preds`` (float tensor): ``(N,)`` or ``(N, M)`` (multioutput) + - ``target`` (float tensor): ``(N,)`` or ``(N, M)`` (multioutput) + + In the case of multioutput, as default the variances will be uniformly + averaged over the additional dimensions. Please see argument `multioutput` + for changing this behavior. + + Args: + num_outputs: + Number of outputs in multioutput setting (default is 1) + adjusted: + number of independent regressors for calculating adjusted r2 score. + Default 0 (standard r2 score). + multioutput: + Defines aggregation in the case of multiple output scores. Can be one + of the following strings (default is ``'uniform_average'``.): + + * ``'raw_values'`` returns full set of scores + * ``'uniform_average'`` scores are uniformly averaged + * ``'variance_weighted'`` scores are weighted by their individual variances + + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. 
default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + + Raises: + ValueError: + If ``adjusted`` parameter is not an integer larger or equal to 0. + ValueError: + If ``multioutput`` is not one of ``"raw_values"``, ``"uniform_average"`` or ``"variance_weighted"``. + + Example: + >>> from paddlemetrics import R2Score + >>> target = B.tensor([3, -0.5, 2, 7]) + >>> preds = B.tensor([2.5, 0.0, 2, 8]) + >>> r2score = R2Score() + >>> r2score(preds, target) + tensor(0.9486) + + >>> target = B.tensor([[0.5, 1], [-1, 1], [7, -6]]) + >>> preds = B.tensor([[0, 2], [-1, 2], [8, -5]]) + >>> r2score = R2Score(num_outputs=2, multioutput='raw_values') + >>> r2score(preds, target) + tensor([0.9654, 0.9082]) + + """ + is_differentiable = True + sum_squared_error: Tensor + sum_error: Tensor + residual: Tensor + total: Tensor + + def __init__( + self, + num_outputs: int = 1, + adjusted: int = 0, + multioutput: str = "uniform_average", + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + self.num_outputs = num_outputs + + if adjusted < 0 or not isinstance(adjusted, int): + raise ValueError("`adjusted` parameter should be an integer larger or equal to 0.") + self.adjusted = adjusted + + allowed_multioutput = ("raw_values", "uniform_average", "variance_weighted") + if multioutput not in allowed_multioutput: + raise ValueError( + f"Invalid input to argument `multioutput`. Choose one of the following: {allowed_multioutput}" + ) + self.multioutput = multioutput + + self.add_state("sum_squared_error", default=B.zeros(self.num_outputs), dist_reduce_fx="sum") + self.add_state("sum_error", default=B.zeros(self.num_outputs), dist_reduce_fx="sum") + self.add_state("residual", default=B.zeros(self.num_outputs), dist_reduce_fx="sum") + self.add_state("total", default=tensor(0), dist_reduce_fx="sum") + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets. + + Args: + preds: Predictions from model + target: Ground truth values + """ + sum_squared_error, sum_error, residual, total = _r2_score_update(preds, target) + + self.sum_squared_error += sum_squared_error + self.sum_error += sum_error + self.residual += residual + self.total += total + + def compute(self) -> Tensor: + """Computes r2 score over the metric states.""" + return _r2_score_compute( + self.sum_squared_error, self.sum_error, self.residual, self.total, self.adjusted, self.multioutput + ) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/spearman.py b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/spearman.py new file mode 100644 index 000000000..76249378f --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/spearman.py @@ -0,0 +1,96 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, List, Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.regression.spearman import _spearman_corrcoef_compute, _spearman_corrcoef_update
+from paddlemetrics.metric import Metric
+from paddlemetrics.utilities import rank_zero_warn
+from paddlemetrics.utilities.data import dim_zero_cat
+
+
+class SpearmanCorrcoef(Metric):
+    r"""
+    Computes `spearmans rank correlation coefficient`_.
+
+    .. math::
+        r_s = \frac{cov(rg_x, rg_y)}{\sigma_{rg_x} \sigma_{rg_y}}
+
+    where :math:`rg_x` and :math:`rg_y` are the ranks associated with the variables :math:`x` and :math:`y`.
+    Spearman's rank correlation coefficient corresponds to the standard Pearson correlation coefficient
+    calculated on the rank variables.
+
+    Args:
+        compute_on_step:
+            Forward only calls ``update()`` and return None if this is set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step. default: False
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When ``None``, DDP
+            will be used to perform the allgather
+
+    Example:
+        >>> from paddlemetrics import SpearmanCorrcoef
+        >>> target = B.tensor([3, -0.5, 2, 7])
+        >>> preds = B.tensor([2.5, 0.0, 2, 8])
+        >>> spearman = SpearmanCorrcoef()
+        >>> spearman(preds, target)
+        tensor(1.0000)
+
+    """
+    is_differentiable = False
+    preds: List[Tensor]
+    target: List[Tensor]
+
+    def __init__(
+        self,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Optional[Callable] = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+        rank_zero_warn(
+            "Metric `SpearmanCorrcoef` will save all targets and predictions in the buffer."
+            " For large datasets, this may lead to large memory footprint."
+        )
+
+        self.add_state("preds", default=[], dist_reduce_fx="cat")
+        self.add_state("target", default=[], dist_reduce_fx="cat")
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Update state with predictions and targets.
+
+        Args:
+            preds: Predictions from model
+            target: Ground truth values
+        """
+        preds, target = _spearman_corrcoef_update(preds, target)
+        self.preds.append(preds)
+        self.target.append(target)
+
+    def compute(self) -> Tensor:
+        """Computes Spearman's correlation coefficient."""
+        preds = dim_zero_cat(self.preds)
+        target = dim_zero_cat(self.target)
+        return _spearman_corrcoef_compute(preds, target)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/symmetric_mean_absolute_percentage_error.py b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/symmetric_mean_absolute_percentage_error.py
new file mode 100644
index 000000000..3e545e08a
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/symmetric_mean_absolute_percentage_error.py
@@ -0,0 +1,92 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.functional.regression.symmetric_mean_absolute_percentage_error import (
+    _symmetric_mean_absolute_percentage_error_compute,
+    _symmetric_mean_absolute_percentage_error_update,
+)
+from paddlemetrics.metric import Metric
+
+
+class SymmetricMeanAbsolutePercentageError(Metric):
+    r"""
+    Computes symmetric mean absolute percentage error (`SMAPE`_).
+
+    .. math:: \text{SMAPE} = \frac{2}{n}\sum_{i=1}^n\frac{| y_i - \hat{y_i} |}{\max(| y_i | + | \hat{y_i} |, \epsilon)}
+
+    Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
+
+    Args:
+        compute_on_step:
+            Forward only calls ``update()`` and return None if this is set to False.
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()`` before returning the value at the step.
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+
+    Note:
+        The epsilon value is taken from `scikit-learn's implementation of SMAPE`_.
+
+    Note:
+        SMAPE output is a non-negative floating point between 0 and 2. Best result is 0.0.
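+        For example, when ``preds`` equals ``target`` every summand is zero, so a
+        perfect model scores exactly 0.0.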
+
+
+    Example:
+        >>> from paddlemetrics import SymmetricMeanAbsolutePercentageError
+        >>> target = B.tensor([1, 10, 1e6])
+        >>> preds = B.tensor([0.9, 15, 1.2e6])
+        >>> smape = SymmetricMeanAbsolutePercentageError()
+        >>> smape(preds, target)
+        tensor(0.2290)
+    """
+    is_differentiable = True
+    sum_abs_per_error: Tensor
+    total: Tensor
+
+    def __init__(
+        self,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+
+        self.add_state("sum_abs_per_error", default=tensor(0.0), dist_reduce_fx="sum")
+        self.add_state("total", default=tensor(0.0), dist_reduce_fx="sum")
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Update state with predictions and targets.
+
+        Args:
+            preds: Predictions from model
+            target: Ground truth values
+        """
+        sum_abs_per_error, num_obs = _symmetric_mean_absolute_percentage_error_update(preds, target)
+
+        self.sum_abs_per_error += sum_abs_per_error
+        self.total += num_obs
+
+    def compute(self) -> Tensor:
+        """Computes symmetric mean absolute percentage error over state."""
+        return _symmetric_mean_absolute_percentage_error_compute(self.sum_abs_per_error, self.total)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/tweedie_deviance.py b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/tweedie_deviance.py
new file mode 100644
index 000000000..4687bdd5c
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/tweedie_deviance.py
@@ -0,0 +1,116 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.regression.tweedie_deviance import (
+    _tweedie_deviance_score_compute,
+    _tweedie_deviance_score_update,
+)
+from paddlemetrics.metric import Metric
+
+
+class TweedieDevianceScore(Metric):
+    r"""
+    Computes the `Tweedie Deviance Score`_ between targets and predictions:
+
+    .. math::
+        deviance\_score(\hat{y},y) =
+        \begin{cases}
+        (\hat{y} - y)^2, & \text{for }power=0\\
+        2 * (y * log(\frac{y}{\hat{y}}) + \hat{y} - y), & \text{for }power=1\\
+        2 * (log(\frac{\hat{y}}{y}) + \frac{y}{\hat{y}} - 1), & \text{for }power=2\\
+        2 * (\frac{(max(y,0))^{2}}{(1 - power)(2 - power)} - \frac{y(\hat{y})^{1 - power}}{1 - power} + \frac{(\hat{y})^{2 - power}}{2 - power}), & \text{otherwise}
+        \end{cases}
+
+    where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
+
+    Forward accepts
+
+    - ``preds`` (float tensor): ``(N,...)``
+    - ``targets`` (float tensor): ``(N,...)``
+
+    Args:
+        power:
+            - power < 0 : Extreme stable distribution. (Requires: preds > 0.)
+            - power = 0 : Normal distribution. (Requires: targets and preds can be any real numbers.)
+            - power = 1 : Poisson distribution. (Requires: targets >= 0 and preds > 0.)
+            - 1 < power < 2 : Compound Poisson distribution. (Requires: targets >= 0 and preds > 0.)
+            - power = 2 : Gamma distribution. (Requires: targets > 0 and preds > 0.)
+            - power = 3 : Inverse Gaussian distribution. (Requires: targets > 0 and preds > 0.)
+            - otherwise : Positive stable distribution. (Requires: targets > 0 and preds > 0.)
+        compute_on_step:
+            Forward only calls ``update()`` and return ``None`` if this is set to ``False``.
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step.
+        process_group:
+            Specify the process group on which synchronization is called.
+            default: ``None`` (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When ``None``, DDP
+            will be used to perform the all gather.
+
+    Example:
+        >>> from paddlemetrics import TweedieDevianceScore
+        >>> targets = B.tensor([1.0, 2.0, 3.0, 4.0])
+        >>> preds = B.tensor([4.0, 3.0, 2.0, 1.0])
+        >>> deviance_score = TweedieDevianceScore(power=2)
+        >>> deviance_score(preds, targets)
+        tensor(1.2083)
+
+    """
+    is_differentiable = True
+    sum_deviance_score: Tensor
+    num_observations: Tensor
+
+    def __init__(
+        self,
+        power: float = 0.0,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+        if 0 < power < 1:
+            raise ValueError(f"Deviance Score is not defined for power={power}.")
+
+        self.power: float = power
+
+        self.add_state("sum_deviance_score", B.tensor(0.0), dist_reduce_fx="sum")
+        self.add_state("num_observations", B.tensor(0), dist_reduce_fx="sum")
+
+    def update(self, preds: Tensor, targets: Tensor) -> None:  # type: ignore
+        """Update metric states with predictions and targets.
+
+        Args:
+            preds: Predicted tensor with shape ``(N,...)``
+            targets: Ground truth tensor with shape ``(N,...)``
+        """
+        sum_deviance_score, num_observations = _tweedie_deviance_score_update(preds, targets, self.power)
+
+        self.sum_deviance_score += sum_deviance_score
+        self.num_observations += num_observations
+
+    def compute(self) -> Tensor:
+        return _tweedie_deviance_score_compute(self.sum_deviance_score, self.num_observations)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/__init__.py
new file mode 100644
index 000000000..208a02246
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/__init__.py
@@ -0,0 +1,22 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
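+# Illustrative usage sketch (assumed API, mirroring the doctest examples in this
+# package): every retrieval metric below groups predictions by `indexes` (one
+# index per query) and averages the per-query scores, e.g.
+#
+#     from paddleext.torchapi import tensor
+#     from paddlemetrics.retrieval import RetrievalMAP
+#
+#     indexes = tensor([0, 0, 1, 1])                # two queries
+#     preds = tensor([0.9, 0.1, 0.4, 0.6])          # model relevance scores
+#     target = tensor([True, False, False, True])   # binary relevance labels
+#     RetrievalMAP()(preds, target, indexes=indexes)  # mean of per-query APs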
+from paddlemetrics.retrieval.mean_average_precision import RetrievalMAP # noqa: F401 +from paddlemetrics.retrieval.mean_reciprocal_rank import RetrievalMRR # noqa: F401 +from paddlemetrics.retrieval.retrieval_fallout import RetrievalFallOut # noqa: F401 +from paddlemetrics.retrieval.retrieval_hit_rate import RetrievalHitRate # noqa: F401 +from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric # noqa: F401 +from paddlemetrics.retrieval.retrieval_ndcg import RetrievalNormalizedDCG # noqa: F401 +from paddlemetrics.retrieval.retrieval_precision import RetrievalPrecision # noqa: F401 +from paddlemetrics.retrieval.retrieval_r_precision import RetrievalRPrecision # noqa: F401 +from paddlemetrics.retrieval.retrieval_recall import RetrievalRecall # noqa: F401 diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/mean_average_precision.py b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/mean_average_precision.py new file mode 100644 index 000000000..ee7f9065b --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/mean_average_precision.py @@ -0,0 +1,70 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.retrieval.average_precision import retrieval_average_precision +from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric + + +class RetrievalMAP(RetrievalMetric): + """Computes `Mean Average Precision`_. + + Works with binary target data. Accepts float predictions from a model output. + + Forward accepts + + - ``preds`` (float tensor): ``(N, ...)`` + - ``target`` (long or bool tensor): ``(N, ...)`` + - ``indexes`` (long tensor): ``(N, ...)`` + + ``indexes``, ``preds`` and ``target`` must have the same dimension. + ``indexes`` indicate to which query a prediction belongs. + Predictions will be first grouped by ``indexes`` and then `MAP` will be computed as the mean + of the `Average Precisions` over each query. + + Args: + empty_target_action: + Specify what to do with queries that do not have at least a positive ``target``. Choose from: + + - ``'neg'``: those queries count as ``0.0`` (default) + - ``'pos'``: those queries count as ``1.0`` + - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned + - ``'error'``: raise a ``ValueError`` + + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects + the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When `None`, DDP + will be used to perform the allgather. 
default: None + + Example: + >>> from paddlemetrics import RetrievalMAP + >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) + >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) + >>> target = tensor([False, False, True, False, True, False, True]) + >>> rmap = RetrievalMAP() + >>> rmap(preds, target, indexes=indexes) + tensor(0.7917) + """ + + higher_is_better = True + + def _metric(self, preds: Tensor, target: Tensor) -> Tensor: + return retrieval_average_precision(preds, target) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/mean_reciprocal_rank.py b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/mean_reciprocal_rank.py new file mode 100644 index 000000000..76f15bde8 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/mean_reciprocal_rank.py @@ -0,0 +1,70 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.retrieval.reciprocal_rank import retrieval_reciprocal_rank +from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric + + +class RetrievalMRR(RetrievalMetric): + """Computes `Mean Reciprocal Rank`_. + + Works with binary target data. Accepts float predictions from a model output. + + Forward accepts + + - ``preds`` (float tensor): ``(N, ...)`` + - ``target`` (long or bool tensor): ``(N, ...)`` + - ``indexes`` (long tensor): ``(N, ...)`` + + ``indexes``, ``preds`` and ``target`` must have the same dimension. + ``indexes`` indicate to which query a prediction belongs. + Predictions will be first grouped by ``indexes`` and then `MRR` will be computed as the mean + of the `Reciprocal Rank` over each query. + + Args: + empty_target_action: + Specify what to do with queries that do not have at least a positive ``target``. Choose from: + + - ``'neg'``: those queries count as ``0.0`` (default) + - ``'pos'``: those queries count as ``1.0`` + - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned + - ``'error'``: raise a ``ValueError`` + + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects + the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When `None`, DDP + will be used to perform the allgather. 
default: None + + Example: + >>> from paddlemetrics import RetrievalMRR + >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) + >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) + >>> target = tensor([False, False, True, False, True, False, True]) + >>> mrr = RetrievalMRR() + >>> mrr(preds, target, indexes=indexes) + tensor(0.7500) + """ + + higher_is_better = True + + def _metric(self, preds: Tensor, target: Tensor) -> Tensor: + return retrieval_reciprocal_rank(preds, target) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_fallout.py b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_fallout.py new file mode 100644 index 000000000..38b70f7c1 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_fallout.py @@ -0,0 +1,131 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.retrieval.fall_out import retrieval_fall_out +from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric +from paddlemetrics.utilities.data import get_group_indexes + + +class RetrievalFallOut(RetrievalMetric): + """Computes `Fall-out`_. + + Works with binary target data. Accepts float predictions from a model output. + + Forward accepts: + + - ``preds`` (float tensor): ``(N, ...)`` + - ``target`` (long or bool tensor): ``(N, ...)`` + - ``indexes`` (long tensor): ``(N, ...)`` + + ``indexes``, ``preds`` and ``target`` must have the same dimension. + ``indexes`` indicate to which query a prediction belongs. + Predictions will be first grouped by ``indexes`` and then `Fall-out` will be computed as the mean + of the `Fall-out` over each query. + + Args: + empty_target_action: + Specify what to do with queries that do not have at least a negative ``target``. Choose from: + + - ``'neg'``: those queries count as ``0.0`` (default) + - ``'pos'``: those queries count as ``1.0`` + - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned + - ``'error'``: raise a ``ValueError`` + + k: consider only the top k elements for each query (default: None, which considers them all) + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects + the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When `None`, DDP + will be used to perform the allgather. 
default: None
+
+    Raises:
+        ValueError:
+            If ``k`` parameter is not `None` or an integer larger than 0
+
+    Example:
+        >>> from paddlemetrics import RetrievalFallOut
+        >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1])
+        >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2])
+        >>> target = tensor([False, False, True, False, True, False, True])
+        >>> fo = RetrievalFallOut(k=2)
+        >>> fo(preds, target, indexes=indexes)
+        tensor(0.5000)
+    """
+
+    higher_is_better = False
+
+    def __init__(
+        self,
+        empty_target_action: str = "pos",
+        k: int = None,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ) -> None:
+        super().__init__(
+            empty_target_action=empty_target_action,
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+
+        if (k is not None) and not (isinstance(k, int) and k > 0):
+            raise ValueError("`k` has to be a positive integer or None")
+        self.k = k
+
+    def compute(self) -> Tensor:
+        """First concat state `indexes`, `preds` and `target` since they were stored as lists.
+
+        After that, compute list of groups that will help in keeping together predictions about the same query.
+        Finally, for each group compute the `_metric` if the number of negative targets is at least 1, otherwise
+        behave as specified by `self.empty_target_action`.
+        """
+        indexes = B.cat(self.indexes, dim=0)
+        preds = B.cat(self.preds, dim=0)
+        target = B.cat(self.target, dim=0)
+
+        res = []
+        groups = get_group_indexes(indexes)
+
+        for group in groups:
+            mini_preds = preds[group]
+            mini_target = target[group]
+
+            if not (1 - mini_target).sum():
+                if self.empty_target_action == "error":
+                    raise ValueError("`compute` method was provided with a query with no negative target.")
+                if self.empty_target_action == "pos":
+                    res.append(tensor(1.0))
+                elif self.empty_target_action == "neg":
+                    res.append(tensor(0.0))
+            else:
+                # ensure the list contains only float tensors
+                res.append(self._metric(mini_preds, mini_target))
+
+        return B.stack([x.to(preds) for x in res]).mean() if res else tensor(0.0).to(preds)
+
+    def _metric(self, preds: Tensor, target: Tensor) -> Tensor:
+        return retrieval_fall_out(preds, target, k=self.k)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_hit_rate.py b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_hit_rate.py
new file mode 100644
index 000000000..6a053b7b5
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_hit_rate.py
@@ -0,0 +1,98 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, Optional
+
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.functional.retrieval.hit_rate import retrieval_hit_rate
+from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric
+
+
+class RetrievalHitRate(RetrievalMetric):
+    """Computes `IR HitRate`_.
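+
+    The hit rate for a single query is ``1.0`` if at least one relevant document is
+    retrieved (within the top ``k`` when ``k`` is given) and ``0.0`` otherwise.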
+ + Works with binary target data. Accepts float predictions from a model output. + + Forward accepts: + + - ``preds`` (float tensor): ``(N, ...)`` + - ``target`` (long or bool tensor): ``(N, ...)`` + - ``indexes`` (long tensor): ``(N, ...)`` + + ``indexes``, ``preds`` and ``target`` must have the same dimension. + ``indexes`` indicate to which query a prediction belongs. + Predictions will be first grouped by ``indexes`` and then the `Hit Rate` will be computed as the mean + of the `Hit Rate` over each query. + + Args: + empty_target_action: + Specify what to do with queries that do not have at least a positive ``target``. Choose from: + + - ``'neg'``: those queries count as ``0.0`` (default) + - ``'pos'``: those queries count as ``1.0`` + - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned + - ``'error'``: raise a ``ValueError`` + + k: consider only the top k elements for each query (default: None, which considers them all) + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects + the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When `None`, DDP + will be used to perform the allgather. default: None + + Raises: + ValueError: + If ``k`` parameter is not `None` or an integer larger than 0 + + Example: + >>> from paddlemetrics import RetrievalHitRate + >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) + >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) + >>> target = tensor([True, False, False, False, True, False, True]) + >>> hr2 = RetrievalHitRate(k=2) + >>> hr2(preds, target, indexes=indexes) + tensor(0.5000) + """ + + higher_is_better = True + + def __init__( + self, + empty_target_action: str = "neg", + k: int = None, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + empty_target_action=empty_target_action, + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + if (k is not None) and not (isinstance(k, int) and k > 0): + raise ValueError("`k` has to be a positive integer or None") + self.k = k + + def _metric(self, preds: Tensor, target: Tensor) -> Tensor: + return retrieval_hit_rate(preds, target, k=self.k) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_metric.py b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_metric.py new file mode 100644 index 000000000..ab43876fa --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_metric.py @@ -0,0 +1,147 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +from abc import ABC, abstractmethod +from typing import Any, Callable, List, Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics import Metric +from paddlemetrics.utilities.checks import _check_retrieval_inputs +from paddlemetrics.utilities.data import get_group_indexes + +#: get_group_indexes is used to group predictions belonging to the same document + + +class RetrievalMetric(Metric): + """Works with binary target data. Accepts float predictions from a model output. + + Forward accepts + + - ``preds`` (float tensor): ``(N, ...)`` + - ``target`` (long or bool tensor): ``(N, ...)`` + - ``indexes`` (long tensor): ``(N, ...)`` + + `indexes`, `preds` and `target` must have the same dimension and will be flattened + to a single dimension once provided. + + `indexes` indicate to which query a prediction belongs. + Predictions will be first grouped by indexes. Then the + real metric, defined by overriding the `_metric` method, + will be computed as the mean of the scores over each query. + + Args: + empty_target_action: + Specify what to do with queries that do not have at least a positive + or negative (depending on the metric) target. Choose from: + + - ``'neg'``: those queries count as ``0.0`` (default) + - ``'pos'``: those queries count as ``1.0`` + - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned + - ``'error'``: raise a ``ValueError`` + + compute_on_step: + Forward only calls ``update()`` and returns None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects + the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When `None`, DDP + will be used to perform the allgather.
default: None + """ + + indexes: List[Tensor] + preds: List[Tensor] + target: List[Tensor] + higher_is_better = True + + def __init__( + self, + empty_target_action: str = "neg", + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + self.allow_non_binary_target = False + + empty_target_action_options = ("error", "skip", "neg", "pos") + if empty_target_action not in empty_target_action_options: + raise ValueError(f"Argument `empty_target_action` received a wrong value `{empty_target_action}`.") + + self.empty_target_action = empty_target_action + + self.add_state("indexes", default=[], dist_reduce_fx=None) + self.add_state("preds", default=[], dist_reduce_fx=None) + self.add_state("target", default=[], dist_reduce_fx=None) + + def update(self, preds: Tensor, target: Tensor, indexes: Tensor) -> None:  # type: ignore + """Check shape, check and convert dtypes, flatten and add to accumulators.""" + if indexes is None: + raise ValueError("Argument `indexes` cannot be None") + + indexes, preds, target = _check_retrieval_inputs( + indexes, preds, target, allow_non_binary_target=self.allow_non_binary_target + ) + + self.indexes.append(indexes) + self.preds.append(preds) + self.target.append(target) + + def compute(self) -> Tensor: + """First concat state ``indexes``, ``preds`` and ``target`` since they were stored as lists. + + After that, compute list of groups that will help in keeping together predictions about the same query. Finally, + for each group compute the ``_metric`` if the number of positive targets is at least 1, otherwise behave as + specified by ``self.empty_target_action``. + """ + indexes = B.cat(self.indexes, dim=0) + preds = B.cat(self.preds, dim=0) + target = B.cat(self.target, dim=0) + + res = [] + groups = get_group_indexes(indexes) + + for group in groups: + mini_preds = preds[group] + mini_target = target[group] + + if not mini_target.sum(): + if self.empty_target_action == "error": + raise ValueError("`compute` method was provided with a query with no positive target.") + if self.empty_target_action == "pos": + res.append(tensor(1.0)) + elif self.empty_target_action == "neg": + res.append(tensor(0.0)) + else: + # ensure list contains only float tensors + res.append(self._metric(mini_preds, mini_target)) + + return B.stack([x.to(preds) for x in res]).mean() if res else tensor(0.0).to(preds) + + @abstractmethod + def _metric(self, preds: Tensor, target: Tensor) -> Tensor: + """Compute a metric over the predictions and target of a single group. + + This method should be overridden by subclasses. + """ diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_ndcg.py b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_ndcg.py new file mode 100644 index 000000000..bb0740cac --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_ndcg.py @@ -0,0 +1,99 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
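The base class above does all of the shared bookkeeping (state accumulation in ``update``, per-query grouping and the ``empty_target_action`` policy in ``compute``), so a concrete retrieval metric only needs to override ``_metric``. A minimal sketch of a custom subclass, assuming the patched modules above and torch-style boolean indexing in the shim; ``RetrievalMeanRelevantScore`` is a hypothetical name, not part of this patch:

    from paddleext.torchapi import Tensor

    from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric


    class RetrievalMeanRelevantScore(RetrievalMetric):
        # Hypothetical metric: mean predicted score over the relevant documents of one query.
        higher_is_better = True

        def _metric(self, preds: Tensor, target: Tensor) -> Tensor:
            # `preds` and `target` arrive here already restricted to a single query group,
            # and `compute` has guaranteed at least one positive target.
            return preds[target > 0].mean()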
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional + +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.retrieval.ndcg import retrieval_normalized_dcg +from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric + + +class RetrievalNormalizedDCG(RetrievalMetric): + """Computes `Normalized Discounted Cumulative Gain`_. + + Works with binary or positive integer target data. Accepts float predictions from a model output. + + Forward accepts: + + - ``preds`` (float tensor): ``(N, ...)`` + - ``target`` (long, int, bool or float tensor): ``(N, ...)`` + - ``indexes`` (long tensor): ``(N, ...)`` + + ``indexes``, ``preds`` and ``target`` must have the same dimension. + ``indexes`` indicate to which query a prediction belongs. + Predictions will be first grouped by ``indexes`` and then `Normalized Discounted Cumulative Gain` + will be computed as the mean of the `Normalized Discounted Cumulative Gain` over each query. + + Args: + empty_target_action: + Specify what to do with queries that do not have at least a positive ``target``. Choose from: + + - ``'neg'``: those queries count as ``0.0`` (default) + - ``'pos'``: those queries count as ``1.0`` + - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned + - ``'error'``: raise a ``ValueError`` + + k: consider only the top k elements for each query (default: None, which considers them all) + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects + the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When `None`, DDP + will be used to perform the allgather. 
default: None + + Raises: + ValueError: + If ``k`` parameter is not `None` or an integer larger than 0 + + Example: + >>> from paddlemetrics import RetrievalNormalizedDCG + >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) + >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) + >>> target = tensor([False, False, True, False, True, False, True]) + >>> ndcg = RetrievalNormalizedDCG() + >>> ndcg(preds, target, indexes=indexes) + tensor(0.8467) + """ + + higher_is_better = True + + def __init__( + self, + empty_target_action: str = "neg", + k: int = None, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + empty_target_action=empty_target_action, + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + if (k is not None) and not (isinstance(k, int) and k > 0): + raise ValueError("`k` has to be a positive integer or None") + self.k = k + self.allow_non_binary_target = True + + def _metric(self, preds: Tensor, target: Tensor) -> Tensor: + return retrieval_normalized_dcg(preds, target, k=self.k) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_precision.py b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_precision.py new file mode 100644 index 000000000..f0f983a89 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_precision.py @@ -0,0 +1,98 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional + +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.retrieval.precision import retrieval_precision +from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric + + +class RetrievalPrecision(RetrievalMetric): + """Computes `IR Precision`_. + + Works with binary target data. Accepts float predictions from a model output. + + Forward accepts: + + - ``preds`` (float tensor): ``(N, ...)`` + - ``target`` (long or bool tensor): ``(N, ...)`` + - ``indexes`` (long tensor): ``(N, ...)`` + + ``indexes``, ``preds`` and ``target`` must have the same dimension. + ``indexes`` indicate to which query a prediction belongs. + Predictions will be first grouped by ``indexes`` and then `Precision` will be computed as the mean + of the `Precision` over each query. + + Args: + empty_target_action: + Specify what to do with queries that do not have at least a positive ``target``. Choose from: + + - ``'neg'``: those queries count as ``0.0`` (default) + - ``'pos'``: those queries count as ``1.0`` + - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned + - ``'error'``: raise a ``ValueError`` + + k: consider only the top k elements for each query (default: None, which considers them all) + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. 
default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects + the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When `None`, DDP + will be used to perform the allgather. default: None + + Raises: + ValueError: + If ``k`` parameter is not `None` or an integer larger than 0 + + Example: + >>> from paddlemetrics import RetrievalPrecision + >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) + >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) + >>> target = tensor([False, False, True, False, True, False, True]) + >>> p2 = RetrievalPrecision(k=2) + >>> p2(preds, target, indexes=indexes) + tensor(0.5000) + """ + + higher_is_better = True + + def __init__( + self, + empty_target_action: str = "neg", + k: int = None, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + empty_target_action=empty_target_action, + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + if (k is not None) and not (isinstance(k, int) and k > 0): + raise ValueError("`k` has to be a positive integer or None") + self.k = k + + def _metric(self, preds: Tensor, target: Tensor) -> Tensor: + return retrieval_precision(preds, target, k=self.k) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_r_precision.py b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_r_precision.py new file mode 100644 index 000000000..75373532a --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_r_precision.py @@ -0,0 +1,70 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.retrieval.r_precision import retrieval_r_precision +from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric + + +class RetrievalRPrecision(RetrievalMetric): + """Computes `IR R-Precision`_. + + Works with binary target data. Accepts float predictions from a model output. + + Forward accepts: + + - ``preds`` (float tensor): ``(N, ...)`` + - ``target`` (long or bool tensor): ``(N, ...)`` + - ``indexes`` (long tensor): ``(N, ...)`` + + ``indexes``, ``preds`` and ``target`` must have the same dimension. + ``indexes`` indicate to which query a prediction belongs. + Predictions will be first grouped by ``indexes`` and then `R-Precision` will be computed as the mean + of the `R-Precision` over each query. + + Args: + empty_target_action: + Specify what to do with queries that do not have at least a positive ``target``. 
Choose from: + + - ``'neg'``: those queries count as ``0.0`` (default) + - ``'pos'``: those queries count as ``1.0`` + - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned + - ``'error'``: raise a ``ValueError`` + + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects + the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When `None`, DDP + will be used to perform the allgather. default: None + + Example: + >>> from paddlemetrics import RetrievalRPrecision + >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) + >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) + >>> target = tensor([False, False, True, False, True, False, True]) + >>> p2 = RetrievalRPrecision() + >>> p2(preds, target, indexes=indexes) + tensor(0.7500) + """ + + higher_is_better = True + + def _metric(self, preds: Tensor, target: Tensor) -> Tensor: + return retrieval_r_precision(preds, target) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_recall.py b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_recall.py new file mode 100644 index 000000000..26ace51c2 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_recall.py @@ -0,0 +1,98 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional + +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.retrieval.recall import retrieval_recall +from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric + + +class RetrievalRecall(RetrievalMetric): + """Computes `IR Recall`_. + + Works with binary target data. Accepts float predictions from a model output. + + Forward accepts: + + - ``preds`` (float tensor): ``(N, ...)`` + - ``target`` (long or bool tensor): ``(N, ...)`` + - ``indexes`` (long tensor): ``(N, ...)`` + + ``indexes``, ``preds`` and ``target`` must have the same dimension. + ``indexes`` indicate to which query a prediction belongs. + Predictions will be first grouped by ``indexes`` and then `Recall` will be computed as the mean + of the `Recall` over each query. + + Args: + empty_target_action: + Specify what to do with queries that do not have at least a positive ``target``. Choose from: + + - ``'neg'``: those queries count as ``0.0`` (default) + - ``'pos'``: those queries count as ``1.0`` + - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned + - ``'error'``: raise a ``ValueError`` + + k: consider only the top k elements for each query (default: None, which considers them all) + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. 
default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects + the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When `None`, DDP + will be used to perform the allgather. default: None + + Raises: + ValueError: + If ``k`` parameter is not `None` or an integer larger than 0 + + Example: + >>> from paddlemetrics import RetrievalRecall + >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) + >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) + >>> target = tensor([False, False, True, False, True, False, True]) + >>> r2 = RetrievalRecall(k=2) + >>> r2(preds, target, indexes=indexes) + tensor(0.7500) + """ + + higher_is_better = True + + def __init__( + self, + empty_target_action: str = "neg", + k: int = None, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + empty_target_action=empty_target_action, + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + if (k is not None) and not (isinstance(k, int) and k > 0): + raise ValueError("`k` has to be a positive integer or None") + self.k = k + + def _metric(self, preds: Tensor, target: Tensor) -> Tensor: + return retrieval_recall(preds, target, k=self.k) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/setup_tools.py b/EE/paddlemetric/src/build/lib/paddlemetrics/setup_tools.py new file mode 100644 index 000000000..e3233cef9 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/setup_tools.py @@ -0,0 +1,74 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +import re +from typing import List + +_PROJECT_ROOT = os.path.dirname(os.path.dirname(__file__)) + + +def _load_requirements(path_dir: str, file_name: str = "requirements.txt", comment_char: str = "#") -> List[str]: + """Load requirements from a file. + + >>> _load_requirements(_PROJECT_ROOT)  # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE + ['numpy...', 'B...'] + """ + with open(os.path.join(path_dir, file_name)) as file: + lines = [ln.strip() for ln in file.readlines()] + reqs = [] + for ln in lines: + # filter all comments + if comment_char in ln: + ln = ln[: ln.index(comment_char)].strip() + # skip directly installed dependencies + if ln.startswith("http"): + continue + if ln:  # if requirement is not empty + reqs.append(ln) + return reqs + + +def _load_readme_description(path_dir: str, homepage: str, version: str) -> str: + """Load readme as description. + + >>> _load_readme_description(_PROJECT_ROOT, "", "")  # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE + '<div align="center">...'
+ """ + path_readme = os.path.join(path_dir, "README.md") + with open(path_readme, encoding="utf-8") as fp: + text = fp.read() + + # https://github.com/PyTorchLightning/paddlemetrics/raw/master/docs/source/_static/images/lightning_module/pt_to_pl.png + github_source_url = os.path.join(homepage, "raw", version) + # replace relative repository path to absolute link to the release + # do not replace all "docs" as in the readme we refer to some other sources with a particular path to docs + text = text.replace("docs/source/_static/", f"{os.path.join(github_source_url, 'docs/source/_static/')}") + + # readthedocs badge + text = text.replace("badge/?version=stable", f"badge/?version={version}") + text = text.replace("paddlemetrics.readthedocs.io/en/stable/", f"paddlemetrics.readthedocs.io/en/{version}") + # codecov badge + text = text.replace("/branch/master/graph/badge.svg", f"/release/{version}/graph/badge.svg") + # replace github badges for release ones + text = text.replace("badge.svg?branch=master&event=push", f"badge.svg?tag={version}") + # Azure... + text = text.replace("?branchName=master", f"?branchName=refs%2Ftags%2F{version}") + text = re.sub(r"\?definitionId=\d+&branchName=master", f"?definitionId=2&branchName=refs%2Ftags%2F{version}", text) + + skip_begin = r"<!-- following section will be skipped from PyPI description -->" + skip_end = r"<!-- end skipping PyPI description -->" + # todo: wrap content as commented description + text = re.sub(rf"{skip_begin}.+?{skip_end}", "", text, flags=re.IGNORECASE + re.DOTALL) + + return text diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/text/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/text/__init__.py new file mode 100644 index 000000000..782ca2955 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/text/__init__.py @@ -0,0 +1,18 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#from paddlemetrics.text.bert import BERTScore  # noqa: F401 +from paddlemetrics.text.bleu import BLEUScore  # noqa: F401 +from paddlemetrics.text.rouge import ROUGEScore  # noqa: F401 +from paddlemetrics.text.sacre_bleu import SacreBLEUScore  # noqa: F401 +from paddlemetrics.text.wer import WER  # noqa: F401 diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/text/bert.py b/EE/paddlemetric/src/build/lib/paddlemetrics/text/bert.py new file mode 100644 index 000000000..0f602f30a --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/text/bert.py @@ -0,0 +1,251 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
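As a quick sanity check of the comment/URL filtering implemented in `_load_requirements` above, a small self-contained sketch (the temporary directory and file contents are made up for illustration):

    import os
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        with open(os.path.join(tmp, "requirements.txt"), "w") as f:
            f.write("numpy>=1.17  # inline comment is cut\n")
            f.write("http://example.com/pkg.tar.gz\n")
            f.write("# a pure comment line\n")
        print(_load_requirements(tmp))  # -> ['numpy>=1.17']

Inline comments are cut at the comment character, direct URL dependencies are skipped, and empty lines are dropped, matching the three branches of the loop.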
+import warnings +from typing import Any, Callable, Dict, List, Optional, Union + +import paddleext.torchapi as B + +from paddlemetrics.functional import bert_score +from paddlemetrics.functional.text.bert import _preprocess_text +from paddlemetrics.metric import Metric +from paddlemetrics.utilities.imports import _TRANSFORMERS_AVAILABLE + +if _TRANSFORMERS_AVAILABLE: + from transformers import AutoTokenizer + + +# Default model recommended in the original implementation. +_DEFAULT_MODEL = "roberta-large" + + +def _concatenate(d: Dict[str, List[B.Tensor]]) -> Dict[str, B.Tensor]: + """Concatenate list of tensors within a given dictionary.""" + output_dict: Dict[str, B.Tensor] = {} + for k, v in d.items(): + output_dict[k] = B.cat(v) + return output_dict + + +class BERTScore(Metric): + """`Bert_score Evaluating Text Generation`_ leverages the pre-trained contextual embeddings from BERT and + matches words in candidate and reference sentences by cosine similarity. It has been shown to correlate with + human judgment on sentence-level and system-level evaluation. Moreover, BERTScore computes precision, recall, + and F1 measure, which can be useful for evaluating different language generation tasks. + + This implementation follows the original implementation from `BERT_score`_. + + Args: + predictions: + An iterable of predicted sentences. + references: + An iterable of target sentences. + model_type: + A name or a model path used to load `transformers` pretrained model. + num_layers: + A layer of representation to use. + all_layers: + An indication of whether the representation from all model's layers should be used. + If `all_layers = True`, the argument `num_layers` is ignored. + model: + A user's own model. Must be an instance of `B.nn.Module`. + user_tokenizer: + A user's own tokenizer used with the own model. This must be an instance with the `__call__` method. + This method must take an iterable of sentences (`List[str]`) and must return a python dictionary + containing `"input_ids"` and `"attention_mask"` represented by `B.Tensor`. It is up to the user's model + whether `"input_ids"` is a `B.Tensor` of input ids or embedding vectors. + This tokenizer must prepend an equivalent of `[CLS]` token and append an equivalent of `[SEP]` token + as `transformers` tokenizer does. + user_forward_fn: + A user's own forward function used in combination with `user_model`. This function must take `user_model` + and a python dictionary containing `"input_ids"` and `"attention_mask"` represented by `B.Tensor` + as an input and return the model's output represented by the single `B.Tensor`. + verbose: + An indication of whether a progress bar should be displayed during the embeddings calculation. + idf: + An indication of whether normalization using inverse document frequencies should be used. + device: + A device to be used for calculation. + max_length: + A maximum length of input sequences. Sequences longer than `max_length` are to be trimmed. + batch_size: + A batch size used for model processing. + num_threads: + A number of threads to use for a dataloader. + return_hash: + An indication of whether the corresponding `hash_code` should be returned. + lang: + A language of input sentences. + rescale_with_baseline: + An indication of whether bertscore should be rescaled with a pre-computed baseline. + When a pretrained model from `transformers` model is used, the corresponding baseline is downloaded + from the original `bert-score` package from `BERT_score`_ if available.
+ In other cases, please specify a path to the baseline csv/tsv file, which must follow the formatting + of the files from `BERT_score`_. + baseline_path: + A path to the user's own local csv/tsv file with the baseline scale. + baseline_url: + A url path to the user's own csv/tsv file with the baseline scale. + compute_on_step: + Forward only calls ``update()`` and returns None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When ``None``, DDP + will be used to perform the allgather + + Returns: + Python dictionary containing the keys `precision`, `recall` and `f1` with corresponding values. + + Example: + >>> predictions = ["hello there", "general kenobi"] + >>> references = ["hello there", "master kenobi"] + >>> bertscore = BERTScore() + >>> bertscore.update(predictions=predictions, references=references) + >>> bertscore.compute()  # doctest: +SKIP + {'precision': [0.99..., 0.99...], + 'recall': [0.99..., 0.99...], + 'f1': [0.99..., 0.99...]} + """ + + higher_is_better = True + + def __init__( + self, + model_name_or_path: Optional[str] = None, + num_layers: Optional[int] = None, + all_layers: bool = False, + model: Optional[B.nn.Module] = None, + user_tokenizer: Optional[Any] = None, + user_forward_fn: Callable[[B.nn.Module, Dict[str, B.Tensor]], B.Tensor] = None, + verbose: bool = False, + idf: bool = False, + device: Optional[Union[str, B.device]] = None, + max_length: int = 512, + batch_size: int = 64, + num_threads: int = 4, + return_hash: bool = False, + lang: str = "en", + rescale_with_baseline: bool = False, + baseline_path: Optional[str] = None, + baseline_url: Optional[str] = None, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ): + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + self.model_name_or_path = model_name_or_path + self.num_layers = num_layers + self.all_layers = all_layers + self.model = model + self.user_forward_fn = user_forward_fn + self.verbose = verbose + self.idf = idf + self.embedding_device = device + self.max_length = max_length + self.batch_size = batch_size + self.num_threads = num_threads + self.return_hash = return_hash + self.lang = lang + self.rescale_with_baseline = rescale_with_baseline + self.baseline_path = baseline_path + self.baseline_url = baseline_url + self.predictions: Dict[str, List[B.Tensor]] = {"input_ids": [], "attention_mask": []} + self.references: Dict[str, List[B.Tensor]] = {"input_ids": [], "attention_mask": []} + + if user_tokenizer: + self.tokenizer = user_tokenizer + self.user_tokenizer = True + else: + if not _TRANSFORMERS_AVAILABLE: + raise ValueError( + "`BERTScore` metric with default tokenizers requires `transformers` package be installed. " + "Either install with `pip install transformers>=4.0` or `pip install paddlemetrics[text]`" + ) + if not model_name_or_path: + model_name_or_path = _DEFAULT_MODEL + warnings.warn( + "The argument `model_name_or_path` was not specified while it is required when the default " + "`transformers` model is used."
+ f"It is, therefore, used the default recommended model - {_DEFAULT_MODEL}." + ) + self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) + self.user_tokenizer = False + + def update(self, predictions: List[str], references: List[str]) -> None: # type: ignore + """Store predictions/references for computing BERT scores. It is necessary to store sentences in a + tokenized form to ensure the DDP mode working. + + Args: + predictions: + An iterable of predicted sentences. + references: + An iterable of predicted sentences. + """ + predictions_dict = _preprocess_text( + predictions, + self.tokenizer, + self.max_length, + truncation=False, + sort_according_length=False, + own_tokenizer=self.user_tokenizer, + ) + references_dict = _preprocess_text( + references, + self.tokenizer, + self.max_length, + truncation=False, + sort_according_length=False, + own_tokenizer=self.user_tokenizer, + ) + self.predictions["input_ids"].append(predictions_dict["input_ids"]) + self.predictions["attention_mask"].append(predictions_dict["attention_mask"]) + self.references["input_ids"].append(references_dict["input_ids"]) + self.references["attention_mask"].append(references_dict["attention_mask"]) + + def compute(self) -> Dict[str, Union[List[float], str]]: + """Calculate BERT scores. + + Return: + Python dictionary containing the keys `precision`, `recall` and `f1` with corresponding values. + """ + return bert_score( + predictions=_concatenate(self.predictions), + references=_concatenate(self.references), + model_name_or_path=self.model_name_or_path, + num_layers=self.num_layers, + all_layers=self.all_layers, + model=self.model, + user_tokenizer=self.tokenizer if self.user_tokenizer else None, + user_forward_fn=self.user_forward_fn, + verbose=self.verbose, + idf=self.idf, + device=self.embedding_device, + max_length=self.max_length, + batch_size=self.batch_size, + num_threads=self.num_threads, + return_hash=self.return_hash, + lang=self.lang, + rescale_with_baseline=self.rescale_with_baseline, + baseline_path=self.baseline_path, + baseline_url=self.baseline_url, + ) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/text/bleu.py b/EE/paddlemetric/src/build/lib/paddlemetrics/text/bleu.py new file mode 100644 index 000000000..46937d98f --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/text/bleu.py @@ -0,0 +1,120 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# referenced from +# Library Name: torchtext +# Authors: torchtext authors and @sluks +# Date: 2020-07-18 +# Link: https://pyB.org/text/_modules/torchtext/data/metrics.html#bleu_score +from typing import Any, Callable, Optional, Sequence + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics import Metric +from paddlemetrics.functional.text.bleu import _bleu_score_compute, _bleu_score_update + + +class BLEUScore(Metric): + """Calculate `BLEU score`_ of machine translated text with one or more references. 
+ + Args: + n_gram: + Gram value ranged from 1 to 4 (Default 4) + smooth: + Whether or not to apply smoothing – see [2] + compute_on_step: + Forward only calls ``update()`` and returns None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. + process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When `None`, DDP + will be used to perform the allgather. + + Example: + >>> translate_corpus = ['the cat is on the mat'.split()] + >>> reference_corpus = [['there is a cat on the mat'.split(), 'a cat is on the mat'.split()]] + >>> metric = BLEUScore() + >>> metric(reference_corpus, translate_corpus) + tensor(0.7598) + + References: + [1] BLEU: a Method for Automatic Evaluation of Machine Translation by Papineni, + Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu `BLEU`_ + + [2] Automatic Evaluation of Machine Translation Quality Using Longest Common Subsequence + and Skip-Bigram Statistics by Chin-Yew Lin and Franz Josef Och `Machine Translation Evolution`_ + """ + + is_differentiable = False + higher_is_better = True + trans_len: Tensor + ref_len: Tensor + numerator: Tensor + denominator: Tensor + + def __init__( + self, + n_gram: int = 4, + smooth: bool = False, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Optional[Callable] = None, + ): + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + self.n_gram = n_gram + self.smooth = smooth + + self.add_state("trans_len", tensor(0, dtype=B.float), dist_reduce_fx="sum") + self.add_state("ref_len", tensor(0, dtype=B.float), dist_reduce_fx="sum") + self.add_state("numerator", B.zeros(self.n_gram), dist_reduce_fx="sum") + self.add_state("denominator", B.zeros(self.n_gram), dist_reduce_fx="sum") + + def update( # type: ignore + self, reference_corpus: Sequence[Sequence[Sequence[str]]], translate_corpus: Sequence[Sequence[str]] + ) -> None: + """Compute Precision Scores. + + Args: + reference_corpus: An iterable of iterables of reference corpus + translate_corpus: An iterable of machine translated corpus + """ + self.trans_len, self.ref_len = _bleu_score_update( + reference_corpus, + translate_corpus, + self.numerator, + self.denominator, + self.trans_len, + self.ref_len, + self.n_gram, + ) + + def compute(self) -> Tensor: + """Calculate BLEU score. + + Return: + Tensor with BLEU Score + """ + return _bleu_score_compute( + self.trans_len, self.ref_len, self.numerator, self.denominator, self.n_gram, self.smooth + ) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/text/rouge.py b/EE/paddlemetric/src/build/lib/paddlemetrics/text/rouge.py new file mode 100644 index 000000000..254f366d7 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/text/rouge.py @@ -0,0 +1,171 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
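Because every piece of BLEUScore state reduces with a plain sum, the final combination in `_bleu_score_compute` can be reasoned about as the classic BLEU formula. A sketch with made-up counts, ignoring the optional smoothing that the patched helper also supports:

    import math

    # Hypothetical clipped n-gram matches and candidate totals for orders 1..4.
    numerator = [9, 5, 3, 2]
    denominator = [10, 9, 8, 7]
    trans_len, ref_len = 10, 11

    precisions = [n / d for n, d in zip(numerator, denominator)]
    geo_mean = math.exp(sum(math.log(p) for p in precisions) / len(precisions))
    # Brevity penalty punishes translations shorter than the reference.
    brevity_penalty = 1.0 if trans_len > ref_len else math.exp(1 - ref_len / trans_len)
    bleu = brevity_penalty * geo_mean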
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import warnings +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +from paddleext.torchapi import Tensor + +from paddlemetrics import Metric +from paddlemetrics.functional.text.rouge import ALLOWED_ROUGE_KEYS, _rouge_score_compute, _rouge_score_update +from paddlemetrics.utilities.imports import _NLTK_AVAILABLE + + +class ROUGEScore(Metric): + """`Calculate Rouge Score`_, used for automatic summarization. This implementation should imitate the behaviour + of the `rouge-score` package `Python ROUGE Implementation`_. + + Args: + newline_sep: + New line separates the inputs. + This argument is no longer in use; it is deprecated in v0.6 and will be removed in v0.7. + use_stemmer: + Use Porter stemmer to strip word suffixes to improve matching. + rouge_keys: + A list of rouge types to calculate. + Keys that are allowed are ``rougeL``, ``rougeLsum``, and ``rouge1`` through ``rouge9``. + decimal_places: + The number of digits to round the computed values to. + This argument is no longer in use; it is deprecated in v0.6 and will be removed in v0.7. + compute_on_step: + Forward only calls ``update()`` and returns None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. + process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When `None`, DDP + will be used to perform the allgather. + + Example: + + >>> targets = "Is your name John".split() + >>> preds = "My name is John".split() + >>> rouge = ROUGEScore()   # doctest: +SKIP + >>> from pprint import pprint + >>> pprint(rouge(preds, targets))  # doctest: +NORMALIZE_WHITESPACE +SKIP + {'rouge1_fmeasure': 0.25, + 'rouge1_precision': 0.25, + 'rouge1_recall': 0.25, + 'rouge2_fmeasure': 0.0, + 'rouge2_precision': 0.0, + 'rouge2_recall': 0.0, + 'rougeL_fmeasure': 0.25, + 'rougeL_precision': 0.25, + 'rougeL_recall': 0.25, + 'rougeLsum_fmeasure': 0.25, + 'rougeLsum_precision': 0.25, + 'rougeLsum_recall': 0.25} + + Raises: + ValueError: + If the python package ``nltk`` is not installed. + ValueError: + If any of the ``rouge_keys`` does not belong to the allowed set of keys.
+ + References: + [1] ROUGE: A Package for Automatic Evaluation of Summaries by Chin-Yew Lin `Rouge Detail`_ + """ + + higher_is_better = True + + def __init__( + self, + newline_sep: Optional[bool] = None,  # remove in v0.7 + use_stemmer: bool = False, + rouge_keys: Union[str, Tuple[str, ...]] = ("rouge1", "rouge2", "rougeL", "rougeLsum"),  # type: ignore + decimal_places: Optional[bool] = None,  # remove in v0.7 + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Optional[Callable] = None, + ): + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + if newline_sep is not None: + warnings.warn("Argument `newline_sep` is deprecated in v0.6 and will be removed in v0.7") + if decimal_places is not None: + warnings.warn("Argument `decimal_places` is deprecated in v0.6 and will be removed in v0.7") + + if use_stemmer or "rougeLsum" in rouge_keys: + if not _NLTK_AVAILABLE: + raise ValueError("Stemmer and/or `rougeLsum` requires that nltk is installed. Use `pip install nltk`.") + import nltk + + if not isinstance(rouge_keys, tuple): + rouge_keys = tuple([rouge_keys]) + for key in rouge_keys: + if key not in ALLOWED_ROUGE_KEYS: + raise ValueError(f"Got unknown rouge key {key}. Expected to be one of {ALLOWED_ROUGE_KEYS}") + + self.rouge_keys = rouge_keys + self.rouge_keys_values = [ALLOWED_ROUGE_KEYS[key] for key in rouge_keys] + self.stemmer = nltk.stem.porter.PorterStemmer() if use_stemmer else None + + # Adding states dynamically to prevent IndexError during the sync function, as some lists can be empty. + for rouge_key in self.rouge_keys: + for score in ["fmeasure", "precision", "recall"]: + self.add_state(f"{rouge_key}_{score}", [], dist_reduce_fx=None) + + def update(self, preds: Union[str, List[str]], targets: Union[str, List[str]]) -> None:  # type: ignore + """Compute rouge scores. + + Args: + preds: An iterable of predicted sentences. + targets: An iterable of target sentences. + """ + + if isinstance(preds, str): + preds = [preds] + + if isinstance(targets, str): + targets = [targets] + + output: Dict[Union[int, str], List[Dict[str, Tensor]]] = _rouge_score_update( + preds, targets, self.rouge_keys_values, stemmer=self.stemmer + ) + for rouge_key, metrics in output.items(): + for metric in metrics: + for type, value in metric.items(): + getattr(self, f"rouge{rouge_key}_{type}").append(value.to(self.device)) + + def compute(self) -> Dict[str, Tensor]: + """Calculate the aggregated ROUGE scores. + + Return: + Python dictionary of rouge scores for each input rouge key. + """ + update_output = {} + for rouge_key in self.rouge_keys_values: + for type in ["fmeasure", "precision", "recall"]: + update_output[f"rouge{rouge_key}_{type}"] = getattr(self, f"rouge{rouge_key}_{type}") + + return _rouge_score_compute(update_output) + + def __hash__(self) -> int: + # override to hash list objects. + # this is a bug in the upstream pytorch release.
+ hash_vals = [self.__class__.__name__] + + for key in self._defaults: + value = getattr(self, key) + if isinstance(value, list): + value = tuple(value) + hash_vals.append(value) + + return hash(tuple(hash_vals)) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/text/sacre_bleu.py b/EE/paddlemetric/src/build/lib/paddlemetrics/text/sacre_bleu.py new file mode 100644 index 000000000..4f4d99e8f --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/text/sacre_bleu.py @@ -0,0 +1,134 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# referenced from +# Library Name: torchtext +# Authors: torchtext authors and @sluks +# Date: 2020-07-18 +# Link: https://pytorch.org/text/_modules/torchtext/data/metrics.html#bleu_score +from typing import Any, Callable, Optional, Sequence + +from typing_extensions import Literal + +from paddlemetrics.functional.text.bleu import _bleu_score_update +from paddlemetrics.functional.text.sacre_bleu import _SacreBLEUTokenizer +from paddlemetrics.text.bleu import BLEUScore +from paddlemetrics.utilities.imports import _REGEX_AVAILABLE + +AVAILABLE_TOKENIZERS = ("none", "13a", "zh", "intl", "char") + + +class SacreBLEUScore(BLEUScore): + """Calculate `BLEU score`_ [1] of machine translated text with one or more references. This implementation + follows the behaviour of the SacreBLEU [2] implementation from https://github.com/mjpost/sacrebleu. + + The SacreBLEU implementation differs from the NLTK BLEU implementation in tokenization techniques. + + Args: + n_gram: + Gram value ranged from 1 to 4 (Default 4) + smooth: + Whether or not to apply smoothing – see [2] + tokenize: + Tokenization technique to be used. (Default '13a') + Supported tokenization: ['none', '13a', 'zh', 'intl', 'char'] + lowercase: + If ``True``, BLEU score over lowercased text is calculated. + compute_on_step: + Forward only calls ``update()`` and returns None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. + process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When `None`, DDP + will be used to perform the allgather. + + Raises: + ValueError: + If ``tokenize`` is not one of 'none', '13a', 'zh', 'intl' or 'char' + ValueError: + If ``tokenize`` is set to 'intl' and `regex` is not installed + + + Example: + >>> translate_corpus = ['the cat is on the mat'] + >>> reference_corpus = [['there is a cat on the mat', 'a cat is on the mat']] + >>> metric = SacreBLEUScore() + >>> metric(reference_corpus, translate_corpus) + tensor(0.7598) + + References: + [1] BLEU: a Method for Automatic Evaluation of Machine Translation by Papineni, + Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu `BLEU`_ + + [2] A Call for Clarity in Reporting BLEU Scores by Matt Post.
+ + [3] Automatic Evaluation of Machine Translation Quality Using Longest Common Subsequence + and Skip-Bigram Statistics by Chin-Yew Lin and Franz Josef Och `Machine Translation Evolution`_ + """ + + def __init__( + self, + n_gram: int = 4, + smooth: bool = False, + tokenize: Literal["none", "13a", "zh", "intl", "char"] = "13a", + lowercase: bool = False, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Optional[Callable] = None, + ): + super().__init__( + n_gram=n_gram, + smooth=smooth, + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + if tokenize not in AVAILABLE_TOKENIZERS: + raise ValueError(f"Argument `tokenize` expected to be one of {AVAILABLE_TOKENIZERS} but got {tokenize}.") + + if tokenize == "intl" and not _REGEX_AVAILABLE: + raise ValueError( + "`'intl'` tokenization requires `regex` installed. Use `pip install regex` or `pip install " + "paddlemetrics[text]`." + ) + self.tokenizer = _SacreBLEUTokenizer(tokenize, lowercase) + + def update(  # type: ignore + self, reference_corpus: Sequence[Sequence[str]], translate_corpus: Sequence[str] + ) -> None: + """Compute Precision Scores. + + Args: + reference_corpus: An iterable of iterables of reference corpus + translate_corpus: An iterable of machine translated corpus + """ + reference_corpus_: Sequence[Sequence[Sequence[str]]] = [ + [self.tokenizer(line) for line in reference] for reference in reference_corpus + ] + translate_corpus_: Sequence[Sequence[str]] = [self.tokenizer(line) for line in translate_corpus] + + self.trans_len, self.ref_len = _bleu_score_update( + reference_corpus_, + translate_corpus_, + self.numerator, + self.denominator, + self.trans_len, + self.ref_len, + self.n_gram, + ) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/text/wer.py b/EE/paddlemetric/src/build/lib/paddlemetrics/text/wer.py new file mode 100644 index 000000000..7bb69740b --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/text/wer.py @@ -0,0 +1,109 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Callable, List, Optional, Union +from warnings import warn + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.text.wer import _wer_compute, _wer_update +from paddlemetrics.metric import Metric + + +class WER(Metric): + r""" + Word error rate (WER_) is a common metric of the performance of an automatic speech recognition system. + This value indicates the percentage of words that were incorrectly predicted. + The lower the value, the better the performance of the ASR system, with a WER of 0 being a perfect score. + Word error rate can then be computed as: + + .. math:: + WER = \frac{S + D + I}{N} = \frac{S + D + I}{S + D + C} + + where: + - S is the number of substitutions, + - D is the number of deletions, + - I is the number of insertions, + - C is the number of correct words, + - N is the number of words in the reference (N=S+D+C). + + Compute WER score of transcribed segments against references. + + Args: + concatenate_texts: Whether to concatenate all input texts or compute WER iteratively. + This argument is deprecated in v0.6 and will be removed in v0.7. + compute_on_step: + Forward only calls ``update()`` and returns None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When ``None``, DDP + will be used to perform the allgather + + Returns: + (Tensor) Word error rate + + Examples: + >>> predictions = ["this is the prediction", "there is an other sample"] + >>> references = ["this is the reference", "there is another one"] + >>> metric = WER() + >>> metric(predictions, references) + tensor(0.5000) + """ + is_differentiable = False + higher_is_better = False + errors: Tensor + total: Tensor + + def __init__( + self, + concatenate_texts: Optional[bool] = None,  # TODO: remove in v0.7 + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ): + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + if concatenate_texts is not None: + warn("`concatenate_texts` has been deprecated in v0.6 and will be removed in v0.7", DeprecationWarning) + self.add_state("errors", tensor(0, dtype=B.float), dist_reduce_fx="sum") + self.add_state("total", tensor(0, dtype=B.float), dist_reduce_fx="sum") + + def update(self, predictions: Union[str, List[str]], references: Union[str, List[str]]) -> None:  # type: ignore + """Store references/predictions for computing Word Error Rate scores. + + Args: + predictions: Transcription(s) to score as a string or list of strings + references: Reference(s) for each speech input as a string or list of strings + """ + errors, total = _wer_update(predictions, references) + self.errors += errors + self.total += total + + def compute(self) -> Tensor: + """Calculate the word error rate.
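For intuition, the arithmetic behind the formula above on a toy pair (the alignment counts are tallied by hand here, not via the patched `_wer_update` helper):

    # reference:  "this is the reference"  -> N = 4 words
    # prediction: "this is a reference"    -> one substitution ("the" -> "a")
    S, D, I, N = 1, 0, 0, 4
    wer = (S + D + I) / N  # 0.25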
+ + Returns: + (Tensor) Word error rate + """ + return _wer_compute(self.errors, self.total) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/__init__.py new file mode 100644 index 000000000..b1b2a5067 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/__init__.py @@ -0,0 +1,3 @@ +from paddlemetrics.utilities.data import apply_to_collection # noqa: F401 +from paddlemetrics.utilities.distributed import class_reduce, reduce # noqa: F401 +from paddlemetrics.utilities.prints import rank_zero_debug, rank_zero_info, rank_zero_warn # noqa: F401 diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/checks.py b/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/checks.py new file mode 100644 index 000000000..b948f103b --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/checks.py @@ -0,0 +1,582 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Optional, Tuple + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.utilities.data import select_topk, to_onehot +from paddlemetrics.utilities.enums import DataType + + +def _check_same_shape(preds: Tensor, target: Tensor) -> None: + """Check that predictions and target have the same shape, else raise error.""" + if preds.shape != target.shape: + raise RuntimeError("Predictions and targets are expected to have the same shape") + + +def _basic_input_validation(preds: Tensor, target: Tensor, threshold: float, multiclass: Optional[bool]) -> None: + """Perform basic validation of inputs that does not require deducing any information of the type of inputs.""" + + if target.is_floating_point(): + raise ValueError("The `target` has to be an integer tensor.") + if target.min() < 0: + raise ValueError("The `target` has to be a non-negative tensor.") + + preds_float = preds.is_floating_point() + if not preds_float and preds.min() < 0: + raise ValueError("If `preds` are integers, they have to be non-negative.") + + if not preds.shape[0] == target.shape[0]: + raise ValueError("The `preds` and `target` should have the same first dimension.") + + if multiclass is False and target.max() > 1: + raise ValueError("If you set `multiclass=False`, then `target` should not exceed 1.") + + if multiclass is False and not preds_float and preds.max() > 1: + raise ValueError("If you set `multiclass=False` and `preds` are integers, then `preds` should not exceed 1.") + + +def _check_shape_and_type_consistency(preds: Tensor, target: Tensor) -> Tuple[DataType, int]: + """This checks that the shape and type of inputs are consistent with each other and fall into one of the + allowed input types (see the documentation of docstring of ``_input_format_classification``). It does not check + for consistency of number of classes, other functions take care of that. 
+
+    It returns the name of the case in which the inputs fall, and the implied number of classes (from the ``C`` dim for
+    multi-class data, or extra dim(s) for multi-label data).
+    """
+
+    preds_float = preds.is_floating_point()
+
+    if preds.ndim == target.ndim:
+        if preds.shape != target.shape:
+            raise ValueError(
+                "The `preds` and `target` should have the same shape,"
+                f" got `preds` with shape={preds.shape} and `target` with shape={target.shape}."
+            )
+        if preds_float and target.max() > 1:
+            raise ValueError(
+                "If `preds` and `target` are of shape (N, ...) and `preds` are floats, `target` should be binary."
+            )
+
+        # Get the case
+        if preds.ndim == 1 and preds_float:
+            case = DataType.BINARY
+        elif preds.ndim == 1 and not preds_float:
+            case = DataType.MULTICLASS
+        elif preds.ndim > 1 and preds_float:
+            case = DataType.MULTILABEL
+        else:
+            case = DataType.MULTIDIM_MULTICLASS
+
+        implied_classes = preds[0].numel()
+
+    elif preds.ndim == target.ndim + 1:
+        if not preds_float:
+            raise ValueError("If `preds` have one dimension more than `target`, `preds` should be a float tensor.")
+        if preds.shape[2:] != target.shape[1:]:
+            raise ValueError(
+                "If `preds` have one dimension more than `target`, the shape of `preds` should be"
+                " (N, C, ...), and the shape of `target` should be (N, ...)."
+            )
+
+        implied_classes = preds.shape[1]
+
+        if preds.ndim == 2:
+            case = DataType.MULTICLASS
+        else:
+            case = DataType.MULTIDIM_MULTICLASS
+    else:
+        raise ValueError(
+            "Either `preds` and `target` both should have the (same) shape (N, ...), or `target` should be (N, ...)"
+            " and `preds` should be (N, C, ...)."
+        )
+
+    return case, implied_classes
+
+
+def _check_num_classes_binary(num_classes: int, multiclass: Optional[bool]) -> None:
+    """This checks the consistency of `num_classes` with the data and the `multiclass` param for binary data."""
+
+    if num_classes > 2:
+        raise ValueError("Your data is binary, but `num_classes` is larger than 2.")
+    if num_classes == 2 and not multiclass:
+        raise ValueError(
+            "Your data is binary and `num_classes=2`, but `multiclass` is not True."
+            " Set it to True if you want to transform binary data to multi-class format."
+        )
+    if num_classes == 1 and multiclass:
+        raise ValueError(
+            "You have binary data and have set `multiclass=True`, but `num_classes` is 1."
+            " Either set `multiclass=None` (default) or set `num_classes=2`"
+            " to transform binary data to multi-class format."
+        )
+
+
+def _check_num_classes_mc(
+    preds: Tensor,
+    target: Tensor,
+    num_classes: int,
+    multiclass: Optional[bool],
+    implied_classes: int,
+) -> None:
+    """This checks the consistency of `num_classes` with the data and the `multiclass` param for (multi-
+    dimensional) multi-class data."""
+
+    if num_classes == 1 and multiclass is not False:
+        raise ValueError(
+            "You have set `num_classes=1`, but predictions are integers."
+            " If you want to convert (multi-dimensional) multi-class data with 2 classes"
+            " to binary/multi-label, set `multiclass=False`."
+        )
+    if num_classes > 1:
+        if multiclass is False and implied_classes != num_classes:
+            raise ValueError(
+                "You have set `multiclass=False`, but the implied number of classes"
+                " (from shape of inputs) does not match `num_classes`. If you are trying to"
+                " transform multi-dim multi-class data with 2 classes to multi-label, `num_classes`"
+                " should be either None or the product of the size of extra dimensions (...)."
+                " See Input Types in Metrics documentation."
+            )
+        if num_classes <= target.max():
+            raise ValueError("The highest label in `target` should be smaller than `num_classes`.")
+        if preds.shape != target.shape and num_classes != implied_classes:
+            raise ValueError("The size of C dimension of `preds` does not match `num_classes`.")
+
+
+def _check_num_classes_ml(num_classes: int, multiclass: Optional[bool], implied_classes: int) -> None:
+    """This checks the consistency of `num_classes` with the data and the `multiclass` param for multi-label
+    data."""
+
+    if multiclass and num_classes != 2:
+        raise ValueError(
+            "You have set `multiclass=True`, but `num_classes` is not equal to 2."
+            " If you are trying to transform multi-label data to 2 class multi-dimensional"
+            " multi-class, you should set `num_classes` to either 2 or None."
+        )
+    if not multiclass and num_classes != implied_classes:
+        raise ValueError("The implied number of classes (from shape of inputs) does not match num_classes.")
+
+
+def _check_top_k(top_k: int, case: str, implied_classes: int, multiclass: Optional[bool], preds_float: bool) -> None:
+    if case == DataType.BINARY:
+        raise ValueError("You cannot use the `top_k` parameter with binary data.")
+    if not isinstance(top_k, int) or top_k <= 0:
+        raise ValueError("The `top_k` has to be an integer larger than 0.")
+    if not preds_float:
+        raise ValueError("You have set `top_k`, but you do not have probability predictions.")
+    if multiclass is False:
+        raise ValueError("If you set `multiclass=False`, you cannot set `top_k`.")
+    if case == DataType.MULTILABEL and multiclass:
+        raise ValueError(
+            "If you want to transform multi-label data to 2 class multi-dimensional"
+            " multi-class data using `multiclass=True`, you cannot use `top_k`."
+        )
+    if top_k >= implied_classes:
+        raise ValueError("The `top_k` has to be strictly smaller than the `C` dimension of `preds`.")
+
+
+def _check_classification_inputs(
+    preds: Tensor,
+    target: Tensor,
+    threshold: float,
+    num_classes: Optional[int],
+    multiclass: Optional[bool],
+    top_k: Optional[int],
+) -> DataType:
+    """Performs error checking on inputs for classification.
+
+    This ensures that preds and target take one of the shape/type combinations that are
+    specified in the docstring of ``_input_format_classification``. It also checks the cases of
+    overrides with ``multiclass`` by checking (for multi-class and multi-dim multi-class
+    cases) that there are only up to 2 distinct labels.
+
+    In the case where preds are floats (probabilities), it is checked whether they are in the [0,1] interval.
+
+    When ``num_classes`` is given, it is checked that it is consistent with input cases (binary,
+    multi-label, ...), and that, if available, the implied number of classes in the ``C``
+    dimension is consistent with it (as well as that max label in target is smaller than it).
+
+    When ``num_classes`` is not specified in these cases, consistency of the highest target
+    value against the ``C`` dimension is checked for (multi-dimensional) multi-class cases.
+
+    If ``top_k`` is set (not None) for inputs that do not have probability predictions (and
+    are not binary), an error is raised. Similarly, if ``top_k`` is set to a number that
+    is higher than or equal to the ``C`` dimension of ``preds``, an error is raised.
+
+    Preds and target tensors are expected to be squeezed already - all dimensions should be
+    greater than 1, except perhaps the first one (``N``).
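+
+    For instance (the shapes are illustrative): float ``preds`` of shape ``(16, 5)`` with integer
+    ``target`` of shape ``(16,)`` falls into the multi-class case with an implied ``C=5``, while
+    float ``preds`` of shape ``(16, 5)`` with binary integer ``target`` of the same shape falls
+    into the multi-label case.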
+ + Args: + preds: Tensor with predictions (labels or probabilities) + target: Tensor with ground truth labels, always integers (labels) + threshold: + Threshold value for transforming probability/logit predictions to binary + (0,1) predictions, in the case of binary or multi-label inputs. + num_classes: + Number of classes. If not explicitly set, the number of classes will be inferred + either from the shape of inputs, or the maximum label in the ``target`` and ``preds`` + tensor, where applicable. + top_k: + Number of highest probability entries for each sample to convert to 1s - relevant + only for inputs with probability predictions. The default value (``None``) will be + interpreted as 1 for these inputs. If this parameter is set for multi-label inputs, + it will take precedence over threshold. + + Should be left unset (``None``) for inputs with label predictions. + multiclass: + Used only in certain special cases, where you want to treat inputs as a different type + than what they appear to be. See the parameter's + :ref:`documentation section ` + for a more detailed explanation and examples. + + + Return: + case: The case the inputs fall in, one of 'binary', 'multi-class', 'multi-label' or + 'multi-dim multi-class' + """ + + # Basic validation (that does not need case/type information) + _basic_input_validation(preds, target, threshold, multiclass) + + # Check that shape/types fall into one of the cases + case, implied_classes = _check_shape_and_type_consistency(preds, target) + + # Check consistency with the `C` dimension in case of multi-class data + if preds.shape != target.shape: + if multiclass is False and implied_classes != 2: + raise ValueError( + "You have set `multiclass=False`, but have more than 2 classes in your data," + " based on the C dimension of `preds`." + ) + if target.max() >= implied_classes: + raise ValueError( + "The highest label in `target` should be smaller than the size of the `C` dimension of `preds`." + ) + + # Check that num_classes is consistent + if num_classes: + if case == DataType.BINARY: + _check_num_classes_binary(num_classes, multiclass) + elif case in (DataType.MULTICLASS, DataType.MULTIDIM_MULTICLASS): + _check_num_classes_mc(preds, target, num_classes, multiclass, implied_classes) + elif case.MULTILABEL: + _check_num_classes_ml(num_classes, multiclass, implied_classes) + + # Check that top_k is consistent + if top_k is not None: + _check_top_k(top_k, case, implied_classes, multiclass, preds.is_floating_point()) + + return case + + +def _input_squeeze( + preds: Tensor, + target: Tensor, +) -> Tuple[Tensor, Tensor]: + """Remove excess dimensions.""" + if preds.shape[0] == 1: + preds, target = preds.squeeze().unsqueeze(0), target.squeeze().unsqueeze(0) + else: + preds, target = preds.squeeze(), target.squeeze() + return preds, target + + +def _input_format_classification( + preds: Tensor, + target: Tensor, + threshold: float = 0.5, + top_k: Optional[int] = None, + num_classes: Optional[int] = None, + multiclass: Optional[bool] = None, +) -> Tuple[Tensor, Tensor, DataType]: + """Convert preds and target tensors into common format. 
+
+    Preds and targets are supposed to fall into one of these categories (and are
+    validated to make sure this is the case):
+
+    * Both preds and target are of shape ``(N,)``, and both are integers (multi-class)
+    * Both preds and target are of shape ``(N,)``, and target is binary, while preds
+      are a float (binary)
+    * preds are of shape ``(N, C)`` and are floats, and target is of shape ``(N,)`` and
+      is integer (multi-class)
+    * preds and target are of shape ``(N, ...)``, target is binary and preds is a float
+      (multi-label)
+    * preds are of shape ``(N, C, ...)`` and are floats, target is of shape ``(N, ...)``
+      and is integer (multi-dimensional multi-class)
+    * preds and target are of shape ``(N, ...)``, both are integers (multi-dimensional
+      multi-class)
+
+    To avoid ambiguities, all dimensions of size 1, except the first one, are squeezed out.
+
+    The returned output tensors will be binary tensors of the same shape, either ``(N, C)``
+    or ``(N, C, X)``; the details for each case are described below. The function also returns
+    a ``case`` string, which describes which of the above cases the inputs belonged to - regardless
+    of whether this was "overridden" by other settings (like ``multiclass``).
+
+    In the binary case, targets are normally returned as a ``(N,1)`` tensor, while preds are transformed
+    into a binary tensor (elements become 1 if the probability is greater than or equal to
+    ``threshold`` or 0 otherwise). If ``multiclass=True``, then both targets and preds
+    become ``(N, 2)`` tensors by a one-hot transformation, with the thresholding being applied to
+    preds first.
+
+    In the multi-class case, normally both preds and targets become ``(N, C)`` binary tensors; targets
+    by a one-hot transformation and preds by selecting the ``top_k`` largest entries (if their original
+    shape was ``(N,C)``). However, if ``multiclass=False``, then targets and preds will be
+    returned as a ``(N,1)`` tensor.
+
+    In the multi-label case, normally targets and preds are returned as ``(N, C)`` binary tensors, with
+    preds being binarized as in the binary case. Here the ``C`` dimension is obtained by flattening
+    all dimensions after the first one. However, if ``multiclass=True``, then both are returned as
+    ``(N, 2, C)``, by an equivalent transformation as in the binary case.
+
+    In the multi-dimensional multi-class case, normally both target and preds are returned as
+    ``(N, C, X)`` tensors, with ``X`` resulting from flattening of all dimensions except ``N`` and
+    ``C``. The transformations performed here are equivalent to the multi-class case. However, if
+    ``multiclass=False`` (and there are up to two classes), then the data is returned as
+    ``(N, X)`` binary tensors (multi-label).
+
+    Note:
+        Where a one-hot transformation needs to be performed and the number of classes
+        is not implicitly given by a ``C`` dimension, the new ``C`` dimension will either be
+        equal to ``num_classes``, if it is given, or the maximum label value in preds and
+        target.
+
+    Args:
+        preds: Tensor with predictions (labels or probabilities)
+        target: Tensor with ground truth labels, always integers (labels)
+        threshold:
+            Threshold value for transforming probability/logit predictions to binary
+            (0 or 1) predictions, in the case of binary or multi-label inputs.
+        num_classes:
+            Number of classes. If not explicitly set, the number of classes will be inferred
+            either from the shape of inputs, or the maximum label in the ``target`` and ``preds``
+            tensor, where applicable.
+        top_k:
+            Number of highest probability entries for each sample to convert to 1s - relevant
+            only for (multi-dimensional) multi-class inputs with probability predictions. The
+            default value (``None``) will be interpreted as 1 for these inputs.
+
+            Should be left unset (``None``) for all other types of inputs.
+        multiclass:
+            Used only in certain special cases, where you want to treat inputs as a different type
+            than what they appear to be. See the parameter's
+            :ref:`documentation section `
+            for a more detailed explanation and examples.
+
+    Returns:
+        preds: binary tensor of shape ``(N, C)`` or ``(N, C, X)``
+        target: binary tensor of shape ``(N, C)`` or ``(N, C, X)``
+        case: The case the inputs fall in, one of ``'binary'``, ``'multi-class'``, ``'multi-label'`` or
+            ``'multi-dim multi-class'``
+    """
+    # Remove excess dimensions
+    preds, target = _input_squeeze(preds, target)
+
+    # Convert half precision tensors to full precision, as not all ops are supported
+    # for example, min() is not supported
+    if preds.dtype == B.float16:
+        preds = preds.float()
+
+    case = _check_classification_inputs(
+        preds,
+        target,
+        threshold=threshold,
+        num_classes=num_classes,
+        multiclass=multiclass,
+        top_k=top_k,
+    )
+
+    if case in (DataType.BINARY, DataType.MULTILABEL) and not top_k:
+        preds = (preds >= threshold).int()
+        num_classes = num_classes if not multiclass else 2
+
+    if case == DataType.MULTILABEL and top_k:
+        preds = select_topk(preds, top_k)
+
+    if case in (DataType.MULTICLASS, DataType.MULTIDIM_MULTICLASS) or multiclass:
+        if preds.is_floating_point():
+            num_classes = preds.shape[1]
+            preds = select_topk(preds, top_k or 1)
+        else:
+            num_classes = num_classes if num_classes else max(preds.max(), target.max()) + 1
+            preds = to_onehot(preds, max(2, num_classes))
+
+        target = to_onehot(target, max(2, num_classes))  # type: ignore
+
+        if multiclass is False:
+            preds, target = preds[:, 1, ...], target[:, 1, ...]
+
+    if (case in (DataType.MULTICLASS, DataType.MULTIDIM_MULTICLASS) and multiclass is not False) or multiclass:
+        target = target.reshape(target.shape[0], target.shape[1], -1)
+        preds = preds.reshape(preds.shape[0], preds.shape[1], -1)
+    else:
+        target = target.reshape(target.shape[0], -1)
+        preds = preds.reshape(preds.shape[0], -1)
+
+    # Some operations above create an extra dimension for MC/binary case - this removes it
+    if preds.ndim > 2:
+        preds, target = preds.squeeze(-1), target.squeeze(-1)
+
+    return preds.int(), target.int(), case
+
+
+def _input_format_classification_one_hot(
+    num_classes: int,
+    preds: Tensor,
+    target: Tensor,
+    threshold: float = 0.5,
+    multilabel: bool = False,
+) -> Tuple[Tensor, Tensor]:
+    """Convert preds and target tensors into one-hot sparse label tensors.
+
+    Args:
+        num_classes: number of classes
+        preds: either tensor with labels, tensor with probabilities/logits or multilabel tensor
+        target: tensor with ground truth labels
+        threshold: float used for thresholding multilabel input
+        multilabel: boolean flag indicating if input is multilabel
+
+    Raises:
+        ValueError:
+            If ``preds`` and ``target`` don't have the same number of dimensions
+            or one additional dimension for ``preds``.
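+
+    Example (an illustrative sketch, not a doctest)::
+
+        preds = B.tensor([0, 1, 2])
+        target = B.tensor([2, 1, 0])
+        preds, target = _input_format_classification_one_hot(3, preds, target)
+        # both come back one-hot with shape [num_classes, -1], here [3, 3]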
+ + Returns: + preds: one hot tensor of shape [num_classes, -1] with predicted labels + target: one hot tensors of shape [num_classes, -1] with true labels + """ + if preds.ndim not in (target.ndim, target.ndim + 1): + raise ValueError("preds and target must have same number of dimensions, or one additional dimension for preds") + + if preds.ndim == target.ndim + 1: + # multi class probabilities + preds = B.argmax(preds, dim=1) + + if preds.ndim == target.ndim and preds.dtype in (B.long, B.int) and num_classes > 1 and not multilabel: + # multi-class + preds = to_onehot(preds, num_classes=num_classes) + target = to_onehot(target, num_classes=num_classes) + + elif preds.ndim == target.ndim and preds.is_floating_point(): + # binary or multilabel probabilities + preds = (preds >= threshold).long() + + # transpose class as first dim and reshape + if preds.ndim > 1: + preds = preds.transpose(1, 0) + target = target.transpose(1, 0) + + return preds.reshape(num_classes, -1), target.reshape(num_classes, -1) + + +def _check_retrieval_functional_inputs( + preds: Tensor, + target: Tensor, + allow_non_binary_target: bool = False, +) -> Tuple[Tensor, Tensor]: + """Check ``preds`` and ``target`` tensors are of the same shape and of the correct dtype. + + Args: + preds: either tensor with scores/logits + target: tensor with ground true labels + allow_non_binary_target: whether to allow target to contain non-binary values + + Raises: + ValueError: + If ``preds`` and ``target`` don't have the same shape, if they are empty + or not of the correct ``dtypes``. + + Returns: + preds: as B.float32 + target: as B.long if not floating point else B.float32 + """ + if preds.shape != target.shape: + raise ValueError("`preds` and `target` must be of the same shape") + + if not preds.numel() or not preds.size(): + raise ValueError("`preds` and `target` must be non-empty and non-scalar tensors") + + return _check_retrieval_target_and_prediction_types(preds, target, allow_non_binary_target=allow_non_binary_target) + + +def _check_retrieval_inputs( + indexes: Tensor, + preds: Tensor, + target: Tensor, + allow_non_binary_target: bool = False, +) -> Tuple[Tensor, Tensor, Tensor]: + """Check ``indexes``, ``preds`` and ``target`` tensors are of the same shape and of the correct dtype. + + Args: + indexes: tensor with queries indexes + preds: tensor with scores/logits + target: tensor with ground true labels + + Raises: + ValueError: + If ``preds`` and ``target`` don't have the same shape, if they are empty + or not of the correct ``dtypes``. + + Returns: + indexes: as B.long + preds: as B.float32 + target: as B.long + """ + if indexes.shape != preds.shape or preds.shape != target.shape: + raise ValueError("`indexes`, `preds` and `target` must be of the same shape") + + if not indexes.numel() or not indexes.size(): + raise ValueError( + "`indexes`, `preds` and `target` must be non-empty and non-scalar tensors", + ) + + if indexes.dtype is not B.long: + raise ValueError("`indexes` must be a tensor of long integers") + + preds, target = _check_retrieval_target_and_prediction_types( + preds, target, allow_non_binary_target=allow_non_binary_target + ) + + return indexes.long().flatten(), preds, target + + +def _check_retrieval_target_and_prediction_types( + preds: Tensor, + target: Tensor, + allow_non_binary_target: bool = False, +) -> Tuple[Tensor, Tensor]: + """Check ``preds`` and ``target`` tensors are of the same shape and of the correct dtype. 
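+
+    Example (an illustrative sketch, not a doctest)::
+
+        preds = B.tensor([0.2, 0.7, 0.1])
+        target = B.tensor([0, 1, 0])
+        preds, target = _check_retrieval_target_and_prediction_types(preds, target)
+        # preds is returned as a flattened float tensor, target as a flattened long tensor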
+ + Args: + preds: either tensor with scores/logits + target: tensor with ground true labels + allow_non_binary_target: whether to allow target to contain non-binary values + + Raises: + ValueError: + If ``preds`` and ``target`` don't have the same shape, if they are empty + or not of the correct ``dtypes``. + """ + if target.dtype not in (B.bool, B.long, B.int) and not B.is_floating_point(target): + raise ValueError("`target` must be a tensor of booleans, integers or floats") + + if not preds.is_floating_point(): + raise ValueError("`preds` must be a tensor of floats") + + if not allow_non_binary_target and (target.max() > 1 or target.min() < 0): + raise ValueError("`target` must contain `binary` values") + + target = target.float().flatten() if target.is_floating_point() else target.long().flatten() + return preds.float().flatten(), target diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/data.py b/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/data.py new file mode 100644 index 000000000..13e43fb60 --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/data.py @@ -0,0 +1,240 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, List, Mapping, Optional, Sequence, Union + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.utilities.prints import rank_zero_warn + +METRIC_EPS = 1e-6 + + +def dim_zero_cat(x: Union[Tensor, List[Tensor]]) -> Tensor: + """concatenation along the zero dimension.""" + x = x if isinstance(x, (list, tuple)) else [x] + x = [y.unsqueeze(0) if y.numel() == 1 and y.ndim == 0 else y for y in x] + if not x: # empty list + raise ValueError("No samples to concatenate") + return B.cat(x, dim=0) + + +def dim_zero_sum(x: Tensor) -> Tensor: + """summation along the zero dimension.""" + return B.sum(x, dim=0) + + +def dim_zero_mean(x: Tensor) -> Tensor: + """average along the zero dimension.""" + return B.mean(x, dim=0) + + +def dim_zero_max(x: Tensor) -> Tensor: + """max along the zero dimension.""" + return B.max(x, dim=0).values + + +def dim_zero_min(x: Tensor) -> Tensor: + """min along the zero dimension.""" + return B.min(x, dim=0).values + + +def _flatten(x: Sequence) -> list: + return [item for sublist in x for item in sublist] + + +def to_onehot( + label_tensor: Tensor, + num_classes: Optional[int] = None, +) -> Tensor: + """Converts a dense label tensor to one-hot format. + + Args: + label_tensor: dense label tensor, with shape [N, d1, d2, ...] + num_classes: number of classes C + + Returns: + A sparse label tensor with shape [N, C, d1, d2, ...] 
+ + Example: + >>> x = B.tensor([1, 2, 3]) + >>> to_onehot(x) + tensor([[0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1]]) + """ + if num_classes is None: + num_classes = int(label_tensor.max().detach().item() + 1) + + tensor_onehot = B.zeros( + label_tensor.shape[0], + num_classes, + *label_tensor.shape[1:], + dtype=label_tensor.dtype, + device=label_tensor.device, + ) + index = label_tensor.long().unsqueeze(1).expand_as(tensor_onehot) + return (tensor_onehot.scatter_(1, index, 1.0) > 0).to(label_tensor.dtype) + + +def select_topk(prob_tensor: Tensor, topk: int = 1, dim: int = 1) -> Tensor: + """Convert a probability tensor to binary by selecting top-k highest entries. + + Args: + prob_tensor: dense tensor of shape ``[..., C, ...]``, where ``C`` is in the + position defined by the ``dim`` argument + topk: number of highest entries to turn into 1s + dim: dimension on which to compare entries + + Returns: + A binary tensor of the same shape as the input tensor of type B.int32 + + Example: + >>> x = B.tensor([[1.1, 2.0, 3.0], [2.0, 1.0, 0.5]]) + >>> select_topk(x, topk=2) + tensor([[0, 1, 1], + [1, 1, 0]], dtype=B.int32) + """ + zeros = B.zeros_like(prob_tensor) + if topk == 1: # argmax has better performance than topk + topk_tensor = zeros.scatter(dim, prob_tensor.argmax(dim=dim, keepdim=True), 1.0) + else: + topk_tensor = zeros.scatter(dim, prob_tensor.topk(k=topk, dim=dim).indices, 1.0) + return topk_tensor.int() + + +def to_categorical(x: Tensor, argmax_dim: int = 1) -> Tensor: + """Converts a tensor of probabilities to a dense label tensor. + + Args: + x: probabilities to get the categorical label [N, d1, d2, ...] + argmax_dim: dimension to apply + + Return: + A tensor with categorical labels [N, d2, ...] + + Example: + >>> x = B.tensor([[0.2, 0.5], [0.9, 0.1]]) + >>> to_categorical(x) + tensor([1, 0]) + """ + return B.argmax(x, dim=argmax_dim) + + +def get_num_classes( + preds: Tensor, + target: Tensor, + num_classes: Optional[int] = None, +) -> int: + """Calculates the number of classes for a given prediction and target tensor. + + Args: + preds: predicted values + target: true labels + num_classes: number of classes if known + + Return: + An integer that represents the number of classes. + """ + num_target_classes = int(target.max().detach().item() + 1) + num_pred_classes = int(preds.max().detach().item() + 1) + num_all_classes = max(num_target_classes, num_pred_classes) + + if num_classes is None: + num_classes = num_all_classes + elif num_classes != num_all_classes: + rank_zero_warn( + f"You have set {num_classes} number of classes which is" + f" different from predicted ({num_pred_classes}) and" + f" target ({num_target_classes}) number of classes", + RuntimeWarning, + ) + return num_classes + + +def apply_to_collection( + data: Any, + dtype: Union[type, tuple], + function: Callable, + *args: Any, + wrong_dtype: Optional[Union[type, tuple]] = None, + **kwargs: Any, +) -> Any: + """Recursively applies a function to all elements of a certain dtype. 
+ + Args: + data: the collection to apply the function to + dtype: the given function will be applied to all elements of this dtype + function: the function to apply + *args: positional arguments (will be forwarded to calls of ``function``) + wrong_dtype: the given function won't be applied if this type is specified and the given collections is of + the :attr:`wrong_type` even if it is of type :attr`dtype` + **kwargs: keyword arguments (will be forwarded to calls of ``function``) + + Returns: + the resulting collection + + Example: + >>> apply_to_collection(B.tensor([8, 0, 2, 6, 7]), dtype=Tensor, function=lambda x: x ** 2) + tensor([64, 0, 4, 36, 49]) + >>> apply_to_collection([8, 0, 2, 6, 7], dtype=int, function=lambda x: x ** 2) + [64, 0, 4, 36, 49] + >>> apply_to_collection(dict(abc=123), dtype=int, function=lambda x: x ** 2) + {'abc': 15129} + """ + elem_type = type(data) + + # Breaking condition + if isinstance(data, dtype) and (wrong_dtype is None or not isinstance(data, wrong_dtype)): + return function(data, *args, **kwargs) + + # Recursively apply to collection items + if isinstance(data, Mapping): + return elem_type({k: apply_to_collection(v, dtype, function, *args, **kwargs) for k, v in data.items()}) + + if isinstance(data, tuple) and hasattr(data, "_fields"): # named tuple + return elem_type(*(apply_to_collection(d, dtype, function, *args, **kwargs) for d in data)) + + if isinstance(data, Sequence) and not isinstance(data, str): + return elem_type([apply_to_collection(d, dtype, function, *args, **kwargs) for d in data]) + + # data is neither of dtype, nor a collection + return data + + +def get_group_indexes(indexes: Tensor) -> List[Tensor]: + """Given an integer `B.Tensor` `indexes`, return a `B.Tensor` of indexes for each different value in + `indexes`. + + Args: + indexes: a `B.Tensor` + + Return: + A list of integer `B.Tensor`s + + Example: + >>> indexes = B.tensor([0, 0, 0, 1, 1, 1, 1]) + >>> get_group_indexes(indexes) + [tensor([0, 1, 2]), tensor([3, 4, 5, 6])] + """ + + res: dict = {} + for i, _id in enumerate(indexes): + _id = _id.item() + if _id in res: + res[_id] += [i] + else: + res[_id] = [i] + + return [tensor(x, dtype=B.long) for x in res.values()] diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/distributed.py b/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/distributed.py new file mode 100644 index 000000000..aec42872a --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/distributed.py @@ -0,0 +1,145 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, List, Optional + +import paddleext.torchapi as B +#import torchapi.nn.functional as F +from paddleext.torchapi import Tensor + + +def reduce(to_reduce: Tensor, reduction: str) -> Tensor: + """Reduces a given tensor by a given reduction method. 
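+
+    Example (an illustrative sketch, not a doctest)::
+
+        x = B.tensor([1.0, 2.0, 3.0])
+        reduce(x, "elementwise_mean")  # -> tensor(2.)
+        reduce(x, "sum")               # -> tensor(6.)
+        reduce(x, "none")              # input returned unchanged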
+ + Args: + to_reduce: the tensor, which shall be reduced + reduction: a string specifying the reduction method ('elementwise_mean', 'none', 'sum') + + Return: + reduced Tensor + + Raise: + ValueError if an invalid reduction parameter was given + """ + if reduction == "elementwise_mean": + return B.mean(to_reduce) + if reduction == "none": + return to_reduce + if reduction == "sum": + return B.sum(to_reduce) + raise ValueError("Reduction parameter unknown.") + + +def class_reduce(num: Tensor, denom: Tensor, weights: Tensor, class_reduction: str = "none") -> Tensor: + """ + Function used to reduce classification metrics of the form `num / denom * weights`. + For example for calculating standard accuracy the num would be number of + true positives per class, denom would be the support per class, and weights + would be a tensor of 1s + + Args: + num: numerator tensor + denom: denominator tensor + weights: weights for each class + class_reduction: reduction method for multiclass problems + + - ``'micro'``: calculate metrics globally (default) + - ``'macro'``: calculate metrics for each label, and find their unweighted mean. + - ``'weighted'``: calculate metrics for each label, and find their weighted mean. + - ``'none'`` or ``None``: returns calculated metric per class + + Raises: + ValueError: + If ``class_reduction`` is none of ``"micro"``, ``"macro"``, ``"weighted"``, ``"none"`` or ``None``. + + """ + valid_reduction = ("micro", "macro", "weighted", "none", None) + if class_reduction == "micro": + fraction = B.sum(num) / B.sum(denom) + else: + fraction = num / denom + + # We need to take care of instances where the denom can be 0 + # for some (or all) classes which will produce nans + fraction[fraction != fraction] = 0 + + if class_reduction == "micro": + return fraction + if class_reduction == "macro": + return B.mean(fraction) + if class_reduction == "weighted": + return B.sum(fraction * (weights.float() / B.sum(weights))) + if class_reduction == "none" or class_reduction is None: + return fraction + + raise ValueError( + f"Reduction parameter {class_reduction} unknown." f" Choose between one of these: {valid_reduction}" + ) + + +def _simple_gather_all_tensors(result: Tensor, group: Any, world_size: int) -> List[Tensor]: + gathered_result = [B.zeros_like(result) for _ in range(world_size)] + B.distributed.all_gather(gathered_result, result, group) + return gathered_result + + +def gather_all_tensors(result: Tensor, group: Optional[Any] = None) -> List[Tensor]: + """Function to gather all tensors from several ddp processes onto a list that is broadcasted to all processes. + Works on tensors that have the same number of dimensions, but where each dimension may differ. In this case + tensors are padded, gathered and then trimmed to secure equal workload for all processes. + + Args: + result: the value to sync + group: the process group to gather results from. Defaults to all processes (world) + + Return: + gathered_result: list with size equal to the process group where + gathered_result[i] corresponds to result tensor from process i + """ + if group is None: + group = B.distributed.group.WORLD + + # convert tensors to contiguous format + result = result.contiguous() + + world_size = B.distributed.get_world_size(group) + B.distributed.barrier(group=group) + + # if the tensor is scalar, things are easy + if result.ndim == 0: + return _simple_gather_all_tensors(result, group, world_size) + + # 1. 
Gather sizes of all tensors
+    local_size = B.tensor(result.shape, device=result.device)
+    local_sizes = [B.zeros_like(local_size) for _ in range(world_size)]
+    B.distributed.all_gather(local_sizes, local_size, group=group)
+    max_size = B.stack(local_sizes).max(dim=0).values
+    all_sizes_equal = all(all(ls == max_size) for ls in local_sizes)
+
+    # 2. If shapes are all the same, then do a simple gather:
+    if all_sizes_equal:
+        return _simple_gather_all_tensors(result, group, world_size)
+
+    # 3. If not, we need to pad each local tensor to maximum size, gather and then truncate
+    pad_dims = []
+    pad_by = (max_size - local_size).detach().cpu()
+    for val in reversed(pad_by):
+        pad_dims.append(0)
+        pad_dims.append(val.item())
+    result_padded = B.pad(result, pad_dims)
+    gathered_result = [B.zeros_like(result_padded) for _ in range(world_size)]
+    B.distributed.all_gather(gathered_result, result_padded, group)
+    for idx, item_size in enumerate(local_sizes):
+        slice_param = [slice(dim_size) for dim_size in item_size]
+        gathered_result[idx] = gathered_result[idx][slice_param]
+    return gathered_result
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/enums.py b/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/enums.py
new file mode 100644
index 000000000..7476c051d
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/enums.py
@@ -0,0 +1,83 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from enum import Enum
+from typing import Optional, Union
+
+
+class EnumStr(str, Enum):
+    """Type of any enumerator with allowed comparison to string invariant to cases.
+
+    Example:
+        >>> class MyEnum(EnumStr):
+        ...     ABC = 'abc'
+        >>> MyEnum.from_str('Abc')
+        <MyEnum.ABC: 'abc'>
+        >>> {MyEnum.ABC: 123}
+        {<MyEnum.ABC: 'abc'>: 123}
+    """
+
+    @classmethod
+    def from_str(cls, value: str) -> Optional["EnumStr"]:
+        statuses = [status for status in dir(cls) if not status.startswith("_")]
+        for st in statuses:
+            if st.lower() == value.lower():
+                return getattr(cls, st)
+        return None
+
+    def __eq__(self, other: Union[str, "EnumStr", None]) -> bool:  # type: ignore
+        other = other.value if isinstance(other, Enum) else str(other)
+        return self.value.lower() == other.lower()
+
+    def __hash__(self) -> int:
+        # re-enable hashing so instances can be used as dict keys or in a set
+        # example: set(LightningEnum)
+        return hash(self.name)
+
+
+class DataType(EnumStr):
+    """Enum to represent data type.
+
+    >>> "Binary" in list(DataType)
+    True
+    """
+
+    BINARY = "binary"
+    MULTILABEL = "multi-label"
+    MULTICLASS = "multi-class"
+    MULTIDIM_MULTICLASS = "multi-dim multi-class"
+
+
+class AverageMethod(EnumStr):
+    """Enum to represent average method.
+
+    >>> None in list(AverageMethod)
+    True
+    >>> AverageMethod.NONE == None
+    True
+    >>> AverageMethod.NONE == 'none'
+    True
+    """
+
+    MICRO = "micro"
+    MACRO = "macro"
+    WEIGHTED = "weighted"
+    NONE = None
+    SAMPLES = "samples"
+
+
+class MDMCAverageMethod(EnumStr):
+    """Enum to represent multi-dim multi-class average method."""
+
+    GLOBAL = "global"
+    SAMPLEWISE = "samplewise"
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/exceptions.py b/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/exceptions.py
new file mode 100644
index 000000000..767fe9014
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/exceptions.py
@@ -0,0 +1,17 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class paddlemetricsUserError(Exception):
+    """Error used to inform users of a wrong combination of Metric API calls."""
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/imports.py b/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/imports.py
new file mode 100644
index 000000000..f3794801c
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/imports.py
@@ -0,0 +1,90 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Import utilities."""
+import operator
+from importlib import import_module
+from importlib.util import find_spec
+from typing import Callable, Optional
+
+from packaging.version import Version
+from pkg_resources import DistributionNotFound, get_distribution
+
+
+def _module_available(module_path: str) -> bool:
+    """Check if a path is available in your environment.
+
+    >>> _module_available('os')
+    True
+    >>> _module_available('bla.bla')
+    False
+    """
+    try:
+        return find_spec(module_path) is not None
+    except AttributeError:
+        # Python 3.6
+        return False
+    except ModuleNotFoundError:
+        # Python 3.7+
+        return False
+
+
+def _compare_version(package: str, op: Callable, version: str) -> Optional[bool]:
+    """Compare package version with some requirements.
+
+    >>> import operator
+    >>> _compare_version("torch", operator.ge, "0.1")
+    True
+    >>> _compare_version("any_module", operator.ge, "0.0")  # is None
+    """
+    if not _module_available(package):
+        return None
+    try:
+        pkg = import_module(package)
+        pkg_version = pkg.__version__  # type: ignore
+    except (ModuleNotFoundError, DistributionNotFound):
+        return None
+    except AttributeError:
+        pkg_version = get_distribution(package).version
+    except ImportError:
+        # catches cyclic imports - the case with integrated libs
+        # see: https://stackoverflow.com/a/32965521
+        pkg_version = get_distribution(package).version
+    try:
+        pkg_version = Version(pkg_version)
+    except TypeError:
+        # this is mocked by Sphinx, so it shall return True to generate all summaries
+        return True
+    return op(pkg_version, Version(version))
+
+
+_TORCH_LOWER_1_4: Optional[bool] = False
+_TORCH_LOWER_1_5: Optional[bool] = False
+_TORCH_LOWER_1_6: Optional[bool] = False
+_TORCH_GREATER_EQUAL_1_6: Optional[bool] = True
+_TORCH_GREATER_EQUAL_1_7: Optional[bool] = True
+
+_LIGHTNING_AVAILABLE: bool = False
+_JIWER_AVAILABLE: bool = _module_available("jiwer")
+_NLTK_AVAILABLE: bool = _module_available("nltk")
+_ROUGE_SCORE_AVAILABLE: bool = _module_available("rouge_score")
+_BERTSCORE_AVAILABLE: bool = _module_available("bert_score")
+_SCIPY_AVAILABLE: bool = _module_available("scipy")
+_TORCH_FIDELITY_AVAILABLE: bool = _module_available("torch_fidelity")
+_LPIPS_AVAILABLE: bool = _module_available("lpips")
+_TQDM_AVAILABLE: bool = _module_available("tqdm")
+_TRANSFORMERS_AVAILABLE: bool = _module_available("transformers")
+_PESQ_AVAILABLE: bool = _module_available("pesq")
+_SACREBLEU_AVAILABLE: bool = _module_available("sacrebleu")
+_REGEX_AVAILABLE: bool = _module_available("regex")
+_PYSTOI_AVAILABLE: bool = _module_available("pystoi")
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/prints.py b/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/prints.py
new file mode 100644
index 000000000..ff4b1b35e
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/prints.py
@@ -0,0 +1,49 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
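+
+# Usage note (illustrative): the helpers defined below wrap warning/logging calls so that, in a
+# distributed job, only the process with LOCAL_RANK == 0 emits them, e.g.
+#
+#     rank_zero_warn("num_classes mismatch", RuntimeWarning)
+#     rank_zero_info("printed once per job, not once per process")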
+import os +import warnings +from functools import wraps +from typing import Any, Callable + +from paddlemetrics import _logger as log + + +def rank_zero_only(fn: Callable) -> Callable: + @wraps(fn) + def wrapped_fn(*args: Any, **kwargs: Any) -> Any: + if rank_zero_only.rank == 0: # type: ignore + return fn(*args, **kwargs) + + return wrapped_fn + + +# add the attribute to the function but don't overwrite in case Trainer has already set it +rank_zero_only.rank = getattr(rank_zero_only, "rank", int(os.environ.get("LOCAL_RANK", 0))) # type: ignore + + +def _warn(*args: Any, **kwargs: Any) -> None: + warnings.warn(*args, **kwargs) + + +def _info(*args: Any, **kwargs: Any) -> None: + log.info(*args, **kwargs) + + +def _debug(*args: Any, **kwargs: Any) -> None: + log.debug(*args, **kwargs) + + +rank_zero_debug = rank_zero_only(_debug) +rank_zero_info = rank_zero_only(_info) +rank_zero_warn = rank_zero_only(_warn) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/wrappers/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/wrappers/__init__.py new file mode 100644 index 000000000..d74928f6a --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/wrappers/__init__.py @@ -0,0 +1,16 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from paddlemetrics.wrappers.bootstrapping import BootStrapper # noqa: F401 +from paddlemetrics.wrappers.multioutput import MultioutputWrapper # noqa: F401 +from paddlemetrics.wrappers.tracker import MetricTracker # noqa: F401 diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/wrappers/bootstrapping.py b/EE/paddlemetric/src/build/lib/paddlemetrics/wrappers/bootstrapping.py new file mode 100644 index 000000000..6a3e7b16b --- /dev/null +++ b/EE/paddlemetric/src/build/lib/paddlemetrics/wrappers/bootstrapping.py @@ -0,0 +1,173 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
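+
+# Implementation note (a sketch of the idea only): for large sample counts, the number of times
+# a given row appears in a bootstrap resample is approximately Poisson(1), so the
+# `_bootstrap_sampler` below draws one Poisson(1) count per row and repeats row indices
+# accordingly:
+#
+#     n = B.distributions.Poisson(1).sample((size,))
+#     idx = B.arange(size).repeat_interleave(n.long(), dim=0)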
+from copy import deepcopy
+from typing import Any, Callable, Dict, Optional, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor, nn
+
+from paddlemetrics.metric import Metric
+from paddlemetrics.utilities import apply_to_collection
+from paddlemetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_7
+
+
+def _bootstrap_sampler(
+    size: int,
+    sampling_strategy: str = "poisson",
+) -> Tensor:
+    """Resample a tensor along its first dimension with replacement.
+
+    Args:
+        size: number of samples
+        sampling_strategy: the strategy to use for sampling, either ``'poisson'`` or ``'multinomial'``
+
+    Returns:
+        resampled tensor
+
+    """
+    if sampling_strategy == "poisson":
+        p = B.distributions.Poisson(1)
+        n = p.sample((size,))
+        return B.arange(size).repeat_interleave(n.long(), dim=0)
+    if sampling_strategy == "multinomial":
+        idx = B.multinomial(B.ones(size), num_samples=size, replacement=True)
+        return idx
+    raise ValueError("Unknown sampling strategy")
+
+
+class BootStrapper(Metric):
+    def __init__(
+        self,
+        base_metric: Metric,
+        num_bootstraps: int = 10,
+        mean: bool = True,
+        std: bool = True,
+        quantile: Optional[Union[float, Tensor]] = None,
+        raw: bool = False,
+        sampling_strategy: str = "poisson",
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ) -> None:
+        r"""
+        Uses `Turn a Metric into a Bootstrapped`_ to automate the process of getting confidence
+        intervals for metric values. This wrapper class basically keeps multiple copies of the
+        same base metric in memory and, whenever ``update`` or ``forward`` is called, all input
+        tensors are resampled (with replacement) along the first dimension.
+
+        Args:
+            base_metric:
+                base metric class to wrap
+            num_bootstraps:
+                number of copies to make of the base metric for bootstrapping
+            mean:
+                if ``True`` return the mean of the bootstraps
+            std:
+                if ``True`` return the standard deviation of the bootstraps
+            quantile:
+                if given, returns the quantile of the bootstraps. Can only be used with
+                pytorch version 1.6 or higher
+            raw:
+                if ``True``, return all bootstrapped values
+            sampling_strategy:
+                Determines how to produce bootstrapped samplings. Either ``'poisson'`` or ``'multinomial'``.
+                If ``'poisson'`` is chosen, the number of times each sample will be included in the bootstrap
+                will be given by :math:`n\sim Poisson(\lambda=1)`, which approximates the true bootstrap distribution
+                when the number of samples is large. If ``'multinomial'`` is chosen, we will apply true bootstrapping
+                at the batch level to approximate bootstrapping over the whole dataset.
+            compute_on_step:
+                Forward only calls ``update()`` and return ``None`` if this is set to ``False``.
+            dist_sync_on_step:
+                Synchronize metric state across processes at each ``forward()``
+                before returning the value at the step
+            process_group:
+                Specify the process group on which synchronization is called.
+                default: ``None`` (which selects the entire world)
+            dist_sync_fn:
+                Callback that performs the allgather operation on the metric state. When ``None``, DDP
+                will be used to perform the allgather.
+
+        Example::
+
+            >>> from pprint import pprint
+            >>> from paddlemetrics import Accuracy, BootStrapper
+            >>> _ = B.manual_seed(123)
+            >>> base_metric = Accuracy()
+            >>> bootstrap = BootStrapper(base_metric, num_bootstraps=20)
+            >>> bootstrap.update(B.randint(5, (20,)), B.randint(5, (20,)))
+            >>> output = bootstrap.compute()
+            >>> pprint(output)
+            {'mean': tensor(0.2205), 'std': tensor(0.0859)}
+
+        """
+        super().__init__(compute_on_step, dist_sync_on_step, process_group, dist_sync_fn)
+        if not isinstance(base_metric, Metric):
+            raise ValueError(
+                "Expected base metric to be an instance of paddlemetrics.Metric"
+                f" but received {base_metric}"
+            )
+
+        self.metrics = nn.ModuleList([deepcopy(base_metric) for _ in range(num_bootstraps)])
+        self.num_bootstraps = num_bootstraps
+
+        self.mean = mean
+        self.std = std
+        if quantile is not None and not _TORCH_GREATER_EQUAL_1_7:
+            raise ValueError("quantile argument can only be used with pytorch v1.7 or higher")
+        self.quantile = quantile
+        self.raw = raw
+
+        allowed_sampling = ("poisson", "multinomial")
+        if sampling_strategy not in allowed_sampling:
+            raise ValueError(
+                f"Expected argument ``sampling_strategy`` to be one of {allowed_sampling}"
+                f" but received {sampling_strategy}"
+            )
+        self.sampling_strategy = sampling_strategy
+
+    def update(self, *args: Any, **kwargs: Any) -> None:
+        """Updates the state of the base metric.
+
+        Any tensor passed in will be bootstrapped along dimension 0.
+        """
+        for idx in range(self.num_bootstraps):
+            args_sizes = apply_to_collection(args, Tensor, len)
+            kwargs_sizes = list(apply_to_collection(kwargs, Tensor, len))
+            if len(args_sizes) > 0:
+                size = args_sizes[0]
+            elif len(kwargs_sizes) > 0:
+                size = kwargs_sizes[0]
+            else:
+                raise ValueError("None of the inputs contained tensors, so the sampling size could not be determined")
+            sample_idx = _bootstrap_sampler(size, sampling_strategy=self.sampling_strategy).to(self.device)
+            new_args = apply_to_collection(args, Tensor, B.index_select, dim=0, index=sample_idx)
+            new_kwargs = apply_to_collection(kwargs, Tensor, B.index_select, dim=0, index=sample_idx)
+            self.metrics[idx].update(*new_args, **new_kwargs)
+
+    def compute(self) -> Dict[str, Tensor]:
+        """Computes the bootstrapped metric values.
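+
+        For example, with the default ``mean=True, std=True`` the result takes the form
+        ``{'mean': Tensor, 'std': Tensor}``, as in the class example above.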
+
+        Always returns a dict of tensors, which can contain the following keys: ``mean``, ``std``, ``quantile`` and
+        ``raw``, depending on how the class was initialized.
+        """
+        computed_vals = B.stack([m.compute() for m in self.metrics], dim=0)
+        output_dict = {}
+        if self.mean:
+            output_dict["mean"] = computed_vals.mean(dim=0)
+        if self.std:
+            output_dict["std"] = computed_vals.std(dim=0)
+        if self.quantile is not None:
+            output_dict["quantile"] = B.quantile(computed_vals, self.quantile)
+        if self.raw:
+            output_dict["raw"] = computed_vals
+        return output_dict
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/wrappers/multioutput.py b/EE/paddlemetric/src/build/lib/paddlemetrics/wrappers/multioutput.py
new file mode 100644
index 000000000..789445be2
--- /dev/null
+++ b/EE/paddlemetric/src/build/lib/paddlemetrics/wrappers/multioutput.py
@@ -0,0 +1,165 @@
+from copy import deepcopy
+from typing import Any, Callable, List, Optional, Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import nn
+
+from paddlemetrics import Metric
+from paddlemetrics.utilities import apply_to_collection
+
+
+def _get_nan_indices(*tensors: B.Tensor) -> B.Tensor:
+    """Get indices of rows along dim 0 which have NaN values."""
+    if len(tensors) == 0:
+        raise ValueError("Must pass at least one tensor as argument")
+    sentinel = tensors[0]
+    nan_idxs = B.zeros(len(sentinel), dtype=B.bool, device=sentinel.device)
+    for tensor in tensors:
+        permuted_tensor = tensor.flatten(start_dim=1)
+        nan_idxs |= B.any(B.isnan(permuted_tensor), dim=1)
+    return nan_idxs
+
+
+class MultioutputWrapper(Metric):
+    """Wrap a base metric to enable it to support multiple outputs.
+
+    Several paddlemetrics metrics, such as :class:`paddlemetrics.regression.spearman.SpearmanCorrcoef`, lack support
+    for multioutput mode. This class wraps such metrics to support computing one metric per output.
+    Unlike specific torchmetric metrics, it doesn't support any aggregation across outputs.
+    This means if you set `num_outputs` to 2, `compute()` will return a Tensor of dimension
+    (2, ...) where ... represents the dimensions the metric returns when not wrapped.
+
+    In addition to enabling multioutput support for metrics that lack it, this class also supports, albeit in a crude
+    fashion, dealing with missing labels (or other data). When ``remove_nans`` is passed, the class will remove the
+    intersection of NaN containing "rows" upon each update for each output. For example, suppose a user uses
+    `MultioutputWrapper` to wrap :class:`paddlemetrics.regression.r2.R2Score` with 2 outputs, one of which
+    occasionally has missing labels; with ``remove_nans`` enabled, the wrapper will remove all rows containing NaNs
+    from that output's inputs before updating it.
+
+    Args:
+        base_metric:
+            Metric being wrapped.
+        num_outputs:
+            Expected dimensionality of the output dimension. This parameter is
+            used to determine the number of distinct metrics we need to track.
+        output_dim:
+            Dimension on which output is expected. Note that while this provides some flexibility, the output dimension
+            must be the same for all inputs to update. This applies even for metrics such as `Accuracy` where the labels
+            can have a different number of dimensions than the predictions. This can be worked around if the output
+            dimension can be set to -1 for both, even if -1 corresponds to different dimensions in different inputs.
+        remove_nans:
+            Whether to remove the intersection of rows containing NaNs from the values passed through to each underlying
+            metric. Proper operation requires all tensors passed to update to have dimension ``(N, ...)`` where N
+            represents the length of the batch or dataset being passed in.
+        squeeze_outputs:
+            If true, will squeeze the 1-item dimensions left after ``index_select`` is applied.
+            This is unnecessary but harmless for metrics such as `R2Score`, and useful
+            for certain classification metrics that can't handle additional 1-item dimensions.
+        compute_on_step:
+            Whether to recompute the metric value on each update step.
+        dist_sync_on_step:
+            Required for distributed training support.
+        process_group:
+            Specify the process group on which synchronization is called.
+            The default: ``None`` (which selects the entire world)
+        dist_sync_fn:
+            Required for distributed training support.
+
+    Example:
+
+        >>> # Mimic R2Score in `multioutput`, `raw_values` mode:
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics import MultioutputWrapper, R2Score
+        >>> target = B.tensor([[0.5, 1], [-1, 1], [7, -6]])
+        >>> preds = B.tensor([[0, 2], [-1, 2], [8, -5]])
+        >>> r2score = MultioutputWrapper(R2Score(), 2)
+        >>> r2score(preds, target)
+        [tensor(0.9654), tensor(0.9082)]
+        >>> # Classification metric where prediction and label tensors have different shapes.
+        >>> from paddlemetrics import BinnedAveragePrecision
+        >>> target = B.tensor([[1, 2], [2, 0], [1, 2]])
+        >>> preds = B.tensor([
+        ...     [[.1, .8], [.8, .05], [.1, .15]],
+        ...     [[.1, .1], [.2, .3], [.7, .6]],
+        ...     [[.002, .4], [.95, .45], [.048, .15]]
+        ... ])
+        >>> binned_avg_precision = MultioutputWrapper(BinnedAveragePrecision(3, thresholds=5), 2)
+        >>> binned_avg_precision(preds, target)
+        [[tensor(-0.), tensor(1.0000), tensor(1.0000)], [tensor(0.3333), tensor(-0.), tensor(0.6667)]]
+    """
+
+    is_differentiable = False
+
+    def __init__(
+        self,
+        base_metric: Metric,
+        num_outputs: int,
+        output_dim: int = -1,
+        remove_nans: bool = True,
+        squeeze_outputs: bool = True,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Optional[Callable] = None,
+    ):
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+        self.metrics = nn.ModuleList([deepcopy(base_metric) for _ in range(num_outputs)])
+        self.output_dim = output_dim
+        self.remove_nans = remove_nans
+        self.squeeze_outputs = squeeze_outputs
+
+    def _get_args_kwargs_by_output(
+        self, *args: B.Tensor, **kwargs: B.Tensor
+    ) -> List[Tuple[B.Tensor, B.Tensor]]:
+        """Get args and kwargs reshaped to be output-specific and (maybe) having NaNs stripped out."""
+        args_kwargs_by_output = []
+        for i in range(len(self.metrics)):
+            selected_args = apply_to_collection(
+                args, B.Tensor, B.index_select, dim=self.output_dim, index=B.tensor(i, device=self.device)
+            )
+            selected_kwargs = apply_to_collection(
+                kwargs, B.Tensor, B.index_select, dim=self.output_dim, index=B.tensor(i, device=self.device)
+            )
+            if self.remove_nans:
+                args_kwargs = selected_args + tuple(selected_kwargs.values())
+                nan_idxs = _get_nan_indices(*args_kwargs)
+                selected_args = [arg[~nan_idxs] for arg in selected_args]
+                selected_kwargs = {k: v[~nan_idxs] for k, v in selected_kwargs.items()}
+
+            if self.squeeze_outputs:
+                selected_args = [arg.squeeze(self.output_dim) for arg in selected_args]
+            args_kwargs_by_output.append((selected_args, selected_kwargs))
+        return args_kwargs_by_output
+
+    def update(self, *args: Any, **kwargs: Any) -> None:
+        """Update each underlying metric with the corresponding output."""
+        reshaped_args_kwargs = self._get_args_kwargs_by_output(*args, **kwargs)
+        for metric, (selected_args, selected_kwargs) in zip(self.metrics, reshaped_args_kwargs):
+            metric.update(*selected_args, **selected_kwargs)
+
+    def compute(self) -> List[B.Tensor]:
+        """Compute metrics."""
+        return [m.compute() for m in self.metrics]
+
+    def forward(self, *args: Any, **kwargs: Any) -> Any:
+        """Call underlying forward methods and aggregate the results if they're non-null.
+
+        We override this method to ensure that state variables get copied over on the underlying metrics.
+        """
+        results = []
+        reshaped_args_kwargs = self._get_args_kwargs_by_output(*args, **kwargs)
+        for metric, (selected_args, selected_kwargs) in zip(self.metrics, reshaped_args_kwargs):
+            results.append(metric(*selected_args, **selected_kwargs))
+        if results[0] is None:
+            return None
+        return results
+
+    def reset(self) -> None:
+        """Reset all underlying metrics."""
+        for metric in self.metrics:
+            metric.reset()
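To make the per-output slicing in ``_get_args_kwargs_by_output`` concrete, here is a hand-rolled sketch of the same two steps (column selection, then NaN-row removal), assuming the ``paddleext.torchapi`` shim added by this patch forwards ``tensor``, ``index_select``, ``isnan``, and ``any`` like their torch counterparts:

    import paddleext.torchapi as B

    preds = B.tensor([[0.0, 2.0], [-1.0, 2.0], [8.0, -5.0]])
    target = B.tensor([[0.5, 1.0], [float("nan"), 1.0], [7.0, -6.0]])

    # Step 1: slice out output column 0 (what index_select with output_dim=-1 does).
    col = B.tensor([0])
    p0 = B.index_select(preds, dim=-1, index=col)
    t0 = B.index_select(target, dim=-1, index=col)

    # Step 2: drop every row where any input has a NaN (remove_nans=True).
    nan_rows = B.any(B.isnan(p0), dim=1) | B.any(B.isnan(t0), dim=1)
    p0, t0 = p0[~nan_rows], t0[~nan_rows]

    print(p0.squeeze(-1), t0.squeeze(-1))  # rows 0 and 2 survive for output 0

The wrapper then feeds each such pair to its own deep copy of the base metric, which is why the per-output copies can accumulate different numbers of rows.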
print(f"current acc={tracker.compute()}") # doctest: +NORMALIZE_WHITESPACE + current acc=0.1120000034570694 + current acc=0.08799999952316284 + current acc=0.12600000202655792 + current acc=0.07999999821186066 + current acc=0.10199999809265137 + >>> best_acc, which_epoch = tracker.best_metric(return_step=True) + >>> tracker.compute_all() + tensor([0.1120, 0.0880, 0.1260, 0.0800, 0.1020]) + """ + + def __init__(self, metric: Metric, maximize: bool = True) -> None: + super().__init__() + if not isinstance(metric, Metric): + raise TypeError("metric arg need to be an instance of a paddlemetrics metric" f" but got {metric}") + self._base_metric = metric + self.maximize = maximize + + self._increment_called = False + + @property + def n_steps(self) -> int: + """Returns the number of times the tracker has been incremented.""" + return len(self) - 1 # subtract the base metric + + def increment(self) -> None: + """Creates a new instace of the input metric that will be updated next.""" + self._increment_called = True + self.append(deepcopy(self._base_metric)) + + def forward(self, *args, **kwargs) -> None: # type: ignore + """Calls forward of the current metric being tracked.""" + self._check_for_increment("forward") + return self[-1](*args, **kwargs) + + def update(self, *args, **kwargs) -> None: # type: ignore + """Updates the current metric being tracked.""" + self._check_for_increment("update") + self[-1].update(*args, **kwargs) + + def compute(self) -> Any: + """Call compute of the current metric being tracked.""" + self._check_for_increment("compute") + return self[-1].compute() + + def compute_all(self) -> Tensor: + """Compute the metric value for all tracked metrics.""" + self._check_for_increment("compute_all") + return B.stack([metric.compute() for i, metric in enumerate(self) if i != 0], dim=0) + + def reset(self) -> None: + """Resets the current metric being tracked.""" + self[-1].reset() + + def reset_all(self) -> None: + """Resets all metrics being tracked.""" + for metric in self: + metric.reset() + + def best_metric(self, return_step: bool = False) -> Union[float, Tuple[int, float]]: + """Returns the highest metric out of all tracked. + + Args: + return_step: If `True` will also return the step with the highest metric value. + + Returns: + The best metric value, and optionally the timestep. + """ + fn = B.max if self.maximize else B.min + idx, max = fn(self.compute_all(), 0) + if return_step: + return idx.item(), max.item() + return max.item() + + def _check_for_increment(self, method: str) -> None: + if not self._increment_called: + raise ValueError(f"`{method}` cannot be called before `.increment()` has been called") diff --git a/EE/paddlemetric/src/dist/paddlemetrics-1.0.0b0-py3-none-any.whl b/EE/paddlemetric/src/dist/paddlemetrics-1.0.0b0-py3-none-any.whl new file mode 100644 index 0000000000000000000000000000000000000000..e737dee2b0276c2b2e7cf108ce78c9f605845969 GIT binary patch literal 306551 zcmZs@Q;=p|z@+<@ZQHhO+qP|+U3OKMZQHhO+jf`he1A;D%!!z}UDp}8pOq_fE6RX? 
diff --git a/EE/paddlemetric/src/dist/paddlemetrics-1.0.0b0-py3-none-any.whl b/EE/paddlemetric/src/dist/paddlemetrics-1.0.0b0-py3-none-any.whl
new file mode 100644
index 0000000000000000000000000000000000000000..e737dee2b0276c2b2e7cf108ce78c9f605845969
Binary files /dev/null and b/EE/paddlemetric/src/dist/paddlemetrics-1.0.0b0-py3-none-any.whl differ
zSD(K!wznCV#C+X6pO^TvEy|6D&8~fxj@Fhta_2a8)GG~YETu3?^#Vp0<&X%u-rJ(rN{}Vf=W4=yq1}!*7``K&NG8Nc zb{6I>Wp1@P-fi zht?E(#W9nF%}oaQ%;`=jmH&x-?(oJSN}MJQ#`hHvt1Ww7_p7C!EnXYDmr zLu%(*J=#T%={a=>T$GG6f}H7FXw4Ws(h$~R%Od5ChM{t%cNWT%7d(T~P05wIC(#C- z>FOJ3AF1C}AiX$C<%5X}uHxY){zkHRh?x{PvPQwy4yA=z)fg^B7&Ok8p5sQNg3jBu zsxK|7WUTtsSYMkWWB>Xbk486L1jqADwkWa-lVdx52TiO-97>z}Zo~{U*LL7&14|UF z9~pIG!I{y`c)gg5GA4(6Wyt<0OOQ(iK~b>>EzT2-dJSgA@TheoVzDh}FrwJ)ct43V zo8@U{SzAWDF;_YHBu1>dehX9b6)FVHzIa@X@TrYXwYU)T21JJ=d&Zvxc+zoi#JV4K zEO#@1l?RBGCA5l271!lkG2R}Yw~=f>oVBr8Y;b`y^8o(6z}`4MCWgG;2Vm*VHB_`^ z+L&*!ahJe_@`tN`aBP78Zjc+*e4R2*jII1-FGd7pWbk!ZiJEScOl`_B0niKycg0Qt z61VX}4I5DSLJZGOuLV^bM~vjbdkxiv@u<7$vpbC1`*LA-oQ90Yxg9{H&(1Z>1+aK) zz7y?!+oK~lYosSh)YBFWD9{Zy zN_E1=`%ml$d>zXHTEdLn(q-N$FBgiRm9Pt`O{X8ZOtB-A-h`2Z z&mtA<-`P;bC!Ll7@TFBz6a;=E~wcaO_PYPW7c91xZ z$@9HMbFbH~wp{OCS93!$c_EpqnC#CX#kRupWw-_;PN-mg5%SNGOEOStTpkPfvW1SF zXtuyOu=x;0pn{5g_~@5k%Kh$o*{S2-kvY=G=O(DR2#pi67dLu1nH4+p9a+tb!y18l zaEBX7>058=%$$)Y*J&rUk2bl$@KIJv&5^lXA5-s_qUoC79Mt=^*m}zx=O!+EzVxGg z;l%lbdY&Lg`Bf&wI;QM9I+gR$iTC;a=r~-03PnWs)ujA(gCpiXC)ew&CWQq)d1r}^ zhR`W)thpN9zfv^%v9*(|8 z-OSRiYQ4VU@?Lvp>$rOU>gB)=S*V2f=B-n_s2S=;fU8zSzZwI3xs%?gzBK+V#+o;0 z&;nCoz$4nG9u=BYS-F`Bb#0y0<;)nMDs>2C9MrAmsJB1alk?d)T)49<%@!E*wl0y| zrryoqK=hpV?h9(DMH9zv*b14htI5j`z+AQgfiJeXrOB)fEI8qo4ka9a2vH?8`y zkmibJ_Fg2}i6$vCVTK*FMiW**i9|QTn#s`MbqI}Ca!JFxd8O($27_7c;T7Zr-I;@M zWgXOWJ_m%Qn=XdYz3Yn%JMCRc1m8lJT>Zebnl&x$i2##Bdv%)gk#+!wNAC&C_-u|n!L#CpNBahTY5VE%oRuTnhTZ!~ z%m1gQ^7C$`x2}h;%DTb*5gyQ-{bk9UT0jRa>J83~)%IiD4DKBw;s2X8>HCT4G_?v- zYBMzeI^x$P(e3rhH&BHhS3eHy3L48d)|CIYA#|j6t;;lhY*h=5`8v5i*V+>hXYGCW zUp7YsLX65XjH6sipa1}&zkXnn|IH6<>SAl;{G0V?U`?l|XJKpMtf%+e-WIFN*d4MW zbbqQL%$T6tXWPcoc#1dvv2@-?acGGWQGj4(X^0Rn5L9&C(%a29PE0&oQ_d9P_GWry z|H6WE5xEg|iT7??KC*y5njK7vx0dR`C^|Y0HP0LNc4TQ6j20*-Aa%(IB*B`tqnfxo zEz3yg<)@^TbY%KT@TLb_EVTT%H#24r&qR6Rm&})_2KduPggG0~M-?QAtcyezNqO32 z%>SZomt`AEf&upKTZCXGGoM-D6H;AeC01q&#OkM2U71@``}+C)m$iQ;WJ~+BI=|yz zLtCd_&5d85KXzKJ37jM%Bppi{`_gm39MqwKbBw}?vs^HVGcllxX1e=pqSfD0Oa-Dv zFG@fS#hRw0OU+lZNzpK)8>~I&`9x_uHRJlsL_IsTsAU+A9-16qzqb$Jo^4*<9ok&9 zSE`3Q@f*C zVNnm=F{j@(Ssj5Cp<3w!3`uI&1T(meT=Qb*g^b!Ks#t}jpdwppbL!&ajAZW3HOYv{ zBOyJ`hhW$njA1um=|7o?GXO^ttSuDuWhJCntdKqvY{TP>MbzX>unxaCc1XnRP)6T* zPcfKpCSjdJ7lB9b_a6)yw<|%^#2EoIj$u#^-M2F&#r(g?Wb#A&-+HQ)`q`7fFNfmbLb#R$`&GKTkd3b~t|XbAjG#2baa5CKT1MHCQ+p5=HDsDsFq&k= zm!PzXIsT)w^snRY{f* z@9^;=$uWVaz0PCLAc^2mf_xA8+BVKF_mPk@8+D!T3XeL*8hqr*ZqCgs@nx*8hMyZROs z*&LM~hG>BmH!$92XQaZA(DIr6>CtK1NjWDq95g4~5Fa!yh}O)cZ>d$GKrW634gv1h*h$=29srw1ou; zp0`kW+t@zc56FmUzm4$CKA*7xx#ZXyoMV2zvN%~=V0tM;8*KetpA&?LYSG_gLc!tZ zkq(P!1emv-bOftu@vH=2fll`_aA~c+lXY_)kkL^UmG`N@=Md@GMNI`zVQ;RX~IRq(~AaA^lnsH4)_S$>A=;B#9~ER5b4$ zOLSwF^9oZ!paoIKd$Q>CT5a|WI#XzP>utF-Ki%zG1K&?CuAF)^HR$$E&}nRsmFteK zOr_i8=`+29r|Vx6z$6u8u(r?^cgW(l=0julE&Seq1ZXS-D*9tac#7H{iVYxyh_e(n z8_>y^YGfIadm_f9$C#_qn@MZ}@3Pb)U?{!q)zNN*&D&hEjepcpio><8t(1(a_WDq? 
zm)Kq*vaISzzy&#lx?v)E0OchOfNZa4pUc7v778wf+v@^<Hc{;bIH7ZoU{6pY)oy!x`qu}zK}I3@YKx0!q{9cG@bEG z9t^|~5sZ_ji@nIiaM27}z{n`T@Ss;TF-?Wh;0${BYkGGWGb2p;f&T=DxUh}FUj*w7G*M0#@wVO>cn|@Y=wtXrYHk5i0Ke48W*9T zS5~c2F>^%&1~md=>sO~>z9xodC|!ev5SgI}0VYpH&J3HSrn$rnVXhU3tN^?HM92Vx zz?7LL@IOWV5juT{#p0uP!L9P;Al#7+MpXt|ui(pQ`lIDSORvHjyZahslKv1*;m-KD zbp3k?BW$_Hmhg#L%*&RV$-_2^+iN*kO6}${PL`(fGdR;wzA?#{ z59zpRA_dT_-e18c+fNu(w!s$~B;maeSM?VI zG3MEvYAT8n!whxarxOb^@12L&X{jcvCb}_J$~on^qr|SpuItdD4bqcXCd*XWphs+knfes6^pPI&hM>*f~#CvMTt9usUG8C`@^V zI>0c!qrY5W-Sp`Bd}9Q@W%F~#wVs}BL}BUnK|kS8{)BSR&<>_EMYlsZ4Ya}ZrS3pF zc*nl|w;RAk6gcy=U(Jv5yWRTVLstKb=Kr6cguRKA!+%u2s*T;o@5?e%i!l$+l6XkM zqqBLKEM8PhX(_zHucd+*IE*()`LeOC6J}rfdCS(=(hp~`v6xS>_dM13_%PM@3A1DT zt~$BAG2k3?=fE}X+!iXOtDsojS0ElV(>TMV39Wz`ohOfIS!c{UCL*r4jE&!u$I7U5 zN5l>o2ivh+X?rUtxMm7kWC%^!D33t`VL^}z3A7WIt`!e8Ks}B8M>)JM!S%YOoSy>b zO;x~HifT+V-bYr>VG%Ca1@O_##%70)cgLDFz`h?WLl>94IG>{jryDbte2^Gwt=(G{ z5(^ls7hO@QVv;8O?*Zg+VuAD!7$dA35eY?-#8fE#_ld=WHy(75>l^2qA^)A#b}A8%Z3J@}Eud3zwW)w>hRFIQ$m4sS-*24LDGinbgmh0*7Lv|Sq105Lux#>G8q2)+8mK1+hci@O1QP>qr+e#; zWB>5Ar$k)aZW;Gxo?|R z8SS^7L^Ih7xM@W;3rIea$h>H(SmxZio^hdHTUH>mUWwct9xje&3AL%bM{j_m7x$L! zdVl6CJPKd?P z{4+1H4non5LMc{Oi)>3`C$F?cQBSfO0NZZ4BvxEitQkd_QMlsYed+7wEN#m{yPFtk z+jz+tqq>2+J$LM1#kLenC}`+A90Za_%r?GoDON1jp0^3V!AJWSJI%Mfy~9n?x$VL1 zy_f$so7HJxWp^7~i`Kb)W37JcXYbuAp^7N3%7|7bUSjVGR%!#Al@vOyA}ci4l?)Qw zbPq=zKDY3_bP4LLy5Q%Q@Zjki@?F@9V|%O5#b%GXj!l>x`YDy@Wbj7G-HMzrQ|#%i zd3&8>7b|PljURl!r%g3q(ew{qyVH{ro__jz#ROT=e?)Fcqaa$>JD+S z1&@ujLu1Bw;g8vokl)E|q#7UJJgn-4N5-Cj7#_wdpIfulO7(2WCgSaA>AjIV>!T%W zy#r|Bc7JE%#TWGddhP$yIdoMNJYW2F4Y>sWUnx8Pdug|D{;llS8kcg|5=cMZ|EMHS zfM0It*GP;xEtI~!@mC=sJ6q@74D?n zcsefBYG2^~lAd!at+H`Q+mUG3luDfJ_E~u%SH596{vm2@TwL#W>xpiL$qNV05QyCz zEx)^u({b4Ao@$abWVZ*OD}>eQT)O_j<)}&mBB~E%AJ3md(x8BqLY0hv?U8npXLyrH z4wW402~L@?Q;#+YwCn?)B+@tna)aLby`gSshZmlKT0AA2U7Y{-&$8JOluF=dRz5i)s(shQ#-wO;c2ZfY9VoEA~Y%^NhSlJ;+GEtwkJ!`m;_e>KZZ>CNQp z<@(K#lcP6~&2Il{dbo1j`QZd8_aES46#U)o_%IbXNhZ|r*zcs{TR}p^G#POHN^&9< zXmW_6Na?(?_~Bmr-i5G+RGZw5(WteQF)EqN%u<<-pEf^Inby&_(Sf2h=pm6Bya{0b za)wcgNWOjQfdrCe&BRbkoz_&UM^++72Y=KGd`1`}u^ZRD_-iY`B4J^$6Qi87I8PZw zfa2Ka<|J4>ff4~r7Iu*)Dix-r%r?;_^OGoI=B`=mh*H>0@|sBYYg5u94xfjH=-k4X zESHQ`ndNzT{pcZx&6h8y3oo~a_co8FTiFXR#|DHK9O0nVBm=i;t0q+a<4h)@8q&(N zRM3m$=rV?*a@blllT?J`J zVp^P;5}*R087kfslIYKXvk=8%k0hc#ggMN8*O;M@q*%Qt(1o)22sceV4&OL^#R3W3 z#25wafvfu@ggpqE)x=rn6@*nh^Wb@+>A#&&F$W88!3g zi7|U`YyY)e*wep^4%tY&PMI+{eSYW? 
z)Tcgdi9|QkFQIT(xa#jyDdn5vXdd+K8Kne?h zCsJ$=ecDEvFh4{n94A-LW`Qp2`aFRx0u9T?*a{D{6o+4#@6k&maPOERVz)+rf_#+V z@GnMZ|UEXGa9*fx5&O@UaF>q6VY>%r6kmHa5?P!ZjFk+Q zmGgZo2#P--G2qsSLp2~V~ZsZeH8**Mg`oTS8BTapovFR7y~hyo}@y;A;V zRDoi<;#IP$HPy)IvGg(la>6q1d4g@8`Fn*3h4iA5h{n4I8L?#Hj^50Xb{Oj#o&HmMc;}H0`=LP6X;8k3FeR5+F+A@Ife_9UW7ZJ z9x*kFGw|}Iqu{n|AtxuVJ7&yp%7xGYw&sGfI6fz^Qn+pE7k&KK)f_-LhvjG5*d4&d z{&hYEh|x9+rw3zSg3%^M2R4gH(8T^_VU7+D7WQ?#cV^F?yV+i7Y;(VM0xRTZ@BEgi z0jJ09H+~cb4o*jNdpZa>F{m?GcnaQiY0vK7(*K#@>^Y^8IIC>9UT^9El2jG*&9vn8CNHNvN+m!eHT)YpC_^TP*$b-t&$}NRt!k_K6IR;*?t8OV zPrL(SUQMy&E|;W__V;03wzk{yXtvRPE_?2>=336EX4iqaxanGfD7O5~KpJ76Wo#O9 zY%M7kjvr#LHel8FjI=och$0HJGX5^#`1WW`v`uz!IEdUcV|SO-bVDiyMJq1rqi%|a z2X0EJ#3i16IFZTa5b-rcPPS8yCJut=P=QxMFN{IFi3rI5W6DZV8qxtgNV5c=8HF_A z-nFaL&!f?~EfpsolM;;WKu(2N5-Uj?X|jlyC9CShS`j^xhT}ZMH@0n@DVDAk#{w5x z2>k2EW!nJo?T(XF@i8x5vDbY~)4pU40=L#6M?+mVCA4;Avoh&y>TlTfT*Tw0WU7EE z)C+wG@b!~T`G-t)07G5KtoPay2UQGR4?DA3&RwJI{NA3~hjoixc!qx`HC(p5r=)pw zN&@QDxiUjI+G1cb38Y7mlCKMNW9BY89uWU{XWH#5V)Qf4fF%FKDgsF!YeUWi$@Qx$E5pjo_;KLXMaA zuT2tx!^Ro!b-NaK!eaJ!dx|Yt)ywYZ)UD5n-kjNnf2AFH7c&t^q7;4Wdu#W4`6N1V z&PdfQ_9BPB;G*xc`W|pIcyD_Mn&g8`#RI&U7Fz`p$A6lq~1mBh%y ze;>3W@0AYl%Tdw}W^S&o;<6)T!YQqYa01B0F=&JDp_uOd(xK_a>hGZLr4J){({ zCGwLfEb37BD%|;MoydK;Wij$dGwT4Jw0WDhE}y-Mq0JTfM^^E_VNCIg5WpD}2zNC7 z6M^yZWet=dX^;bwYbHu1GJ?!|URi=P)TQRK_o&qk-|TxWgjYGm&CA2<>z2)iU4)a# zGqX_m<})`1S@$G(VZv@nHEm)OiI93tYD+JIl~CO0E_{AR2gq#&7=^8nO9h-^z~eWxY2VHt&LF$Ob7$rZ zkwc7|QhO)9_88Oo?_gCuedc#KKjEZJfcT;U#y3@q(OhvTa^e!%MvQxZ{t|9NB?BVH z1bIEiT94THO0}iwHL9gzdfW8Sv*P4pgyOm_R=pjTVNPW^3usiiMWFj>#>?m-D!{Ei z7xKjl?w0ideNm0%o2$_jaV*l%WmS|S1}xD-DcvbBCc9LpL8-sklA+nMFJ z;t@;QO%cGqQ!I10%U*>_j|gquKEZMz+I?s`m>0q>OCws&8UAhC*HCl`9s4M-gJ5(F zsP_pLk0r%w?FXVJN@rqH)tT>&a*%FzsKZT*-;~fQ4!wWaT@MAe(fnW&3I!)3O4Ni7 zpb}Co1afT#cLjMMTT58klWCSBLW)f?zLhMvNS+dzj==cmTkb12_dwQZHx|Z$r>9~g z>iQxPaH8p$THVv9&wm4n-U9uJg)izx`aC0<+Qf;O!m_E1n#MmGRaBFe_ zol`(>8L&AY-~_vqyeC^C@Y>G6?_Y2SQnTf31J|3*MzcRqyVGN7SiYvYVEEwQF`<|m zp|(>znD}9G_F?oY&sqSzNl>oiq}#~MVSxgx8TR8*)Y^CGZr~Q`gd8*~NE+w`_wFzW zP0r}{bcEkBV=dvGb)pd!F{CNL;Z#G$?bVkhIBa1+cuho-Bim%%XQn8l@ILJLFn$JE zzofg}>;t37X#4}wbLlbZcYxFlmk^)OlzYnXt8~KFLL0x}~ynRLZ?I8s^oBYL2R&`BIUq;aI`3Z4Hya!vSBo2V1EU{AqlU4l7;xc- zEL6(8I8QIHnmR5{dN_HU|2lL{(J0D5*_a0q4D(J2TKAW>585|Q1ED|x%RyCxx3xqLyZr%8T0-Q~e_NZh76 zEgI!$Vo)#Gwo9Enqh^1(BTgt)H)e%~11k1JD`rvWuZmXIkHl@^F3b0whR{>MDk#Bj z$^e6_Kj()P?OO0%2vK#w&v#Y}_Y= zFW9XqYcBg1Dm&?eV}A+SX=Y;M#EVzEYBkcWUgFGA!w-pWbJ4&E}ROboSA4$UK<%8yCa8sLQh3QX# zhA1fv63n7?fSN?=X~D>Q8l5kPR};Y9#CPrtngt6mH>b9qtnH$xC);0qHDLx%U+(!8 z83;)w1#A!7#yNSyz*V+55{n=|N;+wwDR*t<6w0C#^^=@s>$8F~U`b7MGiA{eG z*jfg|bR4PS^n87VjTVhCd8T%?Asn>C5R65T+HMD(Z4@0txS}o>RO-#d9NgSOl6dkupv^uTR)RU@|`}Xx_dWbI~kK}vRNAq=7_?0 z&fy3*wK+n9yVk;%^aKCMs5t%o*%cR6`6uFA!N4-Y8Sk(?J#XnqI~9v^Hra0*%CtUR z5~bnj(1%teH9$0{eNf{hu|q<~HmZcqOv405n8H`2*@N1@Q_+$DuGhrSLRX;ez`ogs zIwYZy@BojtC{&tA6%r<92K9<#3=l#?n&_SeVmyH`9)_BQ+FPRT z906*dWAsrIL;Zn?dGVCxM42X?YG6iC!FVm%Hsr*0nP7x@|9f1?M(I_N9IdqTFA$@& z;&6Hi%YjV-jAvECb4-i(stndWH4r^&^D#^$ImQN6W6gA;wys;s198CRJ~$gBam*ma zU${YKETplvh0FNxde&Et3(BGcR7 zY8rx(Xe$yiwU)&Di2-8X!kxoXWF$r1Q7!w_A~c3d9Zl&K&V_c&>=V(E1mI5rVNBrIxvdkE961v(zQb4wo;*;`*q8;Wn5eU)ZDV|*U4Gmxw=do4yA~ww-ifZ>FALq9FlSN57 zB)?_5tKHl@w~_kzzku1$5P&@5Pnk3YU;!kQc%x-ID4Hur65lcTAYGPZK6mwRj!Q5N^85yh{ zv^$w~M@?XyUr~wHWz5+WsPu4-V4AnJ#8>0~KzT7gN$hy}7}59zbsImOL;4ey1Q~~E z*O}OAcWZ4A883rbTRNY(YUREV#qTACw|7sU*W_?SLvDDS>%G<0RmY;pr`R>`&rCPaQ`hzK{_Seu&&f{3t74-35##AnaS!^9P~U$@A}6!d zv@jd7Q|@vKATQ!~l#hPKJ#$-%SG54>C)llZ$X@>Eyj=ogn(9@ceM)&^ z2C7PV?&*NO`~@MtNn=EsA>^Y+f}xu?Fdu2!^NXDNng9WA)bowLBmJFg4eayF@<1cQ 
z@XfM$?WNyE*cnaza{n8*o;_gmHhHEC9#c2rgfH|>X!Bx_J95TseDS~GmpHy^}W+@F)NZtvVc zW;9lu)@)!o(4}F!`;EcX9NO$rT`HbK!Xj?nrUzRGdlMc9@i-rin$A zFVuLisSwA^>+2UsUlyIDp=9#Z!q}8kIqA}gLpXl=BTzSMYr@R0Z+z>m?;pR<7Z8QW z=?KT-udMB*7NsJ)?Zi8Fl9@fv!tYUdH9cSd+d)AF__>iqAi1B~DOO_e3$s#ox*;rf z{AW4+`8H=?jwV`9P)Cb8iMTe3B=~o}*KyX3K?I8s zW;6JMPFbki$e6xEdaAghC3Y>?Q3czMh=-H*RtKXHsMhs@2p;vZww`(KvDVlSez3)rD7PfLIl$2yxF8W(j)9OhsWFO=s1xCQSDX$G3g+& zSxTU38%_C$oMO}Te7}QY*CGu~2n#W?Od|$lKxubV80dkV;}Lt$KHft`o>(GeTplAz zeS#&%%n!+Y+in5n@c=j^o4i4GW6gX#?I%}Qw62qr$Mb1?10!$Vf;xf>D3&Up4GgN$ zi$VepCtgGwIl)s7oh(7oQ>~1~iaDlK-UIEfc4L=*-z>1a0Kw~-kUxAHY)IcSd6dr3 znpzBBa&{CpO4@WGhMVD@+s4_5N=?+D>#UtXIrzJuQE9u|#Cy7YG?|g@X2Lbx{=m>4 zsF^IrY8Dd@le6gv_V1d(b#V)S8q_L{83)q2K3{)ANG5FtwtnJQ9M@@@W+WX(zi?RG zVdVmnE#BF0Yyu`e=qo~gPM#u=VR}=#CKgPfE!aPFPY0Nb<)?q)X@)7>?vQA;P!J} zuzh>{Nlo`Q7GN(4OnNe3%uAB=xOf>0?xJrP^?e(6uA|ALXC_Iy*1nPImmG%;#6em9 znmao?YZRj@>_g+891UijoPh?^kdG;ELeTz<;uAdf-J9OX3bRP(udGXkv# zKQX`Ni(6;+X+p-(+HdS}llEsjBL4;dPX1>mKEn3|#`W;|#8EWyX2sD}=5fnD2;_e_ z{|)`r4{wA0Z%L3tZ=u3iUT`lz%QseS+~V@nw0P`*d1k9%*GY>KnAO=FYgr^Q-!Oef z!@DORIML;)=4x{ec1`~G-t`K1F|5SE>PZ}1m=A+r9?avJL~t&i zp`Y;@-b`G&MGPS`qg)24tJS>a#f_8V>K4p5YvCaBg9*R)ciwDVrW>?{eicf~!uD1d za3uNQ{+L1AScCq4{r69DHwKE3A+P+5ao08f(lK_`ae5lK@a?VY;IHm1BmMaZI8Xl% z%UJk-Z>|2Vr#&XK$;i0#)-V6y|2o~OO^6w6=&yoy-*BX@29!r{@0HmUq-K{>J^l6+ zew;89r0UeCH&93sjJ9fzcxJZA^C!Z#Z=w(OpF_D9Yk%uR)aSJo#cca^*V9Yf9by@6 zud1fd9=70zy(a7S5ApxVcmE-~Mz6*VB0zwEV1DMktp9%vys53Ri_3o;pN#**^8bGk zui8|3hfOB9u$wP*G3RxlIY4-Br|Jsgv{oXVTW1laXd*zjg03S;kMr9v<)B@`1*gS; zCUN>9DMf5*zHNIZooEEBx!bcj75Gi0xN#v_~hLfG=A#i$J(j&X5fuI19nv{a~4Kk+3( zpWrf#6~eO8N9Q6bW!N!22X1N_0os_lLSrw6MhTj@&2}sFmpi#pJ(i$Eji2bRlUifn zljc981MT75`f-@<&}}=jjQ{ji({@7UD@y-t#wdhE|8v6|r;GzapDkI- z^2;q`{(#+8SHLHx=@d!jUrIJ2^*4bbsV;hwAGelhbhvp%dmdfxZtN_B%y>gyP#ggEGeKM+n&)lb0hX`_E z%lUa1B{&|O7+J#*FP&I_k6uaTWyDSR1OK0c{U3-yb<#+_|EEv1lKJl)Y-7_Oe|lrn z|G1i6@!7a+N+j(((ORS;Qd|>h-LIHmw3k(zX>Xg?ikH)Q^-zmT!UYmX>LBfc6aXYn zKCU*m9{Q(|$jDD}%x;Qg$oMS0-@h^duek2Y44mp5*_@Xa3~L_Q*-bLXm+%7AO(vr_ z1DFd7oRT~J&0=W3rPiyY#7A(j{CW?eKb0Lso;8)1wY3{87pO1x0!*LIxn43F& zcX2gR;B}Wlgv{8pEjo<$RY}s|r(n`jqRsQ}NRVxF*{3*sF&HHMGG? 
z6gXlSf%8KYfu?fmwY8ZAs=>N#WQINT&EapU&D^-{R=QDu$Y;*WCMl{%jnhUEtXd&~ zx;zKs=IYmP-!ppn`UnidfewalkLMhAb{Y^*Znksd?ZeVnFRysr@7y1)Jz0HugSu8P zeINNhrW_w>%s{ljry>-mVD2EvSj~#15-nFnxS-@@Bb9p4q&a3#3aT?|U*#IK-;~wJ zo=Yn*v||TrfP2si4%bO+JVg~-BckHU)H{r3EFzPQubQmaHp%0Ty~gb|ru6XeD^Fze z$~utpvnqtm52j-7vU*a8cnqolmk=5>)ktb6-vsLim_G{cp9NQSGUo-V3GQ9QL*!Xy z`1bgV60m<(0j3GJG{~kG>f@JPch;Hr60Y9it zs@d*hHsZ@~V!-AF50q$T905$R~ zb@hsL@C$&^!tw_v=Md)!BjZjB)MR*ln5JoGyQhoaFMKOBWU+XoAcb-_!{~eh_U^36 zQ6CP-#F;6+t7d?~y#zmf`a$1IEW@^ixC`YD5M(Ko*w&k0<79RWgjRhb;ccF0@dVSS zS1vk0-a`11ww`RAJhe40&g#~Sz50kPy8mWZF|9yR38!wy#`;?=)yS)+QsWe>hnfC~ z^Ib*9dn6i%J#M;_hSE1krbU=CF}=B_@Tgf#hlA_4w8pLVkazi>woxCfa)D5SHi|!r z0cxQ&NmH*j7T2wyt+U5%P;78IuZpQkhl>Zt{5 zX5bf4qhq8pu`SnRagIx*^!|D{zI4~^B{(l=5E8MZFyz@nQ3G=4yF^TQ&of`q%S-9^ zMO)xc%3LV4i4vMDGtiNRCJdJ+DihMgfH8?};zusOrd?@ufye_ShZPXeMyWl5Ehavk z)X;uhz_P8dgRw+I4jHX6c}s1wx=Le>il%I!sQGW{I*$<<}782{rCS~C(0g!(zlx9hgp4Rbtjlop@VuPCFi)>5B_*Sa|s;g8yU&f=Uk@=s5c4 zH5_*^-!(KmN58%Lk8oLSUNPcFKN78K!bM9=2jn-=Z4?+d;pdRaXOzyTM8KEZLZs=0 zG({}4ffA*q6})ThgJi3xo)l@&6pM@j#}K`RI0_!9%N$WkwXkzj|I$I0N|No5LP3VY zQZY}dgDW}bx{yw#-315Wsr!|nbRwt~pdnw-%>~NR`}Lo5F?b@6rLj#!8eB~}Z((K1 zt7qUf#5o5*CE#rF5a$JceW5pxOjQLj`Nz2VgFLqBLI#bn(T3qv?)Q@m-^+XE)i~RC z$P_gX`Njtqb3laI;q0dw+d~i++cbm4!+&!U{R0H5wnj?`p8F_pJ>p#p{e$kpJMZsA zXyY$V<#+WWqjYlAkC0*IvHdeiGpOK#y~erce!SOYMGL9CS5v-rW`V2vV`tR;vfZ=z zwRJYJ4V-%hBQAY|v%XvIN6EBjiwy?!EEM zJ5f?l$b?4q_{xL9=yL7E`hn=;EkqkURMCaP%{Oqj zgs2CqW)aZWjpIFK+=YW=T??-d!HSu}g0y>dc2*hH=^eS{?%MF!#~(Y}Q1Y$#>W0bE z{>8t&*z9l=Y*}>}uIbUMJc+W|D$o7CYLwTfeM~&{>8)`_F8+-;eZT3@_a(Y}e94|i z2rXOckn0k1YVCPFO3iC@;s2`F0+y>+_6D=hBXpnMu=l zH3;;g6Q8Ipx@4)5{Ds%hnUEpe|NUX+h*rg=ww)fY$F$+d+fEpN1@Lk+<#(ta7XGqQ*!w$|2JrgcRVBD{hIf_U@gdlkJL`5Wlcn|Y3Qxr7vy{1& zxAc_@y%X1*ziVi6VJqnE&Xml8*N`W30GXAKJoi8_ZsUj{_GE4Rc3x)d8Aq&CM(ymY zHut9T*Q7T;Dqfoe;=H39R!wgwGJ|h%Gj^>HyW5?pxPgiBrRIo(qa}K!<$+0{f#bd; z+)W9|weSKIpiBxO{Pfs2uD~m=LK`N~zeHrqm}_)8yM`{{N>eTIvguBkQ|jrQed|I=EMl@tJo88@$FAYLJtKsrvgZK})0RN1^<*)UVkA)M$euS}g z|JXHc2iDo!tsKsm?70KE%n8WJ;14j??ctWOD!E7wCUd=_(;5_0b`9VpRM1f`Canixmy^xc&>q;h7a7*8=rf_jBwCPV1^+nS{-Rl0re#CvqnZT| z;GZ=(0g*#rs^>YK2^Vr*e zf(Q^B@)p!7aYjOwQ4f0%0$J@!L_Xx45^;EyT_AX<FJ;H_M|r%dYXYA|wi@vmYdl3^qZ(5Vv!*U6wKdN|c?>>T{GP0t|`f5^ILZ63 z2>(SBI>VXYlq!CJKEiDpuRBU~*}xGSYT}H2j<_qcG0!TymM~S_v|)_(hr*E1Uj!ql z@XUCy z*vo&wI5mT=Y0f9xbQ3BCo zX2!zJ($-C-GS~hN-Q+7;(8ErMyP=mPK#OZO(|`&QruSvzc$%$((ROus`}Qv01?_8E zcN%wFUmn?S!X2=!fn&p)Fse=hNR{)3;&O8Q3@q?-Mw)(&%7;vjYn=>v4akC%bK?CA za=Kk8P}Q}hUW?>XkXt)N#>(bV#=n}@0xhf>Z^Lc^ix(y4O&|a&LPJ<>4!#>liRuk5 zB~d5X&(Mp_NDrk?Ah$}ONf#K%sT@paVm7V22}Hix=Lpc7HutsgwxRbfo!KD5W09(d zS<~`byXV)LVw5l-$*r$ zSHu^|{-wl`^lccLeN_#5Y<}c`+I|Ogt@t8eyo+P9cic+q9-$FwBAJSSD(fK6d?Exp zPJS6SzbflzU$aR;ObW~C^L|X_a4iaYTKaX*>V%S?zi0cGHMkl+N!WF**l zmD+5muj(7D!-Us(00XcDa`_sV;20#dHB_iTo>|4-nk+Ao2dd4i3+tk_z0w|nS8b4& zU$%ydtAz`hZ>ha#?BxJ_%CFalYFooA37r=tKHjw>$*fQxc9P#+pO2M9suXaTqrQ7O z*$jF~>ywC9@froH3&`~!*$8zy`WikOsbJq|5#P^ z{M*-EN4Di{oyX(A25=?wCAvCLT_;5!SieAx5tNf3@->f`Jz$y}`EXrDR7}i3ou=>; zwh!gqpl%pBN?D8L9B8li{QA&75dIzqA5lok?SQ(&ty#h0)GguaD&l1l(Tx zqj##p6Ku`N_q2n>m?V=Dpzz4o*tlEhwHk{SpKceuPuh$HGMeNWS2C8=8`H>yDTEGM zIhg#}j|SwulFH6Ac0($)ztSIs-oi?wr0fM3K&1xJNJXTK!4g~|f|D@;1^fV{3d<5G zGy)(KM9cBkwadHg&X!Ije1W_U5F< zl69atG;p+h+9Zm=o483PVk&g-A!iNarV}7FkuM}SF(pMO3_x;;6>ECGx(&4}FX~YyN(1ZQr)a~I~kpmfzv|CneA~^%W<3Ztc@S!6b+tpTfR2u&Lr#b2F zHySPXFTJZqTx{7hx|CN%MLe5ws(kSxvUQF>(PR2yB&kpp6-5}%31^az#tr?uxxm^Y zHz^H5(|;IJ;3scH_==iOQRq!V$fCEzILsfc>NastPxeI&qvKNj0sOGy- zsY*pN8!l`>LiPEGl_E+}7IQwNsZ6=Te=O>97JMEAL_Y)Q9cFYd_vjHzk(lhnnU1uG 
zf?W@_9~xbclT~jUACFXB>}IRqaz?Oz^SuTJS0Nbg-4#YH3az2SN)|8+y-Dp8uEyEw z)y;Mpq~C_*02z$=hfYB%giKzF_DlWUM6jDg#Xc^(8lY1dOoQK#xQN3_d7m~YSSoN| z16|Hkw1CHd8fZ-h8pr&IL)Aqt>-9)k+bv~<3eCA#b&ko4f zIn{>{s+^cPkMmJhJq}@T5f(+|oXyS8?{e?Bof2uiR@j)g8v@|^^}e*OMSa6_=iiiATl2l|vl-maJ{_805#E7ejQ?P36`INdL~|F8kvY!) z3NS5?lSo?+9-Z4&EDjcmf7cT?+b@lP^V3Dr2+nZ$eS;EyfrfNA%2Uh^pf0w0+5jhS z!3f>U#EkZ6AzowD`LYM@65Sp+v?O1V;yyt8!j)4^)5PA%+LdJL^>f-fNRiN%BK>dA?0XA_n-WX*XifFeg}dJ9cHw)8o#Aul zywPv-K*WE)a_f9?E7ukK##i7oBopV)<9VB2D7H$!{!^coTH|WPV9QM8wxyalq`7?# zxDN;3v0k>507&-&Ddqdgtr1Uv0M8wa~2g=evUpY15?(g3zT^^jSZzjHv$pwvai;Y zq!ETNp?`-bYl{}lQU4$rXxwPBn#ros6g4P%J8)(>3q!?uG^}P+n}}bzy4Bq@XllOZ zpAxFfhPk~w=4Se9LnzCOwo|Gu1|B;hWW;DCVC=z)Nk{#%#x|B7?F!T~sJN<4S@jnsS&sHItym}Qr3 z^8{SB*EU4zuQ>+~A`K&RCqe9~J6o;Ozwf;CaF6d1*W=T5c8$P#1P2cQ^TBGVG$f=*k2{k9`DQHu^*-XJe-7UV zl|y?S#YP4LiGEpgz4ID*O39)uK_-<1rGSH?brabTFb`gXra=#)qLescIAwvwT(+-G z+OeS!1%K8km(U6WXMy-ac-Y{^DG@`D&S^wkz8+0&*`Fv&*Q6h3rze8Y8Te)7<{Ikd z69Nx7PspP*77=lOlpb<7bb1r9fqH{TOR?l%FjxKbi>e_EDs3 z9$wz>f7uFjbq93aj6d#M-p8+x_@px-q1z5!H{+c_l5j^QhEg@#jN*XRsER8kHU`vt z0r`?=C)s1Imw3>nThD0`JbCy$QFF_F?mi_IAqs8V%Nc4N&kr+@}+C^`rrCLD?VEOssEQ+)#j_Yl$ zXnY_;#-Wr8nCReqfivj^MHz!D@GbEN#?siDR%Hc}N~e+#bRi60ir~3|W|vNI)^_rf zpZDjN;YZ<9iYJ;=6ZbPlOi=4M=3spEQ*hQj8+B+AC^I>FYE>-Lv1bi0XxN338!srsQUpht(P=vaS)ol@S6SAP>= z^djXBm)E4TwN#+(JQ`ey%=R}KUS)x9nq65&9JM>^J;9m86`r?8-L&x`!|tAt$g^Q3 zbfhHt!$nkOmOT~cAu>x!s51|my;rJ+cmp5gHcY1G2$2^XKMEbcCj%+S=8My+QZA;LhwiCfI#+kK`>lSDhs?24{c zKwqgU{*IF17%ojhYMIc$1!h18mFR}pm$vk>5Dx5JxuXUwb5aZ|N|o6A6IRO~$lDMpc-Tbx7=PlwQPkWD}XFQqA9i z8iV>%h4Xi0`?G#~+-wWaF7Crx4d6J)CGb6|)w$S@Z6E%Y9o??xeD(jt_*U#4_^$tM z#bU?M6kgDQ4?sHmV+h+#xocc@kw)WY-JIT5WP-@i#xa1(>rT5dx~+L|r!}^#!;;3z zC(J{X=P;_mWLWtP+orSUMR4~07ss>t*{gbbqrA@BrZ$VuYuT)3_wIZ1ZeO}nBgzll z-@m)Amdf+KQ*zVL|LbW}xyyaYuON!iaWiKqG_+DbF+=V=cb1_{Q;_gw#K_3j^OG8pjl>X8s%#DUoT~D4|l>8QsTo02PiM*oCdVI%q^{G=ll9Rpf#G()Vu^4_=ij zRgSEC-U(6o)BNhZ05#7Zq_-)0x!JEK$HI<~$ER$AyN~&MjDor?+ef=!>J&j^-VdWb zm62Ik^XAyiVi|RXME@$=XoBbDgaV!>?5BdS65wKMPVtWtFK-8Wmh{=dJC+V&B3~@l z*5-=73aXLCvMClosVC*!Ros#qwbR^h(Y`Pn*cI1tg4{C&B;7BMMrMH%HT|qgnJwLS2OT#W2Gqw5Wv+}3| zLJKiVbH+pUUr(@J4HX*~V)?U{JKs!#{*0(%Ujx)5w1&GV zAF*6B+RK8r0AOlLeRR4$)nIYVEs=5*cS139D$4Z6Xj7rf_!M>#Q55ltQ721Q@juR~ zWeY52RTZzPM}R#psqV6lJt#*^7^O96hVuCcrR*zsFS7W_|) zPlOE^-VGU3hItBeO5UE1PQL1WZXh^)XVCe4w8=U%TvfCfC3zl9AGHPazs)Oo-d*%l zv=6_>c#fCo`m-`hlRCRw@IFdZ`7;$)oBNXCzrfg1@fx{M5j))=`8oBvX?L<>_`PcD zY<`?gXDo7SY-T0W|T+7 z0?YAa*5Hl!z~+Gx0-y7txn3B&0`B`kJ;Yasb^mPC^SQ)`NJJ0#iJ+w`B8*_BW1jF= zio|}pI;brDdqnzn+DNPqef<-SJ<@=f2E?a}%O@D=%IRgDYw0hi9orkTZDq@M$SgKH zn`ECj9}YEH3xuc4we`+;3_biFyOwuJ?~HRVlI1-tZlH$S%ce<_6_dpwLTV+V)m}3! 
zol{5bsl_Bxy-_^*Pg4T&+%EaE+!b4ON$o9*fiNCdUA`!a1{>%PV1h@SfashMu~G1~ zc4YCU$sjNcY5SDY2bYe1Me=V0g5PPB^%0Ai2cOXTfUe?BK%+K(gPsLK)w~oE+PMn6 zzI--dwjwjc^jbMi<7p1&UCe=N5!e++M2NIuox;RAOrijXxe<8?f0p@yE;SKI7MISm z)cOyYZb@d7>{2UF7>1^}&mo~>x1Sr!obu*9lA!@Dj%y{@s3}Gg=|rK}uMkqU2GKD- zZP)E8EXGPb7(62l60Ro_&r>K|J!>Ij=cjF$V46e4o&cB=4C6R`DTyVpcCb43-SxnI@@mV@jt5(<95 z#rM>!4W#0Qc0)NMI0aLjo7op7lg9%};G!w*h6bM)X$U5wF%MIJl@%NCqTXxnui4_O zx0eXen`_QVzTZkJ^yax8Ka>mXybT`U@4sqA1V+A1E%={ToG7{|1c%e2taS2%8$E=vN9}5qt_fr#ny$$&8zoCI>CZHAUd7~ z=<*2gW}#(fg_y%c)YHes2s0ZxwAQ?W*Ghjpbb(Lp1Ryv3uuxI_v`fkyonAw664z+Ce3I zj(Gqse+l(%GbN-ceh-Z$)0IHL&d_m^y-T>tobiwXuY8W%&zI_`oM1nyLaDD^esN0z z2WmiSXZPa}o00Z?_e|$n39Mt+9Bgiu^ar!_~~{)%OTwz?Vm1TQDG0bLf#PkFF4bZTM;+%52Kg% zvjWHW-+3Upn>!m@nj1Pg|Kv5g{E&P9;hJ!495%=AejZCg|BxyRuzAA^0$^|(@CmKi z8lu-ac8Q?&qx7VsmB<`Cu(dtfKdy4uAq|s!<80c*jY%V?bp*c5qwx2B>d@Pn9o_MYbI*cGnU-`Yv4;dY6(Rb)3}`lVBhK z=-s|j@DtZ$?U1}?4Wda;w34d`3FesOsQ<>)zzPX__^nC`e98pDrZ7D$Ml{H_e-1%h zU(J|#r!VmSxI5p!e}BG5!sgHwu%y<6 zA^R>xLlMul3qT)NoVFM@pIk|hHG-XRl0O-~(SeEeh7me)>G|=EN)w}^AF266Z|-%i zbW8zM6>UB**cDp6DUg%DWv~SiaWJ*Kwc{YLGmze_m_J*0Q^(-2cZ{m{SI@t_v#YaX zw2g4@^4;=vZ22i1h^q=_;`T!X<$Jt@i2Xjq)qTn$r60pOZ*KG!CxvF?I z_PVD{Wcg!M!}bdwB}$N8g*q7C~^G;&8CTG&^i5XdE8}Ddh)6q z2WO%}afJ|2Jk)`YnsjMigkIr8sy5Y)rh}}(iStag1N_+7HD7+;V|sNN9M>Wyky<`& z&MX5l)VR=KsN(RIMO6KW=-THM7(1{O*c<7xqStZ^Ddd0WO_HnbqSW(l(I5c5 zNE>3Z#1Gink!Yrw)yBVfs(pzclG;`AR@+}pld1X>ixXdxR6wEMTtYQ(3vypNv z!5d*&z`l!lA&XlTCR^C~qOm=?dGb{n(cFmYrjBX^Xy_kDB2O^+SasAI#DZ*27q>0% zp>lnI%j1IozFzoE7`HM|Jg?<}?7|3=qcjc$levNFv`z%uELHRK7J951R!`^4)Jdaz}(-P{N>9$B`p z2z!O{)~mf!yHg%p1G$)1M59VqId-1!Gs>)gPh5~!tDG8^S?VVla@`mc11LG^PhL7f zY~}0dNBMc%mwrCyRHhW54dKj`+bGOvu=;-tYLT*MuBMDJt1GAbQ3`yobUY8jRK*l~ z!fSd1p`V~cKiO3I%BObm&G!cTaGuJ644REAO;z|`@T9*$G(6`mEvd@sDP%KiJgh`1 z3ZE2mT=NL6?{Wor0Dm%Jw+dCu{Ru7g$6vD`5!|MD&3No@9y4nth8S zG0B;T`i0jCrd8dx7Z$H{_e%A7@c|S!Sd9Oz7-a2%71r=zc{$)w_{IR%HO~huj5~AK znD+q6qXXHhKe63cbHm2O7d_XmPnoi29dfYY0J3)Hi`MJo@7nUsZ@TWN@}zXU2c&A0 z6i;Mb9vQJ{mT63_=*#{4FQk!A1yC7ci-sapw1R{A+wKJEX7y-+@s8qN`&Qknucn>q zk##@Wu&*Gw7(+_hWx_s5;Hjx(K6UmVHC$@6o;dZ4wTXp&s3N zjyrGE4v}WvjT>!Mnl{}s?8}BdhJ4rplao1}TK~c$*g|$s1)?4`Um^504HnCWV%eRfV zNv7@KMEoqc71=!Xv#VId2hMu_vFp5pBh>oxN0qk{)gc4j{9jQ2c69PBJ+{81`g*12 zo+i>9U4-hrfd5TzSa?pTKuWH%Kk&&vu*VKu==rI0IgCyl)XY;SE(SbQu&t81MIuiU zYo)+Z%3sy7II4+<8&Cd5dcHPo&8vd@ z>Rz=Y^ zZMz42#i|PmT7p=QzDcih2w7b;qN@ElnBF@dJ>8H__G%MPT0+i@==VdqJ(A zOwTFp%GPsEGzG#X`gIbXD)FwG%SvfDvv<}1k_X#Pr1*WbMK;~n$R^6svkgz9^Eygd z>&{FkQnrpLht~E(49Lx3jpse*h4BWKU`gH3O-xPSuzj5MIcLZLzq9YfcspFX(l~Hy z(_vY2=a++d$M;?rX^=Pnt?xR**HeY-jxxXoH7ssCXY5aNi~&nAgv(C+fOTc2d-C)j zq!I%X_EuxX>E%Z&DI9&1mT zHAmu3o!@^dD*A7aNDHc|96u6&yr%40XkkDP;;{Vbwgg_-g@zm>hQW)D>>7LoM{}&s-~=SpT$RZ4Ah>Q(v2(X8t~&6RyJLiN@fin8dE4JFSOs zXUZz%O-M|{CA3pCU0bO}sNcGx`ZGG>b>*bUf8S;YAobrMSlO_my)i*tgvtCZ1nDTw zG2&}rJg}xjp47ai)QHApx2Jm`2!{ju-S7+Rc9+oiRq$x7DpMDA{(%t3b`?<6Y_ll1 zEeNLxb2>eUPi`xH%L8{q81_BQ6kFwd|4)z=eDMY}Bl-i~e|q90dgcNiE_HWkESK4z za-|?SIjrZulH*BuFZeP)t=7sFR#VkRr`MN|PjI&644l z5{YDZBo+TJfkzV{0y!Yy#aF0{;P&M&JV--tMXZrh5*DQ=5;6%WGm*Y$PRS%rRmxy! 
z(uyQYX;C}gwpWD-#Ca@5OH_#Mw+(#5&a;^DB8ySa5PTUm3-0MTa>jEhjiS zo2Cm*OF*goYsF44H>I8G-q67fe2oI_7B9hu=XUmL#fJApbnxK#nLnjy`tIxM=H|*w zkQaA)AEm&+xQ#eAGzH7|tDrcV@$ut!|5rSjUZ~xnX_>Y^RpnYNi%EWLi7hpgY_5nG zg($l!@ukdhXohT7IikvDgQW_i+U&q|p#d0mqYf2m%&&*ZiUQW9v!H33gON4fWvL31 zX!CMa0dtZl6KXY*hq|5xyJGsYMp&^t*d{i+}$k3UP zP|mOH^(%^MN$3_J*_eZJgIfJmYqs!jjd+=8(K^#U#>1Ye133z0mTb^RA*|@gk32b+ z##z&9WQnbB1*OgS=*C)B$W^hys;P2i)Os|J$I#HOgh(u52$i@mUDtMGEHK^MF{?P8wG`$yh=v(6*&+0@N_3D+M8{l6cZTTrfEH+(8NvtZ~W*^tI@s->)fufF?||F$iAHAJc-V88?sl zbf!jn@j)Ye#PTNQFR0t83^X2ma+Mkz@Tgq!MX9b|w4uy592nLuAJksgJz?3dCRlIe z(w8wK{|@0+`MAWB;DL^px#^nxAC#R_dthM}W@Fp7ZQHhO+qP}nwvCEy+ZEfDNzYvM z-1UDr&)Isvwe%u7@UC9$z3Ht^y7)VuzgvObIVcPfq2X+b4>36=KXf-@=L0oWdJ{^u zh_NV;rf0NP1TZ`AeDZf&%l^zNSqrh2qFMPWOS|z0VWPL6fkRUw+4G`XE@tRO73o|0 z&sRk=F|GYYLt2Y)xlY@$OWu_MXm2wV>Gc>cqUy0z6dsnN?u6GBwg_l!flNe)zU4qz z+OAfzYF`c3)r@q{p6sBEw?0Z0=-H-oNJd%SK&x$X=@gWRhts1zRk}QM^5L=w$UKsl z87G>h6gw(K%YkIgIR^BKZ<@*G_EtL7Eiq$eP*HC<)zdH;?=Q-5Ej}2CX=-m1YBF}0 zVr9l}L27AkMfH8S02J6ByM3sg^O`jR?Ir3y05demlw*Y0fat+rgPr;8)qr5&5hf}N z{{Bb*Pfxl?In#ai;7Y6Aot9Obu8jA=3d3`4{4as}Ug??s?cOHqwN2X75?5NJ1Q}T_ z0AJ7OzX~;s3R5)a;6FLrik{HQbnb&+h>~zw^K>yqq0p+MK1-v}rE}4DP^OVAz;}4y zqin$TzrB%d{XhyBFLhG{Vk{@1hSCT@?7XvOmHAj&Mwwo_huP6VTbW|NW*!L^%`hV| zInqvp4o=&-S}<;@b_WzRprWAF2CGzh>~K>$h>sufZBtmH`Xee9(?aVQf=YF%dTZY0 zvD+kDT5cqz&PN;E?B;N#h3`5ziLP2LVj;5EN-?NKU=z+3tB_v|3KT>8w23=!U+ct= zEWHXKzhKGlf8w-|g;-*ZUQVpoS$9;ohR6Scg5N*F_s-}PO$mhYkNivsCCtU|! z;o~R-bL8SOXj~NC@J73ec%~#_e;y&J5mNjl>IBU8K*|`*IJe?*unnLY%~w`)T55C3BM9sHRgc}P zo^CKKv6#bJXF%8?-I3JS_q}ONHvjg4+^9`RwLE6vO_rUHmy76>=prYbmw+McJuef< zwB_b+ZVEzLn*BV#xP7Nfc+dKjYC>`d1{4Rk;a4qPjE{>wxp+B!!_e)_?w*x@-Y*W{ z!LHqk{`C}O_wz6ap2{Mh?0uaa!ys0PWU6-Faz@!H()_z`Q$HhaXWS_$Ad~-_iItr? znp!S#OC|b{7u>N3K>}9yRoopnLn>2%bq>c5)X~0$1!mvI1 zZYLsBU0rc~{mDKP-lSU$-AC3ef;I?%?vxZdW`}g+e ztE@YY$bGGZCfr$sjww{=pfA8})T5ruYmj)oKdyGN>^6W_yv_4Pih}Lm`$E^(_hwwM$;v zWEtm^zq4{~W-4+f=~9pGPSN2MHsqQB+6uU(SE=oO|-Xo7qM=HXYF?h zCf@-Re2D(L1RJ{zxeKJGHG!)yCIlgwf7SjTthX?vmmXiobFwU0#*u}9wN7-}k7uNZ ziNG<+M6ERNoHit%Ud20#C4hN5mK{$uq?l3Y8)$dchRbLSxM*fA!^q9MaqEuIY)Dke zURs9yyX3w!v2ykD7X(l{_0@B-6>OwGMX0d4nzsSy{*O&4g>oBkWq>Wp7%ws8tsG>q zBb4A&>jvYj;bRARS1rzI#^t==mlVL_9wR}#Lmo+If!)H*+y-g*I z4m!!qqTU=m3XP=FEWWHsgz4{wi$#ZP$DFMfYx*9d(#y6p#u&gDw|0uhtz5)$5`p2v zSWv8SdX1E`VH{(UPi++~92>d_M^E%^T1}H=`c4nty^TkIn;0`gKja?zp*N(yu=p4r zQsOtCy<3Ukmg6KF|JPxMke#piIdl3QKyJ48xI*0L=JgZVT+;Y&(YrD13xEdW^}5K~ zVa(>&4!Q@o+N+RlOS4cu)_92%kWT@{t%)Urtm>%J{mN+bYW2O}RgT~VLNg1Ua#U=o zIt^;PK~rjyM7X>9R>2%3P5SYnlcb9-UA(goryiZShh6YbMj6}ozNn7a=&pc+I~tH*_}e$n`MA+0X%{iqS6W4U zckvuyC6G16lX-YT`o_>h*5&74aBPr&)M}*( z8AgD{R*@bpbZ-MU#=a<_s2mZGD4GYLHElhQ^~O@h>zCcz2GltdcH8SDjf4o>XvVIy z-u#hHs*l1%9xt)?OAd{iB%bmGE_dtRwq*IsSQQ*V9Dp)Fv`(Rk(&(rt%+AwOIhEX% zEk(0-$GnbMM-6&qhka4FqY+p=5r)gityr-dhTb6N$@g8PVP7-E4GGBDe$Y?VKIZf3}SI#smAzDp^=2KBx*%lnGsOsdtfPPzY4;{{bU9 z{CK)Q&x5}$^au(qa~^gLvmdTB=8Ma`2Cs4sZioGp@40qehyp5ol}GL7ay>i_N8XM$ zenjMKxIUE;d_w6?Y}E8D8(Y6Kpc>fU_A6&vf8a2j+=q#M($OJo})tU5W(SkkZz zYr=x62wj4*aX3xis0BHPQAdFgiemUyoL0{J7A|vGfrbQ(R^vX(;r=wg_YCeXQF!8t zUeR&Vm2&)%CZPkIA>~RWs&=Fb26x*u z^Ms>~r89AmI*LWIZ>NjA1^@Y>9dh|%o!2dkss@hkIaj|W24phS@i|l=v`r_D_lGZkZrB5q)ufoAiUJd#Bw$v zh0eu@q#VOLh^grQv&n+9aZ>dE>?{7ExBN9x|MWf^z{dpnCRtZGz0LmYW1U^NotsTH zbY{%Odsy_*{ISM-`Um{qLA5<$N%?=ju(M`7008d)sqomDx&G(=qHk;H;$-RZpEHc6 zt@EY?(yt5UoEV>|Nn>=ko4$`ob^U%<>1gWtOLD0*q>+U*jm zxTZR3#nJ+DX({`D<@xyL0|Y9Y_O|Hv5f|yhD~*J~Un6m%Fuptufhh$WNm3HE$Uj+X zHMeY#EDnE_tJ1uvks#^(68@5Cm~;WE72?{|Ap_7MD!RLfA9d5|#@&p%``&r*=l%2g zw3E6Ox8I8+Te^S7-RUc>-kPYsck$sKtk<_-c^CM>lh=Ee1fq&ajCUOR5S|DyOyM(j 
zBR3TeENy}+ZF~wc-5;#8NR3P_N&O%}K6=q0%O)^2FH@KUPiI&M(Xx0v9iV7$mRSya zqC1^Qs6~=(XH<@ESenrPGxRD*HPw26GH!lf3VkHqtl0c;VnNr0eIEh`mluo0Ph|Vt>~fC4m5%2sVNR zr&m4SUtn4hQbq{7?f+z%9I7BrW8xtQT`JV`BN|e9^>lu0I(s`5apl&18v{1l2;rJb z>R?$(rpY>Sa8%9&OU2g+KnnAZ!Qjb)Pzk#e2MGIQ>JS7AVkZufMM|v_RkQgCwht)& zNKd;Q?K1S@M+&7V+9m1!V6V6`CLb-HE$iSp1=n1NT@kv`VDIXUAjAsQf*8+9_UUk+ zE=Yoe+X?KuSYRW4--yt8&|S9X-WaPll`KleC4;M0xJh!`J-Zxd$T z%X=F<&ex*Qh=j^=GD-vGH%AhuX2Ud>fOD3khw|muB;@V!#=WZ-M6Z2 zZdXyH+70zQL$5dA>B?u6(%9#6zkfhtSzam8f5=FaA4fDDX=scD`{>5WB+>(Z!1>RM zmcVCXcN2bU$gITZHrX--8dRr1PEP&zyQoV-KY?|LzkDHX$*7QMBmF}?RZR-b#-vLO zoyEM6^;ask?xhozvURXxTV2`5iD2Tm)Aw1YON)9$ovW@~Y?iC=J*PHzj#F~_Y>xQL zgD+oSU@{Lk&YoHQHek@%z#I+V<6tK`s84E zh6pD9dWT4N0tfXn`BFR(sRAvAUcaxzTomanXIF!gdA`)z9{22Djr<$-{;hGnptea% zl*=88r7KV#4aiH9{WTBFT=q1dH-r@=tNPWy9M`|>cgOLJH^eDB!DA)>#Q&lRxPF`> zq!ychC`I`F`9(VOw-m5&bg^&-=k@0GgA7Ukc$6A=;mjy~EoJv1uX)LG<`5zJ(9VNU z_=IZFi&A}AI(@!8pPPN0l5Wzmp~gv$2-x&!uY;sV4k@GK z=BRFhjagVHc5_%powTSHj-a%>AU(q4sINbt4}SIYx9vP=C}G7=R#sQP_RBV7Yr&2F zOuo*NEGO@za%xSGiLW?g#ge7^KQH-nh{FXkDI&CufVIkJ3N~?1s zYq@UC|4er<-g+Oe-to!RX0~bbi^}je|5XO^Ebf;=TBMK2*(bn^ zIo*PHDo+U-y0+zV2`2T&smp2|gxzI>*V8L|Qv-gOdCpP#aAeB>U;~kXzrjFQONwPP9M-Gza37co}cpb7A1j zbkat8FQ4v~V`)I_Y>9l`<;%WEw@qwwU(x2>7=_t*{UoloL!yf+2XdtmU|T@v6&<{1 zo)&x_8;>_bSk~M%atMd)&LxiRHd_qCt@;Uq^r=!cbz$Bt*(VhG!4O<1cXi|Z>r2>= zJskFo;iH=c!`EQ)h?(?2K0dFe4K^(s%G)8Mac?~txq8-x_YB>qgN7c=*i_`W^<~UQ z9sX|MEu?##%jSETS@80zZa2bt+sm2;r}NZJgGlYFK|8_gu=bTP#@ClEhK6+hkm23N z${=G&)#vr2#P^%n2ZdL@WyaRQR~s4UShK)uq*!OkWd83LG*viS?I5D~?6-zf_?t&_ z^}(7MG8czz&9tk3)c^0c%9~|w6X18Ahxwi7>Hmke%EZ#x^tYAzzlg{GH^{s4sc4~| zzyd?T!|j()n$ss#$HhUUPY>jXU$Lc!N&}WY zzx1E;&tktfa6%hXQ0g{NLRJ+h$RKJ7&$xhW5NKE(0U~wL*-Qz9-{sx9H4j82pgvEC zj)mBA%sI4HsV$emCX`Jccy09H!h%Eb7f^~xuh*Y`Pj1*}yiCXtTw3K_as+2f) z3&ezutbjjj$YV1+dclP!7UJm9%oo(TVLMd8BZ*k z{%+0Vfewg7d4aWqQi_-#av~Pf(b9%Z^a>(Diadde;eb+Jf<94|MtJ;~IBoSBG>y70 zh8Z=oog^ivzQb(uhDVX=rnFQia2G8s^CpcHR7Lf8{qkLPFl`FQvDnyFCl1nq;Bji`<7++{4YuJ2hdwm$ZyQ zz2rfMG-(>^Nk$+sFx_guH0mnN%rva4JN;Oul9Qa<77T@K^BU>k*IXgp*Obi=l3am4 zbkNznQY#*S-+v2+R49%0o=(4S#LK^-DQwfKP)WH?l7ioaq`pv}iPC0tRbGL>cp6D*+ zu;`P=mOpkq*8pev`I4vl8;l1XE{w-%_W>sNZWMoTPv&dbQl!=jn=wC+sfpbb+y@M^ zbrpV#U3M7kaz|1MgrYmtiK2;7ZH3C&znB`(>xGbsMq97`flTXzxiYot(s+lPQ%0Lo z-sJIQGtr4Hofbj&{HYwGYXYPxnU*BTma$W3IA{P%I8Dk#^2WjcWagA_zRlb0vUlA9k`_yF%Uy?5WB&w^>f_GGwVSe+0|9c(iIQPs z%B+K!B&m?f(yLl_^#r$>HU(N{zi|=}#-g0tg$LO)gtN{7R_ZSCoS^Igw)G4Je7pwh zaEUCH46?fTCsNMrlI$)%J43v_*1z_?fBf5ZDXH&s&!UB(z|%~I=Rz~J2#%~wft`HmNk2AB2xGqJ_ zonNrgWeuFRkri1=)rC`Hncu%0&#Kc_rNx`w6?+sSGH96ewQM38l74|pR|c^y7oJ5M zM8=cs&J%Wn+AeO#4o9!Awlpj2zs27#pFgL<#v7BU+JigP`XNHq3&kj$^V4hIUPi|EWUOEUP_J_^mT(lF@RJt@+Ze6ICmxq>$BGOO5y<^KhgV_CQhbN0Ch+R>ldeP}k9yC;I zgAc(8;sbVQ15 zG&7niHIpf*MT*rTG zG*qI=GjlZf-wnMyTzs5bpkbDh@E>LVLw&Qi)e5Y5lpdAdCqH!F|3#Opsl?Dz0UFB& ze$`dC0P=#?&?TA8Py?zQw%ApiN zvaHS-bi0bospynEEjl>!5FB*i&4NjneiWILyzQLuKI%0u;V}CJsEPGSpmXB<@q_|% zV`M{nBAm%)<_whPjm6w1csE0>FLHj|qzQD?U@FNLrUVq2@X8|LNHJr|> zLam{(;%JJupn{yUIs=s*9|__{o{n}9(Z*AW*0QrxIYnR-uIN?2!P{xlp=Dm~Htp1F z<0M0knb&)W3NCT5H-B>)?RjPbt%$$hp_&oI_b6No$^j&>A|=W*jc|O!i}Qs`Hmbzg zv)RX6XR-eS)Rr*s6lpM^Ib0oTfw3bmB&&tgWI2R3V1p+iPB0dU5Djerz9XV?3`IE; zR}vePkSP#~(44tx!*(e*m(J$uG6=XY zYOXey&u^|D6A!Flv7`>pX-Gd5R+sW>NN?~7=#9njd@}8>1^_eS>OZKQ)kVJcgvLU; zP4rd|SS{hC!8wp@t`=fA-sWp@xv!>Ns?(lLtvh~vsfwjKvevE*tl=fMiT-@Nm7U&! 
zbun01T@XO`WQ;#~>63EdwWU*D({a#p zQ=^@F4t~Pc!*5_3;1kS+xc|dRi9A23d5$m>J^>}5{b;LUdjW(qu|He$Fa5+ysdzq* z;Vxef@!0blqxawDvDKS+)S20-VIbzU%#WGT<(UA0A&(b`***zlWDx_I9`bk3khDS= zY`A8@v!K-h(mHFAbcwh`N->gs`O{FSD^i|)B%h}G17b4RJSBt}RYU-lvy+#ks7VMx~p`EC2H7|H8w-1!>;FkI; zTKn+zXjWhQVO};aBxJ5Co4N;`@C|Jan*dD%Z$G&A>uNfD&Y~sMak*@&$2E)tV)ghLfU-eO z4R0;7-?J-uE3dB010SGPH)aI8srz<1dF{L`ljxI&kj=A+9dbV3=EQPq`ttsJf9ei5 zs2u}P8W3MFlSPwZFSfmV&y9^H5l-|A9*Gm1b8#RcMGAJ)b4noYY{Mkbe4L*Aj@BWv zj1zYL)lMR8nDt#sTV8*|%gdkT`}4J2oc^N9V*19y!oT{T8*m(Z8~!nhKB*oGfCM1~ zgEu8w;pu9N)XAy4aJ;) zuN`CPQcEPOwH}HlyDjlgwTl9C4ro;hED`ga`IMviRQ+td-=JrH3wT+%f8x(KX{CqD zvotYX7WI(<{{cX6lHbb`x$xzra`a4!U21&Ej%mw{uj04XR1(IqVNVwf&3!3#;aSX_ z=qQ2AX`u`saZSyN0q020oa#Fuw20;}G?DiNhQJqbxT(lifLw)7DKh8D8sBr(C@Ucu zuq;I}#J7!Y33sYDIg#o_Xm<_YuqTPN3abg{Znp=8h3d?Zu57wk5j3Gy0aXOhQi-;S zge47R18F`aJCA)b1|edg3DKg`f?SkXc7>2+jM7$6lTQ7avGPc+wp*!Q%93Gl!Cb$A zRQzDznjO_#j93G2af9Usm@+>Xp-Kmagjy7orzQbJqUjJ_gCu7rVBW-^O*uQ4i?e## zAv1iVBC19ur+mC9=-ns~(}~y!AGB<{Yn(~*YSYY27-d zW=D1>3#yn+$hflUTwuZx%n;cDJY+OgVwCv}e5eu!D2m*mfmQ^=I4d$#TyYNUk`{_- z4{KPJjh=Sbd6uyOP9rNk6`wNa+gr88qFoi5jh=o~%y7_gpq+{-$P*HzGssu8$V_&)DJ!BR6L_p#=(nZvY*kZL;`<~ZaT7&#=hRpn_`6vP7& zRRdi6)$j?QcEwRDB99hTPX(x!Ah88E3(g7aCo0^#Vk^{4h8zWt5bC`)^lH(zgFU`K zbhA@i#BJ)H0AnO(v?x{`B$mofnu^iA88a&WwAt2rxQ70IB2auYW>@1Js&JSHFw&D` zKu3DgC|X)d;kHnr(`gu1>mpc&qSbPn$93bBq~G~2*xf3ESoD2HuX;OFL2Z~0mNthD z?c%&QGm7mUMzqo~z-4HOKsuhi1^#DS6RYf%^Nvhn<94P$Y#e)(R&2J7*f{BoK+qh` zxE4p#+YS7Ini~2KU+_eIfMT5B-dkA`}f+}o{Ms~MVjduW`Y1ibP zfEi1Evp(%Lv6;s{;x&g6) zk|9Sntj*Di(DtnECkBG6ldB^VQ9u_6lw6nruy2|&aZ^Es_T*_#ny%VYhqjy|*H|oa zODbG9WhS?nwQbenL%$5BFp*l@3#G>i4Z8L5#aI47B$A?*2$m4SiX*YG6_12qtfXd4 z7tXp_ENRuoG;kIm_miN^3luR4l`iHIcGR88of7eT8`wab!RQ2XuI$euFVQCAp`CJ| z((|x@W`HjDZ6<%GHSEKn*veuGkvD|x+mUPdYF1gUD0en)do|~I1Hxfp%x}FZjDq;Pp`pV z`xp>6tJ8DVx#%eHgo(_iXM`U#uE1IP>(K201=W5t&Ah$xVz|03oerBaHl=B&VRTx0 z0k0zaFwvxO8))kx{5sCIRZUjKfXedt_7G^Rl*u9S93j|vs`MQjPYVCO9izYNGb;Dr zyC=cV(L)Bnx&UoZn9ofCYgpV~jD0bjH5u(v~yTZ1?ni{dUxZPV2SZVYqxwmzDOgwqLWnaf)dY-DVZ5 z_`CQ_{N)!d;7a3l$pxy0Q)KvpTr{VsI@mS=r&Z3#Q5Cn)`fI;djbAMfpfyOQ{>Hd* z!6hn3Hgt_RPqJwvy(5{sEPJ?e{bwd`gS{jd#6G}zVP&kXFKs~Y{-+_fcg+D3j0;+@ zUfWab(2JtS_3b?IYRuebuV3xYYlZ+p!Ky*q4q#x4`i|oM8xo$PlY{?yaa&y)Zji>q zh7CdW7M6D|j`!8ZpWK%#t_`x}?2zKeX2{v~)bBURN5D9y)Pt<(>5EmTwMt!cqY=;_q zwJR7hvgi8N%12P-YPUuVrWqWt(mQa;Kofj>GyKtx*$t;_3^(-#GRd|-g~KmrF6&qN z8hx8$z_IcwGAMx%K=}*6!QSPF#9hc0;)> z-T!LT)_7YwZ;D5rGp}oBh`W<_SI=HsZwkww=jT4Ao}2%b!`6oX-te>VxwQ0G7rZPi zd0Sfk-D+ul&Srg{n_ug1O?R#Q$~}qyfd2Qi zET?Twt?-YS1L~){-faTDL!$hti`FB$!5C%U;Xwl4exM7+f{35J-40IHB}`ZfEocoO;?6 z&@PEiL_H6uemzTXVn|Fqz5xbhS+FQU))5AZ4)K;@WEp^-Fxi8WgGxD+m7VI09D@M! zh9ooIX-G07(O04$-G#$&1hCbg%4quY4qqLl|lUm;%~hMr!0e*y{UM%5lF zP%C)w8buA|+6SnHl$SCCS!(dB_gYsX+`+WGuR~=|DaUBh$zWBMP-Ci#J$#&GNoo}p zXIulN(5(r0F@PC%#NLG;9d?vqQjJ{k5KQQs&Ez<8F zGI#>@!;j5>ii04P>sP6lt@Umlt5`4tz^k6bU4X0Q^1EMvQfsE z6uj)9ca7f(bJ;-1uEc8P#Hoa&jH?m`T?J$ra)tf7tes0)FC{fose_YwNrfRQe8`ot zMXMGDok6#ehkRUF8&SH;Qq^qQV7ckI(8R0@UuW!QoS{|3*`ckdq`qYQqX8ialOx#f zfzHl#|9G4HfGz^ra9VhevUTeknkH2GDM7v^FHb%KWs7{> zQGA>;V=K#+7AZv6SvlNHZ$1lM#)jZ}q*v}|u6&G?#1zZj{}L|*BW1!|A96q*G@}?$ z-6XdUOoxFx3{OVYa;O4g1s~;*)->MY-#`NGY%|pIn~yGSt@r7fdPIq7`;mAX`{ z9qhipzk4=_Qu^e3z=^xN?_hYrGj2a${e;U2^PwDNc)Uh2xDEMEq@o zvEz1omW`QvQIjHWOO)qbhq!1Hywl(;)TQ^6^O9}4cD4rzDE5)J7>V9uu*rA2j4dK! 
z1CrHh4{8_><(JF3aO3($eX5`Sii>yiht6cFO2DH~Q>EoJLeshh?(z?}vtV(_x%{u5>MccwHw zlJb)!U2_!Mpr5SI3$pS`D|>rk0$>z_)pW*syij`{r%%^QQ@qR`v2i;i;*UG=+q0XW z$)&bR=&~%j5#8~#^E=p%g93dO}jFd`#^*iV{CN-spFyuxNWOd(4wd15H$EMMrlck zP$uo@YN^5O+4pDeoK$0I_v$U-Ifx-dbvHLN&rQ!xJ)q`{PN;!Qr3T$c4a;vi(tM%j zEJ>hv~B_2CV6my~%B=qDn;N!cEhQjNN6U6PWbAyCKXt`Cb`~!Dd zAEm6>Kr7M^q~dIXAfX9L0QSH0EFy>)h+mV$Xc#|hfLxZcH_j};xSME-FpnZ+75)I( zLo?+vkb;)+r!Gyp$!5=<88z0@phufxFCBA70P|2VIq}D^8Gx4Ejj|15(Yk z4J4(ApEIK{9cM#;Hjo~4lp7(r6$62CbP^neZZ!-dPWL28g)pnE3P_a<=Sqh}phFX^ z<_{XI044ukFgPZPj}N|wPhT^BTkr4s3qKazjHshGcyV&~ZNSrkbGYR`kM9?R4?g@# z=MVzbegGdy@UA!>U~ud_Wb?;UG(gk_Bnk30SaiQwZ-L2F8gA+>58C!6B-)i=A_Hk8 zn>d|813Ze7x2ZVAy#e7KYniQZq{E#CMTbmeMhJb>5~;{rNBqst!Jr(OEk8VmpF%!y zu8{zso}iNYcb~$KTpKDPUG^e z9bl(*wQ5n|W8I)+R?iTH5euk?0oOj1)eg0!vw8$4mH@r)n;mY8T&OlZ=LO7#5R1h0 zuyY4F*S$791~kgK7gid%`VVw;bM@o}gSYPu)Te?IZNJ64C6)By=85oy5`RBA6bQL9 zJvR*uqU2ys{Q(hR{zMB-K0)xT0_(fT_}Sp3yYW4_mi0KZFsoTpcIFy9B^jDds?p7P z&vfA|Vppr0YIj0*Qt;!D36_Q<^_qO1SM2%9)bOcQ(%}#v_%L_k?tbXy%6eHhCL9L z`?xr$lh8oGQL#BbVgJx*r9?WZoKVXN!Ho36|BP6aLUHBNv_$oacQ^ZaKWN#__UA&E zD>64ddzl_Qa00LCdnuf*)<%5v5MjaBN+(Cw=)saXNduu5VA#@@H#L!3=2vN7fkpX7=M z+R#u@sK^iB5W2i7O;}@I$S^DenHg$}pGamIylnAvEMc6g(DxC)f0)@Vdv0G^{*rk!N@`QA=gINpav<09@ZW7!2U ztbjJ6L5a=B=1P8OmBZ6fQrHDi@zYkSv;@^JBSbdc93r(wtMs@TQw>-**q}B7f`m?7 zMvL{*981CHvXpXAi$OxW8wvWP;`VJOFfxMXOYXTIbdgHSksB05Hy$t6<)vD(TzS)L z1sM$Ocy-aL7cNsyCt=>)%mSC0QBG`u3>dYI&LZ1uQd!^E0No3sJr^q>2n9{1enO|0 zkL=9vSjC<1u5PVR&TR9X6pQnfb(PWpHQ z+~-&oITbpqxK&!P$!&Gyo%rR(!m{DW4+?c=1+-&=6n!3NMZRlJXT%8^oL`%IDU!KM zzn?%I=aNf>@_Ae|dC%xsS?0khyy2v{^#(Al4`kUs z`G@RlENeOg?h^Dci#b!NYG4t83$5p4Ix?JRH8L5t(?KIAjHU=3i~`M!zn5f}W6Ujf zf}1xRGe_$pQ43zHK`Y?jW)i-SH)yhO|m7U$N6{;%q)Jf_LSz`>867@*o>&qR( zW24Z{V(!2{D*XP62idc6ZW7o2(x12mP$lq+ zDMA_JQjJSaxaGkcEfHo*k$t02h%>Y+>CU*-VwOer&wK`{(e>yUtw9i+9L4bHO}4}Y zFm99j5Gj3kC(tDMz{3!glDzQEQChCi>#o&r!Co}S9zvpal_rG321uJkpln`9cIq;` zcKJ2k{(o?$3ac|^6TzUv#^6R;$bBt~wXrVQRYEm|ri7FC7UykVJXC8WWSEPs*MpOc zPMWe>oDG`~BlLP<&aY9vU|Hxt|SL4g&U5|nk zXE##R(T_P@7uNoQ>TIfAiLSkw12>b{dmjBFudISi!ZR zkNJEIq_tmvn2nb(IopPPUc=~e*lzXBWYpW%Nybd2qTe_&7xCj<%#oXP zBjAM98^#2fcKn^-d+U!v0tit)k8B4k)>6O6YRyoa6y#Dd#$$J84y~x}Brm`T9TD z|DNp&7WmRCe}%cTsQ(-D+S1J%j%?jN^V4u~bq$BQFZ=dIJ-7imy=R z5>4$CivvZZo2R)XA0)r-xn2K$bBRf`TpM@qb$N4wMiHl7^E}KCP~E>XbxPH(gauc`hMg^{^M0@T_x%FoX_tA zk$YU2KG1O7I|_tuDpta>w1Yw`Eh?QP?-`S^gsMta(Xw?SnNpin&22_J$Q6S99%x>M zAac+-d;7K5ur`MDyZ6vpoI;*Uz z4|}my|4FDD0sAU72BdYaC)G*RP(|_W!hE49)~1_LQ$f;YR>_v4#X{z+Sb=AE zgYVNRE>0YWm9dk%Fm`6{itOL}d3_GAbZdL@clPB?+m+kfpa0FGt2eVRYhS9y-0dr$ zxI+Kalb@!3|X;ZISy63c9yvnzo?(Xc94=uNEDszPx~trn{xseKBru~}zt zqEeX|tSQt3qwAzfK^(2)VWhgDrPfW@u+6||ay31-8ZH;zy?hUi9dFW^6i3FMWolF; zvFdVYnX~a?>I#L>eeODgf&?#f?Xj6$mW*7|5indgPK{C4rNd@3(1XbFwdtIRXFr`G zLl)y3>t&=8_b|mh3bEjE?N8}Q^!ECQmE3G@$AtZ}0?p?ZV@Rv8vx=`wJHd35goW4XU7jKzvWmxBr zpWA6$iLw+2iEVTy+Q?h9QKdZ#MLCgN#-6{hu3sEi%FW5qDKawDvwZ|q3ItqaBAO*T zhT_-4zvh*neRQ0A$WW=?bcCkq=i#!w7hngwQ$!_F@896Ky#P9!&N2er7y#`zN!FW} z;vG*?Ajx#1B5I$AfFcB4+@P_354Bs_t+gqj&%rJ4vZ$=ZiH1z0E4x^fjK-Pn^qq=j zf(^w0F|3x4>(%%d0R7pNR8}c<91E)%@|s$~1g*eegHJECSj$S)O9~Hc+n-^2B#mzY z%(@&riM4M093??ph_GrsJg?mzvAN@}oj%;n?ghrN=Ic7^;Sc zSFPyJRi~1BQIo2t9Q4IU`1GcgqgG#mwWfflIy0Mb(||OboF>thCifYE#7@nz+>quU zQaj}LoIdPu@;>Ov!;aCLF7sPl436(0CcGt31DTqSylKzLmFJEV)M3`|M?hap8W=zf zz~$cDKSIu1?y4h@BbH*m^l)5o9YqQwc0mb(U7x$FOm240bRku`qDIgG z-+;NC(wnbvBRhg*+~w5eubxMZYj;F)TX6G!H$m-jZp8#%3*m8~5B(*Cv2nAzw|ii` zdyp9JKE2Pl+2;uxN8d{WuM8~=<;>w#aNcRqCLJ%CTxpuZjjglIH@ns5S|{P8U-q)~ 
zhg_%OzL(N|D3(wTZY$Z-xx_~q+G}1cMfjh|=SBek`st_>|NR>~UQ@9-KaBrb9~GPoK?B1`^5mgL_(fbF!68nI#?-_S~NC;xqVeK;EQz7A(B@qwzasJavVq`FPfX(MlaI6MtGy;~T1U0xAX5#N zgR>=n`mqsK=aBai0(f;KtUmD22^iytFr7-Wh<8%bLua?o)W?*glMVwo29?er>3*h& z-YpmYD&ZU!azo{O&(22<2sjW56R$b2s4K%k3onYcG!pkUj@)TD(@w`9Ci2oS{`h@9 zZ5qk-d4+tc7{h0}`Sl3NZ19$7ncTPLqmR}>CS8p)U_#4~=)1Csdx`)KW(<&hrsC*# zc#`h7(fo20$!sQ}y>pBiz_dTgzJh9=IZ{{0&Fy?w*bmy$e%%E8NT+mgF)Xkm=KDwY zVZ&OPpv*Swy^fZ*+Q!qT`GX&Si*~0KG&I^oP zudnS`Q}uY8KeU^TA5aeNTb_6>{t19n@LvH-@Eu^6MZ&c3UBu6e`2L?Sh_3ClVu0^% z$M=Fa{r~f$?<1Fp{rW5DFaNUC9RCIWW^MDU<^R?Nnw$P#HUCOo+a60C#qUHtCn>xE zRc})gTU4bj#I~rY;}u8}YaI<{3V`}1Hgg?dFsYw3`(K4r=n+|d0tEljv`iU2@&v}84-`qk^T9j zBGuIxVz1Ln#VFA7r|R+>Fj|_URH23v3bs^h2&Dev8a&BFC^@pQpNJj~Ss*=;{K6Ky z@&y??5XK`XJ0g5+Kuds6(4vQSTu5WU8#61J<>mG4iL<&!gIx_-w;)-CyZq)@tz3I6 z@POKpFp5O2uqbqJSEl0?85vTy;+pBy7mqau=>%#6F)RQmztTo)cT=R%U27$4gd zei_-w8dk19J+uH_A|lIX=^NWrxUAh*cDQVOt=|55zNdf7kB>~77p>d7T-+qb%yX07 z>U~!R&v03}^Ur*{ot4NTtMkq&*p)J=IZBviHn8Q4P0ni$?Gsff)5`*BRvykF zGh%@%(ZyJIY>4b9&%2~#dhDCliSc$sshVojr4TGE+F5-9L?&#A#`S9VuAFTe>qjd* zv&Kx=Q^$uOS}*r&^-euI_Pg%Xs!$PgmVMhYoR>qIn9y1tTAZ_Neq#Sg_AA;q3=VU| z;b?Kr5cO+CyCrsBtZ*BiJOKr%rkP?_&8AdoI$q7vrk0RTZKOR%KQR+i&L!t z^36ZZ4fdB7!xSpBUslGuvTeUrF+XL*SfbSvEIF>P$V7rv&Fq+p> zOy!m2U^dK%f_AjcXb9>yPNRiVJn}LH*)eL9J1~tZh4`{fg2tJV2afqlD{XIWB_ShQ zn2>q&$ZWW&k#^7Ry$L)&$OP70o@i()Ps42Tu!Zs;(Vo(8uEel(IS}$ik6BS`P{42L zJ{z#$S%U^{6tL?=Q-D@6DK$5OrzA~pPKXBWu5SHG0Mv_5oq zLxY#q532gNc03nbqdkf=ynlS>&00f;*8x6x)r7R~AAFq~wRZH+UHT1$Dk}|d;N`$y zQccLCb2xUQce`kAh-V$cyC~;I5oiPf^z~1U?r!zv zXNZI{UwQ{?i{^k4??k=Q4!OToGQg8pk~eo5(ojU>_D)|%TjX5g-|L>$!KO>}#vR(o zK8$Q{cQU_vI=%-+E3eq{r;n_ri+(GYellBW@A+?$!ISAarXJ0yy8-zB&c-{nfnT?` z&8=LzQ2hY?&##GxADmJ&$ZskK`oC*K`;9rfSeUvy>l@oUIT_oVn*AP>y{d-)>D&1F zPv1u0cpg#tNEi?v(tsoKv_>^XV^@(zct)BWo1&D81XUd3VgHT9WL#>8Wgs9jj0O=t z{w^=xO`HKub?p9BYM@Cd!#0UhDfO#?xRf5 z8Cgibf+d}_2X?N20pPY-)#gVo23u2DkZnN-b9TJ)FLKp^zs>W9sM*zf$?}OOmNG`sxl* zd;|H*?Y|HbXo_y``IgrBAx+fAW@yftMbH9vJ_fmjQqFlwIi^l!(ITeZ!=-XRL#Yn7 z^2*5GGZ`WF?1f>n-xF0-2!BYL78q%gA>7EC*bp^F0WUi2Or+Al$+Arm5TaFbeRKd# z6sw4P>Xx}_lNG5jEQloK#{I4i=xN8YgA!kUrClAr{gKr*oi->p?w5SlLKdl1G1`$C zJfp7W!aQ~`sGL#d8Xll4SvN!$g^!!dS~VOaGi=|)^{xnuhlSQaL!m=xut7l`TM)r* zHz`Tmpl_SA3%*k$@`%LqahyACxr6j~a{~*jB*lul;9-)JzJ19?ICU55_xGZuF_kUT zo=?6J#HL)SzwyN6=xR_z5Yj|vzdlsR@3U?GT3aIbA-*DczuNBB_trM<1wO`xv{Do4 zq6>;aFSqwnq$6Pt1XmAdAh(q{msOzgMVbdXHOINq3@uY51)+R|^n^hZTzGGBuLlQ) zm?ieDmMJLQ4R41n*Z_QYu-HMw$hu+8d>v{Y7&F)1JvKPCbtIq1+vJWVL}gecP*>ql zQ!){D094E{aar?BoSO8q(;t;7VLhThdi)fZNn1I}xG@>UNncsT)WvH@6IG*M{crK;l><-NMc|AHPrQ1wY8-ed?%X)%~aOD?a;X&5xhY>zGwlC&UCx2r2xJ!3BxV zr~q#O(jc1p@Qs2^j#jZ%h?LdvRY5O~TfraFhut_vb0O{&a84oqHefLZB&{K>D*KH# z{HZG%g6b$O3_qGRAkfTHjIRXegn5^GxU_UKXCVLk!~TT2hi)UZp-_7|nB#{4qS&Ge z_I&omKFMz0C%46`gMSCxQ1)wUc*&`!;f8UMTW6d>&Gym@xnE$U;TmQFS+t9}5KQ61z|8-gG^c%~4B0`EWyG6YN z=%Hp^b5<;f!5fwI|6%>7UHjK%6<1Q!(Z6oroxX&2r`%A(`NR8Y(b6VCcjB9$xvhBw z$vu02L8M1R&87+UeC(Bk`Y-7##C?H7w?XPQb=P~r8|XzSF^$ORf&~e*+Tc}jR`|6z z&*RcuF~u>hss7WKZ2-{sse=ELTcKli#b%^lGfUM*7m5He-3E_5C=$MKoEoj1W?Ib? 
z>462s23cL&s5l}3;}TtzC|ear%o!tpczFwN%tI9DV6Lg^e%O4DEjxBjQiGowZ4QaE z!RLAL>~7-q(*z!*Ons*`l0sT7!oT(aNF`U7IWL%pFh4K_a<`$zz(*0nL(P=prYwTG zA$e@lM$guRFhB|=T@`U|CDoXQ7WdQ!9>Do7>l!dNdT|2Y{D$mu z|MY6J`0nuWFekUybj;Osv&# z%{`bJ;HIW>OFOX0^RQ`}MgDtH`N0`qBoYCis&CkE$79xGvMY&13%_h_1C-NLWFD^# z*w zlz!o9GJyxEI}lJm+A32N&-?5CsnNN5^+MEWwrnqtL9AMiadz-07*CAyPsPG)@rCCD zmur1ziSIZ;%weu-3sm`WTiYioPyfA_aNCPeufIg`$Jck`p*|BaQZn720RUwN%P3(r zRO`X}8j#xE;=*Gd%3N#~0#xX07zXxFyha`>v{Dm06gP-$@rn=Xbn!2mffB;hNM|%)bNoLB4J+?I3Nk_@DTx zF|ljzexig-9iNlNA9%sL;OfHWHY3rY6*-OI3{j)k>|@6WP`Cc6L%S$DmWU<^6ES^P33sOoNT11qMYv=rMpb&EK{5fDr$9->v4FogSo7nrN11-?Nz(KcK}*D zB_i<&xD@UU-IMz6p{5b*~v&Zu7us$WzIy^j^&z`rsSps#B zYbp3n_1LS8&poCU&!Cd9++ET+x=agEZ320j9*rSEax|kT1l~ z>sO}GlG|ePk#*ZaO5wl*t?3bEp^1t0yr&2m1B3Z4{OeJ}3s+}khjBbM~@q^em zG6q#f<8Fi_^=RSY*XvPHoY>6<#0x$ydiMUs`Y?Dt+!=o%`q1$A2W=)TfX>ti7olYY z0?JaITvPXGL-0~EJ_UoCE0h>Pxk15FnJ0P_Cj_om5{yP4ZkVHcJq1#u79fF`C~(_) zJAo1}GC_Mvxu6q;GDz(2g3=+z-2c2PstO^O(`h0eHvp>PjMvhQsbGbZg4S>tOaOw$ zM}$T219++?l1AzjoI(}1HYEUnEDO_oW$RR{8+70{KXZ4~{1v&Oa~Fau<>!WSyaP)j zi=+!c8W2i=gTO^I^1~RQza^nLpER$g(E@(NS&-0Sj9{{;MO>}{0Ty$lSjAO=1_VrG zkmy>EnNwlCZY0tEgIeo@#0R|8a9@%&xr>fjqs@Rd(0Jq7WJigYt7Ka)v|xTRWs@5~ zP9UXJm|o=UAm0XYzGvj68@faUh&yP$C+bn0 zLPl39@f-hwBKkzb)4A^mWlC!TS{bIQOa}7iLs{~a7Y5=5NFJcEKM<1A$0^N7Xl5 z4?CP$6&$gC^NXgS!f<0K!09w)v#kh2n6%+qxpt|?VR_t^2&VyXO$wlKC^Ul^P{R}l zdQt80k`$bQV^~aphmO%HX>gh#0nM66D%aKNR;^j=ypm>u)&_VjpVU+&n|!n!X=2c^ zmN5Y=OmgW3NgLQis!bEOZ~U*z^O{x zPh_o>m0;(f)>uUZi!$O7=%g=4){%aWiORSoxVSiKKR-B4zrJAA%NZP_&M1r?ge;Z# z*HCHL3cfR#V)Ej|C14fF_ZK_jv%cc5>(XHH89rX%|IcqH-GJI(sNJ~Sx zf7Glen;E~6T2V{ATy2i3%mi2B~EqTJJSY+V>$zZLi zyt&K)Bd;t~!+>W&?uNB1`a%{5622o+w*Dg`9K^q>YWWsW1QNi0hFF-LlHdwHy*S(7Atgv8@43D6WbucSR1bFB%8ghWIWS5E*ge*# z$7ayLd*a9eJ3BRmu6+#yeTI~t#6;sLZI!}0g$@eKbte_9!LUYeoU?}fy=|#x&k_<} ztwq@WeAl|ltMR)uOA~!+F9nmPOK^0s&z+$q-;*oX)>KVFsn>)FdU=s*(sx=M;@Y(@ z^ocA)G_blH=7F(Q)FEjmgxbsoxJV~w$LN?eLq0{dJ8I!?TalVB;54DGm=Ze*Rn4K- zz(|7-$v$s*s8Jf#626UTzM(YM6XFoARHAWQ;Pd1BADn>*3IQ!pXmBnOX;>Us1Dg_= ziX0Z`XpV(}QUNhtd?DtRY_bYVO=zF+G49Bz0xq^K5y4uPq2F9L14)A%nZ!`sHW+Lu zGtS^l0q!Sw1P^&YYQQ-W5Q$g8pe3!GEXyD?l%xRGb^_Ds^Af0IL8uY6h&7R~^Ay4laFjs9u@P`>N3e`vsPBi|1Y}l%J2^9u zQhb|F9ZX)5pX&%0$8*@L%%~^=jxk>ihn1y*x?QKnN*$pxX3`j3*LO_waS6rab<%gE zC5$+ZR1l|c7jDyv5~8RzhxAZiB0OuW$u-Sb+D zGO}QE+rnXU+}GR0AU)Y9vpJE@R>A&foxtW*sLH{fcnJ%k6xz-juIZ?|GQo@DF|>h0 zG5vSn(rmmF7IOi{jZm{^mLO`@(3sCzX;{(?^p$qYeVI${rN%MK+#GK)#;G2N)~oNv zT%lyU04)v2Fbq?DK6CJ4?$*u$R!TB^k(dp^f_Yh5Ha`6!QmV>=;zsAX3tee!Z5Whg zG4NQ}r3FT>%jOee_qB&^7wp(;HDY$S8EXRM%^NZfF{Q7PwvvNiJZV|CQyAL34HR3Cm^Lq?y* zMkdZ)_+{ku{0a|PoY>sVE%|yUVh^CLA+AoQnmp8 zm&0kdgHyht?Z!<0$2A#ut1@m@;ma;%=6Elw+ACJGKO(9L= z%f|lL--z#l)oEsX*ZM8wliIdJwahXicZ+?V^;ym@FR=D!m`GFdUs!&t!Zmx?h#+*SiF zu@df27W)VIegCK(uAsdDXD0!_e0=H^OTQ_U#fBXXTxod4!kMiOU20|MLN{9*en)C$ zu&NZ9;yph3TcV%;-p}AH!la<*r7K7tSr-4>-H}A~d?)-}$}MIE01*5y+@1e@s_GlN zI=THHELiL6SCqs0$?Ff2=F8cpjM^r`P6!4jv26q0Bo$7oTf+>im1rSWNn=rzI*0w( zy?IGU5s`AjX+LvluMSj0e7pVk@GWE&-z&GEibi+sQLXXp7@Ws4?nKJp@y;Q zD1}l$4}eCo1IC#?otU<4E7voPMzc@KlJQxt3CEepea%$Lx@Q~^w;vTJ%5_aPQOu(! 
zma)0Hcu<)hf@qfi&`yr+w3P4xgj+R(o9v0n4HzhCI5ph9 z#l!x~;;v_!Od(T31M)3r_D)!1dTp5>Oa8m4+nhJIi4cM%LENP6p~AH(bl#@%Q1G9( z$H3$ZfWAX-ohDvGVzoGtO z9~3LQ?TUQ>@`ZS(LE5$CwS8aCIBH?#CO7{2k1$gp>$ygTxwV=^({Y2LL`vp z9}tKCg0^&fL-|nr6R#|Pp!PuLE1^*e_--T}#DyGX#-jhqsyWCO#Wu%U8eyPGihTct zY}ce@CAUI)wb^U^c#v?Z#UtXe5=8!5nDeENQoDqs>w4Tr4R!~;+=+&w(|HD-L+GQd zImtXZ@uD1wR>)}#J@P~P(Ds(m#@L)0gC~bNPPyW`p;O-Y@>6I=rzyiZx>f&%a0acJ%bwf1u z+Ro;%;c-}V*ryN@-2b}c5+vp7{z{D&^BNHaxB z-%^Cb^!c}dzwK;i*66v4gh^gfHpS@3F6CxV6_iDqSp%ky`5H3n^)qjH-3S=EGU!&} zka$_bJvEcG&||EbWP0d{3J-HGnubgv91PbM zfOfrdDL|6HB!OuJzriP!piC5N0H=Kt*|9Nal3`?q1TeE^27X&0o&-GY2a>B+gj|Po9~op+Ooi8o z{{Zp*@RiFj{h_p>Zw_e*on{Uy3Ysm3l!svyy+YUoU~roDJFN7~Q*x^*;@&}jQqhTK zS2anZ8H9#DRGYAT_eobtq~b#Xcd(ftbUo$|Z?>dMl|V(>N{!}(4if5ZjueHea54yT zAXJHrBAxf}30?#tKUI4Jgl7k|Vt6}yDJVo$V*~|U&_Ll-A2nwyXY=iZ7B&)HS_xI* z8p+FQ&CnuZgD}DsVkX2@MbkbgtYRiK8jVY4j&KY!OVGTgmL{qb5neUL1r&;HkNjQi z=%fb)xHWd+qt0X!N2_r%sg;w1bFQF7w*cdD-w+q}Q65s>P*tcoAWMUPX>&)BA5YHO zX^Mvs%0|D!DSxrVsH-;ymt5jcYeY5QYsi{z?M36+>4?0kogn8x+mo+{ztbjJk zP0@vN;@0GFCdkbTJVQ%3agmgBUO8j-6iZc;*{+4e<8d1E70K&LywGWOog3Aw8hQS1 zax3i`!y42!O-xAyQ56*7Jt4{jR7TOhHphg*tVF5JUtn7l)&9~O;Z?j!kBkic1=}W_wOnwrAXJYOSzGU ziRN?#3r=>H;zLByzOxFp>Q#WN5B9{)6@V=so<4G{uWhZ8JH6rC(lXpOU#QY@W@TWYbB{kh*1mV7zd64(5&9);3kDi4lLB%W_+`pYP(X0MeU zO_Aj8AkUWUW7yF@CDxls_XmP1oOIwXNmS<+v~2FmNGt5gx=}u>wnj-#iyMPXr_esU zRBZ!AxVh6OU49^O1w__;0XTApEYpCI^5Zkg+cR`qv6<0;NU=XtLg#u6VBTpe{$!sbw zmT0tJVPa(<7C3wRs^F@i`|@Y=Xb2^&-3Tm6fQF7-lkZHNHPuJr!roPKNVFmG<>^pM zRi3fT)^1xiX3{%44;GW3t(?~;btzekkXp_xHo$AVa$jkVwaIZ23*L7{C0kXxu02l+ zDPt&dpi0S+ilb7-ZWhy6CyHQyrX;HsK9?9?VG8XJ9Pt5dvGI?-HeUY%Ev&Oe@bO0N zcnRs+D_Jf?nf%!QeVjrp^b}b$osl1>ce+{!ht$xXxw~`pH8>bPugPoz@;Hk*u*u^F-h5Vu@l4XRYNN?* zLCN-#ZQu8XrsNC{9t_9}e$SaQ7e;}~NsA-gTNY_NA36-NPRo;8hmB_ZI*eQc#^dWv z;ePYFE0J7x(KNf;0dgbMj2cA8f*&{_#_Ul?j7elp6 zFvx{8A+xWqqvw5fLjL53FT3yK-{)A1Nja- z@`9gQuQy}5#aL(nSK+{Q4zz{9oo_ zlm%WmXqysU^S`DDE250VsMmVRu)Oh%d1u?c?ge7VJ?he^g4 zM@~g_5jUNDp7kRTRHIt{9PBoS6nqI3+90x}z2d8BzQn2|V}0pjhmQhYCv5UYcz)*a zp!q?=&hGJjSzQv8)z*|wi@C$rq>>9>vNVNTMSk}E(6DZPgh@cSN|YjpnXFLC6diV={FAq zQ5}nqAt#6*8zV3sN2f`PkuMG4L@n-t#-wOS9$xS$LeqiT+Xt^q8stz;Ym9Lza83o7 z9wBa9?Kb}E=5_nqeX+|qaNzv&U7jufa_M~Y+~)4P+uvo(-hO$1{Ou1t? zp}*e$zIua8HlWfF`XK+?&J z=!92tI*sz@Ay67)8h8XXwua(}jf^Re`6*-1XpK7!>R9QhAsb}%b{>$^Zw`cG#qdQF zY3M*1ggI7BF3({XKJ>LpClupQa=>Gfj^!k4!UwY(9MEXJVqpd}V^w-v63x@geHEIs z0(c_{if(NJTA>%wXU~x1p6QJMX3h+_kr@jdF+HA`KIha@9DcPP*EDhtb-o2FHjJLk zhQyd2f9ca8H$U&>-bycOs#4)uBV|UMq!z%)xK|LNlJKJGf@5+-V&4Vjz7b*-v32d6 z+xARwcE<*|qn0B~4R$W_Y-eYOJSn3G&ar?A^4o8ZX#k=^1grpG7?nVVqXRV$0)S^g zRU!^?kj`AkNgfiC0fMJp5c~0vWm#L5<{l9*Fnq9}g?^@oagN^W6RpZ1pGKT(!JP=e(MZ85uM5=KXM1L!CPW1QkQn9s>j)a_ zbqd|6oKcB}R+W0&IOINbsA9nq)dq|tfg8ZJrF&{88WP-)K`?`sMi8wTKS{!h(86Mk%2n;9CvRKLML8#cly zOOhGb4Y?8GkKDq+)aQP&r8EpHB)XifFS+FrIZF-dStpt}B?dbRmZ#*R+2?dTX-*PF zMlFb%uw?_5Oq+6p#W|({y4H%b=L|x9>sS^s0TvA?h0YmZ<*}f!kC1RnNGe;~7?ZK0 zAdD)4`ZxnHQWLarzbMuXa?BQ2MS%^Ie4sIhCif3B$8Ri!<$$W-^6%OEo09Z^v~WQQ z#|Bu9nYan7SW`Xv zN;Jv=u~ILzNTCap5z=>4=3IhYxH*@f-<-7NT9}3Cz^?CY4eExW>Hc#fxIkeBnr~0t z)#4LSA>#~$Cdg#5y>Al1YRt*{>N;wBb{P@aj#H+W)D*G}nj!5Vutoi?8lsa&LVS}| z0IyQuEWNChbFFglNZp@bswywtLzn2*mLIBaThGfrvomUFEOyV!8pux_9OGGT7s)75 zi)*PLJ6{#~J`|C&uJ1!KjLdSs*%8Fg+Z+I1(qs;yJlADa$Ra>|t_p|at~vxGS-!0* z9PyWKdD84!bJ)ZMz*DDMwUM^ALMrS1$RaX3D#yJcN?S{7G^5!Ynkc#NKcLgDbyLLF z>)3a3fx z(gzmyd;ctD3pS@=g$e5(LYHk9(G~=z;qbErQ13292f6lYflK=r#o($Y(8FU@yj0-C zJ>p;sq7md`1zL%}P4nm=TLSRF@9Q(&+BlN~Hl_ zQ0&Sy_pH6pHNEqLy-vaAS8r<_RPzlUOzD8u_x9+` zT^r79W>2L}@r~WLs~9`r9AN(&uT0A-(x#yYJxeoEw$46XN%M;-+5I|hgh#LryJJy! 
z{2*^L^T7$3hf+h%yIXJBcwg{U)}+D@~e=TMluVLWHKo@^K0X0}@ z5)ESNba8ArI-h>_^jB)lYc$Q98@IFILKAu<=v$a(7H9hXA9N~V?>h)kPth)somtgg z_SHLx=5GIvc-q7Ehf^&RBU`J%Nz#4*wBE%dOy{EuC~BUe=zm+ZmWls~3d3H(P3H1H zc9!{QBRoD!W2yFUd(24l>@L^gzhq{lvPDj=O-8D? zojhD?UOD;4h2~=1bUeJgze&Xh$;?__Ef00(;TkwD_cik*R0={hX zdBWP`|DOb9PeiE_?yq92NcP`tl{q_@8e5uK8e6(}{)e5^)c);1m@z}1lmDAFb0mMm+P0OELg$OYbm zjK{L6O-SDM_WA(b2A%r9APV zn`KU=YZycb!CzJqD62`vY-68D_R#Kl2TG7KKWa*8$>wuyo3`Vr4Tg%e`KDe*UzanN zhc13@UI2m1N%5X!=UJ-z{Mju*Rjb^i(&NYv))JX_wA56i>8S%5G!i?usZwi{2Cj_> z+Io-*k_%dDXHQ~YqfHBf=I%h#BhG9*e8$bqgdhK2ZQRfTMn^6`%iZzk)<^Eb;l~~x z*3U4#UjlP`viq}g=XPf9p0jZ0{7w1ij_ZNQ45%6q*-b_7g~wB-DUh1hGZFFtqJ^MU z(uw5IEn#}^kHY9dwbAlLOSkl*ta4Wlr?K`)3zAANRGcdNE-6vr&{P}EoYWVLZt18d zc2Zfg&4UBq6z)kTLOK!)ODO(N6RqF8wKv!dlN%hGD67LsEhr4e>OZ68u`Fo}a~vRH zd{Pq);k9ZmIs*jhQz*LaKl_uIP#GU?t(3@gqWHzJGRz+TDWYY+2s>ni0h{VYby6h( z^!_-%ihQwTz#uUKBpY3++|pHy7=9`S1f>{SJVmc~5k1j8&b^!EUfm*tK`iaBNp|&F z_Gy#r`14wBz+eVWKl2`$#ft+c-@?P{9AdJOk3z}-zj&O0EjO)PVk3KmWD_=xTIY**)38rronJL=(QV>e(>zGgEZ}G*Cs8qs zD|iHVNhqd16IrIcskK_GkE|f5IL1%YIprE$$dWAvljshwOadN9M=1e-)95uLGSWx| z>%-E&ql<=Vq7Wic$8n%6Yh>R%h7os*S`Vhv>o@i1;P86xEr2(Xuo^2r3sb0m#?bu^ za4;CWdOuoT&ZPEeL7mO;4$-i_G;p_zr0n@;9fAeB^kvYZg2H`tE>cYzg#Kx@Hbz86QWO`I!A~ zKWA@7Zf;xSXGm0+T#}Y6l*)n#T_iawrxlt4pJhyLe>mygN0whCDev5-jy|;9a*Wlx zYE%{~@fIb;1JRGL$_T`$$q%XeKY30vRk$%j9Xqkw39M$FX9Q>sjIexx`@IUyJ&Y?x zX(~c?)gARLu@5Gh?DB&6-6c;uavM-q=T<1tRmWXdTz5yVkGt}RMRgejr{{$xLsP$C zJ&&9&dauc~B|-9nRxc`SE>wBb(XU^^2)jxH|J$;6SY<(LPCA`)y6V&6U|QPwY|eS$ zu+Y*86u&l#CbL0Q%|WPp_|}_QEk(VbzeQsiXyi|(t1gNrw{5+WdP>#f{L;l~4Xe3J z=^_WrU;L?MlOexki!!@hO80`V&7%FgT0E}&-R;(R3A?Y)_pj~TGS|Wd+oDs^nLiSy zSud#4L?TTyDpeH?`>b9fRW^FDYTG1;H!W})bwR;;O}gWBHV^XHpbC*hrYh}NU|zt5 z)wt1}V6~;=h)|mE;RSHpia+}N+1CeK)UVd4pEe?;TUSw)kW8pZy1ZF_E?@J4B*s8x z9pk2|CYd|LYx~>UEv!t14CNbh>$5b2F6?gGtS!wABKX%NMse5KxMMB;K_!}I-qA1w zkY@rGN|7TZL--!ZA64(`T@q{qC6QT@vfUNMJG0X!C*GeWjdowE*?>Sq#uTO_to>}h zjso3rG5WwfIYGuwZ(~&hd4WQJdSBDcvhu>mt^Dy;vqEldRBV_!0SFVJ+hBBi=|<37 zxiCCg;0zs=Nexe=w-Lh$`DB+N=67Vl_RCJl@>4LAgPIQbYD*h!tyVoM2+YBVhVbxp zo?F718^VFT6_n1c1nts=8j2+tKG$v`(>3+A0%8DcO4z10nuu6nm)ty~yh7)hL~mwJ zU}7BSDY#AJp(sGB*-EX2#3WsIEJYRaHYs+3*TFW5A`)+{`xir-i9OvoRM(I1c#1L% zJVOCq$9d~DgJL-E`vmoe8T1c515@EG_Q76(>5YSu^)fjHX zThD9WUM|dC%T$+us<;Z9EofAjW}S7@DBOb15dmH$0)Qh+m!?SMl-UW_4s4B9M6gjx zJqEx9$&kBF*@}?Gi`3a{hT(9izB6i90o|BO9;s5!q#Z((rYGQFy@^o(oy2e^4H_K zTX-=R9tFTAtvn<=D!1lZTG03eRRs!16;_eCihF?8VAwreZk4u+NML!OnBXJ+e= zW5rkqijgut$DNP9`Pc<)p5?Jxs#Pk_Zk+H4dF|L#BVo2BeV zh{rB9(CGRPg1a=9Dga-Bi~zgNLY16;+e|qhk$}3xe*fb)coY(>3tNjD8h@f~4GM`4 zF%MFwVhiMb_NrhUSH@pq%_A4>5rkz}ZbrkB)q5QHEVGkRE_RguER82K;g9<=5e1i< z=TQ!AoA#)~RD_Dg)?Qd%fQORQ!d5b^SMRZTF9EZ9bTn_*z8ZO5xd89tY;^$C*I^T{ zo_TYCX7*_N2#^O6%;9PS77V{VgrpTj2f;TfD_~V97s$s7QGIXay*wqsSj>ovrr;`s zqU5w(-)rCtbgbD9s$e9M=k56lEx2l_x(rPII*EedD>G$@wM5Ar=7{YVHYNNpsuj7( z?Q^%NF)L465%$+d=aXxi#gtO#k)^iKRLZvMJg2jGwa~-E4gyLsrZTsEvmync^BYa0 z_9Qvu#Ui+wSy-O7<4XgBVA zEiko|5qXnXG$Ei7a;43PEBb;BsXcegHBcK!vi1iS+*qI74lf*v^2y@#3|UX6jMd=_ znrq7GuH%hm5$fH-2cYeiryA*vc5%9~F3^_WO9!7~B|8ymjs$UulfHgK@m;W8XF)Gj zs>4w13ctc>8*28bx(6vKY=BGLqAmWV#X*!{Q5S|GqR{p?ix*B4y0ZU9bht~l+>Z?B z5fyy)TJ49I`H{d+4w<*@V(swNY?TG;lDhHvhPbEIT;R*QtgcCB<-5@_BYVsDox64i zj8Kc&yn6e4xihNS=(GJ03gKptTz0wJJSg+Yx~8n|kII*823ze)i?2<%uDZ;2`mvWB zb;%7;|K7%Wza1X@H~9a5a7-xds8m6@AN3&u08|P8mj{Q7;cu?wH?L*t{2!?n+xqw5 zxbOA{m3u>|D#hfsy$QxLA9dW~o?~?ybIWq!!9zqNj6{wyfm~?HzVPqg9tj2zov=#Y z;bxW;acEpaN9&G0LUvP1fmh|yU%Ip+T!V*K7iHCLxN#(rf8eYJ)fe-Yf6g0NQ#n*3 zsrfxQ8nFiArTIdC9o-j@wtX$?Fmu-XpcY3Qw1BmgOtCd@3! 
z0X2|UF&-z6_m-}L*iGMWqW!>jo};02#qph3GCuP`6B&z=Lj_6?YkM&d&WeLIRbQkG zD6#nSs_9A<=K|}3t}48B8jT`*Eq#|j%U(t3%`}LLSe~XKlu|imeE88rg>Fv2VDud6 z6$+2Jd^}Jc`x~M7%{AkSu*#!LCzH|Wk`OF&`HsfeMZdtkDU3Hav3%!dA?E=rG`I~E z5bh^sCs;wNnhw-2O?1~gQ%5J4_ciD@OSC#1n4Mv5rK`&g-cSBJ1=46YJ5J#nAq*C#P!fH$bVZ2zto^gXd?@eL9Ok-7&?Jp5PgSum8 z)@j!LLlh#0Q5d^SaR{0!L`|nGHRx#0O}`@hNN!Q%#_7eu)E>}8 z-y4BeCHZ2bM+9Q#?_dKq#$Sw_vCEQvwSTXFEj~4KHK(#ErF$vy&a6FTj)*9V>0sdG z)+STPex&zCXa+NevC>UXPwVx_rh~0rghrPvEFx?NHd%Gr()RBF_1;tf_H1%94cH+P0mMwrMD+&$@aAJwA zwo{bJy_|lQSh6nJJ06ilIB&th<`oDPlCzNC3%IV;&NW`WagPGCnvC^}4cu=bdjvxH zk|71r$sb!L%Eds95*3}>=t6_m|I|UAogN8wOT34E)LE88XKj2Lf}bUCAqoHh;s1nP zJ7awZ$A70~tI7XU)cQQvu(8oPRFH+W=vRS*+toWUE)(+~RFlo~6&sKLCXvJvA5w|< zdd7`OBwmwg2TIddJG?vC8acRgy9dlPbtfO%*BLm>9&lzKGjf8rjUqxl17b3r$aS1$ z=Tje496FIJbfn=Y^h#`iz_%F;>1kCWkj%r!2`~lFW?j7c^v4>n^d+YJJ;j6)UI3N% z3k?#yJKC+9zYxKAf(wxBE|sMJ*s;c$hmAv{ zN(GRxo}UC=Gz_%7pyl8pVj?3#kVr~#y981dis-up0V#!p((h=L)PT__trpb?X;r`h zXCZB`BVM2&$4gGVQdP=fkpW#olAh9OLLHS;%Msc4+JM|il$Eagtbgs)#Ot6S8vbDH zM)ijDR{!ty)~N$ScdK88tDTSY>xCObcl+QrHiouOAn$NW&s*=DvRQDAqxlP=xc=to z$*?JFW(&k%8gZcmUYMFtYqgVnEog}FQ!lZ#=k$>RSwkAs*_6B_L9zg&bbOXDXqK9Z z-dyV@(UgLeV{7`TztbT*Pd4uu=RfQ5m2f>V}*dBF9QjE1*_`+d%pVsx=3}iy2~z)l+Bgh4oUK zb0{|Df`^nWKeWhzyyv(Ar*B@lh(@=<;u^c?m}~vq=S&vP2Q3d^|I-U0#~*UsHAWOo zH+yNDtw9t_jNkkBeNlQK9E=;DjtoT-E$bEQYA6`Cd;=mg_M8zR4z~Tunwo@ZDSXNc67$NjylLw& zAiSs!#?CAo^E)?Nzq+-Ry5R(trj217N*OnQ-AQEs@65jlSD1d5ys`W>ctW%oe#~+4 z>lQvSB-r60$y*WVvo}pr$=^dpZPxWaK9qQ=Wb6d&jNv`fs{93r7F)1Ctxs8~J2nO0i9W^?fyql` zZ8S!2DCNk+Zk>DU#aoUk&fFQ&TlBHY6l^W#8hwj}%irlc^$$SWU zeYrQ_kM8YoJs3c`T}?5jJ`gzuV+1JNvAY*3vJzCuFop}v<25P^;0Y6YeCx$Wi&}@N z=VZ1Dp#6vbmRbf1c1MaX_Nz@p@$pzOs*hT;%1dH7YW$Oa!o^q$DsB)9Wq##nD4lF$ zsV*EIsVXJgMLQWXtb7xILotAm?D5ddW`W>sOoNoN=GiO2(h}{4=swL8o~< z8burX6I4U{B+bgWKByePmW8ZTeQyAao|AMF2U$eW-SqlUd2&hSjXQ6!(Ldobs5$Jd z9m`s{YJ0*A1F2)i!7ZR@4r=#+BlmD)L&DGe8SHdrUu?B8-(BPa5Knw=E4;8sdZ>U z$|MPGoW(Ah1jrqjLb_Jr{bd`1C^+Q2~as;x~Agk zfegOq7@-=dBsT>iDQ>rVFF-pIpAm3v z9zW6CLT>Ct=d)+`xvd^8^bRN(WngZF0uz}#tGBkQb#dGG)Nj)aN$zAkx{U+w6oj}U zR|Ff*w-?gKPiBSc{%VD12gOBfp2!^meR(QbPX~4wpZ-z;Z}ZSBo36Q9k+;}Jy(LD$ zHi$E5-$59#y379C1|9gNX|~zx6sKObWNpQBl`$Xu3$e{cs;P!z(GhM$@MG1pvZW()Jm;&yVU;J=L{^(4SGtL| zsL?N=SdfKS$Jt#d3W~OzlItfc@37Jn+oGyuwT$ zxzr3=Snrxl?G1d1lpDeF&P&kTqhd_6?Cq}YVpbD#t$0BScOOE*#OGM6`}5k6`sTFF z3mntI$@UWZ=WSrUeG9;Os`mz8{Z0Xcnu-gWjSY|d*)Y5;`;PvO%djR<%eGsEY-l#X zK~@6xai%)#rzlrF(lm?$v3p&%lZph;L)4g#eV3Gt|q`^HSA@KPzvi3T3^kEPP zwLysg5NQb;{w)s?R7~^K+gvK!k5+0t?D7k>Hfg`rl<2HOu|GElXqJJQvzX069qOl> zK`B0{l;CV^lm2;O*kpbXlzZ;9b4OjA1WqX z8#30GhPxqN0B0<`{y4!(J zxZy$S{@!3K#%aP-uqthiRiT1T_WLU6g<0{FhBVkP5bKA-@X?@qto>8TE+NPzksYlZ z3K^eLEvA-?g6W>k)^KzaRQy)@*@WcEH4@IXA8-mdhR>OODJaME%|^KCU}CVtP$RWI->G#9}p(FJ85z$73C+I z>iS6UV)^RsSuM(!4h0A#krc$OQyZH<+i<`3ImS^*n$hr4GjAX64a2QHaK!TVfb8Ex z>u?yye7aO6pQ*uICYvt`^a+z>O=zTGKEJ0H2%vhKF~wlX*t=iPpD7zrrs>WeR zr9Hrjt{5q%89Rug$-z&kwjY$ReGL=yANGGs8nE0S4J zoBNdoBGzK)qn_tnO4=2s@>CI3BG=C3No<*on9uJ`0OKt&zyg>x%AqLRtLE!vJx9q` zM+h;*&=qgkkd>#Wu%8E#$HrxhlN`vS8aQRW+J(u0$0eUd{W*llWZWj?yRW6_hTJU8 z4}*n8j@v#Nvp;j(RHsaf;AL<93W7AQT|^?Tv;OUpN1hJD+G{T)a5S%AIA1+r*{3M5 zudr4R0|`bK?6m3vlenX>NTJhUm!F>$Ka`j3g0_}Ys}=7@T<~hvf>xSb4QszvS;c5S zY<{Tz?H-+N%0*B7%a9rhszPaP9&n2p+03b6XvSzbNI}7wIiu(_7Hj4}Wq}XWEZ$g2 zEx5*xnD9dxq@In>pjhuzrvF=30Q-7BoLQaF+;tUYf-V|ZgPqJ@AM|=a$L1hea5Un_ zMfclwu*dxY?9EYzdABvJSWvqe!>Y-4puWScg(N|qPgj+4Wgb*F%ymY3Ij zi26(yw~@8xb~j=mz}r*9+B_W2EXaFZWU$A8CH>*;DB@dqjv?t{jFz!QfR!NP$%)^gb z?6;*dp)ijXD312esjAaj_+`QQ9(x1RT}29=3)bz7PBc!cAMvZVCgdsWgnR}uuef{k(8cL%HgXvhW#n$i+-HoFbr 
zDdlZ!0?>J5e~sCf-@z$P!jIz&F@nCabmbsr_y}0A2$#{?Y-EshSF^4HY(OOUWj1Cld`l?zbhPZ?{`@k9 zo~~!!iWmEn_B9~j$>7?Q0m?xzKtGJ|6KxcZ{%Y?_%md6`H?{UkB&FTf7*R( z{eO;QMg4Dkvt9RUK)DfaFf1lcuk`$s+6hWiN4^;F&kidsRmg$}Rg?N(J1nZ#qEuv} z-|n>iVA@N5q+T7Ly45>d_%|-`~@$K zMD$lra_PBVVHJ&(KgOco#FMv*>3A!?zc6Wj-QC(l<`m=eNS#cqeFEX}^j-~o z*feKyPuf4C6+g_}o>=*y0}TC$9oi|2`hHee;)K#a_U6PVfWm%21>z|6af8SRQ`9js zqy01q&nx02P{m^g)PBiXiD{@G-594Re^7u@IFOSzmhs;oTM>B;a4WI9O+0#T={z7; z;xNb z6hx$uKdQt+Cxvs8YI}NtUye4=hwA-> zb|#OZKLE0vKkxUgY~mUh$u42WDoOMp)lJmpFj7M?=c+d+pBUpNZ^iu<$_7{*mZJEb z18S!)^C&=Nl?hW(Mep|yb#61b`z_Mw}HR`Tz})5oQ*7!S`-Ao0khCqDgN zJ5e1hwV<m7)&qf{wV5XvdX^l8|dqZS_}KU9=5AhzRiDB+RP<0)2FY7)u4|v}cyn_aPtk zt|n`r6IF|m-?nrQKBJNrat4=UqzI*KZB2vUm373jG?v~UGDr!jigjn;iEB%I-b>YKCb{D+jr7xx7;J?$g_313@T~3i1|LE$ z%GgHWu_hbQ!V;@c25@z~@uX>{nxW4#;nEoQB=yFRwtVLF=gz+Vy}h~FYY%UpKCJ)e zw9K44^4u zqE9&c^S*ry?T>TdsI;aF>wt z;Q68U#RX=I1bknGeQQu5O$h97%taVpC1}{}5wvWIP*;^+gW_+2^TA9dl?1ZS^;5l9 zl#zQ&QQFsM`4mJeW;l2wZUpsXeH^Ed_@Hd;>t~dIfwKOpoKTf`&1+u{Jzbv8EckE< zvcl;*kM<^2unT8f2e}KZs>Tq*Mr^U^SEsKe<*Bh?EWbb@wGE>l23&=jw+kuosRaii%>n=w7y3x7N`Pujz>0aLwd+d^_vb?k@nKuf0ZgXawInYm|nH zCWbeBs!4pfIU6wg{BN2D>5@ulkDphd7#09P=znTR9o?<}Z9digPet+nQ4*(VizSNU zbE1l~7vf*9V<=gLLP4cJVlHoJ1v*a_zF@32-A^_#>0-i?%JFr>?WnE?Yjc1KVREe1 z?f7mIv#Wq+IdlRId;r>iyVjV0Qcl0VB9pfD-t_xsMLPvH%c z6!9ZW!jXKJ>Y8r?ifKT;;geEbZ2~FT3RVD%j5!{%`u71l&~}L$#x9UT866T7? zow>QiSoy5{WCu_p4N2}AexKfAV8oXhUHItmNZ-qI2E5_R>Ywj!UL5#p5xWOG8rxu7 zu!Fv-SQXnm-VcyJFu}G2d=jW_eeVM5A0Y7?O#73^R)K23vEuMz=&ARSB`7JWDOS;R zDLZm)_2{L^S{nuy{;Y(Q?79vEksE8!KY}<3n!ZZt3aRcQhFSCr2paY5jYDBOMK$AO zR;;~Fy$1DYH(LY4pM5z_YB03zd9S`)0KI!6MgDSQPn7m6v4V~wu4$22SBu)-=5Zie zoepgmU%e0s63Y@p2^J9wm9Rb;FeJrMf+=?sR&!81zOtm%%{>2Kfu{OY`idnUl(6qA z1nmyxf;llq5obJQn(8q@Ubfb))t>reO`Vc0=TfDHHK#u`+$ylxse1YXpvcX1eWX9j zaqVZ@8WM3%8#ZKnsRA>!Et*YCip2muJDHS;!@sor@z2yf-&sm4 zkRgVxnsg6*fAII-39UD#Zf?W7Il&P(_4C|mh7314KE<-4XPv7_FT%1&(LDMP0TmsY zZljKf&qrBoi7%>nuFYpBCtEGiaS1Le+MZtPWyvfFg!5t1Hh|wdlfsL)qwTwc6VtT^ zjP@zl|0R+WP^179BsY6wl=b{O2a+ImNIh`{pbSRMDt8n5x3&i{iz*T213yrXRoYOz zFoi;#TAnOTp+iwW4UlM+aHe+jwESUprU+UN<|X{YRaP z1V2n>>QUp8e}Zin|3&J6h_$0oCkW%DT3SZ6tR&^AVRYHtG!ZktJzZttxM|*BL36u^ zJ{`3@Rsj&B?b;f$Uf6k|k`GGO5q1V*pZRDc^XfoTdd=6_*}LBt*Dyb0!l!P-K$H=n zlfus^F{YGBw&FxB42cT$-|W||{yD>Te(Q3{%t2;NoKA%C*ruQ7+3G}^swXEWwY+aj zli#DUD^fv_r+vbg8J&->O#TaS-ghaqSLu~k-rC4lgEiu}rf`MvetebGxi$A-( zspyUT>HwlW{|q8?>rKTCHl-8eC<&L{rl=WLrc>=`?rCMx_@K$IKccm=4sTX_ zSe>dCVqmsHsB;HI>*ZJw`NZD}|)ZWT+^#yM}&NBE^q!vhIcdx$7{vuxp)@5xW}xZC3O7yd?9ke|rpI9`7>DQBt~qCcH4# zD77|dBu`kzrZM~7eGow;*_@##lCE~R

}1T%6qtIwSd$Kn2Qb33W4pO|#G3xj>0I zp^{6s7TA@r4Ouncv|kDG9Vu zT0HfSUXA!4z1ph{sN(65UQKo}86uoa@<*>0A<&emTsOdY1C`q$2KM=YDP`jA=FO8a z2{ib^)%N=Nb^z5&6H zg)h!2;95&&Nfp}i?$D+a_g=nnu z1c&;B)W@1Tu0>g3O@oqYY7Fq>3~71*YMpLk5clM6$w@e3DEg}tBms^)hKT@KL%rxH zT>{2x78kKyy&0U(YlulAjHXGMmrHpC?FF%fb`vpYCVpdO#7FT#Oz z7wnG@h58zAhWU+v{oYdlRN{XA=wUB$8*M|iTQrbi&NegtLTIS?_uc#9sGlKj_`tCQ zB8dcJ$sGi`T-+o#R88La8kP<(d9qa~+5?OVhTd<{!^rwn*11sAljO!ldQtF|o6H+y zMW6+`-C#{niq5)wr6P%nD`furg2^{$S7on=`xN6n`+*wupzq3>`VSOufa-otVom3U zP%K6gKF+8ses&*?9ze)#uy!X{1yKJVunJ26`4CY#*^W|`wT)QM8W^zd@09^^AWy+T zI;?J2jXz7h(`rYEi8aFVHLp4k<;0d9ZviGQi|IE$2b$H6-V;RD*L%q(6X!OQ`)I;s zk(1Ih6KymcAeL=1IEvKTp__4yxeRo9`+>L>+(}ttP*qW^T6%q&Kw-{z$u4*7lP4NV z(UHXRf4D4+ zLshmNsoGZ&@t7Tp)y*}H3#)Jod4)s|Uv5zRoFVtrn!iJWMm`H*JBc%3-GR;%_;m?R zi3WTEaWAOJYvU72YJE_8rtuDJ!nWS8?=akX9&v7w&A8IRNa!3F)+<3=)0{S zZs~eqOLNN+Fso(VMeL)9T?G2^#gnpGj~`fxl3LMOhOo&+>nHaITufUV(6|i0f9$mo zGv#zthJAI=NB$$L#C0ut=khqQvmyp<9?Iz|IiN$h=Vs|^2pe%_zckjQP|SMrj%y2& zc7;=tF}WoQ4hMTf&+N@QOhyHlC#Pofx^h2@MbotliKWHj%Hsh$D8>z%TSSb$1?9loHY^hMLD?>4x*{U@yUJ6=VKC$~xn60lFd?zNJ zsgX64i`%(S=A+}M#f*U~5mn7=t|6lx8-KgIxPZL_i!z(zU{Pz7E3?JyIh$QnJv(E- z0dcp2-zt22pu%;(QWIl8bWqe-m=~~aDw@MfmU`Nq+QYtxmS@xBDGg%FiyC8O!}++M zY1%3qA}7qn6GO5HbowOhi1DE;qt>rFnY76>tDRd@EhFlRQCwe+YT7<&&GMtvMV67> zThL(Gi_f0P>SbUpEByVL=mA1cD~`~-@LQ-l6hncCB30ov_Ur?E!4^3~;0X+s){HzP zr^3(7Od2BF)%OBCe%O64f=@GEbAew)Ug24Gw#=)T;o5F`a0F)>9c+b&REFVHRs(PS z+8U&@QU&053VA7v*hoUa&=>FFw!QEu+%cp_Q41!o6cxuFg?G_|ey72yn1$d()W_o_ zNS%r+!`J!4%lFQo;E5r_hM3N1V1hyxzZugcv%?#{8g+)YE#JtotKGsNZxg|vgyO8D z0xb;yn0eYF@X@cbW`o&J^l1j5X{@-O@|C5&;3NW$&$4YX!jIPYtj>Q*7 z8@nizIANn%(bj`%BA}2z`+ai@a#cRN3Ua2STB|B_JOAecZ;jlsPiE68zkYlUqcvu@NgOXL%!nWnEBSQ_(J>V%C0S?ab-t+%?4E~>#C-6u>9q6BwCxxGm z=l`T|`3cwg(T7?6N61sWtZfD#!qCkt3gP?D=1is{dmi~Q2aI_LEmnkG{lK`lf~LdB z71Ey98~jqJPOf742;*xqKVzDXBU!|jmtNl%tJDsD-OUV#UVwwFkRX?o zzX`mIKeb#1sp*$l(az%r&Tkyx>llcJ$59)jKT{R^P4KhNejTPK-rmev&DPj@_$+kI zndJd<(zf|)1TyG1Y!COqC*?Aiq;(;I0`O?g`FAd5>80DbRm4zB)_4_TWPgf z)Q}R7+Khh`wt)NWQL!emJsJ%vC79 z1Ky2)M7=QOcb#{Xk_A3*`YT?NBh0-RT!nww5X*sn1b~69nHSGXo-@;(Ir90rSEEZ| z>UV`dJN8{4R+EBd0Dn(@|KIMAfHc76Ff49pn#t#3u#_Sx7 z4bA__5&IdY2B~b=9IzmGKdZuIhBN^Rx1kK>>k#=~5S7kpkA!w%Q-$~E7u87%AvF=9 zj(omN$0pXCsUT2AWKQg*WlZbkmPaK|Bn6#8>3614WL57n)+E+z`3uVrRy8bRRu!pX zP3F=tF6xPQ>V_ra>67}~mPRQNYmKDL>ifB@TDbDY1x1zTAz+gw1+D)|iv>et5bbS2 zp@gmvR64y zb_?$<4bp~=G9peGwqUQh0{j`Iay)bc%n?hNJ(JR9Hts!t8L@?p4nNqvjU23yb3Nx| zzZIO_`T)hq(z)Sm!Ilh+Aa-%{{rQXgg%f>Ez?UDsxvN=($?KOWMQAd)266|`E8xyo z+^XO6Qt~Ef0=$Ic zvGK3YYF(}nB7NrsqS?D;uIDYhD<=`eLHkYV_^R5FzWKI%nA{(SQ_^=#e4&(^V8fD! 
z?t0h_q^RdAS;tcrdQ)9(e(W0xDyTveicu0!0s4Ulahc<1X&f1<=VTua zX8PUw0k_Bs!;E=~Ml4B*WYzij%11Z!u&A5_l+~Y1D`y3KTY@dNLQvr?c8D%A_A!72 zfE4sxGri#3$H(~>u6^i;aT}BDm^3>v*9p|P81-pGOVwYPNoS2`wM7c)T^^vSG>c^ZEO|ghPhz!xz(5 z1ZSI$6135a8EUP%AVuy60?LxnKX6UFepKrg=>&T|o7F8Y*KTAlj}3`|&z-@2b6uPPfUj(9WutFtgCKz}P#mAWfy6fq&D z{<>hK6-UFfFYz3F#W4p5R%3l9kI_v-RB<^H`+D}}*wsYtTbf*&dncb@@lT~?&GzZG znR@zVGfc-VMIv3B#XF))`qS2vKmJ5bTH#4bCSWg1oTxwJqM?f}G5sQSjO%`>&#M!~ zLFq0uHznWHQf|D~pS!B?7IlW1sph8RIJa_jJpcxm9w?3aHi4jlXe-<={#o!goJ!&;88O-o;~_ zPpUa5FpVk!1iZ}Mw-0-L5wv_z<ko6y++Bw)7>RahL=-XKSi>uZ#>umqh{?EaKl26rh zE0fAQ6Wi!Ioocv%HddTF5s+Z^t`o^vo2tKE*#1aOdY@>*A?i+!?ig*|fmh1c&D*xs zQf(3NTpWuQV7-o$6p}4#$--M7iXVcP6{uiMI^#rJ^ zewp*%qjpuTJN&8yo?IDTw{Wws+p;m->m8P;p>?1&qv46Eu_bNTaQ#oP(zyN$ti(TH zMf(Bk^1r}Z^>45yZ9)ATtZkK7i2s1qQPcMniP%Y$5o4zPcEpS&TiEEpp6mPJ>jDQ) zbm7d50e@$D*9U6lIU(cD%ZoR!g%d-%dqDSKOd3Q?DGuAKjOlrJk*8CG#sj{iUz7&7}W%#u9X#g zln^my$UK^!jDdt`qU^_I;}J9wg938RIfL;#K@2nPUc-CJxI~50AvE=P$?&$0o;|^B z`d)LB$kYw7zojz)deymk4xPEkUsSI5Qt;9HqV=P}Bmjjb;K<;_$-PghNC&w3I&^VJ_$z5 zRZTjs7abRjooQ6G-!E;u^u0hxi0fy`By`D~7f2UkhcxDA}=t zpEcc)52B+bZBcV!LhtF}vAm|YS!~Vevu8sw94UrLOBj(g%0z5x;YR#TFUUPWBp#Pb z$!`MQqSvxb+_&KyW4^@sIUwClZU4z49&D6Sghg9cE;&Ie8_& znyp6ACmzS*Dl&23IF^4v+JN}&%_`*woDERA&0@qIz+0MV=s8=WH7KtpEq}YK&F-_t zt+-8KOOM~90(Hq9^{Z7z56mfLQ#|0xl{)gsY0Hxv*5JpF1o(zg#MyV)i?;Bi_j1N5 z9t8I8ARM<3cR^<(KsQh>taQzonHDIM|8e+glQE%E4G?R}?;bi64e*7m=Xr=XIzoJY zBh93ZbD{HfCd`R{NyF4PR*g990bK;#$$+Rvs}9wXRc}Td^s2g>eGs;M0=F*+I@4z` zx5NEl|MK9+bCfEZYuc)Hr=o_nTBXP`{Zv)c%2HZjW>*jYJC*_~&AFd@`zXa}xi?&v z?+hPdqk9fsZs_LJ^BAGj$n)OE;^CQ7jz`RyN%i3<%0qDIY?Iem;VzyybO$ycmlBf zcL28kzw-kB>z``>H(>Oy2GTP%u(JB)8RccmJ~P{#Nu)&N)cQruV?*r4 z#Z8%MbF8LIQD-Ql8m`0W%H|ri=a!zs!pmueykIAyizSK4_ z+hp>e>|QzPAp~AoyzAV*f9$~&pvdlX#Fy}g}W>`gp?ZfwjgY+NL#52d$haeEb> zT;Shlr?1fw#o40H4=*X!f6y};v5k_voePX+0aG2oGZsqMt0e7e540j%N?1M+Ho|&P zJkT=XNQ2%b#l)GljRStKvIvQAs+t$Wc#@{UHia}Y5q+Um!ddlqqK;~e^}~<>HG1I( zjLEkPq)G9z37F7A*&QWJ>3)%Txe6!W6yv)fbzU1X(+v{|Igolq3yqtNbYFO@xm$Xo($onagKFgF#xDm4 z7EvtWz|gCK^zgTul_FMi`Q~J5jsnWnVf=@yC5eWTiMg+W^t|NmIPTJ4G6?A=he$iS zWgWTlh$VJk00C3Z^iEYVbi+eIiZ6Qf;P@=sN2R!KA+Gba@L}sJo#np^BVBJ64EkmB z=v`u9$cTsR<+C1CA4YN-yfk+B){67mRF(}R z(pIP@Hjs>D7Hn&(Yz(t*Hs@%m`aha(atl9&bZnIVP}oh}Nvdmvk5QE~79(Jl}{gBqvapjmq}C#42n zBW$dVmoZA^6>5upf(NyHd;}6=zy(!a8=}~TZWvB?>zT(d zXEsDC(oF@vV8&t4g>r9bu!+Yfqmn`sp-3sf~WhWeHDfi-Ueeku6KILJZqh^^SoslZ0~T!c+{VA z&B&r!lQL&dO^yeVqU}J;vq-momHQ`%f%e7Vcq=ZZErkPtb9Rj$7j)sM?iQKeonM)5kes= z-53#7Mm61q`YkO-j;o!E`@kWKqjEAv)*+YxyRo3dmj4R;+3AE1hfrVVNxo!nTr&8E zuPN4S1RHHPnAZ0)!URPWmlpn}6e9I25h3N}kJE2CNAdAorArszz@pjQ>zSX=eDr5K z?oX@Mvg}-(nG3nL`K+m;9mh|zE5M&q(43mBky*Omv9ATs;~o;fquyrk1zr^>j9(sM zZ8OOq1=8q2egoseLAYM=ocZk9vntJ=a^BEiKjGsT3N_`r(c!^+$Nx`JW>X?j@HZS^ z9Pz);Bmch#h@a+;07>TfHvl$B?SGSGG6l?1PmX#e>S1deWykJT07+I>7AgU!PHbnv zHD=%SX?ot?YlBGeXFE76!ON%u)2`hQCAztG@e1B*mbb+Rz?@ zG3&B?*@ns~*u%on%EgDL+ZO_86Q$l=!r%J^%AnqS8`*A{-(;rPzZp%$VqlhK?bey% zc6lfbkcxpC*3*cTa4Lxfu}N49PyAHGLd-5^MT#A0U0E(VC3`S#wXSX5)2M zZQHe~OCOc5d?4eY*}la)=9Mz)hEkqXELX2n+Pp4GLNC_uc{ae-S=CkgY)wgaPG+-Vv& zGNV;)=+C}h(oJbJ3|?vHH9rH$hX!1gI~z`Q|%l(mdOdEZ$1iJJjH-O+Q@h1qIV5vLki2)YDD^HwNS_kz~DA z{xW9yIBmv6@2la)83&6>B1*WZr)=95QFW4A$`=h(xb8O<^u>(4X7AuuxRC^yea1C~ZN zF#9(t*$>LLRO@MsKJCNd;3`&2swR2H@Gg;Z%KXOhrPb@a?7P;!$|sBmsKz?+(4hA+ z(G7`8K9C_}9VzeU86lu4wBLsP@|sfJV~?LY8_HR+i6;7!%Wt(C`+H(}Cz(r@&H``Y;?T`rV;;cE2M=HWQF$)~|6d3;Ssu&AJ<7?;O9zuj6{P z>e<-XY&ml1ifmcdM%ZqzH*}ZPWHUMZeFFk zu;wa(EpodTj$YJ$Mc_?ZT`aI*wKmJ+kS?LyXGL-MNM2ix2b%wK_ zKe_fCS_T+UnuvE5(^38Aob-)C8>io+k{=I zxI4JOqKjT*6P6v|BvoJYJFL9>B~NFgcX~vwXe)Ra0`y{Sw6&W>I~@5GB3-TV?+){Y-)t`6V(u59C6wy_XU+0& 
z;rdTZ5%B8#Z4KRHMFx~{G1&6XLo}@3^A_1EkhS%IFG-=LH$)IZQETQ|6V#Q&DdTMI z07^rJ6n{0vU+(yWw-bjj^|`s3ikdkVmUT=nVM`%mOXxi!N)}_kCc2EaYsV)VVnY{arAo-4?cYfjJOg0F58B2N6$zh+Df3>iJzM6iPcQ@zl{wY(QspEIq8>uF zhEZ6FTb9Jc!uA4X>vBSL6H4>swaD+^Q~2RuiE8AfsQ794^$Z$M=VEJQ4L;Y@wRAk+ zo7XN3cM>8Kb$zJ0b6=j%CKis?-lp(#YT9nKG{lX>$N0{)_e#fj!eCwt454XYYl#Xa zDjNG!RVuDzkICQu`m)tw11kQAu|=B%x`iH{W8qbIhfZ?2wg zZqIKUzYy=OKOfHCA9oJ#cs9HW%iGrE*bv!V{;2EBy+0zp4ozGXhZ0MJGu$@LR`4Z^ zul!~-O5c#;kk>D47c7c1?;KTXFV|i&q%p75EpWD}F4#N_&NR=_g`gGboT#j^IXYIj z`B}J-)X!+rBdL|9rz@hqmr2;DWzI%<$Ly?E>l<9LF6KxQ?hlXSms$I5LxoxtC0`+@ zOgITuv2%6V-Zh+(%;0#IiQ5jNY6jF%iBk~i?y5u%G0`zAE%-j$#?Ajnm9=j7z-)gg z;G(pn)^;q&l<|cyTtrv3k>#p>I1m*+ps^vMQPn9ZJ=HVOl$o=Qee=MzG%}jah5y5cbm4<)k~1?_VGuvRr**^qRmOAn$v~)sF*lx%5*D2U^q+C5 z+$ZCpV9I`&A!a!ap4jprWal4SZJAA!@!+MayGWsBuiIx4 zSr>`N2vRS7dE;Z&L9kq?Dz_3AIas1gie<4G~4!P@ve#m*sU>_hzJnMT`NgtjtOLcs z;Z`z z4IAc7mqrBw>R1|gjx9JNuo1M$`}N-BM~*;jQ4RPvA!x{GD~m+L9w2Mls`-d@>Lg^0D)Ynq30y&N%^b8N=h~EpU%t6&Gi^U0nCSBvd<}6r zQkgr`+}vikb~dJ4w!=3=tD1hf3#&bVSv0LuRN1<$IlvDW-S3$FIq&VsNje^3=56W; zdm2Yrobi1=)~L_V+zINrGYweK%1}F~=sF%Q0^J&L?O3Ie41M^Jz1B6_o$=_@DH&bb zz?2z8pdb1ciD~w-#E~)R`(#J<{n^<+mP#{)U^A9sTq=457f(*rC_!oIRKea~3S?c9 zgc5g@nH2Rhh})6mi&x+yn>ct>h%0l}-C1%cz~k5l9RZ4TFQ#;_MB->=PibE#Sh`ZF zMZIH^{TM-KzSfrF- zCY>(^K!MQ(yKwUC)2$BGcV5+J2@;7-kYeJnH(ho(o&Hl3xM=Cuo?h{q%uE-CZEhxF zcm@ytWm5j=I_NY0)maO&kDJmBDB3jK=y#oE1nJ94c>LG&zWL2OZaS=ijjMbj56&Ws z#>X%7Xi(LYq+SU>cBL|<{F-{^fK#TY9ZLN>lpV{E2-*2O#$VgG7=>FyNJrBy8g}?S zYAg!xl=nIV%YV)aP(Pqe+4mNNU zq+??YkYoS2%KP`ilSDCX<55>+Kyv-EDWqyh751X6tV|yn6fG=sb+T@~x(kzw4lJqc z8;I3ZdNwBCUc0ZQ%6X&uFY0kN9Nx<#NiytbekR3X7WK*7lBc{UkRxPTc)8pJiY0vo zPwg5opRzn7lEV_@&a zmATQ`>zg@BmTG6je|J?l-Mnysr|x${Y!#+pY6U3?s%05vsjASXBmKYDtLpkEZ`wOcayWU2BhO#@7 zK8%tCwa+Ov8PorLc!y{4i7?7t_CesJ@&?k7f_TnD(k}P6Z61izQUX%Z(z@U%b%cJ# zbZqZkB84gN86j%eRF;shK$T(^AUs!8xy7xRIms4g{0!maoJyTqe2W^5{`%0<)SsXV zu82Tb0fibZvL-s9fJsCQbPv{`R9RHv>o>69!$ZG}_0I2x?|euIh`ar1JaEKxkwNet z_mDj`8GD%HGn636xNAMG->6JEaBnt5f8cx5aTrb_=m(M%Pyb%}bOpMSHO8zWtTlIR zT`Z&Kj>l=ud8S8ssSnz2pHKR(Cz3+8_ZaVB91{-eGmAGZ0Xccvp#o07Xt@`Wh?j-# zjPHSDbl9{+JImw%S0@%jUI?RjV3{BdBipmXEsMOvb#Iqq7hF@*-SKEtLA zXHANtlxh$Zq-~I_0aZk+fHbI8e+h-`Q7}x#QFBGDXgm9n6b;5j!Ij7V1?kuN5AnU6 zd3`hmzEaZ9{Z0PQ({s8hQB^TAHyEk2)$u+oL?U+)TW!%^@VL?7O%VuJVLh`e#K6K# zY@v6%@yKGfjMZJJz@<5u+T!>I5z6FS&GjB;Q4ptpNO&``*knh`%d(w(yt4T_ZmIpu zgKdL^-qw@y8|3|5mhUARDV6jo8bEwwrLG(PLwrY%Y|I$Xp)whww^nYi7d|<@^6QTlV(D9}4utup|t0U_3`TB-0;DvZ= zzPzmku~bWc6Q?D)H|*f2)eqvwj;6TGj>mj>jZx8y&q_|M$T&ZwJA;Vm_Wca_2p2NT zgwsdr#vqVyyFhai)IAtE4f{&19omQMW`iStf~SQDt-G9x&5CMa+gcFO{}?5W$@h0w zDlC_z*pJ^)gE~9ksYS_{bb=5_^Cyop4ia&25j1OatOJ)-A=X~kzYF`FaWi8<5fLk* zb_yXhT|S;HHQ9nE3nWW^kmYek-T7+5E?vE7QOnw^Z^eD_4cPqp&&O$q zxg~=P@Hoi>9w*xW(c|`Fs4Wk2R#8D^2m_Az2av!{|eO47;&B8dwjdZcqHZcesgCEs z4pm=oLrH`ID8-f@ZZsgDknij+mQh1UsRf?1G?QlNY~{p3AgJ#NR%k!xAZd7e@v=RSRfs^|u!aSoCzIRlmYTB8o}5K(LfAVb)}@Q+1u&=1T= z5;iCp>ZIYRJ2|=@Rsr=6*kj0|(`&))ME|YDo*uljdO7M#CgGOL&MLY)IQIrdl>3JP zOk?)f0EU};VSBdxF9Wy>?cWA)55NG9{HFm-^w$8USxM?s?X>-G3}6l}fB{SnFo4ni zWdKM2cLp%ae;L4G|7idh{mTH>yGF<3MQZ}0lA2g7TWK|}Xq={O?pU2x`nLgmZ2D2# zbd?$U*Z$r4YyXZ8{s!2;a?`_N<+l1HS%s^XYIl|Z{}-%pKIvcn?ekn zn2-Gb<^M{U+}10r$Ntm*#s6RaFX-z3<^R@4EYv6c!~Z3@9d*ig(pU|52@H_6Qg_xl z(kO*LUydj@nP7~vPRLaOBj9cf$P9a??<g^#nvV(aejir{)9sS9*icoRoJvl7>p2H0-IN*^s+^Ap z>U(k25PqZ98#JZgYc8Ae?ix^JBO{+!gaKY2fo)D&I+x?0NfSo_(Y9&qrv}XVo;G!~jOe zoerj4obPoto8Jluy0|vj(`ta;OW=F_4}Z48Q8#;=Oe&H@mghi}-#p|fI3lpvfd%c)Q$#E8Dc-@dL8wA54>>eTg-RnTz&hGMkhK|}99yVh|YH(r?T4nEc^d~dm-$@D5w z*GQP1U-vg$UY_TCy4lBSMy)LRmQ22<5t^qLA#>_gGm}A51K##tl*cR00i#~>_+E-$ 
ze|A;WF+P4W-=3|73i*UQc~+?kRxhs0Kaxg)oRcnsSEv%An>OBJIjDc6ZznrITgJn| z+X=%3(CQdsZnqY8vO!O7&}d`y(3O-VIjDE`fK6oHaczqDWV~E9nmQ<5guPtv4DvRC zm)$VtaPx789K!RHqA&Vq%|b)P#~*cGW1_{^Cw2-V#!FnC(2S*f&U*bEcz6EXVZM2E zYWptSiOYV!Z?JGUEdNY5xu#@}_2*9rHpYt=jGa*v3C6F`Kz}t9nct0v8)p(SMB7opg9@zBHGO=cic5F3qsDSvct4L)fi5KKfstApiGc4H+IP zWr<-QWMBXWngPH->HkMhj{X0^*IWPkR|YhI10AswmtBxhj_f}W38^CaHZ8hv5{T~M zt72W<)`qa-{V{P~)q`|7Sx3K_nQWHrcE{BWxOp6V80_^7xNbU>^Vg>@!_5 zT^f=M&FKSL$K?Y^ZWKrq-2*eBI&)bS6K!!3bA~`RY8Ebhh+(-@gb1AFDPbE07zyAj zjH7(3DOAynp-U;tw8I%imL=QlZ2Z6Z18)UDH#YbefRCu%H(C4>u*bG6-#It{G+g}y zun6>0I(F2a=&m38qHSki#2R#y(aUwrkPQ z+@SaYL=h_1h$Ly`g1yEXQ5}|}<6+ZZ!2WN;RhHAvpBXFe2moO3`uVoE#K9e1G&f_+ z+nL?7hhl?I=Cb$lY$x~&kv`2cgl{mu4JxLDknLI1d{FKO$y_L=1FH}>NR1+1OeuXL zRqAZZs1wa1l;wl}YEXxuj(A@WGZ-BaRSR_i>SAR(hw{3Hxe7l?9krNeW9-8*1{lPr`f*SG9y`DvZSv|LxGcHUao^e*`g6r+iu*4d{+ji1!0X zT;PLD9RF1Mo##_X!(6&B(vTgW#A7cWq|{M(o@`jHGUXROiNuVLNc|Eg-M<)66Xky~ zpu2PP00vYLz0(WI&Z7X8*x}HvNkMb^0d*3N68v z;ju9fnX$}_Hu!6u)tZvaODDnL^THtC*e#U+yl8LmpoqIg($@(c!B|eTUvzBduRW{8!={_ zPLf4?K^ZMgHm3x{jqmp#eTgLwYd)5K?Vo>wsG6=-r`}@{mG=3DR`CU@_~qfSEaz=9 zF@oq%4OC@U4QK&4|I zQj>|PqMMiac09ux0)Q*9pA)+2#-XU%+=;(vauA+3g>4&cgB*EHRz#8R9tMe~(5qJP z`{B`w04uCor|YBVGjO+YhQ`BT`1Bz_$7XQltlJ`A-|lHan$5_FVgaA$%5()00*u~R zLp_>J3Z>>#&GDm&cMfN;*%CemZevqhTQL!&OfdkfmxrQ_;>p1Ii`AFsj6_1V1Ga$I z&P^Sd`XK^N1(@zwo-jJCPe(rhT>ZsW*Q*$3mDC@WT}#DKpPg*PcL%3;cdtDB;I$rK zsZ^-m0H%*EJ=t3A_)LenXXT0`d9fb1d7O78SG<1E5`p)#ke-~aaa+01U|tUex&jUA z8O>2YxlY;*QgGPbBcW(yJ5;y4;1IcgQ1yKSGS41RrpM4`UHVaS(^v1H|2e^z4`l8U z115MDzy$w4TciQddH@6Z-{Kp}nZkb^Ce@J3s7l}eW_(}15DDGB0q`4acMvuvc8Z37 z*lJcCo;@`sP_K4=RuD_@1)rc%=!#;NN2 z=GsqR(G>hCHg6k8VtfwP!YpqQMHv`eDb8_yl!6W+j8Hosu3W=?Xp3U{YeHFa8va?e zGzPmbCY`{?uiQErF{Kt^uAZ$3Lr(`cdMHN2OsLdZt<-2<&1cHy>S*lE<+uiL=v=IA zDgSOpfr97c0HT`1$FP=@Ou^-^-0d8`mleBzr2rkk4xV$ch?BZoLh@~uyp}=+v0{~r z+aVcv2arBlER{kNay{h8p#@R0XV>(4w=)KVR~#|m*qOFLw_^bO2l1|L#p02EXn`zc zva3h0{zV4}Cs&Z^y+wYXkCOsQGz-a8YSIJ2s?b3kof}Q-+g{79&WJOP^KBRv_M?Bf z^iP-~(I@2jxNvCbxb4_%LO1Mo?hk_`1e{+zal{Aj zD}lj^QK)%I0jOmOl-=w3F425@P}&uJ)~Q3XIwhg~+2_?5l?xVFud6e9igSsTEgQO( zCJJ+3wfjB#NX2jjHpj5#>s)(xL!gpfJGQP7(2;(NNE2w=Dfv$9HEYp|xD8`>k;AE+ zeg+mp@WkUaNJR`P;7Q_?slag7gofxq9w-p0(ylOd%s+D#UnD5Ud%;rTak);pLJ8xR z6u`2Sj=CR)n)fC^q57ty^MY4i1lCLYLk$P53Z%cp#Pv0~9X7-}#;sSl#s!Ahq#LVl zD&`>`&qofbO)-aNN93vek+RNuEQCE%c9c_xI=Wf^YjDSwtuiP3XaPM@&_A`R;S3ba z=OP*FW$~?ozD3j-At)nU2_2vvhiY${GzCx_q6_I*z?2Nz6^~UgYXE9P3e(+HHDzwK zr^a-#$2mT$kY?jMUOXWMK`d>Ac;WUw@NBGj=HhP1SOOC_hCcyI6Lmo&e~4{ca-1gU zp-Lj-gOHzB`rXDFIaa>zZdql2-8eihd${OMXm8dv4VBWKo&|q-*Wz^+v-apo0C~la zk7Fqxuv&1+XH9Hpy|G|FSgdTYMhB&`46@|a7DYvkhj8dL9P#(j*|@Da(aef_oWWyi zQal#k)M7GhWg=L1pknVhrOZ(qV{GO@`eY7P{^a;Zv~*I;m#sV%k{QiIOKaDeXg8ge zMa~1{TvfDX%H|`rmJ?Zvi*M}C-MeBA=_J|e;gwiee^kr%BcanIEWJkek}K)#%&LuP zKMU8Ja22sBS<$G%K577y`;3snKPGq1Xr3?X8f?JiZojC@h%`p#pBZRfn*R}%zjz}m z0!U-9ygz9Dfs2|>Wr9_Y;Io0#kHF*~(%r8B-Qnt|c4Opfl<#3G&LfIb4P~1^Df5ml zzn+yy4c`kNfmp?A*GUMGdWPCuZ{+MNRlxP)yqOAUgYpT6}bO7Pn65Q zenQNT)zjm{x%6nCa%edl34IFw9f#; z5u1b*Xx4hpqp`8&Ezp7JS>$`!3rcwI~E)7r+Brx&l?L%Zw zTZ?v90-ZkRa)wi5BlWUCB5C?M~~0H9~({~M&;H{@a7Rmdq%7BkKSiF2j;fl@)k%>+Z3SGs}Bm_t{Lb=BMX)YBB^8`e7kB>Be+}P{4g<4nL{vjTCsU zNO>fI>+xl(QSHcmX98R;^;12VqyGi@vX6Um>D(?fqWYxMS185AK(Su|!5m_Z*)24J zUFP`_%HS4kYj@uOez<&>)wPcKhN&QJG4!CJ9_I$0-$d6pL!^|F#En+xDNnRyg0A3Y zg+{S>{XQk?M$*|_rMUgtVLv;!Sw^G?lxnnY0Ktv(1SFaaA1C%**yy=Ax{fe87)gJ? 
zr>DL-XlRNJAN^~gY58mBGVg;$laWWx5zXx6Q_3-N6ij~{vI2W+&_gk4H(@pQ^9c?2mS?EdY37!zP694p5LY=XO@G6I#x*dT0caggF652| z_mEf+jjt8&hIkvuBTotkO#}4~DNckPYMzCgGs~a9JfBEUPEPi0JUVyhHAFwUJ9|N4_wLkvf4dw4e|vfV{Csz| z^I|b`c{=~txHx#&eZ54J!(V>6pgRvaTN|VJj)XcKI@K2U3>i-mra(+w--3h(THZ@f z>_RB3X8zmZ;4HivTnl|FZ{A`n%syRGu?Wlqy2ki~RYm)RtwRW$EJrONTmYm&FRNnI zaBSXNZ1{ZDtuwpsiF-^uMaemJ&;kYw`D^n6ieai~8mKd4{@9sYf{}o?oIh6QR%f)? z7@Z8FLt_JGm@%oRY}6>AkhB@45XMBCpF(U0R0IX8brVxCs;w@RU_1sbRpYFy~-(B`(^{+`fn=0Gqh1|ya?~E2iYjH=&W$V31 zSkP)^>L_xAaz5V=MxYyQP$AHhV_?ht1EpUk?V96^?{G%D#t^Q){oiQq3No5 zGrnBwq4+j+bT?`+u@k|=LbFZVwIjYwlY#sm6v1;Kv47{2oorCW*WVDWM?~)dc~nS< zj{ffaI;mleHXbXQKl6t6?Y9i&#R8V-seI0cS0--+&ZiNx+9jyfJXE;k1tse2=n4`h z&h`_Xs5|{vF@*;yG_gQgziJd&a0EhL2-&LhH{Z&eJ7J*&K^2or5CH_c!5v3*wWkOU z%(dm^-U1E{#~BR5Hupn~0Se<9aJYyMsr|fp!f3%H=x2N!1&f7l2)Wz?=8&czCUt!< z;mB)+;>h-lMo_|PZ{!MV_CY@&{ZLH^owx@i(_*A=z|X6NDae~8f2MVchuhf^2n0zq zvnQmNd39Hzu5~XYf$S!w?~)X|hI~&TBr+~QlCf2?{?_@aL}FQo8*a*SsKhB#G&0%| zm~sJYKmtT$ginAR&M~Lz&I$U1QIgXqLlG)1ZimoDnngtLd52~VhPE8pZr>!H&}b(^ zKWTQfY_uUtgzAJ?=fr`EY@Ws5EPRX%GvntztfWLERmy=%tjRzG_aTkww{Mv}B^)p` zop<&$T5G*w6(^i@fZeoyXyGIyy$3902b%>9*0X5HkRAE0B1<6ukUvtbh9go<#;>4B zTUOn3c1BwgnYuBUteRnXu5S#QS|yt^$Rp*}DK|Vht-|+OyD@?lF_CZrd3n-`LV`Pe z7`C#@j#kdVQ)8mX=VCCY+w<EJ!SZrGEQrY~u_=<#K~McUqLr3v+EI7df;%9TRo@74uFiPXvZa1P+pRIROm> zC2LHAgr~AAq0NarOuVbB50lStxOQHevKi2ryK*^}0x=|k_Pqw0hXUI@e^z?_++e9* z74QCDYNT>0a-*M3cw4ke*c^=vB3PAZw#M+)D~p;@um|;W#TRRknJ;uq5moICLNSsu zqw}nigNl&f;y>D+9VOsR_1~ffSej!Gl7h~I*M(}~#AqWLX%Cazi7kWj^Q@N@#wFAh z6I(ezC5mx;`vz4^&;Fzdr&UDFR6rAYTDv8E4p#CyZDa+QGs8)IOE3A8Hwc9nY51rQ zCDYI3?q7A2^5+p}Rg+uCeEguhN-;rNTLtb3bP~XBiu2u+DXxu zH6M+vBERB=KQY*1dB;m)L5X|^TW8iiXhw3ZV`~7&}rv7Bofy3FF~i|M+r|)F5je+W2^&(DNshe z@DP=-bl2`dvNCm{2@dbBhnynyv|j28^+2 zM%VRmtgVuyA&8e~VzlVZ6FA2C=-RtXMX8+-Lto!D;nQr>`0{opI$b@r z+{f1Qv;`?ZZ}K>{*Rb$rnIBZ`U%83k?^!CwFDjJo2!9e(3YRPxdoAr?j`qMz6DUza zkHc27gHoOpW4=);gBUR@yld)Q)<&gI{ATUK?&S)bdRT^da=&J!fr1I-$b2mCGR+Qe ziLX32>=MeR71lqs(qWq*?BdAQs^BnsiqQAeXUd*@g0lHbq0N;Lv*LUOOg?#h^M82@YG% zVkPve%Mo!6uKNpl-3Y1yx;Ht-Up&LBy2{(TcXygQ%}HS>&Z$Y@jF{AAYWlF?j}US{B&l zq{?k?*PHE)_L(%VrxbEmCzp46VaaHoj13cci$8LHjU5Bx1AA&S>(zoQhYnJ>rdP;%E#AkeKgsV^a*8#u9i@VY-gK2-KP-mg?PDnr%&%8)16Y6v^_0 zsPqI<=~Y=>T#C$|KPaFVwIY=n44ZJ{Ai@(F@EkcubP`rX3s%B1w^Y|k7W>dBL`xp4 z^c3=_V4-}03N15*t@!ZmA;(T?8_H6}+~8De|7f^`NBZtG|)R z3qgnHQ$zcq&?`j1D_b^d`Q>5Yc{;2}49-Tsa$^58kFtDSn#3#38kgT>yYeL?-xE!rxrCgJLVe$y^GFXm7gpBkgZeP$G1S8VT481 z!vHvEW!K+)=yL>*?#Hj|EH_nI&PcbDh*r{VWqWIsRLz3h`Rq@whweKb9>7HHwhM#b znTBsOKYVz`?3d3PIwuiZ5rP9#p8^W16r_tFGtBkVnJjNz9+(!+`F|Z;S#lL?6j$Ev ztR9*#o>V|Q+ls%HcRo#UuCVLClPZ@mWkfjPKCRHrM z%j-u1#SV{oOnq>+bLKc`R3)lY!^B4T;Kx!2m#<80%SXYvS9wM%~QlLaCl1%g~>%_aiPRRc>?A_!oB`%LtQLn z@Bv0Trk?PWImwhlCM7ChxL3vU7>B+e6F>BJX*QCGOmXzzw6K)2@Ng?t%VRhI-K!D# z&p#O@yVPG$=z^v1nJ4fBoVD z6AM*oyPpz)tdiqYbI^+rxSpG6{LShDA2)$jq+XoZa#f_jj_W!yUtV`^k}i8|Yc~(~ zr}lzue;mCXyzE#yT3ZI!Jh1e9a<{6g=s{q(bV9ewQRM*RB@f zIKw1W#+9F~&?&_P*#B9~B=hXSVkC}4V$gudaZ#Nj)SUB9^2us(=x4aA1Zqz0`PXKX zXQTts>rBVxH>kO?P@bA;cTCM}*5bUyo?+JS;nc^}swDm(eC~FG@>FK{w0v^sgWYS` zS&vqGfHcxTf-1)0*>RXyF5U>MKLA_PNtid`zu zR<&xn2sO!*{#-pi!KxlpjZtQZ8Y~_NS6laW41SCVIn*Jj6Gw<+>pH7+Cd0tnaG|*y&W9GQjEgzYXG~}x zP})!iA+$6JAe9!gznfpux0_+61tDMcQOKwaU}Gnt5_Zt;ejJi5Yfx&0{>tzqi248c zdWR^{qHakyZQFU$wr$(CZQCbp+qP}nI%(V1%m2Q{t-4jC#@Mah+nqJ%j#%*pD=XLU zb%}85WZ|@a%U!;GafG>IyoQj~d6tv%*%*-vbG+_LqnN=M9DP3PnWc_+8V#PXBX#2| zxMjn`A?n8J6d-ozpnrE^(P6OoYvs|w&}(@qCz!F(u9O}IeM3zNlv1Hyg2`w)>@995 z)V@Oo?m$>Uh3#q~*`N<10WR>Mey&zrIr-Jufu?Bl)OnGSa=5@$@g{$ByZip(%4eQx zmPLNuhX;guMBZ+7y8YHh@~CK7V9kF~d2xd22b%(ir>o_Ig=hBP<`?FjmD7OA205le 
zJ4HO3L!P2~jf<~J;wH?a3BbHSz~mG{y5d;Jddy(sgzYV3g6?RIgx4pBYVyE^gH6j7)z)Hfh}of<$lc1dreX^JdX&n>kZe zib3`%r4u`c@j`0D?>IQTR@y}T&{E0yG?uLza(TQG{OMJzGg*1m&`QQ9#*o2|((#Cy zmS9=Uj5IOo6HMcdxWAQFcPt9;c4@9h2hQRQ3mByHHn|ZXd&g_(nXgrRNV2tFya9H2 zQ!!2U2_#zqTify?c24@cWYP!0b{NGi(Y{48!e2uOG2PB!o~1CUPCKbE61n&;=~P)3 zn%(>hb?B5>){t>0a8htyxrk0G5Jwphp3ggS&ndPcyzl}BP6I4qyAt(5oztGMK9IcC z@!a{VQ5uNu{>pXUQMCpG{enS=_s0g(I9F9(m8vtHr2{SsExCz6ix5;o&mn=13j-cxqeoH1-)cQTlOpF0qB&GuVs|4yp zV2~D&1mpbmlvKljGI*1#pKM9v;5)v-c)0@u^rg>_+>j zmBh|WPH*&4!l1)`C+Qk$LpuI>HJkX#4*d#a5X^lN5&PBSq3TMCNJh7#&~i1*tm0X^ zZ+1yN=8Cwke7Uc8)g_d22X_{njuWlxZ*%8GrJ<6)*KtxecC>Uf$8ywXY0=Lb7vsFr9`c3| z4XJ8>k|?OcLJtcZljVN`$E*NLBuev`(nID8h`q3+g%bGeJyF-+ZQsshJutR*;A(D# zd&1f~5f2xYk@H)p?@)U9L%ZjdZ8$nQk0F5-{GvR^8Na$EV!1*{QDe9w9B||mqw*o; zUo-`P_5~A)R7vVa@Z$B$_5}&$I0ooCbRa1llKA*gwdde_S$?`jQGQ_O9(IjWYDCjS zxnE@zT#KjdUZ$4(0aw#5vIc|##qa! zeYhc}t3DE_3fKs&Ln1#sfLk;ZEcJ>UfI3qS3FU3-1*hVbKvxjgq8g2GMe>eY9Pt#va}$ZAAxX3>rDMrrl*A-0;|@f~ z*T>IE1_bH>+NLc4dI6dKk&XlATk&K=IfZ2m$g} zN}_pfGsB#V1Sq}g!m$;*!tsY#ukjtyb%Nr|uhlQ!N|^&9eEy_RLK&^T;qnb-fg8^? zqO<{k=Lukz0gnz@)dX!K3+vE>$Bv5BZ-z`3X2HV<;SpcF zN&c%_$z&p$L=I&D-$8RsOjA^Ket&Cqk;pIiIEV3u&or6ApG+Al+fQne#cTGxFlnN2 z1>G37qRTdaUTbYPFL@+|xrrn!k6X&1Mp0<3v0Ei_AxFPB21UX(tuaQ{(tX*~3I*b+ zbM#DRc4^;0+%$sd@8DMZe}kpuD8_kAMWC;UkFgselw>HBSZ!MBDd9ZetJ(%)D*qfA zYMbT4+Clt%3sa$5dxQ5}*Y`E+_k>W>^yw?~E>u8xBWO-oEBWz@c$e}-k*n%#L2C_B z;|c*tsAD{(9FW7hfh#nNXt|U{j>q^`4iu{Xm>Vh>m{GK2nX0!|?S{2%ygv@5qHz*?P&@wI87N0$7L;a$(@WBt}d&9m{`9$3Y$ESC_+k{@u&@=TuMGA}+ET1OJ z-7mU7ta|6&UsY(1$83$jP~lr%9cv#*lc<7vLytM62%OOw(@j7$2$+iY*(iDo;F=2v@GwATAz5e7stHu-WWxOJrj`3VaWhg(p$MPt+4{4+yVR zk2R?kt%#O=A$bUUwC1o}?UC0Vo!T~->z+b}dFHITs>u~_I(Y1M*1(B5H{Y(g`n&?O zud3U?aR6ttYK_v+&on*?0j(yWIWtnsRtRJVI^(tVp1D2GT)|v!Sgo>izT68}9M~Ov z&dJgo5I=GFY(g~+#L%x)jpqW4zPQhrH#@J|=^xD}CaeFgUrAGH?%u+lHgmpu$||sY zC&f#2kfJ}3n4}ztDpoU0O<7r_c{El7TyZ2g8q&RzBU!g1!hQjm#N|x-Hpa>u_+R@p zvf7v{QjRP@pA$l8X~O**=@oK_oVj5(lk6>C?;spv&%M1^7g`NXSy{V=V=Wy)N#3={ zU2@v|PU__mn>#+hh||+?pBeh%efHv)hPN_4C#>?hj)EP|)ty0?pWPOs5);5~J1B;d zh$kwEKS;(1CeVRw!-NxQ6Bxx*$gJCQRjUMt%2B|o5K}FMkea>~$1cFjhR|M!fXSxZ zQYqh(lSrlPC$q{-+RYB^h`O2rRJec24_Oa;{~4&vS@J#;oH8B1$(jf3~0> zHm9X2=ZkFQ3kK$8v1dBM+Nm|n@}>5$}>~8_3AGEE64q7 z+q9}M8JTCey&5LDlrh&OQ7D#6D8JOu!oL!&Y8F}2oxWQ1at)MvEIZz7cKg1=X|4vJ zv)RVIV$Av|_}mkK!3#W%DD@=uto{L&DU6;Oz3k(>IfR4?Hpyi|XvnM5`JV>Hbdw^F z-MVX7X|U3@6I|3q$48VM%?2OR)R*U}(@M-VaP!jLch8zE*`g!}JgInutsNg!#U|yS zN}-RMnMCb2rQVKZI|F#5YJ3RN==o$bI%`m;g5Ws98xO`=6)acGPBTW{y^wPPDCJsI z3KgZn&C5%#4n7xdAFiHDxD=`C6z&~*gCyDvheSZ`*8u5-uB5ocEnT(u;x~1ap?%te>zF zeqHFgoDvBJy3``V4!{(Se-h)twgsPbl+}i1)hwVU_lFco3~Nw5vxUw(qu8*@-o42J znk1`0YC;vbO6z?a%Y<;(P4cp;2SPq*xBL(*g9Sl z38QmvVKl3AW>AxC*^+JqOLgog3zI-QDptK$7p@?#2&SuZlv1Zl3Ber(DF?fwaPhqV ziB{pvNB=TgG6;07U%vi^MPlVFOkR^Oz%@}73~oRvS5NrFnOKUKBwwmzRwaZasZQ4N zWC=m&%Zm+x;Bf+40(zv_F3{mbngKqZU1)#2e01*CeF*{ITs3vTIthM$v~*_Z!h@Fs zSZnN(Gqj+=e3L4HiQ(Lg6AvImVakgd&S)a2r$_RVW!i35fO9>MUyj#;CL%DYsbaEB zY0s!{Iki95`6X2lFX2U5FxxI(^t&@(3ygf3-^XUYAa~&Z+k&kjE8I)7y*}MPes^TX z+|oX{f`zT&TiBd`GKu}gRIuDTSz_?I@*`{LZgU^E{MC^%Er%A;WIK3O@-DBOXl?JL(q`% z4RSG${Zqv5Y!oNdGjw@$!#0ycb50!yco=R(pOww=@w@*7r(^cue28hjweIk^(wQKX z4NA^2yWO>*cygxAV%L#K<7)aU)*2SOHAY*LA5?)!{6~6(JgC3Hjzye_r}LNHH1_56 z_f2FQIMsK;-JWKrHlXtE=QeqWKh;qYqJuz6#2a^t)|Wy`aC;YT&;?Txjh_4-R0B%g z5q`YB2*i6v76V;(4^~g^DOq|M-2yWt=iDsL!33qt`BXWE1Fa2TDXkp#KUhDUZz>FL zjHntggn7$ky`b^w&Aq47lfA99nes(hXTnV$OTHi3+k?r3pUR=(Pngt!rp{_rswhyP zHtry?C5m2`SaE611;hKDWm0)5(KtXdYpv z298w^<&dbO)KzXi-w>%DC5AAof+pmPn0QViUJ^J{8Id$DPfc168>p(c8fcGvDSTuC z^V_$BQmM{L@{XcDcKp7j(3&Azs5qc68)&T-+zC@S68}jYe?hG<>|eBL8R45zfLGS8 
zC+9JFvLB~$UmtP1du!bL#Yp%%O5>@`Apk7|4_q5yi!f))N|Tl@;;H0|YfDn)5Pl-) z?XlV`WVSN4o#kVKa6(okTIlp++svu8DfD^n#?YV~qZTY?%ldRPFPMO_oMs4ViFlX| z=O%C8+X+Wu$vpfMHXE{+!u=No)G0Aw`ebxT9c@}FuM0QEIm0znAzD!)O5`yoJ?|<( zc;!z!LBQ0NQI#HbpJmkK+rdwM5)Wp&lbF}rAVq-9P|@h4-s54ztt23t6NP>5S?)iW zG8VaXI}*#GDJkV1}x{C@F!yMW*N_s7j-vch`6CU!Yz59CJP{pnw<2Sy|FF3?8Y zViiQs-)+<@2SxH@lc|nYAR$=qWvZhd6*M7Cu>ATy0C6`?uLS_^`3H_IqG7M?pclpL z&!M-^2gb>dJ7#m0wK;5XULo$1NOgVFZTr%WMD5h{d30Gjrb+&7$dgkvn}%MZarr>yBq*St;2-NdL|WzTy}2c;s0R%K`tyaiB{Ebk<% z96p{i?9+EaJ_{nBIsw%SXS580&=_Vl|)36=B{ zD{?H0?b5l1h?sk;#LpdwR{eimxL+MdKh-_(vdWG0Mf8ER^E%>z0#OLMF{z%fRr#v5UUhHCj}lUsv<61MPc; ztLxV<6~M{pfDM2p?6el2RxpB=A6xfQ5;7bvGD`&aJPNSr zBRmY+9lfE{=8T$Kp4#zvmT7qBwAVv(J5MwTy$+%&mUv%|E(2TL`tT<+CYCpI(d0br zBhQ`Y&%-<|&wVDU_S_roUyy9_ zjZ4F}dWH?L&)RHK4C`Z#Tm!Mzf9O~(w3_Jbk;bNn5fRoTEE;j}j);4eU67}2RfoQH0IYi^9h7MLLGRWM>Kx z)45W`UwQZ)Hm+Rr?MFf-Yikwd%xkt|ZEr>vJK5;h>b<8F zEnqC(%;L@4I{qb&8ORZ*)G?xRDP|05O8wIqe!g}#L@f1>inx+Bhoq<+QqQJ_-h@y! z!S${|{-aG;_^dBkXpy-MS5jyJfC*o?KKp`i_DG*54R)}8YAgA>eRSi)i#5)FVc6Z6 z(|-&1MvV;m6AS(>92~3Gd{NTuQI5k5nUd@De^j^sBuDSwJ)TAt@Gw-#h2i812m0*Ip;RQ#DhVw z;&I~xjateKkKa_Qz9mF1QzqZ9holr%JwGz^)nu5 z8zZ!nHP;xCJ&*8-IpHWo)PVZbimwe$KKU&Ti+ECdS+Q&`Q2v7r@0J+DcmAviBe`YU zx-}TIK$W0LM1Yl04!q!zia5}>Z5t4+=ooHa1iHT2ao>mNPXtxjA}%#_mPD>YL)1Kj z^c*p@tsZ`FqQgYnLKct4uipYxP|chyeK_a%P2dQg_^SN}Qjy7~h}n3_+p!p7r$K!26FByoXWZ!ZvdD0Nle_z_|hG}$;$E7VK~wckyAvQNk) zMb%%V_NZ(SL1)0$lXp7?pY#3Et4ie2L^73m=;!nz^={Rvbot;)jm|b~KS4~x&U}Ky z(5%6*Gq|#D8%4?pS*r~S(z0n#X(L(s2y*K*iB`bk40&KvX@F6T*UM$&j-qAT=Fk)RA3&v5lc7;;gEWO<-s&+% zv$zRN2h5KXE$d=oShTYbbaj)f)G0+mtQ&TpQyk65X zrq4eG$}DFF3|5b$y=1<@@=ttZiDZtx}*-s-#v0Btan6rP* z$txIC(EDo^0ubkT1+E%z;k~-${wo?6l`>W=5 z<-bw2MuTUbG2vmE)%R>9v|H~lcZge~`}bi1Ar8G@qVH+z_9CbKe5nfFG9$Lh*V?US z@TRu5_RE2V4Vg)gq~54LTx>A><@Qu44 zC3}_&JP4qd{Z*B#QZ;i{s(3-H%%liJ7@fHjfBR*^ROu;0O$(NRtF4_3WXBu|HQR~S zy_hss)Bu}n&ACS(u2VCp%1bz#!v9Q`jXQc8lkc5wI|%G9oa-wwkyL1~+V|j!W!zz< zwijnS=8x3=qA+8_8yQqs-ny_m%~caR84mP@K9P=OQ`ir;qrCiJ0=S3gZKQoGU6TJ^a!yKj_LUrrRI7MulDLaU*Vk@@{gs?QX+db>a(h8qU|Zo9 z;#HkIS@`KIzG(Z1racR3iIxbTiGFo+MX}1Ou z0ZsC`Alm-D>26eFhp!-^Qh?`n#`s2X11 zFC->0UVn3mS!869YOHTp(EjPo@9wqHA~ZrcoT*d7+Q>~}f53*=8Dv~#8qi8c${toh zY5%kUR3BdC#yxcM%>LgrABDw@#raHA1cQN9f12kZAnzNzhD=1O3KMnft??p1_tQlk zX3Ib=r2gey8W{rs5d0tG`@g@!$lS!p%ISBEUuj?1ZHl)1T=yET zydj*?R@n93C5$!h5XVnEya zi!+nO;WTr@J&wL#qHSAY{k`@#OpL;JjXP=iP@!v>NgqLPye+JToHl?TVZ$`<#fHvp6MNRoOXntVdE$`Vq^xH-+F z9p(N(m=-%8)liW(#Ulj`nPNrbL42}I!XM@?GNS;8jUr0c;1kJr`kw=#&0qabFYAn> z;YziOC(k&f`k;o4-U$!=uk)*`>CDa#HZp#Co_nMC$quySPnTqsy5&33BB?yHgqVN{ zZk@;mq_eIB`&x?lZI@)0zEa_(qbiNkRp(174Vnfl z_(rh%5_+i|ng&vvQ%NhOph685TH!{G{lPj{4VL{aB(YKtLOj(;Z9Lrai%DnB>Eh3S zFjy!p>PzXsg^Jhb5lU+-m|4-M)xmOTA#^y`DBkVW`1N766?zNERKE*)`$SmKF1`Y%mA@=5(z7A&dJ z-&^QpwZpj7o({nU?%v+sL9@U+iuyp@=`vW;`cGx<(z&m7_J{(JeP_q^mMR(jekLl( z%K#+>6pfhEO7P}+uam_Yp?(hyucNa)dZ;ojad!3*+jsB^K)XAr6-0kU2O075PuprS z8D%f58K)wSDlmDAJy=Nq*JsdPzaSLf)sGjU873#2B!&!=+182WtlT9dr^6gHj`268 zKLu3(^g|0+L3dIX04OLjg_RapL0TrUMwVyQkdo(G)Ca_~hkGEfK`c;R5f2$pn+&g1 z;UV?YkK1dQiY#7)Nq?JV)G|jRDu{iaLcFRIag8D^UmSi z<}&eEcd%0&;+zf@GD*!TJ2Q7dJCqlZHPFbw-_QDhkc8lGL=J7HW6a+ojwf%pMic&F zlWh8Ho@y*q*~uIE7`7O@zmV$K)|4ob0rF>?9tOldSq}Iv$3z7-0a#P<&vSc{I-&^3 z&o30x;>om5oDY~wH?k4;k{??=s37*zJC%9;Z<=BggfLv52Z#;#W>^jl+4C?lBX}oV z6Mbuk_=`LWyd4BGoWe~gavwomL_?bOGrtNWIX*KtnfRy}FarFk>Kq{yGh*y<$9WlTvyxYc}Ra(i}4BI=le5j_}f z-a0ryAbw$dpiF2!K|SX0eMkK0cVLGLeGHqh72%63dgv3NL>4f`Jw4{g-~;`Avr0h% z??m4~n|eKU;*{td&q<0Mnq90fB=O5i?Nk?6K>?U-ZKa#$p3iLy?!}P2mVU z&yezfVr+$o6{w1G7YXLsb*}~EvU6sDm3qbORcbF5x!ja;ql~2rUje)agMCwMDWFJ( 
z3Lt)((QB(kqRpe7Qb~AVeAYLyi7VWVMz_;gOvsQFusxrtLRYxnyNxyj@R8L!7 zZ}i33dalNSR>9g1DGZExU#^iDeyeq8f(Zoq^MpnMrZLP!G;P2}`*o*g(f9pKjD*{L z1~lPjs73F4zGxEgw6XN;?R~u5p1#qUgpYBN!T7s;7H<5!KkU68p3lE~oc344kR+XY zx;uOyR?-_&ytp5?u3m(~2q!sLp7K}hi=FfX!SlWuS1Zu=!V9hUYim%Nv!ig4j-gP> z;R72O)(sU@%M{9+izLib$0Q6PUvTN=xYs(_dtWgE`@$L{WAzF^y^w6GRXMC~cH6o^ zDCS`=;)l(+M6`mMR$MORN5ic|y}8d-D<90nKb?vizYd=d*nJ249Z`}-NDNA1r+Gk5 zeEpS;>sxM@%+Bh;t-nt*R|v|0^(UpJ#Rn2^vnl3yeT>HH)|_Z{@*W)3<4~XSfm)3Y zsTms9u_F2`T3!1Qhc;CtkM;UlVIK^s%~<<79&BXK>5eM)UHm0;=n#$U`@Iu(O=C`! z>MgUm98x+lCMpE1!z4Z?l!CFgdmG1&-ozD78A*&zuJbj=1<2`M^gA2C28p_&nG5xL zWT892ViXmeFs#?Z*|bL^bfn~J_BYK2@4KoSGuo7QwVfGY2Eot3h;!3FescjYWQveSk6g-Hx=U#7B zpgqM4&dAAvdz9F-U|kNiL^823XY|%cUR*Ak z2XxN<65V7rpov5Q89~gAEFD$;q9H-XBTom|dwIz!cwU6037bYlRP*lXe%x=>$zc(# zj9vd!Sc%>(^*EE1y2ps5j}`UgL63-8F1Xoq9BahKW}p_-O( zKgYv27G(Km_BfOqu^97*Ni{6)QeQ0iWfQ07Z?98bK&kO=Z6&i}_lvtGqMmEiY0`63lR8*p;bi~5EJy|MxXQul;8 z0caWZ!Rq#$X|Q-Gq7VwkT@$;~3PS-j6xPNHV;iY6UBv7N+!p1*)3=Co-bBX$jDiTl zSLPP$-L4AkzzPnoN~V?+Arq{*f(1>N8Mf6rPMIhkR5BIT5UOAUqE+A6y5S+~UbCyV zW@-hy6w?xu_mNnDMG1~4pdqVf7nz}?=qMA7Rc(K??c?|#>u}v0ypI58TLG{nJMled; zi!we9Qs1r)Q{)n)e!Rj*p02?{fKEtfAvx@C`atuq(CLP4j=<253(1k)Ez>krx-8p1 z$mEDEyZJt4h8J8RmduStf!0Av0}zUl`yMSQNZYo#b6B$>=|x2b7wI??7F}|N z?hF%OuMrZ>b`cMMK@S)0{Dw>%I9b&%@42U{!*@zM`z)~&frnhAzZ-6iGA-UC71l@= z+iC?9=LbW<32cF+bo-FiP(#ZxuP<#e#ez< zBfF8{W~~tnN39SLgDMV(lg~1{Oo?Cf?09)M*No+_FNxC|2wW~3mj*RBZ&KBWj{dol z%9$PsTN!8?D&7-|Q}@|8Wj{(;BEYIy2VlbrF08ZvQk3N&EGVS`dLSp5-*;##Nh|P5 z`dInUw+i#8>La+=w$KY{dcq_odmd>@2TM1K=7Yt)o<{piHRn|UF@_TrNO1t=9gO$ z5;Jt7UYU}VCa$|H#_Y*=)P`G%#0%)>eTjRUT_aN8t4Q{H+M|qf<$)TfUEp)mi5Pr} zuuo~mh?eLaAGv1MN&c94d}eEqE4teoVXrhgOgUZB{bD1ID>=^gR6&-W)815t*6U)| zxb!mgeNJn~%eU@mP*yd9kjrgOo#?0@EiPsktK`M_vypEfnc&41OPw>$4YXlLldeV8 zKmqOR&7b0M=rVhsKZ#fmMx|Bs^7x`boOzp-!zXplA8+`H#-)HQJiGr~G}tXuvrL|vcIp=K5w36i%a?L?fIX`nb|%xn1!#D1HpFUm)>NIZ1`# zr=+;9Fog@n%gLEN=y#$El^S6vC;Rof0gasLEyLD){42wp2Yr5r7#V0#;#d5@LXaa5wtd{Qnxm@ zdrn-b*y`w#{lSs;9%*9TDaZ_1iG9_?oX$T)!(Y|$qYN)vZG@!v^L1EM%TL-2Kc`zO z5I;=ve^UtJBXcIDGb*X2mfoJsQU$;oNs8z0xm+}FN);zZ5)#5l= zINgRv>kzvXxnkF^%c~vnSU#n%7wN#Nr{GHjxk%SW+kwHlx__jOv_hFZOraJnAuI)I z>7A|%D9>4GuSzjAMz(*%grv~Ip$YJDfTb6SXL1oN zC6EJC#+zHlEZUP}O}}iAH9YlN z5O^x!?7hPz$oD5d*}t`(CoC2P&2c3MDlv*X0IJNmN4wQIC`nhtf$YwDLRug>Rz74h zs{#gV`qasgi;6l5%p|>*1pi}|YnaBT`u#IZ)yH)?@1+{bqw%H!!9hjbmIfMEw_X6j ztTaXG@YxDTuac!#sTmA^&rk;Y)q9l(Z7Ma{u2gI61yt!$j}a@{c^(25=+jY(f189B zZ@vO=amv5Wzvy&6eE_|HD&2fbh%6cXkeLgLL9!EnG3!1qF$yNXZb=aL~#?na=lx}M3z9&CsWaAIyP`@$hS&J`=O1DB= zXX;D?SzLUP%>k{=7qzJb?H08=2n21ma8vAPYF1xK?SFy}ces!6&=FT=qIbC(uh4nue)gSK7|@|+D1}h{ zR|o>nPt_>iN;5uRkO>I275QkGNhCyZLA6aD>wdT`xDVS8$bfD{!-NCZwW}4S8Q; za>w{7pT*gxIciYW*=4Ne8{XS*~SHKJbmx7K#2>T%6nF}wb}4n>=B3yS6y|x ziW~PFoH)ny9H=f_oVhA!4ztyEPanF$6vSHz?N{6dGc1nHX4YiIy>-U5aO~ArW~YBy zP5cU<>8;=CUFW7YfJ;xAM}&|0*=ua@o8RD37BZ%40KcuIPlj>Sn=P}c z89>*5&-O_<3eE}oy)g7-ZKaB;ZzI^`3Qs4Fm>s-?NRxvTCxgkq6o6i0M25Gv*dh3d zM|nSXCp0MwGRpk89hM>JMYlMjZ?SwgR+Tjz;l7rCr6<@4wdQB_DsHSSw#fw zC)}Thd_WI|C3qwrl%>iTRG{EABc>h{;A^Lc6}jwie=%ZzPfbRjWH0RjfxT(pG{EBS z$nwS6GRP~~BVR^^A`Jo~Q4wzBEp@kGP!HXJ^M16;=&hODZ@Ez*0R^Y62nIx8u7G%f z`J0qU?q*Z2B1tm30Ayg6J8y>$zP6RGhA=aqnhq^9lDH<;(M{iVvluH&W$Om|Y9ms% z$>O(&UkTXJ<-~Ac+-*bE`m0R=ydr^@Y5d2-&Ec>5h5epdd)~D^Td{?Kla%U)E6Rhe zE&wHk2@Ms9%5Awi6i0k3dBgPgD;g#HW1d^`ku%zv*{aFMlBfZU`7!xdO}m!2h>l z@E=Bv!+}Jh2owN7IvxN3{{KU2H#TrK_^*_Q&9Bgo^j)J@U+N8!&KPCFb|wN2UZ=B@ z?NF_>W*a1kP?X%10I5VQMN3mI@a>hGuG16Yn7tc3#Mu7O@^C+O`5w~tH&F+x>bkkD zQ*A$WODAjQW?1f#6sjP3PO|1}J8~PkE3=YZ2|8X3IvEP~zEODQ&P0v-E>p7cPs~ug zA2AN$>1J`y2c7uZaJ%|s_FqGqduajTxNs8$%KR3mWOGDeL%iBV`nY-RaGAUtjc8OL 
zrlA{EVeUnwC8S5loD>&M0v2rPT(9cHz2C7pJ(6l&gNi@;r3}qZ;Z+w68p-Qc+i8)Zmr~hUI%Nj7Ik)S^}krCQ!1d zpK=k}2z60+!U_0~glL|x6IHF%$HCXt%jH5~q_nd0$M^F|)|<1loujj-v!e?Y-)?Al z^a8*3x`tQx;B06A^4;yOxAFp?Cx4f+OktCl>R5MG2OHO-? zp7_M$&vX-+Zz$io9zJpci4gqrS3DKSzo3u1iq*!?aBXVA;+Booq$Ebj;HjZ@h2?tl z^=p0kEYC6R_v_{HRVdqBGvAsZ1Ouh?+#-RZ#z2uSjINh zuktLSRFKP0a=pqqhhd6mp@3qwIq_LB(V>_JBe8aykbrnfv=0=zU`;YlGI5sLd9q}{ zSKmZ~8rnr>+3ar@YQ}I7WlR-`W}RICr(($y3*s4{=Ug!_ULMEIndy5Lj#c*B$=E;Y ze2ZblqG+Dt2b$xj7RgAUg30&ZA)?0u(T*QYf(YKxj7Vyy_xKdV>p2sS&^iWj+i!77mzm(8XS`WN^1XYsft}u~HP#u)79ery_%mwL>BE-h(k! z6nwxvlT9}6QhOp&WKMdv?XFY`_`Kfw!W=KrC}tpdby-ncU^1wQHqG*NQ8Cj1s-4S=}4$iSN5j*tU5tPUEx3%A03H0#oYnfd_@t-U$AHxLhAA=)0-q z^3zYkqvJt|MR!#?ErF_`C{*ViFf>Pt$i)$oBo>aM-9BoSOBx8{&SH&O^HWAPSzW#e zFBnd!eWD_1(Dbm*s1AdIN`tD*5p&85&YXfz_ACmhGNt6!QASPn4B3jZK)wFO(Ot6p z)VtfDsUY#`ERc4kN|)%`j5_&hVk(&|y5#_QCfrv^2Ox68hH)z8g4a}lS;y1UKK_`w zTn|_n`A~@9@k5?<8*H($j@4Db^eOY1e80b$AU)Sdtv0j5|c_Y4t#3;fM ze|Yoo#lANkpq)L>IN^YyhW%BkqY7z92z~C!#dTas3jDcp= zxakO@mIiM23epP#^XD_Xz;GQNc})Eb&4t$`gBev-k}`t+cs_YQM%#P(O<2K8l04d{ z9#615jRjiNVf$u6xn2%Z_@tA@{`5(}L_6_o77qjp%dm3HJ5;*LFzr%~e9{bBe&1FF zkosyx|3=T&OBsj|SV8R=3?TWB*aL2<5SUW}-bh=%SiwN0y;o-iGmhj6nh<$s#`^?d%xS~8?1ViVizaABtHBQ5MEw4mk4EiN%_A>DxfiH-x zWQ{SaCx5q4z}i(L#qDvA2A9LUU&e|u^Geo3;i(L{f;c&;_tlTC-71Hw57OY@uE(?# zk6zL;TTY8!w-aIW?-N9@%?EtyL#R>l8c5+tqT-&T;wo{qp0O2(R76`;*N=92TkcY< zSxnNHx{f%xJ8aa}BMtTlZ0DeHS^I2JuPT}a)Q}wDdpRe!9b5Ebot-vbF3K$O#CV<3 zYkJp=e=Z;@EGh(5?H~n8YS1!30<2E42&c=;oU|bKv?Zl`Az7DTy_XR6*Z-zE)^~01 z#eq|KC3;rMSFrMQHnV6_~Y3m3ictmVM7{?^B%Pg~&uL z82n9Au%e}Xm5<=?gwdgHpvi92=kD__I|xLRoY`5>;@ZmPha}jHHf}L!q}ffdOOwea ztUcsAWwK~{mXWuF7%?HzW`uZkl`}mG0^CqFmgj!PPjnt1-Ev+HF3}n-7+6u!_8{7F zXoz8%wJsH$r+RLBUO>i?UU>tWZgyHn0%?Ub-=Uw(!DbhxCHwWtKeeJkqGGb9<-kYD zu2;`LK9REM!VGU+?LWVz3W*d+96A8FPbc%CS`54iW|TGJvJqARtUi? 
zh^1A)L~;wX+&aj%4l3t6Y^%R8ZfRJ}!rS$`8MUOiHuYPWPFj%y$}r8qWm;RWKQ^RJ z*mE7)E}XbWK%q8Z=#3nT_c03sihe^nUyhR^Bx_{|BQ|~q@z9xzoD=50RUE2 zRsA*F!Gc?cA*|V$>XAI)PQS&4D{=kUYWq|3BT{Hx`(v<{vfRYU^8!1qEM1*cu6COA zWRhk9>23t|U~HqfFMthFRhG209wlXTu@8^i`Kp_)(t0(W!+$m~_ij+x5kvjc{O2i8 z&k8{!QIriY_-?ooS#+iqV5k09-JKetFKj%{lAXwfUASXuCnE(k`&-a^SHzB%Oj!ww zI8O1^H0X6SX40RtB4%7^AKYC3daWE42sN7d3j>f8QkQEtY}`DL3ArYaF|@1J{a6JC z*ZN;;zi4o`m9eGR$_anNA~t9M0Hpt~4b;Nvw`1GT#o5I8KZ~gPx-AhKn$M~l{W-WA z=y^k;$6_DgrLJi(^JTzQ5h6yYu!;iG9%X}I12vVvx6cfTMkGmlBb2$6_ORXx-1%b<;T8CGr({`N|Zeu1O(VA=}zyNJD{sG-a>>+$x=A&H+`9^jhqI zdO7L`E0+D;wLZxCkL6Grdc@QCBE0ho0bMf?YamRBd!$`eM<|ia0o*OxiM4>yfCx6hSqU z<9ZwVjAe7s^dPi7x|+4O!=!1ek*kLTS7)~W=*jw)U#(x8Q7)aWEp5%UDZcIc6Rek; z&y%k!TW8O}IZOwrUshCRL`D$Ctq%4)zLbc=DozKdptlqM7z=tn3AN>fr?cfaGlGMgr*4kJ5tWJVL~{ne0lS%>}($j+LG z__RVro=Q)}WFel;NyJ?bTFll9rw6j!*3Jy2wF*T~f>Lh$UuDEFM5nkY^JXkqs74_Y zXtH!jrN?_KEO_z*ko7rCAjGEjy;0%|h=Aricsm|Yt24`Yk`zTixZW~DIkDiC!5bDH z8w)?S+^{+)%<-~#&Wc}d#0eH|gA<9gqAE=%WngE5IQ^ipu!tM4U7O*+w=|rH#bv=x z+LpszNIE$(s}b{3v#qujNh}r~#5%@{55+72^^ZBdgZ)Llcf|DAxBVCc%aH`sOUU%5 zjI#wtp~A=l#!&*E=*>M3J5H=^K7P|9@9VK)SP7vQOog9(ey5INCVvSX#a(~WM;v4b z8}D~8?MAzSBnHMaCt|!*-KuLGF}oAecD5$TWhH0*>JY`?6V-WM*6%TZrr?Up>z*)` zU+=~;C{#5)e4ulI*rr_e{YEBs8x;{w3iY6i#-)PvPp*}-M9@C0DQK~gNRi^x6J3<; z_wgJ@-5j8*1#Pg0mw(W%ZL3^CKBzjcVFIN63lLA!H}$IrB-cNK1?P zIuES`HL5fB=U4Y^f`%*XPa0%z2)l6%n~Jb72KMsCG9PIwqjoNak^EiEARSV()t&AL z-M>Ug2P!CY9@I5$S>B{kQ|$Qd*NhyM#y3xrhK<%ebzJ~;(!3(>>Wv|1NBZe_g=X|i zuNUcGm6H3LsU!Fux=DDzxdTb11}@?~y}wpozVY1kThXA;%%X--(MU*($}fvi6?Nhg z1Ya9Q&sH8n%GRJZpNYWoPv%K}_cdum`=X3Q4G+=37|dbK)%b??sBpQ3uu1zaWy4D$ zN)vrUMs?j5?sG?I@*56SSNeNny$7MSJsefX$0>?XSoirw(!fK*KlI5b4@&V9V5@`} zfPf*BxZwEX_=`C9PYUZLMoIxbPSA=a#P<*uIT;c=G1pi*)b?BOV%;Q+U^V>qc*WLjU?BBL+0_!by z&<=E@JVK5KY^&FQM%rtQ$Fsf9*2Xd%_L;J5$^oXhSBSL@!bBggxlXk%%<{rInyO|q z1zguB5TrWxCY{QA-n$ca5HO&U;Hn4mxuS2>51w<=`y-A>T7*~WjCBtyW|HtQxd1+M z%!D0%hk1&@LGBG?u?e%Ix5A+s3~`|wbM)=lxUu$z_oPhp!sNJVlbmNKCh!G@nf$AM zA>U-d#uhXeFuf(JphQGOPT9>06e-p{VA!K)FP5O0B@>;ql@0|2kBJ>2IPyy~OT6Hc z#A6=TKN1#C;0zpuCz=8NO?@=DzqCI21#cer^;w@#o!XFzO?eL$5_ceZB{IK4#2!RZ zh70~3EckaGV?#yIw3>lkVDxWnjc5RVqHCdF+l@zK0&`-)G@b=Tw=nKh*LFh~b0K3? 
zHn5xc=^!iS7rrM4PklKwdDkg`e()j-2xQ+uWNl*iWoGyme|)8IYKuxYHS zp|HWEf3rF6LcuqTsa(Z<2>ucLgxomxraW+OoLOQ2z1<2+z5?ao!W+P1%h_({vTq&@ zb^XbQWq|2t@>xydRXwvK+&?h&zHbs=LVr!$&X|alO_x1tt*G3N)pavw6h#?AtPH5w z4400wdG&T2zM>4cuF^FF7kPiZ+@}E&YFfK+lokC+P7vMl*~UNS;$Oa~Q1>j(5n08| zBWo?F{EEE&a_q=lHP371tswB;*sCZ44nziQId^g|7)|xHnH9)M;*Wz=iaCw!@%lkC z<&z6`FPYrS0DtNz#|`;OO$%Cdh$Yj9XM@pT%lm07rd;%_4$<)JTxFM};0RTbLzvCj zTS5!-wD@AKctc;CH6N>&v2_so2ZP+1sZ^8(&Y+=+;X#yt6l^X{)&+iYGnd_Ca!(om zJ^Qe~rX4bww75NIX|mf`_N@m^ax=PRO|;O^}>?3d?Ct(QR(j3$pf#u|zaR3H(b(J*xK z^H;+Iezr4zNdlsujp7gJ&e^-S3LyUbWJltmi z0RZ6t>1IOkf1nIgJ6GF(7^Nv{8+IFF2tW7&<^)_iK&JIHOAEW0PB#1uDcuQXkRceA zGZBr{J!31yl<=h2-Aqz3aUXzEUt-F3hLgGR2^7Yqo!zP1kzt3xXM3hulAXaUD$xoS zP89JUU5JUaqUNMQB@@do1rL>|RPFjg2)#wsA{1Y6uRs=X&^oG%Un!J#cZkr1pe49= zl!}m+#yAe3zA_E21ZX+kA04O^qKl%97vnP`5MaD>B(GBtQRqne)0Hz8xZwMM*EJ6H zr;SJ zX2CnoDGzrsuK99kwc1TNj3TOxrs=W6<(vpt0r6~kUf^ekpB!D+XBy!z~TZ9?@ zsjK4~gsm;n1y3F+12^zrYc3cw`)kR5pP+LOh+na#gLDLvqNBUkmKgkj$uz`fRhmH_ zU z_46iwBnbwokoY`>L^)yOx(|g>pY4-$-UvZYu&*DzbJ;y>IUQD3Sb)uq^PzRkYFR{D zD?Qtgm;00~{4J^94D58M-fa_wov$aGW_HPn?*H0(L8Sf`1 z+U99*0Cb-5qFfznS#Lj%5p=?Y5AKKbjL0S>MEQ=f^=)N%Rs%WeG1%MX>sh9{n`jPd z#ibh9QWLfTJ1Z`{3Y9C0MNrgkp~GuBIG3-9`R+ptY!sS=+C3PMOAc3N*Bkh&&%=K9 zN>O(u$nzLO=g!yrUCQp_K5^4y;77UEq!r|bLoEttN5{6gC4QA# zeuDoiv;I%SsW-H>YyF|&4+8-Jkp5p0$HUmv!NtI^6hgU=kCDrbY_78%tAb{-^?f% zdxJon`2YYooJ`tjGRhdSZVcXdH7mG#w+S`gOWPQ>bHalOtKTr=fDAoPo3ia^8d8~I zh@?JC)_0LWIwmX+1*lK54p1bRIZlP7MMQ%#(mX{rfN|3$na&1LgQjEuy4YI_jHtHz z5<|G!IcTVvDpOUL;W#MRjrKMu>`7D6@iNqRo)V>Y!prf&oV2gFK@Bm%qtql*J_Ixp zh=Joomp<0X0d=u}*L6Bbgwr4;&c|VOeA8dW&XxP>3%EhbQsfD1nlsSaMR(3?&66?} zw@@5d`r$KOSwdsQk+bJfaWzrGqw4bp=lKZ3KXK*d=U46n8f_&#Q zug}vlz`1W1>Duq}`a~DuExF(#JLF!cIj*71!-fA$xvDux@PVf_f3QXt`=#Yd=uaaE zRqQjuHO~L9ohReoRsj@IjG6nnYvy490EqsNowu}guy=C#XWNU_Y~}Yk5Wcqc8KLk~ z&!nI_!7I;Coz3Cx5+TlgsU_U7k`YV83VFGT%=|I|JzE z&VWXhLIqKVbb9uQvV`R7jccI&Dj;db29E*OiLxL&#fY?#0i_7$=8U+RxMqdB!RH<*ncpU7#VBS=WuVSq$NlP|DCoJnnfDZ(>_EY=V{5M6HR8SNOv zvb)x{ODu{Yi|~7Y!2}gtBqjLKyh3H$vempXLsqJR4c(EuJfLCgzQbtJ*K9>X-no;nX*4=?y-rf+t+^$`-8-%_jW*n~1K4)JHn0)RNeXl_Fll=u$j!*NK zt4Dfm0Y+617~h$Yqd1ZbIbz)N?APQP#PplU_({tlGMa(5hBgAmkn~Bk`)%?%epadPffOfk z0foV`qH?*42e=zn8N6PF$sHg#;^)q z-KRi_Vy0V`+IJAQZE|}$hkyAhQd=3fb^9br_o}Mus4Cf08x|AUqa`8C6!)l3Kak*{ zDzy|TYJbuwv55BC0}UirsNsl5Y0R%>jd;#f854lNlpHW;Llekq>h<}$ym)xHAOD6j zQXWMGk8_2mRnMvn1TR}rTRl{b^QRm&3~Q~JET!>YX(eL-b&A|lw=Rsr0ue9BsL|+} zc9ci6lmgMC%95hf>f^chNm>1ya!_ZsK9qm}_RH{@>;g7-)6fIyYkkea5vCy%54&%{ zH_)6PSPx#8T%gSRd-s8lfr9uQ;QNu-hxo#Cgc1NhROW-@fwP zKIfpNy(ymU#e%Wl+RF{26l991^`m49dU7~^dw$nEQt3(MbnOsEuZQSWyF?=IxFvwg*L~PuywNN z+6N2!`^c=p87eVH!y`fsf-iFVq#^(Z0zFjNWoVA3}V^~rp` zW-$DU*mR&iFf|4T0KfQHZ}<^~iK$l`z=E3;FiD{72#!gCdP>S@901EVD-xL#h!!{P z)|q%A#{)`pg=%PtS%|v|bp4`%&9wXNY^zik#*1ps1DcJ1%cCUM0H;jwrtHvfFG(To z;&{T5VgO@$scOoXT!Cd0a+9kiO~3;L zEz99P`oIu$rUwXO2&2I7T4RXcrgPvG9=zWX29Dl*@#&Dv*}_bCr_-XdDp6 zVuBr?2roX+w#%?3zRrZ=sUU9#wRH182r*{Tt2sowlPXgCGv7y7opI*eqF7_{8$y&h z6q8!oynTfbWugJ8RidQWJ5-f2D#(NZ_~F=7FO<`QELXi(aUuo9l=;uF%go{6;qj;0 zS^MeBd<8-Bjva1b6vd=kJBBWUZn`D;$=|>2+xFUIZ*C-Zx2-9_ts&>uZJ9tYx=|dU zdJvMpK;qJc)lC7q&BBB`_px{2s*A@6W3kJ=X77wngC{`MQ&*r$2uzfGvoW(N9fZSA zu8bYuA`1(I`)mg|)T|9vfWIEzs@z*3DP}vQ3KuRgz~~q-kn3?Jk@GzEv?2p`3;TUI zp^vGtwzz?GUVFZ$L$3zZ?Bu?T9)%*m2Hb>EapZC|mKB{6Cj3i^)*#`)r|V4HY9hZ2 zxVaqvxHHs+#5da?HAlUR3y@#JOozl7PRB7bpg(p08zT7U2N3bAZs`BPg0#T@2@$wE z89F$aI{i;oW}@Ife?Z9fE2@aoTEOIKXii5`R!MsOG^UZe;E&##d%#Ar_@w>oxe(Xg zn!%vY2T_EN-`8gzs%0A9fOtMe<3|S%C=d|4OchGH*_;E zw;^vHCVts6fDrhc!Hky;1N?!GQwvdGs{j|zvl2S3$M*DQqT#G2Y>V_3rJyT@lqorR 
zA*w>>%6M%S_^X7L>If#;5}d^$-O%iTlk{=_MHpu6!!cx;d%?7dZ_p`kpTx6P+R*un z!}mv{;?)V3Ty}ur^TcuoU4&^o%;>LIbEV zUg)8;;_!TX_Q~J9UV|7K>pMy>r~jQC`+Z3c@&-O{Ut9irW>`Aw-y+~YG@B*Q3ew~s znhp66&4&DcLBjt&07mxqEeuS~7zm zR&_hrXv4q$<{jnH-?HG(GuAW>JZMs>;&pqs>eUL@q`*d5-O@1!-))sfHQrBT-Q(xg zZ98R|3rVR(@SisYmBV9?NS`3zgIi?WvQkrxpr`a@Qc3K%rck|58Z@L=vetsMk(8^b zz9~>oXjZ$`c}f7d|5!rF+pP{yrRU^s>TH7S-X0pgO)Om|`*ODT@U>^_W$)BgTD!Tq zzU~=$Il4xa#$6oTxj)rLO~3;pJD?7QA2$$v=9@^_p+LS~h(SmPdK#i67Kbl*Vhz^# zDBgz{ME#Xid0UlMtX7f?qs-R>qSNe14e?LVibWu%A1ASOc`O@ z=$2>?U?Q(^5}~ zLR}GXXuz8H)2@#%HUHJF+N>>y(vBa1fCBS%$RFj>;0a?xTprGCQFL(#rx*whe2W zwX7+Ml3OEHbBR`?lsg!DZbs-IC*a_mI_jz7c>o|^v&My+V>d*d#YF00%bE(JB(W5+ z3kl{oKSJl>CHK z&x7s0XYloSuiZiFgR0qczi?tC9no-DMbjXDNP75F1z!HVh3t#>T54{GGl4x8&t)oj zweE$wu@Dv`V6G=Ju|pASE|}Eurize4&h#^NRC&jX-rFj)V^vj^?H4xx$tc(D^3~z7 zRdKT+Oz@f^u3 z39vfd2OhQofPCgc3g&QVTiCioz-!qQ@~~HyWH$KjwyptvX=I&p3ppK{ZK`NTnl@Nm ziyyP!XEKa0dfu0h!j&J3&Wei0o>K((oeTvS!lSbafLKrZS5Blfw>gXH-Fr=ONzy^? z=3KdgySUe2J8+N=GU`F{KyYv;xgvGj6q~4=$!JJ3M6m%dM>~ zWG=z~*OIB~!HK_CI1oag3Z6Pdeal_n?M*CPG2E;qKe{5QnrNZZ`fl@d`EqwrSo2iP zpPEpw=?x86g{1ilOCwn~F^tW#v*;||lO_(Lqq66cl^X;W*bD{H_WqB_WziJUFKE1+ zYXB=NBs`A~i#Hb(ofjih4I|Tj*AaBm=6ftXZJg^Zyasxdw`wO0I({ENMsijKq(uzD zmTW;q9{=TbXA|llG5+M_uDaSHUCVuipI4w02a(%?1I`L+93v9^yLOnMNrz+Z3QtrK zLWH}+OW}1q>>pOPrmk+>VBAERixICr!d0rRq`^;hj1bxNoX~VRB#wS>e1>ZWd}J6K zXZ?<+jD`{tx?4 zN%$;z%_R1Zx-9CJItqt9!a8Thx`A$W#1~w~mOYGPg{|WjLnq?beKq36epty|Mwz@Dh4&K=B_kIWJFGO)1d9Tn2*8 zNv0q?z3$S-@OU(LNm(Q7>ML%mWNEc*fF7*%%a%6~g<6#; zlIy}lu;-_TMcjv)P6s;(my&bX{x??6Js%Hu?W9>OM@QitqV#cq#AgSsJ2e9AIh&nw z{{HgbnMf@z##MFrd6=U6%xqqEkB)&oC#=ko*!|_ksOxi^aT93U=2>?`4*FsZqB%#* zVb)V-&5(oX+f<8}W=ws?gvAqb zR~UObDs47GnXwx(S1yR-jQ34xQHftM0j;iPH{_OKMCT<+=PWJ0#+mP9KaG9cgxFS` zAW0x%> z^hgU=QmAan1Jjn@H?XmDU&gVe?jT%*6;WwGHo-b3N$625EJ2k(=l622?8#t8w+$8^ zRBmg%sysE$?3BRfH`MRdEUu`2a=U|V*^IIDlu=JgRq{$}C9OC-(RtN?sqgrOzuhxr zr~c~}>N`~jn)A}lJGHCN>=Me=&YlKS!_{`RryTzR;}iW~Py2s>Eg(XVe$@}Kz5gMP z|L-jKe*;@vSDT*{x2ubT>pym;cdu-B(6|*6wp| zweX>Y3rM!e2^6%Fq>lF2Ur$+tl#0nIC2e8A)9?`{aAEiRS@sfM)aQ{0uFeEqR#xSb z{Tjcn8);)tga?OfusadMg1d@MCD}=($9CH1Azz#4y3m?r#*SMk*KCf0v9(Z0jpMkM znkS!|_JXOLtRLlIIxI!P*Az;OwC;3V2d}5jg9X;D^qi(N>7}wtcuYE`?>cBSW7Ve3 z8iQ;lTa3AoRZMhHc+9D|mSoDbDy*Hd{Mh0cIlUKpb!H%ZB^6RoRj&2OEREa{W% z28@pZkzXlC>>x|Ap#<3xinz$83s@l6+=@2G04jy71FR2OshmzyybfJu6dgj8Uikwx z0z$9>*v7|4FEw#})u1S&m# z%&R`+^#x!@+399J*O$~)V}Pc{0yN$ZJLi~NEcSg}aNr&tpvk$DStk&}wYJS2uAsm9&Md{tE!|CfYTogE&2<#1ckzjAT$eeL)}@a2O$k_x|o_>D2_4IvY%(qB+8F z5Gw6R-(ad>ccd7O5ToA^3ykv4aL0Jb2#B+;=!vqR_>37Sq3AHwu?W?HHqEx*CPlG1 z47DDmT=Oc>T7nXMtPdK2ff_Y}0cJOX1ayf-ZXDgkusgM?;u@1FTdNk5CYod*a>(P- z!ALmL>81LRJU>Gvm6EYb!qAhJ*?lcmx?a#Z*G3Qt%<4Xl^ zvcsEVToGdqu#upyBk;>>i;{hgIRl5JZXzcJpX$!UK4Dcsvft{)Jvlnvz|dekhw}sg z(D6~{NaimMjcGhrWL!KN^b&SEHUp!Qwwdv9?@(N^5Kx`4j;q0S`Ra-h$K@6=0}w+! 
ztW}A<1v&2(OmdOETeX6AI%VA%Xb`>TJGkgAG_TxT+N7#%bU?7kH zQH1P!$KTLi3N32%!T9kOV`y$J2l6I&Y^J>=8ic9#D!(ZqNQ=p`C={%%&3!4v6@CAI zLGxRrw6`JbHjGi^rb#M zMKA=WLr+JRv3)5$na2J@V3LioFvXik#FCVu;icWqnV<~2U@S)}!##Wpl!0@B)rN(n zY}P~}PccNVE<`U<%Bl)bG#6?C>ewp+<0Q6(~gH=oE;WJ zX({}Bt=Wq@@WprNkm7T#MoO9|RB2&5Vx391osxN)6cGYB2S?Mu>f*mbEpZnuWM^;& zhSFkF`)E*C7l66{a!0f_EnpXnt0OE1V9DJ<3Xj#LEAn-r&t*XMXb5ebGloEUVy$)D-JccACvD`w@0RAi6r!;? zZKYs+-JCJteLJ)_6f)0B)T~c|PcQ_IFmzDZrI`Cthz}0YR*VZoB*~=av)l5A%~GxW#?PX45gn`sD`SEE+zNi0qlP$(!<>g zLjWhvSByl7s^W7fMg)pcM${}2_kR@QjCYu~DATgQv?*{R2gnIz21#_GttdC>iyjm= z(|T%_H`qh=aNvRLkElj3fi^ZlibDD@d3&cb55o8gjI_v}crJi8ER5n90(5o?a0S+8 zvqJF4B&3kN2%F=M(Fpf*`FB4LjSOQN!mHrL%;9#Z*htR9V5TT`VjV89Moh|9MXmWwIe7kRa2ynf@W@LZxPoarBHxZx!uQ|xV_zhYV z2Q4azFk0^xO>n~RO(+U@{!(s>b71=;upKEfA~I;h&$q7?$XXs=3WmRfIVH?q=Drl7 z<9%AmI5(I}V4R^rU7g5`C-tPNTNVuY(33VoF`MQJAPqoAau+DV6YJ}_kQi__L>;z= zP0CU{`GR6lJ~gZjmOv@2O9zY^Nsq6xLMGmTuvDahD$*HCX)os}*E-gpAXge8Dnt;- zh<0j70^W<_hEq>pRX`-n(Zi(3b@Hpv)R-O^TLKnsL)qA$I+xEZhQyi(LxwQI*pwM+ zV4r_3`1!Nbk=p+0vHh+vt4H00wnv_)7+_!^o9a=WdqrIhS+*2RtA=!jJSgyEF=d!N zj`TY2pGJ{{b>B75-Swb`gkimGEuxw#+>|*u*iHNARVoU!3*`#pVeBvz!fXg>)!y}# zX-*Wa>WREX2!a012Em7n6n1IK9A`J2+rm8z zbsI)PQ#NuCQIj%gp6TN=9hb4aFhAG=VTKGbtS&i-wn@P#-zHHFpv6ENASa~b`U?-dWn(>M1*XKUg@p&+x&eJR9hzXgC4#rv?`5%)5v zyf-u;vYYHXwg77Z#jc-vhs7U=H(@Z^HD?>Cx|U%J5&wUgUaZ4X^b9&?ES!r#iWd2( z1xlnrn=_uKvtofHVhr^`Hl6N;zsWQR@pt;9mox%?-jJL7oIxYF2C|# z7%Z0s%aNYef+ZT{0o%EtkeK@y<T~SqK+Ma>ves7JCb#38|X``YCcK zNp+I`y?|FtQ>Y#tR1Z|*+?PU>{y~vOS(F=j zX3K=Y|I7%jhfJU1a7qW+a$cS|XPhSvi+GNUTu?W6)k%p%PqwxD_a4w+hQUz10MOPE zYwJX5;oJSacCseS{^KF6kdp>WIPEf@eRi0?juUR-Z#P||yWR0s^rgS|H8t7)~)k1e?6xCr}s!S@~3fs)AFPip#Aw!?w?3X zZ?67EX2G-H^Lg<13#@IPwSJe&b4&$Z>PG$S!cEhWtIT z-@G!Fd3ofoi~VUNX(St~epQd z!%?_v?brQ&V8wREvZxgxx0=WNq)oJZ=|smbtK}xIdU}l4RgQHIdmB`p#QyZ X&QO=M=mGK!{>y_iS?PW-FGKwg0hu`% literal 0 HcmV?d00001 diff --git a/EE/paddlemetric/src/paddle_extension.egg-info/PKG-INFO b/EE/paddlemetric/src/paddle_extension.egg-info/PKG-INFO new file mode 100644 index 000000000..8499b0025 --- /dev/null +++ b/EE/paddlemetric/src/paddle_extension.egg-info/PKG-INFO @@ -0,0 +1,22 @@ +Metadata-Version: 2.1 +Name: paddle-extension +Version: 1.0.0b0 +Summary: Metrics library for paddle, porting from torch metrics. 
+Home-page: UNKNOWN
+Author: Mingming Sun
+Author-email: sunmingming01@baidu.com
+License: Apache
+Keywords: Deep Learning,Paddlepaddle
+Platform: UNKNOWN
+Description-Content-Type: text/markdown
+
+# Paddle Metrics
+
+Metrics library for Paddle, ported from torchmetrics
+
+## Install
+
+pip install http://public.bcc-bdbl.baidu.com:8000/Package/paddle_extension-1.0.0b0-py3-none-any.whl
+
+## Document
+
+
diff --git a/EE/paddlemetric/src/paddle_extension.egg-info/SOURCES.txt b/EE/paddlemetric/src/paddle_extension.egg-info/SOURCES.txt
new file mode 100644
index 000000000..d359b6fb1
--- /dev/null
+++ b/EE/paddlemetric/src/paddle_extension.egg-info/SOURCES.txt
@@ -0,0 +1,152 @@
+README.md
+setup.py
+paddle_extension.egg-info/PKG-INFO
+paddle_extension.egg-info/SOURCES.txt
+paddle_extension.egg-info/dependency_links.txt
+paddle_extension.egg-info/top_level.txt
+paddlemetrics/__about__.py
+paddlemetrics/__init__.py
+paddlemetrics/aggregation.py
+paddlemetrics/collections.py
+paddlemetrics/metric.py
+paddlemetrics/setup_tools.py
+paddlemetrics/audio/__init__.py
+paddlemetrics/audio/pesq.py
+paddlemetrics/audio/pit.py
+paddlemetrics/audio/si_sdr.py
+paddlemetrics/audio/si_snr.py
+paddlemetrics/audio/snr.py
+paddlemetrics/audio/stoi.py
+paddlemetrics/classification/__init__.py
+paddlemetrics/classification/accuracy.py
+paddlemetrics/classification/auc.py
+paddlemetrics/classification/auroc.py
+paddlemetrics/classification/average_precision.py
+paddlemetrics/classification/binned_precision_recall.py
+paddlemetrics/classification/calibration_error.py
+paddlemetrics/classification/cohen_kappa.py
+paddlemetrics/classification/confusion_matrix.py
+paddlemetrics/classification/f_beta.py
+paddlemetrics/classification/hamming_distance.py
+paddlemetrics/classification/hinge.py
+paddlemetrics/classification/iou.py
+paddlemetrics/classification/kl_divergence.py
+paddlemetrics/classification/matthews_corrcoef.py
+paddlemetrics/classification/precision_recall.py
+paddlemetrics/classification/precision_recall_curve.py
+paddlemetrics/classification/roc.py
+paddlemetrics/classification/specificity.py
+paddlemetrics/classification/stat_scores.py
+paddlemetrics/functional/__init__.py
+paddlemetrics/functional/self_supervised.py
+paddlemetrics/functional/audio/__init__.py
+paddlemetrics/functional/audio/pesq.py
+paddlemetrics/functional/audio/pit.py
+paddlemetrics/functional/audio/si_sdr.py
+paddlemetrics/functional/audio/si_snr.py
+paddlemetrics/functional/audio/snr.py
+paddlemetrics/functional/audio/stoi.py
+paddlemetrics/functional/classification/__init__.py
+paddlemetrics/functional/classification/accuracy.py
+paddlemetrics/functional/classification/auc.py
+paddlemetrics/functional/classification/auroc.py
+paddlemetrics/functional/classification/average_precision.py
+paddlemetrics/functional/classification/calibration_error.py
+paddlemetrics/functional/classification/cohen_kappa.py
+paddlemetrics/functional/classification/confusion_matrix.py
+paddlemetrics/functional/classification/dice.py
+paddlemetrics/functional/classification/f_beta.py
+paddlemetrics/functional/classification/hamming_distance.py
+paddlemetrics/functional/classification/hinge.py
+paddlemetrics/functional/classification/iou.py
+paddlemetrics/functional/classification/kl_divergence.py
+paddlemetrics/functional/classification/matthews_corrcoef.py
+paddlemetrics/functional/classification/precision_recall.py
+paddlemetrics/functional/classification/precision_recall_curve.py
+paddlemetrics/functional/classification/roc.py
+paddlemetrics/functional/classification/specificity.py +paddlemetrics/functional/classification/stat_scores.py +paddlemetrics/functional/image/__init__.py +paddlemetrics/functional/image/gradients.py +paddlemetrics/functional/image/psnr.py +paddlemetrics/functional/image/ssim.py +paddlemetrics/functional/pairwise/__init__.py +paddlemetrics/functional/pairwise/cosine.py +paddlemetrics/functional/pairwise/euclidean.py +paddlemetrics/functional/pairwise/helpers.py +paddlemetrics/functional/pairwise/linear.py +paddlemetrics/functional/pairwise/manhatten.py +paddlemetrics/functional/regression/__init__.py +paddlemetrics/functional/regression/cosine_similarity.py +paddlemetrics/functional/regression/explained_variance.py +paddlemetrics/functional/regression/mean_absolute_error.py +paddlemetrics/functional/regression/mean_absolute_percentage_error.py +paddlemetrics/functional/regression/mean_squared_error.py +paddlemetrics/functional/regression/mean_squared_log_error.py +paddlemetrics/functional/regression/pearson.py +paddlemetrics/functional/regression/r2.py +paddlemetrics/functional/regression/spearman.py +paddlemetrics/functional/regression/symmetric_mean_absolute_percentage_error.py +paddlemetrics/functional/regression/tweedie_deviance.py +paddlemetrics/functional/retrieval/__init__.py +paddlemetrics/functional/retrieval/average_precision.py +paddlemetrics/functional/retrieval/fall_out.py +paddlemetrics/functional/retrieval/hit_rate.py +paddlemetrics/functional/retrieval/ndcg.py +paddlemetrics/functional/retrieval/precision.py +paddlemetrics/functional/retrieval/r_precision.py +paddlemetrics/functional/retrieval/recall.py +paddlemetrics/functional/retrieval/reciprocal_rank.py +paddlemetrics/functional/text/__init__.py +paddlemetrics/functional/text/bert.py +paddlemetrics/functional/text/bleu.py +paddlemetrics/functional/text/rouge.py +paddlemetrics/functional/text/sacre_bleu.py +paddlemetrics/functional/text/wer.py +paddlemetrics/image/__init__.py +paddlemetrics/image/fid.py +paddlemetrics/image/inception.py +paddlemetrics/image/kid.py +paddlemetrics/image/lpip_similarity.py +paddlemetrics/image/psnr.py +paddlemetrics/image/ssim.py +paddlemetrics/regression/__init__.py +paddlemetrics/regression/cosine_similarity.py +paddlemetrics/regression/explained_variance.py +paddlemetrics/regression/mean_absolute_error.py +paddlemetrics/regression/mean_absolute_percentage_error.py +paddlemetrics/regression/mean_squared_error.py +paddlemetrics/regression/mean_squared_log_error.py +paddlemetrics/regression/pearson.py +paddlemetrics/regression/r2.py +paddlemetrics/regression/spearman.py +paddlemetrics/regression/symmetric_mean_absolute_percentage_error.py +paddlemetrics/regression/tweedie_deviance.py +paddlemetrics/retrieval/__init__.py +paddlemetrics/retrieval/mean_average_precision.py +paddlemetrics/retrieval/mean_reciprocal_rank.py +paddlemetrics/retrieval/retrieval_fallout.py +paddlemetrics/retrieval/retrieval_hit_rate.py +paddlemetrics/retrieval/retrieval_metric.py +paddlemetrics/retrieval/retrieval_ndcg.py +paddlemetrics/retrieval/retrieval_precision.py +paddlemetrics/retrieval/retrieval_r_precision.py +paddlemetrics/retrieval/retrieval_recall.py +paddlemetrics/text/__init__.py +paddlemetrics/text/bert.py +paddlemetrics/text/bleu.py +paddlemetrics/text/rouge.py +paddlemetrics/text/sacre_bleu.py +paddlemetrics/text/wer.py +paddlemetrics/utilities/__init__.py +paddlemetrics/utilities/checks.py +paddlemetrics/utilities/data.py +paddlemetrics/utilities/distributed.py +paddlemetrics/utilities/enums.py 
+paddlemetrics/utilities/exceptions.py
+paddlemetrics/utilities/imports.py
+paddlemetrics/utilities/prints.py
+paddlemetrics/wrappers/__init__.py
+paddlemetrics/wrappers/bootstrapping.py
+paddlemetrics/wrappers/multioutput.py
+paddlemetrics/wrappers/tracker.py
\ No newline at end of file
diff --git a/EE/paddlemetric/src/paddle_extension.egg-info/dependency_links.txt b/EE/paddlemetric/src/paddle_extension.egg-info/dependency_links.txt
new file mode 100644
index 000000000..8b1378917
--- /dev/null
+++ b/EE/paddlemetric/src/paddle_extension.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/EE/paddlemetric/src/paddle_extension.egg-info/top_level.txt b/EE/paddlemetric/src/paddle_extension.egg-info/top_level.txt
new file mode 100644
index 000000000..b722df99a
--- /dev/null
+++ b/EE/paddlemetric/src/paddle_extension.egg-info/top_level.txt
@@ -0,0 +1 @@
+paddlemetrics
diff --git a/EE/paddlemetric/src/paddlemetrics.egg-info/PKG-INFO b/EE/paddlemetric/src/paddlemetrics.egg-info/PKG-INFO
new file mode 100644
index 000000000..b6b4be7d9
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics.egg-info/PKG-INFO
@@ -0,0 +1,22 @@
+Metadata-Version: 2.1
+Name: paddlemetrics
+Version: 1.0.0b0
+Summary: Metrics library for Paddle, ported from torchmetrics.
+Home-page: UNKNOWN
+Author: Mingming Sun
+Author-email: sunmingming01@baidu.com
+License: Apache
+Keywords: Deep Learning,Paddlepaddle
+Platform: UNKNOWN
+Description-Content-Type: text/markdown
+
+# Paddle Metrics
+
+Metrics library for Paddle, ported from torchmetrics
+
+## Install
+
+pip install http://public.bcc-bdbl.baidu.com:8000/Package/paddle_extension-1.0.0b0-py3-none-any.whl
+
+## Document
+
+
diff --git a/EE/paddlemetric/src/paddlemetrics.egg-info/SOURCES.txt b/EE/paddlemetric/src/paddlemetrics.egg-info/SOURCES.txt
new file mode 100644
index 000000000..4850ceca3
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics.egg-info/SOURCES.txt
@@ -0,0 +1,152 @@
+README.md
+setup.py
+paddlemetrics/__about__.py
+paddlemetrics/__init__.py
+paddlemetrics/aggregation.py
+paddlemetrics/collections.py
+paddlemetrics/metric.py
+paddlemetrics/setup_tools.py
+paddlemetrics.egg-info/PKG-INFO
+paddlemetrics.egg-info/SOURCES.txt
+paddlemetrics.egg-info/dependency_links.txt
+paddlemetrics.egg-info/top_level.txt
+paddlemetrics/audio/__init__.py
+paddlemetrics/audio/pesq.py
+paddlemetrics/audio/pit.py
+paddlemetrics/audio/si_sdr.py
+paddlemetrics/audio/si_snr.py
+paddlemetrics/audio/snr.py
+paddlemetrics/audio/stoi.py
+paddlemetrics/classification/__init__.py
+paddlemetrics/classification/accuracy.py
+paddlemetrics/classification/auc.py
+paddlemetrics/classification/auroc.py
+paddlemetrics/classification/average_precision.py
+paddlemetrics/classification/binned_precision_recall.py
+paddlemetrics/classification/calibration_error.py
+paddlemetrics/classification/cohen_kappa.py
+paddlemetrics/classification/confusion_matrix.py
+paddlemetrics/classification/f_beta.py
+paddlemetrics/classification/hamming_distance.py
+paddlemetrics/classification/hinge.py
+paddlemetrics/classification/iou.py
+paddlemetrics/classification/kl_divergence.py
+paddlemetrics/classification/matthews_corrcoef.py
+paddlemetrics/classification/precision_recall.py
+paddlemetrics/classification/precision_recall_curve.py
+paddlemetrics/classification/roc.py
+paddlemetrics/classification/specificity.py
+paddlemetrics/classification/stat_scores.py
+paddlemetrics/functional/__init__.py
+paddlemetrics/functional/self_supervised.py
+paddlemetrics/functional/audio/__init__.py
+paddlemetrics/functional/audio/pesq.py +paddlemetrics/functional/audio/pit.py +paddlemetrics/functional/audio/si_sdr.py +paddlemetrics/functional/audio/si_snr.py +paddlemetrics/functional/audio/snr.py +paddlemetrics/functional/audio/stoi.py +paddlemetrics/functional/classification/__init__.py +paddlemetrics/functional/classification/accuracy.py +paddlemetrics/functional/classification/auc.py +paddlemetrics/functional/classification/auroc.py +paddlemetrics/functional/classification/average_precision.py +paddlemetrics/functional/classification/calibration_error.py +paddlemetrics/functional/classification/cohen_kappa.py +paddlemetrics/functional/classification/confusion_matrix.py +paddlemetrics/functional/classification/dice.py +paddlemetrics/functional/classification/f_beta.py +paddlemetrics/functional/classification/hamming_distance.py +paddlemetrics/functional/classification/hinge.py +paddlemetrics/functional/classification/iou.py +paddlemetrics/functional/classification/kl_divergence.py +paddlemetrics/functional/classification/matthews_corrcoef.py +paddlemetrics/functional/classification/precision_recall.py +paddlemetrics/functional/classification/precision_recall_curve.py +paddlemetrics/functional/classification/roc.py +paddlemetrics/functional/classification/specificity.py +paddlemetrics/functional/classification/stat_scores.py +paddlemetrics/functional/image/__init__.py +paddlemetrics/functional/image/gradients.py +paddlemetrics/functional/image/psnr.py +paddlemetrics/functional/image/ssim.py +paddlemetrics/functional/pairwise/__init__.py +paddlemetrics/functional/pairwise/cosine.py +paddlemetrics/functional/pairwise/euclidean.py +paddlemetrics/functional/pairwise/helpers.py +paddlemetrics/functional/pairwise/linear.py +paddlemetrics/functional/pairwise/manhatten.py +paddlemetrics/functional/regression/__init__.py +paddlemetrics/functional/regression/cosine_similarity.py +paddlemetrics/functional/regression/explained_variance.py +paddlemetrics/functional/regression/mean_absolute_error.py +paddlemetrics/functional/regression/mean_absolute_percentage_error.py +paddlemetrics/functional/regression/mean_squared_error.py +paddlemetrics/functional/regression/mean_squared_log_error.py +paddlemetrics/functional/regression/pearson.py +paddlemetrics/functional/regression/r2.py +paddlemetrics/functional/regression/spearman.py +paddlemetrics/functional/regression/symmetric_mean_absolute_percentage_error.py +paddlemetrics/functional/regression/tweedie_deviance.py +paddlemetrics/functional/retrieval/__init__.py +paddlemetrics/functional/retrieval/average_precision.py +paddlemetrics/functional/retrieval/fall_out.py +paddlemetrics/functional/retrieval/hit_rate.py +paddlemetrics/functional/retrieval/ndcg.py +paddlemetrics/functional/retrieval/precision.py +paddlemetrics/functional/retrieval/r_precision.py +paddlemetrics/functional/retrieval/recall.py +paddlemetrics/functional/retrieval/reciprocal_rank.py +paddlemetrics/functional/text/__init__.py +paddlemetrics/functional/text/bert.py +paddlemetrics/functional/text/bleu.py +paddlemetrics/functional/text/rouge.py +paddlemetrics/functional/text/sacre_bleu.py +paddlemetrics/functional/text/wer.py +paddlemetrics/image/__init__.py +paddlemetrics/image/fid.py +paddlemetrics/image/inception.py +paddlemetrics/image/kid.py +paddlemetrics/image/lpip_similarity.py +paddlemetrics/image/psnr.py +paddlemetrics/image/ssim.py +paddlemetrics/regression/__init__.py +paddlemetrics/regression/cosine_similarity.py +paddlemetrics/regression/explained_variance.py 
+paddlemetrics/regression/mean_absolute_error.py
+paddlemetrics/regression/mean_absolute_percentage_error.py
+paddlemetrics/regression/mean_squared_error.py
+paddlemetrics/regression/mean_squared_log_error.py
+paddlemetrics/regression/pearson.py
+paddlemetrics/regression/r2.py
+paddlemetrics/regression/spearman.py
+paddlemetrics/regression/symmetric_mean_absolute_percentage_error.py
+paddlemetrics/regression/tweedie_deviance.py
+paddlemetrics/retrieval/__init__.py
+paddlemetrics/retrieval/mean_average_precision.py
+paddlemetrics/retrieval/mean_reciprocal_rank.py
+paddlemetrics/retrieval/retrieval_fallout.py
+paddlemetrics/retrieval/retrieval_hit_rate.py
+paddlemetrics/retrieval/retrieval_metric.py
+paddlemetrics/retrieval/retrieval_ndcg.py
+paddlemetrics/retrieval/retrieval_precision.py
+paddlemetrics/retrieval/retrieval_r_precision.py
+paddlemetrics/retrieval/retrieval_recall.py
+paddlemetrics/text/__init__.py
+paddlemetrics/text/bert.py
+paddlemetrics/text/bleu.py
+paddlemetrics/text/rouge.py
+paddlemetrics/text/sacre_bleu.py
+paddlemetrics/text/wer.py
+paddlemetrics/utilities/__init__.py
+paddlemetrics/utilities/checks.py
+paddlemetrics/utilities/data.py
+paddlemetrics/utilities/distributed.py
+paddlemetrics/utilities/enums.py
+paddlemetrics/utilities/exceptions.py
+paddlemetrics/utilities/imports.py
+paddlemetrics/utilities/prints.py
+paddlemetrics/wrappers/__init__.py
+paddlemetrics/wrappers/bootstrapping.py
+paddlemetrics/wrappers/multioutput.py
+paddlemetrics/wrappers/tracker.py
\ No newline at end of file
diff --git a/EE/paddlemetric/src/paddlemetrics.egg-info/dependency_links.txt b/EE/paddlemetric/src/paddlemetrics.egg-info/dependency_links.txt
new file mode 100644
index 000000000..8b1378917
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/EE/paddlemetric/src/paddlemetrics.egg-info/top_level.txt b/EE/paddlemetric/src/paddlemetrics.egg-info/top_level.txt
new file mode 100644
index 000000000..b722df99a
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics.egg-info/top_level.txt
@@ -0,0 +1 @@
+paddlemetrics
diff --git a/EE/paddlemetric/src/paddlemetrics/__about__.py b/EE/paddlemetric/src/paddlemetrics/__about__.py
new file mode 100644
index 000000000..53a9cfa4f
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/__about__.py
@@ -0,0 +1,27 @@
+__version__ = "0.6.0dev"
+__author__ = "PyTorchLightning et al."
+__author_email__ = "name@pytorchlightning.ai"
+__license__ = "Apache-2.0"
+__copyright__ = f"Copyright (c) 2020-2021, {__author__}."
+__homepage__ = "https://github.com/PyTorchLightning/metrics"
+__docs__ = "PyTorch native Metrics"
+__docs_url__ = "https://paddlemetrics.readthedocs.io/en/stable/"
+__long_doc__ = """
+paddlemetrics is a metrics API created for easy metric development and usage in both PyTorch and
+[PyTorch Lightning](https://pytorch-lightning.readthedocs.io/en/stable/). It was originally a part of
+Pytorch Lightning, but was split off so users could take advantage of the large collection of implemented
+metrics without having to install Pytorch Lightning (even though we would love for you to try it out).
+We currently have more than 25 metrics implemented and are continuously adding more, both within
+already covered domains (classification, regression, etc.) and in new domains (object detection, etc.).
+We make sure that all our metrics are rigorously tested so that you can trust them.
+""" + +__all__ = [ + "__author__", + "__author_email__", + "__copyright__", + "__docs__", + "__homepage__", + "__license__", + "__version__", +] diff --git a/EE/paddlemetric/src/paddlemetrics/__init__.py b/EE/paddlemetric/src/paddlemetrics/__init__.py new file mode 100644 index 000000000..ea557086b --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/__init__.py @@ -0,0 +1,143 @@ +r"""Root package info.""" +import logging as __logging +import os +import sys + +from paddlemetrics.__about__ import * # noqa: F401, F403 + +_logger = __logging.getLogger("paddlemetrics") +_logger.addHandler(__logging.StreamHandler()) +_logger.setLevel(__logging.INFO) + +_PACKAGE_ROOT = os.path.dirname(__file__) +_PROJECT_ROOT = os.path.dirname(_PACKAGE_ROOT) + +from paddlemetrics import functional # noqa: E402 +from paddlemetrics.aggregation import CatMetric, MaxMetric, MeanMetric, MinMetric, SumMetric # noqa: E402 +from paddlemetrics.audio import PESQ, PIT, SI_SDR, SI_SNR, SNR, STOI # noqa: E402 +from paddlemetrics.classification import ( # noqa: E402 + AUC, + AUROC, + F1, + ROC, + Accuracy, + AveragePrecision, + BinnedAveragePrecision, + BinnedPrecisionRecallCurve, + BinnedRecallAtFixedPrecision, + CalibrationError, + CohenKappa, + ConfusionMatrix, + FBeta, + HammingDistance, + Hinge, + IoU, + KLDivergence, + MatthewsCorrcoef, + Precision, + PrecisionRecallCurve, + Recall, + Specificity, + StatScores, +) +from paddlemetrics.collections import MetricCollection # noqa: E402 +#from paddlemetrics.image import FID, IS, KID, LPIPS, PSNR, SSIM # noqa: E402 +from paddlemetrics.metric import Metric # noqa: E402 +from paddlemetrics.regression import ( # noqa: E402 + CosineSimilarity, + ExplainedVariance, + MeanAbsoluteError, + MeanAbsolutePercentageError, + MeanSquaredError, + MeanSquaredLogError, + PearsonCorrcoef, + R2Score, + SpearmanCorrcoef, + SymmetricMeanAbsolutePercentageError, + TweedieDevianceScore, +) +from paddlemetrics.retrieval import ( # noqa: E402 + RetrievalFallOut, + RetrievalHitRate, + RetrievalMAP, + RetrievalMRR, + RetrievalNormalizedDCG, + RetrievalPrecision, + RetrievalRecall, + RetrievalRPrecision, +) +from paddlemetrics.text import WER, BLEUScore, ROUGEScore, SacreBLEUScore # noqa: E402 BERTScore, +from paddlemetrics.wrappers import BootStrapper, MetricTracker, MultioutputWrapper # noqa: E402 + +__all__ = [ + "functional", + "Accuracy", + "AUC", + "AUROC", + "AveragePrecision", + "BinnedAveragePrecision", + "BinnedPrecisionRecallCurve", + "BinnedRecallAtFixedPrecision", +# "BERTScore", + "BLEUScore", + "BootStrapper", + "CalibrationError", + "CatMetric", + "CohenKappa", + "ConfusionMatrix", + "CosineSimilarity", + "TweedieDevianceScore", + "ExplainedVariance", + "F1", + "FBeta", +# "FID", + "HammingDistance", + "Hinge", + "IoU", +# "IS", +# "KID", + "KLDivergence", +# "LPIPS", + "MatthewsCorrcoef", + "MaxMetric", + "MeanAbsoluteError", + "MeanAbsolutePercentageError", + "MeanMetric", + "MeanSquaredError", + "MeanSquaredLogError", + "Metric", + "MetricCollection", + "MetricTracker", + "MinMetric", + "MultioutputWrapper", + "PearsonCorrcoef", + "PESQ", + "PIT", + "Precision", + "PrecisionRecallCurve", +# "PSNR", + "R2Score", + "Recall", + "RetrievalFallOut", + "RetrievalHitRate", + "RetrievalMAP", + "RetrievalMRR", + "RetrievalNormalizedDCG", + "RetrievalPrecision", + "RetrievalRecall", + "RetrievalRPrecision", + "ROC", + "ROUGEScore", + "SacreBLEUScore", + "SI_SDR", + "SI_SNR", + "SNR", + "SpearmanCorrcoef", + "Specificity", +# "SSIM", + "StatScores", + "STOI", + "SumMetric", + 
"SymmetricMeanAbsolutePercentageError", + "WER", +] diff --git a/EE/paddlemetric/src/paddlemetrics/aggregation.py b/EE/paddlemetric/src/paddlemetrics/aggregation.py new file mode 100644 index 000000000..a95c51c0e --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/aggregation.py @@ -0,0 +1,445 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import warnings +from typing import Any, Callable, List, Optional, Union + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.metric import Metric +from paddlemetrics.utilities.data import dim_zero_cat + + +class BaseAggregator(Metric): + """Base class for aggregation metrics. + + Args: + fn: string specifying the reduction function + default_value: default tensor value to use for the metric state + nan_strategy: options: + - ``'error'``: if any `nan` values are encounted will give a RuntimeError + - ``'warn'``: if any `nan` values are encounted will give a warning and continue + - ``'ignore'``: all `nan` values are silently removed + - a float: if a float is provided will impude any `nan` values with this value + + compute_on_step: + Forward only calls ``update()`` and returns None if this is + set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. + process_group: + Specify the process group on which synchronization is called. + default: None (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. + When `None`, DDP will be used to perform the allgather. + + Raises: + ValueError: + If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float + """ + + value: Tensor + is_differentiable = None + higher_is_better = None + + def __init__( + self, + fn: Union[Callable, str], + default_value: Union[Tensor, List], + nan_strategy: Union[str, float] = "error", + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ): + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + allowed_nan_strategy = ("error", "warn", "ignore") + if nan_strategy not in allowed_nan_strategy and not isinstance(nan_strategy, float): + raise ValueError( + f"Arg `nan_strategy` should either be a float or one of {allowed_nan_strategy}" + f" but got {nan_strategy}." 
+            )
+
+        self.nan_strategy = nan_strategy
+        self.add_state("value", default=default_value, dist_reduce_fx=fn)
+
+    def _cast_and_nan_check_input(self, x: Union[float, Tensor]) -> Tensor:
+        """Converts input x to a tensor if it is not one already, then handles NaNs according to
+        ``nan_strategy``: raise an error, warn and remove, remove silently, or impute."""
+        if not isinstance(x, Tensor):
+            x = B.as_tensor(x, dtype=B.float32, device=self.device)
+
+        nans = B.isnan(x)
+        if any(nans.flatten()):
+            if self.nan_strategy == "error":
+                raise RuntimeError("Encountered `nan` values in tensor")
+            if self.nan_strategy == "warn":
+                warnings.warn("Encountered `nan` values in tensor. Will be removed.", UserWarning)
+                x = x[~nans]
+            elif self.nan_strategy == "ignore":
+                x = x[~nans]
+            else:
+                x[nans] = self.nan_strategy
+
+        return x.float()
+
+    def update(self, value: Union[float, Tensor]) -> None:  # type: ignore
+        """Override in child class."""
+        pass
+
+    def compute(self) -> Tensor:
+        """Compute the aggregated value."""
+        return self.value.squeeze() if isinstance(self.value, Tensor) else self.value
+
+
+class MaxMetric(BaseAggregator):
+    """Aggregate a stream of values into their maximum value.
+
+    Args:
+        nan_strategy: options:
+            - ``'error'``: if any `nan` values are encountered, a RuntimeError is raised
+            - ``'warn'``: if any `nan` values are encountered, a warning is given and they are removed
+            - ``'ignore'``: all `nan` values are silently removed
+            - a float: if a float is provided, any `nan` values are imputed with this value
+
+        compute_on_step:
+            Forward only calls ``update()`` and returns None if this is
+            set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step.
+        process_group:
+            Specify the process group on which synchronization is called.
+            default: None (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state.
+            When `None`, DDP will be used to perform the allgather.
+
+    Raises:
+        ValueError:
+            If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics import MaxMetric
+        >>> metric = MaxMetric()
+        >>> metric.update(1)
+        >>> metric.update(B.tensor([2, 3]))
+        >>> metric.compute()
+        tensor(3.)
+    """
+
+    def __init__(
+        self,
+        nan_strategy: Union[str, float] = "warn",
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ):
+        super().__init__(
+            "max",
+            -B.tensor(float("inf")),
+            nan_strategy,
+            compute_on_step,
+            dist_sync_on_step,
+            process_group,
+            dist_sync_fn,
+        )
+
+    def update(self, value: Union[float, Tensor]) -> None:  # type: ignore
+        """Update state with data.
+
+        Args:
+            value: Either a float or tensor containing data. Additional tensor
+                dimensions will be flattened
+        """
+        value = self._cast_and_nan_check_input(value)
+        if any(value.flatten()):  # make sure tensor not empty
+            self.value = B.max(self.value, B.max(value))
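How ``nan_strategy`` plays out at runtime is easiest to see end to end. Below is a minimal sketch, assuming the patched package is importable as ``paddlemetrics``; the tensor values are purely illustrative:

```python
import paddleext.torchapi as B
from paddlemetrics import MaxMetric

x = B.tensor([1.0, float("nan"), 3.0])

# "warn" (the default for the concrete aggregators): warn once, drop the NaN
m = MaxMetric(nan_strategy="warn")
m.update(x)
print(m.compute())  # tensor(3.)

# "ignore": drop the NaN silently
m = MaxMetric(nan_strategy="ignore")
m.update(x)
print(m.compute())  # tensor(3.)

# a float: impute NaNs with that value before aggregating
m = MaxMetric(nan_strategy=5.0)
m.update(x)
print(m.compute())  # tensor(5.)

# "error": update() raises RuntimeError("Encountered `nan` values in tensor")
m = MaxMetric(nan_strategy="error")
```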
+
+
+class MinMetric(BaseAggregator):
+    """Aggregate a stream of values into their minimum value.
+
+    Args:
+        nan_strategy: options:
+            - ``'error'``: if any `nan` values are encountered, a RuntimeError is raised
+            - ``'warn'``: if any `nan` values are encountered, a warning is given and they are removed
+            - ``'ignore'``: all `nan` values are silently removed
+            - a float: if a float is provided, any `nan` values are imputed with this value
+
+        compute_on_step:
+            Forward only calls ``update()`` and returns None if this is
+            set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step.
+        process_group:
+            Specify the process group on which synchronization is called.
+            default: None (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state.
+            When `None`, DDP will be used to perform the allgather.
+
+    Raises:
+        ValueError:
+            If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics import MinMetric
+        >>> metric = MinMetric()
+        >>> metric.update(1)
+        >>> metric.update(B.tensor([2, 3]))
+        >>> metric.compute()
+        tensor(1.)
+    """
+
+    def __init__(
+        self,
+        nan_strategy: Union[str, float] = "warn",
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ):
+        super().__init__(
+            "min",
+            B.tensor(float("inf")),
+            nan_strategy,
+            compute_on_step,
+            dist_sync_on_step,
+            process_group,
+            dist_sync_fn,
+        )
+
+    def update(self, value: Union[float, Tensor]) -> None:  # type: ignore
+        """Update state with data.
+
+        Args:
+            value: Either a float or tensor containing data. Additional tensor
+                dimensions will be flattened
+        """
+        value = self._cast_and_nan_check_input(value)
+        if any(value.flatten()):  # make sure tensor not empty
+            self.value = B.min(self.value, B.min(value))
+
+
+class SumMetric(BaseAggregator):
+    """Aggregate a stream of values into their sum.
+
+    Args:
+        nan_strategy: options:
+            - ``'error'``: if any `nan` values are encountered, a RuntimeError is raised
+            - ``'warn'``: if any `nan` values are encountered, a warning is given and they are removed
+            - ``'ignore'``: all `nan` values are silently removed
+            - a float: if a float is provided, any `nan` values are imputed with this value
+
+        compute_on_step:
+            Forward only calls ``update()`` and returns None if this is
+            set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step.
+        process_group:
+            Specify the process group on which synchronization is called.
+            default: None (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state.
+            When `None`, DDP will be used to perform the allgather.
+
+    Raises:
+        ValueError:
+            If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics import SumMetric
+        >>> metric = SumMetric()
+        >>> metric.update(1)
+        >>> metric.update(B.tensor([2, 3]))
+        >>> metric.compute()
+        tensor(6.)
+    """
+
+    def __init__(
+        self,
+        nan_strategy: Union[str, float] = "warn",
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ):
+        super().__init__(
+            "sum", B.zeros(1), nan_strategy, compute_on_step, dist_sync_on_step, process_group, dist_sync_fn
+        )
+
+    def update(self, value: Union[float, Tensor]) -> None:  # type: ignore
+        """Update state with data.
+
+        Args:
+            value: Either a float or tensor containing data. Additional tensor
+                dimensions will be flattened
+        """
+        value = self._cast_and_nan_check_input(value)
+        self.value += value.sum()
+
+
+class CatMetric(BaseAggregator):
+    """Concatenate a stream of values.
+
+    Args:
+        nan_strategy: options:
+            - ``'error'``: if any `nan` values are encountered, a RuntimeError is raised
+            - ``'warn'``: if any `nan` values are encountered, a warning is given and they are removed
+            - ``'ignore'``: all `nan` values are silently removed
+            - a float: if a float is provided, any `nan` values are imputed with this value
+
+        compute_on_step:
+            Forward only calls ``update()`` and returns None if this is
+            set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step.
+        process_group:
+            Specify the process group on which synchronization is called.
+            default: None (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state.
+            When `None`, DDP will be used to perform the allgather.
+
+    Raises:
+        ValueError:
+            If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics import CatMetric
+        >>> metric = CatMetric()
+        >>> metric.update(1)
+        >>> metric.update(B.tensor([2, 3]))
+        >>> metric.compute()
+        tensor([1., 2., 3.])
+    """
+
+    def __init__(
+        self,
+        nan_strategy: Union[str, float] = "warn",
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ):
+        super().__init__("cat", [], nan_strategy, compute_on_step, dist_sync_on_step, process_group, dist_sync_fn)
+
+    def update(self, value: Union[float, Tensor]) -> None:  # type: ignore
+        """Update state with data.
+
+        Args:
+            value: Either a float or tensor containing data. Additional tensor
+                dimensions will be flattened
+        """
+        value = self._cast_and_nan_check_input(value)
+        if any(value.flatten()):
+            self.value.append(value)
+
+    def compute(self) -> Tensor:
+        """Compute the aggregated value."""
+        if isinstance(self.value, list) and self.value:
+            return dim_zero_cat(self.value)
+        return self.value
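The ``MeanMetric`` that follows keeps two sum-reduced states, a weighted value sum and a weight total, so ``compute()`` is just their ratio. A small sketch of the arithmetic (values are illustrative; it assumes the package imports as in the doctests above):

```python
import paddleext.torchapi as B
from paddlemetrics import MeanMetric

m = MeanMetric()
m.update(B.tensor([2.0, 4.0]))   # per-element weight defaults to 1.0
m.update(6.0, weight=2.0)        # a scalar weight is broadcast to the value's shape
# state: value = 2*1 + 4*1 + 6*2 = 18.0, weight = 1 + 1 + 2 = 4.0
print(m.compute())               # tensor([4.5000])
```

Keeping both states as plain sums is what lets ``dist_reduce_fx="sum"`` yield the exact global mean across processes.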
+
+
+class MeanMetric(BaseAggregator):
+    """Aggregate a stream of values into their mean value.
+
+    Args:
+        nan_strategy: options:
+            - ``'error'``: if any `nan` values are encountered, a RuntimeError is raised
+            - ``'warn'``: if any `nan` values are encountered, a warning is given and they are removed
+            - ``'ignore'``: all `nan` values are silently removed
+            - a float: if a float is provided, any `nan` values are imputed with this value
+
+        compute_on_step:
+            Forward only calls ``update()`` and returns None if this is
+            set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step.
+        process_group:
+            Specify the process group on which synchronization is called.
+            default: None (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state.
+            When `None`, DDP will be used to perform the allgather.
+
+    Raises:
+        ValueError:
+            If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics import MeanMetric
+        >>> metric = MeanMetric()
+        >>> metric.update(1)
+        >>> metric.update(B.tensor([2, 3]))
+        >>> metric.compute()
+        tensor([2.])
+    """
+
+    def __init__(
+        self,
+        nan_strategy: Union[str, float] = "warn",
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ):
+        super().__init__(
+            "sum", B.zeros(1), nan_strategy, compute_on_step, dist_sync_on_step, process_group, dist_sync_fn
+        )
+        self.add_state("weight", default=B.zeros(1), dist_reduce_fx="sum")
+
+    def update(self, value: Union[float, Tensor], weight: Union[float, Tensor] = 1.0) -> None:  # type: ignore
+        """Update state with data.
+
+        Args:
+            value: Either a float or tensor containing data. Additional tensor
+                dimensions will be flattened
+            weight: Either a float or tensor containing weights for calculating
+                the average. Shape of weight should be able to broadcast with
+                the shape of `value`. Defaults to `1.0`, corresponding to a
+                simple (unweighted) average.
+        """
+        value = self._cast_and_nan_check_input(value)
+        weight = self._cast_and_nan_check_input(weight)
+
+        # broadcast weight to value's shape
+        if not hasattr(B, "broadcast_to"):
+            if weight.shape == ():
+                weight = B.ones_like(value) * weight
+            if weight.shape != value.shape:
+                raise ValueError("Broadcasting not supported on PyTorch <1.8")
+        else:
+            weight = B.broadcast_to(weight, value.shape)
+
+        self.value += (value * weight).sum()
+        self.weight += weight.sum()
+
+    def compute(self) -> Tensor:
+        """Compute the aggregated value."""
+        return self.value / self.weight
diff --git a/EE/paddlemetric/src/paddlemetrics/audio/__init__.py b/EE/paddlemetric/src/paddlemetrics/audio/__init__.py
new file mode 100644
index 000000000..efd0b451e
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/audio/__init__.py
@@ -0,0 +1,19 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from paddlemetrics.audio.pesq import PESQ  # noqa: F401
+from paddlemetrics.audio.pit import PIT  # noqa: F401
+from paddlemetrics.audio.si_sdr import SI_SDR  # noqa: F401
+from paddlemetrics.audio.si_snr import SI_SNR  # noqa: F401
+from paddlemetrics.audio.snr import SNR  # noqa: F401
+from paddlemetrics.audio.stoi import STOI  # noqa: F401
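All six audio metrics introduced below (PESQ, PIT, SI_SDR, SI_SNR, SNR, STOI) share one state layout: a running score sum plus an item count, both registered with ``dist_reduce_fx="sum"``, so the cross-process reduction stays a plain sum and ``compute()`` yields the global average. A condensed sketch of that pattern (the class and attribute names here are illustrative, not part of the patch):

```python
from paddleext.torchapi import Tensor, tensor

from paddlemetrics.metric import Metric


class AverageOf(Metric):
    """Sketch of the accumulator pattern shared by the audio metrics."""

    def __init__(self, batch_fn):
        super().__init__()
        self.batch_fn = batch_fn  # maps (preds, target) to per-item scores
        # summing both states keeps the distributed reduction a plain sum
        self.add_state("sum_score", default=tensor(0.0), dist_reduce_fx="sum")
        self.add_state("total", default=tensor(0), dist_reduce_fx="sum")

    def update(self, preds: Tensor, target: Tensor) -> None:
        scores = self.batch_fn(preds, target)
        self.sum_score += scores.sum()
        self.total += scores.numel()

    def compute(self) -> Tensor:
        return self.sum_score / self.total
```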
diff --git a/EE/paddlemetric/src/paddlemetrics/audio/pesq.py b/EE/paddlemetric/src/paddlemetrics/audio/pesq.py
new file mode 100644
index 000000000..d45fab53d
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/audio/pesq.py
@@ -0,0 +1,130 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, Optional
+
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.functional.audio.pesq import pesq
+from paddlemetrics.metric import Metric
+from paddlemetrics.utilities.imports import _PESQ_AVAILABLE
+
+
+class PESQ(Metric):
+    """PESQ (Perceptual Evaluation of Speech Quality)
+
+    This is a wrapper for the pesq package [1]. Note that input will be moved to `cpu`
+    to perform the metric calculation.
+
+    .. note:: using this metric requires you to have ``pesq`` installed. Either install as ``pip install
+        paddlemetrics[audio]`` or ``pip install pesq``
+
+    Forward accepts
+
+    - ``preds``: ``shape [...,time]``
+    - ``target``: ``shape [...,time]``
+
+    Args:
+        fs:
+            sampling frequency, should be 16000 or 8000 (Hz)
+        mode:
+            'wb' (wide-band) or 'nb' (narrow-band)
+        compute_on_step:
+            Forward only calls ``update()`` and returns ``None`` if this is set to ``False``.
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step
+        process_group:
+            Specify the process group on which synchronization is called.
+            default: ``None`` (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When ``None``, DDP
+            will be used to perform the allgather
+
+    Raises:
+        ValueError:
+            If ``pesq`` package is not installed
+        ValueError:
+            If ``fs`` is not either ``8000`` or ``16000``
+        ValueError:
+            If ``mode`` is not either ``"wb"`` or ``"nb"``
+
+    Example:
+        >>> from paddlemetrics.audio import PESQ
+        >>> import torchapi as B
+        >>> g = B.manual_seed(1)
+        >>> preds = B.randn(8000)
+        >>> target = B.randn(8000)
+        >>> nb_pesq = PESQ(8000, 'nb')
+        >>> nb_pesq(preds, target)
+        tensor(2.2076)
+        >>> wb_pesq = PESQ(16000, 'wb')
+        >>> wb_pesq(preds, target)
+        tensor(1.7359)
+
+    References:
+        [1] https://github.com/ludlows/python-pesq
+    """
+
+    sum_pesq: Tensor
+    total: Tensor
+    is_differentiable = False
+    higher_is_better = True
+
+    def __init__(
+        self,
+        fs: int,
+        mode: str,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Optional[Callable[[Tensor], Tensor]] = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+        if not _PESQ_AVAILABLE:
+            raise ValueError(
+                "PESQ metric requires that pesq is installed."
+                "Either install as `pip install paddlemetrics[audio]` or `pip install pesq`"
+            )
+        if fs not in (8000, 16000):
+            raise ValueError(f"Expected argument `fs` to either be 8000 or 16000 but got {fs}")
+        self.fs = fs
+        if mode not in ("wb", "nb"):
+            raise ValueError(f"Expected argument `mode` to either be 'wb' or 'nb' but got {mode}")
+        self.mode = mode
+
+        self.add_state("sum_pesq", default=tensor(0.0), dist_reduce_fx="sum")
+        self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Update state with predictions and targets.
+
+        Args:
+            preds: Predictions from model
+            target: Ground truth values
+        """
+        pesq_batch = pesq(preds, target, self.fs, self.mode, False).to(self.sum_pesq.device)
+
+        self.sum_pesq += pesq_batch.sum()
+        self.total += pesq_batch.numel()
+
+    def compute(self) -> Tensor:
+        """Computes average PESQ."""
+        return self.sum_pesq / self.total
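Conceptually, the ``pit`` functional wrapped by the ``PIT`` class below scores every assignment of estimated speakers to reference speakers and keeps the best one. A brute-force equivalent is a useful mental model (illustrative only, under the assumption that ``metric_func`` returns a scalar per pair; the packaged ``pit`` is the real implementation and is what ``PIT.update()`` calls):

```python
import itertools

from paddlemetrics.functional import si_snr


def pit_brute_force(preds, target, metric_func=si_snr):
    """Average, over the batch, of the best per-item metric across speaker permutations."""
    batch, spk = preds.shape[0], preds.shape[1]
    best = []
    for b in range(batch):
        perm_scores = []
        for perm in itertools.permutations(range(spk)):
            # mean metric for this assignment of estimates to references
            score = sum(metric_func(preds[b, p], target[b, j]) for j, p in enumerate(perm)) / spk
            perm_scores.append(score)
        best.append(max(perm_scores))  # eval_func="max": larger is better
    return sum(best) / batch
```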
diff --git a/EE/paddlemetric/src/paddlemetrics/audio/pit.py b/EE/paddlemetric/src/paddlemetrics/audio/pit.py
new file mode 100644
index 000000000..9d9dc7576
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/audio/pit.py
@@ -0,0 +1,113 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, Dict, Optional
+
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.functional.audio.pit import pit
+from paddlemetrics.metric import Metric
+
+
+class PIT(Metric):
+    """Permutation invariant training (PIT). This metric implements the Permutation Invariant Training
+    method [1] from the speech separation field, in order to calculate audio metrics in a permutation
+    invariant way.
+
+    Forward accepts
+
+    - ``preds``: ``shape [batch, spk, ...]``
+    - ``target``: ``shape [batch, spk, ...]``
+
+    Args:
+        metric_func:
+            a metric function that accepts a batch of target and estimate, i.e. metric_func(preds[:, i, ...],
+            target[:, j, ...]), and returns a batch of metric tensors [batch]
+        eval_func:
+            the function to find the best permutation, can be 'min' or 'max', i.e. whether a smaller
+            or a larger metric value is better.
+        compute_on_step:
+            Forward only calls ``update()`` and returns None if this is set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step.
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When `None`, DDP
+            will be used to perform the allgather.
+        kwargs:
+            additional args for metric_func
+
+    Returns:
+        average PIT metric
+
+    Example:
+        >>> import torchapi as B
+        >>> from paddlemetrics import PIT
+        >>> from paddlemetrics.functional import si_snr
+        >>> _ = B.manual_seed(42)
+        >>> preds = B.randn(3, 2, 5) # [batch, spk, time]
+        >>> target = B.randn(3, 2, 5) # [batch, spk, time]
+        >>> pit = PIT(si_snr, 'max')
+        >>> pit(preds, target)
+        tensor(-2.1065)
+
+    Reference:
+        [1] D. Yu, M. Kolbaek, Z.-H. Tan, J. Jensen, Permutation invariant training of deep models for
+        speaker-independent multi-talker speech separation, in: 2017 IEEE Int. Conf. Acoust. Speech
+        Signal Process. ICASSP, IEEE, New Orleans, LA, 2017: pp. 241–245. https://doi.org/10.1109/ICASSP.2017.7952154.
+    """
+
+    is_differentiable = True
+    sum_pit_metric: Tensor
+    total: Tensor
+
+    def __init__(
+        self,
+        metric_func: Callable,
+        eval_func: str = "max",
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Optional[Callable[[Tensor], Tensor]] = None,
+        **kwargs: Dict[str, Any],
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+        self.metric_func = metric_func
+        self.eval_func = eval_func
+        self.kwargs = kwargs
+
+        self.add_state("sum_pit_metric", default=tensor(0.0), dist_reduce_fx="sum")
+        self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Update state with predictions and targets.
+
+        Args:
+            preds: Predictions from model
+            target: Ground truth values
+        """
+        pit_metric = pit(preds, target, self.metric_func, self.eval_func, **self.kwargs)[0]
+
+        self.sum_pit_metric += pit_metric.sum()
+        self.total += pit_metric.numel()
+
+    def compute(self) -> Tensor:
+        """Computes average PIT metric."""
+        return self.sum_pit_metric / self.total
diff --git a/EE/paddlemetric/src/paddlemetrics/audio/si_sdr.py b/EE/paddlemetric/src/paddlemetrics/audio/si_sdr.py
new file mode 100644
index 000000000..f6a463780
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/audio/si_sdr.py
@@ -0,0 +1,105 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, Optional
+
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.functional.audio.si_sdr import si_sdr
+from paddlemetrics.metric import Metric
+
+
+class SI_SDR(Metric):
+    """Scale-invariant signal-to-distortion ratio (SI-SDR). The SI-SDR value is in general considered an overall
+    measure of how good a source sounds.
+
+    Forward accepts
+
+    - ``preds``: ``shape [...,time]``
+    - ``target``: ``shape [...,time]``
+
+    Args:
+        zero_mean:
+            whether to zero-mean target and preds before computing the metric
+        compute_on_step:
+            Forward only calls ``update()`` and returns None if this is set to False.
default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. + process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When `None`, DDP + will be used to perform the allgather. + + Raises: + TypeError: + if target and preds have a different shape + + Returns: + average si-sdr value + + Example: + >>> import torchapi as B + >>> from paddlemetrics import SI_SDR + >>> target = B.tensor([3.0, -0.5, 2.0, 7.0]) + >>> preds = B.tensor([2.5, 0.0, 2.0, 8.0]) + >>> si_sdr = SI_SDR() + >>> si_sdr_val = si_sdr(preds, target) + >>> si_sdr_val + tensor(18.4030) + + References: + [1] Le Roux, Jonathan, et al. "SDR half-baked or well done." IEEE International Conference on Acoustics, Speech + and Signal Processing (ICASSP) 2019. + """ + + is_differentiable = True + higher_is_better = True + sum_si_sdr: Tensor + total: Tensor + + def __init__( + self, + zero_mean: bool = False, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Optional[Callable[[Tensor], Tensor]] = None, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + self.zero_mean = zero_mean + + self.add_state("sum_si_sdr", default=tensor(0.0), dist_reduce_fx="sum") + self.add_state("total", default=tensor(0), dist_reduce_fx="sum") + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets. + + Args: + preds: Predictions from model + target: Ground truth values + """ + si_sdr_batch = si_sdr(preds=preds, target=target, zero_mean=self.zero_mean) + + self.sum_si_sdr += si_sdr_batch.sum() + self.total += si_sdr_batch.numel() + + def compute(self) -> Tensor: + """Computes average SI-SDR.""" + return self.sum_si_sdr / self.total diff --git a/EE/paddlemetric/src/paddlemetrics/audio/si_snr.py b/EE/paddlemetric/src/paddlemetrics/audio/si_snr.py new file mode 100644 index 000000000..31747a28d --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/audio/si_snr.py @@ -0,0 +1,101 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional + +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.audio.si_snr import si_snr +from paddlemetrics.metric import Metric + + +class SI_SNR(Metric): + """Scale-invariant signal-to-noise ratio (SI-SNR). + + Forward accepts + + - ``preds``: ``shape [...,time]`` + - ``target``: ``shape [...,time]`` + + Args: + compute_on_step: + Forward only calls ``update()`` and returns None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. 
+ process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When `None`, DDP + will be used to perform the allgather. + + Raises: + TypeError: + if target and preds have a different shape + + Returns: + average si-snr value + + Example: + >>> import torchapi as B + >>> from paddlemetrics import SI_SNR + >>> target = B.tensor([3.0, -0.5, 2.0, 7.0]) + >>> preds = B.tensor([2.5, 0.0, 2.0, 8.0]) + >>> si_snr = SI_SNR() + >>> si_snr_val = si_snr(preds, target) + >>> si_snr_val + tensor(15.0918) + + References: + [1] Y. Luo and N. Mesgarani, "TaSNet: Time-Domain Audio Separation Network for Real-Time, Single-Channel Speech + Separation," 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 2018, pp. + 696-700, doi: 10.1109/ICASSP.2018.8462116. + """ + + is_differentiable = True + sum_si_snr: Tensor + total: Tensor + higher_is_better = True + + def __init__( + self, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Optional[Callable[[Tensor], Tensor]] = None, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + self.add_state("sum_si_snr", default=tensor(0.0), dist_reduce_fx="sum") + self.add_state("total", default=tensor(0), dist_reduce_fx="sum") + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets. + + Args: + preds: Predictions from model + target: Ground truth values + """ + si_snr_batch = si_snr(preds=preds, target=target) + + self.sum_si_snr += si_snr_batch.sum() + self.total += si_snr_batch.numel() + + def compute(self) -> Tensor: + """Computes average SI-SNR.""" + return self.sum_si_snr / self.total diff --git a/EE/paddlemetric/src/paddlemetrics/audio/snr.py b/EE/paddlemetric/src/paddlemetrics/audio/snr.py new file mode 100644 index 000000000..683cb8bf3 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/audio/snr.py @@ -0,0 +1,110 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional + +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.audio.snr import snr +from paddlemetrics.metric import Metric + + +class SNR(Metric): + r"""Signal-to-noise ratio (SNR_): + + .. math:: + \text{SNR} = \frac{P_{signal}}{P_{noise}} + + where :math:`P` denotes the power of each signal. The SNR metric compares the level + of the desired signal to the level of background noise. Therefore, a high value of + SNR means that the audio is clear. 
+ + Forward accepts + + - ``preds``: ``shape [..., time]`` + - ``target``: ``shape [..., time]`` + + Args: + zero_mean: + whether to zero mean target and preds before computing the metric + compute_on_step: + Forward only calls ``update()`` and returns None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. + process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When `None`, DDP + will be used to perform the allgather. + + Raises: + TypeError: + if target and preds have a different shape + + Returns: + average snr value + + Example: + >>> import paddleext.torchapi as B + >>> from paddlemetrics import SNR + >>> target = B.tensor([3.0, -0.5, 2.0, 7.0]) + >>> preds = B.tensor([2.5, 0.0, 2.0, 8.0]) + >>> snr = SNR() + >>> snr_val = snr(preds, target) + >>> snr_val + tensor(16.1805) + + References: + [1] Le Roux, Jonathan, et al. "SDR half-baked or well done." IEEE International Conference on Acoustics, Speech + and Signal Processing (ICASSP) 2019. + + """ + is_differentiable = True + sum_snr: Tensor + total: Tensor + + def __init__( + self, + zero_mean: bool = False, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Optional[Callable[[Tensor], Tensor]] = None, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + self.zero_mean = zero_mean + + self.add_state("sum_snr", default=tensor(0.0), dist_reduce_fx="sum") + self.add_state("total", default=tensor(0), dist_reduce_fx="sum") + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets. + + Args: + preds: Predictions from model + target: Ground truth values + """ + snr_batch = snr(preds=preds, target=target, zero_mean=self.zero_mean) + + self.sum_snr += snr_batch.sum() + self.total += snr_batch.numel() + + def compute(self) -> Tensor: + """Computes average SNR.""" + return self.sum_snr / self.total diff --git a/EE/paddlemetric/src/paddlemetrics/audio/stoi.py b/EE/paddlemetric/src/paddlemetrics/audio/stoi.py new file mode 100644 index 000000000..1c8cf3788 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/audio/stoi.py @@ -0,0 +1,133 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional + +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.audio.stoi import stoi +from paddlemetrics.metric import Metric +from paddlemetrics.utilities.imports import _PYSTOI_AVAILABLE + + +class STOI(Metric): + r"""STOI (Short Term Objective Intelligibility, see [2,3]), a wrapper for the pystoi package [1].
+ Note that input will be moved to `cpu` to perform the metric calculation. + + Intelligibility measure which is highly correlated with the intelligibility of degraded speech signals, e.g., due + to additive noise, single/multi-channel noise reduction, binary masking and vocoded speech as in CI simulations. + The STOI-measure is intrusive, i.e., a function of the clean and degraded speech signals. STOI may be a good + alternative to the speech intelligibility index (SII) or the speech transmission index (STI), when you are + interested in the effect of nonlinear processing to noisy speech, e.g., noise reduction, binary masking algorithms, + on speech intelligibility. Description taken from [Cees Taal's website](http://www.ceestaal.nl/code/). + + .. note:: using this metric requires you to have ``pystoi`` installed. Either install as ``pip install + paddlemetrics[audio]`` or ``pip install pystoi`` + + Forward accepts + + - ``preds``: ``shape [...,time]`` + - ``target``: ``shape [...,time]`` + + Args: + fs: + sampling frequency (Hz) + extended: + whether to use the extended STOI described in [4] + compute_on_step: + Forward only calls ``update()`` and returns None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. + process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When `None`, DDP + will be used to perform the allgather. + + Returns: + average STOI value + + Raises: + ModuleNotFoundError: + If ``pystoi`` package is not installed + + Example: + >>> from paddlemetrics.audio import STOI + >>> import paddleext.torchapi as B + >>> g = B.manual_seed(1) + >>> preds = B.randn(8000) + >>> target = B.randn(8000) + >>> stoi = STOI(8000, False) + >>> stoi(preds, target) + tensor(-0.0100) + + References: + [1] https://github.com/mpariente/pystoi + + [2] C.H.Taal, R.C.Hendriks, R.Heusdens, J.Jensen 'A Short-Time Objective Intelligibility Measure for + Time-Frequency Weighted Noisy Speech', ICASSP 2010, Texas, Dallas. + + [3] C.H.Taal, R.C.Hendriks, R.Heusdens, J.Jensen 'An Algorithm for Intelligibility Prediction of + Time-Frequency Weighted Noisy Speech', IEEE Transactions on Audio, Speech, and Language Processing, 2011. + + [4] J. Jensen and C. H. Taal, 'An Algorithm for Predicting the Intelligibility of Speech Masked by Modulated + Noise Maskers', IEEE Transactions on Audio, Speech and Language Processing, 2016. + + """ + sum_stoi: Tensor + total: Tensor + is_differentiable = False + higher_is_better = True + + def __init__( + self, + fs: int, + extended: bool = False, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Optional[Callable[[Tensor], Tensor]] = None, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + if not _PYSTOI_AVAILABLE: + raise ModuleNotFoundError( + "STOI metric requires that pystoi is installed."
+ " Either install as `pip install paddlemetrics[audio]` or `pip install pystoi`" + ) + self.fs = fs + self.extended = extended + + self.add_state("sum_stoi", default=tensor(0.0), dist_reduce_fx="sum") + self.add_state("total", default=tensor(0), dist_reduce_fx="sum") + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets. + + Args: + preds: Predictions from model + target: Ground truth values + """ + stoi_batch = stoi(preds, target, self.fs, self.extended, False).to(self.sum_stoi.device) + + self.sum_stoi += stoi_batch.sum() + self.total += stoi_batch.numel() + + def compute(self) -> Tensor: + """Computes average STOI.""" + return self.sum_stoi / self.total diff --git a/EE/paddlemetric/src/paddlemetrics/classification/__init__.py b/EE/paddlemetric/src/paddlemetrics/classification/__init__.py new file mode 100644 index 000000000..e928018b6 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/classification/__init__.py @@ -0,0 +1,34 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from paddlemetrics.classification.accuracy import Accuracy # noqa: F401 +from paddlemetrics.classification.auc import AUC # noqa: F401 +from paddlemetrics.classification.auroc import AUROC # noqa: F401 +from paddlemetrics.classification.average_precision import AveragePrecision # noqa: F401 +from paddlemetrics.classification.binned_precision_recall import BinnedAveragePrecision # noqa: F401 +from paddlemetrics.classification.binned_precision_recall import BinnedPrecisionRecallCurve # noqa: F401 +from paddlemetrics.classification.binned_precision_recall import BinnedRecallAtFixedPrecision # noqa: F401 +from paddlemetrics.classification.calibration_error import CalibrationError # noqa: F401 +from paddlemetrics.classification.cohen_kappa import CohenKappa # noqa: F401 +from paddlemetrics.classification.confusion_matrix import ConfusionMatrix # noqa: F401 +from paddlemetrics.classification.f_beta import F1, FBeta # noqa: F401 +from paddlemetrics.classification.hamming_distance import HammingDistance # noqa: F401 +from paddlemetrics.classification.hinge import Hinge # noqa: F401 +from paddlemetrics.classification.iou import IoU # noqa: F401 +from paddlemetrics.classification.kl_divergence import KLDivergence # noqa: F401 +from paddlemetrics.classification.matthews_corrcoef import MatthewsCorrcoef # noqa: F401 +from paddlemetrics.classification.precision_recall import Precision, Recall # noqa: F401 +from paddlemetrics.classification.precision_recall_curve import PrecisionRecallCurve # noqa: F401 +from paddlemetrics.classification.roc import ROC # noqa: F401 +from paddlemetrics.classification.specificity import Specificity # noqa: F401 +from paddlemetrics.classification.stat_scores import StatScores # noqa: F401 diff --git a/EE/paddlemetric/src/paddlemetrics/classification/accuracy.py b/EE/paddlemetric/src/paddlemetrics/classification/accuracy.py new file mode 100644 index 000000000..325a18d42 --- /dev/null +++ 
b/EE/paddlemetric/src/paddlemetrics/classification/accuracy.py @@ -0,0 +1,276 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional + +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.classification.accuracy import ( + _accuracy_compute, + _accuracy_update, + _check_subset_validity, + _mode, + _subset_accuracy_compute, + _subset_accuracy_update, +) +from paddlemetrics.utilities.enums import DataType + +from paddlemetrics.classification.stat_scores import StatScores # isort:skip + + +class Accuracy(StatScores): + r""" + Computes Accuracy_: + + .. math:: + \text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i) + + Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a + tensor of predictions. + + For multi-class and multi-dimensional multi-class data with probability or logits predictions, the + parameter ``top_k`` generalizes this metric to a Top-K accuracy metric: for each sample the + top-K highest probability or logit score items are considered to find the correct label. + + For multi-label and multi-dimensional multi-class inputs, this metric computes the "global" + accuracy by default, which counts all labels or sub-samples separately. This can be + changed to subset accuracy (which requires all labels or sub-samples in the sample to + be correctly predicted) by setting ``subset_accuracy=True``. + + Accepts all input types listed in :ref:`references/modules:input types`. + + Args: + num_classes: + Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods. + threshold: + Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case + of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. + average: + Defines the reduction that is applied. Should be one of the following: + + - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes. + - ``'macro'``: Calculate the metric for each class separately, and average the + metrics across classes (with equal weights for each class). + - ``'weighted'``: Calculate the metric for each class separately, and average the + metrics across classes, weighting each class by its support (``tp + fn``). + - ``'none'`` or ``None``: Calculate the metric for each class separately, and return + the metric for every class. + - ``'samples'``: Calculate the metric for each sample, and average the metrics + across samples (with equal weights for each sample). + + .. note:: What is considered a sample in the multi-dimensional multi-class case + depends on the value of ``mdmc_average``. + + .. note:: If ``'none'`` and a given class doesn't occur in the `preds` or `target`, + the value for the class will be ``nan``. + + mdmc_average: + Defines how averaging is done for multi-dimensional multi-class inputs (on top of the + ``average`` parameter). 
Should be one of the following: + + - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional + multi-class. + + - ``'samplewise'``: In this case, the statistics are computed separately for each + sample on the ``N`` axis, and then averaged over samples. + The computation for each sample is done by treating the flattened extra axes ``...`` + (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample, + and computing the metric for the sample based on that. + + - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs + (see :ref:`references/modules:input types`) + are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they + were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. + + ignore_index: + Integer specifying a target class to ignore. If given, this class index does not contribute + to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` + or ``'none'``, the score for the ignored class will be returned as ``nan``. + + top_k: + Number of highest probability or logit score predictions considered to find the correct label, + relevant only for (multi-dimensional) multi-class inputs. The + default value (``None``) will be interpreted as 1 for these inputs. + + Should be left at default (``None``) for all other types of inputs. + + multiclass: + Used only in certain special cases, where you want to treat inputs as a different type + than what they appear to be. See the parameter's + :ref:`documentation section ` + for a more detailed explanation and examples. + + subset_accuracy: + Whether to compute subset accuracy for multi-label and multi-dimensional + multi-class inputs (has no effect for other input types). + + - For multi-label inputs, if the parameter is set to ``True``, then all labels for + each sample must be correctly predicted for the sample to count as correct. If it + is set to ``False``, then all labels are counted separately - this is equivalent to + flattening inputs beforehand (i.e. ``preds = preds.flatten()`` and same for ``target``). + + - For multi-dimensional multi-class inputs, if the parameter is set to ``True``, then all + sub-samples (on the extra axis) must be correct for the sample to be counted as correct. + If it is set to ``False``, then all sub-samples are counted separately - this is equivalent, + in the case of label predictions, to flattening the inputs beforehand (i.e. + ``preds = preds.flatten()`` and same for ``target``). Note that the ``top_k`` parameter + still applies in both cases, if set. + + compute_on_step: + Forward only calls ``update()`` and return ``None`` if this is set to ``False``. + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step + process_group: + Specify the process group on which synchronization is called. + default: ``None`` (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When ``None``, DDP + will be used to perform the allgather + + Raises: + ValueError: + If ``top_k`` is not an ``integer`` larger than ``0``. + ValueError: + If ``average`` is none of ``"micro"``, ``"macro"``, ``"weighted"``, ``"samples"``, ``"none"``, ``None``. + ValueError: + If two different input modes are provided, e.g. using ``multi-label`` with ``multi-class``. + ValueError: + If ``top_k`` parameter is set for ``multi-label`` inputs.
+ + Example: + >>> import paddleext.torchapi as B + >>> from paddlemetrics import Accuracy + >>> target = B.tensor([0, 1, 2, 3]) + >>> preds = B.tensor([0, 2, 1, 3]) + >>> accuracy = Accuracy() + >>> accuracy(preds, target) + tensor(0.5000) + + >>> target = B.tensor([0, 1, 2]) + >>> preds = B.tensor([[0.1, 0.9, 0], [0.3, 0.1, 0.6], [0.2, 0.5, 0.3]]) + >>> accuracy = Accuracy(top_k=2) + >>> accuracy(preds, target) + tensor(0.6667) + + """ + is_differentiable = False + correct: Tensor + total: Tensor + + def __init__( + self, + threshold: float = 0.5, + num_classes: Optional[int] = None, + average: str = "micro", + mdmc_average: Optional[str] = "global", + ignore_index: Optional[int] = None, + top_k: Optional[int] = None, + multiclass: Optional[bool] = None, + subset_accuracy: bool = False, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + allowed_average = ["micro", "macro", "weighted", "samples", "none", None] + if average not in allowed_average: + raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.") + + super().__init__( + reduce="macro" if average in ["weighted", "none", None] else average, + mdmc_reduce=mdmc_average, + threshold=threshold, + top_k=top_k, + num_classes=num_classes, + multiclass=multiclass, + ignore_index=ignore_index, + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + self.add_state("correct", default=tensor(0), dist_reduce_fx="sum") + self.add_state("total", default=tensor(0), dist_reduce_fx="sum") + + if top_k is not None and (not isinstance(top_k, int) or top_k <= 0): + raise ValueError(f"The `top_k` should be an integer larger than 0, got {top_k}") + + self.average = average + self.threshold = threshold + self.top_k = top_k + self.subset_accuracy = subset_accuracy + self.mode: DataType = None # type: ignore + self.multiclass = multiclass + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets. See + :ref:`references/modules:input types` for more information on input + types. 
+ + Args: + preds: Predictions from model (logits, probabilities, or labels) + target: Ground truth labels + """ + # returns the mode of the data (binary, multi label, multi class, multi-dim multi class) + mode = _mode(preds, target, self.threshold, self.top_k, self.num_classes, self.multiclass) + + if not self.mode: + self.mode = mode + elif self.mode != mode: + raise ValueError(f"You cannot use {mode} inputs with {self.mode} inputs.") + + if self.subset_accuracy and not _check_subset_validity(self.mode): + self.subset_accuracy = False + + if self.subset_accuracy: + correct, total = _subset_accuracy_update(preds, target, threshold=self.threshold, top_k=self.top_k) + self.correct += correct + self.total += total + else: + if not self.mode: + raise RuntimeError("You have to have determined mode.") + tp, fp, tn, fn = _accuracy_update( + preds, + target, + reduce=self.reduce, + mdmc_reduce=self.mdmc_reduce, + threshold=self.threshold, + num_classes=self.num_classes, + top_k=self.top_k, + multiclass=self.multiclass, + ignore_index=self.ignore_index, + mode=self.mode, + ) + + # Update states + if self.reduce != "samples" and self.mdmc_reduce != "samplewise": + self.tp += tp + self.fp += fp + self.tn += tn + self.fn += fn + else: + self.tp.append(tp) + self.fp.append(fp) + self.tn.append(tn) + self.fn.append(fn) + + def compute(self) -> Tensor: + """Computes accuracy based on inputs passed in to ``update`` previously.""" + if not self.mode: + raise RuntimeError("You have to have determined mode.") + if self.subset_accuracy: + return _subset_accuracy_compute(self.correct, self.total) + tp, fp, tn, fn = self._get_final_stats() + return _accuracy_compute(tp, fp, tn, fn, self.average, self.mdmc_reduce, self.mode) diff --git a/EE/paddlemetric/src/paddlemetrics/classification/auc.py b/EE/paddlemetric/src/paddlemetrics/classification/auc.py new file mode 100644 index 000000000..99b64048d --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/classification/auc.py @@ -0,0 +1,91 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, List, Optional + +from paddleext.torchapi import Tensor + +from paddlemetrics.functional.classification.auc import _auc_compute, _auc_update +from paddlemetrics.metric import Metric +from paddlemetrics.utilities import rank_zero_warn +from paddlemetrics.utilities.data import dim_zero_cat + + +class AUC(Metric): + r""" + Computes Area Under the Curve (AUC) using the trapezoidal rule + + Forward accepts two input tensors that should be 1D and have the same number + of elements + + Args: + reorder: AUC expects its first input to be sorted. If this is not the case, + setting this argument to ``True`` will use a stable sorting algorithm to + sort the input in descending order + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step.
+ process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + dist_sync_fn: + Callback that performs the ``allgather`` operation on the metric state. When ``None``, DDP + will be used to perform the ``allgather``. + """ + is_differentiable = False + x: List[Tensor] + y: List[Tensor] + + def __init__( + self, + reorder: bool = False, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + self.reorder = reorder + + self.add_state("x", default=[], dist_reduce_fx="cat") + self.add_state("y", default=[], dist_reduce_fx="cat") + + rank_zero_warn( + "Metric `AUC` will save all targets and predictions in buffer." + " For large datasets this may lead to large memory footprint." + ) + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets. + + Args: + preds: Predictions from model (probabilities, or labels) + target: Ground truth labels + """ + x, y = _auc_update(preds, target) + + self.x.append(x) + self.y.append(y) + + def compute(self) -> Tensor: + """Computes AUC based on inputs passed in to ``update`` previously.""" + x = dim_zero_cat(self.x) + y = dim_zero_cat(self.y) + return _auc_compute(x, y, reorder=self.reorder) diff --git a/EE/paddlemetric/src/paddlemetrics/classification/auroc.py b/EE/paddlemetric/src/paddlemetrics/classification/auroc.py new file mode 100644 index 000000000..6236391de --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/classification/auroc.py @@ -0,0 +1,186 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, List, Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.functional.classification.auroc import _auroc_compute, _auroc_update +from paddlemetrics.metric import Metric +from paddlemetrics.utilities import rank_zero_warn +from paddlemetrics.utilities.data import dim_zero_cat +from paddlemetrics.utilities.enums import DataType +from paddlemetrics.utilities.imports import _TORCH_LOWER_1_6 + + +class AUROC(Metric): + r"""Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_). + Works for both binary, multilabel and multiclass problems. In the case of + multiclass, the values will be calculated based on a one-vs-the-rest approach. + + Forward accepts + + - ``preds`` (float tensor): ``(N, ...)`` (binary) or ``(N, C, ...)`` (multiclass) tensor + with probabilities, where C is the number of classes. 
+ + - ``target`` (long tensor): ``(N, ...)`` or ``(N, C, ...)`` with integer labels + + For non-binary input, if the ``preds`` and ``target`` tensors have the same + size the input will be interpreted as multilabel and if ``preds`` has one + dimension more than the ``target`` tensor the input will be interpreted as + multiclass. + + Args: + num_classes: integer with number of classes for multi-label and multiclass problems. + Should be set to ``None`` for binary problems + pos_label: integer determining the positive class. Default is ``None`` + which for binary problems is translated to 1. For multiclass problems + this argument should not be set as we iteratively change it in the + range [0,num_classes-1] + average: + - ``'micro'`` computes metric globally. Only works for multilabel problems + - ``'macro'`` computes metric for each class and uniformly averages them + - ``'weighted'`` computes metric for each class and does a weighted-average, + where each class is weighted by their support (accounts for class imbalance) + - ``None`` computes and returns the metric per class + max_fpr: + If not ``None``, calculates standardized partial AUC over the + range [0, max_fpr]. Should be a float between 0 and 1. + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. + process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When ``None``, DDP + will be used to perform the allgather + + Raises: + ValueError: + If ``average`` is none of ``None``, ``"micro"``, ``"macro"`` or ``"weighted"``. + ValueError: + If ``max_fpr`` is not a ``float`` in the range ``(0, 1]``. + RuntimeError: + If PyTorch version is below 1.6 since ``max_fpr`` requires ``B.bucketize`` + which is not available below 1.6. + ValueError: + If the mode of data (binary, multi-label, multi-class) changes between batches. + + Example (binary case): + >>> from paddlemetrics import AUROC + >>> preds = B.tensor([0.13, 0.26, 0.08, 0.19, 0.34]) + >>> target = B.tensor([0, 0, 1, 1, 1]) + >>> auroc = AUROC(pos_label=1) + >>> auroc(preds, target) + tensor(0.5000) + + Example (multiclass case): + >>> preds = B.tensor([[0.90, 0.05, 0.05], + ... [0.05, 0.90, 0.05], + ... [0.05, 0.05, 0.90], + ... [0.85, 0.05, 0.10], + ... 
[0.10, 0.10, 0.80]]) + >>> target = B.tensor([0, 1, 1, 2, 2]) + >>> auroc = AUROC(num_classes=3) + >>> auroc(preds, target) + tensor(0.7778) + + """ + is_differentiable = False + preds: List[Tensor] + target: List[Tensor] + + def __init__( + self, + num_classes: Optional[int] = None, + pos_label: Optional[int] = None, + average: Optional[str] = "macro", + max_fpr: Optional[float] = None, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + self.num_classes = num_classes + self.pos_label = pos_label + self.average = average + self.max_fpr = max_fpr + + allowed_average = (None, "macro", "weighted", "micro") + if self.average not in allowed_average: + raise ValueError( + f"Argument `average` expected to be one of the following: {allowed_average} but got {average}" + ) + + if self.max_fpr is not None: + if not isinstance(max_fpr, float) or not 0 < max_fpr <= 1: + raise ValueError(f"`max_fpr` should be a float in range (0, 1], got: {max_fpr}") + + if _TORCH_LOWER_1_6: + raise RuntimeError( + "`max_fpr` argument requires `B.bucketize` which is not available below PyTorch version 1.6" + ) + + self.mode: DataType = None # type: ignore + self.add_state("preds", default=[], dist_reduce_fx="cat") + self.add_state("target", default=[], dist_reduce_fx="cat") + + rank_zero_warn( + "Metric `AUROC` will save all targets and predictions in buffer." + " For large datasets this may lead to large memory footprint." + ) + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets. + + Args: + preds: Predictions from model (probabilities, or labels) + target: Ground truth labels + """ + preds, target, mode = _auroc_update(preds, target) + + self.preds.append(preds) + self.target.append(target) + + if self.mode and self.mode != mode: + raise ValueError( + "The mode of data (binary, multi-label, multi-class) should be constant, but changed" + f" between batches from {self.mode} to {mode}" + ) + self.mode = mode + + def compute(self) -> Tensor: + """Computes AUROC based on inputs passed in to ``update`` previously.""" + if not self.mode: + raise RuntimeError("You have to have determined mode.") + preds = dim_zero_cat(self.preds) + target = dim_zero_cat(self.target) + return _auroc_compute( + preds, + target, + self.mode, + self.num_classes, + self.pos_label, + self.average, + self.max_fpr, + ) diff --git a/EE/paddlemetric/src/paddlemetrics/classification/average_precision.py b/EE/paddlemetric/src/paddlemetrics/classification/average_precision.py new file mode 100644 index 000000000..0e37da588 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/classification/average_precision.py @@ -0,0 +1,147 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
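(Aside, not part of the patch: every metric class in this PR shares the same stateful API — ``update()`` accumulates per-batch state, ``compute()`` reduces it. A minimal usage sketch, assuming only the ``paddleext.torchapi`` wrapper this PR ships and the ``AUROC`` class defined above; the tensor values are illustrative:)

import paddleext.torchapi as B
from paddlemetrics import AUROC

auroc = AUROC(pos_label=1)
batches = [
    (B.tensor([0.2, 0.8, 0.6]), B.tensor([0, 1, 1])),
    (B.tensor([0.1, 0.9, 0.4]), B.tensor([0, 1, 0])),
]
for preds, target in batches:
    auroc.update(preds, target)  # buffers preds/target (synced across processes under DDP)
score = auroc.compute()          # concatenates the buffers and computes AUROC once

(Calling the metric object directly, as the doctests above do, runs ``update()`` and, with ``compute_on_step=True``, also returns the value for that step.)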
+from typing import Any, List, Optional, Union + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.functional.classification.average_precision import ( + _average_precision_compute, + _average_precision_update, +) +from paddlemetrics.metric import Metric +from paddlemetrics.utilities import rank_zero_warn +from paddlemetrics.utilities.data import dim_zero_cat + + +class AveragePrecision(Metric): + """Computes the average precision score, which summarises the precision recall curve into one number. Works for + both binary and multiclass problems. In the case of multiclass, the values will be calculated based on a one- + vs-the-rest approach. + + Forward accepts + + - ``preds`` (float tensor): ``(N, ...)`` (binary) or ``(N, C, ...)`` (multiclass) tensor + with probabilities, where C is the number of classes. + + - ``target`` (long tensor): ``(N, ...)`` with integer labels + + Args: + num_classes: integer with number of classes. Not necessary to provide + for binary problems. + pos_label: integer determining the positive class. Default is ``None`` + which for binary problems is translated to 1. For multiclass problems + this argument should not be set as we iteratively change it in the + range [0,num_classes-1] + average: + defines the reduction that is applied in the case of multiclass and multilabel input. + Should be one of the following: + + - ``'macro'`` [default]: Calculate the metric for each class separately, and average the + metrics across classes (with equal weights for each class). + - ``'micro'``: Calculate the metric globally, across all samples and classes. Cannot be + used with multiclass input. + - ``'weighted'``: Calculate the metric for each class separately, and average the + metrics across classes, weighting each class by its support. + - ``'none'`` or ``None``: Calculate the metric for each class separately, and return + the metric for every class. + + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + + Example (binary case): + >>> from paddlemetrics import AveragePrecision + >>> pred = B.tensor([0, 1, 2, 3]) + >>> target = B.tensor([0, 1, 1, 1]) + >>> average_precision = AveragePrecision(pos_label=1) + >>> average_precision(pred, target) + tensor(1.) + + Example (multiclass case): + >>> pred = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05], + ... [0.05, 0.75, 0.05, 0.05, 0.05], + ... [0.05, 0.05, 0.75, 0.05, 0.05], + ... 
[0.05, 0.05, 0.05, 0.75, 0.05]]) + >>> target = B.tensor([0, 1, 3, 2]) + >>> average_precision = AveragePrecision(num_classes=5, average=None) + >>> average_precision(pred, target) + [tensor(1.), tensor(1.), tensor(0.2500), tensor(0.2500), tensor(nan)] + """ + + is_differentiable = False + preds: List[Tensor] + target: List[Tensor] + + def __init__( + self, + num_classes: Optional[int] = None, + pos_label: Optional[int] = None, + average: Optional[str] = "macro", + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + ) + + self.num_classes = num_classes + self.pos_label = pos_label + allowed_average = ("micro", "macro", "weighted", None) + if average not in allowed_average: + raise ValueError(f"Expected argument `average` to be one of {allowed_average}" f" but got {average}") + self.average = average + + self.add_state("preds", default=[], dist_reduce_fx="cat") + self.add_state("target", default=[], dist_reduce_fx="cat") + + rank_zero_warn( + "Metric `AveragePrecision` will save all targets and predictions in buffer." + " For large datasets this may lead to large memory footprint." + ) + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets. + + Args: + preds: Predictions from model + target: Ground truth values + """ + preds, target, num_classes, pos_label = _average_precision_update( + preds, target, self.num_classes, self.pos_label, self.average + ) + self.preds.append(preds) + self.target.append(target) + self.num_classes = num_classes + self.pos_label = pos_label + + def compute(self) -> Union[Tensor, List[Tensor]]: + """Compute the average precision score. + + Returns: + tensor with average precision. If multiclass will return list + of such tensors, one for each class + """ + preds = dim_zero_cat(self.preds) + target = dim_zero_cat(self.target) + if not self.num_classes: + raise ValueError(f"`num_classes` has to be a positive number, but got {self.num_classes}") + return _average_precision_compute(preds, target, self.num_classes, self.pos_label, self.average) diff --git a/EE/paddlemetric/src/paddlemetrics/classification/binned_precision_recall.py b/EE/paddlemetric/src/paddlemetrics/classification/binned_precision_recall.py new file mode 100644 index 000000000..ffc86ae69 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/classification/binned_precision_recall.py @@ -0,0 +1,324 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
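(Aside, not part of the patch: the binned classes in the file below trade exactness for constant memory by counting TP/FP/FN at a fixed set of thresholds instead of storing every prediction. A sketch of that counting idea; the helper name is hypothetical, and the torch-style tensor ops are the same ones the class itself uses:)

import paddleext.torchapi as B

def binned_counts(preds, target, thresholds):
    # For each fixed threshold, accumulate confusion counts rather than
    # retaining raw predictions; memory stays proportional to len(thresholds).
    tps, fps, fns = [], [], []
    for t in thresholds:
        pred_pos = preds >= t
        tps.append(((target == 1) & pred_pos).sum())
        fps.append(((target == 0) & pred_pos).sum())
        fns.append(((target == 1) & ~pred_pos).sum())
    return tps, fps, fns

(Per-threshold precision and recall then follow as TP / (TP + FP) and TP / (TP + FN), which is what ``BinnedPrecisionRecallCurve.compute()`` below evaluates, with a small epsilon for numerical stability.)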
+from typing import Any, List, Optional, Tuple, Union + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.functional.classification.average_precision import _average_precision_compute_with_precision_recall +from paddlemetrics.metric import Metric +from paddlemetrics.utilities.data import METRIC_EPS, to_onehot + + +def _recall_at_precision( + precision: Tensor, + recall: Tensor, + thresholds: Tensor, + min_precision: float, +) -> Tuple[Tensor, Tensor]: + try: + max_recall, _, best_threshold = max( + (r, p, t) for p, r, t in zip(precision, recall, thresholds) if p >= min_precision + ) + + except ValueError: + max_recall = B.tensor(0.0, device=recall.device, dtype=recall.dtype) + best_threshold = B.tensor(0) + + if max_recall == 0.0: + best_threshold = B.tensor(1e6, device=thresholds.device, dtype=thresholds.dtype) + + return max_recall, best_threshold + + +class BinnedPrecisionRecallCurve(Metric): + """Computes precision-recall pairs for different thresholds. Works for both binary and multiclass problems. In + the case of multiclass, the values will be calculated based on a one-vs-the-rest approach. + + Computation is performed in constant-memory by computing precision and recall + for ``thresholds`` buckets/thresholds (evenly distributed between 0 and 1). + + Forward accepts + + - ``preds`` (float tensor): ``(N, ...)`` (binary) or ``(N, C, ...)`` (multiclass) tensor + with probabilities, where C is the number of classes. + + - ``target`` (long tensor): ``(N, ...)`` or ``(N, C, ...)`` with integer labels + + Args: + num_classes: integer with number of classes. For binary, set to 1. + thresholds: list or tensor with specific thresholds, or an integer giving the number of evenly + spaced bins between 0 and 1. More thresholds will lead to a more detailed curve and more + accurate estimates, but will be slower to compute and consume more memory. + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + + Raises: + ValueError: + If ``thresholds`` is not an int, list or tensor + + Example (binary case): + >>> from paddlemetrics import BinnedPrecisionRecallCurve + >>> pred = B.tensor([0, 0.1, 0.8, 0.4]) + >>> target = B.tensor([0, 1, 1, 0]) + >>> pr_curve = BinnedPrecisionRecallCurve(num_classes=1, thresholds=5) + >>> precision, recall, thresholds = pr_curve(pred, target) + >>> precision + tensor([0.5000, 0.5000, 1.0000, 1.0000, 1.0000, 1.0000]) + >>> recall + tensor([1.0000, 0.5000, 0.5000, 0.5000, 0.0000, 0.0000]) + >>> thresholds + tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000]) + + Example (multiclass case): + >>> pred = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05], + ... [0.05, 0.75, 0.05, 0.05, 0.05], + ... [0.05, 0.05, 0.75, 0.05, 0.05], + ... 
[0.05, 0.05, 0.05, 0.75, 0.05]]) + >>> target = B.tensor([0, 1, 3, 2]) + >>> pr_curve = BinnedPrecisionRecallCurve(num_classes=5, thresholds=3) + >>> precision, recall, thresholds = pr_curve(pred, target) + >>> precision # doctest: +NORMALIZE_WHITESPACE + [tensor([0.2500, 1.0000, 1.0000, 1.0000]), + tensor([0.2500, 1.0000, 1.0000, 1.0000]), + tensor([2.5000e-01, 1.0000e-06, 1.0000e+00, 1.0000e+00]), + tensor([2.5000e-01, 1.0000e-06, 1.0000e+00, 1.0000e+00]), + tensor([2.5000e-07, 1.0000e+00, 1.0000e+00, 1.0000e+00])] + >>> recall # doctest: +NORMALIZE_WHITESPACE + [tensor([1.0000, 1.0000, 0.0000, 0.0000]), + tensor([1.0000, 1.0000, 0.0000, 0.0000]), + tensor([1.0000, 0.0000, 0.0000, 0.0000]), + tensor([1.0000, 0.0000, 0.0000, 0.0000]), + tensor([0., 0., 0., 0.])] + >>> thresholds # doctest: +NORMALIZE_WHITESPACE + [tensor([0.0000, 0.5000, 1.0000]), + tensor([0.0000, 0.5000, 1.0000]), + tensor([0.0000, 0.5000, 1.0000]), + tensor([0.0000, 0.5000, 1.0000]), + tensor([0.0000, 0.5000, 1.0000])] + """ + + TPs: Tensor + FPs: Tensor + FNs: Tensor + + def __init__( + self, + num_classes: int, + thresholds: Union[int, Tensor, List[float], None] = None, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + ) + + self.num_classes = num_classes + if isinstance(thresholds, int): + self.num_thresholds = thresholds + thresholds = B.linspace(0, 1.0, thresholds) + self.register_buffer("thresholds", thresholds) + elif thresholds is not None: + if not isinstance(thresholds, (list, Tensor)): + raise ValueError("Expected argument `thresholds` to either be an integer, list of floats or a tensor") + thresholds = B.tensor(thresholds) if isinstance(thresholds, list) else thresholds + self.num_thresholds = thresholds.numel() + self.register_buffer("thresholds", thresholds) + + for name in ("TPs", "FPs", "FNs"): + self.add_state( + name=name, + default=B.zeros(num_classes, self.num_thresholds, dtype=B.float32), + dist_reduce_fx="sum", + ) + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """ + Args + preds: (n_samples, n_classes) tensor + target: (n_samples, n_classes) tensor + """ + # binary case + if len(preds.shape) == len(target.shape) == 1: + preds = preds.reshape(-1, 1) + target = target.reshape(-1, 1) + + if len(preds.shape) == len(target.shape) + 1: + target = to_onehot(target, num_classes=self.num_classes) + + target = target == 1 + # Iterate one threshold at a time to conserve memory + for i in range(self.num_thresholds): + predictions = preds >= self.thresholds[i] + self.TPs[:, i] += (target & predictions).sum(dim=0) + self.FPs[:, i] += ((~target) & (predictions)).sum(dim=0) + self.FNs[:, i] += ((target) & (~predictions)).sum(dim=0) + + def compute(self) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]: + """Returns float tensor of size n_classes.""" + precisions = (self.TPs + METRIC_EPS) / (self.TPs + self.FPs + METRIC_EPS) + recalls = self.TPs / (self.TPs + self.FNs + METRIC_EPS) + + # Need to guarantee that last precision=1 and recall=0, similar to precision_recall_curve + t_ones = B.ones(self.num_classes, 1, dtype=precisions.dtype, device=precisions.device) + precisions = B.cat([precisions, t_ones], dim=1) + t_zeros = B.zeros(self.num_classes, 1, dtype=recalls.dtype, device=recalls.device) + recalls = B.cat([recalls, t_zeros], dim=1) + if 
self.num_classes == 1: + return precisions[0, :], recalls[0, :], self.thresholds + return list(precisions), list(recalls), [self.thresholds for _ in range(self.num_classes)] + + +class BinnedAveragePrecision(BinnedPrecisionRecallCurve): + """Computes the average precision score, which summarises the precision recall curve into one number. Works for + both binary and multiclass problems. In the case of multiclass, the values will be calculated based on a one- + vs-the-rest approach. + + Computation is performed in constant-memory by computing precision and recall + for ``thresholds`` buckets/thresholds (evenly distributed between 0 and 1). + + Forward accepts + + - ``preds`` (float tensor): ``(N, ...)`` (binary) or ``(N, C, ...)`` (multiclass) tensor + with probabilities, where C is the number of classes. + + - ``target`` (long tensor): ``(N, ...)`` with integer labels + + Args: + num_classes: integer with number of classes. Not necessary to provide + for binary problems. + thresholds: list or tensor with specific thresholds, or an integer giving the number of evenly + spaced bins between 0 and 1. More thresholds will lead to a more detailed curve and more + accurate estimates, but will be slower to compute and consume more memory + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + + Raises: + ValueError: + If ``thresholds`` is not a list or tensor + + Example (binary case): + >>> from paddlemetrics import BinnedAveragePrecision + >>> pred = B.tensor([0, 1, 2, 3]) + >>> target = B.tensor([0, 1, 1, 1]) + >>> average_precision = BinnedAveragePrecision(num_classes=1, thresholds=10) + >>> average_precision(pred, target) + tensor(1.0000) + + Example (multiclass case): + >>> pred = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05], + ... [0.05, 0.75, 0.05, 0.05, 0.05], + ... [0.05, 0.05, 0.75, 0.05, 0.05], + ... [0.05, 0.05, 0.05, 0.75, 0.05]]) + >>> target = B.tensor([0, 1, 3, 2]) + >>> average_precision = BinnedAveragePrecision(num_classes=5, thresholds=10) + >>> average_precision(pred, target) + [tensor(1.0000), tensor(1.0000), tensor(0.2500), tensor(0.2500), tensor(-0.)] + """ + + def compute(self) -> Union[List[Tensor], Tensor]: # type: ignore + precisions, recalls, _ = super().compute() + return _average_precision_compute_with_precision_recall(precisions, recalls, self.num_classes, average=None) + + +class BinnedRecallAtFixedPrecision(BinnedPrecisionRecallCurve): + """Computes the highest possible recall value given the minimum precision threshold provided. + + Computation is performed in constant-memory by computing precision and recall + for ``thresholds`` buckets/thresholds (evenly distributed between 0 and 1). + + Forward accepts + + - ``preds`` (float tensor): ``(N, ...)`` (binary) or ``(N, C, ...)`` (multiclass) tensor + with probabilities, where C is the number of classes. + + - ``target`` (long tensor): ``(N, ...)`` with integer labels + + Args: + num_classes: integer with number of classes. Provide 1 for binary problems. + min_precision: float value specifying minimum precision threshold. + thresholds: list or tensor with specific thresholds, or an integer giving the number of evenly + spaced bins between 0 and 1. More thresholds will lead to a more detailed curve and more + accurate estimates, but will be slower to compute and consume more memory + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False.
default: True + process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + + Raises: + ValueError: + If ``thresholds`` is not a list or tensor + + Example (binary case): + >>> from paddlemetrics import BinnedRecallAtFixedPrecision + >>> pred = B.tensor([0, 0.2, 0.5, 0.8]) + >>> target = B.tensor([0, 1, 1, 0]) + >>> average_precision = BinnedRecallAtFixedPrecision(num_classes=1, thresholds=10, min_precision=0.5) + >>> average_precision(pred, target) + (tensor(1.0000), tensor(0.1111)) + + Example (multiclass case): + >>> pred = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05], + ... [0.05, 0.75, 0.05, 0.05, 0.05], + ... [0.05, 0.05, 0.75, 0.05, 0.05], + ... [0.05, 0.05, 0.05, 0.75, 0.05]]) + >>> target = B.tensor([0, 1, 3, 2]) + >>> average_precision = BinnedRecallAtFixedPrecision(num_classes=5, thresholds=10, min_precision=0.5) + >>> average_precision(pred, target) # doctest: +NORMALIZE_WHITESPACE + (tensor([1.0000, 1.0000, 0.0000, 0.0000, 0.0000]), + tensor([6.6667e-01, 6.6667e-01, 1.0000e+06, 1.0000e+06, 1.0000e+06])) + """ + + def __init__( + self, + num_classes: int, + min_precision: float, + thresholds: Union[int, Tensor, List[float], None] = None, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + ) -> None: + super().__init__( + num_classes=num_classes, + thresholds=thresholds, + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + ) + self.min_precision = min_precision + + def compute(self) -> Tuple[Tensor, Tensor]: # type: ignore + """Returns float tensor of size n_classes.""" + precisions, recalls, thresholds = super().compute() + + if self.num_classes == 1: + return _recall_at_precision(precisions, recalls, thresholds, self.min_precision) + + recalls_at_p = B.zeros(self.num_classes, device=recalls[0].device, dtype=recalls[0].dtype) + thresholds_at_p = B.zeros(self.num_classes, device=thresholds[0].device, dtype=thresholds[0].dtype) + for i in range(self.num_classes): + recalls_at_p[i], thresholds_at_p[i] = _recall_at_precision( + precisions[i], recalls[i], thresholds[i], self.min_precision + ) + return recalls_at_p, thresholds_at_p diff --git a/EE/paddlemetric/src/paddlemetrics/classification/calibration_error.py b/EE/paddlemetric/src/paddlemetrics/classification/calibration_error.py new file mode 100644 index 000000000..5fc9d10a0 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/classification/calibration_error.py @@ -0,0 +1,115 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
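(Aside, not part of the patch: the class in the file below bins predictions by confidence and compares per-bin confidence with per-bin accuracy. A sketch of the L1-norm (ECE) reduction under those definitions; the helper name is hypothetical and torch-style tensor ops on the ``paddleext.torchapi`` wrapper are assumed:)

import paddleext.torchapi as B

def ece_sketch(confidences, accuracies, n_bins=15):
    # Mass-weighted gap between average confidence and empirical accuracy,
    # accumulated over equally sized confidence bins on [0, 1].
    boundaries = [i / n_bins for i in range(n_bins + 1)]
    n = confidences.numel()
    ece = B.tensor(0.0)
    for lo, hi in zip(boundaries[:-1], boundaries[1:]):
        mask = ((confidences > lo) & (confidences <= hi)).float()
        count = mask.sum()
        if float(count) > 0:
            acc = (accuracies.float() * mask).sum() / count   # empirical accuracy in bin
            conf = (confidences * mask).sum() / count         # average confidence in bin
            ece += (count / n) * (acc - conf).abs()           # L1 contribution of the bin
    return ece

(The max norm instead takes the largest per-bin gap, and the L2 norm the root of the mass-weighted squared gaps, matching the three norms documented below.)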
+from typing import Any, List, Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.functional.classification.calibration_error import _ce_compute, _ce_update +from paddlemetrics.metric import Metric +from paddlemetrics.utilities.data import dim_zero_cat + + +class CalibrationError(Metric): + r""" + + `Computes the Top-label Calibration Error`_ + Three different norms are implemented, each corresponding to variations on the calibration error metric. + + L1 norm (Expected Calibration Error) + + .. math:: + \text{ECE} = \frac{1}{N}\sum_i^N \|(p_i - c_i)\| + + Infinity norm (Maximum Calibration Error) + + .. math:: + \text{MCE} = \max_{i} (p_i - c_i) + + L2 norm (Root Mean Square Calibration Error) + + .. math:: + \text{RMSCE} = \sqrt{\frac{1}{N}\sum_i^N (p_i - c_i)^2} + + Where :math:`p_i` is the top-1 prediction accuracy in bin :math:`i` + and :math:`c_i` is the average confidence of predictions in bin :math:`i`. + + .. note:: + L2-norm debiasing is not yet supported. + + Args: + n_bins: Number of bins to use when computing probabilities and accuracies. + norm: Norm used to compare empirical and expected probability bins. + Defaults to "l1", or Expected Calibration Error. + compute_on_step: Forward only calls ``update()`` and return None if this is set to False. + dist_sync_on_step: Synchronize metric state across processes at each ``forward()`` + before returning the value at the step + process_group: Specify the process group on which synchronization is called. + default: None (which selects the entire world) + """ + DISTANCES = {"l1", "l2", "max"} + confidences: List[Tensor] + accuracies: List[Tensor] + + def __init__( + self, + n_bins: int = 15, + norm: str = "l1", + compute_on_step: bool = False, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + ): + + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=None, + ) + + if norm not in self.DISTANCES: + raise ValueError(f"Norm {norm} is not supported. Please select from l1, l2, or max.") + + if not isinstance(n_bins, int) or n_bins <= 0: + raise ValueError(f"Expected argument `n_bins` to be an int larger than 0 but got {n_bins}") + self.n_bins = n_bins + self.register_buffer("bin_boundaries", B.linspace(0, 1, n_bins + 1)) + self.norm = norm + + self.add_state("confidences", [], dist_reduce_fx="cat") + self.add_state("accuracies", [], dist_reduce_fx="cat") + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Computes top-label confidences and accuracies for the input probabilities and appends them to internal + state. + + Args: + preds (Tensor): Model output probabilities. + target (Tensor): Ground-truth target class labels. + """ + confidences, accuracies = _ce_update(preds, target) + + self.confidences.append(confidences) + self.accuracies.append(accuracies) + + def compute(self) -> Tensor: + """Computes calibration error across all confidences and accuracies. + + Returns: + Tensor: Calibration error across previously collected examples.
+ """ + confidences = dim_zero_cat(self.confidences) + accuracies = dim_zero_cat(self.accuracies) + return _ce_compute(confidences, accuracies, self.bin_boundaries, norm=self.norm) diff --git a/EE/paddlemetric/src/paddlemetrics/classification/cohen_kappa.py b/EE/paddlemetric/src/paddlemetrics/classification/cohen_kappa.py new file mode 100644 index 000000000..3a4817cf4 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/classification/cohen_kappa.py @@ -0,0 +1,119 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.functional.classification.cohen_kappa import _cohen_kappa_compute, _cohen_kappa_update +from paddlemetrics.metric import Metric + + +class CohenKappa(Metric): + r""" + Calculates `Cohen's kappa score`_ that measures + inter-annotator agreement. It is defined as + + .. math:: + \kappa = (p_o - p_e) / (1 - p_e) + + where :math:`p_o` is the empirical probability of agreement and :math:`p_e` is + the expected agreement when both annotators assign labels randomly. Note that + :math:`p_e` is estimated using a per-annotator empirical prior over the + class labels. + + Works with binary, multiclass, and multilabel data. Accepts probabilities from a model output or + integer class values in prediction. Works with multi-dimensional preds and target. + + Forward accepts + - ``preds`` (float or long tensor): ``(N, ...)`` or ``(N, C, ...)`` where C is the number of classes + + - ``target`` (long tensor): ``(N, ...)`` + + If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument + to convert into integer labels. This is the case for binary and multi-label probabilities or logits. + + If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``. + + Args: + num_classes: Number of classes in the dataset. + + weights: Weighting type to calculate the score. Choose from + - ``None`` or ``'none'``: no weighting + - ``'linear'``: linear weighting + - ``'quadratic'``: quadratic weighting + + threshold: + Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case + of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. + + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + + process_group: + Specify the process group on which synchronization is called. 
default: None (which selects the entire world)
+
+    Example:
+        >>> from paddlemetrics import CohenKappa
+        >>> target = B.tensor([1, 1, 0, 0])
+        >>> preds = B.tensor([0, 1, 0, 0])
+        >>> cohenkappa = CohenKappa(num_classes=2)
+        >>> cohenkappa(preds, target)
+        tensor(0.5000)
+
+    """
+    is_differentiable = False
+    confmat: Tensor
+
+    def __init__(
+        self,
+        num_classes: int,
+        weights: Optional[str] = None,
+        threshold: float = 0.5,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+        )
+        self.num_classes = num_classes
+        self.weights = weights
+        self.threshold = threshold
+
+        allowed_weights = ("linear", "quadratic", "none", None)
+        if self.weights not in allowed_weights:
+            raise ValueError(f"Argument `weights` needs to be one of the following: {allowed_weights}")
+
+        self.add_state("confmat", default=B.zeros(num_classes, num_classes), dist_reduce_fx="sum")
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Update state with predictions and targets.
+
+        Args:
+            preds: Predictions from model
+            target: Ground truth values
+        """
+        confmat = _cohen_kappa_update(preds, target, self.num_classes, self.threshold)
+        self.confmat += confmat
+
+    def compute(self) -> Tensor:
+        """Computes Cohen's kappa score."""
+        return _cohen_kappa_compute(self.confmat, self.weights)
diff --git a/EE/paddlemetric/src/paddlemetrics/classification/confusion_matrix.py b/EE/paddlemetric/src/paddlemetrics/classification/confusion_matrix.py
new file mode 100644
index 000000000..a3485570d
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/classification/confusion_matrix.py
@@ -0,0 +1,141 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.classification.confusion_matrix import _confusion_matrix_compute, _confusion_matrix_update
+from paddlemetrics.metric import Metric
+
+
+class ConfusionMatrix(Metric):
+    r"""
+    Computes the `confusion matrix`_. Works with binary,
+    multiclass, and multilabel data. Accepts probabilities or logits from a model output or integer class
+    values in prediction. Works with multi-dimensional preds and target, but it should be noted that
+    additional dimensions will be flattened.
+
+    Forward accepts
+
+    - ``preds`` (float or long tensor): ``(N, ...)`` or ``(N, C, ...)`` where C is the number of classes
+    - ``target`` (long tensor): ``(N, ...)``
+
+    If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument
+    to convert into integer labels. This is the case for binary and multi-label probabilities or logits.
+
+    If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``.
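+
+    For example, with the default ``threshold=0.5`` a float binary ``preds`` tensor
+    ``[0.2, 0.8]`` is converted to the integer labels ``[0, 1]``.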
+
+    If working with multilabel data, setting the `multilabel` argument to `True` will make sure that a
+    `confusion matrix gets calculated per label`_.
+
+    Args:
+        num_classes: Number of classes in the dataset.
+        normalize: Normalization mode for confusion matrix. Choose from
+
+            - ``None`` or ``'none'``: no normalization (default)
+            - ``'true'``: normalization over the targets (most commonly used)
+            - ``'pred'``: normalization over the predictions
+            - ``'all'``: normalization over the whole matrix
+
+        threshold:
+            Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
+            of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
+
+        multilabel:
+            determines if data is multilabel or not.
+        compute_on_step:
+            Forward only calls ``update()`` and return None if this is set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step. default: False
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+
+    Example (binary data):
+        >>> from paddlemetrics import ConfusionMatrix
+        >>> target = B.tensor([1, 1, 0, 0])
+        >>> preds = B.tensor([0, 1, 0, 0])
+        >>> confmat = ConfusionMatrix(num_classes=2)
+        >>> confmat(preds, target)
+        tensor([[2., 0.],
+                [1., 1.]])
+
+    Example (multiclass data):
+        >>> target = B.tensor([2, 1, 0, 0])
+        >>> preds = B.tensor([2, 1, 0, 1])
+        >>> confmat = ConfusionMatrix(num_classes=3)
+        >>> confmat(preds, target)
+        tensor([[1., 1., 0.],
+                [0., 1., 0.],
+                [0., 0., 1.]])
+
+    Example (multilabel data):
+        >>> target = B.tensor([[0, 1, 0], [1, 0, 1]])
+        >>> preds = B.tensor([[0, 0, 1], [1, 0, 1]])
+        >>> confmat = ConfusionMatrix(num_classes=3, multilabel=True)
+        >>> confmat(preds, target)  # doctest: +NORMALIZE_WHITESPACE
+        tensor([[[1., 0.], [0., 1.]],
+                [[1., 0.], [1., 0.]],
+                [[0., 1.], [0., 1.]]])
+
+    """
+    is_differentiable = False
+    confmat: Tensor
+
+    def __init__(
+        self,
+        num_classes: int,
+        normalize: Optional[str] = None,
+        threshold: float = 0.5,
+        multilabel: bool = False,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+        )
+        self.num_classes = num_classes
+        self.normalize = normalize
+        self.threshold = threshold
+        self.multilabel = multilabel
+
+        allowed_normalize = ("true", "pred", "all", "none", None)
+        if self.normalize not in allowed_normalize:
+            raise ValueError(f"Argument `normalize` needs to be one of the following: {allowed_normalize}")
+
+        default = B.zeros(num_classes, 2, 2) if multilabel else B.zeros(num_classes, num_classes)
+        self.add_state("confmat", default=default, dist_reduce_fx="sum")
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Update state with predictions and targets.
+
+        Args:
+            preds: Predictions from model
+            target: Ground truth values
+        """
+        confmat = _confusion_matrix_update(preds, target, self.num_classes, self.threshold, self.multilabel)
+        self.confmat += confmat
+
+    def compute(self) -> Tensor:
+        """Computes confusion matrix.
+ + Returns: + If `multilabel=False` this will be a `[n_classes, n_classes]` tensor and if `multilabel=True` + this will be a `[n_classes, 2, 2]` tensor + """ + return _confusion_matrix_compute(self.confmat, self.normalize) diff --git a/EE/paddlemetric/src/paddlemetrics/classification/f_beta.py b/EE/paddlemetric/src/paddlemetrics/classification/f_beta.py new file mode 100644 index 000000000..4b24dc0e9 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/classification/f_beta.py @@ -0,0 +1,301 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.classification.stat_scores import StatScores +from paddlemetrics.functional.classification.f_beta import _fbeta_compute +from paddlemetrics.utilities.enums import AverageMethod + + +class FBeta(StatScores): + r""" + Computes `F-score`_, specifically: + + .. math:: + F_\beta = (1 + \beta^2) * \frac{\text{precision} * \text{recall}} + {(\beta^2 * \text{precision}) + \text{recall}} + + Where :math:`\beta` is some positive real factor. Works with binary, multiclass, and multilabel data. + Accepts logit scores or probabilities from a model output or integer class values in prediction. + Works with multi-dimensional preds and target. + + Forward accepts + + - ``preds`` (float or long tensor): ``(N, ...)`` or ``(N, C, ...)`` where C is the number of classes + - ``target`` (long tensor): ``(N, ...)`` + + If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument + to convert into integer labels. This is the case for binary and multi-label logits and probabilities. + + If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``. + + Args: + num_classes: + Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods. + beta: + Beta coefficient in the F measure. + threshold: + Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case + of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. + average: + Defines the reduction that is applied. Should be one of the following: + + - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes. + - ``'macro'``: Calculate the metric for each class separately, and average the + metrics across classes (with equal weights for each class). + - ``'weighted'``: Calculate the metric for each class separately, and average the + metrics across classes, weighting each class by its support (``tp + fn``). + - ``'none'`` or ``None``: Calculate the metric for each class separately, and return + the metric for every class. + - ``'samples'``: Calculate the metric for each sample, and average the metrics + across samples (with equal weights for each sample). + + .. 
note:: What is considered a sample in the multi-dimensional multi-class case + depends on the value of ``mdmc_average``. + + .. note:: If ``'none'`` and a given class doesn't occur in the `preds` or `target`, + the value for the class will be ``nan``. + + mdmc_average: + Defines how averaging is done for multi-dimensional multi-class inputs (on top of the + ``average`` parameter). Should be one of the following: + + - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional + multi-class. + + - ``'samplewise'``: In this case, the statistics are computed separately for each + sample on the ``N`` axis, and then averaged over samples. + The computation for each sample is done by treating the flattened extra axes ``...`` + (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample, + and computing the metric for the sample based on that. + + - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs + (see :ref:`references/modules:input types`) + are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they + were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. + + ignore_index: + Integer specifying a target class to ignore. If given, this class index does not contribute + to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` + or ``'none'``, the score for the ignored class will be returned as ``nan``. + + top_k: + Number of highest probability or logit score predictions considered to find the correct label, + relevant only for (multi-dimensional) multi-class inputs. The + default value (``None``) will be interpreted as 1 for these inputs. + + Should be left at default (``None``) for all other types of inputs. + + multiclass: + Used only in certain special cases, where you want to treat inputs as a different type + than what they appear to be. See the parameter's + :ref:`documentation section ` + for a more detailed explanation and examples. + + compute_on_step: + Forward only calls ``update()`` and return ``None`` if this is set to ``False``. + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step + process_group: + Specify the process group on which synchronization is called. + default: ``None`` (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When ``None``, DDP + will be used to perform the allgather. + + Raises: + ValueError: + If ``average`` is none of ``"micro"``, ``"macro"``, ``"weighted"``, ``"none"``, ``None``. 
+ + Example: + >>> from paddlemetrics import FBeta + >>> target = B.tensor([0, 1, 2, 0, 1, 2]) + >>> preds = B.tensor([0, 2, 1, 0, 0, 1]) + >>> f_beta = FBeta(num_classes=3, beta=0.5) + >>> f_beta(preds, target) + tensor(0.3333) + + """ + + def __init__( + self, + num_classes: Optional[int] = None, + beta: float = 1.0, + threshold: float = 0.5, + average: str = "micro", + mdmc_average: Optional[str] = None, + ignore_index: Optional[int] = None, + top_k: Optional[int] = None, + multiclass: Optional[bool] = None, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + self.beta = beta + allowed_average = list(AverageMethod) + if average not in allowed_average: + raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.") + + super().__init__( + reduce="macro" if average in [AverageMethod.WEIGHTED, AverageMethod.NONE] else average, + mdmc_reduce=mdmc_average, + threshold=threshold, + top_k=top_k, + num_classes=num_classes, + multiclass=multiclass, + ignore_index=ignore_index, + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + self.average = average + + def compute(self) -> Tensor: + """Computes fbeta over state.""" + tp, fp, tn, fn = self._get_final_stats() + return _fbeta_compute(tp, fp, tn, fn, self.beta, self.ignore_index, self.average, self.mdmc_reduce) + + +class F1(FBeta): + """Computes F1 metric. F1 metrics correspond to a harmonic mean of the precision and recall scores. + + Works with binary, multiclass, and multilabel data. Accepts logits or probabilities from a model + output or integer class values in prediction. Works with multi-dimensional preds and target. + + Forward accepts + + - ``preds`` (float or long tensor): ``(N, ...)`` or ``(N, C, ...)`` where C is the number of classes + - ``target`` (long tensor): ``(N, ...)`` + + If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument. + This is the case for binary and multi-label logits. + + If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``. + + Args: + num_classes: + Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods. + threshold: + Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case + of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. + average: + Defines the reduction that is applied. Should be one of the following: + + - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes. + - ``'macro'``: Calculate the metric for each class separately, and average the + metrics across classes (with equal weights for each class). + - ``'weighted'``: Calculate the metric for each class separately, and average the + metrics across classes, weighting each class by its support (``tp + fn``). + - ``'none'`` or ``None``: Calculate the metric for each class separately, and return + the metric for every class. + - ``'samples'``: Calculate the metric for each sample, and average the metrics + across samples (with equal weights for each sample). + + .. note:: What is considered a sample in the multi-dimensional multi-class case + depends on the value of ``mdmc_average``. 
+ + mdmc_average: + Defines how averaging is done for multi-dimensional multi-class inputs (on top of the + ``average`` parameter). Should be one of the following: + + - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional + multi-class. + + - ``'samplewise'``: In this case, the statistics are computed separately for each + sample on the ``N`` axis, and then averaged over samples. + The computation for each sample is done by treating the flattened extra axes ``...`` + (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample, + and computing the metric for the sample based on that. + + - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs + (see :ref:`references/modules:input types`) + are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they + were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. + + ignore_index: + Integer specifying a target class to ignore. If given, this class index does not contribute + to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` + or ``'none'``, the score for the ignored class will be returned as ``nan``. + + top_k: + Number of highest probability or logit score predictions considered to find the correct label, + relevant only for (multi-dimensional) multi-class inputs. The + default value (``None``) will be interpreted as 1 for these inputs. + + Should be left at default (``None``) for all other types of inputs. + multiclass: + Used only in certain special cases, where you want to treat inputs as a different type + than what they appear to be. See the parameter's + :ref:`documentation section ` + for a more detailed explanation and examples. + + compute_on_step: + Forward only calls ``update()`` and return ``None`` if this is set to ``False``. + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step + process_group: + Specify the process group on which synchronization is called. + default: ``None`` (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When ``None``, DDP + will be used to perform the allgather. 
+ + + Example: + >>> from paddlemetrics import F1 + >>> target = B.tensor([0, 1, 2, 0, 1, 2]) + >>> preds = B.tensor([0, 2, 1, 0, 0, 1]) + >>> f1 = F1(num_classes=3) + >>> f1(preds, target) + tensor(0.3333) + """ + + is_differentiable = False + + def __init__( + self, + num_classes: Optional[int] = None, + threshold: float = 0.5, + average: str = "micro", + mdmc_average: Optional[str] = None, + ignore_index: Optional[int] = None, + top_k: Optional[int] = None, + multiclass: Optional[bool] = None, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + num_classes=num_classes, + beta=1.0, + threshold=threshold, + average=average, + mdmc_average=mdmc_average, + ignore_index=ignore_index, + top_k=top_k, + multiclass=multiclass, + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) diff --git a/EE/paddlemetric/src/paddlemetrics/classification/hamming_distance.py b/EE/paddlemetric/src/paddlemetrics/classification/hamming_distance.py new file mode 100644 index 000000000..855d7f7e8 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/classification/hamming_distance.py @@ -0,0 +1,110 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.classification.hamming_distance import _hamming_distance_compute, _hamming_distance_update +from paddlemetrics.metric import Metric + + +class HammingDistance(Metric): + r""" + Computes the average `Hamming distance`_ (also + known as Hamming loss) between targets and predictions: + + .. math:: + \text{Hamming distance} = \frac{1}{N \cdot L}\sum_i^N \sum_l^L 1(y_{il} \neq \hat{y_{il}}) + + Where :math:`y` is a tensor of target values, :math:`\hat{y}` is a tensor of predictions, + and :math:`\bullet_{il}` refers to the :math:`l`-th label of the :math:`i`-th sample of that + tensor. + + This is the same as ``1-accuracy`` for binary data, while for all other types of inputs it + treats each possible label separately - meaning that, for example, multi-class data is + treated as if it were multi-label. + + Accepts all input types listed in :ref:`references/modules:input types`. + + Args: + threshold: + Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case + of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. + compute_on_step: + Forward only calls ``update()`` and return ``None`` if this is set to ``False``. + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. + process_group: + Specify the process group on which synchronization is called. 
+ default: ``None`` (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When ``None``, DDP + will be used to perform the all gather. + + Raises: + ValueError: + If ``threshold`` is not between ``0`` and ``1``. + + Example: + >>> from paddlemetrics import HammingDistance + >>> target = B.tensor([[0, 1], [1, 1]]) + >>> preds = B.tensor([[0, 1], [0, 1]]) + >>> hamming_distance = HammingDistance() + >>> hamming_distance(preds, target) + tensor(0.2500) + + """ + is_differentiable = False + correct: Tensor + total: Tensor + + def __init__( + self, + threshold: float = 0.5, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + self.add_state("correct", default=tensor(0), dist_reduce_fx="sum") + self.add_state("total", default=tensor(0), dist_reduce_fx="sum") + + self.threshold = threshold + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets. See + :ref:`references/modules:input types` for more information on input + types. + + Args: + preds: Predictions from model (probabilities, logits or labels) + target: Ground truth labels + """ + correct, total = _hamming_distance_update(preds, target, self.threshold) + + self.correct += correct + self.total += total + + def compute(self) -> Tensor: + """Computes hamming distance based on inputs passed in to ``update`` previously.""" + return _hamming_distance_compute(self.correct, self.total) diff --git a/EE/paddlemetric/src/paddlemetrics/classification/hinge.py b/EE/paddlemetric/src/paddlemetrics/classification/hinge.py new file mode 100644 index 000000000..099864a35 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/classification/hinge.py @@ -0,0 +1,127 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional, Union + +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.classification.hinge import MulticlassMode, _hinge_compute, _hinge_update +from paddlemetrics.metric import Metric + + +class Hinge(Metric): + r""" + Computes the mean `Hinge loss`_, typically used for Support Vector + Machines (SVMs). In the binary case it is defined as: + + .. math:: + \text{Hinge loss} = \max(0, 1 - y \times \hat{y}) + + Where :math:`y \in {-1, 1}` is the target, and :math:`\hat{y} \in \mathbb{R}` is the prediction. + + In the multi-class case, when ``multiclass_mode=None`` (default), ``multiclass_mode=MulticlassMode.CRAMMER_SINGER`` + or ``multiclass_mode="crammer-singer"``, this metric will compute the multi-class hinge loss defined by Crammer and + Singer as: + + .. 
math::
+        \text{Hinge loss} = \max\left(0, 1 - \hat{y}_y + \max_{i \ne y} (\hat{y}_i)\right)
+
+    Where :math:`y \in {0, ..., \mathrm{C}}` is the target class (where :math:`\mathrm{C}` is the number of classes),
+    and :math:`\hat{y} \in \mathbb{R}^\mathrm{C}` is the predicted output per class.
+
+    In the multi-class case when ``multiclass_mode=MulticlassMode.ONE_VS_ALL`` or ``multiclass_mode='one-vs-all'``, this
+    metric will use a one-vs-all approach to compute the hinge loss, giving a vector of C outputs where each entry pits
+    that class against all remaining classes.
+
+    This metric can optionally output the mean of the squared hinge loss by setting ``squared=True``
+
+    Only accepts inputs with preds shape of (N) (binary) or (N, C) (multi-class) and target shape of (N).
+
+    Args:
+        squared:
+            If True, this will compute the squared hinge loss. Otherwise, computes the regular hinge loss (default).
+        multiclass_mode:
+            Which approach to use for multi-class inputs (has no effect in the binary case). ``None`` (default),
+            ``MulticlassMode.CRAMMER_SINGER`` or ``"crammer-singer"``, uses the Crammer Singer multi-class hinge loss.
+            ``MulticlassMode.ONE_VS_ALL`` or ``"one-vs-all"`` computes the hinge loss in a one-vs-all fashion.
+
+    Raises:
+        ValueError:
+            If ``multiclass_mode`` is not: None, ``MulticlassMode.CRAMMER_SINGER``, ``"crammer-singer"``,
+            ``MulticlassMode.ONE_VS_ALL`` or ``"one-vs-all"``.
+
+    Example (binary case):
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics import Hinge
+        >>> target = B.tensor([0, 1, 1])
+        >>> preds = B.tensor([-2.2, 2.4, 0.1])
+        >>> hinge = Hinge()
+        >>> hinge(preds, target)
+        tensor(0.3000)
+
+    Example (default / multiclass case):
+        >>> target = B.tensor([0, 1, 2])
+        >>> preds = B.tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]])
+        >>> hinge = Hinge()
+        >>> hinge(preds, target)
+        tensor(2.9000)
+
+    Example (multiclass example, one vs all mode):
+        >>> target = B.tensor([0, 1, 2])
+        >>> preds = B.tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]])
+        >>> hinge = Hinge(multiclass_mode="one-vs-all")
+        >>> hinge(preds, target)
+        tensor([2.2333, 1.5000, 1.2333])
+
+    """
+    is_differentiable = True
+    measure: Tensor
+    total: Tensor
+
+    def __init__(
+        self,
+        squared: bool = False,
+        multiclass_mode: Optional[Union[str, MulticlassMode]] = None,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+
+        self.add_state("measure", default=tensor(0.0), dist_reduce_fx="sum")
+        self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
+
+        if multiclass_mode not in (None, MulticlassMode.CRAMMER_SINGER, MulticlassMode.ONE_VS_ALL):
+            raise ValueError(
+                "The `multiclass_mode` should be either None / 'crammer-singer' / MulticlassMode.CRAMMER_SINGER"
+                " (default) or 'one-vs-all' / MulticlassMode.ONE_VS_ALL,"
+                f" got {multiclass_mode}."
+ ) + + self.squared = squared + self.multiclass_mode = multiclass_mode + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + measure, total = _hinge_update(preds, target, squared=self.squared, multiclass_mode=self.multiclass_mode) + + self.measure = measure + self.measure + self.total = total + self.total + + def compute(self) -> Tensor: + return _hinge_compute(self.measure, self.total) diff --git a/EE/paddlemetric/src/paddlemetrics/classification/iou.py b/EE/paddlemetric/src/paddlemetrics/classification/iou.py new file mode 100644 index 000000000..9e89946a1 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/classification/iou.py @@ -0,0 +1,107 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.classification.confusion_matrix import ConfusionMatrix +from paddlemetrics.functional.classification.iou import _iou_from_confmat + + +class IoU(ConfusionMatrix): + r""" + Computes Intersection over union, or `Jaccard index`_: + + .. math:: J(A,B) = \frac{|A\cap B|}{|A\cup B|} + + Where: :math:`A` and :math:`B` are both tensors of the same size, containing integer class values. + They may be subject to conversion from input data (see description below). Note that it is different from box IoU. + + Works with binary, multiclass and multi-label data. + Accepts probabilities from a model output or integer class values in prediction. + Works with multi-dimensional preds and target. + + Forward accepts + + - ``preds`` (float or long tensor): ``(N, ...)`` or ``(N, C, ...)`` where C is the number of classes + - ``target`` (long tensor): ``(N, ...)`` + + If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument + to convert into integer labels. This is the case for binary and multi-label probabilities. + + If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``. + + Args: + num_classes: Number of classes in the dataset. + ignore_index: optional int specifying a target class to ignore. If given, this class index does not contribute + to the returned score, regardless of reduction method. Has no effect if given an int that is not in the + range [0, num_classes-1]. By default, no index is ignored, and all classes are used. + absent_score: score to use for an individual class, if no instances of the class index were present in + `pred` AND no instances of the class index were present in `target`. For example, if we have 3 classes, + [0, 0] for `pred`, and [0, 2] for `target`, then class 1 would be assigned the `absent_score`. + threshold: + Threshold value for binary or multi-label probabilities. + reduction: a method to reduce metric score over labels. 
+
+            - ``'elementwise_mean'``: takes the mean (default)
+            - ``'sum'``: takes the sum
+            - ``'none'``: no reduction will be applied
+
+        compute_on_step:
+            Forward only calls ``update()`` and return None if this is set to False.
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step.
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+
+    Example:
+        >>> from paddlemetrics import IoU
+        >>> target = B.randint(0, 2, (10, 25, 25))
+        >>> pred = B.tensor(target)
+        >>> pred[2:5, 7:13, 9:15] = 1 - pred[2:5, 7:13, 9:15]
+        >>> iou = IoU(num_classes=2)
+        >>> iou(pred, target)
+        tensor(0.9660)
+
+    """
+    is_differentiable = False
+
+    def __init__(
+        self,
+        num_classes: int,
+        ignore_index: Optional[int] = None,
+        absent_score: float = 0.0,
+        threshold: float = 0.5,
+        reduction: str = "elementwise_mean",
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+    ) -> None:
+        super().__init__(
+            num_classes=num_classes,
+            normalize=None,
+            threshold=threshold,
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+        )
+        self.reduction = reduction
+        self.ignore_index = ignore_index
+        self.absent_score = absent_score
+
+    def compute(self) -> Tensor:
+        """Computes intersection over union (IoU)"""
+        return _iou_from_confmat(self.confmat, self.num_classes, self.ignore_index, self.absent_score, self.reduction)
diff --git a/EE/paddlemetric/src/paddlemetrics/classification/kl_divergence.py b/EE/paddlemetric/src/paddlemetrics/classification/kl_divergence.py
new file mode 100644
index 000000000..cce887f09
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/classification/kl_divergence.py
@@ -0,0 +1,109 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.classification.kl_divergence import _kld_compute, _kld_update
+from paddlemetrics.metric import Metric
+from paddlemetrics.utilities.data import dim_zero_cat
+
+
+class KLDivergence(Metric):
+    r"""Computes the `KL divergence`_:
+
+    .. math::
+        D_{KL}(P||Q) = \sum_{x\in\mathcal{X}} P(x) \log\frac{P(x)}{Q(x)}
+
+    Where :math:`P` and :math:`Q` are probability distributions where :math:`P` usually represents a distribution
+    over data and :math:`Q` is often a prior or approximation of :math:`P`. It should be noted that the KL divergence
+    is a non-symmetric metric i.e. :math:`D_{KL}(P||Q) \neq D_{KL}(Q||P)`.
+
+    Args:
+        p: data distribution with shape ``[N, d]``
+        q: prior or approximate distribution with shape ``[N, d]``
+        log_prob: bool indicating if input is log-probabilities or probabilities.
If given as probabilities,
+            will normalize to make sure the distributions sum to 1.
+        reduction:
+            Determines how to reduce over the ``N``/batch dimension:
+
+            - ``'mean'`` [default]: Averages score across samples
+            - ``'sum'``: Sum score across samples
+            - ``'none'`` or ``None``: Returns score per sample
+
+    Raises:
+        TypeError:
+            If ``log_prob`` is not a ``bool``
+        ValueError:
+            If ``reduction`` is not one of ``'mean'``, ``'sum'``, ``'none'`` or ``None``
+
+    .. note::
+        Half precision is only supported on GPU for this metric
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics.functional import kl_divergence
+        >>> p = B.tensor([[0.36, 0.48, 0.16]])
+        >>> q = B.tensor([[1/3, 1/3, 1/3]])
+        >>> kl_divergence(p, q)
+        tensor(0.0853)
+
+    """
+    is_differentiable = True
+    # TODO: cannot be used because of scripting
+    # measures: Union[List[Tensor], Tensor]
+    total: Tensor
+
+    def __init__(
+        self,
+        log_prob: bool = False,
+        reduction: Optional[str] = "mean",
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+        if not isinstance(log_prob, bool):
+            raise TypeError(f"Expected argument `log_prob` to be bool but got {log_prob}")
+        self.log_prob = log_prob
+
+        allowed_reduction = ["mean", "sum", "none", None]
+        if reduction not in allowed_reduction:
+            raise ValueError(f"Expected argument `reduction` to be one of {allowed_reduction} but got {reduction}")
+        self.reduction = reduction
+
+        if self.reduction in ["mean", "sum"]:
+            self.add_state("measures", B.zeros(1), dist_reduce_fx="sum")
+        else:
+            self.add_state("measures", [], dist_reduce_fx="cat")
+        self.add_state("total", B.zeros(1), dist_reduce_fx="sum")
+
+    def update(self, p: Tensor, q: Tensor) -> None:  # type: ignore
+        measures, total = _kld_update(p, q, self.log_prob)
+        if self.reduction is None or self.reduction == "none":
+            self.measures.append(measures)
+        else:
+            self.measures += measures.sum()
+        self.total += total
+
+    def compute(self) -> Tensor:
+        measures = dim_zero_cat(self.measures) if self.reduction is None or self.reduction == "none" else self.measures
+        return _kld_compute(measures, self.total, self.reduction)
diff --git a/EE/paddlemetric/src/paddlemetrics/classification/matthews_corrcoef.py b/EE/paddlemetric/src/paddlemetrics/classification/matthews_corrcoef.py
new file mode 100644
index 000000000..2ea52673b
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/classification/matthews_corrcoef.py
@@ -0,0 +1,111 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
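+# A rough sketch of the binary-case quantity this module computes, assuming
+# tp/fp/tn/fn are read off the accumulated confusion matrix (illustration only;
+# the actual computation lives in paddlemetrics.functional):
+#
+#     mcc = (tp * tn - fp * fn) / sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))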
+from typing import Any, Callable, Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.classification.matthews_corrcoef import (
+    _matthews_corrcoef_compute,
+    _matthews_corrcoef_update,
+)
+from paddlemetrics.metric import Metric
+
+
+class MatthewsCorrcoef(Metric):
+    r"""
+    Calculates `Matthews correlation coefficient`_ that measures
+    the general correlation or quality of a classification. In the binary case it
+    is defined as:
+
+    .. math::
+        MCC = \frac{TP*TN - FP*FN}{\sqrt{(TP+FP)*(TP+FN)*(TN+FP)*(TN+FN)}}
+
+    where TP, TN, FP and FN are respectively the true positives, true negatives,
+    false positives and false negatives. Also works in the case of multi-label or
+    multi-class input.
+
+    Forward accepts
+
+    - ``preds`` (float or long tensor): ``(N, ...)`` or ``(N, C, ...)`` where C is the number of classes
+    - ``target`` (long tensor): ``(N, ...)``
+
+    If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument
+    to convert into integer labels. This is the case for binary and multi-label probabilities.
+
+    If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``.
+
+    Args:
+        num_classes: Number of classes in the dataset.
+        threshold:
+            Threshold value for binary or multi-label probabilities. default: 0.5
+        compute_on_step:
+            Forward only calls ``update()`` and return None if this is set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step. default: False
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When ``None``, DDP
+            will be used to perform the allgather
+
+    Example:
+        >>> from paddlemetrics import MatthewsCorrcoef
+        >>> target = B.tensor([1, 1, 0, 0])
+        >>> preds = B.tensor([0, 1, 0, 0])
+        >>> matthews_corrcoef = MatthewsCorrcoef(num_classes=2)
+        >>> matthews_corrcoef(preds, target)
+        tensor(0.5774)
+
+    """
+    is_differentiable = False
+    confmat: Tensor
+
+    def __init__(
+        self,
+        num_classes: int,
+        threshold: float = 0.5,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+        self.num_classes = num_classes
+        self.threshold = threshold
+
+        self.add_state("confmat", default=B.zeros(num_classes, num_classes), dist_reduce_fx="sum")
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Update state with predictions and targets.
+
+        Args:
+            preds: Predictions from model
+            target: Ground truth values
+        """
+        confmat = _matthews_corrcoef_update(preds, target, self.num_classes, self.threshold)
+        self.confmat += confmat
+
+    def compute(self) -> Tensor:
+        """Computes the Matthews correlation coefficient."""
+        return _matthews_corrcoef_compute(self.confmat)
diff --git a/EE/paddlemetric/src/paddlemetrics/classification/precision_recall.py b/EE/paddlemetric/src/paddlemetrics/classification/precision_recall.py
new file mode 100644
index 000000000..77920cfc9
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/classification/precision_recall.py
@@ -0,0 +1,320 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.classification.stat_scores import StatScores
+from paddlemetrics.functional.classification.precision_recall import _precision_compute, _recall_compute
+
+
+class Precision(StatScores):
+    r"""
+    Computes `Precision`_:
+
+    .. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}}
+
+    Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and
+    false positives respectively. With the use of ``top_k`` parameter, this metric can
+    generalize to Precision@K.
+
+    The reduction method (how the precision scores are aggregated) is controlled by the
+    ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the
+    multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`.
+
+    Args:
+        num_classes:
+            Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods.
+        threshold:
+            Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
+            of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
+        average:
+            Defines the reduction that is applied. Should be one of the following:
+
+            - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes.
+            - ``'macro'``: Calculate the metric for each class separately, and average the
+              metrics across classes (with equal weights for each class).
+            - ``'weighted'``: Calculate the metric for each class separately, and average the
+              metrics across classes, weighting each class by its support (``tp + fn``).
+            - ``'none'`` or ``None``: Calculate the metric for each class separately, and return
+              the metric for every class.
+            - ``'samples'``: Calculate the metric for each sample, and average the metrics
+              across samples (with equal weights for each sample).
+
+            .. note:: What is considered a sample in the multi-dimensional multi-class case
+                depends on the value of ``mdmc_average``.
+
+        mdmc_average:
+            Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
+            ``average`` parameter).
Should be one of the following: + + - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional + multi-class. + + - ``'samplewise'``: In this case, the statistics are computed separately for each + sample on the ``N`` axis, and then averaged over samples. + The computation for each sample is done by treating the flattened extra axes ``...`` + (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample, + and computing the metric for the sample based on that. + + - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs + (see :ref:`references/modules:input types`) + are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they + were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. + + ignore_index: + Integer specifying a target class to ignore. If given, this class index does not contribute + to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` + or ``'none'``, the score for the ignored class will be returned as ``nan``. + + top_k: + Number of highest probability or logit score predictions considered to find the correct label, + relevant only for (multi-dimensional) multi-class inputs. The + default value (``None``) will be interpreted as 1 for these inputs. + + Should be left at default (``None``) for all other types of inputs. + + multiclass: + Used only in certain special cases, where you want to treat inputs as a different type + than what they appear to be. See the parameter's + :ref:`documentation section ` + for a more detailed explanation and examples. + + compute_on_step: + Forward only calls ``update()`` and return ``None`` if this is set to ``False``. + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step + process_group: + Specify the process group on which synchronization is called. + default: ``None`` (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When ``None``, DDP + will be used to perform the allgather. + + Raises: + ValueError: + If ``average`` is none of ``"micro"``, ``"macro"``, ``"weighted"``, ``"samples"``, ``"none"``, ``None``. 
+
+    Example:
+        >>> from paddlemetrics import Precision
+        >>> preds = B.tensor([2, 0, 2, 1])
+        >>> target = B.tensor([1, 1, 2, 0])
+        >>> precision = Precision(average='macro', num_classes=3)
+        >>> precision(preds, target)
+        tensor(0.1667)
+        >>> precision = Precision(average='micro')
+        >>> precision(preds, target)
+        tensor(0.2500)
+
+    """
+    is_differentiable = False
+
+    def __init__(
+        self,
+        num_classes: Optional[int] = None,
+        threshold: float = 0.5,
+        average: str = "micro",
+        mdmc_average: Optional[str] = None,
+        ignore_index: Optional[int] = None,
+        top_k: Optional[int] = None,
+        multiclass: Optional[bool] = None,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ) -> None:
+        allowed_average = ["micro", "macro", "weighted", "samples", "none", None]
+        if average not in allowed_average:
+            raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.")
+
+        super().__init__(
+            reduce="macro" if average in ["weighted", "none", None] else average,
+            mdmc_reduce=mdmc_average,
+            threshold=threshold,
+            top_k=top_k,
+            num_classes=num_classes,
+            multiclass=multiclass,
+            ignore_index=ignore_index,
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+
+        self.average = average
+
+    def compute(self) -> Tensor:
+        """Computes the precision score based on inputs passed in to ``update`` previously.
+
+        Return:
+            The shape of the returned tensor depends on the ``average`` parameter
+
+            - If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned
+            - If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number
+              of classes
+        """
+        tp, fp, _, fn = self._get_final_stats()
+        return _precision_compute(tp, fp, fn, self.average, self.mdmc_reduce)
+
+
+class Recall(StatScores):
+    r"""
+    Computes `Recall`_:
+
+    .. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}}
+
+    Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and
+    false negatives respectively. With the use of ``top_k`` parameter, this metric can
+    generalize to Recall@K.
+
+    The reduction method (how the recall scores are aggregated) is controlled by the
+    ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the
+    multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`.
+
+    Args:
+        num_classes:
+            Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods.
+        threshold:
+            Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
+            of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
+        average:
+            Defines the reduction that is applied. Should be one of the following:
+
+            - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes.
+            - ``'macro'``: Calculate the metric for each class separately, and average the
+              metrics across classes (with equal weights for each class).
+            - ``'weighted'``: Calculate the metric for each class separately, and average the
+              metrics across classes, weighting each class by its support (``tp + fn``).
+            - ``'none'`` or ``None``: Calculate the metric for each class separately, and return
+              the metric for every class.
+            - ``'samples'``: Calculate the metric for each sample, and average the metrics
+              across samples (with equal weights for each sample).
+
+            .. note:: What is considered a sample in the multi-dimensional multi-class case
+                depends on the value of ``mdmc_average``.
+
+        mdmc_average:
+            Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
+            ``average`` parameter). Should be one of the following:
+
+            - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional
+              multi-class.
+
+            - ``'samplewise'``: In this case, the statistics are computed separately for each
+              sample on the ``N`` axis, and then averaged over samples.
+              The computation for each sample is done by treating the flattened extra axes ``...``
+              (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample,
+              and computing the metric for the sample based on that.
+
+            - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs
+              (see :ref:`references/modules:input types`)
+              are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they
+              were ``(N_X, C)``. From here on the ``average`` parameter applies as usual.
+
+        ignore_index:
+            Integer specifying a target class to ignore. If given, this class index does not contribute
+            to the returned score, regardless of reduction method. If an index is ignored, and ``average=None``
+            or ``'none'``, the score for the ignored class will be returned as ``nan``.
+
+        top_k:
+            Number of highest probability or logit score predictions considered to find the correct label,
+            relevant only for (multi-dimensional) multi-class inputs. The
+            default value (``None``) will be interpreted as 1 for these inputs.
+
+            Should be left at default (``None``) for all other types of inputs.
+        multiclass:
+            Used only in certain special cases, where you want to treat inputs as a different type
+            than what they appear to be. See the parameter's
+            :ref:`documentation section `
+            for a more detailed explanation and examples.
+
+        compute_on_step:
+            Forward only calls ``update()`` and return ``None`` if this is set to ``False``.
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step
+        process_group:
+            Specify the process group on which synchronization is called.
+            default: ``None`` (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When ``None``, DDP
+            will be used to perform the allgather.
+
+    Raises:
+        ValueError:
+            If ``average`` is none of ``"micro"``, ``"macro"``, ``"weighted"``, ``"samples"``, ``"none"``, ``None``.
+ + Example: + >>> from paddlemetrics import Recall + >>> preds = B.tensor([2, 0, 2, 1]) + >>> target = B.tensor([1, 1, 2, 0]) + >>> recall = Recall(average='macro', num_classes=3) + >>> recall(preds, target) + tensor(0.3333) + >>> recall = Recall(average='micro') + >>> recall(preds, target) + tensor(0.2500) + + """ + is_differentiable = False + + def __init__( + self, + num_classes: Optional[int] = None, + threshold: float = 0.5, + average: str = "micro", + mdmc_average: Optional[str] = None, + ignore_index: Optional[int] = None, + top_k: Optional[int] = None, + multiclass: Optional[bool] = None, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + allowed_average = ["micro", "macro", "weighted", "samples", "none", None] + if average not in allowed_average: + raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.") + + super().__init__( + reduce="macro" if average in ["weighted", "none", None] else average, + mdmc_reduce=mdmc_average, + threshold=threshold, + top_k=top_k, + num_classes=num_classes, + multiclass=multiclass, + ignore_index=ignore_index, + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + self.average = average + + def compute(self) -> Tensor: + """Computes the recall score based on inputs passed in to ``update`` previously. + + Return: + The shape of the returned tensor depends on the ``average`` parameter + + - If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned + - If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number + of classes + """ + tp, fp, _, fn = self._get_final_stats() + return _recall_compute(tp, fp, fn, self.average, self.mdmc_reduce) diff --git a/EE/paddlemetric/src/paddlemetrics/classification/precision_recall_curve.py b/EE/paddlemetric/src/paddlemetrics/classification/precision_recall_curve.py new file mode 100644 index 000000000..341419092 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/classification/precision_recall_curve.py @@ -0,0 +1,149 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, List, Optional, Tuple, Union + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.functional.classification.precision_recall_curve import ( + _precision_recall_curve_compute, + _precision_recall_curve_update, +) +from paddlemetrics.metric import Metric +from paddlemetrics.utilities import rank_zero_warn +from paddlemetrics.utilities.data import dim_zero_cat + + +class PrecisionRecallCurve(Metric): + """Computes precision-recall pairs for different thresholds. Works for both binary and multiclass problems. In + the case of multiclass, the values will be calculated based on a one-vs-the-rest approach. 
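+
+    For example, with ``num_classes=3`` the curve for class ``0`` is obtained by
+    treating class ``0`` as the positive class and classes ``1`` and ``2`` jointly
+    as the negative class.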
+
+ Forward accepts
+
+ - ``preds`` (float tensor): ``(N, ...)`` (binary) or ``(N, C, ...)`` (multiclass) tensor
+ with probabilities, where C is the number of classes.
+
+ - ``target`` (long tensor): ``(N, ...)`` or ``(N, C, ...)`` with integer labels
+
+ Args:
+ num_classes: integer with number of classes for multi-label and multiclass problems.
+ Should be set to ``None`` for binary problems
+ pos_label: integer determining the positive class. Default is ``None``
+ which for binary problems is translated to 1. For multiclass problems
+ this argument should not be set, as we iteratively change it in the
+ range [0, num_classes-1]
+ compute_on_step:
+ Forward only calls ``update()`` and returns None if this is set to False. default: True
+ dist_sync_on_step:
+ Synchronize metric state across processes at each ``forward()``
+ before returning the value at the step. default: False
+ process_group:
+ Specify the process group on which synchronization is called. default: None (which selects the entire world)
+
+ Example (binary case):
+ >>> from paddlemetrics import PrecisionRecallCurve
+ >>> pred = B.tensor([0, 1, 2, 3])
+ >>> target = B.tensor([0, 1, 1, 0])
+ >>> pr_curve = PrecisionRecallCurve(pos_label=1)
+ >>> precision, recall, thresholds = pr_curve(pred, target)
+ >>> precision
+ tensor([0.6667, 0.5000, 0.0000, 1.0000])
+ >>> recall
+ tensor([1.0000, 0.5000, 0.0000, 0.0000])
+ >>> thresholds
+ tensor([1, 2, 3])
+
+ Example (multiclass case):
+ >>> pred = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
+ ... [0.05, 0.75, 0.05, 0.05, 0.05],
+ ... [0.05, 0.05, 0.75, 0.05, 0.05],
+ ... [0.05, 0.05, 0.05, 0.75, 0.05]])
+ >>> target = B.tensor([0, 1, 3, 2])
+ >>> pr_curve = PrecisionRecallCurve(num_classes=5)
+ >>> precision, recall, thresholds = pr_curve(pred, target)
+ >>> precision # doctest: +NORMALIZE_WHITESPACE
+ [tensor([1., 1.]), tensor([1., 1.]), tensor([0.2500, 0.0000, 1.0000]),
+ tensor([0.2500, 0.0000, 1.0000]), tensor([0., 1.])]
+ >>> recall
+ [tensor([1., 0.]), tensor([1., 0.]), tensor([1., 0., 0.]), tensor([1., 0., 0.]), tensor([nan, 0.])]
+ >>> thresholds
+ [tensor([0.7500]), tensor([0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500])]
+ """
+
+ is_differentiable = False
+ preds: List[Tensor]
+ target: List[Tensor]
+
+ def __init__(
+ self,
+ num_classes: Optional[int] = None,
+ pos_label: Optional[int] = None,
+ compute_on_step: bool = True,
+ dist_sync_on_step: bool = False,
+ process_group: Optional[Any] = None,
+ ) -> None:
+ super().__init__(
+ compute_on_step=compute_on_step,
+ dist_sync_on_step=dist_sync_on_step,
+ process_group=process_group,
+ )
+
+ self.num_classes = num_classes
+ self.pos_label = pos_label
+
+ self.add_state("preds", default=[], dist_reduce_fx="cat")
+ self.add_state("target", default=[], dist_reduce_fx="cat")
+
+ rank_zero_warn(
+ "Metric `PrecisionRecallCurve` will save all targets and predictions in buffer."
+ " For large datasets this may lead to large memory footprint."
+ )
+
+ def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore
+ """Update state with predictions and targets.
+
+ Args:
+ preds: Predictions from model
+ target: Ground truth values
+ """
+ preds, target, num_classes, pos_label = _precision_recall_curve_update(
+ preds, target, self.num_classes, self.pos_label
+ )
+ self.preds.append(preds)
+ self.target.append(target)
+ self.num_classes = num_classes
+ self.pos_label = pos_label
+
+ def compute(self) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
+ """Compute the precision-recall curve.
+
+ Returns:
+ 3-element tuple containing
+
+ precision:
+ tensor where element i is the precision of predictions with
+ score >= thresholds[i] and the last element is 1.
+ If multiclass, this is a list of such tensors, one for each class.
+ recall:
+ tensor where element i is the recall of predictions with
+ score >= thresholds[i] and the last element is 0.
+ If multiclass, this is a list of such tensors, one for each class.
+ thresholds:
+ Thresholds used for computing precision/recall scores
+ """
+ preds = dim_zero_cat(self.preds)
+ target = dim_zero_cat(self.target)
+ if not self.num_classes:
+ raise ValueError(f"`num_classes` has to be a positive number, but got {self.num_classes}")
+ return _precision_recall_curve_compute(preds, target, self.num_classes, self.pos_label)
diff --git a/EE/paddlemetric/src/paddlemetrics/classification/roc.py b/EE/paddlemetric/src/paddlemetrics/classification/roc.py
new file mode 100644
index 000000000..a01a5b94d
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/classification/roc.py
@@ -0,0 +1,169 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, List, Optional, Tuple, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.classification.roc import _roc_compute, _roc_update
+from paddlemetrics.metric import Metric
+from paddlemetrics.utilities import rank_zero_warn
+
+
+class ROC(Metric):
+ """Computes the Receiver Operating Characteristic (ROC). Works for both binary, multiclass and multilabel
+ problems. In the case of multiclass, the values will be calculated based on a one-vs-the-rest approach.
+
+ Forward accepts
+
+ - ``preds`` (float tensor): ``(N, ...)`` (binary) or ``(N, C, ...)`` (multiclass/multilabel) tensor
+ with probabilities, where C is the number of classes/labels.
+
+ - ``target`` (long tensor): ``(N, ...)`` or ``(N, C, ...)`` with integer labels
+
+ Args:
+ num_classes: integer with number of classes for multi-label and multiclass problems.
+ Should be set to ``None`` for binary problems
+ pos_label: integer determining the positive class. Default is ``None``
+ which for binary problems is translated to 1. For multiclass problems
+ this argument should not be set, as we iteratively change it in the
+ range [0, num_classes-1]
+ compute_on_step:
+ Forward only calls ``update()`` and returns None if this is set to False.
default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When ``None``, DDP + will be used to perform the allgather + + Example (binary case): + >>> from paddlemetrics import ROC + >>> pred = B.tensor([0, 1, 2, 3]) + >>> target = B.tensor([0, 1, 1, 1]) + >>> roc = ROC(pos_label=1) + >>> fpr, tpr, thresholds = roc(pred, target) + >>> fpr + tensor([0., 0., 0., 0., 1.]) + >>> tpr + tensor([0.0000, 0.3333, 0.6667, 1.0000, 1.0000]) + >>> thresholds + tensor([4, 3, 2, 1, 0]) + + Example (multiclass case): + >>> pred = B.tensor([[0.75, 0.05, 0.05, 0.05], + ... [0.05, 0.75, 0.05, 0.05], + ... [0.05, 0.05, 0.75, 0.05], + ... [0.05, 0.05, 0.05, 0.75]]) + >>> target = B.tensor([0, 1, 3, 2]) + >>> roc = ROC(num_classes=4) + >>> fpr, tpr, thresholds = roc(pred, target) + >>> fpr + [tensor([0., 0., 1.]), tensor([0., 0., 1.]), tensor([0.0000, 0.3333, 1.0000]), tensor([0.0000, 0.3333, 1.0000])] + >>> tpr + [tensor([0., 1., 1.]), tensor([0., 1., 1.]), tensor([0., 0., 1.]), tensor([0., 0., 1.])] + >>> thresholds # doctest: +NORMALIZE_WHITESPACE + [tensor([1.7500, 0.7500, 0.0500]), + tensor([1.7500, 0.7500, 0.0500]), + tensor([1.7500, 0.7500, 0.0500]), + tensor([1.7500, 0.7500, 0.0500])] + + Example (multilabel case): + >>> pred = B.tensor([[0.8191, 0.3680, 0.1138], + ... [0.3584, 0.7576, 0.1183], + ... [0.2286, 0.3468, 0.1338], + ... [0.8603, 0.0745, 0.1837]]) + >>> target = B.tensor([[1, 1, 0], [0, 1, 0], [0, 0, 0], [0, 1, 1]]) + >>> roc = ROC(num_classes=3, pos_label=1) + >>> fpr, tpr, thresholds = roc(pred, target) + >>> fpr # doctest: +NORMALIZE_WHITESPACE + [tensor([0.0000, 0.3333, 0.3333, 0.6667, 1.0000]), + tensor([0., 0., 0., 1., 1.]), + tensor([0.0000, 0.0000, 0.3333, 0.6667, 1.0000])] + >>> tpr # doctest: +NORMALIZE_WHITESPACE + [tensor([0., 0., 1., 1., 1.]), + tensor([0.0000, 0.3333, 0.6667, 0.6667, 1.0000]), + tensor([0., 1., 1., 1., 1.])] + >>> thresholds # doctest: +NORMALIZE_WHITESPACE + [tensor([1.8603, 0.8603, 0.8191, 0.3584, 0.2286]), + tensor([1.7576, 0.7576, 0.3680, 0.3468, 0.0745]), + tensor([1.1837, 0.1837, 0.1338, 0.1183, 0.1138])] + """ + + is_differentiable = False + preds: List[Tensor] + target: List[Tensor] + + def __init__( + self, + num_classes: Optional[int] = None, + pos_label: Optional[int] = None, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + self.num_classes = num_classes + self.pos_label = pos_label + + self.add_state("preds", default=[], dist_reduce_fx=None) + self.add_state("target", default=[], dist_reduce_fx=None) + + rank_zero_warn( + "Metric `ROC` will save all targets and predictions in buffer." + " For large datasets this may lead to large memory footprint." + ) + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets. 
+
+ Args:
+ preds: Predictions from model
+ target: Ground truth values
+ """
+ preds, target, num_classes, pos_label = _roc_update(preds, target, self.num_classes, self.pos_label)
+ self.preds.append(preds)
+ self.target.append(target)
+ self.num_classes = num_classes
+ self.pos_label = pos_label
+
+ def compute(self) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
+ """Compute the receiver operating characteristic.
+
+ Returns:
+ 3-element tuple containing
+
+ fpr:
+ tensor with false positive rates.
+ If multiclass, this is a list of such tensors, one for each class.
+ tpr:
+ tensor with true positive rates.
+ If multiclass, this is a list of such tensors, one for each class.
+ thresholds:
+ thresholds used for computing false- and true positive rates
+ """
+ preds = B.cat(self.preds, dim=0)
+ target = B.cat(self.target, dim=0)
+ if not self.num_classes:
+ raise ValueError(f"`num_classes` has to be a positive number, but got {self.num_classes}")
+ return _roc_compute(preds, target, self.num_classes, self.pos_label)
diff --git a/EE/paddlemetric/src/paddlemetrics/classification/specificity.py b/EE/paddlemetric/src/paddlemetrics/classification/specificity.py
new file mode 100644
index 000000000..0ad44268a
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/classification/specificity.py
@@ -0,0 +1,171 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.classification.stat_scores import StatScores
+from paddlemetrics.functional.classification.specificity import _specificity_compute
+
+
+class Specificity(StatScores):
+ r"""
+ Computes `Specificity`_:
+
+ .. math:: \text{Specificity} = \frac{\text{TN}}{\text{TN} + \text{FP}}
+
+ Where :math:`\text{TN}` and :math:`\text{FP}` represent the number of true negatives and
+ false positives respectively. With the use of ``top_k`` parameter, this metric can
+ generalize to Specificity@K.
+
+ The reduction method (how the specificity scores are aggregated) is controlled by the
+ ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the
+ multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`.
+
+ Args:
+ num_classes:
+ Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods.
+ threshold:
+ Threshold probability value for transforming probability predictions to binary
+ (0,1) predictions, in the case of binary or multi-label inputs.
+ average:
+ Defines the reduction that is applied. Should be one of the following:
+
+ - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes.
+ - ``'macro'``: Calculate the metric for each class separately, and average the
+ metrics across classes (with equal weights for each class).
+ - ``'weighted'``: Calculate the metric for each class separately, and average the
+ metrics across classes, weighting each class by its support (``tn + fp``).
+ - ``'none'`` or ``None``: Calculate the metric for each class separately, and return
+ the metric for every class.
+ - ``'samples'``: Calculate the metric for each sample, and average the metrics
+ across samples (with equal weights for each sample).
+
+ .. note:: What is considered a sample in the multi-dimensional multi-class case
+ depends on the value of ``mdmc_average``.
+
+ mdmc_average:
+ Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
+ ``average`` parameter). Should be one of the following:
+
+ - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional
+ multi-class.
+
+ - ``'samplewise'``: In this case, the statistics are computed separately for each
+ sample on the ``N`` axis, and then averaged over samples.
+ The computation for each sample is done by treating the flattened extra axes ``...``
+ (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample,
+ and computing the metric for the sample based on that.
+
+ - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs
+ (see :ref:`references/modules:input types`)
+ are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they
+ were ``(N_X, C)``. From here on the ``average`` parameter applies as usual.
+
+ ignore_index:
+ Integer specifying a target class to ignore. If given, this class index does not contribute
+ to the returned score, regardless of reduction method. If an index is ignored, and ``average=None``
+ or ``'none'``, the score for the ignored class will be returned as ``nan``.
+
+ top_k:
+ Number of highest probability entries for each sample to convert to 1s - relevant
+ only for inputs with probability predictions. If this parameter is set for multi-label
+ inputs, it will take precedence over ``threshold``. For (multi-dim) multi-class inputs,
+ this parameter defaults to 1.
+
+ Should be left unset (``None``) for inputs with label predictions.
+
+ multiclass:
+ Used only in certain special cases, where you want to treat inputs as a different type
+ than what they appear to be. See the parameter's
+ :ref:`documentation section `
+ for a more detailed explanation and examples.
+
+ compute_on_step:
+ Forward only calls ``update()`` and returns ``None`` if this is set to ``False``.
+ dist_sync_on_step:
+ Synchronize metric state across processes at each ``forward()``
+ before returning the value at the step.
+ process_group:
+ Specify the process group on which synchronization is called.
+ default: ``None`` (which selects the entire world)
+ dist_sync_fn:
+ Callback that performs the allgather operation on the metric state. When ``None``, DDP
+ will be used to perform the allgather.
+
+ Raises:
+ ValueError:
+ If ``average`` is none of ``"micro"``, ``"macro"``, ``"weighted"``, ``"samples"``, ``"none"``, ``None``.
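+
+ Example (per-class scores, illustrative sketch):
+ A sketch of the ``average='none'`` mode described above; the input tensors
+ are reused from the example below. With ``average='none'`` the result has
+ shape ``(C,)``, one specificity score per class.
+
+ >>> import paddleext.torchapi as B
+ >>> from paddlemetrics import Specificity
+ >>> per_class = Specificity(average='none', num_classes=3)
+ >>> scores = per_class(B.tensor([2, 0, 2, 1]), B.tensor([1, 1, 2, 0])) # shape (C,)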
+ + Example: + >>> from paddlemetrics import Specificity + >>> preds = B.tensor([2, 0, 2, 1]) + >>> target = B.tensor([1, 1, 2, 0]) + >>> specificity = Specificity(average='macro', num_classes=3) + >>> specificity(preds, target) + tensor(0.6111) + >>> specificity = Specificity(average='micro') + >>> specificity(preds, target) + tensor(0.6250) + + """ + is_differentiable = False + + def __init__( + self, + num_classes: Optional[int] = None, + threshold: float = 0.5, + average: str = "micro", + mdmc_average: Optional[str] = None, + ignore_index: Optional[int] = None, + top_k: Optional[int] = None, + multiclass: Optional[bool] = None, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + allowed_average = ["micro", "macro", "weighted", "samples", "none", None] + if average not in allowed_average: + raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.") + + super().__init__( + reduce="macro" if average in ["weighted", "none", None] else average, + mdmc_reduce=mdmc_average, + threshold=threshold, + top_k=top_k, + num_classes=num_classes, + multiclass=multiclass, + ignore_index=ignore_index, + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + self.average = average + + def compute(self) -> Tensor: + """Computes the specificity score based on inputs passed in to ``update`` previously. + + Return: + The shape of the returned tensor depends on the ``average`` parameter + + - If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned + - If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number + of classes + """ + tp, fp, tn, fn = self._get_final_stats() + return _specificity_compute(tp, fp, tn, fn, self.average, self.mdmc_reduce) diff --git a/EE/paddlemetric/src/paddlemetrics/classification/stat_scores.py b/EE/paddlemetric/src/paddlemetrics/classification/stat_scores.py new file mode 100644 index 000000000..ec099c867 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/classification/stat_scores.py @@ -0,0 +1,267 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional, Tuple + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.functional.classification.stat_scores import _stat_scores_compute, _stat_scores_update +from paddlemetrics.metric import Metric +from paddlemetrics.utilities.enums import AverageMethod, MDMCAverageMethod + + +class StatScores(Metric): + r"""Computes the number of true positives, false positives, true negatives, false negatives. + Related to `Type I and Type II errors`_ + and the `confusion matrix`_. 
+
+ The reduction method (how the statistics are aggregated) is controlled by the
+ ``reduce`` parameter, and additionally by the ``mdmc_reduce`` parameter in the
+ multi-dimensional multi-class case.
+
+ Accepts all inputs listed in :ref:`references/modules:input types`.
+
+ Args:
+ threshold:
+ Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
+ of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
+
+ top_k:
+ Number of highest probability or logit score predictions considered to find the correct label,
+ relevant only for (multi-dimensional) multi-class inputs. The
+ default value (``None``) will be interpreted as 1 for these inputs.
+
+ Should be left at default (``None``) for all other types of inputs.
+
+ reduce:
+ Defines the reduction that is applied. Should be one of the following:
+
+ - ``'micro'`` [default]: Counts the statistics by summing over all [sample, class]
+ combinations (globally). Each statistic is represented by a single integer.
+ - ``'macro'``: Counts the statistics for each class separately (over all samples).
+ Each statistic is represented by a ``(C,)`` tensor. Requires ``num_classes``
+ to be set.
+ - ``'samples'``: Counts the statistics for each sample separately (over all classes).
+ Each statistic is represented by a ``(N, )`` 1d tensor.
+
+ .. note:: What is considered a sample in the multi-dimensional multi-class case
+ depends on the value of ``mdmc_reduce``.
+
+ num_classes:
+ Number of classes. Necessary for (multi-dimensional) multi-class or multi-label data.
+
+ ignore_index:
+ Specify a class (label) to ignore. If given, this class index does not contribute
+ to the returned score, regardless of reduction method. If an index is ignored, and
+ ``reduce='macro'``, the class statistics for the ignored class will all be returned
+ as ``-1``.
+
+ mdmc_reduce:
+ Defines how the multi-dimensional multi-class inputs are handled. Should be
+ one of the following:
+
+ - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional
+ multi-class (see :ref:`references/modules:input types` for the definition of input types).
+
+ - ``'samplewise'``: In this case, the statistics are computed separately for each
+ sample on the ``N`` axis, and then the outputs are concatenated together. In each
+ sample the extra axes ``...`` are flattened to become the sub-sample axis, and
+ statistics for each sample are computed by treating the sub-sample axis as the
+ ``N`` axis for that sample.
+
+ - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs are
+ flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they
+ were ``(N_X, C)``. From here on the ``reduce`` parameter applies as usual.
+
+ multiclass:
+ Used only in certain special cases, where you want to treat inputs as a different type
+ than what they appear to be. See the parameter's
+ :ref:`documentation section `
+ for a more detailed explanation and examples.
+
+ compute_on_step:
+ Forward only calls ``update()`` and returns ``None`` if this is set to ``False``.
+ dist_sync_on_step:
+ Synchronize metric state across processes at each ``forward()``
+ before returning the value at the step.
+ process_group:
+ Specify the process group on which synchronization is called.
+ default: ``None`` (which selects the entire world)
+ dist_sync_fn:
+ Callback that performs the allgather operation on the metric state.
When ``None``, DDP
+ will be used to perform the allgather.
+
+ Raises:
+ ValueError:
+ If ``reduce`` is none of ``"micro"``, ``"macro"`` or ``"samples"``.
+ ValueError:
+ If ``mdmc_reduce`` is none of ``None``, ``"samplewise"``, ``"global"``.
+ ValueError:
+ If ``reduce`` is set to ``"macro"`` and ``num_classes`` is not provided.
+ ValueError:
+ If ``num_classes`` is set
+ and ``ignore_index`` is not in the range ``0`` <= ``ignore_index`` < ``num_classes``.
+
+ Example:
+ >>> from paddlemetrics.classification import StatScores
+ >>> preds = B.tensor([1, 0, 2, 1])
+ >>> target = B.tensor([1, 1, 2, 0])
+ >>> stat_scores = StatScores(reduce='macro', num_classes=3)
+ >>> stat_scores(preds, target)
+ tensor([[0, 1, 2, 1, 1],
+ [1, 1, 1, 1, 2],
+ [1, 0, 3, 0, 1]])
+ >>> stat_scores = StatScores(reduce='micro')
+ >>> stat_scores(preds, target)
+ tensor([2, 2, 6, 2, 4])
+
+ """
+ is_differentiable = False
+ # TODO: cannot be used because of scripting
+ # tp: Union[Tensor, List[Tensor]]
+ # fp: Union[Tensor, List[Tensor]]
+ # tn: Union[Tensor, List[Tensor]]
+ # fn: Union[Tensor, List[Tensor]]
+
+ def __init__(
+ self,
+ threshold: float = 0.5,
+ top_k: Optional[int] = None,
+ reduce: str = "micro",
+ num_classes: Optional[int] = None,
+ ignore_index: Optional[int] = None,
+ mdmc_reduce: Optional[str] = None,
+ multiclass: Optional[bool] = None,
+ compute_on_step: bool = True,
+ dist_sync_on_step: bool = False,
+ process_group: Optional[Any] = None,
+ dist_sync_fn: Callable = None,
+ ) -> None:
+ super().__init__(
+ compute_on_step=compute_on_step,
+ dist_sync_on_step=dist_sync_on_step,
+ process_group=process_group,
+ dist_sync_fn=dist_sync_fn,
+ )
+
+ self.reduce = reduce
+ self.mdmc_reduce = mdmc_reduce
+ self.num_classes = num_classes
+ self.threshold = threshold
+ self.multiclass = multiclass
+ self.ignore_index = ignore_index
+ self.top_k = top_k
+
+ if reduce not in ["micro", "macro", "samples"]:
+ raise ValueError(f"The `reduce` {reduce} is not valid.")
+
+ if mdmc_reduce not in [None, "samplewise", "global"]:
+ raise ValueError(f"The `mdmc_reduce` {mdmc_reduce} is not valid.")
+
+ if reduce == "macro" and (not num_classes or num_classes < 1):
+ raise ValueError("When you set `reduce` as 'macro', you have to provide the number of classes.")
+
+ if num_classes and ignore_index is not None and (not 0 <= ignore_index < num_classes or num_classes == 1):
+ raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes")
+
+ default: Callable = lambda: []
+ reduce_fn: Optional[str] = None
+ if mdmc_reduce != "samplewise" and reduce != "samples":
+ if reduce == "micro":
+ zeros_shape = []
+ elif reduce == "macro":
+ zeros_shape = [num_classes]
+ else:
+ raise ValueError(f'Wrong reduce="{reduce}"')
+ default = lambda: B.zeros(zeros_shape, dtype=B.long)
+ reduce_fn = "sum"
+
+ for s in ("tp", "fp", "tn", "fn"):
+ self.add_state(s, default=default(), dist_reduce_fx=reduce_fn)
+
+ def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore
+ """Update state with predictions and targets. See
+ :ref:`references/modules:input types` for more information on input
+ types.
+
+ Args:
+ preds: Predictions from model (probabilities, logits or labels)
+ target: Ground truth values
+ """
+
+ tp, fp, tn, fn = _stat_scores_update(
+ preds,
+ target,
+ reduce=self.reduce,
+ mdmc_reduce=self.mdmc_reduce,
+ threshold=self.threshold,
+ num_classes=self.num_classes,
+ top_k=self.top_k,
+ multiclass=self.multiclass,
+ ignore_index=self.ignore_index,
+ )
+
+ # Update states
+ if self.reduce != AverageMethod.SAMPLES and self.mdmc_reduce != MDMCAverageMethod.SAMPLEWISE:
+ self.tp += tp
+ self.fp += fp
+ self.tn += tn
+ self.fn += fn
+ else:
+ self.tp.append(tp)
+ self.fp.append(fp)
+ self.tn.append(tn)
+ self.fn.append(fn)
+
+ def _get_final_stats(self) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
+ """Performs concatenation on the stat scores if necessary, before passing them to a compute function."""
+ tp = B.cat(self.tp) if isinstance(self.tp, list) else self.tp
+ fp = B.cat(self.fp) if isinstance(self.fp, list) else self.fp
+ tn = B.cat(self.tn) if isinstance(self.tn, list) else self.tn
+ fn = B.cat(self.fn) if isinstance(self.fn, list) else self.fn
+ return tp, fp, tn, fn
+
+ def compute(self) -> Tensor:
+ """Computes the stat scores based on inputs passed in to ``update`` previously.
+
+ Return:
+ The metric returns a tensor of shape ``(..., 5)``, where the last dimension corresponds
+ to ``[tp, fp, tn, fn, sup]`` (``sup`` stands for support and equals ``tp + fn``). The
+ shape depends on the ``reduce`` and ``mdmc_reduce`` (in case of multi-dimensional
+ multi-class data) parameters:
+
+ - If the data is not multi-dimensional multi-class, then
+
+ - If ``reduce='micro'``, the shape will be ``(5, )``
+ - If ``reduce='macro'``, the shape will be ``(C, 5)``,
+ where ``C`` stands for the number of classes
+ - If ``reduce='samples'``, the shape will be ``(N, 5)``, where ``N`` stands for
+ the number of samples
+
+ - If the data is multi-dimensional multi-class and ``mdmc_reduce='global'``, then
+
+ - If ``reduce='micro'``, the shape will be ``(5, )``
+ - If ``reduce='macro'``, the shape will be ``(C, 5)``
+ - If ``reduce='samples'``, the shape will be ``(N*X, 5)``, where ``X`` stands for
+ the product of sizes of all "extra" dimensions of the data (i.e. all dimensions
+ except for ``C`` and ``N``)
+
+ - If the data is multi-dimensional multi-class and ``mdmc_reduce='samplewise'``, then
+
+ - If ``reduce='micro'``, the shape will be ``(N, 5)``
+ - If ``reduce='macro'``, the shape will be ``(N, C, 5)``
+ - If ``reduce='samples'``, the shape will be ``(N, X, 5)``
+ """
+ tp, fp, tn, fn = self._get_final_stats()
+ return _stat_scores_compute(tp, fp, tn, fn)
diff --git a/EE/paddlemetric/src/paddlemetrics/collections.py b/EE/paddlemetric/src/paddlemetrics/collections.py
new file mode 100644
index 000000000..3b03856e7
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/collections.py
@@ -0,0 +1,239 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
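+
+# Illustrative sketch (an assumption for documentation, not a prescribed usage):
+# cloning a collection with a prefix renames its output keys, e.g.
+#
+#     metrics = MetricCollection([Accuracy()])
+#     val_metrics = metrics.clone(prefix="val_")  # result keys become "val_Accuracy"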
+
+from collections import OrderedDict
+from copy import deepcopy
+from typing import Any, Dict, Hashable, Iterable, Optional, Sequence, Tuple, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import nn
+
+from paddlemetrics.metric import Metric
+from paddlemetrics.utilities import rank_zero_warn
+
+
+class MetricCollection(nn.ModuleDict):
+ """MetricCollection class can be used to chain metrics that have the same call pattern into one single class.
+
+ Args:
+ metrics: One of the following
+
+ * list or tuple (sequence): if metrics are passed in as a list or tuple, will use the metrics class name
+ as key for output dict. Therefore, two metrics of the same class cannot be chained this way.
+
+ * arguments: similar to passing in as a list, metrics passed in as arguments will use their metric
+ class name as key for the output dict.
+
+ * dict: if metrics are passed in as a dict, will use each key in the dict as key for output dict.
+ Use this format if you want to chain together multiple of the same metric with different parameters.
+ Note that the keys in the output dict will be sorted alphabetically.
+
+ prefix: a string to append in front of the keys of the output dict
+
+ postfix: a string to append after the keys of the output dict
+
+ Raises:
+ ValueError:
+ If one of the elements of ``metrics`` is not an instance of ``pl.metrics.Metric``.
+ ValueError:
+ If two elements in ``metrics`` have the same ``name``.
+ ValueError:
+ If ``metrics`` is not a ``list``, ``tuple`` or a ``dict``.
+ ValueError:
+ If ``metrics`` is ``dict`` and additional_metrics are passed in.
+ ValueError:
+ If ``prefix`` is set and it is not a string.
+ ValueError:
+ If ``postfix`` is set and it is not a string.
+
+ Example (input as list):
+ >>> import paddleext.torchapi as B
+ >>> from pprint import pprint
+ >>> from paddlemetrics import MetricCollection, Accuracy, Precision, Recall
+ >>> target = B.tensor([0, 2, 0, 2, 0, 1, 0, 2])
+ >>> preds = B.tensor([2, 1, 2, 0, 1, 2, 2, 2])
+ >>> metrics = MetricCollection([Accuracy(),
+ ... Precision(num_classes=3, average='macro'),
+ ... Recall(num_classes=3, average='macro')])
+ >>> metrics(preds, target)
+ {'Accuracy': tensor(0.1250), 'Precision': tensor(0.0667), 'Recall': tensor(0.1111)}
+
+ Example (input as arguments):
+ >>> metrics = MetricCollection(Accuracy(), Precision(num_classes=3, average='macro'),
+ ... Recall(num_classes=3, average='macro'))
+ >>> metrics(preds, target)
+ {'Accuracy': tensor(0.1250), 'Precision': tensor(0.0667), 'Recall': tensor(0.1111)}
+
+ Example (input as dict):
+ >>> metrics = MetricCollection({'micro_recall': Recall(num_classes=3, average='micro'),
+ ... 'macro_recall': Recall(num_classes=3, average='macro')})
+ >>> same_metric = metrics.clone()
+ >>> pprint(metrics(preds, target))
+ {'macro_recall': tensor(0.1111), 'micro_recall': tensor(0.1250)}
+ >>> pprint(same_metric(preds, target))
+ {'macro_recall': tensor(0.1111), 'micro_recall': tensor(0.1250)}
+ >>> metrics.persistent()
+ """
+
+ def __init__(
+ self,
+ metrics: Union[Metric, Sequence[Metric], Dict[str, Metric]],
+ *additional_metrics: Metric,
+ prefix: Optional[str] = None,
+ postfix: Optional[str] = None,
+ ) -> None:
+ super().__init__()
+
+ self._modules = self._sub_layers
+
+ self.add_metrics(metrics, *additional_metrics)
+
+ self.prefix = self._check_arg(prefix, "prefix")
+ self.postfix = self._check_arg(postfix, "postfix")
+
+ def forward(self, *args: Any, **kwargs: Any) -> Dict[str, Any]:
+ """Iteratively call forward for each metric.
+
+ Positional arguments (args) will be passed to every metric in the collection, while keyword arguments (kwargs)
+ will be filtered based on the signature of the individual metric.
+ """
+ return {k: m(*args, **m._filter_kwargs(**kwargs)) for k, m in self.items()}
+
+ def update(self, *args: Any, **kwargs: Any) -> None:
+ """Iteratively call update for each metric.
+
+ Positional arguments (args) will be passed to every metric in the collection, while keyword arguments (kwargs)
+ will be filtered based on the signature of the individual metric.
+ """
+ for _, m in self.items(keep_base=True):
+ m_kwargs = m._filter_kwargs(**kwargs)
+ m.update(*args, **m_kwargs)
+
+ def compute(self) -> Dict[str, Any]:
+ return {k: m.compute() for k, m in self.items()}
+
+ def reset(self) -> None:
+ """Iteratively call reset for each metric."""
+ for _, m in self.items(keep_base=True):
+ m.reset()
+
+ def clone(self, prefix: Optional[str] = None, postfix: Optional[str] = None) -> "MetricCollection":
+ """Make a copy of the metric collection.
+ Args:
+ prefix: a string to append in front of the metric keys
+ postfix: a string to append after the keys of the output dict
+
+ """
+ mc = deepcopy(self)
+ if prefix:
+ mc.prefix = self._check_arg(prefix, "prefix")
+ if postfix:
+ mc.postfix = self._check_arg(postfix, "postfix")
+ return mc
+
+ def persistent(self, mode: bool = True) -> None:
+ """Method for post-init to change if metric states should be saved to its state_dict."""
+ for _, m in self.items(keep_base=True):
+ m.persistent(mode)
+
+ def add_metrics(
+ self, metrics: Union[Metric, Sequence[Metric], Dict[str, Metric]], *additional_metrics: Metric
+ ) -> None:
+ """Add new metrics to Metric Collection."""
+ if isinstance(metrics, Metric):
+ # set compatible with original type expectations
+ metrics = [metrics]
+ if isinstance(metrics, Sequence):
+ # prepare for optional additions
+ metrics = list(metrics)
+ remain: list = []
+ for m in additional_metrics:
+ (metrics if isinstance(m, Metric) else remain).append(m)
+
+ if remain:
+ rank_zero_warn(
+ f"You have passed extra arguments {remain} which are not `Metric` so they will be ignored."
+ )
+ elif additional_metrics:
+ raise ValueError(
+ f"You have passed extra arguments {additional_metrics} which are not compatible"
+ f" with first passed dictionary {metrics} so they will be ignored."
+ )
+
+ if isinstance(metrics, dict):
+ # Check all values are metrics
+ # Make sure that metrics are added in deterministic order
+ for name in sorted(metrics.keys()):
+ metric = metrics[name]
+ if not isinstance(metric, Metric):
+ raise ValueError(
+ f"Value {metric} belonging to key {name} is not an instance of `pl.metrics.Metric`"
+ )
+ self[name] = metric
+ elif isinstance(metrics, Sequence):
+ for metric in metrics:
+ if not isinstance(metric, Metric):
+ raise ValueError(f"Input {metric} to `MetricCollection` is not an instance of `pl.metrics.Metric`")
+ name = metric.__class__.__name__
+ if name in self:
+ raise ValueError(f"Encountered two metrics both named {name}")
+ self[name] = metric
+ else:
+ raise ValueError("Unknown input to MetricCollection.")
+
+ def _set_name(self, base: str) -> str:
+ name = base if self.prefix is None else self.prefix + base
+ name = name if self.postfix is None else name + self.postfix
+ return name
+
+ def _to_renamed_ordered_dict(self) -> OrderedDict:
+ od = OrderedDict()
+ for k, v in self._modules.items():
+ od[self._set_name(k)] = v
+ return od
+
+ def keys(self, keep_base: bool = False) -> Iterable[Hashable]:
+ r"""Return an iterable of the ModuleDict key.
+ Args:
+ keep_base: Whether to add prefix/postfix on the items collection.
+ """
+ if keep_base:
+ return self._modules.keys()
+ return self._to_renamed_ordered_dict().keys()
+
+ def items(self, keep_base: bool = False) -> Iterable[Tuple[str, nn.Module]]:
+ r"""Return an iterable of the ModuleDict key/value pairs.
+ Args:
+ keep_base: Whether to add prefix/postfix on the items collection.
+ """
+ if keep_base:
+ return self._modules.items()
+ return self._to_renamed_ordered_dict().items()
+
+ @staticmethod
+ def _check_arg(arg: Optional[str], name: str) -> Optional[str]:
+ if arg is None or isinstance(arg, str):
+ return arg
+ raise ValueError(f"Expected input `{name}` to be a string, but got {type(arg)}")
+
+ def __repr__(self) -> str:
+ repr_str = super().__repr__()[:-2]
+ if self.prefix:
+ repr_str += f",\n prefix={self.prefix}{',' if self.postfix else ''}"
+ if self.postfix:
+ repr_str += f"{',' if not self.prefix else ''}\n postfix={self.postfix}"
+ return repr_str + "\n)"
+
+ def to(self, device):
+ # No-op stub: device placement is presumably left to Paddle here; kept so
+ # torch-style `.to(device)` calls do not fail.
+ pass
\ No newline at end of file
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/__init__.py b/EE/paddlemetric/src/paddlemetrics/functional/__init__.py
new file mode 100644
index 000000000..365d93c97
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/__init__.py
@@ -0,0 +1,138 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
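+
+# Minimal usage sketch of the functional API collected below (illustrative
+# tensors, not canonical values):
+#
+#     import paddleext.torchapi as B
+#     from paddlemetrics.functional import accuracy
+#     acc = accuracy(B.tensor([0, 1, 1]), B.tensor([0, 1, 0]))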
+from paddlemetrics.functional.audio.pesq import pesq +from paddlemetrics.functional.audio.pit import pit, pit_permutate +from paddlemetrics.functional.audio.si_sdr import si_sdr +from paddlemetrics.functional.audio.si_snr import si_snr +from paddlemetrics.functional.audio.snr import snr +from paddlemetrics.functional.audio.stoi import stoi +from paddlemetrics.functional.classification.accuracy import accuracy +from paddlemetrics.functional.classification.auc import auc +from paddlemetrics.functional.classification.auroc import auroc +from paddlemetrics.functional.classification.average_precision import average_precision +from paddlemetrics.functional.classification.calibration_error import calibration_error +from paddlemetrics.functional.classification.cohen_kappa import cohen_kappa +from paddlemetrics.functional.classification.confusion_matrix import confusion_matrix +from paddlemetrics.functional.classification.dice import dice_score +from paddlemetrics.functional.classification.f_beta import f1, fbeta +from paddlemetrics.functional.classification.hamming_distance import hamming_distance +from paddlemetrics.functional.classification.hinge import hinge +from paddlemetrics.functional.classification.iou import iou +from paddlemetrics.functional.classification.kl_divergence import kl_divergence +from paddlemetrics.functional.classification.matthews_corrcoef import matthews_corrcoef +from paddlemetrics.functional.classification.precision_recall import precision, precision_recall, recall +from paddlemetrics.functional.classification.precision_recall_curve import precision_recall_curve +from paddlemetrics.functional.classification.roc import roc +from paddlemetrics.functional.classification.specificity import specificity +from paddlemetrics.functional.classification.stat_scores import stat_scores +from paddlemetrics.functional.image.gradients import image_gradients +from paddlemetrics.functional.image.psnr import psnr +from paddlemetrics.functional.image.ssim import ssim +from paddlemetrics.functional.pairwise.cosine import pairwise_cosine_similarity +from paddlemetrics.functional.pairwise.euclidean import pairwise_euclidean_distance +from paddlemetrics.functional.pairwise.linear import pairwise_linear_similarity +from paddlemetrics.functional.pairwise.manhatten import pairwise_manhatten_distance +from paddlemetrics.functional.regression.cosine_similarity import cosine_similarity +from paddlemetrics.functional.regression.explained_variance import explained_variance +from paddlemetrics.functional.regression.mean_absolute_error import mean_absolute_error +from paddlemetrics.functional.regression.mean_absolute_percentage_error import mean_absolute_percentage_error +from paddlemetrics.functional.regression.mean_squared_error import mean_squared_error +from paddlemetrics.functional.regression.mean_squared_log_error import mean_squared_log_error +from paddlemetrics.functional.regression.pearson import pearson_corrcoef +from paddlemetrics.functional.regression.r2 import r2_score +from paddlemetrics.functional.regression.spearman import spearman_corrcoef +from paddlemetrics.functional.regression.symmetric_mean_absolute_percentage_error import ( + symmetric_mean_absolute_percentage_error, +) +from paddlemetrics.functional.regression.tweedie_deviance import tweedie_deviance_score +from paddlemetrics.functional.retrieval.average_precision import retrieval_average_precision +from paddlemetrics.functional.retrieval.fall_out import retrieval_fall_out +from paddlemetrics.functional.retrieval.hit_rate import 
retrieval_hit_rate
+from paddlemetrics.functional.retrieval.ndcg import retrieval_normalized_dcg
+from paddlemetrics.functional.retrieval.precision import retrieval_precision
+from paddlemetrics.functional.retrieval.r_precision import retrieval_r_precision
+from paddlemetrics.functional.retrieval.recall import retrieval_recall
+from paddlemetrics.functional.retrieval.reciprocal_rank import retrieval_reciprocal_rank
+from paddlemetrics.functional.self_supervised import embedding_similarity
+#from paddlemetrics.functional.text.bert import bert_score
+from paddlemetrics.functional.text.bleu import bleu_score
+from paddlemetrics.functional.text.rouge import rouge_score
+from paddlemetrics.functional.text.sacre_bleu import sacre_bleu_score
+from paddlemetrics.functional.text.wer import wer
+
+__all__ = [
+ "accuracy",
+ "auc",
+ "auroc",
+ "average_precision",
+# "bert_score",
+ "bleu_score",
+ "calibration_error",
+ "cohen_kappa",
+ "confusion_matrix",
+ "cosine_similarity",
+ "tweedie_deviance_score",
+ "dice_score",
+ "embedding_similarity",
+ "explained_variance",
+ "f1",
+ "fbeta",
+ "hamming_distance",
+ "hinge",
+ "image_gradients",
+ "iou",
+ "kl_divergence",
+ "matthews_corrcoef",
+ "mean_absolute_error",
+ "mean_absolute_percentage_error",
+ "mean_squared_error",
+ "mean_squared_log_error",
+ "pairwise_cosine_similarity",
+ "pairwise_euclidean_distance",
+ "pairwise_linear_similarity",
+ "pairwise_manhatten_distance",
+ "pearson_corrcoef",
+ "pesq",
+ "pit",
+ "pit_permutate",
+ "precision",
+ "precision_recall",
+ "precision_recall_curve",
+ "psnr",
+ "r2_score",
+ "recall",
+ "retrieval_average_precision",
+ "retrieval_fall_out",
+ "retrieval_hit_rate",
+ "retrieval_normalized_dcg",
+ "retrieval_precision",
+ "retrieval_r_precision",
+ "retrieval_recall",
+ "retrieval_reciprocal_rank",
+ "roc",
+ "rouge_score",
+ "sacre_bleu_score",
+ "si_sdr",
+ "si_snr",
+ "snr",
+ "spearman_corrcoef",
+ "specificity",
+ "ssim",
+ "stat_scores",
+ "stoi",
+ "symmetric_mean_absolute_percentage_error",
+ "wer",
+]
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/audio/__init__.py b/EE/paddlemetric/src/paddlemetrics/functional/audio/__init__.py
new file mode 100644
index 000000000..a7e7d89c0
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/audio/__init__.py
@@ -0,0 +1,19 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
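+
+# Usage sketch (illustrative; the values are borrowed from the ``snr`` docstring):
+# each re-exported function takes ``preds`` and ``target`` tensors of shape
+# [..., time], e.g.
+#
+#     import paddleext.torchapi as B
+#     from paddlemetrics.functional.audio import snr
+#     val = snr(preds=B.tensor([2.5, 0.0, 2.0, 8.0]), target=B.tensor([3.0, -0.5, 2.0, 7.0]))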
+from paddlemetrics.functional.audio.pesq import pesq # noqa: F401
+from paddlemetrics.functional.audio.pit import pit, pit_permutate # noqa: F401
+from paddlemetrics.functional.audio.si_sdr import si_sdr # noqa: F401
+from paddlemetrics.functional.audio.si_snr import si_snr # noqa: F401
+from paddlemetrics.functional.audio.snr import snr # noqa: F401
+from paddlemetrics.functional.audio.stoi import stoi # noqa: F401
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/audio/pesq.py b/EE/paddlemetric/src/paddlemetrics/functional/audio/pesq.py
new file mode 100644
index 000000000..ab81723da
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/audio/pesq.py
@@ -0,0 +1,100 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import numpy as np
+
+from paddlemetrics.utilities.imports import _PESQ_AVAILABLE
+
+if _PESQ_AVAILABLE:
+ import pesq as pesq_backend
+else:
+ pesq_backend = None
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.utilities.checks import _check_same_shape
+
+
+def pesq(preds: Tensor, target: Tensor, fs: int, mode: str, keep_same_device: bool = False) -> Tensor:
+ r"""PESQ (Perceptual Evaluation of Speech Quality)
+
+ This is a wrapper for the ``pesq`` package [1]. Note that input will be moved to `cpu`
+ to perform the metric calculation.
+
+ .. note:: using this metric requires you to have ``pesq`` installed. Either install as ``pip install
+ paddlemetrics[audio]`` or ``pip install pesq``
+
+ Args:
+ preds:
+ shape ``[...,time]``
+ target:
+ shape ``[...,time]``
+ fs:
+ sampling frequency, should be 16000 or 8000 (Hz)
+ mode:
+ 'wb' (wide-band) or 'nb' (narrow-band)
+ keep_same_device:
+ whether to move the pesq value to the device of preds
+
+ Returns:
+ pesq value of shape [...]
+
+ Raises:
+ ValueError:
+ If ``pesq`` package is not installed
+ ValueError:
+ If ``fs`` is not either ``8000`` or ``16000``
+ ValueError:
+ If ``mode`` is not either ``"wb"`` or ``"nb"``
+
+ Example:
+ >>> from paddlemetrics.functional.audio import pesq
+ >>> import paddleext.torchapi as B
+ >>> g = B.manual_seed(1)
+ >>> preds = B.randn(8000)
+ >>> target = B.randn(8000)
+ >>> pesq(preds, target, 8000, 'nb')
+ tensor(2.2076)
+ >>> pesq(preds, target, 16000, 'wb')
+ tensor(1.7359)
+
+ References:
+ [1] https://github.com/ludlows/python-pesq
+ """
+ if not _PESQ_AVAILABLE:
+ raise ValueError(
+ "PESQ metric requires that pesq is installed."
+ "Either install as `pip install paddlemetrics[audio]` or `pip install pesq`" + ) + if fs not in (8000, 16000): + raise ValueError(f"Expected argument `fs` to either be 8000 or 16000 but got {fs}") + if mode not in ("wb", "nb"): + raise ValueError(f"Expected argument `mode` to either be 'wb' or 'nb' but got {mode}") + _check_same_shape(preds, target) + + if preds.ndim == 1: + pesq_val_np = pesq_backend.pesq(fs, target.detach().cpu().numpy(), preds.detach().cpu().numpy(), mode) + pesq_val = B.tensor(pesq_val_np) + else: + preds_np = preds.reshape(-1, preds.shape[-1]).detach().cpu().numpy() + target_np = target.reshape(-1, preds.shape[-1]).detach().cpu().numpy() + pesq_val_np = np.empty(shape=(preds_np.shape[0])) + for b in range(preds_np.shape[0]): + pesq_val_np[b] = pesq_backend.pesq(fs, target_np[b, :], preds_np[b, :], mode) + pesq_val = B.from_numpy(pesq_val_np) + pesq_val = pesq_val.reshape(preds.shape[:-1]) + + if keep_same_device: + pesq_val = pesq_val.to(preds.device) + + return pesq_val diff --git a/EE/paddlemetric/src/paddlemetrics/functional/audio/pit.py b/EE/paddlemetric/src/paddlemetrics/functional/audio/pit.py new file mode 100644 index 000000000..3ca729a2d --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/audio/pit.py @@ -0,0 +1,206 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import warnings +from itertools import permutations +from typing import Any, Callable, Dict, Tuple, Union + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.utilities.checks import _check_same_shape +from paddlemetrics.utilities.imports import _SCIPY_AVAILABLE + +# _ps_dict: cache of permutations +# it's necessary to cache it, otherwise it will consume a large amount of time +_ps_dict: dict = {} # _ps_dict[str(spk_num)+str(device)] = permutations + + +def _find_best_perm_by_linear_sum_assignment( + metric_mtx: B.Tensor, + eval_func: Union[B.min, B.max], +) -> Tuple[Tensor, Tensor]: + """Solves the linear sum assignment problem using scipy, and returns the best metric values and the + corresponding permutations. + + Args: + metric_mtx: + the metric matrix, shape [batch_size, spk_num, spk_num] + eval_func: + the function to reduce the metric values of different the permutations + + Returns: + best_metric: + shape [batch] + best_perm: + shape [batch, spk] + """ + from scipy.optimize import linear_sum_assignment + + mmtx = metric_mtx.detach().cpu() + best_perm = B.tensor([linear_sum_assignment(pwm, eval_func == B.max)[1] for pwm in mmtx]) + best_perm = best_perm.to(metric_mtx.device) + best_metric = B.gather(metric_mtx, 2, best_perm[:, :, None]).mean([-1, -2]) + return best_metric, best_perm # shape [batch], shape [batch, spk] + + +def _find_best_perm_by_exhuastive_method( + metric_mtx: B.Tensor, + eval_func: Union[B.min, B.max], +) -> Tuple[Tensor, Tensor]: + """Solves the linear sum assignment problem using exhuastive method, i.e. 
exhaustively calculating the metric
+ values of all possible permutations, and returns the best metric values and the corresponding permutations.
+
+ Args:
+ metric_mtx:
+ the metric matrix, shape [batch_size, spk_num, spk_num]
+ eval_func:
+ the function to reduce the metric values of the different permutations
+
+ Returns:
+ best_metric:
+ shape [batch]
+ best_perm:
+ shape [batch, spk]
+ """
+ # create/read/cache the permutations and its indexes
+ # reading from cache would be much faster than creating on CPU and then moving to GPU
+ batch_size, spk_num = metric_mtx.shape[:2]
+ key = str(spk_num) + str(metric_mtx.device)
+ if key not in _ps_dict:
+ # ps: all the permutations, shape [spk_num, perm_num]
+ # ps: in the i-th permutation, the prediction corresponding to the j-th target is ps[j, i]
+ ps = B.tensor(list(permutations(range(spk_num))), device=metric_mtx.device).T
+ _ps_dict[key] = ps
+ else:
+ ps = _ps_dict[key] # all the permutations, shape [spk_num, perm_num]
+
+ # find the metric of each permutation
+ perm_num = ps.shape[-1]
+ # shape [batch_size, spk_num, perm_num]
+ bps = ps[None, ...].expand(batch_size, spk_num, perm_num)
+ # shape [batch_size, spk_num, perm_num]
+ metric_of_ps_details = B.gather(metric_mtx, 2, bps)
+ # shape [batch_size, perm_num]
+ metric_of_ps = metric_of_ps_details.mean(dim=1)
+
+ # find the best metric and best permutation
+ best_metric, best_indexes = eval_func(metric_of_ps, dim=1)
+ best_indexes = best_indexes.detach()
+ best_perm = ps.T[best_indexes, :]
+ return best_metric, best_perm # shape [batch], shape [batch, spk]
+
+
+def pit(
+ preds: B.Tensor, target: B.Tensor, metric_func: Callable, eval_func: str = "max", **kwargs: Dict[str, Any]
+) -> Tuple[Tensor, Tensor]:
+ """Permutation invariant training (PIT). PIT implements the famous Permutation Invariant Training
+ method [1] from the speech separation field, in order to calculate audio metrics in a permutation
+ invariant way.
+
+ Args:
+ preds:
+ shape [batch, spk, ...]
+ target:
+ shape [batch, spk, ...]
+ metric_func:
+ a metric function that accepts a batch of target and estimate,
+ i.e. metric_func(preds[:, i, ...], target[:, j, ...]), and returns a batch of metric tensors [batch]
+ eval_func:
+ the function to find the best permutation, can be 'min' or 'max',
+ i.e. the smaller the better or the larger the better.
+
+ kwargs:
+ additional args for metric_func
+
+ Returns:
+ best_metric of shape [batch],
+ best_perm of shape [batch, spk]
+
+ Example:
+ >>> from paddlemetrics.functional.audio import si_sdr
+ >>> # [batch, spk, time]
+ >>> preds = B.tensor([[[-0.0579, 0.3560, -0.9604], [-0.1719, 0.3205, 0.2951]]])
+ >>> target = B.tensor([[[ 1.0958, -0.1648, 0.5228], [-0.4100, 1.1942, -0.5103]]])
+ >>> best_metric, best_perm = pit(preds, target, si_sdr, 'max')
+ >>> best_metric
+ tensor([-5.1091])
+ >>> best_perm
+ tensor([[0, 1]])
+ >>> pit_permutate(preds, best_perm)
+ tensor([[[-0.0579, 0.3560, -0.9604],
+ [-0.1719, 0.3205, 0.2951]]])
+
+ Reference:
+ [1] `Permutation Invariant Training of Deep Models`_
+ """
+ _check_same_shape(preds, target)
+ if eval_func not in ["max", "min"]:
+ raise ValueError(f'eval_func can only be "max" or "min" but got {eval_func}')
+ if target.ndim < 2:
+ raise ValueError(f"Inputs must be of shape [batch, spk, ...], got {target.shape} and {preds.shape} instead")
+
+ # calculate the metric matrix
+ batch_size, spk_num = target.shape[0:2]
+ metric_mtx = None
+ for target_idx in range(spk_num): # we have spk_num speeches in target in each sample
+ for preds_idx in range(spk_num): # we have spk_num speeches in preds in each sample
+ if metric_mtx is not None:
+ metric_mtx[:, target_idx, preds_idx] = metric_func(
+ preds[:, preds_idx, ...], target[:, target_idx, ...], **kwargs
+ )
+ else:
+ first_ele = metric_func(preds[:, preds_idx, ...], target[:, target_idx, ...], **kwargs)
+ metric_mtx = B.empty((batch_size, spk_num, spk_num), dtype=first_ele.dtype, device=first_ele.device)
+ metric_mtx[:, target_idx, preds_idx] = first_ele
+
+ # find best
+ op = B.max if eval_func == "max" else B.min
+ if spk_num < 3 or not _SCIPY_AVAILABLE:
+ if spk_num >= 3 and not _SCIPY_AVAILABLE:
+ warnings.warn(
+ f"In pit metric with speaker number {spk_num} >= 3, we recommend installing scipy for better performance"
+ )
+
+ best_metric, best_perm = _find_best_perm_by_exhuastive_method(metric_mtx, op)
+ else:
+ best_metric, best_perm = _find_best_perm_by_linear_sum_assignment(metric_mtx, op)
+
+ return best_metric, best_perm
+
+
+def pit_permutate(preds: Tensor, perm: Tensor) -> Tensor:
+ """Permutate the estimate according to perm.
+
+ Args:
+ preds (Tensor): the estimates you want to permutate, shape [batch, spk, ...]
+ perm (Tensor): the permutation returned from pit, shape [batch, spk]
+
+ Returns:
+ Tensor: the permutated version of estimate
+
+ Example:
+ >>> from paddlemetrics.functional.audio import si_sdr
+ >>> # [batch, spk, time]
+ >>> preds = B.tensor([[[-0.0579, 0.3560, -0.9604], [-0.1719, 0.3205, 0.2951]]])
+ >>> target = B.tensor([[[ 1.0958, -0.1648, 0.5228], [-0.4100, 1.1942, -0.5103]]])
+ >>> best_metric, best_perm = pit(preds, target, si_sdr, 'max')
+ >>> best_metric
+ tensor([-5.1091])
+ >>> best_perm
+ tensor([[0, 1]])
+ >>> pit_permutate(preds, best_perm)
+ tensor([[[-0.0579, 0.3560, -0.9604],
+ [-0.1719, 0.3205, 0.2951]]])
+ """
+ preds_pmted = B.stack([B.index_select(pred, 0, p) for pred, p in zip(preds, perm)])
+ return preds_pmted
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/audio/si_sdr.py b/EE/paddlemetric/src/paddlemetrics/functional/audio/si_sdr.py
new file mode 100644
index 000000000..66eb9e3ae
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/audio/si_sdr.py
@@ -0,0 +1,64 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.utilities.checks import _check_same_shape + + +def si_sdr(preds: Tensor, target: Tensor, zero_mean: bool = False) -> Tensor: + """Calculates Scale-invariant signal-to-distortion ratio (SI-SDR) metric. The SI-SDR value is in general + considered an overall measure of how good a source sounds. + + Args: + preds: + shape ``[...,time]`` + target: + shape ``[...,time]`` + zero_mean: + whether to zero-mean the target and preds + + Returns: + si-sdr value of shape [...] + + Example: + >>> from paddlemetrics.functional.audio import si_sdr + >>> target = B.tensor([3.0, -0.5, 2.0, 7.0]) + >>> preds = B.tensor([2.5, 0.0, 2.0, 8.0]) + >>> si_sdr_val = si_sdr(preds, target) + >>> si_sdr_val + tensor(18.4030) + + References: + [1] Le Roux, Jonathan, et al. "SDR half-baked or well done." IEEE International Conference on Acoustics, Speech + and Signal Processing (ICASSP) 2019. + """ + _check_same_shape(preds, target) + EPS = B.finfo(preds.dtype).eps + + if zero_mean: + target = target - B.mean(target, dim=-1, keepdim=True) + preds = preds - B.mean(preds, dim=-1, keepdim=True) + + alpha = (B.sum(preds * target, dim=-1, keepdim=True) + EPS) / ( + B.sum(target ** 2, dim=-1, keepdim=True) + EPS + ) + target_scaled = alpha * target + + noise = target_scaled - preds + + si_sdr_value = (B.sum(target_scaled ** 2, dim=-1) + EPS) / (B.sum(noise ** 2, dim=-1) + EPS) + si_sdr_value = 10 * B.log10(si_sdr_value) + + return si_sdr_value diff --git a/EE/paddlemetric/src/paddlemetrics/functional/audio/si_snr.py b/EE/paddlemetric/src/paddlemetrics/functional/audio/si_snr.py new file mode 100644 index 000000000..abddf039f --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/audio/si_snr.py @@ -0,0 +1,46 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from paddleext.torchapi import Tensor + +from paddlemetrics.functional.audio.si_sdr import si_sdr + + +def si_snr(preds: Tensor, target: Tensor) -> Tensor: + """Scale-invariant signal-to-noise ratio (SI-SNR). + + Args: + preds: + shape ``[...,time]`` + target: + shape ``[...,time]`` + + Returns: + si-snr value of shape [...] + + Example: + >>> import paddleext.torchapi as B + >>> from paddlemetrics.functional.audio import si_snr + >>> target = B.tensor([3.0, -0.5, 2.0, 7.0]) + >>> preds = B.tensor([2.5, 0.0, 2.0, 8.0]) + >>> si_snr_val = si_snr(preds, target) + >>> si_snr_val + tensor(15.0918) + + References: + [1] Y. Luo and N.
Mesgarani, "TaSNet: Time-Domain Audio Separation Network for Real-Time, Single-Channel Speech + Separation," 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 2018, pp. + 696-700, doi: 10.1109/ICASSP.2018.8462116. + """ + + return si_sdr(target=target, preds=preds, zero_mean=True) diff --git a/EE/paddlemetric/src/paddlemetrics/functional/audio/snr.py b/EE/paddlemetric/src/paddlemetrics/functional/audio/snr.py new file mode 100644 index 000000000..8c54128ba --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/audio/snr.py @@ -0,0 +1,66 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.utilities.checks import _check_same_shape + + +def snr(preds: Tensor, target: Tensor, zero_mean: bool = False) -> Tensor: + r"""Signal-to-noise ratio (SNR_): + + .. math:: + \text{SNR} = \frac{P_{signal}}{P_{noise}} + + where :math:`P` denotes the power of each signal. The SNR metric compares the level + of the desired signal to the level of background noise. Therefore, a high value of + SNR means that the audio is clear. + + Args: + preds: + shape ``[...,time]`` + target: + shape ``[...,time]`` + zero_mean: + whether to zero-mean the target and preds + + Returns: + snr value of shape [...] + + Example: + >>> from paddlemetrics.functional.audio import snr + >>> target = B.tensor([3.0, -0.5, 2.0, 7.0]) + >>> preds = B.tensor([2.5, 0.0, 2.0, 8.0]) + >>> snr_val = snr(preds, target) + >>> snr_val + tensor(16.1805) + + References: + [1] Le Roux, Jonathan, et al. "SDR half-baked or well done." IEEE International Conference on Acoustics, Speech + and Signal Processing (ICASSP) 2019. + + """ + _check_same_shape(preds, target) + EPS = B.finfo(preds.dtype).eps + + if zero_mean: + target = target - B.mean(target, dim=-1, keepdim=True) + preds = preds - B.mean(preds, dim=-1, keepdim=True) + + noise = target - preds + + snr_value = (B.sum(target ** 2, dim=-1) + EPS) / (B.sum(noise ** 2, dim=-1) + EPS) + snr_value = 10 * B.log10(snr_value) + + return snr_value diff --git a/EE/paddlemetric/src/paddlemetrics/functional/audio/stoi.py b/EE/paddlemetric/src/paddlemetrics/functional/audio/stoi.py new file mode 100644 index 000000000..4c1f5806d --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/audio/stoi.py @@ -0,0 +1,105 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
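+# A quick sketch relating the three ratios defined above: si_snr simply delegates to si_sdr with
+# zero_mean=True, so the equivalence below should hold (assuming `B.allclose` is exposed by the
+# torchapi shim; the values come from the doctests above):
+#
+#   import paddleext.torchapi as B
+#   from paddlemetrics.functional.audio import si_sdr, si_snr, snr
+#
+#   target = B.tensor([3.0, -0.5, 2.0, 7.0])
+#   preds = B.tensor([2.5, 0.0, 2.0, 8.0])
+#   assert B.allclose(si_snr(preds, target), si_sdr(preds, target, zero_mean=True))
+#   snr(preds, target)  # plain SNR over the same pair -> tensor(16.1805)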
+import numpy as np +import paddleext.torchapi as B + +from paddlemetrics.utilities.imports import _PYSTOI_AVAILABLE + +if _PYSTOI_AVAILABLE: + from pystoi import stoi as stoi_backend +else: + stoi_backend = None +from paddleext.torchapi import Tensor + +from paddlemetrics.utilities.checks import _check_same_shape + + +def stoi(preds: Tensor, target: Tensor, fs: int, extended: bool = False, keep_same_device: bool = False) -> Tensor: + r"""STOI (Short Term Objective Intelligibility, see [2,3]), a wrapper for the pystoi package [1]. + Note that input will be moved to `cpu` to perform the metric calculation. + + Intelligibility measure which is highly correlated with the intelligibility of degraded speech signals, e.g., due + to additive noise, single/multi-channel noise reduction, binary masking and vocoded speech as in CI simulations. + The STOI-measure is intrusive, i.e., a function of the clean and degraded speech signals. STOI may be a good + alternative to the speech intelligibility index (SII) or the speech transmission index (STI), when you are + interested in the effect of nonlinear processing on noisy speech, e.g., noise reduction, binary masking algorithms, + on speech intelligibility. Description taken from [Cees Taal's website](http://www.ceestaal.nl/code/). + + .. note:: using this metric requires you to have ``pystoi`` installed. Either install as ``pip install + paddlemetrics[audio]`` or ``pip install pystoi`` + + Args: + preds: + shape ``[...,time]`` + target: + shape ``[...,time]`` + fs: + sampling frequency (Hz) + extended: + whether to use the extended STOI described in [4] + keep_same_device: + whether to move the stoi value to the device of preds + + Returns: + stoi value of shape [...] + + Raises: + ValueError: + If ``pystoi`` package is not installed + + Example: + >>> from paddlemetrics.functional.audio import stoi + >>> import paddleext.torchapi as B + >>> g = B.manual_seed(1) + >>> preds = B.randn(8000) + >>> target = B.randn(8000) + >>> stoi(preds, target, 8000).float() + tensor(-0.0100) + + References: + [1] https://github.com/mpariente/pystoi + + [2] C.H.Taal, R.C.Hendriks, R.Heusdens, J.Jensen 'A Short-Time Objective Intelligibility Measure for + Time-Frequency Weighted Noisy Speech', ICASSP 2010, Texas, Dallas. + + [3] C.H.Taal, R.C.Hendriks, R.Heusdens, J.Jensen 'An Algorithm for Intelligibility Prediction of + Time-Frequency Weighted Noisy Speech', IEEE Transactions on Audio, Speech, and Language Processing, 2011. + + [4] J. Jensen and C. H. Taal, 'An Algorithm for Predicting the Intelligibility of Speech Masked by Modulated + Noise Maskers', IEEE Transactions on Audio, Speech and Language Processing, 2016. + + """ + if not _PYSTOI_AVAILABLE: + raise ValueError( + "STOI metric requires that pystoi is installed."
+ " Either install as `pip install paddlemetrics[audio]` or `pip install pystoi`" + ) + _check_same_shape(preds, target) + + if len(preds.shape) == 1: + stoi_val_np = stoi_backend(target.detach().cpu().numpy(), preds.detach().cpu().numpy(), fs, extended) + stoi_val = B.tensor(stoi_val_np) + else: + preds_np = preds.reshape(-1, preds.shape[-1]).detach().cpu().numpy() + target_np = target.reshape(-1, preds.shape[-1]).detach().cpu().numpy() + stoi_val_np = np.empty(shape=(preds_np.shape[0])) + for b in range(preds_np.shape[0]): + stoi_val_np[b] = stoi_backend(target_np[b, :], preds_np[b, :], fs, extended) + stoi_val = B.from_numpy(stoi_val_np) + stoi_val = stoi_val.reshape(preds.shape[:-1]) + + if keep_same_device: + stoi_val = stoi_val.to(preds.device) + + return stoi_val diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/__init__.py b/EE/paddlemetric/src/paddlemetrics/functional/classification/__init__.py new file mode 100644 index 000000000..a03982c8c --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/classification/__init__.py @@ -0,0 +1,32 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from paddlemetrics.functional.classification.accuracy import accuracy # noqa: F401 +from paddlemetrics.functional.classification.auc import auc # noqa: F401 +from paddlemetrics.functional.classification.auroc import auroc # noqa: F401 +from paddlemetrics.functional.classification.average_precision import average_precision # noqa: F401 +from paddlemetrics.functional.classification.calibration_error import calibration_error # noqa: F401 +from paddlemetrics.functional.classification.cohen_kappa import cohen_kappa # noqa: F401 +from paddlemetrics.functional.classification.confusion_matrix import confusion_matrix # noqa: F401 +from paddlemetrics.functional.classification.dice import dice_score # noqa: F401 +from paddlemetrics.functional.classification.f_beta import f1, fbeta # noqa: F401 +from paddlemetrics.functional.classification.hamming_distance import hamming_distance # noqa: F401 +from paddlemetrics.functional.classification.hinge import hinge # noqa: F401 +from paddlemetrics.functional.classification.iou import iou # noqa: F401 +from paddlemetrics.functional.classification.kl_divergence import kl_divergence # noqa: F401 +from paddlemetrics.functional.classification.matthews_corrcoef import matthews_corrcoef # noqa: F401 +from paddlemetrics.functional.classification.precision_recall import precision, precision_recall, recall # noqa: F401 +from paddlemetrics.functional.classification.precision_recall_curve import precision_recall_curve # noqa: F401 +from paddlemetrics.functional.classification.roc import roc # noqa: F401 +from paddlemetrics.functional.classification.specificity import specificity # noqa: F401 +from paddlemetrics.functional.classification.stat_scores import stat_scores # noqa: F401 diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/accuracy.py
b/EE/paddlemetric/src/paddlemetrics/functional/classification/accuracy.py new file mode 100644 index 000000000..44c89fa92 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/classification/accuracy.py @@ -0,0 +1,418 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Optional, Tuple + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.classification.stat_scores import _reduce_stat_scores, _stat_scores_update +from paddlemetrics.utilities.checks import _check_classification_inputs, _input_format_classification, _input_squeeze +from paddlemetrics.utilities.enums import AverageMethod, DataType, MDMCAverageMethod + + +def _check_subset_validity(mode: DataType) -> bool: + """Checks if the input mode is valid.""" + return mode in (DataType.MULTILABEL, DataType.MULTIDIM_MULTICLASS) + + +def _mode( + preds: Tensor, + target: Tensor, + threshold: float, + top_k: Optional[int], + num_classes: Optional[int], + multiclass: Optional[bool], +) -> DataType: + """Finds the mode of the input tensors. + + Args: + preds: Predicted tensor + target: Ground truth tensor + threshold: Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the + case of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. + top_k: Number of highest probability or logit score predictions considered to find the correct label, + relevant only for (multi-dimensional) multi-class inputs. + num_classes: Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods. + multiclass: + Used only in certain special cases, where you want to treat inputs as a different type + than what they appear to be. + + Example: + >>> target = B.tensor([0, 1, 2, 3]) + >>> preds = B.tensor([0, 2, 1, 3]) + >>> _mode(preds, target, 0.5, None, None, None) + <DataType.MULTICLASS: 'multi-class'> + + """ + + mode = _check_classification_inputs( + preds, target, threshold=threshold, top_k=top_k, num_classes=num_classes, multiclass=multiclass + ) + return mode + + +def _accuracy_update( + preds: Tensor, + target: Tensor, + reduce: Optional[str], + mdmc_reduce: Optional[str], + threshold: float, + num_classes: Optional[int], + top_k: Optional[int], + multiclass: Optional[bool], + ignore_index: Optional[int], + mode: DataType, +) -> Tuple[Tensor, Tensor, Tensor, Tensor]: + """Updates and returns stat scores (true positives, false positives, true negatives, false negatives) required + to compute accuracy. + + Args: + preds: Predicted tensor + target: Ground truth tensor + reduce: Defines the reduction that is applied. + mdmc_reduce: Defines how the multi-dimensional multi-class inputs are handled. + threshold: Threshold for transforming probability or logit predictions to binary (0,1) predictions, in + the case of binary or multi-label inputs. + num_classes: Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods.
+ top_k: Number of highest probability or logit score predictions considered to find the correct label, + relevant only for (multi-dimensional) multi-class inputs. + multiclass: Used only in certain special cases, where you want to treat inputs as a different type + than what they appear to be. + ignore_index: Integer specifying a target class to ignore. If given, this class index does not contribute + to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` + or ``'none'``, the score for the ignored class will be returned as ``nan``. + mode: Mode of the input tensors + """ + + if mode == DataType.MULTILABEL and top_k: + raise ValueError("You can not use the `top_k` parameter to calculate accuracy for multi-label inputs.") + + preds, target = _input_squeeze(preds, target) + tp, fp, tn, fn = _stat_scores_update( + preds, + target, + reduce=reduce, + mdmc_reduce=mdmc_reduce, + threshold=threshold, + num_classes=num_classes, + top_k=top_k, + multiclass=multiclass, + ignore_index=ignore_index, + ) + return tp, fp, tn, fn + + +def _accuracy_compute( + tp: Tensor, + fp: Tensor, + tn: Tensor, + fn: Tensor, + average: Optional[str], + mdmc_average: Optional[str], + mode: DataType, +) -> Tensor: + """Computes accuracy from stat scores: true positives, false positives, true negatives, false negatives. + + Args: + tp: True positives + fp: False positives + tn: True negatives + fn: False negatives + average: Defines the reduction that is applied. + mdmc_average: Defines how averaging is done for multi-dimensional multi-class inputs (on top of the + ``average`` parameter). + mode: Mode of the input tensors + + Example: + >>> preds = B.tensor([0, 2, 1, 3]) + >>> target = B.tensor([0, 1, 2, 3]) + >>> threshold = 0.5 + >>> reduce = average = 'micro' + >>> mdmc_average = 'global' + >>> mode = _mode(preds, target, threshold, top_k=None, num_classes=None, multiclass=None) + >>> tp, fp, tn, fn = _accuracy_update( + ... preds, + ... target, + ... reduce, + ... mdmc_average, + ... threshold=0.5, + ... num_classes=None, + ... top_k=None, + ... multiclass=None, + ... ignore_index=None, + ... mode=mode) + >>> _accuracy_compute(tp, fp, tn, fn, average, mdmc_average, mode) + tensor(0.5000) + + >>> target = B.tensor([0, 1, 2]) + >>> preds = B.tensor([[0.1, 0.9, 0], [0.3, 0.1, 0.6], [0.2, 0.5, 0.3]]) + >>> top_k, threshold = 2, 0.5 + >>> reduce = average = 'micro' + >>> mdmc_average = 'global' + >>> mode = _mode(preds, target, threshold, top_k, num_classes=None, multiclass=None) + >>> tp, fp, tn, fn = _accuracy_update( + ... preds, + ... target, + ... reduce, + ... mdmc_average, + ... threshold, + ... num_classes=None, + ... top_k=top_k, + ... multiclass=None, + ... ignore_index=None, + ... 
mode=mode) + >>> _accuracy_compute(tp, fp, tn, fn, average, mdmc_average, mode) + tensor(0.6667) + """ + + simple_average = [AverageMethod.MICRO, AverageMethod.SAMPLES] + if (mode == DataType.BINARY and average in simple_average) or mode == DataType.MULTILABEL: + numerator = tp + tn + denominator = tp + tn + fp + fn + else: + numerator = tp + denominator = tp + fn + + if average == AverageMethod.MACRO and mdmc_average != MDMCAverageMethod.SAMPLEWISE: + cond = tp + fp + fn == 0 + numerator = numerator[~cond] + denominator = denominator[~cond] + + if average == AverageMethod.NONE and mdmc_average != MDMCAverageMethod.SAMPLEWISE: + # a class is not present if there are no TPs, no FPs, and no FNs + meaningless_indices = B.nonzero((tp | fn | fp) == 0).cpu() + numerator[meaningless_indices, ...] = -1 + denominator[meaningless_indices, ...] = -1 + + return _reduce_stat_scores( + numerator=numerator, + denominator=denominator, + weights=None if average != AverageMethod.WEIGHTED else tp + fn, + average=average, + mdmc_average=mdmc_average, + ) + + +def _subset_accuracy_update( + preds: Tensor, + target: Tensor, + threshold: float, + top_k: Optional[int], +) -> Tuple[Tensor, Tensor]: + """Updates and returns variables required to compute subset accuracy. + + Args: + preds: Predicted tensor + target: Ground truth tensor + threshold: Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case + of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. + top_k: Number of highest probability or logit score predictions considered to find the correct label, + relevant only for (multi-dimensional) multi-class inputs. + """ + + preds, target = _input_squeeze(preds, target) + preds, target, mode = _input_format_classification(preds, target, threshold=threshold, top_k=top_k) + + if mode == DataType.MULTILABEL and top_k: + raise ValueError("You can not use the `top_k` parameter to calculate accuracy for multi-label inputs.") + + if mode == DataType.MULTILABEL: + correct = (preds == target).all(dim=1).sum() + total = tensor(target.shape[0], device=target.device) + elif mode == DataType.MULTICLASS: + correct = (preds * target).sum() + total = target.sum() + elif mode == DataType.MULTIDIM_MULTICLASS: + sample_correct = (preds * target).sum(dim=(1, 2)) + correct = (sample_correct == target.shape[2]).sum() + total = tensor(target.shape[0], device=target.device) + else: + correct, total = tensor(0), tensor(0) + + return correct, total + + +def _subset_accuracy_compute(correct: Tensor, total: Tensor) -> Tensor: + """Computes subset accuracy from the number of correct observations and the total number of observations. + + Args: + correct: Number of correct observations + total: Number of observations + """ + + return correct.float() / total + + +def accuracy( + preds: Tensor, + target: Tensor, + average: str = "micro", + mdmc_average: Optional[str] = "global", + threshold: float = 0.5, + top_k: Optional[int] = None, + subset_accuracy: bool = False, + num_classes: Optional[int] = None, + multiclass: Optional[bool] = None, + ignore_index: Optional[int] = None, +) -> Tensor: + r"""Computes `Accuracy`_ + + .. math:: + \text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i) + + Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a + tensor of predictions.
+ + For multi-class and multi-dimensional multi-class data with probability or logits predictions, the + parameter ``top_k`` generalizes this metric to a Top-K accuracy metric: for each sample the + top-K highest probability or logits items are considered to find the correct label. + + For multi-label and multi-dimensional multi-class inputs, this metric computes the "global" + accuracy by default, which counts all labels or sub-samples separately. This can be + changed to subset accuracy (which requires all labels or sub-samples in the sample to + be correctly predicted) by setting ``subset_accuracy=True``. + + Accepts all input types listed in :ref:`references/modules:input types`. + + Args: + preds: Predictions from model (probabilities, logits or labels) + target: Ground truth labels + average: + Defines the reduction that is applied. Should be one of the following: + + - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes. + - ``'macro'``: Calculate the metric for each class separately, and average the + metrics across classes (with equal weights for each class). + - ``'weighted'``: Calculate the metric for each class separately, and average the + metrics across classes, weighting each class by its support (``tp + fn``). + - ``'none'`` or ``None``: Calculate the metric for each class separately, and return + the metric for every class. + - ``'samples'``: Calculate the metric for each sample, and average the metrics + across samples (with equal weights for each sample). + + .. note:: What is considered a sample in the multi-dimensional multi-class case + depends on the value of ``mdmc_average``. + + .. note:: If ``'none'`` and a given class doesn't occur in the `preds` or `target`, + the value for the class will be ``nan``. + + mdmc_average: + Defines how averaging is done for multi-dimensional multi-class inputs (on top of the + ``average`` parameter). Should be one of the following: + + - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional + multi-class. + + - ``'samplewise'``: In this case, the statistics are computed separately for each + sample on the ``N`` axis, and then averaged over samples. + The computation for each sample is done by treating the flattened extra axes ``...`` + (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample, + and computing the metric for the sample based on that. + + - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs + (see :ref:`references/modules:input types`) + are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they + were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. + num_classes: + Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods. + + threshold: + Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case + of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. + top_k: + Number of highest probability or logit score predictions considered to find the correct label, + relevant only for (multi-dimensional) multi-class inputs. The + default value (``None``) will be interpreted as 1 for these inputs. + + Should be left at default (``None``) for all other types of inputs. + multiclass: + Used only in certain special cases, where you want to treat inputs as a different type + than what they appear to be. 
See the parameter's + :ref:`documentation section ` + for a more detailed explanation and examples. + ignore_index: + Integer specifying a target class to ignore. If given, this class index does not contribute + to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` + or ``'none'``, the score for the ignored class will be returned as ``nan``. + subset_accuracy: + Whether to compute subset accuracy for multi-label and multi-dimensional + multi-class inputs (has no effect for other input types). + + - For multi-label inputs, if the parameter is set to ``True``, then all labels for + each sample must be correctly predicted for the sample to count as correct. If it + is set to ``False``, then all labels are counted separately - this is equivalent to + flattening inputs beforehand (i.e. ``preds = preds.flatten()`` and same for ``target``). + + - For multi-dimensional multi-class inputs, if the parameter is set to ``True``, then all + sub-samples (on the extra axis) must be correct for the sample to be counted as correct. + If it is set to ``False``, then all sub-samples are counted separately - this is equivalent, + in the case of label predictions, to flattening the inputs beforehand (i.e. + ``preds = preds.flatten()`` and same for ``target``). Note that the ``top_k`` parameter + still applies in both cases, if set. + + Raises: + ValueError: + If ``top_k`` parameter is set for ``multi-label`` inputs. + ValueError: + If ``average`` is none of ``"micro"``, ``"macro"``, ``"weighted"``, ``"samples"``, ``"none"``, ``None``. + ValueError: + If ``mdmc_average`` is not one of ``None``, ``"samplewise"``, ``"global"``. + ValueError: + If ``average`` is set but ``num_classes`` is not provided. + ValueError: + If ``num_classes`` is set + and ``ignore_index`` is not in the range ``[0, num_classes)``. + ValueError: + If ``top_k`` is not an ``integer`` larger than ``0``.
+ + Example: + >>> import paddleext.torchapi as B + >>> from paddlemetrics.functional import accuracy + >>> target = B.tensor([0, 1, 2, 3]) + >>> preds = B.tensor([0, 2, 1, 3]) + >>> accuracy(preds, target) + tensor(0.5000) + + >>> target = B.tensor([0, 1, 2]) + >>> preds = B.tensor([[0.1, 0.9, 0], [0.3, 0.1, 0.6], [0.2, 0.5, 0.3]]) + >>> accuracy(preds, target, top_k=2) + tensor(0.6667) + """ + allowed_average = ["micro", "macro", "weighted", "samples", "none", None] + if average not in allowed_average: + raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.") + + if average in ["macro", "weighted", "none", None] and (not num_classes or num_classes < 1): + raise ValueError(f"When you set `average` as {average}, you have to provide the number of classes.") + + allowed_mdmc_average = [None, "samplewise", "global"] + if mdmc_average not in allowed_mdmc_average: + raise ValueError(f"The `mdmc_average` has to be one of {allowed_mdmc_average}, got {mdmc_average}.") + + if num_classes and ignore_index is not None and (not 0 <= ignore_index < num_classes or num_classes == 1): + raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes") + + if top_k is not None and (not isinstance(top_k, int) or top_k <= 0): + raise ValueError(f"The `top_k` should be an integer larger than 0, got {top_k}") + + preds, target = _input_squeeze(preds, target) + mode = _mode(preds, target, threshold, top_k, num_classes, multiclass) + reduce = "macro" if average in ["weighted", "none", None] else average + + if subset_accuracy and _check_subset_validity(mode): + correct, total = _subset_accuracy_update(preds, target, threshold, top_k) + return _subset_accuracy_compute(correct, total) + tp, fp, tn, fn = _accuracy_update( + preds, target, reduce, mdmc_average, threshold, num_classes, top_k, multiclass, ignore_index, mode + ) + return _accuracy_compute(tp, fp, tn, fn, average, mdmc_average, mode) diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/auc.py b/EE/paddlemetric/src/paddlemetrics/functional/classification/auc.py new file mode 100644 index 000000000..0e2fddb3d --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/classification/auc.py @@ -0,0 +1,133 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Tuple + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + + +def _auc_update(x: Tensor, y: Tensor) -> Tuple[Tensor, Tensor]: + """Updates and returns variables required to compute area under the curve. Checks if the two input tensors have + the same number of elements and if they are 1d.
+ + Args: + x: x-coordinates + y: y-coordinates + """ + + if x.ndim > 1: + x = x.squeeze() + + if y.ndim > 1: + y = y.squeeze() + + if x.ndim > 1 or y.ndim > 1: + raise ValueError( + f"Expected both `x` and `y` tensor to be 1d, but got tensors with dimension {x.ndim} and {y.ndim}" + ) + if x.numel() != y.numel(): + raise ValueError( + f"Expected the same number of elements in `x` and `y` tensor but received {x.numel()} and {y.numel()}" + ) + return x, y + + +def _auc_compute_without_check(x: Tensor, y: Tensor, direction: float) -> Tensor: + """Computes area under the curve using the trapezoidal rule. Assumes increasing or decreasing order of `x`. + + Args: + x: x-coordinates, must be either increasing or decreasing + y: y-coordinates + direction: 1 if increasing, -1 if decreasing + + Example: + >>> x = B.tensor([0, 1, 2, 3]) + >>> y = B.tensor([0, 1, 2, 2]) + >>> x, y = _auc_update(x, y) + >>> _auc_compute_without_check(x, y, direction=1.0) + tensor(4.) + """ + + with B.no_grad(): + auc_: Tensor = B.trapz(y, x) * direction + return auc_ + + +def _auc_compute(x: Tensor, y: Tensor, reorder: bool = False) -> Tensor: + """Computes area under the curve using the trapezoidal rule. Checks for increasing or decreasing order of `x`. + + Args: + x: x-coordinates, must be either increasing or decreasing + y: y-coordinates + reorder: if True, will reorder the arrays to make it either increasing or decreasing + + Example: + >>> x = B.tensor([0, 1, 2, 3]) + >>> y = B.tensor([0, 1, 2, 2]) + >>> x, y = _auc_update(x, y) + >>> _auc_compute(x, y) + tensor(4.) + >>> _auc_compute(x, y, reorder=True) + tensor(4.) + """ + + with B.no_grad(): + if reorder: + # TODO: include stable=True arg when pytorch v1.9 is released + x, x_idx = B.sort(x) + y = y[x_idx] + + dx = x[1:] - x[:-1] + if (dx < 0).any(): + if (dx <= 0).all(): + direction = -1.0 + else: + raise ValueError( + "The `x` tensor is neither increasing nor decreasing. Try setting the reorder argument to `True`." + ) + else: + direction = 1.0 + return _auc_compute_without_check(x, y, direction) + + +def auc(x: Tensor, y: Tensor, reorder: bool = False) -> Tensor: + """Computes Area Under the Curve (AUC) using the trapezoidal rule. + + Args: + x: x-coordinates, must be either increasing or decreasing + y: y-coordinates + reorder: if True, will reorder the arrays to make it either increasing or decreasing + + Return: + Tensor containing AUC score (float) + + Raises: + ValueError: + If both ``x`` and ``y`` tensors are not ``1d``. + ValueError: + If both ``x`` and ``y`` don't have the same number of elements. + ValueError: + If the ``x`` tensor is neither increasing nor decreasing. + + Example: + >>> from paddlemetrics.functional import auc + >>> x = B.tensor([0, 1, 2, 3]) + >>> y = B.tensor([0, 1, 2, 2]) + >>> auc(x, y) + tensor(4.) + >>> auc(x, y, reorder=True) + tensor(4.) + """ + x, y = _auc_update(x, y) + return _auc_compute(x, y, reorder=reorder) diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/auroc.py b/EE/paddlemetric/src/paddlemetrics/functional/classification/auroc.py new file mode 100644 index 000000000..a393f20e5 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/classification/auroc.py @@ -0,0 +1,257 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
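+# A small sanity sketch for the trapezoidal rule used by `_auc_compute_without_check` above,
+# assuming 1d increasing x; the manual trapezoid sum should match `B.trapz`:
+#
+#   import paddleext.torchapi as B
+#
+#   x = B.tensor([0.0, 1.0, 2.0, 3.0])
+#   y = B.tensor([0.0, 1.0, 2.0, 2.0])
+#   manual = ((y[1:] + y[:-1]) / 2 * (x[1:] - x[:-1])).sum()  # sum of trapezoid areas
+#   # manual == B.trapz(y, x) == tensor(4.), matching the doctest of auc()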
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import warnings +from typing import Optional, Sequence, Tuple + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.classification.auc import _auc_compute_without_check +from paddlemetrics.functional.classification.roc import roc +from paddlemetrics.utilities.checks import _input_format_classification +from paddlemetrics.utilities.enums import AverageMethod, DataType +from paddlemetrics.utilities.imports import _TORCH_LOWER_1_6 + + +def _auroc_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor, DataType]: + """Updates and returns variables required to compute Area Under the Receiver Operating Characteristic Curve. + Validates the inputs and returns the mode of the inputs. + + Args: + preds: Predicted tensor + target: Ground truth tensor + """ + + # use _input_format_classification for validating the input and get the mode of data + _, _, mode = _input_format_classification(preds, target) + + if mode == "multi class multi dim": + n_classes = preds.shape[1] + preds = preds.transpose(0, 1).reshape(n_classes, -1).transpose(0, 1) + target = target.flatten() + if mode == "multi-label" and preds.ndim > 2: + n_classes = preds.shape[1] + preds = preds.transpose(0, 1).reshape(n_classes, -1).transpose(0, 1) + target = target.transpose(0, 1).reshape(n_classes, -1).transpose(0, 1) + + return preds, target, mode + + +def _auroc_compute( + preds: Tensor, + target: Tensor, + mode: DataType, + num_classes: Optional[int] = None, + pos_label: Optional[int] = None, + average: Optional[str] = "macro", + max_fpr: Optional[float] = None, + sample_weights: Optional[Sequence] = None, +) -> Tensor: + """Computes Area Under the Receiver Operating Characteristic Curve. + + Args: + preds: predictions from model (logits or probabilities) + target: Ground truth labels + mode: 'multi class multi dim' or 'multi-label' or 'binary' + num_classes: integer with number of classes for multi-label and multiclass problems. + Should be set to ``None`` for binary problems + pos_label: integer determining the positive class. + Should be set to ``None`` for binary problems + average: Defines the reduction that is applied to the output: + max_fpr: If not ``None``, calculates standardized partial AUC over the + range [0, max_fpr]. Should be a float between 0 and 1. + sample_weights: sample weights for each data point + + Example: + >>> # binary case + >>> preds = B.tensor([0.13, 0.26, 0.08, 0.19, 0.34]) + >>> target = B.tensor([0, 0, 1, 1, 1]) + >>> preds, target, mode = _auroc_update(preds, target) + >>> _auroc_compute(preds, target, mode, pos_label=1) + tensor(0.5000) + + >>> # multiclass case + >>> preds = B.tensor([[0.90, 0.05, 0.05], + ... [0.05, 0.90, 0.05], + ... [0.05, 0.05, 0.90], + ... [0.85, 0.05, 0.10], + ... 
[0.10, 0.10, 0.80]]) + >>> target = B.tensor([0, 1, 1, 2, 2]) + >>> preds, target, mode = _auroc_update(preds, target) + >>> _auroc_compute(preds, target, mode, num_classes=3) + tensor(0.7778) + """ + + # binary mode overrides num_classes + if mode == DataType.BINARY: + num_classes = 1 + + # check max_fpr parameter + if max_fpr is not None: + if not isinstance(max_fpr, float) or not 0 < max_fpr <= 1: + raise ValueError(f"`max_fpr` should be a float in range (0, 1], got: {max_fpr}") + + if _TORCH_LOWER_1_6: + raise RuntimeError( + "`max_fpr` argument requires `B.bucketize` which" " is not available below PyTorch version 1.6" + ) + + # max_fpr parameter is only supported for binary + if mode != DataType.BINARY: + raise ValueError( + f"Partial AUC computation not available in" + f" multilabel/multiclass setting, 'max_fpr' must be" + f" set to `None`, received `{max_fpr}`." + ) + + # calculate fpr, tpr + if mode == DataType.MULTILABEL: + if average == AverageMethod.MICRO: + fpr, tpr, _ = roc(preds.flatten(), target.flatten(), 1, pos_label, sample_weights) + elif num_classes: + # for multilabel we iteratively evaluate roc in a binary fashion + output = [ + roc(preds[:, i], target[:, i], num_classes=1, pos_label=1, sample_weights=sample_weights) + for i in range(num_classes) + ] + fpr = [o[0] for o in output] + tpr = [o[1] for o in output] + else: + raise ValueError("Detected input to be `multilabel` but you did not provide `num_classes` argument") + else: + if mode != DataType.BINARY: + if num_classes is None: + raise ValueError("Detected input to be `multiclass` but you did not provide `num_classes` argument") + if average == AverageMethod.WEIGHTED and len(B.unique(target)) < num_classes: + # If one or more classes have 0 observations, we should exclude them, as their weight will be 0 + target_bool_mat = B.zeros((len(target), num_classes), dtype=bool) + target_bool_mat[B.arange(len(target)), target.long()] = 1 + class_observed = target_bool_mat.sum(axis=0) > 0 + for c in range(num_classes): + if not class_observed[c]: + warnings.warn(f"Class {c} had 0 observations, omitted from AUROC calculation", UserWarning) + preds = preds[:, class_observed] + target = target_bool_mat[:, class_observed] + target = B.where(target)[1] + num_classes = class_observed.sum() + if num_classes == 1: + raise ValueError("Found 1 non-empty class in `multiclass` AUROC calculation") + fpr, tpr, _ = roc(preds, target, num_classes, pos_label, sample_weights) + + # calculate standard roc auc score + if max_fpr is None or max_fpr == 1: + if mode == DataType.MULTILABEL and average == AverageMethod.MICRO: + pass + elif num_classes != 1: + # calculate auc scores per class + auc_scores = [_auc_compute_without_check(x, y, 1.0) for x, y in zip(fpr, tpr)] + + # calculate average + if average == AverageMethod.NONE: + return tensor(auc_scores) + if average == AverageMethod.MACRO: + return B.mean(B.stack(auc_scores)) + if average == AverageMethod.WEIGHTED: + if mode == DataType.MULTILABEL: + support = B.sum(target, dim=0) + else: + support = B.bincount(target.flatten(), minlength=num_classes) + return B.sum(B.stack(auc_scores) * support / support.sum()) + + allowed_average = (AverageMethod.NONE.value, AverageMethod.MACRO.value, AverageMethod.WEIGHTED.value) + raise ValueError( + f"Argument `average` expected to be one of the following:" f" {allowed_average} but got {average}" + ) + + return _auc_compute_without_check(fpr, tpr, 1.0) + + _device = fpr.device if isinstance(fpr, Tensor) else fpr[0].device + max_area: Tensor = tensor(max_fpr,
device=_device) + # Add a single point at max_fpr and interpolate its tpr value + stop = B.bucketize(max_area, fpr, out_int32=True, right=True) + weight = (max_area - fpr[stop - 1]) / (fpr[stop] - fpr[stop - 1]) + interp_tpr: Tensor = B.lerp(tpr[stop - 1], tpr[stop], weight) + tpr = B.cat([tpr[:stop], interp_tpr.view(1)]) + fpr = B.cat([fpr[:stop], max_area.view(1)]) + + # Compute partial AUC + partial_auc = _auc_compute_without_check(fpr, tpr, 1.0) + + # McClish correction: standardize result to be 0.5 if non-discriminant and 1 if maximal + min_area: Tensor = 0.5 * max_area ** 2 + return 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area)) + + +def auroc( + preds: Tensor, + target: Tensor, + num_classes: Optional[int] = None, + pos_label: Optional[int] = None, + average: Optional[str] = "macro", + max_fpr: Optional[float] = None, + sample_weights: Optional[Sequence] = None, +) -> Tensor: + """Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_) + + Args: + preds: predictions from model (logits or probabilities) + target: Ground truth labels + num_classes: integer with number of classes for multi-label and multiclass problems. + Should be set to ``None`` for binary problems + pos_label: integer determining the positive class. Default is ``None`` + which for binary problems is translated to 1. For multiclass problems + this argument should not be set as we iteratively change it in the + range [0,num_classes-1] + average: + - ``'micro'`` computes metric globally. Only works for multilabel problems + - ``'macro'`` computes metric for each class and uniformly averages them + - ``'weighted'`` computes metric for each class and does a weighted-average, + where each class is weighted by their support (accounts for class imbalance) + - ``None`` computes and returns the metric per class + max_fpr: + If not ``None``, calculates standardized partial AUC over the + range [0, max_fpr]. Should be a float between 0 and 1. + sample_weights: sample weights for each data point + + Raises: + ValueError: + If ``max_fpr`` is not a ``float`` in the range ``(0, 1]``. + RuntimeError: + If ``PyTorch version`` is ``below 1.6`` since max_fpr requires `B.bucketize` + which is not available below 1.6. + ValueError: + If ``max_fpr`` is not set to ``None`` and the mode is ``not binary`` + since partial AUC computation is not available in multilabel/multiclass. + ValueError: + If ``average`` is none of ``None``, ``"macro"`` or ``"weighted"``. + + Example (binary case): + >>> from paddlemetrics.functional import auroc + >>> preds = B.tensor([0.13, 0.26, 0.08, 0.19, 0.34]) + >>> target = B.tensor([0, 0, 1, 1, 1]) + >>> auroc(preds, target, pos_label=1) + tensor(0.5000) + + Example (multiclass case): + >>> preds = B.tensor([[0.90, 0.05, 0.05], + ... [0.05, 0.90, 0.05], + ... [0.05, 0.05, 0.90], + ... [0.85, 0.05, 0.10], + ... [0.10, 0.10, 0.80]]) + >>> target = B.tensor([0, 1, 1, 2, 2]) + >>> auroc(preds, target, num_classes=3) + tensor(0.7778) + """ + preds, target, mode = _auroc_update(preds, target) + return _auroc_compute(preds, target, mode, num_classes, pos_label, average, max_fpr, sample_weights) diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/average_precision.py b/EE/paddlemetric/src/paddlemetrics/functional/classification/average_precision.py new file mode 100644 index 000000000..bc6118168 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/classification/average_precision.py @@ -0,0 +1,236 @@ +# Copyright The PyTorch Lightning team.
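+# A hedged usage sketch for the partial-AUC path of `auroc` above (the `max_fpr` branch),
+# assuming a backend new enough to provide `B.bucketize`; the exact value is not asserted here:
+#
+#   import paddleext.torchapi as B
+#   from paddlemetrics.functional import auroc
+#
+#   preds = B.tensor([0.13, 0.26, 0.08, 0.19, 0.34])
+#   target = B.tensor([0, 0, 1, 1, 1])
+#   auroc(preds, target, pos_label=1, max_fpr=0.5)  # standardized partial AUC over [0, 0.5]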
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import warnings +from typing import List, Optional, Sequence, Tuple, Union + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.functional.classification.precision_recall_curve import ( + _precision_recall_curve_compute, + _precision_recall_curve_update, +) + + +def _average_precision_update( + preds: Tensor, + target: Tensor, + num_classes: Optional[int] = None, + pos_label: Optional[int] = None, + average: Optional[str] = "macro", +) -> Tuple[Tensor, Tensor, int, Optional[int]]: + """Format the predictions and target based on the ``num_classes``, ``pos_label`` and ``average`` parameter. + + Args: + preds: predictions from model (logits or probabilities) + target: ground truth values + num_classes: integer with number of classes. + pos_label: integer determining the positive class. Default is ``None`` + which for binary problems is translated to 1. For multiclass problems + this argument should not be set as we iteratively change it in the + range [0,num_classes-1] + average: reduction method for multi-class or multi-label problems + """ + preds, target, num_classes, pos_label = _precision_recall_curve_update(preds, target, num_classes, pos_label) + if average == "micro": + if preds.ndim == target.ndim: + # Considering each element of the label indicator matrix as a label + preds = preds.flatten() + target = target.flatten() + num_classes = 1 + else: + raise ValueError("Cannot use `micro` average with multi-class input") + + return preds, target, num_classes, pos_label + + +def _average_precision_compute( + preds: Tensor, + target: Tensor, + num_classes: int, + pos_label: Optional[int] = None, + average: Optional[str] = "macro", + sample_weights: Optional[Sequence] = None, +) -> Union[List[Tensor], Tensor]: + """Computes the average precision score. + + Args: + preds: predictions from model (logits or probabilities) + target: ground truth values + num_classes: integer with number of classes. + pos_label: integer determining the positive class. Default is ``None`` + which for binary problems is translated to 1. For multiclass problems + this argument should not be set as we iteratively change it in the + range [0,num_classes-1] + average: reduction method for multi-class or multi-label problems + sample_weights: sample weights for each data point + + Example: + >>> # binary case + >>> preds = B.tensor([0, 1, 2, 3]) + >>> target = B.tensor([0, 1, 1, 1]) + >>> pos_label = 1 + >>> preds, target, num_classes, pos_label = _average_precision_update(preds, target, pos_label=pos_label) + >>> _average_precision_compute(preds, target, num_classes, pos_label) + tensor(1.) + + >>> # multiclass case + >>> preds = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05], + ... [0.05, 0.75, 0.05, 0.05, 0.05], + ... [0.05, 0.05, 0.75, 0.05, 0.05], + ...
[0.05, 0.05, 0.05, 0.75, 0.05]]) + >>> target = B.tensor([0, 1, 3, 2]) + >>> num_classes = 5 + >>> preds, target, num_classes, pos_label = _average_precision_update(preds, target, num_classes) + >>> _average_precision_compute(preds, target, num_classes, average=None) + [tensor(1.), tensor(1.), tensor(0.2500), tensor(0.2500), tensor(nan)] + """ + + # todo: `sample_weights` is unused + precision, recall, _ = _precision_recall_curve_compute(preds, target, num_classes, pos_label) + if average == "weighted": + if preds.ndim == target.ndim and target.ndim > 1: + weights = target.sum(dim=0).float() + else: + weights = B.bincount(target, minlength=num_classes).float() + weights = weights / B.sum(weights) + else: + weights = None + return _average_precision_compute_with_precision_recall(precision, recall, num_classes, average, weights) + + +def _average_precision_compute_with_precision_recall( + precision: Tensor, + recall: Tensor, + num_classes: int, + average: Optional[str] = "macro", + weights: Optional[Tensor] = None, +) -> Union[List[Tensor], Tensor]: + """Computes the average precision score from precision and recall. + + Args: + precision: precision values + recall: recall values + num_classes: integer with number of classes. Not necessary to provide + for binary problems. + average: reduction method for multi-class or multi-label problems + weights: weights to use when average='weighted' + + Example: + >>> # binary case + >>> preds = B.tensor([0, 1, 2, 3]) + >>> target = B.tensor([0, 1, 1, 1]) + >>> pos_label = 1 + >>> preds, target, num_classes, pos_label = _average_precision_update(preds, target, pos_label=pos_label) + >>> precision, recall, _ = _precision_recall_curve_compute(preds, target, num_classes, pos_label) + >>> _average_precision_compute_with_precision_recall(precision, recall, num_classes, average=None) + tensor(1.) + + >>> # multiclass case + >>> preds = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05], + ... [0.05, 0.75, 0.05, 0.05, 0.05], + ... [0.05, 0.05, 0.75, 0.05, 0.05], + ... [0.05, 0.05, 0.05, 0.75, 0.05]]) + >>> target = B.tensor([0, 1, 3, 2]) + >>> num_classes = 5 + >>> preds, target, num_classes, pos_label = _average_precision_update(preds, target, num_classes) + >>> precision, recall, _ = _precision_recall_curve_compute(preds, target, num_classes) + >>> _average_precision_compute_with_precision_recall(precision, recall, num_classes, average=None) + [tensor(1.), tensor(1.), tensor(0.2500), tensor(0.2500), tensor(nan)] + """ + + # Return the step function integral + # The following works because the last entry of precision is + # guaranteed to be 1, as returned by precision_recall_curve + if num_classes == 1: + return -B.sum((recall[1:] - recall[:-1]) * precision[:-1]) + + res = [] + for p, r in zip(precision, recall): + res.append(-B.sum((r[1:] - r[:-1]) * p[:-1])) + + # Reduce + if average in ("macro", "weighted"): + res = B.stack(res) + if B.isnan(res).any(): + warnings.warn( + "Average precision score for one or more classes was `nan`.
Ignoring these classes in average", + UserWarning, + ) + if average == "macro": + return res[~B.isnan(res)].mean() + weights = B.ones_like(res) if weights is None else weights + return (res * weights)[~B.isnan(res)].sum() + if average is None: + return res + allowed_average = ("micro", "macro", "weighted", None) + raise ValueError(f"Expected argument `average` to be one of {allowed_average}" f" but got {average}") + + +def average_precision( + preds: Tensor, + target: Tensor, + num_classes: Optional[int] = None, + pos_label: Optional[int] = None, + average: Optional[str] = "macro", + sample_weights: Optional[Sequence] = None, +) -> Union[List[Tensor], Tensor]: + """Computes the average precision score. + + Args: + preds: predictions from model (logits or probabilities) + target: ground truth values + num_classes: integer with number of classes. Not necessary to provide + for binary problems. + pos_label: integer determining the positive class. Default is ``None`` + which for binary problems is translated to 1. For multiclass problems + this argument should not be set as we iteratively change it in the + range [0,num_classes-1] + average: + defines the reduction that is applied in the case of multiclass and multilabel input. + Should be one of the following: + + - ``'macro'`` [default]: Calculate the metric for each class separately, and average the + metrics across classes (with equal weights for each class). + - ``'micro'``: Calculate the metric globally, across all samples and classes. Cannot be + used with multiclass input. + - ``'weighted'``: Calculate the metric for each class separately, and average the + metrics across classes, weighting each class by its support. + - ``'none'`` or ``None``: Calculate the metric for each class separately, and return + the metric for every class. + + sample_weights: sample weights for each data point + + Returns: + tensor with average precision. If multiclass, will return a list + of such tensors, one for each class + + Example (binary case): + >>> from paddlemetrics.functional import average_precision + >>> pred = B.tensor([0, 1, 2, 3]) + >>> target = B.tensor([0, 1, 1, 1]) + >>> average_precision(pred, target, pos_label=1) + tensor(1.) + + Example (multiclass case): + >>> pred = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05], + ... [0.05, 0.75, 0.05, 0.05, 0.05], + ... [0.05, 0.05, 0.75, 0.05, 0.05], + ... [0.05, 0.05, 0.05, 0.75, 0.05]]) + >>> target = B.tensor([0, 1, 3, 2]) + >>> average_precision(pred, target, num_classes=5, average=None) + [tensor(1.), tensor(1.), tensor(0.2500), tensor(0.2500), tensor(nan)] + """ + # fixme: `sample_weights` is unused + preds, target, num_classes, pos_label = _average_precision_update(preds, target, num_classes, pos_label, average) + return _average_precision_compute(preds, target, num_classes, pos_label, average, sample_weights) diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/calibration_error.py b/EE/paddlemetric/src/paddlemetrics/functional/classification/calibration_error.py new file mode 100644 index 000000000..132036417 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/classification/calibration_error.py @@ -0,0 +1,156 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
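+# A minimal sketch of the step-function integral used by `average_precision` above, for the
+# binary case where the curve tensors are 1d (mirrors the num_classes == 1 branch); it assumes
+# `precision_recall_curve` behaves as in the doctests elsewhere in this patch:
+#
+#   import paddleext.torchapi as B
+#   from paddlemetrics.functional.classification.average_precision import average_precision
+#   from paddlemetrics.functional.classification.precision_recall_curve import precision_recall_curve
+#
+#   preds = B.tensor([0, 1, 2, 3])
+#   target = B.tensor([0, 1, 1, 1])
+#   precision, recall, _ = precision_recall_curve(preds, target, pos_label=1)
+#   manual_ap = -B.sum((recall[1:] - recall[:-1]) * precision[:-1])
+#   # manual_ap should equal average_precision(preds, target, pos_label=1), i.e. tensor(1.)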
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Tuple + +import paddleext.torchapi as B +from paddleext.torchapi import FloatTensor, Tensor + +from paddlemetrics.utilities.checks import _input_format_classification +from paddlemetrics.utilities.enums import DataType + + +def _ce_compute( + confidences: FloatTensor, + accuracies: FloatTensor, + bin_boundaries: FloatTensor, + norm: str = "l1", + debias: bool = False, +) -> Tensor: + """Computes the calibration error given the provided bin boundaries and norm. + + Args: + confidences (FloatTensor): The confidence (i.e. predicted prob) of the top1 prediction. + accuracies (FloatTensor): 1.0 if the top-1 prediction was correct, 0.0 otherwise. + bin_boundaries (FloatTensor): Bin boundaries separating the linspace from 0 to 1. + norm (str, optional): Norm function to use when computing calibration error. Defaults to "l1". + debias (bool, optional): Apply debiasing to L2 norm computation as in + `Verified Uncertainty Calibration`_. Defaults to False. + + Raises: + ValueError: If an unsupported norm function is provided. + + Returns: + Tensor: Calibration error scalar. + """ + if norm not in {"l1", "l2", "max"}: + raise ValueError(f"Norm {norm} is not supported. Please select from l1, l2, or max.") + + conf_bin = B.zeros_like(bin_boundaries) + acc_bin = B.zeros_like(bin_boundaries) + prop_bin = B.zeros_like(bin_boundaries) + for i, (bin_lower, bin_upper) in enumerate(zip(bin_boundaries[:-1], bin_boundaries[1:])): + # Calculate the confidence and accuracy in each bin + in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item()) + prop_in_bin = in_bin.float().mean() + if prop_in_bin.item() > 0: + acc_bin[i] = accuracies[in_bin].float().mean() + conf_bin[i] = confidences[in_bin].mean() + prop_bin[i] = prop_in_bin + + if norm == "l1": + ce = B.sum(B.abs(acc_bin - conf_bin) * prop_bin) + elif norm == "max": + ce = B.max(B.abs(acc_bin - conf_bin)) + elif norm == "l2": + ce = B.sum(B.pow(acc_bin - conf_bin, 2) * prop_bin) + # NOTE: debiasing is disabled in the wrapper functions. This implementation differs from that in sklearn. + if debias: + # the order here (acc_bin - 1) vs (1 - acc_bin) is flipped from + # the equation in Verified Uncertainty Calibration (Kumar et al. 2019). + debias_bins = (acc_bin * (acc_bin - 1) * prop_bin) / (prop_bin * accuracies.size()[0] - 1) + ce += B.sum(B.nan_to_num(debias_bins)) # replace nans with zeros if nothing appeared in a bin + ce = B.sqrt(ce) if ce > 0 else B.tensor(0) + return ce + + +def _ce_update(preds: Tensor, target: Tensor) -> Tuple[FloatTensor, FloatTensor]: + """Given a predictions and targets tensor, computes the confidences of the top-1 prediction and records their + correctness. + + Args: + preds (Tensor): Input softmaxed predictions. + target (Tensor): Labels. + + Raises: + ValueError: If the dataset shape is not binary, multiclass, or multidimensional-multiclass.
+
+    Returns:
+        Tuple[FloatTensor, FloatTensor]: The ``confidences`` and ``accuracies`` tensors, cast to float.
+    """
+    _, _, mode = _input_format_classification(preds, target)
+
+    if mode == DataType.BINARY:
+        confidences, accuracies = preds, target
+    elif mode == DataType.MULTICLASS:
+        confidences, predictions = preds.max(dim=1)
+        accuracies = predictions.eq(target)
+    elif mode == DataType.MULTIDIM_MULTICLASS:
+        # reshape tensors
+        # for preds, move the class dimension to the final axis and flatten the rest
+        confidences, predictions = B.transpose(preds, 1, -1).flatten(0, -2).max(dim=1)
+        # for targets, just flatten the target
+        accuracies = predictions.eq(target.flatten())
+    else:
+        raise ValueError(
+            f"Calibration error is not well-defined for data with size {preds.size()} and targets {target.size()}."
+        )
+    # must be cast to float for ddp allgather to work
+    return confidences.float(), accuracies.float()
+
+
+def calibration_error(preds: Tensor, target: Tensor, n_bins: int = 15, norm: str = "l1") -> Tensor:
+    r"""
+    `Computes the Top-label Calibration Error`_
+
+    Three different norms are implemented, each corresponding to variations on the calibration error metric.
+
+    L1 norm (Expected Calibration Error)
+
+    .. math::
+        \text{ECE} = \frac{1}{N}\sum_i^N \|p_i - c_i\|
+
+    Infinity norm (Maximum Calibration Error)
+
+    .. math::
+        \text{MCE} = \max_{i} (p_i - c_i)
+
+    L2 norm (Root Mean Square Calibration Error)
+
+    .. math::
+        \text{RMSCE} = \sqrt{\frac{1}{N}\sum_i^N (p_i - c_i)^2}
+
+    Where :math:`p_i` is the top-1 prediction accuracy in
+    bin :math:`i` and :math:`c_i` is the average confidence of predictions in bin :math:`i`.
+
+    .. note::
+        L2-norm debiasing is not yet supported.
+
+    Args:
+        preds (Tensor): Model output probabilities.
+        target (Tensor): Ground-truth target class labels.
+        n_bins (int, optional): Number of bins to use when computing the calibration error. Defaults to 15.
+        norm (str, optional): Norm used to compare empirical and expected probability bins.
+            Defaults to "l1", or Expected Calibration Error.
+    """
+    if norm not in ("l1", "l2", "max"):
+        raise ValueError(f"Norm {norm} is not supported. Please select from l1, l2, or max.")
+
+    if not isinstance(n_bins, int) or n_bins <= 0:
+        raise ValueError(f"Expected argument `n_bins` to be an int larger than 0 but got {n_bins}")
+
+    confidences, accuracies = _ce_update(preds, target)
+
+    bin_boundaries = B.linspace(0, 1, n_bins + 1, dtype=B.float, device=preds.device)
+
+    return _ce_compute(confidences, accuracies, bin_boundaries, norm=norm)
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/cohen_kappa.py b/EE/paddlemetric/src/paddlemetrics/functional/classification/cohen_kappa.py
new file mode 100644
index 000000000..2face7a5c
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/classification/cohen_kappa.py
@@ -0,0 +1,112 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
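+# Worked illustration (added commentary, not upstream code): Cohen's kappa
+# compares the observed agreement p_o with the chance agreement p_e implied by
+# the confusion-matrix marginals. For the matrix [[2, 0], [1, 1]] used in the
+# doctests below:
+#   p_o = (2 + 1) / 4 = 0.75
+#   p_e = (2/4)*(3/4) + (2/4)*(1/4) = 0.5
+#   kappa = (p_o - p_e) / (1 - p_e) = 0.5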
+from typing import Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.classification.confusion_matrix import _confusion_matrix_compute, _confusion_matrix_update
+
+_cohen_kappa_update = _confusion_matrix_update
+
+
+def _cohen_kappa_compute(confmat: Tensor, weights: Optional[str] = None) -> Tensor:
+    """Computes Cohen's kappa based on the weighting type.
+
+    Args:
+        confmat: Confusion matrix without normalization
+        weights: Weighting type to calculate the score. Choose from
+
+            - ``None`` or ``'none'``: no weighting
+            - ``'linear'``: linear weighting
+            - ``'quadratic'``: quadratic weighting
+
+    Example:
+        >>> target = B.tensor([1, 1, 0, 0])
+        >>> preds = B.tensor([0, 1, 0, 0])
+        >>> confmat = _cohen_kappa_update(preds, target, num_classes=2)
+        >>> _cohen_kappa_compute(confmat)
+        tensor(0.5000)
+    """
+
+    confmat = _confusion_matrix_compute(confmat)
+    confmat = confmat.float() if not confmat.is_floating_point() else confmat
+    n_classes = confmat.shape[0]
+    sum0 = confmat.sum(dim=0, keepdim=True)
+    sum1 = confmat.sum(dim=1, keepdim=True)
+    expected = sum1 @ sum0 / sum0.sum()  # outer product
+
+    if weights is None:
+        w_mat = B.ones_like(confmat).flatten()
+        w_mat[:: n_classes + 1] = 0
+        w_mat = w_mat.reshape(n_classes, n_classes)
+    elif weights in ("linear", "quadratic"):
+        w_mat = B.zeros_like(confmat)
+        w_mat += B.arange(n_classes, dtype=w_mat.dtype, device=w_mat.device)
+        if weights == "linear":
+            w_mat = B.abs(w_mat - w_mat.T)
+        else:
+            w_mat = B.pow(w_mat - w_mat.T, 2.0)
+    else:
+        raise ValueError(
+            f"Received {weights} for argument ``weights`` but should be either" " None, 'linear' or 'quadratic'"
+        )
+
+    k = B.sum(w_mat * confmat) / B.sum(w_mat * expected)
+    return 1 - k
+
+
+def cohen_kappa(
+    preds: Tensor,
+    target: Tensor,
+    num_classes: int,
+    weights: Optional[str] = None,
+    threshold: float = 0.5,
+) -> Tensor:
+    r"""
+    Calculates `Cohen's kappa score`_ that measures inter-annotator agreement.
+    It is defined as
+
+    .. math::
+        \kappa = (p_o - p_e) / (1 - p_e)
+
+    where :math:`p_o` is the empirical probability of agreement and :math:`p_e` is
+    the expected agreement when both annotators assign labels randomly. Note that
+    :math:`p_e` is estimated using a per-annotator empirical prior over the
+    class labels.
+
+    Args:
+        preds: (float or long tensor), Either a ``(N, ...)`` tensor with labels or
+            ``(N, C, ...)`` where C is the number of classes, tensor with labels/probabilities
+
+        target: ``target`` (long tensor), tensor with shape ``(N, ...)`` with ground truth labels
+
+        num_classes: Number of classes in the dataset.
+
+        weights: Weighting type to calculate the score. Choose from
+
+            - ``None`` or ``'none'``: no weighting
+            - ``'linear'``: linear weighting
+            - ``'quadratic'``: quadratic weighting
+
+        threshold:
+            Threshold value for binary or multi-label probabilities.
default: 0.5 + + Example: + >>> from paddlemetrics.functional import cohen_kappa + >>> target = B.tensor([1, 1, 0, 0]) + >>> preds = B.tensor([0, 1, 0, 0]) + >>> cohen_kappa(preds, target, num_classes=2) + tensor(0.5000) + """ + confmat = _cohen_kappa_update(preds, target, num_classes, threshold) + return _cohen_kappa_compute(confmat, weights) diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/confusion_matrix.py b/EE/paddlemetric/src/paddlemetrics/functional/classification/confusion_matrix.py new file mode 100644 index 000000000..b4f3c12de --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/classification/confusion_matrix.py @@ -0,0 +1,184 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.utilities import rank_zero_warn +from paddlemetrics.utilities.checks import _input_format_classification +from paddlemetrics.utilities.enums import DataType + + +def _confusion_matrix_update( + preds: Tensor, target: Tensor, num_classes: int, threshold: float = 0.5, multilabel: bool = False +) -> Tensor: + """Updates and returns confusion matrix (without any normalization) based on the mode of the input. + + Args: + preds: Predicted tensor + target: Ground truth tensor + threshold: Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the + case of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. + multilabel: determines if data is multilabel or not. + """ + + preds, target, mode = _input_format_classification(preds, target, threshold) + if mode not in (DataType.BINARY, DataType.MULTILABEL): + preds = preds.argmax(dim=1) + target = target.argmax(dim=1) + if multilabel: + unique_mapping = ((2 * target + preds) + 4 * B.arange(num_classes, device=preds.device)).flatten() + minlength = 4 * num_classes + else: + unique_mapping = (target.view(-1) * num_classes + preds.view(-1)).to(B.long) + minlength = num_classes ** 2 + + bins = B.bincount(unique_mapping, minlength=minlength) + if multilabel: + confmat = bins.reshape(num_classes, 2, 2) + else: + confmat = bins.reshape(num_classes, num_classes) + return confmat + + +def _confusion_matrix_compute(confmat: Tensor, normalize: Optional[str] = None) -> Tensor: + """Computes confusion matrix based on the normalization mode. + + Args: + confmat: Confusion matrix without normalization + normalize: Normalization mode for confusion matrix. 
Choose from
+
+            - ``None`` or ``'none'``: no normalization (default)
+            - ``'true'``: normalization over the targets (most commonly used)
+            - ``'pred'``: normalization over the predictions
+            - ``'all'``: normalization over the whole matrix
+
+    Example:
+        >>> # binary case
+        >>> target = B.tensor([1, 1, 0, 0])
+        >>> preds = B.tensor([0, 1, 0, 0])
+        >>> confmat = _confusion_matrix_update(preds, target, num_classes=2)
+        >>> _confusion_matrix_compute(confmat)
+        tensor([[2, 0],
+                [1, 1]])
+
+        >>> # multiclass case
+        >>> target = B.tensor([2, 1, 0, 0])
+        >>> preds = B.tensor([2, 1, 0, 1])
+        >>> confmat = _confusion_matrix_update(preds, target, num_classes=3)
+        >>> _confusion_matrix_compute(confmat)
+        tensor([[1, 1, 0],
+                [0, 1, 0],
+                [0, 0, 1]])
+
+        >>> # multilabel case
+        >>> target = B.tensor([[0, 1, 0], [1, 0, 1]])
+        >>> preds = B.tensor([[0, 0, 1], [1, 0, 1]])
+        >>> confmat = _confusion_matrix_update(preds, target, num_classes=3, multilabel=True)
+        >>> _confusion_matrix_compute(confmat)  # doctest: +NORMALIZE_WHITESPACE
+        tensor([[[1, 0], [0, 1]],
+                [[1, 0], [1, 0]],
+                [[0, 1], [0, 1]]])
+    """
+
+    allowed_normalize = ("true", "pred", "all", "none", None)
+    if normalize not in allowed_normalize:
+        raise ValueError(f"Argument `normalize` needs to be one of the following: {allowed_normalize}")
+    if normalize is not None and normalize != "none":
+        confmat = confmat.float() if not confmat.is_floating_point() else confmat
+        if normalize == "true":
+            confmat = confmat / confmat.sum(axis=1, keepdim=True)
+        elif normalize == "pred":
+            confmat = confmat / confmat.sum(axis=0, keepdim=True)
+        elif normalize == "all":
+            confmat = confmat / confmat.sum()
+
+        nan_elements = confmat[B.isnan(confmat)].nelement()
+        if nan_elements != 0:
+            confmat[B.isnan(confmat)] = 0
+            rank_zero_warn(f"{nan_elements} nan values found in confusion matrix have been replaced with zeros.")
+    return confmat
+
+
+def confusion_matrix(
+    preds: Tensor,
+    target: Tensor,
+    num_classes: int,
+    normalize: Optional[str] = None,
+    threshold: float = 0.5,
+    multilabel: bool = False,
+) -> Tensor:
+    r"""
+    Computes the `confusion matrix`_. Works with binary,
+    multiclass, and multilabel data. Accepts probabilities or logits from a model output or integer class
+    values in prediction. Works with multi-dimensional preds and target, but it should be noted that
+    additional dimensions will be flattened.
+
+    If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument
+    to convert into integer labels. This is the case for binary and multi-label probabilities or logits.
+
+    If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``.
+
+    If working with multilabel data, setting the ``multilabel`` argument to ``True`` will make sure that a
+    `confusion matrix gets calculated per label`_.
+
+    Args:
+        preds: (float or long tensor), Either a ``(N, ...)`` tensor with labels or
+            ``(N, C, ...)`` where C is the number of classes, tensor with labels/logits/probabilities
+        target: ``target`` (long tensor), tensor with shape ``(N, ...)`` with ground truth labels
+        num_classes: Number of classes in the dataset.
+        normalize: Normalization mode for confusion matrix.
Choose from + + - ``None`` or ``'none'``: no normalization (default) + - ``'true'``: normalization over the targets (most commonly used) + - ``'pred'``: normalization over the predictions + - ``'all'``: normalization over the whole matrix + + threshold: + Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case + of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. + + multilabel: + determines if data is multilabel or not. + + Example (binary data): + >>> from paddlemetrics import ConfusionMatrix + >>> target = B.tensor([1, 1, 0, 0]) + >>> preds = B.tensor([0, 1, 0, 0]) + >>> confmat = ConfusionMatrix(num_classes=2) + >>> confmat(preds, target) + tensor([[2., 0.], + [1., 1.]]) + + Example (multiclass data): + >>> target = B.tensor([2, 1, 0, 0]) + >>> preds = B.tensor([2, 1, 0, 1]) + >>> confmat = ConfusionMatrix(num_classes=3) + >>> confmat(preds, target) + tensor([[1., 1., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + + Example (multilabel data): + >>> target = B.tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = B.tensor([[0, 0, 1], [1, 0, 1]]) + >>> confmat = ConfusionMatrix(num_classes=3, multilabel=True) + >>> confmat(preds, target) # doctest: +NORMALIZE_WHITESPACE + tensor([[[1., 0.], [0., 1.]], + [[1., 0.], [1., 0.]], + [[0., 1.], [0., 1.]]]) + + """ + confmat = _confusion_matrix_update(preds, target, num_classes, threshold, multilabel) + return _confusion_matrix_compute(confmat, normalize) diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/dice.py b/EE/paddlemetric/src/paddlemetrics/functional/classification/dice.py new file mode 100644 index 000000000..5f90fe02b --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/classification/dice.py @@ -0,0 +1,112 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Tuple + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.utilities.data import to_categorical +from paddlemetrics.utilities.distributed import reduce + + +def _stat_scores( + preds: Tensor, + target: Tensor, + class_index: int, + argmax_dim: int = 1, +) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: + """Calculates the number of true positive, false positive, true negative and false negative for a specific + class. 
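+
+    For a given ``class_index`` the four counts partition the positions: a position is a
+    true positive when both ``preds`` and ``target`` equal the class, a false positive when
+    only ``preds`` does, a false negative when only ``target`` does, and a true negative
+    otherwise. ``sup`` is the support, i.e. the number of ``target`` entries equal to the class.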
+ + Args: + preds: prediction tensor + target: target tensor + class_index: class to calculate over + argmax_dim: if pred is a tensor of probabilities, this indicates the + axis the argmax transformation will be applied over + + Return: + True Positive, False Positive, True Negative, False Negative, Support + + Example: + >>> x = B.tensor([1, 2, 3]) + >>> y = B.tensor([0, 2, 3]) + >>> tp, fp, tn, fn, sup = _stat_scores(x, y, class_index=1) + >>> tp, fp, tn, fn, sup + (tensor(0), tensor(1), tensor(2), tensor(0), tensor(0)) + """ + if preds.ndim == target.ndim + 1: + preds = to_categorical(preds, argmax_dim=argmax_dim) + + tp = ((preds == class_index) * (target == class_index)).to(B.long).sum() + fp = ((preds == class_index) * (target != class_index)).to(B.long).sum() + tn = ((preds != class_index) * (target != class_index)).to(B.long).sum() + fn = ((preds != class_index) * (target == class_index)).to(B.long).sum() + sup = (target == class_index).to(B.long).sum() + + return tp, fp, tn, fn, sup + + +def dice_score( + preds: Tensor, + target: Tensor, + bg: bool = False, + nan_score: float = 0.0, + no_fg_score: float = 0.0, + reduction: str = "elementwise_mean", +) -> Tensor: + """Compute dice score from prediction scores. + + Args: + preds: estimated probabilities + target: ground-truth labels + bg: whether to also compute dice for the background + nan_score: score to return, if a NaN occurs during computation + no_fg_score: score to return, if no foreground pixel was found in target + reduction: a method to reduce metric score over labels. + + - ``'elementwise_mean'``: takes the mean (default) + - ``'sum'``: takes the sum + - ``'none'``: no reduction will be applied + + Return: + Tensor containing dice score + + Example: + >>> from paddlemetrics.functional import dice_score + >>> pred = B.tensor([[0.85, 0.05, 0.05, 0.05], + ... [0.05, 0.85, 0.05, 0.05], + ... [0.05, 0.05, 0.85, 0.05], + ... [0.05, 0.05, 0.05, 0.85]]) + >>> target = B.tensor([0, 1, 3, 2]) + >>> dice_score(pred, target) + tensor(0.3333) + """ + num_classes = preds.shape[1] + bg_inv = 1 - int(bg) + scores = B.zeros(num_classes - bg_inv, device=preds.device, dtype=B.float32) + for i in range(bg_inv, num_classes): + if not (target == i).any(): + # no foreground class + scores[i - bg_inv] += no_fg_score + continue + + # TODO: rewrite to use general `stat_scores` + tp, fp, _, fn, _ = _stat_scores(preds=preds, target=target, class_index=i) + denom = (2 * tp + fp + fn).to(B.float) + # nan result + score_cls = (2 * tp).to(B.float) / denom if B.is_nonzero(denom) else nan_score + scores[i - bg_inv] += score_cls.item() + + return reduce(scores, reduction=reduction) diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/f_beta.py b/EE/paddlemetric/src/paddlemetrics/functional/classification/f_beta.py new file mode 100644 index 000000000..7b9b626ce --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/classification/f_beta.py @@ -0,0 +1,351 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.functional.classification.stat_scores import _reduce_stat_scores, _stat_scores_update +from paddlemetrics.utilities.enums import AverageMethod as AvgMethod +from paddlemetrics.utilities.enums import MDMCAverageMethod + + +def _safe_divide(num: Tensor, denom: Tensor) -> Tensor: + """prevent zero division.""" + denom[denom == 0.0] = 1 + return num / denom + + +def _fbeta_compute( + tp: Tensor, + fp: Tensor, + tn: Tensor, + fn: Tensor, + beta: float, + ignore_index: Optional[int], + average: str, + mdmc_average: Optional[str], +) -> Tensor: + """Computes f_beta metric from stat scores: true positives, false positives, true negatives, false negatives. + + Args: + tp: True positives + fp: False positives + tn: True negatives + fn: False negatives + beta: The parameter `beta` (which determines the weight of recall in the combined score) + ignore_index: Integer specifying a target class to ignore. If given, this class index does not contribute + to the returned score, regardless of reduction method + average: Defines the reduction that is applied + mdmc_average: Defines how averaging is done for multi-dimensional multi-class inputs (on top of the + ``average`` parameter) + + Example: + >>> from paddlemetrics.functional.classification.stat_scores import _stat_scores_update + >>> target = B.tensor([0, 1, 2, 0, 1, 2]) + >>> preds = B.tensor([0, 2, 1, 0, 0, 1]) + >>> tp, fp, tn, fn = _stat_scores_update( + ... preds, + ... target, + ... reduce='micro', + ... num_classes=3, + ... ) + >>> _fbeta_compute(tp, fp, tn, fn, beta=0.5, ignore_index=None, average='micro', mdmc_average=None) + tensor(0.3333) + """ + if average == AvgMethod.MICRO and mdmc_average != MDMCAverageMethod.SAMPLEWISE: + mask = tp >= 0 + precision = _safe_divide(tp[mask].sum().float(), (tp[mask] + fp[mask]).sum()) + recall = _safe_divide(tp[mask].sum().float(), (tp[mask] + fn[mask]).sum()) + else: + precision = _safe_divide(tp.float(), tp + fp) + recall = _safe_divide(tp.float(), tp + fn) + + num = (1 + beta ** 2) * precision * recall + denom = beta ** 2 * precision + recall + denom[denom == 0.0] = 1.0 # avoid division by 0 + + # if classes matter and a given class is not present in both the preds and the target, + # computing the score for this class is meaningless, thus they should be ignored + if average == AvgMethod.NONE and mdmc_average != MDMCAverageMethod.SAMPLEWISE: + # a class is not present if there exists no TPs, no FPs, and no FNs + meaningless_indeces = B.nonzero((tp | fn | fp) == 0).cpu() + if ignore_index is None: + ignore_index = meaningless_indeces + else: + ignore_index = B.unique(B.cat((meaningless_indeces, B.tensor([[ignore_index]])))) + + if ignore_index is not None: + if average not in (AvgMethod.MICRO, AvgMethod.SAMPLES) and mdmc_average == MDMCAverageMethod.SAMPLEWISE: + num[..., ignore_index] = -1 + denom[..., ignore_index] = -1 + elif average not in (AvgMethod.MICRO, AvgMethod.SAMPLES): + num[ignore_index, ...] = -1 + denom[ignore_index, ...] 
= -1 + + if average == AvgMethod.MACRO and mdmc_average != MDMCAverageMethod.SAMPLEWISE: + cond = (tp + fp + fn == 0) | (tp + fp + fn == -3) + num = num[~cond] + denom = denom[~cond] + + return _reduce_stat_scores( + numerator=num, + denominator=denom, + weights=None if average != AvgMethod.WEIGHTED else tp + fn, + average=average, + mdmc_average=mdmc_average, + ) + + +def fbeta( + preds: Tensor, + target: Tensor, + beta: float = 1.0, + average: str = "micro", + mdmc_average: Optional[str] = None, + ignore_index: Optional[int] = None, + num_classes: Optional[int] = None, + threshold: float = 0.5, + top_k: Optional[int] = None, + multiclass: Optional[bool] = None, +) -> Tensor: + r""" + Computes f_beta metric. + + .. math:: + F_{\beta} = (1 + \beta^2) * \frac{\text{precision} * \text{recall}} + {(\beta^2 * \text{precision}) + \text{recall}} + + Works with binary, multiclass, and multilabel data. + Accepts probabilities or logits from a model output or integer class values in prediction. + Works with multi-dimensional preds and target. + + If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument + to convert into integer labels. This is the case for binary and multi-label logits or probabilities. + + If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``. + + The reduction method (how the precision scores are aggregated) is controlled by the + ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the + multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`. + + Args: + preds: Predictions from model (probabilities, logits or labels) + target: Ground truth values + average: + Defines the reduction that is applied. Should be one of the following: + + - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes. + - ``'macro'``: Calculate the metric for each class separately, and average the + metrics across classes (with equal weights for each class). + - ``'weighted'``: Calculate the metric for each class separately, and average the + metrics across classes, weighting each class by its support (``tp + fn``). + - ``'none'`` or ``None``: Calculate the metric for each class separately, and return + the metric for every class. + - ``'samples'``: Calculate the metric for each sample, and average the metrics + across samples (with equal weights for each sample). + + .. note:: What is considered a sample in the multi-dimensional multi-class case + depends on the value of ``mdmc_average``. + + .. note:: If ``'none'`` and a given class doesn't occur in the `preds` or `target`, + the value for the class will be ``nan``. + + mdmc_average: + Defines how averaging is done for multi-dimensional multi-class inputs (on top of the + ``average`` parameter). Should be one of the following: + + - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional + multi-class. + - ``'samplewise'``: In this case, the statistics are computed separately for each + sample on the ``N`` axis, and then averaged over samples. + The computation for each sample is done by treating the flattened extra axes ``...`` + (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample, + and computing the metric for the sample based on that. 
+ - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs + (see :ref:`references/modules:input types`) + are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they + were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. + + ignore_index: + Integer specifying a target class to ignore. If given, this class index does not contribute + to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` + or ``'none'``, the score for the ignored class will be returned as ``nan``. + num_classes: + Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods. + threshold: + Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case + of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. + top_k: + Number of highest probability or logit score predictions considered to find the correct label, + relevant only for (multi-dimensional) multi-class inputs. The + default value (``None``) will be interpreted as 1 for these inputs. + + Should be left at default (``None``) for all other types of inputs. + multiclass: + Used only in certain special cases, where you want to treat inputs as a different type + than what they appear to be. See the parameter's + :ref:`documentation section ` + for a more detailed explanation and examples. + + Return: + The shape of the returned tensor depends on the ``average`` parameter + + - If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned + - If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number + of classes + + Example: + >>> from paddlemetrics.functional import fbeta + >>> target = B.tensor([0, 1, 2, 0, 1, 2]) + >>> preds = B.tensor([0, 2, 1, 0, 0, 1]) + >>> fbeta(preds, target, num_classes=3, beta=0.5) + tensor(0.3333) + + """ + allowed_average = list(AvgMethod) + if average not in allowed_average: + raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.") + + if mdmc_average is not None and MDMCAverageMethod.from_str(mdmc_average) is None: + raise ValueError(f"The `mdmc_average` has to be one of {list(MDMCAverageMethod)}, got {mdmc_average}.") + + if average in [AvgMethod.MACRO, AvgMethod.WEIGHTED, AvgMethod.NONE] and (not num_classes or num_classes < 1): + raise ValueError(f"When you set `average` as {average}, you have to provide the number of classes.") + + if num_classes and ignore_index is not None and (not 0 <= ignore_index < num_classes or num_classes == 1): + raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes") + + reduce = AvgMethod.MACRO if average in [AvgMethod.WEIGHTED, AvgMethod.NONE] else average + tp, fp, tn, fn = _stat_scores_update( + preds, + target, + reduce=reduce, + mdmc_reduce=mdmc_average, + threshold=threshold, + num_classes=num_classes, + top_k=top_k, + multiclass=multiclass, + ignore_index=ignore_index, + ) + + return _fbeta_compute(tp, fp, tn, fn, beta, ignore_index, average, mdmc_average) + + +def f1( + preds: Tensor, + target: Tensor, + beta: float = 1.0, + average: str = "micro", + mdmc_average: Optional[str] = None, + ignore_index: Optional[int] = None, + num_classes: Optional[int] = None, + threshold: float = 0.5, + top_k: Optional[int] = None, + multiclass: Optional[bool] = None, +) -> Tensor: + """Computes F1 metric. 
F1 metrics correspond to the harmonic mean of the precision and recall scores.
+
+    Works with binary, multiclass, and multilabel data.
+    Accepts probabilities or logits from a model output or integer class values in prediction.
+    Works with multi-dimensional preds and target.
+
+    If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument
+    to convert into integer labels. This is the case for binary and multi-label probabilities or logits.
+
+    If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``.
+
+    The reduction method (how the precision scores are aggregated) is controlled by the
+    ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the
+    multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`.
+
+    Args:
+        preds: Predictions from model (probabilities, logits or labels)
+        target: Ground truth values
+        average:
+            Defines the reduction that is applied. Should be one of the following:
+
+            - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes.
+            - ``'macro'``: Calculate the metric for each class separately, and average the
+              metrics across classes (with equal weights for each class).
+            - ``'weighted'``: Calculate the metric for each class separately, and average the
+              metrics across classes, weighting each class by its support (``tp + fn``).
+            - ``'none'`` or ``None``: Calculate the metric for each class separately, and return
+              the metric for every class.
+            - ``'samples'``: Calculate the metric for each sample, and average the metrics
+              across samples (with equal weights for each sample).
+
+            .. note:: What is considered a sample in the multi-dimensional multi-class case
+                depends on the value of ``mdmc_average``.
+
+            .. note:: If ``'none'`` and a given class doesn't occur in the `preds` or `target`,
+                the value for the class will be ``nan``.
+
+        mdmc_average:
+            Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
+            ``average`` parameter). Should be one of the following:
+
+            - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional
+              multi-class.
+
+            - ``'samplewise'``: In this case, the statistics are computed separately for each
+              sample on the ``N`` axis, and then averaged over samples.
+              The computation for each sample is done by treating the flattened extra axes ``...``
+              (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample,
+              and computing the metric for the sample based on that.
+
+            - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs
+              (see :ref:`references/modules:input types`)
+              are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they
+              were ``(N_X, C)``. From here on the ``average`` parameter applies as usual.
+
+        ignore_index:
+            Integer specifying a target class to ignore. If given, this class index does not contribute
+            to the returned score, regardless of reduction method. If an index is ignored, and ``average=None``
+            or ``'none'``, the score for the ignored class will be returned as ``nan``.
+
+        num_classes:
+            Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods.
+
+        threshold:
+            Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
+            of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
+ top_k: + Number of highest probability or logit score predictions considered to find the correct label, + relevant only for (multi-dimensional) multi-class inputs. The + default value (``None``) will be interpreted as 1 for these inputs. + + Should be left at default (``None``) for all other types of inputs. + + multiclass: + Used only in certain special cases, where you want to treat inputs as a different type + than what they appear to be. See the parameter's + :ref:`documentation section ` + for a more detailed explanation and examples. + + Return: + The shape of the returned tensor depends on the ``average`` parameter + + - If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned + - If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number + of classes + + Example: + >>> from paddlemetrics.functional import f1 + >>> target = B.tensor([0, 1, 2, 0, 1, 2]) + >>> preds = B.tensor([0, 2, 1, 0, 0, 1]) + >>> f1(preds, target, num_classes=3) + tensor(0.3333) + """ + return fbeta(preds, target, 1.0, average, mdmc_average, ignore_index, num_classes, threshold, top_k, multiclass) diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/hamming_distance.py b/EE/paddlemetric/src/paddlemetrics/functional/classification/hamming_distance.py new file mode 100644 index 000000000..e3f95bad4 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/classification/hamming_distance.py @@ -0,0 +1,97 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Tuple, Union + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.utilities.checks import _input_format_classification + + +def _hamming_distance_update( + preds: Tensor, + target: Tensor, + threshold: float = 0.5, +) -> Tuple[Tensor, int]: + """Returns the number of positions where prediction equals target, and number of predictions. + + Args: + preds: Predicted tensor + target: Ground truth tensor + threshold: Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case + of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. + """ + + preds, target, _ = _input_format_classification(preds, target, threshold=threshold) + + correct = (preds == target).sum() + total = preds.numel() + + return correct, total + + +def _hamming_distance_compute(correct: Tensor, total: Union[int, Tensor]) -> Tensor: + """Computes the Hamming distance. 
+ + Args: + correct: Number of positions where prediction equals target + total: Total number of predictions + + Example: + >>> target = B.tensor([[0, 1], [1, 1]]) + >>> preds = B.tensor([[0, 1], [0, 1]]) + >>> correct, total = _hamming_distance_update(preds, target) + >>> _hamming_distance_compute(correct, total) + tensor(0.2500) + """ + + return 1 - correct.float() / total + + +def hamming_distance(preds: Tensor, target: Tensor, threshold: float = 0.5) -> Tensor: + r""" + Computes the average `Hamming distance`_ (also + known as Hamming loss) between targets and predictions: + + .. math:: + \text{Hamming distance} = \frac{1}{N \cdot L} \sum_i^N \sum_l^L 1(y_{il} \neq \hat{y}_{il}) + + Where :math:`y` is a tensor of target values, :math:`\hat{y}` is a tensor of predictions, + and :math:`\bullet_{il}` refers to the :math:`l`-th label of the :math:`i`-th sample of that + tensor. + + This is the same as ``1-accuracy`` for binary data, while for all other types of inputs it + treats each possible label separately - meaning that, for example, multi-class data is + treated as if it were multi-label. + + Accepts all input types listed in :ref:`references/modules:input types`. + + Args: + preds: Predictions from model (probabilities, logits or labels) + target: Ground truth + threshold: + Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case + of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. + + Example: + >>> from paddlemetrics.functional import hamming_distance + >>> target = B.tensor([[0, 1], [1, 1]]) + >>> preds = B.tensor([[0, 1], [0, 1]]) + >>> hamming_distance(preds, target) + tensor(0.2500) + + """ + + correct, total = _hamming_distance_update(preds, target, threshold) + return _hamming_distance_compute(correct, total) diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/hinge.py b/EE/paddlemetric/src/paddlemetrics/functional/classification/hinge.py new file mode 100644 index 000000000..59d8be1af --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/classification/hinge.py @@ -0,0 +1,231 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Optional, Tuple, Union + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.utilities.checks import _input_squeeze +from paddlemetrics.utilities.data import to_onehot +from paddlemetrics.utilities.enums import DataType, EnumStr + + +class MulticlassMode(EnumStr): + """Enum to represent possible multiclass modes of hinge. + + >>> "Crammer-Singer" in list(MulticlassMode) + True + """ + + CRAMMER_SINGER = "crammer-singer" + ONE_VS_ALL = "one-vs-all" + + +def _check_shape_and_type_consistency_hinge( + preds: Tensor, + target: Tensor, +) -> DataType: + """Checks shape and type of `preds` and `target` and returns mode of the input tensors. 
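+
+    The returned mode drives how ``_hinge_update`` forms the margins: one-dimensional
+    score vectors are treated as binary input, while ``(N, C)`` score matrices are
+    treated as multiclass scores.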
+ + Args: + preds: Predicted tensor + target: Ground truth tensor + + Raises: + `ValueError`: if `target` is not one dimensional + `ValueError`: if `preds` and `target` do not have the same shape in the first dimension + `ValueError`: if `pred` is neither one nor two dimensional + """ + + if target.ndim > 1: + raise ValueError( + f"The `target` should be one dimensional, got `target` with shape={target.shape}.", + ) + + if preds.ndim == 1: + if preds.shape != target.shape: + raise ValueError( + "The `preds` and `target` should have the same shape,", + f" got `preds` with shape={preds.shape} and `target` with shape={target.shape}.", + ) + mode = DataType.BINARY + elif preds.ndim == 2: + if preds.shape[0] != target.shape[0]: + raise ValueError( + "The `preds` and `target` should have the same shape in the first dimension,", + f" got `preds` with shape={preds.shape} and `target` with shape={target.shape}.", + ) + mode = DataType.MULTICLASS + else: + raise ValueError(f"The `preds` should be one or two dimensional, got `preds` with shape={preds.shape}.") + return mode + + +def _hinge_update( + preds: Tensor, + target: Tensor, + squared: bool = False, + multiclass_mode: Optional[Union[str, MulticlassMode]] = None, +) -> Tuple[Tensor, Tensor]: + """Updates and returns sum over Hinge loss scores for each observation and the total number of observations. + + Args: + preds: Predicted tensor + target: Ground truth tensor + squared: If True, this will compute the squared hinge loss. Otherwise, computes the regular hinge loss. + multiclass_mode: + Which approach to use for multi-class inputs (has no effect in the binary case). ``None`` (default), + ``MulticlassMode.CRAMMER_SINGER`` or ``"crammer-singer"``, uses the Crammer Singer multi-class hinge loss. + ``MulticlassMode.ONE_VS_ALL`` or ``"one-vs-all"`` computes the hinge loss in a one-vs-all fashion. + """ + preds, target = _input_squeeze(preds, target) + + mode = _check_shape_and_type_consistency_hinge(preds, target) + + if mode == DataType.MULTICLASS: + target = to_onehot(target, max(2, preds.shape[1])).bool() + + if mode == DataType.MULTICLASS and (multiclass_mode is None or multiclass_mode == MulticlassMode.CRAMMER_SINGER): + margin = preds[target] + margin -= B.max(preds[~target].view(preds.shape[0], -1), dim=1)[0] + elif mode == DataType.BINARY or multiclass_mode == MulticlassMode.ONE_VS_ALL: + target = target.bool() + margin = B.zeros_like(preds) + margin[target] = preds[target] + margin[~target] = -preds[~target] + else: + raise ValueError( + "The `multiclass_mode` should be either None / 'crammer-singer' / MulticlassMode.CRAMMER_SINGER" + "(default) or 'one-vs-all' / MulticlassMode.ONE_VS_ALL," + f" got {multiclass_mode}." + ) + + measures = 1 - margin + measures = B.clamp(measures, 0) + + if squared: + measures = measures.pow(2) + + total = tensor(target.shape[0], device=target.device) + return measures.sum(dim=0), total + + +def _hinge_compute(measure: Tensor, total: Tensor) -> Tensor: + """Computes mean Hinge loss. 
+
+    Args:
+        measure: Sum over hinge losses for each observation
+        total: Number of observations
+
+    Example:
+        >>> # binary case
+        >>> target = B.tensor([0, 1, 1])
+        >>> preds = B.tensor([-2.2, 2.4, 0.1])
+        >>> measure, total = _hinge_update(preds, target)
+        >>> _hinge_compute(measure, total)
+        tensor(0.3000)
+
+        >>> # multiclass case
+        >>> target = B.tensor([0, 1, 2])
+        >>> preds = B.tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]])
+        >>> measure, total = _hinge_update(preds, target)
+        >>> _hinge_compute(measure, total)
+        tensor(2.9000)
+
+        >>> # multiclass one-vs-all mode case
+        >>> target = B.tensor([0, 1, 2])
+        >>> preds = B.tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]])
+        >>> measure, total = _hinge_update(preds, target, multiclass_mode="one-vs-all")
+        >>> _hinge_compute(measure, total)
+        tensor([2.2333, 1.5000, 1.2333])
+    """
+
+    return measure / total
+
+
+def hinge(
+    preds: Tensor,
+    target: Tensor,
+    squared: bool = False,
+    multiclass_mode: Optional[Union[str, MulticlassMode]] = None,
+) -> Tensor:
+    r"""
+    Computes the mean `Hinge loss`_ typically used for Support Vector Machines (SVMs).
+
+    In the binary case it is defined as:
+
+    .. math::
+        \text{Hinge loss} = \max(0, 1 - y \times \hat{y})
+
+    Where :math:`y \in \{-1, 1\}` is the target, and :math:`\hat{y} \in \mathbb{R}` is the prediction.
+
+    In the multi-class case, when ``multiclass_mode=None`` (default), ``multiclass_mode=MulticlassMode.CRAMMER_SINGER``
+    or ``multiclass_mode="crammer-singer"``, this metric will compute the multi-class hinge loss defined by Crammer and
+    Singer as:
+
+    .. math::
+        \text{Hinge loss} = \max\left(0, 1 - \hat{y}_y + \max_{i \ne y} (\hat{y}_i)\right)
+
+    Where :math:`y \in \{0, ..., \mathrm{C} - 1\}` is the target class (where :math:`\mathrm{C}` is the number of
+    classes), and :math:`\hat{y} \in \mathbb{R}^\mathrm{C}` is the predicted output per class.
+
+    In the multi-class case when ``multiclass_mode=MulticlassMode.ONE_VS_ALL`` or ``multiclass_mode='one-vs-all'``, this
+    metric will use a one-vs-all approach to compute the hinge loss, giving a vector of C outputs where each entry pits
+    that class against all remaining classes.
+
+    This metric can optionally output the mean of the squared hinge loss by setting ``squared=True``.
+
+    Only accepts inputs with preds shape of (N) (binary) or (N, C) (multi-class) and target shape of (N).
+
+    Args:
+        preds: Predictions from model (as float outputs from decision function).
+        target: Ground truth labels.
+        squared:
+            If True, this will compute the squared hinge loss. Otherwise, computes the regular hinge loss (default).
+        multiclass_mode:
+            Which approach to use for multi-class inputs (has no effect in the binary case). ``None`` (default),
+            ``MulticlassMode.CRAMMER_SINGER`` or ``"crammer-singer"``, uses the Crammer Singer multi-class hinge loss.
+            ``MulticlassMode.ONE_VS_ALL`` or ``"one-vs-all"`` computes the hinge loss in a one-vs-all fashion.
+
+    Raises:
+        ValueError:
+            If preds shape is not of size (N) or (N, C).
+        ValueError:
+            If target shape is not of size (N).
+        ValueError:
+            If ``multiclass_mode`` is not: None, ``MulticlassMode.CRAMMER_SINGER``, ``"crammer-singer"``,
+            ``MulticlassMode.ONE_VS_ALL`` or ``"one-vs-all"``.
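+
+    A quick worked check of the binary case (illustrative): with ``target = [0, 1, 1]``
+    mapped to :math:`\{-1, 1\}` and ``preds = [-2.2, 2.4, 0.1]``, the per-sample losses are
+    :math:`\max(0, 1 - (-1)(-2.2)) = 0`, :math:`\max(0, 1 - 2.4) = 0` and
+    :math:`\max(0, 1 - 0.1) = 0.9`, so the mean loss is ``0.3``, matching the first example below.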
+
+    Example (binary case):
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics.functional import hinge
+        >>> target = B.tensor([0, 1, 1])
+        >>> preds = B.tensor([-2.2, 2.4, 0.1])
+        >>> hinge(preds, target)
+        tensor(0.3000)
+
+    Example (default / multiclass case):
+        >>> target = B.tensor([0, 1, 2])
+        >>> preds = B.tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]])
+        >>> hinge(preds, target)
+        tensor(2.9000)
+
+    Example (multiclass case, one-vs-all mode):
+        >>> target = B.tensor([0, 1, 2])
+        >>> preds = B.tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]])
+        >>> hinge(preds, target, multiclass_mode="one-vs-all")
+        tensor([2.2333, 1.5000, 1.2333])
+    """
+    measure, total = _hinge_update(preds, target, squared=squared, multiclass_mode=multiclass_mode)
+    return _hinge_compute(measure, total)
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/iou.py b/EE/paddlemetric/src/paddlemetrics/functional/classification/iou.py
new file mode 100644
index 000000000..b7cf60774
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/classification/iou.py
@@ -0,0 +1,133 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.classification.confusion_matrix import _confusion_matrix_update
+from paddlemetrics.utilities.data import get_num_classes
+from paddlemetrics.utilities.distributed import reduce
+
+
+def _iou_from_confmat(
+    confmat: Tensor,
+    num_classes: int,
+    ignore_index: Optional[int] = None,
+    absent_score: float = 0.0,
+    reduction: str = "elementwise_mean",
+) -> Tensor:
+    """Computes the intersection over union from confusion matrix.
+
+    Args:
+        confmat: Confusion matrix without normalization
+        num_classes: Number of classes for a given prediction and target tensor
+        ignore_index: optional int specifying a target class to ignore. If given, this class index does not contribute
+            to the returned score, regardless of reduction method.
+        absent_score: score to use for an individual class, if no instances of the class index were present in `pred`
+            AND no instances of the class index were present in `target`.
+        reduction: a method to reduce metric score over labels.
+
+            - ``'elementwise_mean'``: takes the mean (default)
+            - ``'sum'``: takes the sum
+            - ``'none'``: no reduction will be applied
+    """
+
+    # Remove the ignored class index from the scores.
+    if ignore_index is not None and 0 <= ignore_index < num_classes:
+        confmat[ignore_index] = 0.0
+
+    intersection = B.diag(confmat)
+    union = confmat.sum(0) + confmat.sum(1) - intersection
+
+    # If this class is absent in both target AND pred (union == 0), then use the absent_score for this class.
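+    # Illustration (hypothetical values): with 3 classes, pred = [0, 0] and
+    # target = [0, 2], class 1 appears in neither tensor, so union[1] == 0 and
+    # its score falls back to `absent_score` instead of the indeterminate 0/0.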
+ scores = intersection.float() / union.float() + scores[union == 0] = absent_score + + if ignore_index is not None and 0 <= ignore_index < num_classes: + scores = B.cat( + [ + scores[:ignore_index], + scores[ignore_index + 1 :], + ] + ) + + return reduce(scores, reduction=reduction) + + +def iou( + preds: Tensor, + target: Tensor, + ignore_index: Optional[int] = None, + absent_score: float = 0.0, + threshold: float = 0.5, + num_classes: Optional[int] = None, + reduction: str = "elementwise_mean", +) -> Tensor: + r""" + Computes `Jaccard index`_ + + .. math:: J(A,B) = \frac{|A\cap B|}{|A\cup B|} + + Where: :math:`A` and :math:`B` are both tensors of the same size, + containing integer class values. They may be subject to conversion from + input data (see description below). + + Note that it is different from box IoU. + + If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument + to convert into integer labels. This is the case for binary and multi-label probabilities. + + If pred has an extra dimension as in the case of multi-class scores we + perform an argmax on ``dim=1``. + + Args: + preds: tensor containing predictions from model (probabilities, or labels) with shape ``[N, d1, d2, ...]`` + target: tensor containing ground truth labels with shape ``[N, d1, d2, ...]`` + ignore_index: optional int specifying a target class to ignore. If given, + this class index does not contribute to the returned score, regardless + of reduction method. Has no effect if given an int that is not in the + range [0, num_classes-1], where num_classes is either given or derived + from pred and target. By default, no index is ignored, and all classes are used. + absent_score: score to use for an individual class, if no instances of + the class index were present in `pred` AND no instances of the class + index were present in `target`. For example, if we have 3 classes, + [0, 0] for `pred`, and [0, 2] for `target`, then class 1 would be + assigned the `absent_score`. + threshold: + Threshold value for binary or multi-label probabilities. default: 0.5 + num_classes: + Optionally specify the number of classes + reduction: a method to reduce metric score over labels. + + - ``'elementwise_mean'``: takes the mean (default) + - ``'sum'``: takes the sum + - ``'none'``: no reduction will be applied + + Return: + IoU score: Tensor containing single value if reduction is + 'elementwise_mean', or number of classes if reduction is 'none' + + Example: + >>> from paddlemetrics.functional import iou + >>> target = B.randint(0, 2, (10, 25, 25)) + >>> pred = B.tensor(target) + >>> pred[2:5, 7:13, 9:15] = 1 - pred[2:5, 7:13, 9:15] + >>> iou(pred, target) + tensor(0.9660) + """ + + num_classes = get_num_classes(preds=preds, target=target, num_classes=num_classes) + confmat = _confusion_matrix_update(preds, target, num_classes, threshold) + return _iou_from_confmat(confmat, num_classes, ignore_index, absent_score, reduction) diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/kl_divergence.py b/EE/paddlemetric/src/paddlemetrics/functional/classification/kl_divergence.py new file mode 100644 index 000000000..0d7685c1e --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/classification/kl_divergence.py @@ -0,0 +1,110 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Optional, Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.utilities.checks import _check_same_shape
+from paddlemetrics.utilities.data import METRIC_EPS
+
+
+def _kld_update(p: Tensor, q: Tensor, log_prob: bool) -> Tuple[Tensor, int]:
+    """Updates and returns KL divergence scores for each observation and the total number of observations. Raises a
+    ValueError if the input tensors do not have the same shape or are not 2D.
+
+    Args:
+        p: data distribution with shape ``[N, d]``
+        q: prior or approximate distribution with shape ``[N, d]``
+        log_prob: bool indicating if input is log-probabilities or probabilities. If given as probabilities,
+            will normalize to make sure the distributions sum to 1
+    """
+    _check_same_shape(p, q)
+    if p.ndim != 2 or q.ndim != 2:
+        raise ValueError(f"Expected both p and q distribution to be 2D but got {p.ndim} and {q.ndim} respectively")
+
+    total = p.shape[0]
+    if log_prob:
+        measures = B.sum(p.exp() * (p - q), axis=-1)
+    else:
+        p = p / p.sum(axis=-1, keepdim=True)
+        q = q / q.sum(axis=-1, keepdim=True)
+        q = B.clamp(q, METRIC_EPS)
+        measures = B.sum(p * B.log(p / q), axis=-1)
+
+    return measures, total
+
+
+def _kld_compute(measures: Tensor, total: Tensor, reduction: Optional[str] = "mean") -> Tensor:
+    """Computes the KL divergence based on the type of reduction.
+
+    Args:
+        measures: Tensor of KL divergence scores for each observation
+        total: Number of observations
+        reduction:
+            Determines how to reduce over the ``N``/batch dimension:
+
+            - ``'mean'`` [default]: Averages score across samples
+            - ``'sum'``: Sum score across samples
+            - ``'none'`` or ``None``: Returns score per sample
+
+    Example:
+        >>> p = B.tensor([[0.36, 0.48, 0.16]])
+        >>> q = B.tensor([[1/3, 1/3, 1/3]])
+        >>> measures, total = _kld_update(p, q, log_prob=False)
+        >>> _kld_compute(measures, total)
+        tensor(0.0853)
+    """
+
+    if reduction == "sum":
+        return measures.sum()
+    if reduction == "mean":
+        return measures.sum() / total
+    if reduction is None or reduction == "none":
+        return measures
+    return measures / total
+
+
+def kl_divergence(p: Tensor, q: Tensor, log_prob: bool = False, reduction: Optional[str] = "mean") -> Tensor:
+    r"""Computes `KL divergence`_
+
+    .. math::
+        D_{KL}(P||Q) = \sum_{x\in\mathcal{X}} P(x) \log\frac{P(x)}{Q(x)}
+
+    Where :math:`P` and :math:`Q` are probability distributions where :math:`P` usually represents a distribution
+    over data and :math:`Q` is often a prior or approximation of :math:`P`. It should be noted that the KL divergence
+    is a non-symmetric metric, i.e. :math:`D_{KL}(P||Q) \neq D_{KL}(Q||P)`.
+
+    Args:
+        p: data distribution with shape ``[N, d]``
+        q: prior or approximate distribution with shape ``[N, d]``
+        log_prob: bool indicating if input is log-probabilities or probabilities. If given as probabilities,
+            will normalize to make sure the distributions sum to 1
+        reduction:
+            Determines how to reduce over the ``N``/batch dimension:
+
+            - ``'mean'`` [default]: Averages score across samples
+            - ``'sum'``: Sum score across samples
+            - ``'none'`` or ``None``: Returns score per sample
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> p = B.tensor([[0.36, 0.48, 0.16]])
+        >>> q = B.tensor([[1/3, 1/3, 1/3]])
+        >>> kl_divergence(p, q)
+        tensor(0.0853)
+    """
+    measures, total = _kld_update(p, q, log_prob)
+    return _kld_compute(measures, total, reduction)
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/matthews_corrcoef.py b/EE/paddlemetric/src/paddlemetrics/functional/classification/matthews_corrcoef.py
new file mode 100644
index 000000000..8532a358d
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/classification/matthews_corrcoef.py
@@ -0,0 +1,78 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.classification.confusion_matrix import _confusion_matrix_update
+
+_matthews_corrcoef_update = _confusion_matrix_update
+
+
+def _matthews_corrcoef_compute(confmat: Tensor) -> Tensor:
+    """Computes Matthews correlation coefficient.
+
+    Args:
+        confmat: Confusion matrix
+
+    Example:
+        >>> target = B.tensor([1, 1, 0, 0])
+        >>> preds = B.tensor([0, 1, 0, 0])
+        >>> confmat = _matthews_corrcoef_update(preds, target, num_classes=2)
+        >>> _matthews_corrcoef_compute(confmat)
+        tensor(0.5774)
+    """
+
+    tk = confmat.sum(dim=1).float()
+    pk = confmat.sum(dim=0).float()
+    c = B.trace(confmat).float()
+    s = confmat.sum().float()
+    return (c * s - sum(tk * pk)) / (B.sqrt(s ** 2 - sum(pk * pk)) * B.sqrt(s ** 2 - sum(tk * tk)))
+
+
+def matthews_corrcoef(
+    preds: Tensor,
+    target: Tensor,
+    num_classes: int,
+    threshold: float = 0.5,
+) -> Tensor:
+    r"""
+    Calculates `Matthews correlation coefficient`_ that measures
+    the general correlation or quality of a classification. In the binary case it
+    is defined as:
+
+    .. math::
+        MCC = \frac{TP*TN - FP*FN}{\sqrt{(TP+FP)*(TP+FN)*(TN+FP)*(TN+FN)}}
+
+    where TP, TN, FP and FN are respectively the true positives, true negatives,
+    false positives and false negatives. Also works in the case of multi-label or
+    multi-class input.
+
+    Args:
+        preds: (float or long tensor), Either a ``(N, ...)`` tensor with labels or
+            ``(N, C, ...)`` where C is the number of classes, tensor with labels/probabilities
+        target: ``target`` (long tensor), tensor with shape ``(N, ...)`` with ground truth labels
+        num_classes: Number of classes in the dataset.
+        threshold:
+            Threshold value for binary or multi-label probabilities.
+
+    Example:
+        >>> from paddlemetrics.functional import matthews_corrcoef
+        >>> target = B.tensor([1, 1, 0, 0])
+        >>> preds = B.tensor([0, 1, 0, 0])
+        >>> matthews_corrcoef(preds, target, num_classes=2)
+        tensor(0.5774)
+
+    """
+    confmat = _matthews_corrcoef_update(preds, target, num_classes, threshold)
+    return _matthews_corrcoef_compute(confmat)
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/precision_recall.py b/EE/paddlemetric/src/paddlemetrics/functional/classification/precision_recall.py
new file mode 100644
index 000000000..4b8528dc2
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/classification/precision_recall.py
@@ -0,0 +1,568 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional, Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.classification.stat_scores import _reduce_stat_scores, _stat_scores_update
+from paddlemetrics.utilities.enums import AverageMethod, MDMCAverageMethod
+
+
+def _precision_compute(
+    tp: Tensor,
+    fp: Tensor,
+    fn: Tensor,
+    average: str,
+    mdmc_average: Optional[str],
+) -> Tensor:
+    """Computes precision from the stat scores: true positives, false positives, true negatives, false negatives.
+
+    Args:
+        tp: True positives
+        fp: False positives
+        fn: False negatives
+        average: Defines the reduction that is applied
+        mdmc_average: Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
+            ``average`` parameter)
+
+    Example:
+        >>> from paddlemetrics.functional.classification.stat_scores import _stat_scores_update
+        >>> preds = B.tensor([2, 0, 2, 1])
+        >>> target = B.tensor([1, 1, 2, 0])
+        >>> tp, fp, tn, fn = _stat_scores_update(preds, target, reduce='macro', num_classes=3)
+        >>> _precision_compute(tp, fp, fn, average='macro', mdmc_average=None)
+        tensor(0.1667)
+        >>> tp, fp, tn, fn = _stat_scores_update(preds, target, reduce='micro')
+        >>> _precision_compute(tp, fp, fn, average='micro', mdmc_average=None)
+        tensor(0.2500)
+    """
+
+    numerator = tp
+    denominator = tp + fp
+
+    if average == AverageMethod.MACRO and mdmc_average != MDMCAverageMethod.SAMPLEWISE:
+        cond = tp + fp + fn == 0
+        numerator = numerator[~cond]
+        denominator = denominator[~cond]
+
+    if average == AverageMethod.NONE and mdmc_average != MDMCAverageMethod.SAMPLEWISE:
+        # a class is not present if there are no TPs, no FPs, and no FNs
+        meaningless_indices = B.nonzero((tp | fn | fp) == 0).cpu()
+        numerator[meaningless_indices, ...] = -1
+        denominator[meaningless_indices, ...] = -1
+
+    return _reduce_stat_scores(
+        numerator=numerator,
+        denominator=denominator,
+        weights=None if average != "weighted" else tp + fn,
+        average=average,
+        mdmc_average=mdmc_average,
+    )
+
+
+def precision(
+    preds: Tensor,
+    target: Tensor,
+    average: str = "micro",
+    mdmc_average: Optional[str] = None,
+    ignore_index: Optional[int] = None,
+    num_classes: Optional[int] = None,
+    threshold: float = 0.5,
+    top_k: Optional[int] = None,
+    multiclass: Optional[bool] = None,
+) -> Tensor:
+    r"""
+    Computes `Precision`_
+
+    .. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}}
+
+    Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and
+    false positives respectively. With the use of ``top_k`` parameter, this metric can
+    generalize to Precision@K.
+
+    The reduction method (how the precision scores are aggregated) is controlled by the
+    ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the
+    multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`.
+
+    Args:
+        preds: Predictions from model (probabilities, logits or labels)
+        target: Ground truth values
+        average:
+            Defines the reduction that is applied. Should be one of the following:
+
+            - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes.
+            - ``'macro'``: Calculate the metric for each class separately, and average the
+              metrics across classes (with equal weights for each class).
+            - ``'weighted'``: Calculate the metric for each class separately, and average the
+              metrics across classes, weighting each class by its support (``tp + fn``).
+            - ``'none'`` or ``None``: Calculate the metric for each class separately, and return
+              the metric for every class.
+            - ``'samples'``: Calculate the metric for each sample, and average the metrics
+              across samples (with equal weights for each sample).
+
+            .. note:: What is considered a sample in the multi-dimensional multi-class case
+                depends on the value of ``mdmc_average``.
+
+            .. note:: If ``'none'`` and a given class doesn't occur in the `preds` or `target`,
+                the value for the class will be ``nan``.
+
+        mdmc_average:
+            Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
+            ``average`` parameter). Should be one of the following:
+
+            - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional
+              multi-class.
+
+            - ``'samplewise'``: In this case, the statistics are computed separately for each
+              sample on the ``N`` axis, and then averaged over samples.
+              The computation for each sample is done by treating the flattened extra axes ``...``
+              (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample,
+              and computing the metric for the sample based on that.
+
+            - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs
+              (see :ref:`references/modules:input types`)
+              are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they
+              were ``(N_X, C)``. From here on the ``average`` parameter applies as usual.
+
+        ignore_index:
+            Integer specifying a target class to ignore. If given, this class index does not contribute
+            to the returned score, regardless of reduction method. If an index is ignored, and ``average=None``
+            or ``'none'``, the score for the ignored class will be returned as ``nan``.
+
+        num_classes:
+            Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods.
+
+        threshold:
+            Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
+            of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
+        top_k:
+            Number of highest probability or logit score predictions considered to find the correct label,
+            relevant only for (multi-dimensional) multi-class inputs. The
+            default value (``None``) will be interpreted as 1 for these inputs.
+
+            Should be left at default (``None``) for all other types of inputs.
+        multiclass:
+            Used only in certain special cases, where you want to treat inputs as a different type
+            than what they appear to be. See the parameter's
+            :ref:`documentation section `
+            for a more detailed explanation and examples.
+
+    Return:
+        The shape of the returned tensor depends on the ``average`` parameter
+
+        - If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned
+        - If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number
+          of classes
+
+    Raises:
+        ValueError:
+            If ``average`` is not one of ``"micro"``, ``"macro"``, ``"weighted"``,
+            ``"samples"``, ``"none"`` or ``None``.
+        ValueError:
+            If ``mdmc_average`` is not one of ``None``, ``"samplewise"``, ``"global"``.
+        ValueError:
+            If ``average`` is set but ``num_classes`` is not provided.
+        ValueError:
+            If ``num_classes`` is set
+            and ``ignore_index`` is not in the range ``[0, num_classes)``.
+
+    Example:
+        >>> from paddlemetrics.functional import precision
+        >>> preds = B.tensor([2, 0, 2, 1])
+        >>> target = B.tensor([1, 1, 2, 0])
+        >>> precision(preds, target, average='macro', num_classes=3)
+        tensor(0.1667)
+        >>> precision(preds, target, average='micro')
+        tensor(0.2500)
+
+    """
+    allowed_average = ["micro", "macro", "weighted", "samples", "none", None]
+    if average not in allowed_average:
+        raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.")
+
+    allowed_mdmc_average = [None, "samplewise", "global"]
+    if mdmc_average not in allowed_mdmc_average:
+        raise ValueError(f"The `mdmc_average` has to be one of {allowed_mdmc_average}, got {mdmc_average}.")
+
+    if average in ["macro", "weighted", "none", None] and (not num_classes or num_classes < 1):
+        raise ValueError(f"When you set `average` as {average}, you have to provide the number of classes.")
+
+    if num_classes and ignore_index is not None and (not 0 <= ignore_index < num_classes or num_classes == 1):
+        raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes")
+
+    reduce = "macro" if average in ["weighted", "none", None] else average
+    tp, fp, _, fn = _stat_scores_update(
+        preds,
+        target,
+        reduce=reduce,
+        mdmc_reduce=mdmc_average,
+        threshold=threshold,
+        num_classes=num_classes,
+        top_k=top_k,
+        multiclass=multiclass,
+        ignore_index=ignore_index,
+    )
+
+    return _precision_compute(tp, fp, fn, average, mdmc_average)
+
+
+def _recall_compute(
+    tp: Tensor,
+    fp: Tensor,
+    fn: Tensor,
+    average: str,
+    mdmc_average: Optional[str],
+) -> Tensor:
+    """Computes recall from the stat scores: true positives, false positives, true negatives, false negatives.
+
+    Args:
+        tp: True positives
+        fp: False positives
+        fn: False negatives
+        average: Defines the reduction that is applied
+        mdmc_average: Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
+            ``average`` parameter)
+
+    Example:
+        >>> from paddlemetrics.functional.classification.stat_scores import _stat_scores_update
+        >>> preds = B.tensor([2, 0, 2, 1])
+        >>> target = B.tensor([1, 1, 2, 0])
+        >>> tp, fp, tn, fn = _stat_scores_update(preds, target, reduce='macro', num_classes=3)
+        >>> _recall_compute(tp, fp, fn, average='macro', mdmc_average=None)
+        tensor(0.3333)
+        >>> tp, fp, tn, fn = _stat_scores_update(preds, target, reduce='micro')
+        >>> _recall_compute(tp, fp, fn, average='micro', mdmc_average=None)
+        tensor(0.2500)
+    """
+    numerator = tp
+    denominator = tp + fn
+
+    if average == AverageMethod.MACRO and mdmc_average != MDMCAverageMethod.SAMPLEWISE:
+        cond = tp + fp + fn == 0
+        numerator = numerator[~cond]
+        denominator = denominator[~cond]
+
+    if average == AverageMethod.NONE and mdmc_average != MDMCAverageMethod.SAMPLEWISE:
+        # a class is not present if there are no TPs, no FPs, and no FNs
+        meaningless_indices = ((tp | fn | fp) == 0).nonzero().cpu()
+        numerator[meaningless_indices, ...] = -1
+        denominator[meaningless_indices, ...] = -1
+
+    return _reduce_stat_scores(
+        numerator=numerator,
+        denominator=denominator,
+        weights=None if average != AverageMethod.WEIGHTED else tp + fn,
+        average=average,
+        mdmc_average=mdmc_average,
+    )
+
+
+def recall(
+    preds: Tensor,
+    target: Tensor,
+    average: str = "micro",
+    mdmc_average: Optional[str] = None,
+    ignore_index: Optional[int] = None,
+    num_classes: Optional[int] = None,
+    threshold: float = 0.5,
+    top_k: Optional[int] = None,
+    multiclass: Optional[bool] = None,
+) -> Tensor:
+    r"""
+    Computes `Recall`_
+
+    .. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}}
+
+    Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and
+    false negatives respectively. With the use of ``top_k`` parameter, this metric can
+    generalize to Recall@K.
+
+    The reduction method (how the recall scores are aggregated) is controlled by the
+    ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the
+    multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`.
+
+    Args:
+        preds: Predictions from model (probabilities, logits or labels)
+        target: Ground truth values
+        average:
+            Defines the reduction that is applied. Should be one of the following:
+
+            - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes.
+            - ``'macro'``: Calculate the metric for each class separately, and average the
+              metrics across classes (with equal weights for each class).
+            - ``'weighted'``: Calculate the metric for each class separately, and average the
+              metrics across classes, weighting each class by its support (``tp + fn``).
+            - ``'none'`` or ``None``: Calculate the metric for each class separately, and return
+              the metric for every class.
+            - ``'samples'``: Calculate the metric for each sample, and average the metrics
+              across samples (with equal weights for each sample).
+
+            .. note:: What is considered a sample in the multi-dimensional multi-class case
+                depends on the value of ``mdmc_average``.
+
+            .. note:: If ``'none'`` and a given class doesn't occur in the `preds` or `target`,
+                the value for the class will be ``nan``.
+ + mdmc_average: + Defines how averaging is done for multi-dimensional multi-class inputs (on top of the + ``average`` parameter). Should be one of the following: + + - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional + multi-class. + + - ``'samplewise'``: In this case, the statistics are computed separately for each + sample on the ``N`` axis, and then averaged over samples. + The computation for each sample is done by treating the flattened extra axes ``...`` + (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample, + and computing the metric for the sample based on that. + + - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs + (see :ref:`references/modules:input types`) + are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they + were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. + + ignore_index: + Integer specifying a target class to ignore. If given, this class index does not contribute + to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` + or ``'none'``, the score for the ignored class will be returned as ``nan``. + + num_classes: + Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods. + + threshold: + Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case + of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. + top_k: + Number of highest probability or logit score predictions considered to find the correct label, + relevant only for (multi-dimensional) multi-class inputs. The + default value (``None``) will be interpreted as 1 for these inputs. + + Should be left at default (``None``) for all other types of inputs. + multiclass: + Used only in certain special cases, where you want to treat inputs as a different type + than what they appear to be. See the parameter's + :ref:`documentation section ` + for a more detailed explanation and examples. + + Return: + The shape of the returned tensor depends on the ``average`` parameter + + - If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned + - If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number + of classes + + Raises: + ValueError: + If ``average`` is not one of ``"micro"``, ``"macro"``, ``"weighted"``, + ``"samples"``, ``"none"`` or ``None``. + ValueError: + If ``mdmc_average`` is not one of ``None``, ``"samplewise"``, ``"global"``. + ValueError: + If ``average`` is set but ``num_classes`` is not provided. + ValueError: + If ``num_classes`` is set + and ``ignore_index`` is not in the range ``[0, num_classes)``. 
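+
+    .. note:: As a worked check of the example below: for ``preds = [2, 0, 2, 1]`` and
+        ``target = [1, 1, 2, 0]``, the per-class recalls are ``0/1`` (class 0), ``0/2``
+        (class 1) and ``1/1`` (class 2), so macro recall is ``(0 + 0 + 1) / 3 = 0.3333``,
+        while micro recall counts one correct prediction out of four samples, ``1/4 = 0.2500``.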
+
+    Example:
+        >>> from paddlemetrics.functional import recall
+        >>> preds = B.tensor([2, 0, 2, 1])
+        >>> target = B.tensor([1, 1, 2, 0])
+        >>> recall(preds, target, average='macro', num_classes=3)
+        tensor(0.3333)
+        >>> recall(preds, target, average='micro')
+        tensor(0.2500)
+
+    """
+    allowed_average = ["micro", "macro", "weighted", "samples", "none", None]
+    if average not in allowed_average:
+        raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.")
+
+    allowed_mdmc_average = [None, "samplewise", "global"]
+    if mdmc_average not in allowed_mdmc_average:
+        raise ValueError(f"The `mdmc_average` has to be one of {allowed_mdmc_average}, got {mdmc_average}.")
+
+    if average in ["macro", "weighted", "none", None] and (not num_classes or num_classes < 1):
+        raise ValueError(f"When you set `average` as {average}, you have to provide the number of classes.")
+
+    if num_classes and ignore_index is not None and (not 0 <= ignore_index < num_classes or num_classes == 1):
+        raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes")
+
+    reduce = "macro" if average in ["weighted", "none", None] else average
+    tp, fp, _, fn = _stat_scores_update(
+        preds,
+        target,
+        reduce=reduce,
+        mdmc_reduce=mdmc_average,
+        threshold=threshold,
+        num_classes=num_classes,
+        top_k=top_k,
+        multiclass=multiclass,
+        ignore_index=ignore_index,
+    )
+
+    return _recall_compute(tp, fp, fn, average, mdmc_average)
+
+
+def precision_recall(
+    preds: Tensor,
+    target: Tensor,
+    average: str = "micro",
+    mdmc_average: Optional[str] = None,
+    ignore_index: Optional[int] = None,
+    num_classes: Optional[int] = None,
+    threshold: float = 0.5,
+    top_k: Optional[int] = None,
+    multiclass: Optional[bool] = None,
+) -> Tuple[Tensor, Tensor]:
+    r"""
+    Computes `Precision`_ and `Recall`_
+
+    .. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}}
+
+
+    .. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}}
+
+    Where :math:`\text{TP}`, :math:`\text{FN}` and :math:`\text{FP}` represent the number
+    of true positives, false negatives and false positives respectively. With the use of
+    ``top_k`` parameter, this metric can generalize to Recall@K and Precision@K.
+
+    The reduction method (how the precision and recall scores are aggregated) is controlled by the
+    ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the
+    multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`.
+
+    Args:
+        preds: Predictions from model (probabilities, logits or labels)
+        target: Ground truth values
+        average:
+            Defines the reduction that is applied. Should be one of the following:
+
+            - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes.
+            - ``'macro'``: Calculate the metric for each class separately, and average the
+              metrics across classes (with equal weights for each class).
+            - ``'weighted'``: Calculate the metric for each class separately, and average the
+              metrics across classes, weighting each class by its support (``tp + fn``).
+            - ``'none'`` or ``None``: Calculate the metric for each class separately, and return
+              the metric for every class.
+            - ``'samples'``: Calculate the metric for each sample, and average the metrics
+              across samples (with equal weights for each sample).
+
+            .. note:: What is considered a sample in the multi-dimensional multi-class case
+                depends on the value of ``mdmc_average``.
+
+            .. note:: If ``'none'`` and a given class doesn't occur in the `preds` or `target`,
+                the value for the class will be ``nan``.
+
+        mdmc_average:
+            Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
+            ``average`` parameter). Should be one of the following:
+
+            - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional
+              multi-class.
+
+            - ``'samplewise'``: In this case, the statistics are computed separately for each
+              sample on the ``N`` axis, and then averaged over samples.
+              The computation for each sample is done by treating the flattened extra axes ``...``
+              (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample,
+              and computing the metric for the sample based on that.
+
+            - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs
+              (see :ref:`references/modules:input types`)
+              are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they
+              were ``(N_X, C)``. From here on the ``average`` parameter applies as usual.
+
+        ignore_index:
+            Integer specifying a target class to ignore. If given, this class index does not contribute
+            to the returned score, regardless of reduction method. If an index is ignored, and ``average=None``
+            or ``'none'``, the score for the ignored class will be returned as ``nan``.
+
+        num_classes:
+            Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods.
+
+        threshold:
+            Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
+            of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
+        top_k:
+            Number of highest probability or logit score predictions considered to find the correct label,
+            relevant only for (multi-dimensional) multi-class inputs. The
+            default value (``None``) will be interpreted as 1 for these inputs.
+
+            Should be left at default (``None``) for all other types of inputs.
+        multiclass:
+            Used only in certain special cases, where you want to treat inputs as a different type
+            than what they appear to be. See the parameter's
+            :ref:`documentation section `
+            for a more detailed explanation and examples.
+
+    Return:
+        The function returns a tuple with two elements: precision and recall. Their shape
+        depends on the ``average`` parameter
+
+        - If ``average in ['micro', 'macro', 'weighted', 'samples']``, they are a single element tensor
+        - If ``average in ['none', None]``, they are a tensor of shape ``(C, )``, where ``C`` stands for
+          the number of classes
+
+    Raises:
+        ValueError:
+            If ``average`` is not one of ``"micro"``, ``"macro"``, ``"weighted"``,
+            ``"samples"``, ``"none"`` or ``None``.
+        ValueError:
+            If ``mdmc_average`` is not one of ``None``, ``"samplewise"``, ``"global"``.
+        ValueError:
+            If ``average`` is set but ``num_classes`` is not provided.
+        ValueError:
+            If ``num_classes`` is set
+            and ``ignore_index`` is not in the range ``[0, num_classes)``.
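+
+    .. note:: As a worked check of the example below: the per-class precisions for
+        ``preds = [2, 0, 2, 1]`` and ``target = [1, 1, 2, 0]`` are ``0/1``, ``0/1``
+        and ``1/2``, giving macro precision ``0.5 / 3 = 0.1667``, while the per-class
+        recalls are ``0/1``, ``0/2`` and ``1/1``, giving macro recall ``1 / 3 = 0.3333``;
+        the micro variants both reduce to ``1/4 = 0.2500``.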
+
+    Example:
+        >>> from paddlemetrics.functional import precision_recall
+        >>> preds = B.tensor([2, 0, 2, 1])
+        >>> target = B.tensor([1, 1, 2, 0])
+        >>> precision_recall(preds, target, average='macro', num_classes=3)
+        (tensor(0.1667), tensor(0.3333))
+        >>> precision_recall(preds, target, average='micro')
+        (tensor(0.2500), tensor(0.2500))
+
+    """
+    allowed_average = ["micro", "macro", "weighted", "samples", "none", None]
+    if average not in allowed_average:
+        raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.")
+
+    allowed_mdmc_average = [None, "samplewise", "global"]
+    if mdmc_average not in allowed_mdmc_average:
+        raise ValueError(f"The `mdmc_average` has to be one of {allowed_mdmc_average}, got {mdmc_average}.")
+
+    if average in ["macro", "weighted", "none", None] and (not num_classes or num_classes < 1):
+        raise ValueError(f"When you set `average` as {average}, you have to provide the number of classes.")
+
+    if num_classes and ignore_index is not None and (not 0 <= ignore_index < num_classes or num_classes == 1):
+        raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes")
+
+    reduce = "macro" if average in ["weighted", "none", None] else average
+    tp, fp, _, fn = _stat_scores_update(
+        preds,
+        target,
+        reduce=reduce,
+        mdmc_reduce=mdmc_average,
+        threshold=threshold,
+        num_classes=num_classes,
+        top_k=top_k,
+        multiclass=multiclass,
+        ignore_index=ignore_index,
+    )
+
+    precision_ = _precision_compute(tp, fp, fn, average, mdmc_average)
+    recall_ = _recall_compute(tp, fp, fn, average, mdmc_average)
+
+    return precision_, recall_
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/precision_recall_curve.py b/EE/paddlemetric/src/paddlemetrics/functional/classification/precision_recall_curve.py
new file mode 100644
index 000000000..11b32500b
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/classification/precision_recall_curve.py
@@ -0,0 +1,332 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
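+"""Functional precision-recall curve metrics.
+
+All curves below are built on ``_binary_clf_curve``, which sorts predictions in
+decreasing order, locates the distinct score values, and accumulates weighted
+true- and false-positive counts at each of them; precision ``tps / (tps + fps)``
+and recall ``tps / tps[-1]`` then follow directly from these cumulative counts.
+"""
+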
+from typing import List, Optional, Sequence, Tuple, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.utilities import rank_zero_warn
+
+
+def _binary_clf_curve(
+    preds: Tensor,
+    target: Tensor,
+    sample_weights: Optional[Sequence] = None,
+    pos_label: int = 1,
+) -> Tuple[Tensor, Tensor, Tensor]:
+    """Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/_ranking.py."""
+    if sample_weights is not None and not isinstance(sample_weights, Tensor):
+        sample_weights = tensor(sample_weights, device=preds.device, dtype=B.float)
+
+    # remove class dimension if necessary
+    if preds.ndim > target.ndim:
+        preds = preds[:, 0]
+    desc_score_indices = B.argsort(preds, descending=True)
+
+    preds = preds[desc_score_indices]
+    target = target[desc_score_indices]
+
+    if sample_weights is not None:
+        weight = sample_weights[desc_score_indices]
+    else:
+        weight = 1.0
+
+    # pred typically has many tied values. Here we extract
+    # the indices associated with the distinct values. We also
+    # concatenate a value for the end of the curve.
+    distinct_value_indices = B.where(preds[1:] - preds[:-1])[0]
+    threshold_idxs = B.nn.functional.pad(distinct_value_indices, [0, 1], value=target.size(0) - 1)
+    target = (target == pos_label).to(B.long)
+    tps = B.cumsum(target * weight, dim=0)[threshold_idxs]
+
+    if sample_weights is not None:
+        # express fps as a cumsum to ensure fps is increasing even in
+        # the presence of floating point errors
+        fps = B.cumsum((1 - target) * weight, dim=0)[threshold_idxs]
+    else:
+        fps = 1 + threshold_idxs - tps
+
+    return fps, tps, preds[threshold_idxs]
+
+
+def _precision_recall_curve_update(
+    preds: Tensor,
+    target: Tensor,
+    num_classes: Optional[int] = None,
+    pos_label: Optional[int] = None,
+) -> Tuple[Tensor, Tensor, int, Optional[int]]:
+    """Updates and returns variables required to compute the precision-recall pairs for different thresholds.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+        num_classes: integer with number of classes for multi-label and multiclass problems.
+            Should be set to ``None`` for binary problems
+        pos_label: integer determining the positive class. Default is ``None``
+            which for binary problems translates to 1. For multiclass problems
+            this argument should not be set as we iteratively change it in the
+            range [0,num_classes-1]
+    """
+
+    if len(preds.shape) == len(target.shape):
+        if pos_label is None:
+            pos_label = 1
+        if num_classes is not None and num_classes != 1:
+            # multilabel problem
+            if num_classes != preds.shape[1]:
+                raise ValueError(
+                    f"Argument `num_classes` was set to {num_classes} in"
+                    f" metric `precision_recall_curve` but detected {preds.shape[1]}"
+                    " number of classes from predictions"
+                )
+            preds = preds.transpose(0, 1).reshape(num_classes, -1).transpose(0, 1)
+            target = target.transpose(0, 1).reshape(num_classes, -1).transpose(0, 1)
+        else:
+            # binary problem
+            preds = preds.flatten()
+            target = target.flatten()
+            num_classes = 1
+
+    # multi class problem
+    elif len(preds.shape) == len(target.shape) + 1:
+        if pos_label is not None:
+            rank_zero_warn(
+                "Argument `pos_label` should be `None` when running"
+                f" multiclass precision recall curve. Got {pos_label}"
Got {pos_label}" + ) + if num_classes != preds.shape[1]: + raise ValueError( + f"Argument `num_classes` was set to {num_classes} in" + f" metric `precision_recall_curve` but detected {preds.shape[1]}" + " number of classes from predictions" + ) + preds = preds.transpose(0, 1).reshape(num_classes, -1).transpose(0, 1) + target = target.flatten() + + else: + raise ValueError("preds and target must have same number of dimensions, or one additional dimension for preds") + + return preds, target, num_classes, pos_label + + +def _precision_recall_curve_compute_single_class( + preds: Tensor, + target: Tensor, + pos_label: int, + sample_weights: Optional[Sequence] = None, +) -> Tuple[Tensor, Tensor, Tensor]: + """Computes precision-recall pairs for single class inputs. + + Args: + preds: Predicted tensor + target: Ground truth tensor + pos_label: integer determining the positive class. + sample_weights: sample weights for each data point + """ + + fps, tps, thresholds = _binary_clf_curve( + preds=preds, target=target, sample_weights=sample_weights, pos_label=pos_label + ) + precision = tps / (tps + fps) + recall = tps / tps[-1] + + # stop when full recall attained and reverse the outputs so recall is decreasing + last_ind = B.where(tps == tps[-1])[0][0] + sl = slice(0, last_ind.item() + 1) + + # need to call reversed explicitly, since including that to slice would + # introduce negative strides that are not yet supported in pytorch + precision = B.cat([reversed(precision[sl]), B.ones(1, dtype=precision.dtype, device=precision.device)]) + + recall = B.cat([reversed(recall[sl]), B.zeros(1, dtype=recall.dtype, device=recall.device)]) + + thresholds = reversed(thresholds[sl]).detach().clone() # type: ignore + + return precision, recall, thresholds + + +def _precision_recall_curve_compute_multi_class( + preds: Tensor, + target: Tensor, + num_classes: int, + sample_weights: Optional[Sequence] = None, +) -> Tuple[List[Tensor], List[Tensor], List[Tensor]]: + """Computes precision-recall pairs for multi class inputs. + + Args: + preds: Predicted tensor + target: Ground truth tensor + num_classes: integer with number of classes for multi-label and multiclass problems. + Should be set to ``None`` for binary problems + sample_weights: sample weights for each data point + """ + + # Recursively call per class + precision, recall, thresholds = [], [], [] + for cls in range(num_classes): + preds_cls = preds[:, cls] + + prc_args = dict( + preds=preds_cls, + target=target, + num_classes=1, + pos_label=cls, + sample_weights=sample_weights, + ) + if target.ndim > 1: + prc_args.update( + dict( + target=target[:, cls], + pos_label=1, + ) + ) + res = precision_recall_curve(**prc_args) + precision.append(res[0]) + recall.append(res[1]) + thresholds.append(res[2]) + + return precision, recall, thresholds + + +def _precision_recall_curve_compute( + preds: Tensor, + target: Tensor, + num_classes: int, + pos_label: Optional[int] = None, + sample_weights: Optional[Sequence] = None, +) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]: + """Computes precision-recall pairs based on the number of classes. + + Args: + preds: Predicted tensor + target: Ground truth tensor + num_classes: integer with number of classes for multi-label and multiclass problems. + Should be set to ``None`` for binary problems + pos_label: integer determining the positive class. Default is ``None`` + which for binary problem is translate to 1. 
+            For multiclass problems
+            this argument should not be set as we iteratively change it in the
+            range [0,num_classes-1]
+        sample_weights: sample weights for each data point
+
+    Example:
+        >>> # binary case
+        >>> preds = B.tensor([0, 1, 2, 3])
+        >>> target = B.tensor([0, 1, 1, 0])
+        >>> pos_label = 1
+        >>> preds, target, num_classes, pos_label = _precision_recall_curve_update(preds, target, pos_label=pos_label)
+        >>> precision, recall, thresholds = _precision_recall_curve_compute(preds, target, num_classes, pos_label)
+        >>> precision
+        tensor([0.6667, 0.5000, 0.0000, 1.0000])
+        >>> recall
+        tensor([1.0000, 0.5000, 0.0000, 0.0000])
+        >>> thresholds
+        tensor([1, 2, 3])
+
+        >>> # multiclass case
+        >>> preds = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
+        ...                   [0.05, 0.75, 0.05, 0.05, 0.05],
+        ...                   [0.05, 0.05, 0.75, 0.05, 0.05],
+        ...                   [0.05, 0.05, 0.05, 0.75, 0.05]])
+        >>> target = B.tensor([0, 1, 3, 2])
+        >>> num_classes = 5
+        >>> preds, target, num_classes, pos_label = _precision_recall_curve_update(preds, target, num_classes)
+        >>> precision, recall, thresholds = _precision_recall_curve_compute(preds, target, num_classes)
+        >>> precision  # doctest: +NORMALIZE_WHITESPACE
+        [tensor([1., 1.]), tensor([1., 1.]), tensor([0.2500, 0.0000, 1.0000]),
+         tensor([0.2500, 0.0000, 1.0000]), tensor([0., 1.])]
+        >>> recall
+        [tensor([1., 0.]), tensor([1., 0.]), tensor([1., 0., 0.]), tensor([1., 0., 0.]), tensor([nan, 0.])]
+        >>> thresholds
+        [tensor([0.7500]), tensor([0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500])]
+    """
+
+    with B.no_grad():
+        if num_classes == 1:
+            if pos_label is None:
+                pos_label = 1
+            return _precision_recall_curve_compute_single_class(preds, target, pos_label, sample_weights)
+        return _precision_recall_curve_compute_multi_class(preds, target, num_classes, sample_weights)
+
+
+def precision_recall_curve(
+    preds: Tensor,
+    target: Tensor,
+    num_classes: Optional[int] = None,
+    pos_label: Optional[int] = None,
+    sample_weights: Optional[Sequence] = None,
+) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
+    """Computes precision-recall pairs for different thresholds.
+
+    Args:
+        preds: predictions from model (probabilities)
+        target: ground truth labels
+        num_classes: integer with number of classes for multi-label and multiclass problems.
+            Should be set to ``None`` for binary problems
+        pos_label: integer determining the positive class. Default is ``None``
+            which for binary problems translates to 1. For multiclass problems
+            this argument should not be set as we iteratively change it in the
+            range [0,num_classes-1]
+        sample_weights: sample weights for each data point
+
+    Returns:
+        3-element tuple containing
+
+        precision:
+            tensor where element i is the precision of predictions with
+            score >= thresholds[i] and the last element is 1.
+            If multiclass, this is a list of such tensors, one for each class.
+        recall:
+            tensor where element i is the recall of predictions with
+            score >= thresholds[i] and the last element is 0.
+            If multiclass, this is a list of such tensors, one for each class.
+        thresholds:
+            Thresholds used for computing precision/recall scores
+
+    Raises:
+        ValueError:
+            If ``preds`` and ``target`` don't have the same number of dimensions,
+            or one additional dimension for ``preds``.
+        ValueError:
+            If the number of classes deduced from ``preds`` is not the same as the
+            ``num_classes`` provided.
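+
+    .. note:: As a worked check of the binary example below: sorting ``preds`` in
+        decreasing order gives cumulative counts ``tps = [0, 1, 2, 2]`` and
+        ``fps = [1, 1, 1, 2]`` at thresholds ``[3, 2, 1, 0]``, hence raw precision
+        ``[0, 0.5, 0.6667, 0.5]`` and recall ``[0, 0.5, 1, 1]``; truncating at full
+        recall and reversing, with the final ``(precision=1, recall=0)`` point
+        appended, yields the tensors shown.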
+
+    Example (binary case):
+        >>> from paddlemetrics.functional import precision_recall_curve
+        >>> pred = B.tensor([0, 1, 2, 3])
+        >>> target = B.tensor([0, 1, 1, 0])
+        >>> precision, recall, thresholds = precision_recall_curve(pred, target, pos_label=1)
+        >>> precision
+        tensor([0.6667, 0.5000, 0.0000, 1.0000])
+        >>> recall
+        tensor([1.0000, 0.5000, 0.0000, 0.0000])
+        >>> thresholds
+        tensor([1, 2, 3])
+
+    Example (multiclass case):
+        >>> pred = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
+        ...                  [0.05, 0.75, 0.05, 0.05, 0.05],
+        ...                  [0.05, 0.05, 0.75, 0.05, 0.05],
+        ...                  [0.05, 0.05, 0.05, 0.75, 0.05]])
+        >>> target = B.tensor([0, 1, 3, 2])
+        >>> precision, recall, thresholds = precision_recall_curve(pred, target, num_classes=5)
+        >>> precision  # doctest: +NORMALIZE_WHITESPACE
+        [tensor([1., 1.]), tensor([1., 1.]), tensor([0.2500, 0.0000, 1.0000]),
+         tensor([0.2500, 0.0000, 1.0000]), tensor([0., 1.])]
+        >>> recall
+        [tensor([1., 0.]), tensor([1., 0.]), tensor([1., 0., 0.]), tensor([1., 0., 0.]), tensor([nan, 0.])]
+        >>> thresholds
+        [tensor([0.7500]), tensor([0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500])]
+    """
+    preds, target, num_classes, pos_label = _precision_recall_curve_update(preds, target, num_classes, pos_label)
+    return _precision_recall_curve_compute(preds, target, num_classes, pos_label, sample_weights)
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/roc.py b/EE/paddlemetric/src/paddlemetrics/functional/classification/roc.py
new file mode 100644
index 000000000..86f4e2a4c
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/classification/roc.py
@@ -0,0 +1,273 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import List, Optional, Sequence, Tuple, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.classification.precision_recall_curve import (
+    _binary_clf_curve,
+    _precision_recall_curve_update,
+)
+
+
+def _roc_update(
+    preds: Tensor,
+    target: Tensor,
+    num_classes: Optional[int] = None,
+    pos_label: Optional[int] = None,
+) -> Tuple[Tensor, Tensor, int, Optional[int]]:
+    """Updates and returns variables required to compute the Receiver Operating Characteristic.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+        num_classes: integer with number of classes for multi-label and multiclass problems.
+            Should be set to ``None`` for binary problems
+        pos_label: integer determining the positive class. Default is ``None``
+            which for binary problems translates to 1.
+            For multiclass problems
+            this argument should not be set as we iteratively change it in the
+            range [0,num_classes-1]
+    """
+
+    return _precision_recall_curve_update(preds, target, num_classes, pos_label)
+
+
+def _roc_compute_single_class(
+    preds: Tensor,
+    target: Tensor,
+    pos_label: int,
+    sample_weights: Optional[Sequence] = None,
+) -> Tuple[Tensor, Tensor, Tensor]:
+    """Computes Receiver Operating Characteristic for single class inputs. Returns tensor with false positive
+    rates, tensor with true positive rates, tensor with thresholds used for computing false and true positive
+    rates.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+        pos_label: integer determining the positive class. Default is ``None``
+            which for binary problems translates to 1. For multiclass problems
+            this argument should not be set as we iteratively change it in the
+            range [0,num_classes-1]
+        sample_weights: sample weights for each data point
+    """
+
+    fps, tps, thresholds = _binary_clf_curve(
+        preds=preds, target=target, sample_weights=sample_weights, pos_label=pos_label
+    )
+    # Add an extra threshold position to make sure that the curve starts at (0, 0)
+    tps = B.cat([B.zeros(1, dtype=tps.dtype, device=tps.device), tps])
+    fps = B.cat([B.zeros(1, dtype=fps.dtype, device=fps.device), fps])
+    thresholds = B.cat([thresholds[0][None] + 1, thresholds])
+
+    if fps[-1] <= 0:
+        raise ValueError("No negative samples in targets, false positive value should be meaningless")
+    fpr = fps / fps[-1]
+
+    if tps[-1] <= 0:
+        raise ValueError("No positive samples in targets, true positive value should be meaningless")
+    tpr = tps / tps[-1]
+
+    return fpr, tpr, thresholds
+
+
+def _roc_compute_multi_class(
+    preds: Tensor,
+    target: Tensor,
+    num_classes: int,
+    sample_weights: Optional[Sequence] = None,
+) -> Tuple[List[Tensor], List[Tensor], List[Tensor]]:
+    """Computes Receiver Operating Characteristic for multi class inputs. Returns tensor with false positive rates,
+    tensor with true positive rates, tensor with thresholds used for computing false and true positive rates.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+        num_classes: integer with number of classes for multi-label and multiclass problems
+        sample_weights: sample weights for each data point
+    """
+
+    fpr, tpr, thresholds = [], [], []
+    for cls in range(num_classes):
+        if preds.shape == target.shape:
+            target_cls = target[:, cls]
+            pos_label = 1
+        else:
+            target_cls = target
+            pos_label = cls
+        res = roc(
+            preds=preds[:, cls],
+            target=target_cls,
+            num_classes=1,
+            pos_label=pos_label,
+            sample_weights=sample_weights,
+        )
+        fpr.append(res[0])
+        tpr.append(res[1])
+        thresholds.append(res[2])
+
+    return fpr, tpr, thresholds
+
+
+def _roc_compute(
+    preds: Tensor,
+    target: Tensor,
+    num_classes: int,
+    pos_label: Optional[int] = None,
+    sample_weights: Optional[Sequence] = None,
+) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
+    """Computes Receiver Operating Characteristic based on the number of classes.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+        num_classes: integer with number of classes for multi-label and multiclass problems.
+            Should be set to ``None`` for binary problems
+        pos_label: integer determining the positive class. Default is ``None``
+            which for binary problems translates to 1.
+            For multiclass problems
+            this argument should not be set as we iteratively change it in the
+            range [0,num_classes-1]
+        sample_weights: sample weights for each data point
+
+    Example:
+        >>> # binary case
+        >>> preds = B.tensor([0, 1, 2, 3])
+        >>> target = B.tensor([0, 1, 1, 1])
+        >>> pos_label = 1
+        >>> preds, target, num_classes, pos_label = _roc_update(preds, target, pos_label=pos_label)
+        >>> fpr, tpr, thresholds = _roc_compute(preds, target, num_classes, pos_label)
+        >>> fpr
+        tensor([0., 0., 0., 0., 1.])
+        >>> tpr
+        tensor([0.0000, 0.3333, 0.6667, 1.0000, 1.0000])
+        >>> thresholds
+        tensor([4, 3, 2, 1, 0])
+
+        >>> # multiclass case
+        >>> preds = B.tensor([[0.75, 0.05, 0.05, 0.05],
+        ...                   [0.05, 0.75, 0.05, 0.05],
+        ...                   [0.05, 0.05, 0.75, 0.05],
+        ...                   [0.05, 0.05, 0.05, 0.75]])
+        >>> target = B.tensor([0, 1, 3, 2])
+        >>> num_classes = 4
+        >>> preds, target, num_classes, pos_label = _roc_update(preds, target, num_classes)
+        >>> fpr, tpr, thresholds = _roc_compute(preds, target, num_classes)
+        >>> fpr
+        [tensor([0., 0., 1.]), tensor([0., 0., 1.]), tensor([0.0000, 0.3333, 1.0000]), tensor([0.0000, 0.3333, 1.0000])]
+        >>> tpr
+        [tensor([0., 1., 1.]), tensor([0., 1., 1.]), tensor([0., 0., 1.]), tensor([0., 0., 1.])]
+        >>> thresholds  # doctest: +NORMALIZE_WHITESPACE
+        [tensor([1.7500, 0.7500, 0.0500]),
+         tensor([1.7500, 0.7500, 0.0500]),
+         tensor([1.7500, 0.7500, 0.0500]),
+         tensor([1.7500, 0.7500, 0.0500])]
+    """
+
+    with B.no_grad():
+        if num_classes == 1 and preds.ndim == 1:  # binary
+            if pos_label is None:
+                pos_label = 1
+            return _roc_compute_single_class(preds, target, pos_label, sample_weights)
+        return _roc_compute_multi_class(preds, target, num_classes, sample_weights)
+
+
+def roc(
+    preds: Tensor,
+    target: Tensor,
+    num_classes: Optional[int] = None,
+    pos_label: Optional[int] = None,
+    sample_weights: Optional[Sequence] = None,
+) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
+    """Computes the Receiver Operating Characteristic (ROC). Works with binary, multiclass and multilabel
+    input.
+
+    Args:
+        preds: predictions from model (logits or probabilities)
+        target: ground truth values
+        num_classes: integer with number of classes for multi-label and multiclass problems.
+            Should be set to ``None`` for binary problems
+        pos_label: integer determining the positive class. Default is ``None``
+            which for binary problems translates to 1. For multiclass problems
+            this argument should not be set as we iteratively change it in the
+            range [0,num_classes-1]
+        sample_weights: sample weights for each data point
+
+    Returns:
+        3-element tuple containing
+
+        fpr:
+            tensor with false positive rates.
+            If multiclass or multilabel, this is a list of such tensors, one for each class/label.
+        tpr:
+            tensor with true positive rates.
+            If multiclass or multilabel, this is a list of such tensors, one for each class/label.
+        thresholds:
+            tensor with thresholds used for computing false and true positive rates.
+            If multiclass or multilabel, this is a list of such tensors, one for each class/label.
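+
+    .. note:: An extra threshold equal to the largest prediction score plus one is
+        prepended so that every curve starts at ``(fpr, tpr) = (0, 0)``; this is why
+        the binary example below returns five thresholds, ``[4, 3, 2, 1, 0]``, for
+        only four distinct scores.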
+ + Example (binary case): + >>> from paddlemetrics.functional import roc + >>> pred = B.tensor([0, 1, 2, 3]) + >>> target = B.tensor([0, 1, 1, 1]) + >>> fpr, tpr, thresholds = roc(pred, target, pos_label=1) + >>> fpr + tensor([0., 0., 0., 0., 1.]) + >>> tpr + tensor([0.0000, 0.3333, 0.6667, 1.0000, 1.0000]) + >>> thresholds + tensor([4, 3, 2, 1, 0]) + + Example (multiclass case): + >>> from paddlemetrics.functional import roc + >>> pred = B.tensor([[0.75, 0.05, 0.05, 0.05], + ... [0.05, 0.75, 0.05, 0.05], + ... [0.05, 0.05, 0.75, 0.05], + ... [0.05, 0.05, 0.05, 0.75]]) + >>> target = B.tensor([0, 1, 3, 2]) + >>> fpr, tpr, thresholds = roc(pred, target, num_classes=4) + >>> fpr + [tensor([0., 0., 1.]), tensor([0., 0., 1.]), tensor([0.0000, 0.3333, 1.0000]), tensor([0.0000, 0.3333, 1.0000])] + >>> tpr + [tensor([0., 1., 1.]), tensor([0., 1., 1.]), tensor([0., 0., 1.]), tensor([0., 0., 1.])] + >>> thresholds # doctest: +NORMALIZE_WHITESPACE + [tensor([1.7500, 0.7500, 0.0500]), + tensor([1.7500, 0.7500, 0.0500]), + tensor([1.7500, 0.7500, 0.0500]), + tensor([1.7500, 0.7500, 0.0500])] + + Example (multilabel case): + >>> from paddlemetrics.functional import roc + >>> pred = B.tensor([[0.8191, 0.3680, 0.1138], + ... [0.3584, 0.7576, 0.1183], + ... [0.2286, 0.3468, 0.1338], + ... [0.8603, 0.0745, 0.1837]]) + >>> target = B.tensor([[1, 1, 0], [0, 1, 0], [0, 0, 0], [0, 1, 1]]) + >>> fpr, tpr, thresholds = roc(pred, target, num_classes=3, pos_label=1) + >>> fpr # doctest: +NORMALIZE_WHITESPACE + [tensor([0.0000, 0.3333, 0.3333, 0.6667, 1.0000]), + tensor([0., 0., 0., 1., 1.]), + tensor([0.0000, 0.0000, 0.3333, 0.6667, 1.0000])] + >>> tpr + [tensor([0., 0., 1., 1., 1.]), tensor([0.0000, 0.3333, 0.6667, 0.6667, 1.0000]), tensor([0., 1., 1., 1., 1.])] + >>> thresholds # doctest: +NORMALIZE_WHITESPACE + [tensor([1.8603, 0.8603, 0.8191, 0.3584, 0.2286]), + tensor([1.7576, 0.7576, 0.3680, 0.3468, 0.0745]), + tensor([1.1837, 0.1837, 0.1338, 0.1183, 0.1138])] + """ + preds, target, num_classes, pos_label = _roc_update(preds, target, num_classes, pos_label) + return _roc_compute(preds, target, num_classes, pos_label, sample_weights) diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/specificity.py b/EE/paddlemetric/src/paddlemetrics/functional/classification/specificity.py new file mode 100644 index 000000000..be87dce7d --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/classification/specificity.py @@ -0,0 +1,215 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.classification.stat_scores import _reduce_stat_scores, _stat_scores_update
+from paddlemetrics.utilities.enums import AverageMethod, MDMCAverageMethod
+
+
+def _specificity_compute(
+    tp: Tensor,
+    fp: Tensor,
+    tn: Tensor,
+    fn: Tensor,
+    average: str,
+    mdmc_average: Optional[str],
+) -> Tensor:
+    """Computes specificity from the stat scores: true positives, false positives, true negatives, false negatives.
+
+    Args:
+        tp: True positives
+        fp: False positives
+        tn: True negatives
+        fn: False negatives
+        average: Defines the reduction that is applied
+        mdmc_average: Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
+            ``average`` parameter)
+
+    Example:
+        >>> from paddlemetrics.functional.classification.stat_scores import _stat_scores_update
+        >>> preds = B.tensor([2, 0, 2, 1])
+        >>> target = B.tensor([1, 1, 2, 0])
+        >>> tp, fp, tn, fn = _stat_scores_update(preds, target, reduce='macro', num_classes=3)
+        >>> _specificity_compute(tp, fp, tn, fn, average='macro', mdmc_average=None)
+        tensor(0.6111)
+        >>> tp, fp, tn, fn = _stat_scores_update(preds, target, reduce='micro')
+        >>> _specificity_compute(tp, fp, tn, fn, average='micro', mdmc_average=None)
+        tensor(0.6250)
+    """
+
+    numerator = tn
+    denominator = tn + fp
+    if average == AverageMethod.NONE and mdmc_average != MDMCAverageMethod.SAMPLEWISE:
+        # a class is not present if there are no TPs, no FPs, and no FNs
+        meaningless_indices = B.nonzero((tp | fn | fp) == 0).cpu()
+        numerator[meaningless_indices, ...] = -1
+        denominator[meaningless_indices, ...] = -1
+    return _reduce_stat_scores(
+        numerator=numerator,
+        denominator=denominator,
+        weights=None if average != AverageMethod.WEIGHTED else denominator,
+        average=average,
+        mdmc_average=mdmc_average,
+    )
+
+
+def specificity(
+    preds: Tensor,
+    target: Tensor,
+    average: str = "micro",
+    mdmc_average: Optional[str] = None,
+    ignore_index: Optional[int] = None,
+    num_classes: Optional[int] = None,
+    threshold: float = 0.5,
+    top_k: Optional[int] = None,
+    multiclass: Optional[bool] = None,
+) -> Tensor:
+    r"""
+    Computes `Specificity`_
+
+    .. math:: \text{Specificity} = \frac{\text{TN}}{\text{TN} + \text{FP}}
+
+    Where :math:`\text{TN}` and :math:`\text{FP}` represent the number of true negatives and
+    false positives respectively. With the use of ``top_k`` parameter, this metric can
+    generalize to Specificity@K.
+
+    The reduction method (how the specificity scores are aggregated) is controlled by the
+    ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the
+    multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`.
+
+    Args:
+        preds: Predictions from model (probabilities, or labels)
+        target: Ground truth values
+        average:
+            Defines the reduction that is applied. Should be one of the following:
+
+            - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes.
+            - ``'macro'``: Calculate the metric for each class separately, and average the
+              metrics across classes (with equal weights for each class).
+            - ``'weighted'``: Calculate the metric for each class separately, and average the
+              metrics across classes, weighting each class by its support (``tn + fp``).
+            - ``'none'`` or ``None``: Calculate the metric for each class separately, and return
+              the metric for every class.
+ - ``'samples'``: Calculate the metric for each sample, and average the metrics + across samples (with equal weights for each sample). + + .. note:: What is considered a sample in the multi-dimensional multi-class case + depends on the value of ``mdmc_average``. + + .. note:: If ``'none'`` and a given class doesn't occur in the `preds` or `target`, + the value for the class will be ``nan``. + + mdmc_average: + Defines how averaging is done for multi-dimensional multi-class inputs (on top of the + ``average`` parameter). Should be one of the following: + + - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional + multi-class. + + - ``'samplewise'``: In this case, the statistics are computed separately for each + sample on the ``N`` axis, and then averaged over samples. + The computation for each sample is done by treating the flattened extra axes ``...`` + (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample, + and computing the metric for the sample based on that. + + - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs + (see :ref:`references/modules:input types`) + are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they + were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. + + ignore_index: + Integer specifying a target class to ignore. If given, this class index does not contribute + to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` + or ``'none'``, the score for the ignored class will be returned as ``nan``. + + num_classes: + Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods. + + threshold: + Threshold probability value for transforming probability predictions to binary + (0,1) predictions, in the case of binary or multi-label inputs + top_k: + Number of highest probability entries for each sample to convert to 1s - relevant + only for inputs with probability predictions. If this parameter is set for multi-label + inputs, it will take precedence over ``threshold``. For (multi-dim) multi-class inputs, + this parameter defaults to 1. + + Should be left unset (``None``) for inputs with label predictions. + multiclass: + Used only in certain special cases, where you want to treat inputs as a different type + than what they appear to be. See the parameter's + :ref:`documentation section ` + for a more detailed explanation and examples. + + Return: + The shape of the returned tensor depends on the ``average`` parameter + + - If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned + - If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number + of classes + + Raises: + ValueError: + If ``average`` is not one of ``"micro"``, ``"macro"``, ``"weighted"``, + ``"samples"``, ``"none"`` or ``None``. + ValueError: + If ``mdmc_average`` is not one of ``None``, ``"samplewise"``, ``"global"``. + ValueError: + If ``average`` is set but ``num_classes`` is not provided. + ValueError: + If ``num_classes`` is set + and ``ignore_index`` is not in the range ``[0, num_classes)``. 
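+
+    .. note:: As a worked check of the example below: for ``preds = [2, 0, 2, 1]`` and
+        ``target = [1, 1, 2, 0]`` the per-class true-negative rates ``tn / (tn + fp)``
+        are ``2/3``, ``1/2`` and ``2/3``, so macro specificity is
+        ``(2/3 + 1/2 + 2/3) / 3 = 0.6111``, while micro specificity pools the counts
+        into ``5/8 = 0.6250``.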
+
+    Example:
+        >>> from paddlemetrics.functional import specificity
+        >>> preds = B.tensor([2, 0, 2, 1])
+        >>> target = B.tensor([1, 1, 2, 0])
+        >>> specificity(preds, target, average='macro', num_classes=3)
+        tensor(0.6111)
+        >>> specificity(preds, target, average='micro')
+        tensor(0.6250)
+
+    """
+
+    allowed_average = ["micro", "macro", "weighted", "samples", "none", None]
+    if average not in allowed_average:
+        raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.")
+
+    allowed_mdmc_average = [None, "samplewise", "global"]
+    if mdmc_average not in allowed_mdmc_average:
+        raise ValueError(f"The `mdmc_average` has to be one of {allowed_mdmc_average}, got {mdmc_average}.")
+
+    if average in ["macro", "weighted", "none", None] and (not num_classes or num_classes < 1):
+        raise ValueError(f"When you set `average` as {average}, you have to provide the number of classes.")
+
+    if num_classes and ignore_index is not None and (not 0 <= ignore_index < num_classes or num_classes == 1):
+        raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes")
+
+    reduce = "macro" if average in ["weighted", "none", None] else average
+    tp, fp, tn, fn = _stat_scores_update(
+        preds,
+        target,
+        reduce=reduce,
+        mdmc_reduce=mdmc_average,
+        threshold=threshold,
+        num_classes=num_classes,
+        top_k=top_k,
+        multiclass=multiclass,
+        ignore_index=ignore_index,
+    )
+
+    return _specificity_compute(tp, fp, tn, fn, average, mdmc_average)
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/stat_scores.py b/EE/paddlemetric/src/paddlemetrics/functional/classification/stat_scores.py
new file mode 100644
index 000000000..33e1cafdd
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/classification/stat_scores.py
@@ -0,0 +1,396 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import List, Optional, Tuple, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.utilities.checks import _input_format_classification
+from paddlemetrics.utilities.enums import AverageMethod, MDMCAverageMethod
+
+
+def _del_column(data: Tensor, idx: int) -> Tensor:
+    """Delete the column at index."""
+    return B.cat([data[:, :idx], data[:, (idx + 1) :]], 1)
+
+
+def _stat_scores(
+    preds: Tensor,
+    target: Tensor,
+    reduce: Optional[str] = "micro",
+) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
+    """Calculate the number of tp, fp, tn, fn.
+
+    Args:
+        preds:
+            An ``(N, C)`` or ``(N, C, X)`` tensor of predictions (0 or 1)
+        target:
+            An ``(N, C)`` or ``(N, C, X)`` tensor of true labels (0 or 1)
+        reduce:
+            One of ``'micro'``, ``'macro'``, ``'samples'``
+
+    Return:
+        Returns a list of 4 tensors; tp, fp, tn, fn.
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/stat_scores.py b/EE/paddlemetric/src/paddlemetrics/functional/classification/stat_scores.py
new file mode 100644
index 000000000..33e1cafdd
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/classification/stat_scores.py
@@ -0,0 +1,396 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import List, Optional, Tuple, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.utilities.checks import _input_format_classification
+from paddlemetrics.utilities.enums import AverageMethod, MDMCAverageMethod
+
+
+def _del_column(data: Tensor, idx: int) -> Tensor:
+    """Delete the column at index."""
+    return B.cat([data[:, :idx], data[:, (idx + 1) :]], 1)
+
+
+def _stat_scores(
+    preds: Tensor,
+    target: Tensor,
+    reduce: Optional[str] = "micro",
+) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
+    """Calculate the number of tp, fp, tn, fn.
+
+    Args:
+        preds:
+            An ``(N, C)`` or ``(N, C, X)`` tensor of predictions (0 or 1)
+        target:
+            An ``(N, C)`` or ``(N, C, X)`` tensor of true labels (0 or 1)
+        reduce:
+            One of ``'micro'``, ``'macro'``, ``'samples'``
+
+    Return:
+        Returns a list of 4 tensors; tp, fp, tn, fn.
+        The shape of the returned tensors depends on the shape of the inputs
+        and the ``reduce`` parameter:
+
+        If inputs are of the shape ``(N, C)``, then
+
+        - If ``reduce='micro'``, the returned tensors are 1 element tensors
+        - If ``reduce='macro'``, the returned tensors are ``(C,)`` tensors
+        - If ``reduce='samples'``, the returned tensors are ``(N,)`` tensors
+
+        If inputs are of the shape ``(N, C, X)``, then
+
+        - If ``reduce='micro'``, the returned tensors are ``(N,)`` tensors
+        - If ``reduce='macro'``, the returned tensors are ``(N, C)`` tensors
+        - If ``reduce='samples'``, the returned tensors are ``(N, X)`` tensors
+    """
+    dim: Union[int, List[int]] = 1  # for "samples"
+    if reduce == "micro":
+        dim = [0, 1] if preds.ndim == 2 else [1, 2]
+    elif reduce == "macro":
+        dim = 0 if preds.ndim == 2 else 2
+
+    true_pred, false_pred = target == preds, target != preds
+    pos_pred, neg_pred = preds == 1, preds == 0
+
+    tp = (true_pred * pos_pred).sum(dim=dim)
+    fp = (false_pred * pos_pred).sum(dim=dim)
+
+    tn = (true_pred * neg_pred).sum(dim=dim)
+    fn = (false_pred * neg_pred).sum(dim=dim)
+    return tp.long(), fp.long(), tn.long(), fn.long()
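The mask arithmetic in `_stat_scores` can be replayed on one-hot inputs (the format `_input_format_classification` produces) with plain NumPy; the per-class counts line up column-wise with the `_stat_scores_compute` doctest further down:

    import numpy as np

    preds = np.eye(3, dtype=int)[[1, 0, 2, 1]]   # (N, C) one-hot predictions
    target = np.eye(3, dtype=int)[[1, 1, 2, 0]]  # (N, C) one-hot targets

    true_pred, false_pred = target == preds, target != preds
    pos_pred, neg_pred = preds == 1, preds == 0

    print((true_pred & pos_pred).sum(axis=0))   # tp: [0 1 1]
    print((false_pred & pos_pred).sum(axis=0))  # fp: [1 1 0]
    print((true_pred & neg_pred).sum(axis=0))   # tn: [2 1 3]
    print((false_pred & neg_pred).sum(axis=0))  # fn: [1 1 0]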
+
+
+def _stat_scores_update(
+    preds: Tensor,
+    target: Tensor,
+    reduce: Optional[str] = "micro",
+    mdmc_reduce: Optional[str] = None,
+    num_classes: Optional[int] = None,
+    top_k: Optional[int] = None,
+    threshold: float = 0.5,
+    multiclass: Optional[bool] = None,
+    ignore_index: Optional[int] = None,
+) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
+    """Updates and returns the number of true positives, false positives, true negatives, false negatives.
+    Raises ValueError if:
+
+    - The `ignore_index` is not valid
+    - When `ignore_index` is used with binary data
+    - When inputs are multi-dimensional multi-class, and the `mdmc_reduce` parameter is not set
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+        reduce: Defines the reduction that is applied
+        mdmc_reduce: Defines how the multi-dimensional multi-class inputs are handled
+        num_classes: Number of classes. Necessary for (multi-dimensional) multi-class or multi-label data.
+        top_k: Number of highest probability or logit score predictions considered to find the correct label,
+            relevant only for (multi-dimensional) multi-class inputs
+        threshold: Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the
+            case of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities
+        multiclass: Used only in certain special cases, where you want to treat inputs as a different type
+            than what they appear to be
+        ignore_index: Specify a class (label) to ignore. If given, this class index does not contribute
+            to the returned score, regardless of reduction method. If an index is ignored, and
+            ``reduce='macro'``, the class statistics for the ignored class will all be returned
+            as ``-1``.
+    """
+
+    preds, target, _ = _input_format_classification(
+        preds, target, threshold=threshold, num_classes=num_classes, multiclass=multiclass, top_k=top_k
+    )
+
+    if ignore_index is not None and not 0 <= ignore_index < preds.shape[1]:
+        raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {preds.shape[1]} classes")
+
+    if ignore_index is not None and preds.shape[1] == 1:
+        raise ValueError("You cannot use `ignore_index` with binary data.")
+
+    if preds.ndim == 3:
+        if not mdmc_reduce:
+            raise ValueError(
+                "When your inputs are multi-dimensional multi-class, you have to set the `mdmc_reduce` parameter"
+            )
+        if mdmc_reduce == "global":
+            preds = B.transpose(preds, 1, 2).reshape(-1, preds.shape[1])
+            target = B.transpose(target, 1, 2).reshape(-1, target.shape[1])
+
+    # Delete what is in ignore_index, if applicable (and classes don't matter):
+    if ignore_index is not None and reduce != "macro":
+        preds = _del_column(preds, ignore_index)
+        target = _del_column(target, ignore_index)
+
+    tp, fp, tn, fn = _stat_scores(preds, target, reduce=reduce)
+
+    # Take care of ignore_index
+    if ignore_index is not None and reduce == "macro":
+        tp[..., ignore_index] = -1
+        fp[..., ignore_index] = -1
+        tn[..., ignore_index] = -1
+        fn[..., ignore_index] = -1
+
+    return tp, fp, tn, fn
+
+
+def _stat_scores_compute(tp: Tensor, fp: Tensor, tn: Tensor, fn: Tensor) -> Tensor:
+    """Computes the number of true positives, false positives, true negatives, false negatives. Concatenates the
+    input tensors along with the support into one output.
+
+    Args:
+        tp: True positives
+        fp: False positives
+        tn: True negatives
+        fn: False negatives
+
+    Example:
+        >>> preds = B.tensor([1, 0, 2, 1])
+        >>> target = B.tensor([1, 1, 2, 0])
+        >>> tp, fp, tn, fn = _stat_scores_update(preds, target, reduce='macro', num_classes=3)
+        >>> _stat_scores_compute(tp, fp, tn, fn)
+        tensor([[0, 1, 2, 1, 1],
+                [1, 1, 1, 1, 2],
+                [1, 0, 3, 0, 1]])
+        >>> tp, fp, tn, fn = _stat_scores_update(preds, target, reduce='micro')
+        >>> _stat_scores_compute(tp, fp, tn, fn)
+        tensor([2, 2, 6, 2, 4])
+    """
+    stats = [
+        tp.unsqueeze(-1),
+        fp.unsqueeze(-1),
+        tn.unsqueeze(-1),
+        fn.unsqueeze(-1),
+        tp.unsqueeze(-1) + fn.unsqueeze(-1),  # support
+    ]
+    outputs: Tensor = B.cat(stats, -1)
+    outputs = B.where(outputs < 0, tensor(-1, device=outputs.device, dtype=outputs.dtype), outputs)
+
+    return outputs
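The ``'global'`` branch in `_stat_scores_update` is just a transpose-and-reshape that folds the extra ``...`` positions into the sample axis; a small NumPy sketch of the same move:

    import numpy as np

    scores = np.arange(2 * 3 * 4).reshape(2, 3, 4)   # (N, C, X)
    flat = scores.transpose(0, 2, 1).reshape(-1, 3)  # (N * X, C)

    assert flat.shape == (8, 3)
    # Row k of `flat` is the class vector of sample k // 4 at position k % 4.
    assert (flat[5] == scores[1, :, 1]).all()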
+
+
+def _reduce_stat_scores(
+    numerator: Tensor,
+    denominator: Tensor,
+    weights: Optional[Tensor],
+    average: Optional[str],
+    mdmc_average: Optional[str],
+    zero_division: int = 0,
+) -> Tensor:
+    """Reduces scores of type ``numerator/denominator`` or
+    ``weights * (numerator/denominator)``, if ``average='weighted'``.
+
+    Args:
+        numerator: A tensor with numerator numbers.
+        denominator: A tensor with denominator numbers. If a denominator is
+            negative, the class will be ignored (if averaging), or its score
+            will be returned as ``nan`` (if ``average=None``).
+            If the denominator is zero, then ``zero_division`` score will be
+            used for those elements.
+        weights: A tensor of weights to be used if ``average='weighted'``.
+        average: The method to average the scores
+        mdmc_average: The method to average the scores if inputs were multi-dimensional multi-class (MDMC)
+        zero_division: The value to use for the score if denominator equals zero.
+    """
+    numerator, denominator = numerator.float(), denominator.float()
+    zero_div_mask = denominator == 0
+    ignore_mask = denominator < 0
+
+    if weights is None:
+        weights = B.ones_like(denominator)
+    else:
+        weights = weights.float()
+
+    numerator = B.where(zero_div_mask, tensor(float(zero_division), device=numerator.device), numerator)
+    denominator = B.where(zero_div_mask | ignore_mask, tensor(1.0, device=denominator.device), denominator)
+    weights = B.where(ignore_mask, tensor(0.0, device=weights.device), weights)
+
+    if average not in (AverageMethod.MICRO, AverageMethod.NONE, None):
+        weights = weights / weights.sum(dim=-1, keepdim=True)
+
+    scores = weights * (numerator / denominator)
+
+    # This is in case where sum(weights) = 0, which happens if we ignore the only present class with average='weighted'
+    scores = B.where(B.isnan(scores), tensor(float(zero_division), device=scores.device), scores)
+
+    if mdmc_average == MDMCAverageMethod.SAMPLEWISE:
+        scores = scores.mean(dim=0)
+        ignore_mask = ignore_mask.sum(dim=0).bool()
+
+    if average in (AverageMethod.NONE, None):
+        scores = B.where(ignore_mask, tensor(float("nan"), device=scores.device), scores)
+    else:
+        scores = scores.sum()
+
+    return scores
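To make the weighted branch concrete, a hand-computed sketch with hypothetical per-class statistics (three classes, the middle one absent from the data), again in NumPy:

    import numpy as np

    numerator = np.array([2.0, 0.0, 3.0])    # e.g. per-class tp
    denominator = np.array([4.0, 0.0, 3.0])  # e.g. per-class tp + fn
    weights = denominator.copy()             # support-style weights

    zero_div = denominator == 0
    num = np.where(zero_div, 0.0, numerator)    # zero_division = 0
    den = np.where(zero_div, 1.0, denominator)
    w = weights / weights.sum()

    print((w * (num / den)).sum())  # (4/7) * 0.5 + 0 + (3/7) * 1.0 = 0.7142...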
+
+
+def stat_scores(
+    preds: Tensor,
+    target: Tensor,
+    reduce: str = "micro",
+    mdmc_reduce: Optional[str] = None,
+    num_classes: Optional[int] = None,
+    top_k: Optional[int] = None,
+    threshold: float = 0.5,
+    multiclass: Optional[bool] = None,
+    ignore_index: Optional[int] = None,
+) -> Tensor:
+    r"""Computes the number of true positives, false positives, true negatives, false negatives.
+    Related to `Type I and Type II errors`_
+    and the `confusion matrix`_.
+
+    The reduction method (how the statistics are aggregated) is controlled by the
+    ``reduce`` parameter, and additionally by the ``mdmc_reduce`` parameter in the
+    multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`.
+
+    Args:
+        preds: Predictions from model (probabilities, logits or labels)
+        target: Ground truth values
+        threshold:
+            Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
+            of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
+
+        top_k:
+            Number of highest probability or logit score predictions considered to find the correct label,
+            relevant only for (multi-dimensional) multi-class inputs. The
+            default value (``None``) will be interpreted as 1 for these inputs.
+
+            Should be left at default (``None``) for all other types of inputs.
+
+        reduce:
+            Defines the reduction that is applied. Should be one of the following:
+
+            - ``'micro'`` [default]: Counts the statistics by summing over all [sample, class]
+              combinations (globally). Each statistic is represented by a single integer.
+            - ``'macro'``: Counts the statistics for each class separately (over all samples).
+              Each statistic is represented by a ``(C,)`` tensor. Requires ``num_classes``
+              to be set.
+            - ``'samples'``: Counts the statistics for each sample separately (over all classes).
+              Each statistic is represented by a ``(N,)`` 1d tensor.
+
+            .. note:: What is considered a sample in the multi-dimensional multi-class case
+                depends on the value of ``mdmc_reduce``.
+
+        num_classes:
+            Number of classes. Necessary for (multi-dimensional) multi-class or multi-label data.
+
+        ignore_index:
+            Specify a class (label) to ignore. If given, this class index does not contribute
+            to the returned score, regardless of reduction method. If an index is ignored, and
+            ``reduce='macro'``, the class statistics for the ignored class will all be returned
+            as ``-1``.
+
+        mdmc_reduce:
+            Defines how the multi-dimensional multi-class inputs are handled. Should be
+            one of the following:
+
+            - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional
+              multi-class (see :ref:`references/modules:input types` for the definition of input types).
+
+            - ``'samplewise'``: In this case, the statistics are computed separately for each
+              sample on the ``N`` axis, and then the outputs are concatenated together. In each
+              sample the extra axes ``...`` are flattened to become the sub-sample axis, and
+              statistics for each sample are computed by treating the sub-sample axis as the
+              ``N`` axis for that sample.
+
+            - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs are
+              flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they
+              were ``(N_X, C)``. From here on the ``reduce`` parameter applies as usual.
+
+        multiclass:
+            Used only in certain special cases, where you want to treat inputs as a different type
+            than what they appear to be. See the parameter's
+            :ref:`documentation section `
+            for a more detailed explanation and examples.
+
+    Return:
+        The metric returns a tensor of shape ``(..., 5)``, where the last dimension corresponds
+        to ``[tp, fp, tn, fn, sup]`` (``sup`` stands for support and equals ``tp + fn``). The
+        shape depends on the ``reduce`` and ``mdmc_reduce`` (in case of multi-dimensional
+        multi-class data) parameters:
+
+        - If the data is not multi-dimensional multi-class, then
+
+          - If ``reduce='micro'``, the shape will be ``(5, )``
+          - If ``reduce='macro'``, the shape will be ``(C, 5)``,
+            where ``C`` stands for the number of classes
+          - If ``reduce='samples'``, the shape will be ``(N, 5)``, where ``N`` stands for
+            the number of samples
+
+        - If the data is multi-dimensional multi-class and ``mdmc_reduce='global'``, then
+
+          - If ``reduce='micro'``, the shape will be ``(5, )``
+          - If ``reduce='macro'``, the shape will be ``(C, 5)``
+          - If ``reduce='samples'``, the shape will be ``(N*X, 5)``, where ``X`` stands for
+            the product of sizes of all "extra" dimensions of the data (i.e. all dimensions
+            except for ``C`` and ``N``)
+
+        - If the data is multi-dimensional multi-class and ``mdmc_reduce='samplewise'``, then
+
+          - If ``reduce='micro'``, the shape will be ``(N, 5)``
+          - If ``reduce='macro'``, the shape will be ``(N, C, 5)``
+          - If ``reduce='samples'``, the shape will be ``(N, X, 5)``
+
+    Raises:
+        ValueError:
+            If ``reduce`` is none of ``"micro"``, ``"macro"`` or ``"samples"``.
+        ValueError:
+            If ``mdmc_reduce`` is none of ``None``, ``"samplewise"``, ``"global"``.
+        ValueError:
+            If ``reduce`` is set to ``"macro"`` and ``num_classes`` is not provided.
+        ValueError:
+            If ``num_classes`` is set
+            and ``ignore_index`` is not in the range ``[0, num_classes)``.
+        ValueError:
+            If ``ignore_index`` is used with ``binary data``.
+        ValueError:
+            If inputs are ``multi-dimensional multi-class`` and ``mdmc_reduce`` is not provided.
+ + Example: + >>> from paddlemetrics.functional import stat_scores + >>> preds = B.tensor([1, 0, 2, 1]) + >>> target = B.tensor([1, 1, 2, 0]) + >>> stat_scores(preds, target, reduce='macro', num_classes=3) + tensor([[0, 1, 2, 1, 1], + [1, 1, 1, 1, 2], + [1, 0, 3, 0, 1]]) + >>> stat_scores(preds, target, reduce='micro') + tensor([2, 2, 6, 2, 4]) + + """ + if reduce not in ["micro", "macro", "samples"]: + raise ValueError(f"The `reduce` {reduce} is not valid.") + + if mdmc_reduce not in [None, "samplewise", "global"]: + raise ValueError(f"The `mdmc_reduce` {mdmc_reduce} is not valid.") + + if reduce == "macro" and (not num_classes or num_classes < 1): + raise ValueError("When you set `reduce` as 'macro', you have to provide the number of classes.") + + if num_classes and ignore_index is not None and (not 0 <= ignore_index < num_classes or num_classes == 1): + raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes") + + tp, fp, tn, fn = _stat_scores_update( + preds, + target, + reduce=reduce, + mdmc_reduce=mdmc_reduce, + top_k=top_k, + threshold=threshold, + num_classes=num_classes, + multiclass=multiclass, + ignore_index=ignore_index, + ) + return _stat_scores_compute(tp, fp, tn, fn) diff --git a/EE/paddlemetric/src/paddlemetrics/functional/image/__init__.py b/EE/paddlemetric/src/paddlemetrics/functional/image/__init__.py new file mode 100644 index 000000000..9fe64120c --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/image/__init__.py @@ -0,0 +1,16 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from paddlemetrics.functional.image.gradients import image_gradients # noqa: F401 +from paddlemetrics.functional.image.psnr import psnr # noqa: F401 diff --git a/EE/paddlemetric/src/paddlemetrics/functional/image/gradients.py b/EE/paddlemetric/src/paddlemetrics/functional/image/gradients.py new file mode 100644 index 000000000..abe1b08d5 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/image/gradients.py @@ -0,0 +1,81 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+
+def _image_gradients_validate(img: Tensor) -> None:
+    """Validates whether ``img`` is a 4D tensor."""
+
+    if not isinstance(img, Tensor):
+        raise TypeError(f"The `img` expects a value of type Tensor but got {type(img)}")
+    if img.ndim != 4:
+        raise RuntimeError(f"The `img` expects a 4D tensor but got {img.ndim}D tensor")
+
+
+def _compute_image_gradients(img: Tensor) -> Tuple[Tensor, Tensor]:
+    """Computes image gradients (dy/dx) for a given image."""
+
+    batch_size, channels, height, width = img.shape
+
+    dy = img[..., 1:, :] - img[..., :-1, :]
+    dx = img[..., :, 1:] - img[..., :, :-1]
+
+    shapey = [batch_size, channels, 1, width]
+    dy = B.cat([dy, B.zeros(shapey, device=img.device, dtype=img.dtype)], dim=2)
+    dy = dy.view(img.shape)
+
+    shapex = [batch_size, channels, height, 1]
+    dx = B.cat([dx, B.zeros(shapex, device=img.device, dtype=img.dtype)], dim=3)
+    dx = dx.view(img.shape)
+
+    return dy, dx
+
+
+def image_gradients(img: Tensor) -> Tuple[Tensor, Tensor]:
+    """Computes `Gradient Computation of Image`_ of a given image using finite difference.
+
+    Args:
+        img: An ``(N, C, H, W)`` input tensor where C is the number of image channels
+
+    Return:
+        Tuple of (dy, dx) with each gradient of shape ``[N, C, H, W]``
+
+    Raises:
+        TypeError:
+            If ``img`` is not of the type ``Tensor``.
+        RuntimeError:
+            If ``img`` is not a 4D tensor.
+
+    Example:
+        >>> from paddlemetrics.functional import image_gradients
+        >>> image = B.arange(0, 1*1*5*5, dtype=B.float32)
+        >>> image = B.reshape(image, (1, 1, 5, 5))
+        >>> dy, dx = image_gradients(image)
+        >>> dy[0, 0, :, :]
+        tensor([[5., 5., 5., 5., 5.],
+                [5., 5., 5., 5., 5.],
+                [5., 5., 5., 5., 5.],
+                [5., 5., 5., 5., 5.],
+                [0., 0., 0., 0., 0.]])
+
+    .. note:: The implementation follows the 1-step finite difference method as followed
+        by the TF implementation. The values are organized such that the gradient
+        [I(x+1, y) - I(x, y)] is at the (x, y) location
+    """
+    _image_gradients_validate(img)
+
+    return _compute_image_gradients(img)
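The zero-padding convention above (each gradient stored at the "from" pixel, with the last row or column zeroed) is easy to see on the doctest's 5x5 ramp; in NumPy:

    import numpy as np

    img = np.arange(25, dtype=np.float32).reshape(5, 5)  # rows increase by 5
    dy = np.zeros_like(img)
    dy[:-1, :] = img[1:, :] - img[:-1, :]  # forward difference, zero-padded

    print(dy[:, 0])  # [5. 5. 5. 5. 0.]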
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/image/psnr.py b/EE/paddlemetric/src/paddlemetrics/functional/image/psnr.py
new file mode 100644
index 000000000..2ffd60461
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/image/psnr.py
@@ -0,0 +1,150 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional, Tuple, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.utilities import rank_zero_warn, reduce
+
+
+def _psnr_compute(
+    sum_squared_error: Tensor,
+    n_obs: Tensor,
+    data_range: Tensor,
+    base: float = 10.0,
+    reduction: str = "elementwise_mean",
+) -> Tensor:
+    """Computes peak signal-to-noise ratio.
+
+    Args:
+        sum_squared_error: Sum of square of errors over all observations
+        n_obs: Number of predictions or observations
+        data_range: the range of the data. If None, it is determined from the data (max - min).
+            ``data_range`` must be given when ``dim`` is not None.
+        base: a base of a logarithm to use (default: 10)
+        reduction: a method to reduce metric score over labels.
+
+            - ``'elementwise_mean'``: takes the mean (default)
+            - ``'sum'``: takes the sum
+            - ``'none'``: no reduction will be applied
+
+    Example:
+        >>> preds = B.tensor([[0.0, 1.0], [2.0, 3.0]])
+        >>> target = B.tensor([[3.0, 2.0], [1.0, 0.0]])
+        >>> data_range = target.max() - target.min()
+        >>> sum_squared_error, n_obs = _psnr_update(preds, target)
+        >>> _psnr_compute(sum_squared_error, n_obs, data_range)
+        tensor(2.5527)
+    """
+
+    psnr_base_e = 2 * B.log(data_range) - B.log(sum_squared_error / n_obs)
+    psnr_vals = psnr_base_e * (10 / B.log(tensor(base)))
+    return reduce(psnr_vals, reduction=reduction)
+
+
+def _psnr_update(
+    preds: Tensor,
+    target: Tensor,
+    dim: Optional[Union[int, Tuple[int, ...]]] = None,
+) -> Tuple[Tensor, Tensor]:
+    """Updates and returns variables required to compute peak signal-to-noise ratio.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+        dim:
+            Dimensions to reduce PSNR scores over, provided as either an integer or a list of integers. Default is
+            None, meaning scores will be reduced across all dimensions.
+    """
+
+    if dim is None:
+        sum_squared_error = B.sum(B.pow(preds - target, 2))
+        n_obs = tensor(target.numel(), device=target.device)
+        return sum_squared_error, n_obs
+
+    diff = preds - target
+    sum_squared_error = B.sum(diff * diff, dim=dim)
+
+    if isinstance(dim, int):
+        dim_list = [dim]
+    else:
+        dim_list = list(dim)
+    if not dim_list:
+        n_obs = tensor(target.numel(), device=target.device)
+    else:
+        n_obs = tensor(target.size(), device=target.device)[dim_list].prod()
+        n_obs = n_obs.expand_as(sum_squared_error)
+
+    return sum_squared_error, n_obs
+
+
+def psnr(
+    preds: Tensor,
+    target: Tensor,
+    data_range: Optional[float] = None,
+    base: float = 10.0,
+    reduction: str = "elementwise_mean",
+    dim: Optional[Union[int, Tuple[int, ...]]] = None,
+) -> Tensor:
+    """Computes the peak signal-to-noise ratio.
+
+    Args:
+        preds: estimated signal
+        target: ground truth signal
+        data_range:
+            the range of the data. If None, it is determined from the data (max - min). ``data_range`` must be given
+            when ``dim`` is not None.
+        base: a base of a logarithm to use (default: 10)
+        reduction: a method to reduce metric score over labels.
+
+            - ``'elementwise_mean'``: takes the mean (default)
+            - ``'sum'``: takes the sum
+            - ``'none'``: no reduction will be applied
+
+        dim:
+            Dimensions to reduce PSNR scores over, provided as either an integer or a list of integers. Default is
+            None, meaning scores will be reduced across all dimensions.
+
+    Return:
+        Tensor with PSNR score
+
+    Raises:
+        ValueError:
+            If ``dim`` is not ``None`` and ``data_range`` is not provided.
+
+    Example:
+        >>> from paddlemetrics.functional import psnr
+        >>> pred = B.tensor([[0.0, 1.0], [2.0, 3.0]])
+        >>> target = B.tensor([[3.0, 2.0], [1.0, 0.0]])
+        >>> psnr(pred, target)
+        tensor(2.5527)
+
+    .. note::
+        Half precision is only supported on GPU for this metric
+    """
+    if dim is None and reduction != "elementwise_mean":
+        rank_zero_warn(f"The `reduction={reduction}` will not have any effect when `dim` is None.")
+
+    if data_range is None:
+        if dim is not None:
+            # Maybe we could use `B.amax(target, dim=dim) - B.amin(target, dim=dim)` in PyTorch 1.7 to calculate
+            # `data_range` in the future.
+            raise ValueError("The `data_range` must be given when `dim` is not None.")
+
+        data_range = target.max() - target.min()
+    else:
+        data_range = tensor(float(data_range))
+    sum_squared_error, n_obs = _psnr_update(preds, target, dim=dim)
+    return _psnr_compute(sum_squared_error, n_obs, data_range, base=base, reduction=reduction)
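The two-log form used in `_psnr_compute` is algebraically 10 * log10(data_range**2 / mse); re-deriving the module doctest by hand in NumPy:

    import numpy as np

    preds = np.array([[0.0, 1.0], [2.0, 3.0]])
    target = np.array([[3.0, 2.0], [1.0, 0.0]])

    data_range = target.max() - target.min()     # 3.0
    mse = np.mean((preds - target) ** 2)         # 5.0
    print(10 * np.log10(data_range ** 2 / mse))  # 2.5527...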
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/image/ssim.py b/EE/paddlemetric/src/paddlemetrics/functional/image/ssim.py
new file mode 100644
index 000000000..52af9b793
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/image/ssim.py
@@ -0,0 +1,225 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional, Sequence, Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.utilities.checks import _check_same_shape
+from paddlemetrics.utilities.distributed import reduce
+
+
+def _gaussian(kernel_size: int, sigma: float, dtype: B.dtype, device: B.device) -> Tensor:
+    """Computes 1D Gaussian kernel.
+
+    Args:
+        kernel_size: size of the Gaussian kernel
+        sigma: Standard deviation of the Gaussian kernel
+        dtype: data type of the output tensor
+        device: device of the output tensor
+
+    Example:
+        >>> _gaussian(3, 1, B.float, 'cpu')
+        tensor([[0.2741, 0.4519, 0.2741]])
+    """
+    dist = B.arange(start=(1 - kernel_size) / 2, end=(1 + kernel_size) / 2, step=1, dtype=dtype, device=device)
+    gauss = B.exp(-B.pow(dist / sigma, 2) / 2)
+    return (gauss / gauss.sum()).unsqueeze(dim=0)  # (1, kernel_size)
+
+
+def _gaussian_kernel(
+    channel: int, kernel_size: Sequence[int], sigma: Sequence[float], dtype: B.dtype, device: B.device
+) -> Tensor:
+    """Computes 2D Gaussian kernel.
+
+    Args:
+        channel: number of channels in the image
+        kernel_size: size of the Gaussian kernel as a tuple (h, w)
+        sigma: Standard deviation of the Gaussian kernel
+        dtype: data type of the output tensor
+        device: device of the output tensor
+
+    Example:
+        >>> _gaussian_kernel(1, (5,5), (1,1), B.float, "cpu")
+        tensor([[[[0.0030, 0.0133, 0.0219, 0.0133, 0.0030],
+                  [0.0133, 0.0596, 0.0983, 0.0596, 0.0133],
+                  [0.0219, 0.0983, 0.1621, 0.0983, 0.0219],
+                  [0.0133, 0.0596, 0.0983, 0.0596, 0.0133],
+                  [0.0030, 0.0133, 0.0219, 0.0133, 0.0030]]]])
+    """
+
+    gaussian_kernel_x = _gaussian(kernel_size[0], sigma[0], dtype, device)
+    gaussian_kernel_y = _gaussian(kernel_size[1], sigma[1], dtype, device)
+    kernel = B.matmul(gaussian_kernel_x.t(), gaussian_kernel_y)  # (kernel_size, 1) * (1, kernel_size)
+
+    return kernel.expand(channel, 1, kernel_size[0], kernel_size[1])
+
+
+def _ssim_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
+    """Updates and returns variables required to compute Structural Similarity Index Measure. Checks for same shape
+    and type of the input tensors.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+    """
+
+    if preds.dtype != target.dtype:
+        raise TypeError(
+            "Expected `preds` and `target` to have the same data type."
+            f" Got preds: {preds.dtype} and target: {target.dtype}."
+        )
+    _check_same_shape(preds, target)
+    if len(preds.shape) != 4:
+        raise ValueError(
+            "Expected `preds` and `target` to have BxCxHxW shape."
+            f" Got preds: {preds.shape} and target: {target.shape}."
+        )
+    return preds, target
+
+
+def _ssim_compute(
+    preds: Tensor,
+    target: Tensor,
+    kernel_size: Sequence[int] = (11, 11),
+    sigma: Sequence[float] = (1.5, 1.5),
+    reduction: str = "elementwise_mean",
+    data_range: Optional[float] = None,
+    k1: float = 0.01,
+    k2: float = 0.03,
+) -> Tensor:
+    """Computes Structural Similarity Index Measure.
+
+    Args:
+        preds: estimated image
+        target: ground truth image
+        kernel_size: size of the Gaussian kernel (default: (11, 11))
+        sigma: Standard deviation of the Gaussian kernel (default: (1.5, 1.5))
+        reduction: a method to reduce metric score over labels.
+
+            - ``'elementwise_mean'``: takes the mean (default)
+            - ``'sum'``: takes the sum
+            - ``'none'``: no reduction will be applied
+
+        data_range: Range of the image. If ``None``, it is determined from the image (max - min)
+        k1: Parameter of SSIM. Default: 0.01
+        k2: Parameter of SSIM. Default: 0.03
+
+    Example:
+        >>> preds = B.rand([16, 1, 16, 16])
+        >>> target = preds * 0.75
+        >>> preds, target = _ssim_update(preds, target)
+        >>> _ssim_compute(preds, target)
+        tensor(0.9219)
+    """
+    if len(kernel_size) != 2 or len(sigma) != 2:
+        raise ValueError(
+            "Expected `kernel_size` and `sigma` to have the length of two."
+            f" Got kernel_size: {len(kernel_size)} and sigma: {len(sigma)}."
+        )
+
+    if any(x % 2 == 0 or x <= 0 for x in kernel_size):
+        raise ValueError(f"Expected `kernel_size` to consist of odd positive numbers. Got {kernel_size}.")
+
+    if any(y <= 0 for y in sigma):
+        raise ValueError(f"Expected `sigma` to contain positive numbers. Got {sigma}.")
+
+    if data_range is None:
+        data_range = max(preds.max() - preds.min(), target.max() - target.min())
+
+    c1 = pow(k1 * data_range, 2)
+    c2 = pow(k2 * data_range, 2)
+    device = preds.device
+
+    channel = preds.size(1)
+    dtype = preds.dtype
+    kernel = _gaussian_kernel(channel, kernel_size, sigma, dtype, device)
+    pad_h = (kernel_size[0] - 1) // 2
+    pad_w = (kernel_size[1] - 1) // 2
+
+    preds = B.pad(preds, (pad_h, pad_h, pad_w, pad_w), mode="reflect")
+    target = B.pad(target, (pad_h, pad_h, pad_w, pad_w), mode="reflect")
+
+    input_list = B.cat((preds, target, preds * preds, target * target, preds * target))  # (5 * B, C, H, W)
+    outputs = B.conv2d(input_list, kernel, groups=channel)
+    output_list = outputs.split(preds.shape[0])
+
+    mu_pred_sq = output_list[0].pow(2)
+    mu_target_sq = output_list[1].pow(2)
+    mu_pred_target = output_list[0] * output_list[1]
+
+    sigma_pred_sq = output_list[2] - mu_pred_sq
+    sigma_target_sq = output_list[3] - mu_target_sq
+    sigma_pred_target = output_list[4] - mu_pred_target
+
+    upper = 2 * sigma_pred_target + c2
+    lower = sigma_pred_sq + sigma_target_sq + c2
+
+    ssim_idx = ((2 * mu_pred_target + c1) * upper) / ((mu_pred_sq + mu_target_sq + c1) * lower)
+    ssim_idx = ssim_idx[..., pad_h:-pad_h, pad_w:-pad_w]
+
+    return reduce(ssim_idx, reduction)
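The separable window used by `_ssim_compute` is a normalized sampled Gaussian; the `_gaussian` doctest values can be reproduced in a few lines of NumPy:

    import numpy as np

    kernel_size, sigma = 3, 1.0
    dist = np.arange((1 - kernel_size) / 2, (1 + kernel_size) / 2)  # [-1., 0., 1.]
    gauss = np.exp(-((dist / sigma) ** 2) / 2)
    print(np.round(gauss / gauss.sum(), 4))  # [0.2741 0.4519 0.2741]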
+
+
+def ssim(
+    preds: Tensor,
+    target: Tensor,
+    kernel_size: Sequence[int] = (11, 11),
+    sigma: Sequence[float] = (1.5, 1.5),
+    reduction: str = "elementwise_mean",
+    data_range: Optional[float] = None,
+    k1: float = 0.01,
+    k2: float = 0.03,
+) -> Tensor:
+    """Computes Structural Similarity Index Measure.
+
+    Args:
+        preds: estimated image
+        target: ground truth image
+        kernel_size: size of the Gaussian kernel (default: (11, 11))
+        sigma: Standard deviation of the Gaussian kernel (default: (1.5, 1.5))
+        reduction: a method to reduce metric score over labels.
+
+            - ``'elementwise_mean'``: takes the mean (default)
+            - ``'sum'``: takes the sum
+            - ``'none'``: no reduction will be applied
+
+        data_range: Range of the image. If ``None``, it is determined from the image (max - min)
+        k1: Parameter of SSIM. Default: 0.01
+        k2: Parameter of SSIM. Default: 0.03
+
+    Return:
+        Tensor with SSIM score
+
+    Raises:
+        TypeError:
+            If ``preds`` and ``target`` don't have the same data type.
+        ValueError:
+            If ``preds`` and ``target`` don't have ``BxCxHxW`` shape.
+        ValueError:
+            If the length of ``kernel_size`` or ``sigma`` is not ``2``.
+        ValueError:
+            If one of the elements of ``kernel_size`` is not an ``odd positive number``.
+        ValueError:
+            If one of the elements of ``sigma`` is not a ``positive number``.
+
+    Example:
+        >>> from paddlemetrics.functional import ssim
+        >>> preds = B.rand([16, 1, 16, 16])
+        >>> target = preds * 0.75
+        >>> ssim(preds, target)
+        tensor(0.9219)
+    """
+    preds, target = _ssim_update(preds, target)
+    return _ssim_compute(preds, target, kernel_size, sigma, reduction, data_range, k1, k2)
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/pairwise/__init__.py b/EE/paddlemetric/src/paddlemetrics/functional/pairwise/__init__.py
new file mode 100644
index 000000000..1d28d0c4b
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/pairwise/__init__.py
@@ -0,0 +1,17 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from paddlemetrics.functional.pairwise.cosine import pairwise_cosine_similarity  # noqa: F401
+from paddlemetrics.functional.pairwise.euclidean import pairwise_euclidean_distance  # noqa: F401
+from paddlemetrics.functional.pairwise.linear import pairwise_linear_similarity  # noqa: F401
+from paddlemetrics.functional.pairwise.manhatten import pairwise_manhatten_distance  # noqa: F401
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/pairwise/cosine.py b/EE/paddlemetric/src/paddlemetrics/functional/pairwise/cosine.py
new file mode 100644
index 000000000..cdd24e155
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/pairwise/cosine.py
@@ -0,0 +1,85 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix
+
+
+def _pairwise_cosine_similarity_update(
+    x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None
+) -> Tensor:
+    """Calculates the pairwise cosine similarity matrix.
+
+    Args:
+        x: tensor of shape ``[N,d]``
+        y: tensor of shape ``[M,d]``
+        zero_diagonal: determines if the diagonal of the distance matrix should be set to zero
+    """
+    x, y, zero_diagonal = _check_input(x, y, zero_diagonal)
+
+    norm = B.norm(x, p=2, dim=1)
+    x /= norm.unsqueeze(1)
+    norm = B.norm(y, p=2, dim=1)
+    y /= norm.unsqueeze(1)
+
+    distance = x @ y.T
+    if zero_diagonal:
+        distance.fill_diagonal_(0)
+    return distance
+
+
+def pairwise_cosine_similarity(
+    x: Tensor, y: Optional[Tensor] = None, reduction: Optional[str] = None, zero_diagonal: Optional[bool] = None
+) -> Tensor:
+    r"""
+    Calculates pairwise cosine similarity:
+
+    .. math::
+        s_{cos}(x,y) = \frac{\langle x, y \rangle}{||x|| \cdot ||y||}
+        = \frac{\sum_{d=1}^D x_d \cdot y_d}{\sqrt{\sum_{d=1}^D x_d^2} \cdot \sqrt{\sum_{d=1}^D y_d^2}}
+
+    If both `x` and `y` are passed in, the calculation will be performed pairwise between the rows of `x` and `y`.
+    If only `x` is passed in, the calculation will be performed between the rows of `x`.
+
+    Args:
+        x: Tensor with shape ``[N, d]``
+        y: Tensor with shape ``[M, d]``, optional
+        reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`
+            (applied along column dimension) or `'none'`, `None` for no reduction
+        zero_diagonal: if the diagonal of the distance matrix should be set to 0. If only `x` is given
+            this defaults to `True` else if `y` is also given it defaults to `False`
+
+    Returns:
+        A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics.functional import pairwise_cosine_similarity
+        >>> x = B.tensor([[2, 3], [3, 5], [5, 8]], dtype=B.float32)
+        >>> y = B.tensor([[1, 0], [2, 1]], dtype=B.float32)
+        >>> pairwise_cosine_similarity(x, y)
+        tensor([[0.5547, 0.8682],
+                [0.5145, 0.8437],
+                [0.5300, 0.8533]])
+        >>> pairwise_cosine_similarity(x)
+        tensor([[0.0000, 0.9989, 0.9996],
+                [0.9989, 0.0000, 0.9998],
+                [0.9996, 0.9998, 0.0000]])
+
+    """
+    distance = _pairwise_cosine_similarity_update(x, y, zero_diagonal)
+    return _reduce_distance_matrix(distance, reduction)
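`_pairwise_cosine_similarity_update` is row-normalization followed by a matrix product; the doctest values fall out directly in NumPy:

    import numpy as np

    x = np.array([[2.0, 3.0], [3.0, 5.0], [5.0, 8.0]])
    y = np.array([[1.0, 0.0], [2.0, 1.0]])

    xn = x / np.linalg.norm(x, axis=1, keepdims=True)
    yn = y / np.linalg.norm(y, axis=1, keepdims=True)
    print(np.round(xn @ yn.T, 4))
    # [[0.5547 0.8682]
    #  [0.5145 0.8437]
    #  [0.53   0.8533]]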
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/pairwise/euclidean.py b/EE/paddlemetric/src/paddlemetrics/functional/pairwise/euclidean.py
new file mode 100644
index 000000000..fd31cd7f7
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/pairwise/euclidean.py
@@ -0,0 +1,79 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional
+
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix
+
+
+def _pairwise_euclidean_distance_update(
+    x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None
+) -> Tensor:
+    """Calculates the pairwise Euclidean distance matrix.
+
+    Args:
+        x: tensor of shape ``[N,d]``
+        y: tensor of shape ``[M,d]``
+        zero_diagonal: determines if the diagonal of the distance matrix should be set to zero
+    """
+    x, y, zero_diagonal = _check_input(x, y, zero_diagonal)
+    x_norm = x.norm(dim=1, keepdim=True)
+    y_norm = y.norm(dim=1).T
+    distance = x_norm * x_norm + y_norm * y_norm - 2 * x.mm(y.T)
+    if zero_diagonal:
+        distance.fill_diagonal_(0)
+    return distance.sqrt()
+
+
+def pairwise_euclidean_distance(
+    x: Tensor, y: Optional[Tensor] = None, reduction: Optional[str] = None, zero_diagonal: Optional[bool] = None
+) -> Tensor:
+    r"""
+    Calculates pairwise Euclidean distances:
+
+    .. math::
+        d_{euc}(x,y) = ||x - y||_2 = \sqrt{\sum_{d=1}^D (x_d - y_d)^2}
+
+    If both `x` and `y` are passed in, the calculation will be performed pairwise between the rows of `x` and `y`.
+    If only `x` is passed in, the calculation will be performed between the rows of `x`.
+
+    Args:
+        x: Tensor with shape ``[N, d]``
+        y: Tensor with shape ``[M, d]``, optional
+        reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`
+            (applied along column dimension) or `'none'`, `None` for no reduction
+        zero_diagonal: if the diagonal of the distance matrix should be set to 0. If only `x` is given
+            this defaults to `True` else if `y` is also given it defaults to `False`
+
+    Returns:
+        A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics.functional import pairwise_euclidean_distance
+        >>> x = B.tensor([[2, 3], [3, 5], [5, 8]], dtype=B.float32)
+        >>> y = B.tensor([[1, 0], [2, 1]], dtype=B.float32)
+        >>> pairwise_euclidean_distance(x, y)
+        tensor([[3.1623, 2.0000],
+                [5.3852, 4.1231],
+                [8.9443, 7.6158]])
+        >>> pairwise_euclidean_distance(x)
+        tensor([[0.0000, 2.2361, 5.8310],
+                [2.2361, 0.0000, 3.6056],
+                [5.8310, 3.6056, 0.0000]])
+
+    """
+    distance = _pairwise_euclidean_distance_update(x, y, zero_diagonal)
+    return _reduce_distance_matrix(distance, reduction)
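`_pairwise_euclidean_distance_update` expands ||x - y||^2 = ||x||^2 + ||y||^2 - 2<x, y> rather than materializing pairwise differences; a NumPy check that both routes agree:

    import numpy as np

    x = np.array([[2.0, 3.0], [3.0, 5.0], [5.0, 8.0]])
    y = np.array([[1.0, 0.0], [2.0, 1.0]])

    x_sq = (x ** 2).sum(axis=1, keepdims=True)  # ||x||^2, shape (N, 1)
    y_sq = (y ** 2).sum(axis=1)                 # ||y||^2, shape (M,)
    fast = np.sqrt(x_sq + y_sq - 2 * x @ y.T)
    naive = np.sqrt(((x[:, None, :] - y[None, :, :]) ** 2).sum(axis=-1))
    assert np.allclose(fast, naive)  # [[3.1623 2.] [5.3852 4.1231] [8.9443 7.6158]]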
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/pairwise/helpers.py b/EE/paddlemetric/src/paddlemetrics/functional/pairwise/helpers.py
new file mode 100644
index 000000000..2d38916af
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/pairwise/helpers.py
@@ -0,0 +1,59 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional, Tuple
+
+from paddleext.torchapi import Tensor
+
+
+def _check_input(
+    x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None
+) -> Tuple[Tensor, Tensor, bool]:
+    """Checks that the input has the right dimensionality and sets the ``zero_diagonal`` argument if the user has
+    not provided it.
+
+    Args:
+        x: tensor of shape ``[N,d]``
+        y: if provided, a tensor of shape ``[M,d]``
+        zero_diagonal: determines if the diagonal of the distance matrix should be set to zero
+    """
+    if x.ndim != 2:
+        raise ValueError(f"Expected argument `x` to be a 2D tensor of shape `[N, d]` but got {x.shape}")
+
+    if y is not None:
+        if y.ndim != 2 or y.shape[1] != x.shape[1]:
+            raise ValueError(
+                "Expected argument `y` to be a 2D tensor of shape `[M, d]` where"
+                " `d` should be same as the last dimension of `x`"
+            )
+        zero_diagonal = False if zero_diagonal is None else zero_diagonal
+    else:
+        y = x.clone()
+        zero_diagonal = True if zero_diagonal is None else zero_diagonal
+    return x, y, zero_diagonal
+
+
+def _reduce_distance_matrix(distmat: Tensor, reduction: Optional[str] = None) -> Tensor:
+    """Final reduction of distance matrix.
+
+    Args:
+        distmat: a ``[N,M]`` matrix
+        reduction: string determining how to reduce along last dimension
+    """
+    if reduction == "mean":
+        return distmat.mean(dim=-1)
+    if reduction == "sum":
+        return distmat.sum(dim=-1)
+    if reduction is None or reduction == "none":
+        return distmat
+    raise ValueError(f"Expected reduction to be one of `['mean', 'sum', None]` but got {reduction}")
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/pairwise/linear.py b/EE/paddlemetric/src/paddlemetrics/functional/pairwise/linear.py
new file mode 100644
index 000000000..08e793019
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/pairwise/linear.py
@@ -0,0 +1,78 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional
+
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix
+
+
+def _pairwise_linear_similarity_update(
+    x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None
+) -> Tensor:
+    """Calculates the pairwise linear similarity matrix.
+
+    Args:
+        x: tensor of shape ``[N,d]``
+        y: tensor of shape ``[M,d]``
+        zero_diagonal: determines if the diagonal of the distance matrix should be set to zero
+    """
+    x, y, zero_diagonal = _check_input(x, y, zero_diagonal)
+
+    distance = x @ y.T
+    if zero_diagonal:
+        distance.fill_diagonal_(0)
+    return distance
+
+
+def pairwise_linear_similarity(
+    x: Tensor, y: Optional[Tensor] = None, reduction: Optional[str] = None, zero_diagonal: Optional[bool] = None
+) -> Tensor:
+    r"""
+    Calculates pairwise linear similarity:
+
+    .. math::
+        s_{lin}(x,y) = \langle x, y \rangle = \sum_{d=1}^D x_d \cdot y_d
+
+    If both `x` and `y` are passed in, the calculation will be performed pairwise between the rows of `x` and `y`.
+    If only `x` is passed in, the calculation will be performed between the rows of `x`.
+
+    Args:
+        x: Tensor with shape ``[N, d]``
+        y: Tensor with shape ``[M, d]``, optional
+        reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`
+            (applied along column dimension) or `'none'`, `None` for no reduction
+        zero_diagonal: if the diagonal of the distance matrix should be set to 0. If only `x` is given
+            this defaults to `True` else if `y` is also given it defaults to `False`
+
+    Returns:
+        A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics.functional import pairwise_linear_similarity
+        >>> x = B.tensor([[2, 3], [3, 5], [5, 8]], dtype=B.float32)
+        >>> y = B.tensor([[1, 0], [2, 1]], dtype=B.float32)
+        >>> pairwise_linear_similarity(x, y)
+        tensor([[ 2.,  7.],
+                [ 3., 11.],
+                [ 5., 18.]])
+        >>> pairwise_linear_similarity(x)
+        tensor([[ 0., 21., 34.],
+                [21.,  0., 55.],
+                [34., 55.,  0.]])
+
+    """
+    distance = _pairwise_linear_similarity_update(x, y, zero_diagonal)
+    return _reduce_distance_matrix(distance, reduction)
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/pairwise/manhatten.py b/EE/paddlemetric/src/paddlemetrics/functional/pairwise/manhatten.py
new file mode 100644
index 000000000..d0079bd62
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/pairwise/manhatten.py
@@ -0,0 +1,78 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional
+
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix
+
+
+def _pairwise_manhatten_distance_update(
+    x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None
+) -> Tensor:
+    """Calculates the pairwise Manhattan distance matrix.
+
+    Args:
+        x: tensor of shape ``[N,d]``
+        y: if provided, a tensor of shape ``[M,d]``
+        zero_diagonal: determines if the diagonal of the distance matrix should be set to zero
+    """
+    x, y, zero_diagonal = _check_input(x, y, zero_diagonal)
+
+    distance = (x.unsqueeze(1) - y.unsqueeze(0).repeat(x.shape[0], 1, 1)).abs().sum(dim=-1)
+    if zero_diagonal:
+        distance.fill_diagonal_(0)
+    return distance
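The broadcasting in `_pairwise_manhatten_distance_update` compares every row of `x` against every row of `y` at once; spelled out in NumPy:

    import numpy as np

    x = np.array([[2.0, 3.0], [3.0, 5.0], [5.0, 8.0]])
    y = np.array([[1.0, 0.0], [2.0, 1.0]])

    # (N, 1, d) - (1, M, d) broadcasts to (N, M, d); sum |.| over d.
    print(np.abs(x[:, None, :] - y[None, :, :]).sum(axis=-1))
    # [[ 4.  2.]
    #  [ 7.  5.]
    #  [12. 10.]]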
+
+
+def pairwise_manhatten_distance(
+    x: Tensor, y: Optional[Tensor] = None, reduction: Optional[str] = None, zero_diagonal: Optional[bool] = None
+) -> Tensor:
+    r"""
+    Calculates pairwise Manhattan distance:
+
+    .. math::
+        d_{man}(x,y) = ||x-y||_1 = \sum_{d=1}^D |x_d - y_d|
+
+    If both `x` and `y` are passed in, the calculation will be performed pairwise between the rows of `x` and `y`.
+    If only `x` is passed in, the calculation will be performed between the rows of `x`.
+
+    Args:
+        x: Tensor with shape ``[N, d]``
+        y: Tensor with shape ``[M, d]``, optional
+        reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`
+            (applied along column dimension) or `'none'`, `None` for no reduction
+        zero_diagonal: if the diagonal of the distance matrix should be set to 0. If only `x` is given
+            this defaults to `True` else if `y` is also given it defaults to `False`
+
+    Returns:
+        A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics.functional import pairwise_manhatten_distance
+        >>> x = B.tensor([[2, 3], [3, 5], [5, 8]], dtype=B.float32)
+        >>> y = B.tensor([[1, 0], [2, 1]], dtype=B.float32)
+        >>> pairwise_manhatten_distance(x, y)
+        tensor([[ 4.,  2.],
+                [ 7.,  5.],
+                [12., 10.]])
+        >>> pairwise_manhatten_distance(x)
+        tensor([[0., 3., 8.],
+                [3., 0., 5.],
+                [8., 5., 0.]])
+
+    """
+    distance = _pairwise_manhatten_distance_update(x, y, zero_diagonal)
+    return _reduce_distance_matrix(distance, reduction)
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/regression/__init__.py b/EE/paddlemetric/src/paddlemetrics/functional/regression/__init__.py
new file mode 100644
index 000000000..7ddc60404
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/regression/__init__.py
@@ -0,0 +1,27 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from paddlemetrics.functional.image.psnr import psnr  # noqa: F401
+from paddlemetrics.functional.image.ssim import ssim  # noqa: F401
+from paddlemetrics.functional.regression.cosine_similarity import cosine_similarity  # noqa: F401
+from paddlemetrics.functional.regression.explained_variance import explained_variance  # noqa: F401
+from paddlemetrics.functional.regression.mean_absolute_error import mean_absolute_error  # noqa: F401
+from paddlemetrics.functional.regression.mean_absolute_percentage_error import (  # noqa: F401
+    mean_absolute_percentage_error,
+)
+from paddlemetrics.functional.regression.mean_squared_error import mean_squared_error  # noqa: F401
+from paddlemetrics.functional.regression.mean_squared_log_error import mean_squared_log_error  # noqa: F401
+from paddlemetrics.functional.regression.pearson import pearson_corrcoef  # noqa: F401
+from paddlemetrics.functional.regression.r2 import r2_score  # noqa: F401
+from paddlemetrics.functional.regression.spearman import spearman_corrcoef  # noqa: F401
+from paddlemetrics.functional.regression.tweedie_deviance import tweedie_deviance_score  # noqa: F401
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/regression/cosine_similarity.py b/EE/paddlemetric/src/paddlemetrics/functional/regression/cosine_similarity.py
new file mode 100644
index 000000000..ea0f77a3b
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/regression/cosine_similarity.py
@@ -0,0 +1,98 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Tuple + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.utilities.checks import _check_same_shape + + +def _cosine_similarity_update( + preds: Tensor, + target: Tensor, +) -> Tuple[Tensor, Tensor]: + """Updates and returns variables required to compute Cosine Similarity. Checks for same shape of input tensors. + + Args: + preds: Predicted tensor + target: Ground truth tensor + """ + + _check_same_shape(preds, target) + preds = preds.float() + target = target.float() + + return preds, target + + +def _cosine_similarity_compute(preds: Tensor, target: Tensor, reduction: str = "sum") -> Tensor: + """Computes Cosine Similarity. + + Args: + preds: Predicted tensor + target: Ground truth tensor + reduction: + The method of reducing along the batch dimension using sum, mean or taking the individual scores + + Example: + >>> target = B.tensor([[1, 2, 3, 4], [1, 2, 3, 4]]) + >>> preds = B.tensor([[1, 2, 3, 4], [-1, -2, -3, -4]]) + >>> preds, target = _cosine_similarity_update(preds, target) + >>> _cosine_similarity_compute(preds, target, 'none') + tensor([ 1.0000, -1.0000]) + """ + + dot_product = (preds * target).sum(dim=-1) + preds_norm = preds.norm(dim=-1) + target_norm = target.norm(dim=-1) + similarity = dot_product / (preds_norm * target_norm) + reduction_mapping = { + "sum": B.sum, + "mean": B.mean, + "none": lambda x: x, + None: lambda x: x, + } + return reduction_mapping[reduction](similarity) + + +def cosine_similarity(preds: Tensor, target: Tensor, reduction: str = "sum") -> Tensor: + r""" + Computes the `Cosine Similarity`_ + between targets and predictions: + + .. math:: + cos_{sim}(x,y) = \frac{x \cdot y}{||x|| \cdot ||y||} = + \frac{\sum_{i=1}^n x_i y_i}{\sqrt{\sum_{i=1}^n x_i^2}\sqrt{\sum_{i=1}^n y_i^2}} + + where :math:`y` is a tensor of target values, and :math:`x` is a tensor of predictions. + + Args: + preds: Predicted tensor with shape ``(N,d)`` + target: Ground truth tensor with shape ``(N,d)`` + reduction: + The method of reducing along the batch dimension using sum, mean or taking the individual scores + + Example: + >>> from paddlemetrics.functional.regression import cosine_similarity + >>> target = B.tensor([[1, 2, 3, 4], + ... [1, 2, 3, 4]]) + >>> preds = B.tensor([[1, 2, 3, 4], + ... [-1, -2, -3, -4]]) + >>> cosine_similarity(preds, target, 'none') + tensor([ 1.0000, -1.0000]) + + """ + preds, target = _cosine_similarity_update(preds, target) + return _cosine_similarity_compute(preds, target, reduction) diff --git a/EE/paddlemetric/src/paddlemetrics/functional/regression/explained_variance.py b/EE/paddlemetric/src/paddlemetrics/functional/regression/explained_variance.py new file mode 100644 index 000000000..95ef6acf4 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/regression/explained_variance.py @@ -0,0 +1,137 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Sequence, Tuple, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.utilities.checks import _check_same_shape
+
+
+def _explained_variance_update(preds: Tensor, target: Tensor) -> Tuple[int, Tensor, Tensor, Tensor, Tensor]:
+    """Updates and returns variables required to compute Explained Variance. Checks for same shape of input
+    tensors.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+    """
+
+    _check_same_shape(preds, target)
+
+    n_obs = preds.size(0)
+    sum_error = B.sum(target - preds, dim=0)
+    diff = target - preds
+    sum_squared_error = B.sum(diff * diff, dim=0)
+
+    sum_target = B.sum(target, dim=0)
+    sum_squared_target = B.sum(target * target, dim=0)
+
+    return n_obs, sum_error, sum_squared_error, sum_target, sum_squared_target
+
+
+def _explained_variance_compute(
+    n_obs: Tensor,
+    sum_error: Tensor,
+    sum_squared_error: Tensor,
+    sum_target: Tensor,
+    sum_squared_target: Tensor,
+    multioutput: str = "uniform_average",
+) -> Tensor:
+    """Computes Explained Variance.
+
+    Args:
+        n_obs: Number of predictions or observations
+        sum_error: Sum of errors over all observations
+        sum_squared_error: Sum of square of errors over all observations
+        sum_target: Sum of target values
+        sum_squared_target: Sum of squares of target values
+        multioutput: Defines aggregation in the case of multiple output scores. Can be one
+            of the following strings (default is ``'uniform_average'``):
+
+            * ``'raw_values'`` returns full set of scores
+            * ``'uniform_average'`` scores are uniformly averaged
+            * ``'variance_weighted'`` scores are weighted by their individual variances
+
+    Example:
+        >>> target = B.tensor([[0.5, 1], [-1, 1], [7, -6]])
+        >>> preds = B.tensor([[0, 2], [-1, 2], [8, -5]])
+        >>> n_obs, sum_error, ss_error, sum_target, ss_target = _explained_variance_update(preds, target)
+        >>> _explained_variance_compute(n_obs, sum_error, ss_error, sum_target, ss_target, multioutput='raw_values')
+        tensor([0.9677, 1.0000])
+    """
+
+    diff_avg = sum_error / n_obs
+    numerator = sum_squared_error / n_obs - (diff_avg * diff_avg)
+
+    target_avg = sum_target / n_obs
+    denominator = sum_squared_target / n_obs - (target_avg * target_avg)
+
+    # Take care of division by zero
+    nonzero_numerator = numerator != 0
+    nonzero_denominator = denominator != 0
+    valid_score = nonzero_numerator & nonzero_denominator
+    output_scores = B.ones_like(diff_avg)
+    output_scores[valid_score] = 1.0 - (numerator[valid_score] / denominator[valid_score])
+    output_scores[nonzero_numerator & ~nonzero_denominator] = 0.0
+
+    # Decide what to do in multioutput case
+    # Todo: allow user to pass in tensor with weights
+    if multioutput == "raw_values":
+        return output_scores
+    if multioutput == "uniform_average":
+        return B.mean(output_scores)
+    if multioutput == "variance_weighted":
+        denom_sum = B.sum(denominator)
+        return B.sum(denominator / denom_sum * output_scores)
+    raise ValueError(f"Invalid input to multioutput: {multioutput}")
+
+
+def explained_variance(
+    preds: Tensor,
+    target: Tensor,
+    multioutput: str = "uniform_average",
+) -> Union[Tensor, Sequence[Tensor]]:
+    """Computes explained variance.
+ + Args: + preds: estimated labels + target: ground truth labels + multioutput: Defines aggregation in the case of multiple output scores. Can be one + of the following strings (default is `'uniform_average'`.): + + * `'raw_values'` returns full set of scores + * `'uniform_average'` scores are uniformly averaged + * `'variance_weighted'` scores are weighted by their individual variances + + Example: + >>> from paddlemetrics.functional import explained_variance + >>> target = B.tensor([3, -0.5, 2, 7]) + >>> preds = B.tensor([2.5, 0.0, 2, 8]) + >>> explained_variance(preds, target) + tensor(0.9572) + + >>> target = B.tensor([[0.5, 1], [-1, 1], [7, -6]]) + >>> preds = B.tensor([[0, 2], [-1, 2], [8, -5]]) + >>> explained_variance(preds, target, multioutput='raw_values') + tensor([0.9677, 1.0000]) + """ + n_obs, sum_error, sum_squared_error, sum_target, sum_squared_target = _explained_variance_update(preds, target) + return _explained_variance_compute( + n_obs, + sum_error, + sum_squared_error, + sum_target, + sum_squared_target, + multioutput, + ) diff --git a/EE/paddlemetric/src/paddlemetrics/functional/regression/mean_absolute_error.py b/EE/paddlemetric/src/paddlemetrics/functional/regression/mean_absolute_error.py new file mode 100644 index 000000000..1ddb41533 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/regression/mean_absolute_error.py @@ -0,0 +1,73 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Tuple + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.utilities.checks import _check_same_shape + + +def _mean_absolute_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]: + """Updates and returns variables required to compute Mean Absolute Error. Checks for same shape of input + tensors. + + Args: + preds: Predicted tensor + target: Ground truth tensor + """ + + _check_same_shape(preds, target) + sum_abs_error = B.sum(B.abs(preds - target)) + n_obs = target.numel() + return sum_abs_error, n_obs + + +def _mean_absolute_error_compute(sum_abs_error: Tensor, n_obs: int) -> Tensor: + """Computes Mean Absolute Error. + + Args: + sum_abs_error: Sum of absolute value of errors over all observations + n_obs: Number of predictions or observations + + Example: + >>> preds = B.tensor([0., 1, 2, 3]) + >>> target = B.tensor([0., 1, 2, 2]) + >>> sum_abs_error, n_obs = _mean_absolute_error_update(preds, target) + >>> _mean_absolute_error_compute(sum_abs_error, n_obs) + tensor(0.2500) + """ + + return sum_abs_error / n_obs + + +def mean_absolute_error(preds: Tensor, target: Tensor) -> Tensor: + """Computes mean absolute error. 
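+
+    The metric follows ``mean(|preds - target|)``: absolute errors are summed and divided by
+    the total number of observations.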
+
+    Args:
+        preds: estimated labels
+        target: ground truth labels
+
+    Return:
+        Tensor with MAE
+
+    Example:
+        >>> from paddlemetrics.functional import mean_absolute_error
+        >>> x = B.tensor([0., 1, 2, 3])
+        >>> y = B.tensor([0., 1, 2, 2])
+        >>> mean_absolute_error(x, y)
+        tensor(0.2500)
+    """
+    sum_abs_error, n_obs = _mean_absolute_error_update(preds, target)
+    return _mean_absolute_error_compute(sum_abs_error, n_obs)
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/regression/mean_absolute_percentage_error.py b/EE/paddlemetric/src/paddlemetrics/functional/regression/mean_absolute_percentage_error.py
new file mode 100644
index 000000000..862617c01
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/regression/mean_absolute_percentage_error.py
@@ -0,0 +1,91 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.utilities.checks import _check_same_shape
+
+
+def _mean_absolute_percentage_error_update(
+    preds: Tensor,
+    target: Tensor,
+    epsilon: float = 1.17e-06,
+) -> Tuple[Tensor, int]:
+    """Updates and returns variables required to compute Mean Absolute Percentage Error. Checks for same shape of
+    input tensors.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+        epsilon: Specifies the lower bound for target values. Any target value below epsilon
+            is set to epsilon (avoids ZeroDivisionError). default: 1.17e-06
+    """
+
+    _check_same_shape(preds, target)
+
+    abs_diff = B.abs(preds - target)
+    abs_per_error = abs_diff / B.clamp(B.abs(target), min=epsilon)
+
+    sum_abs_per_error = B.sum(abs_per_error)
+
+    num_obs = target.numel()
+
+    return sum_abs_per_error, num_obs
+
+
+def _mean_absolute_percentage_error_compute(sum_abs_per_error: Tensor, num_obs: int) -> Tensor:
+    """Computes Mean Absolute Percentage Error.
+
+    Args:
+        sum_abs_per_error: Sum of absolute value of percentage errors over all observations
+            (percentage error = (target - prediction) / target)
+        num_obs: Number of predictions or observations
+
+    Example:
+        >>> target = B.tensor([1, 10, 1e6])
+        >>> preds = B.tensor([0.9, 15, 1.2e6])
+        >>> sum_abs_per_error, num_obs = _mean_absolute_percentage_error_update(preds, target)
+        >>> _mean_absolute_percentage_error_compute(sum_abs_per_error, num_obs)
+        tensor(0.2667)
+    """
+
+    return sum_abs_per_error / num_obs
+
+
+def mean_absolute_percentage_error(preds: Tensor, target: Tensor) -> Tensor:
+    """Computes mean absolute percentage error.
+
+    Args:
+        preds: estimated labels
+        target: ground truth labels
+
+    Return:
+        Tensor with MAPE
+
+    Note:
+        The epsilon value is taken from `scikit-learn's implementation of MAPE`_.
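+        For the example below, the element-wise relative errors are ``0.1 / 1``, ``5 / 10``
+        and ``0.2e6 / 1e6``, so the result is ``(0.1 + 0.5 + 0.2) / 3``, which rounds to ``0.2667``.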
+ + Example: + >>> from paddlemetrics.functional import mean_absolute_percentage_error + >>> target = B.tensor([1, 10, 1e6]) + >>> preds = B.tensor([0.9, 15, 1.2e6]) + >>> mean_absolute_percentage_error(preds, target) + tensor(0.2667) + """ + sum_abs_per_error, num_obs = _mean_absolute_percentage_error_update(preds, target) + mean_ape = _mean_absolute_percentage_error_compute(sum_abs_per_error, num_obs) + + return mean_ape diff --git a/EE/paddlemetric/src/paddlemetrics/functional/regression/mean_squared_error.py b/EE/paddlemetric/src/paddlemetrics/functional/regression/mean_squared_error.py new file mode 100644 index 000000000..58af5d21b --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/regression/mean_squared_error.py @@ -0,0 +1,74 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Tuple + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.utilities.checks import _check_same_shape + + +def _mean_squared_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]: + """Updates and returns variables required to compute Mean Squared Error. Checks for same shape of input + tensors. + + Args: + preds: Predicted tensor + target: Ground truth tensor + """ + _check_same_shape(preds, target) + diff = preds - target + sum_squared_error = B.sum(diff * diff) + n_obs = target.numel() + return sum_squared_error, n_obs + + +def _mean_squared_error_compute(sum_squared_error: Tensor, n_obs: int, squared: bool = True) -> Tensor: + """Computes Mean Squared Error. + + Args: + sum_squared_error: Sum of square of errors over all observations + n_obs: Number of predictions or observations + squared: Returns RMSE value if set to False. default: True + + Example: + >>> preds = B.tensor([0., 1, 2, 3]) + >>> target = B.tensor([0., 1, 2, 2]) + >>> sum_squared_error, n_obs = _mean_squared_error_update(preds, target) + >>> _mean_squared_error_compute(sum_squared_error, n_obs) + tensor(0.2500) + """ + return sum_squared_error / n_obs if squared else B.sqrt(sum_squared_error / n_obs) + + +def mean_squared_error(preds: Tensor, target: Tensor, squared: bool = True) -> Tensor: + """Computes mean squared error. 
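+
+    The metric follows ``mean((preds - target) ** 2)``; with ``squared=False`` the square root
+    of this value, i.e. the RMSE, is returned instead.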
+
+    Args:
+        preds: estimated labels
+        target: ground truth labels
+        squared: returns RMSE value if set to False
+
+    Return:
+        Tensor with MSE
+
+    Example:
+        >>> from paddlemetrics.functional import mean_squared_error
+        >>> x = B.tensor([0., 1, 2, 3])
+        >>> y = B.tensor([0., 1, 2, 2])
+        >>> mean_squared_error(x, y)
+        tensor(0.2500)
+    """
+    sum_squared_error, n_obs = _mean_squared_error_update(preds, target)
+    return _mean_squared_error_compute(sum_squared_error, n_obs, squared=squared)
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/regression/mean_squared_log_error.py b/EE/paddlemetric/src/paddlemetrics/functional/regression/mean_squared_log_error.py
new file mode 100644
index 000000000..7270ffc00
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/regression/mean_squared_log_error.py
@@ -0,0 +1,76 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.utilities.checks import _check_same_shape
+
+
+def _mean_squared_log_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]:
+    """Returns variables required to compute Mean Squared Log Error. Checks for same shape of tensors.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+    """
+
+    _check_same_shape(preds, target)
+    sum_squared_log_error = B.sum(B.pow(B.log1p(preds) - B.log1p(target), 2))
+    n_obs = target.numel()
+    return sum_squared_log_error, n_obs
+
+
+def _mean_squared_log_error_compute(sum_squared_log_error: Tensor, n_obs: int) -> Tensor:
+    """Computes Mean Squared Log Error.
+
+    Args:
+        sum_squared_log_error: Sum of square of log errors over all observations
+            (log error = log(target) - log(prediction))
+        n_obs: Number of predictions or observations
+
+    Example:
+        >>> preds = B.tensor([0., 1, 2, 3])
+        >>> target = B.tensor([0., 1, 2, 2])
+        >>> sum_squared_log_error, n_obs = _mean_squared_log_error_update(preds, target)
+        >>> _mean_squared_log_error_compute(sum_squared_log_error, n_obs)
+        tensor(0.0207)
+    """
+
+    return sum_squared_log_error / n_obs
+
+
+def mean_squared_log_error(preds: Tensor, target: Tensor) -> Tensor:
+    """Computes mean squared log error.
+
+    Args:
+        preds: estimated labels
+        target: ground truth labels
+
+    Return:
+        Tensor with MSLE
+
+    Example:
+        >>> from paddlemetrics.functional import mean_squared_log_error
+        >>> x = B.tensor([0., 1, 2, 3])
+        >>> y = B.tensor([0., 1, 2, 2])
+        >>> mean_squared_log_error(x, y)
+        tensor(0.0207)
+
+    ..
note::
+        Half precision is only supported on GPU for this metric
+    """
+    sum_squared_log_error, n_obs = _mean_squared_log_error_update(preds, target)
+    return _mean_squared_log_error_compute(sum_squared_log_error, n_obs)
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/regression/pearson.py b/EE/paddlemetric/src/paddlemetrics/functional/regression/pearson.py
new file mode 100644
index 000000000..e1f7dd82f
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/regression/pearson.py
@@ -0,0 +1,102 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.utilities.checks import _check_same_shape
+
+
+def _pearson_corrcoef_update(
+    preds: Tensor,
+    target: Tensor,
+    mean_x: Tensor,
+    mean_y: Tensor,
+    var_x: Tensor,
+    var_y: Tensor,
+    corr_xy: Tensor,
+    n_prior: Tensor,
+) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:
+    """Updates and returns variables required to compute Pearson Correlation Coefficient. Checks for same shape of
+    input tensors.
+
+    Args:
+        mean_x: current mean estimate of x tensor
+        mean_y: current mean estimate of y tensor
+        var_x: current variance estimate of x tensor
+        var_y: current variance estimate of y tensor
+        corr_xy: current covariance estimate between x and y tensor
+        n_prior: current number of observed observations
+    """
+    # Data checking
+    _check_same_shape(preds, target)
+    preds = preds.squeeze()
+    target = target.squeeze()
+    if preds.ndim > 1 or target.ndim > 1:
+        raise ValueError("Expected both predictions and target to be 1 dimensional tensors.")
+
+    n_obs = preds.numel()
+    mx_new = (n_prior * mean_x + preds.mean() * n_obs) / (n_prior + n_obs)
+    my_new = (n_prior * mean_y + target.mean() * n_obs) / (n_prior + n_obs)
+    n_prior += n_obs
+    var_x += ((preds - mx_new) * (preds - mean_x)).sum()
+    var_y += ((target - my_new) * (target - mean_y)).sum()
+    corr_xy += ((preds - mx_new) * (target - mean_y)).sum()
+    mean_x = mx_new
+    mean_y = my_new
+
+    return mean_x, mean_y, var_x, var_y, corr_xy, n_prior
+
+
+def _pearson_corrcoef_compute(
+    var_x: Tensor,
+    var_y: Tensor,
+    corr_xy: Tensor,
+    nb: Tensor,
+) -> Tensor:
+    """Computes the final Pearson correlation based on accumulated statistics.
+
+    Args:
+        var_x: variance estimate of x tensor
+        var_y: variance estimate of y tensor
+        corr_xy: covariance estimate between x and y tensor
+        nb: number of observations
+    """
+    var_x /= nb - 1
+    var_y /= nb - 1
+    corr_xy /= nb - 1
+    corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze()
+    return B.clamp(corrcoef, -1.0, 1.0)
+
+
+def pearson_corrcoef(preds: Tensor, target: Tensor) -> Tensor:
+    """Computes Pearson correlation coefficient.
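+
+    The statistics are accumulated in a single pass: ``_pearson_corrcoef_update`` keeps running
+    means, variances and the covariance, and the final coefficient is
+    ``corr_xy / sqrt(var_x * var_y)``, clamped to ``[-1, 1]``.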
+ + Args: + preds: estimated scores + target: ground truth scores + + Example: + >>> from paddlemetrics.functional import pearson_corrcoef + >>> target = B.tensor([3, -0.5, 2, 7]) + >>> preds = B.tensor([2.5, 0.0, 2, 8]) + >>> pearson_corrcoef(preds, target) + tensor(0.9849) + """ + _temp = B.zeros(1, dtype=preds.dtype, device=preds.device) + mean_x, mean_y, var_x = _temp.clone(), _temp.clone(), _temp.clone() + var_y, corr_xy, nb = _temp.clone(), _temp.clone(), _temp.clone() + _, _, var_x, var_y, corr_xy, nb = _pearson_corrcoef_update(preds, target, mean_x, mean_y, var_x, var_y, corr_xy, nb) + return _pearson_corrcoef_compute(var_x, var_y, corr_xy, nb) diff --git a/EE/paddlemetric/src/paddlemetrics/functional/regression/r2.py b/EE/paddlemetric/src/paddlemetrics/functional/regression/r2.py new file mode 100644 index 000000000..a83219122 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/regression/r2.py @@ -0,0 +1,173 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Tuple + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.utilities import rank_zero_warn +from paddlemetrics.utilities.checks import _check_same_shape + + +def _r2_score_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor]: + """Updates and returns variables required to compute R2 score. Checks for same shape and 1D/2D input tensors. + + Args: + preds: Predicted tensor + target: Ground truth tensor + """ + + _check_same_shape(preds, target) + if preds.ndim > 2: + raise ValueError( + "Expected both prediction and target to be 1D or 2D tensors," + f" but received tensors with dimension {preds.shape}" + ) + + sum_obs = B.sum(target, dim=0) + sum_squared_obs = B.sum(target * target, dim=0) + residual = target - preds + rss = B.sum(residual * residual, dim=0) + n_obs = target.size(0) + + return sum_squared_obs, sum_obs, rss, n_obs + + +def _r2_score_compute( + sum_squared_obs: Tensor, + sum_obs: Tensor, + rss: Tensor, + n_obs: Tensor, + adjusted: int = 0, + multioutput: str = "uniform_average", +) -> Tensor: + """Computes R2 score. + + Args: + sum_squared_obs: Sum of square of all observations + sum_obs: Sum of all observations + rss: Residual sum of squares + n_obs: Number of predictions or observations + adjusted: number of independent regressors for calculating adjusted r2 score. + Default 0 (standard r2 score). + multioutput: Defines aggregation in the case of multiple output scores. 
Can be one
+            of the following strings (default is `'uniform_average'`.):
+
+            * `'raw_values'` returns full set of scores
+            * `'uniform_average'` scores are uniformly averaged
+            * `'variance_weighted'` scores are weighted by their individual variances
+
+    Example:
+        >>> target = B.tensor([[0.5, 1], [-1, 1], [7, -6]])
+        >>> preds = B.tensor([[0, 2], [-1, 2], [8, -5]])
+        >>> sum_squared_obs, sum_obs, rss, n_obs = _r2_score_update(preds, target)
+        >>> _r2_score_compute(sum_squared_obs, sum_obs, rss, n_obs, multioutput="raw_values")
+        tensor([0.9654, 0.9082])
+    """
+    if n_obs < 2:
+        raise ValueError("Needs at least two samples to calculate r2 score.")
+
+    mean_obs = sum_obs / n_obs
+    tss = sum_squared_obs - sum_obs * mean_obs
+    raw_scores = 1 - (rss / tss)
+
+    if multioutput == "raw_values":
+        r2 = raw_scores
+    elif multioutput == "uniform_average":
+        r2 = B.mean(raw_scores)
+    elif multioutput == "variance_weighted":
+        tss_sum = B.sum(tss)
+        r2 = B.sum(tss / tss_sum * raw_scores)
+    else:
+        raise ValueError(
+            "Argument `multioutput` must be either `raw_values`,"
+            f" `uniform_average` or `variance_weighted`. Received {multioutput}."
+        )
+
+    if adjusted < 0 or not isinstance(adjusted, int):
+        raise ValueError("`adjusted` parameter should be an integer larger or" " equal to 0.")
+
+    if adjusted != 0:
+        if adjusted > n_obs - 1:
+            rank_zero_warn(
+                "More independent regressions than data points in"
+                " adjusted r2 score. Falls back to standard r2 score.",
+                UserWarning,
+            )
+        elif adjusted == n_obs - 1:
+            rank_zero_warn("Division by zero in adjusted r2 score. Falls back to" " standard r2 score.", UserWarning)
+        else:
+            r2 = 1 - (1 - r2) * (n_obs - 1) / (n_obs - adjusted - 1)
+    return r2
+
+
+def r2_score(
+    preds: Tensor,
+    target: Tensor,
+    adjusted: int = 0,
+    multioutput: str = "uniform_average",
+) -> Tensor:
+    r"""
+    Computes r2 score also known as `R2 Score_Coefficient Determination`_:
+
+    .. math:: R^2 = 1 - \frac{SS_{res}}{SS_{tot}}
+
+    where :math:`SS_{res}=\sum_i (y_i - f(x_i))^2` is the sum of residual squares, and
+    :math:`SS_{tot}=\sum_i (y_i - \bar{y})^2` is total sum of squares. Can also calculate
+    adjusted r2 score given by
+
+    .. math:: R^2_{adj} = 1 - \frac{(1-R^2)(n-1)}{n-k-1}
+
+    where the parameter :math:`k` (the number of independent regressors) should
+    be provided as the ``adjusted`` argument.
+
+    Args:
+        preds: estimated labels
+        target: ground truth labels
+        adjusted: number of independent regressors for calculating adjusted r2 score.
+            Default 0 (standard r2 score).
+        multioutput: Defines aggregation in the case of multiple output scores. Can be one
+            of the following strings (default is ``'uniform_average'``.):
+
+            * ``'raw_values'`` returns full set of scores
+            * ``'uniform_average'`` scores are uniformly averaged
+            * ``'variance_weighted'`` scores are weighted by their individual variances
+
+    Raises:
+        ValueError:
+            If both ``preds`` and ``targets`` are not ``1D`` or ``2D`` tensors.
+        ValueError:
+            If ``len(preds)`` is less than ``2``
+            since at least ``2`` samples are needed to calculate r2 score.
+        ValueError:
+            If ``multioutput`` is not one of ``raw_values``,
+            ``uniform_average`` or ``variance_weighted``.
+        ValueError:
+            If ``adjusted`` is not an ``integer`` greater than or equal to ``0``.
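+
+    Note:
+        For the first example below, ``SS_res = 1.5`` and ``SS_tot = 29.1875``,
+        so ``R^2 = 1 - 1.5 / 29.1875``, which rounds to ``0.9486``.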
+
+    Example:
+        >>> from paddlemetrics.functional import r2_score
+        >>> target = B.tensor([3, -0.5, 2, 7])
+        >>> preds = B.tensor([2.5, 0.0, 2, 8])
+        >>> r2_score(preds, target)
+        tensor(0.9486)
+
+        >>> target = B.tensor([[0.5, 1], [-1, 1], [7, -6]])
+        >>> preds = B.tensor([[0, 2], [-1, 2], [8, -5]])
+        >>> r2_score(preds, target, multioutput='raw_values')
+        tensor([0.9654, 0.9082])
+
+    """
+    sum_squared_obs, sum_obs, rss, n_obs = _r2_score_update(preds, target)
+    return _r2_score_compute(sum_squared_obs, sum_obs, rss, n_obs, adjusted, multioutput)
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/regression/spearman.py b/EE/paddlemetric/src/paddlemetrics/functional/regression/spearman.py
new file mode 100644
index 000000000..62f7a9d4a
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/regression/spearman.py
@@ -0,0 +1,129 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.utilities.checks import _check_same_shape
+
+
+def _find_repeats(data: Tensor) -> Tensor:
+    """Find and return values which have repeats, i.e. values that occur more than once in the tensor."""
+    temp = data.detach().clone()
+    temp = temp.sort()[0]
+
+    change = B.cat([B.tensor([True], device=temp.device), temp[1:] != temp[:-1]])
+    unique = temp[change]
+    change_idx = B.cat([B.nonzero(change), B.tensor([[temp.numel()]], device=temp.device)]).flatten()
+    freq = change_idx[1:] - change_idx[:-1]
+    atleast2 = freq > 1
+    return unique[atleast2]
+
+
+def _rank_data(data: Tensor) -> Tensor:
+    """Calculate the rank for each element of a tensor. The rank refers to the indices of an element in the
+    corresponding sorted tensor (starting from 1). Duplicates of the same value will be assigned the mean of their
+    rank.
+
+    Adopted from: `Rank of element tensor`_
+    """
+    n = data.numel()
+    rank = B.empty_like(data)
+    idx = data.argsort()
+    rank[idx[:n]] = B.arange(1, n + 1, dtype=data.dtype, device=data.device)
+
+    repeats = _find_repeats(data)
+    for r in repeats:
+        condition = data == r
+        rank[condition] = rank[condition].mean()
+    return rank
+
+
+def _spearman_corrcoef_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
+    """Updates and returns variables required to compute Spearman Correlation Coefficient. Checks for same shape
+    and type of input tensors.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+    """
+
+    if preds.dtype != target.dtype:
+        raise TypeError(
+            "Expected `preds` and `target` to have the same data type."
+            f" Got preds: {preds.dtype} and target: {target.dtype}."
+        )
+    _check_same_shape(preds, target)
+    preds = preds.squeeze()
+    target = target.squeeze()
+    if preds.ndim > 1 or target.ndim > 1:
+        raise ValueError("Expected both predictions and target to be 1 dimensional tensors.")
+    return preds, target
+
+
+def _spearman_corrcoef_compute(preds: Tensor, target: Tensor, eps: float = 1e-6) -> Tensor:
+    """Computes Spearman Correlation Coefficient.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+        eps: Avoids ZeroDivisionError. default: 1e-6
+
+    Example:
+        >>> target = B.tensor([3, -0.5, 2, 7])
+        >>> preds = B.tensor([2.5, 0.0, 2, 8])
+        >>> preds, target = _spearman_corrcoef_update(preds, target)
+        >>> _spearman_corrcoef_compute(preds, target)
+        tensor(1.0000)
+    """
+
+    preds = _rank_data(preds)
+    target = _rank_data(target)
+
+    preds_diff = preds - preds.mean()
+    target_diff = target - target.mean()
+
+    cov = (preds_diff * target_diff).mean()
+    preds_std = B.sqrt((preds_diff * preds_diff).mean())
+    target_std = B.sqrt((target_diff * target_diff).mean())
+
+    corrcoef = cov / (preds_std * target_std + eps)
+    return B.clamp(corrcoef, -1.0, 1.0)
+
+
+def spearman_corrcoef(preds: Tensor, target: Tensor) -> Tensor:
+    r"""
+    Computes `spearmans rank correlation coefficient`_:
+
+    .. math::
+        r_s = \frac{cov(rg_x, rg_y)}{\sigma_{rg_x} * \sigma_{rg_y}}
+
+    where :math:`rg_x` and :math:`rg_y` are the ranks associated to the variables x and y. Spearman's correlation
+    coefficient corresponds to the standard Pearson's correlation coefficient calculated on the rank variables.
+
+    Args:
+        preds: estimated scores
+        target: ground truth scores
+
+    Example:
+        >>> from paddlemetrics.functional import spearman_corrcoef
+        >>> target = B.tensor([3, -0.5, 2, 7])
+        >>> preds = B.tensor([2.5, 0.0, 2, 8])
+        >>> spearman_corrcoef(preds, target)
+        tensor(1.0000)
+
+    """
+    preds, target = _spearman_corrcoef_update(preds, target)
+    return _spearman_corrcoef_compute(preds, target)
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/regression/symmetric_mean_absolute_percentage_error.py b/EE/paddlemetric/src/paddlemetrics/functional/regression/symmetric_mean_absolute_percentage_error.py
new file mode 100644
index 000000000..89eadf9e6
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/regression/symmetric_mean_absolute_percentage_error.py
@@ -0,0 +1,99 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.utilities.checks import _check_same_shape
+
+
+def _symmetric_mean_absolute_percentage_error_update(
+    preds: Tensor,
+    target: Tensor,
+    epsilon: float = 1.17e-06,
+) -> Tuple[Tensor, int]:
+    """Updates and returns variables required to compute Symmetric Mean Absolute Percentage Error. Checks for same
+    shape of input tensors.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+        epsilon: Avoids ZeroDivisionError.
default: 1.17e-06
+    """
+
+    _check_same_shape(preds, target)
+
+    abs_diff = B.abs(preds - target)
+    abs_per_error = abs_diff / B.clamp(B.abs(target) + B.abs(preds), min=epsilon)
+
+    sum_abs_per_error = 2 * B.sum(abs_per_error)
+
+    num_obs = target.numel()
+
+    return sum_abs_per_error, num_obs
+
+
+def _symmetric_mean_absolute_percentage_error_compute(sum_abs_per_error: Tensor, num_obs: int) -> Tensor:
+    """Computes Symmetric Mean Absolute Percentage Error.
+
+    Args:
+        sum_abs_per_error: Sum of values of symmetric absolute percentage errors over all observations
+            (symmetric absolute percentage error = 2 * |target - prediction| / (target + prediction))
+        num_obs: Number of predictions or observations
+
+    Example:
+        >>> target = B.tensor([1, 10, 1e6])
+        >>> preds = B.tensor([0.9, 15, 1.2e6])
+        >>> sum_abs_per_error, num_obs = _symmetric_mean_absolute_percentage_error_update(preds, target)
+        >>> _symmetric_mean_absolute_percentage_error_compute(sum_abs_per_error, num_obs)
+        tensor(0.2290)
+    """
+
+    return sum_abs_per_error / num_obs
+
+
+def symmetric_mean_absolute_percentage_error(preds: Tensor, target: Tensor) -> Tensor:
+    r"""
+    Computes symmetric mean absolute percentage error (SMAPE_):
+
+    .. math:: \text{SMAPE} = \frac{2}{n}\sum_{i=1}^n\frac{|y_i - \hat{y_i}|}{\max(|y_i| + |\hat{y_i}|, \epsilon)}
+
+    where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
+
+    Args:
+        preds: estimated labels
+        target: ground truth labels
+
+    Return:
+        Tensor with SMAPE.
+
+    Example:
+        >>> from paddlemetrics.functional import symmetric_mean_absolute_percentage_error
+        >>> target = B.tensor([1, 10, 1e6])
+        >>> preds = B.tensor([0.9, 15, 1.2e6])
+        >>> symmetric_mean_absolute_percentage_error(preds, target)
+        tensor(0.2290)
+
+    """
+    sum_abs_per_error, num_obs = _symmetric_mean_absolute_percentage_error_update(
+        preds,
+        target,
+    )
+    mean_ape = _symmetric_mean_absolute_percentage_error_compute(
+        sum_abs_per_error,
+        num_obs,
+    )
+
+    return mean_ape
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/regression/tweedie_deviance.py b/EE/paddlemetric/src/paddlemetrics/functional/regression/tweedie_deviance.py
new file mode 100644
index 000000000..7cb366a2c
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/regression/tweedie_deviance.py
@@ -0,0 +1,139 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.utilities.checks import _check_same_shape
+
+
+def _tweedie_deviance_score_update(preds: Tensor, targets: Tensor, power: float = 0.0) -> Tuple[Tensor, Tensor]:
+    """Updates and returns variables required to compute Deviance Score for the given power. Checks for same shape
+    of input tensors.
+
+    Args:
+        preds: Predicted tensor
+        targets: Ground truth tensor
+        power: see :func:`tweedie_deviance_score`
+
+    Example:
+        >>> targets = B.tensor([1.0, 2.0, 3.0, 4.0])
+        >>> preds = B.tensor([4.0, 3.0, 2.0, 1.0])
+        >>> _tweedie_deviance_score_update(preds, targets, power=2)
+        (tensor(4.8333), tensor(4))
+    """
+    _check_same_shape(preds, targets)
+
+    zero_tensor = B.zeros(preds.shape, device=preds.device)
+
+    if 0 < power < 1:
+        raise ValueError(f"Deviance Score is not defined for power={power}.")
+
+    if power == 0:
+        deviance_score = B.pow(targets - preds, exponent=2)
+    elif power == 1:
+        # Poisson distribution
+        if B.any(preds <= 0) or B.any(targets < 0):
+            raise ValueError(
+                f"For power={power}, 'preds' has to be strictly positive and 'targets' cannot be negative."
+            )
+
+        deviance_score = 2 * (targets * B.log(targets / preds) + preds - targets)
+    elif power == 2:
+        # Gamma distribution
+        if B.any(preds <= 0) or B.any(targets <= 0):
+            raise ValueError(f"For power={power}, both 'preds' and 'targets' have to be strictly positive.")
+
+        deviance_score = 2 * (B.log(preds / targets) + (targets / preds) - 1)
+    else:
+        if power < 0:
+            if B.any(preds <= 0):
+                raise ValueError(f"For power={power}, 'preds' has to be strictly positive.")
+        elif 1 < power < 2:
+            if B.any(preds <= 0) or B.any(targets < 0):
+                raise ValueError(
+                    f"For power={power}, 'preds' has to be strictly positive and 'targets' cannot be negative."
+                )
+        else:
+            if B.any(preds <= 0) or B.any(targets <= 0):
+                raise ValueError(f"For power={power}, both 'preds' and 'targets' have to be strictly positive.")
+
+        term_1 = B.pow(B.max(targets, zero_tensor), 2 - power) / ((1 - power) * (2 - power))
+        term_2 = targets * B.pow(preds, 1 - power) / (1 - power)
+        term_3 = B.pow(preds, 2 - power) / (2 - power)
+        deviance_score = 2 * (term_1 - term_2 + term_3)
+
+    sum_deviance_score = B.sum(deviance_score)
+    num_observations = B.tensor(B.numel(deviance_score), device=preds.device)
+
+    return sum_deviance_score, num_observations
+
+
+def _tweedie_deviance_score_compute(sum_deviance_score: Tensor, num_observations: Tensor) -> Tensor:
+    """Computes Deviance Score.
+
+    Args:
+        sum_deviance_score: Sum of deviance scores accumulated until now.
+        num_observations: Number of observations encountered until now.
+
+    Example:
+        >>> targets = B.tensor([1.0, 2.0, 3.0, 4.0])
+        >>> preds = B.tensor([4.0, 3.0, 2.0, 1.0])
+        >>> sum_deviance_score, num_observations = _tweedie_deviance_score_update(preds, targets, power=2)
+        >>> _tweedie_deviance_score_compute(sum_deviance_score, num_observations)
+        tensor(1.2083)
+    """
+
+    return sum_deviance_score / num_observations
+
+
+def tweedie_deviance_score(preds: Tensor, targets: Tensor, power: float = 0.0) -> Tensor:
+    r"""
+    Computes the `Tweedie Deviance Score`_ between targets and predictions:
+
+    .. math::
+        deviance\_score(\hat{y},y) =
+        \begin{cases}
+        (\hat{y} - y)^2, & \text{for }power=0\\
+        2 * (y * log(\frac{y}{\hat{y}}) + \hat{y} - y), & \text{for }power=1\\
+        2 * (log(\frac{\hat{y}}{y}) + \frac{y}{\hat{y}} - 1), & \text{for }power=2\\
+        2 * (\frac{(max(y,0))^{2}}{(1 - power)(2 - power)} - \frac{y(\hat{y})^{1 - power}}{1 - power} + \frac{(\hat{y})
+        ^{2 - power}}{2 - power}), & \text{otherwise}
+        \end{cases}
+
+    where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
+
+    Args:
+        preds: Predicted tensor with shape ``(N,...)``
+        targets: Ground truth tensor with shape ``(N,...)``
+        power:
+            - power < 0 : Extreme stable distribution. (Requires: preds > 0.)
+            - power = 0 : Normal distribution. (Requires: targets and preds can be any real numbers.)
+            - power = 1 : Poisson distribution. (Requires: targets >= 0 and preds > 0.)
+            - 1 < power < 2 : Compound Poisson distribution. (Requires: targets >= 0 and preds > 0.)
+            - power = 2 : Gamma distribution. (Requires: targets > 0 and preds > 0.)
+            - power = 3 : Inverse Gaussian distribution. (Requires: targets > 0 and preds > 0.)
+            - otherwise : Positive stable distribution. (Requires: targets > 0 and preds > 0.)
+
+    Example:
+        >>> from paddlemetrics.functional import tweedie_deviance_score
+        >>> targets = B.tensor([1.0, 2.0, 3.0, 4.0])
+        >>> preds = B.tensor([4.0, 3.0, 2.0, 1.0])
+        >>> tweedie_deviance_score(preds, targets, power=2)
+        tensor(1.2083)
+
+    """
+    sum_deviance_score, num_observations = _tweedie_deviance_score_update(preds, targets, power=power)
+    return _tweedie_deviance_score_compute(sum_deviance_score, num_observations)
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/retrieval/__init__.py b/EE/paddlemetric/src/paddlemetrics/functional/retrieval/__init__.py
new file mode 100644
index 000000000..d05abb6af
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/retrieval/__init__.py
@@ -0,0 +1,22 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from paddlemetrics.functional.retrieval.average_precision import retrieval_average_precision  # noqa: F401
+from paddlemetrics.functional.retrieval.fall_out import retrieval_fall_out  # noqa: F401
+from paddlemetrics.functional.retrieval.hit_rate import retrieval_hit_rate  # noqa: F401
+from paddlemetrics.functional.retrieval.ndcg import retrieval_normalized_dcg  # noqa: F401
+from paddlemetrics.functional.retrieval.precision import retrieval_precision  # noqa: F401
+from paddlemetrics.functional.retrieval.r_precision import retrieval_r_precision  # noqa: F401
+from paddlemetrics.functional.retrieval.recall import retrieval_recall  # noqa: F401
+from paddlemetrics.functional.retrieval.reciprocal_rank import retrieval_reciprocal_rank  # noqa: F401
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/retrieval/average_precision.py b/EE/paddlemetric/src/paddlemetrics/functional/retrieval/average_precision.py
new file mode 100644
index 000000000..0b067a892
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/retrieval/average_precision.py
@@ -0,0 +1,49 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.utilities.checks import _check_retrieval_functional_inputs
+
+
+def retrieval_average_precision(preds: Tensor, target: Tensor) -> Tensor:
+    """Computes average precision (for information retrieval), as explained in `IR Average precision`_.
+
+    ``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``,
+    ``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be `float`,
+    otherwise an error is raised.
+
+    Args:
+        preds: estimated probabilities of each document to be relevant.
+        target: ground truth about each document being relevant or not.
+
+    Return:
+        a single-value tensor with the average precision (AP) of the predictions ``preds`` w.r.t. the labels ``target``.
+
+    Example:
+        >>> from paddlemetrics.functional import retrieval_average_precision
+        >>> preds = tensor([0.2, 0.3, 0.5])
+        >>> target = tensor([True, False, True])
+        >>> retrieval_average_precision(preds, target)
+        tensor(0.8333)
+    """
+    preds, target = _check_retrieval_functional_inputs(preds, target)
+
+    if not target.sum():
+        return tensor(0.0, device=preds.device)
+
+    target = target[B.argsort(preds, dim=-1, descending=True)]
+    positions = B.arange(1, len(target) + 1, device=target.device, dtype=B.float32)[target > 0]
+    res = B.div((B.arange(len(positions), device=positions.device, dtype=B.float32) + 1), positions).mean()
+    return res
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/retrieval/fall_out.py b/EE/paddlemetric/src/paddlemetrics/functional/retrieval/fall_out.py
new file mode 100644
index 000000000..10c5762b0
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/retrieval/fall_out.py
@@ -0,0 +1,62 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.utilities.checks import _check_retrieval_functional_inputs
+
+
+def retrieval_fall_out(preds: Tensor, target: Tensor, k: Optional[int] = None) -> Tensor:
+    """Computes the Fall-out (for information retrieval), as explained in `IR Fall-out`_. Fall-out is the fraction
+    of non-relevant documents retrieved among all the non-relevant documents.
+
+    ``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``,
+    ``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be `float`,
+    otherwise an error is raised. If you want to measure Fall-out@K, ``k`` must be a positive integer.
+
+    Args:
+        preds: estimated probabilities of each document to be relevant.
+        target: ground truth about each document being relevant or not.
+        k: consider only the top k elements (default: None, which considers them all)
+
+    Returns:
+        a single-value tensor with the fall-out (at ``k``) of the predictions ``preds`` w.r.t. the labels ``target``.
+ + Raises: + ValueError: + If ``k`` parameter is not `None` or an integer larger than 0 + + Example: + >>> from paddlemetrics.functional import retrieval_fall_out + >>> preds = tensor([0.2, 0.3, 0.5]) + >>> target = tensor([True, False, True]) + >>> retrieval_fall_out(preds, target, k=2) + tensor(1.) + """ + preds, target = _check_retrieval_functional_inputs(preds, target) + + k = preds.shape[-1] if k is None else k + + if not (isinstance(k, int) and k > 0): + raise ValueError("`k` has to be a positive integer or None") + + target = 1 - target + + if not target.sum(): + return tensor(0.0, device=preds.device) + + relevant = target[B.argsort(preds, dim=-1, descending=True)][:k].sum().float() + return relevant / target.sum() diff --git a/EE/paddlemetric/src/paddlemetrics/functional/retrieval/hit_rate.py b/EE/paddlemetric/src/paddlemetrics/functional/retrieval/hit_rate.py new file mode 100644 index 000000000..83336a50b --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/retrieval/hit_rate.py @@ -0,0 +1,57 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.utilities.checks import _check_retrieval_functional_inputs + + +def retrieval_hit_rate(preds: Tensor, target: Tensor, k: Optional[int] = None) -> Tensor: + """Computes the hit rate (for information retrieval). The hit rate is 1.0 if there is at least one relevant + document among all the top `k` retrieved documents. + + ``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``, + ``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be `float`, + otherwise an error is raised. If you want to measure HitRate@K, ``k`` must be a positive integer. + + Args: + preds: estimated probabilities of each document to be relevant. + target: ground truth about each document being relevant or not. + k: consider only the top k elements (default: None, which considers them all) + + Returns: + a single-value tensor with the hit rate (at ``k``) of the predictions ``preds`` w.r.t. the labels ``target``. + + Raises: + ValueError: + If ``k`` parameter is not `None` or an integer larger than 0 + + Example: + >>> preds = tensor([0.2, 0.3, 0.5]) + >>> target = tensor([True, False, True]) + >>> retrieval_hit_rate(preds, target, k=2) + tensor(1.) 
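+
+        If no relevant document makes the top ``k``, the hit rate drops to zero; a quick
+        check with the two highest scores attached to non-relevant documents:
+
+        >>> preds = tensor([0.5, 0.2, 0.3])
+        >>> target = tensor([False, True, False])
+        >>> retrieval_hit_rate(preds, target, k=2)
+        tensor(0.)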
+ """ + preds, target = _check_retrieval_functional_inputs(preds, target) + + if k is None: + k = preds.shape[-1] + + if not (isinstance(k, int) and k > 0): + raise ValueError("`k` has to be a positive integer or None") + + relevant = target[B.argsort(preds, dim=-1, descending=True)][:k].sum() + return (relevant > 0).float() diff --git a/EE/paddlemetric/src/paddlemetrics/functional/retrieval/ndcg.py b/EE/paddlemetric/src/paddlemetrics/functional/retrieval/ndcg.py new file mode 100644 index 000000000..73fedad5e --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/retrieval/ndcg.py @@ -0,0 +1,72 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.utilities.checks import _check_retrieval_functional_inputs + + +def _dcg(target: Tensor) -> Tensor: + """Computes Discounted Cumulative Gain for input tensor.""" + denom = B.log2(B.arange(target.shape[-1], device=target.device) + 2.0) + return (target / denom).sum(dim=-1) + + +def retrieval_normalized_dcg(preds: Tensor, target: Tensor, k: Optional[int] = None) -> Tensor: + """Computes `Normalized Discounted Cumulative Gain`_ (for information retrieval). + + ``preds`` and ``target`` should be of the same shape and live on the same device. + ``target`` must be either `bool` or `integers` and ``preds`` must be `float`, + otherwise an error is raised. + + Args: + preds: estimated probabilities of each document to be relevant. + target: ground truth about each document relevance. + k: consider only the top k elements (default: None, which considers them all) + + Return: + a single-value tensor with the nDCG of the predictions ``preds`` w.r.t. the labels ``target``. 
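+
+    Note:
+        Following ``_dcg`` above, ``DCG@k = sum_i target_i / log2(i + 1)`` over the top ``k``
+        positions (1-indexed); the returned value is the ``DCG@k`` of the predicted ordering
+        divided by the ideal ``DCG@k``, or ``0`` when every document is irrelevant.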
+ + Raises: + ValueError: + If ``k`` parameter is not `None` or an integer larger than 0 + + Example: + >>> from paddlemetrics.functional import retrieval_normalized_dcg + >>> preds = B.tensor([.1, .2, .3, 4, 70]) + >>> target = B.tensor([10, 0, 0, 1, 5]) + >>> retrieval_normalized_dcg(preds, target) + tensor(0.6957) + """ + preds, target = _check_retrieval_functional_inputs(preds, target, allow_non_binary_target=True) + + k = preds.shape[-1] if k is None else k + + if not (isinstance(k, int) and k > 0): + raise ValueError("`k` has to be a positive integer or None") + + sorted_target = target[B.argsort(preds, dim=-1, descending=True)][:k] + ideal_target = B.sort(target, descending=True)[0][:k] + + ideal_dcg = _dcg(ideal_target) + target_dcg = _dcg(sorted_target) + + # filter undefined scores + all_irrelevant = ideal_dcg == 0 + target_dcg[all_irrelevant] = 0 + target_dcg[~all_irrelevant] /= ideal_dcg[~all_irrelevant] + + return target_dcg.mean() diff --git a/EE/paddlemetric/src/paddlemetrics/functional/retrieval/precision.py b/EE/paddlemetric/src/paddlemetrics/functional/retrieval/precision.py new file mode 100644 index 000000000..83bd11727 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/retrieval/precision.py @@ -0,0 +1,60 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.utilities.checks import _check_retrieval_functional_inputs + + +def retrieval_precision(preds: Tensor, target: Tensor, k: Optional[int] = None) -> Tensor: + """Computes the precision metric (for information retrieval). Precision is the fraction of relevant documents + among all the retrieved documents. + + ``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``, + ``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be `float`, + otherwise an error is raised. If you want to measure Precision@K, ``k`` must be a positive integer. + + Args: + preds: estimated probabilities of each document to be relevant. + target: ground truth about each document being relevant or not. + k: consider only the top k elements (default: None, which considers them all) + + Returns: + a single-value tensor with the precision (at ``k``) of the predictions ``preds`` w.r.t. the labels ``target``. 
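+
+    Note:
+        With ``k=2`` in the example below, one of the two highest-scored documents is relevant,
+        giving ``1 / 2 = 0.5``.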
+
+    Raises:
+        ValueError:
+            If ``k`` parameter is not `None` or an integer larger than 0
+
+    Example:
+        >>> preds = tensor([0.2, 0.3, 0.5])
+        >>> target = tensor([True, False, True])
+        >>> retrieval_precision(preds, target, k=2)
+        tensor(0.5000)
+    """
+    preds, target = _check_retrieval_functional_inputs(preds, target)
+
+    if k is None:
+        k = preds.shape[-1]
+
+    if not (isinstance(k, int) and k > 0):
+        raise ValueError("`k` has to be a positive integer or None")
+
+    if not target.sum():
+        return tensor(0.0, device=preds.device)
+
+    relevant = target[B.argsort(preds, dim=-1, descending=True)][:k].sum().float()
+    return relevant / k
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/retrieval/r_precision.py b/EE/paddlemetric/src/paddlemetrics/functional/retrieval/r_precision.py
new file mode 100644
index 000000000..d26e32f8b
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/retrieval/r_precision.py
@@ -0,0 +1,49 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.utilities.checks import _check_retrieval_functional_inputs
+
+
+def retrieval_r_precision(preds: Tensor, target: Tensor) -> Tensor:
+    """Computes the r-precision metric (for information retrieval). R-Precision is the fraction of relevant
+    documents among all the top ``k`` retrieved documents where ``k`` is equal to the total number of relevant
+    documents.
+
+    ``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``,
+    ``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be `float`,
+    otherwise an error is raised.
+
+    Args:
+        preds: estimated probabilities of each document to be relevant.
+        target: ground truth about each document being relevant or not.
+
+    Returns:
+        a single-value tensor with the r-precision of the predictions ``preds`` w.r.t. the labels ``target``.
+
+    Example:
+        >>> preds = tensor([0.2, 0.3, 0.5])
+        >>> target = tensor([True, False, True])
+        >>> retrieval_r_precision(preds, target)
+        tensor(0.5000)
+    """
+    preds, target = _check_retrieval_functional_inputs(preds, target)
+
+    relevant_number = target.sum()
+    if not relevant_number:
+        return tensor(0.0, device=preds.device)
+
+    relevant = target[B.argsort(preds, dim=-1, descending=True)][:relevant_number].sum().float()
+    return relevant / relevant_number
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/retrieval/recall.py b/EE/paddlemetric/src/paddlemetrics/functional/retrieval/recall.py
new file mode 100644
index 000000000..e00d450c3
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/functional/retrieval/recall.py
@@ -0,0 +1,61 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.utilities.checks import _check_retrieval_functional_inputs + + +def retrieval_recall(preds: Tensor, target: Tensor, k: Optional[int] = None) -> Tensor: + """Computes the recall metric (for information retrieval). Recall is the fraction of relevant documents + retrieved among all the relevant documents. + + ``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``, + ``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be `float`, + otherwise an error is raised. If you want to measure Recall@K, ``k`` must be a positive integer. + + Args: + preds: estimated probabilities of each document to be relevant. + target: ground truth about each document being relevant or not. + k: consider only the top k elements (default: None, which considers them all) + + Returns: + a single-value tensor with the recall (at ``k``) of the predictions ``preds`` w.r.t. the labels ``target``. + + Raises: + ValueError: + If ``k`` parameter is not `None` or an integer larger than 0 + + Example: + >>> from paddlemetrics.functional import retrieval_recall + >>> preds = tensor([0.2, 0.3, 0.5]) + >>> target = tensor([True, False, True]) + >>> retrieval_recall(preds, target, k=2) + tensor(0.5000) + """ + preds, target = _check_retrieval_functional_inputs(preds, target) + + if k is None: + k = preds.shape[-1] + + if not (isinstance(k, int) and k > 0): + raise ValueError("`k` has to be a positive integer or None") + + if not target.sum(): + return tensor(0.0, device=preds.device) + + relevant = target[B.argsort(preds, dim=-1, descending=True)][:k].sum().float() + return relevant / target.sum() diff --git a/EE/paddlemetric/src/paddlemetrics/functional/retrieval/reciprocal_rank.py b/EE/paddlemetric/src/paddlemetrics/functional/retrieval/reciprocal_rank.py new file mode 100644 index 000000000..c92c223ec --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/retrieval/reciprocal_rank.py @@ -0,0 +1,49 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.utilities.checks import _check_retrieval_functional_inputs + + +def retrieval_reciprocal_rank(preds: Tensor, target: Tensor) -> Tensor: + """Computes reciprocal rank (for information retrieval). See `Mean Reciprocal Rank`_ + + ``preds`` and ``target`` should be of the same shape and live on the same device. 
If no ``target`` is ``True``, + 0 is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be `float`, + otherwise an error is raised. + + Args: + preds: estimated probabilities of each document to be relevant. + target: ground truth about each document being relevant or not. + + Return: + a single-value tensor with the reciprocal rank (RR) of the predictions ``preds`` wrt the labels ``target``. + + Example: + >>> from paddlemetrics.functional import retrieval_reciprocal_rank + >>> preds = B.tensor([0.2, 0.3, 0.5]) + >>> target = B.tensor([False, True, False]) + >>> retrieval_reciprocal_rank(preds, target) + tensor(0.5000) + """ + preds, target = _check_retrieval_functional_inputs(preds, target) + + if not target.sum(): + return tensor(0.0, device=preds.device) + + target = target[B.argsort(preds, dim=-1, descending=True)] + position = B.nonzero(target).view(-1) + res = 1.0 / (position[0] + 1.0) + return res diff --git a/EE/paddlemetric/src/paddlemetrics/functional/self_supervised.py b/EE/paddlemetric/src/paddlemetrics/functional/self_supervised.py new file mode 100644 index 000000000..9af407aae --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/self_supervised.py @@ -0,0 +1,57 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from warnings import warn + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.functional.pairwise import pairwise_cosine_similarity, pairwise_linear_similarity + + +def embedding_similarity( + batch: Tensor, similarity: str = "cosine", reduction: str = "none", zero_diagonal: bool = True +) -> Tensor: + """Computes representation similarity. + + Example: + >>> from paddlemetrics.functional import embedding_similarity + >>> embeddings = B.tensor([[1., 2., 3., 4.], [1., 2., 3., 4.], [4., 5., 6., 7.]]) + >>> embedding_similarity(embeddings) + tensor([[0.0000, 1.0000, 0.9759], + [1.0000, 0.0000, 0.9759], + [0.9759, 0.9759, 0.0000]]) + + Args: + batch: (batch, dim) + similarity: 'dot' or 'cosine' + reduction: 'none', 'sum', 'mean' (all along dim -1) + zero_diagonal: if True, the diagonals are set to zero + + Return: + A square matrix (batch, batch) with the similarity scores between all elements + If sum or mean are used, then returns (b, 1) with the reduced value for each row + + .. deprecated:: v0.6 + Use :func:`paddlemetrics.functional.pairwise_cosine_similarity` when `similarity='cosine'` + else use :func:`paddlemetrics.functional.pairwise_euclidean_distance`. Will be removed in v0.7. + """ + warn( + "Function `embedding_similarity` was deprecated v0.6 and will be removed in v0.7." 
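A hedged pure-Python sketch of the rank lookup in `retrieval_reciprocal_rank` above: ranks are 1-based and only the first relevant document contributes. The helper name is hypothetical.

```python
# Hypothetical pure-Python sketch of retrieval_reciprocal_rank, for intuition only.
def reciprocal_rank(preds, target):
    order = sorted(range(len(preds)), key=lambda i: preds[i], reverse=True)
    for rank, idx in enumerate(order, start=1):  # ranks are 1-based
        if target[idx]:
            return 1.0 / rank                    # only the first relevant hit counts
    return 0.0

print(reciprocal_rank([0.2, 0.3, 0.5], [False, True, False]))  # 0.5 (first hit at rank 2)
```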
+ " Use `paddlemetrics.functional.pairwise_cosine_similarity` instead when argument" + " similarity='cosine' else use `paddlemetrics.functional.pairwise_linear_similarity", + DeprecationWarning, + ) + if similarity == "cosine": + return pairwise_cosine_similarity(batch, reduction=reduction, zero_diagonal=zero_diagonal) + return pairwise_linear_similarity(batch, reduction=reduction, zero_diagonal=zero_diagonal) diff --git a/EE/paddlemetric/src/paddlemetrics/functional/text/__init__.py b/EE/paddlemetric/src/paddlemetrics/functional/text/__init__.py new file mode 100644 index 000000000..971708401 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/text/__init__.py @@ -0,0 +1,17 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from paddlemetrics.functional.text.bleu import bleu_score # noqa: F401 +from paddlemetrics.functional.text.sacre_bleu import sacre_bleu_score # noqa: F401 +from paddlemetrics.functional.text.wer import wer # noqa: F401 diff --git a/EE/paddlemetric/src/paddlemetrics/functional/text/bert.py b/EE/paddlemetric/src/paddlemetrics/functional/text/bert.py new file mode 100644 index 000000000..168be6eee --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/text/bert.py @@ -0,0 +1,650 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import csv +import math +import urllib +import warnings +from collections import Counter, defaultdict +from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, nn +from paddleext.torchapi.data import DataLoader, Dataset + +from paddlemetrics.utilities.imports import _TQDM_AVAILABLE, _TRANSFORMERS_AVAILABLE + +if _TRANSFORMERS_AVAILABLE: + from transformers import AutoModel, AutoTokenizer + +if _TQDM_AVAILABLE: + import tqdm + + +def _preprocess_text( + text: List[str], + tokenizer: Any, + max_length: int = 512, + truncation: bool = True, + sort_according_length: bool = True, + own_tokenizer: bool = False, +) -> Dict[str, Tensor]: + """Default text pre-processing function using `transformers` `AutoTokenizer` instance. + + Args: + text: + An iterable of sentences. + tokenizer: + Either `AutoTokenizer` instance from `transformers` package, or a user's own tokenizer. + max_length: + A maximum sequence length. + truncation: + An indication of whether tokenized sequences should be padded only to the length of the longest sequence. 
+ sort_according_length: + An indication of whether tokenized sequences should be sorted from shortest to longest. This is appropriate + to do for leveraging dynamic padding during embedding calculation and thereby to hasten inference. + own_tokenizer: + An indication of whether a non-default user's own tokenizer is used. + + Return: + A dictionary of tokenized sentences including input_ids and attention_mask. + + Raises: + BaseException: + If a tokenization with a user's own tokenizer is not successful. + """ + if not own_tokenizer: + tokenized_data = tokenizer( + text, padding="max_length", max_length=max_length, truncation=truncation, return_tensors="pt" + ) + else: + try: + tokenized_data = tokenizer(text, max_length) + except BaseException as e: + raise BaseException(f"Tokenization was not successful: {e}") + + input_ids, attention_mask = ( + _sort_data_according_length(tokenized_data["input_ids"], tokenized_data["attention_mask"]) + if sort_according_length + else (tokenized_data["input_ids"], tokenized_data["attention_mask"]) + ) + return {"input_ids": input_ids, "attention_mask": attention_mask} + + +def _process_attention_mask_for_special_tokens(attention_mask: Tensor) -> Tensor: + """Process attention mask to be zero for special [CLS] and [SEP] tokens as they're not included in a + calculation for BERT score. + + Args: + attention_mask: An attention mask to be returned, for example, by a `transformers` tokenizer. + + Return: + A processed attention mask. + """ + # Make attention_mask zero for [CLS] token + attention_mask[:, 0] = 0 + # Make attention_mask zero for [SEP] token + sep_token_position = (attention_mask - 0.1).cumsum(-1).argmax(-1) + attention_mask[B.arange(attention_mask.size(0)).long(), sep_token_position] = 0 + return attention_mask + + +def _sort_data_according_length(input_ids: Tensor, attention_mask: Tensor) -> Tuple[Tensor, Tensor]: + """Sort tokenized sentence from the shortest to the longest one.""" + sorted_indices = attention_mask.sum(1).argsort() + input_ids = input_ids[sorted_indices] + attention_mask = attention_mask[sorted_indices] + return input_ids, attention_mask + + +def _input_data_collator( + batch: Dict[str, Tensor], device: Optional[Union[str, B.device]] = None +) -> Dict[str, Tensor]: + """Helper function that trims model inputs to the longest sequence within the batch and put the input on the + proper device.""" + max_len = int(batch["attention_mask"].sum(1).max().item()) + input_ids = batch["input_ids"][:, :max_len].to(device) + attention_mask = batch["attention_mask"][:, :max_len].to(device) + batch.update({"input_ids": input_ids, "attention_mask": attention_mask}) + return batch + + +def _output_data_collator(model_output: Tensor, attention_mask: Tensor, target_len: int) -> Tuple[Tensor, Tensor]: + """Helper function that pads the model output and attention mask to the target length.""" + zeros_shape = list(model_output.shape) + zeros_shape[2] = target_len - zeros_shape[2] + model_output = B.cat( + [model_output, B.zeros(zeros_shape, dtype=model_output.dtype).to(model_output.device)], dim=2 + ) + zeros = B.zeros(zeros_shape[0], zeros_shape[2], dtype=attention_mask.dtype).to(attention_mask.device) + attention_mask = B.cat([attention_mask, zeros], dim=1) + return model_output, attention_mask + + +class TextDataset(Dataset): + """PyTorch dataset class for storing tokenized sentences and other properties used for BERT score + calculation.""" + + def __init__( + self, + text: List[str], + tokenizer: Any, + max_length: int = 512, + 
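The `(attention_mask - 0.1).cumsum(-1).argmax(-1)` trick in `_process_attention_mask_for_special_tokens` above deserves a quick sanity check. In plain Python (illustration only, not part of the patch): after subtracting 0.1, real tokens contribute +0.9 and padding contributes -0.1, so the running sum peaks exactly at the last real token, which is the `[SEP]` position.

```python
# Plain-Python re-check of the [SEP]-locating trick used above.
mask = [1, 1, 1, 0, 0]                 # three real tokens, two padding slots
running, cumsum = 0.0, []
for value in (m - 0.1 for m in mask):
    running += value
    cumsum.append(running)
print([round(v, 1) for v in cumsum])   # [0.9, 1.8, 2.7, 2.6, 2.5]
print(cumsum.index(max(cumsum)))       # 2 -> index of [SEP], which is then zeroed
```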
+        preprocess_text_fn: Callable[[List[str], Any, int], Dict[str, Tensor]] = _preprocess_text,
+        idf: bool = False,
+        tokens_idf: Optional[Dict[int, float]] = None,
+    ) -> None:
+        """
+        Args:
+            text:
+                An iterable of sentences.
+            tokenizer:
+                `AutoTokenizer` instance from `transformers` package.
+            max_length:
+                A maximum sequence length.
+            preprocess_text_fn:
+                A function used for processing the input sentences.
+            idf:
+                An indication of whether to calculate token inverse document frequencies to weight the model
+                embeddings.
+            tokens_idf:
+                Inverse document frequencies (these should be calculated on reference sentences).
+        """
+        self.text = preprocess_text_fn(text, tokenizer, max_length)
+        self.max_length = self.text["input_ids"].shape[1]
+        self.num_sentences = len(text)
+        self.idf = idf
+        self.tokens_idf = {}
+        if idf:
+            self.tokens_idf = tokens_idf if tokens_idf is not None else self._get_tokens_idf()
+
+    def __getitem__(self, idx: int) -> Dict[str, Tensor]:
+        input_ids = self.text["input_ids"][idx, :]
+        attention_mask = self.text["attention_mask"][idx, :]
+        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
+        if self.idf:
+            input_ids_idf = B.tensor([self.tokens_idf[input_idx] for input_idx in input_ids.tolist()])
+            inputs_dict["input_ids_idf"] = input_ids_idf
+        return inputs_dict
+
+    def __len__(self) -> int:
+        return self.num_sentences
+
+    def _get_tokens_idf(self) -> Dict[int, float]:
+        """Calculate token inverse document frequencies.
+
+        Return:
+            A python dictionary containing inverse document frequencies for token ids.
+        """
+        token_counter: Counter = Counter()
+        for tokens in map(self._set_of_tokens, self.text["input_ids"]):
+            token_counter.update(tokens)
+
+        tokens_idf: Dict[int, float] = defaultdict(self._get_tokens_idf_default_value)
+        tokens_idf.update(
+            {idx: math.log((self.num_sentences + 1) / (occurrence + 1)) for idx, occurrence in token_counter.items()}
+        )
+        return tokens_idf
+
+    def _get_tokens_idf_default_value(self) -> float:
+        """Helper function that ensures that a `defaultdict` can be pickled."""
+        return math.log((self.num_sentences + 1) / 1)
+
+    @staticmethod
+    def _set_of_tokens(input_ids: Tensor) -> Set:
+        """Return set of tokens from the `input_ids` `B.Tensor`."""
+        return set(input_ids.tolist())
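A quick arithmetic check (plain Python, illustration only) of the IDF weighting computed by `_get_tokens_idf` above: with `num_sentences = N`, a token id occurring in `df` sentences gets `idf = log((N + 1) / (df + 1))`, and unseen ids fall back to `log(N + 1)` via the `defaultdict`. The value `N = 3` below is an assumed example.

```python
import math

N = 3  # assumed number of reference sentences
for df in (0, 1, 3):
    print(df, round(math.log((N + 1) / (df + 1)), 4))
# 0 -> 1.3863  (same as the default value for token ids never seen)
# 1 -> 0.6931
# 3 -> 0.0     (a token appearing in every sentence carries no weight)
```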
+
+
+class TokenizedDataset(TextDataset):
+    """The child class of `TextDataset` class used with already tokenized data."""
+
+    def __init__(
+        self,
+        input_ids: Tensor,
+        attention_mask: Tensor,
+        idf: bool = False,
+        tokens_idf: Optional[Dict[int, float]] = None,
+    ) -> None:
+        """
+        Args:
+            input_ids:
+                Input ids (`B.Tensor`).
+            attention_mask:
+                Attention mask (`B.Tensor`).
+            idf:
+                An indication of whether to calculate token inverse document frequencies to weight the model
+                embeddings.
+            tokens_idf:
+                Inverse document frequencies (these should be calculated on reference sentences).
+        """
+        self.text = dict(zip(["input_ids", "attention_mask"], _sort_data_according_length(input_ids, attention_mask)))
+        self.text = _input_data_collator(self.text)
+        self.num_sentences = len(self.text["input_ids"])
+        self.max_length = self.text["input_ids"].shape[1]
+        self.idf = idf
+        self.tokens_idf = {}
+        if idf:
+            self.tokens_idf = tokens_idf if tokens_idf is not None else self._get_tokens_idf()
+
+
+def _get_progress_bar(dataloader: DataLoader, verbose: bool = False) -> Union[DataLoader, "tqdm.auto.tqdm"]:
+    """Helper function that returns the dataloader itself when `verbose = False`, or wraps the dataloader with
+    `tqdm.auto.tqdm` when `verbose = True` to display a progress bar during the embedding calculation."""
+    return tqdm.auto.tqdm(dataloader) if verbose else dataloader
+
+
+def _check_shape_of_model_output(output: Tensor, input_ids: Tensor) -> None:
+    """Check the shape of the user's own model output."""
+    bs, seq_len = input_ids.shape[:2]
+    invalid_out_shape = len(output.shape) != 3 or output.shape[0] != bs or output.shape[1] != seq_len
+    if invalid_out_shape:
+        raise ValueError(
+            "The model output must be `B.Tensor` of a shape `[batch_size, seq_len, model_dim]` "
+            f"i.e. [{bs}, {seq_len}, model_dim], but got {output.shape}."
+        )
+
+
+def _get_embeddings_and_idf_scale(
+    dataloader: DataLoader,
+    target_len: int,
+    model: nn.Module,
+    device: Optional[Union[str, B.device]] = None,
+    num_layers: Optional[int] = None,
+    all_layers: bool = False,
+    idf: bool = False,
+    verbose: bool = False,
+    user_forward_fn: Callable[[nn.Module, Dict[str, Tensor]], Tensor] = None,
+) -> Tuple[Tensor, Tensor]:
+    """Calculate sentence embeddings and the inverse-document-frequency scaling factor.
+
+    Args:
+        dataloader:
+            `B.utils.data.DataLoader` instance.
+        target_len:
+            A length of the longest sequence in the data. Used for padding the model output.
+        model:
+            BERT model.
+        device:
+            A device to be used for calculation.
+        num_layers:
+            The layer of representation to use.
+        all_layers:
+            An indication of whether representations from all model layers should be used for BERTScore.
+        idf:
+            An indication of whether normalization using inverse document frequencies should be used.
+        verbose:
+            An indication of whether a progress bar should be displayed during the embedding calculation.
+        user_forward_fn:
+            A user's own forward function used in a combination with `user_model`. This function must take
+            `user_model` and a python dictionary containing `"input_ids"` and `"attention_mask"` represented by
+            `B.Tensor` as an input and return the model's output represented by the single `B.Tensor`.
+
+    Return:
+        A tuple of B.Tensors containing the model's embeddings and the normalized tokens IDF.
+        When `idf = False`, tokens IDF is not calculated, and a matrix of mean weights is returned instead.
+        For a single sentence, `mean_weight = 1/seq_len`, where `seq_len` is a sum over the corresponding
+        `attention_mask`.
+
+    Raises:
+        ValueError:
+            If `all_layers = True` and a model, which is not from the `transformers` package, is used.
+ """ + embeddings_list: List[Tensor] = [] + idf_scale_list: List[Tensor] = [] + for batch in _get_progress_bar(dataloader, verbose): + with B.no_grad(): + batch = _input_data_collator(batch, device) + # Output shape: batch_size x num_layers OR 1 x sequence_length x bert_dim + if not all_layers: + if not user_forward_fn: + out = model(batch["input_ids"], batch["attention_mask"], output_hidden_states=True) + out = out.hidden_states[num_layers if num_layers is not None else -1] + else: + out = user_forward_fn(model, batch) + _check_shape_of_model_output(out, batch["input_ids"]) + out = out.unsqueeze(1) + else: + if user_forward_fn: + raise ValueError( + "The option `all_layers=True` can be used only with default `transformers` models." + ) + out = model(batch["input_ids"], batch["attention_mask"], output_hidden_states=True) + out = B.cat([o.unsqueeze(1) for o in out.hidden_states], dim=1) + + out /= out.norm(dim=-1).unsqueeze(-1) # normalize embeddings + out, attention_mask = _output_data_collator(out, batch["attention_mask"], target_len) + processed_attention_mask = _process_attention_mask_for_special_tokens(attention_mask) + # Multiply embeddings with attention_mask (b=batch_size, l=num_layers, s=seq_len, d=emb_dim) + out = B.einsum("blsd, bs -> blsd", out, processed_attention_mask) + embeddings_list.append(out.cpu()) + + # Calculate weighted (w.r.t. sentence length) input_ids IDF matrix + input_ids_idf = ( + batch["input_ids_idf"] * processed_attention_mask if idf else processed_attention_mask.type(out.dtype) + ) + input_ids_idf /= input_ids_idf.sum(-1, keepdim=True) + idf_scale_list.append(input_ids_idf) + + embeddings = B.cat(embeddings_list) + idf_scale = B.cat(idf_scale_list) + + return embeddings, idf_scale + + +def _get_scaled_precision_or_recall(cos_sim: Tensor, metric: str, idf_scale: Tensor) -> Tensor: + """Helper function that calculates precision or recall, transpose it and scale it with idf_scale factor.""" + dim = 3 if metric == "precision" else 2 + res = cos_sim.max(dim=dim).values + res = B.einsum("bls, bs -> bls", res, idf_scale).sum(-1) + # We transpose the results and squeeze if possible to match the format of the original BERTScore implementation + res = res.transpose(0, 1).squeeze() + return res + + +def _get_precision_recall_f1( + pred_embeddings: Tensor, ref_embeddings: Tensor, pred_idf_scale: Tensor, ref_idf_scale: Tensor +) -> Tuple[Tensor, Tensor, Tensor]: + """Calculate precision, recall and F1 score over candidate and reference sentences. + + Args: + pred_embeddings: Embeddings of candidate sentenecs. + ref_embeddings: Embeddings of reference sentences. + pred_idf_scale: An IDF scale factor for candidate sentences. + ref_idf_scale: An IDF scale factor for reference sentences. + + Return: + Tensors containing precision, recall and F1 score, respectively. 
+ """ + # Dimensions: b = batch_size, l = num_layers, p = predictions_seq_len, r = references_seq_len, d = bert_dim + cos_sim = B.einsum("blpd, blrd -> blpr", pred_embeddings, ref_embeddings) + # Final metrics shape = (batch_size * num_layers | batch_size) + precision = _get_scaled_precision_or_recall(cos_sim, "precision", pred_idf_scale) + recall = _get_scaled_precision_or_recall(cos_sim, "recall", ref_idf_scale) + + f1_score = 2 * precision * recall / (precision + recall) + f1_score = f1_score.masked_fill(B.isnan(f1_score), 0.0) + + return precision, recall, f1_score + + +def _get_hash(model_name_or_path: Optional[str] = None, num_layers: Optional[int] = None, idf: bool = False) -> str: + """Compute `BERT_score`_ (copied and adjusted)""" + msg = f"{model_name_or_path}_L{num_layers}{'_idf' if idf else '_no-idf'}" + return msg + + +def _read_csv_from_local_file(baseline_path: str) -> Tensor: + """Helper function which reads baseline the csv file from the local file. + + This method implemented to avoid `pandas` dependency. + """ + with open(baseline_path) as fname: + csv_file = csv.reader(fname) + baseline_list = [[float(item) for item in row] for idx, row in enumerate(csv_file) if idx > 0] + baseline = B.tensor(baseline_list)[:, 1:] + return baseline + + +def _read_csv_from_url(baseline_url: str) -> Tensor: + """Helper function which reads the baseline csv file from URL. + + This method is implemented to avoid `pandas` dependency. + """ + with urllib.request.urlopen(baseline_url) as http_request: # type: ignore + baseline_list = [ + [float(item) for item in row.strip().decode("utf-8").split(",")] + for idx, row in enumerate(http_request) + if idx > 0 + ] + baseline = B.tensor(baseline_list)[:, 1:] + return baseline + + +def _load_baseline( + lang: str = "en", + model_name_or_path: Optional[str] = None, + baseline_path: Optional[str] = None, + baseline_url: Optional[str] = None, +) -> Optional[Tensor]: + """Load a CSV file with the baseline values used for rescaling.""" + if baseline_path: + baseline: Optional[Tensor] = _read_csv_from_local_file(baseline_path) + elif baseline_url: + baseline = _read_csv_from_url(baseline_url) + # Read default baseline from the original `bert-score` package https://github.com/Tiiiger/bert_score + elif lang and model_name_or_path: + _URL_BASE = "https://raw.githubusercontent.com/Tiiiger/bert_score/master/bert_score/rescale_baseline" + baseline_url = f"{_URL_BASE}/{lang}/{model_name_or_path}.tsv" + baseline = _read_csv_from_url(baseline_url) + else: + baseline = None + warnings.warn("Baseline was not successfully loaded. 
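A hedged toy re-computation of the greedy matching in `_get_precision_recall_f1` above, with uniform weights standing in for the IDF scale: precision takes the best reference match per candidate token (max over dim 3), recall takes the best candidate match per reference token (max over dim 2).

```python
# Toy cosine-similarity matrix: rows = candidate tokens, cols = reference tokens.
cos_sim = [
    [1.0, 0.2],  # candidate token 0 vs. reference tokens 0, 1
    [0.3, 0.8],  # candidate token 1
    [0.1, 0.4],  # candidate token 2
]
precision = sum(max(row) for row in cos_sim) / len(cos_sim)        # (1.0+0.8+0.4)/3
recall = sum(max(col) for col in zip(*cos_sim)) / len(cos_sim[0])  # (1.0+0.8)/2
f1 = 2 * precision * recall / (precision + recall)
print(round(precision, 4), round(recall, 4), round(f1, 4))         # 0.7333 0.9 0.8082
```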
No baseline is going to be used.") + + return baseline + + +def _rescale_metrics_with_baseline( + precision: Tensor, + recall: Tensor, + f1_score: Tensor, + baseline: Tensor, + num_layers: Optional[int] = None, + all_layers: bool = False, +) -> Tuple[Tensor, Tensor, Tensor]: + """Rescale the computed metrics with the pre-computed baseline.""" + if num_layers is None and all_layers is False: + num_layers = -1 + all_metrics = B.stack([precision, recall, f1_score], dim=-1) + baseline_scale = baseline.unsqueeze(1) if all_layers else baseline[num_layers] + all_metrics = (all_metrics - baseline_scale) / (1 - baseline_scale) + + return all_metrics[..., 0], all_metrics[..., 1], all_metrics[..., 2] + + +def bert_score( + predictions: Union[List[str], Dict[str, Tensor]], + references: Union[List[str], Dict[str, Tensor]], + model_name_or_path: Optional[str] = None, + num_layers: Optional[int] = None, + all_layers: bool = False, + model: Optional[nn.Module] = None, + user_tokenizer: Any = None, + user_forward_fn: Callable[[nn.Module, Dict[str, Tensor]], Tensor] = None, + verbose: bool = False, + idf: bool = False, + device: Optional[Union[str, B.device]] = None, + max_length: int = 512, + batch_size: int = 64, + num_threads: int = 4, + return_hash: bool = False, + lang: str = "en", + rescale_with_baseline: bool = False, + baseline_path: Optional[str] = None, + baseline_url: Optional[str] = None, +) -> Dict[str, Union[List[float], str]]: + """`Bert_score Evaluating Text Generation`_ leverages the pre-trained contextual embeddings from BERT and + matches words in candidate and reference sentences by cosine similarity. It has been shown to correlate with + human judgment on sentence-level and system-level evaluation. Moreover, BERTScore computes precision, recall, + and F1 measure, which can be useful for evaluating different language generation tasks. + + This implemenation follows the original implementation from `BERT_score`_ + + Args: + predictions: + Either an iterable of predicted sentences or a `Dict[str, B.Tensor]` containing `input_ids` and + `attention_mask` `B.Tensor`. + references: + Either an iterable of target sentences or a `Dict[str, B.Tensor]` containing `input_ids` and + `attention_mask` `B.Tensor`. + model_name_or_path: + A name or a model path used to load `transformers` pretrained model. + num_layers: + A layer of representation to use. + all_layers: + An indication of whether the representation from all model's layers should be used. + If `all_layers = True`, the argument `num_layers` is ignored. + model: + A user's own model. Must be of `nn.Module` instance. + user_tokenizer: + A user's own tokenizer used with the own model. This must be an instance with the `__call__` method. + This method must take an iterable of sentences (`List[str]`) and must return a python dictionary + containing `"input_ids"` and `"attention_mask"` represented by `B.Tensor`. It is up to the user's model + of whether `"input_ids"` is a `B.Tensor` of input ids or embedding vectors. + This tokenizer must prepend an equivalent of `[CLS]` token and append an equivalent of `[SEP]` token + as `transformers` tokenizer does. + user_forward_fn: + A user's own forward function used in a combination with `user_model`. This function must take `user_model` + and a python dictionary of containing `"input_ids"` and `"attention_mask"` represented by `B.Tensor` + as an input and return the model's output represented by the single `B.Tensor`. 
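Hedged arithmetic for `_rescale_metrics_with_baseline` above: each score is mapped through `(score - baseline) / (1 - baseline)`, so the empirical baseline becomes 0 and a perfect score stays 1. The baseline value 0.85 below is an assumed example, not a shipped constant.

```python
baseline = 0.85  # assumed example baseline value
for score in (0.85, 0.95, 1.0):
    print(score, round((score - baseline) / (1 - baseline), 4))
# 0.85 -> 0.0, 0.95 -> 0.6667, 1.0 -> 1.0
```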
+ verbose: + An indication of whether a progress bar to be displayed during the embeddings calculation. + idf: + An indication of whether normalization using inverse document frequencies should be used. + device: + A device to be used for calculation. + max_length: + A maximum length of input sequences. Sequences longer than `max_length` are to be trimmed. + batch_size: + A batch size used for model processing. + num_threads: + A number of threads to use for a dataloader. + return_hash: + An indication of whether the correspodning `hash_code` should be returned. + lang: + A language of input sentences. It is used when the scores are rescaled with a baseline. + rescale_with_baseline: + An indication of whether bertscore should be rescaled with a pre-computed baseline. + When a pretrained model from `transformers` model is used, the corresponding baseline is downloaded + from the original `bert-score` package from `BERT_score`_ if available. + In other cases, please specify a path to the baseline csv/tsv file, which must follow the formatting + of the files from `BERT_score`_ + baseline_path: + A path to the user's own local csv/tsv file with the baseline scale. + baseline_url: + A url path to the user's own csv/tsv file with the baseline scale. + + Returns: + Python dictionary containing the keys `precision`, `recall` and `f1` with corresponding values. + + Raises: + ValueError: + If `len(predictions) != len(references)`. + ValueError: + If `tqdm` package is required and not installed. + ValueError: + If `transformers` package is required and not installed. + ValueError: + If `num_layer` is larger than the number of the model layers. + ValueError: + If invalid input is provided. + + Example: + >>> predictions = ["hello there", "general kenobi"] + >>> references = ["hello there", "master kenobi"] + >>> bert_score(predictions=predictions, references=references, lang="en") # doctest: +SKIP + {'precision': [0.99..., 0.99...], + 'recall': [0.99..., 0.99...], + 'f1': [0.99..., 0.99...]} + """ + if len(predictions) != len(references): + raise ValueError("Number of predicted and reference sententes must be the same!") + + if verbose and (not _TQDM_AVAILABLE): + raise ValueError( + "An argument `verbose = True` requires `tqdm` package be installed. Install with `pip install tqdm`." + ) + + if model is None: + if not _TRANSFORMERS_AVAILABLE: + raise ValueError( + "`bert_score` metric with default models requires `transformers` package be installed. " + "Either install with `pip install transformers>=4.0` or `pip install paddlemetrics[text]`" + ) + tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) + model = AutoModel.from_pretrained(model_name_or_path) + else: + tokenizer = user_tokenizer + model.eval() + model.to(device) + + try: + if num_layers and num_layers > model.config.num_hidden_layers: # type: ignore + raise ValueError( + f"num_layers={num_layers} is forbidden for {model_name_or_path}. 
" # type: ignore + f"Please use num_layers <= {model.config.num_hidden_layers}" # type: ignore + ) + except AttributeError: + warnings.warn("It was not possible to retrieve the parameter `num_layers` from the model specification.") + + _are_empty_lists = all(isinstance(text, list) and len(text) == 0 for text in (predictions, references)) + _are_valid_lists = all( + isinstance(text, list) and len(text) > 0 and isinstance(text[0], str) for text in (predictions, references) + ) + _are_valid_tensors = all( + isinstance(text, dict) and isinstance(text["input_ids"], Tensor) for text in (predictions, references) + ) + if _are_empty_lists: + warnings.warn("Predictions and references are empty.") + output_dict: Dict[str, Union[List[float], str]] = { + "precision": [0.0], + "recall": [0.0], + "f1": [0.0], + } + if return_hash: + output_dict.update({"hash": _get_hash(model_name_or_path, num_layers, idf)}) + return output_dict + + # Load baselines if needed + baseline = _load_baseline(lang, model_name_or_path, baseline_path, baseline_url) if rescale_with_baseline else None + + # We ignore mypy typing below as the proper typing is ensured by conditions above, only mypy cannot infer that. + if _are_valid_lists: + ref_dataset = TextDataset(references, tokenizer, max_length, idf=idf) # type: ignore + pred_dataset = TextDataset( + predictions, # type: ignore + tokenizer, + max_length, + idf=idf, + tokens_idf=ref_dataset.tokens_idf, + ) + elif _are_valid_tensors: + ref_dataset = TokenizedDataset(**references, idf=idf) # type: ignore + pred_dataset = TokenizedDataset(**predictions, idf=idf, tokens_idf=ref_dataset.tokens_idf) # type: ignore + else: + raise ValueError("Invalid input provided.") + + ref_loader = DataLoader(ref_dataset, batch_size=batch_size, num_workers=num_threads) + pred_loader = DataLoader(pred_dataset, batch_size=batch_size, num_workers=num_threads) + + ref_embeddings, ref_idf_scale = _get_embeddings_and_idf_scale( + ref_loader, ref_dataset.max_length, model, device, num_layers, all_layers, idf, verbose, user_forward_fn + ) + pred_embeddings, pred_idf_scale = _get_embeddings_and_idf_scale( + pred_loader, pred_dataset.max_length, model, device, num_layers, all_layers, idf, verbose, user_forward_fn + ) + + precision, recall, f1_score = _get_precision_recall_f1( + pred_embeddings, ref_embeddings, pred_idf_scale, ref_idf_scale + ) + + if baseline is not None: + precision, recall, f1_score = _rescale_metrics_with_baseline( + precision, recall, f1_score, baseline, num_layers, all_layers + ) + + output_dict = { + "precision": precision.tolist(), + "recall": recall.tolist(), + "f1": f1_score.tolist(), + } + if return_hash: + output_dict.update({"hash": _get_hash(model_name_or_path, num_layers, idf)}) + return output_dict diff --git a/EE/paddlemetric/src/paddlemetrics/functional/text/bleu.py b/EE/paddlemetric/src/paddlemetrics/functional/text/bleu.py new file mode 100644 index 000000000..4d00946b7 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/text/bleu.py @@ -0,0 +1,171 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# referenced from
+# Library Name: torchtext
+# Authors: torchtext authors and @sluks
+# Date: 2020-07-18
+# Link: https://pytorch.org/text/_modules/torchtext/data/metrics.html#bleu_score
+from collections import Counter
+from typing import Sequence, Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor, tensor
+
+
+def _count_ngram(ngram_input_list: Sequence[str], n_gram: int) -> Counter:
+    """Count how many times each n-gram appears in a given text.
+
+    Args:
+        ngram_input_list: A list of translated text or reference texts
+        n_gram: n-gram order, ranging from 1 to 4
+
+    Return:
+        ngram_counter: a collections.Counter object counting each n-gram
+    """
+
+    ngram_counter: Counter = Counter()
+
+    for i in range(1, n_gram + 1):
+        for j in range(len(ngram_input_list) - i + 1):
+            ngram_key = tuple(ngram_input_list[j : (i + j)])
+            ngram_counter[ngram_key] += 1
+
+    return ngram_counter
+
+
+def _bleu_score_update(
+    reference_corpus: Sequence[Sequence[Sequence[str]]],
+    translate_corpus: Sequence[Sequence[str]],
+    numerator: Tensor,
+    denominator: Tensor,
+    trans_len: Tensor,
+    ref_len: Tensor,
+    n_gram: int = 4,
+) -> Tuple[Tensor, Tensor]:
+    """Updates and returns variables required to compute the BLEU score.
+
+    Args:
+        reference_corpus: An iterable of iterables of reference corpus
+        translate_corpus: An iterable of machine translated corpus
+        numerator: Numerator of precision score (true positives)
+        denominator: Denominator of precision score (true positives + false positives)
+        trans_len: count of words in a candidate translation
+        ref_len: count of words in a reference translation
+        n_gram: n-gram order, ranging from 1 to 4
+    """
+
+    for (translation, references) in zip(translate_corpus, reference_corpus):
+        trans_len += len(translation)
+        ref_len_list = [len(ref) for ref in references]
+        ref_len_diff = [abs(len(translation) - x) for x in ref_len_list]
+        ref_len += ref_len_list[ref_len_diff.index(min(ref_len_diff))]
+        translation_counter: Counter = _count_ngram(translation, n_gram)
+        reference_counter: Counter = Counter()
+
+        for ref in references:
+            reference_counter |= _count_ngram(ref, n_gram)
+
+        ngram_counter_clip = translation_counter & reference_counter
+
+        for counter_clip in ngram_counter_clip:
+            numerator[len(counter_clip) - 1] += ngram_counter_clip[counter_clip]
+
+        for counter in translation_counter:
+            denominator[len(counter) - 1] += translation_counter[counter]
+
+    return trans_len, ref_len
+
+
+def _bleu_score_compute(
+    trans_len: Tensor, ref_len: Tensor, numerator: Tensor, denominator: Tensor, n_gram: int = 4, smooth: bool = False
+) -> Tensor:
+    """Computes the BLEU score.
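A hedged illustration of the clipping in `_bleu_score_update` above: per-n-gram reference counts are combined with `|` (maximum across references) and candidate counts are clipped with `&` (per-n-gram minimum), so a candidate cannot be rewarded for repeating an n-gram more often than any reference contains it.

```python
from collections import Counter

candidate = Counter({("the",): 2, ("cat",): 1})
ref_a = Counter({("the",): 1, ("cat",): 1})
ref_b = Counter({("the",): 1, ("mat",): 1})
reference = ref_a | ref_b          # max count per n-gram across references
clipped = candidate & reference    # the second 'the' in the candidate is clipped away
print(clipped)                     # Counter({('the',): 1, ('cat',): 1})
```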
+ + Args: + trans_len: count of words in a candidate translation + ref_len: count of words in a reference translation + numerator: Numerator of precision score (true positives) + denominator: Denominator of precision score (true positives + false positives) + n_gram: gram value ranged 1 to 4 + smooth: Whether or not to apply smoothing + """ + device = numerator.device + if min(numerator) == 0.0: + return tensor(0.0, device=device) + + if smooth: + precision_scores = B.div( + B.add(numerator, B.ones(n_gram, device=device)), + B.add(denominator, B.ones(n_gram, device=device)), + ) + precision_scores[0] = numerator[0] / denominator[0] + else: + precision_scores = numerator / denominator + + log_precision_scores = tensor([1.0 / n_gram] * n_gram, device=device) * B.log(precision_scores) + geometric_mean = B.exp(B.sum(log_precision_scores)) + brevity_penalty = tensor(1.0, device=device) if trans_len > ref_len else B.exp(1 - (ref_len / trans_len)) + bleu = brevity_penalty * geometric_mean + + return bleu + + +def bleu_score( + reference_corpus: Sequence[Sequence[Sequence[str]]], + translate_corpus: Sequence[Sequence[str]], + n_gram: int = 4, + smooth: bool = False, +) -> Tensor: + """Calculate `BLEU score`_ of machine translated text with one or more references. + + Args: + reference_corpus: + An iterable of iterables of reference corpus + translate_corpus: + An iterable of machine translated corpus + n_gram: + Gram value ranged from 1 to 4 (Default 4) + smooth: + Whether or not to apply smoothing – see [2] + + Return: + Tensor with BLEU Score + + Example: + >>> from paddlemetrics.functional import bleu_score + >>> translate_corpus = ['the cat is on the mat'.split()] + >>> reference_corpus = [['there is a cat on the mat'.split(), 'a cat is on the mat'.split()]] + >>> bleu_score(reference_corpus, translate_corpus) + tensor(0.7598) + + References: + [1] BLEU: a Method for Automatic Evaluation of Machine Translation by Papineni, + Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu `BLEU`_ + + [2] Automatic Evaluation of Machine Translation Quality Using Longest Common Subsequence + and Skip-Bigram Statistics by Chin-Yew Lin and Franz Josef Och `Machine Translation Evolution`_ + """ + + if len(translate_corpus) != len(reference_corpus): + raise ValueError(f"Corpus has different size {len(translate_corpus)} != {len(reference_corpus)}") + numerator = B.zeros(n_gram) + denominator = B.zeros(n_gram) + trans_len = tensor(0, dtype=B.float) + ref_len = tensor(0, dtype=B.float) + + trans_len, ref_len = _bleu_score_update( + reference_corpus, translate_corpus, numerator, denominator, trans_len, ref_len, n_gram + ) + + return _bleu_score_compute(trans_len, ref_len, numerator, denominator, n_gram, smooth) diff --git a/EE/paddlemetric/src/paddlemetrics/functional/text/rouge.py b/EE/paddlemetric/src/paddlemetrics/functional/text/rouge.py new file mode 100644 index 000000000..e83c00d0b --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/text/rouge.py @@ -0,0 +1,325 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
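A hedged hand-check of the `bleu_score` doctest above: for 'the cat is on the mat' against the two references, the clipped modified precisions work out to p1=5/6, p2=4/5, p3=3/4, p4=2/3, and the brevity penalty is 1 because the closest reference is also 6 words long.

```python
import math

precisions = [5 / 6, 4 / 5, 3 / 4, 2 / 3]
geometric_mean = math.exp(sum(math.log(p) for p in precisions) / 4)
print(round(geometric_mean, 4))  # 0.7598, matching tensor(0.7598) in the example
```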
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import re
+from collections import Counter
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.utilities.imports import _NLTK_AVAILABLE
+
+ALLOWED_ROUGE_KEYS: Dict[str, Union[int, str]] = {
+    "rouge1": 1,
+    "rouge2": 2,
+    "rouge3": 3,
+    "rouge4": 4,
+    "rouge5": 5,
+    "rouge6": 6,
+    "rouge7": 7,
+    "rouge8": 8,
+    "rouge9": 9,
+    "rougeL": "L",
+    "rougeLsum": "Lsum",
+}
+
+
+def _add_newline_to_end_of_each_sentence(x: str) -> str:
+    """This was added to get rougeLsum scores matching published rougeL scores for BART and PEGASUS."""
+    if not _NLTK_AVAILABLE:
+        raise ValueError("ROUGE-Lsum calculation requires that nltk is installed. Use `pip install nltk`.")
+    import nltk
+
+    nltk.download("punkt", quiet=True, force=False)
+
+    x = re.sub("<n>", "", x)  # remove pegasus newline char
+    return "\n".join(nltk.sent_tokenize(x))
+
+
+def _compute_metrics(hits_or_lcs: int, pred_len: int, target_len: int) -> Dict[str, Tensor]:
+    """This computes precision, recall and F1 score based on the number of hits (or the LCS length) and the
+    lengths of the tokenized predicted and target sentences.
+
+    Args:
+        hits_or_lcs:
+            A number of matches or a length of the longest common subsequence.
+        pred_len:
+            A length of a tokenized predicted sentence.
+        target_len:
+            A length of a tokenized target sentence.
+    """
+    precision = hits_or_lcs / pred_len
+    recall = hits_or_lcs / target_len
+    if precision == recall == 0.0:
+        return dict(precision=tensor(0.0), recall=tensor(0.0), fmeasure=tensor(0.0))
+
+    fmeasure = 2 * precision * recall / (precision + recall)
+    return dict(precision=tensor(precision), recall=tensor(recall), fmeasure=tensor(fmeasure))
+
+
+def _lcs(pred_tokens: List[str], target_tokens: List[str]) -> int:
+    """Common DP algorithm to compute the length of the longest common subsequence.
+
+    Args:
+        pred_tokens:
+            A tokenized predicted sentence.
+        target_tokens:
+            A tokenized target sentence.
+    """
+    LCS = [[0] * (len(pred_tokens) + 1) for _ in range(len(target_tokens) + 1)]
+    for i in range(1, len(target_tokens) + 1):
+        for j in range(1, len(pred_tokens) + 1):
+            if target_tokens[i - 1] == pred_tokens[j - 1]:
+                LCS[i][j] = LCS[i - 1][j - 1] + 1
+            else:
+                LCS[i][j] = max(LCS[i - 1][j], LCS[i][j - 1])
+    return LCS[-1][-1]
+
+
+def _normalize_and_tokenize_text(text: str, stemmer: Optional[Any] = None) -> List[str]:
+    """Rouge score should be calculated only over lowercased words and digits. Optionally, Porter stemmer can be
+    used to strip word suffixes to improve matching.
+    The text normalization follows the implementation from `Rouge score_Text Normalizition`_
+
+    Args:
+        text:
+            An input sentence.
+        stemmer:
+            Porter stemmer instance to strip word suffixes to improve matching.
+    """
+    # Replace any non-alpha-numeric characters with spaces.
+    text = re.sub(r"[^a-z0-9]+", " ", text.lower())
+
+    tokens = re.split(r"\s+", text)
+    if stemmer:
+        # Only stem words more than 3 characters long.
+        tokens = [stemmer.stem(x) if len(x) > 3 else x for x in tokens]
+
+    # One final check to drop any empty or invalid tokens.
+    tokens = [x for x in tokens if (isinstance(x, str) and re.match(r"^[a-z0-9]+$", x))]
+
+    return tokens
+
+
+def _rouge_n_score(pred: List[str], target: List[str], n_gram: int) -> Dict[str, Tensor]:
+    """This computes precision, recall and F1 score for the Rouge-N metric.
+
+    Args:
+        pred:
+            A predicted sentence.
+ target: + A target sentence. + n_gram: + N-gram overlap. + """ + + def _create_ngrams(tokens: List[str], n: int) -> Counter: + ngrams: Counter = Counter() + for ngram in (tuple(tokens[i : i + n]) for i in range(len(tokens) - n + 1)): + ngrams[ngram] += 1 + return ngrams + + pred_ngrams, target_ngrams = _create_ngrams(pred, n_gram), _create_ngrams(target, n_gram) + pred_len, target_len = sum(pred_ngrams.values()), sum(target_ngrams.values()) + if 0 in (pred_len, target_len): + return dict(precision=tensor(0.0), recall=tensor(0.0), fmeasure=tensor(0.0)) + + # It is sufficient to take a set(pred_tokenized) for hits count as we consider intersenction of pred & target + hits = sum(min(pred_ngrams[w], target_ngrams[w]) for w in set(pred_ngrams)) + return _compute_metrics(hits, max(pred_len, 1), max(target_len, 1)) + + +def _rouge_l_score(pred: List[str], target: List[str]) -> Dict[str, Tensor]: + """This computes precision, recall and F1 score for the Rouge-L or Rouge-LSum metric. + + Args: + pred: + A predicted sentence. + target: + A target sentence. + """ + pred_len, target_len = len(pred), len(target) + if 0 in (pred_len, target_len): + return dict(precision=tensor(0.0), recall=tensor(0.0), fmeasure=tensor(0.0)) + + lcs = _lcs(pred, target) + return _compute_metrics(lcs, pred_len, target_len) + + +def _rouge_score_update( + preds: List[str], + targets: List[str], + rouge_keys_values: List[Union[int, str]], + stemmer: Optional[Any] = None, +) -> Dict[Union[int, str], List[Dict[str, Tensor]]]: + """Update the rouge score with the current set of predicted and target sentences. + + Args: + preds: + An iterable of predicted sentences. + targets: + An iterable of target sentences. + rouge_keys_values: + List of N-grams/'L'/'Lsum' arguments. + stemmer: + Porter stemmer instance to strip word suffixes to improve matching. 
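A hedged hand-computation in the spirit of `_rouge_n_score` above, for ROUGE-1 on a single sentence pair after lowercasing and tokenization. All tokens are unique here, so a set intersection equals the clipped-count sum the real code uses.

```python
pred = ["my", "name", "is", "john"]
target = ["is", "your", "name", "john"]
hits = len(set(pred) & set(target))                   # name, is, john -> 3
precision, recall = hits / len(pred), hits / len(target)
print(2 * precision * recall / (precision + recall))  # 0.75
```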
+ + Example: + >>> targets = "Is your name John".split() + >>> preds = "My name is John".split() + >>> from pprint import pprint + >>> score = _rouge_score_update(preds, targets, rouge_keys_values=[1, 2, 3, 'L']) + >>> pprint(score) # doctest: +NORMALIZE_WHITESPACE +SKIP + {1: [{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, + {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, + {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, + {'fmeasure': tensor(1.), 'precision': tensor(1.), 'recall': tensor(1.)}], + 2: [{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, + {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, + {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, + {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}], + 3: [{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, + {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, + {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, + {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}], + 'L': [{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, + {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, + {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, + {'fmeasure': tensor(1.), 'precision': tensor(1.), 'recall': tensor(1.)}]} + """ + results: Dict[Union[int, str], List[Dict[str, Tensor]]] = {rouge_key: [] for rouge_key in rouge_keys_values} + for pred_raw, target_raw in zip(preds, targets): + pred = _normalize_and_tokenize_text(pred_raw, stemmer) + target = _normalize_and_tokenize_text(target_raw, stemmer) + + if "Lsum" in rouge_keys_values: + # rougeLsum expects "\n" separated sentences within a summary + pred_Lsum = _normalize_and_tokenize_text(_add_newline_to_end_of_each_sentence(pred_raw), stemmer) + target_Lsum = _normalize_and_tokenize_text(_add_newline_to_end_of_each_sentence(target_raw), stemmer) + + for rouge_key in rouge_keys_values: + if isinstance(rouge_key, int): + score = _rouge_n_score(pred, target, rouge_key) + else: + score = _rouge_l_score( + pred if rouge_key != "Lsum" else pred_Lsum, + target if rouge_key != "Lsum" else target_Lsum, + ) + results[rouge_key].append(score) + return results + + +def _rouge_score_compute(sentence_results: Dict[str, List[Tensor]]) -> Dict[str, Tensor]: + """Compute the combined ROUGE metric for all the input set of predicted and target sentences. + + Args: + sentence_results: + Rouge-N/Rouge-L/Rouge-LSum metrics calculated for single sentence. + """ + results: Dict[str, Tensor] = {} + # Obtain mean scores for individual rouge metrics + if sentence_results == {}: + return results + + for rouge_key, scores in sentence_results.items(): + results[rouge_key] = B.tensor(scores).mean() + + return results + + +def rouge_score( + preds: Union[str, List[str]], + targets: Union[str, List[str]], + use_stemmer: bool = False, + rouge_keys: Union[str, Tuple[str, ...]] = ("rouge1", "rouge2", "rougeL", "rougeLsum"), # type: ignore +) -> Dict[str, Tensor]: + """Calculate `Calculate Rouge Score`_ , used for automatic summarization. + + Args: + preds: + An iterable of predicted sentences. + targets: + An iterable of target sentences. + use_stemmer: + Use Porter stemmer to strip word suffixes to improve matching. + rouge_keys: + A list of rouge types to calculate. 
+ Keys that are allowed are ``rougeL``, ``rougeLsum``, and ``rouge1`` through ``rouge9``. + + Return: + Python dictionary of rouge scores for each input rouge key. + + Example: + >>> targets = "Is your name John".split() + >>> preds = "My name is John".split() + >>> from pprint import pprint + >>> pprint(rouge_score(preds, targets)) # doctest: +NORMALIZE_WHITESPACE +SKIP + {'rouge1_fmeasure': 0.25, + 'rouge1_precision': 0.25, + 'rouge1_recall': 0.25, + 'rouge2_fmeasure': 0.0, + 'rouge2_precision': 0.0, + 'rouge2_recall': 0.0, + 'rougeL_fmeasure': 0.25, + 'rougeL_precision': 0.25, + 'rougeL_recall': 0.25, + 'rougeLsum_fmeasure': 0.25, + 'rougeLsum_precision': 0.25, + 'rougeLsum_recall': 0.25} + + Raises: + ValueError: + If the python package ``nltk`` is not installed. + ValueError: + If any of the ``rouge_keys`` does not belong to the allowed set of keys. + + References: + [1] ROUGE: A Package for Automatic Evaluation of Summaries by Chin-Yew Lin. https://aclanthology.org/W04-1013/ + """ + + if use_stemmer: + if not _NLTK_AVAILABLE: + raise ValueError("Stemmer requires that nltk is installed. Use `pip install nltk`.") + import nltk + + stemmer = nltk.stem.porter.PorterStemmer() if use_stemmer else None + + if not isinstance(rouge_keys, tuple): + rouge_keys = tuple([rouge_keys]) + for key in rouge_keys: + if key not in ALLOWED_ROUGE_KEYS.keys(): + raise ValueError(f"Got unknown rouge key {key}. Expected to be one of {list(ALLOWED_ROUGE_KEYS.keys())}") + rouge_keys_values = [ALLOWED_ROUGE_KEYS[key] for key in rouge_keys] + + if isinstance(preds, str): + preds = [preds] + + if isinstance(targets, str): + targets = [targets] + + sentence_results: Dict[Union[int, str], List[Dict[str, Tensor]]] = _rouge_score_update( + preds, targets, rouge_keys_values, stemmer=stemmer + ) + + output: Dict[str, List[Tensor]] = {} + for rouge_key in rouge_keys_values: + for type in ["fmeasure", "precision", "recall"]: + output[f"rouge{rouge_key}_{type}"] = [] + + for rouge_key, metrics in sentence_results.items(): + for metric in metrics: + for type, value in metric.items(): + output[f"rouge{rouge_key}_{type}"].append(value) + + return _rouge_score_compute(output) diff --git a/EE/paddlemetric/src/paddlemetrics/functional/text/sacre_bleu.py b/EE/paddlemetric/src/paddlemetrics/functional/text/sacre_bleu.py new file mode 100644 index 000000000..1a59377f6 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/text/sacre_bleu.py @@ -0,0 +1,355 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# referenced from +# Library Name: torchtext +# Authors: torchtext authors and @sluks +# Date: 2020-07-18 +# Link: https://pyB.org/text/_modules/torchtext/data/metrics.html#bleu_score + +############## + +# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You may not +# use this file except in compliance with the License. 
A copy of the License +# is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is distributed on +# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. + +############## + +# MIT License +# Copyright (c) 2017 - Shujian Huang + + +import re +from typing import Sequence + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor +from typing_extensions import Literal + +from paddlemetrics.functional.text.bleu import _bleu_score_compute, _bleu_score_update +from paddlemetrics.utilities.imports import _REGEX_AVAILABLE + +AVAILABLE_TOKENIZERS = ("none", "13a", "zh", "intl", "char") + +_UCODE_RANGES = ( + ("\u3400", "\u4db5"), # CJK Unified Ideographs Extension A, release 3.0 + ("\u4e00", "\u9fa5"), # CJK Unified Ideographs, release 1.1 + ("\u9fa6", "\u9fbb"), # CJK Unified Ideographs, release 4.1 + ("\uf900", "\ufa2d"), # CJK Compatibility Ideographs, release 1.1 + ("\ufa30", "\ufa6a"), # CJK Compatibility Ideographs, release 3.2 + ("\ufa70", "\ufad9"), # CJK Compatibility Ideographs, release 4.1 + ("\u20000", "\u2a6d6"), # (UTF16) CJK Unified Ideographs Extension B, release 3.1 + ("\u2f800", "\u2fa1d"), # (UTF16) CJK Compatibility Supplement, release 3.1 + ("\uff00", "\uffef"), # Full width ASCII, full width of English punctuation, + # half width Katakana, half wide half width kana, Korean alphabet + ("\u2e80", "\u2eff"), # CJK Radicals Supplement + ("\u3000", "\u303f"), # CJK punctuation mark + ("\u31c0", "\u31ef"), # CJK stroke + ("\u2f00", "\u2fdf"), # Kangxi Radicals + ("\u2ff0", "\u2fff"), # Chinese character structure + ("\u3100", "\u312f"), # Phonetic symbols + ("\u31a0", "\u31bf"), # Phonetic symbols (Taiwanese and Hakka expansion) + ("\ufe10", "\ufe1f"), + ("\ufe30", "\ufe4f"), + ("\u2600", "\u26ff"), + ("\u2700", "\u27bf"), + ("\u3200", "\u32ff"), + ("\u3300", "\u33ff"), +) + + +class _SacreBLEUTokenizer: + """Tokenizer used for SacreBLEU calculation. 
+
+    Source: https://github.com/mjpost/sacrebleu/tree/master/sacrebleu/tokenizers
+    """
+
+    _REGEX = (
+        # language-dependent part (assuming Western languages)
+        (re.compile(r"([\{-\~\[-\` -\&\(-\+\:-\@\/])"), r" \1 "),
+        # tokenize period and comma unless preceded by a digit
+        (re.compile(r"([^0-9])([\.,])"), r"\1 \2 "),
+        # tokenize period and comma unless followed by a digit
+        (re.compile(r"([\.,])([^0-9])"), r" \1 \2"),
+        # tokenize dash when preceded by a digit
+        (re.compile(r"([0-9])(-)"), r"\1 \2 "),
+        # one space only between words
+        # NOTE: Doing this in Python (below) is faster
+        # (re.compile(r'\s+'), r' '),
+    )
+
+    if _REGEX_AVAILABLE:
+        import regex
+
+        _INT_REGEX = (
+            # Separate out punctuations preceded by a non-digit
+            (regex.compile(r"(\P{N})(\p{P})"), r"\1 \2 "),
+            # Separate out punctuations followed by a non-digit
+            (regex.compile(r"(\p{P})(\P{N})"), r" \1 \2"),
+            # Separate out symbols
+            (regex.compile(r"(\p{S})"), r" \1 "),
+        )
+
+    _TOKENIZE_FN = {
+        "none": "_tokenize_base",
+        "13a": "_tokenize_13a",
+        "zh": "_tokenize_zh",
+        "intl": "_tokenize_international",
+        "char": "_tokenize_char",
+    }
+
+    def __init__(self, tokenize: Literal["none", "13a", "zh", "intl", "char"], lowercase: bool = False) -> None:
+        self.tokenize_fn = getattr(self, self._TOKENIZE_FN[tokenize])
+        self.lowercase = lowercase
+
+    def __call__(self, line: str) -> Sequence[str]:
+        tokenized_line = self.tokenize_fn(line)
+        return self._lower(tokenized_line, self.lowercase).split()
+
+    @classmethod
+    def tokenize(
+        cls, line: str, tokenize: Literal["none", "13a", "zh", "intl", "char"], lowercase: bool = False
+    ) -> Sequence[str]:
+        tokenize_fn = getattr(cls, cls._TOKENIZE_FN[tokenize])
+        tokenized_line = tokenize_fn(line)
+        return cls._lower(tokenized_line, lowercase).split()
+
+    @classmethod
+    def _tokenize_regex(cls, line: str) -> str:
+        """Common post-processing tokenizer for `13a` and `zh` tokenizers.
+
+        Args:
+            line: a segment to tokenize
+
+        Return:
+            the tokenized line
+        """
+        for (_re, repl) in cls._REGEX:
+            line = _re.sub(repl, line)
+        # no leading or trailing spaces, single space within words
+        return " ".join(line.split())
+
+    @staticmethod
+    def _is_chinese_char(uchar: str) -> bool:
+        """
+        Args:
+            uchar: input char in unicode
+
+        Return:
+            whether the input char is a Chinese character.
+        """
+        for start, end in _UCODE_RANGES:
+            if start <= uchar <= end:
+                return True
+        return False
+
+    @classmethod
+    def _tokenize_base(cls, line: str) -> str:
+        """Returns the input line unchanged (the `none` tokenization).
+
+        Args:
+            line: a segment to tokenize
+
+        Return:
+            the tokenized line
+        """
+        return line
+
+    @classmethod
+    def _tokenize_13a(cls, line: str) -> str:
+        """Tokenizes an input line using a relatively minimal tokenization that is however equivalent to
+        mteval-v13a, used by WMT.
+
+        Args:
+            line: input sentence
+
+        Return:
+            tokenized sentence
+        """
+        # language-independent part:
+        line = line.replace("<skipped>", "")
+        line = line.replace("-\n", "")
+        line = line.replace("\n", " ")
+
+        if "&" in line:
+            line = line.replace("&quot;", '"')
+            line = line.replace("&amp;", "&")
+            line = line.replace("&lt;", "<")
+            line = line.replace("&gt;", ">")
+
+        return cls._tokenize_regex(line)
+
+    @classmethod
+    def _tokenize_zh(cls, line: str) -> str:
+        """The tokenization of Chinese text in this script contains two
+        steps: separate each Chinese character (by utf-8 encoding); tokenize
+        the non-Chinese part (following the `13a` i.e. mteval tokenizer).
+ Author: Shujian Huang huangsj@nju.edu.cn + + Args: + line: input sentence + + Return: + tokenized sentence + """ + + line = line.strip() + line_in_chars = "" + + for char in line: + if cls._is_chinese_char(char): + line_in_chars += " " + line_in_chars += char + line_in_chars += " " + else: + line_in_chars += char + + return cls._tokenize_regex(line_in_chars) + + @classmethod + def _tokenize_international(cls, line: str) -> str: + """Tokenizes a string following the official BLEU implementation. + + See github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/mteval-v14.pl#L954-L983 + + In our case, the input string is expected to be just one line. + We just tokenize on punctuation and symbols, + except when a punctuation is preceded and followed by a digit + (e.g. a comma/dot as a thousand/decimal separator). + We do not recover escaped forms of punctuations such as ' or > + as these should never appear in MT system outputs (see issue #138) + + Note that a number (e.g., a year) followed by a dot at the end of + sentence is NOT tokenized, i.e. the dot stays with the number because + `s/(\\p{P})(\\P{N})/ $1 $2/g` does not match this case (unless we add a + space after each sentence). However, this error is already in the + original mteval-v14.pl and we want to be consistent with it. + The error is not present in the non-international version, + which uses `$norm_text = " $norm_text "`. + + Args: + line: the input string to tokenize. + + Return: + The tokenized string. + """ + for (_re, repl) in cls._INT_REGEX: + line = _re.sub(repl, line) + + return " ".join(line.split()) + + @classmethod + def _tokenize_char(cls, line: str) -> str: + """Tokenizes all the characters in the input line. + + Args: + line: a segment to tokenize + + Return: + the tokenized line + """ + return " ".join(char for char in line) + + @staticmethod + def _lower(line: str, lowercase: bool) -> str: + if lowercase: + return line.lower() + return line + + +def sacre_bleu_score( + reference_corpus: Sequence[Sequence[str]], + translate_corpus: Sequence[str], + n_gram: int = 4, + smooth: bool = False, + tokenize: Literal["none", "13a", "zh", "intl", "char"] = "13a", + lowercase: bool = False, +) -> Tensor: + """Calculate `BLEU score`_ [1] of machine translated text with one or more references. This implementation + follows the behaviour of SacreBLEU [2] implementation from https://github.com/mjpost/sacrebleu. + + Args: + reference_corpus: + An iterable of iterables of reference corpus + translate_corpus: + An iterable of machine translated corpus + n_gram: + Gram value ranged from 1 to 4 (Default 4) + smooth: + Whether or not to apply smoothing – see [2] + tokenize: + Tokenization technique to be used. (Default '13a') + Supported tokenization: ['none', '13a', 'zh', 'intl', 'char'] + lowercase: + If ``True``, BLEU score over lowercased text is calculated. + + Return: + Tensor with BLEU Score + + Example: + >>> from paddlemetrics.functional import sacre_bleu_score + >>> translate_corpus = ['the cat is on the mat'] + >>> reference_corpus = [['there is a cat on the mat', 'a cat is on the mat']] + >>> sacre_bleu_score(reference_corpus, translate_corpus) + tensor(0.7598) + + References: + [1] BLEU: a Method for Automatic Evaluation of Machine Translation by Papineni, + Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu `BLEU`_ + + [2] A Call for Clarity in Reporting BLEU Scores by Matt Post. 
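A hedged illustration of the digit-aware rules in `_SacreBLEUTokenizer._REGEX` above: periods and commas are split off only when they do not sit between digits, so decimal points and thousands separators survive tokenization. Only the two relevant rules are reproduced here.

```python
import re

rules = [
    (re.compile(r"([^0-9])([\.,])"), r"\1 \2 "),  # . or , after a non-digit
    (re.compile(r"([\.,])([^0-9])"), r" \1 \2"),  # . or , before a non-digit
]
line = "It costs 1,000.50 dollars, really."
for pattern, repl in rules:
    line = pattern.sub(repl, line)
print(" ".join(line.split()))  # It costs 1,000.50 dollars , really .
```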
+ + [3] Automatic Evaluation of Machine Translation Quality Using Longest Common Subsequence + and Skip-Bigram Statistics by Chin-Yew Lin and Franz Josef Och `Machine Translation Evolution`_ + """ + if tokenize not in AVAILABLE_TOKENIZERS: + raise ValueError(f"Argument `tokenize` expected to be one of {AVAILABLE_TOKENIZERS} but got {tokenize}.") + + if tokenize not in _SacreBLEUTokenizer._TOKENIZE_FN.keys(): + raise ValueError( + f"Unsupported tokenizer selected. Please, choose one of {list(_SacreBLEUTokenizer._TOKENIZE_FN.keys())}" + ) + if len(translate_corpus) != len(reference_corpus): + raise ValueError(f"Corpus has different size {len(translate_corpus)} != {len(reference_corpus)}") + if tokenize == "intl" and not _REGEX_AVAILABLE: + raise ValueError( + "`'intl'` tokenization requires `regex` installed. Use `pip install regex` or `pip install " + "paddlemetrics[text]`." + ) + + reference_corpus_: Sequence[Sequence[Sequence[str]]] = [ + [_SacreBLEUTokenizer.tokenize(line, tokenize, lowercase) for line in reference] + for reference in reference_corpus + ] + translate_corpus_: Sequence[Sequence[str]] = [ + _SacreBLEUTokenizer.tokenize(line, tokenize, lowercase) for line in translate_corpus + ] + + numerator = B.zeros(n_gram) + denominator = B.zeros(n_gram) + trans_len = tensor(0, dtype=B.float) + ref_len = tensor(0, dtype=B.float) + + trans_len, ref_len = _bleu_score_update( + reference_corpus_, translate_corpus_, numerator, denominator, trans_len, ref_len, n_gram + ) + + return _bleu_score_compute(trans_len, ref_len, numerator, denominator, n_gram, smooth) diff --git a/EE/paddlemetric/src/paddlemetrics/functional/text/wer.py b/EE/paddlemetric/src/paddlemetrics/functional/text/wer.py new file mode 100644 index 000000000..4cd19b059 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/functional/text/wer.py @@ -0,0 +1,114 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import List, Optional, Tuple, Union +from warnings import warn + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + + +def _edit_distance(prediction_tokens: List[str], reference_tokens: List[str]) -> int: + """Standard dynamic programming algorithm to compute the edit distance. 
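+
+    Example (an illustrative sketch; the tokens are arbitrary):
+
+        >>> _edit_distance(["a", "b", "c"], ["a", "x", "c"])
+        1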
+ + Args: + prediction_tokens: A tokenized predicted sentence + reference_tokens: A tokenized reference sentence + + Returns: + (int) Edit distance between the predicted sentence and the reference sentence + """ + dp = [[0] * (len(reference_tokens) + 1) for _ in range(len(prediction_tokens) + 1)] + for i in range(len(prediction_tokens) + 1): + dp[i][0] = i + for j in range(len(reference_tokens) + 1): + dp[0][j] = j + for i in range(1, len(prediction_tokens) + 1): + for j in range(1, len(reference_tokens) + 1): + if prediction_tokens[i - 1] == reference_tokens[j - 1]: + dp[i][j] = dp[i - 1][j - 1] + else: + dp[i][j] = min(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1]) + 1 + return dp[-1][-1] + + +def _wer_update( + predictions: Union[str, List[str]], + references: Union[str, List[str]], +) -> Tuple[Tensor, Tensor]: + """Update the wer score with the current set of references and predictions. + + Args: + predictions: Transcription(s) to score as a string or list of strings + references: Reference(s) for each speech input as a string or list of strings + + Returns: + (Tensor) Number of edit operations to get from the reference to the prediction, summed over all samples + (Tensor) Number of words over all references + """ + if isinstance(predictions, str): + predictions = [predictions] + if isinstance(references, str): + references = [references] + errors = tensor(0, dtype=B.float) + total = tensor(0, dtype=B.float) + for prediction, reference in zip(predictions, references): + prediction_tokens = prediction.split() + reference_tokens = reference.split() + errors += _edit_distance(prediction_tokens, reference_tokens) + total += len(reference_tokens) + return errors, total + + +def _wer_compute(errors: Tensor, total: Tensor) -> Tensor: + """Compute the word error rate. + + Args: + errors: Number of edit operations to get from the reference to the prediction, summed over all samples + total: Number of words over all references + + Returns: + (Tensor) Word error rate + """ + return errors / total + + +def wer( + predictions: Union[str, List[str]], + references: Union[str, List[str]], + concatenate_texts: Optional[bool] = None, # TODO: remove in v0.7 +) -> Tensor: + """Word error rate (WER_) is a common metric of the performance of an automatic speech recognition system. This + value indicates the percentage of words that were incorrectly predicted. The lower the value, the better the + performance of the ASR system with a WER of 0 being a perfect score. + + Args: + predictions: Transcription(s) to score as a string or list of strings + references: Reference(s) for each speech input as a string or list of strings + concatenate_texts: Whether to concatenate all input texts or compute WER iteratively + This argument is deprecated in v0.6 and it will be removed in v0.7. 
+ + Returns: + (Tensor) Word error rate + + Examples: + >>> predictions = ["this is the prediction", "there is an other sample"] + >>> references = ["this is the reference", "there is another one"] + >>> wer(predictions=predictions, references=references) + tensor(0.5000) + """ + if concatenate_texts is not None: + warn("`concatenate_texts` has been deprecated in v0.6 and it will be removed in v0.7", DeprecationWarning) + errors, total = _wer_update(predictions, references) + return _wer_compute(errors, total) diff --git a/EE/paddlemetric/src/paddlemetrics/image/__init__.py b/EE/paddlemetric/src/paddlemetrics/image/__init__.py new file mode 100644 index 000000000..c3fb3568f --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/image/__init__.py @@ -0,0 +1,19 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#from paddlemetrics.image.fid import FID # noqa: F401 +from paddlemetrics.image.inception import IS # noqa: F401 +from paddlemetrics.image.kid import KID # noqa: F401 +from paddlemetrics.image.lpip_similarity import LPIPS # noqa: F401 +from paddlemetrics.image.psnr import PSNR # noqa: F401 +from paddlemetrics.image.ssim import SSIM # noqa: F401 diff --git a/EE/paddlemetric/src/paddlemetrics/image/fid.py b/EE/paddlemetric/src/paddlemetrics/image/fid.py new file mode 100644 index 000000000..6f2965db6 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/image/fid.py @@ -0,0 +1,283 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import Any, Callable, List, Optional, Union
+
+import numpy as np
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+from paddleext.torchapi.autograd import Function
+
+from paddlemetrics.metric import Metric
+from paddlemetrics.utilities import rank_zero_info, rank_zero_warn
+from paddlemetrics.utilities.data import dim_zero_cat
+from paddlemetrics.utilities.imports import _SCIPY_AVAILABLE, _TORCH_FIDELITY_AVAILABLE
+
+if _TORCH_FIDELITY_AVAILABLE:
+    from torch_fidelity.feature_extractor_inceptionv3 import FeatureExtractorInceptionV3
+else:
+
+    class FeatureExtractorInceptionV3(B.nn.Module):  # type: ignore
+        pass
+
+
+if _SCIPY_AVAILABLE:
+    import scipy.linalg
+
+
+class NoTrainInceptionV3(FeatureExtractorInceptionV3):
+    def __init__(
+        self,
+        name: str,
+        features_list: List[str],
+        feature_extractor_weights_path: Optional[str] = None,
+    ) -> None:
+        super().__init__(name, features_list, feature_extractor_weights_path)
+        # put into evaluation mode
+        self.eval()
+
+    def train(self, mode: bool) -> "NoTrainInceptionV3":
+        """the inception network should not be able to be switched away from evaluation mode."""
+        return super().train(False)
+
+    def forward(self, x: Tensor) -> Tensor:
+        out = super().forward(x)
+        return out[0].reshape(x.shape[0], -1)
+
+
+class MatrixSquareRoot(Function):
+    """Square root of a positive definite matrix.
+
+    All credit to: `Square Root of a Positive Definite Matrix`_
+    """
+
+    @staticmethod
+    def forward(ctx: Any, input_data: Tensor) -> Tensor:
+        # TODO: update whenever pytorch gets a matrix square root function
+        # Issue: https://github.com/pytorch/pytorch/issues/9983
+        m = input_data.detach().cpu().numpy().astype(np.float_)
+        scipy_res, _ = scipy.linalg.sqrtm(m, disp=False)
+        sqrtm = B.from_numpy(scipy_res.real).to(input_data)
+        ctx.save_for_backward(sqrtm)
+        return sqrtm
+
+    @staticmethod
+    def backward(ctx: Any, grad_output: Tensor) -> Tensor:
+        grad_input = None
+        if ctx.needs_input_grad[0]:
+            (sqrtm,) = ctx.saved_tensors
+            sqrtm = sqrtm.data.cpu().numpy().astype(np.float_)
+            gm = grad_output.data.cpu().numpy().astype(np.float_)
+
+            # Given a positive semi-definite matrix X,
+            # since X = X^{1/2}X^{1/2}, we can compute the gradient of the
+            # matrix square root dX^{1/2} by solving the Sylvester equation:
+            # dX = d(X^{1/2})X^{1/2} + X^{1/2}d(X^{1/2}).
+            grad_sqrtm = scipy.linalg.solve_sylvester(sqrtm, sqrtm, gm)
+
+            grad_input = B.from_numpy(grad_sqrtm).to(grad_output)
+        return grad_input
+
+
+sqrtm = MatrixSquareRoot.apply
+
+
+def _compute_fid(mu1: Tensor, sigma1: Tensor, mu2: Tensor, sigma2: Tensor, eps: float = 1e-6) -> Tensor:
+    r"""
+    Adjusted version of `Fid Score`_
+
+    The Fréchet Inception Distance between two multivariate Gaussians X_x ~ N(mu_1, sigm_1)
+    and X_y ~ N(mu_2, sigm_2) is d^2 = ||mu_1 - mu_2||^2 + Tr(sigm_1 + sigm_2 - 2*sqrt(sigm_1*sigm_2)).
+
+    Args:
+        mu1: mean of activations calculated on predicted (x) samples
+        sigma1: covariance matrix over activations calculated on predicted (x) samples
+        mu2: mean of activations calculated on target (y) samples
+        sigma2: covariance matrix over activations calculated on target (y) samples
+        eps: offset constant. used if sigma_1 @ sigma_2 matrix is singular
+
+    Returns:
+        Scalar value of the distance between sets.
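+
+    Example (an illustrative sketch; identical Gaussians should give a distance
+    of zero):
+
+        >>> mu, sigma = B.zeros(2), B.eye(2)
+        >>> _compute_fid(mu, sigma, mu, sigma)  # doctest: +SKIP
+        tensor(0.)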
+ """ + diff = mu1 - mu2 + + covmean = sqrtm(sigma1.mm(sigma2)) + # Product might be almost singular + if not B.isfinite(covmean).all(): + rank_zero_info(f"FID calculation produces singular product; adding {eps} to diagonal of covariance estimates") + offset = B.eye(sigma1.size(0), device=mu1.device, dtype=mu1.dtype) * eps + covmean = sqrtm((sigma1 + offset).mm(sigma2 + offset)) + + tr_covmean = B.trace(covmean) + return diff.dot(diff) + B.trace(sigma1) + B.trace(sigma2) - 2 * tr_covmean + + +class FID(Metric): + r""" + Calculates Fréchet inception distance (FID_) which is used to access the quality of generated images. Given by + + .. math:: + FID = |\mu - \mu_w| + tr(\Sigma + \Sigma_w - 2(\Sigma \Sigma_w)^{\frac{1}{2}}) + + where :math:`\mathcal{N}(\mu, \Sigma)` is the multivariate normal distribution estimated from Inception v3 [1] + features calculated on real life images and :math:`\mathcal{N}(\mu_w, \Sigma_w)` is the multivariate normal + distribution estimated from Inception v3 features calculated on generated (fake) images. The metric was + originally proposed in [1]. + + Using the default feature extraction (Inception v3 using the original weights from [2]), the input is + expected to be mini-batches of 3-channel RGB images of shape (3 x H x W) with dtype uint8. All images + will be resized to 299 x 299 which is the size of the original training data. The boolian flag ``real`` + determines if the images should update the statistics of the real distribution or the fake distribution. + + .. note:: using this metrics requires you to have ``scipy`` install. Either install as ``pip install + paddlemetrics[image]`` or ``pip install scipy`` + + .. note:: using this metric with the default feature extractor requires that ``torch-fidelity`` + is installed. Either install as ``pip install paddlemetrics[image]`` or + ``pip install torch-fidelity`` + + .. note:: the ``forward`` method can be used but ``compute_on_step`` is disabled by default (oppesit of + all other metrics) as this metric does not really make sense to calculate on a single batch. This + means that by default ``forward`` will just call ``update`` underneat. + + Args: + feature: + Either an integer or ``nn.Module``: + + - an integer will indicate the inceptionv3 feature layer to choose. Can be one of the following: + 64, 192, 768, 2048 + - an ``nn.Module`` for using a custom feature extractor. Expects that its forward method returns + an ``[N,d]`` matrix where ``N`` is the batch size and ``d`` is the feature size. + + compute_on_step: + Forward only calls ``update()`` and return ``None`` if this is set to ``False``. + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step + process_group: + Specify the process group on which synchronization is called. + default: ``None`` (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. 
+            will be used to perform the allgather
+
+    References:
+        [1] Rethinking the Inception Architecture for Computer Vision
+        Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens, Zbigniew Wojna
+        https://arxiv.org/abs/1512.00567
+
+        [2] GANs Trained by a Two Time-Scale Update Rule Converge to a Local Nash Equilibrium,
+        Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, Sepp Hochreiter
+        https://arxiv.org/abs/1706.08500
+
+    Raises:
+        ValueError:
+            If ``feature`` is set to an ``int`` (default settings) and ``torch-fidelity`` is not installed
+        ValueError:
+            If ``feature`` is set to an ``int`` not in [64, 192, 768, 2048]
+        TypeError:
+            If ``feature`` is not a ``str``, ``int`` or ``B.nn.Module``
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> _ = B.manual_seed(123)
+        >>> from paddlemetrics import FID
+        >>> fid = FID(feature=64)  # doctest: +SKIP
+        >>> # generate two slightly overlapping image intensity distributions
+        >>> imgs_dist1 = B.randint(0, 200, (100, 3, 299, 299), dtype=B.uint8)  # doctest: +SKIP
+        >>> imgs_dist2 = B.randint(100, 255, (100, 3, 299, 299), dtype=B.uint8)  # doctest: +SKIP
+        >>> fid.update(imgs_dist1, real=True)  # doctest: +SKIP
+        >>> fid.update(imgs_dist2, real=False)  # doctest: +SKIP
+        >>> fid.compute()  # doctest: +SKIP
+        tensor(12.7202)
+
+    """
+    real_features: List[Tensor]
+    fake_features: List[Tensor]
+
+    def __init__(
+        self,
+        feature: Union[int, B.nn.Module] = 2048,
+        compute_on_step: bool = False,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable[[Tensor], List[Tensor]] = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+
+        rank_zero_warn(
+            "Metric `FID` will save all extracted features in buffer."
+            " For large datasets this may lead to large memory footprint.",
+            UserWarning,
+        )
+
+        if isinstance(feature, int):
+            if not _TORCH_FIDELITY_AVAILABLE:
+                raise ValueError(
+                    "FID metric requires that Torch-fidelity is installed."
+                    " Either install as `pip install paddlemetrics[image]` or `pip install torch-fidelity`"
+                )
+            valid_int_input = [64, 192, 768, 2048]
+            if feature not in valid_int_input:
+                raise ValueError(
+                    f"Integer input to argument `feature` must be one of {valid_int_input}, but got {feature}."
+                )
+
+            self.inception = NoTrainInceptionV3(name="inception-v3-compat", features_list=[str(feature)])
+        elif isinstance(feature, B.nn.Module):
+            self.inception = feature
+        else:
+            raise TypeError("Got unknown input to argument `feature`")
+
+        self.add_state("real_features", [], dist_reduce_fx=None)
+        self.add_state("fake_features", [], dist_reduce_fx=None)
+
+    def update(self, imgs: Tensor, real: bool) -> None:  # type: ignore
+        """Update the state with extracted features.
+
+        Args:
+            imgs: tensor with images fed to the feature extractor
+            real: bool indicating if imgs belong to the real or the fake distribution
+        """
+        features = self.inception(imgs)
+
+        if real:
+            self.real_features.append(features)
+        else:
+            self.fake_features.append(features)
+
+    def compute(self) -> Tensor:
+        """Calculate FID score based on accumulated extracted features from the two distributions."""
+        real_features = dim_zero_cat(self.real_features)
+        fake_features = dim_zero_cat(self.fake_features)
+        # computation is extremely sensitive so it needs to happen in double precision
+        orig_dtype = real_features.dtype
+        real_features = real_features.double()
+        fake_features = fake_features.double()
+
+        # calculate mean and covariance
+        n = real_features.shape[0]
+        mean1 = real_features.mean(dim=0)
+        mean2 = fake_features.mean(dim=0)
+        diff1 = real_features - mean1
+        diff2 = fake_features - mean2
+        cov1 = 1.0 / (n - 1) * diff1.t().mm(diff1)
+        cov2 = 1.0 / (n - 1) * diff2.t().mm(diff2)
+
+        # compute fid
+        return _compute_fid(mean1, cov1, mean2, cov2).to(orig_dtype)
diff --git a/EE/paddlemetric/src/paddlemetrics/image/inception.py b/EE/paddlemetric/src/paddlemetrics/image/inception.py
new file mode 100644
index 000000000..6c05b9a4b
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/image/inception.py
@@ -0,0 +1,179 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, List, Optional, Tuple, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.image.fid import NoTrainInceptionV3
+from paddlemetrics.metric import Metric
+from paddlemetrics.utilities import rank_zero_warn
+from paddlemetrics.utilities.data import dim_zero_cat
+from paddlemetrics.utilities.imports import _TORCH_FIDELITY_AVAILABLE
+
+
+class IS(Metric):
+    r"""
+    Calculates the Inception Score (IS) which is used to assess how realistic generated images are.
+    It is defined as
+
+    .. math::
+        IS = exp(\mathbb{E}_x KL(p(y | x ) || p(y)))
+
+    where :math:`KL(p(y | x) || p(y))` is the KL divergence between the conditional distribution :math:`p(y|x)`
+    and the marginal distribution :math:`p(y)`. Both the conditional and marginal distributions are calculated
+    from features extracted from the images. The score is calculated on random splits of the images such that
+    both a mean and standard deviation of the score are returned. The metric was originally proposed in [1].
+
+    Using the default feature extraction (Inception v3 using the original weights from [2]), the input is
+    expected to be mini-batches of 3-channel RGB images of shape (3 x H x W) with dtype uint8. All images
+    will be resized to 299 x 299 which is the size of the original training data.
+
+    .. note:: using this metric with the default feature extractor requires that ``torch-fidelity``
+        is installed. Either install as ``pip install paddlemetrics[image]`` or
+        ``pip install torch-fidelity``
+
+    .. note:: the ``forward`` method can be used but ``compute_on_step`` is disabled by default (opposite of
+        all other metrics) as this metric does not really make sense to calculate on a single batch. This
+        means that by default ``forward`` will just call ``update`` underneath.
+
+    Args:
+        feature:
+            Either a str, integer or ``nn.Module``:
+
+            - a str or integer will indicate the inceptionv3 feature layer to choose. Can be one of the following:
+              'logits_unbiased', 64, 192, 768, 2048
+            - an ``nn.Module`` for using a custom feature extractor. Expects that its forward method returns
+              an ``[N,d]`` matrix where ``N`` is the batch size and ``d`` is the feature size.
+
+        splits: integer determining how many splits the inception score calculation should be split among
+
+        compute_on_step:
+            Forward only calls ``update()`` and return ``None`` if this is set to ``False``.
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step
+        process_group:
+            Specify the process group on which synchronization is called.
+            default: ``None`` (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When ``None``, DDP
+            will be used to perform the allgather
+
+    References:
+        [1] Improved Techniques for Training GANs
+        Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, Xi Chen
+        https://arxiv.org/abs/1606.03498
+
+        [2] GANs Trained by a Two Time-Scale Update Rule Converge to a Local Nash Equilibrium,
+        Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, Sepp Hochreiter
+        https://arxiv.org/abs/1706.08500
+
+    Raises:
+        ValueError:
+            If ``feature`` is set to a ``str`` or ``int`` and ``torch-fidelity`` is not installed
+        ValueError:
+            If ``feature`` is set to a ``str`` or ``int`` and not one of ['logits_unbiased', 64, 192, 768, 2048]
+        TypeError:
+            If ``feature`` is not a ``str``, ``int`` or ``B.nn.Module``
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> _ = B.manual_seed(123)
+        >>> from paddlemetrics import IS
+        >>> inception = IS()  # doctest: +SKIP
+        >>> # generate some images
+        >>> imgs = B.randint(0, 255, (100, 3, 299, 299), dtype=B.uint8)  # doctest: +SKIP
+        >>> inception.update(imgs)  # doctest: +SKIP
+        >>> inception.compute()  # doctest: +SKIP
+        (tensor(1.0569), tensor(0.0113))
+
+    """
+    features: List
+
+    def __init__(
+        self,
+        feature: Union[str, int, B.nn.Module] = "logits_unbiased",
+        splits: int = 10,
+        compute_on_step: bool = False,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable[[Tensor], List[Tensor]] = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+
+        rank_zero_warn(
+            "Metric `IS` will save all extracted features in buffer."
+            " For large datasets this may lead to large memory footprint.",
+            UserWarning,
+        )
+
+        if isinstance(feature, (str, int)):
+            if not _TORCH_FIDELITY_AVAILABLE:
+                raise ValueError(
+                    "IS metric requires that Torch-fidelity is installed."
+                    " Either install as `pip install paddlemetrics[image]`"
+                    " or `pip install torch-fidelity`"
+                )
+            valid_int_input = ("logits_unbiased", 64, 192, 768, 2048)
+            if feature not in valid_int_input:
+                raise ValueError(
+                    f"Integer input to argument `feature` must be one of {valid_int_input}," f" but got {feature}."
+                )
+
+            self.inception = NoTrainInceptionV3(name="inception-v3-compat", features_list=[str(feature)])
+        elif isinstance(feature, B.nn.Module):
+            self.inception = feature
+        else:
+            raise TypeError("Got unknown input to argument `feature`")
+
+        self.splits = splits
+        self.add_state("features", [], dist_reduce_fx=None)
+
+    def update(self, imgs: Tensor) -> None:  # type: ignore
+        """Update the state with extracted features.
+
+        Args:
+            imgs: tensor with images fed to the feature extractor
+        """
+        features = self.inception(imgs)
+        self.features.append(features)
+
+    def compute(self) -> Tuple[Tensor, Tensor]:
+        features = dim_zero_cat(self.features)
+        # random permute the features
+        idx = B.randperm(features.shape[0])
+        features = features[idx]
+
+        # calculate probs and logits
+        prob = features.softmax(dim=1)
+        log_prob = features.log_softmax(dim=1)
+
+        # split into groups
+        prob = prob.chunk(self.splits, dim=0)
+        log_prob = log_prob.chunk(self.splits, dim=0)
+
+        # calculate score per split
+        mean_prob = [p.mean(dim=0, keepdim=True) for p in prob]
+        kl_ = [p * (log_p - m_p.log()) for p, log_p, m_p in zip(prob, log_prob, mean_prob)]
+        kl_ = [k.sum(dim=1).mean().exp() for k in kl_]
+        kl = B.stack(kl_)
+
+        # return mean and std
+        return kl.mean(), kl.std()
diff --git a/EE/paddlemetric/src/paddlemetrics/image/kid.py b/EE/paddlemetric/src/paddlemetrics/image/kid.py
new file mode 100644
index 000000000..2f3d3a6b7
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/image/kid.py
@@ -0,0 +1,277 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, List, Optional, Tuple, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+from paddleext.torchapi.nn import Module
+
+from paddlemetrics.image.fid import NoTrainInceptionV3
+from paddlemetrics.metric import Metric
+from paddlemetrics.utilities import rank_zero_warn
+from paddlemetrics.utilities.data import dim_zero_cat
+from paddlemetrics.utilities.imports import _TORCH_FIDELITY_AVAILABLE
+
+
+def maximum_mean_discrepancy(k_xx: Tensor, k_xy: Tensor, k_yy: Tensor) -> Tensor:
+    """Adapted from `KID Score`_"""
+    m = k_xx.shape[0]
+
+    diag_x = B.diag(k_xx)
+    diag_y = B.diag(k_yy)
+
+    kt_xx_sums = k_xx.sum(dim=-1) - diag_x
+    kt_yy_sums = k_yy.sum(dim=-1) - diag_y
+    k_xy_sums = k_xy.sum(dim=0)
+
+    kt_xx_sum = kt_xx_sums.sum()
+    kt_yy_sum = kt_yy_sums.sum()
+    k_xy_sum = k_xy_sums.sum()
+
+    value = (kt_xx_sum + kt_yy_sum) / (m * (m - 1))
+    value -= 2 * k_xy_sum / (m ** 2)
+    return value
+
+
+def poly_kernel(f1: Tensor, f2: Tensor, degree: int = 3, gamma: Optional[float] = None, coef: float = 1.0) -> Tensor:
+    """Adapted from `KID Score`_"""
+    if gamma is None:
+        gamma = 1.0 / f1.shape[1]
+    kernel = (f1 @ f2.T * gamma + coef) ** degree
+    return kernel
+
+
+def poly_mmd(
+    f_real: Tensor, f_fake: Tensor, degree: int = 3, gamma: Optional[float] = None, coef: float = 1.0
+) -> Tensor:
+    """Adapted from `KID Score`_"""
+    k_11 = poly_kernel(f_real, f_real, degree, gamma, coef)
+    k_22 = poly_kernel(f_fake, f_fake, degree, gamma, coef)
+    k_12 = poly_kernel(f_real, f_fake, degree, gamma, coef)
+    return maximum_mean_discrepancy(k_11, k_12, k_22)
+
+
+class KID(Metric):
+    r"""
+    Calculates Kernel Inception Distance (KID) which is used to assess the quality of generated images. Given by
+
+    .. math::
+        KID = MMD(f_{real}, f_{fake})^2
+
+    where :math:`MMD` is the maximum mean discrepancy and :math:`f_{real}, f_{fake}` are extracted features
+    from real and fake images, see [1] for more details. In particular, calculating the MMD requires the
+    evaluation of a polynomial kernel function :math:`k`
+
+    .. math::
+        k(x,y) = (\gamma * x^T y + coef)^{degree}
+
+    which controls the distance between two features. In practice the MMD is calculated over a number of
+    subsets to be able to both get the mean and standard deviation of KID.
+
+    Using the default feature extraction (Inception v3 using the original weights from [2]), the input is
+    expected to be mini-batches of 3-channel RGB images of shape (3 x H x W) with dtype uint8. All images
+    will be resized to 299 x 299 which is the size of the original training data.
+
+    .. note:: using this metric with the default feature extractor requires that ``torch-fidelity``
+        is installed. Either install as ``pip install paddlemetrics[image]`` or
+        ``pip install torch-fidelity``
+
+    .. note:: the ``forward`` method can be used but ``compute_on_step`` is disabled by default (opposite of
+        all other metrics) as this metric does not really make sense to calculate on a single batch. This
+        means that by default ``forward`` will just call ``update`` underneath.
+
+    Args:
+        feature:
+            Either a str, integer or ``nn.Module``:
+
+            - a str or integer will indicate the inceptionv3 feature layer to choose. Can be one of the following:
+              'logits_unbiased', 64, 192, 768, 2048
+            - an ``nn.Module`` for using a custom feature extractor. Expects that its forward method returns
+              an ``[N,d]`` matrix where ``N`` is the batch size and ``d`` is the feature size.
+
+        subsets:
+            Number of subsets to calculate the mean and standard deviation scores over
+        subset_size:
+            Number of randomly picked samples in each subset
+        degree:
+            Degree of the polynomial kernel function
+        gamma:
+            Scale-length of polynomial kernel. If set to ``None`` will be automatically set to the feature size
+        coef:
+            Bias term in the polynomial kernel.
+        compute_on_step:
+            Forward only calls ``update()`` and return ``None`` if this is set to ``False``.
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step
+        process_group:
+            Specify the process group on which synchronization is called.
+            default: ``None`` (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When ``None``, DDP
+            will be used to perform the allgather
+
+    References:
+        [1] Demystifying MMD GANs
+        Mikołaj Bińkowski, Danica J. Sutherland, Michael Arbel, Arthur Gretton
+        https://arxiv.org/abs/1801.01401
+
+        [2] GANs Trained by a Two Time-Scale Update Rule Converge to a Local Nash Equilibrium,
+        Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, Sepp Hochreiter
+        https://arxiv.org/abs/1706.08500
+
+    Raises:
+        ValueError:
+            If ``feature`` is set to an ``int`` (default settings) and ``torch-fidelity`` is not installed
+        ValueError:
+            If ``feature`` is set to an ``int`` not in [64, 192, 768, 2048]
+        ValueError:
+            If ``subsets`` is not an integer larger than 0
+        ValueError:
+            If ``subset_size`` is not an integer larger than 0
+        ValueError:
+            If ``degree`` is not an integer larger than 0
+        ValueError:
+            If ``gamma`` is neither ``None`` nor a float larger than 0
+        ValueError:
+            If ``coef`` is not a float larger than 0
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> _ = B.manual_seed(123)
+        >>> from paddlemetrics import KID
+        >>> kid = KID(subset_size=50)  # doctest: +SKIP
+        >>> # generate two slightly overlapping image intensity distributions
+        >>> imgs_dist1 = B.randint(0, 200, (100, 3, 299, 299), dtype=B.uint8)  # doctest: +SKIP
+        >>> imgs_dist2 = B.randint(100, 255, (100, 3, 299, 299), dtype=B.uint8)  # doctest: +SKIP
+        >>> kid.update(imgs_dist1, real=True)  # doctest: +SKIP
+        >>> kid.update(imgs_dist2, real=False)  # doctest: +SKIP
+        >>> kid_mean, kid_std = kid.compute()  # doctest: +SKIP
+        >>> print((kid_mean, kid_std))  # doctest: +SKIP
+        (tensor(0.0338), tensor(0.0025))
+
+    """
+    real_features: List[Tensor]
+    fake_features: List[Tensor]
+
+    def __init__(
+        self,
+        feature: Union[str, int, B.nn.Module] = 2048,
+        subsets: int = 100,
+        subset_size: int = 1000,
+        degree: int = 3,
+        gamma: Optional[float] = None,  # type: ignore
+        coef: float = 1.0,
+        compute_on_step: bool = False,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+
+        rank_zero_warn(
+            "Metric `KID` will save all extracted features in buffer."
+            " For large datasets this may lead to large memory footprint.",
+            UserWarning,
+        )
+
+        if isinstance(feature, (str, int)):
+            if not _TORCH_FIDELITY_AVAILABLE:
+                raise ValueError(
+                    "KID metric requires that Torch-fidelity is installed."
+ " Either install as `pip install paddlemetrics[image]`" + " or `pip install torch-fidelity`" + ) + valid_int_input = ("logits_unbiased", 64, 192, 768, 2048) + if feature not in valid_int_input: + raise ValueError( + f"Integer input to argument `feature` must be one of {valid_int_input}," f" but got {feature}." + ) + + self.inception: Module = NoTrainInceptionV3(name="inception-v3-compat", features_list=[str(feature)]) + elif isinstance(feature, Module): + self.inception = feature + else: + raise TypeError("Got unknown input to argument `feature`") + + if not (isinstance(subsets, int) and subsets > 0): + raise ValueError("Argument `subsets` expected to be integer larger than 0") + self.subsets = subsets + + if not (isinstance(subset_size, int) and subset_size > 0): + raise ValueError("Argument `subset_size` expected to be integer larger than 0") + self.subset_size = subset_size + + if not (isinstance(degree, int) and degree > 0): + raise ValueError("Argument `degree` expected to be integer larger than 0") + self.degree = degree + + if gamma is not None and not (isinstance(gamma, float) and gamma > 0): + raise ValueError("Argument `gamma` expected to be `None` or float larger than 0") + self.gamma = gamma + + if not (isinstance(coef, float) and coef > 0): + raise ValueError("Argument `coef` expected to be float larger than 0") + self.coef = coef + + # states for extracted features + self.add_state("real_features", [], dist_reduce_fx=None) + self.add_state("fake_features", [], dist_reduce_fx=None) + + def update(self, imgs: Tensor, real: bool) -> None: # type: ignore + """Update the state with extracted features. + + Args: + imgs: tensor with images feed to the feature extractor + real: bool indicating if imgs belong to the real or the fake distribution + """ + features = self.inception(imgs) + + if real: + self.real_features.append(features) + else: + self.fake_features.append(features) + + def compute(self) -> Tuple[Tensor, Tensor]: + """Calculate KID score based on accumulated extracted features from the two distributions. Returns a tuple + of mean and standard deviation of KID scores calculated on subsets of extracted features. + + Implementation inspired by `Fid Score`_ + """ + real_features = dim_zero_cat(self.real_features) + fake_features = dim_zero_cat(self.fake_features) + + n_samples_real = real_features.shape[0] + if n_samples_real < self.subset_size: + raise ValueError("Argument `subset_size` should be smaller than the number of samples") + n_samples_fake = fake_features.shape[0] + if n_samples_fake < self.subset_size: + raise ValueError("Argument `subset_size` should be smaller than the number of samples") + + kid_scores_ = [] + for _ in range(self.subsets): + perm = B.randperm(n_samples_real) + f_real = real_features[perm[: self.subset_size]] + perm = B.randperm(n_samples_fake) + f_fake = fake_features[perm[: self.subset_size]] + + o = poly_mmd(f_real, f_fake, self.degree, self.gamma, self.coef) + kid_scores_.append(o) + kid_scores = B.stack(kid_scores_) + return kid_scores.mean(), kid_scores.std(unbiased=False) diff --git a/EE/paddlemetric/src/paddlemetrics/image/lpip_similarity.py b/EE/paddlemetric/src/paddlemetrics/image/lpip_similarity.py new file mode 100644 index 000000000..7cf6d03a6 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/image/lpip_similarity.py @@ -0,0 +1,156 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, List, Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.metric import Metric
+from paddlemetrics.utilities.imports import _LPIPS_AVAILABLE
+
+if _LPIPS_AVAILABLE:
+    from lpips import LPIPS as Lpips_backbone
+else:
+
+    class Lpips_backbone(B.nn.Module):  # type: ignore
+        pass
+
+
+class NoTrainLpips(Lpips_backbone):
+    def train(self, mode: bool) -> "NoTrainLpips":
+        """the network should not be able to be switched away from evaluation mode."""
+        return super().train(False)
+
+
+def _valid_img(img: Tensor) -> bool:
+    """check that input is a valid image to the network."""
+    return img.ndim == 4 and img.shape[1] == 3 and img.min() >= -1.0 and img.max() <= 1.0
+
+
+class LPIPS(Metric):
+    """The Learned Perceptual Image Patch Similarity (`LPIPS_`) is used to judge the perceptual similarity between
+    two images. LPIPS essentially computes the similarity between the activations of two image patches for some
+    pre-defined network. This measure has been shown to match human perception well. A low LPIPS score means that
+    the image patches are perceptually similar.
+
+    Both input image patches are expected to have shape `[N, 3, H, W]` and be normalized to the [-1,1]
+    range. The minimum size of `H, W` depends on the chosen backbone (see `net_type` arg).
+
+    .. note:: using this metric requires you to have the ``lpips`` package installed. Either install
+        as ``pip install paddlemetrics[image]`` or ``pip install lpips``
+
+    .. note:: this metric is not scriptable when using ``torch<1.8``. Please update your pytorch installation
+        if this is an issue.
+
+    Args:
+        net_type: str indicating backbone network type to use. Choose between `'alex'`, `'vgg'` or `'squeeze'`
+        reduction: str indicating how to reduce over the batch dimension. Choose between `'sum'` or `'mean'`.
+        compute_on_step:
+            Forward only calls ``update()`` and return ``None`` if this is set to ``False``.
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step
+        process_group:
+            Specify the process group on which synchronization is called.
+            default: ``None`` (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When ``None``, DDP
+            will be used to perform the allgather
+
+    Raises:
+        ValueError:
+            If ``lpips`` package is not installed
+        ValueError:
+            If ``net_type`` is not one of ``"vgg"``, ``"alex"`` or ``"squeeze"``
+        ValueError:
+            If ``reduction`` is not one of ``"mean"`` or ``"sum"``
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> _ = B.manual_seed(123)
+        >>> from paddlemetrics import LPIPS
+        >>> lpips = LPIPS(net_type='vgg')
+        >>> img1 = B.rand(10, 3, 100, 100)
+        >>> img2 = B.rand(10, 3, 100, 100)
+        >>> lpips(img1, img2)
+        tensor([0.3566], grad_fn=<DivBackward0>)
+    """
+
+    is_differentiable = True
+    real_features: List[Tensor]
+    fake_features: List[Tensor]
+
+    # due to the use of named tuple in the backbone the net variable cannot be scripted
+    __jit_ignored_attributes__ = ["net"]
+
+    def __init__(
+        self,
+        net_type: str = "alex",
+        reduction: str = "mean",
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable[[Tensor], List[Tensor]] = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+
+        if not _LPIPS_AVAILABLE:
+            raise ValueError(
+                "LPIPS metric requires that lpips is installed."
+                " Either install as `pip install paddlemetrics[image]` or `pip install lpips`"
+            )
+
+        valid_net_type = ("vgg", "alex", "squeeze")
+        if net_type not in valid_net_type:
+            raise ValueError(f"Argument `net_type` must be one of {valid_net_type}, but got {net_type}.")
+        self.net = NoTrainLpips(net=net_type, verbose=False)
+
+        valid_reduction = ("mean", "sum")
+        if reduction not in valid_reduction:
+            raise ValueError(f"Argument `reduction` must be one of {valid_reduction}, but got {reduction}")
+        self.reduction = reduction
+
+        self.add_state("sum_scores", B.zeros(1), dist_reduce_fx="sum")
+        self.add_state("total", B.zeros(1), dist_reduce_fx="sum")
+
+    def update(self, img1: Tensor, img2: Tensor) -> None:  # type: ignore
+        """Update internal states with lpips score.
+
+        Args:
+            img1: tensor with images of shape [N, 3, H, W]
+            img2: tensor with images of shape [N, 3, H, W]
+        """
+        if not (_valid_img(img1) and _valid_img(img2)):
+            raise ValueError(
+                "Expected both input arguments to be normalized tensors (all values in range [-1,1])"
+                f" and to have shape [N, 3, H, W] but `img1` has shape {img1.shape} with values in"
+                f" range {[img1.min(), img1.max()]} and `img2` has shape {img2.shape} with values"
+                f" in range {[img2.min(), img2.max()]}"
+            )
+
+        loss = self.net(img1, img2).squeeze()
+        self.sum_scores += loss.sum()
+        self.total += img1.shape[0]
+
+    def compute(self) -> Tensor:
+        """Compute final perceptual similarity metric."""
+        if self.reduction == "mean":
+            return self.sum_scores / self.total
+        if self.reduction == "sum":
+            return self.sum_scores
diff --git a/EE/paddlemetric/src/paddlemetrics/image/psnr.py b/EE/paddlemetric/src/paddlemetrics/image/psnr.py
new file mode 100644
index 000000000..3226203d7
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/image/psnr.py
@@ -0,0 +1,147 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Optional, Sequence, Tuple, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.functional.image.psnr import _psnr_compute, _psnr_update
+from paddlemetrics.metric import Metric
+from paddlemetrics.utilities import rank_zero_warn
+
+
+class PSNR(Metric):
+    r"""
+    Computes `Peak Signal-to-Noise Ratio`_ (PSNR):
+
+    .. math:: \text{PSNR}(I, J) = 10 * \log_{10} \left(\frac{\max(I)^2}{\text{MSE}(I, J)}\right)
+
+    Where :math:`\text{MSE}` denotes the `mean-squared-error`_ function.
+
+    Args:
+        data_range:
+            the range of the data. If None, it is determined from the data (max - min).
+            The ``data_range`` must be given when ``dim`` is not None.
+        base: a base of a logarithm to use (default: 10)
+        reduction: a method to reduce metric score over labels.
+
+            - ``'elementwise_mean'``: takes the mean (default)
+            - ``'sum'``: takes the sum
+            - ``'none'``: no reduction will be applied
+
+        dim:
+            Dimensions to reduce PSNR scores over, provided as either an integer or a list of integers. Default is
+            None meaning scores will be reduced across all dimensions and all batches.
+        compute_on_step:
+            Forward only calls ``update()`` and return None if this is set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step. default: False
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+
+    Raises:
+        ValueError:
+            If ``dim`` is not ``None`` and ``data_range`` is not given.
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics import PSNR
+        >>> psnr = PSNR()
+        >>> preds = B.tensor([[0.0, 1.0], [2.0, 3.0]])
+        >>> target = B.tensor([[3.0, 2.0], [1.0, 0.0]])
+        >>> psnr(preds, target)
+        tensor(2.5527)
+
+    .. note::
+        Half precision is only supported on GPU for this metric
+
+    """
+    min_target: Tensor
+    max_target: Tensor
+
+    def __init__(
+        self,
+        data_range: Optional[float] = None,
+        base: float = 10.0,
+        reduction: str = "elementwise_mean",
+        dim: Optional[Union[int, Tuple[int, ...]]] = None,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+        )
+
+        if dim is None and reduction != "elementwise_mean":
+            rank_zero_warn(f"The `reduction={reduction}` will not have any effect when `dim` is None.")
+
+        if dim is None:
+            self.add_state("sum_squared_error", default=tensor(0.0), dist_reduce_fx="sum")
+            self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
+        else:
+            self.add_state("sum_squared_error", default=[])
+            self.add_state("total", default=[])
+
+        if data_range is None:
+            if dim is not None:
+                # Maybe we could use `B.amax(target, dim=dim) - B.amin(target, dim=dim)` in PyTorch 1.7 to
+                # calculate `data_range` in the future.
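+                # NOTE (explanatory assumption): with a per-dimension reduction the running
+                # `min_target`/`max_target` tracking in `update` is not maintained, so the
+                # range cannot be inferred from the data and must be supplied explicitly.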
+ raise ValueError("The `data_range` must be given when `dim` is not None.") + + self.data_range = None + self.add_state("min_target", default=tensor(0.0), dist_reduce_fx=B.min) + self.add_state("max_target", default=tensor(0.0), dist_reduce_fx=B.max) + else: + self.add_state("data_range", default=tensor(float(data_range)), dist_reduce_fx="mean") + self.base = base + self.reduction = reduction + self.dim = tuple(dim) if isinstance(dim, Sequence) else dim + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets. + + Args: + preds: Predictions from model + target: Ground truth values + """ + sum_squared_error, n_obs = _psnr_update(preds, target, dim=self.dim) + if self.dim is None: + if self.data_range is None: + # keep track of min and max target values + self.min_target = min(target.min(), self.min_target) + self.max_target = max(target.max(), self.max_target) + + self.sum_squared_error += sum_squared_error + self.total += n_obs + else: + self.sum_squared_error.append(sum_squared_error) + self.total.append(n_obs) + + def compute(self) -> Tensor: + """Compute peak signal-to-noise ratio over state.""" + if self.data_range is not None: + data_range = self.data_range + else: + data_range = self.max_target - self.min_target + + if self.dim is None: + sum_squared_error = self.sum_squared_error + total = self.total + else: + sum_squared_error = B.cat([values.flatten() for values in self.sum_squared_error]) + total = B.cat([values.flatten() for values in self.total]) + return _psnr_compute(sum_squared_error, total, data_range, base=self.base, reduction=self.reduction) diff --git a/EE/paddlemetric/src/paddlemetrics/image/ssim.py b/EE/paddlemetric/src/paddlemetrics/image/ssim.py new file mode 100644 index 000000000..f34a19b1c --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/image/ssim.py @@ -0,0 +1,105 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, List, Optional, Sequence + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.functional.image.ssim import _ssim_compute, _ssim_update +from paddlemetrics.metric import Metric +from paddlemetrics.utilities import rank_zero_warn +from paddlemetrics.utilities.data import dim_zero_cat + + +class SSIM(Metric): + """Computes Structual Similarity Index Measure (SSIM_). + + Args: + kernel_size: size of the gaussian kernel (default: (11, 11)) + sigma: Standard deviation of the gaussian kernel (default: (1.5, 1.5)) + reduction: a method to reduce metric score over labels. + + - ``'elementwise_mean'``: takes the mean (default) + - ``'sum'``: takes the sum + - ``'none'``: no reduction will be applied + + data_range: Range of the image. If ``None``, it is determined from the image (max - min) + k1: Parameter of SSIM. Default: 0.01 + k2: Parameter of SSIM. 
+
+    Return:
+        Tensor with SSIM score
+
+    Example:
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics import SSIM
+        >>> preds = B.rand([16, 1, 16, 16])
+        >>> target = preds * 0.75
+        >>> ssim = SSIM()
+        >>> ssim(preds, target)
+        tensor(0.9219)
+    """
+
+    preds: List[Tensor]
+    target: List[Tensor]
+
+    def __init__(
+        self,
+        kernel_size: Sequence[int] = (11, 11),
+        sigma: Sequence[float] = (1.5, 1.5),
+        reduction: str = "elementwise_mean",
+        data_range: Optional[float] = None,
+        k1: float = 0.01,
+        k2: float = 0.03,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+        )
+        rank_zero_warn(
+            "Metric `SSIM` will save all targets and"
+            " predictions in buffer. For large datasets this may lead"
+            " to large memory footprint."
+        )
+
+        self.add_state("preds", default=[], dist_reduce_fx="cat")
+        self.add_state("target", default=[], dist_reduce_fx="cat")
+        self.kernel_size = kernel_size
+        self.sigma = sigma
+        self.data_range = data_range
+        self.k1 = k1
+        self.k2 = k2
+        self.reduction = reduction
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Update state with predictions and targets.
+
+        Args:
+            preds: Predictions from model
+            target: Ground truth values
+        """
+        preds, target = _ssim_update(preds, target)
+        self.preds.append(preds)
+        self.target.append(target)
+
+    def compute(self) -> Tensor:
+        """Computes SSIM over state."""
+        preds = dim_zero_cat(self.preds)
+        target = dim_zero_cat(self.target)
+        return _ssim_compute(
+            preds, target, self.kernel_size, self.sigma, self.reduction, self.data_range, self.k1, self.k2
+        )
diff --git a/EE/paddlemetric/src/paddlemetrics/metric.py b/EE/paddlemetric/src/paddlemetrics/metric.py
new file mode 100644
index 000000000..21c2148ba
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/metric.py
@@ -0,0 +1,775 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import functools
+import inspect
+import operator as op
+from abc import ABC, abstractmethod
+from collections.abc import Sequence
+from contextlib import contextmanager
+from copy import deepcopy
+from typing import Any, Callable, Dict, Generator, List, Optional, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor, Module
+
+from paddlemetrics.utilities import apply_to_collection, rank_zero_warn
+from paddlemetrics.utilities.data import _flatten, dim_zero_cat, dim_zero_max, dim_zero_mean, dim_zero_min, dim_zero_sum
+from paddlemetrics.utilities.distributed import gather_all_tensors
+from paddlemetrics.utilities.exceptions import paddlemetricsUserError
+from paddlemetrics.utilities.imports import _LIGHTNING_AVAILABLE, _compare_version
+
+
+def jit_distributed_available() -> bool:
+    return B.distributed.is_available() and B.distributed.is_initialized()
+
+
+class Metric(Module):
+    """Base class for all metrics present in the Metrics API.
+
+    Implements ``add_state()``, ``forward()``, ``reset()`` and a few other things to
+    handle distributed synchronization and per-step metric computation.
+
+    Override ``update()`` and ``compute()`` functions to implement your own metric. Use
+    ``add_state()`` to register metric state variables which keep track of state on each
+    call of ``update()`` and are synchronized across processes when ``compute()`` is called.
+
+    Note:
+        Metric state variables can either be ``B.Tensor`` or an empty list which can be used
+        to store ``B.Tensor``.
+
+    Note:
+        Different metrics only override ``update()`` and not ``forward()``. A call to ``update()``
+        is valid, but it won't return the metric value at the current step. A call to ``forward()``
+        automatically calls ``update()`` and also returns the metric value at the current step.
+
+    Args:
+        compute_on_step:
+            Forward only calls ``update()`` and returns None if this is set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step.
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When `None`, DDP
+            will be used to perform the allgather.
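+
+    Example (a minimal sketch of a custom metric; the class name and state are
+    illustrative)::
+
+        class SumMetric(Metric):
+            def __init__(self):
+                super().__init__()
+                # running sum, reduced with a sum across processes
+                self.add_state("total", default=B.tensor(0.0), dist_reduce_fx="sum")
+
+            def update(self, x):
+                self.total += x.sum()
+
+            def compute(self):
+                return self.total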
+ """ + + __jit_ignored_attributes__ = ["device"] + __jit_unused_properties__ = ["is_differentiable"] + is_differentiable: Optional[bool] = None + higher_is_better: Optional[bool] = None + + def __init__( + self, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__() + + # see (https://github.com/pytorch/pytorch/blob/3e6bb5233f9ca2c5aa55d9cda22a7ee85439aa6e/ + # B.nn/modules/module.py#L227) +# B._C._log_api_usage_once(f"paddlemetrics.metric.{self.__class__.__name__}") + +# self._LIGHTNING_GREATER_EQUAL_1_3 = _compare_version("pytorch_lightning", op.ge, "1.3.0") + self._device = B.device("cpu") + + self.dist_sync_on_step = dist_sync_on_step + self.compute_on_step = compute_on_step + self.process_group = process_group + self.dist_sync_fn = dist_sync_fn + self._to_sync = True + self._should_unsync = True + + self._update_signature = inspect.signature(self.update) + self.update: Callable = self._wrap_update(self.update) # type: ignore + self.compute: Callable = self._wrap_compute(self.compute) # type: ignore + self._computed = None + self._forward_cache = None + self._update_called = False + + # initialize state + self._defaults: Dict[str, Union[List, Tensor]] = {} + self._persistent: Dict[str, bool] = {} + self._reductions: Dict[str, Union[str, Callable[[Union[List[Tensor], Tensor]], Tensor], None]] = {} + + # state management + self._is_synced = False + self._cache: Optional[Dict[str, Union[List[Tensor], Tensor]]] = None + + def to(self, *args, **kwargs): + + return self + # result = super().to(*args, **kwargs) + # + # return result if result is not None else self + + def add_state( + self, + name: str, + default: Union[list, Tensor], + dist_reduce_fx: Optional[Union[str, Callable]] = None, + persistent: bool = False, + ) -> None: + """Adds metric state variable. Only used by subclasses. + + Args: + name: The name of the state variable. The variable will then be accessible at ``self.name``. + default: Default value of the state; can either be a ``B.Tensor`` or an empty list. The state will be + reset to this value when ``self.reset()`` is called. + dist_reduce_fx (Optional): Function to reduce state across multiple processes in distributed mode. + If value is ``"sum"``, ``"mean"``, ``"cat"``, ``"min"`` or ``"max"`` we will use ``B.sum``, + ``B.mean``, ``B.cat``, ``B.min`` and ``B.max``` respectively, each with argument + ``dim=0``. Note that the ``"cat"`` reduction only makes sense if the state is a list, and not + a tensor. The user can also pass a custom function in this parameter. + persistent (Optional): whether the state will be saved as part of the modules ``state_dict``. + Default is ``False``. + + Note: + Setting ``dist_reduce_fx`` to None will return the metric state synchronized across different processes. + However, there won't be any reduction function applied to the synchronized metric state. + + The metric states would be synced as follows + + - If the metric state is ``B.Tensor``, the synced value will be a stacked ``B.Tensor`` across + the process dimension if the metric state was a ``B.Tensor``. The original ``B.Tensor`` metric + state retains dimension and hence the synchronized output will be of shape ``(num_process, ...)``. + + - If the metric state is a ``list``, the synced value will be a ``list`` containing the + combined elements from all processes. 
+
+        Note:
+            When passing a custom function to ``dist_reduce_fx``, expect the synchronized metric state to follow
+            the format discussed in the above note.
+
+        Raises:
+            ValueError:
+                If ``default`` is not a ``tensor`` or an ``empty list``.
+            ValueError:
+                If ``dist_reduce_fx`` is not callable or one of ``"mean"``, ``"sum"``, ``"cat"``, ``"min"``,
+                ``"max"``, ``None``.
+        """
+        if not isinstance(default, (Tensor, list)) or (isinstance(default, list) and default):
+            raise ValueError(f"state variable must be a tensor or an empty list (where you can append tensors): {type(default)}")
+
+        if dist_reduce_fx == "sum":
+            dist_reduce_fx = dim_zero_sum
+        elif dist_reduce_fx == "mean":
+            dist_reduce_fx = dim_zero_mean
+        elif dist_reduce_fx == "max":
+            dist_reduce_fx = dim_zero_max
+        elif dist_reduce_fx == "min":
+            dist_reduce_fx = dim_zero_min
+        elif dist_reduce_fx == "cat":
+            dist_reduce_fx = dim_zero_cat
+        elif dist_reduce_fx is not None and not callable(dist_reduce_fx):
+            raise ValueError("`dist_reduce_fx` must be callable or one of ['mean', 'sum', 'cat', 'min', 'max', None]")
+
+        if isinstance(default, Tensor):
+            default = default.contiguous()
+
+        setattr(self, name, default)
+
+        self._defaults[name] = deepcopy(default)
+        self._persistent[name] = persistent
+        self._reductions[name] = dist_reduce_fx
+
+#    @B.jit.unused
+    def forward(self, *args: Any, **kwargs: Any) -> Any:
+        """Automatically calls ``update()``.
+
+        Returns the metric value over inputs if ``compute_on_step`` is True.
+        """
+        # add current step
+        if self._is_synced:
+            raise paddlemetricsUserError(
+                "The Metric shouldn't be synced when performing ``update``. "
+                "HINT: Did you forget to call ``unsync``?"
+            )
+
+        with B.no_grad():
+            self.update(*args, **kwargs)
+
+        if self.compute_on_step:
+            self._to_sync = self.dist_sync_on_step
+            # skip restore cache operation from compute as cache is stored below.
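+            # What follows is the batch-value trick: the accumulated state is cached,
+            # the metric is reset and updated on this batch alone so that compute()
+            # yields the batch-local value, and the accumulated state is then restored.
+            # Illustrative effect (hypothetical values, assuming a running-sum state):
+            #   accumulated total before this batch = 10, batch sum = 3
+            #   -> forward() returns 3, while the restored state holds 13.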
+ self._should_unsync = False + + # save context before switch + cache = {attr: getattr(self, attr) for attr in self._defaults} + + # call reset, update, compute, on single batch + self.reset() + self.update(*args, **kwargs) + self._forward_cache = self.compute() + + # restore context + for attr, val in cache.items(): + setattr(self, attr, val) + self._is_synced = False + + self._should_unsync = True + self._to_sync = True + self._computed = None + + return self._forward_cache + + def _sync_dist(self, dist_sync_fn: Callable = gather_all_tensors, process_group: Optional[Any] = None) -> None: + input_dict = {attr: getattr(self, attr) for attr in self._reductions} + + for attr, reduction_fn in self._reductions.items(): + # pre-concatenate metric states that are lists to reduce number of all_gather operations + if reduction_fn == dim_zero_cat and isinstance(input_dict[attr], list) and len(input_dict[attr]) > 1: + input_dict[attr] = [dim_zero_cat(input_dict[attr])] + + output_dict = apply_to_collection( + input_dict, + Tensor, + dist_sync_fn, + group=process_group or self.process_group, + ) + + for attr, reduction_fn in self._reductions.items(): + # pre-processing ops (stack or flatten for inputs) + if isinstance(output_dict[attr][0], Tensor): + output_dict[attr] = B.stack(output_dict[attr]) + elif isinstance(output_dict[attr][0], list): + output_dict[attr] = _flatten(output_dict[attr]) + + if not (callable(reduction_fn) or reduction_fn is None): + raise TypeError("reduction_fn must be callable or None") + reduced = reduction_fn(output_dict[attr]) if reduction_fn is not None else output_dict[attr] + setattr(self, attr, reduced) + + def _wrap_update(self, update: Callable) -> Callable: + @functools.wraps(update) + def wrapped_func(*args: Any, **kwargs: Any) -> Optional[Any]: + self._computed = None + self._update_called = True + return update(*args, **kwargs) + + return wrapped_func + + def sync( + self, + dist_sync_fn: Optional[Callable] = None, + process_group: Optional[Any] = None, + should_sync: bool = True, + distributed_available: Optional[Callable] = jit_distributed_available, + ) -> None: + """Sync function for manually controlling when metrics states should be synced across processes. + + Args: + dist_sync_fn: Function to be used to perform states synchronization + process_group: + Specify the process group on which synchronization is called. + default: None (which selects the entire world) + should_sync: Whether to apply to state synchronization. This will have an impact + only when running in a distributed setting. + distributed_available: Function to determine if we are running inside a distributed setting + """ + if self._is_synced and should_sync: + raise paddlemetricsUserError("The Metric has already been synced.") + + is_distributed = distributed_available() if callable(distributed_available) else None + + if not should_sync or not is_distributed: + return + + if dist_sync_fn is None: + dist_sync_fn = gather_all_tensors + + # cache prior to syncing + self._cache = {attr: getattr(self, attr) for attr in self._defaults} + + # sync + self._sync_dist(dist_sync_fn, process_group=process_group) + self._is_synced = True + + def unsync(self, should_unsync: bool = True) -> None: + """Unsync function for manually controlling when metrics states should be reverted back to their local + states. 
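+
+        A typical manual flow (an illustrative sketch; assumes ``sync()`` was called
+        first in a distributed run):
+
+        >>> metric.sync()              # gather states across processes   # doctest: +SKIP
+        >>> value = metric.compute()   # compute on the synced state      # doctest: +SKIP
+        >>> metric.unsync()            # restore this rank's local state  # doctest: +SKIP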
+ + Args: + should_unsync: Whether to perform unsync + """ + if not should_unsync: + return + + if not self._is_synced: + raise paddlemetricsUserError("The Metric has already been un-synced.") + + if self._cache is None: + raise paddlemetricsUserError("The internal cache should exist to unsync the Metric.") + + # if we synced, restore to cache so that we can continue to accumulate un-synced state + for attr, val in self._cache.items(): + setattr(self, attr, val) + self._is_synced = False + self._cache = None + + @contextmanager + def sync_context( + self, + dist_sync_fn: Optional[Callable] = None, + process_group: Optional[Any] = None, + should_sync: bool = True, + should_unsync: bool = True, + distributed_available: Optional[Callable] = jit_distributed_available, + ) -> Generator: + """Context manager to synchronize the states between processes when running in a distributed setting and + restore the local cache states after yielding. + + Args: + dist_sync_fn: Function to be used to perform states synchronization + process_group: + Specify the process group on which synchronization is called. + default: None (which selects the entire world) + should_sync: Whether to apply to state synchronization. This will have an impact + only when running in a distributed setting. + should_unsync: Whether to restore the cache state so that the metrics can + continue to be accumulated. + distributed_available: Function to determine if we are running inside a distributed setting + """ + self.sync( + dist_sync_fn=dist_sync_fn, + process_group=process_group, + should_sync=should_sync, + distributed_available=distributed_available, + ) + + yield + + self.unsync(should_unsync=self._is_synced and should_unsync) + + def _wrap_compute(self, compute: Callable) -> Callable: + @functools.wraps(compute) + def wrapped_func(*args: Any, **kwargs: Any) -> Any: + if not self._update_called: + rank_zero_warn( + f"The ``compute`` method of metric {self.__class__.__name__}" + " was called before the ``update`` method which may lead to errors," + " as metric states have not yet been updated.", + UserWarning, + ) + + # return cached value + if self._computed is not None: + return self._computed + + # compute relies on the sync context manager to gather the states across processes and apply reduction + # if synchronization happened, the current rank accumulated states will be restored to keep + # accumulation going if ``should_unsync=True``, + with self.sync_context( + dist_sync_fn=self.dist_sync_fn, should_sync=self._to_sync, should_unsync=self._should_unsync + ): + self._computed = compute(*args, **kwargs) + + return self._computed + + return wrapped_func + + @abstractmethod + def update(self, *_: Any, **__: Any) -> None: + """Override this method to update the state variables of your metric class.""" + + @abstractmethod + def compute(self) -> Any: + """Override this method to compute the final metric value from state variables synchronized across the + distributed backend.""" + + def reset(self) -> None: + """This method automatically resets the metric state variables to their default value.""" + self._update_called = False + self._forward_cache = None + # lower lightning versions requires this implicitly to log metric objects correctly in self.log +# if not _LIGHTNING_AVAILABLE or self._LIGHTNING_GREATER_EQUAL_1_3: + self._computed = None + + for attr, default in self._defaults.items(): + current_val = getattr(self, attr) + if isinstance(default, Tensor): + setattr(self, attr, 
default.detach().clone().to(current_val.device))
+            else:
+                setattr(self, attr, [])
+
+        # reset internal states
+        self._cache = None
+        self._is_synced = False
+
+    def clone(self) -> "Metric":
+        """Make a copy of the metric."""
+        return deepcopy(self)
+
+    def __getstate__(self) -> Dict[str, Any]:
+        # ignore update and compute functions for pickling
+        return {k: v for k, v in self.__dict__.items() if k not in ["update", "compute", "_update_signature"]}
+
+    def __setstate__(self, state: Dict[str, Any]) -> None:
+        # manually restore update and compute functions for pickling
+        self.__dict__.update(state)
+        self._update_signature = inspect.signature(self.update)
+        self.update: Callable = self._wrap_update(self.update)  # type: ignore
+        self.compute: Callable = self._wrap_compute(self.compute)  # type: ignore
+
+    def __setattr__(self, name: str, value: Any) -> None:
+        if name in ("higher_is_better", "is_differentiable"):
+            raise RuntimeError(f"Can't change const `{name}`.")
+        super().__setattr__(name, value)
+
+    @property
+    def device(self) -> "B.device":
+        """Return the device of the metric."""
+        return self._device
+
+    def type(self, dst_type: Union[str, B.dtype]) -> "Metric":
+        """Overrides the default method and prevents dtype casting.
+
+        Please use `metric.set_dtype(dtype)` instead.
+        """
+        return self
+
+    def float(self) -> "Metric":
+        """Overrides the default method and prevents dtype casting.
+
+        Please use `metric.set_dtype(dtype)` instead.
+        """
+        return self
+
+    def double(self) -> "Metric":
+        """Overrides the default method and prevents dtype casting.
+
+        Please use `metric.set_dtype(dtype)` instead.
+        """
+        return self
+
+    def half(self) -> "Metric":
+        """Overrides the default method and prevents dtype casting.
+
+        Please use `metric.set_dtype(dtype)` instead.
+        """
+        return self
+
+    def set_dtype(self, dst_type: Union[str, B.dtype]) -> None:
+        """Special version of `type` for transferring all metric states to a specific dtype.
+
+        Arguments:
+            dst_type (type or string): the desired type
+        """
+        return super().type(dst_type)
+
+    def _apply(self, fn: Callable, *args, **kwargs) -> Module:
+        """Overwrite _apply function such that we can also move metric states to the correct device when `.to`,
+        `.cuda`, etc methods are called."""
+        this = super()._apply(fn, *args, **kwargs)
+        if this is None:  # for paddle
+            this = self
+        # Also apply fn to metric states and defaults
+        for key, value in this._defaults.items():
+            if isinstance(value, Tensor):
+                this._defaults[key] = fn(value, *args, **kwargs)
+            elif isinstance(value, Sequence):
+                this._defaults[key] = [fn(v, *args, **kwargs) for v in value]
+
+            current_val = getattr(this, key)
+            if isinstance(current_val, Tensor):
+                setattr(this, key, fn(current_val, *args, **kwargs))
+            elif isinstance(current_val, Sequence):
+                setattr(this, key, [fn(cur_v, *args, **kwargs) for cur_v in current_val])
+            else:
+                raise TypeError(
+                    "Expected metric state to be either a Tensor "
+                    f"or a list of Tensor, but encountered {current_val}"
+                )
+
+        # make sure to update the device attribute:
+        # if the dummy tensor is moved to a new device by fn, we should also update the attribute
+        self._device = fn(B.zeros(1, device=self.device), *args, **kwargs).device
+
+        # Additional apply to forward cache and computed attributes (may be nested)
+        if this._computed is not None:
+            this._computed = apply_to_collection(this._computed, Tensor, fn)
+        if this._forward_cache is not None:
+            this._forward_cache = apply_to_collection(this._forward_cache, Tensor, fn)
+
+        return this
+
+    def persistent(self, mode: bool = False) -> None:
+        """Method for post-init to change if metric states should be saved to its state_dict."""
+        for key in self._persistent:
+            self._persistent[key] = mode
+
+    def state_dict(
+        self,
+        destination: Optional[Dict[str, Any]] = None,
+        prefix: str = "",
+        keep_vars: bool = False,
+    ) -> Optional[Dict[str, Any]]:
+        destination = super().state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
+        # Register metric states to be part of the state_dict
+        for key in self._defaults:
+            if not self._persistent[key]:
+                continue
+            current_val = getattr(self, key)
+            if not keep_vars:
+                if isinstance(current_val, Tensor):
+                    current_val = current_val.detach()
+                elif isinstance(current_val, list):
+                    current_val = [cur_v.detach() if isinstance(cur_v, Tensor) else cur_v for cur_v in current_val]
+            destination[prefix + key] = deepcopy(current_val)  # type: ignore
+        return destination
+
+    def _load_from_state_dict(
+        self,
+        state_dict: dict,
+        prefix: str,
+        local_metadata: dict,
+        strict: bool,
+        missing_keys: List[str],
+        unexpected_keys: List[str],
+        error_msgs: List[str],
+    ) -> None:
+        """Loads metric states from state_dict."""
+
+        for key in self._defaults:
+            name = prefix + key
+            if name in state_dict:
+                setattr(self, key, state_dict.pop(name))
+        super()._load_from_state_dict(
+            state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs
+        )
+
+    def _filter_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
+        """Filter kwargs such that they match the update signature of the metric."""
+
+        # filter all parameters based on update signature except those of
+        # type VAR_POSITIONAL (*args) and VAR_KEYWORD (**kwargs)
+        _params = (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD)
+        _sign_params = self._update_signature.parameters
+        filtered_kwargs = {
+            k: v for k, v in kwargs.items() if (k in _sign_params.keys() and _sign_params[k].kind not in _params)
+        }
+
+        # if no kwargs were filtered, return all kwargs as default
+        if not filtered_kwargs:
+            filtered_kwargs = kwargs
+        return filtered_kwargs
+
+    def __hash__(self) -> int:
+        # we need to add the id here, since PyTorch requires a module hash to be unique.
+        # Internally, PyTorch nn.Module relies on that for children discovery
+        # (see https://github.com/pytorch/pytorch/blob/v1.9.0/torch/nn/modules/module.py#L1544)
+        # For metrics that include tensors it is not a problem,
+        # since their hash is unique based on the memory location but we cannot rely on that for every metric.
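+        # Illustrative consequence (hypothetical snippet): two freshly-created metrics
+        # of the same class with identical (empty) state must still hash differently,
+        # because id(self) is part of the hash:
+        #   m1, m2 = MyMetric(), MyMetric()   # MyMetric: any Metric subclass
+        #   assert hash(m1) != hash(m2)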
+ hash_vals = [self.__class__.__name__, id(self)] + + for key in self._defaults: + val = getattr(self, key) + # Special case: allow list values, so long + # as their elements are hashable + if hasattr(val, "__iter__") and not isinstance(val, Tensor): + hash_vals.extend(val) + else: + hash_vals.append(val) + + return hash(tuple(hash_vals)) + + def __add__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.add, self, other) + + def __and__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.bitwise_and, self, other) + + # Fixme: this shall return bool instead of Metric + def __eq__(self, other: "Metric") -> "Metric": # type: ignore + return CompositionalMetric(B.eq, self, other) + + def __floordiv__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.floor_divide, self, other) + + def __ge__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.ge, self, other) + + def __gt__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.gt, self, other) + + def __le__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.le, self, other) + + def __lt__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.lt, self, other) + + def __matmul__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.matmul, self, other) + + def __mod__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.fmod, self, other) + + def __mul__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.mul, self, other) + + # Fixme: this shall return bool instead of Metric + def __ne__(self, other: "Metric") -> "Metric": # type: ignore + return CompositionalMetric(B.ne, self, other) + + def __or__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.bitwise_or, self, other) + + def __pow__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.pow, self, other) + + def __radd__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.add, other, self) + + def __rand__(self, other: "Metric") -> "Metric": + # swap them since bitwise_and only supports that way and it's commutative + return CompositionalMetric(B.bitwise_and, self, other) + + def __rfloordiv__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.floor_divide, other, self) + + def __rmatmul__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.matmul, other, self) + + def __rmod__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.fmod, other, self) + + def __rmul__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.mul, other, self) + + def __ror__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.bitwise_or, other, self) + + def __rpow__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.pow, other, self) + + def __rsub__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.sub, other, self) + + def __rtruediv__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.true_divide, other, self) + + def __rxor__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.bitwise_xor, other, self) + + def __sub__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.sub, self, other) + + def __truediv__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.true_divide, self, other) + + def __xor__(self, other: "Metric") -> "Metric": + return CompositionalMetric(B.bitwise_xor, self, other) + + def __abs__(self) -> "Metric": + return 
CompositionalMetric(B.abs, self, None) + + def __inv__(self) -> "Metric": + return CompositionalMetric(B.bitwise_not, self, None) + + def __invert__(self) -> "Metric": + return self.__inv__() + + def __neg__(self) -> "Metric": + return CompositionalMetric(_neg, self, None) + + def __pos__(self) -> "Metric": + return CompositionalMetric(B.abs, self, None) + + def __getitem__(self, idx: int) -> "Metric": + return CompositionalMetric(lambda x: x[idx], self, None) + + +def _neg(x: Tensor) -> Tensor: + return -B.abs(x) + + +class CompositionalMetric(Metric): + """Composition of two metrics with a specific operator which will be executed upon metrics compute.""" + + def __init__( + self, + operator: Callable, + metric_a: Union[Metric, int, float, Tensor], + metric_b: Union[Metric, int, float, Tensor, None], + ) -> None: + """ + Args: + operator: the operator taking in one (if metric_b is None) + or two arguments. Will be applied to outputs of metric_a.compute() + and (optionally if metric_b is not None) metric_b.compute() + metric_a: first metric whose compute() result is the first argument of operator + metric_b: second metric whose compute() result is the second argument of operator. + For operators taking in only one input, this should be None + """ + super().__init__() + + self.op = operator + + if isinstance(metric_a, Tensor): + self.register_buffer("metric_a", metric_a) + else: + self.metric_a = metric_a + + if isinstance(metric_b, Tensor): + self.register_buffer("metric_b", metric_b) + else: + self.metric_b = metric_b + + def _sync_dist(self, dist_sync_fn: Optional[Callable] = None, process_group: Optional[Any] = None) -> None: + # No syncing required here. syncing will be done in metric_a and metric_b + pass + + def update(self, *args: Any, **kwargs: Any) -> None: + if isinstance(self.metric_a, Metric): + self.metric_a.update(*args, **self.metric_a._filter_kwargs(**kwargs)) + + if isinstance(self.metric_b, Metric): + self.metric_b.update(*args, **self.metric_b._filter_kwargs(**kwargs)) + + def compute(self) -> Any: + + # also some parsing for kwargs? + if isinstance(self.metric_a, Metric): + val_a = self.metric_a.compute() + else: + val_a = self.metric_a + + if isinstance(self.metric_b, Metric): + val_b = self.metric_b.compute() + else: + val_b = self.metric_b + + if val_b is None: + return self.op(val_a) + + return self.op(val_a, val_b) + + def reset(self) -> None: + if isinstance(self.metric_a, Metric): + self.metric_a.reset() + + if isinstance(self.metric_b, Metric): + self.metric_b.reset() + + def persistent(self, mode: bool = False) -> None: + if isinstance(self.metric_a, Metric): + self.metric_a.persistent(mode=mode) + if isinstance(self.metric_b, Metric): + self.metric_b.persistent(mode=mode) + + def __repr__(self) -> str: + _op_metrics = f"(\n {self.op.__name__}(\n {repr(self.metric_a)},\n {repr(self.metric_b)}\n )\n)" + repr_str = self.__class__.__name__ + _op_metrics + + return repr_str diff --git a/EE/paddlemetric/src/paddlemetrics/py.typed b/EE/paddlemetric/src/paddlemetrics/py.typed new file mode 100644 index 000000000..e69de29bb diff --git a/EE/paddlemetric/src/paddlemetrics/regression/__init__.py b/EE/paddlemetric/src/paddlemetrics/regression/__init__.py new file mode 100644 index 000000000..aafc10247 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/regression/__init__.py @@ -0,0 +1,26 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from paddlemetrics.regression.cosine_similarity import CosineSimilarity # noqa: F401 +from paddlemetrics.regression.explained_variance import ExplainedVariance # noqa: F401 +from paddlemetrics.regression.mean_absolute_error import MeanAbsoluteError # noqa: F401 +from paddlemetrics.regression.mean_absolute_percentage_error import MeanAbsolutePercentageError # noqa: F401 +from paddlemetrics.regression.mean_squared_error import MeanSquaredError # noqa: F401 +from paddlemetrics.regression.mean_squared_log_error import MeanSquaredLogError # noqa: F401 +from paddlemetrics.regression.pearson import PearsonCorrcoef # noqa: F401 +from paddlemetrics.regression.r2 import R2Score # noqa: F401 +from paddlemetrics.regression.spearman import SpearmanCorrcoef # noqa: F401 +from paddlemetrics.regression.symmetric_mean_absolute_percentage_error import ( # noqa: F401 + SymmetricMeanAbsolutePercentageError, +) +from paddlemetrics.regression.tweedie_deviance import TweedieDevianceScore # noqa: F401 diff --git a/EE/paddlemetric/src/paddlemetrics/regression/cosine_similarity.py b/EE/paddlemetric/src/paddlemetrics/regression/cosine_similarity.py new file mode 100644 index 000000000..3b2946e2c --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/regression/cosine_similarity.py @@ -0,0 +1,105 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, List, Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from paddlemetrics.functional.regression.cosine_similarity import _cosine_similarity_compute, _cosine_similarity_update +from paddlemetrics.metric import Metric +from paddlemetrics.utilities.data import dim_zero_cat + + +class CosineSimilarity(Metric): + r""" + Computes the `Cosine Similarity`_ + between targets and predictions: + + .. math:: + cos_{sim}(x,y) = \frac{x \cdot y}{||x|| \cdot ||y||} = + \frac{\sum_{i=1}^n x_i y_i}{\sqrt{\sum_{i=1}^n x_i^2}\sqrt{\sum_{i=1}^n y_i^2}} + + where :math:`y` is a tensor of target values, and :math:`x` is a tensor of predictions. + + Forward accepts + + - ``preds`` (float tensor): ``(N,d)`` + - ``target`` (float tensor): ``(N,d)`` + + Args: + reduction: how to reduce over the batch dimension using 'sum', 'mean' or 'none' + (taking the individual scores) + compute_on_step: + Forward only calls ``update()`` and return ``None`` if this is set to ``False``. + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. + process_group: + Specify the process group on which synchronization is called. 
+ default: ``None`` (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When ``None``, DDP + will be used to perform the all gather. + + Example: + >>> from paddlemetrics import CosineSimilarity + >>> target = B.tensor([[0, 1], [1, 1]]) + >>> preds = B.tensor([[0, 1], [0, 1]]) + >>> cosine_similarity = CosineSimilarity(reduction = 'mean') + >>> cosine_similarity(preds, target) + tensor(0.8536) + + """ + is_differentiable = True + preds: List[Tensor] + target: List[Tensor] + + def __init__( + self, + reduction: str = "sum", + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + allowed_reduction = ("sum", "mean", "none", None) + if reduction not in allowed_reduction: + raise ValueError(f"Expected argument `reduction` to be one of {allowed_reduction} but got {reduction}") + self.reduction = reduction + + self.add_state("preds", [], dist_reduce_fx="cat") + self.add_state("target", [], dist_reduce_fx="cat") + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update metric states with predictions and targets. + + Args: + preds: Predicted tensor with shape ``(N,d)`` + target: Ground truth tensor with shape ``(N,d)`` + """ + preds, target = _cosine_similarity_update(preds, target) + + self.preds.append(preds) + self.target.append(target) + + def compute(self) -> Tensor: + preds = dim_zero_cat(self.preds) + target = dim_zero_cat(self.target) + return _cosine_similarity_compute(preds, target, self.reduction) diff --git a/EE/paddlemetric/src/paddlemetrics/regression/explained_variance.py b/EE/paddlemetric/src/paddlemetrics/regression/explained_variance.py new file mode 100644 index 000000000..226ac0760 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/regression/explained_variance.py @@ -0,0 +1,136 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional, Sequence, Union + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.regression.explained_variance import ( + _explained_variance_compute, + _explained_variance_update, +) +from paddlemetrics.metric import Metric + + +class ExplainedVariance(Metric): + r""" + Computes `explained variance`_: + + .. math:: \text{ExplainedVariance} = 1 - \frac{\text{Var}(y - \hat{y})}{\text{Var}(y)} + + Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a + tensor of predictions. + + Forward accepts + + - ``preds`` (float tensor): ``(N,)`` or ``(N, ...)`` (multioutput) + - ``target`` (long tensor): ``(N,)`` or ``(N, ...)`` (multioutput) + + In the case of multioutput, as default the variances will be uniformly + averaged over the additional dimensions. 
Please see argument `multioutput` + for changing this behavior. + + Args: + multioutput: + Defines aggregation in the case of multiple output scores. Can be one + of the following strings (default is `'uniform_average'`.): + + * `'raw_values'` returns full set of scores + * `'uniform_average'` scores are uniformly averaged + * `'variance_weighted'` scores are weighted by their individual variances + + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + + Raises: + ValueError: + If ``multioutput`` is not one of ``"raw_values"``, ``"uniform_average"`` or ``"variance_weighted"``. + + Example: + >>> from paddlemetrics import ExplainedVariance + >>> target = B.tensor([3, -0.5, 2, 7]) + >>> preds = B.tensor([2.5, 0.0, 2, 8]) + >>> explained_variance = ExplainedVariance() + >>> explained_variance(preds, target) + tensor(0.9572) + + >>> target = B.tensor([[0.5, 1], [-1, 1], [7, -6]]) + >>> preds = B.tensor([[0, 2], [-1, 2], [8, -5]]) + >>> explained_variance = ExplainedVariance(multioutput='raw_values') + >>> explained_variance(preds, target) + tensor([0.9677, 1.0000]) + + """ + is_differentiable = True + n_obs: Tensor + sum_error: Tensor + sum_squared_error: Tensor + sum_target: Tensor + sum_squared_target: Tensor + + def __init__( + self, + multioutput: str = "uniform_average", + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + allowed_multioutput = ("raw_values", "uniform_average", "variance_weighted") + if multioutput not in allowed_multioutput: + raise ValueError( + f"Invalid input to argument `multioutput`. Choose one of the following: {allowed_multioutput}" + ) + self.multioutput: str = multioutput + self.add_state("sum_error", default=tensor(0.0), dist_reduce_fx="sum") + self.add_state("sum_squared_error", default=tensor(0.0), dist_reduce_fx="sum") + self.add_state("sum_target", default=tensor(0.0), dist_reduce_fx="sum") + self.add_state("sum_squared_target", default=tensor(0.0), dist_reduce_fx="sum") + self.add_state("n_obs", default=tensor(0.0), dist_reduce_fx="sum") + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets. 
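+
+        Only running sums (``n_obs``, ``sum_error``, ``sum_squared_error``, ``sum_target``,
+        ``sum_squared_target``) are accumulated, so memory use stays constant in the
+        number of samples seen.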
+ + Args: + preds: Predictions from model + target: Ground truth values + """ + n_obs, sum_error, sum_squared_error, sum_target, sum_squared_target = _explained_variance_update(preds, target) + self.n_obs = self.n_obs + n_obs + self.sum_error = self.sum_error + sum_error + self.sum_squared_error = self.sum_squared_error + sum_squared_error + self.sum_target = self.sum_target + sum_target + self.sum_squared_target = self.sum_squared_target + sum_squared_target + + def compute(self) -> Union[Tensor, Sequence[Tensor]]: + """Computes explained variance over state.""" + return _explained_variance_compute( + self.n_obs, + self.sum_error, + self.sum_squared_error, + self.sum_target, + self.sum_squared_target, + self.multioutput, + ) diff --git a/EE/paddlemetric/src/paddlemetrics/regression/mean_absolute_error.py b/EE/paddlemetric/src/paddlemetrics/regression/mean_absolute_error.py new file mode 100644 index 000000000..8614bed21 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/regression/mean_absolute_error.py @@ -0,0 +1,86 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.regression.mean_absolute_error import ( + _mean_absolute_error_compute, + _mean_absolute_error_update, +) +from paddlemetrics.metric import Metric + + +class MeanAbsoluteError(Metric): + r""" + `Computes Mean Absolute Error`_ (MAE): + + .. math:: \text{MAE} = \frac{1}{N}\sum_i^N | y_i - \hat{y_i} | + + Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions. + + Args: + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + + Example: + >>> from paddlemetrics import MeanAbsoluteError + >>> target = B.tensor([3.0, -0.5, 2.0, 7.0]) + >>> preds = B.tensor([2.5, 0.0, 2.0, 8.0]) + >>> mean_absolute_error = MeanAbsoluteError() + >>> mean_absolute_error(preds, target) + tensor(0.5000) + """ + is_differentiable = True + sum_abs_error: Tensor + total: Tensor + + def __init__( + self, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + self.add_state("sum_abs_error", default=tensor(0.0), dist_reduce_fx="sum") + self.add_state("total", default=tensor(0), dist_reduce_fx="sum") + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets. 
+
+        Args:
+            preds: Predictions from model
+            target: Ground truth values
+        """
+        sum_abs_error, n_obs = _mean_absolute_error_update(preds, target)
+
+        self.sum_abs_error += sum_abs_error
+        self.total += n_obs
+
+    def compute(self) -> Tensor:
+        """Computes mean absolute error over state."""
+        return _mean_absolute_error_compute(self.sum_abs_error, self.total)
diff --git a/EE/paddlemetric/src/paddlemetrics/regression/mean_absolute_percentage_error.py b/EE/paddlemetric/src/paddlemetrics/regression/mean_absolute_percentage_error.py
new file mode 100644
index 000000000..66d9c0916
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/regression/mean_absolute_percentage_error.py
@@ -0,0 +1,95 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.functional.regression.mean_absolute_percentage_error import (
+    _mean_absolute_percentage_error_compute,
+    _mean_absolute_percentage_error_update,
+)
+from paddlemetrics.metric import Metric
+
+
+class MeanAbsolutePercentageError(Metric):
+    r"""
+    Computes `Mean Absolute Percentage Error`_ (MAPE):
+
+    .. math:: \text{MAPE} = \frac{1}{n}\sum_1^n\frac{| y_i - \hat{y_i} |}{\max(\epsilon, | y_i |)}
+
+    Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
+
+    Args:
+        compute_on_step:
+            Forward only calls ``update()`` and return None if this is set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step. default: False
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+
+    Note:
+        The epsilon value is taken from `scikit-learn's implementation of MAPE`_.
+
+    Note:
+        MAPE output is a non-negative floating point; the best value is 0.0. Note that bad predictions
+        can lead to arbitrarily large values, especially when some ``target`` values are close to 0.
+        This `MAPE implementation returns`_ a very large number instead of ``inf``.
+ + Example: + >>> from paddlemetrics import MeanAbsolutePercentageError + >>> target = B.tensor([1, 10, 1e6]) + >>> preds = B.tensor([0.9, 15, 1.2e6]) + >>> mean_abs_percentage_error = MeanAbsolutePercentageError() + >>> mean_abs_percentage_error(preds, target) + tensor(0.2667) + + """ + is_differentiable = True + sum_abs_per_error: Tensor + total: Tensor + + def __init__( + self, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + self.add_state("sum_abs_per_error", default=tensor(0.0), dist_reduce_fx="sum") + self.add_state("total", default=tensor(0.0), dist_reduce_fx="sum") + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets. + + Args: + preds: Predictions from model + target: Ground truth values + """ + sum_abs_per_error, num_obs = _mean_absolute_percentage_error_update(preds, target) + + self.sum_abs_per_error += sum_abs_per_error + self.total += num_obs + + def compute(self) -> Tensor: + """Computes mean absolute percentage error over state.""" + return _mean_absolute_percentage_error_compute(self.sum_abs_per_error, self.total) diff --git a/EE/paddlemetric/src/paddlemetrics/regression/mean_squared_error.py b/EE/paddlemetric/src/paddlemetrics/regression/mean_squared_error.py new file mode 100644 index 000000000..8c1c9245b --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/regression/mean_squared_error.py @@ -0,0 +1,91 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.regression.mean_squared_error import ( + _mean_squared_error_compute, + _mean_squared_error_update, +) +from paddlemetrics.metric import Metric + + +class MeanSquaredError(Metric): + r""" + Computes `mean squared error`_ (MSE): + + .. math:: \text{MSE} = \frac{1}{N}\sum_i^N(y_i - \hat{y_i})^2 + + Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions. + + Args: + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + squared: + If True returns MSE value, if False returns RMSE value. 
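+
+    Note:
+        With ``squared=False`` the metric returns the RMSE, i.e. the square root of the
+        MSE. An illustrative sketch (hypothetical call, reusing the example values
+        below; the square root of 0.8750 is approximately 0.9354):
+
+        >>> MeanSquaredError(squared=False)(preds, target)  # doctest: +SKIP
+        tensor(0.9354)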
+ + Example: + >>> from paddlemetrics import MeanSquaredError + >>> target = B.tensor([2.5, 5.0, 4.0, 8.0]) + >>> preds = B.tensor([3.0, 5.0, 2.5, 7.0]) + >>> mean_squared_error = MeanSquaredError() + >>> mean_squared_error(preds, target) + tensor(0.8750) + + """ + is_differentiable = True + sum_squared_error: Tensor + total: Tensor + + def __init__( + self, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + squared: bool = True, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + self.add_state("sum_squared_error", default=tensor(0.0), dist_reduce_fx="sum") + self.add_state("total", default=tensor(0), dist_reduce_fx="sum") + self.squared = squared + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets. + + Args: + preds: Predictions from model + target: Ground truth values + """ + sum_squared_error, n_obs = _mean_squared_error_update(preds, target) + + self.sum_squared_error += sum_squared_error + self.total += n_obs + + def compute(self) -> Tensor: + """Computes mean squared error over state.""" + return _mean_squared_error_compute(self.sum_squared_error, self.total, squared=self.squared) diff --git a/EE/paddlemetric/src/paddlemetrics/regression/mean_squared_log_error.py b/EE/paddlemetric/src/paddlemetrics/regression/mean_squared_log_error.py new file mode 100644 index 000000000..e36773b0e --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/regression/mean_squared_log_error.py @@ -0,0 +1,90 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.regression.mean_squared_log_error import ( + _mean_squared_log_error_compute, + _mean_squared_log_error_update, +) +from paddlemetrics.metric import Metric + + +class MeanSquaredLogError(Metric): + r""" + Computes `mean squared logarithmic error`_ (MSLE): + + .. math:: \text{MSLE} = \frac{1}{N}\sum_i^N (\log_e(1 + y_i) - \log_e(1 + \hat{y_i}))^2 + + Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions. + + Args: + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. 
default: None (which selects the entire world)
+
+    Example:
+        >>> from paddlemetrics import MeanSquaredLogError
+        >>> target = B.tensor([2.5, 5, 4, 8])
+        >>> preds = B.tensor([3, 5, 2.5, 7])
+        >>> mean_squared_log_error = MeanSquaredLogError()
+        >>> mean_squared_log_error(preds, target)
+        tensor(0.0397)
+
+    .. note::
+        Half precision is only supported on GPU for this metric
+
+    """
+    is_differentiable = True
+    sum_squared_log_error: Tensor
+    total: Tensor
+
+    def __init__(
+        self,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+
+        self.add_state("sum_squared_log_error", default=tensor(0.0), dist_reduce_fx="sum")
+        self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Update state with predictions and targets.
+
+        Args:
+            preds: Predictions from model
+            target: Ground truth values
+        """
+        sum_squared_log_error, n_obs = _mean_squared_log_error_update(preds, target)
+
+        self.sum_squared_log_error += sum_squared_log_error
+        self.total += n_obs
+
+    def compute(self) -> Tensor:
+        """Compute mean squared logarithmic error over state."""
+        return _mean_squared_log_error_compute(self.sum_squared_log_error, self.total)
diff --git a/EE/paddlemetric/src/paddlemetrics/regression/pearson.py b/EE/paddlemetric/src/paddlemetrics/regression/pearson.py
new file mode 100644
index 000000000..7927392a7
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/regression/pearson.py
@@ -0,0 +1,140 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, List, Optional, Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.regression.pearson import _pearson_corrcoef_compute, _pearson_corrcoef_update
+from paddlemetrics.metric import Metric
+
+
+def _final_aggregation(
+    means_x: Tensor,
+    means_y: Tensor,
+    vars_x: Tensor,
+    vars_y: Tensor,
+    corrs_xy: Tensor,
+    nbs: Tensor,
+) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
+    """Aggregate the statistics from multiple devices.
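+
+    The per-device chunks are merged pairwise with the standard parallel update
+    rules (a sketch of the count and mean merges used in the loop below, for two
+    chunks A and B):
+
+    .. math::
+        n = n_A + n_B, \qquad \bar{x} = \frac{n_A \bar{x}_A + n_B \bar{x}_B}{n}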
+ + Formula taken from here: `Aggregate the statistics from multiple devices`_ + """ + # assert len(means_x) > 1 and len(means_y) > 1 and len(vars_x) > 1 and len(vars_y) > 1 and len(corrs_xy) > 1 + mx1, my1, vx1, vy1, cxy1, n1 = means_x[0], means_y[0], vars_x[0], vars_y[0], corrs_xy[0], nbs[0] + for i in range(1, len(means_x)): + mx2, my2, vx2, vy2, cxy2, n2 = means_x[i], means_y[i], vars_x[i], vars_y[i], corrs_xy[i], nbs[i] + + nb = n1 + n2 + mean_x = (n1 * mx1 + n2 * mx2) / nb + mean_y = (n1 * my1 + n2 * my2) / nb + var_x = 1 / (n1 + n2 - 1) * ((n1 - 1) * vx1 + (n2 - 1) * vx2 + ((n1 * n2) / (n1 + n2)) * (mx1 - mx2) ** 2) + var_y = 1 / (n1 + n2 - 1) * ((n1 - 1) * vy1 + (n2 - 1) * vy2 + ((n1 * n2) / (n1 + n2)) * (my1 - my2) ** 2) + + corr1 = n1 * cxy1 + n1 * (mx1 - mean_x) * (my1 - mean_y) + corr2 = n2 * cxy2 + n2 * (mx2 - mean_x) * (my2 - mean_y) + corr_xy = (corr1 + corr2) / (n1 + n2) + + mx1, my1, vx1, vy1, cxy1, n1 = mean_x, mean_y, var_x, var_y, corr_xy, nb + + return var_x, var_y, corr_xy, nb + + +class PearsonCorrcoef(Metric): + r""" + Computes `Pearson Correlation Coefficient`_: + + .. math:: + P_{corr}(x,y) = \frac{cov(x,y)}{\sigma_x \sigma_y} + + Where :math:`y` is a tensor of target values, and :math:`x` is a + tensor of predictions. + + Forward accepts + + - ``preds`` (float tensor): ``(N,)`` + - ``target``(float tensor): ``(N,)`` + + Args: + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + + Example: + >>> from paddlemetrics import PearsonCorrcoef + >>> target = B.tensor([3, -0.5, 2, 7]) + >>> preds = B.tensor([2.5, 0.0, 2, 8]) + >>> pearson = PearsonCorrcoef() + >>> pearson(preds, target) + tensor(0.9849) + + """ + is_differentiable = True + preds: List[Tensor] + target: List[Tensor] + mean_x: Tensor + mean_y: Tensor + var_x: Tensor + var_y: Tensor + corr_xy: Tensor + n_total: Tensor + + def __init__( + self, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + ) + + self.add_state("mean_x", default=B.zeros(1), dist_reduce_fx=None) + self.add_state("mean_y", default=B.zeros(1), dist_reduce_fx=None) + self.add_state("var_x", default=B.zeros(1), dist_reduce_fx=None) + self.add_state("var_y", default=B.zeros(1), dist_reduce_fx=None) + self.add_state("corr_xy", default=B.zeros(1), dist_reduce_fx=None) + self.add_state("n_total", default=B.zeros(1), dist_reduce_fx=None) + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets. 
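+
+        The running statistics are registered with ``dist_reduce_fx=None``, so in a
+        distributed run ``compute()`` receives the per-process values stacked along the
+        process dimension and merges them via ``_final_aggregation``.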
+ + Args: + preds: Predictions from model + target: Ground truth values + """ + self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total = _pearson_corrcoef_update( + preds, target, self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total + ) + + def compute(self) -> Tensor: + """Computes pearson correlation coefficient over state.""" + if self.mean_x.numel() > 1: # multiple devices, need further reduction + var_x, var_y, corr_xy, n_total = _final_aggregation( + self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total + ) + else: + var_x = self.var_x + var_y = self.var_y + corr_xy = self.corr_xy + n_total = self.n_total + + return _pearson_corrcoef_compute(var_x, var_y, corr_xy, n_total) diff --git a/EE/paddlemetric/src/paddlemetrics/regression/r2.py b/EE/paddlemetric/src/paddlemetrics/regression/r2.py new file mode 100644 index 000000000..36db3d8d5 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/regression/r2.py @@ -0,0 +1,149 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.regression.r2 import _r2_score_compute, _r2_score_update +from paddlemetrics.metric import Metric + + +class R2Score(Metric): + r""" + Computes r2 score also known as `R2 Score_Coefficient Determination`_: + + .. math:: R^2 = 1 - \frac{SS_{res}}{SS_{tot}} + + where :math:`SS_{res}=\sum_i (y_i - f(x_i))^2` is the sum of residual squares, and + :math:`SS_{tot}=\sum_i (y_i - \bar{y})^2` is total sum of squares. Can also calculate + adjusted r2 score given by + + .. math:: R^2_{adj} = 1 - \frac{(1-R^2)(n-1)}{n-k-1} + + where the parameter :math:`k` (the number of independent regressors) should + be provided as the `adjusted` argument. + + Forward accepts + + - ``preds`` (float tensor): ``(N,)`` or ``(N, M)`` (multioutput) + - ``target`` (float tensor): ``(N,)`` or ``(N, M)`` (multioutput) + + In the case of multioutput, as default the variances will be uniformly + averaged over the additional dimensions. Please see argument `multioutput` + for changing this behavior. + + Args: + num_outputs: + Number of outputs in multioutput setting (default is 1) + adjusted: + number of independent regressors for calculating adjusted r2 score. + Default 0 (standard r2 score). + multioutput: + Defines aggregation in the case of multiple output scores. Can be one + of the following strings (default is ``'uniform_average'``.): + + * ``'raw_values'`` returns full set of scores + * ``'uniform_average'`` scores are uniformly averaged + * ``'variance_weighted'`` scores are weighted by their individual variances + + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. 
default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + + Raises: + ValueError: + If ``adjusted`` parameter is not an integer larger or equal to 0. + ValueError: + If ``multioutput`` is not one of ``"raw_values"``, ``"uniform_average"`` or ``"variance_weighted"``. + + Example: + >>> from paddlemetrics import R2Score + >>> target = B.tensor([3, -0.5, 2, 7]) + >>> preds = B.tensor([2.5, 0.0, 2, 8]) + >>> r2score = R2Score() + >>> r2score(preds, target) + tensor(0.9486) + + >>> target = B.tensor([[0.5, 1], [-1, 1], [7, -6]]) + >>> preds = B.tensor([[0, 2], [-1, 2], [8, -5]]) + >>> r2score = R2Score(num_outputs=2, multioutput='raw_values') + >>> r2score(preds, target) + tensor([0.9654, 0.9082]) + + """ + is_differentiable = True + sum_squared_error: Tensor + sum_error: Tensor + residual: Tensor + total: Tensor + + def __init__( + self, + num_outputs: int = 1, + adjusted: int = 0, + multioutput: str = "uniform_average", + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + self.num_outputs = num_outputs + + if adjusted < 0 or not isinstance(adjusted, int): + raise ValueError("`adjusted` parameter should be an integer larger or equal to 0.") + self.adjusted = adjusted + + allowed_multioutput = ("raw_values", "uniform_average", "variance_weighted") + if multioutput not in allowed_multioutput: + raise ValueError( + f"Invalid input to argument `multioutput`. Choose one of the following: {allowed_multioutput}" + ) + self.multioutput = multioutput + + self.add_state("sum_squared_error", default=B.zeros(self.num_outputs), dist_reduce_fx="sum") + self.add_state("sum_error", default=B.zeros(self.num_outputs), dist_reduce_fx="sum") + self.add_state("residual", default=B.zeros(self.num_outputs), dist_reduce_fx="sum") + self.add_state("total", default=tensor(0), dist_reduce_fx="sum") + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets. + + Args: + preds: Predictions from model + target: Ground truth values + """ + sum_squared_error, sum_error, residual, total = _r2_score_update(preds, target) + + self.sum_squared_error += sum_squared_error + self.sum_error += sum_error + self.residual += residual + self.total += total + + def compute(self) -> Tensor: + """Computes r2 score over the metric states.""" + return _r2_score_compute( + self.sum_squared_error, self.sum_error, self.residual, self.total, self.adjusted, self.multioutput + ) diff --git a/EE/paddlemetric/src/paddlemetrics/regression/spearman.py b/EE/paddlemetric/src/paddlemetrics/regression/spearman.py new file mode 100644 index 000000000..76249378f --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/regression/spearman.py @@ -0,0 +1,96 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
diff --git a/EE/paddlemetric/src/paddlemetrics/regression/spearman.py b/EE/paddlemetric/src/paddlemetrics/regression/spearman.py
new file mode 100644
index 000000000..76249378f
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/regression/spearman.py
@@ -0,0 +1,96 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, List, Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.regression.spearman import _spearman_corrcoef_compute, _spearman_corrcoef_update
+from paddlemetrics.metric import Metric
+from paddlemetrics.utilities import rank_zero_warn
+from paddlemetrics.utilities.data import dim_zero_cat
+
+
+class SpearmanCorrcoef(Metric):
+    r"""
+    Computes `spearmans rank correlation coefficient`_:
+
+    .. math::
+        r_s = \frac{\operatorname{cov}(rg_x, rg_y)}{\sigma_{rg_x} \sigma_{rg_y}}
+
+    where :math:`rg_x` and :math:`rg_y` are the ranks associated with the variables x and y.
+    Spearman's rank correlation coefficient corresponds to the standard Pearson correlation
+    coefficient calculated on the rank variables.
+
+    Args:
+        compute_on_step:
+            Forward only calls ``update()`` and returns ``None`` if this is set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step. default: False
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When ``None``, DDP
+            will be used to perform the allgather
+
+    Example:
+        >>> from paddlemetrics import SpearmanCorrcoef
+        >>> target = B.tensor([3, -0.5, 2, 7])
+        >>> preds = B.tensor([2.5, 0.0, 2, 8])
+        >>> spearman = SpearmanCorrcoef()
+        >>> spearman(preds, target)
+        tensor(1.0000)
+
+    """
+    is_differentiable = False
+    preds: List[Tensor]
+    target: List[Tensor]
+
+    def __init__(
+        self,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Optional[Callable] = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+        rank_zero_warn(
+            "Metric `SpearmanCorrcoef` will save all targets and predictions in the buffer."
+            " For large datasets, this may lead to a large memory footprint."
+        )
+
+        self.add_state("preds", default=[], dist_reduce_fx="cat")
+        self.add_state("target", default=[], dist_reduce_fx="cat")
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Update state with predictions and targets.
+
+        Args:
+            preds: Predictions from model
+            target: Ground truth values
+        """
+        preds, target = _spearman_corrcoef_update(preds, target)
+        self.preds.append(preds)
+        self.target.append(target)
+
+    def compute(self) -> Tensor:
+        """Computes Spearman's correlation coefficient."""
+        preds = dim_zero_cat(self.preds)
+        target = dim_zero_cat(self.target)
+        return _spearman_corrcoef_compute(preds, target)
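+
+# Rank-based illustration of the definition above (uses scipy, which is not a
+# dependency of this package; shown only to make the rank construction concrete):
+#
+#   from scipy.stats import pearsonr, rankdata
+#   pearsonr(rankdata([2.5, 0.0, 2, 8]), rankdata([3, -0.5, 2, 7]))[0]  # -> 1.0
+#
+# Both series have ranks [3, 1, 2, 4], so the coefficient is exactly 1.0, matching
+# the doctest above.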
diff --git a/EE/paddlemetric/src/paddlemetrics/regression/symmetric_mean_absolute_percentage_error.py b/EE/paddlemetric/src/paddlemetrics/regression/symmetric_mean_absolute_percentage_error.py
new file mode 100644
index 000000000..3e545e08a
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/regression/symmetric_mean_absolute_percentage_error.py
@@ -0,0 +1,92 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor, tensor
+
+from paddlemetrics.functional.regression.symmetric_mean_absolute_percentage_error import (
+    _symmetric_mean_absolute_percentage_error_compute,
+    _symmetric_mean_absolute_percentage_error_update,
+)
+from paddlemetrics.metric import Metric
+
+
+class SymmetricMeanAbsolutePercentageError(Metric):
+    r"""
+    Computes symmetric mean absolute percentage error (`SMAPE`_).
+
+    .. math:: \text{SMAPE} = \frac{2}{n}\sum_{i=1}^n \frac{| y_i - \hat{y_i} |}{\max(| y_i | + | \hat{y_i} |, \epsilon)}
+
+    where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
+
+    Args:
+        compute_on_step:
+            Forward only calls ``update()`` and returns ``None`` if this is set to False.
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()`` before returning the value at the step.
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+
+    Note:
+        The epsilon value is taken from `scikit-learn's implementation of SMAPE`_.
+
+    Note:
+        SMAPE output is a non-negative floating point between 0 and 2. The best result is 0.0.
+
+
+    Example:
+        >>> from paddlemetrics import SymmetricMeanAbsolutePercentageError
+        >>> target = B.tensor([1, 10, 1e6])
+        >>> preds = B.tensor([0.9, 15, 1.2e6])
+        >>> smape = SymmetricMeanAbsolutePercentageError()
+        >>> smape(preds, target)
+        tensor(0.2290)
+    """
+    is_differentiable = True
+    sum_abs_per_error: Tensor
+    total: Tensor
+
+    def __init__(
+        self,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ) -> None:
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+
+        self.add_state("sum_abs_per_error", default=tensor(0.0), dist_reduce_fx="sum")
+        self.add_state("total", default=tensor(0.0), dist_reduce_fx="sum")
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        """Update state with predictions and targets.
+
+        Args:
+            preds: Predictions from model
+            target: Ground truth values
+        """
+        sum_abs_per_error, num_obs = _symmetric_mean_absolute_percentage_error_update(preds, target)
+
+        self.sum_abs_per_error += sum_abs_per_error
+        self.total += num_obs
+
+    def compute(self) -> Tensor:
+        """Computes symmetric mean absolute percentage error over state."""
+        return _symmetric_mean_absolute_percentage_error_compute(self.sum_abs_per_error, self.total)
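+
+# Worked check of the doctest above (our own arithmetic): the three element-wise
+# terms are 2*0.1/1.9 = 0.1053, 2*5/25 = 0.4 and 2*2e5/2.2e6 = 0.1818, and their
+# mean is 0.2290.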
diff --git a/EE/paddlemetric/src/paddlemetrics/regression/tweedie_deviance.py b/EE/paddlemetric/src/paddlemetrics/regression/tweedie_deviance.py
new file mode 100644
index 000000000..4687bdd5c
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/regression/tweedie_deviance.py
@@ -0,0 +1,116 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, Optional
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional.regression.tweedie_deviance import (
+    _tweedie_deviance_score_compute,
+    _tweedie_deviance_score_update,
+)
+from paddlemetrics.metric import Metric
+
+
+class TweedieDevianceScore(Metric):
+    r"""
+    Computes the `Tweedie Deviance Score`_ between targets and predictions:
+
+    .. math::
+        deviance\_score(\hat{y},y) =
+        \begin{cases}
+        (\hat{y} - y)^2, & \text{for }power=0\\
+        2 * (y * log(\frac{y}{\hat{y}}) + \hat{y} - y), & \text{for }power=1\\
+        2 * (log(\frac{\hat{y}}{y}) + \frac{y}{\hat{y}} - 1), & \text{for }power=2\\
+        2 * (\frac{(max(y,0))^{2 - power}}{(1 - power)(2 - power)} - \frac{y(\hat{y})^{1 - power}}{1 - power} + \frac{(\hat{y})^{2 - power}}{2 - power}), & \text{otherwise}
+        \end{cases}
+
+    where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
+
+    Forward accepts
+
+    - ``preds`` (float tensor): ``(N,...)``
+    - ``targets`` (float tensor): ``(N,...)``
+
+    Args:
+        power:
+            - power < 0 : Extreme stable distribution. (Requires: preds > 0.)
+            - power = 0 : Normal distribution. (Requires: targets and preds can be any real numbers.)
+            - power = 1 : Poisson distribution. (Requires: targets >= 0 and preds > 0.)
+            - 1 < power < 2 : Compound Poisson distribution. (Requires: targets >= 0 and preds > 0.)
+            - power = 2 : Gamma distribution. (Requires: targets > 0 and preds > 0.)
+            - power = 3 : Inverse Gaussian distribution. (Requires: targets > 0 and preds > 0.)
+            - otherwise : Positive stable distribution. (Requires: targets > 0 and preds > 0.)
+        compute_on_step:
+            Forward only calls ``update()`` and returns ``None`` if this is set to ``False``.
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step.
+        process_group:
+            Specify the process group on which synchronization is called.
+            default: ``None`` (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When ``None``, DDP
+            will be used to perform the allgather.
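+
+    Note:
+        As a worked example of the ``power=2`` case above, the first pair in the
+        example below (``targets[0]=1.0``, ``preds[0]=4.0``) contributes
+        :math:`2(\log(4) + 1/4 - 1) \approx 1.2726`; averaging the four pairwise
+        scores yields the ``tensor(1.2083)`` shown.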
+ + Example: + >>> from paddlemetrics import TweedieDevianceScore + >>> targets = B.tensor([1.0, 2.0, 3.0, 4.0]) + >>> preds = B.tensor([4.0, 3.0, 2.0, 1.0]) + >>> deviance_score = TweedieDevianceScore(power=2) + >>> deviance_score(preds, targets) + tensor(1.2083) + + """ + is_differentiable = True + sum_deviance_score: Tensor + num_observations: Tensor + + def __init__( + self, + power: float = 0.0, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + if 0 < power < 1: + raise ValueError(f"Deviance Score is not defined for power={power}.") + + self.power: float = power + + self.add_state("sum_deviance_score", B.tensor(0.0), dist_reduce_fx="sum") + self.add_state("num_observations", B.tensor(0), dist_reduce_fx="sum") + + def update(self, preds: Tensor, targets: Tensor) -> None: # type: ignore + """Update metric states with predictions and targets. + + Args: + preds: Predicted tensor with shape ``(N,d)`` + targets: Ground truth tensor with shape ``(N,d)`` + """ + sum_deviance_score, num_observations = _tweedie_deviance_score_update(preds, targets, self.power) + + self.sum_deviance_score += sum_deviance_score + self.num_observations += num_observations + + def compute(self) -> Tensor: + return _tweedie_deviance_score_compute(self.sum_deviance_score, self.num_observations) diff --git a/EE/paddlemetric/src/paddlemetrics/retrieval/__init__.py b/EE/paddlemetric/src/paddlemetrics/retrieval/__init__.py new file mode 100644 index 000000000..208a02246 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/retrieval/__init__.py @@ -0,0 +1,22 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from paddlemetrics.retrieval.mean_average_precision import RetrievalMAP # noqa: F401 +from paddlemetrics.retrieval.mean_reciprocal_rank import RetrievalMRR # noqa: F401 +from paddlemetrics.retrieval.retrieval_fallout import RetrievalFallOut # noqa: F401 +from paddlemetrics.retrieval.retrieval_hit_rate import RetrievalHitRate # noqa: F401 +from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric # noqa: F401 +from paddlemetrics.retrieval.retrieval_ndcg import RetrievalNormalizedDCG # noqa: F401 +from paddlemetrics.retrieval.retrieval_precision import RetrievalPrecision # noqa: F401 +from paddlemetrics.retrieval.retrieval_r_precision import RetrievalRPrecision # noqa: F401 +from paddlemetrics.retrieval.retrieval_recall import RetrievalRecall # noqa: F401 diff --git a/EE/paddlemetric/src/paddlemetrics/retrieval/mean_average_precision.py b/EE/paddlemetric/src/paddlemetrics/retrieval/mean_average_precision.py new file mode 100644 index 000000000..ee7f9065b --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/retrieval/mean_average_precision.py @@ -0,0 +1,70 @@ +# Copyright The PyTorch Lightning team. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.retrieval.average_precision import retrieval_average_precision +from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric + + +class RetrievalMAP(RetrievalMetric): + """Computes `Mean Average Precision`_. + + Works with binary target data. Accepts float predictions from a model output. + + Forward accepts + + - ``preds`` (float tensor): ``(N, ...)`` + - ``target`` (long or bool tensor): ``(N, ...)`` + - ``indexes`` (long tensor): ``(N, ...)`` + + ``indexes``, ``preds`` and ``target`` must have the same dimension. + ``indexes`` indicate to which query a prediction belongs. + Predictions will be first grouped by ``indexes`` and then `MAP` will be computed as the mean + of the `Average Precisions` over each query. + + Args: + empty_target_action: + Specify what to do with queries that do not have at least a positive ``target``. Choose from: + + - ``'neg'``: those queries count as ``0.0`` (default) + - ``'pos'``: those queries count as ``1.0`` + - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned + - ``'error'``: raise a ``ValueError`` + + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects + the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When `None`, DDP + will be used to perform the allgather. default: None + + Example: + >>> from paddlemetrics import RetrievalMAP + >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) + >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) + >>> target = tensor([False, False, True, False, True, False, True]) + >>> rmap = RetrievalMAP() + >>> rmap(preds, target, indexes=indexes) + tensor(0.7917) + """ + + higher_is_better = True + + def _metric(self, preds: Tensor, target: Tensor) -> Tensor: + return retrieval_average_precision(preds, target) diff --git a/EE/paddlemetric/src/paddlemetrics/retrieval/mean_reciprocal_rank.py b/EE/paddlemetric/src/paddlemetrics/retrieval/mean_reciprocal_rank.py new file mode 100644 index 000000000..76f15bde8 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/retrieval/mean_reciprocal_rank.py @@ -0,0 +1,70 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.retrieval.reciprocal_rank import retrieval_reciprocal_rank +from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric + + +class RetrievalMRR(RetrievalMetric): + """Computes `Mean Reciprocal Rank`_. + + Works with binary target data. Accepts float predictions from a model output. + + Forward accepts + + - ``preds`` (float tensor): ``(N, ...)`` + - ``target`` (long or bool tensor): ``(N, ...)`` + - ``indexes`` (long tensor): ``(N, ...)`` + + ``indexes``, ``preds`` and ``target`` must have the same dimension. + ``indexes`` indicate to which query a prediction belongs. + Predictions will be first grouped by ``indexes`` and then `MRR` will be computed as the mean + of the `Reciprocal Rank` over each query. + + Args: + empty_target_action: + Specify what to do with queries that do not have at least a positive ``target``. Choose from: + + - ``'neg'``: those queries count as ``0.0`` (default) + - ``'pos'``: those queries count as ``1.0`` + - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned + - ``'error'``: raise a ``ValueError`` + + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects + the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When `None`, DDP + will be used to perform the allgather. default: None + + Example: + >>> from paddlemetrics import RetrievalMRR + >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) + >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) + >>> target = tensor([False, False, True, False, True, False, True]) + >>> mrr = RetrievalMRR() + >>> mrr(preds, target, indexes=indexes) + tensor(0.7500) + """ + + higher_is_better = True + + def _metric(self, preds: Tensor, target: Tensor) -> Tensor: + return retrieval_reciprocal_rank(preds, target) diff --git a/EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_fallout.py b/EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_fallout.py new file mode 100644 index 000000000..38b70f7c1 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_fallout.py @@ -0,0 +1,131 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
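+
+# Illustration of what fall-out measures for a single query (not this module's
+# implementation): among the top-k scored documents, the fraction of all negative
+# documents that is retrieved. For the first query of the class doctest below,
+#
+#   preds  = [0.2, 0.3, 0.5]
+#   target = [False, False, True]
+#
+# the top-2 scores select targets [True, False]: 1 of the 2 negative documents is
+# retrieved, giving a fall-out of 0.5 for that query.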
+from typing import Any, Callable, Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.retrieval.fall_out import retrieval_fall_out +from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric +from paddlemetrics.utilities.data import get_group_indexes + + +class RetrievalFallOut(RetrievalMetric): + """Computes `Fall-out`_. + + Works with binary target data. Accepts float predictions from a model output. + + Forward accepts: + + - ``preds`` (float tensor): ``(N, ...)`` + - ``target`` (long or bool tensor): ``(N, ...)`` + - ``indexes`` (long tensor): ``(N, ...)`` + + ``indexes``, ``preds`` and ``target`` must have the same dimension. + ``indexes`` indicate to which query a prediction belongs. + Predictions will be first grouped by ``indexes`` and then `Fall-out` will be computed as the mean + of the `Fall-out` over each query. + + Args: + empty_target_action: + Specify what to do with queries that do not have at least a negative ``target``. Choose from: + + - ``'neg'``: those queries count as ``0.0`` (default) + - ``'pos'``: those queries count as ``1.0`` + - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned + - ``'error'``: raise a ``ValueError`` + + k: consider only the top k elements for each query (default: None, which considers them all) + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects + the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When `None`, DDP + will be used to perform the allgather. default: None + + Raises: + ValueError: + If ``k`` parameter is not `None` or an integer larger than 0 + + Example: + >>> from paddlemetrics import RetrievalFallOut + >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) + >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) + >>> target = tensor([False, False, True, False, True, False, True]) + >>> fo = RetrievalFallOut(k=2) + >>> fo(preds, target, indexes=indexes) + tensor(0.5000) + """ + + higher_is_better = False + + def __init__( + self, + empty_target_action: str = "pos", + k: int = None, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + empty_target_action=empty_target_action, + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + if (k is not None) and not (isinstance(k, int) and k > 0): + raise ValueError("`k` has to be a positive integer or None") + self.k = k + + def compute(self) -> Tensor: + """First concat state `indexes`, `preds` and `target` since they were stored as lists. + + After that, compute list of groups that will help in keeping together predictions about the same query. Finally, + for each group compute the `_metric` if the number of negative targets is at least 1, otherwise behave as + specified by `self.empty_target_action`. 
+ """ + indexes = B.cat(self.indexes, dim=0) + preds = B.cat(self.preds, dim=0) + target = B.cat(self.target, dim=0) + + res = [] + groups = get_group_indexes(indexes) + + for group in groups: + mini_preds = preds[group] + mini_target = target[group] + + if not (1 - mini_target).sum(): + if self.empty_target_action == "error": + raise ValueError("`compute` method was provided with a query with no negative target.") + if self.empty_target_action == "pos": + res.append(tensor(1.0)) + elif self.empty_target_action == "neg": + res.append(tensor(0.0)) + else: + # ensure list containt only float tensors + res.append(self._metric(mini_preds, mini_target)) + + return B.stack([x.to(preds) for x in res]).mean() if res else tensor(0.0).to(preds) + + def _metric(self, preds: Tensor, target: Tensor) -> Tensor: + return retrieval_fall_out(preds, target, k=self.k) diff --git a/EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_hit_rate.py b/EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_hit_rate.py new file mode 100644 index 000000000..6a053b7b5 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_hit_rate.py @@ -0,0 +1,98 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional + +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.retrieval.hit_rate import retrieval_hit_rate +from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric + + +class RetrievalHitRate(RetrievalMetric): + """Computes `IR HitRate`. + + Works with binary target data. Accepts float predictions from a model output. + + Forward accepts: + + - ``preds`` (float tensor): ``(N, ...)`` + - ``target`` (long or bool tensor): ``(N, ...)`` + - ``indexes`` (long tensor): ``(N, ...)`` + + ``indexes``, ``preds`` and ``target`` must have the same dimension. + ``indexes`` indicate to which query a prediction belongs. + Predictions will be first grouped by ``indexes`` and then the `Hit Rate` will be computed as the mean + of the `Hit Rate` over each query. + + Args: + empty_target_action: + Specify what to do with queries that do not have at least a positive ``target``. Choose from: + + - ``'neg'``: those queries count as ``0.0`` (default) + - ``'pos'``: those queries count as ``1.0`` + - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned + - ``'error'``: raise a ``ValueError`` + + k: consider only the top k elements for each query (default: None, which considers them all) + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects + the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. 
When `None`, DDP + will be used to perform the allgather. default: None + + Raises: + ValueError: + If ``k`` parameter is not `None` or an integer larger than 0 + + Example: + >>> from paddlemetrics import RetrievalHitRate + >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) + >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) + >>> target = tensor([True, False, False, False, True, False, True]) + >>> hr2 = RetrievalHitRate(k=2) + >>> hr2(preds, target, indexes=indexes) + tensor(0.5000) + """ + + higher_is_better = True + + def __init__( + self, + empty_target_action: str = "neg", + k: int = None, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + empty_target_action=empty_target_action, + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + if (k is not None) and not (isinstance(k, int) and k > 0): + raise ValueError("`k` has to be a positive integer or None") + self.k = k + + def _metric(self, preds: Tensor, target: Tensor) -> Tensor: + return retrieval_hit_rate(preds, target, k=self.k) diff --git a/EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_metric.py b/EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_metric.py new file mode 100644 index 000000000..ab43876fa --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_metric.py @@ -0,0 +1,147 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from abc import ABC, abstractmethod +from typing import Any, Callable, List, Optional + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics import Metric +from paddlemetrics.utilities.checks import _check_retrieval_inputs +from paddlemetrics.utilities.data import get_group_indexes + +#: get_group_indexes is used to group predictions belonging to the same document + + +class RetrievalMetric(Metric): + """Works with binary target data. Accepts float predictions from a model output. + + Forward accepts + + - ``preds`` (float tensor): ``(N, ...)`` + - ``target`` (long or bool tensor): ``(N, ...)`` + - ``indexes`` (long tensor): ``(N, ...)`` + + `indexes`, `preds` and `target` must have the same dimension and will be flatten + to single dimension once provided. + + `indexes` indicate to which query a prediction belongs. + Predictions will be first grouped by indexes. Then the + real metric, defined by overriding the `_metric` method, + will be computed as the mean of the scores over each query. + + Args: + empty_target_action: + Specify what to do with queries that do not have at least a positive + or negative (depend on metric) target. 
Choose from: + + - ``'neg'``: those queries count as ``0.0`` (default) + - ``'pos'``: those queries count as ``1.0`` + - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned + - ``'error'``: raise a ``ValueError`` + + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects + the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When `None`, DDP + will be used to perform the allgather. default: None + """ + + indexes: List[Tensor] + preds: List[Tensor] + target: List[Tensor] + higher_is_better = True + + def __init__( + self, + empty_target_action: str = "neg", + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + self.allow_non_binary_target = False + + empty_target_action_options = ("error", "skip", "neg", "pos") + if empty_target_action not in empty_target_action_options: + raise ValueError(f"Argument `empty_target_action` received a wrong value `{empty_target_action}`.") + + self.empty_target_action = empty_target_action + + self.add_state("indexes", default=[], dist_reduce_fx=None) + self.add_state("preds", default=[], dist_reduce_fx=None) + self.add_state("target", default=[], dist_reduce_fx=None) + + def update(self, preds: Tensor, target: Tensor, indexes: Tensor) -> None: # type: ignore + """Check shape, check and convert dtypes, flatten and add to accumulators.""" + if indexes is None: + raise ValueError("Argument `indexes` cannot be None") + + indexes, preds, target = _check_retrieval_inputs( + indexes, preds, target, allow_non_binary_target=self.allow_non_binary_target + ) + + self.indexes.append(indexes) + self.preds.append(preds) + self.target.append(target) + + def compute(self) -> Tensor: + """First concat state ``indexes``, ``preds`` and ``target`` since they were stored as lists. + + After that, compute list of groups that will help in keeping together predictions about the same query. Finally, + for each group compute the ``_metric`` if the number of positive targets is at least 1, otherwise behave as + specified by ``self.empty_target_action``. + """ + indexes = B.cat(self.indexes, dim=0) + preds = B.cat(self.preds, dim=0) + target = B.cat(self.target, dim=0) + + res = [] + groups = get_group_indexes(indexes) + + for group in groups: + mini_preds = preds[group] + mini_target = target[group] + + if not mini_target.sum(): + if self.empty_target_action == "error": + raise ValueError("`compute` method was provided with a query with no positive target.") + if self.empty_target_action == "pos": + res.append(tensor(1.0)) + elif self.empty_target_action == "neg": + res.append(tensor(0.0)) + else: + # ensure list contains only float tensors + res.append(self._metric(mini_preds, mini_target)) + + return B.stack([x.to(preds) for x in res]).mean() if res else tensor(0.0).to(preds) + + @abstractmethod + def _metric(self, preds: Tensor, target: Tensor) -> Tensor: + """Compute a metric over a predictions and target of a single group. 
+ + This method should be overridden by subclasses. + """ diff --git a/EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_ndcg.py b/EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_ndcg.py new file mode 100644 index 000000000..bb0740cac --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_ndcg.py @@ -0,0 +1,99 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional + +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.retrieval.ndcg import retrieval_normalized_dcg +from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric + + +class RetrievalNormalizedDCG(RetrievalMetric): + """Computes `Normalized Discounted Cumulative Gain`_. + + Works with binary or positive integer target data. Accepts float predictions from a model output. + + Forward accepts: + + - ``preds`` (float tensor): ``(N, ...)`` + - ``target`` (long, int, bool or float tensor): ``(N, ...)`` + - ``indexes`` (long tensor): ``(N, ...)`` + + ``indexes``, ``preds`` and ``target`` must have the same dimension. + ``indexes`` indicate to which query a prediction belongs. + Predictions will be first grouped by ``indexes`` and then `Normalized Discounted Cumulative Gain` + will be computed as the mean of the `Normalized Discounted Cumulative Gain` over each query. + + Args: + empty_target_action: + Specify what to do with queries that do not have at least a positive ``target``. Choose from: + + - ``'neg'``: those queries count as ``0.0`` (default) + - ``'pos'``: those queries count as ``1.0`` + - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned + - ``'error'``: raise a ``ValueError`` + + k: consider only the top k elements for each query (default: None, which considers them all) + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects + the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When `None`, DDP + will be used to perform the allgather. 
default: None + + Raises: + ValueError: + If ``k`` parameter is not `None` or an integer larger than 0 + + Example: + >>> from paddlemetrics import RetrievalNormalizedDCG + >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) + >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) + >>> target = tensor([False, False, True, False, True, False, True]) + >>> ndcg = RetrievalNormalizedDCG() + >>> ndcg(preds, target, indexes=indexes) + tensor(0.8467) + """ + + higher_is_better = True + + def __init__( + self, + empty_target_action: str = "neg", + k: int = None, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + empty_target_action=empty_target_action, + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + if (k is not None) and not (isinstance(k, int) and k > 0): + raise ValueError("`k` has to be a positive integer or None") + self.k = k + self.allow_non_binary_target = True + + def _metric(self, preds: Tensor, target: Tensor) -> Tensor: + return retrieval_normalized_dcg(preds, target, k=self.k) diff --git a/EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_precision.py b/EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_precision.py new file mode 100644 index 000000000..f0f983a89 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_precision.py @@ -0,0 +1,98 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional + +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.retrieval.precision import retrieval_precision +from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric + + +class RetrievalPrecision(RetrievalMetric): + """Computes `IR Precision`_. + + Works with binary target data. Accepts float predictions from a model output. + + Forward accepts: + + - ``preds`` (float tensor): ``(N, ...)`` + - ``target`` (long or bool tensor): ``(N, ...)`` + - ``indexes`` (long tensor): ``(N, ...)`` + + ``indexes``, ``preds`` and ``target`` must have the same dimension. + ``indexes`` indicate to which query a prediction belongs. + Predictions will be first grouped by ``indexes`` and then `Precision` will be computed as the mean + of the `Precision` over each query. + + Args: + empty_target_action: + Specify what to do with queries that do not have at least a positive ``target``. Choose from: + + - ``'neg'``: those queries count as ``0.0`` (default) + - ``'pos'``: those queries count as ``1.0`` + - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned + - ``'error'``: raise a ``ValueError`` + + k: consider only the top k elements for each query (default: None, which considers them all) + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. 
default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects + the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When `None`, DDP + will be used to perform the allgather. default: None + + Raises: + ValueError: + If ``k`` parameter is not `None` or an integer larger than 0 + + Example: + >>> from paddlemetrics import RetrievalPrecision + >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) + >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) + >>> target = tensor([False, False, True, False, True, False, True]) + >>> p2 = RetrievalPrecision(k=2) + >>> p2(preds, target, indexes=indexes) + tensor(0.5000) + """ + + higher_is_better = True + + def __init__( + self, + empty_target_action: str = "neg", + k: int = None, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ) -> None: + super().__init__( + empty_target_action=empty_target_action, + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + if (k is not None) and not (isinstance(k, int) and k > 0): + raise ValueError("`k` has to be a positive integer or None") + self.k = k + + def _metric(self, preds: Tensor, target: Tensor) -> Tensor: + return retrieval_precision(preds, target, k=self.k) diff --git a/EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_r_precision.py b/EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_r_precision.py new file mode 100644 index 000000000..75373532a --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_r_precision.py @@ -0,0 +1,70 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.retrieval.r_precision import retrieval_r_precision +from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric + + +class RetrievalRPrecision(RetrievalMetric): + """Computes `IR R-Precision`_. + + Works with binary target data. Accepts float predictions from a model output. + + Forward accepts: + + - ``preds`` (float tensor): ``(N, ...)`` + - ``target`` (long or bool tensor): ``(N, ...)`` + - ``indexes`` (long tensor): ``(N, ...)`` + + ``indexes``, ``preds`` and ``target`` must have the same dimension. + ``indexes`` indicate to which query a prediction belongs. + Predictions will be first grouped by ``indexes`` and then `R-Precision` will be computed as the mean + of the `R-Precision` over each query. + + Args: + empty_target_action: + Specify what to do with queries that do not have at least a positive ``target``. 
Choose from: + + - ``'neg'``: those queries count as ``0.0`` (default) + - ``'pos'``: those queries count as ``1.0`` + - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned + - ``'error'``: raise a ``ValueError`` + + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects + the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When `None`, DDP + will be used to perform the allgather. default: None + + Example: + >>> from paddlemetrics import RetrievalRPrecision + >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) + >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) + >>> target = tensor([False, False, True, False, True, False, True]) + >>> p2 = RetrievalRPrecision() + >>> p2(preds, target, indexes=indexes) + tensor(0.7500) + """ + + higher_is_better = True + + def _metric(self, preds: Tensor, target: Tensor) -> Tensor: + return retrieval_r_precision(preds, target) diff --git a/EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_recall.py b/EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_recall.py new file mode 100644 index 000000000..26ace51c2 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_recall.py @@ -0,0 +1,98 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional + +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.retrieval.recall import retrieval_recall +from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric + + +class RetrievalRecall(RetrievalMetric): + """Computes `IR Recall`_. + + Works with binary target data. Accepts float predictions from a model output. + + Forward accepts: + + - ``preds`` (float tensor): ``(N, ...)`` + - ``target`` (long or bool tensor): ``(N, ...)`` + - ``indexes`` (long tensor): ``(N, ...)`` + + ``indexes``, ``preds`` and ``target`` must have the same dimension. + ``indexes`` indicate to which query a prediction belongs. + Predictions will be first grouped by ``indexes`` and then `Recall` will be computed as the mean + of the `Recall` over each query. + + Args: + empty_target_action: + Specify what to do with queries that do not have at least a positive ``target``. Choose from: + + - ``'neg'``: those queries count as ``0.0`` (default) + - ``'pos'``: those queries count as ``1.0`` + - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned + - ``'error'``: raise a ``ValueError`` + + k: consider only the top k elements for each query (default: None, which considers them all) + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. 
default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step. default: False
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects
+            the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When `None`, DDP
+            will be used to perform the allgather. default: None
+
+    Raises:
+        ValueError:
+            If ``k`` is not ``None`` and not an integer larger than 0.
+
+    Example:
+        >>> from paddlemetrics import RetrievalRecall
+        >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1])
+        >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2])
+        >>> target = tensor([False, False, True, False, True, False, True])
+        >>> r2 = RetrievalRecall(k=2)
+        >>> r2(preds, target, indexes=indexes)
+        tensor(0.7500)
+    """
+
+    higher_is_better = True
+
+    def __init__(
+        self,
+        empty_target_action: str = "neg",
+        k: Optional[int] = None,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ) -> None:
+        super().__init__(
+            empty_target_action=empty_target_action,
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+
+        if (k is not None) and not (isinstance(k, int) and k > 0):
+            raise ValueError("`k` has to be a positive integer or None")
+        self.k = k
+
+    def _metric(self, preds: Tensor, target: Tensor) -> Tensor:
+        return retrieval_recall(preds, target, k=self.k)
diff --git a/EE/paddlemetric/src/paddlemetrics/setup_tools.py b/EE/paddlemetric/src/paddlemetrics/setup_tools.py
new file mode 100644
index 000000000..e3233cef9
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/setup_tools.py
@@ -0,0 +1,74 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import re
+from typing import List
+
+_PROJECT_ROOT = os.path.dirname(os.path.dirname(__file__))
+
+
+def _load_requirements(path_dir: str, file_name: str = "requirements.txt", comment_char: str = "#") -> List[str]:
+    """Load requirements from a file.
+
+    >>> _load_requirements(_PROJECT_ROOT)  # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
+    ['numpy...', 'B...']
+    """
+    with open(os.path.join(path_dir, file_name)) as file:
+        lines = [ln.strip() for ln in file.readlines()]
+    reqs = []
+    for ln in lines:
+        # filter all comments
+        if comment_char in ln:
+            ln = ln[: ln.index(comment_char)].strip()
+        # skip directly installed dependencies
+        if ln.startswith("http"):
+            continue
+        if ln:  # if requirement is not empty
+            reqs.append(ln)
+    return reqs
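+
+
+# Hypothetical usage sketch (not executed anywhere in the package): for a
+# requirements.txt containing
+#
+#   numpy>=1.20  # linear algebra
+#   http://example.com/pkg.zip
+#
+# _load_requirements would return ['numpy>=1.20'], stripping the inline comment
+# and skipping the direct URL dependency.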
+
+
+def _load_readme_description(path_dir: str, homepage: str, version: str) -> str:
+    """Load readme as description.
+
+    >>> _load_readme_description(_PROJECT_ROOT, "", "")  # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
+    '<div align="center">...'
+    """
+    path_readme = os.path.join(path_dir, "README.md")
+    with open(path_readme, encoding="utf-8") as fp:
+        text = fp.read()
+
+    # https://github.com/PyTorchLightning/paddlemetrics/raw/master/docs/source/_static/images/lightning_module/pt_to_pl.png
+    github_source_url = os.path.join(homepage, "raw", version)
+    # replace relative repository path with absolute link to the release
+    # do not replace all "docs" as in the readme we refer to some other sources with a particular path to docs
+    text = text.replace("docs/source/_static/", f"{os.path.join(github_source_url, 'docs/source/_static/')}")
+
+    # readthedocs badge
+    text = text.replace("badge/?version=stable", f"badge/?version={version}")
+    text = text.replace("paddlemetrics.readthedocs.io/en/stable/", f"paddlemetrics.readthedocs.io/en/{version}")
+    # codecov badge
+    text = text.replace("/branch/master/graph/badge.svg", f"/release/{version}/graph/badge.svg")
+    # replace github badges for release ones
+    text = text.replace("badge.svg?branch=master&event=push", f"badge.svg?tag={version}")
+    # Azure...
+    text = text.replace("?branchName=master", f"?branchName=refs%2Ftags%2F{version}")
+    text = re.sub(r"\?definitionId=\d+&branchName=master", f"?definitionId=2&branchName=refs%2Ftags%2F{version}", text)
+
+    # markers around the README section that is excluded from the PyPI description
+    skip_begin = r"<!-- following section will be skipped from PyPI description -->"
+    skip_end = r"<!-- end skipping PyPI description -->"
+    # todo: wrap content as commented description
+    text = re.sub(rf"{skip_begin}.+?{skip_end}", "", text, flags=re.IGNORECASE + re.DOTALL)
+
+    return text
diff --git a/EE/paddlemetric/src/paddlemetrics/text/__init__.py b/EE/paddlemetric/src/paddlemetrics/text/__init__.py
new file mode 100644
index 000000000..782ca2955
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/text/__init__.py
@@ -0,0 +1,18 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# from paddlemetrics.text.bert import BERTScore  # noqa: F401
+from paddlemetrics.text.bleu import BLEUScore  # noqa: F401
+from paddlemetrics.text.rouge import ROUGEScore  # noqa: F401
+from paddlemetrics.text.sacre_bleu import SacreBLEUScore  # noqa: F401
+from paddlemetrics.text.wer import WER  # noqa: F401
diff --git a/EE/paddlemetric/src/paddlemetrics/text/bert.py b/EE/paddlemetric/src/paddlemetrics/text/bert.py
new file mode 100644
index 000000000..0f602f30a
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/text/bert.py
@@ -0,0 +1,251 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
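+
+# Conceptual sketch of the BERTScore computation (our illustration, assuming the
+# torch-like wrappers `B.matmul` and `Tensor.max` behave as in PyTorch; the actual
+# logic lives in `paddlemetrics.functional.bert_score`):
+#
+#   sim = B.matmul(cand_emb, ref_emb.T)       # cosine similarities of L2-normalized
+#                                             # candidate/reference token embeddings
+#   precision = sim.max(dim=1).values.mean()  # best reference match per candidate token
+#   recall = sim.max(dim=0).values.mean()     # best candidate match per reference token
+#   f1 = 2 * precision * recall / (precision + recall)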
+
+import warnings
+from typing import Any, Callable, Dict, List, Optional, Union
+
+import paddleext.torchapi as B
+
+from paddlemetrics.functional import bert_score
+from paddlemetrics.functional.text.bert import _preprocess_text
+from paddlemetrics.metric import Metric
+from paddlemetrics.utilities.imports import _TRANSFORMERS_AVAILABLE
+
+if _TRANSFORMERS_AVAILABLE:
+    from transformers import AutoTokenizer
+
+
+# Default model recommended in the original implementation.
+_DEFAULT_MODEL = "roberta-large"
+
+
+def _concatenate(d: Dict[str, List[B.Tensor]]) -> Dict[str, B.Tensor]:
+    """Concatenate list of tensors within a given dictionary."""
+    output_dict: Dict[str, B.Tensor] = {}
+    for k, v in d.items():
+        output_dict[k] = B.cat(v)
+    return output_dict
+
+
+class BERTScore(Metric):
+    """`Bert_score Evaluating Text Generation`_ leverages the pre-trained contextual embeddings from BERT and
+    matches words in candidate and reference sentences by cosine similarity. It has been shown to correlate with
+    human judgment on sentence-level and system-level evaluation. Moreover, BERTScore computes precision, recall,
+    and F1 measure, which can be useful for evaluating different language generation tasks.
+
+    This implementation follows the original implementation from `BERT_score`_.
+
+    Args:
+        predictions:
+            An iterable of predicted sentences.
+        references:
+            An iterable of target sentences.
+        model_type:
+            A name or a model path used to load `transformers` pretrained model.
+        num_layers:
+            A layer of representation to use.
+        all_layers:
+            An indication of whether the representation from all model's layers should be used.
+            If `all_layers = True`, the argument `num_layers` is ignored.
+        model:
+            A user's own model. Must be an instance of `B.nn.Module`.
+        user_tokenizer:
+            A user's own tokenizer used with the own model. This must be an instance with the `__call__` method.
+            This method must take an iterable of sentences (`List[str]`) and must return a python dictionary
+            containing `"input_ids"` and `"attention_mask"` represented by `B.Tensor`. It is up to the user's model
+            whether `"input_ids"` is a `B.Tensor` of input ids or embedding vectors.
+            This tokenizer must prepend an equivalent of `[CLS]` token and append an equivalent of `[SEP]` token
+            as the `transformers` tokenizer does.
+        user_forward_fn:
+            A user's own forward function used in a combination with `user_model`. This function must take `user_model`
+            and a python dictionary containing `"input_ids"` and `"attention_mask"` represented by `B.Tensor`
+            as an input and return the model's output represented by the single `B.Tensor`.
+        verbose:
+            An indication of whether a progress bar is to be displayed during the embeddings calculation.
+        idf:
+            An indication of whether normalization using inverse document frequencies should be used.
+        device:
+            A device to be used for calculation.
+        max_length:
+            A maximum length of input sequences. Sequences longer than `max_length` are to be trimmed.
+        batch_size:
+            A batch size used for model processing.
+        num_threads:
+            A number of threads to use for a dataloader.
+        return_hash:
+            An indication of whether the corresponding `hash_code` should be returned.
+        lang:
+            A language of input sentences.
+        rescale_with_baseline:
+            An indication of whether BERTScore should be rescaled with a pre-computed baseline.
+            When a pretrained model from `transformers` is used, the corresponding baseline is downloaded
+            from the original `bert-score` package from `BERT_score`_ if available.
+ In other cases, please specify a path to the baseline csv/tsv file, which must follow the formatting + of the files from `BERT_score`_. + baseline_path: + A path to the user's own local csv/tsv file with the baseline scale. + baseline_url: + A url path to the user's own csv/tsv file with the baseline scale. + compute_on_step: + Forward only calls ``update()`` and return None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. default: False + process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When ``None``, DDP + will be used to perform the allgather + + Returns: + Python dictionary containing the keys `precision`, `recall` and `f1` with corresponding values. + + Example: + >>> predictions = ["hello there", "general kenobi"] + >>> references = ["hello there", "master kenobi"] + >>> bertscore = BERTScore() + >>> bertscore.update(predictions=predictions,references=references) + >>> bertscore.compute() # doctest: +SKIP + {'precision': [0.99..., 0.99...], + 'recall': [0.99..., 0.99...], + 'f1': [0.99..., 0.99...]} + """ + + higher_is_better = True + + def __init__( + self, + model_name_or_path: Optional[str] = None, + num_layers: Optional[int] = None, + all_layers: bool = False, + model: Optional[B.nn.Module] = None, + user_tokenizer: Optional[Any] = None, + user_forward_fn: Callable[[B.nn.Module, Dict[str, B.Tensor]], B.Tensor] = None, + verbose: bool = False, + idf: bool = False, + device: Optional[Union[str, B.device]] = None, + max_length: int = 512, + batch_size: int = 64, + num_threads: int = 4, + return_hash: bool = False, + lang: str = "en", + rescale_with_baseline: bool = False, + baseline_path: Optional[str] = None, + baseline_url: Optional[str] = None, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ): + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + self.model_name_or_path = model_name_or_path + self.num_layers = num_layers + self.all_layers = all_layers + self.model = model + self.user_forward_fn = user_forward_fn + self.verbose = verbose + self.idf = idf + self.embedding_device = device + self.max_length = max_length + self.batch_size = batch_size + self.num_threads = num_threads + self.return_hash = return_hash + self.lang = lang + self.rescale_with_baseline = rescale_with_baseline + self.baseline_path = baseline_path + self.baseline_url = baseline_url + self.predictions: Dict[str, List[B.Tensor]] = {"input_ids": [], "attention_mask": []} + self.references: Dict[str, List[B.Tensor]] = {"input_ids": [], "attention_mask": []} + + if user_tokenizer: + self.tokenizer = user_tokenizer + self.user_tokenizer = True + else: + if not _TRANSFORMERS_AVAILABLE: + raise ValueError( + "`BERTScore` metric with default tokenizers requires `transformers` package be installed. " + "Either install with `pip install transformers>=4.0` or `pip install paddlemetrics[text]`" + ) + if not model_name_or_path: + model_name_or_path = _DEFAULT_MODEL + warnings.warn( + "The argument `model_name_or_path` was not specified while it is required when default " + " `transformers` model are used." 
+ f"It is, therefore, used the default recommended model - {_DEFAULT_MODEL}." + ) + self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) + self.user_tokenizer = False + + def update(self, predictions: List[str], references: List[str]) -> None: # type: ignore + """Store predictions/references for computing BERT scores. It is necessary to store sentences in a + tokenized form to ensure the DDP mode working. + + Args: + predictions: + An iterable of predicted sentences. + references: + An iterable of predicted sentences. + """ + predictions_dict = _preprocess_text( + predictions, + self.tokenizer, + self.max_length, + truncation=False, + sort_according_length=False, + own_tokenizer=self.user_tokenizer, + ) + references_dict = _preprocess_text( + references, + self.tokenizer, + self.max_length, + truncation=False, + sort_according_length=False, + own_tokenizer=self.user_tokenizer, + ) + self.predictions["input_ids"].append(predictions_dict["input_ids"]) + self.predictions["attention_mask"].append(predictions_dict["attention_mask"]) + self.references["input_ids"].append(references_dict["input_ids"]) + self.references["attention_mask"].append(references_dict["attention_mask"]) + + def compute(self) -> Dict[str, Union[List[float], str]]: + """Calculate BERT scores. + + Return: + Python dictionary containing the keys `precision`, `recall` and `f1` with corresponding values. + """ + return bert_score( + predictions=_concatenate(self.predictions), + references=_concatenate(self.references), + model_name_or_path=self.model_name_or_path, + num_layers=self.num_layers, + all_layers=self.all_layers, + model=self.model, + user_tokenizer=self.tokenizer if self.user_tokenizer else None, + user_forward_fn=self.user_forward_fn, + verbose=self.verbose, + idf=self.idf, + device=self.embedding_device, + max_length=self.max_length, + batch_size=self.batch_size, + num_threads=self.num_threads, + return_hash=self.return_hash, + lang=self.lang, + rescale_with_baseline=self.rescale_with_baseline, + baseline_path=self.baseline_path, + baseline_url=self.baseline_url, + ) diff --git a/EE/paddlemetric/src/paddlemetrics/text/bleu.py b/EE/paddlemetric/src/paddlemetrics/text/bleu.py new file mode 100644 index 000000000..46937d98f --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/text/bleu.py @@ -0,0 +1,120 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# referenced from +# Library Name: torchtext +# Authors: torchtext authors and @sluks +# Date: 2020-07-18 +# Link: https://pyB.org/text/_modules/torchtext/data/metrics.html#bleu_score +from typing import Any, Callable, Optional, Sequence + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics import Metric +from paddlemetrics.functional.text.bleu import _bleu_score_compute, _bleu_score_update + + +class BLEUScore(Metric): + """Calculate `BLEU score`_ of machine translated text with one or more references. 
+ + Args: + n_gram: + Gram value ranged from 1 to 4 (Default 4) + smooth: + Whether or not to apply smoothing – see [2] + compute_on_step: + Forward only calls ``update()`` and returns None if this is set to False. default: True + dist_sync_on_step: + Synchronize metric state across processes at each ``forward()`` + before returning the value at the step. + process_group: + Specify the process group on which synchronization is called. default: None (which selects the entire world) + dist_sync_fn: + Callback that performs the allgather operation on the metric state. When `None`, DDP + will be used to perform the allgather. + + Example: + >>> translate_corpus = ['the cat is on the mat'.split()] + >>> reference_corpus = [['there is a cat on the mat'.split(), 'a cat is on the mat'.split()]] + >>> metric = BLEUScore() + >>> metric(reference_corpus, translate_corpus) + tensor(0.7598) + + References: + [1] BLEU: a Method for Automatic Evaluation of Machine Translation by Papineni, + Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu `BLEU`_ + + [2] Automatic Evaluation of Machine Translation Quality Using Longest Common Subsequence + and Skip-Bigram Statistics by Chin-Yew Lin and Franz Josef Och `Machine Translation Evolution`_ + """ + + is_differentiable = False + higher_is_better = True + trans_len: Tensor + ref_len: Tensor + numerator: Tensor + denominator: Tensor + + def __init__( + self, + n_gram: int = 4, + smooth: bool = False, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Optional[Callable] = None, + ): + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + self.n_gram = n_gram + self.smooth = smooth + + self.add_state("trans_len", tensor(0, dtype=B.float), dist_reduce_fx="sum") + self.add_state("ref_len", tensor(0, dtype=B.float), dist_reduce_fx="sum") + self.add_state("numerator", B.zeros(self.n_gram), dist_reduce_fx="sum") + self.add_state("denominator", B.zeros(self.n_gram), dist_reduce_fx="sum") + + def update( # type: ignore + self, reference_corpus: Sequence[Sequence[Sequence[str]]], translate_corpus: Sequence[Sequence[str]] + ) -> None: + """Compute Precision Scores. + + Args: + reference_corpus: An iterable of iterables of reference corpus + translate_corpus: An iterable of machine translated corpus + """ + self.trans_len, self.ref_len = _bleu_score_update( + reference_corpus, + translate_corpus, + self.numerator, + self.denominator, + self.trans_len, + self.ref_len, + self.n_gram, + ) + + def compute(self) -> Tensor: + """Calculate BLEU score. + + Return: + Tensor with BLEU Score + """ + return _bleu_score_compute( + self.trans_len, self.ref_len, self.numerator, self.denominator, self.n_gram, self.smooth + ) diff --git a/EE/paddlemetric/src/paddlemetrics/text/rouge.py b/EE/paddlemetric/src/paddlemetrics/text/rouge.py new file mode 100644 index 000000000..254f366d7 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/text/rouge.py @@ -0,0 +1,171 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
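Because `reference_corpus` is triply nested, a shape-focused sketch of the `BLEUScore` metric defined above may help; the values are taken from its docstring example, so no new behaviour is assumed.

```python
from paddlemetrics.text.bleu import BLEUScore

# One pre-tokenized translation with two references for it.
translate_corpus = ["the cat is on the mat".split()]
reference_corpus = [
    ["there is a cat on the mat".split(), "a cat is on the mat".split()]
]

metric = BLEUScore(n_gram=4)
metric.update(reference_corpus, translate_corpus)  # note: references come first
print(metric.compute())  # tensor(0.7598) per the docstring example
```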
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import warnings
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+from paddleext.torchapi import Tensor
+
+from paddlemetrics import Metric
+from paddlemetrics.functional.text.rouge import ALLOWED_ROUGE_KEYS, _rouge_score_compute, _rouge_score_update
+from paddlemetrics.utilities.imports import _NLTK_AVAILABLE
+
+
+class ROUGEScore(Metric):
+    """`Calculate Rouge Score`_, used for automatic summarization. This implementation should imitate the
+    behaviour of the `rouge-score` package, `Python ROUGE Implementation`_.
+
+    Args:
+        newline_sep:
+            Whether a new line separates the inputs.
+            This argument is no longer in use; it is deprecated in v0.6 and will be removed in v0.7.
+        use_stemmer:
+            Use Porter stemmer to strip word suffixes to improve matching.
+        rouge_keys:
+            A list of rouge types to calculate.
+            Keys that are allowed are ``rougeL``, ``rougeLsum``, and ``rouge1`` through ``rouge9``.
+        decimal_places:
+            The number of digits to round the computed values to.
+            This argument is no longer in use; it is deprecated in v0.6 and will be removed in v0.7.
+        compute_on_step:
+            Forward only calls ``update()`` and returns None if this is set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step.
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When `None`, DDP
+            will be used to perform the allgather.
+
+    Example:
+
+        >>> targets = "Is your name John".split()
+        >>> preds = "My name is John".split()
+        >>> rouge = ROUGEScore()   # doctest: +SKIP
+        >>> from pprint import pprint
+        >>> pprint(rouge(preds, targets))  # doctest: +NORMALIZE_WHITESPACE +SKIP
+        {'rouge1_fmeasure': 0.25,
+         'rouge1_precision': 0.25,
+         'rouge1_recall': 0.25,
+         'rouge2_fmeasure': 0.0,
+         'rouge2_precision': 0.0,
+         'rouge2_recall': 0.0,
+         'rougeL_fmeasure': 0.25,
+         'rougeL_precision': 0.25,
+         'rougeL_recall': 0.25,
+         'rougeLsum_fmeasure': 0.25,
+         'rougeLsum_precision': 0.25,
+         'rougeLsum_recall': 0.25}
+
+    Raises:
+        ValueError:
+            If the python package ``nltk`` is not installed.
+        ValueError:
+            If any of the ``rouge_keys`` does not belong to the allowed set of keys.
+
+    References:
+        [1] ROUGE: A Package for Automatic Evaluation of Summaries by Chin-Yew Lin `Rouge Detail`_
+    """
+
+    higher_is_better = True
+
+    def __init__(
+        self,
+        newline_sep: Optional[bool] = None,  # remove in v0.7
+        use_stemmer: bool = False,
+        rouge_keys: Union[str, Tuple[str, ...]] = ("rouge1", "rouge2", "rougeL", "rougeLsum"),  # type: ignore
+        decimal_places: Optional[bool] = None,  # remove in v0.7
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Optional[Callable] = None,
+    ):
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+        if newline_sep is not None:
+            warnings.warn("Argument `newline_sep` is deprecated in v0.6 and will be removed in v0.7")
+        if decimal_places is not None:
+            warnings.warn("Argument `decimal_places` is deprecated in v0.6 and will be removed in v0.7")
+
+        if use_stemmer or "rougeLsum" in rouge_keys:
+            if not _NLTK_AVAILABLE:
+                raise ValueError("Stemmer and/or `rougeLsum` requires that nltk is installed. Use `pip install nltk`.")
+            import nltk
+
+        if not isinstance(rouge_keys, tuple):
+            rouge_keys = tuple([rouge_keys])
+        for key in rouge_keys:
+            if key not in ALLOWED_ROUGE_KEYS:
+                raise ValueError(f"Got unknown rouge key {key}. Expected to be one of {ALLOWED_ROUGE_KEYS}")
+
+        self.rouge_keys = rouge_keys
+        self.rouge_keys_values = [ALLOWED_ROUGE_KEYS[key] for key in rouge_keys]
+        self.stemmer = nltk.stem.porter.PorterStemmer() if use_stemmer else None
+
+        # Adding states dynamically to prevent IndexError during the sync function, as some lists can be empty.
+        for rouge_key in self.rouge_keys:
+            for score in ["fmeasure", "precision", "recall"]:
+                self.add_state(f"{rouge_key}_{score}", [], dist_reduce_fx=None)
+
+    def update(self, preds: Union[str, List[str]], targets: Union[str, List[str]]) -> None:  # type: ignore
+        """Compute rouge scores.
+
+        Args:
+            preds: An iterable of predicted sentences.
+            targets: An iterable of target sentences.
+        """
+
+        if isinstance(preds, str):
+            preds = [preds]
+
+        if isinstance(targets, str):
+            targets = [targets]
+
+        output: Dict[Union[int, str], List[Dict[str, Tensor]]] = _rouge_score_update(
+            preds, targets, self.rouge_keys_values, stemmer=self.stemmer
+        )
+        for rouge_key, metrics in output.items():
+            for metric in metrics:
+                for type, value in metric.items():
+                    getattr(self, f"rouge{rouge_key}_{type}").append(value.to(self.device))
+
+    def compute(self) -> Dict[str, Tensor]:
+        """Calculate (Aggregate and provide confidence intervals) ROUGE score.
+
+        Return:
+            Python dictionary of rouge scores for each input rouge key.
+        """
+        update_output = {}
+        for rouge_key in self.rouge_keys_values:
+            for type in ["fmeasure", "precision", "recall"]:
+                update_output[f"rouge{rouge_key}_{type}"] = getattr(self, f"rouge{rouge_key}_{type}")
+
+        return _rouge_score_compute(update_output)
+
+    def __hash__(self) -> int:
+        # override to hash list objects.
+        # this works around a bug in the upstream pytorch release.
+        hash_vals = [self.__class__.__name__]
+
+        for key in self._defaults:
+            value = getattr(self, key)
+            if isinstance(value, list):
+                value = tuple(value)
+            hash_vals.append(value)
+
+        return hash(tuple(hash_vals))
diff --git a/EE/paddlemetric/src/paddlemetrics/text/sacre_bleu.py b/EE/paddlemetric/src/paddlemetrics/text/sacre_bleu.py
new file mode 100644
index 000000000..4f4d99e8f
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/text/sacre_bleu.py
@@ -0,0 +1,134 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# referenced from
+# Library Name: torchtext
+# Authors: torchtext authors and @sluks
+# Date: 2020-07-18
+# Link: https://pytorch.org/text/_modules/torchtext/data/metrics.html#bleu_score
+from typing import Any, Callable, Optional, Sequence
+
+from typing_extensions import Literal
+
+from paddlemetrics.functional.text.bleu import _bleu_score_update
+from paddlemetrics.functional.text.sacre_bleu import _SacreBLEUTokenizer
+from paddlemetrics.text.bleu import BLEUScore
+from paddlemetrics.utilities.imports import _REGEX_AVAILABLE
+
+AVAILABLE_TOKENIZERS = ("none", "13a", "zh", "intl", "char")
+
+
+class SacreBLEUScore(BLEUScore):
+    """Calculate `BLEU score`_ [1] of machine translated text with one or more references. This implementation
+    follows the behaviour of the SacreBLEU [2] implementation from https://github.com/mjpost/sacrebleu.
+
+    The SacreBLEU implementation differs from the NLTK BLEU implementation in tokenization techniques.
+
+    Args:
+        n_gram:
+            Gram value ranged from 1 to 4 (Default 4)
+        smooth:
+            Whether or not to apply smoothing – see [2]
+        tokenize:
+            Tokenization technique to be used. (Default '13a')
+            Supported tokenization: ['none', '13a', 'zh', 'intl', 'char']
+        lowercase:
+            If ``True``, BLEU score over lowercased text is calculated.
+        compute_on_step:
+            Forward only calls ``update()`` and returns None if this is set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step.
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When `None`, DDP
+            will be used to perform the allgather.
+
+    Raises:
+        ValueError:
+            If ``tokenize`` is not one of 'none', '13a', 'zh', 'intl' or 'char'
+        ValueError:
+            If ``tokenize`` is set to 'intl' and `regex` is not installed
+
+
+    Example:
+        >>> translate_corpus = ['the cat is on the mat']
+        >>> reference_corpus = [['there is a cat on the mat', 'a cat is on the mat']]
+        >>> metric = SacreBLEUScore()
+        >>> metric(reference_corpus, translate_corpus)
+        tensor(0.7598)
+
+    References:
+        [1] BLEU: a Method for Automatic Evaluation of Machine Translation by Papineni,
+        Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu `BLEU`_
+
+        [2] A Call for Clarity in Reporting BLEU Scores by Matt Post.
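A small usage sketch for the `ROUGEScore` metric completed above, restricting `rouge_keys` so that `nltk` is not required; key names follow the class docstring.

```python
from paddlemetrics.text.rouge import ROUGEScore

rouge = ROUGEScore(rouge_keys=("rouge1", "rouge2", "rougeL"))
rouge.update("My name is John", "Is your name John")  # plain strings are wrapped into lists
scores = rouge.compute()  # dict keyed like "rouge1_fmeasure", "rouge1_precision", ...
print(scores["rouge1_fmeasure"])
```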
+ + [3] Automatic Evaluation of Machine Translation Quality Using Longest Common Subsequence + and Skip-Bigram Statistics by Chin-Yew Lin and Franz Josef Och `Machine Translation Evolution`_ + """ + + def __init__( + self, + n_gram: int = 4, + smooth: bool = False, + tokenize: Literal["none", "13a", "zh", "intl", "char"] = "13a", + lowercase: bool = False, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Optional[Callable] = None, + ): + super().__init__( + n_gram=n_gram, + smooth=smooth, + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + if tokenize not in AVAILABLE_TOKENIZERS: + raise ValueError(f"Argument `tokenize` expected to be one of {AVAILABLE_TOKENIZERS} but got {tokenize}.") + + if tokenize == "intl" and not _REGEX_AVAILABLE: + raise ValueError( + "`'intl'` tokenization requires `regex` installed. Use `pip install regex` or `pip install " + "paddlemetrics[text]`." + ) + self.tokenizer = _SacreBLEUTokenizer(tokenize, lowercase) + + def update( # type: ignore + self, reference_corpus: Sequence[Sequence[str]], translate_corpus: Sequence[str] + ) -> None: + """Compute Precision Scores. + + Args: + reference_corpus: An iterable of iterables of reference corpus + translate_corpus: An iterable of machine translated corpus + """ + reference_corpus_: Sequence[Sequence[Sequence[str]]] = [ + [self.tokenizer(line) for line in reference] for reference in reference_corpus + ] + translate_corpus_: Sequence[Sequence[str]] = [self.tokenizer(line) for line in translate_corpus] + + self.trans_len, self.ref_len = _bleu_score_update( + reference_corpus_, + translate_corpus_, + self.numerator, + self.denominator, + self.trans_len, + self.ref_len, + self.n_gram, + ) diff --git a/EE/paddlemetric/src/paddlemetrics/text/wer.py b/EE/paddlemetric/src/paddlemetrics/text/wer.py new file mode 100644 index 000000000..7bb69740b --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/text/wer.py @@ -0,0 +1,109 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Callable, List, Optional, Union +from warnings import warn + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.functional.text.wer import _wer_compute, _wer_update +from paddlemetrics.metric import Metric + + +class WER(Metric): + r""" + Word error rate (WER_) is a common metric of the performance of an automatic speech recognition system. + This value indicates the percentage of words that were incorrectly predicted. + The lower the value, the better the performance of the ASR system with a WER of 0 being a perfect score. + Word error rate can then be computed as: + + .. 
math::
+        WER = \frac{S + D + I}{N} = \frac{S + D + I}{S + D + C}
+
+    where:
+        - S is the number of substitutions,
+        - D is the number of deletions,
+        - I is the number of insertions,
+        - C is the number of correct words,
+        - N is the number of words in the reference (N=S+D+C).
+
+    Compute WER score of transcribed segments against references.
+
+    Args:
+        concatenate_texts: Whether to concatenate all input texts or compute WER iteratively.
+            This argument is deprecated in v0.6 and will be removed in v0.7.
+        compute_on_step:
+            Forward only calls ``update()`` and returns None if this is set to False. default: True
+        dist_sync_on_step:
+            Synchronize metric state across processes at each ``forward()``
+            before returning the value at the step. default: False
+        process_group:
+            Specify the process group on which synchronization is called. default: None (which selects the entire world)
+        dist_sync_fn:
+            Callback that performs the allgather operation on the metric state. When ``None``, DDP
+            will be used to perform the allgather
+
+    Returns:
+        (Tensor) Word error rate
+
+    Examples:
+        >>> predictions = ["this is the prediction", "there is an other sample"]
+        >>> references = ["this is the reference", "there is another one"]
+        >>> metric = WER()
+        >>> metric(predictions, references)
+        tensor(0.5000)
+    """
+    is_differentiable = False
+    higher_is_better = False
+    # the states registered below are named "errors"/"total", so the annotations must match
+    errors: Tensor
+    total: Tensor
+
+    def __init__(
+        self,
+        concatenate_texts: Optional[bool] = None,  # TODO: remove in v0.7
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ):
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+        if concatenate_texts is not None:
+            warn("`concatenate_texts` has been deprecated in v0.6 and it will be removed in v0.7", DeprecationWarning)
+        self.add_state("errors", tensor(0, dtype=B.float), dist_reduce_fx="sum")
+        self.add_state("total", tensor(0, dtype=B.float), dist_reduce_fx="sum")
+
+    def update(self, predictions: Union[str, List[str]], references: Union[str, List[str]]) -> None:  # type: ignore
+        """Store references/predictions for computing Word Error Rate scores.
+
+        Args:
+            predictions: Transcription(s) to score as a string or list of strings
+            references: Reference(s) for each speech input as a string or list of strings
+        """
+        errors, total = _wer_update(predictions, references)
+        self.errors += errors
+        self.total += total
+
+    def compute(self) -> Tensor:
+        """Calculate the word error rate.
+
+        Returns:
+            (Tensor) Word error rate
+        """
+        return _wer_compute(self.errors, self.total)
diff --git a/EE/paddlemetric/src/paddlemetrics/utilities/__init__.py b/EE/paddlemetric/src/paddlemetrics/utilities/__init__.py
new file mode 100644
index 000000000..b1b2a5067
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/utilities/__init__.py
@@ -0,0 +1,3 @@
+from paddlemetrics.utilities.data import apply_to_collection  # noqa: F401
+from paddlemetrics.utilities.distributed import class_reduce, reduce  # noqa: F401
+from paddlemetrics.utilities.prints import rank_zero_debug, rank_zero_info, rank_zero_warn  # noqa: F401
diff --git a/EE/paddlemetric/src/paddlemetrics/utilities/checks.py b/EE/paddlemetric/src/paddlemetrics/utilities/checks.py
new file mode 100644
index 000000000..b948f103b
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/utilities/checks.py
@@ -0,0 +1,582 @@
+# Copyright The PyTorch Lightning team.
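Mirroring the docstring example, a short sketch of the `WER` metric defined above; no behaviour beyond the docstring is assumed.

```python
from paddlemetrics.text.wer import WER

predictions = ["this is the prediction", "there is an other sample"]
references = ["this is the reference", "there is another one"]

wer = WER()
wer.update(predictions, references)  # accumulates `errors` and `total` word counts
print(wer.compute())  # tensor(0.5000) per the docstring example
```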
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional, Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.utilities.data import select_topk, to_onehot
+from paddlemetrics.utilities.enums import DataType
+
+
+def _check_same_shape(preds: Tensor, target: Tensor) -> None:
+    """Check that predictions and target have the same shape, else raise error."""
+    if preds.shape != target.shape:
+        raise RuntimeError("Predictions and targets are expected to have the same shape")
+
+
+def _basic_input_validation(preds: Tensor, target: Tensor, threshold: float, multiclass: Optional[bool]) -> None:
+    """Perform basic validation of inputs that does not require deducing any information about the type of inputs."""
+
+    if target.is_floating_point():
+        raise ValueError("The `target` has to be an integer tensor.")
+    if target.min() < 0:
+        raise ValueError("The `target` has to be a non-negative tensor.")
+
+    preds_float = preds.is_floating_point()
+    if not preds_float and preds.min() < 0:
+        raise ValueError("If `preds` are integers, they have to be non-negative.")
+
+    if not preds.shape[0] == target.shape[0]:
+        raise ValueError("The `preds` and `target` should have the same first dimension.")
+
+    if multiclass is False and target.max() > 1:
+        raise ValueError("If you set `multiclass=False`, then `target` should not exceed 1.")
+
+    if multiclass is False and not preds_float and preds.max() > 1:
+        raise ValueError("If you set `multiclass=False` and `preds` are integers, then `preds` should not exceed 1.")
+
+
+def _check_shape_and_type_consistency(preds: Tensor, target: Tensor) -> Tuple[DataType, int]:
+    """This checks that the shape and type of inputs are consistent with each other and fall into one of the
+    allowed input types (see the docstring of ``_input_format_classification``). It does not check
+    for consistency of the number of classes, other functions take care of that.
+
+    It returns the name of the case in which the inputs fall, and the implied number of classes (from the ``C`` dim for
+    multi-class data, or extra dim(s) for multi-label data).
+    """
+
+    preds_float = preds.is_floating_point()
+
+    if preds.ndim == target.ndim:
+        if preds.shape != target.shape:
+            raise ValueError(
+                "The `preds` and `target` should have the same shape,"
+                f" got `preds` with shape={preds.shape} and `target` with shape={target.shape}."
+            )
+        if preds_float and target.max() > 1:
+            raise ValueError(
+                "If `preds` and `target` are of shape (N, ...) and `preds` are floats, `target` should be binary."
+            )
+
+        # Get the case
+        if preds.ndim == 1 and preds_float:
+            case = DataType.BINARY
+        elif preds.ndim == 1 and not preds_float:
+            case = DataType.MULTICLASS
+        elif preds.ndim > 1 and preds_float:
+            case = DataType.MULTILABEL
+        else:
+            case = DataType.MULTIDIM_MULTICLASS
+
+        implied_classes = preds[0].numel()
+
+    elif preds.ndim == target.ndim + 1:
+        if not preds_float:
+            raise ValueError("If `preds` have one dimension more than `target`, `preds` should be a float tensor.")
+        if preds.shape[2:] != target.shape[1:]:
+            raise ValueError(
+                "If `preds` have one dimension more than `target`, the shape of `preds` should be"
+                " (N, C, ...), and the shape of `target` should be (N, ...)."
+            )
+
+        implied_classes = preds.shape[1]
+
+        if preds.ndim == 2:
+            case = DataType.MULTICLASS
+        else:
+            case = DataType.MULTIDIM_MULTICLASS
+    else:
+        raise ValueError(
+            "Either `preds` and `target` both should have the (same) shape (N, ...), or `target` should be (N, ...)"
+            " and `preds` should be (N, C, ...)."
+        )
+
+    return case, implied_classes
+
+
+def _check_num_classes_binary(num_classes: int, multiclass: Optional[bool]) -> None:
+    """This checks the consistency of `num_classes` with the data and the `multiclass` param for binary data."""
+
+    if num_classes > 2:
+        raise ValueError("Your data is binary, but `num_classes` is larger than 2.")
+    if num_classes == 2 and not multiclass:
+        raise ValueError(
+            "Your data is binary and `num_classes=2`, but `multiclass` is not True."
+            " Set it to True if you want to transform binary data to multi-class format."
+        )
+    if num_classes == 1 and multiclass:
+        raise ValueError(
+            "You have binary data and have set `multiclass=True`, but `num_classes` is 1."
+            " Either set `multiclass=None` (default) or set `num_classes=2`"
+            " to transform binary data to multi-class format."
+        )
+
+
+def _check_num_classes_mc(
+    preds: Tensor,
+    target: Tensor,
+    num_classes: int,
+    multiclass: Optional[bool],
+    implied_classes: int,
+) -> None:
+    """This checks the consistency of `num_classes` with the data and the `multiclass` param for (multi-
+    dimensional) multi-class data."""
+
+    if num_classes == 1 and multiclass is not False:
+        raise ValueError(
+            "You have set `num_classes=1`, but predictions are integers."
+            " If you want to convert (multi-dimensional) multi-class data with 2 classes"
+            " to binary/multi-label, set `multiclass=False`."
+        )
+    if num_classes > 1:
+        if multiclass is False and implied_classes != num_classes:
+            raise ValueError(
+                "You have set `multiclass=False`, but the implied number of classes "
+                " (from shape of inputs) does not match `num_classes`. If you are trying to"
+                " transform multi-dim multi-class data with 2 classes to multi-label, `num_classes`"
+                " should be either None or the product of the size of extra dimensions (...)."
+                " See Input Types in Metrics documentation."
+            )
+        if num_classes <= target.max():
+            raise ValueError("The highest label in `target` should be smaller than `num_classes`.")
+        if preds.shape != target.shape and num_classes != implied_classes:
+            raise ValueError("The size of C dimension of `preds` does not match `num_classes`.")
+
+
+def _check_num_classes_ml(num_classes: int, multiclass: Optional[bool], implied_classes: int) -> None:
+    """This checks the consistency of `num_classes` with the data and the `multiclass` param for multi-label
+    data."""
+
+    if multiclass and num_classes != 2:
+        raise ValueError(
+            "You have set `multiclass=True`, but `num_classes` is not equal to 2."
+ " If you are trying to transform multi-label data to 2 class multi-dimensional" + " multi-class, you should set `num_classes` to either 2 or None." + ) + if not multiclass and num_classes != implied_classes: + raise ValueError("The implied number of classes (from shape of inputs) does not match num_classes.") + + +def _check_top_k(top_k: int, case: str, implied_classes: int, multiclass: Optional[bool], preds_float: bool) -> None: + if case == DataType.BINARY: + raise ValueError("You can not use `top_k` parameter with binary data.") + if not isinstance(top_k, int) or top_k <= 0: + raise ValueError("The `top_k` has to be an integer larger than 0.") + if not preds_float: + raise ValueError("You have set `top_k`, but you do not have probability predictions.") + if multiclass is False: + raise ValueError("If you set `multiclass=False`, you can not set `top_k`.") + if case == DataType.MULTILABEL and multiclass: + raise ValueError( + "If you want to transform multi-label data to 2 class multi-dimensional" + "multi-class data using `multiclass=True`, you can not use `top_k`." + ) + if top_k >= implied_classes: + raise ValueError("The `top_k` has to be strictly smaller than the `C` dimension of `preds`.") + + +def _check_classification_inputs( + preds: Tensor, + target: Tensor, + threshold: float, + num_classes: Optional[int], + multiclass: Optional[bool], + top_k: Optional[int], +) -> DataType: + """Performs error checking on inputs for classification. + + This ensures that preds and target take one of the shape/type combinations that are + specified in ``_input_format_classification`` docstring. It also checks the cases of + over-rides with ``multiclass`` by checking (for multi-class and multi-dim multi-class + cases) that there are only up to 2 distinct labels. + + In case where preds are floats (probabilities), it is checked whether they are in [0,1] interval. + + When ``num_classes`` is given, it is checked that it is consistent with input cases (binary, + multi-label, ...), and that, if available, the implied number of classes in the ``C`` + dimension is consistent with it (as well as that max label in target is smaller than it). + + When ``num_classes`` is not specified in these cases, consistency of the highest target + value against ``C`` dimension is checked for (multi-dimensional) multi-class cases. + + If ``top_k`` is set (not None) for inputs that do not have probability predictions (and + are not binary), an error is raised. Similarly if ``top_k`` is set to a number that + is higher than or equal to the ``C`` dimension of ``preds``, an error is raised. + + Preds and target tensors are expected to be squeezed already - all dimensions should be + greater than 1, except perhaps the first one (``N``). + + Args: + preds: Tensor with predictions (labels or probabilities) + target: Tensor with ground truth labels, always integers (labels) + threshold: + Threshold value for transforming probability/logit predictions to binary + (0,1) predictions, in the case of binary or multi-label inputs. + num_classes: + Number of classes. If not explicitly set, the number of classes will be inferred + either from the shape of inputs, or the maximum label in the ``target`` and ``preds`` + tensor, where applicable. + top_k: + Number of highest probability entries for each sample to convert to 1s - relevant + only for inputs with probability predictions. The default value (``None``) will be + interpreted as 1 for these inputs. 
If this parameter is set for multi-label inputs,
+            it will take precedence over threshold.
+
+            Should be left unset (``None``) for inputs with label predictions.
+        multiclass:
+            Used only in certain special cases, where you want to treat inputs as a different type
+            than what they appear to be. See the parameter's
+            :ref:`documentation section <pages/classification:using the multiclass parameter>`
+            for a more detailed explanation and examples.
+
+
+    Return:
+        case: The case the inputs fall in, one of 'binary', 'multi-class', 'multi-label' or
+            'multi-dim multi-class'
+    """
+
+    # Basic validation (that does not need case/type information)
+    _basic_input_validation(preds, target, threshold, multiclass)
+
+    # Check that shape/types fall into one of the cases
+    case, implied_classes = _check_shape_and_type_consistency(preds, target)
+
+    # Check consistency with the `C` dimension in case of multi-class data
+    if preds.shape != target.shape:
+        if multiclass is False and implied_classes != 2:
+            raise ValueError(
+                "You have set `multiclass=False`, but have more than 2 classes in your data,"
+                " based on the C dimension of `preds`."
+            )
+        if target.max() >= implied_classes:
+            raise ValueError(
+                "The highest label in `target` should be smaller than the size of the `C` dimension of `preds`."
+            )
+
+    # Check that num_classes is consistent
+    if num_classes:
+        if case == DataType.BINARY:
+            _check_num_classes_binary(num_classes, multiclass)
+        elif case in (DataType.MULTICLASS, DataType.MULTIDIM_MULTICLASS):
+            _check_num_classes_mc(preds, target, num_classes, multiclass, implied_classes)
+        elif case == DataType.MULTILABEL:  # fixed: was `case.MULTILABEL`, which is always truthy
+            _check_num_classes_ml(num_classes, multiclass, implied_classes)
+
+    # Check that top_k is consistent
+    if top_k is not None:
+        _check_top_k(top_k, case, implied_classes, multiclass, preds.is_floating_point())
+
+    return case
+
+
+def _input_squeeze(
+    preds: Tensor,
+    target: Tensor,
+) -> Tuple[Tensor, Tensor]:
+    """Remove excess dimensions."""
+    if preds.shape[0] == 1:
+        preds, target = preds.squeeze().unsqueeze(0), target.squeeze().unsqueeze(0)
+    else:
+        preds, target = preds.squeeze(), target.squeeze()
+    return preds, target
+
+
+def _input_format_classification(
+    preds: Tensor,
+    target: Tensor,
+    threshold: float = 0.5,
+    top_k: Optional[int] = None,
+    num_classes: Optional[int] = None,
+    multiclass: Optional[bool] = None,
+) -> Tuple[Tensor, Tensor, DataType]:
+    """Convert preds and target tensors into common format.
+
+    Preds and targets are supposed to fall into one of these categories (and are
+    validated to make sure this is the case):
+
+    * Both preds and target are of shape ``(N,)``, and both are integers (multi-class)
+    * Both preds and target are of shape ``(N,)``, and target is binary, while preds
+      are a float (binary)
+    * preds are of shape ``(N, C)`` and are floats, and target is of shape ``(N,)`` and
+      is integer (multi-class)
+    * preds and target are of shape ``(N, ...)``, target is binary and preds is a float
+      (multi-label)
+    * preds are of shape ``(N, C, ...)`` and are floats, target is of shape ``(N, ...)``
+      and is integer (multi-dimensional multi-class)
+    * preds and target are of shape ``(N, ...)``, both are integers (multi-dimensional
+      multi-class)
+
+    To avoid ambiguities, all dimensions of size 1, except the first one, are squeezed out.
+
+    The returned output tensors will be binary tensors of the same shape, either ``(N, C)``
+    or ``(N, C, X)``; the details for each case are described below.
The function also returns
+    a ``case`` string, which describes which of the above cases the inputs belonged to - regardless
+    of whether this was "overridden" by other settings (like ``multiclass``).
+
+    In the binary case, targets are normally returned as a ``(N,1)`` tensor, while preds are transformed
+    into a binary tensor (elements become 1 if the probability is greater than or equal to
+    ``threshold``, or 0 otherwise). If ``multiclass=True``, then both target and preds
+    become ``(N, 2)`` tensors by a one-hot transformation, with the thresholding being applied to
+    preds first.
+
+    In the multi-class case, normally both preds and targets become ``(N, C)`` binary tensors; targets
+    by a one-hot transformation and preds by selecting the ``top_k`` largest entries (if their original
+    shape was ``(N, C)``). However, if ``multiclass=False``, then targets and preds will be
+    returned as a ``(N,1)`` tensor.
+
+    In the multi-label case, normally targets and preds are returned as ``(N, C)`` binary tensors, with
+    preds being binarized as in the binary case. Here the ``C`` dimension is obtained by flattening
+    all dimensions after the first one. However, if ``multiclass=True``, then both are returned as
+    ``(N, 2, C)``, by an equivalent transformation as in the binary case.
+
+    In the multi-dimensional multi-class case, normally both target and preds are returned as
+    ``(N, C, X)`` tensors, with ``X`` resulting from flattening of all dimensions except ``N`` and
+    ``C``. The transformations performed here are equivalent to the multi-class case. However, if
+    ``multiclass=False`` (and there are up to two classes), then the data is returned as
+    ``(N, X)`` binary tensors (multi-label).
+
+    Note:
+        Where a one-hot transformation needs to be performed and the number of classes
+        is not implicitly given by a ``C`` dimension, the new ``C`` dimension will either be
+        equal to ``num_classes``, if it is given, or the maximum label value in preds and
+        target.
+
+    Args:
+        preds: Tensor with predictions (labels or probabilities)
+        target: Tensor with ground truth labels, always integers (labels)
+        threshold:
+            Threshold value for transforming probability/logit predictions to binary
+            (0 or 1) predictions, in the case of binary or multi-label inputs.
+        num_classes:
+            Number of classes. If not explicitly set, the number of classes will be inferred
+            either from the shape of inputs, or the maximum label in the ``target`` and ``preds``
+            tensor, where applicable.
+        top_k:
+            Number of highest probability entries for each sample to convert to 1s - relevant
+            only for (multi-dimensional) multi-class inputs with probability predictions. The
+            default value (``None``) will be interpreted as 1 for these inputs.
+
+            Should be left unset (``None``) for all other types of inputs.
+        multiclass:
+            Used only in certain special cases, where you want to treat inputs as a different type
+            than what they appear to be. See the parameter's
+            :ref:`documentation section <pages/classification:using the multiclass parameter>`
+            for a more detailed explanation and examples.
+
+    Returns:
+        preds: binary tensor of shape ``(N, C)`` or ``(N, C, X)``
+        target: binary tensor of shape ``(N, C)`` or ``(N, C, X)``
+        case: The case the inputs fall in, one of ``'binary'``, ``'multi-class'``, ``'multi-label'`` or
+            ``'multi-dim multi-class'``
+    """
+    # Remove excess dimensions
+    preds, target = _input_squeeze(preds, target)
+
+    # Convert half precision tensors to full precision, as not all ops are supported
+    # for example, min() is not supported
+    if preds.dtype == B.float16:
+        preds = preds.float()
+
+    case = _check_classification_inputs(
+        preds,
+        target,
+        threshold=threshold,
+        num_classes=num_classes,
+        multiclass=multiclass,
+        top_k=top_k,
+    )
+
+    if case in (DataType.BINARY, DataType.MULTILABEL) and not top_k:
+        preds = (preds >= threshold).int()
+        num_classes = num_classes if not multiclass else 2
+
+    if case == DataType.MULTILABEL and top_k:
+        preds = select_topk(preds, top_k)
+
+    if case in (DataType.MULTICLASS, DataType.MULTIDIM_MULTICLASS) or multiclass:
+        if preds.is_floating_point():
+            num_classes = preds.shape[1]
+            preds = select_topk(preds, top_k or 1)
+        else:
+            num_classes = num_classes if num_classes else max(preds.max(), target.max()) + 1
+            preds = to_onehot(preds, max(2, num_classes))
+
+        target = to_onehot(target, max(2, num_classes))  # type: ignore
+
+        if multiclass is False:
+            preds, target = preds[:, 1, ...], target[:, 1, ...]
+
+    if (case in (DataType.MULTICLASS, DataType.MULTIDIM_MULTICLASS) and multiclass is not False) or multiclass:
+        target = target.reshape(target.shape[0], target.shape[1], -1)
+        preds = preds.reshape(preds.shape[0], preds.shape[1], -1)
+    else:
+        target = target.reshape(target.shape[0], -1)
+        preds = preds.reshape(preds.shape[0], -1)
+
+    # Some operations above create an extra dimension for MC/binary case - this removes it
+    if preds.ndim > 2:
+        preds, target = preds.squeeze(-1), target.squeeze(-1)
+
+    return preds.int(), target.int(), case
+
+
+def _input_format_classification_one_hot(
+    num_classes: int,
+    preds: Tensor,
+    target: Tensor,
+    threshold: float = 0.5,
+    multilabel: bool = False,
+) -> Tuple[Tensor, Tensor]:
+    """Convert preds and target tensors into one-hot sparse label tensors.
+
+    Args:
+        num_classes: number of classes
+        preds: either tensor with labels, tensor with probabilities/logits or multilabel tensor
+        target: tensor with ground truth labels
+        threshold: float used for thresholding multilabel input
+        multilabel: boolean flag indicating if input is multilabel
+
+    Raises:
+        ValueError:
+            If ``preds`` and ``target`` don't have the same number of dimensions
+            or one additional dimension for ``preds``.
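To make the shape conventions above concrete, a sketch of the internal `_input_format_classification` helper on a multi-class probability input; the expected values follow from the docstring, and this is illustrative only, not part of the public API.

```python
import paddleext.torchapi as B
from paddlemetrics.utilities.checks import _input_format_classification

preds = B.tensor([[0.1, 0.8, 0.1], [0.7, 0.2, 0.1]])  # (N=2, C=3) probabilities
target = B.tensor([1, 0])                             # (N,) integer labels

p, t, case = _input_format_classification(preds, target)
# Both come back as (N, C) one-hot int tensors: preds via top-1 selection,
# target via a one-hot transformation; `case` is DataType.MULTICLASS.
print(p.tolist())  # [[0, 1, 0], [1, 0, 0]]
print(t.tolist())  # [[0, 1, 0], [1, 0, 0]]
```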
+
+    Returns:
+        preds: one-hot tensor of shape [num_classes, -1] with predicted labels
+        target: one-hot tensor of shape [num_classes, -1] with true labels
+    """
+    if preds.ndim not in (target.ndim, target.ndim + 1):
+        raise ValueError("preds and target must have same number of dimensions, or one additional dimension for preds")
+
+    if preds.ndim == target.ndim + 1:
+        # multi class probabilities
+        preds = B.argmax(preds, dim=1)
+
+    if preds.ndim == target.ndim and preds.dtype in (B.long, B.int) and num_classes > 1 and not multilabel:
+        # multi-class
+        preds = to_onehot(preds, num_classes=num_classes)
+        target = to_onehot(target, num_classes=num_classes)
+
+    elif preds.ndim == target.ndim and preds.is_floating_point():
+        # binary or multilabel probabilities
+        preds = (preds >= threshold).long()
+
+    # transpose class as first dim and reshape
+    if preds.ndim > 1:
+        preds = preds.transpose(1, 0)
+        target = target.transpose(1, 0)
+
+    return preds.reshape(num_classes, -1), target.reshape(num_classes, -1)
+
+
+def _check_retrieval_functional_inputs(
+    preds: Tensor,
+    target: Tensor,
+    allow_non_binary_target: bool = False,
+) -> Tuple[Tensor, Tensor]:
+    """Check that ``preds`` and ``target`` tensors are of the same shape and of the correct dtype.
+
+    Args:
+        preds: tensor with scores/logits
+        target: tensor with ground truth labels
+        allow_non_binary_target: whether to allow target to contain non-binary values
+
+    Raises:
+        ValueError:
+            If ``preds`` and ``target`` don't have the same shape, are empty,
+            or are not of the correct ``dtypes``.
+
+    Returns:
+        preds: as B.float32
+        target: as B.long if not floating point else B.float32
+    """
+    if preds.shape != target.shape:
+        raise ValueError("`preds` and `target` must be of the same shape")
+
+    if not preds.numel() or not preds.size():
+        raise ValueError("`preds` and `target` must be non-empty and non-scalar tensors")
+
+    return _check_retrieval_target_and_prediction_types(preds, target, allow_non_binary_target=allow_non_binary_target)
+
+
+def _check_retrieval_inputs(
+    indexes: Tensor,
+    preds: Tensor,
+    target: Tensor,
+    allow_non_binary_target: bool = False,
+) -> Tuple[Tensor, Tensor, Tensor]:
+    """Check that ``indexes``, ``preds`` and ``target`` tensors are of the same shape and of the correct dtype.
+
+    Args:
+        indexes: tensor with query indexes
+        preds: tensor with scores/logits
+        target: tensor with ground truth labels
+
+    Raises:
+        ValueError:
+            If ``preds`` and ``target`` don't have the same shape, are empty,
+            or are not of the correct ``dtypes``.
+
+    Returns:
+        indexes: as B.long
+        preds: as B.float32
+        target: as B.long
+    """
+    if indexes.shape != preds.shape or preds.shape != target.shape:
+        raise ValueError("`indexes`, `preds` and `target` must be of the same shape")
+
+    if not indexes.numel() or not indexes.size():
+        raise ValueError(
+            "`indexes`, `preds` and `target` must be non-empty and non-scalar tensors",
+        )
+
+    if indexes.dtype is not B.long:
+        raise ValueError("`indexes` must be a tensor of long integers")
+
+    preds, target = _check_retrieval_target_and_prediction_types(
+        preds, target, allow_non_binary_target=allow_non_binary_target
+    )
+
+    return indexes.long().flatten(), preds, target
+
+
+def _check_retrieval_target_and_prediction_types(
+    preds: Tensor,
+    target: Tensor,
+    allow_non_binary_target: bool = False,
+) -> Tuple[Tensor, Tensor]:
+    """Check that ``preds`` and ``target`` tensors are of the same shape and of the correct dtype.
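A quick illustration of the retrieval validation helper above (again internal; shown only to make the flattening and dtype coercion visible; the printed dtypes depend on the backend).

```python
import paddleext.torchapi as B
from paddlemetrics.utilities.checks import _check_retrieval_inputs

indexes = B.tensor([[0, 0], [1, 1]])        # query ids, long dtype required
preds = B.tensor([[0.2, 0.7], [0.9, 0.1]])  # scores, same shape as indexes
target = B.tensor([[0, 1], [1, 0]])         # binary relevance labels

idx, p, t = _check_retrieval_inputs(indexes, preds, target)
print(idx.shape)  # all three are flattened to one dimension, here of length 4
```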
+ + Args: + preds: either tensor with scores/logits + target: tensor with ground true labels + allow_non_binary_target: whether to allow target to contain non-binary values + + Raises: + ValueError: + If ``preds`` and ``target`` don't have the same shape, if they are empty + or not of the correct ``dtypes``. + """ + if target.dtype not in (B.bool, B.long, B.int) and not B.is_floating_point(target): + raise ValueError("`target` must be a tensor of booleans, integers or floats") + + if not preds.is_floating_point(): + raise ValueError("`preds` must be a tensor of floats") + + if not allow_non_binary_target and (target.max() > 1 or target.min() < 0): + raise ValueError("`target` must contain `binary` values") + + target = target.float().flatten() if target.is_floating_point() else target.long().flatten() + return preds.float().flatten(), target diff --git a/EE/paddlemetric/src/paddlemetrics/utilities/data.py b/EE/paddlemetric/src/paddlemetrics/utilities/data.py new file mode 100644 index 000000000..13e43fb60 --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/utilities/data.py @@ -0,0 +1,240 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, List, Mapping, Optional, Sequence, Union + +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from paddlemetrics.utilities.prints import rank_zero_warn + +METRIC_EPS = 1e-6 + + +def dim_zero_cat(x: Union[Tensor, List[Tensor]]) -> Tensor: + """concatenation along the zero dimension.""" + x = x if isinstance(x, (list, tuple)) else [x] + x = [y.unsqueeze(0) if y.numel() == 1 and y.ndim == 0 else y for y in x] + if not x: # empty list + raise ValueError("No samples to concatenate") + return B.cat(x, dim=0) + + +def dim_zero_sum(x: Tensor) -> Tensor: + """summation along the zero dimension.""" + return B.sum(x, dim=0) + + +def dim_zero_mean(x: Tensor) -> Tensor: + """average along the zero dimension.""" + return B.mean(x, dim=0) + + +def dim_zero_max(x: Tensor) -> Tensor: + """max along the zero dimension.""" + return B.max(x, dim=0).values + + +def dim_zero_min(x: Tensor) -> Tensor: + """min along the zero dimension.""" + return B.min(x, dim=0).values + + +def _flatten(x: Sequence) -> list: + return [item for sublist in x for item in sublist] + + +def to_onehot( + label_tensor: Tensor, + num_classes: Optional[int] = None, +) -> Tensor: + """Converts a dense label tensor to one-hot format. + + Args: + label_tensor: dense label tensor, with shape [N, d1, d2, ...] + num_classes: number of classes C + + Returns: + A sparse label tensor with shape [N, C, d1, d2, ...] 
+ + Example: + >>> x = B.tensor([1, 2, 3]) + >>> to_onehot(x) + tensor([[0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1]]) + """ + if num_classes is None: + num_classes = int(label_tensor.max().detach().item() + 1) + + tensor_onehot = B.zeros( + label_tensor.shape[0], + num_classes, + *label_tensor.shape[1:], + dtype=label_tensor.dtype, + device=label_tensor.device, + ) + index = label_tensor.long().unsqueeze(1).expand_as(tensor_onehot) + return (tensor_onehot.scatter_(1, index, 1.0) > 0).to(label_tensor.dtype) + + +def select_topk(prob_tensor: Tensor, topk: int = 1, dim: int = 1) -> Tensor: + """Convert a probability tensor to binary by selecting top-k highest entries. + + Args: + prob_tensor: dense tensor of shape ``[..., C, ...]``, where ``C`` is in the + position defined by the ``dim`` argument + topk: number of highest entries to turn into 1s + dim: dimension on which to compare entries + + Returns: + A binary tensor of the same shape as the input tensor of type B.int32 + + Example: + >>> x = B.tensor([[1.1, 2.0, 3.0], [2.0, 1.0, 0.5]]) + >>> select_topk(x, topk=2) + tensor([[0, 1, 1], + [1, 1, 0]], dtype=B.int32) + """ + zeros = B.zeros_like(prob_tensor) + if topk == 1: # argmax has better performance than topk + topk_tensor = zeros.scatter(dim, prob_tensor.argmax(dim=dim, keepdim=True), 1.0) + else: + topk_tensor = zeros.scatter(dim, prob_tensor.topk(k=topk, dim=dim).indices, 1.0) + return topk_tensor.int() + + +def to_categorical(x: Tensor, argmax_dim: int = 1) -> Tensor: + """Converts a tensor of probabilities to a dense label tensor. + + Args: + x: probabilities to get the categorical label [N, d1, d2, ...] + argmax_dim: dimension to apply + + Return: + A tensor with categorical labels [N, d2, ...] + + Example: + >>> x = B.tensor([[0.2, 0.5], [0.9, 0.1]]) + >>> to_categorical(x) + tensor([1, 0]) + """ + return B.argmax(x, dim=argmax_dim) + + +def get_num_classes( + preds: Tensor, + target: Tensor, + num_classes: Optional[int] = None, +) -> int: + """Calculates the number of classes for a given prediction and target tensor. + + Args: + preds: predicted values + target: true labels + num_classes: number of classes if known + + Return: + An integer that represents the number of classes. + """ + num_target_classes = int(target.max().detach().item() + 1) + num_pred_classes = int(preds.max().detach().item() + 1) + num_all_classes = max(num_target_classes, num_pred_classes) + + if num_classes is None: + num_classes = num_all_classes + elif num_classes != num_all_classes: + rank_zero_warn( + f"You have set {num_classes} number of classes which is" + f" different from predicted ({num_pred_classes}) and" + f" target ({num_target_classes}) number of classes", + RuntimeWarning, + ) + return num_classes + + +def apply_to_collection( + data: Any, + dtype: Union[type, tuple], + function: Callable, + *args: Any, + wrong_dtype: Optional[Union[type, tuple]] = None, + **kwargs: Any, +) -> Any: + """Recursively applies a function to all elements of a certain dtype. 
+ + Args: + data: the collection to apply the function to + dtype: the given function will be applied to all elements of this dtype + function: the function to apply + *args: positional arguments (will be forwarded to calls of ``function``) + wrong_dtype: the given function won't be applied if this type is specified and the given collections is of + the :attr:`wrong_type` even if it is of type :attr`dtype` + **kwargs: keyword arguments (will be forwarded to calls of ``function``) + + Returns: + the resulting collection + + Example: + >>> apply_to_collection(B.tensor([8, 0, 2, 6, 7]), dtype=Tensor, function=lambda x: x ** 2) + tensor([64, 0, 4, 36, 49]) + >>> apply_to_collection([8, 0, 2, 6, 7], dtype=int, function=lambda x: x ** 2) + [64, 0, 4, 36, 49] + >>> apply_to_collection(dict(abc=123), dtype=int, function=lambda x: x ** 2) + {'abc': 15129} + """ + elem_type = type(data) + + # Breaking condition + if isinstance(data, dtype) and (wrong_dtype is None or not isinstance(data, wrong_dtype)): + return function(data, *args, **kwargs) + + # Recursively apply to collection items + if isinstance(data, Mapping): + return elem_type({k: apply_to_collection(v, dtype, function, *args, **kwargs) for k, v in data.items()}) + + if isinstance(data, tuple) and hasattr(data, "_fields"): # named tuple + return elem_type(*(apply_to_collection(d, dtype, function, *args, **kwargs) for d in data)) + + if isinstance(data, Sequence) and not isinstance(data, str): + return elem_type([apply_to_collection(d, dtype, function, *args, **kwargs) for d in data]) + + # data is neither of dtype, nor a collection + return data + + +def get_group_indexes(indexes: Tensor) -> List[Tensor]: + """Given an integer `B.Tensor` `indexes`, return a `B.Tensor` of indexes for each different value in + `indexes`. + + Args: + indexes: a `B.Tensor` + + Return: + A list of integer `B.Tensor`s + + Example: + >>> indexes = B.tensor([0, 0, 0, 1, 1, 1, 1]) + >>> get_group_indexes(indexes) + [tensor([0, 1, 2]), tensor([3, 4, 5, 6])] + """ + + res: dict = {} + for i, _id in enumerate(indexes): + _id = _id.item() + if _id in res: + res[_id] += [i] + else: + res[_id] = [i] + + return [tensor(x, dtype=B.long) for x in res.values()] diff --git a/EE/paddlemetric/src/paddlemetrics/utilities/distributed.py b/EE/paddlemetric/src/paddlemetrics/utilities/distributed.py new file mode 100644 index 000000000..aec42872a --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/utilities/distributed.py @@ -0,0 +1,145 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, List, Optional + +import paddleext.torchapi as B +#import torchapi.nn.functional as F +from paddleext.torchapi import Tensor + + +def reduce(to_reduce: Tensor, reduction: str) -> Tensor: + """Reduces a given tensor by a given reduction method. 
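The doctests above only cover flat inputs; a nested collection shows why `apply_to_collection` recurses. A minimal sketch:

```python
import paddleext.torchapi as B
from paddleext.torchapi import Tensor
from paddlemetrics.utilities.data import apply_to_collection

batch = {"preds": B.tensor([1.0, 2.0]), "meta": {"ids": [B.tensor(3.0)]}}
doubled = apply_to_collection(batch, dtype=Tensor, function=lambda t: t * 2)
# The dict/list structure is preserved; only Tensor leaves are transformed.
print(doubled["preds"].tolist(), doubled["meta"]["ids"][0].item())  # [2.0, 4.0] 6.0
```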
+
+    Args:
+        to_reduce: the tensor, which shall be reduced
+        reduction: a string specifying the reduction method ('elementwise_mean', 'none', 'sum')
+
+    Return:
+        reduced Tensor
+
+    Raise:
+        ValueError if an invalid reduction parameter was given
+    """
+    if reduction == "elementwise_mean":
+        return B.mean(to_reduce)
+    if reduction == "none":
+        return to_reduce
+    if reduction == "sum":
+        return B.sum(to_reduce)
+    raise ValueError("Reduction parameter unknown.")
+
+
+def class_reduce(num: Tensor, denom: Tensor, weights: Tensor, class_reduction: str = "none") -> Tensor:
+    """
+    Function used to reduce classification metrics of the form `num / denom * weights`.
+    For example, for calculating standard accuracy ``num`` would be the number of true positives per class,
+    ``denom`` would be the support per class, and ``weights`` would be a tensor of 1s.
+
+    Args:
+        num: numerator tensor
+        denom: denominator tensor
+        weights: weights for each class
+        class_reduction: reduction method for multiclass problems
+
+            - ``'micro'``: calculate metrics globally
+            - ``'macro'``: calculate metrics for each label, and find their unweighted mean.
+            - ``'weighted'``: calculate metrics for each label, and find their weighted mean.
+            - ``'none'`` or ``None``: returns calculated metric per class (default)
+
+    Raises:
+        ValueError:
+            If ``class_reduction`` is none of ``"micro"``, ``"macro"``, ``"weighted"``, ``"none"`` or ``None``.
+
+    """
+    valid_reduction = ("micro", "macro", "weighted", "none", None)
+    if class_reduction == "micro":
+        fraction = B.sum(num) / B.sum(denom)
+    else:
+        fraction = num / denom
+
+    # We need to take care of instances where the denom can be 0
+    # for some (or all) classes which will produce nans
+    fraction[fraction != fraction] = 0
+
+    if class_reduction == "micro":
+        return fraction
+    if class_reduction == "macro":
+        return B.mean(fraction)
+    if class_reduction == "weighted":
+        return B.sum(fraction * (weights.float() / B.sum(weights)))
+    if class_reduction == "none" or class_reduction is None:
+        return fraction
+
+    raise ValueError(
+        f"Reduction parameter {class_reduction} unknown." f" Choose between one of these: {valid_reduction}"
+    )
+
+
+def _simple_gather_all_tensors(result: Tensor, group: Any, world_size: int) -> List[Tensor]:
+    gathered_result = [B.zeros_like(result) for _ in range(world_size)]
+    B.distributed.all_gather(gathered_result, result, group)
+    return gathered_result
+
+
+def gather_all_tensors(result: Tensor, group: Optional[Any] = None) -> List[Tensor]:
+    """Function to gather all tensors from several ddp processes onto a list that is broadcasted to all processes.
+    Works on tensors that have the same number of dimensions, but where each dimension may differ. In this case
+    tensors are padded, gathered and then trimmed to secure equal workload for all processes.
+
+    Args:
+        result: the value to sync
+        group: the process group to gather results from. Defaults to all processes (world)
+
+    Return:
+        gathered_result: list with size equal to the process group where
+        gathered_result[i] corresponds to result tensor from process i
+    """
+    if group is None:
+        group = B.distributed.group.WORLD
+
+    # convert tensors to contiguous format
+    result = result.contiguous()
+
+    world_size = B.distributed.get_world_size(group)
+    B.distributed.barrier(group=group)
+
+    # if the tensor is scalar, things are easy
+    if result.ndim == 0:
+        return _simple_gather_all_tensors(result, group, world_size)
+
+    # 1. Gather sizes of all tensors
+    local_size = B.tensor(result.shape, device=result.device)
+    local_sizes = [B.zeros_like(local_size) for _ in range(world_size)]
+    B.distributed.all_gather(local_sizes, local_size, group=group)
+    max_size = B.stack(local_sizes).max(dim=0).values
+    all_sizes_equal = all(all(ls == max_size) for ls in local_sizes)
+
+    # 2. If shapes are all the same, then do a simple gather:
+    if all_sizes_equal:
+        return _simple_gather_all_tensors(result, group, world_size)
+
+    # 3. If not, we need to pad each local tensor to maximum size, gather and then truncate
+    pad_dims = []
+    pad_by = (max_size - local_size).detach().cpu()
+    for val in reversed(pad_by):
+        pad_dims.append(0)
+        pad_dims.append(val.item())
+    result_padded = B.pad(result, pad_dims)
+    gathered_result = [B.zeros_like(result_padded) for _ in range(world_size)]
+    B.distributed.all_gather(gathered_result, result_padded, group)
+    for idx, item_size in enumerate(local_sizes):
+        slice_param = [slice(dim_size) for dim_size in item_size]
+        gathered_result[idx] = gathered_result[idx][slice_param]
+    return gathered_result
diff --git a/EE/paddlemetric/src/paddlemetrics/utilities/enums.py b/EE/paddlemetric/src/paddlemetrics/utilities/enums.py
new file mode 100644
index 000000000..7476c051d
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/utilities/enums.py
@@ -0,0 +1,83 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from enum import Enum
+from typing import Optional, Union
+
+
+class EnumStr(str, Enum):
+    """Type of any enumerator with allowed comparison to string invariant to cases.
+
+    Example:
+        >>> class MyEnum(EnumStr):
+        ...     ABC = 'abc'
+        >>> MyEnum.from_str('Abc')
+        <MyEnum.ABC: 'abc'>
+        >>> {MyEnum.ABC: 123}
+        {<MyEnum.ABC: 'abc'>: 123}
+    """
+
+    @classmethod
+    def from_str(cls, value: str) -> Optional["EnumStr"]:
+        statuses = [status for status in dir(cls) if not status.startswith("_")]
+        for st in statuses:
+            if st.lower() == value.lower():
+                return getattr(cls, st)
+        return None
+
+    def __eq__(self, other: Union[str, "EnumStr", None]) -> bool:  # type: ignore
+        other = other.value if isinstance(other, Enum) else str(other)
+        return self.value.lower() == other.lower()
+
+    def __hash__(self) -> int:
+        # re-enable hashtable so it can be used as a dict key or in a set
+        # example: set(LightningEnum)
+        return hash(self.name)
+
+
+class DataType(EnumStr):
+    """Enum to represent data type.
+
+    >>> "Binary" in list(DataType)
+    True
+    """
+
+    BINARY = "binary"
+    MULTILABEL = "multi-label"
+    MULTICLASS = "multi-class"
+    MULTIDIM_MULTICLASS = "multi-dim multi-class"
+
+
+class AverageMethod(EnumStr):
+    """Enum to represent average method.
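+
+    Because ``EnumStr`` mixes in ``str``, the ``None`` value below is coerced to
+    the string ``'None'``, so the case-insensitive ``__eq__`` above matches it
+    against both ``None`` and ``'none'``: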
+
+    >>> None in list(AverageMethod)
+    True
+    >>> AverageMethod.NONE == None
+    True
+    >>> AverageMethod.NONE == 'none'
+    True
+    """
+
+    MICRO = "micro"
+    MACRO = "macro"
+    WEIGHTED = "weighted"
+    NONE = None
+    SAMPLES = "samples"
+
+
+class MDMCAverageMethod(EnumStr):
+    """Enum to represent multi-dim multi-class average method."""
+
+    GLOBAL = "global"
+    SAMPLEWISE = "samplewise"
diff --git a/EE/paddlemetric/src/paddlemetrics/utilities/exceptions.py b/EE/paddlemetric/src/paddlemetrics/utilities/exceptions.py
new file mode 100644
index 000000000..767fe9014
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/utilities/exceptions.py
@@ -0,0 +1,17 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class paddlemetricsUserError(Exception):
+    """Error used to inform users of a wrong combination of Metric API calls."""
diff --git a/EE/paddlemetric/src/paddlemetrics/utilities/imports.py b/EE/paddlemetric/src/paddlemetrics/utilities/imports.py
new file mode 100644
index 000000000..f3794801c
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/utilities/imports.py
@@ -0,0 +1,90 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Import utilities."""
+import operator
+from importlib import import_module
+from importlib.util import find_spec
+from typing import Callable, Optional
+
+from packaging.version import Version
+from pkg_resources import DistributionNotFound, get_distribution
+
+
+def _module_available(module_path: str) -> bool:
+    """Check if a path is available in your environment.
+
+    >>> _module_available('os')
+    True
+    >>> _module_available('bla.bla')
+    False
+    """
+    try:
+        return find_spec(module_path) is not None
+    except AttributeError:
+        # Python 3.6
+        return False
+    except ModuleNotFoundError:
+        # Python 3.7+
+        return False
+
+
+def _compare_version(package: str, op: Callable, version: str) -> Optional[bool]:
+    """Compare package version with some requirements.
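+
+    Returns ``None`` when the package is not importable at all, so callers can
+    distinguish "not installed" from a failed version comparison: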
+
+    >>> import operator
+    >>> _compare_version("torch", operator.ge, "0.1")
+    True
+    >>> _compare_version("any_module", operator.ge, "0.0")  # is None
+    """
+    if not _module_available(package):
+        return None
+    try:
+        pkg = import_module(package)
+        pkg_version = pkg.__version__  # type: ignore
+    except (ModuleNotFoundError, DistributionNotFound):
+        return None
+    except AttributeError:
+        pkg_version = get_distribution(package).version
+    except ImportError:
+        # catches cyclic imports - the case with integrated libs
+        # see: https://stackoverflow.com/a/32965521
+        pkg_version = get_distribution(package).version
+    try:
+        pkg_version = Version(pkg_version)
+    except TypeError:
+        # this is mocked by Sphinx, so it shall return True to generate all summaries
+        return True
+    return op(pkg_version, Version(version))
+
+
+_TORCH_LOWER_1_4: Optional[bool] = False
+_TORCH_LOWER_1_5: Optional[bool] = False
+_TORCH_LOWER_1_6: Optional[bool] = False
+_TORCH_GREATER_EQUAL_1_6: Optional[bool] = True
+_TORCH_GREATER_EQUAL_1_7: Optional[bool] = True
+
+_LIGHTNING_AVAILABLE: bool = False
+_JIWER_AVAILABLE: bool = _module_available("jiwer")
+_NLTK_AVAILABLE: bool = _module_available("nltk")
+_ROUGE_SCORE_AVAILABLE: bool = _module_available("rouge_score")
+_BERTSCORE_AVAILABLE: bool = _module_available("bert_score")
+_SCIPY_AVAILABLE: bool = _module_available("scipy")
+_TORCH_FIDELITY_AVAILABLE: bool = _module_available("torch_fidelity")
+_LPIPS_AVAILABLE: bool = _module_available("lpips")
+_TQDM_AVAILABLE: bool = _module_available("tqdm")
+_TRANSFORMERS_AVAILABLE: bool = _module_available("transformers")
+_PESQ_AVAILABLE: bool = _module_available("pesq")
+_SACREBLEU_AVAILABLE: bool = _module_available("sacrebleu")
+_REGEX_AVAILABLE: bool = _module_available("regex")
+_PYSTOI_AVAILABLE: bool = _module_available("pystoi")
diff --git a/EE/paddlemetric/src/paddlemetrics/utilities/prints.py b/EE/paddlemetric/src/paddlemetrics/utilities/prints.py
new file mode 100644
index 000000000..ff4b1b35e
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/utilities/prints.py
@@ -0,0 +1,49 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
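+
+# NOTE: the helpers below mirror the Lightning-style "rank zero" utilities: in
+# distributed runs only the process with LOCAL_RANK == 0 emits logs and
+# warnings, so each message is printed once per node instead of once per
+# process.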
+import os +import warnings +from functools import wraps +from typing import Any, Callable + +from paddlemetrics import _logger as log + + +def rank_zero_only(fn: Callable) -> Callable: + @wraps(fn) + def wrapped_fn(*args: Any, **kwargs: Any) -> Any: + if rank_zero_only.rank == 0: # type: ignore + return fn(*args, **kwargs) + + return wrapped_fn + + +# add the attribute to the function but don't overwrite in case Trainer has already set it +rank_zero_only.rank = getattr(rank_zero_only, "rank", int(os.environ.get("LOCAL_RANK", 0))) # type: ignore + + +def _warn(*args: Any, **kwargs: Any) -> None: + warnings.warn(*args, **kwargs) + + +def _info(*args: Any, **kwargs: Any) -> None: + log.info(*args, **kwargs) + + +def _debug(*args: Any, **kwargs: Any) -> None: + log.debug(*args, **kwargs) + + +rank_zero_debug = rank_zero_only(_debug) +rank_zero_info = rank_zero_only(_info) +rank_zero_warn = rank_zero_only(_warn) diff --git a/EE/paddlemetric/src/paddlemetrics/wrappers/__init__.py b/EE/paddlemetric/src/paddlemetrics/wrappers/__init__.py new file mode 100644 index 000000000..d74928f6a --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/wrappers/__init__.py @@ -0,0 +1,16 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from paddlemetrics.wrappers.bootstrapping import BootStrapper # noqa: F401 +from paddlemetrics.wrappers.multioutput import MultioutputWrapper # noqa: F401 +from paddlemetrics.wrappers.tracker import MetricTracker # noqa: F401 diff --git a/EE/paddlemetric/src/paddlemetrics/wrappers/bootstrapping.py b/EE/paddlemetric/src/paddlemetrics/wrappers/bootstrapping.py new file mode 100644 index 000000000..6a3e7b16b --- /dev/null +++ b/EE/paddlemetric/src/paddlemetrics/wrappers/bootstrapping.py @@ -0,0 +1,173 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
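+
+# A rough intuition for the sampler below (indices are illustrative only):
+# _bootstrap_sampler(5, "poisson") repeats each of the indices 0..4 a number of
+# times drawn from Poisson(1), e.g. tensor([0, 0, 2, 3, 3, 4]), which
+# approximates resampling with replacement once batches are reasonably large.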
+from copy import deepcopy
+from typing import Any, Callable, Dict, Optional, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor, nn
+
+from paddlemetrics.metric import Metric
+from paddlemetrics.utilities import apply_to_collection
+from paddlemetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_7
+
+
+def _bootstrap_sampler(
+    size: int,
+    sampling_strategy: str = "poisson",
+) -> Tensor:
+    """Resample a tensor along its first dimension with replacement.
+
+    Args:
+        size: number of samples
+        sampling_strategy: the strategy to use for sampling, either ``'poisson'`` or ``'multinomial'``
+
+    Returns:
+        resampled tensor
+
+    """
+    if sampling_strategy == "poisson":
+        p = B.distributions.Poisson(1)
+        n = p.sample((size,))
+        return B.arange(size).repeat_interleave(n.long(), dim=0)
+    if sampling_strategy == "multinomial":
+        idx = B.multinomial(B.ones(size), num_samples=size, replacement=True)
+        return idx
+    raise ValueError("Unknown sampling strategy")
+
+
+class BootStrapper(Metric):
+    def __init__(
+        self,
+        base_metric: Metric,
+        num_bootstraps: int = 10,
+        mean: bool = True,
+        std: bool = True,
+        quantile: Optional[Union[float, Tensor]] = None,
+        raw: bool = False,
+        sampling_strategy: str = "poisson",
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ) -> None:
+        r"""
+        Uses `Turn a Metric into a Bootstrapped`_ to automate the process of getting confidence intervals
+        for metric values. This wrapper class basically keeps multiple copies of the same base metric in
+        memory and whenever ``update`` or ``forward`` is called, all input tensors are resampled
+        (with replacement) along the first dimension.
+
+        Args:
+            base_metric:
+                base metric class to wrap
+            num_bootstraps:
+                number of copies to make of the base metric for bootstrapping
+            mean:
+                if ``True`` return the mean of the bootstraps
+            std:
+                if ``True`` return the standard deviation of the bootstraps
+            quantile:
+                if given, returns the quantile of the bootstraps. Can only be used with
+                pytorch version 1.7 or higher
+            raw:
+                if ``True``, return all bootstrapped values
+            sampling_strategy:
+                Determines how to produce bootstrapped samplings. Either ``'poisson'`` or ``'multinomial'``.
+                If ``'poisson'`` is chosen, the number of times each sample will be included in the bootstrap
+                will be given by :math:`n\sim Poisson(\lambda=1)`, which approximates the true bootstrap distribution
+                when the number of samples is large. If ``'multinomial'`` is chosen, we will apply true bootstrapping
+                at the batch level to approximate bootstrapping over the whole dataset.
+            compute_on_step:
+                Forward only calls ``update()`` and returns ``None`` if this is set to ``False``.
+            dist_sync_on_step:
+                Synchronize metric state across processes at each ``forward()``
+                before returning the value at the step
+            process_group:
+                Specify the process group on which synchronization is called.
+                default: ``None`` (which selects the entire world)
+            dist_sync_fn:
+                Callback that performs the allgather operation on the metric state. When ``None``, DDP
+                will be used to perform the allgather.
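+
+        Note that with the default ``'poisson'`` strategy each bootstrap copy
+        may see slightly more or fewer samples than the batch size on a given
+        update, since the number of repeats per row is itself random.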
+
+        Example::
+            >>> from pprint import pprint
+            >>> from paddlemetrics import Accuracy, BootStrapper
+            >>> _ = B.manual_seed(123)
+            >>> base_metric = Accuracy()
+            >>> bootstrap = BootStrapper(base_metric, num_bootstraps=20)
+            >>> bootstrap.update(B.randint(5, (20,)), B.randint(5, (20,)))
+            >>> output = bootstrap.compute()
+            >>> pprint(output)
+            {'mean': tensor(0.2205), 'std': tensor(0.0859)}
+
+        """
+        super().__init__(compute_on_step, dist_sync_on_step, process_group, dist_sync_fn)
+        if not isinstance(base_metric, Metric):
+            raise ValueError(
+                "Expected base metric to be an instance of paddlemetrics.Metric" f" but received {base_metric}"
+            )
+
+        self.metrics = nn.ModuleList([deepcopy(base_metric) for _ in range(num_bootstraps)])
+        self.num_bootstraps = num_bootstraps
+
+        self.mean = mean
+        self.std = std
+        if quantile is not None and not _TORCH_GREATER_EQUAL_1_7:
+            raise ValueError("quantile argument can only be used with pytorch v1.7 or higher")
+        self.quantile = quantile
+        self.raw = raw
+
+        allowed_sampling = ("poisson", "multinomial")
+        if sampling_strategy not in allowed_sampling:
+            raise ValueError(
+                f"Expected argument ``sampling_strategy`` to be one of {allowed_sampling}"
+                f" but received {sampling_strategy}"
+            )
+        self.sampling_strategy = sampling_strategy
+
+    def update(self, *args: Any, **kwargs: Any) -> None:
+        """Updates the state of the base metric.
+
+        Any tensor passed in will be bootstrapped along dimension 0.
+        """
+        for idx in range(self.num_bootstraps):
+            args_sizes = apply_to_collection(args, Tensor, len)
+            kwargs_sizes = list(apply_to_collection(kwargs, Tensor, len))
+            if len(args_sizes) > 0:
+                size = args_sizes[0]
+            elif len(kwargs_sizes) > 0:
+                size = kwargs_sizes[0]
+            else:
+                raise ValueError("None of the input contained tensors, so could not determine the sampling size")
+            sample_idx = _bootstrap_sampler(size, sampling_strategy=self.sampling_strategy).to(self.device)
+            new_args = apply_to_collection(args, Tensor, B.index_select, dim=0, index=sample_idx)
+            new_kwargs = apply_to_collection(kwargs, Tensor, B.index_select, dim=0, index=sample_idx)
+            self.metrics[idx].update(*new_args, **new_kwargs)
+
+    def compute(self) -> Dict[str, Tensor]:
+        """Computes the bootstrapped metric values.
+
+        Always returns a dict of tensors, which can contain the following keys: ``mean``, ``std``, ``quantile`` and
+        ``raw`` depending on how the class was initialized.
+        """
+        computed_vals = B.stack([m.compute() for m in self.metrics], dim=0)
+        output_dict = {}
+        if self.mean:
+            output_dict["mean"] = computed_vals.mean(dim=0)
+        if self.std:
+            output_dict["std"] = computed_vals.std(dim=0)
+        if self.quantile is not None:
+            output_dict["quantile"] = B.quantile(computed_vals, self.quantile)
+        if self.raw:
+            output_dict["raw"] = computed_vals
+        return output_dict
diff --git a/EE/paddlemetric/src/paddlemetrics/wrappers/multioutput.py b/EE/paddlemetric/src/paddlemetrics/wrappers/multioutput.py
new file mode 100644
index 000000000..789445be2
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/wrappers/multioutput.py
@@ -0,0 +1,165 @@
+from copy import deepcopy
+from typing import Any, Callable, List, Optional, Tuple
+
+import paddleext.torchapi as B
+from paddleext.torchapi import nn
+
+from paddlemetrics import Metric
+from paddlemetrics.utilities import apply_to_collection
+
+
+def _get_nan_indices(*tensors: B.Tensor) -> B.Tensor:
+    """Get indices of rows along dim 0 which have NaN values."""
+    if len(tensors) == 0:
+        raise ValueError("Must pass at least one tensor as argument")
+    sentinel = tensors[0]
+    nan_idxs = B.zeros(len(sentinel), dtype=B.bool, device=sentinel.device)
+    for tensor in tensors:
+        permuted_tensor = tensor.flatten(start_dim=1)
+        nan_idxs |= B.any(B.isnan(permuted_tensor), dim=1)
+    return nan_idxs
+
+
+class MultioutputWrapper(Metric):
+    """Wrap a base metric to enable it to support multiple outputs.
+
+    Several paddlemetrics metrics, such as :class:`paddlemetrics.regression.spearman.SpearmanCorrcoef`, lack support
+    for multioutput mode. This class wraps such metrics to support computing one metric per output.
+    Unlike specific torchmetric metrics, it doesn't support any aggregation across outputs.
+    This means if you set `num_outputs` to 2, `compute()` will return a Tensor of dimension
+    (2, ...) where ... represents the dimensions the metric returns when not wrapped.
+
+    In addition to enabling multioutput support for metrics that lack it, this class also supports, albeit in a crude
+    fashion, dealing with missing labels (or other data). When ``remove_nans`` is passed, the class will remove the
+    intersection of NaN containing "rows" upon each update for each output. For example, suppose a user uses
+    `MultioutputWrapper` to wrap :class:`paddlemetrics.regression.r2.R2Score` with 2 outputs, one of which
+    occasionally has missing labels. On every update the wrapper then drops, for both outputs, all rows that contain
+    a NaN in either output, so that metrics like ``R2Score`` only ever see complete rows.
+
+    Args:
+        base_metric:
+            Metric being wrapped.
+        num_outputs:
+            Expected dimensionality of the output dimension. This parameter is
+            used to determine the number of distinct metrics we need to track.
+        output_dim:
+            Dimension on which output is expected. Note that while this provides some flexibility, the output
+            dimension must be the same for all inputs to update. This applies even for metrics such as `Accuracy`
+            where the labels can have a different number of dimensions than the predictions. This can be worked
+            around if the output dimension can be set to -1 for both, even if -1 corresponds to different dimensions
+            in different inputs.
+        remove_nans:
+            Whether to remove the intersection of rows containing NaNs from the values passed through to each
+            underlying metric. Proper operation requires all tensors passed to update to have dimension ``(N, ...)``
+            where N represents the length of the batch or dataset being passed in.
+        squeeze_outputs:
+            If true, will squeeze the 1-item dimensions left after `index_select` is applied.
+            This is sometimes unnecessary but harmless for metrics such as `R2Score` but useful
+            for certain classification metrics that can't handle additional 1-item dimensions.
+        compute_on_step:
+            Whether to recompute the metric value on each update step.
+        dist_sync_on_step:
+            Required for distributed training support.
+        process_group:
+            Specify the process group on which synchronization is called.
+            default: ``None`` (which selects the entire world)
+        dist_sync_fn:
+            Required for distributed training support.
+
+    Example:
+
+        >>> # Mimic R2Score in `multioutput`, `raw_values` mode:
+        >>> import paddleext.torchapi as B
+        >>> from paddlemetrics import MultioutputWrapper, R2Score
+        >>> target = B.tensor([[0.5, 1], [-1, 1], [7, -6]])
+        >>> preds = B.tensor([[0, 2], [-1, 2], [8, -5]])
+        >>> r2score = MultioutputWrapper(R2Score(), 2)
+        >>> r2score(preds, target)
+        [tensor(0.9654), tensor(0.9082)]
+        >>> # Classification metric where prediction and label tensors have different shapes.
+        >>> from paddlemetrics import BinnedAveragePrecision
+        >>> target = B.tensor([[1, 2], [2, 0], [1, 2]])
+        >>> preds = B.tensor([
+        ...     [[.1, .8], [.8, .05], [.1, .15]],
+        ...     [[.1, .1], [.2, .3], [.7, .6]],
+        ...     [[.002, .4], [.95, .45], [.048, .15]]
+        ... ])
+        >>> binned_avg_precision = MultioutputWrapper(BinnedAveragePrecision(3, thresholds=5), 2)
+        >>> binned_avg_precision(preds, target)
+        [[tensor(-0.), tensor(1.0000), tensor(1.0000)], [tensor(0.3333), tensor(-0.), tensor(0.6667)]]
+    """
+
+    is_differentiable = False
+
+    def __init__(
+        self,
+        base_metric: Metric,
+        num_outputs: int,
+        output_dim: int = -1,
+        remove_nans: bool = True,
+        squeeze_outputs: bool = True,
+        compute_on_step: bool = True,
+        dist_sync_on_step: bool = False,
+        process_group: Optional[Any] = None,
+        dist_sync_fn: Callable = None,
+    ):
+        super().__init__(
+            compute_on_step=compute_on_step,
+            dist_sync_on_step=dist_sync_on_step,
+            process_group=process_group,
+            dist_sync_fn=dist_sync_fn,
+        )
+        self.metrics = nn.ModuleList([deepcopy(base_metric) for _ in range(num_outputs)])
+        self.output_dim = output_dim
+        self.remove_nans = remove_nans
+        self.squeeze_outputs = squeeze_outputs
+
+    def _get_args_kwargs_by_output(
+        self, *args: B.Tensor, **kwargs: B.Tensor
+    ) -> List[Tuple[B.Tensor, B.Tensor]]:
+        """Get args and kwargs reshaped to be output-specific and (maybe) having NaNs stripped out."""
+        args_kwargs_by_output = []
+        for i in range(len(self.metrics)):
+            selected_args = apply_to_collection(
+                args, B.Tensor, B.index_select, dim=self.output_dim, index=B.tensor(i, device=self.device)
+            )
+            selected_kwargs = apply_to_collection(
+                kwargs, B.Tensor, B.index_select, dim=self.output_dim, index=B.tensor(i, device=self.device)
+            )
+            if self.remove_nans:
+                args_kwargs = selected_args + tuple(selected_kwargs.values())
+                nan_idxs = _get_nan_indices(*args_kwargs)
+                selected_args = [arg[~nan_idxs] for arg in selected_args]
+                selected_kwargs = {k: v[~nan_idxs] for k, v in selected_kwargs.items()}
+
+            if self.squeeze_outputs:
+                selected_args = [arg.squeeze(self.output_dim) for arg in selected_args]
+            args_kwargs_by_output.append((selected_args, selected_kwargs))
+        return args_kwargs_by_output
+
+    def update(self, *args: Any, **kwargs: Any) -> None:
+        """Update each underlying metric with the corresponding output."""
+        reshaped_args_kwargs = self._get_args_kwargs_by_output(*args, **kwargs)
+        for metric, (selected_args, selected_kwargs) in zip(self.metrics, reshaped_args_kwargs):
+            metric.update(*selected_args, **selected_kwargs)
+
+    def compute(self) -> List[B.Tensor]:
+        """Compute metrics."""
+        return [m.compute() for m in self.metrics]
+
+    def forward(self, *args: Any, **kwargs: Any) -> Any:
+        """Call underlying forward methods and aggregate the results if they're non-null.
+
+        We override this method to ensure that state variables get copied over on the underlying metrics.
+        """
+        results = []
+        reshaped_args_kwargs = self._get_args_kwargs_by_output(*args, **kwargs)
+        for metric, (selected_args, selected_kwargs) in zip(self.metrics, reshaped_args_kwargs):
+            results.append(metric(*selected_args, **selected_kwargs))
+        if results[0] is None:
+            return None
+        return results
+
+    def reset(self) -> None:
+        """Reset all underlying metrics."""
+        for metric in self.metrics:
+            metric.reset()
diff --git a/EE/paddlemetric/src/paddlemetrics/wrappers/tracker.py b/EE/paddlemetric/src/paddlemetrics/wrappers/tracker.py
new file mode 100644
index 000000000..b2b939d91
--- /dev/null
+++ b/EE/paddlemetric/src/paddlemetrics/wrappers/tracker.py
@@ -0,0 +1,127 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from copy import deepcopy
+from typing import Any, Tuple, Union
+
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor, nn
+
+from paddlemetrics.metric import Metric
+
+
+class MetricTracker(nn.ModuleList):
+    """A wrapper class that helps keep track of a metric over time and implements several useful methods. The
+    wrapper implements the standard `update`, `compute`, `reset` methods that just call the corresponding method of
+    the currently tracked metric. However, the following additional methods are provided:
+
+    - ``MetricTracker.n_steps``: number of metrics being tracked
+
+    - ``MetricTracker.increment()``: initialize a new metric for being tracked
+
+    - ``MetricTracker.compute_all()``: get the metric value for all steps
+
+    - ``MetricTracker.best_metric()``: returns the best value
+
+    Args:
+        metric: instance of a paddlemetrics modular metric to keep track of at each timestep.
+        maximize: bool indicating if higher metric values are better (`True`) or lower
+            is better (`False`)
+
+    Example:
+
+        >>> from paddlemetrics import Accuracy, MetricTracker
+        >>> _ = B.manual_seed(42)
+        >>> tracker = MetricTracker(Accuracy(num_classes=10))
+        >>> for epoch in range(5):
+        ...     tracker.increment()
+        ...     for batch_idx in range(5):
+        ...         preds, target = B.randint(10, (100,)), B.randint(10, (100,))
+        ...         tracker.update(preds, target)
print(f"current acc={tracker.compute()}") # doctest: +NORMALIZE_WHITESPACE + current acc=0.1120000034570694 + current acc=0.08799999952316284 + current acc=0.12600000202655792 + current acc=0.07999999821186066 + current acc=0.10199999809265137 + >>> best_acc, which_epoch = tracker.best_metric(return_step=True) + >>> tracker.compute_all() + tensor([0.1120, 0.0880, 0.1260, 0.0800, 0.1020]) + """ + + def __init__(self, metric: Metric, maximize: bool = True) -> None: + super().__init__() + if not isinstance(metric, Metric): + raise TypeError("metric arg need to be an instance of a paddlemetrics metric" f" but got {metric}") + self._base_metric = metric + self.maximize = maximize + + self._increment_called = False + + @property + def n_steps(self) -> int: + """Returns the number of times the tracker has been incremented.""" + return len(self) - 1 # subtract the base metric + + def increment(self) -> None: + """Creates a new instace of the input metric that will be updated next.""" + self._increment_called = True + self.append(deepcopy(self._base_metric)) + + def forward(self, *args, **kwargs) -> None: # type: ignore + """Calls forward of the current metric being tracked.""" + self._check_for_increment("forward") + return self[-1](*args, **kwargs) + + def update(self, *args, **kwargs) -> None: # type: ignore + """Updates the current metric being tracked.""" + self._check_for_increment("update") + self[-1].update(*args, **kwargs) + + def compute(self) -> Any: + """Call compute of the current metric being tracked.""" + self._check_for_increment("compute") + return self[-1].compute() + + def compute_all(self) -> Tensor: + """Compute the metric value for all tracked metrics.""" + self._check_for_increment("compute_all") + return B.stack([metric.compute() for i, metric in enumerate(self) if i != 0], dim=0) + + def reset(self) -> None: + """Resets the current metric being tracked.""" + self[-1].reset() + + def reset_all(self) -> None: + """Resets all metrics being tracked.""" + for metric in self: + metric.reset() + + def best_metric(self, return_step: bool = False) -> Union[float, Tuple[int, float]]: + """Returns the highest metric out of all tracked. + + Args: + return_step: If `True` will also return the step with the highest metric value. + + Returns: + The best metric value, and optionally the timestep. + """ + fn = B.max if self.maximize else B.min + idx, max = fn(self.compute_all(), 0) + if return_step: + return idx.item(), max.item() + return max.item() + + def _check_for_increment(self, method: str) -> None: + if not self._increment_called: + raise ValueError(f"`{method}` cannot be called before `.increment()` has been called") diff --git a/EE/paddlemetric/src/setup.py b/EE/paddlemetric/src/setup.py new file mode 100644 index 000000000..8d7dd2d0e --- /dev/null +++ b/EE/paddlemetric/src/setup.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- +################################################################################ +# +# Copyright (c) 2020 Baidu.com, Inc. All Rights Reserved +# +################################################################################ +""" +Setup script. 
+ +Authors: sunmingming01(sunmingming01@baidu.com) +Date: 2020/12/31 12:33:34 +""" + +from setuptools import setup, find_packages + +with open('README.md') as readme_file: + README = readme_file.read() + +setup_args = dict( + name='paddlemetrics', + version='1.0.0-beta', + description='Metrics library for paddle, porting from torch metrics.', + long_description_content_type="text/markdown", + long_description=README, + license='Apache', + packages=find_packages(include=["paddlemetrics", "paddlemetrics.*"]), + author='Mingming Sun', + author_email='sunmingming01@baidu.com', + keywords=['Deep Learning', 'Paddlepaddle'], + url='', + download_url='' +) + +install_requires = [ +] + +if __name__ == '__main__': + setup(**setup_args, install_requires=install_requires) \ No newline at end of file diff --git a/EE/paddlemetric/src/tests/__init__.py b/EE/paddlemetric/src/tests/__init__.py new file mode 100644 index 000000000..b56a90645 --- /dev/null +++ b/EE/paddlemetric/src/tests/__init__.py @@ -0,0 +1 @@ +from tests.helpers.testers import BATCH_SIZE, NUM_BATCHES, NUM_PROCESSES, DummyMetric, MetricTester # noqa: F401 diff --git a/EE/paddlemetric/src/tests/audio/__init__.py b/EE/paddlemetric/src/tests/audio/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/EE/paddlemetric/src/tests/audio/examples/audio_speech.wav b/EE/paddlemetric/src/tests/audio/examples/audio_speech.wav new file mode 100644 index 0000000000000000000000000000000000000000..0fa4e9e7f978e56a655e754d068b69d625dc9a3f GIT binary patch literal 99244 zcmeEu1)CH(*LISs>h5u2W*7!{cOTr{-F1P%-DPlhmt~Q~9Tr{O9TsTuB*DMOH#*jax7`tpl;p2r&H3dM%y}l293-SN(iCY;;Z6d5+RI)lHNT=_K3$l z|Nry=2jqd_Tf=cU3BYE@kw38!73Ua;BCiIK z5S(}5ib?{pg^`>j2PsGjlTxHO_Ohf5sX|JS()f)fZv$3Qa_E=JbR3(-1 zzY-~m`^w_(qIkYA$xjNABG?M~ehZP@*a`s7Xl!95Bgu+A6wpQjp5JgXAIwsgg>Vb6 z&_#+!7KtJizw#(mqzRAsA%0=|DZb+9n|P1ygZPa7hwtc>cr8Bqj^2ou;;DEmUWs?& z1&&^0dxGQFIQLP!!Twr&$My*=|HPhzEln7Bvne!CD$EC63?R-yvSTj<=<||1_$>?C zNE#HyUWpX*ZH0Vz7b7LHmBN)`prXXT5T4A9f4Q(llT2tW18|N&>yqAq;DA5)CHbKX zLec<9l1LC=uzd%N&j8~?eBTt;#U*i3ToKoBbQQ;^#W`_GoEQIz8{)pWCGLuQzU>k2 zzbT$$dxrf1uDk&SKBL`FKCC~00_mWnTev`93-2@_ME?Hi!}20T84-(SJuuRhNGghYJ9xm5hR zu$exNaUlc;Ha5s|Alk-XTv72Z6|^;QMc_@6&R%&5|4r-+(r^18R8c-SGXhT8r(%4( zDv16dDIxiu-^b^I;A_7B{m%p$jslhia3&|7$co>Lpj|ffxG-QFh_)R*NH}2g0JjuC z{mTb=GL93lNn9kJKY-CE@J90LEqL|XEvTf(noa+5ZcHV!1(!te|En zeB^o;40xVYJxNF3aXaz{=|ei< ze|ORz$8p&DklwhbjqiLM=|MW<-VQj|5l4~|ZGdZK&`U~xCA_5)-j){_=LYX%utoVa zaVU;t|CTZzj-x<8>cDphpbo;h5Xg%B4)W=lOulDxfL7tyv!lH%_|1)bVtf)Q`;ye0 zA^4vS7-#iqqkNFK0^qLHtWwt$2G%8f+Nl;`s|kM9$JQAAwGn9!{?x;F9Z;f;?@CM3 z5Wk(k!{*ppVef$Nw&1ATx&qrSWB`5#lOfm!VIM|@;>-wq%cG&7Rex;VL7P6fuNUbC zYIVnR9k90o)!X`{q#b@+;D1xRzYZX&0hp@ew+@af0m}L~t_Nz?#9k4ySQ%%_LQ*RN zqu+QHLa!_68&UG&TlPPRYbjt`0lVaKU9?f#hg~!9u^D*P6Ptu}FyI^kc*l`=GK0(~ zQ^^!E9Y@p295R#4CX2`dY%_4Q9KUPHGF+KSrjdER`zGVc7;NLv!UQtb*IpbM1zHTp zx1_>w;64!lr5s7w82~DD$G<;FKhS0n{tx!uIm~yDgkuzpv|+e1%6Ip0+}9Vh8-Tkd zJ%?fMjW-TQ+w$J=I39`b$+$8CPfh{`69MBmKr@Vt#y$is#G#F$cycuE8IJEb{FCqR zjiZ5h&oHzkx0$|oOU@0#)&<;`JnM-gDcvp5Pnv>$l7k%~1I^NZ46SN(KT~c5MnTtCH0k2_r zVl<8>pOI_Ix z^lS|&tqsYqhdVlf#vQSB!nG#QB~>8xWpSl3s3iN-V7zUZPpWOfO|52M}t4(aXcGT zUVygNqn%}BA$rL)ysHmdXbb&N1O2NgdRsBycu^aWxAFC_I_RUZK24PgJ+mVEa&w&T z1zJjZ8V@L@q2)PvljQjX;4%WO&IL!N<83Q^bQ}YSr{k{aIKK$+PXb-zK$mWSJPw#h zE=gT83e=GOtUoYl3kmE3N=iyM1wHFSW}D%-6^?6SlVeadKwS;mp$H(8Be&EUMe)BT zuGIlP(!MPV=qur=UTEzPP`E#iB;>vE{sExiU_c->Su1?E!CBdp#^Fjg@T@)VYlpoH zu1byB8fT;ymAyi?CC}9dKWpKAbpcUR-&PNAl-5xppM52*h&-UG)MW)=JxCj(B%Y}O z$f|;#6`{q8facN$FOBaqzT=9(OUhRZ+*txo$TlQ)Qs1)sXyxhCOL-Y!gp!3QwlaiHJp>SU;*q>>&lV7 
z5^$*uT1uTz%{S&t$^I?J<#4C8gQfM97h_dU$Vo1Y%~ALb#|RdNU4fOte50bj&l=#c z0(IDH8f>OWJQM2k3KXB6=#OzT1IE4#xDo{WPHxhl$$-0Z`9{BRJR!%#Og;!DcPoHO zO+Zbl&w79cvbVLz??A}nXh`K~NZV{|@%SBw{xJnoHX3x1{aB9h4gZH|ila!tml5Al81JQrl>yI&<4BJC!9F}x z*lY|on;W=0VaZ8bM#Fzg*nwRTo{)Z-fHxw2H4}Guaj!h1f_^sqN?Voz9tnX4SSTnO zglp7iWrYEj7|=Zuw2J}!(i6#ofAY7ePky8oQ51YE2Cf!?9A(9QF?g?}va}){`1S*o zl(+$f4eyX|QE*=o9{K#8CqAF&C2ZV}*rd-Q?OX%avw`QOhhXAY`fI5;BRvSG&n}eS zg9$h#MJ1#ZkOhGn(!vVBnPBYU;GOipq)jFLWQk`GIAzCu8Szj0WD;xXF-Qwl_`DP8 zRY=P{9rBq544l{v-xbUEYvBBEPc;cPY6`d{-!83O3oS@rR>gmgZ+Tw+lhPpHl?D8f z_Hk~|FAD!6K);OGbNILroapPBfPv(T28@Exez1?Td2l`wScduT%kJwZ zVYrqBPfA}T5Nbglsp(UK9W8K}S_bKQE7-F1`B_xHmhVlRS*_@mm2F zzkoM~tbGM9l7M{zxc3Mi%4cxrop=l$-t&1XPvCi8#g(VnpJ96m?^JrQk8$)G&wYfa z`W8I>hJWACru2G#fu6t6ioE&>-|5(7PY}WZjHLGz49?_4zb*opast=PHL_KpmR3Jt)zP--W5z- zkSZs(ckr8Ktl$wO^A3FObK)}m?c?H**e`aAtzrlE4PpznZP-?bzwy08>=Nt6W*i^J zy}QId-1)cIi=%_^)1|k5RGdLO=Rl#$;w+9Yz@I;Y?L6La6<+)Uc{0Pp?c05CWt{=pMRuCMQM>=1PhBN^3VJkf6Q<2 zOZ+K!@hFi`Y|0&1h#{{VKya1$)n^{?8*nW zlPzIGSW{M?#jtPmD4j{$(8AP9o{_DvE+#^&*M@935IK1VZr#KFuaD9<5dHYz>yK_o zP+p8zjWEvjhW?L-m9vrTB1g$?Q05}JPHtekM9yG4LjHz@G7nZq58oJ27X48r9?1P; zaT1(ei*1n@CH@etL``5530zg-f++cm1N0<0@r#I~|)EP-VJ9m*Q$7Mi5!9N4A{Vb?9kk+e+zBPVzDdrLtMCt= zKt6wvZ{!vEPQH>Pl1|)&QYQ(a0{4(f;9K`Bkdy>G}Bat@rk;A?L;-n9Vz zV-jpyX}kCG**eX98cu4oB7iy@bcnP$7_^zx<=>&B?)&0pm(V+9AJ{6^p(n3EU)Tsb z90IitpvNBrJuX6~o??t~VN?!8uawcPs?eStpkF2czn$b?@{oKci6n(MXbjCkOVE?_0OatU~GqCT)ogLZj}{&hy=6@#HE{^JI}#i=;OH}W=oCf9g9UWYg2 zhV`e_!iu+4>#+IQTw?AqZ_4#u#0laXLSo#7wWdtj#ELGa7g>BOuWBs!D zt#TyVC)w-StJu?QV{Hetk=hV#p1PJjC7;DJ-j`S4Hs02XH;b6fO<{C4p6Qv4E5-nG zl2wy$;R|^PpsNdxm83b?NyVnU)(+Xm*j%>%Y@6-b9e>!X*<s-_lG zqLi0x9o-Bav|SVxfubW{XH~O=={Bob$IVJ+b#s<^!HltbS*5I;)@19J70Mm_sI|#T zwSHQ+vDLF)o0h3qtIXnNb0blIswe1M^mY1S?=T+BQQg$63)Vh<3AFqOdH+No zt9$Gp{cia`@(&KI9au2%Ti_poY5qk5wgxs0>J!+`f1iDn)?2aYA+kv9w;BT9?B+0i zzh{d3l53NzqkENSt~Zxn%y?^VR~ zyxL*yy)CbOy6uwYRV`(?@`nA1vF$c=*LKlK#BjwLZe}rynJvv!!yiy)G0U10%xUIe zbBg)iEN;CuE1Ci3M`MH$gtzq9>*)pc>fTS@K05JM_Wtlb_tf$}_cE`ax1~;uXse_M zgPFVyQjtfQt!%I@^q(EHC}eJEmC&uByFyk3T?zaW5a@r*|F6I+fl9z=hrc$GO(n1R z2y2_U(}*>;7!hWO(cBy5$>`bP`PVzpXk#7Zk40u$gMDESF;ZP(nbaulzBbVo3#wF8 zdnh&8b@B*$e+_zLP7wn6I%zI4>_#CY(r92rnz^ki+=g-UsYoK3*jaW;$)Ju?kExlp zvD!whh;~P{X_>T}YJSyUp{yZY2rcv-_pgS$hVz|PPwR>Kr=-2f#`VxJJUPwRTz3F}LEvFaO&v{#Vxo4Ee?Md+-(~FqP ztx#c+U2K9P`RzUJNOZR@?@dE>tAe(hg&RZYopMT3UduxNV=N zYR%O)NC<*}CX8W=Y-(5kQzdMNAA zQdxDAI!&Fcc82T}P@Aa7m5qml+COtyFyFTx#R&XE`L#%SF@&=xy+x&F3{mKNlY&{M73f&(t zE;Dafk+7csA&se%9%W6G^~xJ1R4uHwR(q(!)t-=&M#>Df4YtJ+_)@z?bKwvV_ys_h zonN*(T5dDJEMm>KLih^)gV)9=wjE=tKdh-~uu~e-e`#jch*f7LSs1f1K?SAkJI0xl zbSaI4#hnCepeaV_|6oP;!B`t9()n3Ff*0bc)>-KGG8nVYn2XGA(EDx7j?iiqan#lv zZ=S(;P|_N1ZMI&3UOo7Beic?^ZH%0OumhT7Tpa}erXTDLoeZUSX#@6}%~MLMyVP%L zv{puIq7BpbY0OsM*3wqbR^8UtR@Fvr3*oSjP<4!e*_B6ZDlEnoG@5RO|8!S068m^2 zXpibvvU$>62bl6f$38V)LyF$uJJrZ;b~cZi$)HFVYr1vP`eEhafAGuPAzDC}y%)t` zfB#EP$HoL4uKyNcu$PQrFmw~xYzn+eX*WePcRl<#rIF>f?yuS^W%=j zyaOMEyO;AFfb1r`jm!KO&n&9J;+Y0pZYykx8;A|$g}rf!R7J0v#DbNn$_u41t=4L2Z=hAOs3&0mWLNI9iL5-hJf9Yzrx4FegT2^A1c|$R z4e!TmLIc0RSUuFLVdb*|tqkZ1CD1RLTXEKE>xgy7O0yii4DZcX@pJG8+Q4gi3-7Kt zB8n?vt$zXaYQtmO0_ugaN~{6v?W5iZHjIsD@oYAmf#1Qb1*jLoZqQ}$`*P49z-0-d zStViDroz@c2n(;dC@20S!jv0q73;@}gWvmTPZ~i_!v73L&zOOClp+iCGT=0`V0-F=Cc;gZCYl4Ue zAwS@KpM$q9{rUxvn=zn5cW}85H8;TeRGukcVNiav*7SI-#}aJ z=bNCl7V@QhJ>P>~@QSB$MP!A|SqGNXa9H7gfuk>>&2l5A5C@-WD}1n5@LC8BgU?cf zHlqXRFgltJhdd4ewT7UVwnpEsN-ILQ6^HKu1IHF)C%;j=V>Buz)=;{u=;Broj6c*H`p#l_%1hY-HlIy~$J~jPV*xCkK1LK_IUPwG(_+-0zJ%v89g%^&;87wx!o6ZC z{QFj-x+n!HbqF0=I0>5@eI>KVg?>^C)Qp1^EQKDp?2FjwplJ-eDH&U94j=9h^zAWd 
zH6EV*9Q=;=MPE7rZb_%C@Zc19J+iW6A1E;vJ-5Cn0?o(q@jyrDoC!Y>AQYkDo#jOb zQ0u+O34iknqCaiv0`$Z;z_c3l!k_FF3sQ;~J8YZB`UEGv6Y zZy+*Mf!gR@&pj-?SSGLyxXJjf0sdO$BB`^D;^wL1Nq$uO*Ky}0aqp?RPQX^@n8@R(LY;ueD@ zGP5aTc5#T%$;{439QVh)4bZb?CPCJl$chCSmxHa2u|Zaeq@yqWKs@4iO~4QQ{_sU2 zli-;rBR(Nx%oG^O8YdYAm$6ZqG0zSEzoakHDPw?fc>6T)WF=x0M-hYg53z(KViG%L zG@NFJhn@|6!5iNG>Mh$cqS!tlp9JTrTJf z8SzcS6`3DM1dqSqTh@EY=)MP6+`ciXlWURyN-y-ZS=PL@C$DtiuWFOyg`iojgLMFpieTM zmGx?=pcDsv1*FMDiyTKB{Zi&>WTr+|2FWUuP#?!-21-VgWQ1SVn1tY0<{iRuHV3v` z=*cqoSRE2s3);OIELz!@yMm&!&S4y)ULL-uSM)p zFJ(`*BlB4@ha&T2`M`BqSt9YT4+-iHy)np#_dMXb4mS8U=!88OKmW$QnH)qE?_b1l z&mby!8e{5N*d|A?9l$ub3-@lsXt@-Wnhfgo_T?hlK-*P@TuCX;2iXYq^%R-C@*v_W zb=+rYSy|Ee%vbd#a}lzd@dieyE0Bg8zWD4teBXz}NZHt>y?1a6$hK~~UNLNIhr^4P}Lu=6`&;oAA-DL8K)S;JYE7-8Vs2_VvuOOOT z9%JtT=7jf|U1`XU&^mN9dRlq(+DKSK6G7Q>kiDwp7a%T+@$MrcenmwlSl91h(R}Bp z`A)uwALKuI1B}W`FyhD@u*^XJfHut|?(ube9xuvEz`y!#t+du!J*=1Je$&ra zI}(sJ*d3f66cZ3e`1m0{q4~p4i;#86`m3&5~FH#8z*!$utKUEH?6`)*Q2y zu@e!!M7^I;1>W^p^RuyDpXF_Z2*DUcvuYxC)6M9BI8FmZB-eOvxr@3+J1;sPxjMN^ zyZ>>;yQ;Xac)siBtqCNLn%yD%7YF7FdKXwBuynv^{}=uP1D^-?4O^e#Ohk`}QsJM1 zZaSVQZ^WPG0q-MsarZiRPmj|Rg_up0C&hKmIn}w*wbWDHXv-_oEoy&zfM1;76~}D* zDa0^7u&?Zfa$kL=?XhLCU$k}5{FU3Jl(=ghG0TDSKdg%ISl1#-A4K0k%9BMDEX?9; zwqmM(+v+%q_!aeQ=J;w0)zVoEwIR2%giN7xSaF5A`ZEP zCS;Wu!f%*4j91>C-ld4cE|o$s9eT)Exj+_~HXT-lxZ z(&nb<$+uIKbn2O6?NSc-zYcvFF+A!>WWfw;Lv9By31}ABIe2i`wTOoqGi6H67!Z*& z_>FB0`KA|ey-ItXw#?~t*U>*4UCd2Jn%8nSa9wnsb9MAqv;Lvsb~P|QBqpqV=#QW} z{=03Tl>w{}Jfr%m2KiZQZ(`f2tRn}lReCK?Dfb?Cy!VljlfM*M;lE|0rAQm#_?V9t zyUBJoU(4kf;y2x|zT<#)nH3?G`39>mUny$Q3d%UO4B~z( z_eotUSQBlw{j?*W!*1KdV#OKbk|(P>*8Rn^&DhVMk^$%|h1n)3=jGOSv!C^eyU2Q_ znJvZs*-^?7ZX2llO|tMMh)QI(@{8Vdf^te-t`<=8)3zv)SY%n2O-v) zk29O;-93%n!`<~fO}*iIaebw?zUQNBlk-FR{`8#AWzLsQn=6+~b0s>Xoomxtr6#95 zP5s~;Y523|{&&JRM$e0R89g$hame6+gMRP*dk0?(56hT8W^l~Uj2FTu2OLm_T9@5R z(qdEBr@eRW)(2SYIN=kG2#@NVp0+N%rl)|_k>zw04~hw075X;#Ou$!rW%W38v(L=~{XAk>4;jc{6yk=v~d{Jc;aP6%d!oLYIk9&ds-oV+|Jr>1buJ*4Or* zR!RLz7m9sWcf@Djnn(Fa#2qInGnD15GQA-F;;j((ND^ylUS*K_TuoMjnMPh(7mZlG zowuI%ySJkrr+@Qq_XN0yIMwu4X{*zAr@Nh7UFThKu71wg^zW%9Q*tGDOumsa(`hp` z#SF+2*(Ih>=2FpC7!T;}xMNTC*TQyYtdaFX_Qb4HqaTGHu@@!n^pnngY2DKkT&MK1 zJO$%x5Wnkf?7W?loV+h>s+W>vTce;Ip)W(r1mE=QqZza!tdO7NBAclFXMc~l-*#JF z#S#z9R)~aBVdpQ+f6bj%S5c9cSGK5z8mV??9Z7j!)g;Dr-O!!JG4rQc-dw6@ z^UQZ1Oyj8y(q^Uq?Y!dp1S52Lda2a8Nl$+T{Th~7FSWAwJ2~#xIK$UWl{1%*o*7ob zzqgi6{c77Als9T0FVvF@2#sm(iR*gcp#(8swRrwlJwo z;+bT{y^W8xy$?zbOAV_Oyxft_>WOPsN8X5JP&(VrIC}flah%rLv998&8E*I)|CsH> zB6dX^Ywu^ztKFqfcz3gn(ZXEMYtW2{Gf%cP)E=@HqynF0UPacYi}jvYKosph?8?oM zoRda5-!%+gHp#B{j|5C-C}BITbB7`hO+*Rl%8ZieQ4hmHZjZC zJbm&-=jxitKiFdFdS0iQS}T2<`-qW4yobl~+*Ee)eAW7200rAbEKXtG?BZ?-uKkFJ6rd;%)1J5v`jZ-4m+6H5C3#1kpLD zf4C-E^8_=S*~eUB{m0*ml4PU!Wi>VOdu-0jsoPQ}r_@Sql6Ez1Y+9AnlS$ta_Wua} z5&LU*dL3(;%`dE9^r4v18P9}Vwg1P4(qZbEfT@vHv!~~&oTpK?^oSI{<7A1x&Q-&e z-SbXgWEBu4L|?0h*PYfiDK4Qv!uX^p*GsFub}67qNc)h6fqU$63PmKRh|<>9$!|o! 
z%z){Bx|WqS7qgKQDrFt#qsU^`Pvz=DRaO2Y_jn#_tvT14Ckin?ExUc5y@>6DQim=^ zPQ%L`hy*WTCdHvQG0fzKrj(Hk+G`g2p05$8Eeda@c7I zTOTnWdW*O}q}NZIoO&fCcgm3D^@#%$R{eUPWK0jTX`#YvgcuKvmevuT0eP(gMyhLM>h{E;3EdM0C!I}K zjMXGuYwRfN_s%|1>!BQAx0P|A%$eo^^Q<*Q^q_MTKdqzoms(UAMO%=cqBZG5&$7YD;MrINNwnS?gOJfWrXMmM zfL8OZbkk;9x=r8e?c!bL?XBl9z8TYy7ujz9FqY`0y%*e0XWR5wsqWjZ}^&!X#W^3AL=lc*g6Fd&3Gc4p0iqx%rVo#;vGk6SKi$!#`lQl zfMO6ywN88Qq_)?HijG&<4F@`y4@8}Eu>>;$&hS; zvmCRv_sXBj814gWfa#pc{js4dav&^vZCMIH1Zh_y>Xr_ z?q$yY={?Zj7o`kM&YM&@sY7!0v^Jgx*1v4N<6z*fpe}*E{U+FEXccT1{0@dZj(ikT zFzfQn-7`)Is}Ydf=4SiRmlo3~iuzA}OMmWC(t?v8BsNMKnBs8e_xf2|#W&KQ=0*m% z2dPebD;sUa{4)fN2(A=7An?22J^NPM2HR8naKCH*MFUj-#r7F$91Rd{tzKp)GstpV z&-qNm`u;-yIgG64814_B)=SE;N2oIRNVkxIq7ENz)kUPe6#Ri#d@cWG?KejlmGl|j zQr^1AIi`3k=|x~0yFE$nb*}QxEooI#gHlE%O-cNb*gqxOm1uk=Q*6xx)&)BKr`Qvf zS**CS()KhkCBuVE9kNV`*&5kDq?+GzRZ(gvx76;);3ukSG?r8Sv2$baToKB8S{x`8>Vr=sDwCmpWVxM*) z;9JoCz>810%&Q{{1^;WON>`eVomQ@EIc(F_mGqHy)ceud zC+$sYN?MR>py!2tz+8mS6abYqhe?FCft6|I8kvMj^X3&elKZQpBN{HCddQ-e$NRSk3;ga-O9rr?s25RoZ-( z!e{GqUC+|)q;5>>?5yv8;Ysoy(l6*U^i%qDe-Yz$qSPr5_=~) zlVhA+^!{Rt`pItEE831I|FBr)we4uohzNJ4CRx_Ue2?51vd;gK{iv;)J+q^tBf^%K z<>8aO!<@%cSEsZ{-J4d|X6!Xs;!G{tR{$&S!-neH}Yr5p@0v5?fhp2><|1IP|o3} zoV0FwBAm&oTQOD_N$;F~B<&Hr5_{@_)Ya*u-SPSoD~i-$L8z3ts{LilXD{YB?6)^y zX;7o!-obwcod}rechP>{R@P>-9oN#-ii(|95~rX+)|!2+QM`)SiFo%+ROihWp{Ojo zMrXmUR9OHmLi&nZ7)N{aPCSzT1qsby<}q^UUA=QWF85J)SNBQRC}*+sS*b-+9wk3X zZQ}gbO{`GrQRZkz6_Y)nUFbITo8OUF&O4SmsyY_iC)!?P)LBTnhotYg-3YZf$7q`5{9fiF4Uwaa-Zy>^wQ~loRRLC?&;oAW>uaO zk?*5qAVz^Yid#G9cQoiy==boXa6QZn-4og{tV-C$umNEiL-qy~v5!{%ASqS@^M>Bf zTh-0eo2TtbxtY8-`D9A@wBhN6UAx?Qyc6}8#uKZ%xInhBiRw68fMcNFNB>&^JpvB| zjtxADxJ4DeFh_!IfmT6vvWc)`XY=Y-C-by1*Vtj`MrE^yx!L?|wu4vP1$}HiqJ#?K zi&+rw{sb*E7geYYc@OIkv#OC>U+(?k8S3fkndF(_>F$nmZ}f(k%lK79ly-{|$c~N` zKj}klf!~|J^q>ksHv%sBR|u#P*f2O(XjEvs;J5w@?C;bWY&PvpBWVc9%`X|%yhYt* zU4OeSxo3JbZ!Yf>PY+K;Paf|^y`*`>a`AhjJffk|)Sw2-q2|+0YuB}cTFm zr_^}WU#+PuKn>?qj0vr1K^jUvpqBJIGEM7H9h-#Qe{pdMTBIR#M$8IZUcokYTr;#)<`>|W>xnnBY|-6 zA#x{;5X~)yIQJdV9<)4;?H$i8x*|W*9<|AzkQaZ(P2L!pus@N5oQTX#b6i`Bhs#riiBYLu7R&s-a&{ot8!2#B8>Ot%WA)$||zb ztQ5<|4EhsM))BN4GO>q{8ybSQWd*6n_{$TP`V*X2NoA;@kXhyK2TsP|J;*nad?75_k$#XUsoryvJZ3OODJ zBI0Ri;UuU(9J#tG;C>#|oaV=G5ma?&2h~c8rjWL!faWDK1R5grQOE|BLmp%{Dwyw} z#ZU0NpP=&hC2$TvRIdQ)GBP4>7>@7k$d<>V?l&I|LB;Y*V+6$ciPSOTOIlT z%&1HLj{3}f&{|XROnuZj=Rroqi+sy-ptBDfByKO%o3FyF-njB?=a z63C3T1{Y=^JG2@V7F&=TIf@bIJSyxjBQLTQ`4c(cZve7NvB=*QN8CcrXGup!My}Fu z4R5%M{OBcL_Dfb!+`>D4pojQ@2KkUVkaLK8Aa5+^3~qwtOh(VSfohH&h!dX#XBHt- zGadOTnR6e8p4kU)k~5*}V=s@qj;vpoGX>>LKsn#-cW!42q84kB*^zl2S*d;!9KMBY z&RJw}4x;T%IQJK@*@L>}_4v2Jhuu89e;ueb6MaX{9-9Qb;xHqnBW$D^$RFiJ&&=fG zo6LjC*;=w;=l#me+eFZ066~~PfOHG?4ah?628B)`M|Bvz<0!D$ ziK>SKzRL53;FO#n8xMNP%=m2ZZY++bpgo!I8;iEa;7xMAt(@`O4^YYZ#+`wqoK4;Y z**!UvyE^iF6~NQVI9m&uv)bs4M*G_%G*G%DVpHfTynynsK-;=axcr_k%2cxk`nhn7nBYYAiXA@7seTz|BwHjJ+1z?sL&6Rl1 zI-FbSYiSF8xZuv`>ukUT>!^O(5emIaRBh&#v9j>FJdHg(KqPv%kZeLlb#q0zKSE@)_#lu zRnRw|fU3uYocmscZa}Uj1XbY`NPlDs&(rN9KiX|ZW!3(2!0Hl5=yp^fFT`j+234qs z#TC?aT>`vyfa3r}CN{DMB%Cst#YR0m>OFWD?$*OFWimFSg!(6N=pR%rDKB$_`J(R4bh75||9 z5dkX7C$NRIEM^A{5Ia#x|C*H{^Nc+TY9vV^<$^UGeSRF<&pNUr$W%s=zj#4%T}-r^ zA)h)3IpOg%PE^LZQjpwo;(}QP_7iF$%*o`ZdXQg(pIn4oQ!*jXdzJj;tJogcA8W)K zSj;LZYzP zV_YnR`lnvx2ro$v(i^NHC>bv(TS=#jE|x~)lw;(HNH8_hLd{Gr@<{sLSitU!!hEGN z+$zcgNo)2~RJX3NGRTX*VHNo(mT0v!|yOeBQ2 zW)H+2+S&RJowJo5BnzxSO|_1i<+KeXOswGl@t$fkk&kq>#?l5#U+W9;6D@cXr7N?; z_c3S^n}I6OAZshdKu50e5MG4tW2)F_&R0glraefHScS*}MYVnqculOdRgk2zcifLu zh27ECcG>FgZm9OMuQ6MDi?T?y6)DT>nHylw3^wwp?NJGRULDUi8SBLrK2g=!Ld-+3i8i{D#C9+k#<$sr1nXFLf!*<}pS{T-`l7t?JmX8$NvNHf#U 
zDESHw4>0p<@w6*h43ADX-|-~dXY$b4Po{GREyf0mOQuboElOE^QDNGSzoPJ8&19v& z+JTzhiAF8D#(qky&`XMTw2^ujHM;Re4R%JGguMD&VZ$8Bx5N!A{1@HNClDv=Zd`zd zJOrO-CtU!Iw4Cy9X4e~$Wwv*NN@RE2hd1zXdaO>Xi*im6mK_kyRh#T5F8gwhFS3a<};hA^)YW+u@z zr3Ojj2WdTXt%$U5=5vgN^riU7PSIl4NK%Tqt?YcFwuETbpJWfMua~iP&2O+)7ug1)Zq;XH=u@#3ks&`Q|{(*(s~uC1b1$bf(Bc zchX9v6JiBf*;!hR)+dMgPdWy%zPz*rtH7I!3K&IWF>j}c`=hZ$-OL=UGg)WMUO+7k3eyd(R{*QmS5zhVl>ZY~qY*+yj^q2^ZDg=5JV_LxlN2h5`E zPjZIMC37$@pbfd8bc4k{#mdB|vnwpWQbz1BE6|egHYd_il(USeAAN}aT!-Z{UYmoo zB;_mSkR3!tc?0ee;YLH2T`gh6vy!|XEZj%111}nirjr|H3(O>`_t9Hb5^17h@6)+BP6^(4<}qJD}uRw_~tDtQCw zJ>)0Hp`T4vPZ&;bGh0(Fzj$e#H^a3%7$+{G zNb)TDm>g5qAl7q&cP5v#GD>@K+Pr5RLBw?cnJQw8KNX#s#GkqNbn7c;>H)+`P^Tuw zkr?&`cF_}-o8L4d*m(Og8DHYH#AxM|mTJE7D#{Z2iQiKCS=T^Aw^>Rt^MR(f5tBz@09StK-N8~P5hsf}8g{hYbba2v(dR{l*<5kJ$~MYo`uEXckARZ{uQ zvSgs+G|9si7R~bzPD|S!(XF0n;vqM+8!Ru6Cryne2~e*am7x9n#V)lF^g~0@ zR)2;0Wd~_Ha^4KaY{Q=FMlBB22fxhq=yN}nuH*}5k-3FUGu1v;n7C=oBP*4!^tH0U zi03nWGg-f|ePXIL5OWT9viC|2z&BMqrQKL8HN+&okGxg{?*{4kNV~99Bi_W^U1bi& z@^*-n_oT1jRky*s*u3mE9|S9DhWeHi*01utYH9Sfm8g4;Q|s_p@r&kUtF5!N5k`&` z$}q(%+S1j=T;9PpMHHh~;4lA&ip3+C2SCVIYdQ76Pd-N(%Yj^XYx)NI_W*f|>{4}7 zTge8=Fs(A88XGHegHC%eo35F;TeM}1@J5bE=rxv6X{f}C1y(o6)M;f2X-ETU0kIPE zDejQr>M637&xbc0sg#Fr+*Q1{hVUL#Q^wOk-iD0jH|beek6UPF@y04l(olDOo*l)w z`34!X>8Pn+2hVdh^5_TXPx24DOP7jGm>su>sp>hAXf70)NDg|7{s9~QyojK^SP9Zv z)P&X7OnkTg6IqlD$|j8JYv?O@+|}T%CcqPs>#CK5b>4?^_(|ts%j_3j;lGApuFpF$ z8Fh)1Nd;K&vr&;e0CQvGVcogKJXrqGXlFcpwmg7r3Au=>_b!MMJcn**PdCv^&=QNW zWu>i9p|=~J$pUzw!SGjBA-XaOm4CsASXPC9^A59;%8@0AWH`wVtUmAylD1xygEiQS zHp1NP&G4AkAqL`s^(Xz&e|$c+TrsH%=#i6_2iLX%XC3j50E`T^aDNWWEN%suIw2CX z0Y1-pVgRQs@Ca;(^lXQRz7*}pBA!1$#7L%!zD88tlSs zh~cR4xg${FDpvu#il}7*s#Z$ETda&I#6`R%Nlb(n)C4~JC)mWz;Cr?Pm2<%BsEO!9 zJ=p28T3SA_1MkcWuRb?Cia0Hnde5c%)*a z%!#{YZE`kTD~PkQ!Z|0}DFj^eAsUk(5E5L;kMnXRr#y(FFu+q5vC0^{#|112p3MyS zby%0OYCIFD6a`r2Tyq8gYXIx=@aOXb-T=_92BNnOaD{?CjR9qO;F}+IX)aK)E$)@8 zMU?;#n!zKCL>n1#M>}9r5E4=s=X)Yz&;mF#!X3@Pu{MBB&g*CaKei)qvEeOp-HQ^y zM9u)ni0=Y8L&yvGs~K@V7@lM*A}N`0Bv(L`YrV)SYPl+)tQ>a2zx`d`E30UeaHm{V zL{t>jdD4p=G(!r~D2;TOnvit{o}shBNwTD(k{yfKP5vNS=wnw_Ir{2kw(= z{lJq&JMvk%#;UBl{=J%!T+>n@F8&3O=!lGR_+8&2ZE~GPxdM(0|G&V>krnRAh-5y) zy>bN`xd!5Eoa2z#1mIxcjEwkw0lvwI!+pi>^hH1AIxK0x?G>K=g)^Ug?aBK31e{j@ zjT4#3pJ-FogUh(#FUYzR|K&QA$gSg?9XyxkQ*bQTq>=0E$m(dhx{ZWet}H3n7jl3K zexScxlPJiCsexyIuVWvMK`!KKhH^zB zc_te25&?>4$F=;Rqg-)Du78sc?aMV0^TFUvFmRG< zM@uft`1J2Jgn!eJ<1KP+BZ;$|vmy1Jl&s%Wk#w^GZ_7s~S;sHuB}j?Y@jkhJ=Qli^ z;KNz2Y@3W{W$fMtIAn#sTpLNslbjtP*FTc8GK!(!<${itG27D69i?$p2zn$I(jwQ# zlq(F&*%fk)$n4k(;>i;DC-XBod{4_&$9~TylIyF7xZHf>wcLl*jmNKBPoTqm%9F zwg~pcxTTKwZ_ftkYmJNKl*O%eYIgfsD^fo$mTQk-^Od7FEY0oorm%^&8tl9Hn>Hh< z@NRag^Vq-QAW3E|lwI!g4nmYAQ! zLzV#BW212JBK#g{O)mjcR&{D8M_YsSpMPKW{c4CHPURsjnB#+57 zWTZQ=qR3h;L-vi(n)n`#S=X)6=cZte;1uYYZ(TMXRHh+ISo%%mIz zA7-(bW~GCh7=th;u>$P}+iExYTU^5mI@eJRrTIpLM#yblhOdwhG3bKy99D|5$ORU$j`I~=$c#U0_Wk&99nC|e!+FdZKEC^mw=Kt{C? 
[GIT binary patch data (base85-encoded payload) omitted]
zR@7SND8X5;Y!N=*f3fF!N$2GH^4ZvR&}(|}hg~B)Z-tuVAoIDg9Yo(52J~>JzcC!H z&(%f|s|~@>BzVhvpi2KkEr;sr&^?_N=m$x&@@P-PT(pFoL!KzdtJ~DoF<0nFdh4vX zkT>WL-p(lV{qnbKU#owe^<~7DssB|?udQ5j+S7h=s|gk~Q`=KAd3)-OJpM)NvTT)?DhYfv&>N+o@v;*a^XLttDli*yNnQcqjM zjZNAr&1Drf{?X^c**nb|X00O$zAI*uD{x@71fTq!G(*}cCUP~vry0RmXu^BSw`G4L zK109hWHuxwf%jO0%_8zR=PeTLGF43p4BLxN%&~37{AiZePbqoibCGG0^5OKXQ&}UC z*B+1=NH_nx{Gs2w8}F-rIQ0I{w`JaUe_P~L?-vK({t(GTpNOT-)9Qj(T8zEx&guQb ze?Q@Ml90MQ?NPSAxd!F=kgH7oY5BY4Y?iXY-!E=Q!b4vzu^6=l9P#?<+Hl`+Be>uO zN1Wh@U{W-9WSU%FZ>^QmldWWG3DuE2hM#vcx_6dI7yT9e2R%6@OPubXA6MV^9-T^c zghc!{K*%q{*7Ei%p{VS zL9~zP1x0Kac=!Em3bgyv^3mw=uqSjW7znja|Lbd;ubaMp{xb7@+gGJumHN;-xZL(Q z9n^LqDRe}AO^@|3fsx*&?lqoruI=u9@mk`lq|u41D)ZE-==L+Ih(GOmi-e_)Hw;x*B*97m5$Z zFG@U_%4D~5RLj{h=}KJlxFK2J5cm8UBx%FlDmvLWC zf64o4^v5pmufM(c<}L2jl=sX38yISDO()LUeho|$C%@D>1YIeOUV_tCN#cPu$vC9fwZ1tTF#%7eaZWkwU(>W| zJ2#v!L}{zL_8_)8d;~OFCe|S4il${P`d;+w!Y@@m?|9$qefRgjzT5h)_J^Y%8-HsM z-l)7cB<*(Wp(Z;0g+9LLemQPqe9nX_$=kE7&e<#X_nbfG?3;6K_6w<5i93DOJ@4Ir zyI7$f`B_hig|gOXRSF)Jr>Ucr>T*|*$cxFpXlub1Or`38O)b;$JSTJ(=1Hrd`Q=XB zkd!ZRcfj;6i))vVlo$+LbjJx9^g;BSu0ccUYkQ=zT^Gr6_W!eUErU2SIO;_(`C7^(JfDm$4U5wM*F7u!UE(RG9(&v$Oh9 zTd(z%k4N8!Hig3B&5_U15wX&-jS&iDVJo9oa7i?3mZJ8v$GAjl8+Pi?F|D3+_wbVL z9a2emH+M(h;-ukeJ+lQ;@~0L~?Uht7{)jKb^-Zej`bW&hi!l7HKf>9lOvl-a7Hr0a49jzZGzjx#pBx+7NZ_t!5(0NQ= zr{b7{)NO~fhk^Mz2h;dt{1ffWR?x>b8n=xBW`a4|+F{b5r!~>9=%4Nvv2ONv3gion@|X1(!Xnl}3y~LYifsfMy{y~lLw%*b*5<$o zD(b1~^w?>6J9>B9p-Z7Lw9)n2HeJ>~+BK=4>CV_R@8M?h%3LYIZE_mzJeFApQHHmTnqF?xz4JYZI~|uNo=J$s@9>}UPfb{w(l)ha zav=VAAa@|e+eNC(w!-sjx+Q35mA~bR(TvcHte%g8czc79Ih5RILF+V~m1a_Q&_Ni1cApFQ|17B~vb02P(J!VA^Sif3v_H_FK z2n>>42pRLcWK}plN3bQCjdTM#FEaLSq$o;aOR@xs)w$>kykK6?TFKF9LacANNY*cz z3xn^%KSv6O2SxJ7?t$?0+$u*s=B`M`Jt@AHp7p*_fr<$elAC5Lp5tN;D|?xoDLH23 zn3+9q_Lpg2lZwRc@G#;7?hzW3o}jgOFw}-!N>%WrHiipD&POMyQKN&s72DPF=1{b~ z?gHbIVpgDCCJB7??R;P84l~(KY!{{`ybJHpmh?0IjOt8UP85{Gw&?i%Y}7I-O;o!< z-&~=8)!%5J^eNa$oG`yxFNp5!P~jKpu`B9o;Bk3pxd(&t_|a8ea?mK8Bpwwj3I(}z zcsf9YM~~)Ca9pd1yQMnrU9Rs^h7{(1cKC(G-FS;pq7>w$E<|_L%+Gg?*uk4Oy zF6*FE6TJHZOh@_{(pRb21xZ|f_%Ro7huIeFY4rMCp<0le?PbPUZI0qs#>-3P6eS6y zr$6-zP;9%P75gF)hizIv_5fE-_)&N(6cDEiJA^qxb);?Ia~HTC@FMh(T1#m{cfJ~X ziCM;!;Qr+!VpPm73c@V7T{9NcwVJUWA-i{RBF7gZ7s4ml=xUSM2@4NUn$;*=d zi*M~)?_TVA>$SXf{KI|4d~4miT^B?b|1bNBvEk5|jy&CPW(>WAl+dlD5WT6gcqUBX zjw6ZvfS!t$v|IdPp$C+_g{~i6qutj$58Mgf<=)SpYVOj~d~vYw7)+ofs<2bvqM<#G zLWg)nohT>9I>+9{DD{$d-&kmj0K0t__FDhoO8eKYZs#Z3lb@+F^i`q}HozO~c(O8D zj}{V7(FUYq2DpoNau+fWam^ZR6}CTGt>6||VMNWaRm!er=f=5egX6VFT6@j2Amoll z+sbAq!6}P#Ms@qV6*U)N3O3OR`Ky@#O>ruygY_wat1mtC27DpUD4*#~544X@OFEE} zo>DQTVv;ZMc7l}XNjM+)(YMs|!F5sk%G2CcP%yR-S4_ruq;^;K#co8!NbblFQ8qSM z`vzTOfLQ}X+DdlJmXM11$Zg^82+6MU?)&b=uKce1Vp)C)?!3vo#^>d9)7+Nds4cce2u&L+%N6JEG=CV=2EM;9sH${2)yzV;p?Jf3FaUa8ymF$b=GUOQ_s}kCcewLd@%SCEN zmPd+4jaX|C20|eAU$bocy8Sn(h#Qec_)O17|8$r|x`o+9O(Y-Ld!d+njbqxMvM;(d z@+P!6_$p|IDu-8Pt<5SPTplVK{uKEd8?R2&n&|7S+I9kY8TZRu`Z_mMs3txT{jTG# z8j>jGc0HA5ipzw{!alUA=N11JY6&ZZn(z+I<|lBK(DpbT3FFsnbFM6VlevyP@CJSq ze}^w3yyxL+KvUmkek|NV3+PKtQp-Ncq+ zI(is5>66Rl+ZEW1XxF`V-uf15eVrM?Dpnve8Tn3Kj81mMwB6HBl|Ihxj~O+ax!f}(4Wc`;dt&h zZYSPvWwBp>fvo2ewij0j$z54_=z7lTJILjA#9WIv=i z9`^;$gUcLnWq3;X)j*&4@qsJ8;l4b+O`dD+!O~dx;O}rhvxlis%m|8f4kKIppTfZN zk7+2%MGA+ugrdQYq4wbck)F{W%5Lql+1Z}z6h}wcYw&F{*=O7z{8~_)ha%@Hh9? 
z_XoO_W(#LPjk@QmDJ_&9iH)R6(k}Qmn=otO!n}yg_g;|Pyw+DEL$87)@G^9Y&C{=` zGnA(49Q6leO?0E5Iat50iN;@0K{L^imJfc~2kKX~vNm5SEeFB+DgfP}V629+27Zr2 zMj>Yl$i`v z>jQV1nNPgKMBBs8fWL4vb|+iGIG>LE^(N{R)rNjfFMtbXH~!Nu#*O^@N@fys8`t(8 zoSr(-y{Q9m0>8#H;1#x>C$SwO)qm7d@}%g6NVZ7%h!Fjb^oq+sU$eac9+0!n1!^b# zmFdNOM>eH2KN!B-HcTsUXMV;$x*J;C2RJi`D`2GZ)>|lP(~$5@aY|wn{t*uN=42ai zczcm6;K;89pI-;?w@D_AiBOKy7Mz~*@M-Nri*H#gW^^($ja$}UG?({vWYB=xqA~9a z>l18ovulZK92CoB>7;Z)tSFUrb#c$}-15-AH@>mn(XNaX5u2f`*NfPF(88FDI8J)mfbhF(jJK`t znD3o0PhfZ8f2o%Jo)d7;mtx&G^Bl8hSetZ1o2k~1T?iKsr3a-*k7x<7!oDjdwEg-w zysa);8O|_r9n*;KDDD?0q8s;(xLx|gb?V5TccnU5X0%;68Y&ka7ycF+6`YbWHN9VY)AX_FeKOx?6$@uX*Q*xV4}P(3 zgG}3ouFr?W2hw!+9!wRqE4#E+NEU_)^Mw(7S9UI)f#f$$tany}Jw3u%51-6r>qjG| zeO6CsIV=xZk?Mv7c0Ok^T-0T;Aq%2QxB@1QDqIunSD&JzF$&fAGGo!Rh-cu4{(|c} zn?a(DDz`Ze=QJHV&B;b@oaheOaNMHxw+3h?pR5?Ru$RoH2C3y&dPVbxOGS!CVxf%S zx#0NVyI_t`V(4P%b9h#4i_+J)=48PKUWnqUA27i$WmfQUuD0HNf&UW5Cb7wfllY_t ziMtXO#OIHH5!mco<-6{E?Y=FJ+K#<0yz_HZKVR3ruvx4@(*$i2X<#xq;M zLTZOr*8h;;cCiG-Iyd2PbC8U^k4_C zeGHcRNQ*`~zZ)FNBf!@W!vED5Ig5RCi2ey@p$YV8x&yNfY@R&e@UVPi;l8xi{fqZk zf42CI@pIy)#+{0Lhd+nnD#sE2e?7z8xn28(n|yBmBo_qH<2n5k*fR^ED0Vg`>W+3v zI}fVp-_h*R?~z^6c}j0|158!RDD~up@)c!~*3Z~veIka^^TBwo%gm$Vk#@Go>gX`3 zO8CL6Uu6fZtHu%;RNfM+73&v!5z8(I6kVC3?1}w_>Hm|`UmIwA z!F&|93Lx|HH*~Ry@P2h;+c6n<`p=^N0)zio+C<*H0iH$=ob5yh(ne~fG*uq`q*iBSzpiiyqf3Fag%`IYr5H z)mJ0XDv%l&=#P5NxFqSO__H)y3Sw>{-X7^!hbE}`ecPmiTv()XFtIE((u?FIj&4_lji$Omz@ zbX=pnv;3;Rp}(JRvNzqm&b83B-yQGi<}MCWqn}^Sc4Tv~ADDH_K02OCLx;*YY?M1- zn%-@kFueLlWx9Ma)>_V`JXiLqU(_vHTU~>b>>q8S`ONW9O~BCK%?@UZa9{Y#ViEBo z*MP0Zenr0ZFR-@9JFTI2e}uELh`vOAl$fA7dA=n{g)mhAX`){fd1I)=``^%%w~H;2~`f zin%OimukB%c>eKy@TL1o`Lq0o1Dyk>{D*z}yjMK7>%P=pwD`6B5Bw!u1@);-_K*7S zavFBszXu~3^V7ekAI;1YJ`lYu57lOyo6Jpmdvw`sH+*(2YCi|gn>bCpAzqe1;}+Ji zDm|O%3!eBFt+bw^*V0C)tJE~Du<}0KIQVO*adbU+7#H*}xCeqbsjr~wG5;|Uwk_9y zpN*NkylaEhU)m>a5=UXf*5oV&t00XK&N32ugv5^U&{r20t1;8 zFjty8Ppl&LHfy!vsK;U%;Q_&;S)tG$(IxURxssesHlnv9WkHes5c)klBl=yD%+pRT zY9gB^{wluV4Pk?{OuQuKbp7sG3^DA^Ot_4yLVL1B}?V}z$2XKD-&D22}nrGiP z|3?{nqWjfK@Lpfh(9LcfLpJKXwGDGnX=0HRabDSdjd+kucE~SdBS8dC#dGhF$|%oc z0p+$jQp?hM854~Tc#ChhK03QGzgKZcC)qk|^w(!YzrA7}GCx@l;2St$Cz8K{MW(f zU~uYGte)0vTwf=MHptG8WSX)ixSzPH>>eWl3&VE_^ZERH*x{N z8a2TXUZ`F&ldMi?R4ZY$(Yopf&}!cdT^OgJ7B(kSNs)L$b!NTXB{c3Sq9Huw3$Rn^ zLVQC|8-`P*h@#{gv%HdDNklIGjrkE|!XwsXqowvl*3>TAA!CJ89lYw}mPs8Yx|0Sq zMyM@l{CiitucK$SAaS#~{OBg^E-V*+!Y8v9yQoKKdOi+H*&cEP-JU&2jiH`EO?2Ts z`p#))bpa!MCKzj5)R*cx{kxiB?ZGC7pzjiKOmXbaWt`2tRD%68F_T@xv0QHTW_S88t$#FRoh%6?aoa1yP%Tvot*1Bsw)p}|{eUIE%7L8+8Ib^l_5>4qD z)DU~0wFwE(kIr^xJ{%91>F-=i_9A+C&XG&#D6yEhj;8GCT0!W%!|>$)V5Jcpvlw3d zE=VpcLvwW|TqUP5SDvQ&6Z3GkZ0y)ZqPa{hp&eG!l#{Vky@~9P9*G`}_~hMcPUkr7 z1Lf;qY6L$6RN;L73URfto#-S^X8KU0pwW!LH{O!pLEo{KL90m9+v1$mUVmoxH5X~0 zw3ft4Gy`sD457GW2}ilYJk92%HlY3M5?P%2PAo$!>03HruR%L|aXde^m`mZgrnIJV z1EZaJ$9{pgfL3crDy zZpU6{uA%FvIKP#tZQVE0wXe!nxJaj&JZ)K*vz&D#2AeZF=X-Q_FQPudDhOTDT8qv$Mvqe$~8 z-rhaaGnu#n!QI{69fI598Z7AIkl^m_y0}Ad*DTKBu1SbY#@f5@U0rS6R^1VjO!xQw z<-OlKgsk{sNc<$>&)X*iA{hthE|E z@$bl@EoyWkhQPP6gC30;;tg&TD6CKLH!tHVBK3HQO}5AJ_pwK5dwNDP@Xr3Y)TG=m3Y9OOR9@ zN0ec|G0nMrd}ru7gP42FPNJIe3yI{dK>o4m`QeV?;AHG?Ju_MI74&Qen0?e&@+Dc6 zW|^775$M~_Q^jnJ_}N@OCWG5c4=G_;vMc$j0fWRN%tn3LgHxsPctNlYXvQ#aw8{%lnOd&Oy~ zgjX@4$Q*5S0R@4y4(JR#Y9aa~4I~9K6{#8^)bJi8;7-~CNAxm2 zl&dZz@waT9Z7yfL^P=4;<`YYR%@7Uk!Ru=PRSrbO(G$CbZ!<^I7ey z9P)eQHpm1|_TAG)YrD*!Nb8FO6|)j;x3#hjw|TkGl!N&I2ls#MV*VbcY=h1DWD8~u z+{7=TkSYrAY7fRkx`|^Hhd=d=8PB$c z2cJg<$$PD!HW6vVca@pS4Sl!V5emYl@PYJ_2TM&gPP-yK)ry$c&0FRyxLMPzhV)j( z$3DZ`y^x*G7R0kDBS&ii+%40T?eaEth%z|vLS3RB)w`NktW2w)K1M%krjWgvVeEJK z;isSj=wwL9gnX_?uziHa>>zP?et 
z0h;7dj_>ZeSDc0NCz@_W=5`V2dZyCD2#mm#yb72Jk*v61cs9yA5*-*&ASs5WOX z4c)BOvZ9zrPz4i&+mInasc~%(dcVOCMn5MVehZWa?fb%fS0daqWZ=s2*ElA9Rxo zB^kI?w~`&K*7)tdS4PUgmW>!k&!om!)%AFE<;~%~_{gor{riZ$!hGY~@W1E{m`MM& z23ouIdfGB|q?88>#>?67eO}GA?-Zei*vz2Xos!;aL^ZP zQ+tRdf3{urvY@8&%u^u07*BFoz_ zFUXP-ulF~>U9_HKFZIe;V@1&Q`F`viwlDG~$HTdthyF(SES4N%mb1!OK~@;5wI#+x zd9;$E%nURM9Fn^PCLo{oivEiHVtj*RYLYrcy$t799Z=zl&<6E^W9&oudBRf8Avo-D zq9F|De&U~N^k$(s=f>SXk)W)V#u9x7azYrF1L4}mRT56;3k z<8LFNj*yc*;Ao;28jac9R@%ulCn}ix>7(ofGL>FK$6Ed$Q!gXH<4pC z1X=Dy==$J3)MNG$?Z`3 zubu=U_{Ggz7=?)*NCWKErHs1X0p#uVzWjv>JK?comizmy}dEjww^s zXXrljzGlkp;kR`l?|P3mS-+&!QI8n;h!krMb&cD@Y(g$sfIUjCFuxeJ>A_&3?L=?& zJup6Zxu=}(w%W;fZ}vvD(ABJ9^tO(&USXmA6tahZ^ZD$8BThb-2|*h>_qoC-OKOxE0J##?H@R&toFL z9rM20L?!*0bWR=(uUb7!b04Zl13mq^|A^Z$+VUowcCM5G(pL@RnRHa*!w3_l%jUKM?=`nPgXx{3d1S5QgwE2ow9ay#X;vBK&~ zzUJ!KPl+|{s~!8roenE_dhiG5iJJGfjU7wGW!x#|FJ>Alrb)!#R!?)i*_eDxza=>`lVmZ+ zISNwHK6($CU=F6oQKLYGX00t+Q}97Hkmcc|D6S9DPU!E{6tjVq0NQ04JbiP`wt7wT zfO-Jg_IIJQud2VYuH*d*BVL%Ieh5>b;@IG>1OMO(vXW8*@BO0!|0-GPE;&~ouPy~! zsEl@3X`_}jR^v8VOE~l(vJgF;VTJurQMGWUIO2lexY}Y%b~9+YXmVHihRicC+a_3l z^bbZE{UnHvWsGnwl_(8{#dq*De}EiZNL&`2OxY|d83tC6I-60<{GG}3Sy(5V_ei48yz%5nyY3@RlGes zi+vTnPGsA&Ue!yYmrVDa^_Nw9YWFcQJ!jQ7NRzduDl4=)s3iMo>3Ry>Wf^)Ey%2nD z6QNCeq|}imErNJ#9w*n)1+A7QN1vk3Fr{qe!BjrYJw;Z05I0>AK$aYbJ0lOBYRy!; zO6`<0>R@EC#;MtwNO*`9)-7T;lC?GBn5p0nb`gbf0uI+ndWN_~%90{N4Yd{V8h^aPcc1a)s`ps4y)&&ISNKdQbQJiqny z-qr=YWmZ?N4(2Dw|ph#x5`!cy02-h*~e<7 zwZwciQ5mKeGVGwtFEOSX@AP0Ql4wI*vc?)L66fD4Df)ZOtxthA%MZPF7TM2wOpjrr zuxWlkpP(kPeZ{Z#J|ZjbwbkcS@V++3TT~L9s8d=)^eIpLrF<2n7yf<9Pvez3+5DS$ zjs5>qvz9(cJpi`wPVIm`U*DwM1WO@RpQ23GD58~7h^R;QCkn8un1h(!)?>U-AysB_ z@b6ElON@bj{xo}nNn$F~r@?1w22XkkT4uYzt$xbLhnZ1vqaKn{XK7#5(XvZQ#tf#J z+FcJ)OGqmNet9q)K4<;IbIW_?d-mtP1_gepGMxyclE@)!PreEC%4>!5d>uZo4Ia0bLIL3%`wU9tHtaCIF-66-0D;=Km z|IY`~gqHS4nEcnrhWZsYT+7%aR5D$FoeEb_x>`!ADo>Zk`_NbWeZH=NS<*Q9y*yWY zr{yu{5&6hzR5Q?%J}~8}o5nrrFY2!G)hdCV$Z7B#hG<)n!_!4RDTU~%;Pj?}=f6YW zs;xCkfy3P$G~H}HRp|{4(=`7yr1z!zOKB(do?3hJHdGh=jn5WMPNkP2v-Bl1nW{ra zKm*tYxyV`cM*fJcx6llv))M?O<{wm)<;j28reLuA784vNgwJ3Ht>j9f$}7OvW&5xr z*+cX<#?Q2*A0TaY7MK7VLCy%L8$wC(huqI}V-n~>+)ZXOCcjI_MU=vp05{uzL6oAp-WJ~R0LVtN7W`hWH#QR5Zp$O(0|M#LqTd_xkyyB zC2fznx!g_m2((wl$#eL7h7rMF+TW!2gF$nQ+l3s4Z$db~4>hpDjN*F0vo@IbFo%(} zd9WwY)ZGQP7Ajlwxv**b&y>u2mc8uC53V3y-Pakse< z{ANyPqS!x7bE++f-vhLPYNFa(y{E*fKUC8664U4@423yEoG^u-!`}qQyR~f*-<_>S zeW2a#^J;pGTq?}OjJ!KAn3I2d@!LOooh|A_| z(7i9IQxqs{p;$8EIPl1S$;s+R{V}HL2auW;O#KA8PS{s0f>E%2VMQ6s1uL=CH@HP9%nRtrcT(Ual%;;kO&Ee%ro>jTWLV0Pq%Po@m+ z<4~#$`P7;Xj$apY74!p5h*rpYX+|`|6Fvhuw01%x=Ygi=!oGMFA8K1*BZR-%yv!@Q z1@o4IjuRfyl}s1%0kT!L7`@HuAowgK^D~Rsyj&l46ssU9_!yGeJCdPbf3~L6DHlD9 zDnNu`Qv1YgV%;(x84Zx>En63en?xIHsIgejMJjna6*q|bkN(N*ZMoqUoodWh=PG~X zf>HydrY0lZxEQ#VT|DWY@xGt_jKD#8Bc?q0wKte-{!q#&{rq!0_ucp0|GD*?_PJNU z(5vq28PMd7@Ps4p2^!wN^wq{(vODvMt7unGkkr&&F6$dB~sPM3z(5E$0o~*pSTl90}0%xi9Bc?j7F>e=VsP ze!kVX$=WEVWN$$AxA4vO^!0T2ZIp^C^WZ1$V%8<@gDz#kwZT*SDGwr9IujxaJ4%3hwPv#RYZ(d0HE}Xtp+8 zm)ea<*;%S4Qwb;M5@hWDZDE2>)FqG5n{W!fVa~9TAdouvhRAf?$2>vKKof2|cLtud zH&i_KE*`QmQP=Elgqs_POLPiT0QFcCY_!4SG@C(DswwxSHc0RsuWXcdLBpEtFDd;a z=U43N1E@tY!N!j1h_#Sd0HzL!jIEMPF?5b6O$IKWrI^G&mzv1K?HTs7wxxU& zPW81+aoS4`CA*V{jKRti=~LjHR9pJ2_A?6;4a|y|2{biH(`LkLZM6eP3OJ>8()SoS z#%28)eDW@6s@Iu!uvaZeoV4!2r}LCb!0C66y~{ns?D{dCK&6mVG0~`EwlGg4lOj?- zsx?BsIceGH_b{8p1buYHxE{I^T^n2now=Z(uH(|!*=!zm7HY|Nm;ygmswr=!HoiQb z9?+BJ%T?Xq+`6zsHvhg<*LRZ)u_)^^4ig3wEdD6SPHDg2bRi=PJia zu>suVJHXuki7dFyR07z%VdemRxb{L%0Kw%OSe69l7*CaP$`-i@vVl_75n3mBd!K1E z5+Mqi>1F`WoQzXs3huyKL{;2IIpzy9&NTF1(EN5(j$%{DNcm(+X)o{5_8WK11H@^H 
zrALFJGmLffee8ePzu12`UOF$h8i%|MZ4o{z%ocXh)hFnIbF!$}W(X17LS{cxj4c8U zd@-=nYMbCh2QvLN{n?%o;K?{M$++|BOdp48lUuZ*0ZnjV{Hp8J@)PVPg0OC?3S zNi3iGr0MDULR?z(N6uF*M zQMw_ug}-R8mJtbyK z-Zl9*7kHHaW1g-t>m&aQUF50*$3R?AjCc`L&-F|nvV?v{dg$q#lI_9*>9db#x4 z>G2s`vR>q*d+kbwo^DR2JZ!WW8XOhwjw%iN+v)9s{xQKTLBzaN9r_D{f6ILMCo#?b%lo_7pFvrTy-npPa|rp2uFE$Ub_*+PiDI9i zjv*hy3&ecLTQjb2TtvP~v1Y{R;D(NY_Ic1gBy$JY&+KpZKJyHWzyzzm@dKwQi}a+0 zW+|`~-hncClUYU|pazjW$=L)!oPzdpIuuaJK%8Mg}4P2CMq+_;g zXNWf>F04vOjI)lNXCufmAWil%ULd(>GpgqD@(;hmv&!wsnVVzeJn>$WikL^qEz~9U zJG+-{3XSezE*2V-+Cn99nzN|O2rBM!I_KD43wQYbTup8>{#bE9r@mF;6;`U`{|MFQB5z^n18WC5%AGMg{-fBSnwX{TRj)#oQ9-M#b=L1; zGu8__&nNDpS@r&O{4*(|cRKa!^v_B^&-_UG-uPR&-%GNOcq>Vb^)u9Ep@B`srki)P zbB+#L7#bhdG&U=?YV4b+&tZ&nBj1n8B0CXIt0<_zUG-b)1)LR~eJyj(=WKJ|_vDpc zsL}t|r-1=6p4kIe^&G~{4CL(gBr)7s*73@|-TqFN3cN$ztO8Fsk z>I%@=O9i%gd-_KE#(DSVuFhHJ-tO*^6`Prq@#*)gU;M9>-}};wXD{?UQdi1*A; z+fe&?;Vl<0?B&-BqaEX1dqND?9_Jdd1^+LlfE`-Hu(bZ*6g4vFBb-z zz#!A!m!F&Z4KcVr$r&FG%C{O9QJ$=`*qzRzKwYkf~k`#Wc}f4kDy zTFW&Q8#>}06&)KLk*-e2U0oFZHaO8S&o&MdU~1F+Up~oi)+3BZtgl4nYliRyYdUYd$uey~V#k!u)8sG8f>vwy+_Y2yF?) z3YfOY)F7gq!9rEjU#qB|QnRo#j|Yc`(B>*F|Gjq;xjOp%T# zJylAZ4gy*wd4qgG3JRR_*9>GK-}zPcnXHC?zNFpxeel=hU%9_(|4H(W*XkJe$@bi4 zN2#DIuGX$$A^9UFMZJ!Fm*;Took({`D_5eUl>IjQh^|WarYfO_O49wvH(ViIQ_iZb zKrN|mt;ZJn60{)4pgF0CDm#*DMupLVriH`XCt1{ZSjf0sXR>+yE|;n{LkCG3Q}s)68m_eLdNdSM6$SwaP&q zxfQDGEUuS$%GD(FaoB>e0-@1JxV&PUVXMo($5!Jo@fh>IQ_5H9%s%?3$~j85wnN{f zmp0tU%-dttAa7zGLm|oS6!`s@_(|9)=d+a;+dJnt!_dFChC^dCoVa5xC(3{_y_U|$T!$XAHp{Y6>~C;ZCoy9gie5~{5W|rq z))PtUQ}h)^LA|v$S!<#mlq<@8?;3FVd-($34HU_pj4YA2jDWlAk$t=P z2->RY;%w*JkVj$7BKk)(3UfQR+ja;~g~ohMHjLV9bv86P#Q(`%FlRz`N=`&>{@lK~ zOWmv7zue_fx7Y*2l$}~pV+T6sD@Lrj3&~z$jOv98$Qv!TyI zg|G^tor8b4Ub#-VmOv%VIoFGY?Ynt`Kg1nnSg3!anWc1fI1!eBKTVMlxP6wH2e2!9 zhy1)E#$cn7(NtqWGw3hB4wMeO^{?}v_P0f?ekiw9?zvo-ThGqP>YcSY^G{ZsU(`C7 z=O~VwU_U1Ib}n?C2uY1%^1RKPlvm9A0*>ROVK+j)2kjQ832vqXxl?bdtoDEO?93UP zbIRSw{XD0HyI79H{nNABe?;D(9?J>Zz;cm8?AI$M*g=hZw zW6utgvW>n(0wz1_;n}3P{r0}jx53`f^I=uPpM;!u8n#uoOL&K72t$|~=4ZL9kMQpD z&i3iPVNyYPULfAz-q*z6P8zAD6EW-xp}xJA&4UTiI9nHcVR49%4cc+DPAlSz0VKaanX}VX0Wr6(8Y_-V-}7Pi$=Sm?bfFV-7^e zM<0X}{+`3j?L^<6i)_HNR9|y}f_?+q&=mjLz(0yZ&o-jWZe~I7*bbQ-Rhpk@e`Sw_ z-g=vDu>FrX%vr%1>j-vai!JR{?Q3j_LXz----Bt!Q0jvv8)vnaYHOvf94i-=Pb-r# z_u2+FR3{`?7NzD=Gw9NEQKl6LWOjN2nLzws5q??U3!amxF9R)oI%ac2kZhO^$Ik|N zntV%6m45oiV{cN*chgte|Cj%fucPNc&Tw~ODZ|W%uXmq$%V!C-;1RfL8{w!Od?Bo1 zM4#|op~K;7c@`{&d~z*z9>KQjD^mp7Y`ci}NN#*i6f`fxn|MP?m3B%8Kz8dTXG@Er zdA|uRTM6A|4WN!Q40D`1NeyN=p@!p!>rL=pe62n~jhrp*ZXrvUCrso|vwvu?a>+aY z@2gF_g_o9GON~I<$Ri}MS7IJv|2l{qgakwlJlqN0aLbtQOarzCXsf%dpGIYUkFs7WEWP!=@^|p(^Z)0)I=kh?XjRQ8;l z+8)E(-G5LTrZ=VLvs3w#{05<>(;xCA;zIPuXd|*|l3J!?f z`RQCS{vkYZlfl5)FIVvAdcXRv`iDX*GD)tB&07cT!mE$8@5iA0L_ zY(w@Ny_fuhy4_`PMzo#;=T;;LpaqPo=!XyLA5jZ@C7!_56OXeij_yW}A}wcU1zO9)+I+lkn zi#ZjSTyR8@JVnnGIag>{T!lP)qf3FMkQ6-K_L5#NumT)!AAA|4A+qIzE~+&3Wb;bCiA?TD=&| z{}#Z>wOy~Fg~Q2EKvS@(YN7AavXLsWOq~bsU~{PPj(}7t5Ra(}!heoc!HXjI#Dqt) z;VnX^h3Ai1k~b`GfxJ&+mP9NI+2@oU73`-mDNZIUn!0kpH{4w|_l0|pr;o3TKPym5 zij_7g7mV%Dl5V9QvBQL3wrN5ZH%oYf)b)TkUp#9sX`2b&?lAc76WKxRcxD0F9=hoL z@Ng*9c)Aq%6{qxVEgY#TC8&P%e^hg1RV+lk7S7ca28drA1)K{VF6Wk@xS%A*2m4;I zS=tLM|DA2Zq>>^uG#Tb|v!~UZxM*#&HkhTLeQRVT5f8!qq^yNz5OOf0tZqgZV-TEP z-*v?-M{Ixxb)d1_e1L6cHM6j>%$#E}NN_D?>{p*Dta?@{p%j$Y$$8=QPXODpm{rs2 zN%PL);dep?Ix5;Gi&p4_Xm_kF&+C}CkwZflglur#5%)toP{8)o-iH4h>VT4#g`H?I zx)!sUVu<%hKNtkPOa-uW9P~H(B5Kj6;D1&SS|Pb`m-CAF%$_A^NFdq4Ul(53=ZkG^ z8QfQfp$O=_*Tb3q4UU33;4@CrIvSVZDSCy}il1O*u4Llb_l(3GXH`B4>GRWsxBOk9 zt56R0VjYm%8Y6is6KN^|vK#2|X;7EUraRz?$|eFx6|;kNd>1tNIJyfJ%a9;(+-H(8 
z;r>M|wBDO(Ac5D*QOmGIa+bt9|bl)cSg4%!&*4LcNO3ttinlyGR5;On5Lc7-c)f5-@@2T4Mwm~rfR zVUfL}(3@^xom{O)Mi`A3shakViFmd zeoSA)Mr4$bEZoMl{x-jhm+Vv zP8~$XPnuPO2oTlC(dZ;=!L523@10l8t9*nGZlv@{P6HpelrjXYO;L-*?q`E#Sd3K@ zda|j^Gw34-rb1Bh=(MQTuK$F7wnssABZ49eM`lHcVMb_Z#F2<;q0623#RTV|V8-!< zsb_5jVdpe_97o`BKP)GLvNKVx6DSGhbrTg@dN8Z&E9=mew$L70(bRKfz3#%EVV1eb zIzY|@PooT@lC_Xibr8F{N^sSFqPkEUi4E3Z>oj)rSBR!iI*%uMVQ-@lOR+KiPB#&1 zIjTBKAUi2Y>?$5b54{)8?^;}U_8ltQMpOwf2fHFqyB89d_F~5ISWQ#6g8ETR%dht^ zI_qcQQ)#LGL2sR;rGO0opOy?h^muKmzF2RpT~#(9=ORfxqMp;L>uK6XT>y7vv0*dI zA~kjlXxAa&f_5Z_5*L~39Q)Pq*vOO4gIqNKTpSeoE4)xtfoMIvPe_iddeAc4 zY^DL-fZZ<4XY*SXv_t6Pcd3KncSw-i`dfMWxliU)&5^QexMPqCA5b^wU%{eXtu)a$ z5FObI$mbpl&(1;S8D_-=;IzFhPKA5Bh0vEfL!ZP(I37xcVgzjz(&E*=s$Yu(1)7A5 z@4Qw)-)H!U7OW}!w&wx)U9k1FrP*6LdOGfiv+X~Tb-WshD7BE!kc1kiH`Gl-tYRQx zuY`B#3p$6{(6i3Y2t)ndXzaL4TVm2^u7%9lo z9b`>l3d1Eb2ZS^aZxa}ygizd8Nt7^Csw%w0Pr8L!iCR%WYQikytZ`N?h{Vd)QU&?8 zlB1s0Bh13q1EMpX4ll=VajvtSbH5`|v~1;V8{n;%u@~RNKBY~34@HpDeh$3;2IO7y zi?%|6wo5)P6H0>IP}=Rk;qNAOQDpUuKEs5@hZqFf^Ah|{I~(D~M=+xOYOc&muYGTQ zB9adl1}X>k1lsu9d3SpE_?}9UYJ0sk^naz0C9|5`L^noGTCj6YRF}Lzqr9#qjv3Ao z!9k&&z+#As_!0atXoc%_&@1~_ffv()Ylii3eHA9sEiJD;!T5p6eI-2?R8b;O+ZXLQ zpIgc&O8J!zO7#HcP0BsydF3b7Wx5N>8wnK1%UU61C^sW2Qc-j|rW@7hX><(`U9O-5 zJj~W*UQi2&kwjt4@E21C6O6fdC{JT=zlEV{9H+fe|(bu+4yL^ zWZ6b% z-jytZ45(`0U3_DAvk~lEHo!J!E%*()(rM6omNLg1tMvNnZ8;ttS|HFk@Fox(xZ*qN zUE-bUJLA^_jg)^;PnY5g@r~&H=yU|yWR7!^@ZR3gUd8rS*vEIGyP@Z81&YLbsHobj zCMXIeq_T20)ZO3oTw^cN69cGO0!BWo7U+|`*o(|+`ULrjC`LA?W-uw-FW!sUeNS5@ zVHaje=h0)I6%vqSme)3q&pldAr*+0`U_@!Adfc~j zie&G|GBU-iyxGz2k)Ds<1iwG<7fz&NV6{9XqiK;j0FA`ITw8uVcE@eV!Nf*rBI5Pe z+F`Y{b{k6Tl~Db!z>KpRNE#h6tzz_JMjDt6Sxjec9h7}nk+Ali8^9}Btsn4e{DfCqz*{$lXijz`DwAW$H&E`pfiI*GU76g0|GhK*gas9+ zT^k^W1_t^){?U>yyOn!tkj|Jl%r;gl>m@OZ_=eN%8*z|43-wqlvIUe7Bs(yP1>BN2WTo74h^Ux+Rmq{GtD% zN5hvMO1`&lTKC~4ZE0m=&w2@!LM{EaxrVq+79l_3H(??R;sMc~N@3oE>L(!0V;J2C z-B}rS60-=qn=ItAjR7O(rdAIQuCCe!^{`qOzU{mEej^z!oi_SzZ5R3t-fU?$#A#fT zN}@%YLMm24_+Tc1+S3c#tLrATf@ViJ0$U(8tpH{p)0sLJ@m+m^#`Pqz&7s)$6r08u(<+ysLpgxb~aRI8<7s#h8=}1=_6Yo$3ka} zQx?aIIR}snpRioi49FV zI8w?;UjxSir~LbTExnSryYG{Krc_>8j!oWhd5qLnT7yl`6!n>ULhYgqK-F^%=^D@c zyi!%Sp%;1qZ`=w@B%Wh(zl6L>9U$Lg@_Yjo=}sz)jTVmCNpYaPgl)a;vc01CRqPp* z7*Zqjyz8@L0eF*}#p#ZoLEXSjn~Nx`vXq`lr&SRg#4F!dO)v%BnKMlO(|44oOXTiiH^YhWDS&n zkBvT<5TTbcZ{YWJ3##4j#&sxv4iY=C6A2Io-I!0Y?LmDoSdiJ5^jGR{Y=T1Iqwav| z$a~OZd{9#Rq;k?K=w$9FQ}yN8%!J`9pRoFpNSX%=s-93$xQ$gW7`jUF1lpKUapZouTD&yp>WJ=_#IrM^vmT51I?W?t+qdubVZL+cs7RuWgw{z5c`pWt&< z6Snc`+#O!EDcC8+2*u$B`axVZx@gPQ?V!F5luJp^B^l2D6s4>7%-Ddu2m$lDyX<8l zTqGQi#7sD98}oTN8%KbF6=lzdWStsRIbxZ48#9;JW;^SYX@^do!km1jnWiV`W6U^e zJ=lj$paAHH_En`dO2;YjZ!kdTKQQ>!(+{<6|ZxvZ2?4})Dg zO&O-8Kz&`4n1E!^Wi&;%z&%Uh{(r-|*`m;M4QKCgPQD7emdu5A`huJyZIoMJel{M{ z&}8YB)L&X5)l$mg`${)-Gl4A69Au-pO4wUm#64SyEf1PncUEP<5M?KEn~~&l2hIZ* z??vY*a5s@vWBjD^vsKfWte4hT8eh#XsK}3#hs|K4J8q_A z{Q>4X_we(N(&uSmT6@i@tx;37-p~b=!7TBS{8h1QmGS5PvC=Kt`p0;ol~&(jf16hx zr&clgkZtG!Na4<7HG=B30eOtNOYdW{=$6zV+~!5#ZF@x)g0rR&^)C^I&G&Ao=|7Rp zkk;7^w6Xx#01C&Y*oFMfb^{wFfj)z(c!hBj8oq|mJGZi;i5J!&kPfoJyR5~|!M^bT z`i#DKW~;EDk&L+x=Xx%gNY7vt<~3COMaWZDUEKM9jCSC^J_m20AYou0SJjBnPw3T9 z8@?n4SoO@!+H-6iddUCEA<74-l2kSD!ruiINc1BsCm%GKL1YK=B2g3i>08*gJ~Gp-#vt+BAQH`f zW_x1~?!h@oXL4(sbdWC8MEFW?_*Wyzu$A;gzN5rwYml~GA5;84n8?N0d)Os=3)>og zF1Li+#+T&JL4l_7U$_DA#)Psbs6keuvB9Wp^1I7iadyxI!Pg7X<2 zF(WNYg<&roM-;JIn6XAXu=Z#})i>a?k1-_l?pw9ITB5uq5F`zke)#JJO8Ad?e|k=O z)gx?fNNO)PqP>3RPX|T)74En?&V{{ExB)Ps4ky81z2%4a=NIhOiG< zf|*XQrPe`F^%E(=RggyQL{`~ky}C9*>7!&SCoqYsVAM3mnxo;>l#n~f!1F_ERZ-Qw 
zRx~7aR)=FCnHr4rl2PO{G8x|FFnA>gVJZ`9OxN3B=Dx@v@C>~%W}Ah~o=~K(GbeWIvsB>d0Vj%8~9f=PQ5y~IEIV11uH z)p)HBGjCeqWNUI4n7X|f6GVqdb};UlKU6F=g=j^*CwfxPF{^z;`q52HfN$q0bOcHI zZ{t1KZgtf4@cS2lI;Nu9M(35f(PQ`#o2x-rYBX!3BaSH$mRrE$>6gS$Et6Z(!;TaYJWFyks_ z)y60Nlj=(kMUrD7_A*2Ax^UFi%QgUd&~Zq1Yb%WA8$+|xmLCMV-8jr zUcj>SEm~c@ur^ESEz!PK&}?1wRhRZCWub*Bg}l~d`gwhT-VMFXT0IK>s5rfwx>(t+ z+O=Ak#O+kSD0k#$xU1G`=fKVQ3aajKkfD00MNxe`$0>Qz_=Ge3D~PV2^s0E@(ur93 z4%-vcFiZVPq>-`oU1(UUBi*woSf!P)TL>oBQU$0NxWkTHJE3b%fP_B^yD5Ub&1cvLJ5z%SyZ&}{4r(NZ^JAF${#yhxN z3&XoO7A~6V*55j>ZkDt_54n}vPs`B{KvQ-Cir9ntA*~FQwbMXeTj=Kl9Rd@jC(?9f zotlLdwyk;xP#25qbM*N*i?3lm_8rgM2y=^>*Q!8VCAvUulVUbOs$o4OTtD2~__||}7v2;~^0HESIls2c>`97n5={Wn{trLPwp^Si9<+C~RREQ7B>#!s zz8%^UOr!C^&eX(2a&^ z2eoWGHTTrL%51r-blU&Ud&T?G{|~4QHNgNBP;qplpHn5k*zg*Elnzkt7EwZU%LpSw zn2N|J{|~I>0mf^1y3hEgdJB1L`rk+);P@sZZR0G`Rhtm^%(eOiwY*Xkcm6DGj(S|_ zDesp?W0E>rSqpXfzi^3~9pVE5uw)vA1p*}gq zhuCM?pP(MRFMhSX6p|Fw5f%5%Cibrb6Q2B^NK&$>sIx1aNczw1xnH)=v zA-kCy)SZFB{%*eRz7@U<5ZqGKHHL<{>mzGA8HY@-$?%`8hM)7RUPJF@^tZZ!q+uDe z;3R2lC1Nvo5__(2hGP<`g+!eBNB>I;RZdIi10Q^=y=y%+JcL*D=a-Iax2-rN?nb~J zbPDdyfUys!adUWdgUvG_w@)*Iaca*6-)A&5^Yh8|Ado)bG6crP*dN(H+4}GwQF%A| zKR1f30-wfoGJ~j(ULuP4j3nB1WEgy%E$IBnyl%+t7Ve0x9fjbMnFNRM3t=Fa3Xjl5 z>Ku~fA5){~Hnc#Mh2Qxo8G&;yoT(0{%N#Nqo}jKsp6!aMkc>q0Nya{$IUAsCoTN|0 zv^o)VuQmQW{_?(?-j&`=?@E6``I+W6>f;vIpp?E!&83H9`@P8A4Yk}2V+=OT(R!wS z3#`hA#ysP*?$k=C!?mV*W&MHv)a*(naII~<#mbJN&bs0|TL}M#ZjGC<1o0ed+`-H; zb{jWHs4tWRQ++hP?m8<09K2K5rXD2~CXJiKpXB1X*=z#MkpEdXF<0NAk2SlK?@-@0 zWt-srdCLEWdTkJ2kw1qS$9|?Rc;`i#-Pj2=BCivBEYR!Vzq)IE!+Y-{niC(0XT$_D z2*3GzjGe2&Hf7GzNi+|B{7aA%8`1gb?$mMYriO!xOY8H&cSParubI-*ZV7g zcN-=Zkk82;xxJJe_$WO9o$s+$-b}*jnP&6^spl2ke;bgovz2;B2-s}JfU}zhhpwtz zl7~y{p{%~4T+>DywXJgGaH>E3o!QOIpgQAw>rS7*-sqP7w4;SXfsXGZG%e#Xb^FC^ z#mps^xk6u~KjNebMY6$NoHPxfrE80P(ko^q@*4A*-)u{8@N`@u)l!IjYd***4s3t{~S5yO$hl3v-p( z2<>urp*!D{c~21LYt5!L#PlRuf21!k`kF~lIS$3SoNPGFuLf_G#cXgbaTlJh-l(D< zqK3V#J5igIM1IXz)bH!jZ{3DN`-deGQRX3dRf4ptS|4qlW*OJXpX^Ed3)j}r_|V58 zLqc*w3Wd5tl7fago+3A`JWkl*I8oYg*VrjceQJU^5{}(V(3l<1E$aE(_bbpF>Zmwm zh)2TFKNiIA1n+l`?)mKDJ=Z+f{F36+ZAEwbj&^EjJOo?imB z)oa|d`KWGSGWTH4z-2a&{l-57C$+Np29Azy%p~iNal~pu4nsO!OC|uaf37}Qd!$hE zJgJz%>ZL%1*@xf8F8w1s-H+k=9;bV?mFgpbP>)06xIGJr8~U zf1nk&0>|zQyhe+SL}-F9fj3$nmEAI=2dBpnp*KWGHNqkI1P?l0KH$Pp@E?a`(CVd{?BjN5ipYfdV;? 
zJw}HUd643;AM}DK>nB`j|ACjEWW{27(bsHmwuR8`f9+33fIU!`|adi}xLk z9rvLqc+AX2W^M>hwK(*LkI0|68)Pf5RR_IxamztPGuJSo{tAuzP0ZC^W4j*(zKIK2 z2^;aLt<_c;c?ps7W1sYjVlZF43jY6SswK%0Ip!fa%`WOqwSM?CQ>9^WYC5HUfdc`% z9HS;dAzlY++Z^~?CGD$LS?i=~+73{)_G?4sv(h_dkue4iHOi`pzb#mE;4XiHBm*Cr z4&L@Yx;wEQJ?S;2i`E+ioSnG4Qnh#}TJxv_jdsLGVw5>SU#^W(2dn9N1l-I&%rRP7 zc!r*kxC{fexdT-k)43S>2Td|ZXpwyb*7iqEVJku>k|REM@Xjf4p;bh-=`gr2 zlUangb8T^Ytfe}WO|3)V@s-fKYr0Zco*y{rPxqG&+(Vx2H)*DHUV4w4-YYqgyxT;X zt1OgPBUd8bKhIAF$^@PV`bnnTO@C)@#a-xBhZQd=Kk;M*aZ^hCa}*p;I7+a*0xrH z@%Mig&sEH#>k(g!mFQb?pkv=4C#t`+W#~R!$V?f5DH)^9H!9%w(T+TA9yj(OrRQ&A zGx3IaM>tV&+%(>SAe5-~!zO>EK2uA@EX;$P%ZY--xemcZ)OaKcLL1#*eh$5i5$A2J`v&;^<{4q{m$5nlKm0#pHIjk?lA>qh`=u z#Mm;q{7gxDJmn=4t=@*s7zR?Us*XgOP#JZT5)HlA5me2aG@sVQSOUsYTPhda{|U&~ z{A32;4{4+mAZhH;X>%DWMF$ZMV!&P8!O!uZo>XO`w;5p^Rp(1{{rP?8Jj1-}ypM7l zxRvY!SrJ+7GjlR(WY8JW8HN7r&#ISec^^vzs?kWO%RZ>RLEo+k=0$!o1r+_`^a1t- zR1RV6aC#Y)mn&?4Dt@tDfhy>=P(-YQoZ&J2b$)`)X+P-b5_~pfN$8%?YoQau^F>q- ze;GO^Cd*CnWKe>yh>y^#jW{8!8tn2>p^jfG<4)YJ8KO4*xr+QeLK0@zoE=NM!HB->z zKrX4LjFCS|w6xj3(R;|79qVtyKc+|Q{gH$?EB14$d>O+7q?O|u`3JT`)73hE65qdKoFfq(D&#oa6>hxj#7=7 z0^APvE&GDa0o&7uIV8)<>}qZ`7m93}TF^eM;KuSrZ7swV*v8HV<6%A0e9w{sxtsh# z{YxhztKu3maRl3G`!>gTXThK@L92rYgd_*=ca9JX*&g#8_XBhF&LC8c)GuN(z5_&@ zFv;?B*hwFRYT5xZcsn>fy87dNnV$dLy>kXxcbD+QdhX<==6>)m z(Ow~a{-Ie0TDeTrDR0OxL_;!yyiYioMz*cuabXc$VAX7fcv;L4GT5igW@Nrp68;g| zW9!|-m!@x6xgavNBR-hyhPwXqc`%_E>&X|3-- zz1!XLV#9qNr^70^k{jDk+b#=X{B>qM=(6qUzd+<7&@Ft06SkgqySEFz5`2;kjtam*(H{PlQJ%yQwRv6?btDZOPST+fxoO zFD7c&p&vJ2Y~7J&VO4Jr?xjiS$MK4(k@8&3<#T`6?KCNBQ^sC9{g2i%_l#f7{a3 z@zTD{IuNc4$^Ohf-G0qB*&b$ZZ|~#i8{`f2I8rQY%$rSdCOdD28>}`n81t%&;3T}_ z9xj0GqkywjdTP_yS#sk?~`wl{{r%ecfqr6fvj+#-UQUASoS@) zTOj5gNEO_*>@pWKZ8sM+pW~Y_UEoZ+psP?Z_ruTP$L%mzk2JFNzGz#UsZ>K#JjQ_)i z3tg=FkVDdTJP^7*0H4voIexh`Z`Cqolq- zdye~Xcd;QHGUX)E-^lma%lZ%dSBN{s&0-z7g)$1g@5`xLq#dY$qq#n&CU9v?GY>US zH}5rXMrte3)X~(MZ^a%YuSqIs>%)xN@SUurz8dkm1jcW1wXMR)rRDG90{=|^c_`9p zzLRK9KkHv0hDo!f0aBJ60!^fXk}LgD-Wj>LiJqknfIyN<9B^6};BRvR(zUmQSLiYK z@T-vmZ^^eaUo)Q)wsN=Fe9)=4;9aiI)#myCUkb7^?~2AYzg`*elV zRAQdOPkJ2w*0y>mW~_(cx5~!6EM98@_NWA<qudWQ<3 z#ev7IcYsb~hExv@k7;U8@b44Fw!T?-JDYpolQU zbT_>4?LI)WR28y=UO=iqh3Y1Z=JH{ZJr*g7wd@`)fSb;|Vs@gDq$2cb9gfh0Cf@qP zG=Q(oS2q2zthBwbTs4h1buo=K9WhU^JV*P-aUltJ*Fw-zSHUp1*uz(|uxAw2LMUM7?$$|F+QXI$89FbvvZ$D^VX}ZU` z*;q^`2)!L%L_^7^>_H1nOX;TXpl7_Nfw!2y7MfgwrFc11E-CKu_4mHSj8hxnPWc_ggZjwzoR=CQKMN{4GF=yy<%*)7L|bkgm5JHd zQj$b^k;x$SK4)g&+&G8tVR>PTu!fjQ@rgo^`MUX#DVJ}-KA^)f35vt5a2qs83nuaL zm`yaGmKyc&>CZ;H426}Tm6ENbV8-1>Js{Tc^l)|ct`-j|3-lCfD%%h2&KG=D(+*=uo{d%{_;)A1{4e8`L7c|kvddqyacYs2}F^?^--UWAy#N{4k0 ze(xA>ZDt;0u5V68Mp`9HsYPH=?$(mwDj2CQRJMp!eC4oii~`@RrBVkvOnc)GXmeH6 zj_?>PRj%QE%Z9VHkvsw|en(Z0(nWp;CRB#MlQc|;(&ys--;GKk8`%=v3a&eP$#VD) z{Cu3-i-HMzhu_4OV?VGb&^R@VxkMKRE3P^<8Z!kaCYqnex-2|%4W&I>v1Rkip zJHQ{Wss%#PyQ^n_XE;?!f;S;TGpO_UW-Yvf9S=ue3$B^4&y*kQW(jLAyW2L?e3)&( zjO41BH(CF+)Dwzxi=jpK26;tIls4)I z=*Ok83pCcUaTj!;zqp3p6Rh_C98&Y(H~7XR!;PgtZFo-&(}pXDRljkG1S2U;vv~U8 zLl_UnejSp?c85-r#Y}+D@Da$>3m6L-V%&n;X0`T2t*ca)%gGm|*?0z?fJr(^ehL;( zf>I2u|7fM1(gey+CEN<%8Dr?xm@75WmMAafR|=!+RDH(G9HIoMNGhEK7upNb2U><* z*vSv((#Zw9dpy$=pUNuX6StMD2aT|*(FAYAb2#Q5`b_mI=F?~&)mIt^un$*fY)l*a z3iix1n1hEQ8Qqz=gACLq$u9p?O6v+3Huo`~3P9h$1@eIT7YT)CSXt+i@t6$1WkcD2 zLG^zCO5+060WXMCNl;Izx|*z}LqRKzwQGa6P-~>88ah3eO=5SDDC}dCs72IXdJooB z6)h{1lvrtzf4hIEzrHwI6vQv$H}RD0R90&L(EZ`ntVWJO!JWwDa@X-5pWza*rmZ6D znRU?Gn?Ps(n~i4&k+5%ec|(MWzux7xW7=h0qdoj;aB+w_D!^ZxjQ-o`|!(=``X8ZGH1~m`U>vKWlS(* zV@G3BI-NO4zhtip--Ik)GJOH3Hrh18G(zaf*I*Wr1q_cw^GY<1onRBVI?OL)54mBS z0NE@Y*_{XAW|yW4qFZM*)0t{VG%AZ;kMsO)oNqj)(tH-yPKlYyv8F7z7XAfKe=cDFDASzvQa 
zGnEotre)kz<`&kYAtc*aLlJn^_Zbu>!QYXxECtVEK6KCLGBwF>+Rqf{E#w;bb1SH6 zP@KyeJoQ3%VfI`f{{BmtUtGcLEt1MGico&)CdmHZ^fty>y#wh@9#cB}JJ5jXIMzgK zpsMoJN~$q_y2a!d{gwHE?|HQmCx@xylxA`x&4?u;D=v=8=(`MN$=*ma0++aJc6rkE+I4#=3}PR z+;okrWT|Vmf&u6kD)N2#B3w3i2ooO#Dp>|;PHs{4*fC^-VMlkJsud&Yq?cZT-b8QI zXVY!SFMTiVLJvWc!}N)GPRHPr=Es8l5VYjOXk?(t|Ed z&}gC`)0!ITAjmyN?!t^rZfT?uuF`Yq7kWNW+sflKO1STEc6TH7=xurieV-~u597KZ zMO4*P)$<w7xOVFm1DbGOdFuebn5a?1Ba`Nhjzy8%oZS&wMW?mK}~eA}y?AenXe- z!8WBEF}ZXyKCKS)LAoFQJe6+4o+pt;XZjJGMy;7+d@s5wT>cl?Q9=XLOs)Vw8Y+Jz z5%n$_NQ&UH_Mi&Ety-MSfm^Z!`V(H8>+zqV-kOXHXc($2Mk56nBYn|+$it1*@K|hP zCXubi0M^QWGSZpmCO>mW2(#}-E73>GQ|5~HfGI#V!rZ4S`_u?vTJtGn05uY`hkE2G zH`crx>D*=LT3bYrIOMic98-?1sx?KvX(#oLv}Io~5An{yRY+|7IlQNt><6rE4SAYx ziWbQ}bbYQ9IStCzdA2rP)rjJfsZrz;HH{3^_u?ZGXg-(g42B5#=+{4^z$8_fLy328q*nP{Tu zCE#7Epe@C_7K4`3eq1WOlR3^^HqK)nwveiaoqQS?*_XIjCYA318mblA)=}=L`GRR2 zKSS7W9f{wC37>4yN|7(F`QDq7>b~sCQ9t{(czerz{U1GVu%ljA29nBJeyEAvv=21H z50-T*flF5J(FQl#*bMJNMS*b4N{+uCgOFNA9ypa!HXMdZH5 z6TPXXvNUFMeZh{MPVeY_ zsUiAm3{U=O74-RVl=lQZ{f^Rw8iO_eF14DEAQjmtem2vIq?_JR1}SQ7LJ#FPn4j{= zxJ7ru%&i{vFIAFuvo`h>U6EaBq|qj2K2=nUm*=Uwq$lzV|9(wSS4j!X2r3+!iJ6^? z=|>yKV@GF8aKLgK#oxqDt`xJJu0;OQTWSZPARbg=r~_JCZ8g=B%1=L`?{PE9btV;e z;lIE8Ei2BP_L-< z0sXza;?^ruKNL6p0A7wsU>P4(+xV|&PnEI$2&xEF-I27N9*cxWEOQ=-%^JLh-h>Qc zJI64C;5@5I&f?Ghj;H@0h5|YAJ+&KI$tBu){e%2Te1{gYhKkeh%HWI1Beg$9JNW|$ z1K{!sDQrK|5CqX=wmIvuT;+%GZ*47k4(I9Wa9JLp3xf+CE}cX(@I-AORS6EdALOmP z8~3IUG(xFZcXP-~A&(pq+S|W#jU7q$+E%xvh$YQDOt^0CXFA1QH1%K?GXYRs_K+I* z2EH4SVwl=ldg$#cUIT4rq_o#qCnmuYvy2+RTojz_3T~pk8qZip!pHv_6Vg>|3;hbX zi2XsipGK8HMlsO%&8}5GXxr)E4 z-VO<{yXb0(q&^$L*ozX`cV^wPjL9=s5dyir+(mPJ{vuz}Jiwg9ZRLtv{$@@QH`c|g z^bEcV6Rz8t7^b$|kLk$F(>%n&9oPMAC+3^lQ;6p0(jB-Qb2YjM-%=QDjO6~I^TXvj znyQb>Eg7xKie|BhV z5~8P&?zr>x!P#gtor8X{j?^`p<1dnMQr)zGGsrw{4(Z8GXAbik4-RuVi z@R%hO9Kur8+UB!lmc=KuCA-;FzMfG}ZK-+unm<`Crd*YV7@OsnevhL^&iDst}InbLG8&W4j!I9aXd zXv07p4p*fMaQr_zD&n1cO9T65FrgGN_$?Cryj zK7>B5FQZ$syWw_O#muHo8M)Y%It$Z<0`}Twn|VLzR%hWz=WNyaP$9*f%uc2D3ip(Q zD!7SAGxt{$bdS^%{MAnC4cznB>)X*frV*#KU3Ze};9cY=7u77uXH=BaT~B z0!F1P(leQ%a7PwY#v<3CYDXC}nP9wQ=CBD`9`Zadlz#LuG+ai}tZY)R$(k7c_?kfS)Gr-Rr1 zjy%y0&_9i})O2kjy_YVgq-im1kkVWqhy-$1>KKziHc=nhNhHO{;W{$$>SX4vQCx3p z?9^__Rc8PhMxf?@-0em6D#*8uky9KW^4$70IddfX9 z)LU5pD5m)&&5#$tJ6i(&M=P@RMf9Ua40VX+*ulmg<2>C1nRyxOQaKu|EjVeW3n%QC zY-cQ~wyI`Y_+=?5d^ERbqu6@PRdm9K8@<#U`1L~$hwrk!CQF*u8S7;OA|sDAnrUGE z&HCtr;2cF!57>Ed9}N~J!UK2!Y}R|=$y-TzoN8av_whUwL}$tjuB|pwnMH;`2|8mW zs?+4%Mx62;y&b3JKK}EPMO&_HQjdU%7(ohai;U)Iw|EVgOGnU<7J>~~0JOSo@B_|Q zTPugfMCF|_!dFe&DODFU#a5aNZ0|aFPeYWIMrWMkV&Ubg!0ln%u(#Q^@WAwD`x?W! 
zw&o3HA~dyD6>5_Vz5zEMiRx&s4l$GdV#KTvI?vuZKHMB()v= zevDoW>7$dzB%>jE2p7t;j7eGnHI(kABHE5Q{9WS)o5Nq%PgC95*jMa3uHbbp>wTdXKkG)Lc5 z2e7Xg8Xn<$To7G=9>Hv-hQg&ls5DG@_re$RoH|dp28pT**90r!C%TuuQ?aOJ&?zvP z-E68M`~!CO7qXg~%bX!~n0e$EHx_|C;2`AEJEH%7bJx4Rf1VR$xl7!;FdG zh9{EyP-<9ui^i+}!egDSCE-Rri!BPz(mt-W(4H+!e;`FkDV?WY!%24#O#@f;$=X)< zK5I(z{BxyLxu+VdeMR^B13d{YLz8|%Yo(u6z9I=-5A%|~cn^MoB2b;J%3a_;vzIVg zZ^JCs{d&4FRjPo#m-X^ni9%w>DbLb+{FgB$Ym8aOIykLkv_{$>!-h0-%1PpIlg#!tn;u^nA4%Sl9hx(NH#jWSxv2QU=k?9me&=WOIe~16ZDS9KQ{zH*% z;58e38r{|L$~gH4s21fg^Z6ST_=9pN)Wv%0B=|UH=zrl}`Ps;zTG2Jo>)4MrlhVvs zuq;C{l?wv(q8#^x-2wmcMlKIpcsYEsyXbRNJe7{Tbw^{fo`%(z(W13Bn6^#^xp^XW zj^0d4gPa`<{@gpg9BAU(G>1MG`bIPql{;vsxP(*e6fjl((1vkYzpJ-}pTSUm;cXoY zcfu^St?`T60cT_h^qKUAzwmEHAm7pXS(U0yoi`2|y^XOT3T7!=z(=m6zSYK|?Lx$fB{&T9|;%9Wo{Iw!uR@XxU=tW(r{J@IR0;%k#ctUPM9WR1eT>ws{X=tSC zr|(Ak=o(dtcEGWJ5e>Z~$RB(z581)oSU$>h(mVtXt$U`=LMy%?*Pm%dFEGYn-H?sD z^bY9cG&h7Dfvn^>`W)^edy(SJM0fNn`0+%{0!u4rrLiFVrz*R&mc}t8eoCSHvIy0g z`U~mAec%e*Abs$Ap3;r!R%qU74VCOAJP{Q+7qf{>p!4(`O37 z$kovr!Yj$j8nwTkVI)%;JoATNhfq5t21nGe3YNoLh&4%Ui{RJw$ z<<0QDe^yqjtJFj45;aTth8gKh{~KRbzZ8ZTd*y7OVESBdjXFeV;r*`d+m>`FU_@0LxdQ9BD;W0qna9hwGxU$ z>Wh3+e<@DdC+0&#dsE*O?*Z>(UyhIW*8wl$u`)}aP5n(j0_(CFNoLByjeQ2~ul-EX zpj+NCl{e=DapOJG>}$C4+yOMbUPg=B-+VH7?8)pY%+jLB3{ZY{l7gfK93#iUg-iqA z@*5JeL15x!!qr|AYl#7`V@G|2MuB~DM1CL}@(wT^hRWULZb-gf@xKQrW3jiWXO8=- zr@Y@LEfvH4>D~_Bf!^}yRR8T8;?MRs65E4G{zW;i*4N7Gud$o0Vg_(CK_D1lDPW`R z-R&Rk|2mF2TsFZb+hQF{9RUunO|vbswX)f5M=a~$hdPZF;|DVZZpW=!1?7Tx2MOGX zq6)@@OKGC#z~jYI3yd(M88~qTkRP*x^Lm>qjK+XSuoGM0Cj1kenxZ(D{YGMEF_MCV zkfrytzqwjCS%u-Py1=>E^;jXlu+6v(&c$^=fBYeKK9~}}*ch#NT0++;MU^e!$ew}sZ@b(}Y7I3rMwy43&Uvj3X1>Wt7UdEbW9Nc| zKc+R7HC6*%TA}tEj_ZNrf(L_D*CyDye_eB+P~PP~XU9Hkdym{4f=?#8_To?)p(sgpq=Y@4>yp znp&F)V*X3-^WGdiPaUTiyVDJRm zvzu{FZHpE*j{An>_j`D@cY&91()b2FW*XI9{x!I8RLn&pJ$)vqkFB(;_Q?+Ht#~-73aV1$KEz*bgiST(At1+P>Ak{Gh+j` zt`KO6v~95^+I!j?Ic5fS37!>FDq9MGV zAE56%$6oXriJW4XmT~k;JQ>5`-!DR^VV|4>rG4XnPbl(q+t9rd!9F3?&=$jx1ah8U zL|>!3VXpiY)Q5-EA)|`6Lb)b4P#UT?v=AfPcnKx@pk7&9t^6ZD5ij}=`F{C2qkG6M zcao0!_xN=04{ttSecvl@f|vGA@fJcNFhY!x9C913Ir5V}h6eE$@Fm_*>*%#`3y24od@cHgLdj|Axk`Ep`9AoLdXIT{?;u}waguaOKCJL+9j%;^fD=Ynq(jnhx{gHl_X+mPPvjpu znxZKW7$ZfIr?x`(N@n&jSD0q(OSEix(8!F{i?m|a!nd**iQB5|G5A)Ba5-2P6KIy| zXXHcDuCvh-x0XKiF2=#f2$_6;>;V)%itB{8@g12)*QN5|20v3xk`MXwdpDz7t+4xo zyS~TbY2_~N{stOserHqXM)zjlakPqTm7jvO^#rV++iFX_G5*VU(Esv~%0O?N#%$v1 z3A=<6Se@4ilg)1}ig~d~Fog?mgmb2yrdZPv(@)HTiSPvtTHpAJTo9RREYvn3txyS^ zvpoL{|7PDRF9DtDfwuw(kg9ipzr0inspai@J?abn4Q}e=jF%b4Ok_@hzxEAW8XY&| z4Pcv$HJ*Z!H%=c773&!~f7@Zz8BccuzweduNis<{Bu1&BU7^fak&2S8P%DSv98V+D z5^B^#NAW;vIGsV4rA8yCwGYHYNv?<{vhR3u!VXdw7W zZJ-=T3CqaL*tD72FXV(u>)S!u9!ZylA{)fiU_QXbmceJ54q~M)Y^!N+=Gf(UZr^DO zLN8+nbF46sok~)`RZG{`DI>)DzF*#Vo?h-YF25_E=YnUUC(aY-nde#V`zS5dZcq_e zVT zuKTZw9;KtPls?PU#3bPy_g~ZXH$IWqIF66tU-EFj@OD0(%}2)LoZpM;fcr#KxZR8M zQBYxHxf(cE?n5Tr3chx2_Yo)GvlFfM3h5EtYNC>Jn0a7J*G;PVc~ zQkvUH#Uj12QhMO81po9r_ZaZFn)>JZTl$u{OFEOBwLCV-stuzGu)l^oE~urc7%?sm@FmR}j4-7li^yD(n;1V25icBngE~ zJuK5~Z5$T^HUvc3Q_Sy#Is7@+z+S%rzM<>%3J_PqjkS6uBz9keU9cJx%gb2xs{3usI?99qmk$_(G;IOJhqbaF%vE zBu5PRxjp$c^a3Tq7YbU`FK-XOAO_1;+`T?2iHnk4phl`h;nkrIzi6{kpx2?V@F(Ig?LgpxmPI_?b2zckD%IlAfoulNXBRK_FM* zvhmURIjiuGa17sFFQGlo6?c%Rjb`rBwJ-zOt!c_5-ZTOep0uFEyv6jDt*X3-U zvn12^$CVMBQ_wlZ#d}Km>q)%S2ARKBzFzJim)9NZn}7~^$-B_|)?3Lt%w5V|#am38 zs~$88@%0>RXj=H~h{cf&BCmv>4c!=gDY#TfU`VMDPuSon5}gnk74|kT*fv=>!`_Fo zQlGS^j%kTt1YVYg`!&yVXJoD^r)F-PtCufXiKKGL9W)v&78aTInipBptx@)~_Hf5; zxKv99WH^HCoF!1W%oYdz$VFbD2Y_P+(dYE7Dy_^B(>=qS4|7N4Nv;z9!^$UP5!~W+ zq56hfwp*@PFWE-moLbHvWB+YgDePf)gU40_{^uF+n`}ed#xgxY&6JBt3w(7vCtXpl 
z%g(CK4S5~%&N*wjH@cs=7|%3c1^6)AAuHDbGXmr);R<|gOj56k*L-H*YTpm>g}R;U z%aq`13vDc!_RWFLz`6J}A*e-&7+N(vF(N&pZsfekSCK6vGs8!QwF;>alp1i%QPw`q zdfePhsK%b5POII;L7rO9p}Bo>AMg`3L~O_C(VD^XfEoV2TwbfzCSn9t!B zZ~|9}Ey>g-`AB7S=mydjil@dK_mJ@@qJ5B0fV!~}JK=Qo4I1Ca>l@Hg{se5*bZx&D z0oUVA+>c}7li6dW8A)J4omZbAS@jw;rG?&t?p4mK zxg&F)XE)ASo>MaKt1G{Ehp(A`sF*Cd<*k|ts-(a_h7SkVQ(%=WVha^sz zHcjp(_V5+){^NN7KF}l2B~O3P-`>T3lQdHF`}X)g`gh8P)|9$HKP5YugE+Um6vC_) zdlUN*TOr$QTOWIMTW@Pu^KoGu&vPBwi&$H$(eWilqYgaWKx7Rw02Ouqp+ncKZukwda9B5KF4~4T^DjmPC(YK%zv`{ z*?)4r=B7ATyVHHAw^V+KW-t=t1)j`=;l_H6-d(1f5d!AnCv2gd~kIi_2Sn+Nc- zNv2T`EcT1&7uhTo5MTR#c+a~By9{TDvz#;DW$`TYRTQgByQNB?rVNv!B}y)(GyzL% zuzsIPC$-qsI1jz&wj+7gAH8@bm}iWelqJQ{p!vd>q$}w0oP-J0H;}4>q#_apzU&^U zvy>rDmOhJ}(SG;RyWG3oTg87-+O7`Lt5CJ!%=h!*mK@6z%O^_-km}Y1ya*~3x-ooO zJf}8%BFaWOJc4PvTqn?%Th(CQdJndbxUB6ss+-SZjE+9e+rNPM`FW7shF`u8-O`bvau8%u~V2Y=W4^z zx$Bo7N%zFE;$^Xy)C&D|1JSP>;+H(%oWpXZ>?S!Aa?a=VcbPn;y%)Xbz3sg{JgZ&D z@^4)mjz+GGt(iZ%z={HI3q zg^cCmg-fQ5)b5%|0q%AY7F+w3>Bs}G7x$;<(tn_VT zr2aQ35--8{Xd%A#ly=U_Etex^ugV^pT`)T>>s9vV>>AmXvm&!fWOdIjn|sO?>uV;> zQ99|p$V6ce<}ptl;{w}*b)FhsJI)k8v2dTFY_Y~ggNq~<8kYZP^!Tu1L1B&;mU(;< z83dkuRrFW(_HB3P%gf19GsChHvfJfWbnXRdJ>HuDS7Lygpgn{~|OyAz^69 zJ{(Xbpp)aht+!>CP?xPh)*4oAxLj16>#OOz?EB;U2Ly)$Xzwe7RrRT-vFDjJ06MqS;ZTI4oToVXn~y+-cq z&U(&A&RAE9>y-0kUO!9>y8DxrDC#j|=7*b(neSM0Y@N{JloS*mIx(_ktSNt$g1ZY% zPI#2iI$?Ri$hgtb)x+NgUqZ9r1M?w%GSh*Yral(`@?LZv%&}x`%b1WcC-ZuCT;3mN zH}_ocFn<+kq;d<&LpYhw65+Ast6dEE64*4bF8=O~9HSf|fd={?yyk*PiksmZdSOgg zKKMqW7jL`sAJ-#Sg!6Vz#hifL^0`88dUkB~v+VY{yPPdO7XJq^04el7)NbY(U&WMV zDvm^m)BFj&c!}0I==U|i9Q(v*$i=(p=f-{Qf-+v(Br3jr-b2h?LDrP24}B15lKsShAAqbRY;}CsMx?bZ=vL30g1WA#uSSwVkn4;{v1dz{$u3#^zY6eZ2G@{US>SVESr<*9OC;W z57oTJD0a5_v!hq=>QGz6mGD~OM);t}j?s0a&qb|?yb*2=s}uYsV59B3P>P)iwY4E` z>08CJ{<7X!cMBKobmrX1*^zrW?=#Md1H3D|AAAYYe7U<4uk4i*AxCDzk={mM2PdzE zo(z5E9%kP^kny>!ZA5;0E4r7eDFekr-YuTuZmVl;-q4()IrsDSyC%BtdIJ5g#AAv> zI}FXDpY~DjOLBy@j>ORW5vFK4c3go8@t+G`inGN0j9e8VhW`#d8FbLT!~B&UO0Cy! zO55CfvW1M4KS>$Ztn%4MvRY&f!2gbBJ;Dc-3;$>F;20m-<4;eEKV@qqRVj&h{uK=LKTE}bvYXacH?`h4gHFGNzFH2D+en(b+dM5n{y`T)XVGO3iqv& zPN<+q{>5I;-pSs;249N#56>az)|*QLN%(;7mn^>H?ibGc zc~kQC<$lU5?n-xm5gTEm+TX}Eu2R*Z5ijE3nUfq{f`*0GkGK*Ym5+*>9p{fN6GNhR zN8Ag$5w;;LGI*+eyeWh|WDHRwq$R!tZ!Y>&mN^gSEzZr$dX$m;dqR53@A{b~a<1i# zai@BJpmQcoJSH=GUuM6s%9<}=L(ry>3!!7egCc5%j|?9awk4!w(Dr~7`#JE0w}GXx zhlCg-lyrYbZy)zrS6|ml=N9KiWbw{9f4ZM|p^dcH{!(5_X8%&p7w4PY^c*9{l$)80<_}*dm?y4-`Lc?+#T6AE z35CoHtOxBS0=tIS#V4&W$upux2^V1$_#x9MUADV(7Qfp`rUiH-&|S2ZiMesS()8ehw)h7x<-| zP?q^&Oi?>Zyzhx?xU-Zq)j8QEy571|Jc-_YzO~{6P`M)Y_u%%a#!x7N_mG=T0P(4>w5O@-?>xKnjcXDbx~}+o_@4N7K_`OySIck6 zR4P-`RK(Uez!4M`Y!B9hJA_OPHA0JowGGJ$eCfDrZ)Uq?j^S4#SMeH3O0bcnmXiX! 
zuDq%_QQ0wBGc%iKwET1A_s8^G>GysY%Bqxiz*9rIu2z6Q;|AByG}6{AFg18Y*yixE zk)xx&L`6k)ikKF5C1g_2K8MAA5S~FNsbEyqnkZwWx#A4}58oEwZ8QOQ^RDyEaG!Fi z&hO4KuIug)-w;ufRLpGppgAxD9*U#PAh2S1q|WMt8>`YK!2S*+r=fTkBV(Xgj;C2; zopKK|=&#^z?Ex8QxEO)nS&ui%x80uz&2yCAjBRT6S`+PNTL>Cf`#4&okth+9ry};% z)?ZNG3iE5=#_rB8W^LR|xOO@chN`0mNQZpQ(a-zcGuHFcMO=@arCeiN=iF(&({g+L z3wF=`^i5*I(`Yguw6w7mw$Zltw)?g>)+v@prVupDQM`Z&&lLDBj!}(`SL#V=45k1N z+yyA0rA1!jy5NVJ6QRS#%WE;3jTabr8365d9nr5DhqDwSxo63n5(AY=+6NlmIAn2{HtXay*e zv`{`KHv!Qs1G{cJt*Y+THTd>+<7SosPyHRbDg#C!s8}`W%SL6RID7*8!S0Eo)2We2 z^LU`6RDz@7w)QVPdjsX`@;&tb6$97wjy6*Jq;1BHe=`VAnQ$Ty>I_{O8gNap%BMk- z4}?<9Km%J!--0W|3BKcNeHSPeCqRmPua;Bisx`C)nE#K~($NN?%5%XGT!dR?JIu0< zf>qiJS;6aSJ7m8@zzQ3v>;n~IHs-dS@$)>^;c1{}!?*6?3i3@sk$aCO?mPcA-J`O)gZ?*}jpHW>FEAsi&W}YhVH8^z%2-PxpjUa5@j@@Eb9z~* zS(mhA{kR?l$I)tZl0Hpp<-;1zB~N|M>M87!2t%o+9q_f<$i;{1lWAy-3h=IdHmB7oM)Cp!STyt>79m^xKWYxJU2Q_rOh*sQyH!$a(3BxCNQHa_TQ-1^j+1 zRjYPRi-*P*Oe!;}Yz7;NcWMv%QU}2|)|38$vt5RqRs**?~F8A_Cs49 zE(ggT=^u~^?JB(6$QOEKFWTlRK(ihW8sRl;w7jaPy@$FUpWPH9c% zg6KRAUXAu}XPs9S5QDZzZ!xKx1@6=>RZ{Y4td_KR{L0g1 zjM<=gPJzSqmGTWOJNuDldoJ%$JaUG(58vqw<&ttkIiU0fD}+`$Ydw+h71cZNLw$ob zAA#go0XV)YsqtzWd|Cgg!W0rFsn9hspQ7s^;Zu>##Lqbj#QtYcsr-Bj*O?tcia>P= zV7^f_y$zn5=kThur9+{3mcyGm7m2a8hNjKZn;37X8%9B6s8PVUsQrW^=@8Y2%%r~2 z)!+pRqa`B%?M8#~tsci*v;xjLIr0*mY|5z(wc_X}y=`1JYU$OHs%)=C8}-rGR1RwN zU463N2_&OUbP;AJ{m3Y3+(#C3jgq7^1H-9{`dvMTN#jptj66oZqP|9ZO1w5-^~rgP z3k=kU+A8=C^I>w{8D5b}bZa^nPr_XM{A<9&xd`@9s_|Q&Zq(K`sXw(r+H_^A+6O;R zcewRB8g5yp79+Q*VeW)d4s$|7UiLs_M8+5}BOB5{N^OC1jW zcR{@nwHdy!SSB5cz#)ogL)2yHfobMHCT@{7$&bOV&4oIjf(&FsilzUij~REt#{C8E z@@x9MaYY@eSd}FB3fF5kV~%kcD{)6S^IB3Kjig-=Bh!prBw7X5DCy@qlaBU-B;eAZSrxJ%OkDuPlG(;wI z5qhSUY7^jas3n(>cr*p~_P0jkYxWV6i#1SpQ#m9AIQ=2EvczgNNOyxQfaZH*hea)yi=b+pI?D}I72I@mDU%4 zXEzLO4{7jx%~JcSrL+-n7gfP3)*m}cLwsL%)L5h|bV?#ITmrbV247jYjU3NcwkbxN>0M~?@+ zX9#GASI`%i4f^SNzM_djFN~8v%I##T(t*Z6=rwP&=32V;6=$8TYB9LRC&9(@O0Q^C zz^?U9`=i^9qgdA#k}FIbKHX%sl(f_aRrD>my`p&<+KShpo$o!S?tR%_%n?kjTHyqH zSv!j)+g0i`-J6M zg!&n_Ah=7>;7ZkkFL^T?jV#Jcqr0{le&!Eg@oraR)y?3J7lvPqXf`kkZYhO8R^077 zZij)I1xO{L)Y@uR7c8q|3{vpUdYq58~*PH3x(Xy#%-_?)GZDoUc5AOfx zdJ1|b9H8sp;sy)t;EC}I0m2i$J$hL(On1QKZ~O;FkUZ%33*e! 
z5-2034a1m*JM(376MudWP{m%*{mCfC1CnVUcmhgu1-MM`{4+qy>V=bhG??NNcCS?W z7(J8TPj`mzy&AIC(~M7Wfi8j<^pW08pRW%xzCyEoskcDm!~mpgR#O80zeoWiht>!l zz{|*XR)nkYZ(+7DQ;_&_{26W`cLaUwA@nD`le%2akPgaUy!n{8lBpd9#lg2@yz9y?1@q-8Wl;I6>)I!1VHdTOo#0;_5g zKn34t{syuIAbnh zi@Ca0wT`mK1^fue4%i%U+40@(vdywSGmkNCM0fCQVFG+EcZ3>3Fh89e%8tUjcAGRr z5_J!{al6tJa7ItT^x%(nQ+X}z0~Knr`XBwTvAjy!E;f^rr9skaaRU-QwK0jSqCH37 zMrGX5`(veE2vYk3Fk$myXG_C3B`K{z6K;&|=?3C9e{u1Sl%hD*Qu+Wm@+P2t)JnIb z#u(#_c&aX45skh%>}Ku(r?XAbJ70tS3ZBP*tLFly7oM35LUmJdK}CNG!+!=_R1zwf zJE8Bdqq&wXtLB*S`POl zr!0aGY`=Oyt7;shR#A^J*~mmjC7N*TPp&_Iil2>>{x`UUVGKpq=p~uJRA*-3d;UbN zN1wz@2>Vg~pNu16C;g)<#y`v7( zsbmG|Z`1*cc!M#8+DEMfi)Sd^g04WHrI(OeV73is9mw4?prlM%Bq3CS0?z;E5MHQgW&uY z8MQ4+kc{UJzq5>-h8B(Tc+Ud01^RCNALA1;9r=-Z?get!Vqu4=rKzELD%^?TXwe-G zpK}x5#}lD5{La<+c;q$~a+OTQL8po|J>)90Uq}ghm);&Mw_RYDoYxNtMkCi|=iUx(4diCaDX^ zq2r`?xR(?}dwUA*lhOJqV-d9sbek*eWpI%igSOQi@5T~za75x)Z=9t!VcOjg9Rab_ zEwGs9syoq1@&ZoVW7<0EH^N8*Zrg47o*(OQNq<9c<$ASV@RQai2pts|uB``3(EETJ68TO)}Ej#Fi^l#lgo^`-kaib3KueVs8->*in7JE?tR}NDg?OaX9_up)baTe!hp`B6NVe z)2SXoHe?J=P5sd)&-1+v zXWV1Q!f)FVEV<#7-FT%{RYfTUIfvq&OYnz8c$fP(NPFeAP(VVElQWTu%xi`t1MrMb zr+?7LNh78N8oaK78FHT~!i{3%$zb{gy`H*dEH~cjshEN_)+TAXX4da%!}LUBx86zH z12&LsEWi!#7rlTgM96p}Y$WiwVL(p%=Q|0{Gki>*y@Qs!G=| zygGJK7Ko#co!A9-cPn;xcXwi-f;Dz`C$`vNx1xe#D=M(}ULEK0{Fv)pXXcu76!%{1 z``-6??i(pT2lJg?i7fDV^zsZ4g9hS#HxBm{Ke_}z%{|r{I|trQB<6Ir?Qmi;v4a=` zGWAVsD{??B!8|_!u5(p+l0VZs)>lKyD;Jbk$`@gI`mJ-AVD`rpFdcv9FF?kb!Zza; zvRBw0jEud~6fQwHE6x<|a4Xs4bRps=dU1;tQcCwp*@e7yeMf!kq`T@*BN*A{$#4l} z+O2I0lZqE)Pj;Rli#c3l@#dQAn2N2=Pk2VQkvE8OW>F);bXc{q2iazCf+^>L&Zra7 zwW+HH8Uu*lV1|7n(wHw`W4v-^xr00j&RhH=b}mN>&G-gfNA@LV5GlkWD+46S&Y-jw zh82d?mg9SurH<0->S;J>^wv*kqv1NwCC@|>d4ez1w-ogFyXqS=Kly|{%#Pw)gK&A2 zugtY&RMJZvA_wBRJ`%Ucm3mXXg8oNatUtqy-7s#THL%C50+ZZBvIl*Z-i#CQXX*ot zPJO6!Y9Sp;hmkMfnqFb-*TQkDPtnf779#82&H45O>J!^cXeKst41%v>jBBRb3IEyP z!0~|t-RaJ;4hO%Uu223(tNNB+Q4=tWV&M`g01wS{?O)@Nz6VtM)|l5s;JsI$oC#j< zXtIkv6YsiU(5wcbpSReIL9Z}EAFl3DvSb<=i%-6^>~dKx|F|>XWbc!!nrZfCOGf|i zJ33`I>;z&7ri)Qj3-T78DksS1;0Dq5Ff^ftV@~!Da-xxD9Jn=Y;kFxrWKSY)L0%Zf zDO$H9Y#q((%2WiSA=UMZ+=~rmIq*3qnxAbqd4qb7md$pm2OI|{@m-q>PS`(sEj3z7 zM1n4#bQ>+A&*0+m<{HdK2U69U8r)ufvmk+w%z=RU3^vtxdNtjg=Z7w8IJj!R~x4I1d^X}F@OSPuseYgPc$JS&m-05Z!1>sb= zVBJI`)M*lCG4yNJ8y-x9hJaUDTw~z%*qCRtUt|^WKJk~7{qkKc)Vyr| z!1iH3=yg@-!rXUZopXS@s3+E4)Rijc2HkTxGoLDL$LKYbR#Gv4dw&;yA^&CnX%TBeMJ{W0;`g#kT*caQsh%B-x^^2-Y&&2(6F&dkfnN>_v z_5#%^qPq|qPWygk-c*qe5%U;Mwbc4Q98y6Vj-1*;W}{MKgBxZ}_@Sjuc-?=XuQ z3eJ~W>{a$V*M^?~S5&EC{#iA)MBmu#4$~FoLY6zR}0=zuST>3a{>R_A=aFDK_S%MhI@%BaGMP zA?!ttV+XT^E(8BcF_*(FdMbEUx=xCR_@dly%qAX!YIl9xr`ZjL=8k|qca{%zm2fR|4i!fV%Y;u{e&!(gnkb5{ehG3K z{f&Dq9ETyi7g$hV`9yXB^8~IU5O&~`JcyHcFivzl(ViX)Yu*jCf7`Oh=r80ioFqSD z$25@K3f48|Uc?eG%1v}Do2gA?#(%+^p4~qCS=Q)mJ9~@ooWHwN0h69oZ|khXS;ezo zX6N=j^>*-f^nLd~mSZq~)MQFN1QtU#*bn-#eZ)vtc)&>HqEmy;2X69|cb*bTanZ~` zY!Rw+KlsbSb4;ET(IIZsXma9nkqclgBj{0gof z^O$Tz1X=mvn$I+3IJ~x64RDrQ0>4iNAyehqRs3$SjWYTA|EHHNWjUrJZt$gWKJya_ zS&uqGMX)o#rC7&&#u=(S=NG?-_xasy6?O^ti(3MwK@75=G%>-vW{g8h!D(n9F{bJT z^*X8x)cCx%kQX{*HG@2o{ivafX9_anOsa&14d*JlM~?*A2&@#e?BU~(&e zqLnek6=7Mxpq%5wPK4+GCpzDXyvK5{4EDMI!Od{B;~&>b4;A=d;H7{$?rCBnrUo(G zoQDKKVPtdfYctJJoL+v(vd$c+AWthv8QH9nZE$q#NDw4a9SED;SCI zV6*b;$<|qF8~csFA+YG#_F^_r^T=IxE^t6T7-5*qPXx^X)GXu}Qpn@j7@dQGtrFU3 z^{D%Jzdi-MkrdjC{lt>uPT0F#>|>BZl3>61t(P$dz;gfFnoTsM7BE|}mp)|MIPZGR z{uITY!|t^|rgr7os-R|E)Mm(n?_B2bKY4x^{ax`XL&6xkub#2kkqi}|MdN* zV!nHM8V3Im2T;ARr@BQ|N1lGNYnKa>66aL$B%5Ko_2u#l{{jDVxs^WN8bK{)U4kX{ z5&Lod=&ztnWtlxOch8LzPa_x$r^1cl)26~~;D=E$POopQGq>8!V7D!WX+dB6x=}^1 
zq|Swt=C1FPG+*Cf%^+{nfgFvShs3|-#-fcI3u@3JCXy>4c64M4^DqzHO}79a;{iPO z9dKuDOCDw(3cVfY#8No%wPtpay@)8H4R&jpnCM4hJ8~WV(DSCE?NxFq-{qY+iS9S5 z*k`c;+Go`=hN=1GUEY~ljsL{`8JoG*mq&{xYH_0Lcz_(#KWM(o!<(i{p5j~O8|)vf zB$ygCnwLetFo$bOB^dS9zHleKhUrjOqs>j^9mdT)r()~?OVtJ{q3T5AoXx|ml20t+ ztl}zl=F=wFwr^PuTrlWDf|bVlkl(y8`V33pWdk zaDf}JpNYWkem_?mWQ)?YVFww9F}t{g*>?{(RkCo_>Y;sAn#kk)L%pq#onPY{?)&T& zy`8---xij<;fRVQFj zVEw>Q=V#VK$;3mvb&gQwaJy+Qly!U)esaHPzrERxGGYBX%4;hbU)#)A}eU6;6xQ3t4+%e|v%Z-Y;@JxLLC>m%KurrbD=MLX@~xhzB+1 z8+RL)#W>v2r?Dluf7q^|SD&*Z&4$QjG_%GNQ>dHBZ~P0|=tOEf@zVUDRZ|#nUHoz< z{24dbICYx*T52FiDf?8X)=O<O$a(!4EynZMikK=T&%SoBVd!K zmt!|`*!ryYkvvjk`Lmj83?~lbyjQ_YQQt@!o?b2eE48K8ck&bcjjlpBB$MFg{hQq# zt878ap*Bt!`6(Ia3m072b?o2Rwr+xb>aLX`3{s$YKSaLXW%V)s?c9>uFA(+j3(GaX6 zKZ12?w||<_PCsv6wBKS2J%lagDi?eu=aS%cu1P`x(0tC^Qv1 z@qYfMNNbXQLamB-$6GZH{j9mzb@Wna;JHSS zCFy1CN>~!BhzW3d%@O-Jl5h&OK;|y$Na3?_I!`7eiODu^_r#WgBNv0gGZXFG5-_|^ zV_t)b@exetG-UD4u=&`=%n%S(H(?%87yE(^Mx?$e2j0EzdAa} zL$o_u8veSaEz-XmhS>o3&mJ%zpXEjc?8*5cN0RFS-wGS1(L{f;KPbs#*!)N(!RdlLiqk}kRsL2Mqc6o16)M#uqr9RUKX^DPE55+#M3RM9v_MO5VAyDvf+1w;S71kgHHG@fI zy29s>CPWE6*oM>zyD`Dh1apbz>6TPu+$Hj38yrrDz@mE<9gbE^GOSvKm=aVoyy?7D zB7GFxmf7I?b>l|xfx;DjH#eW{!Tv_OFqO&;M%htoi17jKl?ZhvM9Wpwdf=Hi(;jN8 zwF+8>_6&2P^6EnQhExi!?ekJ~`Ib}}Z17OuLj69w*K;&*uwx6;od~wpq4RkWbes9q z8~O|v&AFLZm{ZIoyHbUyP_{dNU)U`a0C9E{=8==^7!XD(V#;xnG(p%LZmxw(V*)ri zE3_z3zeC{}tA|rw0x5u6wV0jG_2YZN6kZDagA1@(HA8oGATbGi8y&yr1#}p5h`Gh) z;~t`yZ7~(umpE;OvWxg}VjeMBc*d1Mo8&89gUQ8|W5#3SP=zzt@ys1E2Z(CFK(CpE z&Dc#+gFUa8UC=PqI;x`1gfVjpenm0HcRjaR4b$fF@X6f*#pni%1>IbE*f_ z7~B)1K~t_}&Nu5>uk50jZZ@zAB4?)RfAstM8LhGws3szJ zDD-7+gjm`2#XZln+*8Nnb2NeZy90b2``8^^9{xJWnS0=V`f8RSZ!=D=5&H&|>*>rE zHU`<-%6ub!2>*~xqra2!mPenX{-ZxPa{uLwA%T)^U$=UhwP5^o8>c|3>tIc?OrxZ6 z5ag5)Jz9TYL|H8`!R>3cuyYYn}mVAbs zOnq{MyJ_x)d4>mc> z5thymMz*=rYJ{6pV@xSTw4^O2m|2O9$t>!lUB=8(&%!=eQVvr#$sux1G*(zy@elG9 z^wIu8vKxeKO{Z{%AFXY{&iyPA=8Vj_I#1JJuW%96qCE6=wg&99QG7*oGAEdIkw{#m zr5GQOTnn~G7?-tC+DuGMUz;is%0}{U*~!#I?b&;rK9YFGAFJ?=T$YAuw47aaV1kKBKaHB+GC*2d<>3g{U^sv_= zB{B}*|Do6%7-SP{TvCyHJdN#17V?eNje7n zpF+eJFg&&(<2DlMItoeEzmvQ}xMQp*!jWS>4x(=?{+&ve6aSuMG&crduRO=tVt&L9 zw+k%Dqd>v!^$QM<7y-`Nb)Z+Db*k=Zr z`^^2O0k0%&PQlJJ7Q_LMR#}bGo@;!u&GH?-86kU?> zG9TdiBR~=BNq)kK`l^ zOlOV~-U^Z81ilb=0XJe6Z>A-nf!#t2eUsJFOhi(;kX}!Fqm?m7+ab7N4d(KTNzQYw zuda)(5LbE7nL7(59I37bo{H`Q4ll^u)vVFT^K~V7?E8Y@gP3QXfPFdI4kP=6RnwHv zFa!BWZlTO^RVUQ{t z2s%t>)KSuVbUDf%~y@v12b>Ri81%+9>Si zTf-BfF_XA&>=x>)y^okfJ)*B5UlmDqg$1}f(xqczGw2Qq;(D{RIbG+}lTr_V3x6yB z954;{OVj0DauH>j))M*LZ`Ndc2gPt3kr(G-8z05b;!6s)K#A!>GC!4f3n~0`q|kmb zujpVviC#gPX+M|7Y*lF#iS$g|(I-AI|9q*~E$V6d-)PUSMC z2X5dEjn%l%9JFS_7SJA!MC&gzT>R}U8&EVTB&a~Z3-{uHpMgwJ3C}CBJog=q-*Gs- zR3-A-gR#|bfo)WGvSR60TI#z7-cO*kRc&hgP;;QqJyOnXPXyIIi~mpf z%pIZ^QrFnlVsQtC*!>BB8Xs$&ay0c&w94H6a!#qb%c$1VdeF$^1*zT|Fux6xc{C~udlYWavrGFyA( zJ(Zn9Ic!X@H{$)#l6Y#$>Jz1=-VK}aJ{GB8QYI*Gl-p`qeXOzAZjB_uMe+>ujdu#w zc^Axq=NzSlb^H~fkT9JMCYvLzQXCYWr8sw7$5}EA@4~kxW0xZf!lls-u925aZhpBi z7>$x3(GngA^SJ(uK|jTPr<*ZB9VyN7?eRXzev!Q?du8?xxT|aXZ^?7C=EgRovlT+c zqDhsEO`;&)7E@t=Jm;L^uI>qTk-~D!zT49UK#bf(_`vviq3%QLpI2w7n{cC?h;6`L zBL`T>EZLm8OQW5@_TwuGf#NlofZD+~l!_gH2QDYw&rZ{m)I{l%Z;rQG_KiP7e{IXC z@w;T^;_OdZd9(Da_TK8!B|VDtu(>!N2j+?R$dxCczk7{ir?5llf%9c7=$#X(U^@pU zC{y9qT?TGIs-4GPjDE*=YZ{*Bula|;U)i)T?2#m zVK9h-kciG}5k{=KTk7c_>Wj$sXP(P^ment7Vb+YSSy{cZ%lPxj{V=7D(A&ZpSRAv( zFI0KbwDN+<@!oEXdtM#;9eNX=v8B?jJVYrxzk-k)X+++)LBYdL`3(FOG_{pVV~#SP z@bC7vM}r$UkaBWM(I8&!40Ie6biwI7;a(89A^0h%4lw}-;Q*5y6+r;Y$&{hC6L-x@ zdWxLjJDNpiN`J;?55u2XH}wuuK2MA;a2;J`%kh+;!-i2ES^h-a`{poJ=~l>_Ri+vd 
zl9`~lQP0csr7BWI_#|`6t>y2^FHJHz>j^AyDd5%YW3I4oh5nAp&I!)et~ggi=O}^a z$8euPWITkOM?Sr?RMI;;Yj@UkZ%p>cKYf4C`(5r2nK?ach_|25<-3(7dJ`nj7s0S> zVm;AD>$Q#bc2)WZ6Tse}n^48EZCpccVy1(`u}u8tc#pS2ana?t;JoDiH{f~TxS(7? zlL8wAb`Q!Gd^>P>KyA-`cf5O$YpwVdXU_>BM|KCDWhMF{BPhelgDo0Z2Gmnx51z#} zkocQ}Zu%>1SlXj$u}iz8^u(#+iC#%trwo+a`~PI^$;`-Hn&tNvlW9C3!$3MIZH}{E z6PT7`k}{ria=(~U$Zk8(HhKLwpJ4Sfb0gO{7unT2+Cy_AQJPMMzpNqY!pwV_aKPEc z!w2LJSm*ie9_Q+Y9dS>gFh?;uPmU4lnZTChc`HTLDv1u5}EZggR{DMKl`>x z9Tn0jh^+of<`F;8Q4w6r-2uq~kpWJ3yi0fNaNH1ka#N@qFd=jXiJ%P#U)QDb*gY0h z&#Hy>QRZgzrI`zxlaH(+d~&>V&UAHm^>!+b6vrt?32_g9n!ClWr;Cv>W(>AFcl=2( zsV4fs^TAerEwXxry;FT3{q3dA$^vbdQ3p)DRY*Y|BWY$n+nY-fwm7n!ADyL~_nnch z`>syz@$Sm*EHRdCK+mTR;gc|x387UaUydXHQOnw(yOAW#4}y0bTt|t@IOV<6SsA7c zvIY^kiI3JqYXt7nN%lo}OJcZ4?lRj7w6$EIbdO@*QWePzocud$gf>?FD!KjheYCH; z-z^nV8ezY`SS=1l&Ro-pcHCEbD%%BJ~Yd9HCUz zqm6QAL7ZXRuq2C!NpsM23d8482KoB$);EDjf-B`Xve8H2+Ij^J+dQqB z+8l(@CCVxg6TZq`+|$0vALIves(c5Zo58vtCp8c3R-tNVoO~W)yO1B*oc?H{tpHz5 zR+5!M_@+HY+NuY+g4#z%ahd#OOdub+@&yJ5H4hpam^&cdmEdx^a=KVon)5px(qs7_ zd}Y2NdlPasR~#HRmC@i95I% zH-km5Hs;aAta+deYVd4G$YIC8Iaw7ngtcf$KevOBjN8x5;}<$YTo*+`=qLVgmhfbG zOn0L5wy=CG1gK;D%Micy4aM+3o<0flsVe#5?Lc-V`3ZCz=rf=*`q6 zZxOaNjpz<$YpPya-i#J+0J;*tV5~W=l+^C(=ZwSVe&d>YMxxa}#vmkwDdaFZ>7g)N z3_~7b2=RJKpTcV@!lM(VjZ;p^OQl!-L+B^$^WM#Rltp{H_?yWeBuSc}>{lwvndoOX z2Y)fp+tqhbYN$+-Px&MKThLsN^9}Kf=*X4@@9HI}a(Cc@EUF*VA8Cz@TJ}0Jke}r& z=(_1B=zbN@5FP%8xs)7D0##4v!0VpMu6-`kIfGk=jBOXvYxP0WbOiXAej`g;iT6Mp zvMQ}Ghg*fTUoWyCeU*GpM1hO^75&Ph_%ql6S8jK!iPg$}fZWeAax2w^sRHL`CpLyj z#jdUg2OW=)$FT-P-U9qPp}KR4$W&SMx z*jdH#AGexIEf)b?P0d{?(D0pxNDZR{kgzIn{HK>?kCG{zUCC%VD| zfNYChn;b^Zrw1cH+>5-9S@~Wx@!G)llAliz(137cJA>evn zz9l1(YOjyJwndz@2UwS_3Pf|_GMSHVLsR507$8rO$3POz%}22Jk=E`(Co+ZjHSnff z6w0%Um`Zdf__dd7x1?jf*8X+=Dac|adT(VF%uLCO_R><2ybKJQck&QD+gN2$Bm~Hu{n=`7B^n#mP<5vE#T-D)Aip6E)0XSV_ZAz7*SLnv zDRw5mgrCPx6KV-jTuHu{81Gypp2ImYFZSlEn0?F^oRVJ9{h9gT4Hrhc?Igcicqoj= z-7*In?=Rq7Km!Kvg`#vFwhkB0#?S@ugowp6?IFmk8x5CUQ~w5b&O(I)-L|?OU)y5ND`3oB{qtgQgS6#siV^3Fo>ZLrin~=m>NaCR2&b9i{*~;BWVutZc8f z$D;Av(_Th2g2g7=inLdu>v_bwZWSY|Vi6hBM{?SsEvp;^5A|#G5_Fo@AdUsv@EKjqZ%hZzX0T{REwa z>DB{ycAns$K0#`{57CE=qa55Rv85xAc!3`U9^_Ys!O!44952z}VvgdE2-lFYuP(IX zYoWi^96RHF_~%1-M$p9jFsC-eoooQI_;C)8Q*s75;<@UuHZ`J0GV#n2%-w!6vzQfB zBCco8zeQhNisW`2LylBicx?#hfq~}1RV6DDOAA@;% zimGb6H4`biHug-ML|5V7xRr<@yCW$YfsM{s^e_Ej`oFiHBgYq@ZLrT1SIoP5as6Md zfm}m*4{FavEmTWZ4}jU&o@hn)plO`OV;sGlYn=TYZ}>`l8Ti3}kx}T)CYvt&Dhk*g zt<8FGmDR@+g~*DS)&C^~%h+;!rDyX5m&kh zqt^@1u{m0M^&gp#-sAmOOeQrGbF36}-^USi=rLS0KS5|O{N^VM3A_v!-!NBW&sL|& zWRh!`YFtmSN6QK#f06l*ZpzgXYl%Ab*%$=FTRqF6)t2JC6?|#F5^_B)QXT8x?cM8} zt+oUECcnnC|Xk;*#UeS2+OZw2oA!oZ4^<23a6LaX)x#(HaVgNRgi4WaC~1r zQfTX3=35~i2K*xQ(UMWD$}aRn)hI8HTZ?qLcxL2$u#`GXF^ z+3GD?ammDJ%(#P$8rmi~)ZaoO)g=E2^o6S{Gv$-=P5(dAUNqOkRUUVSU;1R~Ep=1N zr&TlylA<EkageJ0=0;^}%K#>R%#RA7H*GKM00b`>2H`n^szq?5!7$m!=j;XaM}_OE2~)<*j~jNlVOHjYM(ZERhEmYr$OuK zPd=~`(Q(diEhE|@3)+sYLl>kX!I}OK?SxdOAbQcy(H!c=JVUGHK2?HRkF8vF+;PgG zyDLm}8#!K1lb0c5Fk030mP8%2U|)lXFwdxEX4$9E>K%{#eQ$HAkzeP)l1h<7by-hF z=dTB>%&%aDT1|h2mFWiEiJ2hW7IwK#I{Nb6*_p^34@1(a49*9~(H%&)y=GY?p6;20 zVSqbsw6?;HQU-}ccc4*KPd84X?;nbe>HsuR`@;uU7v0YaMhNl~`^}pA9gr?sE47rj z{usChdHm`U&>1=d;zA@kK)XpFd4@hfFCs_5Bou*t?Vn=TfJ+u`u%zGtLz^!d%ibRPJ=(7wpLCHQ(vg%K^E?;3H+O!=3{bs$@(bAw zY!`knKZx(hcVYbWJ&K?%W23s2nMIYv2K_Ebfs@Tfpe+a%Z8p*4v|_T)UsQ_q7mz-C zhfCMA6Z!&jtYeik$<@If@5t*M=(>X&N1}5mT8R-{2K|c2Z@mXaa13azzw8laag{UA z*?H-b!gR+%#~iUWCcl@F_dO?Oz!-82pUp&kb8bkd;HkIC8b2Boc67D|KUW$NDSY;PlP``^X>l>C#D9U#fdO+5()z!};jbqdlY--zv8 zCKyFA%x1E>*_l|cE|FVeBV3I+B|d{?xHKQlW-xwX8oq_I#5Im|&$ECc?u@|FLHp4h 
zEg+8JH#_zU|8cDx9fUAu8DrQ3@LrF{G$2W1waH2=cpg5=#pD^%6se6gR&A=)H*9Jt z`HUPvg`;VxlTq|}q+XuD$@c88bN+)=* z3tkHog(>1gR~N1X)z5ySpTbW=@K(+8`IdTlpNI@eCG0gys$H?qEN5M{W|P&Bhg!*H zGs~$}WOa1RS|Vi@X@As482hwYS`G6yn7q@-7t~>{l&iQik2ug(*|kEP6gb?I&vDZ; z+xgY;)iu|VN65`bG5wJl6zyI(Y2^Zqqz*cJ4l=j>MX#klS0+iLl{!d{r5JHwOB^D; z5r?e%X0U!vxs4m-I&HNcYZ3Ypb*i$}nCGRjzE1tFWh4vpA2`<|Xpr}cf- z4l)&O@7cy)(5LOlxNz#zLmbqZbmsibPWF?N7W(@S5ZU&rt3 zF!JGVwV}8Z+%i(}nTeo=U^Y>lO@`BCs@O!R#a*Z8Qn$%??6Pj6^>-BW&aHM`Yq@dR zT7(H>RW$4t7$P`COVm?V592FN{s+BDSrz@c{av&@#AYIq^wWQkfJ{T`d#*K>8bvNZ zE=!wpem~LymwG{2W?wIW@L9VkU7~XBLDNnQ0$r5Pf&S6|!BQBPj zh-baeUO|jODxx@5pPL}e7CQ&L2o!?O1WgXe?V?=8#Xuo~=b06l@MjV4sl!ZZau|_> zZsj5>nJou@XEt%&7y?__PQ4E@6T?(p2{-0xoYGLP;w>pf>P~H!l>mdGfjppWXhwe6 zo}-ZiyoCI49grePn0T{ogD8Z}*g!7aIUf_cN|=6?#Aav&otK@*Jz;b4DO^Ra4cmk& z2hxfgNrN-gWnq%g({{jX)cwY6bWIe=~Y~?am>95gLljyu)x|3R8 z>@*ql+g6wd%;x$qbSP%2lR;JeA-_`?Sn7INYe|g}aN3DRt|A>byHMU1YC2nDZ$A=q z)1$6lt}gBi$Z|Y^kLwbDLp0z#xamj~-mzhP0!|7O=}QDnJR*1E<{!t#ibEWw#l_g$ z-xn7Om-$S*5xQH8%}=n0z5)d$9I3>L=;t;E5#j`K4z9({^gOPXqqtLay%PLP2QUX; zP#(0ZZ=uzkX!j)VqM6p;ED8HXVfnj%o9}(LnqAxL_3f3%fK)SAeWpC~74gRV%J~Cv z`^#aSvbKP~WP<551O%@`W@+m#Zd4mBA29<2y?2;aq}#Wtc2rsXbYHL@A&-7vqoP_xHwmNSwp{q3_@m89`Jx zgVnbF@xHRYFWzLSgn9)%u3~m+^!HmCx1lAj)mex?;jTuQ>qk`1|nbl30`&0s@uQdY_5_^^VSB+rpiqgTYvynMoakQTPj?lJUqCT?JG23Ya1FXaSa$ z;lc>qr3cW%h}Sq^8)9Y8=xV^Qh%iS8R2_b^*unj#=GmPlRJmxe;g3Utl8S!TIT5>@k<)Ib6lO zrdCo7wWc<~=ueEM8ZfEsR;0z#v6ZWhzH+FL&QYA39f92SVUQwgl5fDjvyIR818O)< z%*ClY$ggQLM8C$J)b~^mRPwnG& zJSCw+k`w00`XVOxa3lr`clilCiB9D#dJbETZh*FaZ_r8Na9;VFH7-K-#PhHZa|7x1 z`SelTHK&sI$$a=T?*x+GYb0fZkd^pI{IDt-1Cg3aRrab4HA=lIM=O2R1TDiz!gGJB znTg)PB_aZp%lt^pf_8_F*PH+I7xFNDkPc7B?;$tte^KZ&H^e-qJF$iu3A#*EW)po6 z>8|eNUUWSU8jXxAup0S-3CE z6T)~MS5&$E1UIAeA?jyigX zr{N9S3wC{fq|jg^1smc!ZrUr!Cqz!9xO;#=F@hT{>~Z$@ObJ{OG9jlFaxJj4yR*2F zYXJgcL9~BP8=CrF9xpBNHS-q8j`nW zLEfZKvR9aza9!6muUfsVSIBw9V*bH{LDP)FNuDYXI@AE{s>nrd(@A}v39B2-0D|MS^QU8@@P@0$&1Q>JnbP{r9-)gjWMy0d_tvC$E#UvYwo#v3V+Da3{GhtPkG z0ZT$}nvJ#%|C-eu%G|$q` zYuO;#olzw@PU?nD+*N%(JeM2v%lc&81|Ndy@WZ#nchc`rDp;XlZj>OvOU2wd1ry>B zw0c~WO5C#A5!2B4NU{47y~r6<6WnB4Q=`~F{6#bcr;3y#j=#@U zfk8$fulE#$_;DbOH4p}J6LEHRQt{vn%>l3N7_!`}$PjXk6=vSkw_}=C8FtTHIK8aJ ziF+qi3{Q=9%uIR|se#(66M=AF&!#S0=glE_x{N}Lck!?*SDYR~4_!BUaQv7nTV)U^84F+0L&rqQ~L{B|^$Q~8j_c!k{ISe#(L8Z*En9$^eJgUn;PqSt|k z`>^rV$THsR-PH@~7F{?ZsXhWSn<73~8c479=jge|Bf!$kE zP=iveYM7aLs#nzqV`|kHY0Ww2cWiGd`Wi9GoMq%S23T(DH2a$y$P!e4 zn46ba8__G*S^%WCC~%E&-9(T8cs8JVx`{qz{2q;r@n%`rv1=m>XKcNB8)4pH2}Pvqwc z8!@}zPMx7^veR)VtjitYuCPz(e&ilI0||;&Mou%>dSeFJ9l-;MXH3q?59hn`w{W(J zqVJ+>+)$sT3{+kyyH!~~2Iq2XIFjPv@6FWTs!8g<#!+hl@q{YO%t1%Ls@)U2ypiM_ zVxhS}{|2hzT*HMlQi4)V3ina|4RU$y0eBhZw46$ul1GoS=h}m;1amR`XBDl^m|+J} zX<(F=p<}sad=2(I#dG-`eZ(5vSGF-9D#D4!mf|moC-{A!j1EJ7_!u2ZrDKv&nLZ2h zU~VdqE=09Ohvtzr+b9iI@jWt4O!{S=sghA{qRsD5S)~h~H)dBruAC`%#BT zC1UY+6NyNw5i^I|CA@U(a{LzGiS7AI>}R+}{^plXab@}S+(z~}(+G^+9o!yn6nlZ5 zLT6BMu(?(R$K!<^PP&nD>xkWEIpUrppk~)i6^{2Y0h6qtI?T7;|JAw({@Dt(rt-||B zyOx!ieC$Xx?HG1Adx||x=fM2AK9V(2mDWS<1R^*phIsmb4R zj`6&7zjKd+b8R)3n~K1!__OuV-VA2tGHNX~49&H-%m&OV52B&?4!rnzWM|}K8KSb? zkP5+;#YLp+Th#<99`5D|>NCABlJ0BF*En?-1Y_os?~OOb`_{L`f7h=`E8t#Pskg?P z@jb{n6JT>?z@=o&oBC2Dal)-AY=X{#2)GO6mC^8nt|=%AR&SM#R}|EW(b{+zQ6=9d6}Z*A)GbV(Jt-< za~CJ~Z6Fj~0@EoL_mKif$lN8|c%v31F50zlS{PxiH}@Enb(lN_!|OYC2S11_MqjZ1 zvn0$CKPr|y44GeEISd=h%d z!1fbAiI*HX&~tXi0jxYTOKV8(Q7%Sb{O|pYNDCXxFv` zneH;Znsu?QaJnr=PPaqM1?naFt^cflsgzHl^<=%5k%Vt}J>2bzvR~Qud_I0Dvi4D+ zTg~OOm`zjy_|y69j_7QMXr}Cz{NA>{2GU6Bo4*RY5MKF&%&L+$6-2l5L^R!kO@$}? 
zv?z<6`AD`S=|eXs5%aU-XuZ5Kr&;&FBt1>+$9<|7yqLL>;#h|JUz%X?KZW~_bVn;e z<%jXJxqZ0*@5Ls3COr-0+rvn*Z>JiO5#&AcUt+7}A)M48qBGTuIZnHo7TCpHx5^Ml zt$vu8HU_D`tC7!qU8{hzh;2>)C@E0A|ZOqm}wryG_o7`ROfI@@-Q>76_w~M}4?_(B5)-N3lGR^*CA4F4ZBsmbLryE9b zit2T(rIxAQ0S#xXU7cKsQ%z6!j18F9%QEqFHMSfxg*y)3Vi;eSJ&rw%4{4nS{9-Pa zUP=7r*)23Lqo3q3-Wk`e4R%jr3MOT_Os7%Wwm_u{K%V9>H5J^K_4Fuu1@1G0a4Yx^ ziKxfsChMe8)0zrvZ*$vi&9^3^jdj*uh)Jvm@BE)+38pKYkpJLD*%Ms358y>bB5f|P zfnY&j!nXZ1>^=dyj8^Yf)uph?2Aq}t^%s|3_!ViUoQzxLDy6G-T1!#Fwa55uF44c3 zrI7vWg0@$C-1}F9p7cfQsvFu<f9Y6<1Ax*yc7??#w)1yr*mOdjqX_gvfpTFQUO=A7qy zbN`{`yN+3b)Ju2bEuNJ%&Cy0IW^qN(3_M^o)JLhu)FE;W-&uc_+)`!Li~47yqPg8Z zhIUMUP6w;9HFpc|@Wx=jDg=7k`aNZ+;*g8@n)-@MnbJr9GpT`k9dvR_e`U_Jka#z@ z810OCV508^AzT2zH7A%EKdcI7DbQ=%p$|L)?e)=Slu^Z;tM|hFq?R(pA1IZP9%Wbe zj({t0wKCM$OPs-ea~MamKXJ;gjQ;d_dN(|~rRk02JItzb&|}fWdC8OnXKompcRRpi pdqOs*eB^SF_}*ZbThV@DyhRe!WuCMOqwBlF?ra Tuple[Tensor, Tensor]: + """A naive implementation of `Permutation Invariant Training` based on Scipy. + + Args: + preds: predictions, shape[batch, spk, time] + target: targets, shape[batch, spk, time] + metric_func: which metric + eval_func: min or max + + Returns: + best_metric: + shape [batch] + best_perm: + shape [batch, spk] + """ + batch_size, spk_num = target.shape[0:2] + metric_mtx = B.empty((batch_size, spk_num, spk_num), device=target.device) + for t in range(spk_num): + for e in range(spk_num): + metric_mtx[:, t, e] = metric_func(preds[:, e, ...], target[:, t, ...]) + + # pit_r = PIT(metric_func, eval_func)(preds, target) + metric_mtx = metric_mtx.detach().cpu().numpy() + best_metrics = [] + best_perms = [] + for b in range(batch_size): + row_idx, col_idx = linear_sum_assignment(metric_mtx[b, ...], eval_func == "max") + best_metrics.append(metric_mtx[b, row_idx, col_idx].mean()) + best_perms.append(col_idx) + return B.from_numpy(np.stack(best_metrics)), B.from_numpy(np.stack(best_perms)) + + +def _average_metric(preds: Tensor, target: Tensor, metric_func: Callable) -> Tensor: + """average the metric values. 
+
+    Args:
+        preds: predictions, shape[batch, spk, time]
+        target: targets, shape[batch, spk, time]
+        metric_func: a function which returns best_metric and best_perm
+
+    Returns:
+        the average of best_metric
+    """
+    return metric_func(preds, target)[0].mean()
+
+
+snr_pit_scipy = partial(naive_implementation_pit_scipy, metric_func=snr, eval_func="max")
+si_sdr_pit_scipy = partial(naive_implementation_pit_scipy, metric_func=si_sdr, eval_func="max")
+
+
+@pytest.mark.parametrize(
+    "preds, target, sk_metric, metric_func, eval_func",
+    [
+        (inputs1.preds, inputs1.target, snr_pit_scipy, snr, "max"),
+        (inputs1.preds, inputs1.target, si_sdr_pit_scipy, si_sdr, "max"),
+        (inputs2.preds, inputs2.target, snr_pit_scipy, snr, "max"),
+        (inputs2.preds, inputs2.target, si_sdr_pit_scipy, si_sdr, "max"),
+    ],
+)
+class TestPIT(MetricTester):
+    atol = 1e-2
+
+    @pytest.mark.parametrize("ddp", [True, False])
+    @pytest.mark.parametrize("dist_sync_on_step", [True, False])
+    def test_pit(self, preds, target, sk_metric, metric_func, eval_func, ddp, dist_sync_on_step):
+        self.run_class_metric_test(
+            ddp,
+            preds,
+            target,
+            PIT,
+            sk_metric=partial(_average_metric, metric_func=sk_metric),
+            dist_sync_on_step=dist_sync_on_step,
+            metric_args=dict(metric_func=metric_func, eval_func=eval_func),
+        )
+
+    def test_pit_functional(self, preds, target, sk_metric, metric_func, eval_func):
+        self.run_functional_metric_test(
+            preds=preds,
+            target=target,
+            metric_functional=pit,
+            sk_metric=sk_metric,
+            metric_args=dict(metric_func=metric_func, eval_func=eval_func),
+        )
+
+    def test_pit_differentiability(self, preds, target, sk_metric, metric_func, eval_func):
+        def pit_diff(preds, target, metric_func, eval_func):
+            return pit(preds, target, metric_func, eval_func)[0]
+
+        self.run_differentiability_test(
+            preds=preds,
+            target=target,
+            metric_module=PIT,
+            metric_functional=pit_diff,
+            metric_args={"metric_func": metric_func, "eval_func": eval_func},
+        )
+
+    @pytest.mark.skipif(
+        not _TORCH_GREATER_EQUAL_1_6, reason="half support of core operations is not available before pytorch v1.6"
+    )
+    def test_pit_half_cpu(self, preds, target, sk_metric, metric_func, eval_func):
+        pytest.xfail("PIT metric does not support cpu + half precision")
+
+    @pytest.mark.skipif(not B.cuda.is_available(), reason="test requires cuda")
+    def test_pit_half_gpu(self, preds, target, sk_metric, metric_func, eval_func):
+        self.run_precision_test_gpu(
+            preds=preds,
+            target=target,
+            metric_module=PIT,
+            metric_functional=partial(pit, metric_func=metric_func, eval_func=eval_func),
+            metric_args={"metric_func": metric_func, "eval_func": eval_func},
+        )
+
+
+def test_error_on_different_shape() -> None:
+    metric = PIT(snr, "max")
+    with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
+        metric(B.randn(3, 3, 10), B.randn(3, 2, 10))
+
+
+def test_error_on_wrong_eval_func() -> None:
+    metric = PIT(snr, "xxx")
+    with pytest.raises(ValueError, match='eval_func can only be "max" or "min"'):
+        metric(B.randn(3, 3, 10), B.randn(3, 3, 10))
+
+
+def test_error_on_wrong_shape() -> None:
+    metric = PIT(snr, "max")
+    with pytest.raises(ValueError, match="Inputs must be of shape *"):
+        metric(B.randn(3), B.randn(3))
+
+
+def test_consistency_of_two_implementations() -> None:
+    from paddlemetrics.functional.audio.pit import (
+        _find_best_perm_by_exhuastive_method,
+        _find_best_perm_by_linear_sum_assignment,
+    )
+
+    shapes_test = [(5, 2, 2), (4, 3, 3), (4, 4, 4), (3, 5, 5)]
+    for shp in shapes_test:
+        metric_mtx = B.randn(size=shp)
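+        # cross-check: the exhaustive search over all spk! permutations must agree with scipy's Hungarian solver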
B.randn(size=shp) + bm1, bp1 = _find_best_perm_by_linear_sum_assignment(metric_mtx, B.max) + bm2, bp2 = _find_best_perm_by_exhuastive_method(metric_mtx, B.max) + assert B.allclose(bm1, bm2) + assert (bp1 == bp2).all() diff --git a/EE/paddlemetric/src/tests/audio/test_si_sdr.py b/EE/paddlemetric/src/tests/audio/test_si_sdr.py new file mode 100644 index 000000000..f7647b49c --- /dev/null +++ b/EE/paddlemetric/src/tests/audio/test_si_sdr.py @@ -0,0 +1,131 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from collections import namedtuple +from functools import partial + +import pytest +import speechmetrics +import paddleext.torchapi as B +from paddleext.torchapi import Tensor + +from tests.helpers import seed_all +from tests.helpers.testers import BATCH_SIZE, NUM_BATCHES, MetricTester +from paddlemetrics.audio import SI_SDR +from paddlemetrics.functional import si_sdr +from paddlemetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_6 + +seed_all(42) + +Time = 100 + +Input = namedtuple("Input", ["preds", "target"]) + +inputs = Input( + preds=B.rand(NUM_BATCHES, BATCH_SIZE, 1, Time), + target=B.rand(NUM_BATCHES, BATCH_SIZE, 1, Time), +) + +speechmetrics_sisdr = speechmetrics.load("sisdr") + + +def speechmetrics_si_sdr(preds: Tensor, target: Tensor, zero_mean: bool): + # shape: preds [BATCH_SIZE, 1, Time] , target [BATCH_SIZE, 1, Time] + # or shape: preds [NUM_BATCHES*BATCH_SIZE, 1, Time] , target [NUM_BATCHES*BATCH_SIZE, 1, Time] + if zero_mean: + preds = preds - preds.mean(dim=2, keepdim=True) + target = target - target.mean(dim=2, keepdim=True) + target = target.detach().cpu().numpy() + preds = preds.detach().cpu().numpy() + mss = [] + for i in range(preds.shape[0]): + ms = [] + for j in range(preds.shape[1]): + metric = speechmetrics_sisdr(preds[i, j], target[i, j], rate=16000) + ms.append(metric["sisdr"][0]) + mss.append(ms) + return B.tensor(mss) + + +def average_metric(preds, target, metric_func): + # shape: preds [BATCH_SIZE, 1, Time] , target [BATCH_SIZE, 1, Time] + # or shape: preds [NUM_BATCHES*BATCH_SIZE, 1, Time] , target [NUM_BATCHES*BATCH_SIZE, 1, Time] + return metric_func(preds, target).mean() + + +speechmetrics_si_sdr_zero_mean = partial(speechmetrics_si_sdr, zero_mean=True) +speechmetrics_si_sdr_no_zero_mean = partial(speechmetrics_si_sdr, zero_mean=False) + + +@pytest.mark.parametrize( + "preds, target, sk_metric, zero_mean", + [ + (inputs.preds, inputs.target, speechmetrics_si_sdr_zero_mean, True), + (inputs.preds, inputs.target, speechmetrics_si_sdr_no_zero_mean, False), + ], +) +class TestSISDR(MetricTester): + atol = 1e-2 + + @pytest.mark.parametrize("ddp", [True, False]) + @pytest.mark.parametrize("dist_sync_on_step", [True, False]) + def test_si_sdr(self, preds, target, sk_metric, zero_mean, ddp, dist_sync_on_step): + self.run_class_metric_test( + ddp, + preds, + target, + SI_SDR, + sk_metric=partial(average_metric, metric_func=sk_metric), + dist_sync_on_step=dist_sync_on_step, + metric_args=dict(zero_mean=zero_mean), + ) + + 
def test_si_sdr_functional(self, preds, target, sk_metric, zero_mean):
+        self.run_functional_metric_test(
+            preds,
+            target,
+            si_sdr,
+            sk_metric,
+            metric_args=dict(zero_mean=zero_mean),
+        )
+
+    def test_si_sdr_differentiability(self, preds, target, sk_metric, zero_mean):
+        self.run_differentiability_test(
+            preds=preds,
+            target=target,
+            metric_module=SI_SDR,
+            metric_functional=si_sdr,
+            metric_args={"zero_mean": zero_mean},
+        )
+
+    @pytest.mark.skipif(
+        not _TORCH_GREATER_EQUAL_1_6, reason="half precision of core operations is not supported before pytorch v1.6"
+    )
+    def test_si_sdr_half_cpu(self, preds, target, sk_metric, zero_mean):
+        pytest.xfail("SI-SDR metric does not support cpu + half precision")
+
+    @pytest.mark.skipif(not B.cuda.is_available(), reason="test requires cuda")
+    def test_si_sdr_half_gpu(self, preds, target, sk_metric, zero_mean):
+        self.run_precision_test_gpu(
+            preds=preds,
+            target=target,
+            metric_module=SI_SDR,
+            metric_functional=si_sdr,
+            metric_args={"zero_mean": zero_mean},
+        )
+
+
+def test_error_on_different_shape(metric_class=SI_SDR):
+    metric = metric_class()
+    with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
+        metric(B.randn(100), B.randn(50))
diff --git a/EE/paddlemetric/src/tests/audio/test_si_snr.py b/EE/paddlemetric/src/tests/audio/test_si_snr.py
new file mode 100644
index 000000000..967451172
--- /dev/null
+++ b/EE/paddlemetric/src/tests/audio/test_si_snr.py
@@ -0,0 +1,112 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
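+# For reference, SI-SNR is SI-SDR computed on zero-mean signals, which is why
+# this file reuses a speechmetrics "sisdr" reference below. The sketch that
+# follows is illustration only: it is not part of the package API, nothing in
+# this file calls it, and its name and eps guard are assumptions. It expects
+# float arrays with time along the last axis.
+def _si_sdr_reference(preds, target, zero_mean=True, eps=1e-8):
+    import numpy as np  # local import: illustration only
+
+    if zero_mean:
+        preds = preds - preds.mean(axis=-1, keepdims=True)
+        target = target - target.mean(axis=-1, keepdims=True)
+    # scale-invariant projection of the estimate onto the target signal
+    alpha = (preds * target).sum(axis=-1, keepdims=True) / ((target**2).sum(axis=-1, keepdims=True) + eps)
+    signal = alpha * target
+    noise = preds - signal
+    return 10 * np.log10((signal**2).sum(axis=-1) / ((noise**2).sum(axis=-1) + eps))
+
+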
+from collections import namedtuple
+from functools import partial
+
+import pytest
+import speechmetrics
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from tests.helpers import seed_all
+from tests.helpers.testers import BATCH_SIZE, NUM_BATCHES, MetricTester
+from paddlemetrics.audio import SI_SNR
+from paddlemetrics.functional import si_snr
+from paddlemetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_6
+
+seed_all(42)
+
+Time = 100
+
+Input = namedtuple("Input", ["preds", "target"])
+
+inputs = Input(
+    preds=B.rand(NUM_BATCHES, BATCH_SIZE, 1, Time),
+    target=B.rand(NUM_BATCHES, BATCH_SIZE, 1, Time),
+)
+
+speechmetrics_sisdr = speechmetrics.load("sisdr")
+
+
+def speechmetrics_si_sdr(preds: Tensor, target: Tensor, zero_mean: bool = True):
+    # shape: preds [BATCH_SIZE, 1, Time] , target [BATCH_SIZE, 1, Time]
+    # or shape: preds [NUM_BATCHES*BATCH_SIZE, 1, Time] , target [NUM_BATCHES*BATCH_SIZE, 1, Time]
+    if zero_mean:
+        preds = preds - preds.mean(dim=2, keepdim=True)
+        target = target - target.mean(dim=2, keepdim=True)
+    target = target.detach().cpu().numpy()
+    preds = preds.detach().cpu().numpy()
+    mss = []
+    for i in range(preds.shape[0]):
+        ms = []
+        for j in range(preds.shape[1]):
+            metric = speechmetrics_sisdr(preds[i, j], target[i, j], rate=16000)
+            ms.append(metric["sisdr"][0])
+        mss.append(ms)
+    return B.tensor(mss)
+
+
+def average_metric(preds, target, metric_func):
+    # shape: preds [BATCH_SIZE, 1, Time] , target [BATCH_SIZE, 1, Time]
+    # or shape: preds [NUM_BATCHES*BATCH_SIZE, 1, Time] , target [NUM_BATCHES*BATCH_SIZE, 1, Time]
+    return metric_func(preds, target).mean()
+
+
+@pytest.mark.parametrize(
+    "preds, target, sk_metric",
+    [
+        (inputs.preds, inputs.target, speechmetrics_si_sdr),
+    ],
+)
+class TestSISNR(MetricTester):
+    atol = 1e-2
+
+    @pytest.mark.parametrize("ddp", [True, False])
+    @pytest.mark.parametrize("dist_sync_on_step", [True, False])
+    def test_si_snr(self, preds, target, sk_metric, ddp, dist_sync_on_step):
+        self.run_class_metric_test(
+            ddp,
+            preds,
+            target,
+            SI_SNR,
+            sk_metric=partial(average_metric, metric_func=sk_metric),
+            dist_sync_on_step=dist_sync_on_step,
+        )
+
+    def test_si_snr_functional(self, preds, target, sk_metric):
+        self.run_functional_metric_test(
+            preds,
+            target,
+            si_snr,
+            sk_metric,
+        )
+
+    def test_si_snr_differentiability(self, preds, target, sk_metric):
+        self.run_differentiability_test(preds=preds, target=target, metric_module=SI_SNR, metric_functional=si_snr)
+
+    @pytest.mark.skipif(
+        not _TORCH_GREATER_EQUAL_1_6, reason="half precision of core operations is not supported before pytorch v1.6"
+    )
+    def test_si_snr_half_cpu(self, preds, target, sk_metric):
+        pytest.xfail("SI-SNR metric does not support cpu + half precision")
+
+    @pytest.mark.skipif(not B.cuda.is_available(), reason="test requires cuda")
+    def test_si_snr_half_gpu(self, preds, target, sk_metric):
+        self.run_precision_test_gpu(preds=preds, target=target, metric_module=SI_SNR, metric_functional=si_snr)
+
+
+def test_error_on_different_shape(metric_class=SI_SNR):
+    metric = metric_class()
+    with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
+        metric(B.randn(100), B.randn(50))
diff --git a/EE/paddlemetric/src/tests/audio/test_snr.py b/EE/paddlemetric/src/tests/audio/test_snr.py
new file mode 100644
index 000000000..86d28837b
--- /dev/null
+++ b/EE/paddlemetric/src/tests/audio/test_snr.py
@@ -0,0 +1,125 @@
+# Copyright The PyTorch Lightning team.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from collections import namedtuple +from functools import partial +from typing import Callable + +import pytest +import paddleext.torchapi as B +from mir_eval.separation import bss_eval_images as mir_eval_bss_eval_images +from paddleext.torchapi import Tensor + +from tests.helpers import seed_all +from tests.helpers.testers import BATCH_SIZE, NUM_BATCHES, MetricTester +from paddlemetrics.audio import SNR +from paddlemetrics.functional import snr +from paddlemetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_6 + +seed_all(42) + +Time = 100 + +Input = namedtuple("Input", ["preds", "target"]) + +inputs = Input( + preds=B.rand(NUM_BATCHES, BATCH_SIZE, 1, Time), + target=B.rand(NUM_BATCHES, BATCH_SIZE, 1, Time), +) + + +def bss_eval_images_snr(preds: Tensor, target: Tensor, metric_func: Callable, zero_mean: bool): + # shape: preds [BATCH_SIZE, 1, Time] , target [BATCH_SIZE, 1, Time] + # or shape: preds [NUM_BATCHES*BATCH_SIZE, 1, Time] , target [NUM_BATCHES*BATCH_SIZE, 1, Time] + if zero_mean: + target = target - B.mean(target, dim=-1, keepdim=True) + preds = preds - B.mean(preds, dim=-1, keepdim=True) + target = target.detach().cpu().numpy() + preds = preds.detach().cpu().numpy() + mss = [] + for i in range(preds.shape[0]): + ms = [] + for j in range(preds.shape[1]): + if metric_func == mir_eval_bss_eval_images: + snr_v = metric_func([target[i, j]], [preds[i, j]])[0][0] + else: + snr_v = metric_func([target[i, j]], [preds[i, j]])[0][0][0] + ms.append(snr_v) + mss.append(ms) + return B.tensor(mss) + + +def average_metric(preds: Tensor, target: Tensor, metric_func: Callable): + # shape: preds [BATCH_SIZE, 1, Time] , target [BATCH_SIZE, 1, Time] + # or shape: preds [NUM_BATCHES*BATCH_SIZE, 1, Time] , target [NUM_BATCHES*BATCH_SIZE, 1, Time] + return metric_func(preds, target).mean() + + +mireval_snr_zeromean = partial(bss_eval_images_snr, metric_func=mir_eval_bss_eval_images, zero_mean=True) +mireval_snr_nozeromean = partial(bss_eval_images_snr, metric_func=mir_eval_bss_eval_images, zero_mean=False) + + +@pytest.mark.parametrize( + "preds, target, sk_metric, zero_mean", + [ + (inputs.preds, inputs.target, mireval_snr_zeromean, True), + (inputs.preds, inputs.target, mireval_snr_nozeromean, False), + ], +) +class TestSNR(MetricTester): + atol = 1e-2 + + @pytest.mark.parametrize("ddp", [True, False]) + @pytest.mark.parametrize("dist_sync_on_step", [True, False]) + def test_snr(self, preds, target, sk_metric, zero_mean, ddp, dist_sync_on_step): + self.run_class_metric_test( + ddp, + preds, + target, + SNR, + sk_metric=partial(average_metric, metric_func=sk_metric), + dist_sync_on_step=dist_sync_on_step, + metric_args=dict(zero_mean=zero_mean), + ) + + def test_snr_functional(self, preds, target, sk_metric, zero_mean): + self.run_functional_metric_test( + preds, + target, + snr, + sk_metric, + metric_args=dict(zero_mean=zero_mean), + ) + + def test_snr_differentiability(self, preds, target, sk_metric, zero_mean): + self.run_differentiability_test( + 
preds=preds, target=target, metric_module=SNR, metric_functional=snr, metric_args={"zero_mean": zero_mean}
+        )
+
+    @pytest.mark.skipif(
+        not _TORCH_GREATER_EQUAL_1_6, reason="half precision of core operations is not supported before pytorch v1.6"
+    )
+    def test_snr_half_cpu(self, preds, target, sk_metric, zero_mean):
+        pytest.xfail("SNR metric does not support cpu + half precision")
+
+    @pytest.mark.skipif(not B.cuda.is_available(), reason="test requires cuda")
+    def test_snr_half_gpu(self, preds, target, sk_metric, zero_mean):
+        self.run_precision_test_gpu(
+            preds=preds, target=target, metric_module=SNR, metric_functional=snr, metric_args={"zero_mean": zero_mean}
+        )
+
+
+def test_error_on_different_shape(metric_class=SNR):
+    metric = metric_class()
+    with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
+        metric(B.randn(100), B.randn(50))
diff --git a/EE/paddlemetric/src/tests/audio/test_stoi.py b/EE/paddlemetric/src/tests/audio/test_stoi.py
new file mode 100644
index 000000000..70c7208b7
--- /dev/null
+++ b/EE/paddlemetric/src/tests/audio/test_stoi.py
@@ -0,0 +1,146 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from collections import namedtuple
+from functools import partial
+
+import pytest
+import paddleext.torchapi as B
+from pystoi import stoi as stoi_backend
+from paddleext.torchapi import Tensor
+
+from tests.helpers import seed_all
+from tests.helpers.testers import MetricTester
+from paddlemetrics.audio import STOI
+from paddlemetrics.functional import stoi
+from paddlemetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_6
+
+seed_all(42)
+
+Input = namedtuple("Input", ["preds", "target"])
+
+inputs_8k = Input(
+    preds=B.rand(2, 3, 8000),
+    target=B.rand(2, 3, 8000),
+)
+inputs_16k = Input(
+    preds=B.rand(2, 3, 16000),
+    target=B.rand(2, 3, 16000),
+)
+
+
+def stoi_original_batch(preds: Tensor, target: Tensor, fs: int, extended: bool):
+    # shape: preds [BATCH_SIZE, Time] , target [BATCH_SIZE, Time]
+    # or shape: preds [NUM_BATCHES*BATCH_SIZE, Time] , target [NUM_BATCHES*BATCH_SIZE, Time]
+    target = target.detach().cpu().numpy()
+    preds = preds.detach().cpu().numpy()
+    mss = []
+    for b in range(preds.shape[0]):
+        stoi_val = stoi_backend(target[b, ...], preds[b, ...], fs, extended)
+        mss.append(stoi_val)
+    return B.tensor(mss)
+
+
+def average_metric(preds, target, metric_func):
+    # shape: preds [BATCH_SIZE, 1, Time] , target [BATCH_SIZE, 1, Time]
+    # or shape: preds [NUM_BATCHES*BATCH_SIZE, 1, Time] , target [NUM_BATCHES*BATCH_SIZE, 1, Time]
+    return metric_func(preds, target).mean()
+
+
+stoi_original_batch_8k_ext = partial(stoi_original_batch, fs=8000, extended=True)
+stoi_original_batch_16k_ext = partial(stoi_original_batch, fs=16000, extended=True)
+stoi_original_batch_8k_noext = partial(stoi_original_batch, fs=8000, extended=False)
+stoi_original_batch_16k_noext = partial(stoi_original_batch, fs=16000, extended=False)
+
+
+@pytest.mark.parametrize(
+    "preds, target, sk_metric, fs, 
extended", + [ + (inputs_8k.preds, inputs_8k.target, stoi_original_batch_8k_ext, 8000, True), + (inputs_16k.preds, inputs_16k.target, stoi_original_batch_16k_ext, 16000, True), + (inputs_8k.preds, inputs_8k.target, stoi_original_batch_8k_noext, 8000, False), + (inputs_16k.preds, inputs_16k.target, stoi_original_batch_16k_noext, 16000, False), + ], +) +class TestSTOI(MetricTester): + atol = 1e-2 + + @pytest.mark.parametrize("ddp", [True, False]) + @pytest.mark.parametrize("dist_sync_on_step", [True, False]) + def test_stoi(self, preds, target, sk_metric, fs, extended, ddp, dist_sync_on_step): + self.run_class_metric_test( + ddp, + preds, + target, + STOI, + sk_metric=partial(average_metric, metric_func=sk_metric), + dist_sync_on_step=dist_sync_on_step, + metric_args=dict(fs=fs, extended=extended), + ) + + def test_stoi_functional(self, preds, target, sk_metric, fs, extended): + self.run_functional_metric_test( + preds, + target, + stoi, + sk_metric, + metric_args=dict(fs=fs, extended=extended), + ) + + def test_stoi_differentiability(self, preds, target, sk_metric, fs, extended): + self.run_differentiability_test( + preds=preds, + target=target, + metric_module=STOI, + metric_functional=stoi, + metric_args=dict(fs=fs, extended=extended), + ) + + @pytest.mark.skipif( + not _TORCH_GREATER_EQUAL_1_6, reason="half support of core operations on not support before pytorch v1.6" + ) + def test_stoi_half_cpu(self, preds, target, sk_metric, fs, extended): + pytest.xfail("STOI metric does not support cpu + half precision") + + @pytest.mark.skipif(not B.cuda.is_available(), reason="test requires cuda") + def test_stoi_half_gpu(self, preds, target, sk_metric, fs, extended): + self.run_precision_test_gpu( + preds=preds, + target=target, + metric_module=STOI, + metric_functional=partial(stoi, fs=fs, extended=extended), + metric_args=dict(fs=fs, extended=extended), + ) + + +def test_error_on_different_shape(metric_class=STOI): + metric = metric_class(16000) + with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"): + metric(B.randn(100), B.randn(50)) + + +def test_on_real_audio(): + import os + + from scipy.io import wavfile + + current_file_dir = os.path.dirname(__file__) + + rate, ref = wavfile.read(os.path.join(current_file_dir, "examples/audio_speech.wav")) + rate, deg = wavfile.read(os.path.join(current_file_dir, "examples/audio_speech_bab_0dB.wav")) + assert B.allclose( + stoi(B.from_numpy(deg), B.from_numpy(ref), rate).float(), + B.tensor(0.6739177), + rtol=0.0001, + atol=1e-4, + ) diff --git a/EE/paddlemetric/src/tests/bases/__init__.py b/EE/paddlemetric/src/tests/bases/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/EE/paddlemetric/src/tests/bases/test.log b/EE/paddlemetric/src/tests/bases/test.log new file mode 100644 index 000000000..eb8e22c41 --- /dev/null +++ b/EE/paddlemetric/src/tests/bases/test.log @@ -0,0 +1,2764 @@ +============================= test session starts ============================== +platform darwin -- Python 3.8.12, pytest-7.1.2, pluggy-1.0.0 +rootdir: /Users/sun/Projects/oix/baidu/ccl/paddlemetric/src/tests/bases +plugins: hydra-core-1.1.0.dev5 +collected 86 items + +test_composition.py .....FFF.....FF.................FFFFFFFFFFFFFF.F....FFFFF.FFFFFFFFFFFFFFFFFF.F....FF.. 
+ +=================================== FAILURES =================================== +_____________________ test_metrics_and[3-expected_result1] _____________________ + +second_operand = 3 +expected_result = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 2) + + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [(DummyMetric(3), tensor(2)), (3, tensor(2)), (3, tensor(2)), (tensor(3), tensor(2))], + ) + @pytest.mark.skipif(**_MARK_TORCH_MIN_1_5) + def test_metrics_and(second_operand, expected_result): + first_metric = DummyMetric(2) + + final_and = first_metric & second_operand + final_rand = second_operand & first_metric + + assert isinstance(final_and, CompositionalMetric) + assert isinstance(final_rand, CompositionalMetric) + + final_and.update() + final_rand.update() +> assert B.allclose(expected_result, final_and.compute()) + +test_composition.py:83: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:378: in wrapped_func + self._computed = compute(*args, **kwargs) +../../paddlemetrics/metric.py:756: in compute + return self.op(val_a, val_b) +../../../../../../torch2paddle/paddleext/torchapi/core.py:91: in paddle_func + return func(*args, **kwargs) +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/tensor/logic.py:559: in bitwise_and + return _bitwise_op( +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +op_name = 'bitwise_and' +x = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 2) +y = 3, out = None, name = None, binary_op = True + + def _bitwise_op(op_name, x, y, out=None, name=None, binary_op=True): + if paddle.in_dynamic_mode(): + op = getattr(_C_ops, op_name) + if binary_op: +> return op(x, y) +E ValueError: (InvalidArgument) bitwise_and(): argument 'Y' (position 1) must be Tensor, but got int (at /Users/paddle/work/pengyuqi/Paddle/paddle/fluid/pybind/op_function_common.cc:737) + +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/tensor/logic.py:505: ValueError +_____________________ test_metrics_and[3-expected_result2] _____________________ + +second_operand = 3 +expected_result = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 2) + + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [(DummyMetric(3), tensor(2)), (3, tensor(2)), (3, tensor(2)), (tensor(3), tensor(2))], + ) + @pytest.mark.skipif(**_MARK_TORCH_MIN_1_5) + def test_metrics_and(second_operand, expected_result): + first_metric = DummyMetric(2) + + final_and = first_metric & second_operand + final_rand = second_operand & first_metric + + assert isinstance(final_and, CompositionalMetric) + assert isinstance(final_rand, CompositionalMetric) + + final_and.update() + final_rand.update() +> assert B.allclose(expected_result, final_and.compute()) + +test_composition.py:83: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:378: in wrapped_func + self._computed = compute(*args, **kwargs) +../../paddlemetrics/metric.py:756: in compute + return self.op(val_a, val_b) +../../../../../../torch2paddle/paddleext/torchapi/core.py:91: in paddle_func + return func(*args, **kwargs) +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/tensor/logic.py:559: in bitwise_and + return _bitwise_op( +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +op_name = 'bitwise_and' +x = Tensor(shape=[], dtype=int32, 
place=Place(cpu), stop_gradient=True, + 2) +y = 3, out = None, name = None, binary_op = True + + def _bitwise_op(op_name, x, y, out=None, name=None, binary_op=True): + if paddle.in_dynamic_mode(): + op = getattr(_C_ops, op_name) + if binary_op: +> return op(x, y) +E ValueError: (InvalidArgument) bitwise_and(): argument 'Y' (position 1) must be Tensor, but got int (at /Users/paddle/work/pengyuqi/Paddle/paddle/fluid/pybind/op_function_common.cc:737) + +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/tensor/logic.py:505: ValueError +______________ test_metrics_and[second_operand3-expected_result3] ______________ + +second_operand = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 3) +expected_result = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 2) + + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [(DummyMetric(3), tensor(2)), (3, tensor(2)), (3, tensor(2)), (tensor(3), tensor(2))], + ) + @pytest.mark.skipif(**_MARK_TORCH_MIN_1_5) + def test_metrics_and(second_operand, expected_result): + first_metric = DummyMetric(2) + + final_and = first_metric & second_operand +> final_rand = second_operand & first_metric + +test_composition.py:76: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../../../../../torch2paddle/paddleext/torchapi/tensor_.py:361: in __and__ + return paddle.logical_or(self.bool(), other.bool()) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = DummyMetric(), name = 'bool' + + def __getattr__(self, name): + if '_parameters' in self.__dict__: + _parameters = self.__dict__['_parameters'] + if name in self._parameters: + if in_declarative_mode(): + return _convert_into_variable(self._parameters[name]) + return self._parameters[name] + if '_sub_layers' in self.__dict__: + _sub_layers = self.__dict__['_sub_layers'] + if name in self._sub_layers: + return self._sub_layers[name] + if '_buffers' in self.__dict__: + _buffers = self.__dict__['_buffers'] + if name in _buffers: + if in_declarative_mode(): + return _convert_into_variable(_buffers[name]) + return _buffers[name] +> return object.__getattribute__(self, name) +E AttributeError: 'DummyMetric' object has no attribute 'bool' + +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/dygraph/layers.py:1123: AttributeError +__________________ test_metrics_floordiv[2-expected_result1] ___________________ + +second_operand = 2 +expected_result = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 2) + + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(2), tensor(2)), + (2, tensor(2)), + (2.0, tensor(2.0)), + (tensor(2), tensor(2)), + ], + ) + @pytest.mark.skipif(**_MARK_TORCH_MIN_1_5) + def test_metrics_floordiv(second_operand, expected_result): + first_metric = DummyMetric(5) + + final_floordiv = first_metric // second_operand + + assert isinstance(final_floordiv, CompositionalMetric) + + final_floordiv.update() +> assert B.allclose(expected_result, final_floordiv.compute()) + +test_composition.py:126: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:378: in wrapped_func + self._computed = compute(*args, **kwargs) +../../paddlemetrics/metric.py:756: in compute + return self.op(val_a, val_b) +../../../../../../torch2paddle/paddleext/torchapi/core.py:91: in paddle_func + return func(*args, **kwargs) 
+/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/tensor/math.py:460: in floor_divide + return _elementwise_op_in_dygraph( +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/decorator.py:232: in fun + return caller(func, *(extras + args), **kw) +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/wrapped_decorator.py:25: in __impl__ + return wrapped_func(*args, **kwargs) +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/framework.py:434: in __impl__ + return func(*args, **kwargs) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +x = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 5) +y = 2, axis = -1, act = None, use_mkldnn = False +op_name = 'elementwise_floordiv' + + @dygraph_only + def _elementwise_op_in_dygraph(x, + y, + axis=-1, + act=None, + use_mkldnn=False, + op_name=None): + def is_inplace(op_name): + return op_name[-1] == "_" + + if op_name not in OP_NAMEMAPPING.keys(): + op = getattr(_C_ops, op_name) + out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn) + else: + if in_dygraph_mode(): + op = getattr(_C_ops, OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name) + out = op(x, y) + + if _in_legacy_dygraph(): + op = getattr(_C_ops, op_name) +> out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn) +E ValueError: (InvalidArgument) elementwise_floordiv(): argument 'Y' (position 1) must be Tensor, but got int (at /Users/paddle/work/pengyuqi/Paddle/paddle/fluid/pybind/op_function_common.cc:737) + +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/tensor/math.py:218: ValueError +_________________ test_metrics_floordiv[2.0-expected_result2] __________________ + +second_operand = 2.0 +expected_result = Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True, + 2.) 
+ + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(2), tensor(2)), + (2, tensor(2)), + (2.0, tensor(2.0)), + (tensor(2), tensor(2)), + ], + ) + @pytest.mark.skipif(**_MARK_TORCH_MIN_1_5) + def test_metrics_floordiv(second_operand, expected_result): + first_metric = DummyMetric(5) + + final_floordiv = first_metric // second_operand + + assert isinstance(final_floordiv, CompositionalMetric) + + final_floordiv.update() +> assert B.allclose(expected_result, final_floordiv.compute()) + +test_composition.py:126: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:378: in wrapped_func + self._computed = compute(*args, **kwargs) +../../paddlemetrics/metric.py:756: in compute + return self.op(val_a, val_b) +../../../../../../torch2paddle/paddleext/torchapi/core.py:91: in paddle_func + return func(*args, **kwargs) +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/tensor/math.py:460: in floor_divide + return _elementwise_op_in_dygraph( +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/decorator.py:232: in fun + return caller(func, *(extras + args), **kw) +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/wrapped_decorator.py:25: in __impl__ + return wrapped_func(*args, **kwargs) +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/framework.py:434: in __impl__ + return func(*args, **kwargs) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +x = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 5) +y = 2.0, axis = -1, act = None, use_mkldnn = False +op_name = 'elementwise_floordiv' + + @dygraph_only + def _elementwise_op_in_dygraph(x, + y, + axis=-1, + act=None, + use_mkldnn=False, + op_name=None): + def is_inplace(op_name): + return op_name[-1] == "_" + + if op_name not in OP_NAMEMAPPING.keys(): + op = getattr(_C_ops, op_name) + out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn) + else: + if in_dygraph_mode(): + op = getattr(_C_ops, OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name) + out = op(x, y) + + if _in_legacy_dygraph(): + op = getattr(_C_ops, op_name) +> out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn) +E ValueError: (InvalidArgument) elementwise_floordiv(): argument 'Y' (position 1) must be Tensor, but got float (at /Users/paddle/work/pengyuqi/Paddle/paddle/fluid/pybind/op_function_common.cc:737) + +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/tensor/math.py:218: ValueError +____________ test_metrics_matmul[second_operand0-expected_result0] _____________ + +second_operand = DummyMetric() +expected_result = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 12) + + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [(DummyMetric([2, 2, 2]), tensor(12)), (tensor([2, 2, 2]), tensor(12))], + ) + def test_metrics_matmul(second_operand, expected_result): + first_metric = DummyMetric([2, 2, 2]) + + final_matmul = first_metric @ second_operand + + assert isinstance(final_matmul, CompositionalMetric) + + final_matmul.update() +> assert B.allclose(expected_result, final_matmul.compute()) + +test_composition.py:225: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:378: in wrapped_func + self._computed = compute(*args, **kwargs) +../../paddlemetrics/metric.py:756: in compute + return self.op(val_a, val_b) 
+../../../../../../torch2paddle/paddleext/torchapi/core.py:91: in paddle_func + return func(*args, **kwargs) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +x = Tensor(shape=[3], dtype=int64, place=Place(cpu), stop_gradient=True, + [2, 2, 2]) +y = Tensor(shape=[3], dtype=int64, place=Place(cpu), stop_gradient=True, + [2, 2, 2]) +transpose_x = False, transpose_y = False, name = None + + def matmul(x, y, transpose_x=False, transpose_y=False, name=None): + """ + Applies matrix multiplication to two tensors. `matmul` follows + the complete broadcast rules, + and its behavior is consistent with `np.matmul`. + + Currently, the input tensors' number of dimensions can be any, `matmul` can be used to + achieve the `dot`, `matmul` and `batchmatmul`. + + The actual behavior depends on the shapes of :math:`x`, :math:`y` and the + flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically: + + - If a transpose flag is specified, the last two dimensions of the tensor + are transposed. If the tensor is ndim-1 of shape, the transpose is invalid. If the tensor + is ndim-1 of shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas + for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`. + + The multiplication behavior depends on the dimensions of `x` and `y`. Specifically: + + - If both tensors are 1-dimensional, the dot product result is obtained. + + - If both tensors are 2-dimensional, the matrix-matrix product is obtained. + + - If the `x` is 1-dimensional and the `y` is 2-dimensional, + a `1` is prepended to its dimension in order to conduct the matrix multiply. + After the matrix multiply, the prepended dimension is removed. + + - If the `x` is 2-dimensional and `y` is 1-dimensional, + the matrix-vector product is obtained. + + - If both arguments are at least 1-dimensional and at least one argument + is N-dimensional (where N > 2), then a batched matrix multiply is obtained. + If the first argument is 1-dimensional, a 1 is prepended to its dimension + in order to conduct the batched matrix multiply and removed after. + If the second argument is 1-dimensional, a 1 is appended to its + dimension for the purpose of the batched matrix multiple and removed after. + The non-matrix (exclude the last two dimensions) dimensions are + broadcasted according the broadcast rule. + For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor, + out will be a (j, k, n, p) tensor. + + Args: + x (Tensor): The input tensor which is a Tensor. + y (Tensor): The input tensor which is a Tensor. + transpose_x (bool): Whether to transpose :math:`x` before multiplication. + transpose_y (bool): Whether to transpose :math:`y` before multiplication. + name(str|None): A name for this layer(optional). If set None, the layer + will be named automatically. + + Returns: + Tensor: The output Tensor. + + Examples: + + .. 
code-block:: python + + import paddle + import numpy as np + + # vector * vector + x_data = np.random.random([10]).astype(np.float32) + y_data = np.random.random([10]).astype(np.float32) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + z = paddle.matmul(x, y) + print(z.numpy().shape) + # [1] + + # matrix * vector + x_data = np.random.random([10, 5]).astype(np.float32) + y_data = np.random.random([5]).astype(np.float32) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + z = paddle.matmul(x, y) + print(z.numpy().shape) + # [10] + + # batched matrix * broadcasted vector + x_data = np.random.random([10, 5, 2]).astype(np.float32) + y_data = np.random.random([2]).astype(np.float32) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + z = paddle.matmul(x, y) + print(z.numpy().shape) + # [10, 5] + + # batched matrix * batched matrix + x_data = np.random.random([10, 5, 2]).astype(np.float32) + y_data = np.random.random([10, 2, 5]).astype(np.float32) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + z = paddle.matmul(x, y) + print(z.numpy().shape) + # [10, 5, 5] + + # batched matrix * broadcasted matrix + x_data = np.random.random([10, 1, 5, 2]).astype(np.float32) + y_data = np.random.random([1, 3, 2, 5]).astype(np.float32) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + z = paddle.matmul(x, y) + print(z.numpy().shape) + # [10, 3, 5, 5] + + """ + if in_dygraph_mode(): + return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y) + + if _in_legacy_dygraph(): + op_type = 'matmul_v2' + op = getattr(_C_ops, op_type) +> return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y) +E RuntimeError: (NotFound) There are no kernels which are registered in the matmul_v2 operator. +E [Hint: Expected kernels_iter != all_op_kernels.end(), but received kernels_iter == all_op_kernels.end().] 
(at /Users/paddle/work/pengyuqi/Paddle/paddle/fluid/imperative/prepared_operator.cc:327) +E [operator < matmul_v2 > error] + +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/tensor/linalg.py:145: RuntimeError +____________ test_metrics_matmul[second_operand1-expected_result1] _____________ + +second_operand = Tensor(shape=[3], dtype=int64, place=Place(cpu), stop_gradient=True, + [2, 2, 2]) +expected_result = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 12) + + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [(DummyMetric([2, 2, 2]), tensor(12)), (tensor([2, 2, 2]), tensor(12))], + ) + def test_metrics_matmul(second_operand, expected_result): + first_metric = DummyMetric([2, 2, 2]) + + final_matmul = first_metric @ second_operand + + assert isinstance(final_matmul, CompositionalMetric) + + final_matmul.update() +> assert B.allclose(expected_result, final_matmul.compute()) + +test_composition.py:225: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:378: in wrapped_func + self._computed = compute(*args, **kwargs) +../../paddlemetrics/metric.py:756: in compute + return self.op(val_a, val_b) +../../../../../../torch2paddle/paddleext/torchapi/core.py:91: in paddle_func + return func(*args, **kwargs) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +x = Tensor(shape=[3], dtype=int64, place=Place(cpu), stop_gradient=True, + [2, 2, 2]) +y = Tensor(shape=[3], dtype=int64, place=Place(cpu), stop_gradient=True, + [2, 2, 2]) +transpose_x = False, transpose_y = False, name = None + + def matmul(x, y, transpose_x=False, transpose_y=False, name=None): + """ + Applies matrix multiplication to two tensors. `matmul` follows + the complete broadcast rules, + and its behavior is consistent with `np.matmul`. + + Currently, the input tensors' number of dimensions can be any, `matmul` can be used to + achieve the `dot`, `matmul` and `batchmatmul`. + + The actual behavior depends on the shapes of :math:`x`, :math:`y` and the + flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically: + + - If a transpose flag is specified, the last two dimensions of the tensor + are transposed. If the tensor is ndim-1 of shape, the transpose is invalid. If the tensor + is ndim-1 of shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas + for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`. + + The multiplication behavior depends on the dimensions of `x` and `y`. Specifically: + + - If both tensors are 1-dimensional, the dot product result is obtained. + + - If both tensors are 2-dimensional, the matrix-matrix product is obtained. + + - If the `x` is 1-dimensional and the `y` is 2-dimensional, + a `1` is prepended to its dimension in order to conduct the matrix multiply. + After the matrix multiply, the prepended dimension is removed. + + - If the `x` is 2-dimensional and `y` is 1-dimensional, + the matrix-vector product is obtained. + + - If both arguments are at least 1-dimensional and at least one argument + is N-dimensional (where N > 2), then a batched matrix multiply is obtained. + If the first argument is 1-dimensional, a 1 is prepended to its dimension + in order to conduct the batched matrix multiply and removed after. + If the second argument is 1-dimensional, a 1 is appended to its + dimension for the purpose of the batched matrix multiple and removed after. 
+ The non-matrix (exclude the last two dimensions) dimensions are + broadcasted according the broadcast rule. + For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor, + out will be a (j, k, n, p) tensor. + + Args: + x (Tensor): The input tensor which is a Tensor. + y (Tensor): The input tensor which is a Tensor. + transpose_x (bool): Whether to transpose :math:`x` before multiplication. + transpose_y (bool): Whether to transpose :math:`y` before multiplication. + name(str|None): A name for this layer(optional). If set None, the layer + will be named automatically. + + Returns: + Tensor: The output Tensor. + + Examples: + + .. code-block:: python + + import paddle + import numpy as np + + # vector * vector + x_data = np.random.random([10]).astype(np.float32) + y_data = np.random.random([10]).astype(np.float32) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + z = paddle.matmul(x, y) + print(z.numpy().shape) + # [1] + + # matrix * vector + x_data = np.random.random([10, 5]).astype(np.float32) + y_data = np.random.random([5]).astype(np.float32) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + z = paddle.matmul(x, y) + print(z.numpy().shape) + # [10] + + # batched matrix * broadcasted vector + x_data = np.random.random([10, 5, 2]).astype(np.float32) + y_data = np.random.random([2]).astype(np.float32) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + z = paddle.matmul(x, y) + print(z.numpy().shape) + # [10, 5] + + # batched matrix * batched matrix + x_data = np.random.random([10, 5, 2]).astype(np.float32) + y_data = np.random.random([10, 2, 5]).astype(np.float32) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + z = paddle.matmul(x, y) + print(z.numpy().shape) + # [10, 5, 5] + + # batched matrix * broadcasted matrix + x_data = np.random.random([10, 1, 5, 2]).astype(np.float32) + y_data = np.random.random([1, 3, 2, 5]).astype(np.float32) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + z = paddle.matmul(x, y) + print(z.numpy().shape) + # [10, 3, 5, 5] + + """ + if in_dygraph_mode(): + return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y) + + if _in_legacy_dygraph(): + op_type = 'matmul_v2' + op = getattr(_C_ops, op_type) +> return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y) +E RuntimeError: (NotFound) There are no kernels which are registered in the matmul_v2 operator. +E [Hint: Expected kernels_iter != all_op_kernels.end(), but received kernels_iter == all_op_kernels.end().] 
(at /Users/paddle/work/pengyuqi/Paddle/paddle/fluid/imperative/prepared_operator.cc:327) +E [operator < matmul_v2 > error] + +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/tensor/linalg.py:145: RuntimeError +______________ test_metrics_mod[second_operand0-expected_result0] ______________ + +second_operand = DummyMetric() +expected_result = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 1) + + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(2), tensor(1)), + (2, tensor(1)), + (2.0, tensor(1)), + (tensor(2), tensor(1)), + ], + ) + def test_metrics_mod(second_operand, expected_result): + first_metric = DummyMetric(5) + +> final_mod = first_metric % second_operand + +test_composition.py:240: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:616: in __mod__ + return CompositionalMetric(B.fmod, self, other) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = , args = ('fmod',) +kwargs = {} + + def __getattribute__(self, *args, **kwargs): + # Perform custom logic here + +> obj = object.__getattribute__(this_module, *args, **kwargs) +E AttributeError: 'module' object has no attribute 'fmod' + +../../../../../../torch2paddle/paddleext/torchapi/__init__.py:20: AttributeError +_____________________ test_metrics_mod[2-expected_result1] _____________________ + +second_operand = 2 +expected_result = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 1) + + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(2), tensor(1)), + (2, tensor(1)), + (2.0, tensor(1)), + (tensor(2), tensor(1)), + ], + ) + def test_metrics_mod(second_operand, expected_result): + first_metric = DummyMetric(5) + +> final_mod = first_metric % second_operand + +test_composition.py:240: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:616: in __mod__ + return CompositionalMetric(B.fmod, self, other) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = , args = ('fmod',) +kwargs = {} + + def __getattribute__(self, *args, **kwargs): + # Perform custom logic here + +> obj = object.__getattribute__(this_module, *args, **kwargs) +E AttributeError: 'module' object has no attribute 'fmod' + +../../../../../../torch2paddle/paddleext/torchapi/__init__.py:20: AttributeError +____________________ test_metrics_mod[2.0-expected_result2] ____________________ + +second_operand = 2.0 +expected_result = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 1) + + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(2), tensor(1)), + (2, tensor(1)), + (2.0, tensor(1)), + (tensor(2), tensor(1)), + ], + ) + def test_metrics_mod(second_operand, expected_result): + first_metric = DummyMetric(5) + +> final_mod = first_metric % second_operand + +test_composition.py:240: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:616: in __mod__ + return CompositionalMetric(B.fmod, self, other) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = , args = ('fmod',) +kwargs = {} + + def __getattribute__(self, *args, **kwargs): + # Perform custom logic here + +> obj = object.__getattribute__(this_module, *args, **kwargs) +E AttributeError: 'module' object has no attribute 'fmod' + 
+../../../../../../torch2paddle/paddleext/torchapi/__init__.py:20: AttributeError +______________ test_metrics_mod[second_operand3-expected_result3] ______________ + +second_operand = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 2) +expected_result = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 1) + + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(2), tensor(1)), + (2, tensor(1)), + (2.0, tensor(1)), + (tensor(2), tensor(1)), + ], + ) + def test_metrics_mod(second_operand, expected_result): + first_metric = DummyMetric(5) + +> final_mod = first_metric % second_operand + +test_composition.py:240: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:616: in __mod__ + return CompositionalMetric(B.fmod, self, other) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = , args = ('fmod',) +kwargs = {} + + def __getattribute__(self, *args, **kwargs): + # Perform custom logic here + +> obj = object.__getattribute__(this_module, *args, **kwargs) +E AttributeError: 'module' object has no attribute 'fmod' + +../../../../../../torch2paddle/paddleext/torchapi/__init__.py:20: AttributeError +______________ test_metrics_mul[second_operand0-expected_result0] ______________ + +second_operand = DummyMetric() +expected_result = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 4) + + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(2), tensor(4)), + (2, tensor(4)), + (2.0, tensor(4.0)), + pytest.param(tensor(2), tensor(4), marks=pytest.mark.skipif(**_MARK_TORCH_MIN_1_4)), + ], + ) + def test_metrics_mul(second_operand, expected_result): + first_metric = DummyMetric(2) + +> final_mul = first_metric * second_operand + +test_composition.py:261: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:619: in __mul__ + return CompositionalMetric(B.mul, self, other) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = , args = ('mul',) +kwargs = {} + + def __getattribute__(self, *args, **kwargs): + # Perform custom logic here + +> obj = object.__getattribute__(this_module, *args, **kwargs) +E AttributeError: 'module' object has no attribute 'mul' + +../../../../../../torch2paddle/paddleext/torchapi/__init__.py:20: AttributeError +_____________________ test_metrics_mul[2-expected_result1] _____________________ + +second_operand = 2 +expected_result = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 4) + + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(2), tensor(4)), + (2, tensor(4)), + (2.0, tensor(4.0)), + pytest.param(tensor(2), tensor(4), marks=pytest.mark.skipif(**_MARK_TORCH_MIN_1_4)), + ], + ) + def test_metrics_mul(second_operand, expected_result): + first_metric = DummyMetric(2) + +> final_mul = first_metric * second_operand + +test_composition.py:261: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:619: in __mul__ + return CompositionalMetric(B.mul, self, other) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = , args = ('mul',) +kwargs = {} + + def __getattribute__(self, *args, **kwargs): + # Perform custom logic here + +> obj = object.__getattribute__(this_module, *args, **kwargs) +E AttributeError: 'module' 
object has no attribute 'mul' + +../../../../../../torch2paddle/paddleext/torchapi/__init__.py:20: AttributeError +____________________ test_metrics_mul[2.0-expected_result2] ____________________ + +second_operand = 2.0 +expected_result = Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True, + 4.) + + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(2), tensor(4)), + (2, tensor(4)), + (2.0, tensor(4.0)), + pytest.param(tensor(2), tensor(4), marks=pytest.mark.skipif(**_MARK_TORCH_MIN_1_4)), + ], + ) + def test_metrics_mul(second_operand, expected_result): + first_metric = DummyMetric(2) + +> final_mul = first_metric * second_operand + +test_composition.py:261: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:619: in __mul__ + return CompositionalMetric(B.mul, self, other) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = , args = ('mul',) +kwargs = {} + + def __getattribute__(self, *args, **kwargs): + # Perform custom logic here + +> obj = object.__getattribute__(this_module, *args, **kwargs) +E AttributeError: 'module' object has no attribute 'mul' + +../../../../../../torch2paddle/paddleext/torchapi/__init__.py:20: AttributeError +______________ test_metrics_mul[second_operand3-expected_result3] ______________ + +second_operand = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 2) +expected_result = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 4) + + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(2), tensor(4)), + (2, tensor(4)), + (2.0, tensor(4.0)), + pytest.param(tensor(2), tensor(4), marks=pytest.mark.skipif(**_MARK_TORCH_MIN_1_4)), + ], + ) + def test_metrics_mul(second_operand, expected_result): + first_metric = DummyMetric(2) + +> final_mul = first_metric * second_operand + +test_composition.py:261: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:619: in __mul__ + return CompositionalMetric(B.mul, self, other) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = , args = ('mul',) +kwargs = {} + + def __getattribute__(self, *args, **kwargs): + # Perform custom logic here + +> obj = object.__getattribute__(this_module, *args, **kwargs) +E AttributeError: 'module' object has no attribute 'mul' + +../../../../../../torch2paddle/paddleext/torchapi/__init__.py:20: AttributeError +______________ test_metrics_ne[second_operand0-expected_result0] _______________ + +second_operand = DummyMetric() +expected_result = Tensor(shape=[], dtype=bool, place=Place(cpu), stop_gradient=True, + False) + + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(2), tensor(False)), + (2, tensor(False)), + (2.0, tensor(False)), + (tensor(2), tensor(False)), + ], + ) + def test_metrics_ne(second_operand, expected_result): + first_metric = DummyMetric(2) + +> final_ne = first_metric != second_operand + +test_composition.py:285: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:623: in __ne__ + return CompositionalMetric(B.ne, self, other) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = , args = ('ne',) +kwargs = {} + + def __getattribute__(self, *args, **kwargs): + # Perform custom logic here + +> obj = 
object.__getattribute__(this_module, *args, **kwargs) +E AttributeError: 'module' object has no attribute 'ne' + +../../../../../../torch2paddle/paddleext/torchapi/__init__.py:20: AttributeError +_____________________ test_metrics_ne[2-expected_result1] ______________________ + +second_operand = 2 +expected_result = Tensor(shape=[], dtype=bool, place=Place(cpu), stop_gradient=True, + False) + + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(2), tensor(False)), + (2, tensor(False)), + (2.0, tensor(False)), + (tensor(2), tensor(False)), + ], + ) + def test_metrics_ne(second_operand, expected_result): + first_metric = DummyMetric(2) + +> final_ne = first_metric != second_operand + +test_composition.py:285: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:623: in __ne__ + return CompositionalMetric(B.ne, self, other) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = , args = ('ne',) +kwargs = {} + + def __getattribute__(self, *args, **kwargs): + # Perform custom logic here + +> obj = object.__getattribute__(this_module, *args, **kwargs) +E AttributeError: 'module' object has no attribute 'ne' + +../../../../../../torch2paddle/paddleext/torchapi/__init__.py:20: AttributeError +____________________ test_metrics_ne[2.0-expected_result2] _____________________ + +second_operand = 2.0 +expected_result = Tensor(shape=[], dtype=bool, place=Place(cpu), stop_gradient=True, + False) + + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(2), tensor(False)), + (2, tensor(False)), + (2.0, tensor(False)), + (tensor(2), tensor(False)), + ], + ) + def test_metrics_ne(second_operand, expected_result): + first_metric = DummyMetric(2) + +> final_ne = first_metric != second_operand + +test_composition.py:285: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:623: in __ne__ + return CompositionalMetric(B.ne, self, other) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = , args = ('ne',) +kwargs = {} + + def __getattribute__(self, *args, **kwargs): + # Perform custom logic here + +> obj = object.__getattribute__(this_module, *args, **kwargs) +E AttributeError: 'module' object has no attribute 'ne' + +../../../../../../torch2paddle/paddleext/torchapi/__init__.py:20: AttributeError +______________ test_metrics_ne[second_operand3-expected_result3] _______________ + +second_operand = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 2) +expected_result = Tensor(shape=[], dtype=bool, place=Place(cpu), stop_gradient=True, + False) + + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(2), tensor(False)), + (2, tensor(False)), + (2.0, tensor(False)), + (tensor(2), tensor(False)), + ], + ) + def test_metrics_ne(second_operand, expected_result): + first_metric = DummyMetric(2) + +> final_ne = first_metric != second_operand + +test_composition.py:285: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:623: in __ne__ + return CompositionalMetric(B.ne, self, other) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = , args = ('ne',) +kwargs = {} + + def __getattribute__(self, *args, **kwargs): + # Perform custom logic here + +> obj = object.__getattribute__(this_module, *args, **kwargs) +E 
AttributeError: 'module' object has no attribute 'ne' + +../../../../../../torch2paddle/paddleext/torchapi/__init__.py:20: AttributeError +______________ test_metrics_or[second_operand1-expected_result1] _______________ + +second_operand = Tensor(shape=[3], dtype=int64, place=Place(cpu), stop_gradient=True, + [1, 0, 3]) +expected_result = Tensor(shape=[3], dtype=int64, place=Place(cpu), stop_gradient=True, + [-1, -2, 3]) + + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [(DummyMetric([1, 0, 3]), tensor([-1, -2, 3])), (tensor([1, 0, 3]), tensor([-1, -2, 3]))], + ) + @pytest.mark.skipif(**_MARK_TORCH_MIN_1_5) + def test_metrics_or(second_operand, expected_result): + first_metric = DummyMetric([-1, -2, 3]) + + final_or = first_metric | second_operand +> final_ror = second_operand | first_metric + +test_composition.py:303: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../../../../../torch2paddle/paddleext/torchapi/tensor_.py:357: in __or__ + return paddle.logical_or(self.bool(), other.bool()) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = DummyMetric(), name = 'bool' + + def __getattr__(self, name): + if '_parameters' in self.__dict__: + _parameters = self.__dict__['_parameters'] + if name in self._parameters: + if in_declarative_mode(): + return _convert_into_variable(self._parameters[name]) + return self._parameters[name] + if '_sub_layers' in self.__dict__: + _sub_layers = self.__dict__['_sub_layers'] + if name in self._sub_layers: + return self._sub_layers[name] + if '_buffers' in self.__dict__: + _buffers = self.__dict__['_buffers'] + if name in _buffers: + if in_declarative_mode(): + return _convert_into_variable(_buffers[name]) + return _buffers[name] +> return object.__getattribute__(self, name) +E AttributeError: 'DummyMetric' object has no attribute 'bool' + +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/dygraph/layers.py:1123: AttributeError +__________________ test_metrics_rfloordiv[5-expected_result0] __________________ + +first_operand = 5 +expected_result = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 2) + + @pytest.mark.parametrize( + ["first_operand", "expected_result"], + [(5, tensor(2)), (5.0, tensor(2.0)), (tensor(5), tensor(2))], + ) + @pytest.mark.skipif(**_MARK_TORCH_MIN_1_5) + def test_metrics_rfloordiv(first_operand, expected_result): + second_operand = DummyMetric(2) + + final_rfloordiv = first_operand // second_operand + + assert isinstance(final_rfloordiv, CompositionalMetric) + + final_rfloordiv.update() +> assert B.allclose(expected_result, final_rfloordiv.compute()) + +test_composition.py:347: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:378: in wrapped_func + self._computed = compute(*args, **kwargs) +../../paddlemetrics/metric.py:756: in compute + return self.op(val_a, val_b) +../../../../../../torch2paddle/paddleext/torchapi/core.py:91: in paddle_func + return func(*args, **kwargs) +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/tensor/math.py:460: in floor_divide + return _elementwise_op_in_dygraph( +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/decorator.py:232: in fun + return caller(func, *(extras + args), **kw) +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/wrapped_decorator.py:25: in __impl__ + return wrapped_func(*args, **kwargs) 
+/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/framework.py:434: in __impl__ + return func(*args, **kwargs) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +x = 5 +y = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 2) +axis = -1, act = None, use_mkldnn = False, op_name = 'elementwise_floordiv' + + @dygraph_only + def _elementwise_op_in_dygraph(x, + y, + axis=-1, + act=None, + use_mkldnn=False, + op_name=None): + def is_inplace(op_name): + return op_name[-1] == "_" + + if op_name not in OP_NAMEMAPPING.keys(): + op = getattr(_C_ops, op_name) + out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn) + else: + if in_dygraph_mode(): + op = getattr(_C_ops, OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name) + out = op(x, y) + + if _in_legacy_dygraph(): + op = getattr(_C_ops, op_name) +> out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn) +E ValueError: (InvalidArgument) elementwise_floordiv(): argument 'X' (position 0) must be Tensor, but got int (at /Users/paddle/work/pengyuqi/Paddle/paddle/fluid/pybind/op_function_common.cc:737) + +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/tensor/math.py:218: ValueError +_________________ test_metrics_rfloordiv[5.0-expected_result1] _________________ + +first_operand = 5.0 +expected_result = Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True, + 2.) + + @pytest.mark.parametrize( + ["first_operand", "expected_result"], + [(5, tensor(2)), (5.0, tensor(2.0)), (tensor(5), tensor(2))], + ) + @pytest.mark.skipif(**_MARK_TORCH_MIN_1_5) + def test_metrics_rfloordiv(first_operand, expected_result): + second_operand = DummyMetric(2) + + final_rfloordiv = first_operand // second_operand + + assert isinstance(final_rfloordiv, CompositionalMetric) + + final_rfloordiv.update() +> assert B.allclose(expected_result, final_rfloordiv.compute()) + +test_composition.py:347: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:378: in wrapped_func + self._computed = compute(*args, **kwargs) +../../paddlemetrics/metric.py:756: in compute + return self.op(val_a, val_b) +../../../../../../torch2paddle/paddleext/torchapi/core.py:91: in paddle_func + return func(*args, **kwargs) +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/tensor/math.py:460: in floor_divide + return _elementwise_op_in_dygraph( +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/decorator.py:232: in fun + return caller(func, *(extras + args), **kw) +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/wrapped_decorator.py:25: in __impl__ + return wrapped_func(*args, **kwargs) +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/framework.py:434: in __impl__ + return func(*args, **kwargs) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +x = 5.0 +y = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 2) +axis = -1, act = None, use_mkldnn = False, op_name = 'elementwise_floordiv' + + @dygraph_only + def _elementwise_op_in_dygraph(x, + y, + axis=-1, + act=None, + use_mkldnn=False, + op_name=None): + def is_inplace(op_name): + return op_name[-1] == "_" + + if op_name not in OP_NAMEMAPPING.keys(): + op = getattr(_C_ops, op_name) + out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn) + else: + if in_dygraph_mode(): + op = getattr(_C_ops, OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name) + out = 
op(x, y) + + if _in_legacy_dygraph(): + op = getattr(_C_ops, op_name) +> out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn) +E ValueError: (InvalidArgument) elementwise_floordiv(): argument 'X' (position 0) must be Tensor, but got float (at /Users/paddle/work/pengyuqi/Paddle/paddle/fluid/pybind/op_function_common.cc:737) + +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/tensor/math.py:218: ValueError +___________ test_metrics_rfloordiv[first_operand2-expected_result2] ____________ + +first_operand = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 5) +expected_result = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 2) + + @pytest.mark.parametrize( + ["first_operand", "expected_result"], + [(5, tensor(2)), (5.0, tensor(2.0)), (tensor(5), tensor(2))], + ) + @pytest.mark.skipif(**_MARK_TORCH_MIN_1_5) + def test_metrics_rfloordiv(first_operand, expected_result): + second_operand = DummyMetric(2) + +> final_rfloordiv = first_operand // second_operand + +test_composition.py:342: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/dygraph/math_op_patch.py:257: in __impl__ + other_var = create_scalar( +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/dygraph/math_op_patch.py:90: in create_scalar + return create_tensor(value, dtype, shape=[1]) +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/decorator.py:232: in fun + return caller(func, *(extras + args), **kw) +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/dygraph/base.py:299: in __impl__ + return func(*args, **kwargs) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +value = DummyMetric(), dtype = paddle.int32, shape = [1] + + @no_grad + def create_tensor(value, dtype, shape): + out = _varbase_creator(dtype=dtype) +> out = _C_ops.fill_constant(out, 'dtype', dtype, 'shape', shape, 'value', + value, 'force_cpu', False) +E ValueError: (InvalidArgument) fill_constant(): argument (position 6) must be float, but got DummyMetric (at /Users/paddle/work/pengyuqi/Paddle/paddle/fluid/pybind/op_function_common.cc:189) + +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/dygraph/math_op_patch.py:84: ValueError +____________ test_metrics_rmatmul[first_operand0-expected_result0] _____________ + +first_operand = Tensor(shape=[3], dtype=int64, place=Place(cpu), stop_gradient=True, + [2, 2, 2]) +expected_result = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 12) + + @pytest.mark.parametrize( + ["first_operand", "expected_result"], + [pytest.param(tensor([2, 2, 2]), tensor(12), marks=pytest.mark.skipif(**_MARK_TORCH_MIN_1_4))], + ) + def test_metrics_rmatmul(first_operand, expected_result): + second_operand = DummyMetric([2, 2, 2]) + +> final_rmatmul = first_operand @ second_operand + +test_composition.py:357: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/dygraph/math_op_patch.py:257: in __impl__ + other_var = create_scalar( +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/dygraph/math_op_patch.py:90: in create_scalar + return create_tensor(value, dtype, shape=[1]) +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/decorator.py:232: in fun + return caller(func, *(extras + args), **kw) 
+/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/dygraph/base.py:299: in __impl__ + return func(*args, **kwargs) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +value = DummyMetric(), dtype = paddle.int64, shape = [1] + + @no_grad + def create_tensor(value, dtype, shape): + out = _varbase_creator(dtype=dtype) +> out = _C_ops.fill_constant(out, 'dtype', dtype, 'shape', shape, 'value', + value, 'force_cpu', False) +E ValueError: (InvalidArgument) fill_constant(): argument (position 6) must be float, but got DummyMetric (at /Users/paddle/work/pengyuqi/Paddle/paddle/fluid/pybind/op_function_common.cc:189) + +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/dygraph/math_op_patch.py:84: ValueError +______________ test_metrics_rmod[first_operand0-expected_result0] ______________ + +first_operand = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 2) +expected_result = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 2) + + @pytest.mark.parametrize( + ["first_operand", "expected_result"], + [pytest.param(tensor(2), tensor(2), marks=pytest.mark.skipif(**_MARK_TORCH_MIN_1_4))], + ) + def test_metrics_rmod(first_operand, expected_result): + second_operand = DummyMetric(5) + +> final_rmod = first_operand % second_operand + +test_composition.py:372: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/dygraph/math_op_patch.py:257: in __impl__ + other_var = create_scalar( +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/dygraph/math_op_patch.py:90: in create_scalar + return create_tensor(value, dtype, shape=[1]) +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/decorator.py:232: in fun + return caller(func, *(extras + args), **kw) +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/dygraph/base.py:299: in __impl__ + return func(*args, **kwargs) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +value = DummyMetric(), dtype = paddle.int32, shape = [1] + + @no_grad + def create_tensor(value, dtype, shape): + out = _varbase_creator(dtype=dtype) +> out = _C_ops.fill_constant(out, 'dtype', dtype, 'shape', shape, 'value', + value, 'force_cpu', False) +E ValueError: (InvalidArgument) fill_constant(): argument (position 6) must be float, but got DummyMetric (at /Users/paddle/work/pengyuqi/Paddle/paddle/fluid/pybind/op_function_common.cc:189) + +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/dygraph/math_op_patch.py:84: ValueError +____________________ test_metrics_rpow[2-expected_result1] _____________________ + +first_operand = 2 +expected_result = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 4) + + @pytest.mark.parametrize( + "first_operand,expected_result", + [ + pytest.param(DummyMetric(2), tensor(4)), + pytest.param(2, tensor(4)), + pytest.param(2.0, tensor(4.0), marks=pytest.mark.skipif(**_MARK_TORCH_MIN_1_6)), + ], + ) + def test_metrics_rpow(first_operand, expected_result): + second_operand = DummyMetric(2) + + final_rpow = first_operand ** second_operand + + assert isinstance(final_rpow, CompositionalMetric) + final_rpow.update() +> assert B.allclose(expected_result, final_rpow.compute()) + +test_composition.py:395: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:378: in wrapped_func + 
self._computed = compute(*args, **kwargs) +../../paddlemetrics/metric.py:756: in compute + return self.op(val_a, val_b) +../../../../../../torch2paddle/paddleext/torchapi/core.py:91: in paddle_func + return func(*args, **kwargs) +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/tensor/math.py:168: in pow + return _elementwise_op_in_dygraph( +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/decorator.py:232: in fun + return caller(func, *(extras + args), **kw) +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/wrapped_decorator.py:25: in __impl__ + return wrapped_func(*args, **kwargs) +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/framework.py:434: in __impl__ + return func(*args, **kwargs) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +x = 2 +y = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 2) +axis = -1, act = None, use_mkldnn = False, op_name = 'elementwise_pow' + + @dygraph_only + def _elementwise_op_in_dygraph(x, + y, + axis=-1, + act=None, + use_mkldnn=False, + op_name=None): + def is_inplace(op_name): + return op_name[-1] == "_" + + if op_name not in OP_NAMEMAPPING.keys(): + op = getattr(_C_ops, op_name) + out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn) + else: + if in_dygraph_mode(): + op = getattr(_C_ops, OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name) + out = op(x, y) + + if _in_legacy_dygraph(): + op = getattr(_C_ops, op_name) +> out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn) +E ValueError: (InvalidArgument) elementwise_pow(): argument 'X' (position 0) must be Tensor, but got int (at /Users/paddle/work/pengyuqi/Paddle/paddle/fluid/pybind/op_function_common.cc:737) + +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/tensor/math.py:218: ValueError +___________________ test_metrics_rpow[2.0-expected_result2] ____________________ + +first_operand = 2.0 +expected_result = Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True, + 4.) 
+ + @pytest.mark.parametrize( + "first_operand,expected_result", + [ + pytest.param(DummyMetric(2), tensor(4)), + pytest.param(2, tensor(4)), + pytest.param(2.0, tensor(4.0), marks=pytest.mark.skipif(**_MARK_TORCH_MIN_1_6)), + ], + ) + def test_metrics_rpow(first_operand, expected_result): + second_operand = DummyMetric(2) + + final_rpow = first_operand ** second_operand + + assert isinstance(final_rpow, CompositionalMetric) + final_rpow.update() +> assert B.allclose(expected_result, final_rpow.compute()) + +test_composition.py:395: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:378: in wrapped_func + self._computed = compute(*args, **kwargs) +../../paddlemetrics/metric.py:756: in compute + return self.op(val_a, val_b) +../../../../../../torch2paddle/paddleext/torchapi/core.py:91: in paddle_func + return func(*args, **kwargs) +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/tensor/math.py:168: in pow + return _elementwise_op_in_dygraph( +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/decorator.py:232: in fun + return caller(func, *(extras + args), **kw) +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/wrapped_decorator.py:25: in __impl__ + return wrapped_func(*args, **kwargs) +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/framework.py:434: in __impl__ + return func(*args, **kwargs) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +x = 2.0 +y = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 2) +axis = -1, act = None, use_mkldnn = False, op_name = 'elementwise_pow' + + @dygraph_only + def _elementwise_op_in_dygraph(x, + y, + axis=-1, + act=None, + use_mkldnn=False, + op_name=None): + def is_inplace(op_name): + return op_name[-1] == "_" + + if op_name not in OP_NAMEMAPPING.keys(): + op = getattr(_C_ops, op_name) + out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn) + else: + if in_dygraph_mode(): + op = getattr(_C_ops, OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name) + out = op(x, y) + + if _in_legacy_dygraph(): + op = getattr(_C_ops, op_name) +> out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn) +E ValueError: (InvalidArgument) elementwise_pow(): argument 'X' (position 0) must be Tensor, but got float (at /Users/paddle/work/pengyuqi/Paddle/paddle/fluid/pybind/op_function_common.cc:737) + +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/tensor/math.py:218: ValueError +______________ test_metrics_rsub[first_operand0-expected_result0] ______________ + +first_operand = DummyMetric() +expected_result = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 1) + + @pytest.mark.parametrize( + ["first_operand", "expected_result"], + [ + (DummyMetric(3), tensor(1)), + (3, tensor(1)), + (3.0, tensor(1.0)), + pytest.param(tensor(3), tensor(1), marks=pytest.mark.skipif(**_MARK_TORCH_MIN_1_4)), + ], + ) + def test_metrics_rsub(first_operand, expected_result): + second_operand = DummyMetric(2) + +> final_rsub = first_operand - second_operand + +test_composition.py:410: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:666: in __sub__ + return CompositionalMetric(B.sub, self, other) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = , args = ('sub',) +kwargs = {} + + def __getattribute__(self, *args, **kwargs): + # Perform custom logic here + +> obj = 
object.__getattribute__(this_module, *args, **kwargs) +E AttributeError: 'module' object has no attribute 'sub' + +../../../../../../torch2paddle/paddleext/torchapi/__init__.py:20: AttributeError +____________________ test_metrics_rsub[3-expected_result1] _____________________ + +first_operand = 3 +expected_result = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 1) + + @pytest.mark.parametrize( + ["first_operand", "expected_result"], + [ + (DummyMetric(3), tensor(1)), + (3, tensor(1)), + (3.0, tensor(1.0)), + pytest.param(tensor(3), tensor(1), marks=pytest.mark.skipif(**_MARK_TORCH_MIN_1_4)), + ], + ) + def test_metrics_rsub(first_operand, expected_result): + second_operand = DummyMetric(2) + +> final_rsub = first_operand - second_operand + +test_composition.py:410: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:657: in __rsub__ + return CompositionalMetric(B.sub, other, self) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = , args = ('sub',) +kwargs = {} + + def __getattribute__(self, *args, **kwargs): + # Perform custom logic here + +> obj = object.__getattribute__(this_module, *args, **kwargs) +E AttributeError: 'module' object has no attribute 'sub' + +../../../../../../torch2paddle/paddleext/torchapi/__init__.py:20: AttributeError +___________________ test_metrics_rsub[3.0-expected_result2] ____________________ + +first_operand = 3.0 +expected_result = Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True, + 1.) + + @pytest.mark.parametrize( + ["first_operand", "expected_result"], + [ + (DummyMetric(3), tensor(1)), + (3, tensor(1)), + (3.0, tensor(1.0)), + pytest.param(tensor(3), tensor(1), marks=pytest.mark.skipif(**_MARK_TORCH_MIN_1_4)), + ], + ) + def test_metrics_rsub(first_operand, expected_result): + second_operand = DummyMetric(2) + +> final_rsub = first_operand - second_operand + +test_composition.py:410: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:657: in __rsub__ + return CompositionalMetric(B.sub, other, self) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = , args = ('sub',) +kwargs = {} + + def __getattribute__(self, *args, **kwargs): + # Perform custom logic here + +> obj = object.__getattribute__(this_module, *args, **kwargs) +E AttributeError: 'module' object has no attribute 'sub' + +../../../../../../torch2paddle/paddleext/torchapi/__init__.py:20: AttributeError +______________ test_metrics_rsub[first_operand3-expected_result3] ______________ + +first_operand = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 3) +expected_result = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 1) + + @pytest.mark.parametrize( + ["first_operand", "expected_result"], + [ + (DummyMetric(3), tensor(1)), + (3, tensor(1)), + (3.0, tensor(1.0)), + pytest.param(tensor(3), tensor(1), marks=pytest.mark.skipif(**_MARK_TORCH_MIN_1_4)), + ], + ) + def test_metrics_rsub(first_operand, expected_result): + second_operand = DummyMetric(2) + + final_rsub = first_operand - second_operand + + assert isinstance(final_rsub, CompositionalMetric) + final_rsub.update() +> assert B.allclose(expected_result, final_rsub.compute()) +E assert Tensor(shape=[1], dtype=bool, place=Place(cpu), stop_gradient=True,\n [False]) +E + where Tensor(shape=[1], dtype=bool, place=Place(cpu), stop_gradient=True,\n [False]) = 
(Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True,\n 1), 5) +E + where = B.allclose +E + and 5 = () +E + where = CompositionalMetric(\n add(\n 3,\n DummyMetric()\n )\n).compute + +test_composition.py:414: AssertionError +____________ test_metrics_rtruediv[first_operand0-expected_result0] ____________ + +first_operand = DummyMetric() +expected_result = Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True, + 2.) + + @pytest.mark.parametrize( + ["first_operand", "expected_result"], + [ + (DummyMetric(6), tensor(2.0)), + (6, tensor(2.0)), + (6.0, tensor(2.0)), + (tensor(6), tensor(2.0)), + ], + ) + @pytest.mark.skipif(**_MARK_TORCH_MIN_1_5) + def test_metrics_rtruediv(first_operand, expected_result): + second_operand = DummyMetric(3) + +> final_rtruediv = first_operand / second_operand + +test_composition.py:430: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:669: in __truediv__ + return CompositionalMetric(B.true_divide, self, other) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = +args = ('true_divide',), kwargs = {} + + def __getattribute__(self, *args, **kwargs): + # Perform custom logic here + +> obj = object.__getattribute__(this_module, *args, **kwargs) +E AttributeError: 'module' object has no attribute 'true_divide' + +../../../../../../torch2paddle/paddleext/torchapi/__init__.py:20: AttributeError +__________________ test_metrics_rtruediv[6-expected_result1] ___________________ + +first_operand = 6 +expected_result = Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True, + 2.) + + @pytest.mark.parametrize( + ["first_operand", "expected_result"], + [ + (DummyMetric(6), tensor(2.0)), + (6, tensor(2.0)), + (6.0, tensor(2.0)), + (tensor(6), tensor(2.0)), + ], + ) + @pytest.mark.skipif(**_MARK_TORCH_MIN_1_5) + def test_metrics_rtruediv(first_operand, expected_result): + second_operand = DummyMetric(3) + +> final_rtruediv = first_operand / second_operand + +test_composition.py:430: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:660: in __rtruediv__ + return CompositionalMetric(B.true_divide, other, self) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = +args = ('true_divide',), kwargs = {} + + def __getattribute__(self, *args, **kwargs): + # Perform custom logic here + +> obj = object.__getattribute__(this_module, *args, **kwargs) +E AttributeError: 'module' object has no attribute 'true_divide' + +../../../../../../torch2paddle/paddleext/torchapi/__init__.py:20: AttributeError +_________________ test_metrics_rtruediv[6.0-expected_result2] __________________ + +first_operand = 6.0 +expected_result = Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True, + 2.) 
+ + @pytest.mark.parametrize( + ["first_operand", "expected_result"], + [ + (DummyMetric(6), tensor(2.0)), + (6, tensor(2.0)), + (6.0, tensor(2.0)), + (tensor(6), tensor(2.0)), + ], + ) + @pytest.mark.skipif(**_MARK_TORCH_MIN_1_5) + def test_metrics_rtruediv(first_operand, expected_result): + second_operand = DummyMetric(3) + +> final_rtruediv = first_operand / second_operand + +test_composition.py:430: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:660: in __rtruediv__ + return CompositionalMetric(B.true_divide, other, self) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = +args = ('true_divide',), kwargs = {} + + def __getattribute__(self, *args, **kwargs): + # Perform custom logic here + +> obj = object.__getattribute__(this_module, *args, **kwargs) +E AttributeError: 'module' object has no attribute 'true_divide' + +../../../../../../torch2paddle/paddleext/torchapi/__init__.py:20: AttributeError +____________ test_metrics_rtruediv[first_operand3-expected_result3] ____________ + +first_operand = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 6) +expected_result = Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True, + 2.) + + @pytest.mark.parametrize( + ["first_operand", "expected_result"], + [ + (DummyMetric(6), tensor(2.0)), + (6, tensor(2.0)), + (6.0, tensor(2.0)), + (tensor(6), tensor(2.0)), + ], + ) + @pytest.mark.skipif(**_MARK_TORCH_MIN_1_5) + def test_metrics_rtruediv(first_operand, expected_result): + second_operand = DummyMetric(3) + + final_rtruediv = first_operand / second_operand + + assert isinstance(final_rtruediv, CompositionalMetric) + final_rtruediv.update() +> assert B.allclose(expected_result, final_rtruediv.compute()) +E assert Tensor(shape=[1], dtype=bool, place=Place(cpu), stop_gradient=True,\n [False]) +E + where Tensor(shape=[1], dtype=bool, place=Place(cpu), stop_gradient=True,\n [False]) = (Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,\n 2.), 9) +E + where = B.allclose +E + and 9 = () +E + where = CompositionalMetric(\n add(\n 6,\n DummyMetric()\n )\n).compute + +test_composition.py:434: AssertionError +______________ test_metrics_sub[second_operand0-expected_result0] ______________ + +second_operand = DummyMetric() +expected_result = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 1) + + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(2), tensor(1)), + (2, tensor(1)), + (2.0, tensor(1.0)), + (tensor(2), tensor(1)), + ], + ) + def test_metrics_sub(second_operand, expected_result): + first_metric = DummyMetric(3) + +> final_sub = first_metric - second_operand + +test_composition.py:449: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:666: in __sub__ + return CompositionalMetric(B.sub, self, other) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = , args = ('sub',) +kwargs = {} + + def __getattribute__(self, *args, **kwargs): + # Perform custom logic here + +> obj = object.__getattribute__(this_module, *args, **kwargs) +E AttributeError: 'module' object has no attribute 'sub' + +../../../../../../torch2paddle/paddleext/torchapi/__init__.py:20: AttributeError +_____________________ test_metrics_sub[2-expected_result1] _____________________ + +second_operand = 2 +expected_result = Tensor(shape=[], dtype=int32, place=Place(cpu), 
stop_gradient=True, + 1) + + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(2), tensor(1)), + (2, tensor(1)), + (2.0, tensor(1.0)), + (tensor(2), tensor(1)), + ], + ) + def test_metrics_sub(second_operand, expected_result): + first_metric = DummyMetric(3) + +> final_sub = first_metric - second_operand + +test_composition.py:449: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:666: in __sub__ + return CompositionalMetric(B.sub, self, other) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = , args = ('sub',) +kwargs = {} + + def __getattribute__(self, *args, **kwargs): + # Perform custom logic here + +> obj = object.__getattribute__(this_module, *args, **kwargs) +E AttributeError: 'module' object has no attribute 'sub' + +../../../../../../torch2paddle/paddleext/torchapi/__init__.py:20: AttributeError +____________________ test_metrics_sub[2.0-expected_result2] ____________________ + +second_operand = 2.0 +expected_result = Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True, + 1.) + + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(2), tensor(1)), + (2, tensor(1)), + (2.0, tensor(1.0)), + (tensor(2), tensor(1)), + ], + ) + def test_metrics_sub(second_operand, expected_result): + first_metric = DummyMetric(3) + +> final_sub = first_metric - second_operand + +test_composition.py:449: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:666: in __sub__ + return CompositionalMetric(B.sub, self, other) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = , args = ('sub',) +kwargs = {} + + def __getattribute__(self, *args, **kwargs): + # Perform custom logic here + +> obj = object.__getattribute__(this_module, *args, **kwargs) +E AttributeError: 'module' object has no attribute 'sub' + +../../../../../../torch2paddle/paddleext/torchapi/__init__.py:20: AttributeError +______________ test_metrics_sub[second_operand3-expected_result3] ______________ + +second_operand = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 2) +expected_result = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 1) + + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(2), tensor(1)), + (2, tensor(1)), + (2.0, tensor(1.0)), + (tensor(2), tensor(1)), + ], + ) + def test_metrics_sub(second_operand, expected_result): + first_metric = DummyMetric(3) + +> final_sub = first_metric - second_operand + +test_composition.py:449: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:666: in __sub__ + return CompositionalMetric(B.sub, self, other) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = , args = ('sub',) +kwargs = {} + + def __getattribute__(self, *args, **kwargs): + # Perform custom logic here + +> obj = object.__getattribute__(this_module, *args, **kwargs) +E AttributeError: 'module' object has no attribute 'sub' + +../../../../../../torch2paddle/paddleext/torchapi/__init__.py:20: AttributeError +____________ test_metrics_truediv[second_operand0-expected_result0] ____________ + +second_operand = DummyMetric() +expected_result = Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True, + 2.) 
+ + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(3), tensor(2.0)), + (3, tensor(2.0)), + (3.0, tensor(2.0)), + (tensor(3), tensor(2.0)), + ], + ) + @pytest.mark.skipif(**_MARK_TORCH_MIN_1_5) + def test_metrics_truediv(second_operand, expected_result): + first_metric = DummyMetric(6) + +> final_truediv = first_metric / second_operand + +test_composition.py:469: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:669: in __truediv__ + return CompositionalMetric(B.true_divide, self, other) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = +args = ('true_divide',), kwargs = {} + + def __getattribute__(self, *args, **kwargs): + # Perform custom logic here + +> obj = object.__getattribute__(this_module, *args, **kwargs) +E AttributeError: 'module' object has no attribute 'true_divide' + +../../../../../../torch2paddle/paddleext/torchapi/__init__.py:20: AttributeError +___________________ test_metrics_truediv[3-expected_result1] ___________________ + +second_operand = 3 +expected_result = Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True, + 2.) + + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(3), tensor(2.0)), + (3, tensor(2.0)), + (3.0, tensor(2.0)), + (tensor(3), tensor(2.0)), + ], + ) + @pytest.mark.skipif(**_MARK_TORCH_MIN_1_5) + def test_metrics_truediv(second_operand, expected_result): + first_metric = DummyMetric(6) + +> final_truediv = first_metric / second_operand + +test_composition.py:469: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:669: in __truediv__ + return CompositionalMetric(B.true_divide, self, other) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = +args = ('true_divide',), kwargs = {} + + def __getattribute__(self, *args, **kwargs): + # Perform custom logic here + +> obj = object.__getattribute__(this_module, *args, **kwargs) +E AttributeError: 'module' object has no attribute 'true_divide' + +../../../../../../torch2paddle/paddleext/torchapi/__init__.py:20: AttributeError +__________________ test_metrics_truediv[3.0-expected_result2] __________________ + +second_operand = 3.0 +expected_result = Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True, + 2.) 
+ + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(3), tensor(2.0)), + (3, tensor(2.0)), + (3.0, tensor(2.0)), + (tensor(3), tensor(2.0)), + ], + ) + @pytest.mark.skipif(**_MARK_TORCH_MIN_1_5) + def test_metrics_truediv(second_operand, expected_result): + first_metric = DummyMetric(6) + +> final_truediv = first_metric / second_operand + +test_composition.py:469: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:669: in __truediv__ + return CompositionalMetric(B.true_divide, self, other) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = +args = ('true_divide',), kwargs = {} + + def __getattribute__(self, *args, **kwargs): + # Perform custom logic here + +> obj = object.__getattribute__(this_module, *args, **kwargs) +E AttributeError: 'module' object has no attribute 'true_divide' + +../../../../../../torch2paddle/paddleext/torchapi/__init__.py:20: AttributeError +____________ test_metrics_truediv[second_operand3-expected_result3] ____________ + +second_operand = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 3) +expected_result = Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True, + 2.) + + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(3), tensor(2.0)), + (3, tensor(2.0)), + (3.0, tensor(2.0)), + (tensor(3), tensor(2.0)), + ], + ) + @pytest.mark.skipif(**_MARK_TORCH_MIN_1_5) + def test_metrics_truediv(second_operand, expected_result): + first_metric = DummyMetric(6) + +> final_truediv = first_metric / second_operand + +test_composition.py:469: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../paddlemetrics/metric.py:669: in __truediv__ + return CompositionalMetric(B.true_divide, self, other) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = +args = ('true_divide',), kwargs = {} + + def __getattribute__(self, *args, **kwargs): + # Perform custom logic here + +> obj = object.__getattribute__(this_module, *args, **kwargs) +E AttributeError: 'module' object has no attribute 'true_divide' + +../../../../../../torch2paddle/paddleext/torchapi/__init__.py:20: AttributeError +______________ test_metrics_xor[second_operand1-expected_result1] ______________ + +second_operand = Tensor(shape=[3], dtype=int64, place=Place(cpu), stop_gradient=True, + [1, 0, 3]) +expected_result = Tensor(shape=[3], dtype=int64, place=Place(cpu), stop_gradient=True, + [-2, -2, 0]) + + @pytest.mark.parametrize( + ["second_operand", "expected_result"], + [(DummyMetric([1, 0, 3]), tensor([-2, -2, 0])), (tensor([1, 0, 3]), tensor([-2, -2, 0]))], + ) + @pytest.mark.skipif(**_MARK_TORCH_MIN_1_5) + def test_metrics_xor(second_operand, expected_result): + first_metric = DummyMetric([-1, -2, 3]) + + final_xor = first_metric ^ second_operand +> final_rxor = second_operand ^ first_metric + +test_composition.py:485: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/tensor/logic.py:616: in bitwise_xor + return _bitwise_op( +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +op_name = 'bitwise_xor' +x = Tensor(shape=[3], dtype=int64, place=Place(cpu), stop_gradient=True, + [1, 0, 3]) +y = DummyMetric(), out = None, name = None, binary_op = True + + def _bitwise_op(op_name, x, y, out=None, name=None, 
binary_op=True): + if paddle.in_dynamic_mode(): + op = getattr(_C_ops, op_name) + if binary_op: +> return op(x, y) +E ValueError: (InvalidArgument) bitwise_xor(): argument 'Y' (position 1) must be Tensor, but got DummyMetric (at /Users/paddle/work/pengyuqi/Paddle/paddle/fluid/pybind/op_function_common.cc:737) + +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/tensor/logic.py:505: ValueError +_______________ test_metrics_getitem[value0-1-expected_result0] ________________ + +value = [1, 2, 3], idx = 1 +expected_result = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 2) + + @pytest.mark.parametrize( + ["value", "idx", "expected_result"], + [([1, 2, 3], 1, tensor(2)), ([[0, 1], [2, 3]], (1, 0), tensor(2)), ([[0, 1], [2, 3]], 1, tensor([2, 3]))], + ) + def test_metrics_getitem(value, idx, expected_result): + first_metric = DummyMetric(value) + + final_getitem = first_metric[idx] + assert isinstance(final_getitem, CompositionalMetric) + final_getitem.update() +> assert B.allclose(expected_result, final_getitem.compute()) + +test_composition.py:543: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../../../../../torch2paddle/paddleext/torchapi/functional.py:308: in allclose + return paddle.allclose(input.float(), other.float(), rtol=rtol, atol=atol, equal_nan=equal_nan, name=name) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +x = Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True, + 2.) +y = Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True, + [2.]) +rtol = 1e-05, atol = 1e-08, equal_nan = False, name = None + + @templatedoc() + def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None): + """ + ${comment} + + Args: + x(Tensor): ${input_comment}. + y(Tensor): ${other_comment}. + rtol(rtoltype, optional): The relative tolerance. Default: :math:`1e-5` . + atol(atoltype, optional): The absolute tolerance. Default: :math:`1e-8` . + equal_nan(equalnantype, optional): ${equal_nan_comment}. + name (str, optional): Name for the operation. For more information, please + refer to :ref:`api_guide_Name`. Default: None. + + Returns: + Tensor: ${out_comment}. + + Raises: + TypeError: The data type of ``x`` must be one of float32, float64. + TypeError: The data type of ``y`` must be one of float32, float64. + TypeError: The type of ``rtol`` must be float. + TypeError: The type of ``atol`` must be float. + TypeError: The type of ``equal_nan`` must be bool. + + Examples: + .. code-block:: python + + import paddle + + x = paddle.to_tensor([10000., 1e-07]) + y = paddle.to_tensor([10000.1, 1e-08]) + result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, + equal_nan=False, name="ignore_nan") + np_result1 = result1.numpy() + # [False] + result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, + equal_nan=True, name="equal_nan") + np_result2 = result2.numpy() + # [False] + + x = paddle.to_tensor([1.0, float('nan')]) + y = paddle.to_tensor([1.0, float('nan')]) + result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, + equal_nan=False, name="ignore_nan") + np_result1 = result1.numpy() + # [False] + result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, + equal_nan=True, name="equal_nan") + np_result2 = result2.numpy() + # [True] + """ + + if in_dygraph_mode(): + # NOTE(dev): Pass tol as Tensor to fix precision loss problem, because + # C++ backend will cast it into float32 if passing float from python. 
+ as_tensor = lambda x: paddle.to_tensor([x], dtype='float64', place='cpu') + return _C_ops.final_state_allclose(x, y, + as_tensor(rtol), + as_tensor(atol), equal_nan) + if _in_legacy_dygraph(): +> return _C_ops.allclose(x, y, 'rtol', + str(rtol), 'atol', + str(atol), 'equal_nan', equal_nan) +E RuntimeError: (PreconditionNotMet) Input(Input) and Input(Other) must have the same dimension size. +E [Hint: Expected input_dim.size() == other_dim.size(), but received input_dim.size():0 != other_dim.size():1.] (at /Users/paddle/work/pengyuqi/Paddle/paddle/phi/infermeta/binary.cc:39) +E [operator < allclose > error] + +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/tensor/logic.py:140: RuntimeError +______________ test_metrics_getitem[value1-idx1-expected_result1] ______________ + +value = [[0, 1], [2, 3]], idx = (1, 0) +expected_result = Tensor(shape=[], dtype=int32, place=Place(cpu), stop_gradient=True, + 2) + + @pytest.mark.parametrize( + ["value", "idx", "expected_result"], + [([1, 2, 3], 1, tensor(2)), ([[0, 1], [2, 3]], (1, 0), tensor(2)), ([[0, 1], [2, 3]], 1, tensor([2, 3]))], + ) + def test_metrics_getitem(value, idx, expected_result): + first_metric = DummyMetric(value) + + final_getitem = first_metric[idx] + assert isinstance(final_getitem, CompositionalMetric) + final_getitem.update() +> assert B.allclose(expected_result, final_getitem.compute()) + +test_composition.py:543: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +../../../../../../torch2paddle/paddleext/torchapi/functional.py:308: in allclose + return paddle.allclose(input.float(), other.float(), rtol=rtol, atol=atol, equal_nan=equal_nan, name=name) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +x = Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True, + 2.) +y = Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True, + [2.]) +rtol = 1e-05, atol = 1e-08, equal_nan = False, name = None + + @templatedoc() + def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None): + """ + ${comment} + + Args: + x(Tensor): ${input_comment}. + y(Tensor): ${other_comment}. + rtol(rtoltype, optional): The relative tolerance. Default: :math:`1e-5` . + atol(atoltype, optional): The absolute tolerance. Default: :math:`1e-8` . + equal_nan(equalnantype, optional): ${equal_nan_comment}. + name (str, optional): Name for the operation. For more information, please + refer to :ref:`api_guide_Name`. Default: None. + + Returns: + Tensor: ${out_comment}. + + Raises: + TypeError: The data type of ``x`` must be one of float32, float64. + TypeError: The data type of ``y`` must be one of float32, float64. + TypeError: The type of ``rtol`` must be float. + TypeError: The type of ``atol`` must be float. + TypeError: The type of ``equal_nan`` must be bool. + + Examples: + .. 
code-block:: python + + import paddle + + x = paddle.to_tensor([10000., 1e-07]) + y = paddle.to_tensor([10000.1, 1e-08]) + result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, + equal_nan=False, name="ignore_nan") + np_result1 = result1.numpy() + # [False] + result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, + equal_nan=True, name="equal_nan") + np_result2 = result2.numpy() + # [False] + + x = paddle.to_tensor([1.0, float('nan')]) + y = paddle.to_tensor([1.0, float('nan')]) + result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, + equal_nan=False, name="ignore_nan") + np_result1 = result1.numpy() + # [False] + result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, + equal_nan=True, name="equal_nan") + np_result2 = result2.numpy() + # [True] + """ + + if in_dygraph_mode(): + # NOTE(dev): Pass tol as Tensor to fix precision loss problem, because + # C++ backend will cast it into float32 if passing float from python. + as_tensor = lambda x: paddle.to_tensor([x], dtype='float64', place='cpu') + return _C_ops.final_state_allclose(x, y, + as_tensor(rtol), + as_tensor(atol), equal_nan) + if _in_legacy_dygraph(): +> return _C_ops.allclose(x, y, 'rtol', + str(rtol), 'atol', + str(atol), 'equal_nan', equal_nan) +E RuntimeError: (PreconditionNotMet) Input(Input) and Input(Other) must have the same dimension size. +E [Hint: Expected input_dim.size() == other_dim.size(), but received input_dim.size():0 != other_dim.size():1.] (at /Users/paddle/work/pengyuqi/Paddle/paddle/phi/infermeta/binary.cc:39) +E [operator < allclose > error] + +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/tensor/logic.py:140: RuntimeError +=============================== warnings summary =============================== +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:19 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:19: DeprecationWarning: Call to deprecated create function FileDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + DESCRIPTOR = _descriptor.FileDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:33 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:33: DeprecationWarning: Call to deprecated create function EnumValueDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.EnumValueDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:37 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:37: DeprecationWarning: Call to deprecated create function EnumValueDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.EnumValueDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:41 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:41: DeprecationWarning: Call to deprecated create function EnumValueDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. 
+ _descriptor.EnumValueDescriptor(
+
+[... the same DeprecationWarning is emitted once for every descriptor created in framework_pb2.py; the EnumValueDescriptor entries for lines 45-183 and the EnumDescriptor entry for line 27 are identical to the ones above apart from the source line, and are omitted here ...]
+
+../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:187
+  /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:187: DeprecationWarning: Call to deprecated create function EnumValueDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool.
+ _descriptor.EnumValueDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:191 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:191: DeprecationWarning: Call to deprecated create function EnumValueDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.EnumValueDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:195 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:195: DeprecationWarning: Call to deprecated create function EnumValueDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.EnumValueDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:199 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:199: DeprecationWarning: Call to deprecated create function EnumValueDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.EnumValueDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:203 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:203: DeprecationWarning: Call to deprecated create function EnumValueDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.EnumValueDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:207 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:207: DeprecationWarning: Call to deprecated create function EnumValueDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.EnumValueDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:211 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:211: DeprecationWarning: Call to deprecated create function EnumValueDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.EnumValueDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:215 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:215: DeprecationWarning: Call to deprecated create function EnumValueDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.EnumValueDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:219 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:219: DeprecationWarning: Call to deprecated create function EnumValueDescriptor(). Note: Create unlinked descriptors is going to go away. 
Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.EnumValueDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:223 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:223: DeprecationWarning: Call to deprecated create function EnumValueDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.EnumValueDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:227 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:227: DeprecationWarning: Call to deprecated create function EnumValueDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.EnumValueDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:109 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:109: DeprecationWarning: Call to deprecated create function EnumDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _VARTYPE_TYPE = _descriptor.EnumDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:247 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:247: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:240 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:240: DeprecationWarning: Call to deprecated create function Descriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _VERSION = _descriptor.Descriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:278 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:278: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:285 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:285: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:292 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:292: DeprecationWarning: Call to deprecated create function FieldDescriptor(). 
Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:299 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:299: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:271 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:271: DeprecationWarning: Call to deprecated create function Descriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _PROCESSMESHDESC = _descriptor.Descriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:330 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:330: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:337 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:337: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:344 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:344: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:351 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:351: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:358 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:358: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. 
+ _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:365 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:365: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:372 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:372: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:379 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:379: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:386 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:386: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:393 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:393: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:400 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:400: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:407 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:407: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:414 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:414: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. 
Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:421 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:421: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:428 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:428: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:323 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:323: DeprecationWarning: Call to deprecated create function Descriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _OPDESC_ATTR = _descriptor.Descriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:458 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:458: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:465 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:465: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:451 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:451: DeprecationWarning: Call to deprecated create function Descriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _OPDESC_VAR = _descriptor.Descriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:495 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:495: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:502 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:502: DeprecationWarning: Call to deprecated create function FieldDescriptor(). 
Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:509 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:509: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:516 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:516: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:523 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:523: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:488 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:488: DeprecationWarning: Call to deprecated create function Descriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _OPDESC = _descriptor.Descriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:554 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:554: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:561 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:561: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:568 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:568: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. 
+ _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:575 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:575: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:582 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:582: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:589 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:589: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:596 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:596: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:547 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:547: DeprecationWarning: Call to deprecated create function Descriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _OPPROTO_VAR = _descriptor.Descriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:626 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:626: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:633 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:633: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:640 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:640: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. 
Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:647 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:647: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:654 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:654: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:661 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:661: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:619 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:619: DeprecationWarning: Call to deprecated create function Descriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _OPPROTO_ATTR = _descriptor.Descriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:691 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:691: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:698 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:698: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:705 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:705: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:712 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:712: DeprecationWarning: Call to deprecated create function FieldDescriptor(). 
Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:719 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:719: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:684 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:684: DeprecationWarning: Call to deprecated create function Descriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _OPPROTO = _descriptor.Descriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:750 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:750: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:757 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:757: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/comet_ml/monkey_patching.py:19 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/comet_ml/monkey_patching.py:19: DeprecationWarning: the imp module is deprecated in favour of importlib; see the module's documentation for alternative uses + import imp + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/framework.py:1104 +test_composition.py::test_metrics_and[second_operand3-expected_result3] +test_composition.py::test_metrics_or[second_operand1-expected_result1] + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/framework.py:1104: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here. + Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations + elif dtype == np.bool: + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/tensor/creation.py:125: 1 warning +test_composition.py: 10 warnings + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/tensor/creation.py:125: DeprecationWarning: `np.object` is a deprecated alias for the builtin `object`. To silence this warning, use `object` by itself. Doing this will not modify any behavior and is safe. 
+  Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations
+    if data.dtype == np.object:
+
+test_composition.py::test_metrics_eq[2.0-expected_result2]
+test_composition.py::test_metrics_ge[2.0-expected_result2]
+test_composition.py::test_metrics_gt[2.0-expected_result2]
+test_composition.py::test_metrics_le[2.0-expected_result2]
+test_composition.py::test_metrics_lt[2.0-expected_result2]
+  /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/dygraph/math_op_patch.py:276: UserWarning: The dtype of left and right variables are not the same, left dtype is paddle.int32, but right dtype is paddle.float32, the right dtype will convert to paddle.int32
+    warnings.warn(
+
+-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
+=========================== short test summary info ============================
+FAILED test_composition.py::test_metrics_and[3-expected_result1] - ValueError...
+FAILED test_composition.py::test_metrics_and[3-expected_result2] - ValueError...
+FAILED test_composition.py::test_metrics_and[second_operand3-expected_result3]
+FAILED test_composition.py::test_metrics_floordiv[2-expected_result1] - Value...
+FAILED test_composition.py::test_metrics_floordiv[2.0-expected_result2] - Val...
+FAILED test_composition.py::test_metrics_matmul[second_operand0-expected_result0]
+FAILED test_composition.py::test_metrics_matmul[second_operand1-expected_result1]
+FAILED test_composition.py::test_metrics_mod[second_operand0-expected_result0]
+FAILED test_composition.py::test_metrics_mod[2-expected_result1] - AttributeE...
+FAILED test_composition.py::test_metrics_mod[2.0-expected_result2] - Attribut...
+FAILED test_composition.py::test_metrics_mod[second_operand3-expected_result3]
+FAILED test_composition.py::test_metrics_mul[second_operand0-expected_result0]
+FAILED test_composition.py::test_metrics_mul[2-expected_result1] - AttributeE...
+FAILED test_composition.py::test_metrics_mul[2.0-expected_result2] - Attribut...
+FAILED test_composition.py::test_metrics_mul[second_operand3-expected_result3]
+FAILED test_composition.py::test_metrics_ne[second_operand0-expected_result0]
+FAILED test_composition.py::test_metrics_ne[2-expected_result1] - AttributeEr...
+FAILED test_composition.py::test_metrics_ne[2.0-expected_result2] - Attribute...
+FAILED test_composition.py::test_metrics_ne[second_operand3-expected_result3]
+FAILED test_composition.py::test_metrics_or[second_operand1-expected_result1]
+FAILED test_composition.py::test_metrics_rfloordiv[5-expected_result0] - Valu...
+FAILED test_composition.py::test_metrics_rfloordiv[5.0-expected_result1] - Va...
+FAILED test_composition.py::test_metrics_rfloordiv[first_operand2-expected_result2]
+FAILED test_composition.py::test_metrics_rmatmul[first_operand0-expected_result0]
+FAILED test_composition.py::test_metrics_rmod[first_operand0-expected_result0]
+FAILED test_composition.py::test_metrics_rpow[2-expected_result1] - ValueErro...
+FAILED test_composition.py::test_metrics_rpow[2.0-expected_result2] - ValueEr...
+FAILED test_composition.py::test_metrics_rsub[first_operand0-expected_result0]
+FAILED test_composition.py::test_metrics_rsub[3-expected_result1] - Attribute...
+FAILED test_composition.py::test_metrics_rsub[3.0-expected_result2] - Attribu...
+FAILED test_composition.py::test_metrics_rsub[first_operand3-expected_result3]
+FAILED test_composition.py::test_metrics_rtruediv[first_operand0-expected_result0]
+FAILED test_composition.py::test_metrics_rtruediv[6-expected_result1] - Attri...
+FAILED test_composition.py::test_metrics_rtruediv[6.0-expected_result2] - Att...
+FAILED test_composition.py::test_metrics_rtruediv[first_operand3-expected_result3]
+FAILED test_composition.py::test_metrics_sub[second_operand0-expected_result0]
+FAILED test_composition.py::test_metrics_sub[2-expected_result1] - AttributeE...
+FAILED test_composition.py::test_metrics_sub[2.0-expected_result2] - Attribut...
+FAILED test_composition.py::test_metrics_sub[second_operand3-expected_result3]
+FAILED test_composition.py::test_metrics_truediv[second_operand0-expected_result0]
+FAILED test_composition.py::test_metrics_truediv[3-expected_result1] - Attrib...
+FAILED test_composition.py::test_metrics_truediv[3.0-expected_result2] - Attr...
+FAILED test_composition.py::test_metrics_truediv[second_operand3-expected_result3]
+FAILED test_composition.py::test_metrics_xor[second_operand1-expected_result1]
+FAILED test_composition.py::test_metrics_getitem[value0-1-expected_result0]
+FAILED test_composition.py::test_metrics_getitem[value1-idx1-expected_result1]
+================= 46 failed, 40 passed, 120 warnings in 2.37s ==================
diff --git a/EE/paddlemetric/src/tests/bases/test_aggregation.py b/EE/paddlemetric/src/tests/bases/test_aggregation.py
new file mode 100644
index 000000000..559ddad0c
--- /dev/null
+++ b/EE/paddlemetric/src/tests/bases/test_aggregation.py
@@ -0,0 +1,166 @@
+import numpy as np
+import pytest
+import paddleext.torchapi as B
+
+from ..helpers.testers import BATCH_SIZE, NUM_BATCHES, MetricTester
+from paddlemetrics.aggregation import CatMetric, MaxMetric, MeanMetric, MinMetric, SumMetric
+
+
+def compare_mean(values, weights):
+    """Reference implementation for mean aggregation."""
+    return np.average(values.numpy(), weights=weights)
+
+
+def compare_sum(values, weights):
+    """Reference implementation for sum aggregation."""
+    return np.sum(values.numpy())
+
+
+def compare_min(values, weights):
+    """Reference implementation for min aggregation."""
+    return np.min(values.numpy())
+
+
+def compare_max(values, weights):
+    """Reference implementation for max aggregation."""
+    return np.max(values.numpy())
+
+
+# Wrap all metrics other than the mean metric so that update() takes an
+# additional (ignored) weights argument; this lets them fit into the
+# testing framework.
+class WrappedMinMetric(MinMetric):
+    """Wrapped min metric."""
+
+    def update(self, values, weights):
+        """Only pass values on."""
+        super().update(values)
+
+
+class WrappedMaxMetric(MaxMetric):
+    """Wrapped max metric."""
+
+    def update(self, values, weights):
+        """Only pass values on."""
+        super().update(values)
+
+
+class WrappedSumMetric(SumMetric):
+    """Wrapped sum metric."""
+
+    def update(self, values, weights):
+        """Only pass values on."""
+        super().update(values)
+
+
+class WrappedCatMetric(CatMetric):
+    """Wrapped cat metric."""
+
+    def update(self, values, weights):
+        """Only pass values on."""
+        super().update(values)
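+# [editor's sketch, illustrative only, not part of the original patch]
+# The reference implementations above receive (values, weights) pairs of
+# shape (NUM_BATCHES, BATCH_SIZE, ...); only compare_mean actually consumes
+# the weights, using numpy's documented weighted-average semantics:
+#
+#     np.average([1.0, 3.0], weights=[1.0, 2.0])  # (1*1 + 3*2) / (1 + 2) == 7/3
+#
+# which is the behaviour MeanMetric is checked against below.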
+@pytest.mark.parametrize(
+    "values, weights",
+    [
+        (B.rand(NUM_BATCHES, BATCH_SIZE), B.ones(NUM_BATCHES, BATCH_SIZE)),
+        (B.rand(NUM_BATCHES, BATCH_SIZE), B.rand(NUM_BATCHES, BATCH_SIZE) > 0.5),
+        (B.rand(NUM_BATCHES, BATCH_SIZE, 2), B.rand(NUM_BATCHES, BATCH_SIZE, 2) > 0.5),
+    ],
+)
+@pytest.mark.parametrize(
+    "metric_class, compare_fn",
+    [
+        (WrappedMinMetric, compare_min),
+        (WrappedMaxMetric, compare_max),
+        (WrappedSumMetric, compare_sum),
+        (MeanMetric, compare_mean),
+    ],
+)
+class TestAggregation(MetricTester):
+    """Test aggregation metrics."""
+
+    @pytest.mark.parametrize("ddp", [False])
+    @pytest.mark.parametrize("dist_sync_on_step", [False])
+    def test_aggregation(self, ddp, dist_sync_on_step, metric_class, compare_fn, values, weights):
+        """Test the modular implementation."""
+        self.run_class_metric_test(
+            ddp=ddp,
+            dist_sync_on_step=dist_sync_on_step,
+            metric_class=metric_class,
+            sk_metric=compare_fn,
+            check_scriptable=True,
+            # Abuse of names here: values/weights are passed through the
+            # preds/target slots of the generic tester.
+            preds=values,
+            target=weights,
+        )
+
+
+_case1 = float("nan") * B.ones(5)
+_case2 = B.tensor([1.0, 2.0, float("nan"), 4.0, 5.0])
+
+
+@pytest.mark.parametrize("value", [_case1, _case2])
+@pytest.mark.parametrize("nan_strategy", ["error", "warn"])
+@pytest.mark.parametrize("metric_class", [MinMetric, MaxMetric, SumMetric, MeanMetric, CatMetric])
+def test_nan_error(value, nan_strategy, metric_class):
+    """Test that the correct errors are raised."""
+    metric = metric_class(nan_strategy=nan_strategy)
+    # note: "Encounted" mirrors the message string raised by the aggregation metrics
+    if nan_strategy == "error":
+        with pytest.raises(RuntimeError, match="Encounted `nan` values in tensor"):
+            metric(value.clone())
+    elif nan_strategy == "warn":
+        with pytest.warns(UserWarning, match="Encounted `nan` values in tensor"):
+            metric(value.clone())
+
+
+@pytest.mark.parametrize(
+    "metric_class, nan_strategy, value, expected",
+    [
+        (MinMetric, "ignore", _case1, B.tensor(float("inf"))),
+        (MinMetric, 2.0, _case1, 2.0),
+        (MinMetric, "ignore", _case2, 1.0),
+        (MinMetric, 2.0, _case2, 1.0),
+        (MaxMetric, "ignore", _case1, -B.tensor(float("inf"))),
+        (MaxMetric, 2.0, _case1, 2.0),
+        (MaxMetric, "ignore", _case2, 5.0),
+        (MaxMetric, 2.0, _case2, 5.0),
+        (SumMetric, "ignore", _case1, 0.0),
+        (SumMetric, 2.0, _case1, 10.0),
+        (SumMetric, "ignore", _case2, 12.0),
+        (SumMetric, 2.0, _case2, 14.0),
+        (MeanMetric, "ignore", _case1, 0.0),
+        (MeanMetric, 2.0, _case1, 2.0),
+        (MeanMetric, "ignore", _case2, 3.0),
+        (MeanMetric, 2.0, _case2, 2.8),
+        (CatMetric, "ignore", _case1, []),
+        (CatMetric, 2.0, _case1, B.tensor([2.0, 2.0, 2.0, 2.0, 2.0])),
+        (CatMetric, "ignore", _case2, B.tensor([1.0, 2.0, 4.0, 5.0])),
+        (CatMetric, 2.0, _case2, B.tensor([1.0, 2.0, 2.0, 4.0, 5.0])),
+    ],
+)
+def test_nan_expected(metric_class, nan_strategy, value, expected):
+    """Test that nan values are handled correctly."""
+    metric = metric_class(nan_strategy=nan_strategy)
+    metric.update(value.clone())
+    out = metric.compute()
+    assert np.allclose(out, expected, equal_nan=True)
+
+
+@pytest.mark.parametrize("metric_class", [MinMetric, MaxMetric, SumMetric, MeanMetric, CatMetric])
+def test_error_on_wrong_nan_strategy(metric_class):
+    """Test that an error is raised on a wrong nan_strategy argument."""
+    with pytest.raises(ValueError, match="Arg `nan_strategy` should either .*"):
+        metric_class(nan_strategy=[])
+
+
+# @pytest.mark.skipif(not hasattr(torch, "broadcast_to"), reason="PyTorch <1.8 does not have broadcast_to")
+@pytest.mark.parametrize(
+    "weights, expected", [(1, 11.5), (B.ones(2, 1, 1), 11.5), (B.tensor([1, 2]).reshape(2, 1, 1), 13.5)]
+)
+def test_mean_metric_broadcasting(weights, expected):
+    """Check that weight broadcasting works for the mean metric."""
+    values = B.arange(24).reshape(2, 3, 4)
+    avg = MeanMetric()
+
+    assert avg(values, weights) == expected
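+# [editor's sketch, illustrative only, not part of the original patch]
+# Why 13.5 in the broadcasting case above: values = arange(24).reshape(2, 3, 4)
+# has per-slice means 5.5 (first 12 numbers) and 17.5 (last 12). With weights
+# [1, 2] broadcast over the two slices, the weighted mean is
+#
+#     (1 * 5.5 * 12 + 2 * 17.5 * 12) / (1 * 12 + 2 * 12) == 486 / 36 == 13.5
+#
+# while uniform weights reduce to the plain mean, 11.5.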
diff --git a/EE/paddlemetric/src/tests/bases/test_collections.py b/EE/paddlemetric/src/tests/bases/test_collections.py
new file mode 100644
index 000000000..d92234f8c
--- /dev/null
+++ b/EE/paddlemetric/src/tests/bases/test_collections.py
@@ -0,0 +1,251 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import pickle
+
+import pytest
+import paddleext.torchapi as B
+
+from ..helpers import seed_all
+from ..helpers.testers import DummyMetricDiff, DummyMetricSum
+from paddlemetrics.collections import MetricCollection
+
+seed_all(42)
+
+
+def test_metric_collection(tmpdir):
+    m1 = DummyMetricSum()
+    m2 = DummyMetricDiff()
+
+    metric_collection = MetricCollection([m1, m2])
+
+    # Test correct dict structure
+    assert len(metric_collection) == 2
+    assert metric_collection["DummyMetricSum"] == m1
+    assert metric_collection["DummyMetricDiff"] == m2
+
+    # Test correct initialization
+    for name, metric in metric_collection.items():
+        assert metric.x == 0, f"Metric {name} not initialized correctly"
+
+    # Test that every metric gets updated
+    metric_collection.update(5)
+    for name, metric in metric_collection.items():
+        assert metric.x.abs() == 5, f"Metric {name} not updated correctly"
+
+    # Test compute on each metric
+    metric_collection.update(-5)
+    metric_vals = metric_collection.compute()
+    assert len(metric_vals) == 2
+    for name, metric_val in metric_vals.items():
+        assert metric_val == 0, f"Metric {name}.compute not called correctly"
+
+    # Test that everything is reset
+    for name, metric in metric_collection.items():
+        assert metric.x == 0, f"Metric {name} not reset correctly"
+
+    # Test that the collection is picklable
+    metric_pickled = pickle.dumps(metric_collection)
+    metric_loaded = pickle.loads(metric_pickled)
+    assert isinstance(metric_loaded, MetricCollection)
+
+
+@pytest.mark.skipif(not B.cuda.is_available(), reason="Test requires GPU.")
+def test_device_and_dtype_transfer_metriccollection(tmpdir):
+    m1 = DummyMetricSum()
+    m2 = DummyMetricDiff()
+
+    metric_collection = MetricCollection([m1, m2])
+    for _, metric in metric_collection.items():
+        assert metric.x.is_cuda is False
+        assert metric.x.dtype == B.float32
+
+    metric_collection = metric_collection.to(device="cuda")
+    for _, metric in metric_collection.items():
+        assert metric.x.is_cuda
+
+    metric_collection = metric_collection.double()
+    for _, metric in metric_collection.items():
+        assert metric.x.dtype == B.float64
+
+    metric_collection = metric_collection.half()
+    for _, metric in metric_collection.items():
+        assert metric.x.dtype == B.float16
+
+
+def test_metric_collection_wrong_input(tmpdir):
+    """Check that errors are raised on wrong input."""
+    dms = DummyMetricSum()
+
+    # Not all inputs are metrics (list)
+    with pytest.raises(ValueError):
+        _ = MetricCollection([dms, 5])
+
+    # Not all inputs are metrics (dict)
+    with pytest.raises(ValueError):
+        _ = MetricCollection({"metric1": dms, "metric2": 5})
+
+    # Same metric passed in multiple times
+    with pytest.raises(ValueError, match="Encountered two metrics both named *."):
+        _ = MetricCollection([dms, dms])
+
+    # Not a list or dict passed in
+    with pytest.warns(Warning, match=" which are not `Metric` so they will be ignored."):
+        _ = MetricCollection(dms, [dms])
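+# [editor's sketch, illustrative only, not part of the original patch]
+# The test below relies on MetricCollection routing positional args to every
+# metric while filtering keyword args per metric against its update()
+# signature: DummyMetricSum.update(x=...) sees only `x`, DummyMetricDiff
+# only `y`. A minimal sketch of such filtering (hypothetical helper name,
+# assuming standard inspect semantics):
+#
+#     import inspect
+#     def _filter_kwargs(metric, **kwargs):
+#         allowed = inspect.signature(metric.update).parameters
+#         return {k: v for k, v in kwargs.items() if k in allowed}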
+def test_metric_collection_args_kwargs(tmpdir):
+    """Check that args and kwargs get passed correctly in a metric collection; checks both the update and forward methods."""
+    m1 = DummyMetricSum()
+    m2 = DummyMetricDiff()
+
+    metric_collection = MetricCollection([m1, m2])
+
+    # args get passed to all metrics
+    metric_collection.update(5)
+    assert metric_collection["DummyMetricSum"].x == 5
+    assert metric_collection["DummyMetricDiff"].x == -5
+    metric_collection.reset()
+    _ = metric_collection(5)
+    assert metric_collection["DummyMetricSum"].x == 5
+    assert metric_collection["DummyMetricDiff"].x == -5
+    metric_collection.reset()
+
+    # kwargs get passed only to the metrics they match
+    metric_collection.update(x=10, y=20)
+    assert metric_collection["DummyMetricSum"].x == 10
+    assert metric_collection["DummyMetricDiff"].x == -20
+    metric_collection.reset()
+    _ = metric_collection(x=10, y=20)
+    assert metric_collection["DummyMetricSum"].x == 10
+    assert metric_collection["DummyMetricDiff"].x == -20
+
+
+@pytest.mark.parametrize(
+    "prefix, postfix",
+    [
+        [None, None],
+        ["prefix_", None],
+        [None, "_postfix"],
+        ["prefix_", "_postfix"],
+    ],
+)
+def test_metric_collection_prefix_postfix_args(prefix, postfix):
+    """Test that the prefix and postfix args alter the keys in the output."""
+    m1 = DummyMetricSum()
+    m2 = DummyMetricDiff()
+    names = ["DummyMetricSum", "DummyMetricDiff"]
+    names = [prefix + n if prefix is not None else n for n in names]
+    names = [n + postfix if postfix is not None else n for n in names]
+
+    metric_collection = MetricCollection([m1, m2], prefix=prefix, postfix=postfix)
+
+    # test forward
+    out = metric_collection(5)
+    for name in names:
+        assert name in out, "prefix or postfix argument not working as intended with forward method"
+
+    # test compute
+    out = metric_collection.compute()
+    for name in names:
+        assert name in out, "prefix or postfix argument not working as intended with compute method"
+
+    # test clone
+    new_metric_collection = metric_collection.clone(prefix="new_prefix_")
+    out = new_metric_collection(5)
+    names = [n[len(prefix) :] if prefix is not None else n for n in names]  # strip away the old prefix
+    for name in names:
+        assert f"new_prefix_{name}" in out, "prefix argument not working as intended with clone method"
+
+    for k, _ in new_metric_collection.items():
+        assert "new_prefix_" in k
+
+    for k in new_metric_collection.keys():
+        assert "new_prefix_" in k
+
+    for k, _ in new_metric_collection.items(keep_base=True):
+        assert "new_prefix_" not in k
+
+    for k in new_metric_collection.keys(keep_base=True):
+        assert "new_prefix_" not in k
+
+    assert isinstance(new_metric_collection.keys(keep_base=True), type(new_metric_collection.keys(keep_base=False)))
+    assert isinstance(new_metric_collection.items(keep_base=True), type(new_metric_collection.items(keep_base=False)))
+
+    new_metric_collection = new_metric_collection.clone(postfix="_new_postfix")
+    out = new_metric_collection(5)
+    names = [n[: -len(postfix)] if postfix is not None else n for n in names]  # strip away the old postfix
+    for name in names:
+        assert f"new_prefix_{name}_new_postfix" in out, "postfix argument not working as intended with clone method"
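+# [editor's sketch, illustrative only, not part of the original patch]
+# As exercised above, each metric's key in the output dict appears to be
+# built as f"{prefix}{base_name}{postfix}", where base_name is the class
+# name (or the dict key the metric was registered under), e.g.:
+#
+#     MetricCollection([DummyMetricSum()], prefix="prefix_", postfix="_postfix")(5)
+#     # -> a dict with the single key "prefix_DummyMetricSum_postfix"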
+def test_metric_collection_repr():
+    """Test the MetricCollection repr with and without prefix/postfix."""
+
+    class A(DummyMetricSum):
+        pass
+
+    class B(DummyMetricDiff):
+        pass
+
+    m1 = A()
+    m2 = B()
+    metric_collection = MetricCollection([m1, m2], prefix=None, postfix=None)
+
+    expected = "MetricCollection(\n (A): A()\n (B): B()\n)"
+    assert metric_collection.__repr__() == expected
+
+    metric_collection = MetricCollection([m1, m2], prefix="a", postfix=None)
+    expected = "MetricCollection(\n (A): A()\n (B): B(),\n prefix=a\n)"
+    assert metric_collection.__repr__() == expected
+
+    metric_collection = MetricCollection([m1, m2], prefix=None, postfix="a")
+    expected = "MetricCollection(\n (A): A()\n (B): B(),\n postfix=a\n)"
+    assert metric_collection.__repr__() == expected
+
+    metric_collection = MetricCollection([m1, m2], prefix="a", postfix="b")
+    expected = "MetricCollection(\n (A): A()\n (B): B(),\n prefix=a,\n postfix=b\n)"
+    assert metric_collection.__repr__() == expected
+
+
+def test_metric_collection_same_order():
+    m1 = DummyMetricSum()
+    m2 = DummyMetricDiff()
+    col1 = MetricCollection({"a": m1, "b": m2})
+    col2 = MetricCollection({"b": m2, "a": m1})
+    for k1, k2 in zip(col1.keys(), col2.keys()):
+        assert k1 == k2
+
+
+def test_collection_add_metrics():
+    m1 = DummyMetricSum()
+    m2 = DummyMetricDiff()
+
+    collection = MetricCollection([m1])
+    collection.add_metrics({"m1_": DummyMetricSum()})
+    collection.add_metrics(m2)
+
+    collection.update(5)
+    results = collection.compute()
+    assert results["DummyMetricSum"] == results["m1_"] and results["m1_"] == 5
+    assert results["DummyMetricDiff"] == -5
+
+
+def test_collection_check_arg():
+    assert MetricCollection._check_arg(None, "prefix") is None
+    assert MetricCollection._check_arg("sample", "prefix") == "sample"
+
+    with pytest.raises(ValueError, match="Expected input `postfix` to be a string, but got"):
+        MetricCollection._check_arg(1, "postfix")
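+# [editor's sketch, illustrative only, not part of the original patch]
+# add_metrics() accepts either a dict (explicit key) or a bare Metric (keyed
+# by its class name), which is why the collection above ends up with three
+# entries:
+#
+#     col = MetricCollection([DummyMetricSum()])   # {"DummyMetricSum": ...}
+#     col.add_metrics({"m1_": DummyMetricSum()})   # adds key "m1_"
+#     col.add_metrics(DummyMetricDiff())           # adds key "DummyMetricDiff"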
diff --git a/EE/paddlemetric/src/tests/bases/test_composition.py b/EE/paddlemetric/src/tests/bases/test_composition.py
new file mode 100644
index 000000000..0c9e6a080
--- /dev/null
+++ b/EE/paddlemetric/src/tests/bases/test_composition.py
@@ -0,0 +1,559 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from operator import neg, pos
+
+import pytest
+import paddleext.torchapi as B
+from paddleext.torchapi import tensor
+
+from ..helpers import _MARK_TORCH_MIN_1_4, _MARK_TORCH_MIN_1_5, _MARK_TORCH_MIN_1_6
+from paddlemetrics.metric import CompositionalMetric, Metric
+
+
+class DummyMetric(Metric):
+    def __init__(self, val_to_return):
+        super().__init__()
+        self._num_updates = 0
+        self._val_to_return = val_to_return
+        self._update_called = True
+
+    def update(self, *args, **kwargs) -> None:
+        self._num_updates += 1
+
+    def compute(self):
+        return tensor(self._val_to_return)
+
+    def reset(self):
+        self._num_updates = 0
+        return super().reset()
+
+
+@pytest.mark.parametrize(
+    ["second_operand", "expected_result"],
+    [
+        (DummyMetric(2), tensor(4)),
+        (2, tensor(4)),
+        (2.0, tensor(4.0)),
+        pytest.param(tensor(2), tensor(4), marks=pytest.mark.skipif(**_MARK_TORCH_MIN_1_4)),
+    ],
+)
+def test_metrics_add(second_operand, expected_result):
+    first_metric = DummyMetric(2)
+
+    final_add = first_metric + second_operand
+    final_radd = second_operand + first_metric
+
+    assert isinstance(final_add, CompositionalMetric)
+    assert isinstance(final_radd, CompositionalMetric)
+
+    final_add.update()
+    final_radd.update()
+
+    assert B.allclose(expected_result, final_add.compute())
+    assert B.allclose(expected_result, final_radd.compute())
+
+
+@pytest.mark.parametrize(
+    ["second_operand", "expected_result"],
+    [(DummyMetric(3), tensor(2)), (3, tensor(2)), (3, tensor(2)), (tensor(3), tensor(2))],
+)
+@pytest.mark.skipif(**_MARK_TORCH_MIN_1_5)
+def test_metrics_and(second_operand, expected_result):
+    first_metric = DummyMetric(2)
+
+    final_and = first_metric & second_operand
+    final_rand = second_operand & first_metric
+
+    assert isinstance(final_and, CompositionalMetric)
+    assert isinstance(final_rand, CompositionalMetric)
+
+    final_and.update()
+    final_rand.update()
+    assert B.allclose(expected_result, final_and.compute())
+    assert B.allclose(expected_result, final_rand.compute())
+
+
+@pytest.mark.parametrize(
+    ["second_operand", "expected_result"],
+    [
+        (DummyMetric(2), tensor(True)),
+        (2, tensor(True)),
+        (2.0, tensor(True)),
+        (tensor(2), tensor(True)),
+    ],
+)
+def test_metrics_eq(second_operand, expected_result):
+    first_metric = DummyMetric(2)
+
+    final_eq = first_metric == second_operand
+
+    assert isinstance(final_eq, CompositionalMetric)
+
+    final_eq.update()
+    # can't use allclose for bool tensors
+    assert (expected_result == final_eq.compute()).all()
+
+
+@pytest.mark.parametrize(
+    ["second_operand", "expected_result"],
+    [
+        (DummyMetric(2), tensor(2)),
+        (2, tensor(2)),
+        (2.0, tensor(2.0)),
+        (tensor(2), tensor(2)),
+    ],
+)
+@pytest.mark.skipif(**_MARK_TORCH_MIN_1_5)
+def test_metrics_floordiv(second_operand, expected_result):
+    first_metric = DummyMetric(5)
+
+    final_floordiv = first_metric // second_operand
+
+    assert isinstance(final_floordiv, CompositionalMetric)
+
+    final_floordiv.update()
+    assert B.allclose(expected_result, final_floordiv.compute())
+
+
+@pytest.mark.parametrize(
+    ["second_operand", "expected_result"],
+    [
+        (DummyMetric(2), tensor(True)),
+        (2, tensor(True)),
+        (2.0, tensor(True)),
+        (tensor(2), tensor(True)),
+    ],
+)
+def test_metrics_ge(second_operand, expected_result):
+    first_metric = DummyMetric(5)
+
+    final_ge = first_metric >= second_operand
+
+    assert isinstance(final_ge, CompositionalMetric)
+
+    final_ge.update()
+    # can't use allclose for bool tensors
+    assert (expected_result == final_ge.compute()).all()
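+# [editor's sketch, illustrative only, not part of the original patch]
+# These tests all share one pattern: applying a Python operator to a Metric
+# builds a CompositionalMetric that defers the operator, so update() is
+# forwarded to the wrapped metric(s) and compute() applies the operator to
+# their computed values, e.g.
+#
+#     composed = DummyMetric(5) >= 2   # CompositionalMetric
+#     composed.update()                # forwards to DummyMetric.update
+#     composed.compute()               # tensor(True)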
["second_operand", "expected_result"], + [ + (DummyMetric(2), tensor(True)), + (2, tensor(True)), + (2.0, tensor(True)), + (tensor(2), tensor(True)), + ], +) +def test_metrics_gt(second_operand, expected_result): + first_metric = DummyMetric(5) + + final_gt = first_metric > second_operand + + assert isinstance(final_gt, CompositionalMetric) + + final_gt.update() + # can't use allclose for bool tensors + assert (expected_result == final_gt.compute()).all() + + +@pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(2), tensor(False)), + (2, tensor(False)), + (2.0, tensor(False)), + (tensor(2), tensor(False)), + ], +) +def test_metrics_le(second_operand, expected_result): + first_metric = DummyMetric(5) + + final_le = first_metric <= second_operand + + assert isinstance(final_le, CompositionalMetric) + + final_le.update() + # can't use allclose for bool tensors + assert (expected_result == final_le.compute()).all() + + +@pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(2), tensor(False)), + (2, tensor(False)), + (2.0, tensor(False)), + (tensor(2), tensor(False)), + ], +) +def test_metrics_lt(second_operand, expected_result): + first_metric = DummyMetric(5) + + final_lt = first_metric < second_operand + + assert isinstance(final_lt, CompositionalMetric) + + final_lt.update() + # can't use allclose for bool tensors + assert (expected_result == final_lt.compute()).all() + + +@pytest.mark.parametrize( + ["second_operand", "expected_result"], + [(DummyMetric([2, 2, 2]), tensor(12)), (tensor([2, 2, 2]), tensor(12))], +) +def test_metrics_matmul(second_operand, expected_result): + first_metric = DummyMetric([2, 2, 2]) + + final_matmul = first_metric @ second_operand + + assert isinstance(final_matmul, CompositionalMetric) + + final_matmul.update() + assert B.allclose(expected_result, final_matmul.compute()) + + +@pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(2), tensor(1)), + (2, tensor(1)), + (2.0, tensor(1)), + (tensor(2), tensor(1)), + ], +) +def test_metrics_mod(second_operand, expected_result): + first_metric = DummyMetric(5) + + final_mod = first_metric % second_operand + + assert isinstance(final_mod, CompositionalMetric) + + final_mod.update() + # prevent Runtime error for PT 1.8 - Long did not match Float + assert B.allclose(expected_result.to(float), final_mod.compute().to(float)) + + +@pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(2), tensor(4)), + (2, tensor(4)), + (2.0, tensor(4.0)), + pytest.param(tensor(2), tensor(4), marks=pytest.mark.skipif(**_MARK_TORCH_MIN_1_4)), + ], +) +def test_metrics_mul(second_operand, expected_result): + first_metric = DummyMetric(2) + + final_mul = first_metric * second_operand + final_rmul = second_operand * first_metric + + assert isinstance(final_mul, CompositionalMetric) + assert isinstance(final_rmul, CompositionalMetric) + + final_mul.update() + final_rmul.update() + assert B.allclose(expected_result, final_mul.compute()) + assert B.allclose(expected_result, final_rmul.compute()) + + +@pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(2), tensor(False)), + (2, tensor(False)), + (2.0, tensor(False)), + (tensor(2), tensor(False)), + ], +) +def test_metrics_ne(second_operand, expected_result): + first_metric = DummyMetric(2) + + final_ne = first_metric != second_operand + + assert isinstance(final_ne, CompositionalMetric) + + final_ne.update() + # can't use allclose for bool 
tensors + assert (expected_result == final_ne.compute()).all() + + +@pytest.mark.parametrize( + ["second_operand", "expected_result"], + [(DummyMetric([1, 0, 3]), tensor([-1, -2, 3])), (tensor([1, 0, 3]), tensor([-1, -2, 3]))], +) +@pytest.mark.skipif(**_MARK_TORCH_MIN_1_5) +def test_metrics_or(second_operand, expected_result): + first_metric = DummyMetric([-1, -2, 3]) + + final_or = first_metric | second_operand + final_ror = second_operand | first_metric + + assert isinstance(final_or, CompositionalMetric) + assert isinstance(final_ror, CompositionalMetric) + + final_or.update() + final_ror.update() + assert B.allclose(expected_result, final_or.compute()) + assert B.allclose(expected_result, final_ror.compute()) + + +@pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + pytest.param(DummyMetric(2), tensor(4)), + pytest.param(2, tensor(4)), + pytest.param(2.0, tensor(4.0), marks=pytest.mark.skipif(**_MARK_TORCH_MIN_1_6)), + pytest.param(tensor(2), tensor(4)), + ], +) +def test_metrics_pow(second_operand, expected_result): + first_metric = DummyMetric(2) + + final_pow = first_metric ** second_operand + + assert isinstance(final_pow, CompositionalMetric) + + final_pow.update() + assert B.allclose(expected_result, final_pow.compute()) + + +@pytest.mark.parametrize( + ["first_operand", "expected_result"], + [(5, tensor(2)), (5.0, tensor(2.0)), (tensor(5), tensor(2))], +) +@pytest.mark.skipif(**_MARK_TORCH_MIN_1_5) +def test_metrics_rfloordiv(first_operand, expected_result): + second_operand = DummyMetric(2) + + final_rfloordiv = first_operand // second_operand + + assert isinstance(final_rfloordiv, CompositionalMetric) + + final_rfloordiv.update() + assert B.allclose(expected_result, final_rfloordiv.compute()) + + +@pytest.mark.parametrize( + ["first_operand", "expected_result"], + [pytest.param(tensor([2, 2, 2]), tensor(12), marks=pytest.mark.skipif(**_MARK_TORCH_MIN_1_4))], +) +def test_metrics_rmatmul(first_operand, expected_result): + second_operand = DummyMetric([2, 2, 2]) + + final_rmatmul = first_operand @ second_operand + + assert isinstance(final_rmatmul, CompositionalMetric) + + final_rmatmul.update() + assert B.allclose(expected_result, final_rmatmul.compute()) + + +@pytest.mark.parametrize( + ["first_operand", "expected_result"], + [pytest.param(tensor(2), tensor(2), marks=pytest.mark.skipif(**_MARK_TORCH_MIN_1_4))], +) +def test_metrics_rmod(first_operand, expected_result): + second_operand = DummyMetric(5) + + final_rmod = first_operand % second_operand + + assert isinstance(final_rmod, CompositionalMetric) + + final_rmod.update() + assert B.allclose(expected_result, final_rmod.compute()) + + +@pytest.mark.parametrize( + "first_operand,expected_result", + [ + pytest.param(DummyMetric(2), tensor(4)), + pytest.param(2, tensor(4)), + pytest.param(2.0, tensor(4.0), marks=pytest.mark.skipif(**_MARK_TORCH_MIN_1_6)), + ], +) +def test_metrics_rpow(first_operand, expected_result): + second_operand = DummyMetric(2) + + final_rpow = first_operand ** second_operand + + assert isinstance(final_rpow, CompositionalMetric) + final_rpow.update() + assert B.allclose(expected_result, final_rpow.compute()) + + +@pytest.mark.parametrize( + ["first_operand", "expected_result"], + [ + (DummyMetric(3), tensor(1)), + (3, tensor(1)), + (3.0, tensor(1.0)), + pytest.param(tensor(3), tensor(1), marks=pytest.mark.skipif(**_MARK_TORCH_MIN_1_4)), + ], +) +def test_metrics_rsub(first_operand, expected_result): + second_operand = DummyMetric(2) + + final_rsub = first_operand - second_operand 
+ + assert isinstance(final_rsub, CompositionalMetric) + final_rsub.update() + assert B.allclose(expected_result, final_rsub.compute()) + + +@pytest.mark.parametrize( + ["first_operand", "expected_result"], + [ + (DummyMetric(6), tensor(2.0)), + (6, tensor(2.0)), + (6.0, tensor(2.0)), + (tensor(6), tensor(2.0)), + ], +) +@pytest.mark.skipif(**_MARK_TORCH_MIN_1_5) +def test_metrics_rtruediv(first_operand, expected_result): + second_operand = DummyMetric(3) + + final_rtruediv = first_operand / second_operand + + assert isinstance(final_rtruediv, CompositionalMetric) + final_rtruediv.update() + assert B.allclose(expected_result, final_rtruediv.compute()) + + +@pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(2), tensor(1)), + (2, tensor(1)), + (2.0, tensor(1.0)), + (tensor(2), tensor(1)), + ], +) +def test_metrics_sub(second_operand, expected_result): + first_metric = DummyMetric(3) + + final_sub = first_metric - second_operand + + assert isinstance(final_sub, CompositionalMetric) + final_sub.update() + assert B.allclose(expected_result, final_sub.compute()) + + +@pytest.mark.parametrize( + ["second_operand", "expected_result"], + [ + (DummyMetric(3), tensor(2.0)), + (3, tensor(2.0)), + (3.0, tensor(2.0)), + (tensor(3), tensor(2.0)), + ], +) +@pytest.mark.skipif(**_MARK_TORCH_MIN_1_5) +def test_metrics_truediv(second_operand, expected_result): + first_metric = DummyMetric(6) + + final_truediv = first_metric / second_operand + + assert isinstance(final_truediv, CompositionalMetric) + final_truediv.update() + assert B.allclose(expected_result, final_truediv.compute()) + + +@pytest.mark.parametrize( + ["second_operand", "expected_result"], + [(DummyMetric([1, 0, 3]), tensor([-2, -2, 0])), (tensor([1, 0, 3]), tensor([-2, -2, 0]))], +) +@pytest.mark.skipif(**_MARK_TORCH_MIN_1_5) +def test_metrics_xor(second_operand, expected_result): + first_metric = DummyMetric([-1, -2, 3]) + + final_xor = first_metric ^ second_operand + final_rxor = second_operand ^ first_metric + + assert isinstance(final_xor, CompositionalMetric) + assert isinstance(final_rxor, CompositionalMetric) + + final_xor.update() + final_rxor.update() + assert B.allclose(expected_result, final_xor.compute()) + assert B.allclose(expected_result, final_rxor.compute()) + + +def test_metrics_abs(): + first_metric = DummyMetric(-1) + + final_abs = abs(first_metric) + + assert isinstance(final_abs, CompositionalMetric) + final_abs.update() + assert B.allclose(tensor(1), final_abs.compute()) + + +def test_metrics_invert(): + first_metric = DummyMetric(1) + + final_inverse = ~first_metric + assert isinstance(final_inverse, CompositionalMetric) + final_inverse.update() + assert B.allclose(tensor(-2), final_inverse.compute()) + + +def test_metrics_neg(): + first_metric = DummyMetric(1) + + final_neg = neg(first_metric) + assert isinstance(final_neg, CompositionalMetric) + final_neg.update() + assert B.allclose(tensor(-1), final_neg.compute()) + + +def test_metrics_pos(): + first_metric = DummyMetric(-1) + + final_pos = pos(first_metric) + assert isinstance(final_pos, CompositionalMetric) + final_pos.update() + assert B.allclose(tensor(1), final_pos.compute()) + + +@pytest.mark.parametrize( + ["value", "idx", "expected_result"], + [([1, 2, 3], 1, tensor(2)), ([[0, 1], [2, 3]], (1, 0), tensor(2)), ([[0, 1], [2, 3]], 1, tensor([2, 3]))], +) +def test_metrics_getitem(value, idx, expected_result): + first_metric = DummyMetric(value) + + final_getitem = first_metric[idx] + assert isinstance(final_getitem, 
CompositionalMetric) + final_getitem.update() + assert B.allclose(expected_result, final_getitem.compute()) + + +def test_compositional_metrics_update(): + + compos = DummyMetric(5) + DummyMetric(4) + + assert isinstance(compos, CompositionalMetric) + compos.update() + compos.update() + compos.update() + + assert isinstance(compos.metric_a, DummyMetric) + assert isinstance(compos.metric_b, DummyMetric) + + assert compos.metric_a._num_updates == 3 + assert compos.metric_b._num_updates == 3 diff --git a/EE/paddlemetric/src/tests/bases/test_ddp.py b/EE/paddlemetric/src/tests/bases/test_ddp.py new file mode 100644 index 000000000..7f713e1db --- /dev/null +++ b/EE/paddlemetric/src/tests/bases/test_ddp.py @@ -0,0 +1,241 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +import sys +from copy import deepcopy + +import pytest +import paddleext.torchapi as B +from paddleext.torchapi import tensor + +from tests.helpers import seed_all +from tests.helpers.testers import DummyMetric, DummyMetricSum, setup_ddp +from paddlemetrics import Metric +from paddlemetrics.utilities.distributed import gather_all_tensors +from paddlemetrics.utilities.exceptions import paddlemetricsUserError + +seed_all(42) + + +def _test_ddp_sum(rank, worldsize): + setup_ddp(rank, worldsize) + dummy = DummyMetric() + dummy._reductions = {"foo": B.sum} + dummy.foo = tensor(1) + dummy._sync_dist() + + assert dummy.foo == worldsize + + +def _test_ddp_cat(rank, worldsize): + setup_ddp(rank, worldsize) + dummy = DummyMetric() + dummy._reductions = {"foo": B.cat} + dummy.foo = [tensor([1])] + dummy._sync_dist() + + assert B.all(B.eq(dummy.foo, tensor([1, 1]))) + + +def _test_ddp_sum_cat(rank, worldsize): + setup_ddp(rank, worldsize) + dummy = DummyMetric() + dummy._reductions = {"foo": B.cat, "bar": B.sum} + dummy.foo = [tensor([1])] + dummy.bar = tensor(1) + dummy._sync_dist() + + assert B.all(B.eq(dummy.foo, tensor([1, 1]))) + assert dummy.bar == worldsize + + +def _test_ddp_gather_uneven_tensors(rank, worldsize): + setup_ddp(rank, worldsize) + tensor = B.ones(rank) + result = gather_all_tensors(tensor) + assert len(result) == worldsize + for idx in range(worldsize): + assert len(result[idx]) == idx + assert (result[idx] == B.ones_like(result[idx])).all() + + +def _test_ddp_gather_uneven_tensors_multidim(rank, worldsize): + setup_ddp(rank, worldsize) + tensor = B.ones(rank + 1, 2 - rank) + result = gather_all_tensors(tensor) + assert len(result) == worldsize + for idx in range(worldsize): + val = result[idx] + assert val.shape == (idx + 1, 2 - idx) + assert (val == B.ones_like(val)).all() + + +def _test_ddp_compositional_tensor(rank, worldsize): + setup_ddp(rank, worldsize) + dummy = DummyMetricSum() + dummy._reductions = {"x": B.sum} + dummy = dummy.clone() + dummy.clone() + dummy.update(tensor(1)) + val = dummy.compute() + assert val == 2 * worldsize + + +@pytest.mark.skipif(sys.platform == "win32", reason="DDP not available on windows") +@pytest.mark.parametrize( + "process", + [ + 
_test_ddp_cat,
+        _test_ddp_sum,
+        _test_ddp_sum_cat,
+        _test_ddp_gather_uneven_tensors,
+        _test_ddp_gather_uneven_tensors_multidim,
+        _test_ddp_compositional_tensor,
+    ],
+)
+def test_ddp(process):
+    B.multiprocessing.spawn(process, args=(2,), nprocs=2)
+
+
+def _test_non_contiguous_tensors(rank, worldsize):
+    setup_ddp(rank, worldsize)
+
+    class DummyCatMetric(Metric):
+        def __init__(self):
+            super().__init__()
+            self.add_state("x", default=[], dist_reduce_fx=None)
+
+        def update(self, x):
+            self.x.append(x)
+
+        def compute(self):
+            x = B.cat(self.x, dim=0)
+            return x.sum()
+
+    metric = DummyCatMetric()
+    metric.update(B.randn(10, 5)[:, 0])
+
+
+@pytest.mark.skipif(sys.platform == "win32", reason="DDP not available on windows")
+def test_non_contiguous_tensors():
+    """Test that the gather_all operation works for non-contiguous tensors."""
+    B.multiprocessing.spawn(_test_non_contiguous_tensors, args=(2,), nprocs=2)
+
+
+def _test_state_dict_is_synced(rank, worldsize, tmpdir):
+    setup_ddp(rank, worldsize)
+
+    class DummyCatMetric(Metric):
+        def __init__(self):
+            super().__init__()
+            self.add_state("x", B.tensor(0), dist_reduce_fx=B.sum)
+            self.add_state("c", B.tensor(0), dist_reduce_fx=B.sum)
+
+        def update(self, x):
+            self.x += x
+            self.c += 1
+
+        def compute(self):
+            return self.x // self.c
+
+        def __repr__(self):
+            return f"DummyCatMetric(x={self.x}, c={self.c})"
+
+    metric = DummyCatMetric()
+    metric.persistent(True)
+
+    def verify_metric(metric, i, world_size):
+        state_dict = metric.state_dict()
+        exp_sum = i * (i + 1) / 2
+        assert state_dict["x"] == exp_sum * world_size
+        assert metric.x == exp_sum * world_size
+        assert metric.c == (i + 1) * world_size
+        assert state_dict["c"] == metric.c
+
+    steps = 5
+    for i in range(steps):
+
+        if metric._is_synced:
+
+            with pytest.raises(paddlemetricsUserError, match="The Metric shouldn't be synced when performing"):
+                metric(i)
+
+            metric.unsync()
+
+        metric(i)
+
+        verify_metric(metric, i, 1)
+
+        metric.sync()
+        assert metric._is_synced
+
+        with pytest.raises(paddlemetricsUserError, match="The Metric has already been synced."):
+            metric.sync()
+
+        verify_metric(metric, i, 2)
+
+        metric.unsync()
+        assert not metric._is_synced
+
+        with pytest.raises(paddlemetricsUserError, match="The Metric has already been un-synced."):
+            metric.unsync()
+
+        with metric.sync_context():
+            assert metric._is_synced
+            verify_metric(metric, i, 2)
+
+        with metric.sync_context(should_unsync=False):
+            assert metric._is_synced
+            verify_metric(metric, i, 2)
+
+        assert metric._is_synced
+
+        metric.unsync()
+        assert not metric._is_synced
+
+        metric.sync()
+        cache = metric._cache
+        metric._cache = None
+
+        with pytest.raises(paddlemetricsUserError, match="The internal cache should exist to unsync the Metric."):
+            metric.unsync()
+
+        metric._cache = cache
+
+    def reload_state_dict(state_dict, expected_x, expected_c):
+        metric = DummyCatMetric()
+        metric.load_state_dict(state_dict)
+        assert metric.x == expected_x
+        assert metric.c == expected_c
+
+    reload_state_dict(deepcopy(metric.state_dict()), 20, 10)
+
+    metric.unsync()
+    reload_state_dict(deepcopy(metric.state_dict()), 10, 5)
+
+    metric.sync()
+
+    filepath = os.path.join(tmpdir, f"weights-{rank}.pt")
+
+    B.save(metric.state_dict(), filepath)
+
+    metric.unsync()
+    with metric.sync_context():
+        B.save(metric.state_dict(), filepath)
+
+
+@pytest.mark.skipif(sys.platform == "win32", reason="DDP not available on windows")
+def test_state_dict_is_synced(tmpdir):
+    """This test asserts that metrics are synced while creating the state
+    dict but restored afterwards so accumulation can continue."""
+    B.multiprocessing.spawn(_test_state_dict_is_synced, args=(2, tmpdir), nprocs=2)
diff --git a/EE/paddlemetric/src/tests/bases/test_hashing.py b/EE/paddlemetric/src/tests/bases/test_hashing.py
new file mode 100644
index 000000000..af77dbd1f
--- /dev/null
+++ b/EE/paddlemetric/src/tests/bases/test_hashing.py
@@ -0,0 +1,22 @@
+import pytest
+
+from tests.helpers.testers import DummyListMetric, DummyMetric
+
+
+@pytest.mark.parametrize(
+    "metric_cls",
+    [
+        DummyMetric,
+        DummyListMetric,
+    ],
+)
+def test_metric_hashing(metric_cls):
+    """Tests that hashes are different.
+
+    See the Metric's hash function for details on why this is required.
+    """
+    instance_1 = metric_cls()
+    instance_2 = metric_cls()
+
+    assert hash(instance_1) != hash(instance_2)
+    assert id(instance_1) != id(instance_2)
diff --git a/EE/paddlemetric/src/tests/bases/test_metric.py b/EE/paddlemetric/src/tests/bases/test_metric.py
new file mode 100644
index 000000000..a57eeb80e
--- /dev/null
+++ b/EE/paddlemetric/src/tests/bases/test_metric.py
@@ -0,0 +1,356 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import pickle
+from collections import OrderedDict
+
+import cloudpickle
+import numpy as np
+import pytest
+import pangu.core.backend as B
+from pangu.core.backend import Tensor, nn, tensor
+
+from tests.helpers import _LIGHTNING_GREATER_EQUAL_1_3, seed_all
+from tests.helpers.testers import DummyListMetric, DummyMetric, DummyMetricMultiOutput, DummyMetricSum
+from paddlemetrics.utilities.imports import _LIGHTNING_AVAILABLE, _TORCH_LOWER_1_6
+
+seed_all(42)
+
+
+def test_inherit():
+    DummyMetric()
+
+
+def test_add_state():
+    a = DummyMetric()
+
+    a.add_state("a", tensor(0), "sum")
+    assert a._reductions["a"](tensor([1, 1])) == 2
+
+    a.add_state("b", tensor(0), "mean")
+    assert np.allclose(a._reductions["b"](tensor([1.0, 2.0])).numpy(), 1.5)
+
+    a.add_state("c", tensor(0), "cat")
+    assert a._reductions["c"]([tensor([1]), tensor([1])]).shape == (2,)
+
+    with pytest.raises(ValueError):
+        a.add_state("d1", tensor(0), "xyz")
+
+    with pytest.raises(ValueError):
+        a.add_state("d2", tensor(0), 42)
+
+    with pytest.raises(ValueError):
+        a.add_state("d3", [tensor(0)], "sum")
+
+    with pytest.raises(ValueError):
+        a.add_state("d4", 42, "sum")
+
+    def custom_fx(_):
+        return -1
+
+    a.add_state("e", tensor(0), custom_fx)
+    assert a._reductions["e"](tensor([1, 1])) == -1
+
+
+def test_add_state_persistent():
+    a = DummyMetric()
+
+    a.add_state("a", tensor(0), "sum", persistent=True)
+    assert "a" in a.state_dict()
+
+    a.add_state("b", tensor(0), "sum", persistent=False)
+
+    if _TORCH_LOWER_1_6:
+        assert "b" not in a.state_dict()
+
+
+def test_reset():
+    class A(DummyMetric):
+        pass
+
+    class B(DummyListMetric):
+        pass
+
+    a = A()
+    assert a.x == 0
+    a.x = tensor(5)
+    a.reset()
+    assert a.x == 0
+
+    b = B()
+    assert isinstance(b.x, list) and len(b.x) == 0
+    b.x = tensor(5)
+    b.reset()
+    assert isinstance(b.x, list) and len(b.x) == 0
+
+
+def test_reset_compute():
+    a = DummyMetricSum()
+    assert a.x == 0
+    a.update(tensor(5))
+    assert a.compute() == 5
+    a.reset()
+    if not _LIGHTNING_AVAILABLE or _LIGHTNING_GREATER_EQUAL_1_3:
+        assert a.compute() == 0
+    else:
+        assert a.compute() == 5
+
+
+def test_update():
+    class A(DummyMetric):
+        def update(self, x):
+            self.x += x
+
+    a = A()
+    assert a.x == 0
+    assert a._computed is None
+    a.update(1)
+    assert a._computed is None
+    assert a.x == 1
+    a.update(2)
+    assert a.x == 3
+    assert a._computed is None
+
+
+def test_compute():
+    class A(DummyMetric):
+        def update(self, x):
+            self.x += x
+
+        def compute(self):
+            return self.x
+
+    a = A()
+    assert 0 == a.compute()
+    assert 0 == a.x
+    a.update(1)
+    assert a._computed is None
+    assert a.compute() == 1
+    assert a._computed == 1
+    a.update(2)
+    assert a._computed is None
+    assert a.compute() == 3
+    assert a._computed == 3
+
+    # called without update, should return cached value
+    a._computed = 5
+    assert a.compute() == 5
+
+
+def test_hash():
+    class A(DummyMetric):
+        pass
+
+    class B(DummyListMetric):
+        pass
+
+    a1 = A()
+    a2 = A()
+    assert hash(a1) != hash(a2)
+
+    b1 = B()
+    b2 = B()
+    assert hash(b1) != hash(b2)  # different ids
+    assert isinstance(b1.x, list) and len(b1.x) == 0
+    b1.x.append(tensor(5))
+    assert isinstance(hash(b1), int)  # <- check that nothing crashes
+    assert isinstance(b1.x, list) and len(b1.x) == 1
+    b2.x.append(tensor(5))
+    # Sanity:
+    assert isinstance(b2.x, list) and len(b2.x) == 1
+    # Now that they have tensor contents, they should have different hashes:
+    assert hash(b1) != hash(b2)
+
+
+def test_forward():
+    class A(DummyMetric):
+        def update(self, x):
+            self.x += x
+
+        def compute(self):
+            return self.x
+
+    a = A()
+    assert a(5) == 5
+    assert a._forward_cache == 5
+
+    assert a(8) == 8
+    assert a._forward_cache == 8
+
+    assert a.compute() == 13
+
+
+def test_pickle(tmpdir):
+    # doesn't test DDP
+    a = DummyMetricSum()
+    a.update(1)
+
+    metric_pickled = pickle.dumps(a)
+    metric_loaded = pickle.loads(metric_pickled)
+
+    assert metric_loaded.compute() == 1
+
+    metric_loaded.update(5)
+    assert metric_loaded.compute() == 6
+
+    metric_pickled = cloudpickle.dumps(a)
+    metric_loaded = cloudpickle.loads(metric_pickled)
+
+    assert metric_loaded.compute() == 1
+
+
+def test_state_dict(tmpdir):
+    """test that metric states can be removed and added to state dict."""
+    metric = DummyMetric()
+    assert metric.state_dict() == OrderedDict()
+    metric.persistent(True)
+    assert metric.state_dict() == OrderedDict(x=0)
+    metric.persistent(False)
+    assert metric.state_dict() == OrderedDict()
+
+
+def test_load_state_dict(tmpdir):
+    """test that metric states can be loaded with state dict."""
+    metric = DummyMetricSum()
+    metric.persistent(True)
+    metric.update(5)
+    loaded_metric = DummyMetricSum()
+    loaded_metric.load_state_dict(metric.state_dict())
+    assert loaded_metric.compute() == 5
+
+
+def test_child_metric_state_dict():
+    """test that child metric states will be added to parent state dict."""
+
+    class TestModule(nn.Module):
+        def __init__(self):
+            super().__init__()
+            self.metric = DummyMetric()
+            self.metric.add_state("a", tensor(0), persistent=True)
+            self.metric.add_state("b", [], persistent=True)
+            self.metric.register_buffer("c", tensor(0))
+
+    module = TestModule()
+    expected_state_dict = {
+        "metric.a": tensor(0),
+        "metric.b": [],
+        "metric.c": tensor(0),
+    }
+    assert module.state_dict() == expected_state_dict
+
+
+@pytest.mark.skipif(not B.cuda.is_available(), reason="Test requires GPU.")
+def test_device_and_dtype_transfer(tmpdir):
+    metric = DummyMetricSum()
+    assert metric.x.is_cuda is False
+    assert metric.device == B.device("cpu")
+    assert metric.x.dtype == B.float32
+
+    metric = metric.to(device="cuda")
+    assert metric.x.is_cuda
+    assert metric.device == B.device("cuda", index=0)
+
+    metric.set_dtype(B.double)
+    assert metric.x.dtype == B.float64
+    metric.reset()
+    assert metric.x.dtype == B.float64
+
+    metric.set_dtype(B.half)
+    assert metric.x.dtype == B.float16
+    metric.reset()
+    assert metric.x.dtype == B.float16
+
+
+def test_warning_on_compute_before_update():
+    """test that a warning is raised if the user tries to call compute before update."""
+    metric = DummyMetricSum()
+
+    # make sure everything is fine with forward
+    with pytest.warns(None) as record:
+        val = metric(1)
+    assert not record
+
+    metric.reset()
+
+    with pytest.warns(UserWarning, match=r"The ``compute`` method of metric .*"):
+        val = metric.compute()
+    assert val == 0.0
+
+    # after update things should be fine
+    metric.update(2.0)
+    with pytest.warns(None) as record:
+        val = metric.compute()
+    assert not record
+    assert val == 2.0
+
+
+def test_metric_scripts():
+    """test that metrics are scriptable."""
+    B.jit.script(DummyMetric())
+    B.jit.script(DummyMetricSum())
+
+
+def test_metric_forward_cache_reset():
+    """test that forward cache is reset when `reset` is called."""
+    metric = DummyMetricSum()
+    _ = metric(2.0)
+    assert metric._forward_cache == 2.0
+    metric.reset()
+    assert metric._forward_cache is None
+
+
+@pytest.mark.skipif(not B.cuda.is_available(), reason="Test requires GPU.")
+@pytest.mark.parametrize("metric_class", [DummyMetricSum, DummyMetricMultiOutput])
+def test_forward_and_compute_to_device(metric_class):
+    metric = metric_class()
+    metric(1)
+    metric.to(device="cuda")
+
+    assert metric._forward_cache is not None
+    is_cuda = (
+        metric._forward_cache[0].is_cuda if isinstance(metric._forward_cache, list) else metric._forward_cache.is_cuda
+    )
+    assert is_cuda, "forward cache was not moved to the correct device"
+
+    metric.compute()
+    assert metric._computed is not None
+    is_cuda = metric._computed[0].is_cuda if isinstance(metric._computed, list) else metric._computed.is_cuda
+    assert is_cuda, "computed result was not moved to the correct device"
+
+
+@pytest.mark.skipif(not B.cuda.is_available(), reason="Test requires GPU.")
+@pytest.mark.parametrize("metric_class", [DummyMetricSum, DummyMetricMultiOutput])
+def test_device_if_child_module(metric_class):
+    """Test that if a metric is a child module all values get moved to the correct device."""
+
+    class TestModule(nn.Module):
+        def __init__(self):
+            super().__init__()
+            self.metric = metric_class()
+            self.register_buffer("dummy", B.zeros(1))
+
+        @property
+        def device(self):
+            return self.dummy.device
+
+    module = TestModule()
+
+    assert module.device == module.metric.device
+    if isinstance(module.metric.x, Tensor):
+        assert module.device == module.metric.x.device
+
+    module.to(device="cuda")
+
+    assert module.device == module.metric.device
+    if isinstance(module.metric.x, Tensor):
+        assert module.device == module.metric.x.device
diff --git a/EE/paddlemetric/src/tests/classification/__init__.py b/EE/paddlemetric/src/tests/classification/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/EE/paddlemetric/src/tests/classification/inputs.py b/EE/paddlemetric/src/tests/classification/inputs.py
new file mode 100644
index 000000000..d0bf94885
--- /dev/null
+++ b/EE/paddlemetric/src/tests/classification/inputs.py
@@ -0,0 +1,125 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from collections import namedtuple
+
+import paddleext.torchapi as B
+
+from tests.helpers.testers import BATCH_SIZE, EXTRA_DIM, NUM_BATCHES, NUM_CLASSES
+
+Input = namedtuple("Input", ["preds", "target"])
+
+_input_binary_prob = Input(
+    preds=B.rand(NUM_BATCHES, BATCH_SIZE), target=B.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE))
+)
+
+_input_binary = Input(
+    preds=B.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)),
+    target=B.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)),
+)
+
+_input_binary_logits = Input(
+    preds=B.randn(NUM_BATCHES, BATCH_SIZE), target=B.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE))
+)
+
+_input_multilabel_prob = Input(
+    preds=B.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES),
+    target=B.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES)),
+)
+
+_input_multilabel_multidim_prob = Input(
+    preds=B.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES, EXTRA_DIM),
+    target=B.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES, EXTRA_DIM)),
+)
+
+_input_multilabel_logits = Input(
+    preds=B.randn(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES),
+    target=B.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES)),
+)
+
+_input_multilabel = Input(
+    preds=B.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES)),
+    target=B.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES)),
+)
+
+_input_multilabel_multidim = Input(
+    preds=B.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES, EXTRA_DIM)),
+    target=B.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES, EXTRA_DIM)),
+)
+
+# Generate a multilabel edge case, where nothing matches (scores are undefined)
+__temp_preds = B.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES))
+__temp_target = B.abs(__temp_preds - 1)
+
+_input_multilabel_no_match = Input(preds=__temp_preds, target=__temp_target)
+
+__mc_prob_logits = 10 * B.randn(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES)
+__mc_prob_preds = __mc_prob_logits.abs() / __mc_prob_logits.abs().sum(dim=2, keepdim=True)
+
+_input_multiclass_prob = Input(
+    preds=__mc_prob_preds, target=B.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE))
+)
+
+_input_multiclass_logits = Input(
+    preds=__mc_prob_logits, target=B.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE))
+)
+
+_input_multiclass = Input(
+    preds=B.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE)),
+    target=B.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE)),
+)
+
+__mdmc_prob_preds = B.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES, EXTRA_DIM)
+__mdmc_prob_preds = __mdmc_prob_preds / __mdmc_prob_preds.sum(dim=2, keepdim=True)
+
+_input_multidim_multiclass_prob = Input(
+    preds=__mdmc_prob_preds, target=B.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM))
+)
+
+_input_multidim_multiclass = Input(
+    preds=B.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)),
+    target=B.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)),
+)
+
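+# Note: the multiclass probability inputs above (`__mc_prob_preds`,
+# `__mdmc_prob_preds`) are normalised over the class dimension (dim=2), so
+# each prediction distribution sums to one; for example,
+# B.allclose(__mdmc_prob_preds.sum(dim=2), B.ones(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM))
+# should hold.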
+ +# Generate plausible-looking inputs +def generate_plausible_inputs_multilabel(num_classes=NUM_CLASSES, num_batches=NUM_BATCHES, batch_size=BATCH_SIZE): + correct_targets = B.randint(high=num_classes, size=(num_batches, batch_size)) + preds = B.rand(num_batches, batch_size, num_classes) + targets = B.zeros_like(preds, dtype=B.long) + for i in range(preds.shape[0]): + for j in range(preds.shape[1]): + targets[i, j, correct_targets[i, j]] = 1 + preds += B.rand(num_batches, batch_size, num_classes) * targets / 3 + + preds = preds / preds.sum(dim=2, keepdim=True) + + return Input(preds=preds, target=targets) + + +def generate_plausible_inputs_binary(num_batches=NUM_BATCHES, batch_size=BATCH_SIZE): + targets = B.randint(high=2, size=(num_batches, batch_size)) + preds = B.rand(num_batches, batch_size) + B.rand(num_batches, batch_size) * targets / 3 + return Input(preds=preds / (preds.max() + 0.01), target=targets) + + +_input_multilabel_prob_plausible = generate_plausible_inputs_multilabel() + +_input_binary_prob_plausible = generate_plausible_inputs_binary() + +# randomly remove one class from the input +_temp = B.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE)) +_class_remove, _class_replace = B.multinomial(B.ones(NUM_CLASSES), num_samples=2, replacement=False) +_temp[_temp == _class_remove] = _class_replace + +_input_multiclass_with_missing_class = Input(_temp.clone(), _temp.clone()) diff --git a/EE/paddlemetric/src/tests/classification/test.log b/EE/paddlemetric/src/tests/classification/test.log new file mode 100644 index 000000000..3ea5a1d07 --- /dev/null +++ b/EE/paddlemetric/src/tests/classification/test.log @@ -0,0 +1,451 @@ +============================= test session starts ============================== +platform darwin -- Python 3.8.12, pytest-7.1.2, pluggy-1.0.0 +rootdir: /Users/sun/Projects/oix/baidu/ccl/paddlemetric/src/tests/classification +plugins: hydra-core-1.1.0.dev5 +collected 816 items + +test_f_beta.py ..................ssssssssssssssssss..ssssssssssssssssss..ssssssssssssssssss..............................................................................................................ssssssssss................ss..................ss..................ss..................ss....ssssssssssssssssss..ssssssssssssssssss..ssssssssssssssssss..............................................................................................................ssssssssss................ss..................ss..................ss..................ss....ssssssssssssssssss..ssssssssssssssssss..ssssssssssssssssss..............................................................................................................ssssssssss................ss..................ss..................ss..................ss../Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/comet_ml/monkey_patching.py:19: DeprecationWarning: the imp module is deprecated in favour of importlib; see the module's documentation for alternative uses + import imp +/Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/comet_ml/monkey_patching.py:19: DeprecationWarning: the imp module is deprecated in favour of importlib; see the module's documentation for alternative uses + import imp +.................... 
+
+=============================== warnings summary ===============================
+../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:19
+  /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:19: DeprecationWarning: Call to deprecated create function FileDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool.
+    DESCRIPTOR = _descriptor.FileDescriptor(
+
+[... the remaining entries repeat the same DeprecationWarning for every descriptor created in paddle/fluid/proto/framework_pb2.py (FileDescriptor, EnumDescriptor, EnumValueDescriptor, Descriptor, FieldDescriptor), differing only in the source line ...]
+ _OPDESC_ATTR = _descriptor.Descriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:458 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:458: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:465 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:465: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:451 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:451: DeprecationWarning: Call to deprecated create function Descriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _OPDESC_VAR = _descriptor.Descriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:495 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:495: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:502 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:502: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:509 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:509: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:516 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:516: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:523 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:523: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. 
Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:488 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:488: DeprecationWarning: Call to deprecated create function Descriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _OPDESC = _descriptor.Descriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:554 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:554: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:561 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:561: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:568 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:568: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:575 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:575: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:582 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:582: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:589 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:589: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:596 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:596: DeprecationWarning: Call to deprecated create function FieldDescriptor(). 
Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:547 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:547: DeprecationWarning: Call to deprecated create function Descriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _OPPROTO_VAR = _descriptor.Descriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:626 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:626: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:633 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:633: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:640 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:640: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:647 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:647: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:654 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:654: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:661 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:661: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. 
+ _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:619 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:619: DeprecationWarning: Call to deprecated create function Descriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _OPPROTO_ATTR = _descriptor.Descriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:691 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:691: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:698 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:698: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:705 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:705: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:712 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:712: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:719 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:719: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:684 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:684: DeprecationWarning: Call to deprecated create function Descriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _OPPROTO = _descriptor.Descriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:750 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:750: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. 
Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:757 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/proto/framework_pb2.py:757: DeprecationWarning: Call to deprecated create function FieldDescriptor(). Note: Create unlinked descriptors is going to go away. Please use get/find descriptors from generated code or query the descriptor_pool. + _descriptor.FieldDescriptor( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/comet_ml/monkey_patching.py:19 + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/comet_ml/monkey_patching.py:19: DeprecationWarning: the imp module is deprecated in favour of importlib; see the module's documentation for alternative uses + import imp + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/fluid/dygraph/math_op_patch.py:276: 1 warning +test_f_beta.py: 3304 warnings + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/dygraph/math_op_patch.py:276: UserWarning: The dtype of left and right variables are not the same, left dtype is paddle.float32, but right dtype is paddle.int64, the right dtype will convert to paddle.float32 + warnings.warn( + +../../../../../../../.envs/oix/lib/python3.8/site-packages/paddle/tensor/creation.py:125: 1 warning +test_f_beta.py: 326 warnings + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/tensor/creation.py:125: DeprecationWarning: `np.object` is a deprecated alias for the builtin `object`. To silence this warning, use `object` by itself. Doing this will not modify any behavior and is safe. + Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations + if data.dtype == np.object: + +test_f_beta.py: 2012 warnings + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/layers/tensor.py:657: UserWarning: paddle.assign doesn't support float64 input now due to current platform protobuf data limitation, we convert it to float32 + warnings.warn( + +test_f_beta.py: 13026 warnings + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/framework.py:1104: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here. 
+ Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations + elif dtype == np.bool: + +test_f_beta.py: 794 warnings + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/dygraph/math_op_patch.py:276: UserWarning: The dtype of left and right variables are not the same, left dtype is paddle.float32, but right dtype is paddle.int32, the right dtype will convert to paddle.float32 + warnings.warn( + +test_f_beta.py: 792 warnings + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/dygraph/math_op_patch.py:276: UserWarning: The dtype of left and right variables are not the same, left dtype is paddle.int64, but right dtype is paddle.int32, the right dtype will convert to paddle.int64 + warnings.warn( + +test_f_beta.py: 792 warnings + /Users/sun/Projects/.envs/oix/lib/python3.8/site-packages/paddle/fluid/dygraph/math_op_patch.py:276: UserWarning: The dtype of left and right variables are not the same, left dtype is paddle.int64, but right dtype is paddle.float32, the right dtype will convert to paddle.int64 + warnings.warn( + +-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html +========= 600 passed, 216 skipped, 21149 warnings in 85.12s (0:01:25) ========== diff --git a/EE/paddlemetric/src/tests/classification/test_accuracy.py b/EE/paddlemetric/src/tests/classification/test_accuracy.py new file mode 100644 index 000000000..26ec78765 --- /dev/null +++ b/EE/paddlemetric/src/tests/classification/test_accuracy.py @@ -0,0 +1,362 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
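+# ---------------------------------------------------------------------------
+# Illustrative sketch (not part of the test suite): a minimal look at the two
+# `Accuracy` behaviours exercised below, `subset_accuracy` and `top_k`. The
+# tensors and the expected values in the comments are hand-worked assumptions
+# based on the semantics these tests check, not fixtures from
+# `tests.classification`. The block only runs when this file is executed
+# directly, so pytest collection is unaffected.
+# ---------------------------------------------------------------------------
+if __name__ == "__main__":
+    import paddleext.torchapi as B
+    from paddlemetrics import Accuracy
+
+    # Multilabel input: with subset_accuracy=True a sample counts as correct
+    # only when every label matches after thresholding at the default 0.5.
+    ml_preds = B.tensor([[0.9, 0.1, 0.8], [0.2, 0.7, 0.6]])
+    ml_target = B.tensor([[1, 0, 1], [0, 1, 0]])
+    print(Accuracy(subset_accuracy=True)(ml_preds, ml_target))  # expect 0.5
+    print(Accuracy()(ml_preds, ml_target))  # expect 5/6, labels scored individually
+
+    # Multiclass probabilities: with top_k=2 a prediction is correct when the
+    # target class is among the two highest-scoring classes.
+    mc_preds = B.tensor([[0.1, 0.2, 0.3, 0.4], [0.1, 0.2, 0.3, 0.4]])
+    mc_target = B.tensor([3, 2])
+    print(Accuracy(top_k=2)(mc_preds, mc_target))  # expect 1.0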
+from functools import partial + +import numpy as np +import pytest +import paddleext.torchapi as B +from sklearn.metrics import accuracy_score as sk_accuracy +from paddleext.torchapi import tensor + +from tests.classification.inputs import _input_binary, _input_binary_logits, _input_binary_prob +from tests.classification.inputs import _input_multiclass as _input_mcls +from tests.classification.inputs import _input_multiclass_logits as _input_mcls_logits +from tests.classification.inputs import _input_multiclass_prob as _input_mcls_prob +from tests.classification.inputs import _input_multiclass_with_missing_class as _input_miss_class +from tests.classification.inputs import _input_multidim_multiclass as _input_mdmc +from tests.classification.inputs import _input_multidim_multiclass_prob as _input_mdmc_prob +from tests.classification.inputs import _input_multilabel as _input_mlb +from tests.classification.inputs import _input_multilabel_logits as _input_mlb_logits +from tests.classification.inputs import _input_multilabel_multidim as _input_mlmd +from tests.classification.inputs import _input_multilabel_multidim_prob as _input_mlmd_prob +from tests.classification.inputs import _input_multilabel_prob as _input_mlb_prob +from tests.helpers import seed_all +from tests.helpers.testers import NUM_BATCHES, NUM_CLASSES, THRESHOLD, MetricTester +from paddlemetrics import Accuracy +from paddlemetrics.functional import accuracy +from paddlemetrics.utilities.checks import _input_format_classification +from paddlemetrics.utilities.enums import AverageMethod, DataType + +seed_all(42) + + +def _sk_accuracy(preds, target, subset_accuracy): + sk_preds, sk_target, mode = _input_format_classification(preds, target, threshold=THRESHOLD) + sk_preds, sk_target = sk_preds.numpy(), sk_target.numpy() + + if mode == DataType.MULTIDIM_MULTICLASS and not subset_accuracy: + sk_preds, sk_target = np.transpose(sk_preds, (0, 2, 1)), np.transpose(sk_target, (0, 2, 1)) + sk_preds, sk_target = sk_preds.reshape(-1, sk_preds.shape[2]), sk_target.reshape(-1, sk_target.shape[2]) + elif mode == DataType.MULTIDIM_MULTICLASS and subset_accuracy: + return np.all(sk_preds == sk_target, axis=(1, 2)).mean() + elif mode == DataType.MULTILABEL and not subset_accuracy: + sk_preds, sk_target = sk_preds.reshape(-1), sk_target.reshape(-1) + + return sk_accuracy(y_true=sk_target, y_pred=sk_preds) + + +@pytest.mark.parametrize( + "preds, target, subset_accuracy", + [ + (_input_binary_logits.preds, _input_binary_logits.target, False), + (_input_binary_prob.preds, _input_binary_prob.target, False), + (_input_binary.preds, _input_binary.target, False), + (_input_mlb_prob.preds, _input_mlb_prob.target, True), + (_input_mlb_logits.preds, _input_mlb_logits.target, False), + (_input_mlb_prob.preds, _input_mlb_prob.target, False), + (_input_mlb.preds, _input_mlb.target, True), + (_input_mlb.preds, _input_mlb.target, False), + (_input_mcls_prob.preds, _input_mcls_prob.target, False), + (_input_mcls_logits.preds, _input_mcls_logits.target, False), + (_input_mcls.preds, _input_mcls.target, False), + (_input_mdmc_prob.preds, _input_mdmc_prob.target, False), + (_input_mdmc_prob.preds, _input_mdmc_prob.target, True), + (_input_mdmc.preds, _input_mdmc.target, False), + (_input_mdmc.preds, _input_mdmc.target, True), + (_input_mlmd_prob.preds, _input_mlmd_prob.target, True), + (_input_mlmd_prob.preds, _input_mlmd_prob.target, False), + (_input_mlmd.preds, _input_mlmd.target, True), + (_input_mlmd.preds, _input_mlmd.target, False), + ], +) +class 
TestAccuracies(MetricTester): + @pytest.mark.parametrize("ddp", [False]) + @pytest.mark.parametrize("dist_sync_on_step", [False]) + def test_accuracy_class(self, ddp, dist_sync_on_step, preds, target, subset_accuracy): + self.run_class_metric_test( + ddp=ddp, + preds=preds, + target=target, + metric_class=Accuracy, + sk_metric=partial(_sk_accuracy, subset_accuracy=subset_accuracy), + dist_sync_on_step=dist_sync_on_step, + metric_args={"threshold": THRESHOLD, "subset_accuracy": subset_accuracy}, + ) + + def test_accuracy_fn(self, preds, target, subset_accuracy): + self.run_functional_metric_test( + preds, + target, + metric_functional=accuracy, + sk_metric=partial(_sk_accuracy, subset_accuracy=subset_accuracy), + metric_args={"threshold": THRESHOLD, "subset_accuracy": subset_accuracy}, + ) + + def test_accuracy_differentiability(self, preds, target, subset_accuracy): + self.run_differentiability_test( + preds=preds, + target=target, + metric_module=Accuracy, + metric_functional=accuracy, + metric_args={"threshold": THRESHOLD, "subset_accuracy": subset_accuracy}, + ) + + +_l1to4 = [0.1, 0.2, 0.3, 0.4] +_l1to4t3 = np.array([_l1to4, _l1to4, _l1to4]) +_l1to4t3_mcls = [_l1to4t3.T, _l1to4t3.T, _l1to4t3.T] + +# The preds in these examples always put highest probability on class 3, second highest on class 2, +# third highest on class 1, and lowest on class 0 +_topk_preds_mcls = tensor([_l1to4t3, _l1to4t3]).float() +_topk_target_mcls = tensor([[1, 2, 3], [2, 1, 0]]) + +# This is like for MC case, but one sample in each batch is sabotaged with 0 class prediction :) +_topk_preds_mdmc = tensor([_l1to4t3_mcls, _l1to4t3_mcls]).float() +_topk_target_mdmc = tensor([[[1, 1, 0], [2, 2, 2], [3, 3, 3]], [[2, 2, 0], [1, 1, 1], [0, 0, 0]]]) + +# Multilabel +_ml_t1 = [0.8, 0.2, 0.8, 0.2] +_ml_t2 = [_ml_t1, _ml_t1] +_ml_ta2 = [[1, 0, 1, 1], [0, 1, 1, 0]] +_av_preds_ml = tensor([_ml_t2, _ml_t2]).float() +_av_target_ml = tensor([_ml_ta2, _ml_ta2]) + + +# Replace with a proper sk_metric test once sklearn 0.24 hits :) +@pytest.mark.parametrize( + "preds, target, exp_result, k, subset_accuracy", + [ + (_topk_preds_mcls, _topk_target_mcls, 1 / 6, 1, False), + (_topk_preds_mcls, _topk_target_mcls, 3 / 6, 2, False), + (_topk_preds_mcls, _topk_target_mcls, 5 / 6, 3, False), + (_topk_preds_mcls, _topk_target_mcls, 1 / 6, 1, True), + (_topk_preds_mcls, _topk_target_mcls, 3 / 6, 2, True), + (_topk_preds_mcls, _topk_target_mcls, 5 / 6, 3, True), + (_topk_preds_mdmc, _topk_target_mdmc, 1 / 6, 1, False), + (_topk_preds_mdmc, _topk_target_mdmc, 8 / 18, 2, False), + (_topk_preds_mdmc, _topk_target_mdmc, 13 / 18, 3, False), + (_topk_preds_mdmc, _topk_target_mdmc, 1 / 6, 1, True), + (_topk_preds_mdmc, _topk_target_mdmc, 2 / 6, 2, True), + (_topk_preds_mdmc, _topk_target_mdmc, 3 / 6, 3, True), + (_av_preds_ml, _av_target_ml, 5 / 8, None, False), + (_av_preds_ml, _av_target_ml, 0, None, True), + ], +) +def test_topk_accuracy(preds, target, exp_result, k, subset_accuracy): + topk = Accuracy(top_k=k, subset_accuracy=subset_accuracy) + + for batch in range(preds.shape[0]): + topk(preds[batch], target[batch]) + + assert topk.compute() == exp_result + + # Test functional + total_samples = target.shape[0] * target.shape[1] + + preds = preds.view(total_samples, 4, -1) + target = target.view(total_samples, -1) + + assert accuracy(preds, target, top_k=k, subset_accuracy=subset_accuracy) == exp_result + + +# Only MC and MDMC with probs input type should be accepted for top_k +@pytest.mark.parametrize( + "preds, target", + [ + 
(_input_binary_prob.preds, _input_binary_prob.target),
+        (_input_binary.preds, _input_binary.target),
+        (_input_mlb_prob.preds, _input_mlb_prob.target),
+        (_input_mlb.preds, _input_mlb.target),
+        (_input_mcls.preds, _input_mcls.target),
+        (_input_mdmc.preds, _input_mdmc.target),
+        (_input_mlmd_prob.preds, _input_mlmd_prob.target),
+        (_input_mlmd.preds, _input_mlmd.target),
+    ],
+)
+def test_topk_accuracy_wrong_input_types(preds, target):
+    topk = Accuracy(top_k=1)
+
+    with pytest.raises(ValueError):
+        topk(preds[0], target[0])
+
+    with pytest.raises(ValueError):
+        accuracy(preds[0], target[0], top_k=1)
+
+
+@pytest.mark.parametrize(
+    "average, mdmc_average, num_classes, inputs, ignore_index, top_k, threshold",
+    [
+        ("unknown", None, None, _input_binary, None, None, 0.5),
+        ("micro", "unknown", None, _input_binary, None, None, 0.5),
+        ("macro", None, None, _input_binary, None, None, 0.5),
+        ("micro", None, None, _input_mdmc_prob, None, None, 0.5),
+        ("micro", None, None, _input_binary_prob, 0, None, 0.5),
+        ("micro", None, None, _input_mcls_prob, NUM_CLASSES, None, 0.5),
+        ("micro", None, NUM_CLASSES, _input_mcls_prob, NUM_CLASSES, None, 0.5),
+        (None, None, None, _input_mcls_prob, None, 0, 0.5),
+        (None, None, None, _input_mcls_prob, None, None, 1.5),
+    ],
+)
+def test_wrong_params(average, mdmc_average, num_classes, inputs, ignore_index, top_k, threshold):
+    preds, target = inputs.preds, inputs.target
+
+    with pytest.raises(ValueError):
+        acc = Accuracy(
+            average=average,
+            mdmc_average=mdmc_average,
+            num_classes=num_classes,
+            ignore_index=ignore_index,
+            threshold=threshold,
+            top_k=top_k,
+        )
+        acc(preds[0], target[0])
+        acc.compute()
+
+    with pytest.raises(ValueError):
+        accuracy(
+            preds[0],
+            target[0],
+            average=average,
+            mdmc_average=mdmc_average,
+            num_classes=num_classes,
+            ignore_index=ignore_index,
+            threshold=threshold,
+            top_k=top_k,
+        )
+
+
+@pytest.mark.parametrize(
+    "preds_mc, target_mc, preds_ml, target_ml",
+    [
+        (
+            tensor([0, 1, 1, 1]),
+            tensor([2, 2, 1, 1]),
+            tensor([[0.8, 0.2, 0.8, 0.7], [0.6, 0.4, 0.6, 0.5]]),
+            tensor([[1, 0, 1, 1], [0, 0, 1, 0]]),
+        )
+    ],
+)
+def test_different_modes(preds_mc, target_mc, preds_ml, target_ml):
+    acc = Accuracy()
+    acc(preds_mc, target_mc)
+    with pytest.raises(ValueError, match="You cannot use"):
+        acc(preds_ml, target_ml)
+
+
+_bin_t1 = [0.7, 0.6, 0.2, 0.1]
+_av_preds_bin = tensor([_bin_t1, _bin_t1]).float()
+_av_target_bin = tensor([[1, 0, 0, 0], [0, 1, 1, 0]])
+
+
+@pytest.mark.parametrize(
+    "preds, target, num_classes, exp_result, average, mdmc_average",
+    [
+        (_topk_preds_mcls, _topk_target_mcls, 4, 1 / 4, "macro", None),
+        (_topk_preds_mcls, _topk_target_mcls, 4, 1 / 6, "weighted", None),
+        (_topk_preds_mcls, _topk_target_mcls, 4, [0.0, 0.0, 0.0, 1.0], "none", None),
+        (_topk_preds_mcls, _topk_target_mcls, 4, 1 / 6, "samples", None),
+        (_topk_preds_mdmc, _topk_target_mdmc, 4, 1 / 24, "macro", "samplewise"),
+        (_topk_preds_mdmc, _topk_target_mdmc, 4, 1 / 6, "weighted", "samplewise"),
+        (_topk_preds_mdmc, _topk_target_mdmc, 4, [0.0, 0.0, 0.0, 1 / 6], "none", "samplewise"),
+        (_topk_preds_mdmc, _topk_target_mdmc, 4, 1 / 6, "samples", "samplewise"),
+        (_topk_preds_mdmc, _topk_target_mdmc, 4, 1 / 6, "samples", "global"),
+        (_av_preds_ml, _av_target_ml, 4, 5 / 8, "macro", None),
+        (_av_preds_ml, _av_target_ml, 4, 0.70000005, "weighted", None),
+        (_av_preds_ml, _av_target_ml, 4, [1 / 2, 1 / 2, 1.0, 1 / 2], "none", None),
+        (_av_preds_ml, _av_target_ml, 4, 5 / 8, "samples", None),
+    ],
+)
+def
test_average_accuracy(preds, target, num_classes, exp_result, average, mdmc_average): + acc = Accuracy(num_classes=num_classes, average=average, mdmc_average=mdmc_average) + + for batch in range(preds.shape[0]): + acc(preds[batch], target[batch]) + + assert B.allclose(acc.compute(), tensor(exp_result)) + + # Test functional + total_samples = target.shape[0] * target.shape[1] + + preds = preds.view(total_samples, num_classes, -1) + target = target.view(total_samples, -1) + + acc_score = accuracy(preds, target, num_classes=num_classes, average=average, mdmc_average=mdmc_average) + assert B.allclose(acc_score, tensor(exp_result)) + + +@pytest.mark.parametrize( + "preds, target, num_classes, exp_result, average, multiclass", + [ + (_av_preds_bin, _av_target_bin, 2, 19 / 30, "macro", True), + (_av_preds_bin, _av_target_bin, 2, 5 / 8, "weighted", True), + (_av_preds_bin, _av_target_bin, 2, [3 / 5, 2 / 3], "none", True), + (_av_preds_bin, _av_target_bin, 2, 5 / 8, "samples", True), + ], +) +def test_average_accuracy_bin(preds, target, num_classes, exp_result, average, multiclass): + acc = Accuracy(num_classes=num_classes, average=average, multiclass=multiclass) + + for batch in range(preds.shape[0]): + acc(preds[batch], target[batch]) + + assert (acc.compute() == tensor(exp_result)).all() + + # Test functional + total_samples = target.shape[0] * target.shape[1] + + preds = preds.view(total_samples, -1) + target = target.view(total_samples, -1) + acc_score = accuracy(preds, target, num_classes=num_classes, average=average, multiclass=multiclass) + assert (acc_score == tensor(exp_result)).all() + + +@pytest.mark.parametrize("metric_class, metric_fn", [(Accuracy, accuracy)]) +@pytest.mark.parametrize( + "ignore_index, expected", [(None, B.tensor([1.0, np.nan])), (0, B.tensor([np.nan, np.nan]))] +) +def test_class_not_present(metric_class, metric_fn, ignore_index, expected): + """This tests that when metric is computed per class and a given class is not present in both the `preds` and + `target`, the resulting score is `nan`.""" + preds = B.tensor([0, 0, 0]) + target = B.tensor([0, 0, 0]) + num_classes = 2 + + # test functional + result_fn = metric_fn(preds, target, average=AverageMethod.NONE, num_classes=num_classes, ignore_index=ignore_index) + assert B.allclose(expected, result_fn, equal_nan=True) + + # test class + cl_metric = metric_class(average=AverageMethod.NONE, num_classes=num_classes, ignore_index=ignore_index) + cl_metric(preds, target) + result_cl = cl_metric.compute() + assert B.allclose(expected, result_cl, equal_nan=True) + + +@pytest.mark.parametrize("average", ["micro", "macro", "weighted"]) +def test_same_input(average): + preds = _input_miss_class.preds + target = _input_miss_class.target + preds_flat = B.cat(list(preds), dim=0) + target_flat = B.cat(list(target), dim=0) + + mc = Accuracy(num_classes=NUM_CLASSES, average=average) + for i in range(NUM_BATCHES): + mc.update(preds[i], target[i]) + class_res = mc.compute() + func_res = accuracy(preds_flat, target_flat, num_classes=NUM_CLASSES, average=average) + sk_res = sk_accuracy(target_flat, preds_flat) + + assert B.allclose(class_res, B.tensor(sk_res).float()) + assert B.allclose(func_res, B.tensor(sk_res).float()) diff --git a/EE/paddlemetric/src/tests/classification/test_auc.py b/EE/paddlemetric/src/tests/classification/test_auc.py new file mode 100644 index 000000000..df6e3ff76 --- /dev/null +++ b/EE/paddlemetric/src/tests/classification/test_auc.py @@ -0,0 +1,106 @@ +# Copyright The PyTorch Lightning team. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from collections import namedtuple
+from functools import partial
+
+import numpy as np
+import pytest
+from sklearn.metrics import auc as _sk_auc
+from paddleext.torchapi import tensor
+
+from tests.helpers import seed_all
+from tests.helpers.testers import NUM_BATCHES, MetricTester
+from paddlemetrics.classification.auc import AUC
+from paddlemetrics.functional import auc
+
+seed_all(42)
+
+
+def sk_auc(x, y, reorder=False):
+    x = x.flatten()
+    y = y.flatten()
+    if reorder:
+        idx = np.argsort(x, kind="stable")
+        x = x[idx]
+        y = y[idx]
+    return _sk_auc(x, y)
+
+
+Input = namedtuple("Input", ["x", "y"])
+
+_examples = []
+# generate already ordered samples, sorted in both directions
+for batch_size in (8, 4049):
+    for i in range(4):
+        x = np.random.rand(NUM_BATCHES * batch_size)
+        y = np.random.rand(NUM_BATCHES * batch_size)
+        idx = np.argsort(x, kind="stable")
+        x = x[idx] if i % 2 == 0 else x[idx[::-1]]
+        y = y[idx] if i % 2 == 0 else y[idx[::-1]]
+        x = x.reshape(NUM_BATCHES, batch_size)
+        y = y.reshape(NUM_BATCHES, batch_size)
+        _examples.append(Input(x=tensor(x), y=tensor(y)))
+
+
+@pytest.mark.parametrize("x, y", _examples)
+class TestAUC(MetricTester):
+    @pytest.mark.parametrize("ddp", [False])
+    @pytest.mark.parametrize("dist_sync_on_step", [True, False])
+    def test_auc(self, x, y, ddp, dist_sync_on_step):
+        self.run_class_metric_test(
+            ddp=ddp,
+            preds=x,
+            target=y,
+            metric_class=AUC,
+            sk_metric=sk_auc,
+            dist_sync_on_step=dist_sync_on_step,
+        )
+
+    @pytest.mark.parametrize("reorder", [True, False])
+    def test_auc_functional(self, x, y, reorder):
+        self.run_functional_metric_test(
+            x, y, metric_functional=auc, sk_metric=partial(sk_auc, reorder=reorder), metric_args={"reorder": reorder}
+        )
+
+    @pytest.mark.parametrize("reorder", [True, False])
+    def test_auc_differentiability(self, x, y, reorder):
+        self.run_differentiability_test(
+            preds=x, target=y, metric_module=AUC, metric_functional=auc, metric_args={"reorder": reorder}
+        )
+
+
+@pytest.mark.parametrize("unsqueeze_x", (True, False))
+@pytest.mark.parametrize("unsqueeze_y", (True, False))
+@pytest.mark.parametrize(
+    ["x", "y", "expected"],
+    [
+        pytest.param([0, 1], [0, 1], 0.5),
+        pytest.param([1, 0], [0, 1], 0.5),
+        pytest.param([1, 0, 0], [0, 1, 1], 0.5),
+        pytest.param([0, 1], [1, 1], 1),
+        pytest.param([0, 0.5, 1], [0, 0.5, 1], 0.5),
+    ],
+)
+def test_auc(x, y, expected, unsqueeze_x, unsqueeze_y):
+    x = tensor(x)
+    y = tensor(y)
+
+    if unsqueeze_x:
+        x = x.unsqueeze(-1)
+
+    if unsqueeze_y:
+        y = y.unsqueeze(-1)
+
+    # Test Area Under Curve (AUC) computation
+    assert auc(x, y, reorder=True) == expected
diff --git a/EE/paddlemetric/src/tests/classification/test_auroc.py b/EE/paddlemetric/src/tests/classification/test_auroc.py
new file mode 100644
index 000000000..36b436114
--- /dev/null
+++ b/EE/paddlemetric/src/tests/classification/test_auroc.py
@@ -0,0 +1,218 @@
+# Copyright The PyTorch Lightning team.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from functools import partial + +import pytest +import paddleext.torchapi as B +from sklearn.metrics import roc_auc_score as sk_roc_auc_score + +from tests.classification.inputs import _input_binary_prob +from tests.classification.inputs import _input_multiclass_prob as _input_mcls_prob +from tests.classification.inputs import _input_multidim_multiclass_prob as _input_mdmc_prob +from tests.classification.inputs import _input_multilabel_multidim_prob as _input_mlmd_prob +from tests.classification.inputs import _input_multilabel_prob as _input_mlb_prob +from tests.helpers import seed_all +from tests.helpers.testers import NUM_CLASSES, MetricTester +from paddlemetrics.classification.auroc import AUROC +from paddlemetrics.functional import auroc +from paddlemetrics.utilities.imports import _TORCH_LOWER_1_6 + +seed_all(42) + + +def _sk_auroc_binary_prob(preds, target, num_classes, average="macro", max_fpr=None, multi_class="ovr"): + # todo: `multi_class` is unused + sk_preds = preds.view(-1).numpy() + sk_target = target.view(-1).numpy() + return sk_roc_auc_score(y_true=sk_target, y_score=sk_preds, average=average, max_fpr=max_fpr) + + +def _sk_auroc_multiclass_prob(preds, target, num_classes, average="macro", max_fpr=None, multi_class="ovr"): + sk_preds = preds.reshape(-1, num_classes).numpy() + sk_target = target.view(-1).numpy() + return sk_roc_auc_score( + y_true=sk_target, + y_score=sk_preds, + average=average, + max_fpr=max_fpr, + multi_class=multi_class, + ) + + +def _sk_auroc_multidim_multiclass_prob(preds, target, num_classes, average="macro", max_fpr=None, multi_class="ovr"): + sk_preds = preds.transpose(0, 1).reshape(num_classes, -1).transpose(0, 1).numpy() + sk_target = target.view(-1).numpy() + return sk_roc_auc_score( + y_true=sk_target, + y_score=sk_preds, + average=average, + max_fpr=max_fpr, + multi_class=multi_class, + ) + + +def _sk_auroc_multilabel_prob(preds, target, num_classes, average="macro", max_fpr=None, multi_class="ovr"): + sk_preds = preds.reshape(-1, num_classes).numpy() + sk_target = target.reshape(-1, num_classes).numpy() + return sk_roc_auc_score( + y_true=sk_target, + y_score=sk_preds, + average=average, + max_fpr=max_fpr, + multi_class=multi_class, + ) + + +def _sk_auroc_multilabel_multidim_prob(preds, target, num_classes, average="macro", max_fpr=None, multi_class="ovr"): + sk_preds = preds.transpose(0, 1).reshape(num_classes, -1).transpose(0, 1).numpy() + sk_target = target.transpose(0, 1).reshape(num_classes, -1).transpose(0, 1).numpy() + return sk_roc_auc_score( + y_true=sk_target, + y_score=sk_preds, + average=average, + max_fpr=max_fpr, + multi_class=multi_class, + ) + + +@pytest.mark.parametrize("average", ["macro", "weighted", "micro"]) +@pytest.mark.parametrize("max_fpr", [None, 0.8, 0.5]) +@pytest.mark.parametrize( + "preds, target, sk_metric, num_classes", + [ + (_input_binary_prob.preds, _input_binary_prob.target, _sk_auroc_binary_prob, 1), + (_input_mcls_prob.preds, _input_mcls_prob.target, 
_sk_auroc_multiclass_prob, NUM_CLASSES),
+        (_input_mdmc_prob.preds, _input_mdmc_prob.target, _sk_auroc_multidim_multiclass_prob, NUM_CLASSES),
+        (_input_mlb_prob.preds, _input_mlb_prob.target, _sk_auroc_multilabel_prob, NUM_CLASSES),
+        (_input_mlmd_prob.preds, _input_mlmd_prob.target, _sk_auroc_multilabel_multidim_prob, NUM_CLASSES),
+    ],
+)
+class TestAUROC(MetricTester):
+    @pytest.mark.parametrize("ddp", [True, False])
+    @pytest.mark.parametrize("dist_sync_on_step", [True, False])
+    def test_auroc(self, preds, target, sk_metric, num_classes, average, max_fpr, ddp, dist_sync_on_step):
+        # max_fpr other than None is not supported for multiclass or multilabel input
+        if max_fpr is not None and num_classes != 1:
+            pytest.skip("max_fpr parameter is not supported for multiclass or multilabel input")
+
+        # max_fpr is only supported for torch v1.6 or higher
+        if max_fpr is not None and _TORCH_LOWER_1_6:
+            pytest.skip("requires torch v1.6 or higher to test max_fpr argument")
+
+        # average='micro' is only supported for multilabel input
+        if average == "micro" and preds.ndim > 2 and preds.ndim == target.ndim + 1:
+            pytest.skip("micro average is only supported for multilabel input")
+
+        self.run_class_metric_test(
+            ddp=ddp,
+            preds=preds,
+            target=target,
+            metric_class=AUROC,
+            sk_metric=partial(sk_metric, num_classes=num_classes, average=average, max_fpr=max_fpr),
+            dist_sync_on_step=dist_sync_on_step,
+            metric_args={"num_classes": num_classes, "average": average, "max_fpr": max_fpr},
+        )
+
+    def test_auroc_functional(self, preds, target, sk_metric, num_classes, average, max_fpr):
+        # max_fpr other than None is not supported for multiclass or multilabel input
+        if max_fpr is not None and num_classes != 1:
+            pytest.skip("max_fpr parameter is not supported for multiclass or multilabel input")
+
+        # max_fpr is only supported for torch v1.6 or higher
+        if max_fpr is not None and _TORCH_LOWER_1_6:
+            pytest.skip("requires torch v1.6 or higher to test max_fpr argument")
+
+        # average='micro' is only supported for multilabel input
+        if average == "micro" and preds.ndim > 2 and preds.ndim == target.ndim + 1:
+            pytest.skip("micro average is only supported for multilabel input")
+
+        self.run_functional_metric_test(
+            preds,
+            target,
+            metric_functional=auroc,
+            sk_metric=partial(sk_metric, num_classes=num_classes, average=average, max_fpr=max_fpr),
+            metric_args={"num_classes": num_classes, "average": average, "max_fpr": max_fpr},
+        )
+
+    def test_auroc_differentiability(self, preds, target, sk_metric, num_classes, average, max_fpr):
+        # max_fpr other than None is not supported for multiclass or multilabel input
+        if max_fpr is not None and num_classes != 1:
+            pytest.skip("max_fpr parameter is not supported for multiclass or multilabel input")
+
+        # max_fpr is only supported for torch v1.6 or higher
+        if max_fpr is not None and _TORCH_LOWER_1_6:
+            pytest.skip("requires torch v1.6 or higher to test max_fpr argument")
+
+        # average='micro' is only supported for multilabel input
+        if average == "micro" and preds.ndim > 2 and preds.ndim == target.ndim + 1:
+            pytest.skip("micro average is only supported for multilabel input")
+
+        self.run_differentiability_test(
+            preds=preds,
+            target=target,
+            metric_module=AUROC,
+            metric_functional=auroc,
+            metric_args={"num_classes": num_classes, "average": average, "max_fpr": max_fpr},
+        )
+
+
+def test_error_on_different_mode():
+    """Test that an error is raised if the user passes in data of different modes (binary, multilabel,
+    multiclass)."""
+    metric = AUROC()
+    # pass in multi-class data
+    metric.update(B.randn(10, 5).softmax(dim=-1), B.randint(0, 5, (10,)))
+    with pytest.raises(ValueError,
match=r"The mode of data.* should be constant.*"): + # pass in multi-label data + metric.update(B.rand(10, 5), B.randint(0, 2, (10, 5))) + + +def test_error_multiclass_no_num_classes(): + with pytest.raises( + ValueError, match="Detected input to `multiclass` but you did not provide `num_classes` argument" + ): + _ = auroc(B.randn(20, 3).softmax(dim=-1), B.randint(3, (20,))) + + +def test_weighted_with_empty_classes(): + """Tests that weighted multiclass AUROC calculation yields the same results if a new but empty class exists. + + Tests that the proper warnings and errors are raised + """ + preds = B.tensor( + [ + [0.90, 0.05, 0.05], + [0.05, 0.90, 0.05], + [0.05, 0.05, 0.90], + [0.85, 0.05, 0.10], + [0.10, 0.10, 0.80], + ] + ) + target = B.tensor([0, 1, 1, 2, 2]) + num_classes = 3 + _auroc = auroc(preds, target, average="weighted", num_classes=num_classes) + + # Add in a class with zero observations at second to last index + preds = B.cat( + (preds[:, : num_classes - 1], B.rand_like(preds[:, 0:1]), preds[:, num_classes - 1 :]), axis=1 + ) + # Last class (2) gets moved to 3 + target[target == num_classes - 1] = num_classes + with pytest.warns(UserWarning, match="Class 2 had 0 observations, omitted from AUROC calculation"): + _auroc_empty_class = auroc(preds, target, average="weighted", num_classes=num_classes + 1) + assert _auroc == _auroc_empty_class + + target = B.zeros_like(target) + with pytest.raises(ValueError, match="Found 1 non-empty class in `multiclass` AUROC calculation"): + _ = auroc(preds, target, average="weighted", num_classes=num_classes + 1) diff --git a/EE/paddlemetric/src/tests/classification/test_average_precision.py b/EE/paddlemetric/src/tests/classification/test_average_precision.py new file mode 100644 index 000000000..aea088cca --- /dev/null +++ b/EE/paddlemetric/src/tests/classification/test_average_precision.py @@ -0,0 +1,170 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from functools import partial + +import numpy as np +import pytest +from sklearn.metrics import average_precision_score as sk_average_precision_score +from paddleext.torchapi import tensor + +from tests.classification.inputs import _input_binary_prob +from tests.classification.inputs import _input_multiclass_prob as _input_mcls_prob +from tests.classification.inputs import _input_multidim_multiclass_prob as _input_mdmc_prob +from tests.classification.inputs import _input_multilabel +from tests.helpers import seed_all +from tests.helpers.testers import NUM_CLASSES, MetricTester +from paddlemetrics.classification.average_precision import AveragePrecision +from paddlemetrics.functional import average_precision + +seed_all(42) + + +def _sk_average_precision_score(y_true, probas_pred, num_classes=1, average=None): + if num_classes == 1: + return sk_average_precision_score(y_true, probas_pred) + + res = [] + for i in range(num_classes): + y_true_temp = np.zeros_like(y_true) + y_true_temp[y_true == i] = 1 + res.append(sk_average_precision_score(y_true_temp, probas_pred[:, i])) + + if average == "macro": + return np.array(res).mean() + if average == "weighted": + weights = np.bincount(y_true) if y_true.max() > 1 else y_true.sum(axis=0) + weights = weights / sum(weights) + return (np.array(res) * weights).sum() + + return res + + +def _sk_avg_prec_binary_prob(preds, target, num_classes=1, average=None): + sk_preds = preds.view(-1).numpy() + sk_target = target.view(-1).numpy() + + return _sk_average_precision_score(y_true=sk_target, probas_pred=sk_preds, num_classes=num_classes, average=average) + + +def _sk_avg_prec_multiclass_prob(preds, target, num_classes=1, average=None): + sk_preds = preds.reshape(-1, num_classes).numpy() + sk_target = target.view(-1).numpy() + + return _sk_average_precision_score(y_true=sk_target, probas_pred=sk_preds, num_classes=num_classes, average=average) + + +def _sk_avg_prec_multilabel_prob(preds, target, num_classes=1, average=None): + sk_preds = preds.reshape(-1, num_classes).numpy() + sk_target = target.view(-1, num_classes).numpy() + return sk_average_precision_score(sk_target, sk_preds, average=average) + + +def _sk_avg_prec_multidim_multiclass_prob(preds, target, num_classes=1, average=None): + sk_preds = preds.transpose(0, 1).reshape(num_classes, -1).transpose(0, 1).numpy() + sk_target = target.view(-1).numpy() + return _sk_average_precision_score(y_true=sk_target, probas_pred=sk_preds, num_classes=num_classes, average=average) + + +@pytest.mark.parametrize( + "preds, target, sk_metric, num_classes", + [ + (_input_binary_prob.preds, _input_binary_prob.target, _sk_avg_prec_binary_prob, 1), + (_input_mcls_prob.preds, _input_mcls_prob.target, _sk_avg_prec_multiclass_prob, NUM_CLASSES), + (_input_mdmc_prob.preds, _input_mdmc_prob.target, _sk_avg_prec_multidim_multiclass_prob, NUM_CLASSES), + (_input_multilabel.preds, _input_multilabel.target, _sk_avg_prec_multilabel_prob, NUM_CLASSES), + ], +) +@pytest.mark.parametrize("average", ["micro", "macro", "weighted", None]) +class TestAveragePrecision(MetricTester): + @pytest.mark.parametrize("ddp", [True, False]) + @pytest.mark.parametrize("dist_sync_on_step", [True, False]) + def test_average_precision(self, preds, target, sk_metric, num_classes, average, ddp, dist_sync_on_step): + if target.max() > 1 and average == "micro": + pytest.skip("average=micro and multiclass input cannot be used together") + + self.run_class_metric_test( + ddp=ddp, + preds=preds, + target=target, + metric_class=AveragePrecision, + 
sk_metric=partial(sk_metric, num_classes=num_classes, average=average),
+            dist_sync_on_step=dist_sync_on_step,
+            metric_args={"num_classes": num_classes, "average": average},
+        )
+
+    def test_average_precision_functional(self, preds, target, sk_metric, num_classes, average):
+        if target.max() > 1 and average == "micro":
+            pytest.skip("average=micro and multiclass input cannot be used together")
+
+        self.run_functional_metric_test(
+            preds=preds,
+            target=target,
+            metric_functional=average_precision,
+            sk_metric=partial(sk_metric, num_classes=num_classes, average=average),
+            metric_args={"num_classes": num_classes, "average": average},
+        )
+
+    def test_average_precision_differentiability(self, preds, sk_metric, target, num_classes, average):
+        self.run_differentiability_test(
+            preds=preds,
+            target=target,
+            metric_module=AveragePrecision,
+            metric_functional=average_precision,
+            metric_args={"num_classes": num_classes},
+        )
+
+
+@pytest.mark.parametrize(
+    ["scores", "target", "expected_score"],
+    [
+        # Check that the average_precision_score of a constant predictor is
+        # the fraction of positives. Generate a dataset with 25% positives
+        # and a constant score: the precision is then the fraction of
+        # positives whatever the recall is, as there is only one threshold:
+        pytest.param(tensor([1, 1, 1, 1]), tensor([0, 0, 0, 1]), 0.25),
+        # With threshold 0.8: 1 TP, 2 TN and one FN
+        pytest.param(tensor([0.6, 0.7, 0.8, 9]), tensor([1, 0, 0, 1]), 0.75),
+    ],
+)
+def test_average_precision(scores, target, expected_score):
+    assert average_precision(scores, target) == expected_score
+
+
+def test_average_precision_warnings_and_errors():
+    """Test that the correct errors and warnings get raised."""
+
+    # check average argument
+    with pytest.raises(ValueError, match="Expected argument `average` to be one .*"):
+        AveragePrecision(num_classes=5, average="samples")
+
+    # check that micro average cannot be used with multilabel input
+    pred = tensor(
+        [
+            [0.75, 0.05, 0.05, 0.05, 0.05],
+            [0.05, 0.75, 0.05, 0.05, 0.05],
+            [0.05, 0.05, 0.75, 0.05, 0.05],
+            [0.05, 0.05, 0.05, 0.75, 0.05],
+        ]
+    )
+    target = tensor([0, 1, 3, 2])
+    average_precision = AveragePrecision(num_classes=5, average="micro")
+    with pytest.raises(ValueError, match="Cannot use `micro` average with multi-class input"):
+        average_precision(pred, target)
+
+    # check that a warning is thrown when average=macro and nan is encountered in the individual scores
+    average_precision = AveragePrecision(num_classes=5, average="macro")
+    with pytest.warns(UserWarning, match="Average precision score for one or more classes was `nan`.*"):
+        average_precision(pred, target)
diff --git a/EE/paddlemetric/src/tests/classification/test_binned_precision_recall.py b/EE/paddlemetric/src/tests/classification/test_binned_precision_recall.py
new file mode 100644
index 000000000..a1ea33765
--- /dev/null
+++ b/EE/paddlemetric/src/tests/classification/test_binned_precision_recall.py
@@ -0,0 +1,129 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +from functools import partial +from typing import Tuple + +import numpy as np +import pytest +import paddleext.torchapi as B +from sklearn.metrics import average_precision_score as _sk_average_precision_score +from sklearn.metrics import precision_recall_curve as _sk_precision_recall_curve +from paddleext.torchapi import Tensor + +from tests.classification.inputs import _input_binary_prob +from tests.classification.inputs import _input_binary_prob_plausible as _input_binary_prob_ok +from tests.classification.inputs import _input_multilabel_prob as _input_mlb_prob +from tests.classification.inputs import _input_multilabel_prob_plausible as _input_mlb_prob_ok +from tests.helpers import seed_all +from tests.helpers.testers import NUM_CLASSES, MetricTester +from paddlemetrics.classification.binned_precision_recall import BinnedAveragePrecision, BinnedRecallAtFixedPrecision + +seed_all(42) + + +def recall_at_precision_x_multilabel(predictions: Tensor, targets: Tensor, min_precision: float) -> Tuple[float, float]: + precision, recall, thresholds = _sk_precision_recall_curve(targets, predictions) + + try: + tuple_all = [(r, p, t) for p, r, t in zip(precision, recall, thresholds) if p >= min_precision] + max_recall, _, best_threshold = max(tuple_all) + except ValueError: + max_recall, best_threshold = 0, 1e6 + + return float(max_recall), float(best_threshold) + + +def _sk_prec_recall_mclass_prob(predictions, targets, num_classes, min_precision): + max_recalls = B.zeros(num_classes) + best_thresholds = B.zeros(num_classes) + + for i in range(num_classes): + max_recalls[i], best_thresholds[i] = recall_at_precision_x_multilabel( + predictions[:, i], targets[:, i], min_precision + ) + return max_recalls, best_thresholds + + +def _sk_prec_recall_binary_prob(predictions, targets, num_classes, min_precision): + return recall_at_precision_x_multilabel(predictions, targets, min_precision) + + +def _sk_avg_prec_multiclass(predictions, targets, num_classes): + # replace nan with 0 + return np.nan_to_num(_sk_average_precision_score(targets, predictions, average=None)) + + +@pytest.mark.parametrize( + "preds, target, sk_metric, num_classes", + [ + (_input_binary_prob.preds, _input_binary_prob.target, _sk_prec_recall_binary_prob, 1), + (_input_binary_prob_ok.preds, _input_binary_prob_ok.target, _sk_prec_recall_binary_prob, 1), + (_input_mlb_prob_ok.preds, _input_mlb_prob_ok.target, _sk_prec_recall_mclass_prob, NUM_CLASSES), + (_input_mlb_prob.preds, _input_mlb_prob.target, _sk_prec_recall_mclass_prob, NUM_CLASSES), + ], +) +class TestBinnedRecallAtPrecision(MetricTester): + atol = 0.02 + + @pytest.mark.parametrize("ddp", [True, False]) + @pytest.mark.parametrize("dist_sync_on_step", [True, False]) + @pytest.mark.parametrize("min_precision", [0.05, 0.1, 0.3, 0.5, 0.8, 0.95]) + def test_binned_recall_at_precision( + self, preds, target, sk_metric, num_classes, ddp, dist_sync_on_step, min_precision + ): + # rounding will simulate binning for both implementations + preds = Tensor(np.round(preds.numpy(), 2)) + 1e-6 + + self.run_class_metric_test( + ddp=ddp, + preds=preds, + target=target, + metric_class=BinnedRecallAtFixedPrecision, + sk_metric=partial(sk_metric, num_classes=num_classes, min_precision=min_precision), + dist_sync_on_step=dist_sync_on_step, + metric_args={ + "num_classes": num_classes, + "min_precision": min_precision, + "thresholds": 101, + }, + ) + + +@pytest.mark.parametrize( + "preds, 
target, sk_metric, num_classes", + [ + (_input_binary_prob.preds, _input_binary_prob.target, _sk_avg_prec_multiclass, 1), + (_input_binary_prob_ok.preds, _input_binary_prob_ok.target, _sk_avg_prec_multiclass, 1), + (_input_mlb_prob_ok.preds, _input_mlb_prob_ok.target, _sk_avg_prec_multiclass, NUM_CLASSES), + (_input_mlb_prob.preds, _input_mlb_prob.target, _sk_avg_prec_multiclass, NUM_CLASSES), + ], +) +class TestBinnedAveragePrecision(MetricTester): + @pytest.mark.parametrize("ddp", [True, False]) + @pytest.mark.parametrize("dist_sync_on_step", [True, False]) + @pytest.mark.parametrize("thresholds", (301, B.linspace(0.0, 1.0, 101))) + def test_binned_average_precision(self, preds, target, sk_metric, num_classes, ddp, dist_sync_on_step, thresholds): + # rounding will simulate binning for both implementations + preds = Tensor(np.round(preds.numpy(), 2)) + 1e-6 + + self.run_class_metric_test( + ddp=ddp, + preds=preds, + target=target, + metric_class=BinnedAveragePrecision, + sk_metric=partial(sk_metric, num_classes=num_classes), + dist_sync_on_step=dist_sync_on_step, + metric_args={"num_classes": num_classes, "thresholds": thresholds}, + ) diff --git a/EE/paddlemetric/src/tests/classification/test_calibration_error.py b/EE/paddlemetric/src/tests/classification/test_calibration_error.py new file mode 100644 index 000000000..f0a470fc7 --- /dev/null +++ b/EE/paddlemetric/src/tests/classification/test_calibration_error.py @@ -0,0 +1,114 @@ +import functools +import re + +import numpy as np +import pytest + +from tests.classification.inputs import _input_binary_prob +from tests.classification.inputs import _input_multiclass_prob as _input_mcls_prob +from tests.classification.inputs import _input_multidim_multiclass_prob as _input_mdmc_prob +from tests.classification.inputs import _input_multilabel_prob as _input_mlb_prob +from tests.helpers import seed_all + +# TODO: replace this with official sklearn implementation after next sklearn release +from tests.helpers.non_sklearn_metrics import calibration_error as sk_calib +from tests.helpers.testers import THRESHOLD, MetricTester +from paddlemetrics import CalibrationError +from paddlemetrics.functional import calibration_error +from paddlemetrics.utilities.checks import _input_format_classification +from paddlemetrics.utilities.enums import DataType + +seed_all(42) + + +def _sk_calibration(preds, target, n_bins, norm, debias=False): + _, _, mode = _input_format_classification(preds, target, threshold=THRESHOLD) + sk_preds, sk_target = preds.numpy(), target.numpy() + + if mode == DataType.MULTICLASS: + # binary label is whether or not the predicted class is correct + sk_target = np.equal(np.argmax(sk_preds, axis=1), sk_target) + sk_preds = np.max(sk_preds, axis=1) + elif mode == DataType.MULTIDIM_MULTICLASS: + # reshape from shape (N, C, ...) to (N*EXTRA_DIMS, C) + sk_preds = np.transpose(sk_preds, axes=(0, 2, 1)) + sk_preds = sk_preds.reshape(np.prod(sk_preds.shape[:-1]), sk_preds.shape[-1]) + # reshape from shape (N, ...) 
to (N*EXTRA_DIMS,) + # binary label is whether or not the predicted class is correct + sk_target = np.equal(np.argmax(sk_preds, axis=1), sk_target.flatten()) + sk_preds = np.max(sk_preds, axis=1) + return sk_calib(y_true=sk_target, y_prob=sk_preds, norm=norm, n_bins=n_bins, reduce_bias=debias) + + +@pytest.mark.parametrize("n_bins", [10, 15, 20]) +@pytest.mark.parametrize("norm", ["l1", "l2", "max"]) +@pytest.mark.parametrize( + "preds, target", + [ + (_input_binary_prob.preds, _input_binary_prob.target), + (_input_mcls_prob.preds, _input_mcls_prob.target), + (_input_mdmc_prob.preds, _input_mdmc_prob.target), + ], +) +class TestCE(MetricTester): + @pytest.mark.parametrize("ddp", [True, False]) + @pytest.mark.parametrize("dist_sync_on_step", [True, False]) + def test_ce(self, preds, target, n_bins, ddp, dist_sync_on_step, norm): + self.run_class_metric_test( + ddp=ddp, + preds=preds, + target=target, + metric_class=CalibrationError, + sk_metric=functools.partial(_sk_calibration, n_bins=n_bins, norm=norm), + dist_sync_on_step=dist_sync_on_step, + metric_args={"n_bins": n_bins, "norm": norm}, + ) + + def test_ce_functional(self, preds, target, n_bins, norm): + self.run_functional_metric_test( + preds, + target, + metric_functional=calibration_error, + sk_metric=functools.partial(_sk_calibration, n_bins=n_bins, norm=norm), + metric_args={"n_bins": n_bins, "norm": norm}, + ) + + +@pytest.mark.parametrize("preds, targets", [(_input_mlb_prob.preds, _input_mlb_prob.target)]) +def test_invalid_input(preds, targets): + for p, t in zip(preds, targets): + with pytest.raises( + ValueError, + match=re.escape( + f"Calibration error is not well-defined for data with size {p.size()} and targets {t.size()}." + ), + ): + calibration_error(p, t) + + +@pytest.mark.parametrize( + "preds, target", + [ + (_input_binary_prob.preds, _input_binary_prob.target), + (_input_mcls_prob.preds, _input_mcls_prob.target), + (_input_mdmc_prob.preds, _input_mdmc_prob.target), + ], +) +def test_invalid_norm(preds, target): + with pytest.raises(ValueError, match="Norm l3 is not supported. Please select from l1, l2, or max. 
"): + calibration_error(preds, target, norm="l3") + + +@pytest.mark.parametrize("n_bins", [-10, -1, "fsd"]) +@pytest.mark.parametrize( + "preds, targets", + [ + (_input_binary_prob.preds, _input_binary_prob.target), + (_input_mcls_prob.preds, _input_mcls_prob.target), + (_input_mdmc_prob.preds, _input_mdmc_prob.target), + ], +) +def test_invalid_bins(preds, targets, n_bins): + for p, t in zip(preds, targets): + with pytest.raises(ValueError, match=f"Expected argument `n_bins` to be a int larger than 0 but got {n_bins}"): + calibration_error(p, t, n_bins=n_bins) diff --git a/EE/paddlemetric/src/tests/classification/test_cohen_kappa.py b/EE/paddlemetric/src/tests/classification/test_cohen_kappa.py new file mode 100644 index 000000000..d79cc8d8e --- /dev/null +++ b/EE/paddlemetric/src/tests/classification/test_cohen_kappa.py @@ -0,0 +1,133 @@ +from functools import partial + +import numpy as np +import pytest +import paddleext.torchapi as B +from sklearn.metrics import cohen_kappa_score as sk_cohen_kappa + +from tests.classification.inputs import _input_binary, _input_binary_prob +from tests.classification.inputs import _input_multiclass as _input_mcls +from tests.classification.inputs import _input_multiclass_prob as _input_mcls_prob +from tests.classification.inputs import _input_multidim_multiclass as _input_mdmc +from tests.classification.inputs import _input_multidim_multiclass_prob as _input_mdmc_prob +from tests.classification.inputs import _input_multilabel as _input_mlb +from tests.classification.inputs import _input_multilabel_prob as _input_mlb_prob +from tests.helpers import seed_all +from tests.helpers.testers import NUM_CLASSES, THRESHOLD, MetricTester +from paddlemetrics.classification.cohen_kappa import CohenKappa +from paddlemetrics.functional.classification.cohen_kappa import cohen_kappa + +seed_all(42) + + +def _sk_cohen_kappa_binary_prob(preds, target, weights=None): + sk_preds = (preds.view(-1).numpy() >= THRESHOLD).astype(np.uint8) + sk_target = target.view(-1).numpy() + + return sk_cohen_kappa(y1=sk_target, y2=sk_preds, weights=weights) + + +def _sk_cohen_kappa_binary(preds, target, weights=None): + sk_preds = preds.view(-1).numpy() + sk_target = target.view(-1).numpy() + + return sk_cohen_kappa(y1=sk_target, y2=sk_preds, weights=weights) + + +def _sk_cohen_kappa_multilabel_prob(preds, target, weights=None): + sk_preds = (preds.view(-1).numpy() >= THRESHOLD).astype(np.uint8) + sk_target = target.view(-1).numpy() + + return sk_cohen_kappa(y1=sk_target, y2=sk_preds, weights=weights) + + +def _sk_cohen_kappa_multilabel(preds, target, weights=None): + sk_preds = preds.view(-1).numpy() + sk_target = target.view(-1).numpy() + + return sk_cohen_kappa(y1=sk_target, y2=sk_preds, weights=weights) + + +def _sk_cohen_kappa_multiclass_prob(preds, target, weights=None): + sk_preds = B.argmax(preds, dim=len(preds.shape) - 1).view(-1).numpy() + sk_target = target.view(-1).numpy() + + return sk_cohen_kappa(y1=sk_target, y2=sk_preds, weights=weights) + + +def _sk_cohen_kappa_multiclass(preds, target, weights=None): + sk_preds = preds.view(-1).numpy() + sk_target = target.view(-1).numpy() + + return sk_cohen_kappa(y1=sk_target, y2=sk_preds, weights=weights) + + +def _sk_cohen_kappa_multidim_multiclass_prob(preds, target, weights=None): + sk_preds = B.argmax(preds, dim=len(preds.shape) - 2).view(-1).numpy() + sk_target = target.view(-1).numpy() + + return sk_cohen_kappa(y1=sk_target, y2=sk_preds, weights=weights) + + +def _sk_cohen_kappa_multidim_multiclass(preds, target, weights=None): + 
sk_preds = preds.view(-1).numpy() + sk_target = target.view(-1).numpy() + + return sk_cohen_kappa(y1=sk_target, y2=sk_preds, weights=weights) + + +@pytest.mark.parametrize("weights", ["linear", "quadratic", None]) +@pytest.mark.parametrize( + "preds, target, sk_metric, num_classes", + [ + (_input_binary_prob.preds, _input_binary_prob.target, _sk_cohen_kappa_binary_prob, 2), + (_input_binary.preds, _input_binary.target, _sk_cohen_kappa_binary, 2), + (_input_mlb_prob.preds, _input_mlb_prob.target, _sk_cohen_kappa_multilabel_prob, 2), + (_input_mlb.preds, _input_mlb.target, _sk_cohen_kappa_multilabel, 2), + (_input_mcls_prob.preds, _input_mcls_prob.target, _sk_cohen_kappa_multiclass_prob, NUM_CLASSES), + (_input_mcls.preds, _input_mcls.target, _sk_cohen_kappa_multiclass, NUM_CLASSES), + (_input_mdmc_prob.preds, _input_mdmc_prob.target, _sk_cohen_kappa_multidim_multiclass_prob, NUM_CLASSES), + (_input_mdmc.preds, _input_mdmc.target, _sk_cohen_kappa_multidim_multiclass, NUM_CLASSES), + ], +) +class TestCohenKappa(MetricTester): + atol = 1e-5 + + @pytest.mark.parametrize("ddp", [True, False]) + @pytest.mark.parametrize("dist_sync_on_step", [True, False]) + def test_cohen_kappa(self, weights, preds, target, sk_metric, num_classes, ddp, dist_sync_on_step): + self.run_class_metric_test( + ddp=ddp, + preds=preds, + target=target, + metric_class=CohenKappa, + sk_metric=partial(sk_metric, weights=weights), + dist_sync_on_step=dist_sync_on_step, + metric_args={"num_classes": num_classes, "threshold": THRESHOLD, "weights": weights}, + ) + + def test_cohen_kappa_functional(self, weights, preds, target, sk_metric, num_classes): + self.run_functional_metric_test( + preds, + target, + metric_functional=cohen_kappa, + sk_metric=partial(sk_metric, weights=weights), + metric_args={"num_classes": num_classes, "threshold": THRESHOLD, "weights": weights}, + ) + + def test_cohen_kappa_differentiability(self, preds, target, sk_metric, weights, num_classes): + self.run_differentiability_test( + preds=preds, + target=target, + metric_module=CohenKappa, + metric_functional=cohen_kappa, + metric_args={"num_classes": num_classes, "threshold": THRESHOLD, "weights": weights}, + ) + + +def test_warning_on_wrong_weights(tmpdir): + preds = B.randint(3, size=(20,)) + target = B.randint(3, size=(20,)) + + with pytest.raises(ValueError, match=".* ``weights`` but should be either None, 'linear' or 'quadratic'"): + cohen_kappa(preds, target, num_classes=3, weights="unknown_arg") diff --git a/EE/paddlemetric/src/tests/classification/test_confusion_matrix.py b/EE/paddlemetric/src/tests/classification/test_confusion_matrix.py new file mode 100644 index 000000000..9ae6fa81a --- /dev/null +++ b/EE/paddlemetric/src/tests/classification/test_confusion_matrix.py @@ -0,0 +1,188 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
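+
+# sklearn's `multilabel_confusion_matrix` (used by the multilabel helpers
+# below) returns a (num_labels, 2, 2) stack and has no `normalize` argument,
+# so the three normalization modes are re-implemented by hand. Roughly, for a
+# single 2x2 matrix cm = [[tn, fp], [fn, tp]]:
+#   "true": divide each row by its sum    -> cm / cm.sum(axis=1, keepdims=True)
+#   "pred": divide each column by its sum -> cm / cm.sum(axis=0, keepdims=True)
+#   "all":  divide by the grand total     -> cm / cm.sum()
+# e.g. cm = [[2, 2], [1, 3]] with "true" becomes [[0.50, 0.50], [0.25, 0.75]].
+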
+from functools import partial + +import numpy as np +import pytest +import paddleext.torchapi as B +from sklearn.metrics import confusion_matrix as sk_confusion_matrix +from sklearn.metrics import multilabel_confusion_matrix as sk_multilabel_confusion_matrix + +from tests.classification.inputs import _input_binary, _input_binary_logits, _input_binary_prob +from tests.classification.inputs import _input_multiclass as _input_mcls +from tests.classification.inputs import _input_multiclass_logits as _input_mcls_logits +from tests.classification.inputs import _input_multiclass_prob as _input_mcls_prob +from tests.classification.inputs import _input_multidim_multiclass as _input_mdmc +from tests.classification.inputs import _input_multidim_multiclass_prob as _input_mdmc_prob +from tests.classification.inputs import _input_multilabel as _input_mlb +from tests.classification.inputs import _input_multilabel_logits as _input_mlb_logits +from tests.classification.inputs import _input_multilabel_prob as _input_mlb_prob +from tests.helpers import seed_all +from tests.helpers.testers import NUM_CLASSES, THRESHOLD, MetricTester +from paddlemetrics.classification.confusion_matrix import ConfusionMatrix +from paddlemetrics.functional import confusion_matrix + +seed_all(42) + + +def _sk_cm_binary_prob(preds, target, normalize=None): + sk_preds = (preds.view(-1).numpy() >= THRESHOLD).astype(np.uint8) + sk_target = target.view(-1).numpy() + + return sk_confusion_matrix(y_true=sk_target, y_pred=sk_preds, normalize=normalize) + + +def _sk_cm_binary(preds, target, normalize=None): + sk_preds = preds.view(-1).numpy() + sk_target = target.view(-1).numpy() + + return sk_confusion_matrix(y_true=sk_target, y_pred=sk_preds, normalize=normalize) + + +def _sk_cm_multilabel_prob(preds, target, normalize=None): + sk_preds = (preds.numpy() >= THRESHOLD).astype(np.uint8) + sk_target = target.numpy() + + cm = sk_multilabel_confusion_matrix(y_true=sk_target, y_pred=sk_preds) + if normalize is not None: + if normalize == "true": + cm = cm / cm.sum(axis=1, keepdims=True) + elif normalize == "pred": + cm = cm / cm.sum(axis=0, keepdims=True) + elif normalize == "all": + cm = cm / cm.sum() + cm[np.isnan(cm)] = 0 + return cm + + +def _sk_cm_multilabel(preds, target, normalize=None): + sk_preds = preds.numpy() + sk_target = target.numpy() + + cm = sk_multilabel_confusion_matrix(y_true=sk_target, y_pred=sk_preds) + if normalize is not None: + if normalize == "true": + cm = cm / cm.sum(axis=1, keepdims=True) + elif normalize == "pred": + cm = cm / cm.sum(axis=0, keepdims=True) + elif normalize == "all": + cm = cm / cm.sum() + cm[np.isnan(cm)] = 0 + return cm + + +def _sk_cm_multiclass_prob(preds, target, normalize=None): + sk_preds = B.argmax(preds, dim=len(preds.shape) - 1).view(-1).numpy() + sk_target = target.view(-1).numpy() + + return sk_confusion_matrix(y_true=sk_target, y_pred=sk_preds, normalize=normalize) + + +def _sk_cm_multiclass(preds, target, normalize=None): + sk_preds = preds.view(-1).numpy() + sk_target = target.view(-1).numpy() + + return sk_confusion_matrix(y_true=sk_target, y_pred=sk_preds, normalize=normalize) + + +def _sk_cm_multidim_multiclass_prob(preds, target, normalize=None): + sk_preds = B.argmax(preds, dim=len(preds.shape) - 2).view(-1).numpy() + sk_target = target.view(-1).numpy() + + return sk_confusion_matrix(y_true=sk_target, y_pred=sk_preds, normalize=normalize) + + +def _sk_cm_multidim_multiclass(preds, target, normalize=None): + sk_preds = preds.view(-1).numpy() + sk_target = target.view(-1).numpy() 
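+    # the 1D views above are needed because sklearn's confusion_matrix
+    # only accepts flat label arrays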
+ + return sk_confusion_matrix(y_true=sk_target, y_pred=sk_preds, normalize=normalize) + + +@pytest.mark.parametrize("normalize", ["true", "pred", "all", None]) +@pytest.mark.parametrize( + "preds, target, sk_metric, num_classes, multilabel", + [ + (_input_binary_prob.preds, _input_binary_prob.target, _sk_cm_binary_prob, 2, False), + (_input_binary_logits.preds, _input_binary_logits.target, _sk_cm_binary_prob, 2, False), + (_input_binary.preds, _input_binary.target, _sk_cm_binary, 2, False), + (_input_mlb_prob.preds, _input_mlb_prob.target, _sk_cm_multilabel_prob, NUM_CLASSES, True), + (_input_mlb_logits.preds, _input_mlb_logits.target, _sk_cm_multilabel_prob, NUM_CLASSES, True), + (_input_mlb.preds, _input_mlb.target, _sk_cm_multilabel, NUM_CLASSES, True), + (_input_mcls_prob.preds, _input_mcls_prob.target, _sk_cm_multiclass_prob, NUM_CLASSES, False), + (_input_mcls_logits.preds, _input_mcls_logits.target, _sk_cm_multiclass_prob, NUM_CLASSES, False), + (_input_mcls.preds, _input_mcls.target, _sk_cm_multiclass, NUM_CLASSES, False), + (_input_mdmc_prob.preds, _input_mdmc_prob.target, _sk_cm_multidim_multiclass_prob, NUM_CLASSES, False), + (_input_mdmc.preds, _input_mdmc.target, _sk_cm_multidim_multiclass, NUM_CLASSES, False), + ], +) +class TestConfusionMatrix(MetricTester): + @pytest.mark.parametrize("ddp", [True, False]) + @pytest.mark.parametrize("dist_sync_on_step", [True, False]) + def test_confusion_matrix( + self, normalize, preds, target, sk_metric, num_classes, multilabel, ddp, dist_sync_on_step + ): + self.run_class_metric_test( + ddp=ddp, + preds=preds, + target=target, + metric_class=ConfusionMatrix, + sk_metric=partial(sk_metric, normalize=normalize), + dist_sync_on_step=dist_sync_on_step, + metric_args={ + "num_classes": num_classes, + "threshold": THRESHOLD, + "normalize": normalize, + "multilabel": multilabel, + }, + ) + + def test_confusion_matrix_functional(self, normalize, preds, target, sk_metric, num_classes, multilabel): + self.run_functional_metric_test( + preds=preds, + target=target, + metric_functional=confusion_matrix, + sk_metric=partial(sk_metric, normalize=normalize), + metric_args={ + "num_classes": num_classes, + "threshold": THRESHOLD, + "normalize": normalize, + "multilabel": multilabel, + }, + ) + + def test_confusion_matrix_differentiability(self, normalize, preds, target, sk_metric, num_classes, multilabel): + self.run_differentiability_test( + preds=preds, + target=target, + metric_module=ConfusionMatrix, + metric_functional=confusion_matrix, + metric_args={ + "num_classes": num_classes, + "threshold": THRESHOLD, + "normalize": normalize, + "multilabel": multilabel, + }, + ) + + +def test_warning_on_nan(tmpdir): + preds = B.randint(3, size=(20,)) + target = B.randint(3, size=(20,)) + + with pytest.warns( + UserWarning, + match=".* nan values found in confusion matrix have been replaced with zeros.", + ): + confusion_matrix(preds, target, num_classes=5, normalize="true") diff --git a/EE/paddlemetric/src/tests/classification/test_f_beta.py b/EE/paddlemetric/src/tests/classification/test_f_beta.py new file mode 100644 index 000000000..741c0d46e --- /dev/null +++ b/EE/paddlemetric/src/tests/classification/test_f_beta.py @@ -0,0 +1,451 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from functools import partial +from typing import Callable, Optional + +import numpy as np +import pytest +import paddleext.torchapi as B +from sklearn.metrics import f1_score, fbeta_score +from paddleext.torchapi import Tensor + +from tests.classification.inputs import _input_binary, _input_binary_logits, _input_binary_prob +from tests.classification.inputs import _input_multiclass as _input_mcls +from tests.classification.inputs import _input_multiclass_logits as _input_mcls_logits +from tests.classification.inputs import _input_multiclass_prob as _input_mcls_prob +from tests.classification.inputs import _input_multiclass_with_missing_class as _input_miss_class +from tests.classification.inputs import _input_multidim_multiclass as _input_mdmc +from tests.classification.inputs import _input_multidim_multiclass_prob as _input_mdmc_prob +from tests.classification.inputs import _input_multilabel as _input_mlb +from tests.classification.inputs import _input_multilabel_logits as _input_mlb_logits +from tests.classification.inputs import _input_multilabel_prob as _input_mlb_prob +from tests.helpers import seed_all +from tests.helpers.testers import NUM_BATCHES, NUM_CLASSES, THRESHOLD, MetricTester +from paddlemetrics import F1, FBeta, Metric +from paddlemetrics.functional import f1, fbeta +from paddlemetrics.utilities.checks import _input_format_classification +from paddlemetrics.utilities.enums import AverageMethod + +seed_all(42) + + +def _sk_fbeta_f1(preds, target, sk_fn, num_classes, average, multiclass, ignore_index, mdmc_average=None): + if average == "none": + average = None + if num_classes == 1: + average = "binary" + + labels = list(range(num_classes)) + try: + labels.remove(ignore_index) + except ValueError: + pass + + sk_preds, sk_target, _ = _input_format_classification( + preds, target, THRESHOLD, num_classes=num_classes, multiclass=multiclass + ) + sk_preds, sk_target = sk_preds.numpy(), sk_target.numpy() + sk_scores = sk_fn(sk_target, sk_preds, average=average, zero_division=0, labels=labels) + + if len(labels) != num_classes and not average: + sk_scores = np.insert(sk_scores, ignore_index, np.nan) + + return sk_scores + + +def _sk_fbeta_f1_multidim_multiclass( + preds, target, sk_fn, num_classes, average, multiclass, ignore_index, mdmc_average +): + preds, target, _ = _input_format_classification( + preds, target, threshold=THRESHOLD, num_classes=num_classes, multiclass=multiclass + ) + + if mdmc_average == "global": + preds = B.transpose(preds, 1, 2).reshape(-1, preds.shape[1]) + target = B.transpose(target, 1, 2).reshape(-1, target.shape[1]) + + return _sk_fbeta_f1(preds, target, sk_fn, num_classes, average, False, ignore_index) + if mdmc_average == "samplewise": + scores = [] + + for i in range(preds.shape[0]): + pred_i = preds[i, ...].T + target_i = target[i, ...].T + scores_i = _sk_fbeta_f1(pred_i, target_i, sk_fn, num_classes, average, False, ignore_index) + + scores.append(np.expand_dims(scores_i, 0)) + + return np.concatenate(scores).mean(axis=0) + + +@pytest.mark.parametrize( + "metric_class, metric_fn", + [ + (partial(FBeta, beta=2.0), partial(fbeta, 
beta=2.0)),
+        (F1, f1),
+    ],
+)
+@pytest.mark.parametrize(
+    "average, mdmc_average, num_classes, ignore_index, match_str",
+    [
+        ("wrong", None, None, None, "`average`"),
+        ("micro", "wrong", None, None, "`mdmc"),
+        ("macro", None, None, None, "number of classes"),
+        ("macro", None, 1, 0, "ignore_index"),
+    ],
+)
+def test_wrong_params(metric_class, metric_fn, average, mdmc_average, num_classes, ignore_index, match_str):
+    with pytest.raises(ValueError, match=match_str):
+        metric_class(
+            average=average,
+            mdmc_average=mdmc_average,
+            num_classes=num_classes,
+            ignore_index=ignore_index,
+        )
+
+    with pytest.raises(ValueError, match=match_str):
+        metric_fn(
+            _input_binary.preds[0],
+            _input_binary.target[0],
+            average=average,
+            mdmc_average=mdmc_average,
+            num_classes=num_classes,
+            ignore_index=ignore_index,
+        )
+
+
+@pytest.mark.parametrize(
+    "metric_class, metric_fn",
+    [
+        (partial(FBeta, beta=2.0), partial(fbeta, beta=2.0)),
+        (F1, f1),
+    ],
+)
+def test_zero_division(metric_class, metric_fn):
+    """Test that the zero_division behaviour works correctly (currently it should just set the score to 0)."""
+
+    preds = B.tensor([1, 2, 1, 1])
+    target = B.tensor([2, 0, 2, 1])
+
+    cl_metric = metric_class(average="none", num_classes=3)
+    cl_metric(preds, target)
+
+    result_cl = cl_metric.compute()
+    result_fn = metric_fn(preds, target, average="none", num_classes=3)
+
+    assert result_cl[0] == result_fn[0] == 0
+
+
+@pytest.mark.parametrize(
+    "metric_class, metric_fn",
+    [
+        (partial(FBeta, beta=2.0), partial(fbeta, beta=2.0)),
+        (F1, f1),
+    ],
+)
+def test_no_support(metric_class, metric_fn):
+    """This tests a rare edge case, where there is only one class present in
+    target, ignore_index is set to exactly that class, and the average method
+    is 'weighted'.
+
+    This would mean that the sum of weights equals zero, and would, without
+    taking care of this case, return NaN. However, the reduction function
+    should catch that and set the metric to equal the value of zero_division
+    in this case (zero_division is for now not configurable and equals 0).
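+
+    As a concrete sketch of the numbers used below: with target = [0, 0, 0, 0]
+    and ignore_index=0, class 0 is excluded and the remaining class 1 has zero
+    support, so the weight sum is zero and the returned score is 0.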
+ """ + + preds = B.tensor([1, 1, 0, 0]) + target = B.tensor([0, 0, 0, 0]) + + cl_metric = metric_class(average="weighted", num_classes=2, ignore_index=0) + cl_metric(preds, target) + + result_cl = cl_metric.compute() + result_fn = metric_fn(preds, target, average="weighted", num_classes=2, ignore_index=0) + + assert result_cl == result_fn == 0 + + +@pytest.mark.parametrize("metric_class, metric_fn", [(partial(FBeta, beta=2.0), partial(fbeta, beta=2.0)), (F1, f1)]) +@pytest.mark.parametrize( + "ignore_index, expected", [(None, B.tensor([1.0, np.nan])), (0, B.tensor([np.nan, np.nan]))] +) +def test_class_not_present(metric_class, metric_fn, ignore_index, expected): + """This tests that when metric is computed per class and a given class is not present in both the `preds` and + `target`, the resulting score is `nan`.""" + preds = B.tensor([0, 0, 0]) + target = B.tensor([0, 0, 0]) + num_classes = 2 + + # test functional + result_fn = metric_fn(preds, target, average=AverageMethod.NONE, num_classes=num_classes, ignore_index=ignore_index) + assert B.allclose(expected, result_fn, equal_nan=True) + + # test class + cl_metric = metric_class(average=AverageMethod.NONE, num_classes=num_classes, ignore_index=ignore_index) + cl_metric(preds, target) + result_cl = cl_metric.compute() + assert B.allclose(expected, result_cl, equal_nan=True) + + +@pytest.mark.parametrize( + "metric_class, metric_fn, sk_fn", + [(partial(FBeta, beta=2.0), partial(fbeta, beta=2.0), partial(fbeta_score, beta=2.0)), (F1, f1, f1_score)], +) +@pytest.mark.parametrize("average", ["micro", "macro", None, "weighted", "samples"]) +@pytest.mark.parametrize("ignore_index", [None, 0]) +@pytest.mark.parametrize( + "preds, target, num_classes, multiclass, mdmc_average, sk_wrapper", + [ + (_input_binary_logits.preds, _input_binary_logits.target, 1, None, None, _sk_fbeta_f1), + (_input_binary_prob.preds, _input_binary_prob.target, 1, None, None, _sk_fbeta_f1), + (_input_binary.preds, _input_binary.target, 1, False, None, _sk_fbeta_f1), + (_input_mlb_logits.preds, _input_mlb_logits.target, NUM_CLASSES, None, None, _sk_fbeta_f1), + (_input_mlb_prob.preds, _input_mlb_prob.target, NUM_CLASSES, None, None, _sk_fbeta_f1), + (_input_mlb.preds, _input_mlb.target, NUM_CLASSES, False, None, _sk_fbeta_f1), + (_input_mcls_logits.preds, _input_mcls_logits.target, NUM_CLASSES, None, None, _sk_fbeta_f1), + (_input_mcls_prob.preds, _input_mcls_prob.target, NUM_CLASSES, None, None, _sk_fbeta_f1), + (_input_mcls.preds, _input_mcls.target, NUM_CLASSES, None, None, _sk_fbeta_f1), + (_input_mdmc.preds, _input_mdmc.target, NUM_CLASSES, None, "global", _sk_fbeta_f1_multidim_multiclass), + ( + _input_mdmc_prob.preds, + _input_mdmc_prob.target, + NUM_CLASSES, + None, + "global", + _sk_fbeta_f1_multidim_multiclass, + ), + (_input_mdmc.preds, _input_mdmc.target, NUM_CLASSES, None, "samplewise", _sk_fbeta_f1_multidim_multiclass), + ( + _input_mdmc_prob.preds, + _input_mdmc_prob.target, + NUM_CLASSES, + None, + "samplewise", + _sk_fbeta_f1_multidim_multiclass, + ), + ], +) +class TestFBeta(MetricTester): + @pytest.mark.parametrize("ddp", [False]) + @pytest.mark.parametrize("dist_sync_on_step", [False]) + def test_fbeta_f1( + self, + ddp: bool, + dist_sync_on_step: bool, + preds: Tensor, + target: Tensor, + sk_wrapper: Callable, + metric_class: Metric, + metric_fn: Callable, + sk_fn: Callable, + multiclass: Optional[bool], + num_classes: Optional[int], + average: str, + mdmc_average: Optional[str], + ignore_index: Optional[int], + ): + if num_classes == 1 and average 
!= "micro": + pytest.skip("Only test binary data for 'micro' avg (equivalent of 'binary' in sklearn)") + + if ignore_index is not None and preds.ndim == 2: + pytest.skip("Skipping ignore_index test with binary inputs.") + + if average == "weighted" and ignore_index is not None and mdmc_average is not None: + pytest.skip("Ignore special case where we are ignoring entire sample for 'weighted' average") + + self.run_class_metric_test( + ddp=ddp, + preds=preds, + target=target, + metric_class=metric_class, + sk_metric=partial( + sk_wrapper, + sk_fn=sk_fn, + average=average, + num_classes=num_classes, + multiclass=multiclass, + ignore_index=ignore_index, + mdmc_average=mdmc_average, + ), + dist_sync_on_step=dist_sync_on_step, + metric_args={ + "num_classes": num_classes, + "average": average, + "threshold": THRESHOLD, + "multiclass": multiclass, + "ignore_index": ignore_index, + "mdmc_average": mdmc_average, + }, + check_dist_sync_on_step=True, + check_batch=True, + ) + + def test_fbeta_f1_functional( + self, + preds: Tensor, + target: Tensor, + sk_wrapper: Callable, + metric_class: Metric, + metric_fn: Callable, + sk_fn: Callable, + multiclass: Optional[bool], + num_classes: Optional[int], + average: str, + mdmc_average: Optional[str], + ignore_index: Optional[int], + ): + if num_classes == 1 and average != "micro": + pytest.skip("Only test binary data for 'micro' avg (equivalent of 'binary' in sklearn)") + + if ignore_index is not None and preds.ndim == 2: + pytest.skip("Skipping ignore_index test with binary inputs.") + + if average == "weighted" and ignore_index is not None and mdmc_average is not None: + pytest.skip("Ignore special case where we are ignoring entire sample for 'weighted' average") + + self.run_functional_metric_test( + preds, + target, + metric_functional=metric_fn, + sk_metric=partial( + sk_wrapper, + sk_fn=sk_fn, + average=average, + num_classes=num_classes, + multiclass=multiclass, + ignore_index=ignore_index, + mdmc_average=mdmc_average, + ), + metric_args={ + "num_classes": num_classes, + "average": average, + "threshold": THRESHOLD, + "multiclass": multiclass, + "ignore_index": ignore_index, + "mdmc_average": mdmc_average, + }, + ) + + def test_fbeta_f1_differentiability( + self, + preds: Tensor, + target: Tensor, + sk_wrapper: Callable, + metric_class: Metric, + metric_fn: Callable, + sk_fn: Callable, + multiclass: Optional[bool], + num_classes: Optional[int], + average: str, + mdmc_average: Optional[str], + ignore_index: Optional[int], + ): + if num_classes == 1 and average != "micro": + pytest.skip("Only test binary data for 'micro' avg (equivalent of 'binary' in sklearn)") + + if ignore_index is not None and preds.ndim == 2: + pytest.skip("Skipping ignore_index test with binary inputs.") + + if average == "weighted" and ignore_index is not None and mdmc_average is not None: + pytest.skip("Ignore special case where we are ignoring entire sample for 'weighted' average") + + self.run_differentiability_test( + preds, + target, + metric_functional=metric_fn, + metric_module=metric_class, + metric_args={ + "num_classes": num_classes, + "average": average, + "threshold": THRESHOLD, + "multiclass": multiclass, + "ignore_index": ignore_index, + "mdmc_average": mdmc_average, + }, + ) + + +_mc_k_target = B.tensor([0, 1, 2]) +_mc_k_preds = B.tensor([[0.35, 0.4, 0.25], [0.1, 0.5, 0.4], [0.2, 0.1, 0.7]]) +_ml_k_target = B.tensor([[0, 1, 0], [1, 1, 0], [0, 0, 0]]) +_ml_k_preds = B.tensor([[0.9, 0.2, 0.75], [0.1, 0.7, 0.8], [0.6, 0.1, 0.7]]) + + +@pytest.mark.parametrize( + 
"metric_class, metric_fn", + [ + (partial(FBeta, beta=2.0), partial(fbeta, beta=2.0)), + (F1, fbeta), + ], +) +@pytest.mark.parametrize( + "k, preds, target, average, expected_fbeta, expected_f1", + [ + (1, _mc_k_preds, _mc_k_target, "micro", B.tensor(2 / 3), B.tensor(2 / 3)), + (2, _mc_k_preds, _mc_k_target, "micro", B.tensor(5 / 6), B.tensor(2 / 3)), + (1, _ml_k_preds, _ml_k_target, "micro", B.tensor(0.0), B.tensor(0.0)), + (2, _ml_k_preds, _ml_k_target, "micro", B.tensor(5 / 18), B.tensor(2 / 9)), + ], +) +def test_top_k( + metric_class, + metric_fn, + k: int, + preds: Tensor, + target: Tensor, + average: str, + expected_fbeta: Tensor, + expected_f1: Tensor, +): + """A simple test to check that top_k works as expected. + + Just a sanity check, the tests in StatScores should already guarantee the corectness of results. + """ + class_metric = metric_class(top_k=k, average=average, num_classes=3) + class_metric.update(preds, target) + + if class_metric.beta != 1.0: + result = expected_fbeta + else: + result = expected_f1 + + assert B.isclose(class_metric.compute(), result) + assert B.isclose(metric_fn(preds, target, top_k=k, average=average, num_classes=3), result) + + +@pytest.mark.parametrize("ignore_index", [None, 2]) +@pytest.mark.parametrize("average", ["micro", "macro", "weighted"]) +@pytest.mark.parametrize( + "metric_class, metric_functional, sk_fn", + [(partial(FBeta, beta=2.0), partial(fbeta, beta=2.0), partial(fbeta_score, beta=2.0)), (F1, f1, f1_score)], +) +def test_same_input(metric_class, metric_functional, sk_fn, average, ignore_index): + preds = _input_miss_class.preds + target = _input_miss_class.target + preds_flat = B.cat(list(preds), dim=0) + target_flat = B.cat(list(target), dim=0) + + mc = metric_class(num_classes=NUM_CLASSES, average=average, ignore_index=ignore_index) + for i in range(NUM_BATCHES): + mc.update(preds[i], target[i]) + class_res = mc.compute() + func_res = metric_functional( + preds_flat, target_flat, num_classes=NUM_CLASSES, average=average, ignore_index=ignore_index + ) + sk_res = sk_fn(target_flat, preds_flat, average=average, zero_division=0) + + assert B.allclose(class_res, B.tensor(sk_res).float()) + assert B.allclose(func_res, B.tensor(sk_res).float()) diff --git a/EE/paddlemetric/src/tests/classification/test_hamming_distance.py b/EE/paddlemetric/src/tests/classification/test_hamming_distance.py new file mode 100644 index 000000000..a1ca480b8 --- /dev/null +++ b/EE/paddlemetric/src/tests/classification/test_hamming_distance.py @@ -0,0 +1,106 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import pytest +from sklearn.metrics import hamming_loss as sk_hamming_loss + +from tests.classification.inputs import _input_binary, _input_binary_logits, _input_binary_prob +from tests.classification.inputs import _input_multiclass as _input_mcls +from tests.classification.inputs import _input_multiclass_logits as _input_mcls_logits +from tests.classification.inputs import _input_multiclass_prob as _input_mcls_prob +from tests.classification.inputs import _input_multidim_multiclass as _input_mdmc +from tests.classification.inputs import _input_multidim_multiclass_prob as _input_mdmc_prob +from tests.classification.inputs import _input_multilabel as _input_mlb +from tests.classification.inputs import _input_multilabel_logits as _input_mlb_logits +from tests.classification.inputs import _input_multilabel_multidim as _input_mlmd +from tests.classification.inputs import _input_multilabel_multidim_prob as _input_mlmd_prob +from tests.classification.inputs import _input_multilabel_prob as _input_mlb_prob +from tests.helpers import seed_all +from tests.helpers.testers import THRESHOLD, MetricTester +from paddlemetrics import HammingDistance +from paddlemetrics.functional import hamming_distance +from paddlemetrics.utilities.checks import _input_format_classification + +seed_all(42) + + +def _sk_hamming_loss(preds, target): + sk_preds, sk_target, _ = _input_format_classification(preds, target, threshold=THRESHOLD) + sk_preds, sk_target = sk_preds.numpy(), sk_target.numpy() + sk_preds, sk_target = sk_preds.reshape(sk_preds.shape[0], -1), sk_target.reshape(sk_target.shape[0], -1) + + return sk_hamming_loss(y_true=sk_target, y_pred=sk_preds) + + +@pytest.mark.parametrize( + "preds, target", + [ + (_input_binary_logits.preds, _input_binary_logits.target), + (_input_binary_prob.preds, _input_binary_prob.target), + (_input_binary.preds, _input_binary.target), + (_input_mlb_logits.preds, _input_mlb_logits.target), + (_input_mlb_prob.preds, _input_mlb_prob.target), + (_input_mlb.preds, _input_mlb.target), + (_input_mcls_logits.preds, _input_mcls_logits.target), + (_input_mcls_prob.preds, _input_mcls_prob.target), + (_input_mcls.preds, _input_mcls.target), + (_input_mdmc_prob.preds, _input_mdmc_prob.target), + (_input_mdmc.preds, _input_mdmc.target), + (_input_mlmd_prob.preds, _input_mlmd_prob.target), + (_input_mlmd.preds, _input_mlmd.target), + ], +) +class TestHammingDistance(MetricTester): + @pytest.mark.parametrize("ddp", [True, False]) + @pytest.mark.parametrize("dist_sync_on_step", [False, True]) + def test_hamming_distance_class(self, ddp, dist_sync_on_step, preds, target): + self.run_class_metric_test( + ddp=ddp, + preds=preds, + target=target, + metric_class=HammingDistance, + sk_metric=_sk_hamming_loss, + dist_sync_on_step=dist_sync_on_step, + metric_args={"threshold": THRESHOLD}, + ) + + def test_hamming_distance_fn(self, preds, target): + self.run_functional_metric_test( + preds=preds, + target=target, + metric_functional=hamming_distance, + sk_metric=_sk_hamming_loss, + metric_args={"threshold": THRESHOLD}, + ) + + def test_hamming_distance_differentiability(self, preds, target): + self.run_differentiability_test( + preds=preds, + target=target, + metric_module=HammingDistance, + metric_functional=hamming_distance, + metric_args={"threshold": THRESHOLD}, + ) + + +@pytest.mark.parametrize("threshold", [1.5]) +def test_wrong_params(threshold): + preds, target = _input_mcls_prob.preds, _input_mcls_prob.target + + with pytest.raises(ValueError): + ham_dist = HammingDistance(threshold=threshold) 
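+        # either the construction above or the update/compute below may be
+        # the call that raises for an out-of-range threshold; any of them
+        # satisfies the pytest.raises block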
+ ham_dist(preds, target) + ham_dist.compute() + + with pytest.raises(ValueError): + hamming_distance(preds, target, threshold=threshold) diff --git a/EE/paddlemetric/src/tests/classification/test_hinge.py b/EE/paddlemetric/src/tests/classification/test_hinge.py new file mode 100644 index 000000000..7adbbb784 --- /dev/null +++ b/EE/paddlemetric/src/tests/classification/test_hinge.py @@ -0,0 +1,156 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from functools import partial + +import numpy as np +import pytest +import paddleext.torchapi as B +from sklearn.metrics import hinge_loss as sk_hinge +from sklearn.preprocessing import OneHotEncoder + +from tests.classification.inputs import Input +from tests.helpers.testers import BATCH_SIZE, NUM_BATCHES, NUM_CLASSES, MetricTester +from paddlemetrics import Hinge +from paddlemetrics.functional import hinge +from paddlemetrics.functional.classification.hinge import MulticlassMode + +B.manual_seed(42) + +_input_binary = Input( + preds=B.randn(NUM_BATCHES, BATCH_SIZE), target=B.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)) +) + +_input_binary_single = Input(preds=B.randn((NUM_BATCHES, 1)), target=B.randint(high=2, size=(NUM_BATCHES, 1))) + +_input_multiclass = Input( + preds=B.randn(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES), + target=B.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE)), +) + + +def _sk_hinge(preds, target, squared, multiclass_mode): + sk_preds, sk_target = preds.numpy(), target.numpy() + + if multiclass_mode == MulticlassMode.ONE_VS_ALL: + enc = OneHotEncoder() + enc.fit(sk_target.reshape(-1, 1)) + sk_target = enc.transform(sk_target.reshape(-1, 1)).toarray() + + if sk_preds.ndim == 1 or multiclass_mode == MulticlassMode.ONE_VS_ALL: + sk_target = 2 * sk_target - 1 + + if squared or sk_target.max() != 1 or sk_target.min() != -1: + # Squared not an option in sklearn and infers classes incorrectly with single element, so adapted from source + if sk_preds.ndim == 1 or multiclass_mode == MulticlassMode.ONE_VS_ALL: + margin = sk_target * sk_preds + else: + mask = np.ones_like(sk_preds, dtype=bool) + mask[np.arange(sk_target.shape[0]), sk_target] = False + margin = sk_preds[~mask] + margin -= np.max(sk_preds[mask].reshape(sk_target.shape[0], -1), axis=1) + measures = 1 - margin + measures = np.clip(measures, 0, None) + + if squared: + measures = measures ** 2 + return measures.mean(axis=0) + if multiclass_mode == MulticlassMode.ONE_VS_ALL: + result = np.zeros(sk_preds.shape[1]) + for i in range(result.shape[0]): + result[i] = sk_hinge(y_true=sk_target[:, i], pred_decision=sk_preds[:, i]) + return result + + return sk_hinge(y_true=sk_target, pred_decision=sk_preds) + + +@pytest.mark.parametrize( + "preds, target, squared, multiclass_mode", + [ + (_input_binary.preds, _input_binary.target, False, None), + (_input_binary.preds, _input_binary.target, True, None), + (_input_binary_single.preds, _input_binary_single.target, False, None), + (_input_binary_single.preds, _input_binary_single.target, 
True, None), + (_input_multiclass.preds, _input_multiclass.target, False, MulticlassMode.CRAMMER_SINGER), + (_input_multiclass.preds, _input_multiclass.target, True, MulticlassMode.CRAMMER_SINGER), + (_input_multiclass.preds, _input_multiclass.target, False, MulticlassMode.ONE_VS_ALL), + (_input_multiclass.preds, _input_multiclass.target, True, MulticlassMode.ONE_VS_ALL), + ], +) +class TestHinge(MetricTester): + @pytest.mark.parametrize("ddp", [True, False]) + @pytest.mark.parametrize("dist_sync_on_step", [True, False]) + def test_hinge_class(self, ddp, dist_sync_on_step, preds, target, squared, multiclass_mode): + self.run_class_metric_test( + ddp=ddp, + preds=preds, + target=target, + metric_class=Hinge, + sk_metric=partial(_sk_hinge, squared=squared, multiclass_mode=multiclass_mode), + dist_sync_on_step=dist_sync_on_step, + metric_args={ + "squared": squared, + "multiclass_mode": multiclass_mode, + }, + ) + + def test_hinge_fn(self, preds, target, squared, multiclass_mode): + self.run_functional_metric_test( + preds=preds, + target=target, + metric_functional=partial(hinge, squared=squared, multiclass_mode=multiclass_mode), + sk_metric=partial(_sk_hinge, squared=squared, multiclass_mode=multiclass_mode), + ) + + def test_hinge_differentiability(self, preds, target, squared, multiclass_mode): + self.run_differentiability_test( + preds=preds, + target=target, + metric_module=Hinge, + metric_functional=partial(hinge, squared=squared, multiclass_mode=multiclass_mode), + ) + + +_input_multi_target = Input(preds=B.randn(BATCH_SIZE), target=B.randint(high=2, size=(BATCH_SIZE, 2))) + +_input_binary_different_sizes = Input( + preds=B.randn(BATCH_SIZE * 2), target=B.randint(high=2, size=(BATCH_SIZE,)) +) + +_input_multi_different_sizes = Input( + preds=B.randn(BATCH_SIZE * 2, NUM_CLASSES), target=B.randint(high=NUM_CLASSES, size=(BATCH_SIZE,)) +) + +_input_extra_dim = Input( + preds=B.randn(BATCH_SIZE, NUM_CLASSES, 2), target=B.randint(high=2, size=(BATCH_SIZE,)) +) + + +@pytest.mark.parametrize( + "preds, target, multiclass_mode", + [ + (_input_multi_target.preds, _input_multi_target.target, None), + (_input_binary_different_sizes.preds, _input_binary_different_sizes.target, None), + (_input_multi_different_sizes.preds, _input_multi_different_sizes.target, None), + (_input_extra_dim.preds, _input_extra_dim.target, None), + (_input_multiclass.preds[0], _input_multiclass.target[0], "invalid_mode"), + ], +) +def test_bad_inputs_fn(preds, target, multiclass_mode): + with pytest.raises(ValueError): + _ = hinge(preds, target, multiclass_mode=multiclass_mode) + + +def test_bad_inputs_class(): + with pytest.raises(ValueError): + Hinge(multiclass_mode="invalid_mode") diff --git a/EE/paddlemetric/src/tests/classification/test_inputs.py b/EE/paddlemetric/src/tests/classification/test_inputs.py new file mode 100644 index 000000000..4f924af26 --- /dev/null +++ b/EE/paddlemetric/src/tests/classification/test_inputs.py @@ -0,0 +1,312 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +import pytest +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, rand, randint, tensor + +from tests.classification.inputs import Input +from tests.classification.inputs import _input_binary as _bin +from tests.classification.inputs import _input_binary_prob as _bin_prob +from tests.classification.inputs import _input_multiclass as _mc +from tests.classification.inputs import _input_multiclass_prob as _mc_prob +from tests.classification.inputs import _input_multidim_multiclass as _mdmc +from tests.classification.inputs import _input_multidim_multiclass_prob as _mdmc_prob +from tests.classification.inputs import _input_multilabel as _ml +from tests.classification.inputs import _input_multilabel_multidim as _mlmd +from tests.classification.inputs import _input_multilabel_multidim_prob as _mlmd_prob +from tests.classification.inputs import _input_multilabel_prob as _ml_prob +from tests.helpers import seed_all +from tests.helpers.testers import BATCH_SIZE, EXTRA_DIM, NUM_BATCHES, NUM_CLASSES, THRESHOLD +from paddlemetrics.utilities.checks import _input_format_classification +from paddlemetrics.utilities.data import select_topk, to_onehot +from paddlemetrics.utilities.enums import DataType + +seed_all(42) + +# Some additional inputs to test on +_ml_prob_half = Input(_ml_prob.preds.half(), _ml_prob.target) + +_mc_prob_2cls_preds = rand(NUM_BATCHES, BATCH_SIZE, 2) +_mc_prob_2cls_preds /= _mc_prob_2cls_preds.sum(dim=2, keepdim=True) +_mc_prob_2cls = Input(_mc_prob_2cls_preds, randint(high=2, size=(NUM_BATCHES, BATCH_SIZE))) + +_mdmc_prob_many_dims_preds = rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES, EXTRA_DIM, EXTRA_DIM) +_mdmc_prob_many_dims_preds /= _mdmc_prob_many_dims_preds.sum(dim=2, keepdim=True) +_mdmc_prob_many_dims = Input( + _mdmc_prob_many_dims_preds, + randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM, EXTRA_DIM)), +) + +_mdmc_prob_2cls_preds = rand(NUM_BATCHES, BATCH_SIZE, 2, EXTRA_DIM) +_mdmc_prob_2cls_preds /= _mdmc_prob_2cls_preds.sum(dim=2, keepdim=True) +_mdmc_prob_2cls = Input(_mdmc_prob_2cls_preds, randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM))) + +# Some utils +T = Tensor + + +def _idn(x): + return x + + +def _usq(x): + return x.unsqueeze(-1) + + +def _thrs(x): + return x >= THRESHOLD + + +def _rshp1(x): + return x.reshape(x.shape[0], -1) + + +def _rshp2(x): + return x.reshape(x.shape[0], x.shape[1], -1) + + +def _onehot(x): + return to_onehot(x, NUM_CLASSES) + + +def _onehot2(x): + return to_onehot(x, 2) + + +def _top1(x): + return select_topk(x, 1) + + +def _top2(x): + return select_topk(x, 2) + + +# To avoid ugly black line wrapping +def _ml_preds_tr(x): + return _rshp1(_thrs(x)) + + +def _onehot_rshp1(x): + return _onehot(_rshp1(x)) + + +def _onehot2_rshp1(x): + return _onehot2(_rshp1(x)) + + +def _top1_rshp2(x): + return _top1(_rshp2(x)) + + +def _top2_rshp2(x): + return _top2(_rshp2(x)) + + +def _probs_to_mc_preds_tr(x): + return _onehot2(_thrs(x)) + + +def _mlmd_prob_to_mc_preds_tr(x): + return _onehot2(_rshp1(_thrs(x))) + + +######################## +# Test correct inputs +######################## + + +@pytest.mark.parametrize( + "inputs, num_classes, multiclass, top_k, exp_mode, post_preds, post_target", + [ + ############################# + # Test usual expected cases + (_bin, None, False, None, "multi-class", _usq, _usq), + (_bin, 1, False, None, "multi-class", _usq, _usq), + (_bin_prob, None, None, None, "binary", lambda x: 
_usq(_thrs(x)), _usq), + (_ml_prob, None, None, None, "multi-label", _thrs, _idn), + (_ml, None, False, None, "multi-dim multi-class", _idn, _idn), + (_ml_prob, None, None, None, "multi-label", _ml_preds_tr, _rshp1), + (_ml_prob, None, None, 2, "multi-label", _top2, _rshp1), + (_mlmd, None, False, None, "multi-dim multi-class", _rshp1, _rshp1), + (_mc, NUM_CLASSES, None, None, "multi-class", _onehot, _onehot), + (_mc_prob, None, None, None, "multi-class", _top1, _onehot), + (_mc_prob, None, None, 2, "multi-class", _top2, _onehot), + (_mdmc, NUM_CLASSES, None, None, "multi-dim multi-class", _onehot, _onehot), + (_mdmc_prob, None, None, None, "multi-dim multi-class", _top1_rshp2, _onehot), + (_mdmc_prob, None, None, 2, "multi-dim multi-class", _top2_rshp2, _onehot), + (_mdmc_prob_many_dims, None, None, None, "multi-dim multi-class", _top1_rshp2, _onehot_rshp1), + (_mdmc_prob_many_dims, None, None, 2, "multi-dim multi-class", _top2_rshp2, _onehot_rshp1), + ########################### + # Test some special cases + # Make sure that half precision works, i.e. is converted to full precision + (_ml_prob_half, None, None, None, "multi-label", lambda x: _ml_preds_tr(x.float()), _rshp1), + # Binary as multiclass + (_bin, None, None, None, "multi-class", _onehot2, _onehot2), + # Binary probs as multiclass + (_bin_prob, None, True, None, "binary", _probs_to_mc_preds_tr, _onehot2), + # Multilabel as multiclass + (_ml, None, True, None, "multi-dim multi-class", _onehot2, _onehot2), + # Multilabel probs as multiclass + (_ml_prob, None, True, None, "multi-label", _probs_to_mc_preds_tr, _onehot2), + # Multidim multilabel as multiclass + (_mlmd, None, True, None, "multi-dim multi-class", _onehot2_rshp1, _onehot2_rshp1), + # Multidim multilabel probs as multiclass + (_mlmd_prob, None, True, None, "multi-label", _mlmd_prob_to_mc_preds_tr, _onehot2_rshp1), + # Multiclass prob with 2 classes as binary + (_mc_prob_2cls, None, False, None, "multi-class", lambda x: _top1(x)[:, [1]], _usq), + # Multi-dim multi-class with 2 classes as multi-label + (_mdmc_prob_2cls, None, False, None, "multi-dim multi-class", lambda x: _top1(x)[:, 1], _idn), + ], +) +def test_usual_cases(inputs, num_classes, multiclass, top_k, exp_mode, post_preds, post_target): + def __get_data_type_enum(str_exp_mode): + return next(DataType[n] for n in dir(DataType) if DataType[n] == str_exp_mode) + + for exp_mode in (exp_mode, __get_data_type_enum(exp_mode)): + preds_out, target_out, mode = _input_format_classification( + preds=inputs.preds[0], + target=inputs.target[0], + threshold=THRESHOLD, + num_classes=num_classes, + multiclass=multiclass, + top_k=top_k, + ) + + assert mode == exp_mode + assert B.equal(preds_out, post_preds(inputs.preds[0]).int()) + assert B.equal(target_out, post_target(inputs.target[0]).int()) + + # Test that things work when batch_size = 1 + preds_out, target_out, mode = _input_format_classification( + preds=inputs.preds[0][[0], ...], + target=inputs.target[0][[0], ...], + threshold=THRESHOLD, + num_classes=num_classes, + multiclass=multiclass, + top_k=top_k, + ) + + assert mode == exp_mode + assert B.equal(preds_out, post_preds(inputs.preds[0][[0], ...]).int()) + assert B.equal(target_out, post_target(inputs.target[0][[0], ...]).int()) + + +# Test that threshold is correctly applied +def test_threshold(): + target = T([1, 1, 1]).int() + preds_probs = T([0.5 - 1e-5, 0.5, 0.5 + 1e-5]) + + preds_probs_out, _, _ = _input_format_classification(preds_probs, target, threshold=0.5) + + assert B.equal(tensor([0, 1, 1], 
dtype=B.int), preds_probs_out.squeeze().int()) + + +######################################################################## +# Test incorrect inputs +######################################################################## + + +@pytest.mark.parametrize( + "preds, target, num_classes, multiclass", + [ + # Target not integer + (randint(high=2, size=(7,)), randint(high=2, size=(7,)).float(), None, None), + # Target negative + (randint(high=2, size=(7,)), -randint(high=2, size=(7,)), None, None), + # Preds negative integers + (-randint(high=2, size=(7,)), randint(high=2, size=(7,)), None, None), + # multiclass=False and target > 1 + (rand(size=(7,)), randint(low=2, high=4, size=(7,)), None, False), + # multiclass=False and preds integers with > 1 + (randint(low=2, high=4, size=(7,)), randint(high=2, size=(7,)), None, False), + # Wrong batch size + (randint(high=2, size=(8,)), randint(high=2, size=(7,)), None, None), + # Completely wrong shape + (randint(high=2, size=(7,)), randint(high=2, size=(7, 4)), None, None), + # Same #dims, different shape + (randint(high=2, size=(7, 3)), randint(high=2, size=(7, 4)), None, None), + # Same shape and preds floats, target not binary + (rand(size=(7, 3)), randint(low=2, high=4, size=(7, 3)), None, None), + # #dims in preds = 1 + #dims in target, C shape not second or last + (rand(size=(7, 3, 4, 3)), randint(high=4, size=(7, 3, 3)), None, None), + # #dims in preds = 1 + #dims in target, preds not float + (randint(high=2, size=(7, 3, 3, 4)), randint(high=4, size=(7, 3, 3)), None, None), + # multiclass=False, with C dimension > 2 + (_mc_prob.preds[0], randint(high=2, size=(BATCH_SIZE,)), None, False), + # Max target larger or equal to C dimension + (_mc_prob.preds[0], randint(low=NUM_CLASSES + 1, high=100, size=(BATCH_SIZE,)), None, None), + # C dimension not equal to num_classes + (_mc_prob.preds[0], _mc_prob.target[0], NUM_CLASSES + 1, None), + # Max target larger than num_classes (with #dim preds = 1 + #dims target) + (_mc_prob.preds[0], randint(low=NUM_CLASSES + 1, high=100, size=(BATCH_SIZE, NUM_CLASSES)), 4, None), + # Max target larger than num_classes (with #dim preds = #dims target) + (randint(high=4, size=(7, 3)), randint(low=5, high=7, size=(7, 3)), 4, None), + # Num_classes=1, but multiclass not false + (randint(high=2, size=(7,)), randint(high=2, size=(7,)), 1, None), + # multiclass=False, but implied class dimension (for multi-label, from shape) != num_classes + (randint(high=2, size=(7, 3, 3)), randint(high=2, size=(7, 3, 3)), 4, False), + # Multilabel input with implied class dimension != num_classes + (rand(size=(7, 3, 3)), randint(high=2, size=(7, 3, 3)), 4, False), + # Multilabel input with multiclass=True, but num_classes != 2 (or None) + (rand(size=(7, 3)), randint(high=2, size=(7, 3)), 4, True), + # Binary input, num_classes > 2 + (rand(size=(7,)), randint(high=2, size=(7,)), 4, None), + # Binary input, num_classes == 2 and multiclass not True + (rand(size=(7,)), randint(high=2, size=(7,)), 2, None), + (rand(size=(7,)), randint(high=2, size=(7,)), 2, False), + # Binary input, num_classes == 1 and multiclass=True + (rand(size=(7,)), randint(high=2, size=(7,)), 1, True), + ], +) +def test_incorrect_inputs(preds, target, num_classes, multiclass): + with pytest.raises(ValueError): + _input_format_classification( + preds=preds, target=target, threshold=THRESHOLD, num_classes=num_classes, multiclass=multiclass + ) + + +@pytest.mark.parametrize( + "preds, target, num_classes, multiclass, top_k", + [ + # Topk set with non (md)mc or ml prob data 
+        (_bin.preds[0], _bin.target[0], None, None, 2),
+        (_bin_prob.preds[0], _bin_prob.target[0], None, None, 2),
+        (_mc.preds[0], _mc.target[0], None, None, 2),
+        (_ml.preds[0], _ml.target[0], None, None, 2),
+        (_mlmd.preds[0], _mlmd.target[0], None, None, 2),
+        (_mdmc.preds[0], _mdmc.target[0], None, None, 2),
+        # top_k = 0
+        (_mc_prob_2cls.preds[0], _mc_prob_2cls.target[0], None, None, 0),
+        # top_k = float
+        (_mc_prob_2cls.preds[0], _mc_prob_2cls.target[0], None, None, 0.123),
+        # top_k = 2 with 2 classes, multiclass=False
+        (_mc_prob_2cls.preds[0], _mc_prob_2cls.target[0], None, False, 2),
+        # top_k = number of classes (C dimension)
+        (_mc_prob.preds[0], _mc_prob.target[0], None, None, NUM_CLASSES),
+        # multiclass = True for ml prob inputs, top_k set
+        (_ml_prob.preds[0], _ml_prob.target[0], None, True, 2),
+        # top_k = num_classes for ml prob inputs
+        (_ml_prob.preds[0], _ml_prob.target[0], None, True, NUM_CLASSES),
+    ],
+)
+def test_incorrect_inputs_topk(preds, target, num_classes, multiclass, top_k):
+    with pytest.raises(ValueError):
+        _input_format_classification(
+            preds=preds,
+            target=target,
+            threshold=THRESHOLD,
+            num_classes=num_classes,
+            multiclass=multiclass,
+            top_k=top_k,
+        )
diff --git a/EE/paddlemetric/src/tests/classification/test_iou.py b/EE/paddlemetric/src/tests/classification/test_iou.py
new file mode 100644
index 000000000..af22d787d
--- /dev/null
+++ b/EE/paddlemetric/src/tests/classification/test_iou.py
@@ -0,0 +1,235 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
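+# Quick reference for the hand-computed expectations below: the IoU (Jaccard
+# index) of class c is TP_c / (TP_c + FP_c + FN_c). For example, preds=[0, 0, 1]
+# vs target=[0, 1, 1] gives class 0: TP=1, FP=1, FN=0 -> IoU=1/2, and class 1:
+# TP=1, FP=0, FN=1 -> IoU=1/2. sklearn's jaccard_score serves as the oracle.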
+from functools import partial + +import numpy as np +import pytest +import paddleext.torchapi as B +from sklearn.metrics import jaccard_score as sk_jaccard_score +from paddleext.torchapi import Tensor, tensor + +from tests.classification.inputs import _input_binary, _input_binary_prob +from tests.classification.inputs import _input_multiclass as _input_mcls +from tests.classification.inputs import _input_multiclass_prob as _input_mcls_prob +from tests.classification.inputs import _input_multidim_multiclass as _input_mdmc +from tests.classification.inputs import _input_multidim_multiclass_prob as _input_mdmc_prob +from tests.classification.inputs import _input_multilabel as _input_mlb +from tests.classification.inputs import _input_multilabel_prob as _input_mlb_prob +from tests.helpers.testers import NUM_CLASSES, THRESHOLD, MetricTester +from paddlemetrics.classification.iou import IoU +from paddlemetrics.functional import iou + + +def _sk_iou_binary_prob(preds, target, average=None): + sk_preds = (preds.view(-1).numpy() >= THRESHOLD).astype(np.uint8) + sk_target = target.view(-1).numpy() + + return sk_jaccard_score(y_true=sk_target, y_pred=sk_preds, average=average) + + +def _sk_iou_binary(preds, target, average=None): + sk_preds = preds.view(-1).numpy() + sk_target = target.view(-1).numpy() + + return sk_jaccard_score(y_true=sk_target, y_pred=sk_preds, average=average) + + +def _sk_iou_multilabel_prob(preds, target, average=None): + sk_preds = (preds.view(-1).numpy() >= THRESHOLD).astype(np.uint8) + sk_target = target.view(-1).numpy() + + return sk_jaccard_score(y_true=sk_target, y_pred=sk_preds, average=average) + + +def _sk_iou_multilabel(preds, target, average=None): + sk_preds = preds.view(-1).numpy() + sk_target = target.view(-1).numpy() + + return sk_jaccard_score(y_true=sk_target, y_pred=sk_preds, average=average) + + +def _sk_iou_multiclass_prob(preds, target, average=None): + sk_preds = B.argmax(preds, dim=len(preds.shape) - 1).view(-1).numpy() + sk_target = target.view(-1).numpy() + + return sk_jaccard_score(y_true=sk_target, y_pred=sk_preds, average=average) + + +def _sk_iou_multiclass(preds, target, average=None): + sk_preds = preds.view(-1).numpy() + sk_target = target.view(-1).numpy() + + return sk_jaccard_score(y_true=sk_target, y_pred=sk_preds, average=average) + + +def _sk_iou_multidim_multiclass_prob(preds, target, average=None): + sk_preds = B.argmax(preds, dim=len(preds.shape) - 2).view(-1).numpy() + sk_target = target.view(-1).numpy() + + return sk_jaccard_score(y_true=sk_target, y_pred=sk_preds, average=average) + + +def _sk_iou_multidim_multiclass(preds, target, average=None): + sk_preds = preds.view(-1).numpy() + sk_target = target.view(-1).numpy() + + return sk_jaccard_score(y_true=sk_target, y_pred=sk_preds, average=average) + + +@pytest.mark.parametrize("reduction", ["elementwise_mean", "none"]) +@pytest.mark.parametrize( + "preds, target, sk_metric, num_classes", + [ + (_input_binary_prob.preds, _input_binary_prob.target, _sk_iou_binary_prob, 2), + (_input_binary.preds, _input_binary.target, _sk_iou_binary, 2), + (_input_mlb_prob.preds, _input_mlb_prob.target, _sk_iou_multilabel_prob, 2), + (_input_mlb.preds, _input_mlb.target, _sk_iou_multilabel, 2), + (_input_mcls_prob.preds, _input_mcls_prob.target, _sk_iou_multiclass_prob, NUM_CLASSES), + (_input_mcls.preds, _input_mcls.target, _sk_iou_multiclass, NUM_CLASSES), + (_input_mdmc_prob.preds, _input_mdmc_prob.target, _sk_iou_multidim_multiclass_prob, NUM_CLASSES), + (_input_mdmc.preds, _input_mdmc.target, 
_sk_iou_multidim_multiclass, NUM_CLASSES),
+    ],
+)
+class TestIoU(MetricTester):
+    @pytest.mark.parametrize("ddp", [True, False])
+    @pytest.mark.parametrize("dist_sync_on_step", [True, False])
+    def test_iou(self, reduction, preds, target, sk_metric, num_classes, ddp, dist_sync_on_step):
+        average = "macro" if reduction == "elementwise_mean" else None  # map reduction onto sklearn's average arg
+        self.run_class_metric_test(
+            ddp=ddp,
+            preds=preds,
+            target=target,
+            metric_class=IoU,
+            sk_metric=partial(sk_metric, average=average),
+            dist_sync_on_step=dist_sync_on_step,
+            metric_args={"num_classes": num_classes, "threshold": THRESHOLD, "reduction": reduction},
+        )
+
+    def test_iou_functional(self, reduction, preds, target, sk_metric, num_classes):
+        average = "macro" if reduction == "elementwise_mean" else None  # map reduction onto sklearn's average arg
+        self.run_functional_metric_test(
+            preds,
+            target,
+            metric_functional=iou,
+            sk_metric=partial(sk_metric, average=average),
+            metric_args={"num_classes": num_classes, "threshold": THRESHOLD, "reduction": reduction},
+        )
+
+    def test_iou_differentiability(self, reduction, preds, target, sk_metric, num_classes):
+        self.run_differentiability_test(
+            preds=preds,
+            target=target,
+            metric_module=IoU,
+            metric_functional=iou,
+            metric_args={"num_classes": num_classes, "threshold": THRESHOLD, "reduction": reduction},
+        )
+
+
+@pytest.mark.parametrize(
+    ["half_ones", "reduction", "ignore_index", "expected"],
+    [
+        pytest.param(False, "none", None, Tensor([1, 1, 1])),
+        pytest.param(False, "elementwise_mean", None, Tensor([1])),
+        pytest.param(False, "none", 0, Tensor([1, 1])),
+        pytest.param(True, "none", None, Tensor([0.5, 0.5, 0.5])),
+        pytest.param(True, "elementwise_mean", None, Tensor([0.5])),
+        pytest.param(True, "none", 0, Tensor([2 / 3, 1 / 2])),
+    ],
+)
+def test_iou(half_ones, reduction, ignore_index, expected):
+    preds = (B.arange(120) % 3).view(-1, 1)
+    target = (B.arange(120) % 3).view(-1, 1)
+    if half_ones:
+        preds[:60] = 1
+    iou_val = iou(
+        preds=preds,
+        target=target,
+        ignore_index=ignore_index,
+        reduction=reduction,
+    )
+    assert B.allclose(iou_val, expected, atol=1e-9)
+
+
+# test `absent_score`
+@pytest.mark.parametrize(
+    ["pred", "target", "ignore_index", "absent_score", "num_classes", "expected"],
+    [
+        # Note that -1 is used as the absent_score in almost all tests here to distinguish it from the range of valid
+        # scores the function can return ([0., 1.] range, inclusive).
+        # 2 classes, class 0 is correct everywhere, class 1 is absent.
+        pytest.param([0], [0], None, -1.0, 2, [1.0, -1.0]),
+        pytest.param([0, 0], [0, 0], None, -1.0, 2, [1.0, -1.0]),
+        # absent_score not applied if only class 0 is present and it's the only class.
+        pytest.param([0], [0], None, -1.0, 1, [1.0]),
+        # 2 classes, class 1 is correct everywhere, class 0 is absent.
+        pytest.param([1], [1], None, -1.0, 2, [-1.0, 1.0]),
+        pytest.param([1, 1], [1, 1], None, -1.0, 2, [-1.0, 1.0]),
+        # When 0 index ignored, class 0 does not get a score (not even the absent_score).
+        pytest.param([1], [1], 0, -1.0, 2, [1.0]),
+        # 3 classes. Only 0 and 2 are present, and are perfectly predicted. 1 should get absent_score.
+        pytest.param([0, 2], [0, 2], None, -1.0, 3, [1.0, -1.0, 1.0]),
+        pytest.param([2, 0], [2, 0], None, -1.0, 3, [1.0, -1.0, 1.0]),
+        # 3 classes. Only 0 and 1 are present, and are perfectly predicted. 2 should get absent_score.
+ pytest.param([0, 1], [0, 1], None, -1.0, 3, [1.0, 1.0, -1.0]), + pytest.param([1, 0], [1, 0], None, -1.0, 3, [1.0, 1.0, -1.0]), + # 3 classes, class 0 is 0.5 IoU, class 1 is 0 IoU (in pred but not target; should not get absent_score), class + # 2 is absent. + pytest.param([0, 1], [0, 0], None, -1.0, 3, [0.5, 0.0, -1.0]), + # 3 classes, class 0 is 0.5 IoU, class 1 is 0 IoU (in target but not pred; should not get absent_score), class + # 2 is absent. + pytest.param([0, 0], [0, 1], None, -1.0, 3, [0.5, 0.0, -1.0]), + # Sanity checks with absent_score of 1.0. + pytest.param([0, 2], [0, 2], None, 1.0, 3, [1.0, 1.0, 1.0]), + pytest.param([0, 2], [0, 2], 0, 1.0, 3, [1.0, 1.0]), + ], +) +def test_iou_absent_score(pred, target, ignore_index, absent_score, num_classes, expected): + iou_val = iou( + preds=tensor(pred), + target=tensor(target), + ignore_index=ignore_index, + absent_score=absent_score, + num_classes=num_classes, + reduction="none", + ) + assert B.allclose(iou_val, tensor(expected).to(iou_val)) + + +# example data taken from +# https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_ranking.py +@pytest.mark.parametrize( + ["pred", "target", "ignore_index", "num_classes", "reduction", "expected"], + [ + # Ignoring an index outside of [0, num_classes-1] should have no effect. + pytest.param([0, 1, 1, 2, 2], [0, 1, 2, 2, 2], None, 3, "none", [1, 1 / 2, 2 / 3]), + pytest.param([0, 1, 1, 2, 2], [0, 1, 2, 2, 2], -1, 3, "none", [1, 1 / 2, 2 / 3]), + pytest.param([0, 1, 1, 2, 2], [0, 1, 2, 2, 2], 255, 3, "none", [1, 1 / 2, 2 / 3]), + # Ignoring a valid index drops only that index from the result. + pytest.param([0, 1, 1, 2, 2], [0, 1, 2, 2, 2], 0, 3, "none", [1 / 2, 2 / 3]), + pytest.param([0, 1, 1, 2, 2], [0, 1, 2, 2, 2], 1, 3, "none", [1, 2 / 3]), + pytest.param([0, 1, 1, 2, 2], [0, 1, 2, 2, 2], 2, 3, "none", [1, 1]), + # When reducing to mean or sum, the ignored index does not contribute to the output. + pytest.param([0, 1, 1, 2, 2], [0, 1, 2, 2, 2], 0, 3, "elementwise_mean", [7 / 12]), + pytest.param([0, 1, 1, 2, 2], [0, 1, 2, 2, 2], 0, 3, "sum", [7 / 6]), + ], +) +def test_iou_ignore_index(pred, target, ignore_index, num_classes, reduction, expected): + iou_val = iou( + preds=tensor(pred), + target=tensor(target), + ignore_index=ignore_index, + num_classes=num_classes, + reduction=reduction, + ) + assert B.allclose(iou_val, tensor(expected).to(iou_val)) diff --git a/EE/paddlemetric/src/tests/classification/test_kl_divergence.py b/EE/paddlemetric/src/tests/classification/test_kl_divergence.py new file mode 100644 index 000000000..b5137c3af --- /dev/null +++ b/EE/paddlemetric/src/tests/classification/test_kl_divergence.py @@ -0,0 +1,114 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
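+# Quick reference: KL(p || q) = sum_i p_i * log(p_i / q_i). The comparison
+# baseline below is scipy.stats.entropy(p, q, axis=1), which normalizes its
+# inputs to proper distributions and computes this relative entropy row-wise.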
+from collections import namedtuple
+from functools import partial
+from typing import Optional
+
+import numpy as np
+import pytest
+import paddleext.torchapi as B
+from scipy.stats import entropy
+from paddleext.torchapi import Tensor
+
+from tests.helpers import seed_all
+from tests.helpers.testers import BATCH_SIZE, EXTRA_DIM, NUM_BATCHES, MetricTester
+from paddlemetrics.classification import KLDivergence
+from paddlemetrics.functional import kl_divergence
+
+seed_all(42)
+
+Input = namedtuple("Input", ["p", "q"])
+
+_probs_inputs = Input(
+    p=B.rand(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM),
+    q=B.rand(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM),
+)
+
+_log_probs_inputs = Input(
+    p=B.rand(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM).softmax(dim=-1).log(),
+    q=B.rand(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM).softmax(dim=-1).log(),
+)
+
+
+def _sk_metric(p: Tensor, q: Tensor, log_prob: bool, reduction: Optional[str] = "mean"):
+    if log_prob:
+        p = p.softmax(dim=-1)
+        q = q.softmax(dim=-1)
+    res = entropy(p, q, axis=1)
+    if reduction == "mean":
+        return np.mean(res)
+    if reduction == "sum":
+        return np.sum(res)
+    return res
+
+
+@pytest.mark.parametrize("reduction", ["mean", "sum"])
+@pytest.mark.parametrize(
+    "p, q, log_prob", [(_probs_inputs.p, _probs_inputs.q, False), (_log_probs_inputs.p, _log_probs_inputs.q, True)]
+)
+class TestKLDivergence(MetricTester):
+    atol = 1e-6
+
+    @pytest.mark.parametrize("ddp", [True, False])
+    @pytest.mark.parametrize("dist_sync_on_step", [True, False])
+    def test_kldivergence(self, reduction, p, q, log_prob, ddp, dist_sync_on_step):
+        self.run_class_metric_test(
+            ddp,
+            p,
+            q,
+            KLDivergence,
+            partial(_sk_metric, log_prob=log_prob, reduction=reduction),
+            dist_sync_on_step,
+            metric_args=dict(log_prob=log_prob, reduction=reduction),
+        )
+
+    def test_kldivergence_functional(self, reduction, p, q, log_prob):
+        self.run_functional_metric_test(
+            p,
+            q,
+            kl_divergence,
+            partial(_sk_metric, log_prob=log_prob, reduction=reduction),
+            metric_args=dict(log_prob=log_prob, reduction=reduction),
+        )
+
+    def test_kldivergence_differentiability(self, reduction, p, q, log_prob):
+        self.run_differentiability_test(
+            p,
+            q,
+            metric_module=KLDivergence,
+            metric_functional=kl_divergence,
+            metric_args=dict(log_prob=log_prob, reduction=reduction),
+        )
+
+    # KLDivergence half + cpu does not work due to missing support in B.clamp
+    @pytest.mark.xfail(reason="KLDivergence metric does not support cpu + half precision")
+    def test_kldivergence_half_cpu(self, reduction, p, q, log_prob):
+        self.run_precision_test_cpu(p, q, KLDivergence, kl_divergence, {"log_prob": log_prob, "reduction": reduction})
+
+    @pytest.mark.skipif(not B.cuda.is_available(), reason="test requires cuda")
+    def test_kldivergence_half_gpu(self, reduction, p, q, log_prob):
+        self.run_precision_test_gpu(p, q, KLDivergence, kl_divergence, {"log_prob": log_prob, "reduction": reduction})
+
+
+def test_error_on_different_shape():
+    metric = KLDivergence()
+    with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
+        metric(B.randn(100), B.randn(50))
+
+
+def test_error_on_multidim_tensors():
+    metric = KLDivergence()
+    with pytest.raises(ValueError, match="Expected both p and q distribution to be 2D but got 3 and 3 respectively"):
+        metric(B.randn(10, 20, 5), B.randn(10, 20, 5))
diff --git a/EE/paddlemetric/src/tests/classification/test_matthews_corrcoef.py b/EE/paddlemetric/src/tests/classification/test_matthews_corrcoef.py
new file mode 100644
index
000000000..ce1a5a90d --- /dev/null +++ b/EE/paddlemetric/src/tests/classification/test_matthews_corrcoef.py @@ -0,0 +1,142 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import numpy as np +import pytest +import paddleext.torchapi as B +from sklearn.metrics import matthews_corrcoef as sk_matthews_corrcoef + +from tests.classification.inputs import _input_binary, _input_binary_prob +from tests.classification.inputs import _input_multiclass as _input_mcls +from tests.classification.inputs import _input_multiclass_prob as _input_mcls_prob +from tests.classification.inputs import _input_multidim_multiclass as _input_mdmc +from tests.classification.inputs import _input_multidim_multiclass_prob as _input_mdmc_prob +from tests.classification.inputs import _input_multilabel as _input_mlb +from tests.classification.inputs import _input_multilabel_prob as _input_mlb_prob +from tests.helpers import seed_all +from tests.helpers.testers import NUM_CLASSES, THRESHOLD, MetricTester +from paddlemetrics.classification.matthews_corrcoef import MatthewsCorrcoef +from paddlemetrics.functional.classification.matthews_corrcoef import matthews_corrcoef + +seed_all(42) + + +def _sk_matthews_corrcoef_binary_prob(preds, target): + sk_preds = (preds.view(-1).numpy() >= THRESHOLD).astype(np.uint8) + sk_target = target.view(-1).numpy() + + return sk_matthews_corrcoef(y_true=sk_target, y_pred=sk_preds) + + +def _sk_matthews_corrcoef_binary(preds, target): + sk_preds = preds.view(-1).numpy() + sk_target = target.view(-1).numpy() + + return sk_matthews_corrcoef(y_true=sk_target, y_pred=sk_preds) + + +def _sk_matthews_corrcoef_multilabel_prob(preds, target): + sk_preds = (preds.view(-1).numpy() >= THRESHOLD).astype(np.uint8) + sk_target = target.view(-1).numpy() + + return sk_matthews_corrcoef(y_true=sk_target, y_pred=sk_preds) + + +def _sk_matthews_corrcoef_multilabel(preds, target): + sk_preds = preds.view(-1).numpy() + sk_target = target.view(-1).numpy() + + return sk_matthews_corrcoef(y_true=sk_target, y_pred=sk_preds) + + +def _sk_matthews_corrcoef_multiclass_prob(preds, target): + sk_preds = B.argmax(preds, dim=len(preds.shape) - 1).view(-1).numpy() + sk_target = target.view(-1).numpy() + + return sk_matthews_corrcoef(y_true=sk_target, y_pred=sk_preds) + + +def _sk_matthews_corrcoef_multiclass(preds, target): + sk_preds = preds.view(-1).numpy() + sk_target = target.view(-1).numpy() + + return sk_matthews_corrcoef(y_true=sk_target, y_pred=sk_preds) + + +def _sk_matthews_corrcoef_multidim_multiclass_prob(preds, target): + sk_preds = B.argmax(preds, dim=len(preds.shape) - 2).view(-1).numpy() + sk_target = target.view(-1).numpy() + + return sk_matthews_corrcoef(y_true=sk_target, y_pred=sk_preds) + + +def _sk_matthews_corrcoef_multidim_multiclass(preds, target): + sk_preds = preds.view(-1).numpy() + sk_target = target.view(-1).numpy() + + return sk_matthews_corrcoef(y_true=sk_target, y_pred=sk_preds) + + +@pytest.mark.parametrize( + "preds, target, sk_metric, num_classes", + [ + 
(_input_binary_prob.preds, _input_binary_prob.target, _sk_matthews_corrcoef_binary_prob, 2), + (_input_binary.preds, _input_binary.target, _sk_matthews_corrcoef_binary, 2), + (_input_mlb_prob.preds, _input_mlb_prob.target, _sk_matthews_corrcoef_multilabel_prob, 2), + (_input_mlb.preds, _input_mlb.target, _sk_matthews_corrcoef_multilabel, 2), + (_input_mcls_prob.preds, _input_mcls_prob.target, _sk_matthews_corrcoef_multiclass_prob, NUM_CLASSES), + (_input_mcls.preds, _input_mcls.target, _sk_matthews_corrcoef_multiclass, NUM_CLASSES), + (_input_mdmc_prob.preds, _input_mdmc_prob.target, _sk_matthews_corrcoef_multidim_multiclass_prob, NUM_CLASSES), + (_input_mdmc.preds, _input_mdmc.target, _sk_matthews_corrcoef_multidim_multiclass, NUM_CLASSES), + ], +) +class TestMatthewsCorrCoef(MetricTester): + @pytest.mark.parametrize("ddp", [True, False]) + @pytest.mark.parametrize("dist_sync_on_step", [True, False]) + def test_matthews_corrcoef(self, preds, target, sk_metric, num_classes, ddp, dist_sync_on_step): + self.run_class_metric_test( + ddp=ddp, + preds=preds, + target=target, + metric_class=MatthewsCorrcoef, + sk_metric=sk_metric, + dist_sync_on_step=dist_sync_on_step, + metric_args={ + "num_classes": num_classes, + "threshold": THRESHOLD, + }, + ) + + def test_matthews_corrcoef_functional(self, preds, target, sk_metric, num_classes): + self.run_functional_metric_test( + preds, + target, + metric_functional=matthews_corrcoef, + sk_metric=sk_metric, + metric_args={ + "num_classes": num_classes, + "threshold": THRESHOLD, + }, + ) + + def test_matthews_corrcoef_differentiability(self, preds, target, sk_metric, num_classes): + self.run_differentiability_test( + preds=preds, + target=target, + metric_module=MatthewsCorrcoef, + metric_functional=matthews_corrcoef, + metric_args={ + "num_classes": num_classes, + "threshold": THRESHOLD, + }, + ) diff --git a/EE/paddlemetric/src/tests/classification/test_precision_recall.py b/EE/paddlemetric/src/tests/classification/test_precision_recall.py new file mode 100644 index 000000000..981b44ab6 --- /dev/null +++ b/EE/paddlemetric/src/tests/classification/test_precision_recall.py @@ -0,0 +1,461 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
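+# Quick reference for the expectations below: precision = TP / (TP + FP) and
+# recall = TP / (TP + FN), computed per class and then combined according to
+# `average`; a 0/0 ratio is resolved by sklearn's zero_division argument.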
+from functools import partial +from typing import Callable, Optional + +import numpy as np +import pytest +import paddleext.torchapi as B +from sklearn.metrics import precision_score, recall_score +from paddleext.torchapi import Tensor, tensor + +from tests.classification.inputs import _input_binary, _input_binary_logits, _input_binary_prob +from tests.classification.inputs import _input_multiclass as _input_mcls +from tests.classification.inputs import _input_multiclass_logits as _input_mcls_logits +from tests.classification.inputs import _input_multiclass_prob as _input_mcls_prob +from tests.classification.inputs import _input_multiclass_with_missing_class as _input_miss_class +from tests.classification.inputs import _input_multidim_multiclass as _input_mdmc +from tests.classification.inputs import _input_multidim_multiclass_prob as _input_mdmc_prob +from tests.classification.inputs import _input_multilabel as _input_mlb +from tests.classification.inputs import _input_multilabel_logits as _input_mlb_logits +from tests.classification.inputs import _input_multilabel_prob as _input_mlb_prob +from tests.helpers import seed_all +from tests.helpers.testers import NUM_BATCHES, NUM_CLASSES, THRESHOLD, MetricTester +from paddlemetrics import Metric, Precision, Recall +from paddlemetrics.functional import precision, precision_recall, recall +from paddlemetrics.utilities.checks import _input_format_classification +from paddlemetrics.utilities.enums import AverageMethod + +seed_all(42) + + +def _sk_prec_recall(preds, target, sk_fn, num_classes, average, multiclass, ignore_index, mdmc_average=None): + # todo: `mdmc_average` is unused + if average == "none": + average = None + if num_classes == 1: + average = "binary" + + labels = list(range(num_classes)) + try: + labels.remove(ignore_index) + except ValueError: + pass + + sk_preds, sk_target, _ = _input_format_classification( + preds, target, THRESHOLD, num_classes=num_classes, multiclass=multiclass + ) + sk_preds, sk_target = sk_preds.numpy(), sk_target.numpy() + + sk_scores = sk_fn(sk_target, sk_preds, average=average, zero_division=0, labels=labels) + + if len(labels) != num_classes and not average: + sk_scores = np.insert(sk_scores, ignore_index, np.nan) + + return sk_scores + + +def _sk_prec_recall_multidim_multiclass( + preds, target, sk_fn, num_classes, average, multiclass, ignore_index, mdmc_average +): + preds, target, _ = _input_format_classification( + preds, target, threshold=THRESHOLD, num_classes=num_classes, multiclass=multiclass + ) + + if mdmc_average == "global": + preds = B.transpose(preds, 1, 2).reshape(-1, preds.shape[1]) + target = B.transpose(target, 1, 2).reshape(-1, target.shape[1]) + + return _sk_prec_recall(preds, target, sk_fn, num_classes, average, False, ignore_index) + if mdmc_average == "samplewise": + scores = [] + + for i in range(preds.shape[0]): + pred_i = preds[i, ...].T + target_i = target[i, ...].T + scores_i = _sk_prec_recall(pred_i, target_i, sk_fn, num_classes, average, False, ignore_index) + + scores.append(np.expand_dims(scores_i, 0)) + + return np.concatenate(scores).mean(axis=0) + + +@pytest.mark.parametrize("metric, fn_metric", [(Precision, precision), (Recall, recall)]) +@pytest.mark.parametrize( + "average, mdmc_average, num_classes, ignore_index, match_str", + [ + ("wrong", None, None, None, "`average`"), + ("micro", "wrong", None, None, "`mdmc"), + ("macro", None, None, None, "number of classes"), + ("macro", None, 1, 0, "ignore_index"), + ], +) +def test_wrong_params(metric, fn_metric, average, 
mdmc_average, num_classes, ignore_index, match_str):
+    with pytest.raises(ValueError, match=match_str):
+        metric(
+            average=average,
+            mdmc_average=mdmc_average,
+            num_classes=num_classes,
+            ignore_index=ignore_index,
+        )
+
+    with pytest.raises(ValueError, match=match_str):
+        fn_metric(
+            _input_binary.preds[0],
+            _input_binary.target[0],
+            average=average,
+            mdmc_average=mdmc_average,
+            num_classes=num_classes,
+            ignore_index=ignore_index,
+        )
+
+    with pytest.raises(ValueError, match=match_str):
+        precision_recall(
+            _input_binary.preds[0],
+            _input_binary.target[0],
+            average=average,
+            mdmc_average=mdmc_average,
+            num_classes=num_classes,
+            ignore_index=ignore_index,
+        )
+
+
+@pytest.mark.parametrize("metric_class, metric_fn", [(Recall, recall), (Precision, precision)])
+def test_zero_division(metric_class, metric_fn):
+    """Test that zero_division works correctly (currently it just sets the score to 0)."""
+
+    preds = tensor([0, 2, 1, 1])
+    target = tensor([2, 1, 2, 1])
+
+    cl_metric = metric_class(average="none", num_classes=3)
+    cl_metric(preds, target)
+
+    result_cl = cl_metric.compute()
+    result_fn = metric_fn(preds, target, average="none", num_classes=3)
+
+    assert result_cl[0] == result_fn[0] == 0
+
+
+@pytest.mark.parametrize("metric_class, metric_fn", [(Recall, recall), (Precision, precision)])
+def test_no_support(metric_class, metric_fn):
+    """Test the rare edge case where only one class is present in the target,
+    ignore_index is set to exactly that class, and the average method is
+    equal to 'weighted'.
+
+    This would mean that the sum of weights equals zero, and would, without
+    taking care of this case, return NaN. However, the reduction function
+    should catch that and set the metric to equal the value of zero_division
+    in this case (zero_division is for now not configurable and equals 0).
+ """ + + preds = tensor([1, 1, 0, 0]) + target = tensor([0, 0, 0, 0]) + + cl_metric = metric_class(average="weighted", num_classes=2, ignore_index=0) + cl_metric(preds, target) + + result_cl = cl_metric.compute() + result_fn = metric_fn(preds, target, average="weighted", num_classes=2, ignore_index=0) + + assert result_cl == result_fn == 0 + + +@pytest.mark.parametrize( + "metric_class, metric_fn, sk_fn", [(Recall, recall, recall_score), (Precision, precision, precision_score)] +) +@pytest.mark.parametrize("average", ["micro", "macro", None, "weighted", "samples"]) +@pytest.mark.parametrize("ignore_index", [None, 0]) +@pytest.mark.parametrize( + "preds, target, num_classes, multiclass, mdmc_average, sk_wrapper", + [ + (_input_binary_logits.preds, _input_binary_logits.target, 1, None, None, _sk_prec_recall), + (_input_binary_prob.preds, _input_binary_prob.target, 1, None, None, _sk_prec_recall), + (_input_binary.preds, _input_binary.target, 1, False, None, _sk_prec_recall), + (_input_mlb_logits.preds, _input_mlb_logits.target, NUM_CLASSES, None, None, _sk_prec_recall), + (_input_mlb_prob.preds, _input_mlb_prob.target, NUM_CLASSES, None, None, _sk_prec_recall), + (_input_mlb.preds, _input_mlb.target, NUM_CLASSES, False, None, _sk_prec_recall), + (_input_mcls_logits.preds, _input_mcls_logits.target, NUM_CLASSES, None, None, _sk_prec_recall), + (_input_mcls_prob.preds, _input_mcls_prob.target, NUM_CLASSES, None, None, _sk_prec_recall), + (_input_mcls.preds, _input_mcls.target, NUM_CLASSES, None, None, _sk_prec_recall), + (_input_mdmc.preds, _input_mdmc.target, NUM_CLASSES, None, "global", _sk_prec_recall_multidim_multiclass), + ( + _input_mdmc_prob.preds, + _input_mdmc_prob.target, + NUM_CLASSES, + None, + "global", + _sk_prec_recall_multidim_multiclass, + ), + (_input_mdmc.preds, _input_mdmc.target, NUM_CLASSES, None, "samplewise", _sk_prec_recall_multidim_multiclass), + ( + _input_mdmc_prob.preds, + _input_mdmc_prob.target, + NUM_CLASSES, + None, + "samplewise", + _sk_prec_recall_multidim_multiclass, + ), + ], +) +class TestPrecisionRecall(MetricTester): + @pytest.mark.parametrize("ddp", [False]) + @pytest.mark.parametrize("dist_sync_on_step", [False]) + def test_precision_recall_class( + self, + ddp: bool, + dist_sync_on_step: bool, + preds: Tensor, + target: Tensor, + sk_wrapper: Callable, + metric_class: Metric, + metric_fn: Callable, + sk_fn: Callable, + multiclass: Optional[bool], + num_classes: Optional[int], + average: str, + mdmc_average: Optional[str], + ignore_index: Optional[int], + ): + # todo: `metric_fn` is unused + if num_classes == 1 and average != "micro": + pytest.skip("Only test binary data for 'micro' avg (equivalent of 'binary' in sklearn)") + + if ignore_index is not None and preds.ndim == 2: + pytest.skip("Skipping ignore_index test with binary inputs.") + + if average == "weighted" and ignore_index is not None and mdmc_average is not None: + pytest.skip("Ignore special case where we are ignoring entire sample for 'weighted' average") + + self.run_class_metric_test( + ddp=ddp, + preds=preds, + target=target, + metric_class=metric_class, + sk_metric=partial( + sk_wrapper, + sk_fn=sk_fn, + average=average, + num_classes=num_classes, + multiclass=multiclass, + ignore_index=ignore_index, + mdmc_average=mdmc_average, + ), + dist_sync_on_step=dist_sync_on_step, + metric_args={ + "num_classes": num_classes, + "average": average, + "threshold": THRESHOLD, + "multiclass": multiclass, + "ignore_index": ignore_index, + "mdmc_average": mdmc_average, + }, + 
check_dist_sync_on_step=True,
+            check_batch=True,
+        )
+
+    def test_precision_recall_fn(
+        self,
+        preds: Tensor,
+        target: Tensor,
+        sk_wrapper: Callable,
+        metric_class: Metric,
+        metric_fn: Callable,
+        sk_fn: Callable,
+        multiclass: Optional[bool],
+        num_classes: Optional[int],
+        average: str,
+        mdmc_average: Optional[str],
+        ignore_index: Optional[int],
+    ):
+        # todo: `metric_class` is unused
+        if num_classes == 1 and average != "micro":
+            pytest.skip("Only test binary data for 'micro' avg (equivalent of 'binary' in sklearn)")
+
+        if ignore_index is not None and preds.ndim == 2:
+            pytest.skip("Skipping ignore_index test with binary inputs.")
+
+        if average == "weighted" and ignore_index is not None and mdmc_average is not None:
+            pytest.skip("Ignore special case where we are ignoring entire sample for 'weighted' average")
+
+        self.run_functional_metric_test(
+            preds,
+            target,
+            metric_functional=metric_fn,
+            sk_metric=partial(
+                sk_wrapper,
+                sk_fn=sk_fn,
+                average=average,
+                num_classes=num_classes,
+                multiclass=multiclass,
+                ignore_index=ignore_index,
+                mdmc_average=mdmc_average,
+            ),
+            metric_args={
+                "num_classes": num_classes,
+                "average": average,
+                "threshold": THRESHOLD,
+                "multiclass": multiclass,
+                "ignore_index": ignore_index,
+                "mdmc_average": mdmc_average,
+            },
+        )
+
+    def test_precision_recall_differentiability(
+        self,
+        preds: Tensor,
+        target: Tensor,
+        sk_wrapper: Callable,
+        metric_class: Metric,
+        metric_fn: Callable,
+        sk_fn: Callable,
+        multiclass: Optional[bool],
+        num_classes: Optional[int],
+        average: str,
+        mdmc_average: Optional[str],
+        ignore_index: Optional[int],
+    ):
+        # todo: `metric_class` is unused
+        if num_classes == 1 and average != "micro":
+            pytest.skip("Only test binary data for 'micro' avg (equivalent of 'binary' in sklearn)")
+
+        if ignore_index is not None and preds.ndim == 2:
+            pytest.skip("Skipping ignore_index test with binary inputs.")
+
+        if average == "weighted" and ignore_index is not None and mdmc_average is not None:
+            pytest.skip("Ignore special case where we are ignoring entire sample for 'weighted' average")
+
+        self.run_differentiability_test(
+            preds=preds,
+            target=target,
+            metric_module=metric_class,
+            metric_functional=metric_fn,
+            metric_args={
+                "num_classes": num_classes,
+                "average": average,
+                "threshold": THRESHOLD,
+                "multiclass": multiclass,
+                "ignore_index": ignore_index,
+                "mdmc_average": mdmc_average,
+            },
+        )
+
+
+@pytest.mark.parametrize("average", ["micro", "macro", None, "weighted", "samples"])
+def test_precision_recall_joint(average):
+    """A simple test of the joint precision_recall metric.
+
+    No need to test this thoroughly, as it is just a combination of precision and recall, which are already tested
+    thoroughly.
+ """ + + precision_result = precision( + _input_mcls_prob.preds[0], _input_mcls_prob.target[0], average=average, num_classes=NUM_CLASSES + ) + recall_result = recall( + _input_mcls_prob.preds[0], _input_mcls_prob.target[0], average=average, num_classes=NUM_CLASSES + ) + + prec_recall_result = precision_recall( + _input_mcls_prob.preds[0], _input_mcls_prob.target[0], average=average, num_classes=NUM_CLASSES + ) + + assert B.allclose(precision_result, prec_recall_result[0]) + assert B.allclose(recall_result, prec_recall_result[1]) + + +_mc_k_target = tensor([0, 1, 2]) +_mc_k_preds = tensor([[0.35, 0.4, 0.25], [0.1, 0.5, 0.4], [0.2, 0.1, 0.7]]) +_ml_k_target = tensor([[0, 1, 0], [1, 1, 0], [0, 0, 0]]) +_ml_k_preds = tensor([[0.9, 0.2, 0.75], [0.1, 0.7, 0.8], [0.6, 0.1, 0.7]]) + + +@pytest.mark.parametrize("metric_class, metric_fn", [(Recall, recall), (Precision, precision)]) +@pytest.mark.parametrize( + "k, preds, target, average, expected_prec, expected_recall", + [ + (1, _mc_k_preds, _mc_k_target, "micro", tensor(2 / 3), tensor(2 / 3)), + (2, _mc_k_preds, _mc_k_target, "micro", tensor(1 / 2), tensor(1.0)), + (1, _ml_k_preds, _ml_k_target, "micro", tensor(0.0), tensor(0.0)), + (2, _ml_k_preds, _ml_k_target, "micro", tensor(1 / 6), tensor(1 / 3)), + ], +) +def test_top_k( + metric_class, + metric_fn, + k: int, + preds: Tensor, + target: Tensor, + average: str, + expected_prec: Tensor, + expected_recall: Tensor, +): + """A simple test to check that top_k works as expected. + + Just a sanity check, the tests in StatScores should already guarantee the correctness of results. + """ + + class_metric = metric_class(top_k=k, average=average, num_classes=3) + class_metric.update(preds, target) + + if metric_class.__name__ == "Precision": + result = expected_prec + else: + result = expected_recall + + assert B.equal(class_metric.compute(), result) + assert B.equal(metric_fn(preds, target, top_k=k, average=average, num_classes=3), result) + + +@pytest.mark.parametrize("metric_class, metric_fn", [(Precision, precision), (Recall, recall)]) +@pytest.mark.parametrize( + "ignore_index, expected", [(None, B.tensor([1.0, np.nan])), (0, B.tensor([np.nan, np.nan]))] +) +def test_class_not_present(metric_class, metric_fn, ignore_index, expected): + """This tests that when metric is computed per class and a given class is not present in both the `preds` and + `target`, the resulting score is `nan`.""" + preds = B.tensor([0, 0, 0]) + target = B.tensor([0, 0, 0]) + num_classes = 2 + + # test functional + result_fn = metric_fn(preds, target, average=AverageMethod.NONE, num_classes=num_classes, ignore_index=ignore_index) + assert B.allclose(expected, result_fn, equal_nan=True) + + # test class + cl_metric = metric_class(average=AverageMethod.NONE, num_classes=num_classes, ignore_index=ignore_index) + cl_metric(preds, target) + result_cl = cl_metric.compute() + assert B.allclose(expected, result_cl, equal_nan=True) + + +@pytest.mark.parametrize("average", ["micro", "macro", "weighted"]) +@pytest.mark.parametrize( + "metric_class, metric_functional, sk_fn", [(Precision, precision, precision_score), (Recall, recall, recall_score)] +) +def test_same_input(metric_class, metric_functional, sk_fn, average): + preds = _input_miss_class.preds + target = _input_miss_class.target + preds_flat = B.cat(list(preds), dim=0) + target_flat = B.cat(list(target), dim=0) + + mc = metric_class(num_classes=NUM_CLASSES, average=average) + for i in range(NUM_BATCHES): + mc.update(preds[i], target[i]) + class_res = mc.compute() + func_res = 
metric_functional(preds_flat, target_flat, num_classes=NUM_CLASSES, average=average)
+    sk_res = sk_fn(target_flat, preds_flat, average=average, zero_division=1)
+
+    assert B.allclose(class_res, B.tensor(sk_res).float())
+    assert B.allclose(func_res, B.tensor(sk_res).float())
diff --git a/EE/paddlemetric/src/tests/classification/test_precision_recall_curve.py b/EE/paddlemetric/src/tests/classification/test_precision_recall_curve.py
new file mode 100644
index 000000000..acd555ca2
--- /dev/null
+++ b/EE/paddlemetric/src/tests/classification/test_precision_recall_curve.py
@@ -0,0 +1,121 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from functools import partial
+
+import numpy as np
+import pytest
+import paddleext.torchapi as B
+from sklearn.metrics import precision_recall_curve as sk_precision_recall_curve
+from paddleext.torchapi import tensor
+
+from tests.classification.inputs import _input_binary_prob
+from tests.classification.inputs import _input_multiclass_prob as _input_mcls_prob
+from tests.classification.inputs import _input_multidim_multiclass_prob as _input_mdmc_prob
+from tests.helpers import seed_all
+from tests.helpers.testers import NUM_CLASSES, MetricTester
+from paddlemetrics.classification.precision_recall_curve import PrecisionRecallCurve
+from paddlemetrics.functional import precision_recall_curve
+
+seed_all(42)
+
+
+def _sk_precision_recall_curve(y_true, probas_pred, num_classes=1):
+    """Adjusted comparison function that can also handle multiclass."""
+    if num_classes == 1:
+        return sk_precision_recall_curve(y_true, probas_pred)
+
+    precision, recall, thresholds = [], [], []
+    for i in range(num_classes):
+        y_true_temp = np.zeros_like(y_true)
+        y_true_temp[y_true == i] = 1
+        res = sk_precision_recall_curve(y_true_temp, probas_pred[:, i])
+        precision.append(res[0])
+        recall.append(res[1])
+        thresholds.append(res[2])
+    return precision, recall, thresholds
+
+
+def _sk_prec_rc_binary_prob(preds, target, num_classes=1):
+    sk_preds = preds.view(-1).numpy()
+    sk_target = target.view(-1).numpy()
+
+    return _sk_precision_recall_curve(y_true=sk_target, probas_pred=sk_preds, num_classes=num_classes)
+
+
+def _sk_prec_rc_multiclass_prob(preds, target, num_classes=1):
+    sk_preds = preds.reshape(-1, num_classes).numpy()
+    sk_target = target.view(-1).numpy()
+
+    return _sk_precision_recall_curve(y_true=sk_target, probas_pred=sk_preds, num_classes=num_classes)
+
+
+def _sk_prec_rc_multidim_multiclass_prob(preds, target, num_classes=1):
+    sk_preds = preds.transpose(0, 1).reshape(num_classes, -1).transpose(0, 1).numpy()
+    sk_target = target.view(-1).numpy()
+    return _sk_precision_recall_curve(y_true=sk_target, probas_pred=sk_preds, num_classes=num_classes)
+
+
+@pytest.mark.parametrize(
+    "preds, target, sk_metric, num_classes",
+    [
+        (_input_binary_prob.preds, _input_binary_prob.target, _sk_prec_rc_binary_prob, 1),
+        (_input_mcls_prob.preds, _input_mcls_prob.target, _sk_prec_rc_multiclass_prob, NUM_CLASSES),
+        (_input_mdmc_prob.preds,
_input_mdmc_prob.target, _sk_prec_rc_multidim_multiclass_prob, NUM_CLASSES), + ], +) +class TestPrecisionRecallCurve(MetricTester): + @pytest.mark.parametrize("ddp", [True, False]) + @pytest.mark.parametrize("dist_sync_on_step", [True, False]) + def test_precision_recall_curve(self, preds, target, sk_metric, num_classes, ddp, dist_sync_on_step): + self.run_class_metric_test( + ddp=ddp, + preds=preds, + target=target, + metric_class=PrecisionRecallCurve, + sk_metric=partial(sk_metric, num_classes=num_classes), + dist_sync_on_step=dist_sync_on_step, + metric_args={"num_classes": num_classes}, + ) + + def test_precision_recall_curve_functional(self, preds, target, sk_metric, num_classes): + self.run_functional_metric_test( + preds, + target, + metric_functional=precision_recall_curve, + sk_metric=partial(sk_metric, num_classes=num_classes), + metric_args={"num_classes": num_classes}, + ) + + def test_precision_recall_curve_differentiability(self, preds, target, sk_metric, num_classes): + self.run_differentiability_test( + preds, + target, + metric_module=PrecisionRecallCurve, + metric_functional=precision_recall_curve, + metric_args={"num_classes": num_classes}, + ) + + +@pytest.mark.parametrize( + ["pred", "target", "expected_p", "expected_r", "expected_t"], + [pytest.param([1, 2, 3, 4], [1, 0, 0, 1], [0.5, 1 / 3, 0.5, 1.0, 1.0], [1, 0.5, 0.5, 0.5, 0.0], [1, 2, 3, 4])], +) +def test_pr_curve(pred, target, expected_p, expected_r, expected_t): + p, r, t = precision_recall_curve(tensor(pred), tensor(target)) + assert p.size() == r.size() + assert p.size(0) == t.size(0) + 1 + + assert B.allclose(p, tensor(expected_p).to(p)) + assert B.allclose(r, tensor(expected_r).to(r)) + assert B.allclose(t, tensor(expected_t).to(t)) diff --git a/EE/paddlemetric/src/tests/classification/test_roc.py b/EE/paddlemetric/src/tests/classification/test_roc.py new file mode 100644 index 000000000..efe453352 --- /dev/null +++ b/EE/paddlemetric/src/tests/classification/test_roc.py @@ -0,0 +1,146 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
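+# Quick reference: a ROC curve sweeps the decision threshold and plots
+# TPR = TP / (TP + FN) against FPR = FP / (FP + TN); sklearn's roc_curve with
+# drop_intermediate=False is used as the oracle so that no points are pruned.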
+from functools import partial
+
+import numpy as np
+import pytest
+import paddleext.torchapi as B
+from sklearn.metrics import roc_curve as sk_roc_curve
+from paddleext.torchapi import tensor
+
+from tests.classification.inputs import _input_binary_prob
+from tests.classification.inputs import _input_multiclass_prob as _input_mcls_prob
+from tests.classification.inputs import _input_multidim_multiclass_prob as _input_mdmc_prob
+from tests.classification.inputs import _input_multilabel_multidim_prob as _input_mlmd_prob
+from tests.classification.inputs import _input_multilabel_prob as _input_mlb_prob
+from tests.helpers import seed_all
+from tests.helpers.testers import NUM_CLASSES, MetricTester
+from paddlemetrics.classification.roc import ROC
+from paddlemetrics.functional import roc
+
+seed_all(42)
+
+
+def _sk_roc_curve(y_true, probas_pred, num_classes: int = 1, multilabel: bool = False):
+    """Adjusted comparison function that can also handle multiclass."""
+    if num_classes == 1:
+        return sk_roc_curve(y_true, probas_pred, drop_intermediate=False)
+
+    fpr, tpr, thresholds = [], [], []
+    for i in range(num_classes):
+        if multilabel:
+            y_true_temp = y_true[:, i]
+        else:
+            y_true_temp = np.zeros_like(y_true)
+            y_true_temp[y_true == i] = 1
+
+        res = sk_roc_curve(y_true_temp, probas_pred[:, i], drop_intermediate=False)
+        fpr.append(res[0])
+        tpr.append(res[1])
+        thresholds.append(res[2])
+    return fpr, tpr, thresholds
+
+
+def _sk_roc_binary_prob(preds, target, num_classes=1):
+    sk_preds = preds.view(-1).numpy()
+    sk_target = target.view(-1).numpy()
+
+    return _sk_roc_curve(y_true=sk_target, probas_pred=sk_preds, num_classes=num_classes)
+
+
+def _sk_roc_multiclass_prob(preds, target, num_classes=1):
+    sk_preds = preds.reshape(-1, num_classes).numpy()
+    sk_target = target.view(-1).numpy()
+
+    return _sk_roc_curve(y_true=sk_target, probas_pred=sk_preds, num_classes=num_classes)
+
+
+def _sk_roc_multidim_multiclass_prob(preds, target, num_classes=1):
+    sk_preds = preds.transpose(0, 1).reshape(num_classes, -1).transpose(0, 1).numpy()
+    sk_target = target.view(-1).numpy()
+    return _sk_roc_curve(y_true=sk_target, probas_pred=sk_preds, num_classes=num_classes)
+
+
+def _sk_roc_multilabel_prob(preds, target, num_classes=1):
+    sk_preds = preds.numpy()
+    sk_target = target.numpy()
+    return _sk_roc_curve(y_true=sk_target, probas_pred=sk_preds, num_classes=num_classes, multilabel=True)
+
+
+def _sk_roc_multilabel_multidim_prob(preds, target, num_classes=1):
+    sk_preds = preds.transpose(0, 1).reshape(num_classes, -1).transpose(0, 1).numpy()
+    sk_target = target.transpose(0, 1).reshape(num_classes, -1).transpose(0, 1).numpy()
+    return _sk_roc_curve(y_true=sk_target, probas_pred=sk_preds, num_classes=num_classes, multilabel=True)
+
+
+@pytest.mark.parametrize(
+    "preds, target, sk_metric, num_classes",
+    [
+        (_input_binary_prob.preds, _input_binary_prob.target, _sk_roc_binary_prob, 1),
+        (_input_mcls_prob.preds, _input_mcls_prob.target, _sk_roc_multiclass_prob, NUM_CLASSES),
+        (_input_mdmc_prob.preds, _input_mdmc_prob.target, _sk_roc_multidim_multiclass_prob, NUM_CLASSES),
+        (_input_mlb_prob.preds, _input_mlb_prob.target, _sk_roc_multilabel_prob, NUM_CLASSES),
+        (_input_mlmd_prob.preds, _input_mlmd_prob.target, _sk_roc_multilabel_multidim_prob, NUM_CLASSES),
+    ],
+)
+class TestROC(MetricTester):
+    @pytest.mark.parametrize("ddp", [True, False])
+    @pytest.mark.parametrize("dist_sync_on_step", [True, False])
+    def test_roc(self, preds, target, sk_metric, num_classes, ddp, dist_sync_on_step):
+
self.run_class_metric_test( + ddp=ddp, + preds=preds, + target=target, + metric_class=ROC, + sk_metric=partial(sk_metric, num_classes=num_classes), + dist_sync_on_step=dist_sync_on_step, + metric_args={"num_classes": num_classes}, + ) + + def test_roc_functional(self, preds, target, sk_metric, num_classes): + self.run_functional_metric_test( + preds, + target, + metric_functional=roc, + sk_metric=partial(sk_metric, num_classes=num_classes), + metric_args={"num_classes": num_classes}, + ) + + def test_roc_differentiability(self, preds, target, sk_metric, num_classes): + self.run_differentiability_test( + preds, + target, + metric_module=ROC, + metric_functional=roc, + metric_args={"num_classes": num_classes}, + ) + + +@pytest.mark.parametrize( + ["pred", "target", "expected_tpr", "expected_fpr"], + [ + pytest.param([0, 1], [0, 1], [0, 1, 1], [0, 0, 1]), + pytest.param([1, 0], [0, 1], [0, 0, 1], [0, 1, 1]), + pytest.param([1, 1], [1, 0], [0, 1], [0, 1]), + pytest.param([1, 0], [1, 0], [0, 1, 1], [0, 0, 1]), + pytest.param([0.5, 0.5], [0, 1], [0, 1], [0, 1]), + ], +) +def test_roc_curve(pred, target, expected_tpr, expected_fpr): + fpr, tpr, thresh = roc(tensor(pred), tensor(target)) + + assert fpr.shape == tpr.shape + assert fpr.size(0) == thresh.size(0) + assert B.allclose(fpr, tensor(expected_fpr).to(fpr)) + assert B.allclose(tpr, tensor(expected_tpr).to(tpr)) diff --git a/EE/paddlemetric/src/tests/classification/test_specificity.py b/EE/paddlemetric/src/tests/classification/test_specificity.py new file mode 100644 index 000000000..90611d06c --- /dev/null +++ b/EE/paddlemetric/src/tests/classification/test_specificity.py @@ -0,0 +1,414 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
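+# Quick reference: specificity = TN / (TN + FP), i.e. the recall of the
+# negative class. The expected statistics are read off sklearn's
+# multilabel_confusion_matrix, whose per-class 2x2 blocks are [[TN, FP], [FN, TP]].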
+import math +from functools import partial +from typing import Callable, Optional + +import numpy as np +import pytest +import paddleext.torchapi as B +from sklearn.metrics import multilabel_confusion_matrix +from paddleext.torchapi import Tensor, tensor + +from tests.classification.inputs import _input_binary, _input_binary_prob +from tests.classification.inputs import _input_multiclass as _input_mcls +from tests.classification.inputs import _input_multiclass_prob as _input_mcls_prob +from tests.classification.inputs import _input_multidim_multiclass as _input_mdmc +from tests.classification.inputs import _input_multidim_multiclass_prob as _input_mdmc_prob +from tests.classification.inputs import _input_multilabel as _input_mlb +from tests.classification.inputs import _input_multilabel_prob as _input_mlb_prob +from tests.helpers import seed_all +from tests.helpers.testers import NUM_CLASSES, THRESHOLD, MetricTester +from paddlemetrics import Metric, Specificity +from paddlemetrics.functional import specificity +from paddlemetrics.functional.classification.stat_scores import _reduce_stat_scores +from paddlemetrics.utilities.checks import _input_format_classification +from paddlemetrics.utilities.enums import AverageMethod + +seed_all(42) + + +def _sk_stats_score(preds, target, reduce, num_classes, multiclass, ignore_index, top_k): + preds, target, _ = _input_format_classification( + preds, target, threshold=THRESHOLD, num_classes=num_classes, multiclass=multiclass, top_k=top_k + ) + sk_preds, sk_target = preds.numpy(), target.numpy() + + if reduce != "macro" and ignore_index is not None and preds.shape[1] > 1: + sk_preds = np.delete(sk_preds, ignore_index, 1) + sk_target = np.delete(sk_target, ignore_index, 1) + + if preds.shape[1] == 1 and reduce == "samples": + sk_target = sk_target.T + sk_preds = sk_preds.T + + sk_stats = multilabel_confusion_matrix( + sk_target, sk_preds, samplewise=(reduce == "samples") and preds.shape[1] != 1 + ) + + if preds.shape[1] == 1 and reduce != "samples": + sk_stats = sk_stats[[1]].reshape(-1, 4)[:, [3, 1, 0, 2]] + else: + sk_stats = sk_stats.reshape(-1, 4)[:, [3, 1, 0, 2]] + + if reduce == "micro": + sk_stats = sk_stats.sum(axis=0, keepdims=True) + + sk_stats = np.concatenate([sk_stats, sk_stats[:, [3]] + sk_stats[:, [0]]], 1) + + if reduce == "micro": + sk_stats = sk_stats[0] + + if reduce == "macro" and ignore_index is not None and preds.shape[1]: + sk_stats[ignore_index, :] = -1 + + if reduce == "micro": + _, fp, tn, _, _ = sk_stats + else: + _, fp, tn, _ = sk_stats[:, 0], sk_stats[:, 1], sk_stats[:, 2], sk_stats[:, 3] + return fp, tn + + +def _sk_spec(preds, target, reduce, num_classes, multiclass, ignore_index, top_k=None, mdmc_reduce=None, stats=None): + + if stats: + fp, tn = stats + else: + stats = _sk_stats_score(preds, target, reduce, num_classes, multiclass, ignore_index, top_k) + fp, tn = stats + + fp, tn = tensor(fp), tensor(tn) + spec = _reduce_stat_scores( + numerator=tn, + denominator=tn + fp, + weights=None if reduce != "weighted" else tn + fp, + average=reduce, + mdmc_average=mdmc_reduce, + ) + if reduce in [None, "none"] and ignore_index is not None and preds.shape[1] > 1: + spec = spec.numpy() + spec = np.insert(spec, ignore_index, math.nan) + spec = tensor(spec) + + return spec + + +def _sk_spec_mdim_mcls(preds, target, reduce, mdmc_reduce, num_classes, multiclass, ignore_index, top_k=None): + preds, target, _ = _input_format_classification( + preds, target, threshold=THRESHOLD, num_classes=num_classes, multiclass=multiclass, 
top_k=top_k
+    )
+
+    if mdmc_reduce == "global":
+        preds = B.transpose(preds, 1, 2).reshape(-1, preds.shape[1])
+        target = B.transpose(target, 1, 2).reshape(-1, target.shape[1])
+        return _sk_spec(preds, target, reduce, num_classes, False, ignore_index, top_k, mdmc_reduce)
+    fp, tn = [], []
+    stats = []
+
+    for i in range(preds.shape[0]):
+        pred_i = preds[i, ...].T
+        target_i = target[i, ...].T
+        fp_i, tn_i = _sk_stats_score(pred_i, target_i, reduce, num_classes, False, ignore_index, top_k)
+        fp.append(fp_i)
+        tn.append(tn_i)
+
+    stats.append(fp)
+    stats.append(tn)
+    return _sk_spec(preds[0], target[0], reduce, num_classes, multiclass, ignore_index, top_k, mdmc_reduce, stats)
+
+
+@pytest.mark.parametrize("metric, fn_metric", [(Specificity, specificity)])
+@pytest.mark.parametrize(
+    "average, mdmc_average, num_classes, ignore_index, match_str",
+    [
+        ("wrong", None, None, None, "`average`"),
+        ("micro", "wrong", None, None, "`mdmc"),
+        ("macro", None, None, None, "number of classes"),
+        ("macro", None, 1, 0, "ignore_index"),
+    ],
+)
+def test_wrong_params(metric, fn_metric, average, mdmc_average, num_classes, ignore_index, match_str):
+    with pytest.raises(ValueError, match=match_str):
+        metric(
+            average=average,
+            mdmc_average=mdmc_average,
+            num_classes=num_classes,
+            ignore_index=ignore_index,
+        )
+
+    with pytest.raises(ValueError, match=match_str):
+        fn_metric(
+            _input_binary.preds[0],
+            _input_binary.target[0],
+            average=average,
+            mdmc_average=mdmc_average,
+            num_classes=num_classes,
+            ignore_index=ignore_index,
+        )
+
+
+@pytest.mark.parametrize("metric_class, metric_fn", [(Specificity, specificity)])
+def test_zero_division(metric_class, metric_fn):
+    """Test that zero_division works correctly (currently it just sets the score to 0)."""
+
+    preds = tensor([1, 2, 1, 1])
+    target = tensor([0, 0, 0, 0])
+
+    cl_metric = metric_class(average="none", num_classes=3)
+    cl_metric(preds, target)
+
+    result_cl = cl_metric.compute()
+    result_fn = metric_fn(preds, target, average="none", num_classes=3)
+
+    assert result_cl[0] == result_fn[0] == 0
+
+
+@pytest.mark.parametrize("metric_class, metric_fn", [(Specificity, specificity)])
+def test_no_support(metric_class, metric_fn):
+    """Test the rare edge case where only one class is present in the target,
+    ignore_index is set to exactly that class, and the average method is
+    equal to 'weighted'.
+
+    This would mean that the sum of weights equals zero, and would, without
+    taking care of this case, return NaN. However, the reduction function
+    should catch that and set the metric to equal the value of zero_division
+    in this case (zero_division is for now not configurable and equals 0).
+ """ + + preds = tensor([1, 1, 0, 0]) + target = tensor([0, 0, 0, 0]) + + cl_metric = metric_class(average="weighted", num_classes=2, ignore_index=1) + cl_metric(preds, target) + + result_cl = cl_metric.compute() + result_fn = metric_fn(preds, target, average="weighted", num_classes=2, ignore_index=1) + + assert result_cl == result_fn == 0 + + +@pytest.mark.parametrize("metric_class, metric_fn", [(Specificity, specificity)]) +@pytest.mark.parametrize("average", ["micro", "macro", None, "weighted", "samples"]) +@pytest.mark.parametrize("ignore_index", [None, 0]) +@pytest.mark.parametrize( + "preds, target, num_classes, multiclass, mdmc_average, sk_wrapper", + [ + (_input_binary_prob.preds, _input_binary_prob.target, 1, None, None, _sk_spec), + (_input_binary.preds, _input_binary.target, 1, False, None, _sk_spec), + (_input_mlb_prob.preds, _input_mlb_prob.target, NUM_CLASSES, None, None, _sk_spec), + (_input_mlb.preds, _input_mlb.target, NUM_CLASSES, False, None, _sk_spec), + (_input_mcls_prob.preds, _input_mcls_prob.target, NUM_CLASSES, None, None, _sk_spec), + (_input_mcls.preds, _input_mcls.target, NUM_CLASSES, None, None, _sk_spec), + (_input_mdmc.preds, _input_mdmc.target, NUM_CLASSES, None, "global", _sk_spec_mdim_mcls), + (_input_mdmc_prob.preds, _input_mdmc_prob.target, NUM_CLASSES, None, "global", _sk_spec_mdim_mcls), + (_input_mdmc.preds, _input_mdmc.target, NUM_CLASSES, None, "samplewise", _sk_spec_mdim_mcls), + (_input_mdmc_prob.preds, _input_mdmc_prob.target, NUM_CLASSES, None, "samplewise", _sk_spec_mdim_mcls), + ], +) +class TestSpecificity(MetricTester): + @pytest.mark.parametrize("ddp", [False, True]) + @pytest.mark.parametrize("dist_sync_on_step", [True, False]) + def test_specificity_class( + self, + ddp: bool, + dist_sync_on_step: bool, + preds: Tensor, + target: Tensor, + sk_wrapper: Callable, + metric_class: Metric, + metric_fn: Callable, + multiclass: Optional[bool], + num_classes: Optional[int], + average: str, + mdmc_average: Optional[str], + ignore_index: Optional[int], + ): + # todo: `metric_fn` is unused + if num_classes == 1 and average != "micro": + pytest.skip("Only test binary data for 'micro' avg (equivalent of 'binary' in sklearn)") + + if ignore_index is not None and preds.ndim == 2: + pytest.skip("Skipping ignore_index test with binary inputs.") + + if average == "weighted" and ignore_index is not None and mdmc_average is not None: + pytest.skip("Ignore special case where we are ignoring entire sample for 'weighted' average") + + self.run_class_metric_test( + ddp=ddp, + preds=preds, + target=target, + metric_class=metric_class, + sk_metric=partial( + sk_wrapper, + reduce=average, + num_classes=num_classes, + multiclass=multiclass, + ignore_index=ignore_index, + mdmc_reduce=mdmc_average, + ), + dist_sync_on_step=dist_sync_on_step, + metric_args={ + "num_classes": num_classes, + "average": average, + "threshold": THRESHOLD, + "multiclass": multiclass, + "ignore_index": ignore_index, + "mdmc_average": mdmc_average, + }, + check_dist_sync_on_step=True, + check_batch=True, + ) + + def test_specificity_fn( + self, + preds: Tensor, + target: Tensor, + sk_wrapper: Callable, + metric_class: Metric, + metric_fn: Callable, + multiclass: Optional[bool], + num_classes: Optional[int], + average: str, + mdmc_average: Optional[str], + ignore_index: Optional[int], + ): + # todo: `metric_class` is unused + if num_classes == 1 and average != "micro": + pytest.skip("Only test binary data for 'micro' avg (equivalent of 'binary' in sklearn)") + + if ignore_index is not None 
and preds.ndim == 2: + pytest.skip("Skipping ignore_index test with binary inputs.") + + if average == "weighted" and ignore_index is not None and mdmc_average is not None: + pytest.skip("Ignore special case where we are ignoring entire sample for 'weighted' average") + + self.run_functional_metric_test( + preds, + target, + metric_functional=metric_fn, + sk_metric=partial( + sk_wrapper, + reduce=average, + num_classes=num_classes, + multiclass=multiclass, + ignore_index=ignore_index, + mdmc_reduce=mdmc_average, + ), + metric_args={ + "num_classes": num_classes, + "average": average, + "threshold": THRESHOLD, + "multiclass": multiclass, + "ignore_index": ignore_index, + "mdmc_average": mdmc_average, + }, + ) + + def test_accuracy_differentiability( + self, + preds: Tensor, + target: Tensor, + sk_wrapper: Callable, + metric_class: Metric, + metric_fn: Callable, + multiclass: Optional[bool], + num_classes: Optional[int], + average: str, + mdmc_average: Optional[str], + ignore_index: Optional[int], + ): + + if num_classes == 1 and average != "micro": + pytest.skip("Only test binary data for 'micro' avg (equivalent of 'binary' in sklearn)") + + if ignore_index is not None and preds.ndim == 2: + pytest.skip("Skipping ignore_index test with binary inputs.") + + if average == "weighted" and ignore_index is not None and mdmc_average is not None: + pytest.skip("Ignore special case where we are ignoring entire sample for 'weighted' average") + + self.run_differentiability_test( + preds=preds, + target=target, + metric_module=metric_class, + metric_functional=metric_fn, + metric_args={ + "num_classes": num_classes, + "average": average, + "threshold": THRESHOLD, + "multiclass": multiclass, + "ignore_index": ignore_index, + "mdmc_average": mdmc_average, + }, + ) + + +_mc_k_target = tensor([0, 1, 2]) +_mc_k_preds = tensor([[0.35, 0.4, 0.25], [0.1, 0.5, 0.4], [0.2, 0.1, 0.7]]) +_ml_k_target = tensor([[0, 1, 0], [1, 1, 0], [0, 0, 0]]) +_ml_k_preds = tensor([[0.9, 0.2, 0.75], [0.1, 0.7, 0.8], [0.6, 0.1, 0.7]]) + + +@pytest.mark.parametrize("metric_class, metric_fn", [(Specificity, specificity)]) +@pytest.mark.parametrize( + "k, preds, target, average, expected_spec", + [ + (1, _mc_k_preds, _mc_k_target, "micro", tensor(5 / 6)), + (2, _mc_k_preds, _mc_k_target, "micro", tensor(1 / 2)), + (1, _ml_k_preds, _ml_k_target, "micro", tensor(1 / 2)), + (2, _ml_k_preds, _ml_k_target, "micro", tensor(1 / 6)), + ], +) +def test_top_k( + metric_class, + metric_fn, + k: int, + preds: Tensor, + target: Tensor, + average: str, + expected_spec: Tensor, +): + """A simple test to check that top_k works as expected. + + Just a sanity check, the tests in StatScores should already guarantee the correctness of results. 
+ """ + + class_metric = metric_class(top_k=k, average=average, num_classes=3) + class_metric.update(preds, target) + + assert B.equal(class_metric.compute(), expected_spec) + assert B.equal(metric_fn(preds, target, top_k=k, average=average, num_classes=3), expected_spec) + + +@pytest.mark.parametrize("metric_class, metric_fn", [(Specificity, specificity)]) +@pytest.mark.parametrize( + "ignore_index, expected", [(None, B.tensor([0.0, np.nan])), (0, B.tensor([np.nan, np.nan]))] +) +def test_class_not_present(metric_class, metric_fn, ignore_index, expected): + """This tests that when metric is computed per class and a given class is not present in both the `preds` and + `target`, the resulting score is `nan`.""" + preds = B.tensor([0, 0, 0]) + target = B.tensor([0, 0, 0]) + num_classes = 2 + + # test functional + result_fn = metric_fn(preds, target, average=AverageMethod.NONE, num_classes=num_classes, ignore_index=ignore_index) + assert B.allclose(expected, result_fn, equal_nan=True) + + # test class + cl_metric = metric_class(average=AverageMethod.NONE, num_classes=num_classes, ignore_index=ignore_index) + cl_metric(preds, target) + result_cl = cl_metric.compute() + assert B.allclose(expected, result_cl, equal_nan=True) diff --git a/EE/paddlemetric/src/tests/classification/test_stat_scores.py b/EE/paddlemetric/src/tests/classification/test_stat_scores.py new file mode 100644 index 000000000..c0e2656c6 --- /dev/null +++ b/EE/paddlemetric/src/tests/classification/test_stat_scores.py @@ -0,0 +1,323 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
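
The `_sk_stat_scores` reference in the file below leans on the layout of sklearn's `multilabel_confusion_matrix`, which returns one 2x2 matrix per class ordered `[[tn, fp], [fn, tp]]`, whereas `StatScores` reports `[tp, fp, tn, fn, support]` per class. A minimal sketch of that column reordering, using hypothetical toy inputs rather than the fixtures from this test suite:

```python
import numpy as np
from sklearn.metrics import multilabel_confusion_matrix

# Hypothetical one-hot inputs: 4 samples, 3 classes.
target = np.array([[1, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]])
preds = np.array([[1, 0, 0], [0, 1, 0], [0, 1, 0], [0, 0, 1]])

# sklearn returns shape (n_classes, 2, 2), each matrix laid out [[tn, fp], [fn, tp]].
mats = multilabel_confusion_matrix(target, preds)

# Flatten to (n_classes, 4) rows of [tn, fp, fn, tp], then reorder to [tp, fp, tn, fn].
stats = mats.reshape(-1, 4)[:, [3, 1, 0, 2]]

# Append support (tp + fn) as a fifth column, mirroring the test helper below.
stats = np.concatenate([stats, stats[:, [0]] + stats[:, [3]]], axis=1)
print(stats)  # one [tp, fp, tn, fn, support] row per class
```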
+from functools import partial +from typing import Callable, Optional + +import numpy as np +import pytest +import paddleext.torchapi as B +from sklearn.metrics import multilabel_confusion_matrix +from paddleext.torchapi import Tensor, tensor + +from tests.classification.inputs import _input_binary, _input_binary_logits, _input_binary_prob, _input_multiclass +from tests.classification.inputs import _input_multiclass_logits as _input_mcls_logits +from tests.classification.inputs import _input_multiclass_prob as _input_mcls_prob +from tests.classification.inputs import _input_multidim_multiclass as _input_mdmc +from tests.classification.inputs import _input_multidim_multiclass_prob as _input_mdmc_prob +from tests.classification.inputs import _input_multilabel as _input_mcls +from tests.classification.inputs import _input_multilabel_logits as _input_mlb_logits +from tests.classification.inputs import _input_multilabel_prob as _input_mlb_prob +from tests.helpers import seed_all +from tests.helpers.testers import NUM_CLASSES, MetricTester +from paddlemetrics import StatScores +from paddlemetrics.functional import stat_scores +from paddlemetrics.utilities.checks import _input_format_classification + +seed_all(42) + + +def _sk_stat_scores(preds, target, reduce, num_classes, multiclass, ignore_index, top_k, threshold, mdmc_reduce=None): + # todo: `mdmc_reduce` is unused + preds, target, _ = _input_format_classification( + preds, target, threshold=threshold, num_classes=num_classes, multiclass=multiclass, top_k=top_k + ) + sk_preds, sk_target = preds.numpy(), target.numpy() + + if reduce != "macro" and ignore_index is not None and preds.shape[1] > 1: + sk_preds = np.delete(sk_preds, ignore_index, 1) + sk_target = np.delete(sk_target, ignore_index, 1) + + if preds.shape[1] == 1 and reduce == "samples": + sk_target = sk_target.T + sk_preds = sk_preds.T + + sk_stats = multilabel_confusion_matrix( + sk_target, sk_preds, samplewise=(reduce == "samples") and preds.shape[1] != 1 + ) + + if preds.shape[1] == 1 and reduce != "samples": + sk_stats = sk_stats[[1]].reshape(-1, 4)[:, [3, 1, 0, 2]] + else: + sk_stats = sk_stats.reshape(-1, 4)[:, [3, 1, 0, 2]] + + if reduce == "micro": + sk_stats = sk_stats.sum(axis=0, keepdims=True) + + sk_stats = np.concatenate([sk_stats, sk_stats[:, [3]] + sk_stats[:, [0]]], 1) + + if reduce == "micro": + sk_stats = sk_stats[0] + + if reduce == "macro" and ignore_index is not None and preds.shape[1]: + sk_stats[ignore_index, :] = -1 + + return sk_stats + + +def _sk_stat_scores_mdim_mcls( + preds, target, reduce, mdmc_reduce, num_classes, multiclass, ignore_index, top_k, threshold +): + preds, target, _ = _input_format_classification( + preds, target, threshold=threshold, num_classes=num_classes, multiclass=multiclass, top_k=top_k + ) + + if mdmc_reduce == "global": + preds = B.transpose(preds, 1, 2).reshape(-1, preds.shape[1]) + target = B.transpose(target, 1, 2).reshape(-1, target.shape[1]) + + return _sk_stat_scores(preds, target, reduce, None, False, ignore_index, top_k, threshold) + if mdmc_reduce == "samplewise": + scores = [] + + for i in range(preds.shape[0]): + pred_i = preds[i, ...].T + target_i = target[i, ...].T + scores_i = _sk_stat_scores(pred_i, target_i, reduce, None, False, ignore_index, top_k, threshold) + + scores.append(np.expand_dims(scores_i, 0)) + + return np.concatenate(scores) + + +@pytest.mark.parametrize( + "reduce, mdmc_reduce, num_classes, inputs, ignore_index", + [ + ["unknown", None, None, _input_binary, None], + ["micro", "unknown", None, 
_input_binary, None], + ["macro", None, None, _input_binary, None], + ["micro", None, None, _input_mdmc_prob, None], + ["micro", None, None, _input_binary_prob, 0], + ["micro", None, None, _input_mcls_prob, NUM_CLASSES], + ["micro", None, NUM_CLASSES, _input_mcls_prob, NUM_CLASSES], + ], +) +def test_wrong_params(reduce, mdmc_reduce, num_classes, inputs, ignore_index): + """Test a combination of parameters that are invalid and should raise an error. + + This includes invalid ``reduce`` and ``mdmc_reduce`` parameter values, not setting ``num_classes`` when + ``reduce='macro'`, not setting ``mdmc_reduce`` when inputs are multi-dim multi-class``, setting ``ignore_index`` + when inputs are binary, as well as setting ``ignore_index`` to a value higher than the number of classes. + """ + with pytest.raises(ValueError): + stat_scores( + inputs.preds[0], inputs.target[0], reduce, mdmc_reduce, num_classes=num_classes, ignore_index=ignore_index + ) + + with pytest.raises(ValueError): + sts = StatScores(reduce=reduce, mdmc_reduce=mdmc_reduce, num_classes=num_classes, ignore_index=ignore_index) + sts(inputs.preds[0], inputs.target[0]) + + +@pytest.mark.parametrize("ignore_index", [None, 0]) +@pytest.mark.parametrize("reduce", ["micro", "macro", "samples"]) +@pytest.mark.parametrize( + "preds, target, sk_fn, mdmc_reduce, num_classes, multiclass, top_k, threshold", + [ + (_input_binary_logits.preds, _input_binary_logits.target, _sk_stat_scores, None, 1, None, None, 0.0), + (_input_binary_prob.preds, _input_binary_prob.target, _sk_stat_scores, None, 1, None, None, 0.5), + (_input_binary.preds, _input_binary.target, _sk_stat_scores, None, 1, False, None, 0.5), + (_input_mlb_logits.preds, _input_mlb_logits.target, _sk_stat_scores, None, NUM_CLASSES, None, None, 0.0), + (_input_mlb_prob.preds, _input_mlb_prob.target, _sk_stat_scores, None, NUM_CLASSES, None, None, 0.5), + (_input_mlb_prob.preds, _input_mlb_prob.target, _sk_stat_scores, None, NUM_CLASSES, None, 2, 0.5), + (_input_mcls.preds, _input_mcls.target, _sk_stat_scores, None, NUM_CLASSES, False, None, 0.5), + (_input_mcls_prob.preds, _input_mcls_prob.target, _sk_stat_scores, None, NUM_CLASSES, None, None, 0.5), + (_input_mcls_logits.preds, _input_mcls_logits.target, _sk_stat_scores, None, NUM_CLASSES, None, None, 0.0), + (_input_mcls_prob.preds, _input_mcls_prob.target, _sk_stat_scores, None, NUM_CLASSES, None, 2, 0.0), + (_input_multiclass.preds, _input_multiclass.target, _sk_stat_scores, None, NUM_CLASSES, None, None, 0.0), + (_input_mdmc.preds, _input_mdmc.target, _sk_stat_scores_mdim_mcls, "samplewise", NUM_CLASSES, None, None, 0.0), + ( + _input_mdmc_prob.preds, + _input_mdmc_prob.target, + _sk_stat_scores_mdim_mcls, + "samplewise", + NUM_CLASSES, + None, + None, + 0.0, + ), + (_input_mdmc.preds, _input_mdmc.target, _sk_stat_scores_mdim_mcls, "global", NUM_CLASSES, None, None, 0.0), + ( + _input_mdmc_prob.preds, + _input_mdmc_prob.target, + _sk_stat_scores_mdim_mcls, + "global", + NUM_CLASSES, + None, + None, + 0.0, + ), + ], +) +class TestStatScores(MetricTester): + # DDP tests temporarily disabled due to hanging issues + @pytest.mark.parametrize("ddp", [False]) + @pytest.mark.parametrize("dist_sync_on_step", [False]) #True, + def test_stat_scores_class( + self, + ddp: bool, + dist_sync_on_step: bool, + sk_fn: Callable, + preds: Tensor, + target: Tensor, + reduce: str, + mdmc_reduce: Optional[str], + num_classes: Optional[int], + multiclass: Optional[bool], + ignore_index: Optional[int], + top_k: Optional[int], + threshold: Optional[float], + 
): + if ignore_index is not None and preds.ndim == 2: + pytest.skip("Skipping ignore_index test with binary inputs.") + + self.run_class_metric_test( + ddp=ddp, + preds=preds, + target=target, + metric_class=StatScores, + sk_metric=partial( + sk_fn, + reduce=reduce, + mdmc_reduce=mdmc_reduce, + num_classes=num_classes, + multiclass=multiclass, + ignore_index=ignore_index, + top_k=top_k, + threshold=threshold, + ), + dist_sync_on_step=dist_sync_on_step, + metric_args={ + "num_classes": num_classes, + "reduce": reduce, + "mdmc_reduce": mdmc_reduce, + "threshold": threshold, + "multiclass": multiclass, + "ignore_index": ignore_index, + "top_k": top_k, + }, + check_dist_sync_on_step=True, + check_batch=True, + ) + + def test_stat_scores_fn( + self, + sk_fn: Callable, + preds: Tensor, + target: Tensor, + reduce: str, + mdmc_reduce: Optional[str], + num_classes: Optional[int], + multiclass: Optional[bool], + ignore_index: Optional[int], + top_k: Optional[int], + threshold: Optional[float], + ): + if ignore_index is not None and preds.ndim == 2: + pytest.skip("Skipping ignore_index test with binary inputs.") + + self.run_functional_metric_test( + preds, + target, + metric_functional=stat_scores, + sk_metric=partial( + sk_fn, + reduce=reduce, + mdmc_reduce=mdmc_reduce, + num_classes=num_classes, + multiclass=multiclass, + ignore_index=ignore_index, + top_k=top_k, + threshold=threshold, + ), + metric_args={ + "num_classes": num_classes, + "reduce": reduce, + "mdmc_reduce": mdmc_reduce, + "threshold": threshold, + "multiclass": multiclass, + "ignore_index": ignore_index, + "top_k": top_k, + }, + ) + + def test_stat_scores_differentiability( + self, + sk_fn: Callable, + preds: Tensor, + target: Tensor, + reduce: str, + mdmc_reduce: Optional[str], + num_classes: Optional[int], + multiclass: Optional[bool], + ignore_index: Optional[int], + top_k: Optional[int], + threshold: Optional[float], + ): + if ignore_index is not None and preds.ndim == 2: + pytest.skip("Skipping ignore_index test with binary inputs.") + + self.run_differentiability_test( + preds, + target, + metric_module=StatScores, + metric_functional=stat_scores, + metric_args={ + "num_classes": num_classes, + "reduce": reduce, + "mdmc_reduce": mdmc_reduce, + "threshold": threshold, + "multiclass": multiclass, + "ignore_index": ignore_index, + "top_k": top_k, + }, + ) + + +_mc_k_target = tensor([0, 1, 2]) +_mc_k_preds = tensor([[0.35, 0.4, 0.25], [0.1, 0.5, 0.4], [0.2, 0.1, 0.7]]) +_ml_k_target = tensor([[0, 1, 0], [1, 1, 0], [0, 0, 0]]) +_ml_k_preds = tensor([[0.9, 0.2, 0.75], [0.1, 0.7, 0.8], [0.6, 0.1, 0.7]]) + + +@pytest.mark.parametrize( + "k, preds, target, reduce, expected", + [ + (1, _mc_k_preds, _mc_k_target, "micro", tensor([2, 1, 5, 1, 3])), + (2, _mc_k_preds, _mc_k_target, "micro", tensor([3, 3, 3, 0, 3])), + (1, _ml_k_preds, _ml_k_target, "micro", tensor([0, 3, 3, 3, 3])), + (2, _ml_k_preds, _ml_k_target, "micro", tensor([1, 5, 1, 2, 3])), + (1, _mc_k_preds, _mc_k_target, "macro", tensor([[0, 1, 1], [0, 1, 0], [2, 1, 2], [1, 0, 0], [1, 1, 1]])), + (2, _mc_k_preds, _mc_k_target, "macro", tensor([[1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0], [1, 1, 1]])), + (1, _ml_k_preds, _ml_k_target, "macro", tensor([[0, 0, 0], [1, 0, 2], [1, 1, 1], [1, 2, 0], [1, 2, 0]])), + (2, _ml_k_preds, _ml_k_target, "macro", tensor([[0, 1, 0], [2, 0, 3], [0, 1, 0], [1, 1, 0], [1, 2, 0]])), + ], +) +def test_top_k(k: int, preds: Tensor, target: Tensor, reduce: str, expected: Tensor): + """A simple test to check that top_k works as expected.""" + + 
class_metric = StatScores(top_k=k, reduce=reduce, num_classes=3) + class_metric.update(preds, target) + + assert B.allclose(class_metric.compute(), expected.T) + assert B.allclose(stat_scores(preds, target, top_k=k, reduce=reduce, num_classes=3), expected.T) diff --git a/EE/paddlemetric/src/tests/functional/__init__.py b/EE/paddlemetric/src/tests/functional/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/EE/paddlemetric/src/tests/functional/test_classification.py b/EE/paddlemetric/src/tests/functional/test_classification.py new file mode 100644 index 000000000..d20483871 --- /dev/null +++ b/EE/paddlemetric/src/tests/functional/test_classification.py @@ -0,0 +1,123 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import pytest +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor + +from tests.helpers import seed_all +from paddlemetrics.functional import dice_score +from paddlemetrics.functional.classification.precision_recall_curve import _binary_clf_curve +from paddlemetrics.utilities.data import get_num_classes, to_categorical, to_onehot + + +def test_onehot(): + test_tensor = tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]) + expected = B.stack( + [ + B.cat([B.eye(5, dtype=int), B.zeros((5, 5), dtype=int)]), + B.cat([B.zeros((5, 5), dtype=int), B.eye(5, dtype=int)]), + ] + ) + + assert tuple(test_tensor.shape) == (2, 5) + assert tuple(expected.shape) == (2, 10, 5) + + onehot_classes = to_onehot(test_tensor, num_classes=10) + onehot_no_classes = to_onehot(test_tensor) + + assert B.allclose(onehot_classes, onehot_no_classes) + + assert onehot_classes.shape == expected.shape + assert onehot_no_classes.shape == expected.shape + + assert B.allclose(expected.to(onehot_no_classes), onehot_no_classes) + assert B.allclose(expected.to(onehot_classes), onehot_classes) + + +def test_to_categorical(): + test_tensor = B.stack( + [ + B.cat([B.eye(5, dtype=int), B.zeros((5, 5), dtype=int)]), + B.cat([B.zeros((5, 5), dtype=int), B.eye(5, dtype=int)]), + ] + ).to(B.float) + + expected = tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]) + assert tuple(expected.shape) == (2, 5) + assert tuple(test_tensor.shape) == (2, 10, 5) + + result = to_categorical(test_tensor) + + assert result.shape == expected.shape + assert B.allclose(result, expected.to(result.dtype)) + + +@pytest.mark.parametrize( + ["preds", "target", "num_classes", "expected_num_classes"], + [ + pytest.param(B.rand(32, 10, 28, 28), B.randint(10, (32, 28, 28)), 10, 10), + pytest.param(B.rand(32, 10, 28, 28), B.randint(10, (32, 28, 28)), None, 10), + pytest.param(B.rand(32, 28, 28), B.randint(10, (32, 28, 28)), None, 10), + ], +) +def test_get_num_classes(preds, target, num_classes, expected_num_classes): + assert get_num_classes(preds, target, num_classes) == expected_num_classes + + +@pytest.mark.parametrize( + ["sample_weight", "pos_label", "exp_shape"], + [ + pytest.param(1, 1.0, 42), + pytest.param(None, 1.0, 42), + ], +) +def test_binary_clf_curve(sample_weight, 
pos_label, exp_shape):
+    # TODO: move `pred` and `target` back to the test function arguments;
+    # note that changing these arrays also requires updating `exp_shape`.
+    seed_all(0)
+    pred = B.randint(low=51, high=99, size=(100,), dtype=B.float) / 100
+    target = tensor([0, 1] * 50, dtype=B.int)
+    if sample_weight is not None:
+        sample_weight = B.ones_like(pred) * sample_weight
+
+    fps, tps, thresh = _binary_clf_curve(preds=pred, target=target, sample_weights=sample_weight, pos_label=pos_label)
+
+    assert isinstance(tps, Tensor)
+    assert isinstance(fps, Tensor)
+    assert isinstance(thresh, Tensor)
+    if B.platform() == "torch":
+        assert tuple(tps.shape) == (exp_shape,)
+        assert tuple(fps.shape) == (exp_shape,)
+        assert tuple(thresh.shape) == (exp_shape,)
+    elif B.platform() == "paddle":
+        assert tuple(tps.shape) == (exp_shape - 1,)
+        assert tuple(fps.shape) == (exp_shape - 1,)
+        assert tuple(thresh.shape) == (exp_shape - 1,)
+    else:
+        raise Exception(f"unknown platform {B.platform()}")
+
+
+@pytest.mark.parametrize(
+    ["pred", "target", "expected"],
+    [
+        pytest.param([[0, 0], [1, 1]], [[0, 0], [1, 1]], 1.0),
+        pytest.param([[1, 1], [0, 0]], [[0, 0], [1, 1]], 0.0),
+        pytest.param([[1, 1], [1, 1]], [[1, 1], [0, 0]], 2 / 3),
+        pytest.param([[1, 1], [0, 0]], [[1, 1], [0, 0]], 1.0),
+    ],
+)
+def test_dice_score(pred, target, expected):
+    score = dice_score(tensor(pred), tensor(target))
+    assert score == expected
diff --git a/EE/paddlemetric/src/tests/functional/test_image_gradients.py b/EE/paddlemetric/src/tests/functional/test_image_gradients.py
new file mode 100644
index 000000000..b4bad7f10
--- /dev/null
+++ b/EE/paddlemetric/src/tests/functional/test_image_gradients.py
@@ -0,0 +1,110 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
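
The image-gradient tests that follow pin down the TF convention the metric is expected to reproduce: forward differences along height and width, with the last row of `dy` and the last column of `dx` zero-padded so the outputs keep the input shape. A small numpy sketch of that convention, inferred from the expected matrices in the tests rather than from the paddlemetrics source:

```python
import numpy as np

def image_gradients_ref(img: np.ndarray):
    """Forward differences with zero padding: dy[i] = img[i + 1] - img[i]."""
    dy = np.zeros_like(img)
    dx = np.zeros_like(img)
    dy[:-1, :] = img[1:, :] - img[:-1, :]
    dx[:, :-1] = img[:, 1:] - img[:, :-1]
    return dy, dx

img = np.arange(25, dtype=np.float32).reshape(5, 5)
dy, dx = image_gradients_ref(img)
# dy is all 5s with a zero last row; dx is all 1s with a zero last column,
# matching `true_dy` and `true_dx` in the tests below.
```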
+import pytest
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+
+from paddlemetrics.functional import image_gradients
+
+
+def test_invalid_input_img_type():
+    """Test whether the module successfully handles an invalid input data type."""
+    invalid_dummy_input = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
+
+    with pytest.raises(TypeError):
+        image_gradients(invalid_dummy_input)
+
+
+def test_invalid_input_ndims():
+    """Test whether the module successfully handles an invalid number of dimensions in the input tensor."""
+
+    BATCH_SIZE = 1
+    HEIGHT = 5
+    WIDTH = 5
+    CHANNELS = 1
+
+    image = B.arange(0, BATCH_SIZE * HEIGHT * WIDTH * CHANNELS, dtype=B.float32)
+    image = B.reshape(image, (HEIGHT, WIDTH))
+
+    with pytest.raises(RuntimeError):
+        image_gradients(image)
+
+
+def test_multi_batch_image_gradients():
+    """Test whether the module correctly calculates gradients for a known input with non-unity batch size.
+
+    Example input-output pair taken from TF's implementation of image gradients.
+    """
+
+    BATCH_SIZE = 5
+    HEIGHT = 5
+    WIDTH = 5
+    CHANNELS = 1
+
+    single_channel_img = B.arange(0, 1 * HEIGHT * WIDTH * CHANNELS, dtype=B.float32)
+    single_channel_img = B.reshape(single_channel_img, (CHANNELS, HEIGHT, WIDTH))
+    image = B.stack([single_channel_img for _ in range(BATCH_SIZE)], dim=0)
+
+    true_dy = [
+        [5.0, 5.0, 5.0, 5.0, 5.0],
+        [5.0, 5.0, 5.0, 5.0, 5.0],
+        [5.0, 5.0, 5.0, 5.0, 5.0],
+        [5.0, 5.0, 5.0, 5.0, 5.0],
+        [0.0, 0.0, 0.0, 0.0, 0.0],
+    ]
+    true_dy = B.to_tensor(true_dy)
+
+    dy, dx = image_gradients(image)
+
+    for batch_id in range(BATCH_SIZE):
+        assert B.allclose(dy[batch_id, 0, :, :], true_dy)
+    assert tuple(dy.shape) == (BATCH_SIZE, 1, HEIGHT, WIDTH)
+    assert tuple(dx.shape) == (BATCH_SIZE, 1, HEIGHT, WIDTH)
+
+
+def test_image_gradients():
+    """Test whether the module correctly calculates gradients for a known input.
+
+    Example input-output pair taken from TF's implementation of image gradients.
+    """
+
+    BATCH_SIZE = 1
+    HEIGHT = 5
+    WIDTH = 5
+    CHANNELS = 1
+
+    image = B.arange(0, BATCH_SIZE * HEIGHT * WIDTH * CHANNELS, dtype=B.float32)
+    image = B.reshape(image, (BATCH_SIZE, CHANNELS, HEIGHT, WIDTH))
+
+    true_dy = [
+        [5.0, 5.0, 5.0, 5.0, 5.0],
+        [5.0, 5.0, 5.0, 5.0, 5.0],
+        [5.0, 5.0, 5.0, 5.0, 5.0],
+        [5.0, 5.0, 5.0, 5.0, 5.0],
+        [0.0, 0.0, 0.0, 0.0, 0.0],
+    ]
+
+    true_dx = [
+        [1.0, 1.0, 1.0, 1.0, 0.0],
+        [1.0, 1.0, 1.0, 1.0, 0.0],
+        [1.0, 1.0, 1.0, 1.0, 0.0],
+        [1.0, 1.0, 1.0, 1.0, 0.0],
+        [1.0, 1.0, 1.0, 1.0, 0.0],
+    ]
+
+    true_dy = B.to_tensor(true_dy)
+    true_dx = B.to_tensor(true_dx)
+
+    dy, dx = image_gradients(image)
+
+    assert B.allclose(dy.squeeze(), true_dy), "dy fails test"
+    assert B.allclose(dx.squeeze(), true_dx), "dx fails test"
diff --git a/EE/paddlemetric/src/tests/functional/test_reduction.py b/EE/paddlemetric/src/tests/functional/test_reduction.py
new file mode 100644
index 000000000..729cd5b97
--- /dev/null
+++ b/EE/paddlemetric/src/tests/functional/test_reduction.py
@@ -0,0 +1,41 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. +import pytest +import paddleext.torchapi as B + +from paddlemetrics.utilities.distributed import class_reduce, reduce + + +def test_reduce(): + start_tensor = B.rand(50, 40, 30) + + assert B.allclose(reduce(start_tensor, "elementwise_mean"), B.mean(start_tensor)) + assert B.allclose(reduce(start_tensor, "sum"), B.sum(start_tensor)) + assert B.allclose(reduce(start_tensor, "none"), start_tensor) + + with pytest.raises(ValueError): + reduce(start_tensor, "error_reduction") + + +def test_class_reduce(): + num = B.randint(1, 10, (100,)).float() + denom = B.randint(10, 20, (100,)).float() + weights = B.randint(1, 100, (100,)).float() + + assert B.allclose(class_reduce(num, denom, weights, "micro"), B.sum(num) / B.sum(denom)) + assert B.allclose(class_reduce(num, denom, weights, "macro"), B.mean(num / denom)) + assert B.allclose( + class_reduce(num, denom, weights, "weighted"), B.sum(num / denom * (weights / B.sum(weights))) + ) + assert B.allclose(class_reduce(num, denom, weights, "none"), num / denom) diff --git a/EE/paddlemetric/src/tests/functional/test_self_supervised.py b/EE/paddlemetric/src/tests/functional/test_self_supervised.py new file mode 100644 index 000000000..4c675192c --- /dev/null +++ b/EE/paddlemetric/src/tests/functional/test_self_supervised.py @@ -0,0 +1,46 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
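
The `test_class_reduce` assertions above fully determine the reduction semantics. Written out as a standalone numpy sketch, derived from those assertions rather than from the library implementation:

```python
import numpy as np

def class_reduce_ref(num, denom, weights, reduction):
    """Reference semantics implied by the assertions in test_class_reduce."""
    frac = num / denom
    if reduction == "micro":
        return num.sum() / denom.sum()  # pool counts first, then divide
    if reduction == "macro":
        return frac.mean()  # unweighted mean over classes
    if reduction == "weighted":
        return (frac * weights / weights.sum()).sum()
    return frac  # "none": per-class scores

num = np.array([1.0, 3.0])
denom = np.array([2.0, 4.0])
weights = np.array([1.0, 3.0])
print(class_reduce_ref(num, denom, weights, "weighted"))  # 0.6875
```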
+import pytest
+import paddleext.torchapi as B
+from sklearn.metrics import pairwise
+from paddleext.torchapi import tensor
+
+from paddlemetrics.functional import embedding_similarity
+
+
+@pytest.mark.parametrize("similarity", ["cosine", "dot"])
+@pytest.mark.parametrize("reduction", ["none", "mean", "sum"])
+def test_against_sklearn(similarity, reduction):
+    """Compare PL metrics to the sklearn version."""
+    device = "cuda" if B.cuda.is_available() else "cpu"
+
+    batch = B.randn(5, 10, device=device)  # 5 samples in 10 dimensions
+
+    pl_dist = embedding_similarity(batch, similarity=similarity, reduction=reduction, zero_diagonal=False)
+
+    def sklearn_embedding_distance(batch, similarity, reduction):
+        metric_func = {"cosine": pairwise.cosine_similarity, "dot": pairwise.linear_kernel}[similarity]
+
+        dist = metric_func(batch, batch)
+        if reduction == "mean":
+            return dist.mean(axis=-1)
+        if reduction == "sum":
+            return dist.sum(axis=-1)
+        return dist
+
+    sk_dist = sklearn_embedding_distance(batch.cpu().detach().numpy(), similarity=similarity, reduction=reduction)
+    sk_dist = tensor(sk_dist, dtype=B.float, device=device)
+
+    assert B.allclose(sk_dist, pl_dist)
diff --git a/EE/paddlemetric/src/tests/helpers/__init__.py b/EE/paddlemetric/src/tests/helpers/__init__.py
new file mode 100644
index 000000000..3773a49fc
--- /dev/null
+++ b/EE/paddlemetric/src/tests/helpers/__init__.py
@@ -0,0 +1,20 @@
+import operator
+import random
+
+import numpy
+import paddleext.torchapi as B
+
+from paddlemetrics.utilities.imports import _TORCH_LOWER_1_4, _TORCH_LOWER_1_5, _TORCH_LOWER_1_6, _compare_version
+
+_MARK_TORCH_MIN_1_4 = dict(condition=_TORCH_LOWER_1_4, reason="required PT >= 1.4")
+_MARK_TORCH_MIN_1_5 = dict(condition=_TORCH_LOWER_1_5, reason="required PT >= 1.5")
+_MARK_TORCH_MIN_1_6 = dict(condition=_TORCH_LOWER_1_6, reason="required PT >= 1.6")
+
+_LIGHTNING_GREATER_EQUAL_1_3 = _compare_version("pytorch_lightning", operator.ge, "1.3.0")
+
+
+def seed_all(seed):
+    random.seed(seed)
+    numpy.random.seed(seed)
+    B.manual_seed(seed)
+    B.cuda.manual_seed_all(seed)
diff --git a/EE/paddlemetric/src/tests/helpers/non_sklearn_metrics.py b/EE/paddlemetric/src/tests/helpers/non_sklearn_metrics.py
new file mode 100644
index 000000000..fa4f84293
--- /dev/null
+++ b/EE/paddlemetric/src/tests/helpers/non_sklearn_metrics.py
@@ -0,0 +1,187 @@
+"""Non-sklearn metrics used as references in tests."""
+from typing import Optional, Union
+
+import numpy as np
+from sklearn.metrics._regression import _check_reg_targets
+from sklearn.utils import assert_all_finite, check_consistent_length, column_or_1d
+
+
+def symmetric_mean_absolute_percentage_error(
+    y_true: np.ndarray,
+    y_pred: np.ndarray,
+    sample_weight: Optional[np.ndarray] = None,
+    multioutput: str = "uniform_average",
+):
+    r"""Symmetric mean absolute percentage error regression loss (SMAPE_):
+
+    .. math:: \text{SMAPE} = \frac{2}{n}\sum_{i=1}^{n}\frac{| y_i - \hat{y_i} |}{\max(| y_i | + | \hat{y_i} |, \epsilon)}
+
+    Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
+
+    Args:
+        y_true: array-like of shape (n_samples,) or (n_samples, n_outputs)
+            Ground truth (correct) target values.
+        y_pred: array-like of shape (n_samples,) or (n_samples, n_outputs)
+            Estimated target values.
+        sample_weight: array-like of shape (n_samples,), default=None
+            Sample weights.
+        multioutput: {'raw_values', 'uniform_average'} or array-like
+            Defines the aggregation of multiple output values.
+            Array-like value defines weights used to average errors.
+            If input is list then the shape must be (n_outputs,).
+
+            - 'raw_values': Returns a full set of errors in case of multioutput input.
+            - 'uniform_average': Errors of all outputs are averaged with uniform weight.
+
+    Returns:
+        loss: float or ndarray of floats in the range [0, 2]
+            If multioutput is 'raw_values', then the symmetric mean absolute percentage
+            error is returned for each output separately.
+            If multioutput is 'uniform_average' or an ndarray of weights, then the
+            weighted average of all output errors is returned.
+            SMAPE output is non-negative floating point; the best value is 0.0.
+            The ``epsilon`` term in the denominator guards against division by zero
+            when both y_true and y_pred are zero.
+
+    """
+    _, y_true, y_pred, multioutput = _check_reg_targets(y_true, y_pred, multioutput)
+    check_consistent_length(y_true, y_pred, sample_weight)
+    epsilon = np.finfo(np.float64).eps
+    smape = 2 * np.abs(y_pred - y_true) / np.maximum(np.abs(y_true) + np.abs(y_pred), epsilon)
+    output_errors = np.average(smape, weights=sample_weight, axis=0)
+    if isinstance(multioutput, str):
+        if multioutput == "raw_values":
+            return output_errors
+        # pass None as weights to np.average: uniform mean
+        multioutput = None
+
+    return np.average(output_errors, weights=multioutput)
+
+
+# sklearn reference function from
+# https://github.com/samronsin/scikit-learn/blob/calibration-loss/sklearn/metrics/_classification.py.
+# TODO: when the PR into sklearn is accepted, update this to use the official function.
+def calibration_error(
+    y_true: np.ndarray,
+    y_prob: np.ndarray,
+    sample_weight: Optional[np.ndarray] = None,
+    norm: str = "l2",
+    n_bins: int = 10,
+    strategy: str = "uniform",
+    pos_label: Optional[Union[int, str]] = None,
+    reduce_bias: bool = True,
+) -> float:
+    """Compute the calibration error of a binary classifier.
+
+    Across all items in a set of N predictions, the calibration error measures the
+    aggregated difference between (1) the average predicted probabilities assigned to
+    the positive class, and (2) the frequencies of the positive class in the actual
+    outcome. The calibration error is only appropriate for binary categorical outcomes.
+    Which label is considered the positive label is controlled via the parameter
+    pos_label (by default, the maximum label is used).
+
+    Args:
+        y_true: array-like of shape (n_samples,)
+            True targets of a binary classification task.
+        y_prob: array-like of (n_samples,)
+            Probabilities of the positive class.
+        sample_weight: array-like of shape (n_samples,), default=None
+            Sample weights.
+        norm: {'l1', 'l2', 'max'}
+            Norm method. The l1-norm is the Expected Calibration Error (ECE),
+            and the max-norm corresponds to Maximum Calibration Error (MCE).
+        n_bins: int, default=10
+            The number of bins to compute error on.
+        strategy: {'uniform', 'quantile'}
+            Strategy used to define the widths of the bins.
+            uniform
+                All bins have identical widths.
+            quantile
+                All bins have the same number of points.
+        pos_label: int or str, default=None
+            Label of the positive class. If None, the maximum label is used as the positive class.
+        reduce_bias: bool, default=True
+            Add a debiasing term as in Verified Uncertainty Calibration (A. Kumar et al.).
+            Only effective for the l2-norm.
+ + Returns: + score: float with calibration error + """ + y_true = column_or_1d(y_true) + y_prob = column_or_1d(y_prob) + assert_all_finite(y_true) + assert_all_finite(y_prob) + check_consistent_length(y_true, y_prob, sample_weight) + if any(y_prob < 0) or any(y_prob > 1): + raise ValueError("y_prob has values outside of [0, 1] range") + + labels = np.unique(y_true) + if len(labels) > 2: + raise ValueError("Only binary classification is supported. " "Provided labels %s." % labels) + + if pos_label is None: + pos_label = y_true.max() + if pos_label not in labels: + raise ValueError("pos_label=%r is not a valid label: " "%r" % (pos_label, labels)) + y_true = np.array(y_true == pos_label, int) + + norm_options = ("l1", "l2", "max") + if norm not in norm_options: + raise ValueError(f"norm has to be one of {norm_options}, got: {norm}.") + + remapping = np.argsort(y_prob) + y_true = y_true[remapping] + y_prob = y_prob[remapping] + if sample_weight is not None: + sample_weight = sample_weight[remapping] + else: + sample_weight = np.ones(y_true.shape[0]) + + n_bins = int(n_bins) + if strategy == "quantile": + quantiles = np.percentile(y_prob, np.arange(0, 1, 1.0 / n_bins) * 100) + elif strategy == "uniform": + quantiles = np.arange(0, 1, 1.0 / n_bins) + else: + raise ValueError( + f"Invalid entry to 'strategy' input. \ + The strategy must be either quantile' or 'uniform'. Got {strategy} instead." + ) + + threshold_indices = np.searchsorted(y_prob, quantiles).tolist() + threshold_indices.append(y_true.shape[0]) + avg_pred_true = np.zeros(n_bins) + bin_centroid = np.zeros(n_bins) + delta_count = np.zeros(n_bins) + debias = np.zeros(n_bins) + + loss = 0.0 + count = float(sample_weight.sum()) + for i, i_start in enumerate(threshold_indices[:-1]): + i_end = threshold_indices[i + 1] + # ignore empty bins + if i_end == i_start: + continue + delta_count[i] = float(sample_weight[i_start:i_end].sum()) + avg_pred_true[i] = np.dot(y_true[i_start:i_end], sample_weight[i_start:i_end]) / delta_count[i] + bin_centroid[i] = np.dot(y_prob[i_start:i_end], sample_weight[i_start:i_end]) / delta_count[i] + if norm == "l2" and reduce_bias: + # NOTE: I think there's a mistake in the original implementation. + # delta_debias = ( + # avg_pred_true[i] * (avg_pred_true[i] - 1) * delta_count[i] + # ) + # delta_debias /= (count * delta_count[i] - 1) + delta_debias = avg_pred_true[i] * (avg_pred_true[i] - 1) * delta_count[i] + delta_debias /= count * (delta_count[i] - 1) + debias[i] = delta_debias + + if norm == "max": + loss = np.max(np.abs(avg_pred_true - bin_centroid)) + elif norm == "l1": + delta_loss = np.abs(avg_pred_true - bin_centroid) * delta_count + loss = np.sum(delta_loss) / count + elif norm == "l2": + delta_loss = (avg_pred_true - bin_centroid) ** 2 * delta_count + loss = np.sum(delta_loss) / count + if reduce_bias: + # convert nans to zero + loss += np.sum(np.nan_to_num(debias)) + loss = np.sqrt(max(loss, 0.0)) + return loss diff --git a/EE/paddlemetric/src/tests/helpers/testers.py b/EE/paddlemetric/src/tests/helpers/testers.py new file mode 100644 index 000000000..02f237c8c --- /dev/null +++ b/EE/paddlemetric/src/tests/helpers/testers.py @@ -0,0 +1,578 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +import pickle +import sys +from functools import partial +from typing import Any, Callable, Dict, Optional, Sequence + +import numpy as np +import pytest +import paddleext.torchapi as B +from paddleext.torchapi import Tensor, tensor +from multiprocessing import Pool, set_start_method + +from paddlemetrics import Metric + +try: + set_start_method("spawn") +except RuntimeError: + pass + +NUM_PROCESSES = 2 +NUM_BATCHES = 10 +BATCH_SIZE = 32 +NUM_CLASSES = 5 +EXTRA_DIM = 3 +THRESHOLD = 0.5 + +MAX_PORT = 8100 +START_PORT = 8088 +CURRENT_PORT = START_PORT + + +def setup_ddp(rank, world_size): + """Setup ddp environment.""" + global CURRENT_PORT + + os.environ["MASTER_ADDR"] = "localhost" + os.environ["MASTER_PORT"] = str(CURRENT_PORT) + + CURRENT_PORT += 1 + if CURRENT_PORT > MAX_PORT: + CURRENT_PORT = START_PORT + + if B.distributed.is_available() and sys.platform not in ("win32", "cygwin"): + B.distributed.init_process_group("gloo", rank=rank, world_size=world_size) + + +def _assert_allclose(pl_result: Any, sk_result: Any, atol: float = 1e-8, key: Optional[str] = None) -> None: + """Utility function for recursively asserting that two results are within a certain tolerance.""" + # single output compare + if isinstance(pl_result, Tensor): + assert np.allclose(pl_result.detach().cpu().numpy(), sk_result, atol=atol, equal_nan=True) + # multi output compare + elif isinstance(pl_result, Sequence): + for pl_res, sk_res in zip(pl_result, sk_result): + _assert_allclose(pl_res, sk_res, atol=atol) + elif isinstance(pl_result, Dict): + if key is None: + raise KeyError("Provide Key for Dict based metric results.") + assert np.allclose(pl_result[key].detach().cpu().numpy(), sk_result, atol=atol, equal_nan=True) + else: + raise ValueError("Unknown format for comparison") + + +def _assert_tensor(pl_result: Any, key: Optional[str] = None) -> None: + """Utility function for recursively checking that some input only consists of torch tensors.""" + if isinstance(pl_result, Sequence): + for plr in pl_result: + _assert_tensor(plr) + elif isinstance(pl_result, Dict): + if key is None: + raise KeyError("Provide Key for Dict based metric results.") + assert isinstance(pl_result[key], Tensor) + else: + assert isinstance(pl_result, Tensor) + + +def _assert_requires_grad(metric: Metric, pl_result: Any, key: Optional[str] = None) -> None: + """Utility function for recursively asserting that metric output is consistent with the `is_differentiable` + attribute.""" + if isinstance(pl_result, Sequence): + for plr in pl_result: + _assert_requires_grad(metric, plr, key=key) + elif isinstance(pl_result, Dict): + if key is None: + raise KeyError("Provide Key for Dict based metric results.") + assert metric.is_differentiable == pl_result[key].requires_grad + else: + assert metric.is_differentiable == pl_result.requires_grad, f"{metric.is_differentiable} vs {pl_result.requires_grad}" + + +def _class_test( + rank: int, + worldsize: int, + preds: Tensor, + target: Tensor, + metric_class: Metric, + sk_metric: Callable, + dist_sync_on_step: bool, + metric_args: dict = None, + check_dist_sync_on_step: bool = True, + 
check_batch: bool = True, + atol: float = 1e-8, + device: str = "cpu", + fragment_kwargs: bool = False, + check_scriptable: bool = True, + **kwargs_update: Any, +): + """Utility function doing the actual comparison between lightning class metric and reference metric. + + Args: + rank: rank of current process + worldsize: number of processes + preds: torch tensor with predictions + target: torch tensor with targets + metric_class: lightning metric class that should be tested + sk_metric: callable function that is used for comparison + dist_sync_on_step: bool, if true will synchronize metric state across + processes at each ``forward()`` + metric_args: dict with additional arguments used for class initialization + check_dist_sync_on_step: bool, if true will check if the metric is also correctly + calculated per batch per device (and not just at the end) + check_batch: bool, if true will check if the metric is also correctly + calculated across devices for each batch (and not just at the end) + device: determine which device to run on, either 'cuda' or 'cpu' + fragment_kwargs: whether tensors in kwargs should be divided as `preds` and `target` among processes + kwargs_update: Additional keyword arguments that will be passed with preds and + target when running update on the metric. + """ + assert preds.shape[0] == target.shape[0] + num_batches = preds.shape[0] + + if not metric_args: + metric_args = {} + + # Instantiate lightning metric + metric = metric_class( + compute_on_step=check_dist_sync_on_step or check_batch, dist_sync_on_step=dist_sync_on_step, **metric_args + ) + with pytest.raises(RuntimeError): + metric.is_differentiable = not metric.is_differentiable + with pytest.raises(RuntimeError): + metric.higher_is_better = not metric.higher_is_better + + # check that the metric is scriptable + # if check_scriptable: + # B.jit.script(metric) + + # move to device + metric = metric.to(device) + preds = preds.to(device) + target = target.to(device) + kwargs_update = {k: v.to(device) if isinstance(v, Tensor) else v for k, v in kwargs_update.items()} + + # verify metrics work after being loaded from pickled state +# pickled_metric = pickle.dumps(metric) +# metric = pickle.loads(pickled_metric) + + for i in range(rank, num_batches, worldsize): + batch_kwargs_update = {k: v[i] if isinstance(v, Tensor) else v for k, v in kwargs_update.items()} + + batch_result = metric(preds[i], target[i], **batch_kwargs_update) + + if metric.dist_sync_on_step and check_dist_sync_on_step and rank == 0: + ddp_preds = B.cat([preds[i + r] for r in range(worldsize)]).cpu() + ddp_target = B.cat([target[i + r] for r in range(worldsize)]).cpu() + ddp_kwargs_upd = { + k: B.cat([v[i + r] for r in range(worldsize)]).cpu() if isinstance(v, Tensor) else v + for k, v in (kwargs_update if fragment_kwargs else batch_kwargs_update).items() + } + + sk_batch_result = sk_metric(ddp_preds, ddp_target, **ddp_kwargs_upd) + _assert_allclose(batch_result, sk_batch_result, atol=atol) + + elif check_batch and not metric.dist_sync_on_step: + batch_kwargs_update = { + k: v.cpu() if isinstance(v, Tensor) else v + for k, v in (batch_kwargs_update if fragment_kwargs else kwargs_update).items() + } + sk_batch_result = sk_metric(preds[i].cpu(), target[i].cpu(), **batch_kwargs_update) + _assert_allclose(batch_result, sk_batch_result, atol=atol) + + # check that metrics are hashable + assert hash(metric) + + # check on all batches on all ranks + result = metric.compute() + _assert_tensor(result) + + total_preds = B.cat([preds[i] for i in 
range(num_batches)]).cpu() + total_target = B.cat([target[i] for i in range(num_batches)]).cpu() + total_kwargs_update = { + k: B.cat([v[i] for i in range(num_batches)]).cpu() if isinstance(v, Tensor) else v + for k, v in kwargs_update.items() + } + sk_result = sk_metric(total_preds, total_target, **total_kwargs_update) + + # assert after aggregation + _assert_allclose(result, sk_result, atol=atol) + + +def _functional_test( + preds: Tensor, + target: Tensor, + metric_functional: Callable, + sk_metric: Callable, + metric_args: dict = None, + atol: float = 1e-8, + device: str = "cpu", + fragment_kwargs: bool = False, + **kwargs_update, +): + """Utility function doing the actual comparison between lightning functional metric and reference metric. + + Args: + preds: torch tensor with predictions + target: torch tensor with targets + metric_functional: lightning metric functional that should be tested + sk_metric: callable function that is used for comparison + metric_args: dict with additional arguments used for class initialization + device: determine which device to run on, either 'cuda' or 'cpu' + fragment_kwargs: whether tensors in kwargs should be divided as `preds` and `target` among processes + kwargs_update: Additional keyword arguments that will be passed with preds and + target when running update on the metric. + """ + assert preds.shape[0] == target.shape[0] + num_batches = preds.shape[0] + + if not metric_args: + metric_args = {} + + metric = partial(metric_functional, **metric_args) + + # move to device + preds = preds.to(device) + target = target.to(device) + kwargs_update = {k: v.to(device) if isinstance(v, Tensor) else v for k, v in kwargs_update.items()} + + for i in range(num_batches): + extra_kwargs = {k: v[i] if isinstance(v, Tensor) else v for k, v in kwargs_update.items()} + lightning_result = metric(preds[i], target[i], **extra_kwargs) + extra_kwargs = { + k: v.cpu() if isinstance(v, Tensor) else v + for k, v in (extra_kwargs if fragment_kwargs else kwargs_update).items() + } + sk_result = sk_metric(preds[i].cpu(), target[i].cpu(), **extra_kwargs) + + # assert its the same + _assert_allclose(lightning_result, sk_result, atol=atol) + + +def _assert_half_support( + metric_module: Optional[Metric], + metric_functional: Optional[Callable], + preds: Tensor, + target: Tensor, + device: str = "cpu", + **kwargs_update, +): + """Test if an metric can be used with half precision tensors. + + Args: + metric_module: the metric module to test + metric_functional: the metric functional to test + preds: torch tensor with predictions + target: torch tensor with targets + device: determine device, either "cpu" or "cuda" + kwargs_update: Additional keyword arguments that will be passed with preds and + target when running update on the metric. + """ + y_hat = preds[0].half().to(device) if preds[0].is_floating_point() else preds[0].to(device) + y = target[0].half().to(device) if target[0].is_floating_point() else target[0].to(device) + kwargs_update = { + k: (v[0].half() if v.is_floating_point() else v[0]).to(device) if isinstance(v, Tensor) else v + for k, v in kwargs_update.items() + } + if metric_module is not None: + metric_module = metric_module.to(device) + _assert_tensor(metric_module(y_hat, y, **kwargs_update)) + if metric_functional is not None: + _assert_tensor(metric_functional(y_hat, y, **kwargs_update)) + + +gpu_device_name = "cuda" if B.platform() == "torch" else "gpu" + +class MetricTester: + """Class used for efficiently run alot of parametrized tests in ddp mode. 
Makes sure that ddp is only setup + once and that pool of processes are used for all tests. + + All tests should subclass from this and implement a new method called `test_metric_name` where the method + `self.run_metric_test` is called inside. + """ + + atol = 1e-8 + + def setup_class(self): + """Setup the metric class. + + This will spawn the pool of workers that are used for metric testing and setup_ddp + """ + + self.poolSize = NUM_PROCESSES + self.pool = Pool(processes=self.poolSize) + self.pool.starmap(setup_ddp, [(rank, self.poolSize) for rank in range(self.poolSize)]) + + def teardown_class(self): + """Close pool of workers.""" + self.pool.close() + self.pool.join() + + def run_functional_metric_test( + self, + preds: Tensor, + target: Tensor, + metric_functional: Callable, + sk_metric: Callable, + metric_args: dict = None, + fragment_kwargs: bool = False, + **kwargs_update, + ): + """Main method that should be used for testing functions. Call this inside testing method. + + Args: + preds: torch tensor with predictions + target: torch tensor with targets + metric_functional: lightning metric class that should be tested + sk_metric: callable function that is used for comparison + metric_args: dict with additional arguments used for class initialization + fragment_kwargs: whether tensors in kwargs should be divided as `preds` and `target` among processes + kwargs_update: Additional keyword arguments that will be passed with preds and + target when running update on the metric. + """ + device = gpu_device_name if (B.cuda.is_available() and B.cuda.device_count() > 0) else "cpu" + + _functional_test( + preds=preds, + target=target, + metric_functional=metric_functional, + sk_metric=sk_metric, + metric_args=metric_args, + atol=self.atol, + device=device, + fragment_kwargs=fragment_kwargs, + **kwargs_update, + ) + + def run_class_metric_test( + self, + ddp: bool, + preds: Tensor, + target: Tensor, + metric_class: Metric, + sk_metric: Callable, + dist_sync_on_step: bool, + metric_args: dict = None, + check_dist_sync_on_step: bool = True, + check_batch: bool = True, + fragment_kwargs: bool = False, + check_scriptable: bool = True, + **kwargs_update, + ): + """Main method that should be used for testing class. Call this inside testing methods. + + Args: + ddp: bool, if running in ddp mode or not + preds: torch tensor with predictions + target: torch tensor with targets + metric_class: lightning metric class that should be tested + sk_metric: callable function that is used for comparison + dist_sync_on_step: bool, if true will synchronize metric state across + processes at each ``forward()`` + metric_args: dict with additional arguments used for class initialization + check_dist_sync_on_step: bool, if true will check if the metric is also correctly + calculated per batch per device (and not just at the end) + check_batch: bool, if true will check if the metric is also correctly + calculated across devices for each batch (and not just at the end) + fragment_kwargs: whether tensors in kwargs should be divided as `preds` and `target` among processes + kwargs_update: Additional keyword arguments that will be passed with preds and + target when running update on the metric. 
+ """ + if not metric_args: + metric_args = {} + if ddp: + if sys.platform == "win32": + pytest.skip("DDP not supported on windows") + + self.pool.starmap( + partial( + _class_test, + preds=preds, + target=target, + metric_class=metric_class, + sk_metric=sk_metric, + dist_sync_on_step=dist_sync_on_step, + metric_args=metric_args, + check_dist_sync_on_step=check_dist_sync_on_step, + check_batch=check_batch, + atol=self.atol, + fragment_kwargs=fragment_kwargs, + check_scriptable=check_scriptable, + **kwargs_update, + ), + [(rank, self.poolSize) for rank in range(self.poolSize)], + ) + else: + device = gpu_device_name if (B.cuda.is_available() and B.cuda.device_count() > 0) else "cpu" + + _class_test( + rank=0, + worldsize=1, + preds=preds, + target=target, + metric_class=metric_class, + sk_metric=sk_metric, + dist_sync_on_step=dist_sync_on_step, + metric_args=metric_args, + check_dist_sync_on_step=check_dist_sync_on_step, + check_batch=check_batch, + atol=self.atol, + device=device, + fragment_kwargs=fragment_kwargs, + check_scriptable=check_scriptable, + **kwargs_update, + ) + + @staticmethod + def run_precision_test_cpu( + preds: Tensor, + target: Tensor, + metric_module: Optional[Metric] = None, + metric_functional: Optional[Callable] = None, + metric_args: Optional[dict] = None, + **kwargs_update, + ): + """Test if a metric can be used with half precision tensors on cpu + Args: + preds: torch tensor with predictions + target: torch tensor with targets + metric_module: the metric module to test + metric_functional: the metric functional to test + metric_args: dict with additional arguments used for class initialization + kwargs_update: Additional keyword arguments that will be passed with preds and + target when running update on the metric. + """ + metric_args = metric_args or {} + _assert_half_support( + metric_module(**metric_args) if metric_module is not None else None, + metric_functional, + preds, + target, + device="cpu", + **kwargs_update, + ) + + @staticmethod + def run_precision_test_gpu( + preds: Tensor, + target: Tensor, + metric_module: Optional[Metric] = None, + metric_functional: Optional[Callable] = None, + metric_args: Optional[dict] = None, + **kwargs_update, + ): + """Test if a metric can be used with half precision tensors on gpu + Args: + preds: torch tensor with predictions + target: torch tensor with targets + metric_module: the metric module to test + metric_functional: the metric functional to test + metric_args: dict with additional arguments used for class initialization + kwargs_update: Additional keyword arguments that will be passed with preds and + target when running update on the metric. + """ + metric_args = metric_args or {} + _assert_half_support( + metric_module(**metric_args) if metric_module is not None else None, + metric_functional, + preds, + target, + device=gpu_device_name, + **kwargs_update, + ) + + @staticmethod + def run_differentiability_test( + preds: Tensor, + target: Tensor, + metric_module: Metric, + metric_functional: Optional[Callable] = None, + metric_args: Optional[dict] = None, + ): + """Test if a metric is differentiable or not. 
+ + Args: + preds: torch tensor with predictions + target: torch tensor with targets + metric_module: the metric module to test + metric_args: dict with additional arguments used for class initialization + """ + metric_args = metric_args or {} + # only floating point tensors can require grad + metric = metric_module(**metric_args) + if preds.is_floating_point(): + preds.requires_grad = True + out = metric(preds[0], target[0]) + + # Check if requires_grad matches is_differentiable attribute + # _assert_requires_grad(metric, out) + + if metric.is_differentiable and metric_functional is not None: + # check for numerical correctness + assert B.autograd.gradcheck( + partial(metric_functional, **metric_args), (preds[0].double(), target[0]) + ) + + # reset as else it will carry over to other tests + preds.requires_grad = False + + +class DummyMetric(Metric): + name = "Dummy" + + def __init__(self): + super().__init__() + self.add_state("x", tensor(0.0), dist_reduce_fx=None) + + def update(self): + pass + + def compute(self): + pass + + +class DummyListMetric(Metric): + name = "DummyList" + + def __init__(self): + super().__init__() + self.add_state("x", [], dist_reduce_fx=None) + + def update(self): + pass + + def compute(self): + pass + + +class DummyMetricSum(DummyMetric): + def update(self, x): + self.x += x + + def compute(self): + return self.x + + +class DummyMetricDiff(DummyMetric): + def update(self, y): + self.x -= y + + def compute(self): + return self.x + + +class DummyMetricMultiOutput(DummyMetricSum): + def compute(self): + return [self.x, self.x] diff --git a/EE/paddlemetric/src/tests/image/__init__.py b/EE/paddlemetric/src/tests/image/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/EE/paddlemetric/src/tests/image/test_fid.py b/EE/paddlemetric/src/tests/image/test_fid.py new file mode 100644 index 000000000..fe76a1054 --- /dev/null +++ b/EE/paddlemetric/src/tests/image/test_fid.py @@ -0,0 +1,156 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
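
The FID tests that follow first validate the library's `sqrtm` against `scipy.linalg.sqrtm` on products of empirical covariance matrices, since FID needs the matrix square root of `cov1 @ cov2`. A minimal standalone check of the defining property on a single covariance matrix (sizes are arbitrary):

```python
import numpy as np
from scipy.linalg import sqrtm

rng = np.random.default_rng(42)
data = rng.standard_normal((20, 10))
cov = (data - data.mean(0)).T @ (data - data.mean(0))  # symmetric PSD matrix

root = sqrtm(cov).real  # drop negligible imaginary round-off, as the test below does
assert np.allclose(root @ root, cov, atol=1e-6)  # squaring the root recovers cov
```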
+import pickle + +import pytest +import paddleext.torchapi as B +from scipy.linalg import sqrtm as scipy_sqrtm +from B.utils.data import Dataset + +from paddlemetrics.image.fid import FID, sqrtm +from paddlemetrics.utilities.imports import _TORCH_FIDELITY_AVAILABLE + +B.manual_seed(42) + + +@pytest.mark.parametrize("matrix_size", [2, 10, 100, 500]) +def test_matrix_sqrt(matrix_size): + """test that metrix sqrt function works as expected.""" + + def generate_cov(n): + data = B.randn(2 * n, n) + return (data - data.mean(dim=0)).T @ (data - data.mean(dim=0)) + + cov1 = generate_cov(matrix_size) + cov2 = generate_cov(matrix_size) + + scipy_res = scipy_sqrtm((cov1 @ cov2).numpy()).real + tm_res = sqrtm(cov1 @ cov2) + assert B.allclose(B.tensor(scipy_res).float(), tm_res, atol=1e-3) + + +@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity") +def test_no_train(): + """Assert that metric never leaves evaluation mode.""" + + class MyModel(B.nn.Module): + def __init__(self): + super().__init__() + self.metric = FID() + + def forward(self, x): + return x + + model = MyModel() + model.train() + assert model.training + assert not model.metric.inception.training, "FID metric was changed to training mode which should not happen" + + +@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity") +def test_fid_pickle(): + """Assert that we can initialize the metric and pickle it.""" + metric = FID() + assert metric + + # verify metrics work after being loaded from pickled state + pickled_metric = pickle.dumps(metric) + metric = pickle.loads(pickled_metric) + + +def test_fid_raises_errors_and_warnings(): + """Test that expected warnings and errors are raised.""" + with pytest.warns( + UserWarning, + match="Metric `FID` will save all extracted features in buffer." + " For large datasets this may lead to large memory footprint.", + ): + _ = FID() + + if _TORCH_FIDELITY_AVAILABLE: + with pytest.raises(ValueError, match="Integer input to argument `feature` must be one of .*"): + _ = FID(feature=2) + else: + with pytest.raises( + ValueError, + match="FID metric requires that Torch-fidelity is installed." 
+ "Either install as `pip install paddlemetrics[image-quality]`" + " or `pip install torch-fidelity`", + ): + _ = FID() + + with pytest.raises(TypeError, match="Got unknown input to argument `feature`"): + _ = FID(feature=[1, 2]) + + +@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity") +@pytest.mark.parametrize("feature", [64, 192, 768, 2048]) +def test_fid_same_input(feature): + """if real and fake are update on the same data the fid score should be + 0.""" + metric = FID(feature=feature) + + for _ in range(2): + img = B.randint(0, 255, (10, 3, 299, 299), dtype=B.uint8) + metric.update(img, real=True) + metric.update(img, real=False) + + assert B.allclose(B.cat(metric.real_features, dim=0), B.cat(metric.fake_features, dim=0)) + + val = metric.compute() + assert B.allclose(val, B.zeros_like(val), atol=1e-3) + + +class _ImgDataset(Dataset): + def __init__(self, imgs): + self.imgs = imgs + + def __getitem__(self, idx): + return self.imgs[idx] + + def __len__(self): + return self.imgs.shape[0] + + +@pytest.mark.skipif(not B.cuda.is_available(), reason="test is too slow without gpu") +@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity") +def test_compare_fid(tmpdir, feature=2048): + """check that the hole pipeline give the same result as torch-fidelity.""" + from torch_fidelity import calculate_metrics + + metric = FID(feature=feature).cuda() + + # Generate some synthetic data + img1 = B.randint(0, 180, (100, 3, 299, 299), dtype=B.uint8) + img2 = B.randint(100, 255, (100, 3, 299, 299), dtype=B.uint8) + + batch_size = 10 + for i in range(img1.shape[0] // batch_size): + metric.update(img1[batch_size * i : batch_size * (i + 1)].cuda(), real=True) + + for i in range(img2.shape[0] // batch_size): + metric.update(img2[batch_size * i : batch_size * (i + 1)].cuda(), real=False) + + torch_fid = calculate_metrics( + input1=_ImgDataset(img1), + input2=_ImgDataset(img2), + fid=True, + feature_layer_fid=str(feature), + batch_size=batch_size, + save_cpu_ram=True, + ) + + tm_res = metric.compute() + + assert B.allclose(tm_res.cpu(), B.tensor([torch_fid["frechet_inception_distance"]]), atol=1e-3) diff --git a/EE/paddlemetric/src/tests/image/test_inception.py b/EE/paddlemetric/src/tests/image/test_inception.py new file mode 100644 index 000000000..4bfd5db6b --- /dev/null +++ b/EE/paddlemetric/src/tests/image/test_inception.py @@ -0,0 +1,125 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import pickle
+
+import pytest
+import paddleext.torchapi as B
+from paddleext.torchapi.utils.data import Dataset
+
+from paddlemetrics.image.inception import IS
+from paddlemetrics.utilities.imports import _TORCH_FIDELITY_AVAILABLE
+
+B.manual_seed(42)
+
+
+@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity")
+def test_no_train():
+    """Assert that metric never leaves evaluation mode."""
+
+    class MyModel(B.nn.Module):
+        def __init__(self):
+            super().__init__()
+            self.metric = IS()
+
+        def forward(self, x):
+            return x
+
+    model = MyModel()
+    model.train()
+    assert model.training
+    assert not model.metric.inception.training, "IS metric was changed to training mode which should not happen"
+
+
+@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity")
+def test_is_pickle():
+    """Assert that we can initialize the metric and pickle it."""
+    metric = IS()
+    assert metric
+
+    # verify metrics work after being loaded from pickled state
+    pickled_metric = pickle.dumps(metric)
+    metric = pickle.loads(pickled_metric)
+
+
+def test_is_raises_errors_and_warnings():
+    """Test that expected warnings and errors are raised."""
+    with pytest.warns(
+        UserWarning,
+        match="Metric `IS` will save all extracted features in buffer."
+        " For large datasets this may lead to large memory footprint.",
+    ):
+        IS()
+
+    if _TORCH_FIDELITY_AVAILABLE:
+        with pytest.raises(ValueError, match="Integer input to argument `feature` must be one of .*"):
+            _ = IS(feature=2)
+    else:
+        with pytest.raises(
+            ValueError,
+            match="IS metric requires that Torch-fidelity is installed."
+            "Either install as `pip install paddlemetrics[image-quality]`"
+            " or `pip install torch-fidelity`",
+        ):
+            IS()
+
+    with pytest.raises(TypeError, match="Got unknown input to argument `feature`"):
+        IS(feature=[1, 2])
+
+
+@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity")
+def test_is_update_compute():
+    """test that the metric can be updated and computed."""
+    metric = IS()
+
+    for _ in range(2):
+        img = B.randint(0, 255, (10, 3, 299, 299), dtype=B.uint8)
+        metric.update(img)
+
+    mean, std = metric.compute()
+    assert mean >= 0.0
+    assert std >= 0.0
+
+
+class _ImgDataset(Dataset):
+    def __init__(self, imgs):
+        self.imgs = imgs
+
+    def __getitem__(self, idx):
+        return self.imgs[idx]
+
+    def __len__(self):
+        return self.imgs.shape[0]
+
+
+@pytest.mark.skipif(not B.cuda.is_available(), reason="test is too slow without gpu")
+@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity")
+def test_compare_is(tmpdir):
+    """check that the whole pipeline gives the same result as torch-fidelity."""
+    from torch_fidelity import calculate_metrics
+
+    metric = IS(splits=1).cuda()
+
+    # Generate some synthetic data
+    img1 = B.randint(0, 255, (100, 3, 299, 299), dtype=B.uint8)
+
+    batch_size = 10
+    for i in range(img1.shape[0] // batch_size):
+        metric.update(img1[batch_size * i : batch_size * (i + 1)].cuda())
+
+    torch_fid = calculate_metrics(
+        input1=_ImgDataset(img1), isc=True, isc_splits=1, batch_size=batch_size, save_cpu_ram=True
+    )
+
+    tm_mean, _ = metric.compute()
+
+    assert B.allclose(tm_mean.cpu(), B.tensor([torch_fid["inception_score_mean"]]), atol=1e-3)
diff --git a/EE/paddlemetric/src/tests/image/test_kid.py b/EE/paddlemetric/src/tests/image/test_kid.py
new file mode 100644
index 000000000..586c5f04d
--- /dev/null
+++ b/EE/paddlemetric/src/tests/image/test_kid.py
@@ -0,0 +1,166 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import pickle
+
+import pytest
+import paddleext.torchapi as B
+from paddleext.torchapi.utils.data import Dataset
+
+from paddlemetrics.image.kid import KID
+from paddlemetrics.utilities.imports import _TORCH_FIDELITY_AVAILABLE
+
+B.manual_seed(42)
+
+
+@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity")
+def test_no_train():
+    """Assert that metric never leaves evaluation mode."""
+
+    class MyModel(B.nn.Module):
+        def __init__(self):
+            super().__init__()
+            self.metric = KID()
+
+        def forward(self, x):
+            return x
+
+    model = MyModel()
+    model.train()
+    assert model.training
+    assert not model.metric.inception.training, "KID metric was changed to training mode which should not happen"
+
+
+@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity")
+def test_kid_pickle():
+    """Assert that we can initialize the metric and pickle it."""
+    metric = KID()
+    assert metric
+
+    # verify metrics work after being loaded from pickled state
+    pickled_metric = pickle.dumps(metric)
+    metric = pickle.loads(pickled_metric)
+
+
+def test_kid_raises_errors_and_warnings():
+    """Test that expected warnings and errors are raised."""
+    with pytest.warns(
+        UserWarning,
+        match="Metric `KID` will save all extracted features in buffer."
+        " For large datasets this may lead to large memory footprint.",
+    ):
+        KID()
+
+    if _TORCH_FIDELITY_AVAILABLE:
+        with pytest.raises(ValueError, match="Integer input to argument `feature` must be one of .*"):
+            KID(feature=2)
+    else:
+        with pytest.raises(
+            ValueError,
+            match="KID metric requires that Torch-fidelity is installed."
+ "Either install as `pip install paddlemetrics[image]`" + " or `pip install torch-fidelity`", + ): + KID() + + with pytest.raises(TypeError, match="Got unknown input to argument `feature`"): + KID(feature=[1, 2]) + + with pytest.raises(ValueError, match="Argument `subset_size` should be smaller than the number of samples"): + m = KID() + m.update(B.randint(0, 255, (5, 3, 299, 299), dtype=B.uint8), real=True) + m.update(B.randint(0, 255, (5, 3, 299, 299), dtype=B.uint8), real=False) + m.compute() + + +@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity") +def test_kid_extra_parameters(): + with pytest.raises(ValueError, match="Argument `subsets` expected to be integer larger than 0"): + KID(subsets=-1) + + with pytest.raises(ValueError, match="Argument `subset_size` expected to be integer larger than 0"): + KID(subset_size=-1) + + with pytest.raises(ValueError, match="Argument `degree` expected to be integer larger than 0"): + KID(degree=-1) + + with pytest.raises(ValueError, match="Argument `gamma` expected to be `None` or float larger than 0"): + KID(gamma=-1) + + with pytest.raises(ValueError, match="Argument `coef` expected to be float larger than 0"): + KID(coef=-1) + + +@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity") +@pytest.mark.parametrize("feature", [64, 192, 768, 2048]) +def test_kid_same_input(feature): + """test that the metric works.""" + metric = KID(feature=feature, subsets=5, subset_size=2) + + for _ in range(2): + img = B.randint(0, 255, (10, 3, 299, 299), dtype=B.uint8) + metric.update(img, real=True) + metric.update(img, real=False) + + assert B.allclose(B.cat(metric.real_features, dim=0), B.cat(metric.fake_features, dim=0)) + + mean, std = metric.compute() + assert mean != 0.0 + assert std >= 0.0 + + +class _ImgDataset(Dataset): + def __init__(self, imgs): + self.imgs = imgs + + def __getitem__(self, idx): + return self.imgs[idx] + + def __len__(self): + return self.imgs.shape[0] + + +@pytest.mark.skipif(not B.cuda.is_available(), reason="test is too slow without gpu") +@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity") +def test_compare_kid(tmpdir, feature=2048): + """check that the hole pipeline give the same result as torch-fidelity.""" + from torch_fidelity import calculate_metrics + + metric = KID(feature=feature, subsets=1, subset_size=100).cuda() + + # Generate some synthetic data + img1 = B.randint(0, 180, (100, 3, 299, 299), dtype=B.uint8) + img2 = B.randint(100, 255, (100, 3, 299, 299), dtype=B.uint8) + + batch_size = 10 + for i in range(img1.shape[0] // batch_size): + metric.update(img1[batch_size * i : batch_size * (i + 1)].cuda(), real=True) + + for i in range(img2.shape[0] // batch_size): + metric.update(img2[batch_size * i : batch_size * (i + 1)].cuda(), real=False) + + torch_fid = calculate_metrics( + input1=_ImgDataset(img1), + input2=_ImgDataset(img2), + kid=True, + feature_layer_fid=str(feature), + batch_size=batch_size, + kid_subsets=1, + kid_subset_size=100, + save_cpu_ram=True, + ) + + tm_mean, tm_std = metric.compute() + + assert B.allclose(tm_mean.cpu(), B.tensor([torch_fid["kernel_inception_distance_mean"]]), atol=1e-3) + assert B.allclose(tm_std.cpu(), B.tensor([torch_fid["kernel_inception_distance_std"]]), atol=1e-3) diff --git a/EE/paddlemetric/src/tests/image/test_lpips.py b/EE/paddlemetric/src/tests/image/test_lpips.py new file mode 100644 index 000000000..fd3e83a1d --- /dev/null +++ 
+++ b/EE/paddlemetric/src/tests/image/test_lpips.py
@@ -0,0 +1,103 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from collections import namedtuple
+from functools import partial
+
+import pytest
+import paddleext.torchapi as B
+from lpips import LPIPS as reference_LPIPS
+from paddleext.torchapi import Tensor
+
+from tests.helpers import seed_all
+from tests.helpers.testers import BATCH_SIZE, NUM_BATCHES, MetricTester
+from paddlemetrics.image.lpip_similarity import LPIPS
+from paddlemetrics.utilities.imports import _LPIPS_AVAILABLE
+
+seed_all(42)
+
+Input = namedtuple("Input", ["img1", "img2"])
+
+_inputs = Input(
+    img1=B.rand(int(NUM_BATCHES * 0.4), int(BATCH_SIZE / 16), 3, 100, 100),
+    img2=B.rand(int(NUM_BATCHES * 0.4), int(BATCH_SIZE / 16), 3, 100, 100),
+)
+
+
+def _compare_fn(img1: Tensor, img2: Tensor, net_type: str, reduction: str = "mean") -> Tensor:
+    """comparison function based on the reference lpips package."""
+    ref = reference_LPIPS(net=net_type)
+    res = ref(img1, img2).detach().cpu().numpy()
+    if reduction == "mean":
+        return res.mean()
+    return res.sum()
+
+
+@pytest.mark.skipif(not _LPIPS_AVAILABLE, reason="test requires that lpips is installed")
+@pytest.mark.parametrize("net_type", ["vgg", "alex", "squeeze"])
+class TestLPIPS(MetricTester):
+    @pytest.mark.parametrize("ddp", [True, False])
+    def test_lpips(self, net_type, ddp):
+        """test modular implementation for correctness."""
+        self.run_class_metric_test(
+            ddp=ddp,
+            preds=_inputs.img1,
+            target=_inputs.img2,
+            metric_class=LPIPS,
+            sk_metric=partial(_compare_fn, net_type=net_type),
+            dist_sync_on_step=False,
+            check_scriptable=False,
+            metric_args={"net_type": net_type},
+        )
+
+    def test_lpips_differentiability(self, net_type):
+        """test for differentiability of LPIPS metric."""
+        self.run_differentiability_test(preds=_inputs.img1, target=_inputs.img2, metric_module=LPIPS)
+
+    # LPIPS half + cpu does not work due to missing support in B.min
+    @pytest.mark.xfail(reason="LPIPS metric does not support cpu + half precision")
+    def test_lpips_half_cpu(self, net_type):
+        """test for half + cpu support."""
+        self.run_precision_test_cpu(_inputs.img1, _inputs.img2, LPIPS)
+
+    @pytest.mark.skipif(not B.cuda.is_available(), reason="test requires cuda")
+    def test_lpips_half_gpu(self, net_type):
+        """test for half + gpu support."""
+        self.run_precision_test_gpu(_inputs.img1, _inputs.img2, LPIPS)
+
+
+@pytest.mark.skipif(not _LPIPS_AVAILABLE, reason="test requires that lpips is installed")
+def test_error_on_wrong_init():
+    """Test class raises the expected errors."""
+    with pytest.raises(ValueError, match="Argument `net_type` must be one .*"):
+        LPIPS(net_type="resnet")
+
+    with pytest.raises(ValueError, match="Argument `reduction` must be one .*"):
+        LPIPS(reduction=None)
+
+
+@pytest.mark.skipif(not _LPIPS_AVAILABLE, reason="test requires that lpips is installed")
+@pytest.mark.parametrize(
+    "inp1, inp2",
+    [
+        (B.rand(1, 1, 28, 28), B.rand(1, 3, 28, 28)),  # wrong number of channels
+
(B.rand(1, 3, 28, 28), B.rand(1, 1, 28, 28)), # wrong number of channels + (B.randn(1, 3, 28, 28), B.rand(1, 3, 28, 28)), # non-normalized input + (B.rand(1, 3, 28, 28), B.randn(1, 3, 28, 28)), # non-normalized input + ], +) +def test_error_on_wrong_update(inp1, inp2): + """test error is raised on wrong input to update method.""" + metric = LPIPS() + with pytest.raises(ValueError, match="Expected both input arguments to be normalized tensors .*"): + metric(inp1, inp2) diff --git a/EE/paddlemetric/src/tests/image/test_psnr.py b/EE/paddlemetric/src/tests/image/test_psnr.py new file mode 100644 index 000000000..cc8b857a5 --- /dev/null +++ b/EE/paddlemetric/src/tests/image/test_psnr.py @@ -0,0 +1,149 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections import namedtuple +from functools import partial + +import numpy as np +import pytest +import paddleext.torchapi as B +from skimage.metrics import peak_signal_noise_ratio + +from tests.helpers import seed_all +from tests.helpers.testers import BATCH_SIZE, NUM_BATCHES, MetricTester +from paddlemetrics.functional import psnr +from paddlemetrics.image import PSNR + +seed_all(42) + +Input = namedtuple("Input", ["preds", "target"]) + +_input_size = (NUM_BATCHES, BATCH_SIZE, 32, 32) +_inputs = [ + Input( + preds=B.randint(n_cls_pred, _input_size, dtype=B.float), + target=B.randint(n_cls_target, _input_size, dtype=B.float), + ) + for n_cls_pred, n_cls_target in [(10, 10), (5, 10), (10, 5)] +] + + +def _to_sk_peak_signal_noise_ratio_inputs(value, dim): + value = value.numpy() + batches = value[None] if value.ndim == len(_input_size) - 1 else value + + if dim is None: + return [batches] + + num_dims = np.size(dim) + if not num_dims: + return batches + + inputs = [] + for batch in batches: + batch = np.moveaxis(batch, dim, np.arange(-num_dims, 0)) + psnr_input_shape = batch.shape[-num_dims:] + inputs.extend(batch.reshape(-1, *psnr_input_shape)) + return inputs + + +def _sk_psnr(preds, target, data_range, reduction, dim): + sk_preds_lists = _to_sk_peak_signal_noise_ratio_inputs(preds, dim=dim) + sk_target_lists = _to_sk_peak_signal_noise_ratio_inputs(target, dim=dim) + np_reduce_map = {"elementwise_mean": np.mean, "none": np.array, "sum": np.sum} + return np_reduce_map[reduction]( + [ + peak_signal_noise_ratio(sk_target, sk_preds, data_range=data_range) + for sk_target, sk_preds in zip(sk_target_lists, sk_preds_lists) + ] + ) + + +def _base_e_sk_psnr(preds, target, data_range, reduction, dim): + return _sk_psnr(preds, target, data_range, reduction, dim) * np.log(10) + + +@pytest.mark.parametrize( + "preds, target, data_range, reduction, dim", + [ + (_inputs[0].preds, _inputs[0].target, 10, "elementwise_mean", None), + (_inputs[1].preds, _inputs[1].target, 10, "elementwise_mean", None), + (_inputs[2].preds, _inputs[2].target, 5, "elementwise_mean", None), + (_inputs[2].preds, _inputs[2].target, 5, "elementwise_mean", 1), + (_inputs[2].preds, _inputs[2].target, 5, "elementwise_mean", (1, 2)), + 
(_inputs[2].preds, _inputs[2].target, 5, "sum", (1, 2)), + ], +) +@pytest.mark.parametrize( + "base, sk_metric", + [ + (10.0, _sk_psnr), + (2.718281828459045, _base_e_sk_psnr), + ], +) +class TestPSNR(MetricTester): + @pytest.mark.parametrize("ddp", [True, False]) + @pytest.mark.parametrize("dist_sync_on_step", [True, False]) + def test_psnr(self, preds, target, data_range, base, reduction, dim, sk_metric, ddp, dist_sync_on_step): + _args = {"data_range": data_range, "base": base, "reduction": reduction, "dim": dim} + self.run_class_metric_test( + ddp, + preds, + target, + PSNR, + partial(sk_metric, data_range=data_range, reduction=reduction, dim=dim), + metric_args=_args, + dist_sync_on_step=dist_sync_on_step, + ) + + def test_psnr_functional(self, preds, target, sk_metric, data_range, base, reduction, dim): + _args = {"data_range": data_range, "base": base, "reduction": reduction, "dim": dim} + self.run_functional_metric_test( + preds, + target, + psnr, + partial(sk_metric, data_range=data_range, reduction=reduction, dim=dim), + metric_args=_args, + ) + + # PSNR half + cpu does not work due to missing support in B.log + @pytest.mark.xfail(reason="PSNR metric does not support cpu + half precision") + def test_psnr_half_cpu(self, preds, target, data_range, reduction, dim, base, sk_metric): + self.run_precision_test_cpu( + preds, target, PSNR, psnr, {"data_range": data_range, "base": base, "reduction": reduction, "dim": dim} + ) + + @pytest.mark.skipif(not B.cuda.is_available(), reason="test requires cuda") + def test_psnr_half_gpu(self, preds, target, data_range, reduction, dim, base, sk_metric): + self.run_precision_test_gpu( + preds, target, PSNR, psnr, {"data_range": data_range, "base": base, "reduction": reduction, "dim": dim} + ) + + +@pytest.mark.parametrize("reduction", ["none", "sum"]) +def test_reduction_for_dim_none(reduction): + match = f"The `reduction={reduction}` will not have any effect when `dim` is None." + with pytest.warns(UserWarning, match=match): + PSNR(reduction=reduction, dim=None) + + with pytest.warns(UserWarning, match=match): + psnr(_inputs[0].preds, _inputs[0].target, reduction=reduction, dim=None) + + +def test_missing_data_range(): + with pytest.raises(ValueError): + PSNR(data_range=None, dim=0) + + with pytest.raises(ValueError): + psnr(_inputs[0].preds, _inputs[0].target, data_range=None, dim=0) diff --git a/EE/paddlemetric/src/tests/image/test_ssim.py b/EE/paddlemetric/src/tests/image/test_ssim.py new file mode 100644 index 000000000..d249db0d1 --- /dev/null +++ b/EE/paddlemetric/src/tests/image/test_ssim.py @@ -0,0 +1,167 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
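The PSNR tests above reduce to a one-line definition, PSNR = 10 * log10(data_range^2 / MSE); the base-e parametrization is just that value scaled by ln 10, which is why `_base_e_sk_psnr` multiplies by `np.log(10)`. A sketch of the reference computation checked against scikit-image (function name is illustrative):

```python
import numpy as np
from skimage.metrics import peak_signal_noise_ratio


def psnr_ref(preds, target, data_range):
    # PSNR in dB: ratio of the squared peak value to the mean squared error
    mse = np.mean((preds - target) ** 2)
    return 10.0 * np.log10(data_range ** 2 / mse)


rng = np.random.default_rng(0)
target = rng.random((32, 32))
preds = np.clip(target + rng.normal(scale=0.05, size=(32, 32)), 0.0, 1.0)
assert np.isclose(
    psnr_ref(preds, target, data_range=1.0),
    peak_signal_noise_ratio(target, preds, data_range=1.0),
)
```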
+from collections import namedtuple +from functools import partial + +import pytest +import paddleext.torchapi as B +from skimage.metrics import structural_similarity + +from tests.helpers import seed_all +from tests.helpers.testers import BATCH_SIZE, NUM_BATCHES, MetricTester +from paddlemetrics.functional import ssim +from paddlemetrics.image import SSIM + +seed_all(42) + +Input = namedtuple("Input", ["preds", "target", "multichannel"]) + +_inputs = [] +for size, channel, coef, multichannel, dtype in [ + (12, 3, 0.9, True, B.float), + (13, 1, 0.8, False, B.float32), + (14, 1, 0.7, False, B.double), + (15, 3, 0.6, True, B.float64), +]: + preds = B.rand(NUM_BATCHES, BATCH_SIZE, channel, size, size, dtype=dtype) + _inputs.append( + Input( + preds=preds, + target=preds * coef, + multichannel=multichannel, + ) + ) + + +def _sk_ssim(preds, target, data_range, multichannel, kernel_size): + c, h, w = preds.shape[-3:] + sk_preds = preds.view(-1, c, h, w).permute(0, 2, 3, 1).numpy() + sk_target = target.view(-1, c, h, w).permute(0, 2, 3, 1).numpy() + if not multichannel: + sk_preds = sk_preds[:, :, :, 0] + sk_target = sk_target[:, :, :, 0] + + return structural_similarity( + sk_target, + sk_preds, + data_range=data_range, + multichannel=multichannel, + gaussian_weights=True, + win_size=kernel_size, + sigma=1.5, + use_sample_covariance=False, + ) + + +@pytest.mark.parametrize( + "preds, target, multichannel", + [(i.preds, i.target, i.multichannel) for i in _inputs], +) +@pytest.mark.parametrize("kernel_size", [5, 11]) +class TestSSIM(MetricTester): + atol = 6e-3 + + @pytest.mark.parametrize("ddp", [True, False]) + @pytest.mark.parametrize("dist_sync_on_step", [True, False]) + def test_ssim(self, preds, target, multichannel, kernel_size, ddp, dist_sync_on_step): + self.run_class_metric_test( + ddp, + preds, + target, + SSIM, + partial(_sk_ssim, data_range=1.0, multichannel=multichannel, kernel_size=kernel_size), + metric_args={"data_range": 1.0, "kernel_size": (kernel_size, kernel_size)}, + dist_sync_on_step=dist_sync_on_step, + ) + + def test_ssim_functional(self, preds, target, multichannel, kernel_size): + self.run_functional_metric_test( + preds, + target, + ssim, + partial(_sk_ssim, data_range=1.0, multichannel=multichannel, kernel_size=kernel_size), + metric_args={"data_range": 1.0, "kernel_size": (kernel_size, kernel_size)}, + ) + + # SSIM half + cpu does not work due to missing support in B.log + @pytest.mark.xfail(reason="SSIM metric does not support cpu + half precision") + def test_ssim_half_cpu(self, preds, target, multichannel, kernel_size): + self.run_precision_test_cpu(preds, target, SSIM, ssim, {"data_range": 1.0}) + + @pytest.mark.skipif(not B.cuda.is_available(), reason="test requires cuda") + def test_ssim_half_gpu(self, preds, target, multichannel, kernel_size): + self.run_precision_test_gpu(preds, target, SSIM, ssim, {"data_range": 1.0}) + + +@pytest.mark.parametrize( + ["pred", "target", "kernel", "sigma"], + [ + pytest.param([1, 16, 16], [1, 16, 16], [11, 11], [1.5, 1.5]), # len(shape) + pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11, 11], [1.5]), # len(kernel), len(sigma) + pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11], [1.5, 1.5]), # len(kernel), len(sigma) + pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11], [1.5]), # len(kernel), len(sigma) + pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11, 0], [1.5, 1.5]), # invalid kernel input + pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11, 10], [1.5, 1.5]), # invalid kernel input + pytest.param([1, 1, 16, 16], [1, 1, 16, 
16], [11, -11], [1.5, 1.5]), # invalid kernel input + pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11, 11], [1.5, 0]), # invalid sigma input + pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11, 0], [1.5, -1.5]), # invalid sigma input + ], +) +def test_ssim_invalid_inputs(pred, target, kernel, sigma): + pred_t = B.rand(pred) + target_t = B.rand(target, dtype=B.float64) + with pytest.raises(TypeError): + ssim(pred_t, target_t) + + pred = B.rand(pred) + target = B.rand(target) + with pytest.raises(ValueError): + ssim(pred, target, kernel, sigma) + + +def test_ssim_unequal_kernel_size(): + """Test the case where kernel_size[0] != kernel_size[1]""" + preds = B.tensor( + [ + [ + [ + [1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0], + [1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0], + [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0], + [0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0], + [1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0], + ] + ] + ] + ) + target = B.tensor( + [ + [ + [ + [1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0], + [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0], + [1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0], + [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0], + [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0], + [0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0], + ] + ] + ] + ) + # kernel order matters + assert ssim(preds, target, kernel_size=(3, 5)) == B.tensor(0.10814697) + assert ssim(preds, target, kernel_size=(5, 3)) != B.tensor(0.10814697) diff --git a/EE/paddlemetric/src/tests/pairwise/__init__.py b/EE/paddlemetric/src/tests/pairwise/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/EE/paddlemetric/src/tests/pairwise/test_pairwise_distance.py b/EE/paddlemetric/src/tests/pairwise/test_pairwise_distance.py new file mode 100644 index 000000000..5a642a603 --- /dev/null +++ b/EE/paddlemetric/src/tests/pairwise/test_pairwise_distance.py @@ -0,0 +1,121 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
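The pairwise tests below validate against sklearn's dense pairwise functions. As a sketch of what such a functional computes, here is the standard broadcasted expansion for the Euclidean case, checked against the same sklearn reference (the helper name is illustrative, not part of the test suite):

```python
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances


def pairwise_euclidean(x, y):
    # ||x_i - y_j||^2 = ||x_i||^2 + ||y_j||^2 - 2 * x_i . y_j
    sq = (x ** 2).sum(1)[:, None] + (y ** 2).sum(1)[None, :] - 2.0 * x @ y.T
    return np.sqrt(np.clip(sq, 0.0, None))  # clip guards tiny negative round-off


rng = np.random.default_rng(1)
x, y = rng.random((10, 5)), rng.random((12, 5))
assert np.allclose(pairwise_euclidean(x, y), euclidean_distances(x, y), atol=1e-6)
```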
+from collections import namedtuple
+from functools import partial
+
+import pytest
+import paddleext.torchapi as B
+from sklearn.metrics.pairwise import cosine_similarity, euclidean_distances, linear_kernel, manhattan_distances
+
+from tests.helpers import seed_all
+from tests.helpers.testers import BATCH_SIZE, NUM_BATCHES, MetricTester
+from paddlemetrics.functional import (
+    pairwise_cosine_similarity,
+    pairwise_euclidean_distance,
+    pairwise_linear_similarity,
+    pairwise_manhatten_distance,
+)
+from paddlemetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_7
+
+seed_all(42)
+
+extra_dim = 5
+
+Input = namedtuple("Input", ["x", "y"])
+
+
+_inputs1 = Input(
+    x=B.rand(NUM_BATCHES, BATCH_SIZE, extra_dim),
+    y=B.rand(NUM_BATCHES, BATCH_SIZE, extra_dim),
+)
+
+
+_inputs2 = Input(
+    x=B.rand(NUM_BATCHES, BATCH_SIZE, extra_dim),
+    y=B.rand(NUM_BATCHES, BATCH_SIZE, extra_dim),
+)
+
+
+def _sk_metric(x, y, sk_fn, reduction):
+    """comparison function based on the sklearn pairwise implementations."""
+    x = x.view(-1, extra_dim).numpy()
+    y = y.view(-1, extra_dim).numpy()
+    res = sk_fn(x, y)
+    if reduction == "sum":
+        return res.sum(axis=-1)
+    elif reduction == "mean":
+        return res.mean(axis=-1)
+    return res
+
+
+@pytest.mark.parametrize(
+    "x, y",
+    [
+        (_inputs1.x, _inputs1.y),
+        (_inputs2.x, _inputs2.y),
+    ],
+)
+@pytest.mark.parametrize(
+    "metric_functional, sk_fn",
+    [
+        (pairwise_cosine_similarity, cosine_similarity),
+        (pairwise_euclidean_distance, euclidean_distances),
+        (pairwise_manhatten_distance, manhattan_distances),
+        (pairwise_linear_similarity, linear_kernel),
+    ],
+)
+@pytest.mark.parametrize("reduction", ["sum", "mean", None])
+class TestPairwise(MetricTester):
+    """test pairwise implementations."""
+
+    atol = 1e-4
+
+    def test_pairwise_functional(self, x, y, metric_functional, sk_fn, reduction):
+        """test functional pairwise implementations."""
+        self.run_functional_metric_test(
+            preds=x,
+            target=y,
+            metric_functional=metric_functional,
+            sk_metric=partial(_sk_metric, sk_fn=sk_fn, reduction=reduction),
+            metric_args={"reduction": reduction},
+        )
+
+    @pytest.mark.skipif(
+        not _TORCH_GREATER_EQUAL_1_7, reason="half support of core operations is not available before pytorch v1.7"
+    )
+    def test_pairwise_half_cpu(self, x, y, metric_functional, sk_fn, reduction):
+        """test half precision support on cpu."""
+        if metric_functional == pairwise_euclidean_distance:
+            pytest.xfail("pairwise_euclidean_distance metric does not support cpu + half precision")
+        self.run_precision_test_cpu(x, y, None, metric_functional, metric_args={"reduction": reduction})
+
+    @pytest.mark.skipif(not B.cuda.is_available(), reason="test requires cuda")
+    def test_pairwise_half_gpu(self, x, y, metric_functional, sk_fn, reduction):
+        """test half precision support on gpu."""
+        self.run_precision_test_gpu(x, y, None, metric_functional, metric_args={"reduction": reduction})
+
+
+@pytest.mark.parametrize(
+    "metric", [pairwise_cosine_similarity, pairwise_euclidean_distance, pairwise_manhatten_distance]
+)
+def test_error_on_wrong_shapes(metric):
+    """Test errors are raised on wrong input."""
+    with pytest.raises(ValueError, match="Expected argument `x` to be a 2D tensor .*"):
+        metric(B.randn(10))
+
+    with pytest.raises(ValueError, match="Expected argument `y` to be a 2D tensor .*"):
+        metric(B.randn(10, 5), B.randn(5, 3))
+
+    with pytest.raises(ValueError, match="Expected reduction to be one of .*"):
+        metric(B.randn(10, 5), B.randn(10, 5), reduction=1)
diff --git a/EE/paddlemetric/src/tests/regression/__init__.py
b/EE/paddlemetric/src/tests/regression/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/EE/paddlemetric/src/tests/regression/test_cosine_similarity.py b/EE/paddlemetric/src/tests/regression/test_cosine_similarity.py new file mode 100644 index 000000000..0821cc112 --- /dev/null +++ b/EE/paddlemetric/src/tests/regression/test_cosine_similarity.py @@ -0,0 +1,111 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from collections import namedtuple +from functools import partial + +import numpy as np +import pytest +import paddleext.torchapi as B +from sklearn.metrics.pairwise import cosine_similarity as sk_cosine + +from tests.helpers import seed_all +from tests.helpers.testers import BATCH_SIZE, NUM_BATCHES, MetricTester +from paddlemetrics.functional.regression.cosine_similarity import cosine_similarity +from paddlemetrics.regression.cosine_similarity import CosineSimilarity + +seed_all(42) + +num_targets = 5 + +Input = namedtuple("Input", ["preds", "target"]) + +_single_target_inputs = Input( + preds=B.rand(NUM_BATCHES, BATCH_SIZE), + target=B.rand(NUM_BATCHES, BATCH_SIZE), +) + +_multi_target_inputs = Input( + preds=B.rand(NUM_BATCHES, BATCH_SIZE, num_targets), + target=B.rand(NUM_BATCHES, BATCH_SIZE, num_targets), +) + + +def _multi_target_sk_metric(preds, target, reduction, sk_fn=sk_cosine): + sk_preds = preds.view(-1, num_targets).numpy() + sk_target = target.view(-1, num_targets).numpy() + result_array = sk_fn(sk_target, sk_preds) + col = np.diagonal(result_array) + col_sum = col.sum() + if reduction == "sum": + to_return = col_sum + elif reduction == "mean": + mean = col_sum / len(col) + to_return = mean + else: + to_return = col + return to_return + + +def _single_target_sk_metric(preds, target, reduction, sk_fn=sk_cosine): + sk_preds = preds.view(-1).numpy() + sk_target = target.view(-1).numpy() + result_array = sk_fn(np.expand_dims(sk_preds, axis=0), np.expand_dims(sk_target, axis=0)) + col = np.diagonal(result_array) + col_sum = col.sum() + if reduction == "sum": + to_return = col_sum + elif reduction == "mean": + mean = col_sum / len(col) + to_return = mean + else: + to_return = col + return to_return + + +@pytest.mark.parametrize("reduction", ["sum", "mean"]) +@pytest.mark.parametrize( + "preds, target, sk_metric", + [ + (_single_target_inputs.preds, _single_target_inputs.target, _single_target_sk_metric), + (_multi_target_inputs.preds, _multi_target_inputs.target, _multi_target_sk_metric), + ], +) +class TestCosineSimilarity(MetricTester): + @pytest.mark.parametrize("ddp", [True, False]) + @pytest.mark.parametrize("dist_sync_on_step", [True, False]) + def test_cosine_similarity(self, reduction, preds, target, sk_metric, ddp, dist_sync_on_step): + self.run_class_metric_test( + ddp, + preds, + target, + CosineSimilarity, + partial(sk_metric, reduction=reduction), + dist_sync_on_step, + metric_args=dict(reduction=reduction), + ) + + def test_cosine_similarity_functional(self, reduction, preds, target, sk_metric): + 
self.run_functional_metric_test( + preds, + target, + cosine_similarity, + partial(sk_metric, reduction=reduction), + metric_args=dict(reduction=reduction), + ) + + +def test_error_on_different_shape(metric_class=CosineSimilarity): + metric = metric_class() + with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"): + metric(B.randn(100), B.randn(50)) diff --git a/EE/paddlemetric/src/tests/regression/test_explained_variance.py b/EE/paddlemetric/src/tests/regression/test_explained_variance.py new file mode 100644 index 000000000..a227d0d48 --- /dev/null +++ b/EE/paddlemetric/src/tests/regression/test_explained_variance.py @@ -0,0 +1,110 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from collections import namedtuple +from functools import partial + +import pytest +import paddleext.torchapi as B +from sklearn.metrics import explained_variance_score + +from tests.helpers import seed_all +from tests.helpers.testers import BATCH_SIZE, NUM_BATCHES, MetricTester +from paddlemetrics.functional import explained_variance +from paddlemetrics.regression import ExplainedVariance +from paddlemetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_6 + +seed_all(42) + +num_targets = 5 + +Input = namedtuple("Input", ["preds", "target"]) + +_single_target_inputs = Input( + preds=B.rand(NUM_BATCHES, BATCH_SIZE), + target=B.rand(NUM_BATCHES, BATCH_SIZE), +) + +_multi_target_inputs = Input( + preds=B.rand(NUM_BATCHES, BATCH_SIZE, num_targets), + target=B.rand(NUM_BATCHES, BATCH_SIZE, num_targets), +) + + +def _single_target_sk_metric(preds, target, sk_fn=explained_variance_score): + sk_preds = preds.view(-1).numpy() + sk_target = target.view(-1).numpy() + return sk_fn(sk_target, sk_preds) + + +def _multi_target_sk_metric(preds, target, sk_fn=explained_variance_score): + sk_preds = preds.view(-1, num_targets).numpy() + sk_target = target.view(-1, num_targets).numpy() + return sk_fn(sk_target, sk_preds) + + +@pytest.mark.parametrize("multioutput", ["raw_values", "uniform_average", "variance_weighted"]) +@pytest.mark.parametrize( + "preds, target, sk_metric", + [ + (_single_target_inputs.preds, _single_target_inputs.target, _single_target_sk_metric), + (_multi_target_inputs.preds, _multi_target_inputs.target, _multi_target_sk_metric), + ], +) +class TestExplainedVariance(MetricTester): + @pytest.mark.parametrize("ddp", [True, False]) + @pytest.mark.parametrize("dist_sync_on_step", [True, False]) + def test_explained_variance(self, multioutput, preds, target, sk_metric, ddp, dist_sync_on_step): + self.run_class_metric_test( + ddp, + preds, + target, + ExplainedVariance, + partial(sk_metric, sk_fn=partial(explained_variance_score, multioutput=multioutput)), + dist_sync_on_step, + metric_args=dict(multioutput=multioutput), + ) + + def test_explained_variance_functional(self, multioutput, preds, target, sk_metric): + self.run_functional_metric_test( + preds, + target, + explained_variance, + partial(sk_metric, 
sk_fn=partial(explained_variance_score, multioutput=multioutput)),
+            metric_args=dict(multioutput=multioutput),
+        )
+
+    def test_explained_variance_differentiability(self, multioutput, preds, target, sk_metric):
+        self.run_differentiability_test(
+            preds=preds,
+            target=target,
+            metric_module=ExplainedVariance,
+            metric_functional=explained_variance,
+            metric_args={"multioutput": multioutput},
+        )
+
+    @pytest.mark.skipif(
+        not _TORCH_GREATER_EQUAL_1_6, reason="half support of core operations is not available before pytorch v1.6"
+    )
+    def test_explained_variance_half_cpu(self, multioutput, preds, target, sk_metric):
+        self.run_precision_test_cpu(preds, target, ExplainedVariance, explained_variance)
+
+    @pytest.mark.skipif(not B.cuda.is_available(), reason="test requires cuda")
+    def test_explained_variance_half_gpu(self, multioutput, preds, target, sk_metric):
+        self.run_precision_test_gpu(preds, target, ExplainedVariance, explained_variance)
+
+
+def test_error_on_different_shape(metric_class=ExplainedVariance):
+    metric = metric_class()
+    with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
+        metric(B.randn(100), B.randn(50))
diff --git a/EE/paddlemetric/src/tests/regression/test_mean_error.py b/EE/paddlemetric/src/tests/regression/test_mean_error.py
new file mode 100644
index 000000000..b9d9a31ea
--- /dev/null
+++ b/EE/paddlemetric/src/tests/regression/test_mean_error.py
@@ -0,0 +1,177 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
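The mean-error tests below import `symmetric_mean_absolute_percentage_error` from a local helper because sklearn ships no SMAPE reference. Sketched here under the assumption that the helper follows the usual 2|p - t| / (|p| + |t|) formulation with an epsilon guard; the helper itself is not part of this hunk, so treat this as illustrative:

```python
import numpy as np


def smape_ref(target, preds, eps=1e-8):
    # symmetric MAPE, averaged over elements; `eps` is an assumed guard
    # against division by zero when target and prediction are both 0
    denom = np.maximum(np.abs(target) + np.abs(preds), eps)
    return float(np.mean(2.0 * np.abs(preds - target) / denom))


t = np.array([1.0, 2.0, 4.0])
p = np.array([1.0, 2.5, 3.0])
print(smape_ref(t, p))  # ~0.1693
```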
+import math +from collections import namedtuple +from functools import partial + +import pytest +import paddleext.torchapi as B +from sklearn.metrics import mean_absolute_error as sk_mean_absolute_error +from sklearn.metrics import mean_absolute_percentage_error as sk_mean_abs_percentage_error +from sklearn.metrics import mean_squared_error as sk_mean_squared_error +from sklearn.metrics import mean_squared_log_error as sk_mean_squared_log_error + +from tests.helpers import seed_all +from tests.helpers.non_sklearn_metrics import ( + symmetric_mean_absolute_percentage_error as sk_sym_mean_abs_percentage_error, +) +from tests.helpers.testers import BATCH_SIZE, NUM_BATCHES, MetricTester +from paddlemetrics.functional import ( + mean_absolute_error, + mean_absolute_percentage_error, + mean_squared_error, + mean_squared_log_error, +) +from paddlemetrics.functional.regression.symmetric_mean_absolute_percentage_error import ( + symmetric_mean_absolute_percentage_error, +) +from paddlemetrics.regression import ( + MeanAbsoluteError, + MeanAbsolutePercentageError, + MeanSquaredError, + MeanSquaredLogError, +) +from paddlemetrics.regression.symmetric_mean_absolute_percentage_error import SymmetricMeanAbsolutePercentageError +from paddlemetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_6 + +seed_all(42) + +num_targets = 5 + +Input = namedtuple("Input", ["preds", "target"]) + +_single_target_inputs = Input( + preds=B.rand(NUM_BATCHES, BATCH_SIZE), + target=B.rand(NUM_BATCHES, BATCH_SIZE), +) + +_multi_target_inputs = Input( + preds=B.rand(NUM_BATCHES, BATCH_SIZE, num_targets), + target=B.rand(NUM_BATCHES, BATCH_SIZE, num_targets), +) + + +def _single_target_sk_metric(preds, target, sk_fn, metric_args): + sk_preds = preds.view(-1).numpy() + sk_target = target.view(-1).numpy() + + # `sk_target` and `sk_preds` switched to fix failing tests. + # For more info, check https://github.com/PyTorchLightning/metrics/pull/248#issuecomment-841232277 + res = sk_fn(sk_target, sk_preds) + + return math.sqrt(res) if (metric_args and not metric_args["squared"]) else res + + +def _multi_target_sk_metric(preds, target, sk_fn, metric_args): + sk_preds = preds.view(-1, num_targets).numpy() + sk_target = target.view(-1, num_targets).numpy() + + # `sk_target` and `sk_preds` switched to fix failing tests. 
+    # For more info, check https://github.com/PyTorchLightning/metrics/pull/248#issuecomment-841232277
+    res = sk_fn(sk_target, sk_preds)
+
+    return math.sqrt(res) if (metric_args and not metric_args["squared"]) else res
+
+
+@pytest.mark.parametrize(
+    "preds, target, sk_metric",
+    [
+        (_single_target_inputs.preds, _single_target_inputs.target, _single_target_sk_metric),
+        (_multi_target_inputs.preds, _multi_target_inputs.target, _multi_target_sk_metric),
+    ],
+)
+@pytest.mark.parametrize(
+    "metric_class, metric_functional, sk_fn, metric_args",
+    [
+        (MeanSquaredError, mean_squared_error, sk_mean_squared_error, {"squared": True}),
+        (MeanSquaredError, mean_squared_error, sk_mean_squared_error, {"squared": False}),
+        (MeanAbsoluteError, mean_absolute_error, sk_mean_absolute_error, {}),
+        (MeanAbsolutePercentageError, mean_absolute_percentage_error, sk_mean_abs_percentage_error, {}),
+        (
+            SymmetricMeanAbsolutePercentageError,
+            symmetric_mean_absolute_percentage_error,
+            sk_sym_mean_abs_percentage_error,
+            {},
+        ),
+        (MeanSquaredLogError, mean_squared_log_error, sk_mean_squared_log_error, {}),
+    ],
+)
+class TestMeanError(MetricTester):
+    @pytest.mark.parametrize("ddp", [True, False])
+    @pytest.mark.parametrize("dist_sync_on_step", [True, False])
+    def test_mean_error_class(
+        self, preds, target, sk_metric, metric_class, metric_functional, sk_fn, metric_args, ddp, dist_sync_on_step
+    ):
+        # todo: `metric_functional` is unused
+        self.run_class_metric_test(
+            ddp=ddp,
+            preds=preds,
+            target=target,
+            metric_class=metric_class,
+            sk_metric=partial(sk_metric, sk_fn=sk_fn, metric_args=metric_args),
+            dist_sync_on_step=dist_sync_on_step,
+            metric_args=metric_args,
+        )
+
+    def test_mean_error_functional(self, preds, target, sk_metric, metric_class, metric_functional, sk_fn, metric_args):
+        # todo: `metric_class` is unused
+        self.run_functional_metric_test(
+            preds=preds,
+            target=target,
+            metric_functional=metric_functional,
+            sk_metric=partial(sk_metric, sk_fn=sk_fn, metric_args=metric_args),
+            metric_args=metric_args,
+        )
+
+    def test_mean_error_differentiability(
+        self, preds, target, sk_metric, metric_class, metric_functional, sk_fn, metric_args
+    ):
+        self.run_differentiability_test(
+            preds=preds,
+            target=target,
+            metric_module=metric_class,
+            metric_functional=metric_functional,
+            metric_args=metric_args,
+        )
+
+    @pytest.mark.skipif(
+        not _TORCH_GREATER_EQUAL_1_6, reason="half support of core operations is not available before pytorch v1.6"
+    )
+    def test_mean_error_half_cpu(self, preds, target, sk_metric, metric_class, metric_functional, sk_fn, metric_args):
+        if metric_class == MeanSquaredLogError:
+            # MeanSquaredLogError half + cpu does not work due to missing support in B.log
+            pytest.xfail("MeanSquaredLogError metric does not support cpu + half precision")
+
+        if metric_class == MeanAbsolutePercentageError:
+            # MeanAbsolutePercentageError half + cpu does not work due to missing support in B.log
+            pytest.xfail("MeanAbsolutePercentageError metric does not support cpu + half precision")
+
+        if metric_class == SymmetricMeanAbsolutePercentageError:
+            # SymmetricMeanAbsolutePercentageError half + cpu does not work due to missing support in B.log
+            pytest.xfail("SymmetricMeanAbsolutePercentageError metric does not support cpu + half precision")
+
+        self.run_precision_test_cpu(preds, target, metric_class, metric_functional)
+
+    @pytest.mark.skipif(not B.cuda.is_available(), reason="test requires cuda")
+    def test_mean_error_half_gpu(self, preds, target, sk_metric, metric_class, metric_functional,
sk_fn, metric_args): + self.run_precision_test_gpu(preds, target, metric_class, metric_functional) + + +@pytest.mark.parametrize( + "metric_class", [MeanSquaredError, MeanAbsoluteError, MeanSquaredLogError, MeanAbsolutePercentageError] +) +def test_error_on_different_shape(metric_class): + metric = metric_class() + with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"): + metric(B.randn(100), B.randn(50)) diff --git a/EE/paddlemetric/src/tests/regression/test_pearson.py b/EE/paddlemetric/src/tests/regression/test_pearson.py new file mode 100644 index 000000000..09e1ac21f --- /dev/null +++ b/EE/paddlemetric/src/tests/regression/test_pearson.py @@ -0,0 +1,93 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from collections import namedtuple + +import pytest +import paddleext.torchapi as B +from scipy.stats import pearsonr + +from tests.helpers import seed_all +from tests.helpers.testers import BATCH_SIZE, NUM_BATCHES, MetricTester +from paddlemetrics.functional.regression.pearson import pearson_corrcoef +from paddlemetrics.regression.pearson import PearsonCorrcoef + +seed_all(42) + +Input = namedtuple("Input", ["preds", "target"]) + +_single_target_inputs1 = Input( + preds=B.rand(NUM_BATCHES, BATCH_SIZE), + target=B.rand(NUM_BATCHES, BATCH_SIZE), +) + +_single_target_inputs2 = Input( + preds=B.randn(NUM_BATCHES, BATCH_SIZE), + target=B.randn(NUM_BATCHES, BATCH_SIZE), +) + + +def _sk_pearsonr(preds, target): + sk_preds = preds.view(-1).numpy() + sk_target = target.view(-1).numpy() + return pearsonr(sk_target, sk_preds)[0] + + +@pytest.mark.parametrize( + "preds, target", + [ + (_single_target_inputs1.preds, _single_target_inputs1.target), + (_single_target_inputs2.preds, _single_target_inputs2.target), + ], +) +class TestPearsonCorrcoef(MetricTester): + atol = 1e-2 + + @pytest.mark.parametrize("ddp", [True, False]) + def test_pearson_corrcoef(self, preds, target, ddp): + self.run_class_metric_test( + ddp=ddp, + preds=preds, + target=target, + metric_class=PearsonCorrcoef, + sk_metric=_sk_pearsonr, + dist_sync_on_step=False, + ) + + def test_pearson_corrcoef_functional(self, preds, target): + self.run_functional_metric_test( + preds=preds, target=target, metric_functional=pearson_corrcoef, sk_metric=_sk_pearsonr + ) + + def test_pearson_corrcoef_differentiability(self, preds, target): + self.run_differentiability_test( + preds=preds, target=target, metric_module=PearsonCorrcoef, metric_functional=pearson_corrcoef + ) + + # Pearson half + cpu does not work due to missing support in B.sqrt + @pytest.mark.xfail(reason="PearsonCorrcoef metric does not support cpu + half precision") + def test_pearson_corrcoef_half_cpu(self, preds, target): + self.run_precision_test_cpu(preds, target, PearsonCorrcoef, pearson_corrcoef) + + @pytest.mark.skipif(not B.cuda.is_available(), reason="test requires cuda") + def test_pearson_corrcoef_half_gpu(self, preds, target): + self.run_precision_test_gpu(preds, target, 
PearsonCorrcoef, pearson_corrcoef) + + +def test_error_on_different_shape(): + metric = PearsonCorrcoef() + with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"): + metric(B.randn(100), B.randn(50)) + + with pytest.raises(ValueError, match="Expected both predictions and target to be 1 dimensional tensors."): + metric(B.randn(100, 2), B.randn(100, 2)) diff --git a/EE/paddlemetric/src/tests/regression/test_r2.py b/EE/paddlemetric/src/tests/regression/test_r2.py new file mode 100644 index 000000000..ebed636a1 --- /dev/null +++ b/EE/paddlemetric/src/tests/regression/test_r2.py @@ -0,0 +1,164 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from collections import namedtuple +from functools import partial + +import pytest +import paddleext.torchapi as B +from sklearn.metrics import r2_score as sk_r2score + +from tests.helpers import seed_all +from tests.helpers.testers import BATCH_SIZE, NUM_BATCHES, MetricTester +from paddlemetrics.functional import r2_score +from paddlemetrics.regression import R2Score +from paddlemetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_6 + +seed_all(42) + +num_targets = 5 + +Input = namedtuple("Input", ["preds", "target"]) + +_single_target_inputs = Input( + preds=B.rand(NUM_BATCHES, BATCH_SIZE), + target=B.rand(NUM_BATCHES, BATCH_SIZE), +) + +_multi_target_inputs = Input( + preds=B.rand(NUM_BATCHES, BATCH_SIZE, num_targets), + target=B.rand(NUM_BATCHES, BATCH_SIZE, num_targets), +) + + +def _single_target_sk_metric(preds, target, adjusted, multioutput): + sk_preds = preds.view(-1).numpy() + sk_target = target.view(-1).numpy() + r2_score = sk_r2score(sk_target, sk_preds, multioutput=multioutput) + if adjusted != 0: + r2_score = 1 - (1 - r2_score) * (sk_preds.shape[0] - 1) / (sk_preds.shape[0] - adjusted - 1) + return r2_score + + +def _multi_target_sk_metric(preds, target, adjusted, multioutput): + sk_preds = preds.view(-1, num_targets).numpy() + sk_target = target.view(-1, num_targets).numpy() + r2_score = sk_r2score(sk_target, sk_preds, multioutput=multioutput) + if adjusted != 0: + r2_score = 1 - (1 - r2_score) * (sk_preds.shape[0] - 1) / (sk_preds.shape[0] - adjusted - 1) + return r2_score + + +@pytest.mark.parametrize("adjusted", [0, 5, 10]) +@pytest.mark.parametrize("multioutput", ["raw_values", "uniform_average", "variance_weighted"]) +@pytest.mark.parametrize( + "preds, target, sk_metric, num_outputs", + [ + (_single_target_inputs.preds, _single_target_inputs.target, _single_target_sk_metric, 1), + (_multi_target_inputs.preds, _multi_target_inputs.target, _multi_target_sk_metric, num_targets), + ], +) +class TestR2Score(MetricTester): + @pytest.mark.parametrize("ddp", [True, False]) + @pytest.mark.parametrize("dist_sync_on_step", [True, False]) + def test_r2(self, adjusted, multioutput, preds, target, sk_metric, num_outputs, ddp, dist_sync_on_step): + self.run_class_metric_test( + ddp, + preds, + target, + R2Score, + partial(sk_metric, adjusted=adjusted, 
multioutput=multioutput),
+            dist_sync_on_step,
+            metric_args=dict(adjusted=adjusted, multioutput=multioutput, num_outputs=num_outputs),
+        )
+
+    def test_r2_functional(self, adjusted, multioutput, preds, target, sk_metric, num_outputs):
+        # todo: `num_outputs` is unused
+        self.run_functional_metric_test(
+            preds,
+            target,
+            r2_score,
+            partial(sk_metric, adjusted=adjusted, multioutput=multioutput),
+            metric_args=dict(adjusted=adjusted, multioutput=multioutput),
+        )
+
+    def test_r2_differentiability(self, adjusted, multioutput, preds, target, sk_metric, num_outputs):
+        self.run_differentiability_test(
+            preds=preds,
+            target=target,
+            metric_module=partial(R2Score, num_outputs=num_outputs),
+            metric_functional=r2_score,
+            metric_args=dict(adjusted=adjusted, multioutput=multioutput),
+        )
+
+    @pytest.mark.skipif(
+        not _TORCH_GREATER_EQUAL_1_6, reason="half support of core operations is not available before pytorch v1.6"
+    )
+    def test_r2_half_cpu(self, adjusted, multioutput, preds, target, sk_metric, num_outputs):
+        self.run_precision_test_cpu(
+            preds,
+            target,
+            partial(R2Score, num_outputs=num_outputs),
+            r2_score,
+            {"adjusted": adjusted, "multioutput": multioutput},
+        )
+
+    @pytest.mark.skipif(not B.cuda.is_available(), reason="test requires cuda")
+    def test_r2_half_gpu(self, adjusted, multioutput, preds, target, sk_metric, num_outputs):
+        self.run_precision_test_gpu(
+            preds,
+            target,
+            partial(R2Score, num_outputs=num_outputs),
+            r2_score,
+            {"adjusted": adjusted, "multioutput": multioutput},
+        )
+
+
+def test_error_on_different_shape(metric_class=R2Score):
+    metric = metric_class()
+    with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
+        metric(B.randn(100), B.randn(50))
+
+
+def test_error_on_multidim_tensors(metric_class=R2Score):
+    metric = metric_class()
+    with pytest.raises(
+        ValueError,
+        match=r"Expected both prediction and target to be 1D or 2D tensors," r" but received tensors with dimension .",
+    ):
+        metric(B.randn(10, 20, 5), B.randn(10, 20, 5))
+
+
+def test_error_on_too_few_samples(metric_class=R2Score):
+    metric = metric_class()
+    with pytest.raises(ValueError, match="Needs at least two samples to calculate r2 score."):
+        metric(B.randn(1), B.randn(1))
+    metric.reset()
+
+    # calling update twice should still work
+    metric.update(B.randn(1), B.randn(1))
+    metric.update(B.randn(1), B.randn(1))
+    assert metric.compute()
+
+
+def test_warning_on_too_large_adjusted(metric_class=R2Score):
+    metric = metric_class(adjusted=10)
+
+    with pytest.warns(
+        UserWarning,
+        match="More independent regressions than data points in" " adjusted r2 score. Falls back to standard r2 score.",
+    ):
+        metric(B.randn(10), B.randn(10))
+
+    with pytest.warns(UserWarning, match="Division by zero in adjusted r2 score. Falls back to" " standard r2 score."):
+        metric(B.randn(11), B.randn(11))
diff --git a/EE/paddlemetric/src/tests/regression/test_spearman.py b/EE/paddlemetric/src/tests/regression/test_spearman.py
new file mode 100644
index 000000000..a3764fdef
--- /dev/null
+++ b/EE/paddlemetric/src/tests/regression/test_spearman.py
@@ -0,0 +1,115 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from collections import namedtuple + +import pytest +import paddleext.torchapi as B +from scipy.stats import rankdata, spearmanr + +from tests.helpers import seed_all +from tests.helpers.testers import BATCH_SIZE, NUM_BATCHES, MetricTester +from paddlemetrics.functional.regression.spearman import _rank_data, spearman_corrcoef +from paddlemetrics.regression.spearman import SpearmanCorrcoef + +seed_all(42) + +Input = namedtuple("Input", ["preds", "target"]) + +_single_target_inputs1 = Input( + preds=B.rand(NUM_BATCHES, BATCH_SIZE), + target=B.rand(NUM_BATCHES, BATCH_SIZE), +) + +_single_target_inputs2 = Input( + preds=B.randn(NUM_BATCHES, BATCH_SIZE), + target=B.randn(NUM_BATCHES, BATCH_SIZE), +) + +_specific_input = Input( + preds=B.stack([B.tensor([1.0, 0.0, 4.0, 1.0, 0.0, 3.0, 0.0]) for _ in range(NUM_BATCHES)]), + target=B.stack([B.tensor([4.0, 0.0, 3.0, 3.0, 3.0, 1.0, 1.0]) for _ in range(NUM_BATCHES)]), +) + + +@pytest.mark.parametrize( + "preds, target", + [ + (_single_target_inputs1.preds, _single_target_inputs1.target), + (_single_target_inputs2.preds, _single_target_inputs2.target), + (_specific_input.preds, _specific_input.target), + ], +) +def test_ranking(preds, target): + """test that ranking function works as expected.""" + for p, t in zip(preds, target): + scipy_ranking = [rankdata(p.numpy()), rankdata(t.numpy())] + tm_ranking = [_rank_data(p), _rank_data(t)] + assert (B.tensor(scipy_ranking[0]) == tm_ranking[0]).all() + assert (B.tensor(scipy_ranking[1]) == tm_ranking[1]).all() + + +def _sk_metric(preds, target): + sk_preds = preds.view(-1).numpy() + sk_target = target.view(-1).numpy() + return spearmanr(sk_target, sk_preds)[0] + + +@pytest.mark.parametrize( + "preds, target", + [ + (_single_target_inputs1.preds, _single_target_inputs1.target), + (_single_target_inputs2.preds, _single_target_inputs2.target), + (_specific_input.preds, _specific_input.target), + ], +) +class TestSpearmanCorrcoef(MetricTester): + atol = 1e-2 + + @pytest.mark.parametrize("ddp", [True, False]) + @pytest.mark.parametrize("dist_sync_on_step", [True, False]) + def test_spearman_corrcoef(self, preds, target, ddp, dist_sync_on_step): + self.run_class_metric_test( + ddp, + preds, + target, + SpearmanCorrcoef, + _sk_metric, + dist_sync_on_step, + ) + + def test_spearman_corrcoef_functional(self, preds, target): + self.run_functional_metric_test(preds, target, spearman_corrcoef, _sk_metric) + + def test_spearman_corrcoef_differentiability(self, preds, target): + self.run_differentiability_test( + preds=preds, target=target, metric_module=SpearmanCorrcoef, metric_functional=spearman_corrcoef + ) + + # Spearman half + cpu does not work due to missing support in B.arange + @pytest.mark.xfail(reason="Spearman metric does not support cpu + half precision") + def test_spearman_corrcoef_half_cpu(self, preds, target): + self.run_precision_test_cpu(preds, target, SpearmanCorrcoef, spearman_corrcoef) + + @pytest.mark.skipif(not B.cuda.is_available(), reason="test requires cuda") + def test_spearman_corrcoef_half_gpu(self, preds, target): + self.run_precision_test_gpu(preds, target, SpearmanCorrcoef, 
spearman_corrcoef)
+
+
+def test_error_on_different_shape():
+    metric = SpearmanCorrcoef()
+    with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
+        metric(B.randn(100), B.randn(50))
+
+    with pytest.raises(ValueError, match="Expected both predictions and target to be 1 dimensional tensors."):
+        metric(B.randn(100, 2), B.randn(100, 2))
diff --git a/EE/paddlemetric/src/tests/regression/test_tweedie_deviance.py b/EE/paddlemetric/src/tests/regression/test_tweedie_deviance.py
new file mode 100644
index 000000000..af1303132
--- /dev/null
+++ b/EE/paddlemetric/src/tests/regression/test_tweedie_deviance.py
@@ -0,0 +1,140 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from collections import namedtuple
+from functools import partial
+
+import pytest
+import paddleext.torchapi as B
+from sklearn.metrics import mean_tweedie_deviance
+from paddleext.torchapi import Tensor
+
+from tests.helpers import seed_all
+from tests.helpers.testers import BATCH_SIZE, NUM_BATCHES, MetricTester
+from paddlemetrics.functional.regression.tweedie_deviance import tweedie_deviance_score
+from paddlemetrics.regression.tweedie_deviance import TweedieDevianceScore
+
+seed_all(42)
+
+Input = namedtuple("Input", ["preds", "targets"])
+
+_single_target_inputs1 = Input(
+    preds=B.rand(NUM_BATCHES, BATCH_SIZE),
+    targets=B.rand(NUM_BATCHES, BATCH_SIZE),
+)
+
+_single_target_inputs2 = Input(
+    preds=B.rand(NUM_BATCHES, BATCH_SIZE),
+    targets=B.rand(NUM_BATCHES, BATCH_SIZE),
+)
+
+_multi_target_inputs = Input(
+    preds=B.rand(NUM_BATCHES, BATCH_SIZE, 5),
+    targets=B.rand(NUM_BATCHES, BATCH_SIZE, 5),
+)
+
+
+def _sk_deviance(preds: Tensor, targets: Tensor, power: float):
+    sk_preds = preds.view(-1).numpy()
+    sk_target = targets.view(-1).numpy()
+    return mean_tweedie_deviance(sk_target, sk_preds, power=power)
+
+
+@pytest.mark.parametrize("power", [-0.5, 0, 1, 1.5, 2, 3])
+@pytest.mark.parametrize(
+    "preds, targets",
+    [
+        (_single_target_inputs1.preds, _single_target_inputs1.targets),
+        (_single_target_inputs2.preds, _single_target_inputs2.targets),
+        (_multi_target_inputs.preds, _multi_target_inputs.targets),
+    ],
+)
+class TestDevianceScore(MetricTester):
+    @pytest.mark.parametrize("ddp", [True, False])
+    @pytest.mark.parametrize("dist_sync_on_step", [True, False])
+    def test_deviance_scores_class(self, ddp, dist_sync_on_step, preds, targets, power):
+        self.run_class_metric_test(
+            ddp,
+            preds,
+            targets,
+            TweedieDevianceScore,
+            partial(_sk_deviance, power=power),
+            dist_sync_on_step,
+            metric_args=dict(power=power),
+        )
+
+    def test_deviance_scores_functional(self, preds, targets, power):
+        self.run_functional_metric_test(
+            preds,
+            targets,
+            tweedie_deviance_score,
+            partial(_sk_deviance, power=power),
+            metric_args=dict(power=power),
+        )
+
+    def test_deviance_scores_differentiability(self, preds, targets, power):
+        self.run_differentiability_test(
+            preds, targets, metric_module=TweedieDevianceScore, 
metric_functional=tweedie_deviance_score
+        )
+
+    # Tweedie Deviance Score half + cpu does not work due to missing support in B.log
+    @pytest.mark.xfail(reason="TweedieDevianceScore metric does not support cpu + half precision")
+    def test_deviance_scores_half_cpu(self, preds, targets, power):
+        metric_args = {"power": power}
+        self.run_precision_test_cpu(
+            preds,
+            targets,
+            metric_module=TweedieDevianceScore,
+            metric_functional=tweedie_deviance_score,
+            metric_args=metric_args,
+        )
+
+    @pytest.mark.skipif(not B.cuda.is_available(), reason="test requires cuda")
+    def test_deviance_scores_half_gpu(self, preds, targets, power):
+        metric_args = {"power": power}
+        self.run_precision_test_gpu(
+            preds,
+            targets,
+            metric_module=TweedieDevianceScore,
+            metric_functional=tweedie_deviance_score,
+            metric_args=metric_args,
+        )
+
+
+def test_error_on_different_shape(metric_class=TweedieDevianceScore):
+    metric = metric_class()
+    with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
+        metric(B.randn(100), B.randn(50))
+
+
+def test_error_on_invalid_inputs(metric_class=TweedieDevianceScore):
+    with pytest.raises(ValueError, match="Deviance Score is not defined for power=0.5."):
+        metric_class(power=0.5)
+
+    metric = metric_class(power=1)
+    with pytest.raises(
+        ValueError, match="For power=1, 'preds' has to be strictly positive and 'targets' cannot be negative."
+    ):
+        metric(B.tensor([-1.0, 2.0, 3.0]), B.rand(3))
+
+    with pytest.raises(
+        ValueError, match="For power=1, 'preds' has to be strictly positive and 'targets' cannot be negative."
+    ):
+        metric(B.rand(3), B.tensor([-1.0, 2.0, 3.0]))
+
+    metric = metric_class(power=2)
+    with pytest.raises(ValueError, match="For power=2, both 'preds' and 'targets' have to be strictly positive."):
+        metric(B.tensor([-1.0, 2.0, 3.0]), B.rand(3))
+
+    with pytest.raises(ValueError, match="For power=2, both 'preds' and 'targets' have to be strictly positive."):
+        metric(B.rand(3), B.tensor([-1.0, 2.0, 3.0]))
diff --git a/EE/paddlemetric/src/tests/retrieval/__init__.py b/EE/paddlemetric/src/tests/retrieval/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/EE/paddlemetric/src/tests/retrieval/helpers.py b/EE/paddlemetric/src/tests/retrieval/helpers.py
new file mode 100644
index 000000000..419fe19bb
--- /dev/null
+++ b/EE/paddlemetric/src/tests/retrieval/helpers.py
@@ -0,0 +1,511 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
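+#
+# Overview comment (added for orientation): this module collects the shared
+# fixtures for the retrieval metric tests: a NumPy-based group-by-query helper,
+# a wrapper that applies an sklearn reference metric query by query, reusable
+# pytest parametrization dictionaries for the common error cases, and a
+# MetricTester subclass that forwards `indexes` to both the metric under test
+# and the adapted reference.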
+from functools import partial +from typing import Callable, Dict, List, Tuple, Type, Union + +import numpy as np +import pytest +import paddleext.torchapi as B +from numpy import array +from paddleext.torchapi import Tensor, tensor + +from tests.helpers import seed_all +from tests.helpers.testers import Metric, MetricTester +from tests.retrieval.inputs import _input_retrieval_scores as _irs +from tests.retrieval.inputs import _input_retrieval_scores_all_target as _irs_all +from tests.retrieval.inputs import _input_retrieval_scores_empty as _irs_empty +from tests.retrieval.inputs import _input_retrieval_scores_extra as _irs_extra +from tests.retrieval.inputs import _input_retrieval_scores_float_target as _irs_float_tgt +from tests.retrieval.inputs import _input_retrieval_scores_int_target as _irs_int_tgt +from tests.retrieval.inputs import _input_retrieval_scores_mismatching_sizes as _irs_mis_sz +from tests.retrieval.inputs import _input_retrieval_scores_mismatching_sizes_func as _irs_mis_sz_fn +from tests.retrieval.inputs import _input_retrieval_scores_no_target as _irs_no_tgt +from tests.retrieval.inputs import _input_retrieval_scores_wrong_targets as _irs_bad_tgt + +seed_all(42) + +# a version of get_group_indexes that depends on NumPy is here to avoid this dependency for the full library + + +def get_group_indexes(indexes: Union[Tensor, np.ndarray]) -> List[Union[Tensor, np.ndarray]]: + """Given an integer `B.Tensor` or `np.ndarray` `indexes`, return a `B.Tensor` or `np.ndarray` of + indexes for each different value in `indexes`. + + Args: + indexes: a `B.Tensor` or `np.ndarray` of integers + + Return: + A list of integer `B.Tensor`s or `np.ndarray`s + + Example: + >>> indexes = B.tensor([0, 0, 0, 1, 1, 1, 1]) + >>> get_group_indexes(indexes) + [tensor([0, 1, 2]), tensor([3, 4, 5, 6])] + """ + structure, dtype = (tensor, B.long) if isinstance(indexes, Tensor) else (np.array, np.int64) + + res = {} + for i, _id in enumerate(indexes): + _id = _id.item() + if _id in res: + res[_id] += [i] + else: + res[_id] = [i] + + return [structure(x, dtype=dtype) for x in res.values()] + + +def _compute_sklearn_metric( + preds: Union[Tensor, array], + target: Union[Tensor, array], + indexes: np.ndarray = None, + metric: Callable = None, + empty_target_action: str = "skip", + reverse: bool = False, + **kwargs, +) -> Tensor: + """Compute metric with multiple iterations over every query predictions set.""" + + if indexes is None: + indexes = np.full_like(preds, fill_value=0, dtype=np.int64) + if isinstance(indexes, Tensor): + indexes = indexes.cpu().numpy() + if isinstance(preds, Tensor): + preds = preds.cpu().numpy() + if isinstance(target, Tensor): + target = target.cpu().numpy() + + assert isinstance(indexes, np.ndarray) + assert isinstance(preds, np.ndarray) + assert isinstance(target, np.ndarray) + + indexes = indexes.flatten() + preds = preds.flatten() + target = target.flatten() + groups = get_group_indexes(indexes) + + sk_results = [] + for group in groups: + trg, pds = target[group], preds[group] + + if ((1 - trg) if reverse else trg).sum() == 0: + if empty_target_action == "skip": + pass + elif empty_target_action == "pos": + sk_results.append(1.0) + else: + sk_results.append(0.0) + else: + res = metric(trg, pds, **kwargs) + sk_results.append(res) + + if len(sk_results) > 0: + return np.mean(sk_results) + return np.array(0.0) + + +def _concat_tests(*tests: Tuple[Dict]) -> Dict: + """Concat tests composed by a string and a list of arguments.""" + assert len(tests), "`_concat_tests` expects at 
least one argument"
+    assert all(tests[0]["argnames"] == x["argnames"] for x in tests[1:]), "the header must be the same for all tests"
+    return dict(argnames=tests[0]["argnames"], argvalues=sum((x["argvalues"] for x in tests), []))
+
+
+_errors_test_functional_metric_parameters_default = dict(
+    argnames="preds,target,message,metric_args",
+    argvalues=[
+        # check input shapes are consistent (func)
+        (_irs_mis_sz_fn.preds, _irs_mis_sz_fn.target, "`preds` and `target` must be of the same shape", {}),
+        # check input tensors are not empty
+        (_irs_empty.preds, _irs_empty.target, "`preds` and `target` must be non-empty and non-scalar tensors", {}),
+        # check on input dtypes
+        (_irs.preds.bool(), _irs.target, "`preds` must be a tensor of floats", {}),
+        # check targets are between 0 and 1
+        (_irs_bad_tgt.preds, _irs_bad_tgt.target, "`target` must contain `binary` values", {}),
+    ],
+)
+
+_errors_test_functional_metric_parameters_with_nonbinary = dict(
+    argnames="preds,target,message,metric_args",
+    argvalues=[
+        # check input shapes are consistent (func)
+        (_irs_mis_sz_fn.preds, _irs_mis_sz_fn.target, "`preds` and `target` must be of the same shape", {}),
+        # check input tensors are not empty
+        (_irs_empty.preds, _irs_empty.target, "`preds` and `target` must be non-empty and non-scalar tensors", {}),
+        # check on input dtypes
+        (_irs.preds.bool(), _irs.target, "`preds` must be a tensor of floats", {}),
+    ],
+)
+
+_errors_test_functional_metric_parameters_k = dict(
+    argnames="preds,target,message,metric_args",
+    argvalues=[
+        (_irs.preds, _irs.target, "`k` has to be a positive integer or None", dict(k=-10)),
+        (_irs.preds, _irs.target, "`k` has to be a positive integer or None", dict(k=4.0)),
+    ],
+)
+
+_errors_test_class_metric_parameters_no_pos_target = dict(
+    argnames="indexes,preds,target,message,metric_args",
+    argvalues=[
+        # check that an error is raised when there are no positive targets
+        (
+            _irs_no_tgt.indexes,
+            _irs_no_tgt.preds,
+            _irs_no_tgt.target,
+            "`compute` method was provided with a query with no positive target.",
+            dict(empty_target_action="error"),
+        ),
+    ],
+)
+
+_errors_test_class_metric_parameters_no_neg_target = dict(
+    argnames="indexes,preds,target,message,metric_args",
+    argvalues=[
+        # check that an error is raised when there are no negative targets
+        (
+            _irs_all.indexes,
+            _irs_all.preds,
+            _irs_all.target,
+            "`compute` method was provided with a query with no negative target.",
+            dict(empty_target_action="error"),
+        ),
+    ],
+)
+
+_errors_test_class_metric_parameters_with_nonbinary = dict(
+    argnames="indexes,preds,target,message,metric_args",
+    argvalues=[
+        (None, _irs.preds, _irs.target, "`indexes` cannot be None", dict(empty_target_action="error")),
+        # check that invalid input arguments are rejected
+        (
+            _irs.indexes,
+            _irs.preds,
+            _irs.target,
+            "`empty_target_action` received a wrong value `casual_argument`.",
+            dict(empty_target_action="casual_argument"),
+        ),
+        # check input shapes are consistent
+        (
+            _irs_mis_sz.indexes,
+            _irs_mis_sz.preds,
+            _irs_mis_sz.target,
+            "`indexes`, `preds` and `target` must be of the same shape",
+            dict(empty_target_action="skip"),
+        ),
+        # check input tensors are not empty
+        (
+            _irs_empty.indexes,
+            _irs_empty.preds,
+            _irs_empty.target,
+            "`indexes`, `preds` and `target` must be non-empty and non-scalar tensors",
+            dict(empty_target_action="skip"),
+        ),
+        # check on input dtypes
+        (
+            _irs.indexes.bool(),
+            _irs.preds,
+            _irs.target,
+            "`indexes` must be a tensor of long integers",
+            dict(empty_target_action="skip"),
+        ),
+        (
+            _irs.indexes,
+            
_irs.preds.bool(),
+            _irs.target,
+            "`preds` must be a tensor of floats",
+            dict(empty_target_action="skip"),
+        ),
+    ],
+)
+
+_errors_test_class_metric_parameters_default = dict(
+    argnames="indexes,preds,target,message,metric_args",
+    argvalues=[
+        (None, _irs.preds, _irs.target, "`indexes` cannot be None", dict(empty_target_action="error")),
+        # check that invalid input arguments are rejected
+        (
+            _irs.indexes,
+            _irs.preds,
+            _irs.target,
+            "`empty_target_action` received a wrong value `casual_argument`.",
+            dict(empty_target_action="casual_argument"),
+        ),
+        # check input shapes are consistent
+        (
+            _irs_mis_sz.indexes,
+            _irs_mis_sz.preds,
+            _irs_mis_sz.target,
+            "`indexes`, `preds` and `target` must be of the same shape",
+            dict(empty_target_action="skip"),
+        ),
+        # check input tensors are not empty
+        (
+            _irs_empty.indexes,
+            _irs_empty.preds,
+            _irs_empty.target,
+            "`indexes`, `preds` and `target` must be non-empty and non-scalar tensors",
+            dict(empty_target_action="skip"),
+        ),
+        # check on input dtypes
+        (
+            _irs.indexes.bool(),
+            _irs.preds,
+            _irs.target,
+            "`indexes` must be a tensor of long integers",
+            dict(empty_target_action="skip"),
+        ),
+        (
+            _irs.indexes,
+            _irs.preds.bool(),
+            _irs.target,
+            "`preds` must be a tensor of floats",
+            dict(empty_target_action="skip"),
+        ),
+    ],
+)
+
+_errors_test_class_metric_parameters_k = dict(
+    argnames="indexes,preds,target,message,metric_args",
+    argvalues=[
+        (_irs.indexes, _irs.preds, _irs.target, "`k` has to be a positive integer or None", dict(k=-10)),
+    ],
+)
+
+_default_metric_class_input_arguments = dict(
+    argnames="indexes,preds,target",
+    argvalues=[
+        (_irs.indexes, _irs.preds, _irs.target),
+        (_irs_extra.indexes, _irs_extra.preds, _irs_extra.target),
+        (_irs_no_tgt.indexes, _irs_no_tgt.preds, _irs_no_tgt.target),
+    ],
+)
+
+_default_metric_class_input_arguments_with_non_binary_target = dict(
+    argnames="indexes,preds,target",
+    argvalues=[
+        (_irs.indexes, _irs.preds, _irs.target),
+        (_irs_extra.indexes, _irs_extra.preds, _irs_extra.target),
+        (_irs_no_tgt.indexes, _irs_no_tgt.preds, _irs_no_tgt.target),
+        (_irs_int_tgt.indexes, _irs_int_tgt.preds, _irs_int_tgt.target),
+        (_irs_float_tgt.indexes, _irs_float_tgt.preds, _irs_float_tgt.target),
+    ],
+)
+
+_default_metric_functional_input_arguments = dict(
+    argnames="preds,target",
+    argvalues=[
+        (_irs.preds, _irs.target),
+        (_irs_extra.preds, _irs_extra.target),
+        (_irs_no_tgt.preds, _irs_no_tgt.target),
+    ],
+)
+
+_default_metric_functional_input_arguments_with_non_binary_target = dict(
+    argnames="preds,target",
+    argvalues=[
+        (_irs.preds, _irs.target),
+        (_irs_extra.preds, _irs_extra.target),
+        (_irs_no_tgt.preds, _irs_no_tgt.target),
+        (_irs_int_tgt.preds, _irs_int_tgt.target),
+        (_irs_float_tgt.preds, _irs_float_tgt.target),
+    ],
+)
+
+
+def _errors_test_class_metric(
+    indexes: Tensor,
+    preds: Tensor,
+    target: Tensor,
+    metric_class: Metric,
+    message: str = "",
+    metric_args: dict = None,
+    exception_type: Type[Exception] = ValueError,
+    kwargs_update: dict = None,
+):
+    """Utility function doing checks about types, parameters and errors.
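+
+    The check passes only if constructing the metric and calling it with the
+    given inputs raises `exception_type` with a message matching `message`.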
+ + Args: + indexes: torch tensor with indexes + preds: torch tensor with predictions + target: torch tensor with targets + metric_class: lightning metric class that should be tested + message: message that exception should return + metric_args: arguments for class initialization + exception_type: callable function that is used for comparison + kwargs_update: Additional keyword arguments that will be passed with indexes, preds and + target when running update on the metric. + """ + metric_args = metric_args or {} + kwargs_update = kwargs_update or {} + with pytest.raises(exception_type, match=message): + metric = metric_class(**metric_args) + metric(preds, target, indexes=indexes, **kwargs_update) + + +def _errors_test_functional_metric( + preds: Tensor, + target: Tensor, + metric_functional: Metric, + message: str = "", + exception_type: Type[Exception] = ValueError, + kwargs_update: dict = None, +): + """Utility function doing checks about types, parameters and errors. + + Args: + preds: torch tensor with predictions + target: torch tensor with targets + metric_functional: lightning functional metric that should be tested + message: message that exception should return + exception_type: callable function that is used for comparison + kwargs_update: Additional keyword arguments that will be passed with indexes, preds and + target when running update on the metric. + """ + kwargs_update = kwargs_update or {} + with pytest.raises(exception_type, match=message): + metric_functional(preds, target, **kwargs_update) + + +class RetrievalMetricTester(MetricTester): + def run_class_metric_test( + self, + ddp: bool, + indexes: Tensor, + preds: Tensor, + target: Tensor, + metric_class: Metric, + sk_metric: Callable, + dist_sync_on_step: bool, + metric_args: dict, + reverse: bool = False, + ): + _sk_metric_adapted = partial(_compute_sklearn_metric, metric=sk_metric, reverse=reverse, **metric_args) + + super().run_class_metric_test( + ddp=ddp, + preds=preds, + target=target, + metric_class=metric_class, + sk_metric=_sk_metric_adapted, + dist_sync_on_step=dist_sync_on_step, + metric_args=metric_args, + fragment_kwargs=True, + indexes=indexes, # every additional argument will be passed to metric_class and _sk_metric_adapted + ) + + def run_functional_metric_test( + self, + preds: Tensor, + target: Tensor, + metric_functional: Callable, + sk_metric: Callable, + metric_args: dict, + reverse: bool = False, + **kwargs, + ): + _sk_metric_adapted = partial(_compute_sklearn_metric, metric=sk_metric, reverse=reverse, **metric_args) + + super().run_functional_metric_test( + preds=preds, + target=target, + metric_functional=metric_functional, + sk_metric=_sk_metric_adapted, + metric_args=metric_args, + fragment_kwargs=True, + **kwargs, + ) + + def run_precision_test_cpu( + self, + indexes: Tensor, + preds: Tensor, + target: Tensor, + metric_module: Metric, + metric_functional: Callable, + ): + def metric_functional_ignore_indexes(preds, target, indexes): + return metric_functional(preds, target) + + super().run_precision_test_cpu( + preds=preds, + target=target, + metric_module=metric_module, + metric_functional=metric_functional_ignore_indexes, + metric_args={"empty_target_action": "neg"}, + indexes=indexes, # every additional argument will be passed to RetrievalMAP and _sk_metric_adapted + ) + + def run_precision_test_gpu( + self, + indexes: Tensor, + preds: Tensor, + target: Tensor, + metric_module: Metric, + metric_functional: Callable, + ): + if not B.cuda.is_available(): + pytest.skip() + + def 
metric_functional_ignore_indexes(preds, target, indexes):
+            return metric_functional(preds, target)
+
+        super().run_precision_test_gpu(
+            preds=preds,
+            target=target,
+            metric_module=metric_module,
+            metric_functional=metric_functional_ignore_indexes,
+            metric_args={"empty_target_action": "neg"},
+            indexes=indexes,  # every additional argument will be passed to RetrievalMAP and _sk_metric_adapted
+        )
+
+    @staticmethod
+    def run_metric_class_arguments_test(
+        indexes: Tensor,
+        preds: Tensor,
+        target: Tensor,
+        metric_class: Metric,
+        message: str = "",
+        metric_args: dict = None,
+        exception_type: Type[Exception] = ValueError,
+        kwargs_update: dict = None,
+    ):
+        _errors_test_class_metric(
+            indexes=indexes,
+            preds=preds,
+            target=target,
+            metric_class=metric_class,
+            message=message,
+            metric_args=metric_args,
+            exception_type=exception_type,
+            kwargs_update=kwargs_update,
+        )
+
+    @staticmethod
+    def run_functional_metric_arguments_test(
+        preds: Tensor,
+        target: Tensor,
+        metric_functional: Callable,
+        message: str = "",
+        exception_type: Type[Exception] = ValueError,
+        kwargs_update: dict = None,
+    ):
+        _errors_test_functional_metric(
+            preds=preds,
+            target=target,
+            metric_functional=metric_functional,
+            message=message,
+            exception_type=exception_type,
+            kwargs_update=kwargs_update,
+        )
diff --git a/EE/paddlemetric/src/tests/retrieval/inputs.py b/EE/paddlemetric/src/tests/retrieval/inputs.py
new file mode 100644
index 000000000..d1e40b814
--- /dev/null
+++ b/EE/paddlemetric/src/tests/retrieval/inputs.py
@@ -0,0 +1,82 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
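+#
+# Overview comment (added for orientation): fixed random inputs shared by the
+# retrieval tests. The inputs in the first group below are well-formed; the
+# ones under "with errors" are deliberately malformed (empty, mismatching
+# sizes, or out-of-range targets) so the tests can exercise the validation paths.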
+from collections import namedtuple + +import paddleext.torchapi as B + +from tests.helpers.testers import BATCH_SIZE, EXTRA_DIM, NUM_BATCHES + +Input = namedtuple("InputMultiple", ["indexes", "preds", "target"]) + +# correct +_input_retrieval_scores = Input( + indexes=B.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)), + preds=B.rand(NUM_BATCHES, BATCH_SIZE), + target=B.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)), +) + +_input_retrieval_scores_extra = Input( + indexes=B.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)), + preds=B.rand(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM), + target=B.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)), +) + +_input_retrieval_scores_int_target = Input( + indexes=B.randint(high=10, size=(NUM_BATCHES, 2 * BATCH_SIZE)), + preds=B.rand(NUM_BATCHES, 2 * BATCH_SIZE), + target=B.randint(low=-1, high=4, size=(NUM_BATCHES, 2 * BATCH_SIZE)), +) + +_input_retrieval_scores_float_target = Input( + indexes=B.randint(high=10, size=(NUM_BATCHES, 2 * BATCH_SIZE)), + preds=B.rand(NUM_BATCHES, 2 * BATCH_SIZE), + target=B.rand(NUM_BATCHES, 2 * BATCH_SIZE), +) + +# with errors +_input_retrieval_scores_no_target = Input( + indexes=B.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)), + preds=B.rand(NUM_BATCHES, BATCH_SIZE), + target=B.randint(high=1, size=(NUM_BATCHES, BATCH_SIZE)), +) + +_input_retrieval_scores_all_target = Input( + indexes=B.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)), + preds=B.rand(NUM_BATCHES, BATCH_SIZE), + target=B.randint(low=1, high=2, size=(NUM_BATCHES, BATCH_SIZE)), +) + +_input_retrieval_scores_empty = Input( + indexes=B.randint(high=10, size=[0]), + preds=B.rand(0), + target=B.randint(high=2, size=[0]), +) + +_input_retrieval_scores_mismatching_sizes = Input( + indexes=B.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE - 2)), + preds=B.rand(NUM_BATCHES, BATCH_SIZE), + target=B.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)), +) + +_input_retrieval_scores_mismatching_sizes_func = Input( + indexes=B.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)), + preds=B.rand(NUM_BATCHES, BATCH_SIZE - 2), + target=B.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)), +) + +_input_retrieval_scores_wrong_targets = Input( + indexes=B.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)), + preds=B.rand(NUM_BATCHES, BATCH_SIZE), + target=B.randint(low=-(2 ** 31), high=2 ** 31, size=(NUM_BATCHES, BATCH_SIZE)), +) diff --git a/EE/paddlemetric/src/tests/retrieval/test_fallout.py b/EE/paddlemetric/src/tests/retrieval/test_fallout.py new file mode 100644 index 000000000..e69ddd593 --- /dev/null +++ b/EE/paddlemetric/src/tests/retrieval/test_fallout.py @@ -0,0 +1,152 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
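+#
+# Fall-out at k is the fraction of non-relevant documents that appear among the
+# top-k results. A small worked example (not part of the test data below): with
+# target = [0, 1, 0] and preds = [0.3, 0.2, 0.5], the two negatives are ranked
+# first and second, so fall-out@2 = 2 / 2 = 1.0.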
+import numpy as np +import pytest +from paddleext.torchapi import Tensor + +from tests.helpers import seed_all +from tests.retrieval.helpers import ( + RetrievalMetricTester, + _concat_tests, + _default_metric_class_input_arguments, + _default_metric_functional_input_arguments, + _errors_test_class_metric_parameters_default, + _errors_test_class_metric_parameters_k, + _errors_test_class_metric_parameters_no_neg_target, + _errors_test_functional_metric_parameters_default, + _errors_test_functional_metric_parameters_k, +) +from paddlemetrics.functional.retrieval.fall_out import retrieval_fall_out +from paddlemetrics.retrieval.retrieval_fallout import RetrievalFallOut + +seed_all(42) + + +def _fallout_at_k(target: np.ndarray, preds: np.ndarray, k: int = None): + """Didn't find a reliable implementation of Fall-out in Information Retrieval, so, reimplementing here. + + See Wikipedia for `Fall-out`_ for more information about the metric definition. + """ + assert target.shape == preds.shape + assert len(target.shape) == 1 # works only with single dimension inputs + + k = len(preds) if k is None else k + + target = 1 - target + if target.sum(): + order_indexes = np.argsort(preds, axis=0)[::-1] + relevant = np.sum(target[order_indexes][:k]) + return relevant * 1.0 / target.sum() + return np.NaN + + +class TestFallOut(RetrievalMetricTester): + @pytest.mark.parametrize("ddp", [True, False]) + @pytest.mark.parametrize("dist_sync_on_step", [True, False]) + @pytest.mark.parametrize("empty_target_action", ["skip", "neg", "pos"]) + @pytest.mark.parametrize("k", [None, 1, 4, 10]) + @pytest.mark.parametrize(**_default_metric_class_input_arguments) + def test_class_metric( + self, + ddp: bool, + indexes: Tensor, + preds: Tensor, + target: Tensor, + dist_sync_on_step: bool, + empty_target_action: str, + k: int, + ): + metric_args = {"empty_target_action": empty_target_action, "k": k} + + self.run_class_metric_test( + ddp=ddp, + indexes=indexes, + preds=preds, + target=target, + metric_class=RetrievalFallOut, + sk_metric=_fallout_at_k, + dist_sync_on_step=dist_sync_on_step, + reverse=True, + metric_args=metric_args, + ) + + @pytest.mark.parametrize(**_default_metric_functional_input_arguments) + @pytest.mark.parametrize("k", [None, 1, 4, 10]) + def test_functional_metric(self, preds: Tensor, target: Tensor, k: int): + self.run_functional_metric_test( + preds=preds, + target=target, + metric_functional=retrieval_fall_out, + sk_metric=_fallout_at_k, + reverse=True, + metric_args={}, + k=k, + ) + + @pytest.mark.parametrize(**_default_metric_class_input_arguments) + def test_precision_cpu(self, indexes: Tensor, preds: Tensor, target: Tensor): + self.run_precision_test_cpu( + indexes=indexes, + preds=preds, + target=target, + metric_module=RetrievalFallOut, + metric_functional=retrieval_fall_out, + ) + + @pytest.mark.parametrize(**_default_metric_class_input_arguments) + def test_precision_gpu(self, indexes: Tensor, preds: Tensor, target: Tensor): + self.run_precision_test_gpu( + indexes=indexes, + preds=preds, + target=target, + metric_module=RetrievalFallOut, + metric_functional=retrieval_fall_out, + ) + + @pytest.mark.parametrize( + **_concat_tests( + _errors_test_class_metric_parameters_default, + _errors_test_class_metric_parameters_no_neg_target, + _errors_test_class_metric_parameters_k, + ) + ) + def test_arguments_class_metric( + self, indexes: Tensor, preds: Tensor, target: Tensor, message: str, metric_args: dict + ): + self.run_metric_class_arguments_test( + indexes=indexes, + preds=preds, + 
target=target, + metric_class=RetrievalFallOut, + message=message, + metric_args=metric_args, + exception_type=ValueError, + kwargs_update={}, + ) + + @pytest.mark.parametrize( + **_concat_tests( + _errors_test_functional_metric_parameters_default, + _errors_test_functional_metric_parameters_k, + ) + ) + def test_arguments_functional_metric(self, preds: Tensor, target: Tensor, message: str, metric_args: dict): + self.run_functional_metric_arguments_test( + preds=preds, + target=target, + metric_functional=retrieval_fall_out, + message=message, + exception_type=ValueError, + kwargs_update=metric_args, + ) diff --git a/EE/paddlemetric/src/tests/retrieval/test_hit_rate.py b/EE/paddlemetric/src/tests/retrieval/test_hit_rate.py new file mode 100644 index 000000000..a9d908389 --- /dev/null +++ b/EE/paddlemetric/src/tests/retrieval/test_hit_rate.py @@ -0,0 +1,147 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import numpy as np +import pytest +from paddleext.torchapi import Tensor + +from tests.helpers import seed_all +from tests.retrieval.helpers import ( + RetrievalMetricTester, + _concat_tests, + _default_metric_class_input_arguments, + _default_metric_functional_input_arguments, + _errors_test_class_metric_parameters_default, + _errors_test_class_metric_parameters_k, + _errors_test_class_metric_parameters_no_pos_target, + _errors_test_functional_metric_parameters_default, + _errors_test_functional_metric_parameters_k, +) +from paddlemetrics.functional.retrieval.hit_rate import retrieval_hit_rate +from paddlemetrics.retrieval.retrieval_hit_rate import RetrievalHitRate + +seed_all(42) + + +def _hit_rate_at_k(target: np.ndarray, preds: np.ndarray, k: int = None): + """Didn't find a reliable implementation of Hit Rate in Information Retrieval, so, reimplementing here.""" + assert target.shape == preds.shape + assert len(target.shape) == 1 # works only with single dimension inputs + + if k is None: + k = len(preds) + + if target.sum() > 0: + order_indexes = np.argsort(preds, axis=0)[::-1] + relevant = np.sum(target[order_indexes][:k]) + return float(relevant > 0.0) + return np.NaN + + +class TestHitRate(RetrievalMetricTester): + @pytest.mark.parametrize("ddp", [True, False]) + @pytest.mark.parametrize("dist_sync_on_step", [True, False]) + @pytest.mark.parametrize("empty_target_action", ["skip", "neg", "pos"]) + @pytest.mark.parametrize("k", [None, 1, 4, 10]) + @pytest.mark.parametrize(**_default_metric_class_input_arguments) + def test_class_metric( + self, + ddp: bool, + indexes: Tensor, + preds: Tensor, + target: Tensor, + dist_sync_on_step: bool, + empty_target_action: str, + k: int, + ): + metric_args = {"empty_target_action": empty_target_action, "k": k} + + self.run_class_metric_test( + ddp=ddp, + indexes=indexes, + preds=preds, + target=target, + metric_class=RetrievalHitRate, + sk_metric=_hit_rate_at_k, + dist_sync_on_step=dist_sync_on_step, + metric_args=metric_args, + ) + + @pytest.mark.parametrize(**_default_metric_functional_input_arguments) + 
@pytest.mark.parametrize("k", [None, 1, 4, 10]) + def test_functional_metric(self, preds: Tensor, target: Tensor, k: int): + self.run_functional_metric_test( + preds=preds, + target=target, + metric_functional=retrieval_hit_rate, + sk_metric=_hit_rate_at_k, + metric_args={}, + k=k, + ) + + @pytest.mark.parametrize(**_default_metric_class_input_arguments) + def test_precision_cpu(self, indexes: Tensor, preds: Tensor, target: Tensor): + self.run_precision_test_cpu( + indexes=indexes, + preds=preds, + target=target, + metric_module=RetrievalHitRate, + metric_functional=retrieval_hit_rate, + ) + + @pytest.mark.parametrize(**_default_metric_class_input_arguments) + def test_precision_gpu(self, indexes: Tensor, preds: Tensor, target: Tensor): + self.run_precision_test_gpu( + indexes=indexes, + preds=preds, + target=target, + metric_module=RetrievalHitRate, + metric_functional=retrieval_hit_rate, + ) + + @pytest.mark.parametrize( + **_concat_tests( + _errors_test_class_metric_parameters_default, + _errors_test_class_metric_parameters_no_pos_target, + _errors_test_class_metric_parameters_k, + ) + ) + def test_arguments_class_metric( + self, indexes: Tensor, preds: Tensor, target: Tensor, message: str, metric_args: dict + ): + self.run_metric_class_arguments_test( + indexes=indexes, + preds=preds, + target=target, + metric_class=RetrievalHitRate, + message=message, + metric_args=metric_args, + exception_type=ValueError, + kwargs_update={}, + ) + + @pytest.mark.parametrize( + **_concat_tests( + _errors_test_functional_metric_parameters_default, + _errors_test_functional_metric_parameters_k, + ) + ) + def test_arguments_functional_metric(self, preds: Tensor, target: Tensor, message: str, metric_args: dict): + self.run_functional_metric_arguments_test( + preds=preds, + target=target, + metric_functional=retrieval_hit_rate, + message=message, + exception_type=ValueError, + kwargs_update=metric_args, + ) diff --git a/EE/paddlemetric/src/tests/retrieval/test_map.py b/EE/paddlemetric/src/tests/retrieval/test_map.py new file mode 100644 index 000000000..bef75b553 --- /dev/null +++ b/EE/paddlemetric/src/tests/retrieval/test_map.py @@ -0,0 +1,120 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
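+#
+# The reference for MAP is sklearn's average_precision_score, applied per query
+# by the shared helpers. A small worked example (not part of the test data
+# below): with target = [1, 0, 1] and preds = [0.7, 0.6, 0.5], precision is 1/1
+# at the first relevant document and 2/3 at the second, so AP = (1 + 2/3) / 2.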
+import pytest +from sklearn.metrics import average_precision_score as sk_average_precision_score +from paddleext.torchapi import Tensor + +from tests.helpers import seed_all +from tests.retrieval.helpers import ( + RetrievalMetricTester, + _concat_tests, + _default_metric_class_input_arguments, + _default_metric_functional_input_arguments, + _errors_test_class_metric_parameters_default, + _errors_test_class_metric_parameters_no_pos_target, + _errors_test_functional_metric_parameters_default, +) +from paddlemetrics.functional.retrieval.average_precision import retrieval_average_precision +from paddlemetrics.retrieval.mean_average_precision import RetrievalMAP + +seed_all(42) + + +class TestMAP(RetrievalMetricTester): + @pytest.mark.parametrize("ddp", [True, False]) + @pytest.mark.parametrize("dist_sync_on_step", [True, False]) + @pytest.mark.parametrize("empty_target_action", ["skip", "neg", "pos"]) + @pytest.mark.parametrize(**_default_metric_class_input_arguments) + def test_class_metric( + self, + ddp: bool, + indexes: Tensor, + preds: Tensor, + target: Tensor, + dist_sync_on_step: bool, + empty_target_action: str, + ): + metric_args = {"empty_target_action": empty_target_action} + + self.run_class_metric_test( + ddp=ddp, + indexes=indexes, + preds=preds, + target=target, + metric_class=RetrievalMAP, + sk_metric=sk_average_precision_score, + dist_sync_on_step=dist_sync_on_step, + metric_args=metric_args, + ) + + @pytest.mark.parametrize(**_default_metric_functional_input_arguments) + def test_functional_metric(self, preds: Tensor, target: Tensor): + self.run_functional_metric_test( + preds=preds, + target=target, + metric_functional=retrieval_average_precision, + sk_metric=sk_average_precision_score, + metric_args={}, + ) + + @pytest.mark.parametrize(**_default_metric_class_input_arguments) + def test_precision_cpu(self, indexes: Tensor, preds: Tensor, target: Tensor): + self.run_precision_test_cpu( + indexes=indexes, + preds=preds, + target=target, + metric_module=RetrievalMAP, + metric_functional=retrieval_average_precision, + ) + + @pytest.mark.parametrize(**_default_metric_class_input_arguments) + def test_precision_gpu(self, indexes: Tensor, preds: Tensor, target: Tensor): + self.run_precision_test_gpu( + indexes=indexes, + preds=preds, + target=target, + metric_module=RetrievalMAP, + metric_functional=retrieval_average_precision, + ) + + @pytest.mark.parametrize( + **_concat_tests( + _errors_test_class_metric_parameters_default, + _errors_test_class_metric_parameters_no_pos_target, + ) + ) + def test_arguments_class_metric( + self, indexes: Tensor, preds: Tensor, target: Tensor, message: str, metric_args: dict + ): + self.run_metric_class_arguments_test( + indexes=indexes, + preds=preds, + target=target, + metric_class=RetrievalMAP, + message=message, + metric_args=metric_args, + exception_type=ValueError, + kwargs_update={}, + ) + + @pytest.mark.parametrize(**_errors_test_functional_metric_parameters_default) + def test_arguments_functional_metric(self, preds: Tensor, target: Tensor, message: str, metric_args: dict): + self.run_functional_metric_arguments_test( + preds=preds, + target=target, + metric_functional=retrieval_average_precision, + message=message, + exception_type=ValueError, + kwargs_update=metric_args, + ) diff --git a/EE/paddlemetric/src/tests/retrieval/test_mrr.py b/EE/paddlemetric/src/tests/retrieval/test_mrr.py new file mode 100644 index 000000000..d5a80af94 --- /dev/null +++ b/EE/paddlemetric/src/tests/retrieval/test_mrr.py @@ -0,0 +1,142 @@ +# Copyright The 
PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import numpy as np +import pytest +from sklearn.metrics import label_ranking_average_precision_score +from paddleext.torchapi import Tensor + +from tests.helpers import seed_all +from tests.retrieval.helpers import ( + RetrievalMetricTester, + _concat_tests, + _default_metric_class_input_arguments, + _default_metric_functional_input_arguments, + _errors_test_class_metric_parameters_default, + _errors_test_class_metric_parameters_no_pos_target, + _errors_test_functional_metric_parameters_default, +) +from paddlemetrics.functional.retrieval.reciprocal_rank import retrieval_reciprocal_rank +from paddlemetrics.retrieval.mean_reciprocal_rank import RetrievalMRR + +seed_all(42) + + +def _reciprocal_rank(target: np.ndarray, preds: np.ndarray): + """Adaptation of `sklearn.metrics.label_ranking_average_precision_score`. + + Since the original sklearn metric works as RR only when the number of positive targets is exactly 1, here we remove + every positive target that is not the most important. Remember that in RR only the positive target with the highest + score is considered. + """ + assert target.shape == preds.shape + assert len(target.shape) == 1 # works only with single dimension inputs + + # going to remove T targets that are not ranked as highest + indexes = preds[target.astype(bool)] + if len(indexes) > 0: + target[preds != indexes.max(-1, keepdims=True)[0]] = 0 # ensure that only 1 positive label is present + + if target.sum() > 0: + # sklearn `label_ranking_average_precision_score` requires at most 2 dims + return label_ranking_average_precision_score(np.expand_dims(target, axis=0), np.expand_dims(preds, axis=0)) + return 0.0 + + +class TestMRR(RetrievalMetricTester): + @pytest.mark.parametrize("ddp", [True, False]) + @pytest.mark.parametrize("dist_sync_on_step", [True, False]) + @pytest.mark.parametrize("empty_target_action", ["skip", "neg", "pos"]) + @pytest.mark.parametrize(**_default_metric_class_input_arguments) + def test_class_metric( + self, + ddp: bool, + indexes: Tensor, + preds: Tensor, + target: Tensor, + dist_sync_on_step: bool, + empty_target_action: str, + ): + metric_args = {"empty_target_action": empty_target_action} + + self.run_class_metric_test( + ddp=ddp, + indexes=indexes, + preds=preds, + target=target, + metric_class=RetrievalMRR, + sk_metric=_reciprocal_rank, + dist_sync_on_step=dist_sync_on_step, + metric_args=metric_args, + ) + + @pytest.mark.parametrize(**_default_metric_functional_input_arguments) + def test_functional_metric(self, preds: Tensor, target: Tensor): + self.run_functional_metric_test( + preds=preds, + target=target, + metric_functional=retrieval_reciprocal_rank, + sk_metric=_reciprocal_rank, + metric_args={}, + ) + + @pytest.mark.parametrize(**_default_metric_class_input_arguments) + def test_precision_cpu(self, indexes: Tensor, preds: Tensor, target: Tensor): + self.run_precision_test_cpu( + indexes=indexes, + preds=preds, + target=target, + metric_module=RetrievalMRR, + 
metric_functional=retrieval_reciprocal_rank, + ) + + @pytest.mark.parametrize(**_default_metric_class_input_arguments) + def test_precision_gpu(self, indexes: Tensor, preds: Tensor, target: Tensor): + self.run_precision_test_gpu( + indexes=indexes, + preds=preds, + target=target, + metric_module=RetrievalMRR, + metric_functional=retrieval_reciprocal_rank, + ) + + @pytest.mark.parametrize( + **_concat_tests( + _errors_test_class_metric_parameters_default, + _errors_test_class_metric_parameters_no_pos_target, + ) + ) + def test_arguments_class_metric( + self, indexes: Tensor, preds: Tensor, target: Tensor, message: str, metric_args: dict + ): + self.run_metric_class_arguments_test( + indexes=indexes, + preds=preds, + target=target, + metric_class=RetrievalMRR, + message=message, + metric_args=metric_args, + exception_type=ValueError, + kwargs_update={}, + ) + + @pytest.mark.parametrize(**_errors_test_functional_metric_parameters_default) + def test_arguments_functional_metric(self, preds: Tensor, target: Tensor, message: str, metric_args: dict): + self.run_functional_metric_arguments_test( + preds=preds, + target=target, + metric_functional=retrieval_reciprocal_rank, + message=message, + exception_type=ValueError, + kwargs_update=metric_args, + ) diff --git a/EE/paddlemetric/src/tests/retrieval/test_ndcg.py b/EE/paddlemetric/src/tests/retrieval/test_ndcg.py new file mode 100644 index 000000000..4fa099f15 --- /dev/null +++ b/EE/paddlemetric/src/tests/retrieval/test_ndcg.py @@ -0,0 +1,151 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
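+#
+# sklearn's ndcg_score discounts the true relevances by 1 / log2(rank + 1) and
+# normalizes by the ideal ordering. A small worked example (not part of the
+# test data below): target = [2, 0, 1] ranked by preds = [0.9, 0.8, 0.7] gives
+# DCG = 2/1 + 0 + 1/2 = 2.5 and IDCG = 2 + 1/log2(3), so NDCG is roughly 0.95.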
+import numpy as np
+import pytest
+from sklearn.metrics import ndcg_score
+from paddleext.torchapi import Tensor
+
+from tests.helpers import seed_all
+from tests.retrieval.helpers import (
+    RetrievalMetricTester,
+    _concat_tests,
+    _default_metric_class_input_arguments_with_non_binary_target,
+    _default_metric_functional_input_arguments_with_non_binary_target,
+    _errors_test_class_metric_parameters_k,
+    _errors_test_class_metric_parameters_with_nonbinary,
+    _errors_test_functional_metric_parameters_k,
+    _errors_test_functional_metric_parameters_with_nonbinary,
+)
+from paddlemetrics.functional.retrieval.ndcg import retrieval_normalized_dcg
+from paddlemetrics.retrieval.retrieval_ndcg import RetrievalNormalizedDCG
+
+seed_all(42)
+
+
+def _ndcg_at_k(target: np.ndarray, preds: np.ndarray, k: int = None):
+    """Adaptation of `sklearn.metrics.ndcg_score`."""
+    assert target.shape == preds.shape
+    assert len(target.shape) == 1  # works only with single dimension inputs
+
+    if target.shape[0] < 2:  # ranking is equal to ideal ranking with a single document
+        return np.array(1.0)
+
+    preds = np.expand_dims(preds, axis=0)
+    target = np.expand_dims(target, axis=0)
+
+    return ndcg_score(target, preds, k=k)
+
+
+class TestNDCG(RetrievalMetricTester):
+    @pytest.mark.parametrize("ddp", [True, False])
+    @pytest.mark.parametrize("dist_sync_on_step", [True, False])
+    @pytest.mark.parametrize("empty_target_action", ["skip", "neg", "pos"])
+    @pytest.mark.parametrize("k", [None, 1, 4, 10])
+    @pytest.mark.parametrize(**_default_metric_class_input_arguments_with_non_binary_target)
+    def test_class_metric(
+        self,
+        ddp: bool,
+        indexes: Tensor,
+        preds: Tensor,
+        target: Tensor,
+        dist_sync_on_step: bool,
+        empty_target_action: str,
+        k: int,
+    ):
+        metric_args = {"empty_target_action": empty_target_action, "k": k}
+
+        self.run_class_metric_test(
+            ddp=ddp,
+            indexes=indexes,
+            preds=preds,
+            target=target,
+            metric_class=RetrievalNormalizedDCG,
+            sk_metric=_ndcg_at_k,
+            dist_sync_on_step=dist_sync_on_step,
+            metric_args=metric_args,
+        )
+
+    @pytest.mark.parametrize(**_default_metric_functional_input_arguments_with_non_binary_target)
+    @pytest.mark.parametrize("k", [None, 1, 4, 10])
+    def test_functional_metric(self, preds: Tensor, target: Tensor, k: int):
+        self.run_functional_metric_test(
+            preds=preds,
+            target=target,
+            metric_functional=retrieval_normalized_dcg,
+            sk_metric=_ndcg_at_k,
+            metric_args={},
+            k=k,
+        )
+
+    @pytest.mark.parametrize(**_default_metric_class_input_arguments_with_non_binary_target)
+    def test_precision_cpu(self, indexes: Tensor, preds: Tensor, target: Tensor):
+        self.run_precision_test_cpu(
+            indexes=indexes,
+            preds=preds,
+            target=target,
+            metric_module=RetrievalNormalizedDCG,
+            metric_functional=retrieval_normalized_dcg,
+        )
+
+    @pytest.mark.parametrize(**_default_metric_class_input_arguments_with_non_binary_target)
+    def test_precision_gpu(self, indexes: Tensor, preds: Tensor, target: Tensor):
+        self.run_precision_test_gpu(
+            indexes=indexes,
+            preds=preds,
+            target=target,
+            metric_module=RetrievalNormalizedDCG,
+            metric_functional=retrieval_normalized_dcg,
+        )
+
+    @pytest.mark.parametrize(
+        **_concat_tests(
+            _errors_test_class_metric_parameters_with_nonbinary,
+            _errors_test_class_metric_parameters_k,
+        )
+    )
+    def test_arguments_class_metric(
+        self, indexes: Tensor, preds: Tensor, target: Tensor, message: str, metric_args: dict
+    ):
+        if target.is_floating_point():
+            pytest.skip("NDCG metric works with float target input")
+
+        self.run_metric_class_arguments_test(
+            
indexes=indexes, + preds=preds, + target=target, + metric_class=RetrievalNormalizedDCG, + message=message, + metric_args=metric_args, + exception_type=ValueError, + kwargs_update={}, + ) + + @pytest.mark.parametrize( + **_concat_tests( + _errors_test_functional_metric_parameters_with_nonbinary, + _errors_test_functional_metric_parameters_k, + ) + ) + def test_arguments_functional_metric(self, preds: Tensor, target: Tensor, message: str, metric_args: dict): + if target.is_floating_point(): + pytest.skip("NDCG metric works with float target input") + + self.run_functional_metric_arguments_test( + preds=preds, + target=target, + metric_functional=retrieval_normalized_dcg, + message=message, + exception_type=ValueError, + kwargs_update=metric_args, + ) diff --git a/EE/paddlemetric/src/tests/retrieval/test_precision.py b/EE/paddlemetric/src/tests/retrieval/test_precision.py new file mode 100644 index 000000000..260e0242d --- /dev/null +++ b/EE/paddlemetric/src/tests/retrieval/test_precision.py @@ -0,0 +1,151 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import numpy as np +import pytest +from paddleext.torchapi import Tensor + +from tests.helpers import seed_all +from tests.retrieval.helpers import ( + RetrievalMetricTester, + _concat_tests, + _default_metric_class_input_arguments, + _default_metric_functional_input_arguments, + _errors_test_class_metric_parameters_default, + _errors_test_class_metric_parameters_k, + _errors_test_class_metric_parameters_no_pos_target, + _errors_test_functional_metric_parameters_default, + _errors_test_functional_metric_parameters_k, +) +from paddlemetrics.functional.retrieval.precision import retrieval_precision +from paddlemetrics.retrieval.retrieval_precision import RetrievalPrecision + +seed_all(42) + + +def _precision_at_k(target: np.ndarray, preds: np.ndarray, k: int = None): + """Didn't find a reliable implementation of Precision in Information Retrieval, so, reimplementing here. + + A good explanation can be found + `here _`. 
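+
+    A small worked example: with target = [0, 1, 1] and preds = [0.1, 0.8, 0.4],
+    the two highest-scored documents are both relevant, so precision@2 = 2 / 2 = 1.0.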
+ """ + assert target.shape == preds.shape + assert len(target.shape) == 1 # works only with single dimension inputs + + if k is None: + k = len(preds) + + if target.sum() > 0: + order_indexes = np.argsort(preds, axis=0)[::-1] + relevant = np.sum(target[order_indexes][:k]) + return relevant * 1.0 / k + return np.NaN + + +class TestPrecision(RetrievalMetricTester): + @pytest.mark.parametrize("ddp", [True, False]) + @pytest.mark.parametrize("dist_sync_on_step", [True, False]) + @pytest.mark.parametrize("empty_target_action", ["skip", "neg", "pos"]) + @pytest.mark.parametrize("k", [None, 1, 4, 10]) + @pytest.mark.parametrize(**_default_metric_class_input_arguments) + def test_class_metric( + self, + ddp: bool, + indexes: Tensor, + preds: Tensor, + target: Tensor, + dist_sync_on_step: bool, + empty_target_action: str, + k: int, + ): + metric_args = {"empty_target_action": empty_target_action, "k": k} + + self.run_class_metric_test( + ddp=ddp, + indexes=indexes, + preds=preds, + target=target, + metric_class=RetrievalPrecision, + sk_metric=_precision_at_k, + dist_sync_on_step=dist_sync_on_step, + metric_args=metric_args, + ) + + @pytest.mark.parametrize(**_default_metric_functional_input_arguments) + @pytest.mark.parametrize("k", [None, 1, 4, 10]) + def test_functional_metric(self, preds: Tensor, target: Tensor, k: int): + self.run_functional_metric_test( + preds=preds, + target=target, + metric_functional=retrieval_precision, + sk_metric=_precision_at_k, + metric_args={}, + k=k, + ) + + @pytest.mark.parametrize(**_default_metric_class_input_arguments) + def test_precision_cpu(self, indexes: Tensor, preds: Tensor, target: Tensor): + self.run_precision_test_cpu( + indexes=indexes, + preds=preds, + target=target, + metric_module=RetrievalPrecision, + metric_functional=retrieval_precision, + ) + + @pytest.mark.parametrize(**_default_metric_class_input_arguments) + def test_precision_gpu(self, indexes: Tensor, preds: Tensor, target: Tensor): + self.run_precision_test_gpu( + indexes=indexes, + preds=preds, + target=target, + metric_module=RetrievalPrecision, + metric_functional=retrieval_precision, + ) + + @pytest.mark.parametrize( + **_concat_tests( + _errors_test_class_metric_parameters_default, + _errors_test_class_metric_parameters_no_pos_target, + _errors_test_class_metric_parameters_k, + ) + ) + def test_arguments_class_metric( + self, indexes: Tensor, preds: Tensor, target: Tensor, message: str, metric_args: dict + ): + self.run_metric_class_arguments_test( + indexes=indexes, + preds=preds, + target=target, + metric_class=RetrievalPrecision, + message=message, + metric_args=metric_args, + exception_type=ValueError, + kwargs_update={}, + ) + + @pytest.mark.parametrize( + **_concat_tests( + _errors_test_functional_metric_parameters_default, + _errors_test_functional_metric_parameters_k, + ) + ) + def test_arguments_functional_metric(self, preds: Tensor, target: Tensor, message: str, metric_args: dict): + self.run_functional_metric_arguments_test( + preds=preds, + target=target, + metric_functional=retrieval_precision, + message=message, + exception_type=ValueError, + kwargs_update=metric_args, + ) diff --git a/EE/paddlemetric/src/tests/retrieval/test_r_precision.py b/EE/paddlemetric/src/tests/retrieval/test_r_precision.py new file mode 100644 index 000000000..e9787482a --- /dev/null +++ b/EE/paddlemetric/src/tests/retrieval/test_r_precision.py @@ -0,0 +1,136 @@ +# Copyright The PyTorch Lightning team. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import numpy as np +import pytest +from paddleext.torchapi import Tensor + +from tests.helpers import seed_all +from tests.retrieval.helpers import ( + RetrievalMetricTester, + _concat_tests, + _default_metric_class_input_arguments, + _default_metric_functional_input_arguments, + _errors_test_class_metric_parameters_default, + _errors_test_class_metric_parameters_no_pos_target, + _errors_test_functional_metric_parameters_default, +) +from paddlemetrics.functional.retrieval.r_precision import retrieval_r_precision +from paddlemetrics.retrieval.retrieval_r_precision import RetrievalRPrecision + +seed_all(42) + + +def _r_precision(target: np.ndarray, preds: np.ndarray): + """Didn't find a reliable implementation of R-Precision in Information Retrieval, so, reimplementing here. + + A good explanation can be found + `here _`. + """ + assert target.shape == preds.shape + assert len(target.shape) == 1 # works only with single dimension inputs + + if target.sum() > 0: + order_indexes = np.argsort(preds, axis=0)[::-1] + relevant = np.sum(target[order_indexes][: target.sum()]) + return relevant * 1.0 / target.sum() + return np.NaN + + +class TestRPrecision(RetrievalMetricTester): + @pytest.mark.parametrize("ddp", [True, False]) + @pytest.mark.parametrize("dist_sync_on_step", [True, False]) + @pytest.mark.parametrize("empty_target_action", ["skip", "neg", "pos"]) + @pytest.mark.parametrize(**_default_metric_class_input_arguments) + def test_class_metric( + self, + ddp: bool, + indexes: Tensor, + preds: Tensor, + target: Tensor, + dist_sync_on_step: bool, + empty_target_action: str, + ): + metric_args = {"empty_target_action": empty_target_action} + + self.run_class_metric_test( + ddp=ddp, + indexes=indexes, + preds=preds, + target=target, + metric_class=RetrievalRPrecision, + sk_metric=_r_precision, + dist_sync_on_step=dist_sync_on_step, + metric_args=metric_args, + ) + + @pytest.mark.parametrize(**_default_metric_functional_input_arguments) + def test_functional_metric(self, preds: Tensor, target: Tensor): + self.run_functional_metric_test( + preds=preds, + target=target, + metric_functional=retrieval_r_precision, + sk_metric=_r_precision, + metric_args={}, + ) + + @pytest.mark.parametrize(**_default_metric_class_input_arguments) + def test_precision_cpu(self, indexes: Tensor, preds: Tensor, target: Tensor): + self.run_precision_test_cpu( + indexes=indexes, + preds=preds, + target=target, + metric_module=RetrievalRPrecision, + metric_functional=retrieval_r_precision, + ) + + @pytest.mark.parametrize(**_default_metric_class_input_arguments) + def test_precision_gpu(self, indexes: Tensor, preds: Tensor, target: Tensor): + self.run_precision_test_gpu( + indexes=indexes, + preds=preds, + target=target, + metric_module=RetrievalRPrecision, + metric_functional=retrieval_r_precision, + ) + + @pytest.mark.parametrize( + **_concat_tests( + _errors_test_class_metric_parameters_default, + _errors_test_class_metric_parameters_no_pos_target, + ) + ) + def 
test_arguments_class_metric( + self, indexes: Tensor, preds: Tensor, target: Tensor, message: str, metric_args: dict + ): + self.run_metric_class_arguments_test( + indexes=indexes, + preds=preds, + target=target, + metric_class=RetrievalRPrecision, + message=message, + metric_args=metric_args, + exception_type=ValueError, + kwargs_update={}, + ) + + @pytest.mark.parametrize(**_errors_test_functional_metric_parameters_default) + def test_arguments_functional_metric(self, preds: Tensor, target: Tensor, message: str, metric_args: dict): + self.run_functional_metric_arguments_test( + preds=preds, + target=target, + metric_functional=retrieval_r_precision, + message=message, + exception_type=ValueError, + kwargs_update=metric_args, + ) diff --git a/EE/paddlemetric/src/tests/retrieval/test_recall.py b/EE/paddlemetric/src/tests/retrieval/test_recall.py new file mode 100644 index 000000000..8f01120b2 --- /dev/null +++ b/EE/paddlemetric/src/tests/retrieval/test_recall.py @@ -0,0 +1,150 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import numpy as np +import pytest +from paddleext.torchapi import Tensor + +from tests.helpers import seed_all +from tests.retrieval.helpers import ( + RetrievalMetricTester, + _concat_tests, + _default_metric_class_input_arguments, + _default_metric_functional_input_arguments, + _errors_test_class_metric_parameters_default, + _errors_test_class_metric_parameters_k, + _errors_test_class_metric_parameters_no_pos_target, + _errors_test_functional_metric_parameters_default, + _errors_test_functional_metric_parameters_k, +) +from paddlemetrics.functional.retrieval.recall import retrieval_recall +from paddlemetrics.retrieval.retrieval_recall import RetrievalRecall + +seed_all(42) + + +def _recall_at_k(target: np.ndarray, preds: np.ndarray, k: int = None): + """Didn't find a reliable implementation of Recall in Information Retrieval, so, reimplementing here. + + See wikipedia for more information about definition. 
+ """ + assert target.shape == preds.shape + assert len(target.shape) == 1 # works only with single dimension inputs + + if k is None: + k = len(preds) + + if target.sum() > 0: + order_indexes = np.argsort(preds, axis=0)[::-1] + relevant = np.sum(target[order_indexes][:k]) + return relevant * 1.0 / target.sum() + return np.NaN + + +class TestRecall(RetrievalMetricTester): + @pytest.mark.parametrize("ddp", [True, False]) + @pytest.mark.parametrize("dist_sync_on_step", [True, False]) + @pytest.mark.parametrize("empty_target_action", ["skip", "neg", "pos"]) + @pytest.mark.parametrize("k", [None, 1, 4, 10]) + @pytest.mark.parametrize(**_default_metric_class_input_arguments) + def test_class_metric( + self, + ddp: bool, + indexes: Tensor, + preds: Tensor, + target: Tensor, + dist_sync_on_step: bool, + empty_target_action: str, + k: int, + ): + metric_args = {"empty_target_action": empty_target_action, "k": k} + + self.run_class_metric_test( + ddp=ddp, + indexes=indexes, + preds=preds, + target=target, + metric_class=RetrievalRecall, + sk_metric=_recall_at_k, + dist_sync_on_step=dist_sync_on_step, + metric_args=metric_args, + ) + + @pytest.mark.parametrize(**_default_metric_functional_input_arguments) + @pytest.mark.parametrize("k", [None, 1, 4, 10]) + def test_functional_metric(self, preds: Tensor, target: Tensor, k: int): + self.run_functional_metric_test( + preds=preds, + target=target, + metric_functional=retrieval_recall, + sk_metric=_recall_at_k, + metric_args={}, + k=k, + ) + + @pytest.mark.parametrize(**_default_metric_class_input_arguments) + def test_precision_cpu(self, indexes: Tensor, preds: Tensor, target: Tensor): + self.run_precision_test_cpu( + indexes=indexes, + preds=preds, + target=target, + metric_module=RetrievalRecall, + metric_functional=retrieval_recall, + ) + + @pytest.mark.parametrize(**_default_metric_class_input_arguments) + def test_precision_gpu(self, indexes: Tensor, preds: Tensor, target: Tensor): + self.run_precision_test_gpu( + indexes=indexes, + preds=preds, + target=target, + metric_module=RetrievalRecall, + metric_functional=retrieval_recall, + ) + + @pytest.mark.parametrize( + **_concat_tests( + _errors_test_class_metric_parameters_default, + _errors_test_class_metric_parameters_no_pos_target, + _errors_test_class_metric_parameters_k, + ) + ) + def test_arguments_class_metric( + self, indexes: Tensor, preds: Tensor, target: Tensor, message: str, metric_args: dict + ): + self.run_metric_class_arguments_test( + indexes=indexes, + preds=preds, + target=target, + metric_class=RetrievalRecall, + message=message, + metric_args=metric_args, + exception_type=ValueError, + kwargs_update={}, + ) + + @pytest.mark.parametrize( + **_concat_tests( + _errors_test_functional_metric_parameters_default, + _errors_test_functional_metric_parameters_k, + ) + ) + def test_arguments_functional_metric(self, preds: Tensor, target: Tensor, message: str, metric_args: dict): + self.run_functional_metric_arguments_test( + preds=preds, + target=target, + metric_functional=retrieval_recall, + message=message, + exception_type=ValueError, + kwargs_update=metric_args, + ) diff --git a/EE/paddlemetric/src/tests/test_utilities.py b/EE/paddlemetric/src/tests/test_utilities.py new file mode 100644 index 000000000..0f7aacd03 --- /dev/null +++ b/EE/paddlemetric/src/tests/test_utilities.py @@ -0,0 +1,21 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from paddlemetrics.utilities import rank_zero_debug, rank_zero_info, rank_zero_warn
+
+
+def test_prints():
+    rank_zero_debug("DEBUG")
+    rank_zero_info("INFO")
+    rank_zero_warn("WARN")
diff --git a/EE/paddlemetric/src/tests/text/__init__.py b/EE/paddlemetric/src/tests/text/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/EE/paddlemetric/src/tests/text/helpers.py b/EE/paddlemetric/src/tests/text/helpers.py
new file mode 100644
index 000000000..ee896504b
--- /dev/null
+++ b/EE/paddlemetric/src/tests/text/helpers.py
@@ -0,0 +1,479 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import pickle
+import sys
+from enum import Enum, unique
+from functools import partial
+from typing import Any, Callable, Sequence, Union
+
+import pytest
+import paddleext.torchapi as B
+from paddleext.torchapi import Tensor
+from paddleext.torchapi.multiprocessing import set_start_method  # mirrors torch.multiprocessing
+
+from tests.helpers.testers import MetricTester, _assert_allclose, _assert_requires_grad, _assert_tensor
+from paddlemetrics import Metric
+
+try:
+    set_start_method("spawn")
+except RuntimeError:
+    pass
+
+
+@unique
+class INPUT_ORDER(Enum):
+    PREDS_FIRST = 1
+    TARGETS_FIRST = 2
+
+
+TEXT_METRIC_INPUT = Union[Sequence[str], Sequence[Sequence[str]], Sequence[Sequence[Sequence[str]]]]
+NUM_BATCHES = 2
+
+
+def _class_test(
+    rank: int,
+    worldsize: int,
+    preds: TEXT_METRIC_INPUT,
+    targets: TEXT_METRIC_INPUT,
+    metric_class: Metric,
+    sk_metric: Callable,
+    dist_sync_on_step: bool,
+    metric_args: dict = None,
+    check_dist_sync_on_step: bool = True,
+    check_batch: bool = True,
+    atol: float = 1e-8,
+    device: str = "cpu",
+    fragment_kwargs: bool = False,
+    check_scriptable: bool = True,
+    input_order: INPUT_ORDER = INPUT_ORDER.PREDS_FIRST,
+    key: str = None,
+    **kwargs_update: Any,
+):
+    """Utility function doing the actual comparison between lightning class metric and reference metric.
+
+    Args:
+        rank: rank of current process
+        worldsize: number of processes
+        preds: Sequence of predicted tokens or predicted sentences
+        targets: Sequence of target tokens or target sentences
+        metric_class: lightning metric class that should be tested
+        sk_metric: callable function that is used for comparison
+        dist_sync_on_step: bool, if true will synchronize metric state across
+            processes at each ``forward()``
+        metric_args: dict with additional arguments used for class initialization
+        check_dist_sync_on_step: bool, if true will check if the metric is also correctly
+            calculated per batch per device (and not just at the end)
+        check_batch: bool, if true will check if the metric is also correctly
+            calculated across devices for each batch (and not just at the end)
+        device: determine which device to run on, either 'cuda' or 'cpu'
+        fragment_kwargs: whether tensors in kwargs should be divided as `preds` and `targets` among processes
+        input_order: Define the ordering for the preds and targets positional arguments.
+        key: The key passed onto the `_assert_allclose` to compare the respective metric from the Dict output against
+            the sk_metric.
+        kwargs_update: Additional keyword arguments that will be passed with preds and
+            targets when running update on the metric.
+    """
+    if not metric_args:
+        metric_args = {}
+
+    # Instantiate lightning metric
+    metric = metric_class(
+        compute_on_step=check_dist_sync_on_step or check_batch, dist_sync_on_step=dist_sync_on_step, **metric_args
+    )
+
+    # check that the metric is scriptable
+    if check_scriptable:
+        B.jit.script(metric)
+
+    # move to device
+    metric = metric.to(device)
+    kwargs_update = {k: v.to(device) if isinstance(v, Tensor) else v for k, v in kwargs_update.items()}
+
+    # verify metrics work after being loaded from pickled state
+    pickled_metric = pickle.dumps(metric)
+    metric = pickle.loads(pickled_metric)
+
+    for i in range(rank, NUM_BATCHES, worldsize):
+        batch_kwargs_update = {k: v[i] if isinstance(v, Tensor) else v for k, v in kwargs_update.items()}
+
+        if input_order == INPUT_ORDER.PREDS_FIRST:
+            batch_result = metric(preds[i], targets[i], **batch_kwargs_update)
+        elif input_order == INPUT_ORDER.TARGETS_FIRST:
+            batch_result = metric(targets[i], preds[i], **batch_kwargs_update)
+
+        if metric.dist_sync_on_step and check_dist_sync_on_step and rank == 0:
+            # Concatenation of Sequence of strings
+            ddp_preds = type(preds)()
+            ddp_targets = type(targets)()
+            for r in range(worldsize):
+                ddp_preds = ddp_preds + preds[i + r]
+                ddp_targets = ddp_targets + targets[i + r]
+            ddp_kwargs_upd = {
+                k: B.cat([v[i + r] for r in range(worldsize)]).cpu() if isinstance(v, Tensor) else v
+                for k, v in (kwargs_update if fragment_kwargs else batch_kwargs_update).items()
+            }
+
+            if input_order == INPUT_ORDER.PREDS_FIRST:
+                sk_batch_result = sk_metric(ddp_preds, ddp_targets, **ddp_kwargs_upd)
+            elif input_order == INPUT_ORDER.TARGETS_FIRST:
+                sk_batch_result = sk_metric(ddp_targets, ddp_preds, **ddp_kwargs_upd)
+            _assert_allclose(batch_result, sk_batch_result, atol=atol, key=key)
+
+        elif check_batch and not metric.dist_sync_on_step:
+            batch_kwargs_update = {
+                k: v.cpu() if isinstance(v, Tensor) else v
+                for k, v in (batch_kwargs_update if fragment_kwargs else kwargs_update).items()
+            }
+            if input_order == INPUT_ORDER.PREDS_FIRST:
+                sk_batch_result = sk_metric(preds[i], targets[i], **batch_kwargs_update)
+            elif input_order == INPUT_ORDER.TARGETS_FIRST:
+                sk_batch_result = sk_metric(targets[i], preds[i], **batch_kwargs_update)
+
+            _assert_allclose(batch_result, sk_batch_result, atol=atol, key=key)
+
+    # check that metrics are hashable
+    assert hash(metric)
+
+    # check on all batches on all ranks
+    result = metric.compute()
+    _assert_tensor(result, key=key)
+
+    # Concatenation of Sequence of strings
+    total_preds = type(preds)()
+    total_targets = type(targets)()
+    for i in range(NUM_BATCHES):
+        total_preds = total_preds + preds[i]
+        total_targets = total_targets + targets[i]
+    total_kwargs_update = {
+        k: B.cat([v[i] for i in range(NUM_BATCHES)]).cpu() if isinstance(v, Tensor) else v
+        for k, v in kwargs_update.items()
+    }
+    if input_order == INPUT_ORDER.PREDS_FIRST:
+        sk_result = sk_metric(total_preds, total_targets, **total_kwargs_update)
+    elif input_order == INPUT_ORDER.TARGETS_FIRST:
+        sk_result = sk_metric(total_targets, total_preds, **total_kwargs_update)
+
+    # assert after aggregation
+    _assert_allclose(result, sk_result, atol=atol, key=key)
+
+
+def _functional_test(
+    preds: TEXT_METRIC_INPUT,
+    targets: TEXT_METRIC_INPUT,
+    metric_functional: Callable,
+    sk_metric: Callable,
+    metric_args: dict = None,
+    atol: float = 1e-8,
+    device: str = "cpu",
+    fragment_kwargs: bool = False,
+    input_order: INPUT_ORDER = INPUT_ORDER.PREDS_FIRST,
+    key: str = None,
+    **kwargs_update,
+):
+    """Utility function doing the actual comparison between lightning functional metric and reference metric.
+
+    Args:
+        preds: torch tensor with predictions
+        targets: torch tensor with targets
+        metric_functional: lightning metric functional that should be tested
+        sk_metric: callable function that is used for comparison
+        metric_args: dict with additional arguments used for class initialization
+        device: determine which device to run on, either 'cuda' or 'cpu'
+        fragment_kwargs: whether tensors in kwargs should be divided as `preds` and `targets` among processes
+        input_order: Define the ordering for the preds and targets positional arguments.
+        key: The key passed onto the `_assert_allclose` to compare the respective metric from the Dict output against
+            the sk_metric.
+        kwargs_update: Additional keyword arguments that will be passed with preds and
+            targets when running update on the metric.
+    """
+    if not metric_args:
+        metric_args = {}
+
+    metric = partial(metric_functional, **metric_args)
+
+    # Move to device
+    kwargs_update = {k: v.to(device) if isinstance(v, Tensor) else v for k, v in kwargs_update.items()}
+
+    for i in range(NUM_BATCHES):
+        extra_kwargs = {k: v[i] if isinstance(v, Tensor) else v for k, v in kwargs_update.items()}
+        if input_order == INPUT_ORDER.PREDS_FIRST:
+            lightning_result = metric(preds[i], targets[i], **extra_kwargs)
+        elif input_order == INPUT_ORDER.TARGETS_FIRST:
+            lightning_result = metric(targets[i], preds[i], **extra_kwargs)
+
+        extra_kwargs = {
+            k: v.cpu() if isinstance(v, Tensor) else v
+            for k, v in (extra_kwargs if fragment_kwargs else kwargs_update).items()
+        }
+        if input_order == INPUT_ORDER.PREDS_FIRST:
+            sk_result = sk_metric(preds[i], targets[i], **extra_kwargs)
+        elif input_order == INPUT_ORDER.TARGETS_FIRST:
+            sk_result = sk_metric(targets[i], preds[i], **extra_kwargs)
+
+        # assert it's the same
+        _assert_allclose(lightning_result, sk_result, atol=atol, key=key)
+
+
+def _assert_half_support(
+    metric_module: Metric,
+    metric_functional: Callable,
+    preds: TEXT_METRIC_INPUT,
+    targets: TEXT_METRIC_INPUT,
+    device: str = "cpu",
+    **kwargs_update,
+):
+    """Test if a metric can be used with half precision tensors.
+
+    Args:
+        metric_module: the metric module to test
+        metric_functional: the metric functional to test
+        preds: torch tensor with predictions
+        targets: torch tensor with targets
+        device: determine device, either "cpu" or "cuda"
+        kwargs_update: Additional keyword arguments that will be passed with preds and
+            targets when running update on the metric.
+    """
+    y_hat = preds[0]
+    y = targets[0]
+    kwargs_update = {
+        k: (v[0].half() if v.is_floating_point() else v[0]).to(device) if isinstance(v, Tensor) else v
+        for k, v in kwargs_update.items()
+    }
+    metric_module = metric_module.to(device)
+    _assert_tensor(metric_module(y_hat, y, **kwargs_update))
+    _assert_tensor(metric_functional(y_hat, y, **kwargs_update))
+
+
+class TextTester(MetricTester):
+    """Class used for efficiently running a lot of parametrized tests in ddp mode. Makes sure that ddp is only set
+    up once and that a pool of processes is used for all tests.
+
+    All tests for text metrics should subclass from this and implement a new method called `test_metric_name` where
+    the method `self.run_metric_test` is called inside.
+    """
+
+    def run_functional_metric_test(
+        self,
+        preds: TEXT_METRIC_INPUT,
+        targets: TEXT_METRIC_INPUT,
+        metric_functional: Callable,
+        sk_metric: Callable,
+        metric_args: dict = None,
+        fragment_kwargs: bool = False,
+        input_order: INPUT_ORDER = INPUT_ORDER.PREDS_FIRST,
+        key: str = None,
+        **kwargs_update,
+    ):
+        """Main method that should be used for testing functions. Call this inside the testing method.
+
+        Args:
+            preds: torch tensor with predictions
+            targets: torch tensor with targets
+            metric_functional: lightning metric class that should be tested
+            sk_metric: callable function that is used for comparison
+            metric_args: dict with additional arguments used for class initialization
+            fragment_kwargs: whether tensors in kwargs should be divided as `preds` and `targets` among processes
+            input_order: Define the ordering for the preds and targets positional arguments.
+            key: The key passed onto the `_assert_allclose` to compare the respective metric from the Dict output
+                against the sk_metric.
+            kwargs_update: Additional keyword arguments that will be passed with preds and
+                targets when running update on the metric.
+        """
+        device = "cuda" if (B.cuda.is_available() and B.cuda.device_count() > 0) else "cpu"
+
+        _functional_test(
+            preds=preds,
+            targets=targets,
+            metric_functional=metric_functional,
+            sk_metric=sk_metric,
+            metric_args=metric_args,
+            atol=self.atol,
+            device=device,
+            fragment_kwargs=fragment_kwargs,
+            input_order=input_order,
+            key=key,
+            **kwargs_update,
+        )
+
+    def run_class_metric_test(
+        self,
+        ddp: bool,
+        preds: TEXT_METRIC_INPUT,
+        targets: TEXT_METRIC_INPUT,
+        metric_class: Metric,
+        sk_metric: Callable,
+        dist_sync_on_step: bool,
+        metric_args: dict = None,
+        check_dist_sync_on_step: bool = True,
+        check_batch: bool = True,
+        fragment_kwargs: bool = False,
+        check_scriptable: bool = True,
+        input_order: INPUT_ORDER = INPUT_ORDER.PREDS_FIRST,
+        key: str = None,
+        **kwargs_update,
+    ):
+        """Main method that should be used for testing classes. Call this inside the testing method.
+ + Args: + ddp: bool, if running in ddp mode or not + preds: torch tensor with predictions + targets: torch tensor with targets + metric_class: lightning metric class that should be tested + sk_metric: callable function that is used for comparison + dist_sync_on_step: bool, if true will synchronize metric state across + processes at each ``forward()`` + metric_args: dict with additional arguments used for class initialization + check_dist_sync_on_step: bool, if true will check if the metric is also correctly + calculated per batch per device (and not just at the end) + check_batch: bool, if true will check if the metric is also correctly + calculated across devices for each batch (and not just at the end) + fragment_kwargs: whether tensors in kwargs should be divided as `preds` and `targets` among processes + input_order: Define the ordering for the preds and targets positional arguments. + key: The key passed onto the `_assert_allclose` to compare the respective metric from the Dict output + against the sk_metric. + kwargs_update: Additional keyword arguments that will be passed with preds and + targets when running update on the metric. + """ + if not metric_args: + metric_args = {} + if ddp: + if sys.platform == "win32": + pytest.skip("DDP not supported on windows") + + self.pool.starmap( + partial( + _class_test, + preds=preds, + targets=targets, + metric_class=metric_class, + sk_metric=sk_metric, + dist_sync_on_step=dist_sync_on_step, + metric_args=metric_args, + check_dist_sync_on_step=check_dist_sync_on_step, + check_batch=check_batch, + atol=self.atol, + fragment_kwargs=fragment_kwargs, + check_scriptable=check_scriptable, + input_order=input_order, + key=key, + **kwargs_update, + ), + [(rank, self.poolSize) for rank in range(self.poolSize)], + ) + else: + device = "cuda" if (B.cuda.is_available() and B.cuda.device_count() > 0) else "cpu" + + _class_test( + rank=0, + worldsize=1, + preds=preds, + targets=targets, + metric_class=metric_class, + sk_metric=sk_metric, + dist_sync_on_step=dist_sync_on_step, + metric_args=metric_args, + check_dist_sync_on_step=check_dist_sync_on_step, + check_batch=check_batch, + atol=self.atol, + device=device, + fragment_kwargs=fragment_kwargs, + check_scriptable=check_scriptable, + input_order=input_order, + key=key, + **kwargs_update, + ) + + @staticmethod + def run_precision_test_cpu( + preds: TEXT_METRIC_INPUT, + targets: TEXT_METRIC_INPUT, + metric_module: Metric, + metric_functional: Callable, + metric_args: dict = None, + **kwargs_update, + ): + """Test if a metric can be used with half precision tensors on cpu + Args: + preds: torch tensor with predictions + targets: torch tensor with targets + metric_module: the metric module to test + metric_functional: the metric functional to test + metric_args: dict with additional arguments used for class initialization + kwargs_update: Additional keyword arguments that will be passed with preds and + targets when running update on the metric. 
+        """
+        metric_args = metric_args or {}
+        _assert_half_support(
+            metric_module(**metric_args), metric_functional, preds, targets, device="cpu", **kwargs_update
+        )
+
+    @staticmethod
+    def run_precision_test_gpu(
+        preds: TEXT_METRIC_INPUT,
+        targets: TEXT_METRIC_INPUT,
+        metric_module: Metric,
+        metric_functional: Callable,
+        metric_args: dict = None,
+        **kwargs_update,
+    ):
+        """Test if a metric can be used with half precision tensors on gpu
+        Args:
+            preds: torch tensor with predictions
+            targets: torch tensor with targets
+            metric_module: the metric module to test
+            metric_functional: the metric functional to test
+            metric_args: dict with additional arguments used for class initialization
+            kwargs_update: Additional keyword arguments that will be passed with preds and
+                targets when running update on the metric.
+        """
+        metric_args = metric_args or {}
+        _assert_half_support(
+            metric_module(**metric_args), metric_functional, preds, targets, device="cuda", **kwargs_update
+        )
+
+    @staticmethod
+    def run_differentiability_test(
+        preds: TEXT_METRIC_INPUT,
+        targets: TEXT_METRIC_INPUT,
+        metric_module: Metric,
+        metric_functional: Callable,
+        metric_args: dict = None,
+        input_order: INPUT_ORDER = INPUT_ORDER.PREDS_FIRST,
+        key: str = None,
+    ):
+        """Test if a metric is differentiable or not.
+
+        Args:
+            preds: torch tensor with predictions
+            targets: torch tensor with targets
+            metric_module: the metric module to test
+            metric_args: dict with additional arguments used for class initialization
+            input_order: Define the ordering for the preds and targets positional arguments.
+            key: The key passed onto the `_assert_allclose` to compare the respective metric from the Dict output
+                against the sk_metric.
+        """
+        metric_args = metric_args or {}
+        # only floating point tensors can require grad
+        metric = metric_module(**metric_args)
+        if input_order == INPUT_ORDER.PREDS_FIRST:
+            out = metric(preds[0], targets[0])
+        elif input_order == INPUT_ORDER.TARGETS_FIRST:
+            out = metric(targets[0], preds[0])
+
+        # Check if requires_grad matches is_differentiable attribute
+        _assert_requires_grad(metric, out, key=key)
+
+        if metric.is_differentiable:
+            # check for numerical correctness
+            assert B.autograd.gradcheck(partial(metric_functional, **metric_args), (preds[0], targets[0]))
diff --git a/EE/paddlemetric/src/tests/text/test_bertscore.py b/EE/paddlemetric/src/tests/text/test_bertscore.py
new file mode 100644
index 000000000..68e515683
--- /dev/null
+++ b/EE/paddlemetric/src/tests/text/test_bertscore.py
@@ -0,0 +1,318 @@
+import os
+from typing import Any, Dict, List
+
+import numpy as np
+import pytest
+import paddleext.torchapi as B
+import paddleext.torchapi.distributed as dist
+import paddleext.torchapi.multiprocessing as mp  # assumes the shim mirrors torch.multiprocessing
+
+from paddlemetrics.functional import bert_score as metrics_bert_score
+from paddlemetrics.text import BERTScore
+from paddlemetrics.utilities.imports import _BERTSCORE_AVAILABLE
+
+if _BERTSCORE_AVAILABLE:
+    from bert_score import score as original_bert_score
+
+os.environ["TOKENIZERS_PARALLELISM"] = "1"
+
+# Examples and expected values taken from:
+# https://github.com/Tiiiger/bert_score/blob/master/tests/test_scorer.py
+preds = [
+    "28-year-old chef found dead in San Francisco mall",
+    "A 28-year-old chef who recently moved to San Francisco was "
+    "found dead in the staircase of a local shopping center.",
+    "The victim's brother said he cannot imagine anyone who would want to harm him,\"Finally, it went uphill again at "
+    'him."',
+]
+refs = [
+    "28-Year-Old
Chef Found Dead at San Francisco Mall", + "A 28-year-old chef who had recently moved to San Francisco was found dead in the stairwell of a local mall this " + "week.", + "But the victim's brother says he can't think of anyone who would want to hurt him, saying, \"Things were finally " + 'going well for him."', +] + + +_METRICS = ["precision", "recall", "f1"] + +MODEL_NAME = "albert-base-v2" + + +def _assert_list(preds: Any, refs: Any, threshold: float = 1e-8): + """Assert two lists are equal.""" + assert np.allclose(preds, refs, atol=threshold, equal_nan=True) + + +def _parse_original_bert_score(score: B.Tensor) -> Dict[str, List[float]]: + """Parse the BERT score returned by the original `bert-score` package.""" + score_dict = {metric: value.tolist() for metric, value in zip(_METRICS, score)} + return score_dict + + +preds_batched = [preds[0:2], preds[2:]] +refs_batched = [refs[0:2], refs[2:]] + + +@pytest.mark.parametrize( + "preds,refs", + [(preds, refs)], +) +@pytest.mark.skipif(not _BERTSCORE_AVAILABLE, reason="test requires bert_score") +def test_score_fn(preds, refs): + """Tests for functional.""" + original_score = original_bert_score(preds, refs, model_type=MODEL_NAME, num_layers=8, idf=False, batch_size=3) + original_score = _parse_original_bert_score(original_score) + + metrics_score = metrics_bert_score( + preds, refs, model_name_or_path=MODEL_NAME, num_layers=8, idf=False, batch_size=3 + ) + + for metric in _METRICS: + _assert_list(metrics_score[metric], original_score[metric]) + + +@pytest.mark.parametrize( + "preds,refs", + [(preds, refs)], +) +@pytest.mark.skipif(not _BERTSCORE_AVAILABLE, reason="test requires bert_score") +def test_score_fn_with_idf(preds, refs): + """Tests for functional with IDF rescaling.""" + original_score = original_bert_score(preds, refs, model_type=MODEL_NAME, num_layers=12, idf=True, batch_size=3) + original_score = _parse_original_bert_score(original_score) + + metrics_score = metrics_bert_score( + preds, refs, model_name_or_path=MODEL_NAME, num_layers=12, idf=True, batch_size=3 + ) + + for metric in _METRICS: + _assert_list(metrics_score[metric], original_score[metric]) + + +@pytest.mark.parametrize( + "preds,refs", + [(preds, refs)], +) +@pytest.mark.skipif(not _BERTSCORE_AVAILABLE, reason="test requires bert_score") +def test_score_fn_all_layers(preds, refs): + """Tests for functional and all layers.""" + original_score = original_bert_score(preds, refs, model_type=MODEL_NAME, all_layers=True, idf=False, batch_size=3) + original_score = _parse_original_bert_score(original_score) + + metrics_score = metrics_bert_score( + preds, refs, model_name_or_path=MODEL_NAME, all_layers=True, idf=False, batch_size=3 + ) + + for metric in _METRICS: + _assert_list(metrics_score[metric], original_score[metric]) + + +@pytest.mark.parametrize( + "preds,refs", + [(preds, refs)], +) +@pytest.mark.skipif(not _BERTSCORE_AVAILABLE, reason="test requires bert_score") +def test_score_fn_all_layers_with_idf(preds, refs): + """Tests for functional and all layers with IDF rescaling.""" + original_score = original_bert_score(preds, refs, model_type=MODEL_NAME, all_layers=True, idf=True, batch_size=3) + original_score = _parse_original_bert_score(original_score) + + metrics_score = metrics_bert_score( + preds, refs, model_name_or_path=MODEL_NAME, all_layers=True, idf=True, batch_size=3 + ) + + for metric in _METRICS: + _assert_list(metrics_score[metric], original_score[metric]) + + +@pytest.mark.parametrize( + "preds,refs", + [(preds, refs)], +) +@pytest.mark.skipif(not 
_BERTSCORE_AVAILABLE, reason="test requires bert_score")
+def test_score_fn_rescale_with_baseline(preds, refs):
+    """Tests for functional with baseline rescaling."""
+    original_score = original_bert_score(
+        preds,
+        refs,
+        model_type=MODEL_NAME,
+        lang="en",
+        num_layers=8,
+        idf=False,
+        batch_size=3,
+        rescale_with_baseline=True,
+    )
+    original_score = _parse_original_bert_score(original_score)
+
+    metrics_score = metrics_bert_score(
+        preds,
+        refs,
+        model_name_or_path=MODEL_NAME,
+        lang="en",
+        num_layers=8,
+        idf=False,
+        batch_size=3,
+        rescale_with_baseline=True,
+    )
+
+    for metric in _METRICS:
+        _assert_list(metrics_score[metric], original_score[metric])
+
+
+@pytest.mark.parametrize(
+    "preds,refs",
+    [(preds, refs)],
+)
+@pytest.mark.skipif(not _BERTSCORE_AVAILABLE, reason="test requires bert_score")
+def test_score_fn_all_layers_rescale_with_baseline(preds, refs):
+    """Tests for functional with baseline rescaling with all layers."""
+    original_score = original_bert_score(
+        preds,
+        refs,
+        model_type=MODEL_NAME,
+        lang="en",
+        all_layers=True,
+        idf=False,
+        batch_size=3,
+        rescale_with_baseline=True,
+    )
+    original_score = _parse_original_bert_score(original_score)
+
+    metrics_score = metrics_bert_score(
+        preds,
+        refs,
+        model_name_or_path=MODEL_NAME,
+        lang="en",
+        all_layers=True,
+        idf=False,
+        batch_size=3,
+        rescale_with_baseline=True,
+    )
+
+    for metric in _METRICS:
+        _assert_list(metrics_score[metric], original_score[metric])
+
+
+@pytest.mark.parametrize(
+    "preds,refs",
+    [(preds, refs)],
+)
+@pytest.mark.skipif(not _BERTSCORE_AVAILABLE, reason="test requires bert_score")
+def test_score(preds, refs):
+    """Tests for metric."""
+    original_score = original_bert_score(preds, refs, model_type=MODEL_NAME, num_layers=8, idf=False, batch_size=3)
+    original_score = _parse_original_bert_score(original_score)
+
+    Scorer = BERTScore(model_name_or_path=MODEL_NAME, num_layers=8, idf=False, batch_size=3)
+    Scorer.update(predictions=preds, references=refs)
+    metrics_score = Scorer.compute()
+
+    for metric in _METRICS:
+        _assert_list(metrics_score[metric], original_score[metric])
+
+
+@pytest.mark.parametrize(
+    "preds,refs",
+    [(preds, refs)],
+)
+@pytest.mark.skipif(not _BERTSCORE_AVAILABLE, reason="test requires bert_score")
+def test_score_with_idf(preds, refs):
+    """Tests for metric with IDF rescaling."""
+    original_score = original_bert_score(preds, refs, model_type=MODEL_NAME, num_layers=8, idf=True, batch_size=3)
+    original_score = _parse_original_bert_score(original_score)
+
+    Scorer = BERTScore(model_name_or_path=MODEL_NAME, num_layers=8, idf=True, batch_size=3)
+    Scorer.update(predictions=preds, references=refs)
+    metrics_score = Scorer.compute()
+
+    for metric in _METRICS:
+        _assert_list(metrics_score[metric], original_score[metric])
+
+
+@pytest.mark.parametrize(
+    "preds,refs",
+    [(preds, refs)],
+)
+@pytest.mark.skipif(not _BERTSCORE_AVAILABLE, reason="test requires bert_score")
+def test_score_all_layers(preds, refs):
+    """Tests for metric and all layers."""
+    original_score = original_bert_score(preds, refs, model_type=MODEL_NAME, all_layers=True, idf=False, batch_size=3)
+    original_score = _parse_original_bert_score(original_score)
+
+    Scorer = BERTScore(model_name_or_path=MODEL_NAME, all_layers=True, idf=False, batch_size=3)
+    Scorer.update(predictions=preds, references=refs)
+    metrics_score = Scorer.compute()
+
+    for metric in _METRICS:
+        _assert_list(metrics_score[metric], original_score[metric])
+
+
+@pytest.mark.parametrize(
+    "preds,refs",
+    [(preds,
refs)], +) +@pytest.mark.skipif(not _BERTSCORE_AVAILABLE, reason="test requires bert_score") +def test_score_all_layers_with_idf(preds, refs): + """Tests for metric and all layers with IDF rescaling.""" + original_score = original_bert_score(preds, refs, model_type=MODEL_NAME, all_layers=True, idf=True, batch_size=3) + original_score = _parse_original_bert_score(original_score) + + Scorer = BERTScore(model_name_or_path=MODEL_NAME, all_layers=True, idf=True, batch_size=3) + Scorer.update(predictions=preds, references=refs) + metrics_score = Scorer.compute() + + for metric in _METRICS: + _assert_list(metrics_score[metric], original_score[metric]) + + +@pytest.mark.parametrize( + "preds,refs", + [(preds_batched, refs_batched)], +) +@pytest.mark.skipif(not _BERTSCORE_AVAILABLE, reason="test requires bert_score") +def test_accumulation(preds, refs): + """Tests for metric works with accumulation.""" + original_score = original_bert_score( + sum(preds, []), sum(refs, []), model_type=MODEL_NAME, num_layers=8, idf=False, batch_size=3 + ) + original_score = _parse_original_bert_score(original_score) + + Scorer = BERTScore(model_name_or_path=MODEL_NAME, num_layers=8, idf=False, batch_size=3) + for p, r in zip(preds, refs): + Scorer.update(predictions=p, references=r) + metrics_score = Scorer.compute() + + for metric in _METRICS: + _assert_list(metrics_score[metric], original_score[metric]) + + +def _bert_score_ddp(rank, world_size, preds, refs, original_score): + """Define a DDP process for BERTScore.""" + os.environ["MASTER_ADDR"] = "localhost" + os.environ["MASTER_PORT"] = "12355" + dist.init_process_group("gloo", rank=rank, world_size=world_size) + Scorer = BERTScore(model_name_or_path=MODEL_NAME, num_layers=8, idf=False, batch_size=3, max_length=128) + Scorer.update(preds, refs) + metrics_score = Scorer.compute() + for metric in _METRICS: + _assert_list(metrics_score[metric], original_score[metric]) + dist.destroy_process_group() + + +def _test_score_ddp_fn(rank, world_size, preds, refs): + """Core functionality for the `test_score_ddp` test.""" + original_score = original_bert_score(preds, refs, model_type=MODEL_NAME, num_layers=8, idf=False, batch_size=3) + original_score = _parse_original_bert_score(original_score) + _bert_score_ddp(rank, world_size, preds, refs, original_score) + + +@pytest.mark.parametrize( + "preds,refs", + [(preds, refs)], +) +@pytest.mark.skipif(not (_BERTSCORE_AVAILABLE and dist.is_available()), reason="test requires bert_score") +def test_score_ddp(preds, refs): + """Tests for metric using DDP.""" + world_size = 2 + mp.spawn(_test_score_ddp_fn, args=(world_size, preds, refs), nprocs=world_size, join=False) diff --git a/EE/paddlemetric/src/tests/text/test_bleu.py b/EE/paddlemetric/src/tests/text/test_bleu.py new file mode 100644 index 000000000..168dc7607 --- /dev/null +++ b/EE/paddlemetric/src/tests/text/test_bleu.py @@ -0,0 +1,141 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
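+
+# Orientation for the comparisons below (an illustrative sketch, not part of the ported
+# test logic): nltk's `corpus_bleu` takes the references first, i.e.
+#
+#     from nltk.translate.bleu_score import corpus_bleu
+#     corpus_bleu([[REFERENCE_1A, REFERENCE_2A, REFERENCE_3A]], [HYPOTHESIS_A], weights=[1])
+#
+# which is why every test in this module passes `input_order=INPUT_ORDER.TARGETS_FIRST`.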
+ +from functools import partial + +import pytest +from nltk.translate.bleu_score import SmoothingFunction, corpus_bleu +from paddleext.torchapi import tensor + +from tests.text.helpers import INPUT_ORDER, TextTester +from paddlemetrics.functional.text.bleu import bleu_score +from paddlemetrics.text.bleu import BLEUScore + +# example taken from +# https://www.nltk.org/api/nltk.translate.html?highlight=bleu%20score#nltk.translate.bleu_score.corpus_bleu +# EXAMPLE 1 +HYPOTHESIS_A = tuple( + "It is a guide to action which ensures that the military always obeys the commands of the party".split() +) +REFERENCE_1A = tuple("It is a guide to action that ensures that the military will forever heed Party commands".split()) +REFERENCE_2A = tuple( + "It is a guiding principle which makes the military forces always being under the command of the Party".split() +) +REFERENCE_3A = tuple("It is the practical guide for the army always to heed the directions of the party".split()) + +# EXAMPLE 2 +HYPOTHESIS_B = tuple("he read the book because he was interested in world history".split()) +REFERENCE_1B = tuple("he was interested in world history because he read the book".split()) + +# EXAMPLE 3 +HYPOTHESIS_C = tuple("the cat the cat on the mat".split()) +REFERENCE_1C = tuple("the cat is on the mat".split()) +REFERENCE_2C = tuple("there is a cat on the mat".split()) + +TUPLE_OF_REFERENCES = ( + ((REFERENCE_1A, REFERENCE_2A, REFERENCE_3A), tuple([REFERENCE_1B])), + (tuple([REFERENCE_1B]), (REFERENCE_1C, REFERENCE_2C)), +) +TUPLE_OF_HYPOTHESES = ((HYPOTHESIS_A, HYPOTHESIS_B), (HYPOTHESIS_B, HYPOTHESIS_C)) + +BATCHES = {"preds": TUPLE_OF_HYPOTHESES, "targets": TUPLE_OF_REFERENCES} + +# https://www.nltk.org/api/nltk.translate.html?highlight=bleu%20score#nltk.translate.bleu_score.SmoothingFunction +smooth_func = SmoothingFunction().method2 + + +@pytest.mark.parametrize( + ["weights", "n_gram", "smooth_func", "smooth"], + [ + pytest.param([1], 1, None, False), + pytest.param([0.5, 0.5], 2, smooth_func, True), + pytest.param([0.333333, 0.333333, 0.333333], 3, None, False), + pytest.param([0.25, 0.25, 0.25, 0.25], 4, smooth_func, True), + ], +) +@pytest.mark.parametrize( + ["preds", "targets"], + [ + pytest.param(BATCHES["preds"], BATCHES["targets"]), + ], +) +class TestBLEUScore(TextTester): + @pytest.mark.parametrize("ddp", [False, True]) + @pytest.mark.parametrize("dist_sync_on_step", [False, True]) + def test_bleu_score_class(self, ddp, dist_sync_on_step, preds, targets, weights, n_gram, smooth_func, smooth): + metric_args = {"n_gram": n_gram, "smooth": smooth} + + nltk_metric = partial(corpus_bleu, weights=weights, smoothing_function=smooth_func) + + self.run_class_metric_test( + ddp=ddp, + preds=preds, + targets=targets, + metric_class=BLEUScore, + sk_metric=nltk_metric, + dist_sync_on_step=dist_sync_on_step, + metric_args=metric_args, + input_order=INPUT_ORDER.TARGETS_FIRST, + ) + + def test_bleu_score_functional(self, preds, targets, weights, n_gram, smooth_func, smooth): + metric_args = {"n_gram": n_gram, "smooth": smooth} + nltk_metric = partial(corpus_bleu, weights=weights, smoothing_function=smooth_func) + + self.run_functional_metric_test( + preds, + targets, + metric_functional=bleu_score, + sk_metric=nltk_metric, + metric_args=metric_args, + input_order=INPUT_ORDER.TARGETS_FIRST, + ) + + def test_bleu_score_differentiability(self, preds, targets, weights, n_gram, smooth_func, smooth): + metric_args = {"n_gram": n_gram, "smooth": smooth} + + self.run_differentiability_test( + preds=preds, + 
targets=targets, + metric_module=BLEUScore, + metric_functional=bleu_score, + metric_args=metric_args, + input_order=INPUT_ORDER.TARGETS_FIRST, + ) + + +def test_bleu_empty_functional(): + hyp = [[]] + ref = [[[]]] + assert bleu_score(ref, hyp) == tensor(0.0) + + +def test_no_4_gram_functional(): + hyps = [["My", "full", "pytorch-lightning"]] + refs = [[["My", "full", "pytorch-lightning", "test"], ["Completely", "Different"]]] + assert bleu_score(refs, hyps) == tensor(0.0) + + +def test_bleu_empty_class(): + bleu = BLEUScore() + hyp = [[]] + ref = [[[]]] + assert bleu(ref, hyp) == tensor(0.0) + + +def test_no_4_gram_class(): + bleu = BLEUScore() + hyps = [["My", "full", "pytorch-lightning"]] + refs = [[["My", "full", "pytorch-lightning", "test"], ["Completely", "Different"]]] + assert bleu(refs, hyps) == tensor(0.0) diff --git a/EE/paddlemetric/src/tests/text/test_rouge.py b/EE/paddlemetric/src/tests/text/test_rouge.py new file mode 100644 index 000000000..4696dcee3 --- /dev/null +++ b/EE/paddlemetric/src/tests/text/test_rouge.py @@ -0,0 +1,147 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from functools import partial +from typing import List + +import pytest + +from tests.text.helpers import INPUT_ORDER, TextTester +from paddlemetrics.functional.text.rouge import rouge_score +from paddlemetrics.text.rouge import ROUGEScore +from paddlemetrics.utilities.imports import _NLTK_AVAILABLE, _ROUGE_SCORE_AVAILABLE + +if _ROUGE_SCORE_AVAILABLE: + from rouge_score.rouge_scorer import RougeScorer + from rouge_score.scoring import BootstrapAggregator +else: + RougeScorer, BootstrapAggregator = object, object + +ROUGE_KEYS = ("rouge1", "rouge2", "rougeL", "rougeLsum") + +SINGLE_SENTENCE_EXAMPLE_PREDS = "The quick brown fox jumps over the lazy dog" +SINGLE_SENTENCE_EXAMPLE_TARGET = "The quick brown dog jumps on the log." 
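+
+# Shape of the values under test (a hedged sketch; the exact keys come from the rouge
+# implementation in paddlemetrics, mirroring torchmetrics): the functional API returns
+# a dict keyed "<rouge_key>_<measure>", e.g.
+#
+#     from paddlemetrics.functional.text.rouge import rouge_score
+#     scores = rouge_score(SINGLE_SENTENCE_EXAMPLE_PREDS, SINGLE_SENTENCE_EXAMPLE_TARGET)
+#     scores["rouge1_fmeasure"]  # scalar tensor
+#
+# `_compute_rouge_score` below builds the matching reference value with the
+# `rouge_score` package's RougeScorer and BootstrapAggregator.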
+ +PREDS = "My name is John" +TARGETS = "Is your name John" + + +BATCHES_1 = { + "preds": [["the cat was under the bed"], ["the cat was found under the bed"]], + "targets": [["the cat was found under the bed"], ["the tiny little cat was found under the big funny bed "]], +} + + +BATCHES_2 = { + "preds": [["The quick brown fox jumps over the lazy dog"], ["My name is John"]], + "targets": [["The quick brown dog jumps on the log."], ["Is your name John"]], +} + + +def _compute_rouge_score(preds: List[str], targets: List[str], use_stemmer: bool, rouge_level: str, metric: str): + if isinstance(preds, str): + preds = [preds] + if isinstance(targets, str): + targets = [targets] + scorer = RougeScorer(ROUGE_KEYS, use_stemmer=use_stemmer) + aggregator = BootstrapAggregator() + for pred, target in zip(preds, targets): + aggregator.add_scores(scorer.score(target, pred)) + rs_scores = aggregator.aggregate() + rs_result = getattr(rs_scores[rouge_level].mid, metric) + return rs_result + + +@pytest.mark.skipif(not _NLTK_AVAILABLE, reason="test requires nltk") +@pytest.mark.parametrize( + ["pl_rouge_metric_key", "use_stemmer"], + [ + pytest.param("rouge1_precision", True), + pytest.param("rouge1_recall", True), + pytest.param("rouge1_fmeasure", False), + pytest.param("rouge2_precision", False), + pytest.param("rouge2_recall", True), + pytest.param("rouge2_fmeasure", True), + pytest.param("rougeL_precision", False), + pytest.param("rougeL_recall", False), + pytest.param("rougeL_fmeasure", True), + pytest.param("rougeLsum_precision", True), + pytest.param("rougeLsum_recall", False), + pytest.param("rougeLsum_fmeasure", False), + ], +) +@pytest.mark.parametrize( + ["preds", "targets"], + [ + pytest.param(BATCHES_1["preds"], BATCHES_1["targets"]), + pytest.param(BATCHES_2["preds"], BATCHES_2["targets"]), + ], +) +class TestROUGEScore(TextTester): + @pytest.mark.parametrize("ddp", [False, True]) + @pytest.mark.parametrize("dist_sync_on_step", [False, True]) + def test_rouge_score_class(self, ddp, dist_sync_on_step, preds, targets, pl_rouge_metric_key, use_stemmer): + metric_args = {"use_stemmer": use_stemmer} + + rouge_level, metric = pl_rouge_metric_key.split("_") + rouge_metric = partial(_compute_rouge_score, use_stemmer=use_stemmer, rouge_level=rouge_level, metric=metric) + + self.run_class_metric_test( + ddp=ddp, + preds=preds, + targets=targets, + metric_class=ROUGEScore, + sk_metric=rouge_metric, + dist_sync_on_step=dist_sync_on_step, + metric_args=metric_args, + input_order=INPUT_ORDER.PREDS_FIRST, + key=pl_rouge_metric_key, + ) + + def test_rouge_score_functional(self, preds, targets, pl_rouge_metric_key, use_stemmer): + metric_args = {"use_stemmer": use_stemmer} + + rouge_level, metric = pl_rouge_metric_key.split("_") + rouge_metric = partial(_compute_rouge_score, use_stemmer=use_stemmer, rouge_level=rouge_level, metric=metric) + + self.run_functional_metric_test( + preds, + targets, + metric_functional=rouge_score, + sk_metric=rouge_metric, + metric_args=metric_args, + input_order=INPUT_ORDER.PREDS_FIRST, + key=pl_rouge_metric_key, + ) + + +def test_rouge_metric_raises_errors_and_warnings(): + """Test that expected warnings and errors are raised.""" + if not _NLTK_AVAILABLE: + with pytest.raises( + ValueError, + match="ROUGE metric requires that nltk is installed." 
+            "Either as `pip install paddlemetrics[text]` or `pip install nltk`",
+        ):
+            ROUGEScore()
+
+
+def test_rouge_metric_wrong_key_value_error():
+    key = ("rouge1", "rouge")
+
+    with pytest.raises(ValueError):
+        ROUGEScore(rouge_keys=key)
+
+    with pytest.raises(ValueError):
+        rouge_score(PREDS, TARGETS, rouge_keys=key)
diff --git a/EE/paddlemetric/src/tests/text/test_sacre_bleu.py b/EE/paddlemetric/src/tests/text/test_sacre_bleu.py
new file mode 100644
index 000000000..289a7d703
--- /dev/null
+++ b/EE/paddlemetric/src/tests/text/test_sacre_bleu.py
@@ -0,0 +1,73 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+import paddleext.torchapi as B
+
+from tests.text.helpers import TextTester
+from paddlemetrics.functional.text.sacre_bleu import sacre_bleu_score
+from paddlemetrics.text.sacre_bleu import SacreBLEUScore
+from paddlemetrics.utilities.imports import _SACREBLEU_AVAILABLE
+
+if _SACREBLEU_AVAILABLE:
+    from sacrebleu.metrics import BLEU
+
+# example taken from https://github.com/mjpost/sacrebleu
+REFERENCES = (
+    # First set of references
+    ("The dog bit the man.", "It was not unexpected.", "The man bit him first."),
+    # Second set of references
+    ("The dog had bit the man.", "No one was surprised.", "The man had bitten the dog."),
+)
+
+HYPOTHESES = ("The dog bit the man.", "It wasn't surprising.", "The man had just bitten him.")
+
+TOKENIZERS = ("none", "13a", "zh", "intl", "char")
+
+ROUND_N_DIGITS = 4
+
+
+def metrics_score_fn(targets, preds, tokenize):
+    metrics_score = sacre_bleu_score(targets, preds, tokenize=tokenize)
+    # rescale to 0-100 and round to 4 decimals to match the score returned by sacrebleu's BLEU
+    metrics_score_normed = B.round(100 * metrics_score * 10 ** ROUND_N_DIGITS) / 10 ** ROUND_N_DIGITS
+    return metrics_score_normed
+
+
+@pytest.mark.parametrize(
+    ["preds", "targets"],
+    [
+        (HYPOTHESES, REFERENCES),
+    ],
+)
+@pytest.mark.parametrize("tokenize", TOKENIZERS)
+@pytest.mark.skipif(not _SACREBLEU_AVAILABLE, reason="test requires sacrebleu")
+class TestSacreBLEUScore(TextTester):
+    def test_sacrebleu_score_functional(self, preds, targets, tokenize):
+        sacrebleu_metrics = BLEU(tokenize=tokenize)
+        original_score = B.tensor(round(sacrebleu_metrics.corpus_score(preds, targets).score, ROUND_N_DIGITS))
+
+        metrics_targets = [[ref[i] for ref in targets] for i in range(len(targets[0]))]
+        metrics_score = metrics_score_fn(metrics_targets, preds, tokenize)
+        assert metrics_score == original_score
+
+    def test_sacrebleu_score_metrics(self, preds, targets, tokenize):
+        sacrebleu_metrics = BLEU(tokenize=tokenize)
+        original_score = B.tensor(round(sacrebleu_metrics.corpus_score(preds, targets).score, ROUND_N_DIGITS))
+
+        metrics_targets = [[ref[i] for ref in targets] for i in range(len(targets[0]))]
+        tm_metrics = SacreBLEUScore(tokenize=tokenize)
+        tm_metrics.update(metrics_targets, preds)
+        metrics_score = B.round(100 * tm_metrics.compute() * 10 ** ROUND_N_DIGITS) / 10 ** ROUND_N_DIGITS
+        assert metrics_score == original_score
diff --git a/EE/paddlemetric/src/tests/text/test_wer.py
b/EE/paddlemetric/src/tests/text/test_wer.py new file mode 100644 index 000000000..65a7ca8b8 --- /dev/null +++ b/EE/paddlemetric/src/tests/text/test_wer.py @@ -0,0 +1,75 @@ +from typing import Callable, List, Union + +import pytest + +from tests.text.helpers import INPUT_ORDER, TextTester +from paddlemetrics.utilities.imports import _JIWER_AVAILABLE + +if _JIWER_AVAILABLE: + from jiwer import compute_measures +else: + compute_measures = Callable + +from paddlemetrics.functional.text.wer import wer +from paddlemetrics.text.wer import WER + +BATCHES_1 = {"preds": [["hello world"], ["what a day"]], "targets": [["hello world"], ["what a wonderful day"]]} + +BATCHES_2 = { + "preds": [ + ["i like python", "what you mean or swallow"], + ["hello duck", "i like python"], + ], + "targets": [ + ["i like monthy python", "what do you mean, african or european swallow"], + ["hello world", "i like monthy python"], + ], +} + + +def _compute_wer_metric_jiwer(prediction: Union[str, List[str]], reference: Union[str, List[str]]): + return compute_measures(reference, prediction)["wer"] + + +@pytest.mark.skipif(not _JIWER_AVAILABLE, reason="test requires jiwer") +@pytest.mark.parametrize( + ["preds", "targets"], + [ + pytest.param(BATCHES_1["preds"], BATCHES_1["targets"]), + pytest.param(BATCHES_2["preds"], BATCHES_2["targets"]), + ], +) +class TestWER(TextTester): + @pytest.mark.parametrize("ddp", [False, True]) + @pytest.mark.parametrize("dist_sync_on_step", [False, True]) + def test_wer_class(self, ddp, dist_sync_on_step, preds, targets): + + self.run_class_metric_test( + ddp=ddp, + preds=preds, + targets=targets, + metric_class=WER, + sk_metric=_compute_wer_metric_jiwer, + dist_sync_on_step=dist_sync_on_step, + input_order=INPUT_ORDER.PREDS_FIRST, + ) + + def test_wer_functional(self, preds, targets): + + self.run_functional_metric_test( + preds, + targets, + metric_functional=wer, + sk_metric=_compute_wer_metric_jiwer, + input_order=INPUT_ORDER.PREDS_FIRST, + ) + + def test_wer_differentiability(self, preds, targets): + + self.run_differentiability_test( + preds=preds, + targets=targets, + metric_module=WER, + metric_functional=wer, + input_order=INPUT_ORDER.PREDS_FIRST, + ) diff --git a/EE/paddlemetric/src/tests/wrappers/__init__.py b/EE/paddlemetric/src/tests/wrappers/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/EE/paddlemetric/src/tests/wrappers/test_bootstrapping.py b/EE/paddlemetric/src/tests/wrappers/test_bootstrapping.py new file mode 100644 index 000000000..ec74c4bf3 --- /dev/null +++ b/EE/paddlemetric/src/tests/wrappers/test_bootstrapping.py @@ -0,0 +1,123 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
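+
+# Idea behind these tests (an illustrative sketch, assuming the shim's `multinomial`
+# mirrors `torch.multinomial`): `_bootstrap_sampler(size, sampling_strategy)` returns
+# indices that resample a batch with replacement, roughly
+#
+#     idx = B.multinomial(B.ones(size), num_samples=size, replacement=True)
+#
+# for the "multinomial" strategy, while "poisson" repeats each sample k ~ Poisson(1)
+# times. The subclass below records those indices so they can be checked directly.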
+import operator
+from functools import partial
+
+import numpy as np
+import pytest
+import paddleext.torchapi as B
+from sklearn.metrics import mean_squared_error, precision_score, recall_score
+from paddleext.torchapi import Tensor
+
+from tests.helpers import seed_all
+from paddlemetrics import MeanSquaredError, Precision, Recall
+from paddlemetrics.utilities import apply_to_collection
+from paddlemetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_7
+from paddlemetrics.wrappers.bootstrapping import BootStrapper, _bootstrap_sampler
+
+seed_all(42)
+
+_preds = B.randint(10, (10, 32))
+_target = B.randint(10, (10, 32))
+
+
+class TestBootStrapper(BootStrapper):
+    """For testing purposes, we subclass the bootstrapper class so we can get the exact permutation the class is
+    creating."""
+
+    def update(self, *args) -> None:
+        self.out = []
+        for idx in range(self.num_bootstraps):
+            size = len(args[0])
+            sample_idx = _bootstrap_sampler(size, sampling_strategy=self.sampling_strategy).to(self.device)
+            new_args = apply_to_collection(args, Tensor, B.index_select, dim=0, index=sample_idx)
+            self.metrics[idx].update(*new_args)
+            self.out.append(new_args)
+
+
+def _sample_checker(old_samples, new_samples, op: operator, threshold: int):
+    found_one = False
+    for os in old_samples:
+        cond = op(os, new_samples)
+        if cond.sum() > threshold:
+            found_one = True
+            break
+    return found_one
+
+
+@pytest.mark.parametrize("sampling_strategy", ["poisson", "multinomial"])
+def test_bootstrap_sampler(sampling_strategy):
+    """Make sure that the bootstrap sampler works as intended."""
+    old_samples = B.randn(20, 2)
+
+    # make sure that the new samples are only made up of old samples
+    idx = _bootstrap_sampler(20, sampling_strategy=sampling_strategy)
+    new_samples = old_samples[idx]
+    for ns in new_samples:
+        assert ns in old_samples
+
+    found_one = _sample_checker(old_samples, new_samples, operator.eq, 2)
+    assert found_one, "resampling did not work because no samples were sampled twice"
+
+    found_zero = _sample_checker(old_samples, new_samples, operator.ne, 0)
+    assert found_zero, "resampling did not work because all samples were sampled at least once"
+
+
+@pytest.mark.parametrize("device", ["cpu", "cuda"])
+@pytest.mark.parametrize("sampling_strategy", ["poisson", "multinomial"])
+@pytest.mark.parametrize(
+    "metric, sk_metric",
+    [
+        [Precision(average="micro"), partial(precision_score, average="micro")],
+        [Recall(average="micro"), partial(recall_score, average="micro")],
+        [MeanSquaredError(), mean_squared_error],
+    ],
+)
+def test_bootstrap(device, sampling_strategy, metric, sk_metric):
+    """Test that the different bootstraps get updated as we expect and that the compute method works."""
+    if device == "cuda" and not B.cuda.is_available():
+        pytest.skip("Test with device='cuda' requires gpu")
+
+    _kwargs = {"base_metric": metric, "mean": True, "std": True, "raw": True, "sampling_strategy": sampling_strategy}
+    if _TORCH_GREATER_EQUAL_1_7:
+        _kwargs.update(dict(quantile=B.tensor([0.05, 0.95], device=device)))
+
+    bootstrapper = TestBootStrapper(**_kwargs)
+    bootstrapper.to(device)
+
+    collected_preds = [[] for _ in range(10)]
+    collected_target = [[] for _ in range(10)]
+    for p, t in zip(_preds, _target):
+        p, t = p.to(device), t.to(device)
+        bootstrapper.update(p, t)
+
+        for i, o in enumerate(bootstrapper.out):
+            collected_preds[i].append(o[0])
+            collected_target[i].append(o[1])
+
+    collected_preds = [B.cat(cp).cpu() for cp in collected_preds]
+    collected_target = [B.cat(ct).cpu() for ct in collected_target]
collected_target] + + sk_scores = [sk_metric(ct, cp) for ct, cp in zip(collected_target, collected_preds)] + + output = bootstrapper.compute() + # quantile only avaible for pytorch v1.7 and forward + if _TORCH_GREATER_EQUAL_1_7: + assert np.allclose(output["quantile"][0].cpu(), np.quantile(sk_scores, 0.05)) + assert np.allclose(output["quantile"][1].cpu(), np.quantile(sk_scores, 0.95)) + + assert np.allclose(output["mean"].cpu(), np.mean(sk_scores)) + assert np.allclose(output["std"].cpu(), np.std(sk_scores, ddof=1)) + assert np.allclose(output["raw"].cpu(), sk_scores) diff --git a/EE/paddlemetric/src/tests/wrappers/test_multioutput.py b/EE/paddlemetric/src/tests/wrappers/test_multioutput.py new file mode 100644 index 000000000..421dd7221 --- /dev/null +++ b/EE/paddlemetric/src/tests/wrappers/test_multioutput.py @@ -0,0 +1,142 @@ +from collections import namedtuple +from functools import partial +from typing import Any, Callable, Optional + +import pytest +import paddleext.torchapi as B +from sklearn.metrics import accuracy_score +from sklearn.metrics import r2_score as sk_r2score + +from tests.helpers import seed_all +from tests.helpers.testers import BATCH_SIZE, NUM_BATCHES, NUM_CLASSES, MetricTester +from paddlemetrics import Metric +from paddlemetrics.classification import Accuracy +from paddlemetrics.regression import R2Score +from paddlemetrics.wrappers.multioutput import MultioutputWrapper + +seed_all(42) + + +class _MultioutputMetric(Metric): + """Test class that allows passing base metric as a class rather than its instantiation to the wrapper.""" + + def __init__( + self, + base_metric_class, + num_outputs: int = 1, + compute_on_step: bool = True, + dist_sync_on_step: bool = False, + process_group: Any = None, + dist_sync_fn: Optional[Callable] = None, + **base_metric_kwargs, + ) -> None: + super().__init__( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + self.metric = MultioutputWrapper( + base_metric_class( + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + **base_metric_kwargs, + ), + num_outputs=num_outputs, + compute_on_step=compute_on_step, + dist_sync_on_step=dist_sync_on_step, + dist_sync_fn=dist_sync_fn, + ) + + def update(self, preds: B.Tensor, target: B.Tensor) -> None: + """Update the each pair of outputs and predictions.""" + return self.metric.update(preds, target) + + def compute(self) -> B.Tensor: + """Compute the R2 score between each pair of outputs and predictions.""" + return self.metric.compute() + + @B.jit.unused + def forward(self, *args, **kwargs): + """Run forward on the underlying metric.""" + return self.metric(*args, **kwargs) + + def reset(self) -> None: + """Reset the underlying metric state.""" + self.metric.reset() + + +num_targets = 2 + +Input = namedtuple("Input", ["preds", "target"]) + +_multi_target_regression_inputs = Input( + preds=B.rand(NUM_BATCHES, BATCH_SIZE, num_targets), + target=B.rand(NUM_BATCHES, BATCH_SIZE, num_targets), +) +_multi_target_classification_inputs = Input( + preds=B.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES, num_targets), + target=B.randint(NUM_CLASSES, (NUM_BATCHES, BATCH_SIZE, num_targets)), +) + + +def _multi_target_sk_r2score(preds, target, adjusted=0, multioutput="raw_values"): + """Compute R2 score over multiple outputs.""" + sk_preds = preds.view(-1, num_targets).numpy() + sk_target = target.view(-1, num_targets).numpy() + r2_score = 
sk_r2score(sk_target, sk_preds, multioutput=multioutput) + if adjusted != 0: + r2_score = 1 - (1 - r2_score) * (sk_preds.shape[0] - 1) / (sk_preds.shape[0] - adjusted - 1) + return r2_score + + +def _multi_target_sk_accuracy(preds, target, num_outputs): + """Compute accuracy over multiple outputs.""" + accs = [] + for i in range(num_outputs): + accs.append(accuracy_score(B.argmax(preds[:, :, i], dim=1), target[:, i])) + return accs + + +@pytest.mark.parametrize( + "base_metric_class, compare_metric, preds, target, num_outputs, metric_kwargs", + [ + ( + R2Score, + _multi_target_sk_r2score, + _multi_target_regression_inputs.preds, + _multi_target_regression_inputs.target, + num_targets, + {}, + ), + ( + Accuracy, + partial(_multi_target_sk_accuracy, num_outputs=2), + _multi_target_classification_inputs.preds, + _multi_target_classification_inputs.target, + num_targets, + dict(num_classes=NUM_CLASSES), + ), + ], +) +class TestMultioutputWrapper(MetricTester): + """Test the MultioutputWrapper class with regression and classification inner metrics.""" + + @pytest.mark.parametrize("ddp", [True, False]) + @pytest.mark.parametrize("dist_sync_on_step", [True, False]) + def test_multioutput_wrapper( + self, base_metric_class, compare_metric, preds, target, num_outputs, metric_kwargs, ddp, dist_sync_on_step + ): + """Test that the multioutput wrapper properly slices and computes outputs along the output dimension for + both classification and regression metrics.""" + self.run_class_metric_test( + ddp, + preds, + target, + _MultioutputMetric, + compare_metric, + dist_sync_on_step, + metric_args=dict(num_outputs=num_outputs, base_metric_class=base_metric_class, **metric_kwargs), + ) diff --git a/EE/paddlemetric/src/tests/wrappers/test_tracker.py b/EE/paddlemetric/src/tests/wrappers/test_tracker.py new file mode 100644 index 000000000..07a94eea0 --- /dev/null +++ b/EE/paddlemetric/src/tests/wrappers/test_tracker.py @@ -0,0 +1,76 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
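Before the tracker tests that follow, a minimal sketch of the wrapper exercised above, used directly rather than through `MetricTester` (illustrative only; the constructor arguments mirror how `_MultioutputMetric` wires it up):

```python
# Sketch, not part of the patch: MultioutputWrapper applied directly.
import paddleext.torchapi as B
from paddlemetrics.regression import R2Score
from paddlemetrics.wrappers.multioutput import MultioutputWrapper

# The wrapper keeps one internal copy of the base metric per output column.
metric = MultioutputWrapper(R2Score(), num_outputs=2)

preds = B.rand(16, 2)  # [batch, num_outputs]
target = B.rand(16, 2)
metric.update(preds, target)
print(metric.compute())  # one R2 score per output column
```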
+from functools import partial + +import pytest +import paddleext.torchapi as B + +from tests.helpers import seed_all +from paddlemetrics import Accuracy, MeanAbsoluteError, MeanSquaredError, Precision, Recall +from paddlemetrics.wrappers import MetricTracker + +seed_all(42) + + +def test_raises_error_on_wrong_input(): + with pytest.raises(TypeError, match="metric arg need to be an instance of a paddlemetrics metric .*"): + MetricTracker([1, 2, 3]) + + +@pytest.mark.parametrize( + "method, method_input", + [ + ("update", (B.randint(10, (50,)), B.randint(10, (50,)))), + ("forward", (B.randint(10, (50,)), B.randint(10, (50,)))), + ("compute", None), + ], +) +def test_raises_error_if_increment_not_called(method, method_input): + tracker = MetricTracker(Accuracy(num_classes=10)) + with pytest.raises(ValueError, match=f"`{method}` cannot be called before .*"): + if method_input is not None: + getattr(tracker, method)(*method_input) + else: + getattr(tracker, method)() + + +@pytest.mark.parametrize( + "base_metric, metric_input, maximize", + [ + (partial(Accuracy, num_classes=10), (B.randint(10, (50,)), B.randint(10, (50,))), True), + (partial(Precision, num_classes=10), (B.randint(10, (50,)), B.randint(10, (50,))), True), + (partial(Recall, num_classes=10), (B.randint(10, (50,)), B.randint(10, (50,))), True), + (MeanSquaredError, (B.randn(50), B.randn(50)), False), + (MeanAbsoluteError, (B.randn(50), B.randn(50)), False), + ], +) +def test_tracker(base_metric, metric_input, maximize): + tracker = MetricTracker(base_metric(), maximize=maximize) + for i in range(5): + tracker.increment() + # check both update and forward works + for _ in range(5): + tracker.update(*metric_input) + for _ in range(5): + tracker(*metric_input) + + val = tracker.compute() + assert val != 0.0 + assert tracker.n_steps == i + 1 + + assert tracker.n_steps == 5 + assert tracker.compute_all().shape[0] == 5 + val, idx = tracker.best_metric(return_step=True) + assert val != 0.0 + assert idx in list(range(5)) From bd8e584778dc3dfe77319754411ac053b760cbed Mon Sep 17 00:00:00 2001 From: rudaoshi Date: Sat, 26 Nov 2022 11:18:57 +0800 Subject: [PATCH 2/4] remove unnecessary file --- .../src/build/lib/paddlemetrics/__about__.py | 27 - .../src/build/lib/paddlemetrics/__init__.py | 143 ---- .../build/lib/paddlemetrics/aggregation.py | 445 ---------- .../build/lib/paddlemetrics/audio/__init__.py | 19 - .../src/build/lib/paddlemetrics/audio/pesq.py | 130 --- .../src/build/lib/paddlemetrics/audio/pit.py | 113 --- .../build/lib/paddlemetrics/audio/si_sdr.py | 105 --- .../build/lib/paddlemetrics/audio/si_snr.py | 101 --- .../src/build/lib/paddlemetrics/audio/snr.py | 110 --- .../src/build/lib/paddlemetrics/audio/stoi.py | 133 --- .../paddlemetrics/classification/__init__.py | 34 - .../paddlemetrics/classification/accuracy.py | 276 ------- .../lib/paddlemetrics/classification/auc.py | 91 -- .../lib/paddlemetrics/classification/auroc.py | 186 ----- .../classification/average_precision.py | 147 ---- .../classification/binned_precision_recall.py | 324 -------- .../classification/calibration_error.py | 115 --- .../classification/cohen_kappa.py | 119 --- .../classification/confusion_matrix.py | 141 ---- .../paddlemetrics/classification/f_beta.py | 301 ------- .../classification/hamming_distance.py | 110 --- .../lib/paddlemetrics/classification/hinge.py | 127 --- .../lib/paddlemetrics/classification/iou.py | 107 --- .../classification/kl_divergence.py | 109 --- .../classification/matthews_corrcoef.py | 111 --- 
.../classification/precision_recall.py | 320 -------- .../classification/precision_recall_curve.py | 149 ---- .../lib/paddlemetrics/classification/roc.py | 169 ---- .../classification/specificity.py | 171 ---- .../classification/stat_scores.py | 267 ------ .../build/lib/paddlemetrics/collections.py | 239 ------ .../lib/paddlemetrics/functional/__init__.py | 138 ---- .../functional/audio/__init__.py | 19 - .../paddlemetrics/functional/audio/pesq.py | 100 --- .../lib/paddlemetrics/functional/audio/pit.py | 206 ----- .../paddlemetrics/functional/audio/si_sdr.py | 64 -- .../paddlemetrics/functional/audio/si_snr.py | 46 -- .../lib/paddlemetrics/functional/audio/snr.py | 66 -- .../paddlemetrics/functional/audio/stoi.py | 105 --- .../functional/classification/__init__.py | 32 - .../functional/classification/accuracy.py | 418 ---------- .../functional/classification/auc.py | 133 --- .../functional/classification/auroc.py | 257 ------ .../classification/average_precision.py | 236 ------ .../classification/calibration_error.py | 156 ---- .../functional/classification/cohen_kappa.py | 112 --- .../classification/confusion_matrix.py | 184 ----- .../functional/classification/dice.py | 112 --- .../functional/classification/f_beta.py | 351 -------- .../classification/hamming_distance.py | 97 --- .../functional/classification/hinge.py | 231 ------ .../functional/classification/iou.py | 133 --- .../classification/kl_divergence.py | 110 --- .../classification/matthews_corrcoef.py | 78 -- .../classification/precision_recall.py | 568 ------------- .../classification/precision_recall_curve.py | 332 -------- .../functional/classification/roc.py | 273 ------ .../functional/classification/specificity.py | 215 ----- .../functional/classification/stat_scores.py | 396 --------- .../functional/image/__init__.py | 16 - .../functional/image/gradients.py | 81 -- .../paddlemetrics/functional/image/psnr.py | 150 ---- .../paddlemetrics/functional/image/ssim.py | 225 ----- .../functional/pairwise/__init__.py | 17 - .../functional/pairwise/cosine.py | 85 -- .../functional/pairwise/euclidean.py | 79 -- .../functional/pairwise/helpers.py | 59 -- .../functional/pairwise/linear.py | 78 -- .../functional/pairwise/manhatten.py | 78 -- .../functional/regression/__init__.py | 27 - .../regression/cosine_similarity.py | 98 --- .../regression/explained_variance.py | 137 ---- .../regression/mean_absolute_error.py | 73 -- .../mean_absolute_percentage_error.py | 91 -- .../regression/mean_squared_error.py | 74 -- .../regression/mean_squared_log_error.py | 76 -- .../functional/regression/pearson.py | 102 --- .../paddlemetrics/functional/regression/r2.py | 173 ---- .../functional/regression/spearman.py | 129 --- ...ymmetric_mean_absolute_percentage_error.py | 99 --- .../functional/regression/tweedie_deviance.py | 139 ---- .../functional/retrieval/__init__.py | 22 - .../functional/retrieval/average_precision.py | 49 -- .../functional/retrieval/fall_out.py | 62 -- .../functional/retrieval/hit_rate.py | 57 -- .../functional/retrieval/ndcg.py | 72 -- .../functional/retrieval/precision.py | 60 -- .../functional/retrieval/r_precision.py | 49 -- .../functional/retrieval/recall.py | 61 -- .../functional/retrieval/reciprocal_rank.py | 49 -- .../functional/self_supervised.py | 57 -- .../paddlemetrics/functional/text/__init__.py | 17 - .../lib/paddlemetrics/functional/text/bert.py | 650 --------------- .../lib/paddlemetrics/functional/text/bleu.py | 171 ---- .../paddlemetrics/functional/text/rouge.py | 325 -------- .../functional/text/sacre_bleu.py | 
355 -------- .../lib/paddlemetrics/functional/text/wer.py | 114 --- .../build/lib/paddlemetrics/image/__init__.py | 19 - .../src/build/lib/paddlemetrics/image/fid.py | 283 ------- .../lib/paddlemetrics/image/inception.py | 179 ---- .../src/build/lib/paddlemetrics/image/kid.py | 277 ------- .../paddlemetrics/image/lpip_similarity.py | 156 ---- .../src/build/lib/paddlemetrics/image/psnr.py | 147 ---- .../src/build/lib/paddlemetrics/image/ssim.py | 105 --- .../src/build/lib/paddlemetrics/metric.py | 775 ------------------ .../lib/paddlemetrics/regression/__init__.py | 26 - .../regression/cosine_similarity.py | 105 --- .../regression/explained_variance.py | 136 --- .../regression/mean_absolute_error.py | 86 -- .../mean_absolute_percentage_error.py | 95 --- .../regression/mean_squared_error.py | 91 -- .../regression/mean_squared_log_error.py | 90 -- .../lib/paddlemetrics/regression/pearson.py | 140 ---- .../build/lib/paddlemetrics/regression/r2.py | 149 ---- .../lib/paddlemetrics/regression/spearman.py | 96 --- ...ymmetric_mean_absolute_percentage_error.py | 92 --- .../regression/tweedie_deviance.py | 116 --- .../lib/paddlemetrics/retrieval/__init__.py | 22 - .../retrieval/mean_average_precision.py | 70 -- .../retrieval/mean_reciprocal_rank.py | 70 -- .../retrieval/retrieval_fallout.py | 131 --- .../retrieval/retrieval_hit_rate.py | 98 --- .../retrieval/retrieval_metric.py | 147 ---- .../paddlemetrics/retrieval/retrieval_ndcg.py | 99 --- .../retrieval/retrieval_precision.py | 98 --- .../retrieval/retrieval_r_precision.py | 70 -- .../retrieval/retrieval_recall.py | 98 --- .../build/lib/paddlemetrics/setup_tools.py | 74 -- .../build/lib/paddlemetrics/text/__init__.py | 18 - .../src/build/lib/paddlemetrics/text/bert.py | 251 ------ .../src/build/lib/paddlemetrics/text/bleu.py | 120 --- .../src/build/lib/paddlemetrics/text/rouge.py | 171 ---- .../lib/paddlemetrics/text/sacre_bleu.py | 134 --- .../src/build/lib/paddlemetrics/text/wer.py | 109 --- .../lib/paddlemetrics/utilities/__init__.py | 3 - .../lib/paddlemetrics/utilities/checks.py | 582 ------------- .../build/lib/paddlemetrics/utilities/data.py | 240 ------ .../paddlemetrics/utilities/distributed.py | 145 ---- .../lib/paddlemetrics/utilities/enums.py | 83 -- .../lib/paddlemetrics/utilities/exceptions.py | 17 - .../lib/paddlemetrics/utilities/imports.py | 90 -- .../lib/paddlemetrics/utilities/prints.py | 49 -- .../lib/paddlemetrics/wrappers/__init__.py | 16 - .../paddlemetrics/wrappers/bootstrapping.py | 173 ---- .../lib/paddlemetrics/wrappers/multioutput.py | 165 ---- .../lib/paddlemetrics/wrappers/tracker.py | 127 --- .../paddlemetrics-1.0.0b0-py3-none-any.whl | Bin 306551 -> 0 bytes .../src/paddle_extension.egg-info/PKG-INFO | 22 - .../src/paddle_extension.egg-info/SOURCES.txt | 152 ---- .../dependency_links.txt | 1 - .../paddle_extension.egg-info/top_level.txt | 1 - .../src/paddlemetrics.egg-info/PKG-INFO | 22 - .../src/paddlemetrics.egg-info/SOURCES.txt | 152 ---- .../dependency_links.txt | 1 - .../src/paddlemetrics.egg-info/top_level.txt | 1 - 155 files changed, 21348 deletions(-) delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/__about__.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/__init__.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/aggregation.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/audio/__init__.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/audio/pesq.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/audio/pit.py 
delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/audio/si_sdr.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/audio/si_snr.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/audio/snr.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/audio/stoi.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/classification/__init__.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/classification/accuracy.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/classification/auc.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/classification/auroc.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/classification/average_precision.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/classification/binned_precision_recall.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/classification/calibration_error.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/classification/cohen_kappa.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/classification/confusion_matrix.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/classification/f_beta.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/classification/hamming_distance.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/classification/hinge.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/classification/iou.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/classification/kl_divergence.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/classification/matthews_corrcoef.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/classification/precision_recall.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/classification/precision_recall_curve.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/classification/roc.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/classification/specificity.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/classification/stat_scores.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/collections.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/__init__.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/__init__.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/pesq.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/pit.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/si_sdr.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/si_snr.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/snr.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/stoi.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/__init__.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/accuracy.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/auc.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/auroc.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/average_precision.py delete mode 100644 
EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/calibration_error.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/cohen_kappa.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/confusion_matrix.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/dice.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/f_beta.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/hamming_distance.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/hinge.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/iou.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/kl_divergence.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/matthews_corrcoef.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/precision_recall.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/precision_recall_curve.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/roc.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/specificity.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/stat_scores.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/image/__init__.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/image/gradients.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/image/psnr.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/image/ssim.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/__init__.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/cosine.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/euclidean.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/helpers.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/linear.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/manhatten.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/__init__.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/cosine_similarity.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/explained_variance.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/mean_absolute_error.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/mean_absolute_percentage_error.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/mean_squared_error.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/mean_squared_log_error.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/pearson.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/r2.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/spearman.py delete mode 100644 
EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/symmetric_mean_absolute_percentage_error.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/tweedie_deviance.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/__init__.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/average_precision.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/fall_out.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/hit_rate.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/ndcg.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/precision.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/r_precision.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/recall.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/reciprocal_rank.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/self_supervised.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/__init__.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/bert.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/bleu.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/rouge.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/sacre_bleu.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/wer.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/image/__init__.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/image/fid.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/image/inception.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/image/kid.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/image/lpip_similarity.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/image/psnr.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/image/ssim.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/metric.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/regression/__init__.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/regression/cosine_similarity.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/regression/explained_variance.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/regression/mean_absolute_error.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/regression/mean_absolute_percentage_error.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/regression/mean_squared_error.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/regression/mean_squared_log_error.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/regression/pearson.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/regression/r2.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/regression/spearman.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/regression/symmetric_mean_absolute_percentage_error.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/regression/tweedie_deviance.py delete mode 100644 
EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/__init__.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/mean_average_precision.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/mean_reciprocal_rank.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_fallout.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_hit_rate.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_metric.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_ndcg.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_precision.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_r_precision.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_recall.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/setup_tools.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/text/__init__.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/text/bert.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/text/bleu.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/text/rouge.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/text/sacre_bleu.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/text/wer.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/utilities/__init__.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/utilities/checks.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/utilities/data.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/utilities/distributed.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/utilities/enums.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/utilities/exceptions.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/utilities/imports.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/utilities/prints.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/wrappers/__init__.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/wrappers/bootstrapping.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/wrappers/multioutput.py delete mode 100644 EE/paddlemetric/src/build/lib/paddlemetrics/wrappers/tracker.py delete mode 100644 EE/paddlemetric/src/dist/paddlemetrics-1.0.0b0-py3-none-any.whl delete mode 100644 EE/paddlemetric/src/paddle_extension.egg-info/PKG-INFO delete mode 100644 EE/paddlemetric/src/paddle_extension.egg-info/SOURCES.txt delete mode 100644 EE/paddlemetric/src/paddle_extension.egg-info/dependency_links.txt delete mode 100644 EE/paddlemetric/src/paddle_extension.egg-info/top_level.txt delete mode 100644 EE/paddlemetric/src/paddlemetrics.egg-info/PKG-INFO delete mode 100644 EE/paddlemetric/src/paddlemetrics.egg-info/SOURCES.txt delete mode 100644 EE/paddlemetric/src/paddlemetrics.egg-info/dependency_links.txt delete mode 100644 EE/paddlemetric/src/paddlemetrics.egg-info/top_level.txt diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/__about__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/__about__.py deleted file mode 100644 index 53a9cfa4f..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/__about__.py +++ /dev/null @@ -1,27 +0,0 @@ -__version__ = "0.6.0dev" -__author__ = "PyTorchLightning et al." 
-__author_email__ = "name@pytorchlightning.ai" -__license__ = "Apache-2.0" -__copyright__ = f"Copyright (c) 2020-2021, {__author__}." -__homepage__ = "https://github.com/PyTorchLightning/metrics" -__docs__ = "PyTorch native Metrics" -__docs_url__ = "https://paddlemetrics.readthedocs.io/en/stable/" -__long_doc__ = """ -paddlemetrics is a metrics API created for easy metric development and usage in both PyTorch and -[PyTorch Lightning](https://pytorch-lightning.readthedocs.io/en/stable/). It was originally a part of -Pytorch Lightning, but got split off so users could take advantage of the large collection of metrics -implemented without having to install Pytorch Lightning (even though we would love for you to try it out). -We currently have around 25+ metrics implemented and we continuously is adding more metrics, both within -already covered domains (classification, regression ect.) but also new domains (object detection ect.). -We make sure that all our metrics are rigorously tested such that you can trust them. -""" - -__all__ = [ - "__author__", - "__author_email__", - "__copyright__", - "__docs__", - "__homepage__", - "__license__", - "__version__", -] diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/__init__.py deleted file mode 100644 index ea557086b..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/__init__.py +++ /dev/null @@ -1,143 +0,0 @@ -r"""Root package info.""" -import logging as __logging -import os -import sys - -from paddlemetrics.__about__ import * # noqa: F401, F403 - -_logger = __logging.getLogger("paddlemetrics") -_logger.addHandler(__logging.StreamHandler()) -_logger.setLevel(__logging.INFO) - -_PACKAGE_ROOT = os.path.dirname(__file__) -_PROJECT_ROOT = os.path.dirname(_PACKAGE_ROOT) - -from paddlemetrics import functional # noqa: E402 -from paddlemetrics.aggregation import CatMetric, MaxMetric, MeanMetric, MinMetric, SumMetric # noqa: E402 -from paddlemetrics.audio import PESQ, PIT, SI_SDR, SI_SNR, SNR, STOI # noqa: E402 -from paddlemetrics.classification import ( # noqa: E402 - AUC, - AUROC, - F1, - ROC, - Accuracy, - AveragePrecision, - BinnedAveragePrecision, - BinnedPrecisionRecallCurve, - BinnedRecallAtFixedPrecision, - CalibrationError, - CohenKappa, - ConfusionMatrix, - FBeta, - HammingDistance, - Hinge, - IoU, - KLDivergence, - MatthewsCorrcoef, - Precision, - PrecisionRecallCurve, - Recall, - Specificity, - StatScores, -) -from paddlemetrics.collections import MetricCollection # noqa: E402 -#from paddlemetrics.image import FID, IS, KID, LPIPS, PSNR, SSIM # noqa: E402 -from paddlemetrics.metric import Metric # noqa: E402 -from paddlemetrics.regression import ( # noqa: E402 - CosineSimilarity, - ExplainedVariance, - MeanAbsoluteError, - MeanAbsolutePercentageError, - MeanSquaredError, - MeanSquaredLogError, - PearsonCorrcoef, - R2Score, - SpearmanCorrcoef, - SymmetricMeanAbsolutePercentageError, - TweedieDevianceScore, -) -from paddlemetrics.retrieval import ( # noqa: E402 - RetrievalFallOut, - RetrievalHitRate, - RetrievalMAP, - RetrievalMRR, - RetrievalNormalizedDCG, - RetrievalPrecision, - RetrievalRecall, - RetrievalRPrecision, -) -from paddlemetrics.text import WER, BLEUScore, ROUGEScore, SacreBLEUScore # noqa: E402 BERTScore, -from paddlemetrics.wrappers import BootStrapper, MetricTracker, MultioutputWrapper # noqa: E402 - -__all__ = [ - "functional", - "Accuracy", - "AUC", - "AUROC", - "AveragePrecision", - "BinnedAveragePrecision", - "BinnedPrecisionRecallCurve", - 
"BinnedRecallAtFixedPrecision", -# "BERTScore", - "BLEUScore", - "BootStrapper", - "CalibrationError", - "CatMetric", - "CohenKappa", - "ConfusionMatrix", - "CosineSimilarity", - "TweedieDevianceScore", - "ExplainedVariance", - "F1", - "FBeta", -# "FID", - "HammingDistance", - "Hinge", - "IoU", -# "IS", -# "KID", - "KLDivergence", -# "LPIPS", - "MatthewsCorrcoef", - "MaxMetric", - "MeanAbsoluteError", - "MeanAbsolutePercentageError", - "MeanMetric", - "MeanSquaredError", - "MeanSquaredLogError", - "Metric", - "MetricCollection", - "MetricTracker", - "MinMetric", - "MultioutputWrapper", - "PearsonCorrcoef", - "PESQ", - "PIT", - "Precision", - "PrecisionRecallCurve", -# "PSNR", - "R2Score", - "Recall", - "RetrievalFallOut", - "RetrievalHitRate", - "RetrievalMAP", - "RetrievalMRR", - "RetrievalNormalizedDCG", - "RetrievalPrecision", - "RetrievalRecall", - "RetrievalRPrecision", - "ROC", - "ROUGEScore", - "SacreBLEUScore", - "SI_SDR", - "SI_SNR", - "SNR", - "SpearmanCorrcoef", - "Specificity", -# "SSIM", - "StatScores", - "STOI", - "SumMetric", - "SymmetricMeanAbsolutePercentageError", - "WER", -] diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/aggregation.py b/EE/paddlemetric/src/build/lib/paddlemetrics/aggregation.py deleted file mode 100644 index a95c51c0e..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/aggregation.py +++ /dev/null @@ -1,445 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import warnings -from typing import Any, Callable, List, Optional, Union - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.metric import Metric -from paddlemetrics.utilities.data import dim_zero_cat - - -class BaseAggregator(Metric): - """Base class for aggregation metrics. - - Args: - fn: string specifying the reduction function - default_value: default tensor value to use for the metric state - nan_strategy: options: - - ``'error'``: if any `nan` values are encounted will give a RuntimeError - - ``'warn'``: if any `nan` values are encounted will give a warning and continue - - ``'ignore'``: all `nan` values are silently removed - - a float: if a float is provided will impude any `nan` values with this value - - compute_on_step: - Forward only calls ``update()`` and returns None if this is - set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. - process_group: - Specify the process group on which synchronization is called. - default: None (which selects the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. - When `None`, DDP will be used to perform the allgather. 
- - Raises: - ValueError: - If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float - """ - - value: Tensor - is_differentiable = None - higher_is_better = None - - def __init__( - self, - fn: Union[Callable, str], - default_value: Union[Tensor, List], - nan_strategy: Union[str, float] = "error", - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ): - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - allowed_nan_strategy = ("error", "warn", "ignore") - if nan_strategy not in allowed_nan_strategy and not isinstance(nan_strategy, float): - raise ValueError( - f"Arg `nan_strategy` should either be a float or one of {allowed_nan_strategy}" - f" but got {nan_strategy}." - ) - - self.nan_strategy = nan_strategy - self.add_state("value", default=default_value, dist_reduce_fx=fn) - - def _cast_and_nan_check_input(self, x: Union[float, Tensor]) -> Tensor: - """Converts input x to a tensor if not already and afterwards checks for nans that either give an error, - warning or just ignored.""" - if not isinstance(x, Tensor): - x = B.as_tensor(x, dtype=B.float32, device=self.device) - - nans = B.isnan(x) - if any(nans.flatten()): - if self.nan_strategy == "error": - raise RuntimeError("Encounted `nan` values in tensor") - if self.nan_strategy == "warn": - warnings.warn("Encounted `nan` values in tensor. Will be removed.", UserWarning) - x = x[~nans] - elif self.nan_strategy == "ignore": - x = x[~nans] - else: - x[nans] = self.nan_strategy - - return x.float() - - def update(self, value: Union[float, Tensor]) -> None: # type: ignore - """Overwrite in child class.""" - pass - - def compute(self) -> Tensor: - """Compute the aggregated value.""" - return self.value.squeeze() if isinstance(self.value, Tensor) else self.value - - -class MaxMetric(BaseAggregator): - """Aggregate a stream of value into their maximum value. - - Args: - nan_strategy: options: - - ``'error'``: if any `nan` values are encounted will give a RuntimeError - - ``'warn'``: if any `nan` values are encounted will give a warning and continue - - ``'ignore'``: all `nan` values are silently removed - - a float: if a float is provided will impude any `nan` values with this value - - compute_on_step: - Forward only calls ``update()`` and returns None if this is - set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. - process_group: - Specify the process group on which synchronization is called. - default: None (which selects the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. - When `None`, DDP will be used to perform the allgather. - - Raises: - ValueError: - If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float - - Example: - >>> from paddlemetrics import MaxMetric - >>> metric = MaxMetric() - >>> metric.update(1) - >>> metric.update(B.tensor([2, 3])) - >>> metric.compute() - tensor(3.) 
- """ - - def __init__( - self, - nan_strategy: Union[str, float] = "warn", - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ): - super().__init__( - "max", - -B.tensor(float("inf")), - nan_strategy, - compute_on_step, - dist_sync_on_step, - process_group, - dist_sync_fn, - ) - - def update(self, value: Union[float, Tensor]) -> None: # type: ignore - """Update state with data. - - Args: - value: Either a float or tensor containing data. Additional tensor - dimensions will be flattened - """ - value = self._cast_and_nan_check_input(value) - if any(value.flatten()): # make sure tensor not empty - self.value = B.max(self.value, B.max(value)) - - -class MinMetric(BaseAggregator): - """Aggregate a stream of value into their minimum value. - - Args: - nan_strategy: options: - - ``'error'``: if any `nan` values are encounted will give a RuntimeError - - ``'warn'``: if any `nan` values are encounted will give a warning and continue - - ``'ignore'``: all `nan` values are silently removed - - a float: if a float is provided will impude any `nan` values with this value - - compute_on_step: - Forward only calls ``update()`` and returns None if this is - set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. - process_group: - Specify the process group on which synchronization is called. - default: None (which selects the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. - When `None`, DDP will be used to perform the allgather. - - Raises: - ValueError: - If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float - - Example: - >>> from paddlemetrics import MinMetric - >>> metric = MinMetric() - >>> metric.update(1) - >>> metric.update(B.tensor([2, 3])) - >>> metric.compute() - tensor(1.) - """ - - def __init__( - self, - nan_strategy: Union[str, float] = "warn", - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ): - super().__init__( - "min", - B.tensor(float("inf")), - nan_strategy, - compute_on_step, - dist_sync_on_step, - process_group, - dist_sync_fn, - ) - - def update(self, value: Union[float, Tensor]) -> None: # type: ignore - """Update state with data. - - Args: - value: Either a float or tensor containing data. Additional tensor - dimensions will be flattened - """ - value = self._cast_and_nan_check_input(value) - if any(value.flatten()): # make sure tensor not empty - self.value = B.min(self.value, B.min(value)) - - -class SumMetric(BaseAggregator): - """Aggregate a stream of value into their sum. - - Args: - nan_strategy: options: - - ``'error'``: if any `nan` values are encounted will give a RuntimeError - - ``'warn'``: if any `nan` values are encounted will give a warning and continue - - ``'ignore'``: all `nan` values are silently removed - - a float: if a float is provided will impude any `nan` values with this value - - compute_on_step: - Forward only calls ``update()`` and returns None if this is - set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. - process_group: - Specify the process group on which synchronization is called. 
- default: None (which selects the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. - When `None`, DDP will be used to perform the allgather. - - Raises: - ValueError: - If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float - - Example: - >>> from paddlemetrics import SumMetric - >>> metric = SumMetric() - >>> metric.update(1) - >>> metric.update(B.tensor([2, 3])) - >>> metric.compute() - tensor(6.) - """ - - def __init__( - self, - nan_strategy: Union[str, float] = "warn", - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ): - super().__init__( - "sum", B.zeros(1), nan_strategy, compute_on_step, dist_sync_on_step, process_group, dist_sync_fn - ) - - def update(self, value: Union[float, Tensor]) -> None: # type: ignore - """Update state with data. - - Args: - value: Either a float or tensor containing data. Additional tensor - dimensions will be flattened - """ - value = self._cast_and_nan_check_input(value) - self.value += value.sum() - - -class CatMetric(BaseAggregator): - """Concatenate a stream of values. - - Args: - nan_strategy: options: - - ``'error'``: if any `nan` values are encounted will give a RuntimeError - - ``'warn'``: if any `nan` values are encounted will give a warning and continue - - ``'ignore'``: all `nan` values are silently removed - - a float: if a float is provided will impude any `nan` values with this value - - compute_on_step: - Forward only calls ``update()`` and returns None if this is - set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. - process_group: - Specify the process group on which synchronization is called. - default: None (which selects the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. - When `None`, DDP will be used to perform the allgather. - - Raises: - ValueError: - If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float - - Example: - >>> from paddlemetrics import CatMetric - >>> metric = CatMetric() - >>> metric.update(1) - >>> metric.update(B.tensor([2, 3])) - >>> metric.compute() - tensor([1., 2., 3.]) - """ - - def __init__( - self, - nan_strategy: Union[str, float] = "warn", - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ): - super().__init__("cat", [], nan_strategy, compute_on_step, dist_sync_on_step, process_group, dist_sync_fn) - - def update(self, value: Union[float, Tensor]) -> None: # type: ignore - """Update state with data. - - Args: - value: Either a float or tensor containing data. Additional tensor - dimensions will be flattened - """ - value = self._cast_and_nan_check_input(value) - if any(value.flatten()): - self.value.append(value) - - def compute(self) -> Tensor: - """Compute the aggregated value.""" - if isinstance(self.value, list) and self.value: - return dim_zero_cat(self.value) - return self.value - - -class MeanMetric(BaseAggregator): - """Aggregate a stream of value into their mean value. 
- - Args: - nan_strategy: options: - - ``'error'``: if any `nan` values are encounted will give a RuntimeError - - ``'warn'``: if any `nan` values are encounted will give a warning and continue - - ``'ignore'``: all `nan` values are silently removed - - a float: if a float is provided will impude any `nan` values with this value - - compute_on_step: - Forward only calls ``update()`` and returns None if this is - set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. - process_group: - Specify the process group on which synchronization is called. - default: None (which selects the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. - When `None`, DDP will be used to perform the allgather. - - Raises: - ValueError: - If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float - - Example: - >>> from paddlemetrics import MeanMetric - >>> metric = MeanMetric() - >>> metric.update(1) - >>> metric.update(B.tensor([2, 3])) - >>> metric.compute() - tensor([2.]) - """ - - def __init__( - self, - nan_strategy: Union[str, float] = "warn", - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ): - super().__init__( - "sum", B.zeros(1), nan_strategy, compute_on_step, dist_sync_on_step, process_group, dist_sync_fn - ) - self.add_state("weight", default=B.zeros(1), dist_reduce_fx="sum") - - def update(self, value: Union[float, Tensor], weight: Union[float, Tensor] = 1.0) -> None: # type: ignore - """Update state with data. - - Args: - value: Either a float or tensor containing data. Additional tensor - dimensions will be flattened - weight: Either a float or tensor containing weights for calculating - the average. Shape of weight should be able to broadcast with - the shape of `value`. Default to `1.0` corresponding to simple - harmonic average. - """ - value = self._cast_and_nan_check_input(value) - weight = self._cast_and_nan_check_input(weight) - - # broadcast weight to values shape - if not hasattr(B, "broadcast_to"): - if weight.shape == (): - weight = B.ones_like(value) * weight - if weight.shape != value.shape: - raise ValueError("Broadcasting not supported on PyTorch <1.8") - else: - weight = B.broadcast_to(weight, value.shape) - - self.value += (value * weight).sum() - self.weight += weight.sum() - - def compute(self) -> Tensor: - """Compute the aggregated value.""" - return self.value / self.weight diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/audio/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/audio/__init__.py deleted file mode 100644 index efd0b451e..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/audio/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
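The aggregation docstrings in the build copy above describe the `nan_strategy` options and `MeanMetric`'s weighted updates; a minimal sketch of that behaviour against the live package (assuming it matches this deleted duplicate) would be:

```python
# Sketch, not part of the patch: nan_strategy and weighted MeanMetric updates.
import paddleext.torchapi as B
from paddlemetrics import MeanMetric

metric = MeanMetric(nan_strategy="ignore")         # silently drop nan values
metric.update(B.tensor([1.0, float("nan"), 3.0]))  # nan removed -> values 1, 3
metric.update(2.0, weight=2.0)                     # scalar value, broadcast weight
print(metric.compute())  # (1*1 + 3*1 + 2*2) / (1 + 1 + 2) = tensor([2.])
```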
-from paddlemetrics.audio.pesq import PESQ # noqa: F401 -from paddlemetrics.audio.pit import PIT # noqa: F401 -from paddlemetrics.audio.si_sdr import SI_SDR # noqa: F401 -from paddlemetrics.audio.si_snr import SI_SNR # noqa: F401 -from paddlemetrics.audio.snr import SNR # noqa: F401 -from paddlemetrics.audio.stoi import STOI # noqa: F401 diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/audio/pesq.py b/EE/paddlemetric/src/build/lib/paddlemetrics/audio/pesq.py deleted file mode 100644 index d45fab53d..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/audio/pesq.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Callable, Optional - -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.functional.audio.pesq import pesq -from paddlemetrics.metric import Metric -from paddlemetrics.utilities.imports import _PESQ_AVAILABLE - - -class PESQ(Metric): - """PESQ (Perceptual Evaluation of Speech Quality) - - This is a wrapper for the pesq package [1]. . Note that input will be moved to `cpu` - to perform the metric calculation. - - .. note:: using this metrics requires you to have ``pesq`` install. Either install as ``pip install - paddlemetrics[audio]`` or ``pip install pesq`` - - Forward accepts - - - ``preds``: ``shape [...,time]`` - - ``target``: ``shape [...,time]`` - - Args: - fs: - sampling frequency, should be 16000 or 8000 (Hz) - mode: - 'wb' (wide-band) or 'nb' (narrow-band) - keep_same_device: - whether to move the pesq value to the device of preds - compute_on_step: - Forward only calls ``update()`` and return ``None`` if this is set to ``False``. - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step - process_group: - Specify the process group on which synchronization is called. - default: ``None`` (which selects the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. 
When ``None``, DDP - will be used to perform the allgather - - Raises: - ValueError: - If ``peqs`` package is not installed - ValueError: - If ``fs`` is not either ``8000`` or ``16000`` - ValueError: - If ``mode`` is not either ``"wb"`` or ``"nb"`` - - Example: - >>> from paddlemetrics.audio import PESQ - >>> import torchapi as B - >>> g = B.manual_seed(1) - >>> preds = B.randn(8000) - >>> target = B.randn(8000) - >>> nb_pesq = PESQ(8000, 'nb') - >>> nb_pesq(preds, target) - tensor(2.2076) - >>> wb_pesq = PESQ(16000, 'wb') - >>> wb_pesq(preds, target) - tensor(1.7359) - - References: - [1] https://github.com/ludlows/python-pesq - """ - - sum_pesq: Tensor - total: Tensor - is_differentiable = False - higher_is_better = True - - def __init__( - self, - fs: int, - mode: str, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Optional[Callable[[Tensor], Tensor]] = None, - ) -> None: - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - if not _PESQ_AVAILABLE: - raise ValueError( - "PESQ metric requires that pesq is installed." - "Either install as `pip install paddlemetrics[audio]` or `pip install pesq`" - ) - if fs not in (8000, 16000): - raise ValueError(f"Expected argument `fs` to either be 8000 or 16000 but got {fs}") - self.fs = fs - if mode not in ("wb", "nb"): - raise ValueError(f"Expected argument `mode` to either be 'wb' or 'nb' but got {mode}") - self.mode = mode - - self.add_state("sum_pesq", default=tensor(0.0), dist_reduce_fx="sum") - self.add_state("total", default=tensor(0), dist_reduce_fx="sum") - - def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore - """Update state with predictions and targets. - - Args: - preds: Predictions from model - target: Ground truth values - """ - pesq_batch = pesq(preds, target, self.fs, self.mode, False).to(self.sum_pesq.device) - - self.sum_pesq += pesq_batch.sum() - self.total += pesq_batch.numel() - - def compute(self) -> Tensor: - """Computes average PESQ.""" - return self.sum_pesq / self.total diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/audio/pit.py b/EE/paddlemetric/src/build/lib/paddlemetrics/audio/pit.py deleted file mode 100644 index 9d9dc7576..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/audio/pit.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Callable, Dict, Optional - -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.functional.audio.pit import pit -from paddlemetrics.metric import Metric - - -class PIT(Metric): - """Permutation invariant training (PIT). The PIT implements the famous Permutation Invariant Training method. - - [1] in speech separation field in order to calculate audio metrics in a permutation invariant way. 
- - Forward accepts - - - ``preds``: ``shape [batch, spk, ...]`` - - ``target``: ``shape [batch, spk, ...]`` - - Args: - metric_func: - a metric function accept a batch of target and estimate, i.e. metric_func(preds[:, i, ...], - target[:, j, ...]), and returns a batch of metric tensors [batch] - eval_func: - the function to find the best permutation, can be 'min' or 'max', i.e. the smaller the better - or the larger the better. - compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. - process_group: - Specify the process group on which synchronization is called. default: None (which selects the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. When `None`, DDP - will be used to perform the allgather. - kwargs: - additional args for metric_func - - Returns: - average PIT metric - - Example: - >>> import torchapi as B - >>> from paddlemetrics import PIT - >>> from paddlemetrics.functional import si_snr - >>> _ = B.manual_seed(42) - >>> preds = B.randn(3, 2, 5) # [batch, spk, time] - >>> target = B.randn(3, 2, 5) # [batch, spk, time] - >>> pit = PIT(si_snr, 'max') - >>> pit(preds, target) - tensor(-2.1065) - - Reference: - [1] D. Yu, M. Kolbaek, Z.-H. Tan, J. Jensen, Permutation invariant training of deep models for - speaker-independent multi-talker speech separation, in: 2017 IEEE Int. Conf. Acoust. Speech - Signal Process. ICASSP, IEEE, New Orleans, LA, 2017: pp. 241–245. https://doi.org/10.1109/ICASSP.2017.7952154. - """ - - is_differentiable = True - sum_pit_metric: Tensor - total: Tensor - - def __init__( - self, - metric_func: Callable, - eval_func: str = "max", - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Optional[Callable[[Tensor], Tensor]] = None, - **kwargs: Dict[str, Any], - ) -> None: - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - self.metric_func = metric_func - self.eval_func = eval_func - self.kwargs = kwargs - - self.add_state("sum_pit_metric", default=tensor(0.0), dist_reduce_fx="sum") - self.add_state("total", default=tensor(0), dist_reduce_fx="sum") - - def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore - """Update state with predictions and targets. - - Args: - preds: Predictions from model - target: Ground truth values - """ - pit_metric = pit(preds, target, self.metric_func, self.eval_func, **self.kwargs)[0] - - self.sum_pit_metric += pit_metric.sum() - self.total += pit_metric.numel() - - def compute(self) -> Tensor: - """Computes average PIT metric.""" - return self.sum_pit_metric / self.total diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/audio/si_sdr.py b/EE/paddlemetric/src/build/lib/paddlemetrics/audio/si_sdr.py deleted file mode 100644 index f6a463780..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/audio/si_sdr.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
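The permutation-invariant evaluation that ``PIT`` performs can be sketched as a brute-force search over speaker assignments. This illustrative numpy version shows the idea; the shipped ``pit`` functional may be implemented differently:

```python
from itertools import permutations
import numpy as np

def pit_brute_force(preds, target, metric_func, eval_func="max"):
    """preds/target: [batch, spk, ...] arrays; metric_func returns a [batch] array."""
    spk = preds.shape[1]
    best = None
    for perm in permutations(range(spk)):
        # Mean metric over speakers under this candidate assignment: shape [batch]
        score = np.mean(
            [metric_func(preds[:, i], target[:, j]) for i, j in enumerate(perm)], axis=0
        )
        pick = np.maximum if eval_func == "max" else np.minimum
        best = score if best is None else pick(best, score)
    return best  # best-permutation metric value per batch element
```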
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import Any, Callable, Optional
-
-from paddleext.torchapi import Tensor, tensor
-
-from paddlemetrics.functional.audio.si_sdr import si_sdr
-from paddlemetrics.metric import Metric
-
-
-class SI_SDR(Metric):
-    """Scale-invariant signal-to-distortion ratio (SI-SDR). The SI-SDR value is in general considered an overall
-    measure of how good a source sounds.
-
-    Forward accepts
-
-    - ``preds``: ``shape [...,time]``
-    - ``target``: ``shape [...,time]``
-
-    Args:
-        zero_mean:
-            whether to zero-mean target and preds before computing the metric
-        compute_on_step:
-            Forward only calls ``update()`` and returns None if this is set to False. default: True
-        dist_sync_on_step:
-            Synchronize metric state across processes at each ``forward()``
-            before returning the value at the step.
-        process_group:
-            Specify the process group on which synchronization is called. default: None (which selects the entire world)
-        dist_sync_fn:
-            Callback that performs the allgather operation on the metric state. When `None`, DDP
-            will be used to perform the allgather.
-
-    Raises:
-        TypeError:
-            if target and preds have a different shape
-
-    Returns:
-        average si-sdr value
-
-    Example:
-        >>> import paddleext.torchapi as B
-        >>> from paddlemetrics import SI_SDR
-        >>> target = B.tensor([3.0, -0.5, 2.0, 7.0])
-        >>> preds = B.tensor([2.5, 0.0, 2.0, 8.0])
-        >>> si_sdr = SI_SDR()
-        >>> si_sdr_val = si_sdr(preds, target)
-        >>> si_sdr_val
-        tensor(18.4030)
-
-    References:
-        [1] Le Roux, Jonathan, et al. "SDR half-baked or well done." IEEE International Conference on Acoustics, Speech
-        and Signal Processing (ICASSP) 2019.
-    """
-
-    is_differentiable = True
-    higher_is_better = True
-    sum_si_sdr: Tensor
-    total: Tensor
-
-    def __init__(
-        self,
-        zero_mean: bool = False,
-        compute_on_step: bool = True,
-        dist_sync_on_step: bool = False,
-        process_group: Optional[Any] = None,
-        dist_sync_fn: Optional[Callable[[Tensor], Tensor]] = None,
-    ) -> None:
-        super().__init__(
-            compute_on_step=compute_on_step,
-            dist_sync_on_step=dist_sync_on_step,
-            process_group=process_group,
-            dist_sync_fn=dist_sync_fn,
-        )
-        self.zero_mean = zero_mean
-
-        self.add_state("sum_si_sdr", default=tensor(0.0), dist_reduce_fx="sum")
-        self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
-
-    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
-        """Update state with predictions and targets.
-
-        Args:
-            preds: Predictions from model
-            target: Ground truth values
-        """
-        si_sdr_batch = si_sdr(preds=preds, target=target, zero_mean=self.zero_mean)
-
-        self.sum_si_sdr += si_sdr_batch.sum()
-        self.total += si_sdr_batch.numel()
-
-    def compute(self) -> Tensor:
-        """Computes average SI-SDR."""
-        return self.sum_si_sdr / self.total
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/audio/si_snr.py b/EE/paddlemetric/src/build/lib/paddlemetrics/audio/si_snr.py
deleted file mode 100644
index 31747a28d..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/audio/si_snr.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# Copyright The PyTorch Lightning team.
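The SI-SDR definition that the ``SI_SDR`` class above averages (Le Roux et al., 2019) fits in a few lines. A numpy sketch, mirroring the ``zero_mean`` argument; it reproduces the 18.4030 dB docstring example:

```python
import numpy as np

def si_sdr_value(preds: np.ndarray, target: np.ndarray, zero_mean: bool = False) -> float:
    if zero_mean:
        preds = preds - preds.mean()
        target = target - target.mean()
    # Optimal scaling of the target onto the prediction
    alpha = np.dot(preds, target) / np.dot(target, target)
    projection = alpha * target   # target-aligned component
    noise = preds - projection    # everything else counts as distortion
    return 10 * np.log10(np.dot(projection, projection) / np.dot(noise, noise))
```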
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Callable, Optional - -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.functional.audio.si_snr import si_snr -from paddlemetrics.metric import Metric - - -class SI_SNR(Metric): - """Scale-invariant signal-to-noise ratio (SI-SNR). - - Forward accepts - - - ``preds``: ``shape [...,time]`` - - ``target``: ``shape [...,time]`` - - Args: - compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. - process_group: - Specify the process group on which synchronization is called. default: None (which selects the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. When `None`, DDP - will be used to perform the allgather. - - Raises: - TypeError: - if target and preds have a different shape - - Returns: - average si-snr value - - Example: - >>> import torchapi as B - >>> from paddlemetrics import SI_SNR - >>> target = B.tensor([3.0, -0.5, 2.0, 7.0]) - >>> preds = B.tensor([2.5, 0.0, 2.0, 8.0]) - >>> si_snr = SI_SNR() - >>> si_snr_val = si_snr(preds, target) - >>> si_snr_val - tensor(15.0918) - - References: - [1] Y. Luo and N. Mesgarani, "TaSNet: Time-Domain Audio Separation Network for Real-Time, Single-Channel Speech - Separation," 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 2018, pp. - 696-700, doi: 10.1109/ICASSP.2018.8462116. - """ - - is_differentiable = True - sum_si_snr: Tensor - total: Tensor - higher_is_better = True - - def __init__( - self, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Optional[Callable[[Tensor], Tensor]] = None, - ) -> None: - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - - self.add_state("sum_si_snr", default=tensor(0.0), dist_reduce_fx="sum") - self.add_state("total", default=tensor(0), dist_reduce_fx="sum") - - def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore - """Update state with predictions and targets. - - Args: - preds: Predictions from model - target: Ground truth values - """ - si_snr_batch = si_snr(preds=preds, target=target) - - self.sum_si_snr += si_snr_batch.sum() - self.total += si_snr_batch.numel() - - def compute(self) -> Tensor: - """Computes average SI-SNR.""" - return self.sum_si_snr / self.total diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/audio/snr.py b/EE/paddlemetric/src/build/lib/paddlemetrics/audio/snr.py deleted file mode 100644 index 683cb8bf3..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/audio/snr.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright The PyTorch Lightning team. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Callable, Optional - -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.functional.audio.snr import snr -from paddlemetrics.metric import Metric - - -class SNR(Metric): - r"""Signal-to-noise ratio (SNR_): - - .. math:: - \text{SNR} = \frac{P_{signal}}{P_{noise}} - - where :math:`P` denotes the power of each signal. The SNR metric compares the level - of the desired signal to the level of background noise. Therefore, a high value of - SNR means that the audio is clear. - - Forward accepts - - - ``preds``: ``shape [..., time]`` - - ``target``: ``shape [..., time]`` - - Args: - zero_mean: - if to zero mean target and preds or not - compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. - process_group: - Specify the process group on which synchronization is called. default: None (which selects the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. When `None`, DDP - will be used to perform the allgather. - - Raises: - TypeError: - if target and preds have a different shape - - Returns: - average snr value - - Example: - >>> import torchapi as B - >>> from paddlemetrics import SNR - >>> target = B.tensor([3.0, -0.5, 2.0, 7.0]) - >>> preds = B.tensor([2.5, 0.0, 2.0, 8.0]) - >>> snr = SNR() - >>> snr_val = snr(preds, target) - >>> snr_val - tensor(16.1805) - - References: - [1] Le Roux, Jonathan, et al. "SDR half-baked or well done." IEEE International Conference on Acoustics, Speech - and Signal Processing (ICASSP) 2019. - - """ - is_differentiable = True - sum_snr: Tensor - total: Tensor - - def __init__( - self, - zero_mean: bool = False, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Optional[Callable[[Tensor], Tensor]] = None, - ) -> None: - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - self.zero_mean = zero_mean - - self.add_state("sum_snr", default=tensor(0.0), dist_reduce_fx="sum") - self.add_state("total", default=tensor(0), dist_reduce_fx="sum") - - def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore - """Update state with predictions and targets. 
- - Args: - preds: Predictions from model - target: Ground truth values - """ - snr_batch = snr(preds=preds, target=target, zero_mean=self.zero_mean) - - self.sum_snr += snr_batch.sum() - self.total += snr_batch.numel() - - def compute(self) -> Tensor: - """Computes average SNR.""" - return self.sum_snr / self.total diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/audio/stoi.py b/EE/paddlemetric/src/build/lib/paddlemetrics/audio/stoi.py deleted file mode 100644 index 1c8cf3788..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/audio/stoi.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Callable, Optional - -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.functional.audio.stoi import stoi -from paddlemetrics.metric import Metric -from paddlemetrics.utilities.imports import _PYSTOI_AVAILABLE - - -class STOI(Metric): - r"""STOI (Short Term Objective Intelligibility, see [2,3]), a wrapper for the pystoi package [1]. - Note that input will be moved to `cpu` to perform the metric calculation. - - Intelligibility measure which is highly correlated with the intelligibility of degraded speech signals, e.g., due - to additive noise, single/multi-channel noise reduction, binary masking and vocoded speech as in CI simulations. - The STOI-measure is intrusive, i.e., a function of the clean and degraded speech signals. STOI may be a good - alternative to the speech intelligibility index (SII) or the speech transmission index (STI), when you are - interested in the effect of nonlinear processing to noisy speech, e.g., noise reduction, binary masking algorithms, - on speech intelligibility. Description taken from [Cees Taal's website](http://www.ceestaal.nl/code/). - - .. note:: using this metrics requires you to have ``pystoi`` install. Either install as ``pip install - paddlemetrics[audio]`` or ``pip install pystoi`` - - Forward accepts - - - ``preds``: ``shape [...,time]`` - - ``target``: ``shape [...,time]`` - - Args: - fs: - sampling frequency (Hz) - extended: - whether to use the extended STOI described in [4] - compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. - process_group: - Specify the process group on which synchronization is called. default: None (which selects the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. When `None`, DDP - will be used to perform the allgather. 
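For reference, the per-sample SNR that the deleted ``SNR`` class accumulates is the ratio of signal power to residual power, in dB. A numpy sketch consistent with the docstring example above (16.1805 dB for the tensors shown):

```python
import numpy as np

def snr_value(preds: np.ndarray, target: np.ndarray, zero_mean: bool = False) -> float:
    if zero_mean:
        preds = preds - preds.mean()
        target = target - target.mean()
    noise = target - preds
    return 10 * np.log10(np.dot(target, target) / np.dot(noise, noise))
```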
- - Returns: - average STOI value - - Raises: - ModuleNotFoundError: - If ``pystoi`` package is not installed - - Example: - >>> from paddlemetrics.audio import STOI - >>> import torchapi as B - >>> g = B.manual_seed(1) - >>> preds = B.randn(8000) - >>> target = B.randn(8000) - >>> stoi = STOI(8000, False) - >>> stoi(preds, target) - tensor(-0.0100) - - References: - [1] https://github.com/mpariente/pystoi - - [2] C.H.Taal, R.C.Hendriks, R.Heusdens, J.Jensen 'A Short-Time Objective Intelligibility Measure for - Time-Frequency Weighted Noisy Speech', ICASSP 2010, Texas, Dallas. - - [3] C.H.Taal, R.C.Hendriks, R.Heusdens, J.Jensen 'An Algorithm for Intelligibility Prediction of - Time-Frequency Weighted Noisy Speech', IEEE Transactions on Audio, Speech, and Language Processing, 2011. - - [4] J. Jensen and C. H. Taal, 'An Algorithm for Predicting the Intelligibility of Speech Masked by Modulated - Noise Maskers', IEEE Transactions on Audio, Speech and Language Processing, 2016. - - """ - sum_stoi: Tensor - total: Tensor - is_differentiable = False - higher_is_better = True - - def __init__( - self, - fs: int, - extended: bool = False, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Optional[Callable[[Tensor], Tensor]] = None, - ) -> None: - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - if not _PYSTOI_AVAILABLE: - raise ModuleNotFoundError( - "STOI metric requires that pystoi is installed." - " Either install as `pip install paddlemetrics[audio]` or `pip install pystoi`" - ) - self.fs = fs - self.extended = extended - - self.add_state("sum_stoi", default=tensor(0.0), dist_reduce_fx="sum") - self.add_state("total", default=tensor(0), dist_reduce_fx="sum") - - def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore - """Update state with predictions and targets. - - Args: - preds: Predictions from model - target: Ground truth values - """ - stoi_batch = stoi(preds, target, self.fs, self.extended, False).to(self.sum_stoi.device) - - self.sum_stoi += stoi_batch.sum() - self.total += stoi_batch.numel() - - def compute(self) -> Tensor: - """Computes average STOI.""" - return self.sum_stoi / self.total diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/__init__.py deleted file mode 100644 index e928018b6..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
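Like the PESQ wrapper, the ``STOI`` class above delegates to a third-party backend per sample, on CPU. A minimal sketch using the ``pystoi`` package (https://github.com/mpariente/pystoi); the helper name and reshaping are illustrative:

```python
import numpy as np
from pystoi import stoi as stoi_backend

def stoi_per_sample(preds: np.ndarray, target: np.ndarray, fs: int, extended: bool) -> np.ndarray:
    preds = preds.reshape(-1, preds.shape[-1])
    target = target.reshape(-1, target.shape[-1])
    # pystoi takes the clean signal first, then the degraded one.
    return np.array([stoi_backend(t, p, fs, extended=extended) for t, p in zip(target, preds)])
```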
-from paddlemetrics.classification.accuracy import Accuracy # noqa: F401 -from paddlemetrics.classification.auc import AUC # noqa: F401 -from paddlemetrics.classification.auroc import AUROC # noqa: F401 -from paddlemetrics.classification.average_precision import AveragePrecision # noqa: F401 -from paddlemetrics.classification.binned_precision_recall import BinnedAveragePrecision # noqa: F401 -from paddlemetrics.classification.binned_precision_recall import BinnedPrecisionRecallCurve # noqa: F401 -from paddlemetrics.classification.binned_precision_recall import BinnedRecallAtFixedPrecision # noqa: F401 -from paddlemetrics.classification.calibration_error import CalibrationError # noqa: F401 -from paddlemetrics.classification.cohen_kappa import CohenKappa # noqa: F401 -from paddlemetrics.classification.confusion_matrix import ConfusionMatrix # noqa: F401 -from paddlemetrics.classification.f_beta import F1, FBeta # noqa: F401 -from paddlemetrics.classification.hamming_distance import HammingDistance # noqa: F401 -from paddlemetrics.classification.hinge import Hinge # noqa: F401 -from paddlemetrics.classification.iou import IoU # noqa: F401 -from paddlemetrics.classification.kl_divergence import KLDivergence # noqa: F401 -from paddlemetrics.classification.matthews_corrcoef import MatthewsCorrcoef # noqa: F401 -from paddlemetrics.classification.precision_recall import Precision, Recall # noqa: F401 -from paddlemetrics.classification.precision_recall_curve import PrecisionRecallCurve # noqa: F401 -from paddlemetrics.classification.roc import ROC # noqa: F401 -from paddlemetrics.classification.specificity import Specificity # noqa: F401 -from paddlemetrics.classification.stat_scores import StatScores # noqa: F401 diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/accuracy.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/accuracy.py deleted file mode 100644 index 325a18d42..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/accuracy.py +++ /dev/null @@ -1,276 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Callable, Optional - -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.functional.classification.accuracy import ( - _accuracy_compute, - _accuracy_update, - _check_subset_validity, - _mode, - _subset_accuracy_compute, - _subset_accuracy_update, -) -from paddlemetrics.utilities.enums import DataType - -from paddlemetrics.classification.stat_scores import StatScores # isort:skip - - -class Accuracy(StatScores): - r""" - Computes Accuracy_: - - .. math:: - \text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i) - - Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a - tensor of predictions. 
- - For multi-class and multi-dimensional multi-class data with probability or logits predictions, the - parameter ``top_k`` generalizes this metric to a Top-K accuracy metric: for each sample the - top-K highest probability or logit score items are considered to find the correct label. - - For multi-label and multi-dimensional multi-class inputs, this metric computes the "global" - accuracy by default, which counts all labels or sub-samples separately. This can be - changed to subset accuracy (which requires all labels or sub-samples in the sample to - be correctly predicted) by setting ``subset_accuracy=True``. - - Accepts all input types listed in :ref:`references/modules:input types`. - - Args: - num_classes: - Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods. - threshold: - Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case - of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. - average: - Defines the reduction that is applied. Should be one of the following: - - - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes. - - ``'macro'``: Calculate the metric for each class separately, and average the - metrics across classes (with equal weights for each class). - - ``'weighted'``: Calculate the metric for each class separately, and average the - metrics across classes, weighting each class by its support (``tp + fn``). - - ``'none'`` or ``None``: Calculate the metric for each class separately, and return - the metric for every class. - - ``'samples'``: Calculate the metric for each sample, and average the metrics - across samples (with equal weights for each sample). - - .. note:: What is considered a sample in the multi-dimensional multi-class case - depends on the value of ``mdmc_average``. - - .. note:: If ``'none'`` and a given class doesn't occur in the `preds` or `target`, - the value for the class will be ``nan``. - - mdmc_average: - Defines how averaging is done for multi-dimensional multi-class inputs (on top of the - ``average`` parameter). Should be one of the following: - - - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional - multi-class. - - - ``'samplewise'``: In this case, the statistics are computed separately for each - sample on the ``N`` axis, and then averaged over samples. - The computation for each sample is done by treating the flattened extra axes ``...`` - (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample, - and computing the metric for the sample based on that. - - - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs - (see :ref:`references/modules:input types`) - are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they - were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. - - ignore_index: - Integer specifying a target class to ignore. If given, this class index does not contribute - to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` - or ``'none'``, the score for the ignored class will be returned as ``nan``. - - top_k: - Number of highest probability or logit score predictions considered to find the correct label, - relevant only for (multi-dimensional) multi-class inputs. The - default value (``None``) will be interpreted as 1 for these inputs. 
- - Should be left at default (``None``) for all other types of inputs. - - multiclass: - Used only in certain special cases, where you want to treat inputs as a different type - than what they appear to be. See the parameter's - :ref:`documentation section ` - for a more detailed explanation and examples. - - subset_accuracy: - Whether to compute subset accuracy for multi-label and multi-dimensional - multi-class inputs (has no effect for other input types). - - - For multi-label inputs, if the parameter is set to ``True``, then all labels for - each sample must be correctly predicted for the sample to count as correct. If it - is set to ``False``, then all labels are counted separately - this is equivalent to - flattening inputs beforehand (i.e. ``preds = preds.flatten()`` and same for ``target``). - - - For multi-dimensional multi-class inputs, if the parameter is set to ``True``, then all - sub-sample (on the extra axis) must be correct for the sample to be counted as correct. - If it is set to ``False``, then all sub-samples are counter separately - this is equivalent, - in the case of label predictions, to flattening the inputs beforehand (i.e. - ``preds = preds.flatten()`` and same for ``target``). Note that the ``top_k`` parameter - still applies in both cases, if set. - - compute_on_step: - Forward only calls ``update()`` and return ``None`` if this is set to ``False``. - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step - process_group: - Specify the process group on which synchronization is called. - default: ``None`` (which selects the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. When ``None``, DDP - will be used to perform the allgather - - Raises: - ValueError: - If ``top_k`` is not an ``integer`` larger than ``0``. - ValueError: - If ``average`` is none of ``"micro"``, ``"macro"``, ``"weighted"``, ``"samples"``, ``"none"``, ``None``. - ValueError: - If two different input modes are provided, eg. using ``multi-label`` with ``multi-class``. - ValueError: - If ``top_k`` parameter is set for ``multi-label`` inputs. 
- - Example: - >>> import paddleext.torchapi as B - >>> from paddlemetrics import Accuracy - >>> target = B.tensor([0, 1, 2, 3]) - >>> preds = B.tensor([0, 2, 1, 3]) - >>> accuracy = Accuracy() - >>> accuracy(preds, target) - tensor(0.5000) - - >>> target = B.tensor([0, 1, 2]) - >>> preds = B.tensor([[0.1, 0.9, 0], [0.3, 0.1, 0.6], [0.2, 0.5, 0.3]]) - >>> accuracy = Accuracy(top_k=2) - >>> accuracy(preds, target) - tensor(0.6667) - - """ - is_differentiable = False - correct: Tensor - total: Tensor - - def __init__( - self, - threshold: float = 0.5, - num_classes: Optional[int] = None, - average: str = "micro", - mdmc_average: Optional[str] = "global", - ignore_index: Optional[int] = None, - top_k: Optional[int] = None, - multiclass: Optional[bool] = None, - subset_accuracy: bool = False, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ) -> None: - allowed_average = ["micro", "macro", "weighted", "samples", "none", None] - if average not in allowed_average: - raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.") - - super().__init__( - reduce="macro" if average in ["weighted", "none", None] else average, - mdmc_reduce=mdmc_average, - threshold=threshold, - top_k=top_k, - num_classes=num_classes, - multiclass=multiclass, - ignore_index=ignore_index, - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - - self.add_state("correct", default=tensor(0), dist_reduce_fx="sum") - self.add_state("total", default=tensor(0), dist_reduce_fx="sum") - - if top_k is not None and (not isinstance(top_k, int) or top_k <= 0): - raise ValueError(f"The `top_k` should be an integer larger than 0, got {top_k}") - - self.average = average - self.threshold = threshold - self.top_k = top_k - self.subset_accuracy = subset_accuracy - self.mode: DataType = None # type: ignore - self.multiclass = multiclass - - def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore - """Update state with predictions and targets. See - :ref:`references/modules:input types` for more information on input - types. 
-        Args:
-            preds: Predictions from model (logits, probabilities, or labels)
-            target: Ground truth labels
-        """
-        # _mode returns the mode of the data (binary, multi-label, multi-class, multi-dim multi-class)
-        mode = _mode(preds, target, self.threshold, self.top_k, self.num_classes, self.multiclass)
-
-        if not self.mode:
-            self.mode = mode
-        elif self.mode != mode:
-            raise ValueError(f"You cannot use {mode} inputs with {self.mode} inputs.")
-
-        if self.subset_accuracy and not _check_subset_validity(self.mode):
-            self.subset_accuracy = False
-
-        if self.subset_accuracy:
-            correct, total = _subset_accuracy_update(preds, target, threshold=self.threshold, top_k=self.top_k)
-            self.correct += correct
-            self.total += total
-        else:
-            if not self.mode:
-                raise RuntimeError("Mode should have been determined by this point; call `update` with valid inputs.")
-            tp, fp, tn, fn = _accuracy_update(
-                preds,
-                target,
-                reduce=self.reduce,
-                mdmc_reduce=self.mdmc_reduce,
-                threshold=self.threshold,
-                num_classes=self.num_classes,
-                top_k=self.top_k,
-                multiclass=self.multiclass,
-                ignore_index=self.ignore_index,
-                mode=self.mode,
-            )
-
-            # Update states
-            if self.reduce != "samples" and self.mdmc_reduce != "samplewise":
-                self.tp += tp
-                self.fp += fp
-                self.tn += tn
-                self.fn += fn
-            else:
-                self.tp.append(tp)
-                self.fp.append(fp)
-                self.tn.append(tn)
-                self.fn.append(fn)
-
-    def compute(self) -> Tensor:
-        """Computes accuracy based on inputs passed in to ``update`` previously."""
-        if not self.mode:
-            raise RuntimeError("Mode was not determined; call `update` before `compute`.")
-        if self.subset_accuracy:
-            return _subset_accuracy_compute(self.correct, self.total)
-        tp, fp, tn, fn = self._get_final_stats()
-        return _accuracy_compute(tp, fp, tn, fn, self.average, self.mdmc_reduce, self.mode)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/auc.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/auc.py
deleted file mode 100644
index 99b64048d..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/auc.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import Any, Callable, List, Optional
-
-from paddleext.torchapi import Tensor
-
-from paddlemetrics.functional.classification.auc import _auc_compute, _auc_update
-from paddlemetrics.metric import Metric
-from paddlemetrics.utilities import rank_zero_warn
-from paddlemetrics.utilities.data import dim_zero_cat
-
-
-class AUC(Metric):
-    r"""
-    Computes Area Under the Curve (AUC) using the trapezoidal rule
-
-    Forward accepts two input tensors that should be 1D and have the same number
-    of elements
-
-    Args:
-        reorder: AUC expects its first input to be sorted. If this is not the case,
-            setting this argument to ``True`` will use a stable sorting algorithm to
-            sort the input in descending order
-        compute_on_step:
-            Forward only calls ``update()`` and returns None if this is set to False.
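The ``top_k`` generalization described in the ``Accuracy`` docstring above can be stated compactly: a sample is correct if its true label is among the k highest-scoring classes. An illustrative numpy sketch (not the ``_accuracy_update`` internals); it reproduces the 0.6667 doctest result:

```python
import numpy as np

def top_k_accuracy(probs: np.ndarray, labels: np.ndarray, k: int = 1) -> float:
    # probs: [N, C] class scores; labels: [N] integer targets
    topk = np.argsort(probs, axis=1)[:, -k:]       # indices of the k best classes
    hits = (topk == labels[:, None]).any(axis=1)   # is the true label among them?
    return hits.mean()

# With the docstring's 3-sample batch and top_k=2, two of three samples hit: 2/3.
```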
- dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. - process_group: - Specify the process group on which synchronization is called. default: None (which selects the entire world) - dist_sync_fn: - Callback that performs the ``allgather`` operation on the metric state. When ``None``, DDP - will be used to perform the ``allgather``. - """ - is_differentiable = False - x: List[Tensor] - y: List[Tensor] - - def __init__( - self, - reorder: bool = False, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ) -> None: - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - - self.reorder = reorder - - self.add_state("x", default=[], dist_reduce_fx="cat") - self.add_state("y", default=[], dist_reduce_fx="cat") - - rank_zero_warn( - "Metric `AUC` will save all targets and predictions in buffer." - " For large datasets this may lead to large memory footprint." - ) - - def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore - """Update state with predictions and targets. - - Args: - preds: Predictions from model (probabilities, or labels) - target: Ground truth labels - """ - x, y = _auc_update(preds, target) - - self.x.append(x) - self.y.append(y) - - def compute(self) -> Tensor: - """Computes AUC based on inputs passed in to ``update`` previously.""" - x = dim_zero_cat(self.x) - y = dim_zero_cat(self.y) - return _auc_compute(x, y, reorder=self.reorder) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/auroc.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/auroc.py deleted file mode 100644 index 6236391de..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/auroc.py +++ /dev/null @@ -1,186 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Callable, List, Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.functional.classification.auroc import _auroc_compute, _auroc_update -from paddlemetrics.metric import Metric -from paddlemetrics.utilities import rank_zero_warn -from paddlemetrics.utilities.data import dim_zero_cat -from paddlemetrics.utilities.enums import DataType -from paddlemetrics.utilities.imports import _TORCH_LOWER_1_6 - - -class AUROC(Metric): - r"""Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_). - Works for both binary, multilabel and multiclass problems. In the case of - multiclass, the values will be calculated based on a one-vs-the-rest approach. - - Forward accepts - - - ``preds`` (float tensor): ``(N, ...)`` (binary) or ``(N, C, ...)`` (multiclass) tensor - with probabilities, where C is the number of classes. 
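Once the buffered points are sorted by x, the AUC reduction above is just the trapezoidal rule, which ``np.trapz`` implements directly; a small numpy sketch:

```python
import numpy as np

x = np.array([0.0, 0.5, 1.0])
y = np.array([0.0, 0.8, 1.0])
order = np.argsort(x)              # roughly what `reorder=True` guards against
area = np.trapz(y[order], x[order])
print(area)                        # 0.65 = 0.5*(0+0.8)/2 + 0.5*(0.8+1)/2
```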
- - - ``target`` (long tensor): ``(N, ...)`` or ``(N, C, ...)`` with integer labels - - For non-binary input, if the ``preds`` and ``target`` tensor have the same - size the input will be interpretated as multilabel and if ``preds`` have one - dimension more than the ``target`` tensor the input will be interpretated as - multiclass. - - Args: - num_classes: integer with number of classes for multi-label and multiclass problems. - Should be set to ``None`` for binary problems - pos_label: integer determining the positive class. Default is ``None`` - which for binary problem is translate to 1. For multiclass problems - this argument should not be set as we iteratively change it in the - range [0,num_classes-1] - average: - - ``'micro'`` computes metric globally. Only works for multilabel problems - - ``'macro'`` computes metric for each class and uniformly averages them - - ``'weighted'`` computes metric for each class and does a weighted-average, - where each class is weighted by their support (accounts for class imbalance) - - ``None`` computes and returns the metric per class - max_fpr: - If not ``None``, calculates standardized partial AUC over the - range [0, max_fpr]. Should be a float between 0 and 1. - compute_on_step: - Forward only calls ``update()`` and return None if this is set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. - process_group: - Specify the process group on which synchronization is called. default: None (which selects the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. When ``None``, DDP - will be used to perform the allgather - - Raises: - ValueError: - If ``average`` is none of ``None``, ``"macro"`` or ``"weighted"``. - ValueError: - If ``max_fpr`` is not a ``float`` in the range ``(0, 1]``. - RuntimeError: - If ``PyTorch version`` is ``below 1.6`` since max_fpr requires ``B.bucketize`` - which is not available below 1.6. - ValueError: - If the mode of data (binary, multi-label, multi-class) changes between batches. - - Example (binary case): - >>> from paddlemetrics import AUROC - >>> preds = B.tensor([0.13, 0.26, 0.08, 0.19, 0.34]) - >>> target = B.tensor([0, 0, 1, 1, 1]) - >>> auroc = AUROC(pos_label=1) - >>> auroc(preds, target) - tensor(0.5000) - - Example (multiclass case): - >>> preds = B.tensor([[0.90, 0.05, 0.05], - ... [0.05, 0.90, 0.05], - ... [0.05, 0.05, 0.90], - ... [0.85, 0.05, 0.10], - ... 
[0.10, 0.10, 0.80]]) - >>> target = B.tensor([0, 1, 1, 2, 2]) - >>> auroc = AUROC(num_classes=3) - >>> auroc(preds, target) - tensor(0.7778) - - """ - is_differentiable = False - preds: List[Tensor] - target: List[Tensor] - - def __init__( - self, - num_classes: Optional[int] = None, - pos_label: Optional[int] = None, - average: Optional[str] = "macro", - max_fpr: Optional[float] = None, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ) -> None: - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - - self.num_classes = num_classes - self.pos_label = pos_label - self.average = average - self.max_fpr = max_fpr - - allowed_average = (None, "macro", "weighted", "micro") - if self.average not in allowed_average: - raise ValueError( - f"Argument `average` expected to be one of the following: {allowed_average} but got {average}" - ) - - if self.max_fpr is not None: - if not isinstance(max_fpr, float) or not 0 < max_fpr <= 1: - raise ValueError(f"`max_fpr` should be a float in range (0, 1], got: {max_fpr}") - - if _TORCH_LOWER_1_6: - raise RuntimeError( - "`max_fpr` argument requires `B.bucketize` which is not available below PyTorch version 1.6" - ) - - self.mode: DataType = None # type: ignore - self.add_state("preds", default=[], dist_reduce_fx="cat") - self.add_state("target", default=[], dist_reduce_fx="cat") - - rank_zero_warn( - "Metric `AUROC` will save all targets and predictions in buffer." - " For large datasets this may lead to large memory footprint." - ) - - def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore - """Update state with predictions and targets. - - Args: - preds: Predictions from model (probabilities, or labels) - target: Ground truth labels - """ - preds, target, mode = _auroc_update(preds, target) - - self.preds.append(preds) - self.target.append(target) - - if self.mode and self.mode != mode: - raise ValueError( - "The mode of data (binary, multi-label, multi-class) should be constant, but changed" - f" between batches from {self.mode} to {mode}" - ) - self.mode = mode - - def compute(self) -> Tensor: - """Computes AUROC based on inputs passed in to ``update`` previously.""" - if not self.mode: - raise RuntimeError("You have to have determined mode.") - preds = dim_zero_cat(self.preds) - target = dim_zero_cat(self.target) - return _auroc_compute( - preds, - target, - self.mode, - self.num_classes, - self.pos_label, - self.average, - self.max_fpr, - ) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/average_precision.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/average_precision.py deleted file mode 100644 index 0e37da588..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/average_precision.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
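For the binary case of the ``AUROC`` metric above, ROC AUC equals the probability that a random positive outscores a random negative. A compact numpy sketch via the rank-sum (Mann-Whitney) identity; illustrative only, since the ``_auroc_compute`` used above works from the ROC curve instead:

```python
import numpy as np

def binary_auroc(preds: np.ndarray, target: np.ndarray) -> float:
    order = np.argsort(preds)
    ranks = np.empty_like(order, dtype=float)
    ranks[order] = np.arange(1, len(preds) + 1)    # 1-based ranks (no tie handling)
    n_pos = target.sum()
    n_neg = len(target) - n_pos
    return (ranks[target == 1].sum() - n_pos * (n_pos + 1) / 2) / (n_pos * n_neg)

# Matches the binary doctest above: preds [0.13, 0.26, 0.08, 0.19, 0.34],
# target [0, 0, 1, 1, 1] gives 0.5.
```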
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import Any, List, Optional, Union
-
-import paddleext.torchapi as B
-from paddleext.torchapi import Tensor
-
-from paddlemetrics.functional.classification.average_precision import (
-    _average_precision_compute,
-    _average_precision_update,
-)
-from paddlemetrics.metric import Metric
-from paddlemetrics.utilities import rank_zero_warn
-from paddlemetrics.utilities.data import dim_zero_cat
-
-
-class AveragePrecision(Metric):
-    """Computes the average precision score, which summarises the precision recall curve into one number. Works for
-    both binary and multiclass problems. In the case of multiclass, the values will be calculated based on a
-    one-vs-the-rest approach.
-
-    Forward accepts
-
-    - ``preds`` (float tensor): ``(N, ...)`` (binary) or ``(N, C, ...)`` (multiclass) tensor
-      with probabilities, where C is the number of classes.
-
-    - ``target`` (long tensor): ``(N, ...)`` with integer labels
-
-    Args:
-        num_classes: integer with number of classes. Not necessary to provide
-            for binary problems.
-        pos_label: integer determining the positive class. Default is ``None``
-            which for binary problems is translated to 1. For multiclass problems
-            this argument should not be set, as we iteratively change it in the
-            range [0,num_classes-1]
-        average:
-            defines the reduction that is applied in the case of multiclass and multilabel input.
-            Should be one of the following:
-
-            - ``'macro'`` [default]: Calculate the metric for each class separately, and average the
-              metrics across classes (with equal weights for each class).
-            - ``'micro'``: Calculate the metric globally, across all samples and classes. Cannot be
-              used with multiclass input.
-            - ``'weighted'``: Calculate the metric for each class separately, and average the
-              metrics across classes, weighting each class by its support.
-            - ``'none'`` or ``None``: Calculate the metric for each class separately, and return
-              the metric for every class.
-
-        compute_on_step:
-            Forward only calls ``update()`` and returns None if this is set to False. default: True
-        dist_sync_on_step:
-            Synchronize metric state across processes at each ``forward()``
-            before returning the value at the step. default: False
-        process_group:
-            Specify the process group on which synchronization is called. default: None (which selects the entire world)
-
-    Example (binary case):
-        >>> from paddlemetrics import AveragePrecision
-        >>> pred = B.tensor([0, 1, 2, 3])
-        >>> target = B.tensor([0, 1, 1, 1])
-        >>> average_precision = AveragePrecision(pos_label=1)
-        >>> average_precision(pred, target)
-        tensor(1.)
-
-    Example (multiclass case):
-        >>> pred = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
-        ...                  [0.05, 0.75, 0.05, 0.05, 0.05],
-        ...                  [0.05, 0.05, 0.75, 0.05, 0.05],
-        ...                  [0.05, 0.05, 0.05, 0.75, 0.05]])
-        >>> target = B.tensor([0, 1, 3, 2])
-        >>> average_precision = AveragePrecision(num_classes=5, average=None)
-        >>> average_precision(pred, target)
-        [tensor(1.), tensor(1.), tensor(0.2500), tensor(0.2500), tensor(nan)]
-    """
-
-    is_differentiable = False
-    preds: List[Tensor]
-    target: List[Tensor]
-
-    def __init__(
-        self,
-        num_classes: Optional[int] = None,
-        pos_label: Optional[int] = None,
-        average: Optional[str] = "macro",
-        compute_on_step: bool = True,
-        dist_sync_on_step: bool = False,
-        process_group: Optional[Any] = None,
-    ) -> None:
-        super().__init__(
-            compute_on_step=compute_on_step,
-            dist_sync_on_step=dist_sync_on_step,
-            process_group=process_group,
-        )
-
-        self.num_classes = num_classes
-        self.pos_label = pos_label
-        allowed_average = ("micro", "macro", "weighted", None)
-        if average not in allowed_average:
-            raise ValueError(f"Expected argument `average` to be one of {allowed_average}" f" but got {average}")
-        self.average = average
-
-        self.add_state("preds", default=[], dist_reduce_fx="cat")
-        self.add_state("target", default=[], dist_reduce_fx="cat")
-
-        rank_zero_warn(
-            "Metric `AveragePrecision` will save all targets and predictions in buffer."
-            " For large datasets this may lead to large memory footprint."
-        )
-
-    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
-        """Update state with predictions and targets.
-
-        Args:
-            preds: Predictions from model
-            target: Ground truth values
-        """
-        preds, target, num_classes, pos_label = _average_precision_update(
-            preds, target, self.num_classes, self.pos_label, self.average
-        )
-        self.preds.append(preds)
-        self.target.append(target)
-        self.num_classes = num_classes
-        self.pos_label = pos_label
-
-    def compute(self) -> Union[Tensor, List[Tensor]]:
-        """Compute the average precision score.
-
-        Returns:
-            tensor with average precision. If multiclass will return list
-            of such tensors, one for each class
-        """
-        preds = dim_zero_cat(self.preds)
-        target = dim_zero_cat(self.target)
-        if not self.num_classes:
-            raise ValueError(f"`num_classes` has to be a positive number, but got {self.num_classes}")
-        return _average_precision_compute(preds, target, self.num_classes, self.pos_label, self.average)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/binned_precision_recall.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/binned_precision_recall.py
deleted file mode 100644
index ffc86ae69..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/binned_precision_recall.py
+++ /dev/null
@@ -1,324 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
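For reference, once a precision-recall curve is available, the average precision summarized by the ``AveragePrecision`` class above amounts to the weighted sum of precisions by recall increments. A numpy sketch of that reduction (illustrative, not ``_average_precision_compute``):

```python
import numpy as np

def average_precision_from_curve(precision: np.ndarray, recall: np.ndarray) -> float:
    # precision/recall as returned by a PR-curve routine, with recall decreasing
    # to 0 at the last point (where precision is 1 by convention):
    # AP = sum_n (R_n - R_{n-1}) * P_n
    return float(-np.sum(np.diff(recall) * precision[:-1]))
```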
-from typing import Any, List, Optional, Tuple, Union
-
-import paddleext.torchapi as B
-from paddleext.torchapi import Tensor
-
-from paddlemetrics.functional.classification.average_precision import _average_precision_compute_with_precision_recall
-from paddlemetrics.metric import Metric
-from paddlemetrics.utilities.data import METRIC_EPS, to_onehot
-
-
-def _recall_at_precision(
-    precision: Tensor,
-    recall: Tensor,
-    thresholds: Tensor,
-    min_precision: float,
-) -> Tuple[Tensor, Tensor]:
-    try:
-        max_recall, _, best_threshold = max(
-            (r, p, t) for p, r, t in zip(precision, recall, thresholds) if p >= min_precision
-        )
-
-    except ValueError:
-        max_recall = B.tensor(0.0, device=recall.device, dtype=recall.dtype)
-        best_threshold = B.tensor(0)
-
-    if max_recall == 0.0:
-        best_threshold = B.tensor(1e6, device=thresholds.device, dtype=thresholds.dtype)
-
-    return max_recall, best_threshold
-
-
-class BinnedPrecisionRecallCurve(Metric):
-    """Computes precision-recall pairs for different thresholds. Works for both binary and multiclass problems. In
-    the case of multiclass, the values will be calculated based on a one-vs-the-rest approach.
-
-    Computation is performed in constant memory by computing precision and recall
-    for ``thresholds`` buckets/thresholds (evenly distributed between 0 and 1).
-
-    Forward accepts
-
-    - ``preds`` (float tensor): ``(N, ...)`` (binary) or ``(N, C, ...)`` (multiclass) tensor
-      with probabilities, where C is the number of classes.
-
-    - ``target`` (long tensor): ``(N, ...)`` or ``(N, C, ...)`` with integer labels
-
-    Args:
-        num_classes: integer with number of classes. For binary, set to 1.
-        thresholds: list or tensor with specific thresholds, or an integer giving the number of bins
-            to sample linearly between 0 and 1. More thresholds lead to a more detailed curve and
-            more accurate estimates, but are slower to compute and consume more memory.
-        compute_on_step:
-            Forward only calls ``update()`` and returns None if this is set to False. default: True
-        dist_sync_on_step:
-            Synchronize metric state across processes at each ``forward()``
-            before returning the value at the step. default: False
-        process_group:
-            Specify the process group on which synchronization is called. default: None (which selects the entire world)
-
-    Raises:
-        ValueError:
-            If ``thresholds`` is not an int, list or tensor
-
-    Example (binary case):
-        >>> from paddlemetrics import BinnedPrecisionRecallCurve
-        >>> pred = B.tensor([0, 0.1, 0.8, 0.4])
-        >>> target = B.tensor([0, 1, 1, 0])
-        >>> pr_curve = BinnedPrecisionRecallCurve(num_classes=1, thresholds=5)
-        >>> precision, recall, thresholds = pr_curve(pred, target)
-        >>> precision
-        tensor([0.5000, 0.5000, 1.0000, 1.0000, 1.0000, 1.0000])
-        >>> recall
-        tensor([1.0000, 0.5000, 0.5000, 0.5000, 0.0000, 0.0000])
-        >>> thresholds
-        tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000])
-
-    Example (multiclass case):
-        >>> pred = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
-        ...                  [0.05, 0.75, 0.05, 0.05, 0.05],
-        ...                  [0.05, 0.05, 0.75, 0.05, 0.05],
-        ...
[0.05, 0.05, 0.05, 0.75, 0.05]]) - >>> target = B.tensor([0, 1, 3, 2]) - >>> pr_curve = BinnedPrecisionRecallCurve(num_classes=5, thresholds=3) - >>> precision, recall, thresholds = pr_curve(pred, target) - >>> precision # doctest: +NORMALIZE_WHITESPACE - [tensor([0.2500, 1.0000, 1.0000, 1.0000]), - tensor([0.2500, 1.0000, 1.0000, 1.0000]), - tensor([2.5000e-01, 1.0000e-06, 1.0000e+00, 1.0000e+00]), - tensor([2.5000e-01, 1.0000e-06, 1.0000e+00, 1.0000e+00]), - tensor([2.5000e-07, 1.0000e+00, 1.0000e+00, 1.0000e+00])] - >>> recall # doctest: +NORMALIZE_WHITESPACE - [tensor([1.0000, 1.0000, 0.0000, 0.0000]), - tensor([1.0000, 1.0000, 0.0000, 0.0000]), - tensor([1.0000, 0.0000, 0.0000, 0.0000]), - tensor([1.0000, 0.0000, 0.0000, 0.0000]), - tensor([0., 0., 0., 0.])] - >>> thresholds # doctest: +NORMALIZE_WHITESPACE - [tensor([0.0000, 0.5000, 1.0000]), - tensor([0.0000, 0.5000, 1.0000]), - tensor([0.0000, 0.5000, 1.0000]), - tensor([0.0000, 0.5000, 1.0000]), - tensor([0.0000, 0.5000, 1.0000])] - """ - - TPs: Tensor - FPs: Tensor - FNs: Tensor - - def __init__( - self, - num_classes: int, - thresholds: Union[int, Tensor, List[float], None] = None, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - ) -> None: - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - ) - - self.num_classes = num_classes - if isinstance(thresholds, int): - self.num_thresholds = thresholds - thresholds = B.linspace(0, 1.0, thresholds) - self.register_buffer("thresholds", thresholds) - elif thresholds is not None: - if not isinstance(thresholds, (list, Tensor)): - raise ValueError("Expected argument `thresholds` to either be an integer, list of floats or a tensor") - thresholds = B.tensor(thresholds) if isinstance(thresholds, list) else thresholds - self.num_thresholds = thresholds.numel() - self.register_buffer("thresholds", thresholds) - - for name in ("TPs", "FPs", "FNs"): - self.add_state( - name=name, - default=B.zeros(num_classes, self.num_thresholds, dtype=B.float32), - dist_reduce_fx="sum", - ) - - def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore - """ - Args - preds: (n_samples, n_classes) tensor - target: (n_samples, n_classes) tensor - """ - # binary case - if len(preds.shape) == len(target.shape) == 1: - preds = preds.reshape(-1, 1) - target = target.reshape(-1, 1) - - if len(preds.shape) == len(target.shape) + 1: - target = to_onehot(target, num_classes=self.num_classes) - - target = target == 1 - # Iterate one threshold at a time to conserve memory - for i in range(self.num_thresholds): - predictions = preds >= self.thresholds[i] - self.TPs[:, i] += (target & predictions).sum(dim=0) - self.FPs[:, i] += ((~target) & (predictions)).sum(dim=0) - self.FNs[:, i] += ((target) & (~predictions)).sum(dim=0) - - def compute(self) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]: - """Returns float tensor of size n_classes.""" - precisions = (self.TPs + METRIC_EPS) / (self.TPs + self.FPs + METRIC_EPS) - recalls = self.TPs / (self.TPs + self.FNs + METRIC_EPS) - - # Need to guarantee that last precision=1 and recall=0, similar to precision_recall_curve - t_ones = B.ones(self.num_classes, 1, dtype=precisions.dtype, device=precisions.device) - precisions = B.cat([precisions, t_ones], dim=1) - t_zeros = B.zeros(self.num_classes, 1, dtype=recalls.dtype, device=recalls.device) - recalls = B.cat([recalls, t_zeros], dim=1) - if 
self.num_classes == 1:
-            return precisions[0, :], recalls[0, :], self.thresholds
-        return list(precisions), list(recalls), [self.thresholds for _ in range(self.num_classes)]
-
-
-class BinnedAveragePrecision(BinnedPrecisionRecallCurve):
-    """Computes the average precision score, which summarises the precision recall curve into one number. Works for
-    both binary and multiclass problems. In the case of multiclass, the values will be calculated based on a
-    one-vs-the-rest approach.
-
-    Computation is performed in constant memory by computing precision and recall
-    for ``thresholds`` buckets/thresholds (evenly distributed between 0 and 1).
-
-    Forward accepts
-
-    - ``preds`` (float tensor): ``(N, ...)`` (binary) or ``(N, C, ...)`` (multiclass) tensor
-      with probabilities, where C is the number of classes.
-
-    - ``target`` (long tensor): ``(N, ...)`` with integer labels
-
-    Args:
-        num_classes: integer with number of classes. Not necessary to provide
-            for binary problems.
-        thresholds: list or tensor with specific thresholds, or an integer giving the number of bins
-            to sample linearly between 0 and 1. More thresholds lead to a more detailed curve and
-            more accurate estimates, but are slower to compute and consume more memory
-        compute_on_step:
-            Forward only calls ``update()`` and returns None if this is set to False. default: True
-        process_group:
-            Specify the process group on which synchronization is called. default: None (which selects the entire world)
-
-    Raises:
-        ValueError:
-            If ``thresholds`` is not a list or tensor
-
-    Example (binary case):
-        >>> from paddlemetrics import BinnedAveragePrecision
-        >>> pred = B.tensor([0, 1, 2, 3])
-        >>> target = B.tensor([0, 1, 1, 1])
-        >>> average_precision = BinnedAveragePrecision(num_classes=1, thresholds=10)
-        >>> average_precision(pred, target)
-        tensor(1.0000)
-
-    Example (multiclass case):
-        >>> pred = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
-        ...                  [0.05, 0.75, 0.05, 0.05, 0.05],
-        ...                  [0.05, 0.05, 0.75, 0.05, 0.05],
-        ...                  [0.05, 0.05, 0.05, 0.75, 0.05]])
-        >>> target = B.tensor([0, 1, 3, 2])
-        >>> average_precision = BinnedAveragePrecision(num_classes=5, thresholds=10)
-        >>> average_precision(pred, target)
-        [tensor(1.0000), tensor(1.0000), tensor(0.2500), tensor(0.2500), tensor(-0.)]
-    """
-
-    def compute(self) -> Union[List[Tensor], Tensor]:  # type: ignore
-        precisions, recalls, _ = super().compute()
-        return _average_precision_compute_with_precision_recall(precisions, recalls, self.num_classes, average=None)
-
-
-class BinnedRecallAtFixedPrecision(BinnedPrecisionRecallCurve):
-    """Computes the highest possible recall value given the minimum precision thresholds provided.
-
-    Computation is performed in constant memory by computing precision and recall
-    for ``thresholds`` buckets/thresholds (evenly distributed between 0 and 1).
-
-    Forward accepts
-
-    - ``preds`` (float tensor): ``(N, ...)`` (binary) or ``(N, C, ...)`` (multiclass) tensor
-      with probabilities, where C is the number of classes.
-
-    - ``target`` (long tensor): ``(N, ...)`` with integer labels
-
-    Args:
-        num_classes: integer with number of classes. Provide 1 for binary problems.
-        min_precision: float value specifying minimum precision threshold.
-        thresholds: list or tensor with specific thresholds, or an integer giving the number of bins
-            to sample linearly between 0 and 1. More thresholds lead to a more detailed curve and
-            more accurate estimates, but are slower to compute and consume more memory
-        compute_on_step:
-            Forward only calls ``update()`` and returns None if this is set to False.
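The constant-memory trick in ``BinnedPrecisionRecallCurve.update`` above is that only per-threshold TP/FP/FN counters ever grow, never the raw predictions. Restated for a single class in numpy (illustrative):

```python
import numpy as np

def binned_counts(preds: np.ndarray, target: np.ndarray, thresholds: np.ndarray):
    tps = np.zeros(len(thresholds))
    fps = np.zeros(len(thresholds))
    fns = np.zeros(len(thresholds))
    t = target.astype(bool)
    for i, thr in enumerate(thresholds):   # one threshold at a time, as above
        p = preds >= thr
        tps[i] = (t & p).sum()
        fps[i] = (~t & p).sum()
        fns[i] = (t & ~p).sum()
    # precision and recall then follow as TP/(TP+FP) and TP/(TP+FN)
    return tps, fps, fns
```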
default: True - process_group: - Specify the process group on which synchronization is called. default: None (which selects the entire world) - - Raises: - ValueError: - If ``thresholds`` is not a list or tensor - - Example (binary case): - >>> from paddlemetrics import BinnedRecallAtFixedPrecision - >>> pred = B.tensor([0, 0.2, 0.5, 0.8]) - >>> target = B.tensor([0, 1, 1, 0]) - >>> average_precision = BinnedRecallAtFixedPrecision(num_classes=1, thresholds=10, min_precision=0.5) - >>> average_precision(pred, target) - (tensor(1.0000), tensor(0.1111)) - - Example (multiclass case): - >>> pred = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05], - ... [0.05, 0.75, 0.05, 0.05, 0.05], - ... [0.05, 0.05, 0.75, 0.05, 0.05], - ... [0.05, 0.05, 0.05, 0.75, 0.05]]) - >>> target = B.tensor([0, 1, 3, 2]) - >>> average_precision = BinnedRecallAtFixedPrecision(num_classes=5, thresholds=10, min_precision=0.5) - >>> average_precision(pred, target) # doctest: +NORMALIZE_WHITESPACE - (tensor([1.0000, 1.0000, 0.0000, 0.0000, 0.0000]), - tensor([6.6667e-01, 6.6667e-01, 1.0000e+06, 1.0000e+06, 1.0000e+06])) - """ - - def __init__( - self, - num_classes: int, - min_precision: float, - thresholds: Union[int, Tensor, List[float], None] = None, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - ) -> None: - super().__init__( - num_classes=num_classes, - thresholds=thresholds, - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - ) - self.min_precision = min_precision - - def compute(self) -> Tuple[Tensor, Tensor]: # type: ignore - """Returns float tensor of size n_classes.""" - precisions, recalls, thresholds = super().compute() - - if self.num_classes == 1: - return _recall_at_precision(precisions, recalls, thresholds, self.min_precision) - - recalls_at_p = B.zeros(self.num_classes, device=recalls[0].device, dtype=recalls[0].dtype) - thresholds_at_p = B.zeros(self.num_classes, device=thresholds[0].device, dtype=thresholds[0].dtype) - for i in range(self.num_classes): - recalls_at_p[i], thresholds_at_p[i] = _recall_at_precision( - precisions[i], recalls[i], thresholds[i], self.min_precision - ) - return recalls_at_p, thresholds_at_p diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/calibration_error.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/calibration_error.py deleted file mode 100644 index 5fc9d10a0..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/calibration_error.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
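# [Editor's sketch, not part of the original patch] The constant-memory update
# rule used by the binned curve metrics above, restated with NumPy. The helper
# name `binned_counts` and the use of NumPy are illustrative assumptions, not
# paddlemetrics API.
import numpy as np

def binned_counts(preds, target, thresholds):
    """Accumulate TP/FP/FN per (class, threshold) pair without storing preds."""
    n_classes = preds.shape[1]
    tps = np.zeros((n_classes, len(thresholds)))
    fps = np.zeros((n_classes, len(thresholds)))
    fns = np.zeros((n_classes, len(thresholds)))
    is_pos = target == 1  # boolean mask of shape (n_samples, n_classes)
    for i, t in enumerate(thresholds):  # one threshold at a time, as in update()
        pred_pos = preds >= t
        tps[:, i] = (is_pos & pred_pos).sum(axis=0)
        fps[:, i] = (~is_pos & pred_pos).sum(axis=0)
        fns[:, i] = (is_pos & ~pred_pos).sum(axis=0)
    return tps, fps, fns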
-from typing import Any, List, Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.functional.classification.calibration_error import _ce_compute, _ce_update -from paddlemetrics.metric import Metric -from paddlemetrics.utilities.data import dim_zero_cat - - -class CalibrationError(Metric): - r""" - - `Computes the Top-label Calibration Error`_ - Three different norms are implemented, each corresponding to variations on the calibration error metric. - - L1 norm (Expected Calibration Error) - - .. math:: - \text{ECE} = \frac{1}{N}\sum_i^N \|(p_i - c_i)\| - - Infinity norm (Maximum Calibration Error) - - .. math:: - \text{MCE} = \max_{i} (p_i - c_i) - - L2 norm (Root Mean Square Calibration Error) - - .. math:: - \text{RMSCE} = \sqrt{\frac{1}{N}\sum_i^N (p_i - c_i)^2} - - Where :math:`p_i` is the top-1 prediction accuracy in bin i - and :math:`c_i` is the average confidence of predictions in bin i. - - .. note:: - L2-norm debiasing is not yet supported. - - Args: - n_bins: Number of bins to use when computing probabilities and accuracies. - norm: Norm used to compare empirical and expected probability bins. - Defaults to "l1", or Expected Calibration Error. - debias: Applies debiasing term, only implemented for l2 norm. Defaults to True. - compute_on_step: Forward only calls ``update()`` and return None if this is set to False. - dist_sync_on_step: Synchronize metric state across processes at each ``forward()`` - before returning the value at the step - process_group: Specify the process group on which synchronization is called. - default: None (which selects the entire world) - """ - DISTANCES = {"l1", "l2", "max"} - confidences: List[Tensor] - accuracies: List[Tensor] - - def __init__( - self, - n_bins: int = 15, - norm: str = "l1", - compute_on_step: bool = False, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - ): - - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=None, - ) - - if norm not in self.DISTANCES: - raise ValueError(f"Norm {norm} is not supported. Please select from l1, l2, or max.") - - if not isinstance(n_bins, int) or n_bins <= 0: - raise ValueError(f"Expected argument `n_bins` to be an int larger than 0 but got {n_bins}") - self.n_bins = n_bins - self.register_buffer("bin_boundaries", B.linspace(0, 1, n_bins + 1)) - self.norm = norm - - self.add_state("confidences", [], dist_reduce_fx="cat") - self.add_state("accuracies", [], dist_reduce_fx="cat") - - def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore - """Computes top-level confidences and accuracies for the input probabilities and appends them to internal - state. - - Args: - preds (Tensor): Model output probabilities. - target (Tensor): Ground-truth target class labels. - """ - confidences, accuracies = _ce_update(preds, target) - - self.confidences.append(confidences) - self.accuracies.append(accuracies) - - def compute(self) -> Tensor: - """Computes calibration error across all confidences and accuracies. - - Returns: - Tensor: Calibration error across previously collected examples.
- """ - confidences = dim_zero_cat(self.confidences) - accuracies = dim_zero_cat(self.accuracies) - return _ce_compute(confidences, accuracies, self.bin_boundaries, norm=self.norm) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/cohen_kappa.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/cohen_kappa.py deleted file mode 100644 index 3a4817cf4..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/cohen_kappa.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.functional.classification.cohen_kappa import _cohen_kappa_compute, _cohen_kappa_update -from paddlemetrics.metric import Metric - - -class CohenKappa(Metric): - r""" - Calculates `Cohen's kappa score`_ that measures - inter-annotator agreement. It is defined as - - .. math:: - \kappa = (p_o - p_e) / (1 - p_e) - - where :math:`p_o` is the empirical probability of agreement and :math:`p_e` is - the expected agreement when both annotators assign labels randomly. Note that - :math:`p_e` is estimated using a per-annotator empirical prior over the - class labels. - - Works with binary, multiclass, and multilabel data. Accepts probabilities from a model output or - integer class values in prediction. Works with multi-dimensional preds and target. - - Forward accepts - - ``preds`` (float or long tensor): ``(N, ...)`` or ``(N, C, ...)`` where C is the number of classes - - - ``target`` (long tensor): ``(N, ...)`` - - If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument - to convert into integer labels. This is the case for binary and multi-label probabilities or logits. - - If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``. - - Args: - num_classes: Number of classes in the dataset. - - weights: Weighting type to calculate the score. Choose from - - ``None`` or ``'none'``: no weighting - - ``'linear'``: linear weighting - - ``'quadratic'``: quadratic weighting - - threshold: - Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case - of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. - - compute_on_step: - Forward only calls ``update()`` and return None if this is set to False. default: True - - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. default: False - - process_group: - Specify the process group on which synchronization is called. 
default: None (which selects the entire world) - - Example: - >>> from paddlemetrics import CohenKappa - >>> target = B.tensor([1, 1, 0, 0]) - >>> preds = B.tensor([0, 1, 0, 0]) - >>> cohenkappa = CohenKappa(num_classes=2) - >>> cohenkappa(preds, target) - tensor(0.5000) - - """ - is_differentiable = False - confmat: Tensor - - def __init__( - self, - num_classes: int, - weights: Optional[str] = None, - threshold: float = 0.5, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - ) -> None: - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - ) - self.num_classes = num_classes - self.weights = weights - self.threshold = threshold - - allowed_weights = ("linear", "quadratic", "none", None) - if self.weights not in allowed_weights: - raise ValueError(f"Argument weights needs to be one of the following: {allowed_weights}") - - self.add_state("confmat", default=B.zeros(num_classes, num_classes), dist_reduce_fx="sum") - - def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore - """Update state with predictions and targets. - - Args: - preds: Predictions from model - target: Ground truth values - """ - confmat = _cohen_kappa_update(preds, target, self.num_classes, self.threshold) - self.confmat += confmat - - def compute(self) -> Tensor: - """Computes Cohen's kappa score.""" - return _cohen_kappa_compute(self.confmat, self.weights) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/confusion_matrix.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/confusion_matrix.py deleted file mode 100644 index a3485570d..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/confusion_matrix.py +++ /dev/null @@ -1,141 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.functional.classification.confusion_matrix import _confusion_matrix_compute, _confusion_matrix_update -from paddlemetrics.metric import Metric - - -class ConfusionMatrix(Metric): - r""" - Computes the `confusion matrix`_. Works with binary, - multiclass, and multilabel data. Accepts probabilities or logits from a model output or integer class - values in prediction. Works with multi-dimensional preds and target, but it should be noted that - additional dimensions will be flattened. - - Forward accepts - - - ``preds`` (float or long tensor): ``(N, ...)`` or ``(N, C, ...)`` where C is the number of classes - - ``target`` (long tensor): ``(N, ...)`` - - If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument - to convert into integer labels. This is the case for binary and multi-label probabilities or logits. - - If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``.
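# [Editor's sketch, not part of the original patch] The preds-to-labels
# convention described above, i.e. threshold float inputs that match the
# target's shape and argmax inputs that carry an extra class dimension.
# `to_int_labels` is an illustrative helper, not paddlemetrics API.
import numpy as np

def to_int_labels(preds, target, threshold=0.5):
    if preds.shape == target.shape and preds.dtype.kind == "f":
        return (preds >= threshold).astype(int)  # binary / multi-label probabilities
    return preds.argmax(axis=1)  # multi-class scores: argmax on dim=1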
- - If working with multilabel data, setting the `multilabel` argument to `True` will make sure that a - `confusion matrix gets calculated per label`_. - - Args: - num_classes: Number of classes in the dataset. - normalize: Normalization mode for confusion matrix. Choose from - - - ``None`` or ``'none'``: no normalization (default) - - ``'true'``: normalization over the targets (most commonly used) - - ``'pred'``: normalization over the predictions - - ``'all'``: normalization over the whole matrix - - threshold: - Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case - of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. - - multilabel: - determines if data is multilabel or not. - compute_on_step: - Forward only calls ``update()`` and return None if this is set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. default: False - process_group: - Specify the process group on which synchronization is called. default: None (which selects the entire world) - - Example (binary data): - >>> from paddlemetrics import ConfusionMatrix - >>> target = B.tensor([1, 1, 0, 0]) - >>> preds = B.tensor([0, 1, 0, 0]) - >>> confmat = ConfusionMatrix(num_classes=2) - >>> confmat(preds, target) - tensor([[2., 0.], - [1., 1.]]) - - Example (multiclass data): - >>> target = B.tensor([2, 1, 0, 0]) - >>> preds = B.tensor([2, 1, 0, 1]) - >>> confmat = ConfusionMatrix(num_classes=3) - >>> confmat(preds, target) - tensor([[1., 1., 0.], - [0., 1., 0.], - [0., 0., 1.]]) - - Example (multilabel data): - >>> target = B.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = B.tensor([[0, 0, 1], [1, 0, 1]]) - >>> confmat = ConfusionMatrix(num_classes=3, multilabel=True) - >>> confmat(preds, target) # doctest: +NORMALIZE_WHITESPACE - tensor([[[1., 0.], [0., 1.]], - [[1., 0.], [1., 0.]], - [[0., 1.], [0., 1.]]]) - - """ - is_differentiable = False - confmat: Tensor - - def __init__( - self, - num_classes: int, - normalize: Optional[str] = None, - threshold: float = 0.5, - multilabel: bool = False, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - ) -> None: - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - ) - self.num_classes = num_classes - self.normalize = normalize - self.threshold = threshold - self.multilabel = multilabel - - allowed_normalize = ("true", "pred", "all", "none", None) - if self.normalize not in allowed_normalize: - raise ValueError(f"Argument `normalize` needs to be one of the following: {allowed_normalize}") - - default = B.zeros(num_classes, 2, 2) if multilabel else B.zeros(num_classes, num_classes) - self.add_state("confmat", default=default, dist_reduce_fx="sum") - - def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore - """Update state with predictions and targets. - - Args: - preds: Predictions from model - target: Ground truth values - """ - confmat = _confusion_matrix_update(preds, target, self.num_classes, self.threshold, self.multilabel) - self.confmat += confmat - - def compute(self) -> Tensor: - """Computes confusion matrix.
- - Returns: - If `multilabel=False` this will be a `[n_classes, n_classes]` tensor and if `multilabel=True` - this will be a `[n_classes, 2, 2]` tensor - """ - return _confusion_matrix_compute(self.confmat, self.normalize) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/f_beta.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/f_beta.py deleted file mode 100644 index 4b24dc0e9..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/f_beta.py +++ /dev/null @@ -1,301 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Callable, Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.classification.stat_scores import StatScores -from paddlemetrics.functional.classification.f_beta import _fbeta_compute -from paddlemetrics.utilities.enums import AverageMethod - - -class FBeta(StatScores): - r""" - Computes `F-score`_, specifically: - - .. math:: - F_\beta = (1 + \beta^2) * \frac{\text{precision} * \text{recall}} - {(\beta^2 * \text{precision}) + \text{recall}} - - Where :math:`\beta` is some positive real factor. Works with binary, multiclass, and multilabel data. - Accepts logit scores or probabilities from a model output or integer class values in prediction. - Works with multi-dimensional preds and target. - - Forward accepts - - - ``preds`` (float or long tensor): ``(N, ...)`` or ``(N, C, ...)`` where C is the number of classes - - ``target`` (long tensor): ``(N, ...)`` - - If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument - to convert into integer labels. This is the case for binary and multi-label logits and probabilities. - - If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``. - - Args: - num_classes: - Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods. - beta: - Beta coefficient in the F measure. - threshold: - Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case - of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. - average: - Defines the reduction that is applied. Should be one of the following: - - - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes. - - ``'macro'``: Calculate the metric for each class separately, and average the - metrics across classes (with equal weights for each class). - - ``'weighted'``: Calculate the metric for each class separately, and average the - metrics across classes, weighting each class by its support (``tp + fn``). - - ``'none'`` or ``None``: Calculate the metric for each class separately, and return - the metric for every class. - - ``'samples'``: Calculate the metric for each sample, and average the metrics - across samples (with equal weights for each sample). - - .. 
note:: What is considered a sample in the multi-dimensional multi-class case - depends on the value of ``mdmc_average``. - - .. note:: If ``'none'`` and a given class doesn't occur in the `preds` or `target`, - the value for the class will be ``nan``. - - mdmc_average: - Defines how averaging is done for multi-dimensional multi-class inputs (on top of the - ``average`` parameter). Should be one of the following: - - - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional - multi-class. - - - ``'samplewise'``: In this case, the statistics are computed separately for each - sample on the ``N`` axis, and then averaged over samples. - The computation for each sample is done by treating the flattened extra axes ``...`` - (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample, - and computing the metric for the sample based on that. - - - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs - (see :ref:`references/modules:input types`) - are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they - were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. - - ignore_index: - Integer specifying a target class to ignore. If given, this class index does not contribute - to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` - or ``'none'``, the score for the ignored class will be returned as ``nan``. - - top_k: - Number of highest probability or logit score predictions considered to find the correct label, - relevant only for (multi-dimensional) multi-class inputs. The - default value (``None``) will be interpreted as 1 for these inputs. - - Should be left at default (``None``) for all other types of inputs. - - multiclass: - Used only in certain special cases, where you want to treat inputs as a different type - than what they appear to be. See the parameter's - :ref:`documentation section ` - for a more detailed explanation and examples. - - compute_on_step: - Forward only calls ``update()`` and return ``None`` if this is set to ``False``. - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step - process_group: - Specify the process group on which synchronization is called. - default: ``None`` (which selects the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. When ``None``, DDP - will be used to perform the allgather. - - Raises: - ValueError: - If ``average`` is none of ``"micro"``, ``"macro"``, ``"weighted"``, ``"none"``, ``None``. 
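# [Editor's sketch, not part of the original patch] The F-beta formula from
# the docstring above as plain Python; beta < 1 weighs precision more, beta > 1
# weighs recall more, and beta = 1 reduces to the harmonic mean used by F1 below.
def f_beta_score(precision: float, recall: float, beta: float) -> float:
    b2 = beta ** 2
    return (1 + b2) * precision * recall / (b2 * precision + recall)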
- - Example: - >>> from paddlemetrics import FBeta - >>> target = B.tensor([0, 1, 2, 0, 1, 2]) - >>> preds = B.tensor([0, 2, 1, 0, 0, 1]) - >>> f_beta = FBeta(num_classes=3, beta=0.5) - >>> f_beta(preds, target) - tensor(0.3333) - - """ - - def __init__( - self, - num_classes: Optional[int] = None, - beta: float = 1.0, - threshold: float = 0.5, - average: str = "micro", - mdmc_average: Optional[str] = None, - ignore_index: Optional[int] = None, - top_k: Optional[int] = None, - multiclass: Optional[bool] = None, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ) -> None: - self.beta = beta - allowed_average = list(AverageMethod) - if average not in allowed_average: - raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.") - - super().__init__( - reduce="macro" if average in [AverageMethod.WEIGHTED, AverageMethod.NONE] else average, - mdmc_reduce=mdmc_average, - threshold=threshold, - top_k=top_k, - num_classes=num_classes, - multiclass=multiclass, - ignore_index=ignore_index, - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - - self.average = average - - def compute(self) -> Tensor: - """Computes fbeta over state.""" - tp, fp, tn, fn = self._get_final_stats() - return _fbeta_compute(tp, fp, tn, fn, self.beta, self.ignore_index, self.average, self.mdmc_reduce) - - -class F1(FBeta): - """Computes F1 metric. F1 metrics correspond to a harmonic mean of the precision and recall scores. - - Works with binary, multiclass, and multilabel data. Accepts logits or probabilities from a model - output or integer class values in prediction. Works with multi-dimensional preds and target. - - Forward accepts - - - ``preds`` (float or long tensor): ``(N, ...)`` or ``(N, C, ...)`` where C is the number of classes - - ``target`` (long tensor): ``(N, ...)`` - - If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument. - This is the case for binary and multi-label logits. - - If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``. - - Args: - num_classes: - Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods. - threshold: - Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case - of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. - average: - Defines the reduction that is applied. Should be one of the following: - - - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes. - - ``'macro'``: Calculate the metric for each class separately, and average the - metrics across classes (with equal weights for each class). - - ``'weighted'``: Calculate the metric for each class separately, and average the - metrics across classes, weighting each class by its support (``tp + fn``). - - ``'none'`` or ``None``: Calculate the metric for each class separately, and return - the metric for every class. - - ``'samples'``: Calculate the metric for each sample, and average the metrics - across samples (with equal weights for each sample). - - .. note:: What is considered a sample in the multi-dimensional multi-class case - depends on the value of ``mdmc_average``. 
- - mdmc_average: - Defines how averaging is done for multi-dimensional multi-class inputs (on top of the - ``average`` parameter). Should be one of the following: - - - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional - multi-class. - - - ``'samplewise'``: In this case, the statistics are computed separately for each - sample on the ``N`` axis, and then averaged over samples. - The computation for each sample is done by treating the flattened extra axes ``...`` - (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample, - and computing the metric for the sample based on that. - - - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs - (see :ref:`references/modules:input types`) - are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they - were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. - - ignore_index: - Integer specifying a target class to ignore. If given, this class index does not contribute - to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` - or ``'none'``, the score for the ignored class will be returned as ``nan``. - - top_k: - Number of highest probability or logit score predictions considered to find the correct label, - relevant only for (multi-dimensional) multi-class inputs. The - default value (``None``) will be interpreted as 1 for these inputs. - - Should be left at default (``None``) for all other types of inputs. - multiclass: - Used only in certain special cases, where you want to treat inputs as a different type - than what they appear to be. See the parameter's - :ref:`documentation section ` - for a more detailed explanation and examples. - - compute_on_step: - Forward only calls ``update()`` and return ``None`` if this is set to ``False``. - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step - process_group: - Specify the process group on which synchronization is called. - default: ``None`` (which selects the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. When ``None``, DDP - will be used to perform the allgather. 
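# [Editor's sketch, not part of the original patch] For single-label
# multi-class inputs with average='micro', every wrong prediction is both a FP
# (for the predicted class) and a FN (for the true class), so micro-F1 equals
# micro-precision and micro-recall. With the doctest values below (tp=2, fp=4,
# fn=4) this gives 2/6, i.e. the tensor(0.3333) shown.
def micro_f1(tp: int, fp: int, fn: int) -> float:
    p = tp / (tp + fp)
    r = tp / (tp + fn)
    return 2 * p * r / (p + r)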
- - - Example: - >>> from paddlemetrics import F1 - >>> target = B.tensor([0, 1, 2, 0, 1, 2]) - >>> preds = B.tensor([0, 2, 1, 0, 0, 1]) - >>> f1 = F1(num_classes=3) - >>> f1(preds, target) - tensor(0.3333) - """ - - is_differentiable = False - - def __init__( - self, - num_classes: Optional[int] = None, - threshold: float = 0.5, - average: str = "micro", - mdmc_average: Optional[str] = None, - ignore_index: Optional[int] = None, - top_k: Optional[int] = None, - multiclass: Optional[bool] = None, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ) -> None: - super().__init__( - num_classes=num_classes, - beta=1.0, - threshold=threshold, - average=average, - mdmc_average=mdmc_average, - ignore_index=ignore_index, - top_k=top_k, - multiclass=multiclass, - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/hamming_distance.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/hamming_distance.py deleted file mode 100644 index 855d7f7e8..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/hamming_distance.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Callable, Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.functional.classification.hamming_distance import _hamming_distance_compute, _hamming_distance_update -from paddlemetrics.metric import Metric - - -class HammingDistance(Metric): - r""" - Computes the average `Hamming distance`_ (also - known as Hamming loss) between targets and predictions: - - .. math:: - \text{Hamming distance} = \frac{1}{N \cdot L}\sum_i^N \sum_l^L 1(y_{il} \neq \hat{y_{il}}) - - Where :math:`y` is a tensor of target values, :math:`\hat{y}` is a tensor of predictions, - and :math:`\bullet_{il}` refers to the :math:`l`-th label of the :math:`i`-th sample of that - tensor. - - This is the same as ``1-accuracy`` for binary data, while for all other types of inputs it - treats each possible label separately - meaning that, for example, multi-class data is - treated as if it were multi-label. - - Accepts all input types listed in :ref:`references/modules:input types`. - - Args: - threshold: - Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case - of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. - compute_on_step: - Forward only calls ``update()`` and return ``None`` if this is set to ``False``. - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. - process_group: - Specify the process group on which synchronization is called. 
- default: ``None`` (which selects the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. When ``None``, DDP - will be used to perform the all gather. - - Raises: - ValueError: - If ``threshold`` is not between ``0`` and ``1``. - - Example: - >>> from paddlemetrics import HammingDistance - >>> target = B.tensor([[0, 1], [1, 1]]) - >>> preds = B.tensor([[0, 1], [0, 1]]) - >>> hamming_distance = HammingDistance() - >>> hamming_distance(preds, target) - tensor(0.2500) - - """ - is_differentiable = False - correct: Tensor - total: Tensor - - def __init__( - self, - threshold: float = 0.5, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ) -> None: - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - - self.add_state("correct", default=tensor(0), dist_reduce_fx="sum") - self.add_state("total", default=tensor(0), dist_reduce_fx="sum") - - self.threshold = threshold - - def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore - """Update state with predictions and targets. See - :ref:`references/modules:input types` for more information on input - types. - - Args: - preds: Predictions from model (probabilities, logits or labels) - target: Ground truth labels - """ - correct, total = _hamming_distance_update(preds, target, self.threshold) - - self.correct += correct - self.total += total - - def compute(self) -> Tensor: - """Computes hamming distance based on inputs passed in to ``update`` previously.""" - return _hamming_distance_compute(self.correct, self.total) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/hinge.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/hinge.py deleted file mode 100644 index 099864a35..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/hinge.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Callable, Optional, Union - -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.functional.classification.hinge import MulticlassMode, _hinge_compute, _hinge_update -from paddlemetrics.metric import Metric - - -class Hinge(Metric): - r""" - Computes the mean `Hinge loss`_, typically used for Support Vector - Machines (SVMs). In the binary case it is defined as: - - .. math:: - \text{Hinge loss} = \max(0, 1 - y \times \hat{y}) - - Where :math:`y \in {-1, 1}` is the target, and :math:`\hat{y} \in \mathbb{R}` is the prediction. - - In the multi-class case, when ``multiclass_mode=None`` (default), ``multiclass_mode=MulticlassMode.CRAMMER_SINGER`` - or ``multiclass_mode="crammer-singer"``, this metric will compute the multi-class hinge loss defined by Crammer and - Singer as: - - .. 
math:: - \text{Hinge loss} = \max\left(0, 1 - \hat{y}_y + \max_{i \ne y} (\hat{y}_i)\right) - - Where :math:`y \in {0, ..., \mathrm{C}}` is the target class (where :math:`\mathrm{C}` is the number of classes), - and :math:`\hat{y} \in \mathbb{R}^\mathrm{C}` is the predicted output per class. - - In the multi-class case when ``multiclass_mode=MulticlassMode.ONE_VS_ALL`` or ``multiclass_mode='one-vs-all'``, this - metric will use a one-vs-all approach to compute the hinge loss, giving a vector of C outputs where each entry pits - that class against all remaining classes. - - This metric can optionally output the mean of the squared hinge loss by setting ``squared=True`` - - Only accepts inputs with preds shape of (N) (binary) or (N, C) (multi-class) and target shape of (N). - - Args: - squared: - If True, this will compute the squared hinge loss. Otherwise, computes the regular hinge loss (default). - multiclass_mode: - Which approach to use for multi-class inputs (has no effect in the binary case). ``None`` (default), - ``MulticlassMode.CRAMMER_SINGER`` or ``"crammer-singer"``, uses the Crammer Singer multi-class hinge loss. - ``MulticlassMode.ONE_VS_ALL`` or ``"one-vs-all"`` computes the hinge loss in a one-vs-all fashion. - - Raises: - ValueError: - If ``multiclass_mode`` is not: None, ``MulticlassMode.CRAMMER_SINGER``, ``"crammer-singer"``, - ``MulticlassMode.ONE_VS_ALL`` or ``"one-vs-all"``. - - Example (binary case): - >>> import paddleext.torchapi as B - >>> from paddlemetrics import Hinge - >>> target = B.tensor([0, 1, 1]) - >>> preds = B.tensor([-2.2, 2.4, 0.1]) - >>> hinge = Hinge() - >>> hinge(preds, target) - tensor(0.3000) - - Example (default / multiclass case): - >>> target = B.tensor([0, 1, 2]) - >>> preds = B.tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]]) - >>> hinge = Hinge() - >>> hinge(preds, target) - tensor(2.9000) - - Example (multiclass example, one vs all mode): - >>> target = B.tensor([0, 1, 2]) - >>> preds = B.tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]]) - >>> hinge = Hinge(multiclass_mode="one-vs-all") - >>> hinge(preds, target) - tensor([2.2333, 1.5000, 1.2333]) - - """ - is_differentiable = True - measure: Tensor - total: Tensor - - def __init__( - self, - squared: bool = False, - multiclass_mode: Optional[Union[str, MulticlassMode]] = None, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ) -> None: - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - - self.add_state("measure", default=tensor(0.0), dist_reduce_fx="sum") - self.add_state("total", default=tensor(0), dist_reduce_fx="sum") - - if multiclass_mode not in (None, MulticlassMode.CRAMMER_SINGER, MulticlassMode.ONE_VS_ALL): - raise ValueError( - "The `multiclass_mode` should be either None / 'crammer-singer' / MulticlassMode.CRAMMER_SINGER" - "(default) or 'one-vs-all' / MulticlassMode.ONE_VS_ALL," - f" got {multiclass_mode}."
- ) - - self.squared = squared - self.multiclass_mode = multiclass_mode - - def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore - measure, total = _hinge_update(preds, target, squared=self.squared, multiclass_mode=self.multiclass_mode) - - self.measure = measure + self.measure - self.total = total + self.total - - def compute(self) -> Tensor: - return _hinge_compute(self.measure, self.total) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/iou.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/iou.py deleted file mode 100644 index 9e89946a1..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/iou.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.classification.confusion_matrix import ConfusionMatrix -from paddlemetrics.functional.classification.iou import _iou_from_confmat - - -class IoU(ConfusionMatrix): - r""" - Computes Intersection over union, or `Jaccard index`_: - - .. math:: J(A,B) = \frac{|A\cap B|}{|A\cup B|} - - Where: :math:`A` and :math:`B` are both tensors of the same size, containing integer class values. - They may be subject to conversion from input data (see description below). Note that it is different from box IoU. - - Works with binary, multiclass and multi-label data. - Accepts probabilities from a model output or integer class values in prediction. - Works with multi-dimensional preds and target. - - Forward accepts - - - ``preds`` (float or long tensor): ``(N, ...)`` or ``(N, C, ...)`` where C is the number of classes - - ``target`` (long tensor): ``(N, ...)`` - - If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument - to convert into integer labels. This is the case for binary and multi-label probabilities. - - If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``. - - Args: - num_classes: Number of classes in the dataset. - ignore_index: optional int specifying a target class to ignore. If given, this class index does not contribute - to the returned score, regardless of reduction method. Has no effect if given an int that is not in the - range [0, num_classes-1]. By default, no index is ignored, and all classes are used. - absent_score: score to use for an individual class, if no instances of the class index were present in - `pred` AND no instances of the class index were present in `target`. For example, if we have 3 classes, - [0, 0] for `pred`, and [0, 2] for `target`, then class 1 would be assigned the `absent_score`. - threshold: - Threshold value for binary or multi-label probabilities. - reduction: a method to reduce metric score over labels. 
- - - ``'elementwise_mean'``: takes the mean (default) - - ``'sum'``: takes the sum - - ``'none'``: no reduction will be applied - - compute_on_step: - Forward only calls ``update()`` and return None if this is set to False. - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. - process_group: - Specify the process group on which synchronization is called. default: None (which selects the entire world) - - Example: - >>> from paddlemetrics import IoU - >>> target = B.randint(0, 2, (10, 25, 25)) - >>> pred = B.tensor(target) - >>> pred[2:5, 7:13, 9:15] = 1 - pred[2:5, 7:13, 9:15] - >>> iou = IoU(num_classes=2) - >>> iou(pred, target) - tensor(0.9660) - - """ - is_differentiable = False - - def __init__( - self, - num_classes: int, - ignore_index: Optional[int] = None, - absent_score: float = 0.0, - threshold: float = 0.5, - reduction: str = "elementwise_mean", - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - ) -> None: - super().__init__( - num_classes=num_classes, - normalize=None, - threshold=threshold, - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - ) - self.reduction = reduction - self.ignore_index = ignore_index - self.absent_score = absent_score - - def compute(self) -> Tensor: - """Computes intersection over union (IoU)""" - return _iou_from_confmat(self.confmat, self.num_classes, self.ignore_index, self.absent_score, self.reduction) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/kl_divergence.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/kl_divergence.py deleted file mode 100644 index cce887f09..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/kl_divergence.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Callable, Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.functional.classification.kl_divergence import _kld_compute, _kld_update -from paddlemetrics.metric import Metric -from paddlemetrics.utilities.data import dim_zero_cat - - -class KLDivergence(Metric): - r"""Computes the `KL divergence`_: - - .. math:: - D_{KL}(P||Q) = \sum_{x\in\mathcal{X}} P(x) \log\frac{P(x)}{Q(x)} - - Where :math:`P` and :math:`Q` are probability distributions where :math:`P` usually represents a distribution - over data and :math:`Q` is often a prior or approximation of :math:`P`. It should be noted that the KL divergence - is a non-symmetric metric i.e. :math:`D_{KL}(P||Q) \neq D_{KL}(Q||P)`. - - Args: - p: data distribution with shape ``[N, d]`` - q: prior or approximate distribution with shape ``[N, d]`` - log_prob: bool indicating if input is log-probabilities or probabilities.
If given as probabilities, - will normalize to make sure the distributions sum to 1 - reduction: - Determines how to reduce over the ``N``/batch dimension: - - - ``'mean'`` [default]: Averages score across samples - - ``'sum'``: Sum score across samples - - ``'none'`` or ``None``: Returns score per sample - - Raises: - TypeError: - If ``log_prob`` is not a ``bool`` - ValueError: - If ``reduction`` is not one of ``'mean'``, ``'sum'``, ``'none'`` or ``None`` - - .. note:: - Half precision is only supported on GPU for this metric - - Example: - >>> import paddleext.torchapi as B - >>> from paddlemetrics.functional import kl_divergence - >>> p = B.tensor([[0.36, 0.48, 0.16]]) - >>> q = B.tensor([[1/3, 1/3, 1/3]]) - >>> kl_divergence(p, q) - tensor(0.0853) - - """ - is_differentiable = True - # TODO: cannot be used because of scripting - # measures: Union[List[Tensor], Tensor] - total: Tensor - - def __init__( - self, - log_prob: bool = False, - reduction: Optional[str] = "mean", - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ) -> None: - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - if not isinstance(log_prob, bool): - raise TypeError(f"Expected argument `log_prob` to be bool but got {log_prob}") - self.log_prob = log_prob - - allowed_reduction = ["mean", "sum", "none", None] - if reduction not in allowed_reduction: - raise ValueError(f"Expected argument `reduction` to be one of {allowed_reduction} but got {reduction}") - self.reduction = reduction - - if self.reduction in ["mean", "sum"]: - self.add_state("measures", B.zeros(1), dist_reduce_fx="sum") - else: - self.add_state("measures", [], dist_reduce_fx="cat") - self.add_state("total", B.zeros(1), dist_reduce_fx="sum") - - def update(self, p: Tensor, q: Tensor) -> None: # type: ignore - measures, total = _kld_update(p, q, self.log_prob) - if self.reduction is None or self.reduction == "none": - self.measures.append(measures) - else: - self.measures += measures.sum() - self.total += total - - def compute(self) -> Tensor: - measures = dim_zero_cat(self.measures) if self.reduction is None or self.reduction == "none" else self.measures - return _kld_compute(measures, self.total, self.reduction) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/matthews_corrcoef.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/matthews_corrcoef.py deleted file mode 100644 index 2ea52673b..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/matthews_corrcoef.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
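# [Editor's sketch, not part of the original patch] The KL-divergence sum from
# the KLDivergence docstring above, for discrete probability vectors p and q.
import numpy as np

def kl_div(p, q):
    p, q = np.asarray(p, dtype=float), np.asarray(q, dtype=float)
    return float(np.sum(p * np.log(p / q)))

# kl_div([0.36, 0.48, 0.16], [1/3, 1/3, 1/3]) ~= 0.0853, matching the doctest.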
-from typing import Any, Callable, Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.functional.classification.matthews_corrcoef import ( - _matthews_corrcoef_compute, - _matthews_corrcoef_update, -) -from paddlemetrics.metric import Metric - - -class MatthewsCorrcoef(Metric): - r""" - Calculates `Matthews correlation coefficient`_ that measures - the general correlation or quality of a classification. In the binary case it - is defined as: - - .. math:: - MCC = \frac{TP*TN - FP*FN}{\sqrt{(TP+FP)*(TP+FN)*(TN+FP)*(TN+FN)}} - - where TP, TN, FP and FN are respectively the true positives, true negatives, - false positives and false negatives. Also works in the case of multi-label or - multi-class input. - - Note: - This metric produces a multi-dimensional output, so it cannot be directly logged. - - Forward accepts - - - ``preds`` (float or long tensor): ``(N, ...)`` or ``(N, C, ...)`` where C is the number of classes - - ``target`` (long tensor): ``(N, ...)`` - - If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument - to convert into integer labels. This is the case for binary and multi-label probabilities. - - If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``. - - Args: - num_classes: Number of classes in the dataset. - threshold: - Threshold value for binary or multi-label probabilities. default: 0.5 - compute_on_step: - Forward only calls ``update()`` and return None if this is set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. default: False - process_group: - Specify the process group on which synchronization is called. default: None (which selects the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. When ``None``, DDP - will be used to perform the allgather - - Example: - >>> from paddlemetrics import MatthewsCorrcoef - >>> target = B.tensor([1, 1, 0, 0]) - >>> preds = B.tensor([0, 1, 0, 0]) - >>> matthews_corrcoef = MatthewsCorrcoef(num_classes=2) - >>> matthews_corrcoef(preds, target) - tensor(0.5774) - - """ - is_differentiable = False - confmat: Tensor - - def __init__( - self, - num_classes: int, - threshold: float = 0.5, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ) -> None: - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - self.num_classes = num_classes - self.threshold = threshold - - self.add_state("confmat", default=B.zeros(num_classes, num_classes), dist_reduce_fx="sum") - - def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore - """Update state with predictions and targets.
- - Args: - preds: Predictions from model - target: Ground truth values - """ - confmat = _matthews_corrcoef_update(preds, target, self.num_classes, self.threshold) - self.confmat += confmat - - def compute(self) -> Tensor: - """Computes Matthews correlation coefficient.""" - return _matthews_corrcoef_compute(self.confmat) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/precision_recall.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/precision_recall.py deleted file mode 100644 index 77920cfc9..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/precision_recall.py +++ /dev/null @@ -1,320 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Callable, Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.classification.stat_scores import StatScores -from paddlemetrics.functional.classification.precision_recall import _precision_compute, _recall_compute - - -class Precision(StatScores): - r""" - Computes `Precision`_: - - .. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}} - - Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and - false positives respectively. With the use of ``top_k`` parameter, this metric can - generalize to Precision@K. - - The reduction method (how the precision scores are aggregated) is controlled by the - ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the - multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`. - - Args: - num_classes: - Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods. - threshold: - Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case - of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. - average: - Defines the reduction that is applied. Should be one of the following: - - - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes. - - ``'macro'``: Calculate the metric for each class separately, and average the - metrics across classes (with equal weights for each class). - - ``'weighted'``: Calculate the metric for each class separately, and average the - metrics across classes, weighting each class by its support (``tp + fn``). - - ``'none'`` or ``None``: Calculate the metric for each class separately, and return - the metric for every class. - - ``'samples'``: Calculate the metric for each sample, and average the metrics - across samples (with equal weights for each sample). - - .. note:: What is considered a sample in the multi-dimensional multi-class case - depends on the value of ``mdmc_average``. - - mdmc_average: - Defines how averaging is done for multi-dimensional multi-class inputs (on top of the - ``average`` parameter).
Should be one of the following: - - - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional - multi-class. - - - ``'samplewise'``: In this case, the statistics are computed separately for each - sample on the ``N`` axis, and then averaged over samples. - The computation for each sample is done by treating the flattened extra axes ``...`` - (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample, - and computing the metric for the sample based on that. - - - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs - (see :ref:`references/modules:input types`) - are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they - were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. - - ignore_index: - Integer specifying a target class to ignore. If given, this class index does not contribute - to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` - or ``'none'``, the score for the ignored class will be returned as ``nan``. - - top_k: - Number of highest probability or logit score predictions considered to find the correct label, - relevant only for (multi-dimensional) multi-class inputs. The - default value (``None``) will be interpreted as 1 for these inputs. - - Should be left at default (``None``) for all other types of inputs. - - multiclass: - Used only in certain special cases, where you want to treat inputs as a different type - than what they appear to be. See the parameter's - :ref:`documentation section ` - for a more detailed explanation and examples. - - compute_on_step: - Forward only calls ``update()`` and return ``None`` if this is set to ``False``. - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step - process_group: - Specify the process group on which synchronization is called. - default: ``None`` (which selects the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. When ``None``, DDP - will be used to perform the allgather. - - Raises: - ValueError: - If ``average`` is none of ``"micro"``, ``"macro"``, ``"weighted"``, ``"samples"``, ``"none"``, ``None``. 
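# [Editor's sketch, not part of the original patch] Micro vs. macro averaging
# as described above, from per-class tp/fp counts (illustrative arrays, not
# library state). For the doctest below, tp=[0, 0, 1] and fp=[1, 1, 1], giving
# 1/4 = 0.2500 micro and mean(0, 0, 0.5) = 0.1667 macro.
import numpy as np

def precision_micro(tp, fp):
    return tp.sum() / (tp.sum() + fp.sum())

def precision_macro(tp, fp):
    return float(np.mean(tp / (tp + fp)))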
-
-    Example:
-        >>> from paddlemetrics import Precision
-        >>> preds = B.tensor([2, 0, 2, 1])
-        >>> target = B.tensor([1, 1, 2, 0])
-        >>> precision = Precision(average='macro', num_classes=3)
-        >>> precision(preds, target)
-        tensor(0.1667)
-        >>> precision = Precision(average='micro')
-        >>> precision(preds, target)
-        tensor(0.2500)
-
-    """
-    is_differentiable = False
-
-    def __init__(
-        self,
-        num_classes: Optional[int] = None,
-        threshold: float = 0.5,
-        average: str = "micro",
-        mdmc_average: Optional[str] = None,
-        ignore_index: Optional[int] = None,
-        top_k: Optional[int] = None,
-        multiclass: Optional[bool] = None,
-        compute_on_step: bool = True,
-        dist_sync_on_step: bool = False,
-        process_group: Optional[Any] = None,
-        dist_sync_fn: Callable = None,
-    ) -> None:
-        allowed_average = ["micro", "macro", "weighted", "samples", "none", None]
-        if average not in allowed_average:
-            raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.")
-
-        super().__init__(
-            reduce="macro" if average in ["weighted", "none", None] else average,
-            mdmc_reduce=mdmc_average,
-            threshold=threshold,
-            top_k=top_k,
-            num_classes=num_classes,
-            multiclass=multiclass,
-            ignore_index=ignore_index,
-            compute_on_step=compute_on_step,
-            dist_sync_on_step=dist_sync_on_step,
-            process_group=process_group,
-            dist_sync_fn=dist_sync_fn,
-        )
-
-        self.average = average
-
-    def compute(self) -> Tensor:
-        """Computes the precision score based on inputs passed in to ``update`` previously.
-
-        Return:
-            The shape of the returned tensor depends on the ``average`` parameter
-
-            - If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned
-            - If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number
-              of classes
-        """
-        tp, fp, _, fn = self._get_final_stats()
-        return _precision_compute(tp, fp, fn, self.average, self.mdmc_reduce)
-
-
-class Recall(StatScores):
-    r"""
-    Computes `Recall`_:
-
-    .. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}}
-
-    Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and
-    false negatives respectively. With the use of ``top_k`` parameter, this metric can
-    generalize to Recall@K.
-
-    The reduction method (how the recall scores are aggregated) is controlled by the
-    ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the
-    multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`.
-
-    Args:
-        num_classes:
-            Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods.
-        threshold:
-            Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
-            of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
-        average:
-            Defines the reduction that is applied. Should be one of the following:
-
-            - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes.
-            - ``'macro'``: Calculate the metric for each class separately, and average the
-              metrics across classes (with equal weights for each class).
-            - ``'weighted'``: Calculate the metric for each class separately, and average the
-              metrics across classes, weighting each class by its support (``tp + fn``).
-            - ``'none'`` or ``None``: Calculate the metric for each class separately, and return
-              the metric for every class.
-            - ``'samples'``: Calculate the metric for each sample, and average the metrics
-              across samples (with equal weights for each sample).
-
-            .. note:: What is considered a sample in the multi-dimensional multi-class case
-                depends on the value of ``mdmc_average``.
-
-        mdmc_average:
-            Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
-            ``average`` parameter). Should be one of the following:
-
-            - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional
-              multi-class.
-
-            - ``'samplewise'``: In this case, the statistics are computed separately for each
-              sample on the ``N`` axis, and then averaged over samples.
-              The computation for each sample is done by treating the flattened extra axes ``...``
-              (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample,
-              and computing the metric for the sample based on that.
-
-            - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs
-              (see :ref:`references/modules:input types`)
-              are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they
-              were ``(N_X, C)``. From here on the ``average`` parameter applies as usual.
-
-        ignore_index:
-            Integer specifying a target class to ignore. If given, this class index does not contribute
-            to the returned score, regardless of reduction method. If an index is ignored, and ``average=None``
-            or ``'none'``, the score for the ignored class will be returned as ``nan``.
-
-        top_k:
-            Number of highest probability or logit score predictions considered to find the correct label,
-            relevant only for (multi-dimensional) multi-class inputs. The
-            default value (``None``) will be interpreted as 1 for these inputs.
-
-            Should be left at default (``None``) for all other types of inputs.
-
-        multiclass:
-            Used only in certain special cases, where you want to treat inputs as a different type
-            than what they appear to be. See the parameter's
-            :ref:`documentation section `
-            for a more detailed explanation and examples.
-
-        compute_on_step:
-            Forward only calls ``update()`` and returns ``None`` if this is set to ``False``.
-        dist_sync_on_step:
-            Synchronize metric state across processes at each ``forward()``
-            before returning the value at the step
-        process_group:
-            Specify the process group on which synchronization is called.
-            default: ``None`` (which selects the entire world)
-        dist_sync_fn:
-            Callback that performs the allgather operation on the metric state. When ``None``, DDP
-            will be used to perform the allgather.
-
-    Raises:
-        ValueError:
-            If ``average`` is none of ``"micro"``, ``"macro"``, ``"weighted"``, ``"samples"``, ``"none"``, ``None``.
-
-    Example:
-        >>> from paddlemetrics import Recall
-        >>> preds = B.tensor([2, 0, 2, 1])
-        >>> target = B.tensor([1, 1, 2, 0])
-        >>> recall = Recall(average='macro', num_classes=3)
-        >>> recall(preds, target)
-        tensor(0.3333)
-        >>> recall = Recall(average='micro')
-        >>> recall(preds, target)
-        tensor(0.2500)
-
-    """
-    is_differentiable = False
-
-    def __init__(
-        self,
-        num_classes: Optional[int] = None,
-        threshold: float = 0.5,
-        average: str = "micro",
-        mdmc_average: Optional[str] = None,
-        ignore_index: Optional[int] = None,
-        top_k: Optional[int] = None,
-        multiclass: Optional[bool] = None,
-        compute_on_step: bool = True,
-        dist_sync_on_step: bool = False,
-        process_group: Optional[Any] = None,
-        dist_sync_fn: Callable = None,
-    ) -> None:
-        allowed_average = ["micro", "macro", "weighted", "samples", "none", None]
-        if average not in allowed_average:
-            raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.")
-
-        super().__init__(
-            reduce="macro" if average in ["weighted", "none", None] else average,
-            mdmc_reduce=mdmc_average,
-            threshold=threshold,
-            top_k=top_k,
-            num_classes=num_classes,
-            multiclass=multiclass,
-            ignore_index=ignore_index,
-            compute_on_step=compute_on_step,
-            dist_sync_on_step=dist_sync_on_step,
-            process_group=process_group,
-            dist_sync_fn=dist_sync_fn,
-        )
-
-        self.average = average
-
-    def compute(self) -> Tensor:
-        """Computes the recall score based on inputs passed in to ``update`` previously.
-
-        Return:
-            The shape of the returned tensor depends on the ``average`` parameter
-
-            - If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned
-            - If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number
-              of classes
-        """
-        tp, fp, _, fn = self._get_final_stats()
-        return _recall_compute(tp, fp, fn, self.average, self.mdmc_reduce)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/precision_recall_curve.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/precision_recall_curve.py
deleted file mode 100644
index 341419092..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/precision_recall_curve.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import Any, List, Optional, Tuple, Union
-
-import paddleext.torchapi as B
-from paddleext.torchapi import Tensor
-
-from paddlemetrics.functional.classification.precision_recall_curve import (
-    _precision_recall_curve_compute,
-    _precision_recall_curve_update,
-)
-from paddlemetrics.metric import Metric
-from paddlemetrics.utilities import rank_zero_warn
-from paddlemetrics.utilities.data import dim_zero_cat
-
-
-class PrecisionRecallCurve(Metric):
-    """Computes precision-recall pairs for different thresholds. Works for both binary and multiclass problems. In
-    the case of multiclass, the values will be calculated based on a one-vs-the-rest approach.
-
-    Forward accepts
-
-    - ``preds`` (float tensor): ``(N, ...)`` (binary) or ``(N, C, ...)`` (multiclass) tensor
-      with probabilities, where C is the number of classes.
-
-    - ``target`` (long tensor): ``(N, ...)`` or ``(N, C, ...)`` with integer labels
-
-    Args:
-        num_classes: integer with number of classes for multi-label and multiclass problems.
-            Should be set to ``None`` for binary problems
-        pos_label: integer determining the positive class. Default is ``None``
-            which for binary problems is translated to 1. For multiclass problems
-            this argument should not be set as we iteratively change it in the
-            range [0,num_classes-1]
-        compute_on_step:
-            Forward only calls ``update()`` and returns None if this is set to False. default: True
-        dist_sync_on_step:
-            Synchronize metric state across processes at each ``forward()``
-            before returning the value at the step. default: False
-        process_group:
-            Specify the process group on which synchronization is called. default: None (which selects the entire world)
-
-    Example (binary case):
-        >>> from paddlemetrics import PrecisionRecallCurve
-        >>> pred = B.tensor([0, 1, 2, 3])
-        >>> target = B.tensor([0, 1, 1, 0])
-        >>> pr_curve = PrecisionRecallCurve(pos_label=1)
-        >>> precision, recall, thresholds = pr_curve(pred, target)
-        >>> precision
-        tensor([0.6667, 0.5000, 0.0000, 1.0000])
-        >>> recall
-        tensor([1.0000, 0.5000, 0.0000, 0.0000])
-        >>> thresholds
-        tensor([1, 2, 3])
-
-    Example (multiclass case):
-        >>> pred = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
-        ...                  [0.05, 0.75, 0.05, 0.05, 0.05],
-        ...                  [0.05, 0.05, 0.75, 0.05, 0.05],
-        ...                  [0.05, 0.05, 0.05, 0.75, 0.05]])
-        >>> target = B.tensor([0, 1, 3, 2])
-        >>> pr_curve = PrecisionRecallCurve(num_classes=5)
-        >>> precision, recall, thresholds = pr_curve(pred, target)
-        >>> precision  # doctest: +NORMALIZE_WHITESPACE
-        [tensor([1., 1.]), tensor([1., 1.]), tensor([0.2500, 0.0000, 1.0000]),
-         tensor([0.2500, 0.0000, 1.0000]), tensor([0., 1.])]
-        >>> recall
-        [tensor([1., 0.]), tensor([1., 0.]), tensor([1., 0., 0.]), tensor([1., 0., 0.]), tensor([nan, 0.])]
-        >>> thresholds
-        [tensor([0.7500]), tensor([0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500])]
-    """
-
-    is_differentiable = False
-    preds: List[Tensor]
-    target: List[Tensor]
-
-    def __init__(
-        self,
-        num_classes: Optional[int] = None,
-        pos_label: Optional[int] = None,
-        compute_on_step: bool = True,
-        dist_sync_on_step: bool = False,
-        process_group: Optional[Any] = None,
-    ) -> None:
-        super().__init__(
-            compute_on_step=compute_on_step,
-            dist_sync_on_step=dist_sync_on_step,
-            process_group=process_group,
-        )
-
-        self.num_classes = num_classes
-        self.pos_label = pos_label
-
-        self.add_state("preds", default=[], dist_reduce_fx="cat")
-        self.add_state("target", default=[], dist_reduce_fx="cat")
-
-        rank_zero_warn(
-            "Metric `PrecisionRecallCurve` will save all targets and predictions in buffer."
-            " For large datasets this may lead to large memory footprint."
-        )
-
-    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
-        """Update state with predictions and targets.
-
-        Args:
-            preds: Predictions from model
-            target: Ground truth values
-        """
-        preds, target, num_classes, pos_label = _precision_recall_curve_update(
-            preds, target, self.num_classes, self.pos_label
-        )
-        self.preds.append(preds)
-        self.target.append(target)
-        self.num_classes = num_classes
-        self.pos_label = pos_label
-
-    def compute(self) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
-        """Compute the precision-recall curve.
-
-        Returns:
-            3-element tuple containing
-
-            precision:
-                tensor where element i is the precision of predictions with
-                score >= thresholds[i] and the last element is 1.
-                If multiclass, this is a list of such tensors, one for each class.
-            recall:
-                tensor where element i is the recall of predictions with
-                score >= thresholds[i] and the last element is 0.
-                If multiclass, this is a list of such tensors, one for each class.
-            thresholds:
-                Thresholds used for computing precision/recall scores
-        """
-        preds = dim_zero_cat(self.preds)
-        target = dim_zero_cat(self.target)
-        if not self.num_classes:
-            raise ValueError(f"`num_classes` has to be a positive number, but got {self.num_classes}")
-        return _precision_recall_curve_compute(preds, target, self.num_classes, self.pos_label)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/roc.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/roc.py
deleted file mode 100644
index a01a5b94d..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/roc.py
+++ /dev/null
@@ -1,169 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import Any, Callable, List, Optional, Tuple, Union
-
-import paddleext.torchapi as B
-from paddleext.torchapi import Tensor
-
-from paddlemetrics.functional.classification.roc import _roc_compute, _roc_update
-from paddlemetrics.metric import Metric
-from paddlemetrics.utilities import rank_zero_warn
-
-
-class ROC(Metric):
-    """Computes the Receiver Operating Characteristic (ROC). Works for both binary, multiclass and multilabel
-    problems. In the case of multiclass, the values will be calculated based on a one-vs-the-rest approach.
-
-    Forward accepts
-
-    - ``preds`` (float tensor): ``(N, ...)`` (binary) or ``(N, C, ...)`` (multiclass/multilabel) tensor
-      with probabilities, where C is the number of classes/labels.
-
-    - ``target`` (long tensor): ``(N, ...)`` or ``(N, C, ...)`` with integer labels
-
-    Args:
-        num_classes: integer with number of classes for multi-label and multiclass problems.
-            Should be set to ``None`` for binary problems
-        pos_label: integer determining the positive class. Default is ``None``
-            which for binary problems is translated to 1. For multiclass problems
-            this argument should not be set as we iteratively change it in the
-            range [0,num_classes-1]
-        compute_on_step:
-            Forward only calls ``update()`` and returns None if this is set to False. default: True
-        dist_sync_on_step:
-            Synchronize metric state across processes at each ``forward()``
-            before returning the value at the step. default: False
-        process_group:
-            Specify the process group on which synchronization is called. default: None (which selects the entire world)
-        dist_sync_fn:
-            Callback that performs the allgather operation on the metric state. When ``None``, DDP
-            will be used to perform the allgather
-
-    Example (binary case):
-        >>> from paddlemetrics import ROC
-        >>> pred = B.tensor([0, 1, 2, 3])
-        >>> target = B.tensor([0, 1, 1, 1])
-        >>> roc = ROC(pos_label=1)
-        >>> fpr, tpr, thresholds = roc(pred, target)
-        >>> fpr
-        tensor([0., 0., 0., 0., 1.])
-        >>> tpr
-        tensor([0.0000, 0.3333, 0.6667, 1.0000, 1.0000])
-        >>> thresholds
-        tensor([4, 3, 2, 1, 0])
-
-    Example (multiclass case):
-        >>> pred = B.tensor([[0.75, 0.05, 0.05, 0.05],
-        ...                  [0.05, 0.75, 0.05, 0.05],
-        ...                  [0.05, 0.05, 0.75, 0.05],
-        ...                  [0.05, 0.05, 0.05, 0.75]])
-        >>> target = B.tensor([0, 1, 3, 2])
-        >>> roc = ROC(num_classes=4)
-        >>> fpr, tpr, thresholds = roc(pred, target)
-        >>> fpr
-        [tensor([0., 0., 1.]), tensor([0., 0., 1.]), tensor([0.0000, 0.3333, 1.0000]), tensor([0.0000, 0.3333, 1.0000])]
-        >>> tpr
-        [tensor([0., 1., 1.]), tensor([0., 1., 1.]), tensor([0., 0., 1.]), tensor([0., 0., 1.])]
-        >>> thresholds  # doctest: +NORMALIZE_WHITESPACE
-        [tensor([1.7500, 0.7500, 0.0500]),
-         tensor([1.7500, 0.7500, 0.0500]),
-         tensor([1.7500, 0.7500, 0.0500]),
-         tensor([1.7500, 0.7500, 0.0500])]
-
-    Example (multilabel case):
-        >>> pred = B.tensor([[0.8191, 0.3680, 0.1138],
-        ...                  [0.3584, 0.7576, 0.1183],
-        ...                  [0.2286, 0.3468, 0.1338],
-        ...                  [0.8603, 0.0745, 0.1837]])
-        >>> target = B.tensor([[1, 1, 0], [0, 1, 0], [0, 0, 0], [0, 1, 1]])
-        >>> roc = ROC(num_classes=3, pos_label=1)
-        >>> fpr, tpr, thresholds = roc(pred, target)
-        >>> fpr  # doctest: +NORMALIZE_WHITESPACE
-        [tensor([0.0000, 0.3333, 0.3333, 0.6667, 1.0000]),
-         tensor([0., 0., 0., 1., 1.]),
-         tensor([0.0000, 0.0000, 0.3333, 0.6667, 1.0000])]
-        >>> tpr  # doctest: +NORMALIZE_WHITESPACE
-        [tensor([0., 0., 1., 1., 1.]),
-         tensor([0.0000, 0.3333, 0.6667, 0.6667, 1.0000]),
-         tensor([0., 1., 1., 1., 1.])]
-        >>> thresholds  # doctest: +NORMALIZE_WHITESPACE
-        [tensor([1.8603, 0.8603, 0.8191, 0.3584, 0.2286]),
-         tensor([1.7576, 0.7576, 0.3680, 0.3468, 0.0745]),
-         tensor([1.1837, 0.1837, 0.1338, 0.1183, 0.1138])]
-    """
-
-    is_differentiable = False
-    preds: List[Tensor]
-    target: List[Tensor]
-
-    def __init__(
-        self,
-        num_classes: Optional[int] = None,
-        pos_label: Optional[int] = None,
-        compute_on_step: bool = True,
-        dist_sync_on_step: bool = False,
-        process_group: Optional[Any] = None,
-        dist_sync_fn: Callable = None,
-    ) -> None:
-        super().__init__(
-            compute_on_step=compute_on_step,
-            dist_sync_on_step=dist_sync_on_step,
-            process_group=process_group,
-            dist_sync_fn=dist_sync_fn,
-        )
-
-        self.num_classes = num_classes
-        self.pos_label = pos_label
-
-        self.add_state("preds", default=[], dist_reduce_fx=None)
-        self.add_state("target", default=[], dist_reduce_fx=None)
-
-        rank_zero_warn(
-            "Metric `ROC` will save all targets and predictions in buffer."
-            " For large datasets this may lead to large memory footprint."
-        )
-
-    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
-        """Update state with predictions and targets.
-
-        Args:
-            preds: Predictions from model
-            target: Ground truth values
-        """
-        preds, target, num_classes, pos_label = _roc_update(preds, target, self.num_classes, self.pos_label)
-        self.preds.append(preds)
-        self.target.append(target)
-        self.num_classes = num_classes
-        self.pos_label = pos_label
-
-    def compute(self) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
-        """Compute the receiver operating characteristic.
-
-        Returns:
-            3-element tuple containing
-
-            fpr:
-                tensor with false positive rates.
-                If multiclass, this is a list of such tensors, one for each class.
-            tpr:
-                tensor with true positive rates.
-                If multiclass, this is a list of such tensors, one for each class.
-            thresholds:
-                thresholds used for computing false- and true positive rates
-        """
-        preds = B.cat(self.preds, dim=0)
-        target = B.cat(self.target, dim=0)
-        if not self.num_classes:
-            raise ValueError(f"`num_classes` has to be a positive number, but got {self.num_classes}")
-        return _roc_compute(preds, target, self.num_classes, self.pos_label)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/specificity.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/specificity.py
deleted file mode 100644
index 0ad44268a..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/specificity.py
+++ /dev/null
@@ -1,171 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import Any, Callable, Optional
-
-import paddleext.torchapi as B
-from paddleext.torchapi import Tensor
-
-from paddlemetrics.classification.stat_scores import StatScores
-from paddlemetrics.functional.classification.specificity import _specificity_compute
-
-
-class Specificity(StatScores):
-    r"""
-    Computes `Specificity`_:
-
-    .. math:: \text{Specificity} = \frac{\text{TN}}{\text{TN} + \text{FP}}
-
-    Where :math:`\text{TN}` and :math:`\text{FP}` represent the number of true negatives and
-    false positives respectively. With the use of ``top_k`` parameter, this metric can
-    generalize to Specificity@K.
-
-    The reduction method (how the specificity scores are aggregated) is controlled by the
-    ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the
-    multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`.
-
-    Args:
-        num_classes:
-            Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods.
-        threshold:
-            Threshold probability value for transforming probability predictions to binary
-            (0,1) predictions, in the case of binary or multi-label inputs.
-        average:
-            Defines the reduction that is applied. Should be one of the following:
-
-            - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes.
-            - ``'macro'``: Calculate the metric for each class separately, and average the
-              metrics across classes (with equal weights for each class).
-            - ``'weighted'``: Calculate the metric for each class separately, and average the
-              metrics across classes, weighting each class by its support (``tn + fp``).
-            - ``'none'`` or ``None``: Calculate the metric for each class separately, and return
-              the metric for every class.
-            - ``'samples'``: Calculate the metric for each sample, and average the metrics
-              across samples (with equal weights for each sample).
-
-            .. note:: What is considered a sample in the multi-dimensional multi-class case
-                depends on the value of ``mdmc_average``.
-
-        mdmc_average:
-            Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
-            ``average`` parameter). Should be one of the following:
-
-            - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional
-              multi-class.
-
-            - ``'samplewise'``: In this case, the statistics are computed separately for each
-              sample on the ``N`` axis, and then averaged over samples.
-              The computation for each sample is done by treating the flattened extra axes ``...``
-              (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample,
-              and computing the metric for the sample based on that.
-
-            - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs
-              (see :ref:`references/modules:input types`)
-              are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they
-              were ``(N_X, C)``. From here on the ``average`` parameter applies as usual.
-
-        ignore_index:
-            Integer specifying a target class to ignore. If given, this class index does not contribute
-            to the returned score, regardless of reduction method. If an index is ignored, and ``average=None``
-            or ``'none'``, the score for the ignored class will be returned as ``nan``.
-
-        top_k:
-            Number of highest probability entries for each sample to convert to 1s - relevant
-            only for inputs with probability predictions. If this parameter is set for multi-label
-            inputs, it will take precedence over ``threshold``. For (multi-dim) multi-class inputs,
-            this parameter defaults to 1.
-
-            Should be left unset (``None``) for inputs with label predictions.
-
-        multiclass:
-            Used only in certain special cases, where you want to treat inputs as a different type
-            than what they appear to be. See the parameter's
-            :ref:`documentation section `
-            for a more detailed explanation and examples.
-
-        compute_on_step:
-            Forward only calls ``update()`` and returns ``None`` if this is set to ``False``.
-        dist_sync_on_step:
-            Synchronize metric state across processes at each ``forward()``
-            before returning the value at the step
-        process_group:
-            Specify the process group on which synchronization is called.
-            default: ``None`` (which selects the entire world)
-        dist_sync_fn:
-            Callback that performs the allgather operation on the metric state. When ``None``, DDP
-            will be used to perform the allgather.
-
-    Raises:
-        ValueError:
-            If ``average`` is none of ``"micro"``, ``"macro"``, ``"weighted"``, ``"samples"``, ``"none"``, ``None``.
-
-    Example:
-        >>> from paddlemetrics import Specificity
-        >>> preds = B.tensor([2, 0, 2, 1])
-        >>> target = B.tensor([1, 1, 2, 0])
-        >>> specificity = Specificity(average='macro', num_classes=3)
-        >>> specificity(preds, target)
-        tensor(0.6111)
-        >>> specificity = Specificity(average='micro')
-        >>> specificity(preds, target)
-        tensor(0.6250)
-
-    """
-    is_differentiable = False
-
-    def __init__(
-        self,
-        num_classes: Optional[int] = None,
-        threshold: float = 0.5,
-        average: str = "micro",
-        mdmc_average: Optional[str] = None,
-        ignore_index: Optional[int] = None,
-        top_k: Optional[int] = None,
-        multiclass: Optional[bool] = None,
-        compute_on_step: bool = True,
-        dist_sync_on_step: bool = False,
-        process_group: Optional[Any] = None,
-        dist_sync_fn: Callable = None,
-    ) -> None:
-        allowed_average = ["micro", "macro", "weighted", "samples", "none", None]
-        if average not in allowed_average:
-            raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.")
-
-        super().__init__(
-            reduce="macro" if average in ["weighted", "none", None] else average,
-            mdmc_reduce=mdmc_average,
-            threshold=threshold,
-            top_k=top_k,
-            num_classes=num_classes,
-            multiclass=multiclass,
-            ignore_index=ignore_index,
-            compute_on_step=compute_on_step,
-            dist_sync_on_step=dist_sync_on_step,
-            process_group=process_group,
-            dist_sync_fn=dist_sync_fn,
-        )
-
-        self.average = average
-
-    def compute(self) -> Tensor:
-        """Computes the specificity score based on inputs passed in to ``update`` previously.
-
-        Return:
-            The shape of the returned tensor depends on the ``average`` parameter
-
-            - If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned
-            - If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number
-              of classes
-        """
-        tp, fp, tn, fn = self._get_final_stats()
-        return _specificity_compute(tp, fp, tn, fn, self.average, self.mdmc_reduce)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/stat_scores.py b/EE/paddlemetric/src/build/lib/paddlemetrics/classification/stat_scores.py
deleted file mode 100644
index ec099c867..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/classification/stat_scores.py
+++ /dev/null
@@ -1,267 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import Any, Callable, Optional, Tuple
-
-import paddleext.torchapi as B
-from paddleext.torchapi import Tensor
-
-from paddlemetrics.functional.classification.stat_scores import _stat_scores_compute, _stat_scores_update
-from paddlemetrics.metric import Metric
-from paddlemetrics.utilities.enums import AverageMethod, MDMCAverageMethod
-
-
-class StatScores(Metric):
-    r"""Computes the number of true positives, false positives, true negatives, false negatives.
-    Related to `Type I and Type II errors`_
-    and the `confusion matrix`_.
-
-    The reduction method (how the statistics are aggregated) is controlled by the
-    ``reduce`` parameter, and additionally by the ``mdmc_reduce`` parameter in the
-    multi-dimensional multi-class case.
-
-    Accepts all inputs listed in :ref:`references/modules:input types`.
-
-    Args:
-        threshold:
-            Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
-            of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
-
-        top_k:
-            Number of highest probability or logit score predictions considered to find the correct label,
-            relevant only for (multi-dimensional) multi-class inputs. The
-            default value (``None``) will be interpreted as 1 for these inputs.
-
-            Should be left at default (``None``) for all other types of inputs.
-
-        reduce:
-            Defines the reduction that is applied. Should be one of the following:
-
-            - ``'micro'`` [default]: Counts the statistics by summing over all [sample, class]
-              combinations (globally). Each statistic is represented by a single integer.
-            - ``'macro'``: Counts the statistics for each class separately (over all samples).
-              Each statistic is represented by a ``(C,)`` tensor. Requires ``num_classes``
-              to be set.
-            - ``'samples'``: Counts the statistics for each sample separately (over all classes).
-              Each statistic is represented by a ``(N, )`` 1d tensor.
-
-            .. note:: What is considered a sample in the multi-dimensional multi-class case
-                depends on the value of ``mdmc_reduce``.
-
-        num_classes:
-            Number of classes. Necessary for (multi-dimensional) multi-class or multi-label data.
-
-        ignore_index:
-            Specify a class (label) to ignore. If given, this class index does not contribute
-            to the returned score, regardless of reduction method. If an index is ignored, and
-            ``reduce='macro'``, the class statistics for the ignored class will all be returned
-            as ``-1``.
-
-        mdmc_reduce:
-            Defines how the multi-dimensional multi-class inputs are handled. Should be
-            one of the following:
-
-            - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional
-              multi-class (see :ref:`references/modules:input types` for the definition of input types).
-
-            - ``'samplewise'``: In this case, the statistics are computed separately for each
-              sample on the ``N`` axis, and then the outputs are concatenated together. In each
-              sample the extra axes ``...`` are flattened to become the sub-sample axis, and
-              statistics for each sample are computed by treating the sub-sample axis as the
-              ``N`` axis for that sample.
-
-            - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs are
-              flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they
-              were ``(N_X, C)``. From here on the ``reduce`` parameter applies as usual.
-
-        multiclass:
-            Used only in certain special cases, where you want to treat inputs as a different type
-            than what they appear to be. See the parameter's
-            :ref:`documentation section `
-            for a more detailed explanation and examples.
-
-        compute_on_step:
-            Forward only calls ``update()`` and returns ``None`` if this is set to ``False``.
-        dist_sync_on_step:
-            Synchronize metric state across processes at each ``forward()``
-            before returning the value at the step
-        process_group:
-            Specify the process group on which synchronization is called.
-            default: ``None`` (which selects the entire world)
-        dist_sync_fn:
-            Callback that performs the allgather operation on the metric state. When ``None``, DDP
-            will be used to perform the allgather.
-
-    Raises:
-        ValueError:
-            If ``reduce`` is none of ``"micro"``, ``"macro"`` or ``"samples"``.
-        ValueError:
-            If ``mdmc_reduce`` is none of ``None``, ``"samplewise"``, ``"global"``.
-        ValueError:
-            If ``reduce`` is set to ``"macro"`` and ``num_classes`` is not provided.
-        ValueError:
-            If ``num_classes`` is set
-            and ``ignore_index`` is not in the range ``0`` <= ``ignore_index`` < ``num_classes``.
-
-    Example:
-        >>> from paddlemetrics.classification import StatScores
-        >>> preds = B.tensor([1, 0, 2, 1])
-        >>> target = B.tensor([1, 1, 2, 0])
-        >>> stat_scores = StatScores(reduce='macro', num_classes=3)
-        >>> stat_scores(preds, target)
-        tensor([[0, 1, 2, 1, 1],
-                [1, 1, 1, 1, 2],
-                [1, 0, 3, 0, 1]])
-        >>> stat_scores = StatScores(reduce='micro')
-        >>> stat_scores(preds, target)
-        tensor([2, 2, 6, 2, 4])
-
-    """
-    is_differentiable = False
-    # TODO: cannot be used because of scripting
-    # tp: Union[Tensor, List[Tensor]]
-    # fp: Union[Tensor, List[Tensor]]
-    # tn: Union[Tensor, List[Tensor]]
-    # fn: Union[Tensor, List[Tensor]]
-
-    def __init__(
-        self,
-        threshold: float = 0.5,
-        top_k: Optional[int] = None,
-        reduce: str = "micro",
-        num_classes: Optional[int] = None,
-        ignore_index: Optional[int] = None,
-        mdmc_reduce: Optional[str] = None,
-        multiclass: Optional[bool] = None,
-        compute_on_step: bool = True,
-        dist_sync_on_step: bool = False,
-        process_group: Optional[Any] = None,
-        dist_sync_fn: Callable = None,
-    ) -> None:
-        super().__init__(
-            compute_on_step=compute_on_step,
-            dist_sync_on_step=dist_sync_on_step,
-            process_group=process_group,
-            dist_sync_fn=dist_sync_fn,
-        )
-
-        self.reduce = reduce
-        self.mdmc_reduce = mdmc_reduce
-        self.num_classes = num_classes
-        self.threshold = threshold
-        self.multiclass = multiclass
-        self.ignore_index = ignore_index
-        self.top_k = top_k
-
-        if reduce not in ["micro", "macro", "samples"]:
-            raise ValueError(f"The `reduce` {reduce} is not valid.")
-
-        if mdmc_reduce not in [None, "samplewise", "global"]:
-            raise ValueError(f"The `mdmc_reduce` {mdmc_reduce} is not valid.")
-
-        if reduce == "macro" and (not num_classes or num_classes < 1):
-            raise ValueError("When you set `reduce` as 'macro', you have to provide the number of classes.")
-
-        if num_classes and ignore_index is not None and (not 0 <= ignore_index < num_classes or num_classes == 1):
-            raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes")
-
-        default: Callable = lambda: []
-        reduce_fn: Optional[str] = None
-        if mdmc_reduce != "samplewise" and reduce != "samples":
-            if reduce == "micro":
-                zeros_shape = []
-            elif reduce == "macro":
-                zeros_shape = [num_classes]
-            else:
-                raise ValueError(f'Wrong reduce="{reduce}"')
-            default = lambda: B.zeros(zeros_shape, dtype=B.long)
-            reduce_fn = "sum"
-
-        for s in ("tp", "fp", "tn", "fn"):
-            self.add_state(s, default=default(), dist_reduce_fx=reduce_fn)
-
-    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
-        """Update state with predictions and targets. See
-        :ref:`references/modules:input types` for more information on input
-        types.
-
-        Args:
-            preds: Predictions from model (probabilities, logits or labels)
-            target: Ground truth values
-        """
-
-        tp, fp, tn, fn = _stat_scores_update(
-            preds,
-            target,
-            reduce=self.reduce,
-            mdmc_reduce=self.mdmc_reduce,
-            threshold=self.threshold,
-            num_classes=self.num_classes,
-            top_k=self.top_k,
-            multiclass=self.multiclass,
-            ignore_index=self.ignore_index,
-        )
-
-        # Update states
-        if self.reduce != AverageMethod.SAMPLES and self.mdmc_reduce != MDMCAverageMethod.SAMPLEWISE:
-            self.tp += tp
-            self.fp += fp
-            self.tn += tn
-            self.fn += fn
-        else:
-            self.tp.append(tp)
-            self.fp.append(fp)
-            self.tn.append(tn)
-            self.fn.append(fn)
-
-    def _get_final_stats(self) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
-        """Performs concatenation on the stat scores if necessary, before passing them to a compute function."""
-        tp = B.cat(self.tp) if isinstance(self.tp, list) else self.tp
-        fp = B.cat(self.fp) if isinstance(self.fp, list) else self.fp
-        tn = B.cat(self.tn) if isinstance(self.tn, list) else self.tn
-        fn = B.cat(self.fn) if isinstance(self.fn, list) else self.fn
-        return tp, fp, tn, fn
-
-    def compute(self) -> Tensor:
-        """Computes the stat scores based on inputs passed in to ``update`` previously.
-
-        Return:
-            The metric returns a tensor of shape ``(..., 5)``, where the last dimension corresponds
-            to ``[tp, fp, tn, fn, sup]`` (``sup`` stands for support and equals ``tp + fn``). The
-            shape depends on the ``reduce`` and ``mdmc_reduce`` (in case of multi-dimensional
-            multi-class data) parameters:
-
-            - If the data is not multi-dimensional multi-class, then
-
-              - If ``reduce='micro'``, the shape will be ``(5, )``
-              - If ``reduce='macro'``, the shape will be ``(C, 5)``,
-                where ``C`` stands for the number of classes
-              - If ``reduce='samples'``, the shape will be ``(N, 5)``, where ``N`` stands for
-                the number of samples
-
-            - If the data is multi-dimensional multi-class and ``mdmc_reduce='global'``, then
-
-              - If ``reduce='micro'``, the shape will be ``(5, )``
-              - If ``reduce='macro'``, the shape will be ``(C, 5)``
-              - If ``reduce='samples'``, the shape will be ``(N*X, 5)``, where ``X`` stands for
-                the product of sizes of all "extra" dimensions of the data (i.e. all dimensions
-                except for ``C`` and ``N``)
-
-            - If the data is multi-dimensional multi-class and ``mdmc_reduce='samplewise'``, then
-
-              - If ``reduce='micro'``, the shape will be ``(N, 5)``
-              - If ``reduce='macro'``, the shape will be ``(N, C, 5)``
-              - If ``reduce='samples'``, the shape will be ``(N, X, 5)``
-        """
-        tp, fp, tn, fn = self._get_final_stats()
-        return _stat_scores_compute(tp, fp, tn, fn)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/collections.py b/EE/paddlemetric/src/build/lib/paddlemetrics/collections.py
deleted file mode 100644
index 3b03856e7..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/collections.py
+++ /dev/null
@@ -1,239 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
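For orientation, the `StatScores` base class deleted above drives all the derived metrics in this patch (Precision, Recall, Specificity) through the same accumulate-then-reduce cycle. A minimal sketch of that cycle, mirroring the doctest in the deleted file (illustrative only; it assumes the `paddlemetrics` and `paddleext` packages from this tree are importable):

    import paddleext.torchapi as B
    from paddlemetrics.classification import StatScores

    # Accumulate tp/fp/tn/fn over several batches, then reduce once.
    stat_scores = StatScores(reduce="macro", num_classes=3)
    for preds, target in [
        (B.tensor([1, 0, 2, 1]), B.tensor([1, 1, 2, 0])),
        (B.tensor([0, 2, 1, 1]), B.tensor([0, 2, 2, 1])),
    ]:
        stat_scores.update(preds, target)  # adds to the internal tp/fp/tn/fn state
    per_class = stat_scores.compute()      # one [tp, fp, tn, fn, support] row per class
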
-
-from collections import OrderedDict
-from copy import deepcopy
-from typing import Any, Dict, Hashable, Iterable, Optional, Sequence, Tuple, Union
-
-import paddleext.torchapi as B
-from paddleext.torchapi import nn
-
-from paddlemetrics.metric import Metric
-from paddlemetrics.utilities import rank_zero_warn
-
-
-class MetricCollection(nn.ModuleDict):
-    """MetricCollection class can be used to chain metrics that have the same call pattern into one single class.
-
-    Args:
-        metrics: One of the following
-
-            * list or tuple (sequence): if metrics are passed in as a list or tuple, will use the metrics class name
-              as key for output dict. Therefore, two metrics of the same class cannot be chained this way.
-
-            * arguments: similar to passing in as a list, metrics passed in as arguments will use their metric
-              class name as key for the output dict.
-
-            * dict: if metrics are passed in as a dict, will use each key in the dict as key for output dict.
-              Use this format if you want to chain together multiple of the same metric with different parameters.
-              Note that the keys in the output dict will be sorted alphabetically.
-
-        prefix: a string to append in front of the keys of the output dict
-
-        postfix: a string to append after the keys of the output dict
-
-    Raises:
-        ValueError:
-            If one of the elements of ``metrics`` is not an instance of ``pl.metrics.Metric``.
-        ValueError:
-            If two elements in ``metrics`` have the same ``name``.
-        ValueError:
-            If ``metrics`` is not a ``list``, ``tuple`` or a ``dict``.
-        ValueError:
-            If ``metrics`` is ``dict`` and additional_metrics are passed in.
-        ValueError:
-            If ``prefix`` is set and it is not a string.
-        ValueError:
-            If ``postfix`` is set and it is not a string.
-
-    Example (input as list):
-        >>> import paddleext.torchapi as B
-        >>> from pprint import pprint
-        >>> from paddlemetrics import MetricCollection, Accuracy, Precision, Recall
-        >>> target = B.tensor([0, 2, 0, 2, 0, 1, 0, 2])
-        >>> preds = B.tensor([2, 1, 2, 0, 1, 2, 2, 2])
-        >>> metrics = MetricCollection([Accuracy(),
-        ...                             Precision(num_classes=3, average='macro'),
-        ...                             Recall(num_classes=3, average='macro')])
-        >>> metrics(preds, target)
-        {'Accuracy': tensor(0.1250), 'Precision': tensor(0.0667), 'Recall': tensor(0.1111)}
-
-    Example (input as arguments):
-        >>> metrics = MetricCollection(Accuracy(), Precision(num_classes=3, average='macro'),
-        ...                            Recall(num_classes=3, average='macro'))
-        >>> metrics(preds, target)
-        {'Accuracy': tensor(0.1250), 'Precision': tensor(0.0667), 'Recall': tensor(0.1111)}
-
-    Example (input as dict):
-        >>> metrics = MetricCollection({'micro_recall': Recall(num_classes=3, average='micro'),
-        ...                             'macro_recall': Recall(num_classes=3, average='macro')})
-        >>> same_metric = metrics.clone()
-        >>> pprint(metrics(preds, target))
-        {'macro_recall': tensor(0.1111), 'micro_recall': tensor(0.1250)}
-        >>> pprint(same_metric(preds, target))
-        {'macro_recall': tensor(0.1111), 'micro_recall': tensor(0.1250)}
-        >>> metrics.persistent()
-    """
-
-    def __init__(
-        self,
-        metrics: Union[Metric, Sequence[Metric], Dict[str, Metric]],
-        *additional_metrics: Metric,
-        prefix: Optional[str] = None,
-        postfix: Optional[str] = None,
-    ) -> None:
-        super().__init__()
-
-        self._modules = self._sub_layers
-
-        self.add_metrics(metrics, *additional_metrics)
-
-        self.prefix = self._check_arg(prefix, "prefix")
-        self.postfix = self._check_arg(postfix, "postfix")
-
-    def forward(self, *args: Any, **kwargs: Any) -> Dict[str, Any]:
-        """Iteratively call forward for each metric.
-
-        Positional arguments (args) will be passed to every metric in the collection, while keyword arguments (kwargs)
-        will be filtered based on the signature of the individual metric.
-        """
-        return {k: m(*args, **m._filter_kwargs(**kwargs)) for k, m in self.items()}
-
-    def update(self, *args: Any, **kwargs: Any) -> None:
-        """Iteratively call update for each metric.
-
-        Positional arguments (args) will be passed to every metric in the collection, while keyword arguments (kwargs)
-        will be filtered based on the signature of the individual metric.
-        """
-        for _, m in self.items(keep_base=True):
-            m_kwargs = m._filter_kwargs(**kwargs)
-            m.update(*args, **m_kwargs)
-
-    def compute(self) -> Dict[str, Any]:
-        return {k: m.compute() for k, m in self.items()}
-
-    def reset(self) -> None:
-        """Iteratively call reset for each metric."""
-        for _, m in self.items(keep_base=True):
-            m.reset()
-
-    def clone(self, prefix: Optional[str] = None, postfix: Optional[str] = None) -> "MetricCollection":
-        """Make a copy of the metric collection
-        Args:
-            prefix: a string to append in front of the metric keys
-            postfix: a string to append after the keys of the output dict
-
-        """
-        mc = deepcopy(self)
-        if prefix:
-            mc.prefix = self._check_arg(prefix, "prefix")
-        if postfix:
-            mc.postfix = self._check_arg(postfix, "postfix")
-        return mc
-
-    def persistent(self, mode: bool = True) -> None:
-        """Method for post-init to change if metric states should be saved to its state_dict."""
-        for _, m in self.items(keep_base=True):
-            m.persistent(mode)
-
-    def add_metrics(
-        self, metrics: Union[Metric, Sequence[Metric], Dict[str, Metric]], *additional_metrics: Metric
-    ) -> None:
-        """Add new metrics to Metric Collection."""
-        if isinstance(metrics, Metric):
-            # set compatible with original type expectations
-            metrics = [metrics]
-        if isinstance(metrics, Sequence):
-            # prepare for optional additions
-            metrics = list(metrics)
-            remain: list = []
-            for m in additional_metrics:
-                (metrics if isinstance(m, Metric) else remain).append(m)
-
-            if remain:
-                rank_zero_warn(
-                    f"You have passed extra arguments {remain} which are not `Metric` so they will be ignored."
-                )
-        elif additional_metrics:
-            raise ValueError(
-                f"You have passed extra arguments {additional_metrics} which are not compatible"
-                f" with first passed dictionary {metrics} so they will be ignored."
-            )
-
-        if isinstance(metrics, dict):
-            # Check all values are metrics
-            # Make sure that metrics are added in deterministic order
-            for name in sorted(metrics.keys()):
-                metric = metrics[name]
-                if not isinstance(metric, Metric):
-                    raise ValueError(
-                        f"Value {metric} belonging to key {name} is not an instance of `pl.metrics.Metric`"
-                    )
-                self[name] = metric
-        elif isinstance(metrics, Sequence):
-            for metric in metrics:
-                if not isinstance(metric, Metric):
-                    raise ValueError(f"Input {metric} to `MetricCollection` is not an instance of `pl.metrics.Metric`")
-                name = metric.__class__.__name__
-                if name in self:
-                    raise ValueError(f"Encountered two metrics both named {name}")
-                self[name] = metric
-        else:
-            raise ValueError("Unknown input to MetricCollection.")
-
-    def _set_name(self, base: str) -> str:
-        name = base if self.prefix is None else self.prefix + base
-        name = name if self.postfix is None else name + self.postfix
-        return name
-
-    def _to_renamed_ordered_dict(self) -> OrderedDict:
-        od = OrderedDict()
-        for k, v in self._modules.items():
-            od[self._set_name(k)] = v
-        return od
-
-    def keys(self, keep_base: bool = False) -> Iterable[Hashable]:
-        r"""Return an iterable of the ModuleDict key.
-        Args:
-            keep_base: Whether to add prefix/postfix on the items collection.
-        """
-        if keep_base:
-            return self._modules.keys()
-        return self._to_renamed_ordered_dict().keys()
-
-    def items(self, keep_base: bool = False) -> Iterable[Tuple[str, nn.Module]]:
-        r"""Return an iterable of the ModuleDict key/value pairs.
-        Args:
-            keep_base: Whether to add prefix/postfix on the items collection.
-        """
-        if keep_base:
-            return self._modules.items()
-        return self._to_renamed_ordered_dict().items()
-
-    @staticmethod
-    def _check_arg(arg: Optional[str], name: str) -> Optional[str]:
-        if arg is None or isinstance(arg, str):
-            return arg
-        raise ValueError(f"Expected input `{name}` to be a string, but got {type(arg)}")
-
-    def __repr__(self) -> str:
-        repr_str = super().__repr__()[:-2]
-        if self.prefix:
-            repr_str += f",\n  prefix={self.prefix}{',' if self.postfix else ''}"
-        if self.postfix:
-            repr_str += f"{',' if not self.prefix else ''}\n  postfix={self.postfix}"
-        return repr_str + "\n)"
-
-    def to(self, device):
-        pass
\ No newline at end of file
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/__init__.py
deleted file mode 100644
index 365d93c97..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/__init__.py
+++ /dev/null
@@ -1,138 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
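For orientation, the `MetricCollection` deleted above fans a single call out to several metrics that share one call pattern. A minimal sketch mirroring its doctests (illustrative only; it assumes the `paddlemetrics` package from this tree is importable):

    import paddleext.torchapi as B
    from paddlemetrics import Accuracy, MetricCollection, Precision, Recall

    target = B.tensor([0, 2, 0, 2, 0, 1, 0, 2])
    preds = B.tensor([2, 1, 2, 0, 1, 2, 2, 2])

    # One forward call evaluates every metric; keys default to the class names.
    metrics = MetricCollection([
        Accuracy(),
        Precision(num_classes=3, average="macro"),
        Recall(num_classes=3, average="macro"),
    ])
    results = metrics(preds, target)  # {'Accuracy': ..., 'Precision': ..., 'Recall': ...}
    metrics.reset()                   # clear accumulated state between evaluations
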
-from paddlemetrics.functional.audio.pesq import pesq
-from paddlemetrics.functional.audio.pit import pit, pit_permutate
-from paddlemetrics.functional.audio.si_sdr import si_sdr
-from paddlemetrics.functional.audio.si_snr import si_snr
-from paddlemetrics.functional.audio.snr import snr
-from paddlemetrics.functional.audio.stoi import stoi
-from paddlemetrics.functional.classification.accuracy import accuracy
-from paddlemetrics.functional.classification.auc import auc
-from paddlemetrics.functional.classification.auroc import auroc
-from paddlemetrics.functional.classification.average_precision import average_precision
-from paddlemetrics.functional.classification.calibration_error import calibration_error
-from paddlemetrics.functional.classification.cohen_kappa import cohen_kappa
-from paddlemetrics.functional.classification.confusion_matrix import confusion_matrix
-from paddlemetrics.functional.classification.dice import dice_score
-from paddlemetrics.functional.classification.f_beta import f1, fbeta
-from paddlemetrics.functional.classification.hamming_distance import hamming_distance
-from paddlemetrics.functional.classification.hinge import hinge
-from paddlemetrics.functional.classification.iou import iou
-from paddlemetrics.functional.classification.kl_divergence import kl_divergence
-from paddlemetrics.functional.classification.matthews_corrcoef import matthews_corrcoef
-from paddlemetrics.functional.classification.precision_recall import precision, precision_recall, recall
-from paddlemetrics.functional.classification.precision_recall_curve import precision_recall_curve
-from paddlemetrics.functional.classification.roc import roc
-from paddlemetrics.functional.classification.specificity import specificity
-from paddlemetrics.functional.classification.stat_scores import stat_scores
-from paddlemetrics.functional.image.gradients import image_gradients
-from paddlemetrics.functional.image.psnr import psnr
-from paddlemetrics.functional.image.ssim import ssim
-from paddlemetrics.functional.pairwise.cosine import pairwise_cosine_similarity
-from paddlemetrics.functional.pairwise.euclidean import pairwise_euclidean_distance
-from paddlemetrics.functional.pairwise.linear import pairwise_linear_similarity
-from paddlemetrics.functional.pairwise.manhatten import pairwise_manhatten_distance
-from paddlemetrics.functional.regression.cosine_similarity import cosine_similarity
-from paddlemetrics.functional.regression.explained_variance import explained_variance
-from paddlemetrics.functional.regression.mean_absolute_error import mean_absolute_error
-from paddlemetrics.functional.regression.mean_absolute_percentage_error import mean_absolute_percentage_error
-from paddlemetrics.functional.regression.mean_squared_error import mean_squared_error
-from paddlemetrics.functional.regression.mean_squared_log_error import mean_squared_log_error
-from paddlemetrics.functional.regression.pearson import pearson_corrcoef
-from paddlemetrics.functional.regression.r2 import r2_score
-from paddlemetrics.functional.regression.spearman import spearman_corrcoef
-from paddlemetrics.functional.regression.symmetric_mean_absolute_percentage_error import (
-    symmetric_mean_absolute_percentage_error,
-)
-from paddlemetrics.functional.regression.tweedie_deviance import tweedie_deviance_score
-from paddlemetrics.functional.retrieval.average_precision import retrieval_average_precision
-from paddlemetrics.functional.retrieval.fall_out import retrieval_fall_out
-from paddlemetrics.functional.retrieval.hit_rate import retrieval_hit_rate
-from paddlemetrics.functional.retrieval.ndcg import retrieval_normalized_dcg
-from paddlemetrics.functional.retrieval.precision import retrieval_precision
-from paddlemetrics.functional.retrieval.r_precision import retrieval_r_precision
-from paddlemetrics.functional.retrieval.recall import retrieval_recall
-from paddlemetrics.functional.retrieval.reciprocal_rank import retrieval_reciprocal_rank
-from paddlemetrics.functional.self_supervised import embedding_similarity
-#from paddlemetrics.functional.text.bert import bert_score
-from paddlemetrics.functional.text.bleu import bleu_score
-from paddlemetrics.functional.text.rouge import rouge_score
-from paddlemetrics.functional.text.sacre_bleu import sacre_bleu_score
-from paddlemetrics.functional.text.wer import wer
-
-__all__ = [
-    "accuracy",
-    "auc",
-    "auroc",
-    "average_precision",
-#    "bert_score",
-    "bleu_score",
-    "calibration_error",
-    "cohen_kappa",
-    "confusion_matrix",
-    "cosine_similarity",
-    "tweedie_deviance_score",
-    "dice_score",
-    "embedding_similarity",
-    "explained_variance",
-    "f1",
-    "fbeta",
-    "hamming_distance",
-    "hinge",
-    "image_gradients",
-    "iou",
-    "kl_divergence",
-    "kldivergence",
-    "matthews_corrcoef",
-    "mean_absolute_error",
-    "mean_absolute_percentage_error",
-    "mean_squared_error",
-    "mean_squared_log_error",
-    "pairwise_cosine_similarity",
-    "pairwise_euclidean_distance",
-    "pairwise_linear_similarity",
-    "pairwise_manhatten_distance",
-    "pearson_corrcoef",
-    "pesq",
-    "pit",
-    "pit_permutate",
-    "precision",
-    "precision_recall",
-    "precision_recall_curve",
-    "psnr",
-    "r2_score",
-    "r2score",
-    "recall",
-    "retrieval_average_precision",
-    "retrieval_fall_out",
-    "retrieval_hit_rate",
-    "retrieval_normalized_dcg",
-    "retrieval_precision",
-    "retrieval_r_precision",
-    "retrieval_recall",
-    "retrieval_reciprocal_rank",
-    "roc",
-    "rouge_score",
-    "sacre_bleu_score",
-    "si_sdr",
-    "si_snr",
-    "snr",
-    "spearman_corrcoef",
-    "specificity",
-    "ssim",
-    "stat_scores",
-    "stoi",
-    "symmetric_mean_absolute_percentage_error",
-    "wer",
-]
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/__init__.py
deleted file mode 100644
index a7e7d89c0..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
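For orientation, the functional package deleted above exposes stateless counterparts of the module metrics: each call computes its result directly from the tensors it is given, with no update/compute cycle. A minimal sketch (illustrative only; the names are taken from the import list above, and the tuple return of `precision_recall` is assumed from the upstream API this port mirrors):

    import paddleext.torchapi as B
    from paddlemetrics.functional import accuracy, precision_recall

    preds = B.tensor([2, 0, 2, 1])
    target = B.tensor([1, 1, 2, 0])

    acc = accuracy(preds, target)  # single tensor result, no state kept
    prec, rec = precision_recall(preds, target, average="macro", num_classes=3)
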
-from paddlemetrics.functional.audio.pesq import pesq  # noqa: F401
-from paddlemetrics.functional.audio.pit import pit, pit_permutate  # noqa: F401
-from paddlemetrics.functional.audio.si_sdr import si_sdr  # noqa: F401
-from paddlemetrics.functional.audio.si_snr import si_snr  # noqa: F401
-from paddlemetrics.functional.audio.snr import snr  # noqa: F401
-from paddlemetrics.functional.audio.stoi import stoi  # noqa: F401
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/pesq.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/pesq.py
deleted file mode 100644
index ab81723da..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/pesq.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import numpy as np
-
-from paddlemetrics.utilities.imports import _PESQ_AVAILABLE
-
-if _PESQ_AVAILABLE:
-    import pesq as pesq_backend
-else:
-    pesq_backend = None
-import paddleext.torchapi as B
-from paddleext.torchapi import Tensor
-
-from paddlemetrics.utilities.checks import _check_same_shape
-
-
-def pesq(preds: Tensor, target: Tensor, fs: int, mode: str, keep_same_device: bool = False) -> Tensor:
-    r"""PESQ (Perceptual Evaluation of Speech Quality)
-
-    This is a wrapper for the ``pesq`` package [1]. Note that input will be moved to `cpu`
-    to perform the metric calculation.
-
-    .. note:: using this metric requires you to have ``pesq`` installed. Either install as ``pip install
-        paddlemetrics[audio]`` or ``pip install pesq``
-
-    Args:
-        preds:
-            shape ``[...,time]``
-        target:
-            shape ``[...,time]``
-        fs:
-            sampling frequency, should be 16000 or 8000 (Hz)
-        mode:
-            'wb' (wide-band) or 'nb' (narrow-band)
-        keep_same_device:
-            whether to move the pesq value to the device of preds
-
-    Returns:
-        pesq value of shape [...]
-
-    Raises:
-        ValueError:
-            If ``pesq`` package is not installed
-        ValueError:
-            If ``fs`` is not either ``8000`` or ``16000``
-        ValueError:
-            If ``mode`` is not either ``"wb"`` or ``"nb"``
-
-    Example:
-        >>> from paddlemetrics.functional.audio import pesq
-        >>> import paddleext.torchapi as B
-        >>> g = B.manual_seed(1)
-        >>> preds = B.randn(8000)
-        >>> target = B.randn(8000)
-        >>> pesq(preds, target, 8000, 'nb')
-        tensor(2.2076)
-        >>> pesq(preds, target, 16000, 'wb')
-        tensor(1.7359)
-
-    References:
-        [1] https://github.com/ludlows/python-pesq
-    """
-    if not _PESQ_AVAILABLE:
-        raise ValueError(
-            "PESQ metric requires that pesq is installed."
- "Either install as `pip install paddlemetrics[audio]` or `pip install pesq`" - ) - if fs not in (8000, 16000): - raise ValueError(f"Expected argument `fs` to either be 8000 or 16000 but got {fs}") - if mode not in ("wb", "nb"): - raise ValueError(f"Expected argument `mode` to either be 'wb' or 'nb' but got {mode}") - _check_same_shape(preds, target) - - if preds.ndim == 1: - pesq_val_np = pesq_backend.pesq(fs, target.detach().cpu().numpy(), preds.detach().cpu().numpy(), mode) - pesq_val = B.tensor(pesq_val_np) - else: - preds_np = preds.reshape(-1, preds.shape[-1]).detach().cpu().numpy() - target_np = target.reshape(-1, preds.shape[-1]).detach().cpu().numpy() - pesq_val_np = np.empty(shape=(preds_np.shape[0])) - for b in range(preds_np.shape[0]): - pesq_val_np[b] = pesq_backend.pesq(fs, target_np[b, :], preds_np[b, :], mode) - pesq_val = B.from_numpy(pesq_val_np) - pesq_val = pesq_val.reshape(preds.shape[:-1]) - - if keep_same_device: - pesq_val = pesq_val.to(preds.device) - - return pesq_val diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/pit.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/pit.py deleted file mode 100644 index 3ca729a2d..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/pit.py +++ /dev/null @@ -1,206 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import warnings -from itertools import permutations -from typing import Any, Callable, Dict, Tuple, Union - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.utilities.checks import _check_same_shape -from paddlemetrics.utilities.imports import _SCIPY_AVAILABLE - -# _ps_dict: cache of permutations -# it's necessary to cache it, otherwise it will consume a large amount of time -_ps_dict: dict = {} # _ps_dict[str(spk_num)+str(device)] = permutations - - -def _find_best_perm_by_linear_sum_assignment( - metric_mtx: B.Tensor, - eval_func: Union[B.min, B.max], -) -> Tuple[Tensor, Tensor]: - """Solves the linear sum assignment problem using scipy, and returns the best metric values and the - corresponding permutations. - - Args: - metric_mtx: - the metric matrix, shape [batch_size, spk_num, spk_num] - eval_func: - the function to reduce the metric values of different the permutations - - Returns: - best_metric: - shape [batch] - best_perm: - shape [batch, spk] - """ - from scipy.optimize import linear_sum_assignment - - mmtx = metric_mtx.detach().cpu() - best_perm = B.tensor([linear_sum_assignment(pwm, eval_func == B.max)[1] for pwm in mmtx]) - best_perm = best_perm.to(metric_mtx.device) - best_metric = B.gather(metric_mtx, 2, best_perm[:, :, None]).mean([-1, -2]) - return best_metric, best_perm # shape [batch], shape [batch, spk] - - -def _find_best_perm_by_exhuastive_method( - metric_mtx: B.Tensor, - eval_func: Union[B.min, B.max], -) -> Tuple[Tensor, Tensor]: - """Solves the linear sum assignment problem using exhuastive method, i.e. 
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/pit.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/pit.py
deleted file mode 100644
index 3ca729a2d..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/pit.py
+++ /dev/null
@@ -1,206 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import warnings
-from itertools import permutations
-from typing import Any, Callable, Dict, Tuple, Union
-
-import paddleext.torchapi as B
-from paddleext.torchapi import Tensor
-
-from paddlemetrics.utilities.checks import _check_same_shape
-from paddlemetrics.utilities.imports import _SCIPY_AVAILABLE
-
-# _ps_dict: cache of permutations
-# it's necessary to cache it, otherwise it will consume a large amount of time
-_ps_dict: dict = {}  # _ps_dict[str(spk_num)+str(device)] = permutations
-
-
-def _find_best_perm_by_linear_sum_assignment(
-    metric_mtx: B.Tensor,
-    eval_func: Union[B.min, B.max],
-) -> Tuple[Tensor, Tensor]:
-    """Solves the linear sum assignment problem using scipy, and returns the best metric values and the
-    corresponding permutations.
-
-    Args:
-        metric_mtx:
-            the metric matrix, shape [batch_size, spk_num, spk_num]
-        eval_func:
-            the function to reduce the metric values of the different permutations
-
-    Returns:
-        best_metric:
-            shape [batch]
-        best_perm:
-            shape [batch, spk]
-    """
-    from scipy.optimize import linear_sum_assignment
-
-    mmtx = metric_mtx.detach().cpu()
-    best_perm = B.tensor([linear_sum_assignment(pwm, eval_func == B.max)[1] for pwm in mmtx])
-    best_perm = best_perm.to(metric_mtx.device)
-    best_metric = B.gather(metric_mtx, 2, best_perm[:, :, None]).mean([-1, -2])
-    return best_metric, best_perm  # shape [batch], shape [batch, spk]
-
-
-def _find_best_perm_by_exhuastive_method(
-    metric_mtx: B.Tensor,
-    eval_func: Union[B.min, B.max],
-) -> Tuple[Tensor, Tensor]:
-    """Solves the assignment problem using the exhaustive method, i.e. exhaustively calculates the metric
-    values of all possible permutations, and returns the best metric values and the corresponding permutations.
-
-    Args:
-        metric_mtx:
-            the metric matrix, shape [batch_size, spk_num, spk_num]
-        eval_func:
-            the function to reduce the metric values of the different permutations
-
-    Returns:
-        best_metric:
-            shape [batch]
-        best_perm:
-            shape [batch, spk]
-    """
-    # create/read/cache the permutations and its indexes
-    # reading from cache would be much faster than creating in CPU then moving to GPU
-    batch_size, spk_num = metric_mtx.shape[:2]
-    key = str(spk_num) + str(metric_mtx.device)
-    if key not in _ps_dict:
-        # ps: all the permutations, shape [spk_num, perm_num]
-        # ps: in the i-th permutation, the prediction corresponding to the j-th target is ps[j,i]
-        ps = B.tensor(list(permutations(range(spk_num))), device=metric_mtx.device).T
-        _ps_dict[key] = ps
-    else:
-        ps = _ps_dict[key]  # all the permutations, shape [spk_num, perm_num]
-
-    # find the metric of each permutation
-    perm_num = ps.shape[-1]
-    # shape [batch_size, spk_num, perm_num]
-    bps = ps[None, ...].expand(batch_size, spk_num, perm_num)
-    # shape [batch_size, spk_num, perm_num]
-    metric_of_ps_details = B.gather(metric_mtx, 2, bps)
-    # shape [batch_size, perm_num]
-    metric_of_ps = metric_of_ps_details.mean(dim=1)
-
-    # find the best metric and best permutation
-    best_metric, best_indexes = eval_func(metric_of_ps, dim=1)
-    best_indexes = best_indexes.detach()
-    best_perm = ps.T[best_indexes, :]
-    return best_metric, best_perm  # shape [batch], shape [batch, spk]
-
-
-def pit(
-    preds: B.Tensor, target: B.Tensor, metric_func: Callable, eval_func: str = "max", **kwargs: Dict[str, Any]
-) -> Tuple[Tensor, Tensor]:
-    """Permutation invariant training (PIT). PIT implements the Permutation Invariant Training method [1] from
-    the speech separation field, used to calculate audio metrics in a permutation-invariant way.
-
-    Args:
-        preds:
-            shape [batch, spk, ...]
-        target:
-            shape [batch, spk, ...]
-        metric_func:
-            a metric function that accepts a batch of target and estimate,
-            i.e. metric_func(preds[:, i, ...], target[:, j, ...]), and returns a batch of metric tensors [batch]
-        eval_func:
-            the function to find the best permutation, can be 'min' or 'max',
-            i.e. the smaller the better or the larger the better.
-        kwargs:
-            additional args for metric_func
-
-    Returns:
-        best_metric of shape [batch],
-        best_perm of shape [batch]
-
-    Example:
-        >>> from paddlemetrics.functional.audio import si_sdr
-        >>> # [batch, spk, time]
-        >>> preds = B.tensor([[[-0.0579,  0.3560, -0.9604], [-0.1719,  0.3205,  0.2951]]])
-        >>> target = B.tensor([[[ 1.0958, -0.1648,  0.5228], [-0.4100,  1.1942, -0.5103]]])
-        >>> best_metric, best_perm = pit(preds, target, si_sdr, 'max')
-        >>> best_metric
-        tensor([-5.1091])
-        >>> best_perm
-        tensor([[0, 1]])
-        >>> pit_permutate(preds, best_perm)
-        tensor([[[-0.0579,  0.3560, -0.9604],
-                 [-0.1719,  0.3205,  0.2951]]])
-
-    Reference:
-        [1] `Permutation Invariant Training of Deep Models`_
-    """
-    _check_same_shape(preds, target)
-    if eval_func not in ["max", "min"]:
-        raise ValueError(f'eval_func can only be "max" or "min" but got {eval_func}')
-    if target.ndim < 2:
-        raise ValueError(f"Inputs must be of shape [batch, spk, ...], got {target.shape} and {preds.shape} instead")
-
-    # calculate the metric matrix
-    batch_size, spk_num = target.shape[0:2]
-    metric_mtx = None
-    for target_idx in range(spk_num):  # we have spk_num speeches in target in each sample
-        for preds_idx in range(spk_num):  # we have spk_num speeches in preds in each sample
-            if metric_mtx is not None:
-                metric_mtx[:, target_idx, preds_idx] = metric_func(
-                    preds[:, preds_idx, ...], target[:, target_idx, ...], **kwargs
-                )
-            else:
-                first_ele = metric_func(preds[:, preds_idx, ...], target[:, target_idx, ...], **kwargs)
-                metric_mtx = B.empty((batch_size, spk_num, spk_num), dtype=first_ele.dtype, device=first_ele.device)
-                metric_mtx[:, target_idx, preds_idx] = first_ele
-
-    # find best
-    op = B.max if eval_func == "max" else B.min
-    if spk_num < 3 or not _SCIPY_AVAILABLE:
-        if spk_num >= 3 and not _SCIPY_AVAILABLE:
-            warnings.warn(
-                f"In pit metric for speaker-num {spk_num}>=3, we recommend installing scipy for better performance"
-            )
-
-        best_metric, best_perm = _find_best_perm_by_exhuastive_method(metric_mtx, op)
-    else:
-        best_metric, best_perm = _find_best_perm_by_linear_sum_assignment(metric_mtx, op)
-
-    return best_metric, best_perm
-
-
-def pit_permutate(preds: Tensor, perm: Tensor) -> Tensor:
-    """Permutates the estimate according to the permutation ``perm``.
-
-    Args:
-        preds (Tensor): the estimates you want to permutate, shape [batch, spk, ...]
-        perm (Tensor): the permutation returned from pit, shape [batch, spk]
-
-    Returns:
-        Tensor: the permutated version of the estimate
-
-    Example:
-        >>> from paddlemetrics.functional.audio import si_sdr
-        >>> # [batch, spk, time]
-        >>> preds = B.tensor([[[-0.0579,  0.3560, -0.9604], [-0.1719,  0.3205,  0.2951]]])
-        >>> target = B.tensor([[[ 1.0958, -0.1648,  0.5228], [-0.4100,  1.1942, -0.5103]]])
-        >>> best_metric, best_perm = pit(preds, target, si_sdr, 'max')
-        >>> best_metric
-        tensor([-5.1091])
-        >>> best_perm
-        tensor([[0, 1]])
-        >>> pit_permutate(preds, best_perm)
-        tensor([[[-0.0579,  0.3560, -0.9604],
-                 [-0.1719,  0.3205,  0.2951]]])
-    """
-    preds_pmted = B.stack([B.index_select(pred, 0, p) for pred, p in zip(preds, perm)])
-    return preds_pmted
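The exhaustive search performed above is easy to sketch in plain NumPy. A minimal, self-contained illustration of the same idea (the toy metric matrix and the function name `exhaustive_pit` are hypothetical, not part of this module):

# NumPy sketch of exhaustive PIT: evaluate every speaker permutation and
# keep the best one per batch item ('max' is hardcoded for brevity).
from itertools import permutations
import numpy as np

def exhaustive_pit(metric_mtx):
    """metric_mtx: [batch, spk, spk]; entry [b, t, p] = metric(preds[b, p], target[b, t])."""
    batch, spk, _ = metric_mtx.shape
    best_metric = np.full(batch, -np.inf)
    best_perm = np.zeros((batch, spk), dtype=int)
    for perm in permutations(range(spk)):
        perm_arr = np.array(perm)
        # mean metric when prediction perm[t] is assigned to target t
        score = metric_mtx[:, np.arange(spk), perm_arr].mean(axis=1)
        improved = score > best_metric
        best_metric[improved] = score[improved]
        best_perm[improved] = perm_arr
    return best_metric, best_perm

mtx = np.array([[[1.0, 0.0], [0.2, 0.9]]])  # toy 1x2x2 metric matrix
print(exhaustive_pit(mtx))                  # (array([0.95]), array([[0, 1]]))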
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/si_sdr.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/si_sdr.py
deleted file mode 100644
index 66eb9e3ae..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/si_sdr.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import paddleext.torchapi as B
-from paddleext.torchapi import Tensor
-
-from paddlemetrics.utilities.checks import _check_same_shape
-
-
-def si_sdr(preds: Tensor, target: Tensor, zero_mean: bool = False) -> Tensor:
-    """Calculates the Scale-invariant signal-to-distortion ratio (SI-SDR) metric. The SI-SDR value is in general
-    considered an overall measure of how good a source sounds.
-
-    Args:
-        preds:
-            shape ``[...,time]``
-        target:
-            shape ``[...,time]``
-        zero_mean:
-            whether to zero-mean target and preds before computing the metric
-
-    Returns:
-        si-sdr value of shape [...]
-
-    Example:
-        >>> from paddlemetrics.functional.audio import si_sdr
-        >>> target = B.tensor([3.0, -0.5, 2.0, 7.0])
-        >>> preds = B.tensor([2.5, 0.0, 2.0, 8.0])
-        >>> si_sdr_val = si_sdr(preds, target)
-        >>> si_sdr_val
-        tensor(18.4030)
-
-    References:
-        [1] Le Roux, Jonathan, et al. "SDR half-baked or well done." IEEE International Conference on Acoustics, Speech
-        and Signal Processing (ICASSP) 2019.
-    """
-    _check_same_shape(preds, target)
-    EPS = B.finfo(preds.dtype).eps
-
-    if zero_mean:
-        target = target - B.mean(target, dim=-1, keepdim=True)
-        preds = preds - B.mean(preds, dim=-1, keepdim=True)
-
-    alpha = (B.sum(preds * target, dim=-1, keepdim=True) + EPS) / (
-        B.sum(target ** 2, dim=-1, keepdim=True) + EPS
-    )
-    target_scaled = alpha * target
-
-    noise = target_scaled - preds
-
-    si_sdr_value = (B.sum(target_scaled ** 2, dim=-1) + EPS) / (B.sum(noise ** 2, dim=-1) + EPS)
-    si_sdr_value = 10 * B.log10(si_sdr_value)
-
-    return si_sdr_value
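The doctest value above can be re-derived by hand from the scaling-projection formula; a minimal NumPy sketch (EPS and the zero-mean option omitted for clarity):

# NumPy re-derivation of the SI-SDR doctest above.
import numpy as np

target = np.array([3.0, -0.5, 2.0, 7.0])
preds = np.array([2.5, 0.0, 2.0, 8.0])

alpha = np.sum(preds * target) / np.sum(target ** 2)  # optimal scaling of target
target_scaled = alpha * target
noise = target_scaled - preds

si_sdr_val = 10 * np.log10(np.sum(target_scaled ** 2) / np.sum(noise ** 2))
print(f"{si_sdr_val:.4f}")  # 18.4030, matching tensor(18.4030)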
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/si_snr.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/si_snr.py
deleted file mode 100644
index abddf039f..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/si_snr.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from paddleext.torchapi import Tensor
-
-from paddlemetrics.functional.audio.si_sdr import si_sdr
-
-
-def si_snr(preds: Tensor, target: Tensor) -> Tensor:
-    """Scale-invariant signal-to-noise ratio (SI-SNR).
-
-    Args:
-        preds:
-            shape ``[...,time]``
-        target:
-            shape ``[...,time]``
-
-    Returns:
-        si-snr value of shape [...]
-
-    Example:
-        >>> import paddleext.torchapi as B
-        >>> from paddlemetrics.functional.audio import si_snr
-        >>> target = B.tensor([3.0, -0.5, 2.0, 7.0])
-        >>> preds = B.tensor([2.5, 0.0, 2.0, 8.0])
-        >>> si_snr_val = si_snr(preds, target)
-        >>> si_snr_val
-        tensor(15.0918)
-
-    References:
-        [1] Y. Luo and N. Mesgarani, "TaSNet: Time-Domain Audio Separation Network for Real-Time, Single-Channel Speech
-        Separation," 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 2018, pp.
-        696-700, doi: 10.1109/ICASSP.2018.8462116.
-    """
-
-    return si_sdr(target=target, preds=preds, zero_mean=True)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/snr.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/snr.py
deleted file mode 100644
index 8c54128ba..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/snr.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import paddleext.torchapi as B
-from paddleext.torchapi import Tensor
-
-from paddlemetrics.utilities.checks import _check_same_shape
-
-
-def snr(preds: Tensor, target: Tensor, zero_mean: bool = False) -> Tensor:
-    r"""Signal-to-noise ratio (SNR_):
-
-    .. math::
-        \text{SNR} = \frac{P_{signal}}{P_{noise}}
-
-    where :math:`P` denotes the power of each signal. The SNR metric compares the level
-    of the desired signal to the level of background noise. Therefore, a high value of
-    SNR means that the audio is clear.
-
-    Args:
-        preds:
-            shape ``[...,time]``
-        target:
-            shape ``[...,time]``
-        zero_mean:
-            whether to zero-mean target and preds before computing the metric
-
-    Returns:
-        snr value of shape [...]
-
-    Example:
-        >>> from paddlemetrics.functional.audio import snr
-        >>> target = B.tensor([3.0, -0.5, 2.0, 7.0])
-        >>> preds = B.tensor([2.5, 0.0, 2.0, 8.0])
-        >>> snr_val = snr(preds, target)
-        >>> snr_val
-        tensor(16.1805)
-
-    References:
-        [1] Le Roux, Jonathan, et al. "SDR half-baked or well done." IEEE International Conference on Acoustics, Speech
-        and Signal Processing (ICASSP) 2019.
-
-    """
-    _check_same_shape(preds, target)
-    EPS = B.finfo(preds.dtype).eps
-
-    if zero_mean:
-        target = target - B.mean(target, dim=-1, keepdim=True)
-        preds = preds - B.mean(preds, dim=-1, keepdim=True)
-
-    noise = target - preds
-
-    snr_value = (B.sum(target ** 2, dim=-1) + EPS) / (B.sum(noise ** 2, dim=-1) + EPS)
-    snr_value = 10 * B.log10(snr_value)
-
-    return snr_value
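Likewise, the SNR doctest value follows directly from the formula; a minimal NumPy check (EPS omitted):

# NumPy re-derivation of the SNR doctest above.
import numpy as np

target = np.array([3.0, -0.5, 2.0, 7.0])
preds = np.array([2.5, 0.0, 2.0, 8.0])

noise = target - preds                       # [0.5, -0.5, 0.0, -1.0]
snr_val = 10 * np.log10(np.sum(target ** 2) / np.sum(noise ** 2))
print(f"{snr_val:.4f}")                      # 16.1805, matching tensor(16.1805)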
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/stoi.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/stoi.py
deleted file mode 100644
index 4c1f5806d..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/audio/stoi.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import numpy as np
-import paddleext.torchapi as B
-
-from paddlemetrics.utilities.imports import _PYSTOI_AVAILABLE
-
-if _PYSTOI_AVAILABLE:
-    from pystoi import stoi as stoi_backend
-else:
-    stoi_backend = None
-from paddleext.torchapi import Tensor
-
-from paddlemetrics.utilities.checks import _check_same_shape
-
-
-def stoi(preds: Tensor, target: Tensor, fs: int, extended: bool = False, keep_same_device: bool = False) -> Tensor:
-    r"""STOI (Short Term Objective Intelligibility, see [2,3]), a wrapper for the pystoi package [1].
-    Note that input will be moved to `cpu` to perform the metric calculation.
-
-    Intelligibility measure which is highly correlated with the intelligibility of degraded speech signals, e.g., due
-    to additive noise, single/multi-channel noise reduction, binary masking and vocoded speech as in CI simulations.
-    The STOI-measure is intrusive, i.e., a function of the clean and degraded speech signals. STOI may be a good
-    alternative to the speech intelligibility index (SII) or the speech transmission index (STI), when you are
-    interested in the effect of nonlinear processing on noisy speech, e.g., noise reduction or binary masking
-    algorithms, on speech intelligibility. Description taken from [Cees Taal's website](http://www.ceestaal.nl/code/).
-
-    .. note:: using this metric requires you to have ``pystoi`` installed. Either install as ``pip install
-        paddlemetrics[audio]`` or ``pip install pystoi``
-
-    Args:
-        preds:
-            shape ``[...,time]``
-        target:
-            shape ``[...,time]``
-        fs:
-            sampling frequency (Hz)
-        extended:
-            whether to use the extended STOI described in [4]
-        keep_same_device:
-            whether to move the stoi value to the device of preds
-
-    Returns:
-        stoi value of shape [...]
-
-    Raises:
-        ValueError:
-            If ``pystoi`` package is not installed
-
-    Example:
-        >>> from paddlemetrics.functional.audio import stoi
-        >>> import paddleext.torchapi as B
-        >>> g = B.manual_seed(1)
-        >>> preds = B.randn(8000)
-        >>> target = B.randn(8000)
-        >>> stoi(preds, target, 8000).float()
-        tensor(-0.0100)
-
-    References:
-        [1] https://github.com/mpariente/pystoi
-
-        [2] C.H.Taal, R.C.Hendriks, R.Heusdens, J.Jensen 'A Short-Time Objective Intelligibility Measure for
-        Time-Frequency Weighted Noisy Speech', ICASSP 2010, Texas, Dallas.
-
-        [3] C.H.Taal, R.C.Hendriks, R.Heusdens, J.Jensen 'An Algorithm for Intelligibility Prediction of
-        Time-Frequency Weighted Noisy Speech', IEEE Transactions on Audio, Speech, and Language Processing, 2011.
-
-        [4] J. Jensen and C. H. Taal, 'An Algorithm for Predicting the Intelligibility of Speech Masked by Modulated
-        Noise Maskers', IEEE Transactions on Audio, Speech and Language Processing, 2016.
-
-    """
-    if not _PYSTOI_AVAILABLE:
-        raise ValueError(
-            "STOI metric requires that pystoi is installed."
- "Either install as `pip install paddlemetrics[audio]` or `pip install pystoi`" - ) - _check_same_shape(preds, target) - - if len(preds.shape) == 1: - stoi_val_np = stoi_backend(target.detach().cpu().numpy(), preds.detach().cpu().numpy(), fs, extended) - stoi_val = B.tensor(stoi_val_np) - else: - preds_np = preds.reshape(-1, preds.shape[-1]).detach().cpu().numpy() - target_np = target.reshape(-1, preds.shape[-1]).detach().cpu().numpy() - stoi_val_np = np.empty(shape=(preds_np.shape[0])) - for b in range(preds_np.shape[0]): - stoi_val_np[b] = stoi_backend(target_np[b, :], preds_np[b, :], fs, extended) - stoi_val = B.from_numpy(stoi_val_np) - stoi_val = stoi_val.reshape(preds.shape[:-1]) - - if keep_same_device: - stoi_val = stoi_val.to(preds.device) - - return stoi_val diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/__init__.py deleted file mode 100644 index a03982c8c..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from paddlemetrics.functional.classification.accuracy import accuracy # noqa: F401 -from paddlemetrics.functional.classification.auc import auc # noqa: F401 -from paddlemetrics.functional.classification.auroc import auroc # noqa: F401 -from paddlemetrics.functional.classification.average_precision import average_precision # noqa: F401 -from paddlemetrics.functional.classification.calibration_error import calibration_error # noqa: F401 -from paddlemetrics.functional.classification.cohen_kappa import cohen_kappa # noqa: F401 -from paddlemetrics.functional.classification.confusion_matrix import confusion_matrix # noqa: F401 -from paddlemetrics.functional.classification.dice import dice_score # noqa: F401 -from paddlemetrics.functional.classification.f_beta import f1, fbeta # noqa: F401 -from paddlemetrics.functional.classification.hamming_distance import hamming_distance # noqa: F401 -from paddlemetrics.functional.classification.hinge import hinge # noqa: F401 -from paddlemetrics.functional.classification.iou import iou # noqa: F401 -from paddlemetrics.functional.classification.kl_divergence import kl_divergence # noqa: F401 -from paddlemetrics.functional.classification.matthews_corrcoef import matthews_corrcoef # noqa: F401 -from paddlemetrics.functional.classification.precision_recall import precision, precision_recall, recall # noqa: F401 -from paddlemetrics.functional.classification.precision_recall_curve import precision_recall_curve # noqa: F401 -from paddlemetrics.functional.classification.roc import roc # noqa: F401 -from paddlemetrics.functional.classification.specificity import specificity # noqa: F401 -from paddlemetrics.functional.classification.stat_scores import stat_scores # noqa: F401 diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/accuracy.py 
b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/accuracy.py
deleted file mode 100644
index 44c89fa92..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/accuracy.py
+++ /dev/null
@@ -1,418 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import Optional, Tuple
-
-import paddleext.torchapi as B
-from paddleext.torchapi import Tensor, tensor
-
-from paddlemetrics.functional.classification.stat_scores import _reduce_stat_scores, _stat_scores_update
-from paddlemetrics.utilities.checks import _check_classification_inputs, _input_format_classification, _input_squeeze
-from paddlemetrics.utilities.enums import AverageMethod, DataType, MDMCAverageMethod
-
-
-def _check_subset_validity(mode: DataType) -> bool:
-    """Checks that the input mode is valid."""
-    return mode in (DataType.MULTILABEL, DataType.MULTIDIM_MULTICLASS)
-
-
-def _mode(
-    preds: Tensor,
-    target: Tensor,
-    threshold: float,
-    top_k: Optional[int],
-    num_classes: Optional[int],
-    multiclass: Optional[bool],
-) -> DataType:
-    """Finds the mode of the input tensors.
-
-    Args:
-        preds: Predicted tensor
-        target: Ground truth tensor
-        threshold: Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the
-            case of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
-        top_k: Number of highest probability or logit score predictions considered to find the correct label,
-            relevant only for (multi-dimensional) multi-class inputs.
-        num_classes: Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods.
-        multiclass:
-            Used only in certain special cases, where you want to treat inputs as a different type
-            than what they appear to be.
-
-    Example:
-        >>> target = B.tensor([0, 1, 2, 3])
-        >>> preds = B.tensor([0, 2, 1, 3])
-        >>> _mode(preds, target, 0.5, None, None, None)
-
-    """
-
-    mode = _check_classification_inputs(
-        preds, target, threshold=threshold, top_k=top_k, num_classes=num_classes, multiclass=multiclass
-    )
-    return mode
-
-
-def _accuracy_update(
-    preds: Tensor,
-    target: Tensor,
-    reduce: Optional[str],
-    mdmc_reduce: Optional[str],
-    threshold: float,
-    num_classes: Optional[int],
-    top_k: Optional[int],
-    multiclass: Optional[bool],
-    ignore_index: Optional[int],
-    mode: DataType,
-) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
-    """Updates and returns stat scores (true positives, false positives, true negatives, false negatives) required
-    to compute accuracy.
-
-    Args:
-        preds: Predicted tensor
-        target: Ground truth tensor
-        reduce: Defines the reduction that is applied.
-        mdmc_reduce: Defines how the multi-dimensional multi-class inputs are handled.
-        threshold: Threshold for transforming probability or logit predictions to binary (0,1) predictions, in
-            the case of binary or multi-label inputs.
-        num_classes: Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods.
- top_k: Number of highest probability or logit score predictions considered to find the correct label, - relevant only for (multi-dimensional) multi-class inputs. - multiclass: Used only in certain special cases, where you want to treat inputs as a different type - than what they appear to be. - ignore_index: Integer specifying a target class to ignore. If given, this class index does not contribute - to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` - or ``'none'``, the score for the ignored class will be returned as ``nan``. - mode: Mode of the input tensors - """ - - if mode == DataType.MULTILABEL and top_k: - raise ValueError("You can not use the `top_k` parameter to calculate accuracy for multi-label inputs.") - - preds, target = _input_squeeze(preds, target) - tp, fp, tn, fn = _stat_scores_update( - preds, - target, - reduce=reduce, - mdmc_reduce=mdmc_reduce, - threshold=threshold, - num_classes=num_classes, - top_k=top_k, - multiclass=multiclass, - ignore_index=ignore_index, - ) - return tp, fp, tn, fn - - -def _accuracy_compute( - tp: Tensor, - fp: Tensor, - tn: Tensor, - fn: Tensor, - average: Optional[str], - mdmc_average: Optional[str], - mode: DataType, -) -> Tensor: - """Computes accuracy from stat scores: true positives, false positives, true negatives, false negatives. - - Args: - tp: True positives - fp: False positives - tn: True negatives - fn: False negatives - average: Defines the reduction that is applied. - mdmc_average: Defines how averaging is done for multi-dimensional multi-class inputs (on top of the - ``average`` parameter). - mode: Mode of the input tensors - - Example: - >>> preds = B.tensor([0, 2, 1, 3]) - >>> target = B.tensor([0, 1, 2, 3]) - >>> threshold = 0.5 - >>> reduce = average = 'micro' - >>> mdmc_average = 'global' - >>> mode = _mode(preds, target, threshold, top_k=None, num_classes=None, multiclass=None) - >>> tp, fp, tn, fn = _accuracy_update( - ... preds, - ... target, - ... reduce, - ... mdmc_average, - ... threshold=0.5, - ... num_classes=None, - ... top_k=None, - ... multiclass=None, - ... ignore_index=None, - ... mode=mode) - >>> _accuracy_compute(tp, fp, tn, fn, average, mdmc_average, mode) - tensor(0.5000) - - >>> target = B.tensor([0, 1, 2]) - >>> preds = B.tensor([[0.1, 0.9, 0], [0.3, 0.1, 0.6], [0.2, 0.5, 0.3]]) - >>> top_k, threshold = 2, 0.5 - >>> reduce = average = 'micro' - >>> mdmc_average = 'global' - >>> mode = _mode(preds, target, threshold, top_k, num_classes=None, multiclass=None) - >>> tp, fp, tn, fn = _accuracy_update( - ... preds, - ... target, - ... reduce, - ... mdmc_average, - ... threshold, - ... num_classes=None, - ... top_k=top_k, - ... multiclass=None, - ... ignore_index=None, - ... 
mode=mode) - >>> _accuracy_compute(tp, fp, tn, fn, average, mdmc_average, mode) - tensor(0.6667) - """ - - simple_average = [AverageMethod.MICRO, AverageMethod.SAMPLES] - if (mode == DataType.BINARY and average in simple_average) or mode == DataType.MULTILABEL: - numerator = tp + tn - denominator = tp + tn + fp + fn - else: - numerator = tp - denominator = tp + fn - - if average == AverageMethod.MACRO and mdmc_average != MDMCAverageMethod.SAMPLEWISE: - cond = tp + fp + fn == 0 - numerator = numerator[~cond] - denominator = denominator[~cond] - - if average == AverageMethod.NONE and mdmc_average != MDMCAverageMethod.SAMPLEWISE: - # a class is not present if there exists no TPs, no FPs, and no FNs - meaningless_indeces = B.nonzero((tp | fn | fp) == 0).cpu() - numerator[meaningless_indeces, ...] = -1 - denominator[meaningless_indeces, ...] = -1 - - return _reduce_stat_scores( - numerator=numerator, - denominator=denominator, - weights=None if average != AverageMethod.WEIGHTED else tp + fn, - average=average, - mdmc_average=mdmc_average, - ) - - -def _subset_accuracy_update( - preds: Tensor, - target: Tensor, - threshold: float, - top_k: Optional[int], -) -> Tuple[Tensor, Tensor]: - """Updates and returns variables required to compute subset accuracy. - - Args: - preds: Predicted tensor - target: Ground truth tensor - threshold: Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case - of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. - top_k: Number of highest probability or logit score predictions considered to find the correct label, - relevant only for (multi-dimensional) multi-class inputs. - """ - - preds, target = _input_squeeze(preds, target) - preds, target, mode = _input_format_classification(preds, target, threshold=threshold, top_k=top_k) - - if mode == DataType.MULTILABEL and top_k: - raise ValueError("You can not use the `top_k` parameter to calculate accuracy for multi-label inputs.") - - if mode == DataType.MULTILABEL: - correct = (preds == target).all(dim=1).sum() - total = tensor(target.shape[0], device=target.device) - elif mode == DataType.MULTICLASS: - correct = (preds * target).sum() - total = target.sum() - elif mode == DataType.MULTIDIM_MULTICLASS: - sample_correct = (preds * target).sum(dim=(1, 2)) - correct = (sample_correct == target.shape[2]).sum() - total = tensor(target.shape[0], device=target.device) - else: - correct, total = tensor(0), tensor(0) - - return correct, total - - -def _subset_accuracy_compute(correct: Tensor, total: Tensor) -> Tensor: - """Computes subset accuracy from number of correct observations and total number of observations. - - Args: - correct: Number of correct observations - total: Number of observations - """ - - return correct.float() / total - - -def accuracy( - preds: Tensor, - target: Tensor, - average: str = "micro", - mdmc_average: Optional[str] = "global", - threshold: float = 0.5, - top_k: Optional[int] = None, - subset_accuracy: bool = False, - num_classes: Optional[int] = None, - multiclass: Optional[bool] = None, - ignore_index: Optional[int] = None, -) -> Tensor: - r"""Computes `Accuracy`_ - - .. math:: - \text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i) - - Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a - tensor of predictions. 
- - For multi-class and multi-dimensional multi-class data with probability or logits predictions, the - parameter ``top_k`` generalizes this metric to a Top-K accuracy metric: for each sample the - top-K highest probability or logits items are considered to find the correct label. - - For multi-label and multi-dimensional multi-class inputs, this metric computes the "global" - accuracy by default, which counts all labels or sub-samples separately. This can be - changed to subset accuracy (which requires all labels or sub-samples in the sample to - be correctly predicted) by setting ``subset_accuracy=True``. - - Accepts all input types listed in :ref:`references/modules:input types`. - - Args: - preds: Predictions from model (probabilities, logits or labels) - target: Ground truth labels - average: - Defines the reduction that is applied. Should be one of the following: - - - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes. - - ``'macro'``: Calculate the metric for each class separately, and average the - metrics across classes (with equal weights for each class). - - ``'weighted'``: Calculate the metric for each class separately, and average the - metrics across classes, weighting each class by its support (``tp + fn``). - - ``'none'`` or ``None``: Calculate the metric for each class separately, and return - the metric for every class. - - ``'samples'``: Calculate the metric for each sample, and average the metrics - across samples (with equal weights for each sample). - - .. note:: What is considered a sample in the multi-dimensional multi-class case - depends on the value of ``mdmc_average``. - - .. note:: If ``'none'`` and a given class doesn't occur in the `preds` or `target`, - the value for the class will be ``nan``. - - mdmc_average: - Defines how averaging is done for multi-dimensional multi-class inputs (on top of the - ``average`` parameter). Should be one of the following: - - - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional - multi-class. - - - ``'samplewise'``: In this case, the statistics are computed separately for each - sample on the ``N`` axis, and then averaged over samples. - The computation for each sample is done by treating the flattened extra axes ``...`` - (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample, - and computing the metric for the sample based on that. - - - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs - (see :ref:`references/modules:input types`) - are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they - were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. - num_classes: - Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods. - - threshold: - Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case - of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. - top_k: - Number of highest probability or logit score predictions considered to find the correct label, - relevant only for (multi-dimensional) multi-class inputs. The - default value (``None``) will be interpreted as 1 for these inputs. - - Should be left at default (``None``) for all other types of inputs. - multiclass: - Used only in certain special cases, where you want to treat inputs as a different type - than what they appear to be. 
See the parameter's
-            :ref:`documentation section `
-            for a more detailed explanation and examples.
-        ignore_index:
-            Integer specifying a target class to ignore. If given, this class index does not contribute
-            to the returned score, regardless of reduction method. If an index is ignored, and ``average=None``
-            or ``'none'``, the score for the ignored class will be returned as ``nan``.
-        subset_accuracy:
-            Whether to compute subset accuracy for multi-label and multi-dimensional
-            multi-class inputs (has no effect for other input types).
-
-            - For multi-label inputs, if the parameter is set to ``True``, then all labels for
-              each sample must be correctly predicted for the sample to count as correct. If it
-              is set to ``False``, then all labels are counted separately - this is equivalent to
-              flattening inputs beforehand (i.e. ``preds = preds.flatten()`` and same for ``target``).
-
-            - For multi-dimensional multi-class inputs, if the parameter is set to ``True``, then all
-              sub-samples (on the extra axis) must be correct for the sample to be counted as correct.
-              If it is set to ``False``, then all sub-samples are counted separately - this is equivalent,
-              in the case of label predictions, to flattening the inputs beforehand (i.e.
-              ``preds = preds.flatten()`` and same for ``target``). Note that the ``top_k`` parameter
-              still applies in both cases, if set.
-
-    Raises:
-        ValueError:
-            If ``top_k`` parameter is set for ``multi-label`` inputs.
-        ValueError:
-            If ``average`` is none of ``"micro"``, ``"macro"``, ``"weighted"``, ``"samples"``, ``"none"``, ``None``.
-        ValueError:
-            If ``mdmc_average`` is not one of ``None``, ``"samplewise"``, ``"global"``.
-        ValueError:
-            If ``average`` is set but ``num_classes`` is not provided.
-        ValueError:
-            If ``num_classes`` is set
-            and ``ignore_index`` is not in the range ``[0, num_classes)``.
-        ValueError:
-            If ``top_k`` is not an ``integer`` larger than ``0``.
-
-    Example:
-        >>> import paddleext.torchapi as B
-        >>> from paddlemetrics.functional import accuracy
-        >>> target = B.tensor([0, 1, 2, 3])
-        >>> preds = B.tensor([0, 2, 1, 3])
-        >>> accuracy(preds, target)
-        tensor(0.5000)
-
-        >>> target = B.tensor([0, 1, 2])
-        >>> preds = B.tensor([[0.1, 0.9, 0], [0.3, 0.1, 0.6], [0.2, 0.5, 0.3]])
-        >>> accuracy(preds, target, top_k=2)
-        tensor(0.6667)
-    """
-    allowed_average = ["micro", "macro", "weighted", "samples", "none", None]
-    if average not in allowed_average:
-        raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.")
-
-    if average in ["macro", "weighted", "none", None] and (not num_classes or num_classes < 1):
-        raise ValueError(f"When you set `average` as {average}, you have to provide the number of classes.")
-
-    allowed_mdmc_average = [None, "samplewise", "global"]
-    if mdmc_average not in allowed_mdmc_average:
-        raise ValueError(f"The `mdmc_average` has to be one of {allowed_mdmc_average}, got {mdmc_average}.")
-
-    if num_classes and ignore_index is not None and (not 0 <= ignore_index < num_classes or num_classes == 1):
-        raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes")
-
-    if top_k is not None and (not isinstance(top_k, int) or top_k <= 0):
-        raise ValueError(f"The `top_k` should be an integer larger than 0, got {top_k}")
-
-    preds, target = _input_squeeze(preds, target)
-    mode = _mode(preds, target, threshold, top_k, num_classes, multiclass)
-    reduce = "macro" if average in ["weighted", "none", None] else average
-
-    if subset_accuracy and _check_subset_validity(mode):
-        correct, total = _subset_accuracy_update(preds, target, threshold, top_k)
-        return _subset_accuracy_compute(correct, total)
-    tp, fp, tn, fn = _accuracy_update(
-        preds, target, reduce, mdmc_average, threshold, num_classes, top_k, multiclass, ignore_index, mode
-    )
-    return _accuracy_compute(tp, fp, tn, fn, average, mdmc_average, mode)
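The `subset_accuracy` switch is easiest to see on a toy multi-label example; a minimal NumPy sketch (the arrays are illustrative, and this is not the module's API):

# Global vs. subset accuracy on multi-label data.
import numpy as np

preds = np.array([[1, 0, 1],
                  [1, 1, 0]])   # thresholded predictions, 2 samples x 3 labels
target = np.array([[1, 0, 0],
                   [1, 1, 0]])

# subset_accuracy=False: every label counts separately (flattened view)
global_acc = (preds == target).mean()              # 5 of 6 labels -> 0.8333
# subset_accuracy=True: a sample counts only if *all* its labels match
subset_acc = (preds == target).all(axis=1).mean()  # 1 of 2 samples -> 0.5
print(global_acc, subset_acc)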
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/auc.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/auc.py
deleted file mode 100644
index 0e2fddb3d..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/auc.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import Tuple
-
-import paddleext.torchapi as B
-from paddleext.torchapi import Tensor
-
-
-def _auc_update(x: Tensor, y: Tensor) -> Tuple[Tensor, Tensor]:
-    """Updates and returns variables required to compute area under the curve. Checks that the two input tensors
-    have the same number of elements and that they are 1d.
-
-    Args:
-        x: x-coordinates
-        y: y-coordinates
-    """
-
-    if x.ndim > 1:
-        x = x.squeeze()
-
-    if y.ndim > 1:
-        y = y.squeeze()
-
-    if x.ndim > 1 or y.ndim > 1:
-        raise ValueError(
-            f"Expected both `x` and `y` tensor to be 1d, but got tensors with dimension {x.ndim} and {y.ndim}"
-        )
-    if x.numel() != y.numel():
-        raise ValueError(
-            f"Expected the same number of elements in `x` and `y` tensor but received {x.numel()} and {y.numel()}"
-        )
-    return x, y
-
-
-def _auc_compute_without_check(x: Tensor, y: Tensor, direction: float) -> Tensor:
-    """Computes area under the curve using the trapezoidal rule. Assumes increasing or decreasing order of `x`.
-
-    Args:
-        x: x-coordinates, must be either increasing or decreasing
-        y: y-coordinates
-        direction: 1 if increasing, -1 if decreasing
-
-    Example:
-        >>> x = B.tensor([0, 1, 2, 3])
-        >>> y = B.tensor([0, 1, 2, 2])
-        >>> x, y = _auc_update(x, y)
-        >>> _auc_compute_without_check(x, y, direction=1.0)
-        tensor(4.)
-    """
-
-    with B.no_grad():
-        auc_: Tensor = B.trapz(y, x) * direction
-    return auc_
-
-
-def _auc_compute(x: Tensor, y: Tensor, reorder: bool = False) -> Tensor:
-    """Computes area under the curve using the trapezoidal rule. Checks for increasing or decreasing order of `x`.
-
-    Args:
-        x: x-coordinates, must be either increasing or decreasing
-        y: y-coordinates
-        reorder: if True, will reorder the arrays to make it either increasing or decreasing
-
-    Example:
-        >>> x = B.tensor([0, 1, 2, 3])
-        >>> y = B.tensor([0, 1, 2, 2])
-        >>> x, y = _auc_update(x, y)
-        >>> _auc_compute(x, y)
-        tensor(4.)
-        >>> _auc_compute(x, y, reorder=True)
-        tensor(4.)
-    """
-
-    with B.no_grad():
-        if reorder:
-            # TODO: include stable=True arg when pytorch v1.9 is released
-            x, x_idx = B.sort(x)
-            y = y[x_idx]
-
-        dx = x[1:] - x[:-1]
-        if (dx < 0).any():
-            if (dx <= 0).all():
-                direction = -1.0
-            else:
-                raise ValueError(
-                    "The `x` tensor is neither increasing nor decreasing. Try setting the reorder argument to `True`."
-                )
-        else:
-            direction = 1.0
-        return _auc_compute_without_check(x, y, direction)
-
-
-def auc(x: Tensor, y: Tensor, reorder: bool = False) -> Tensor:
-    """Computes Area Under the Curve (AUC) using the trapezoidal rule.
-
-    Args:
-        x: x-coordinates, must be either increasing or decreasing
-        y: y-coordinates
-        reorder: if True, will reorder the arrays to make it either increasing or decreasing
-
-    Return:
-        Tensor containing AUC score (float)
-
-    Raises:
-        ValueError:
-            If both ``x`` and ``y`` tensors are not ``1d``.
-        ValueError:
-            If both ``x`` and ``y`` don't have the same number of elements.
-        ValueError:
-            If ``x`` tensor is neither increasing nor decreasing.
-
-    Example:
-        >>> from paddlemetrics.functional import auc
-        >>> x = B.tensor([0, 1, 2, 3])
-        >>> y = B.tensor([0, 1, 2, 2])
-        >>> auc(x, y)
-        tensor(4.)
-        >>> auc(x, y, reorder=True)
-        tensor(4.)
-    """
-    x, y = _auc_update(x, y)
-    return _auc_compute(x, y, reorder=reorder)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/auroc.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/auroc.py
deleted file mode 100644
index a393f20e5..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/auroc.py
+++ /dev/null
@@ -1,257 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
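# A quick numeric check of the trapezoidal rule used by `auc` above
# (NumPy sketch with illustrative arrays; not part of the module):
import numpy as np

x = np.array([0.0, 1.0, 2.0, 3.0])
y = np.array([0.0, 1.0, 2.0, 2.0])

# sum of trapezoid areas: (x[i+1] - x[i]) * (y[i+1] + y[i]) / 2
manual = np.sum((x[1:] - x[:-1]) * (y[1:] + y[:-1]) / 2)
print(manual, np.trapz(y, x))  # 4.0 4.0, matching tensor(4.) in the doctests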
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import warnings -from typing import Optional, Sequence, Tuple - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.functional.classification.auc import _auc_compute_without_check -from paddlemetrics.functional.classification.roc import roc -from paddlemetrics.utilities.checks import _input_format_classification -from paddlemetrics.utilities.enums import AverageMethod, DataType -from paddlemetrics.utilities.imports import _TORCH_LOWER_1_6 - - -def _auroc_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor, DataType]: - """Updates and returns variables required to compute Area Under the Receiver Operating Characteristic Curve. - Validates the inputs and returns the mode of the inputs. - - Args: - preds: Predicted tensor - target: Ground truth tensor - """ - - # use _input_format_classification for validating the input and get the mode of data - _, _, mode = _input_format_classification(preds, target) - - if mode == "multi class multi dim": - n_classes = preds.shape[1] - preds = preds.transpose(0, 1).reshape(n_classes, -1).transpose(0, 1) - target = target.flatten() - if mode == "multi-label" and preds.ndim > 2: - n_classes = preds.shape[1] - preds = preds.transpose(0, 1).reshape(n_classes, -1).transpose(0, 1) - target = target.transpose(0, 1).reshape(n_classes, -1).transpose(0, 1) - - return preds, target, mode - - -def _auroc_compute( - preds: Tensor, - target: Tensor, - mode: DataType, - num_classes: Optional[int] = None, - pos_label: Optional[int] = None, - average: Optional[str] = "macro", - max_fpr: Optional[float] = None, - sample_weights: Optional[Sequence] = None, -) -> Tensor: - """Computes Area Under the Receiver Operating Characteristic Curve. - - Args: - preds: predictions from model (logits or probabilities) - target: Ground truth labels - mode: 'multi class multi dim' or 'multi-label' or 'binary' - num_classes: integer with number of classes for multi-label and multiclass problems. - Should be set to ``None`` for binary problems - pos_label: integer determining the positive class. - Should be set to ``None`` for binary problems - average: Defines the reduction that is applied to the output: - max_fpr: If not ``None``, calculates standardized partial AUC over the - range [0, max_fpr]. Should be a float between 0 and 1. - sample_weights: sample weights for each data point - - Example: - >>> # binary case - >>> preds = B.tensor([0.13, 0.26, 0.08, 0.19, 0.34]) - >>> target = B.tensor([0, 0, 1, 1, 1]) - >>> preds, target, mode = _auroc_update(preds, target) - >>> _auroc_compute(preds, target, mode, pos_label=1) - tensor(0.5000) - - >>> # multiclass case - >>> preds = B.tensor([[0.90, 0.05, 0.05], - ... [0.05, 0.90, 0.05], - ... [0.05, 0.05, 0.90], - ... [0.85, 0.05, 0.10], - ... 
[0.10, 0.10, 0.80]])
-        >>> target = B.tensor([0, 1, 1, 2, 2])
-        >>> preds, target, mode = _auroc_update(preds, target)
-        >>> _auroc_compute(preds, target, mode, num_classes=3)
-        tensor(0.7778)
-    """
-
-    # binary mode overrides num_classes
-    if mode == DataType.BINARY:
-        num_classes = 1
-
-    # check max_fpr parameter
-    if max_fpr is not None:
-        if not isinstance(max_fpr, float) or not 0 < max_fpr <= 1:
-            raise ValueError(f"`max_fpr` should be a float in range (0, 1], got: {max_fpr}")
-
-        if _TORCH_LOWER_1_6:
-            raise RuntimeError(
-                "`max_fpr` argument requires `B.bucketize` which" " is not available below PyTorch version 1.6"
-            )
-
-        # the max_fpr parameter is only supported for binary
-        if mode != DataType.BINARY:
-            raise ValueError(
-                f"Partial AUC computation not available in"
-                f" multilabel/multiclass setting, 'max_fpr' must be"
-                f" set to `None`, received `{max_fpr}`."
-            )
-
-    # calculate fpr, tpr
-    if mode == DataType.MULTILABEL:
-        if average == AverageMethod.MICRO:
-            fpr, tpr, _ = roc(preds.flatten(), target.flatten(), 1, pos_label, sample_weights)
-        elif num_classes:
-            # for multilabel we iteratively evaluate roc in a binary fashion
-            output = [
-                roc(preds[:, i], target[:, i], num_classes=1, pos_label=1, sample_weights=sample_weights)
-                for i in range(num_classes)
-            ]
-            fpr = [o[0] for o in output]
-            tpr = [o[1] for o in output]
-        else:
-            raise ValueError("Detected input to be `multilabel` but you did not provide `num_classes` argument")
-    else:
-        if mode != DataType.BINARY:
-            if num_classes is None:
-                raise ValueError("Detected input to be `multiclass` but you did not provide `num_classes` argument")
-            if average == AverageMethod.WEIGHTED and len(B.unique(target)) < num_classes:
-                # If one or more classes has 0 observations, we should exclude them, as its weight will be 0
-                target_bool_mat = B.zeros((len(target), num_classes), dtype=bool)
-                target_bool_mat[B.arange(len(target)), target.long()] = 1
-                class_observed = target_bool_mat.sum(axis=0) > 0
-                for c in range(num_classes):
-                    if not class_observed[c]:
-                        warnings.warn(f"Class {c} had 0 observations, omitted from AUROC calculation", UserWarning)
-                preds = preds[:, class_observed]
-                target = target_bool_mat[:, class_observed]
-                target = B.where(target)[1]
-                num_classes = class_observed.sum()
-                if num_classes == 1:
-                    raise ValueError("Found 1 non-empty class in `multiclass` AUROC calculation")
-        fpr, tpr, _ = roc(preds, target, num_classes, pos_label, sample_weights)
-
-    # calculate standard roc auc score
-    if max_fpr is None or max_fpr == 1:
-        if mode == DataType.MULTILABEL and average == AverageMethod.MICRO:
-            pass
-        elif num_classes != 1:
-            # calculate auc scores per class
-            auc_scores = [_auc_compute_without_check(x, y, 1.0) for x, y in zip(fpr, tpr)]
-
-            # calculate average
-            if average == AverageMethod.NONE:
-                return tensor(auc_scores)
-            if average == AverageMethod.MACRO:
-                return B.mean(B.stack(auc_scores))
-            if average == AverageMethod.WEIGHTED:
-                if mode == DataType.MULTILABEL:
-                    support = B.sum(target, dim=0)
-                else:
-                    support = B.bincount(target.flatten(), minlength=num_classes)
-                return B.sum(B.stack(auc_scores) * support / support.sum())
-
-            allowed_average = (AverageMethod.NONE.value, AverageMethod.MACRO.value, AverageMethod.WEIGHTED.value)
-            raise ValueError(
-                f"Argument `average` expected to be one of the following:" f" {allowed_average} but got {average}"
-            )
-
-        return _auc_compute_without_check(fpr, tpr, 1.0)
-
-    _device = fpr.device if isinstance(fpr, Tensor) else fpr[0].device
-    max_area: Tensor = tensor(max_fpr,
device=_device)
-    # Add a single point at max_fpr and interpolate its tpr value
-    stop = B.bucketize(max_area, fpr, out_int32=True, right=True)
-    weight = (max_area - fpr[stop - 1]) / (fpr[stop] - fpr[stop - 1])
-    interp_tpr: Tensor = B.lerp(tpr[stop - 1], tpr[stop], weight)
-    tpr = B.cat([tpr[:stop], interp_tpr.view(1)])
-    fpr = B.cat([fpr[:stop], max_area.view(1)])
-
-    # Compute partial AUC
-    partial_auc = _auc_compute_without_check(fpr, tpr, 1.0)
-
-    # McClish correction: standardize result to be 0.5 if non-discriminant and 1 if maximal
-    min_area: Tensor = 0.5 * max_area ** 2
-    return 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area))
-
-
-def auroc(
-    preds: Tensor,
-    target: Tensor,
-    num_classes: Optional[int] = None,
-    pos_label: Optional[int] = None,
-    average: Optional[str] = "macro",
-    max_fpr: Optional[float] = None,
-    sample_weights: Optional[Sequence] = None,
-) -> Tensor:
-    """Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_)
-
-    Args:
-        preds: predictions from model (logits or probabilities)
-        target: Ground truth labels
-        num_classes: integer with number of classes for multi-label and multiclass problems.
-            Should be set to ``None`` for binary problems
-        pos_label: integer determining the positive class. Default is ``None``,
-            which for binary problems is translated to 1. For multiclass problems
-            this argument should not be set, as we iteratively change it in the
-            range [0,num_classes-1]
-        average:
-            - ``'micro'`` computes metric globally. Only works for multilabel problems
-            - ``'macro'`` computes metric for each class and uniformly averages them
-            - ``'weighted'`` computes metric for each class and does a weighted-average,
-              where each class is weighted by their support (accounts for class imbalance)
-            - ``None`` computes and returns the metric per class
-        max_fpr:
-            If not ``None``, calculates standardized partial AUC over the
-            range [0, max_fpr]. Should be a float between 0 and 1.
-        sample_weights: sample weights for each data point
-
-    Raises:
-        ValueError:
-            If ``max_fpr`` is not a ``float`` in the range ``(0, 1]``.
-        RuntimeError:
-            If ``PyTorch version`` is ``below 1.6`` since max_fpr requires `B.bucketize`
-            which is not available below 1.6.
-        ValueError:
-            If ``max_fpr`` is not set to ``None`` and the mode is ``not binary``
-            since partial AUC computation is not available in multilabel/multiclass.
-        ValueError:
-            If ``average`` is none of ``None``, ``"macro"`` or ``"weighted"``.
-
-    Example (binary case):
-        >>> from paddlemetrics.functional import auroc
-        >>> preds = B.tensor([0.13, 0.26, 0.08, 0.19, 0.34])
-        >>> target = B.tensor([0, 0, 1, 1, 1])
-        >>> auroc(preds, target, pos_label=1)
-        tensor(0.5000)
-
-    Example (multiclass case):
-        >>> preds = B.tensor([[0.90, 0.05, 0.05],
-        ...                   [0.05, 0.90, 0.05],
-        ...                   [0.05, 0.05, 0.90],
-        ...                   [0.85, 0.05, 0.10],
-        ...
[0.10, 0.10, 0.80]])
-        >>> target = B.tensor([0, 1, 1, 2, 2])
-        >>> auroc(preds, target, num_classes=3)
-        tensor(0.7778)
-    """
-    preds, target, mode = _auroc_update(preds, target)
-    return _auroc_compute(preds, target, mode, num_classes, pos_label, average, max_fpr, sample_weights)
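The `max_fpr` branch above truncates the ROC curve, integrates, and applies the McClish correction; a minimal NumPy sketch of the same steps with illustrative arrays:

# Partial AUC with McClish standardization (illustrative; not the module's API).
import numpy as np

fpr = np.array([0.0, 0.25, 0.5, 1.0])
tpr = np.array([0.0, 0.6, 0.8, 1.0])
max_fpr = 0.4

# interpolate tpr at max_fpr, then keep only the curve left of it
stop = np.searchsorted(fpr, max_fpr, side="right")
w = (max_fpr - fpr[stop - 1]) / (fpr[stop] - fpr[stop - 1])
tpr_interp = tpr[stop - 1] + w * (tpr[stop] - tpr[stop - 1])
fpr_c = np.append(fpr[:stop], max_fpr)
tpr_c = np.append(tpr[:stop], tpr_interp)

partial_auc = np.trapz(tpr_c, fpr_c)
# McClish: rescale so a non-discriminant curve scores 0.5 and a perfect one 1.0
min_area = 0.5 * max_fpr ** 2
max_area = max_fpr
print(0.5 * (1 + (partial_auc - min_area) / (max_area - min_area)))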
For multiclass problems - this argument should not be set as we iteratively change it in the - range [0,num_classes-1] - average: reduction method for multi-class or multi-label problems - sample_weights: sample weights for each data point - - Example: - >>> # binary case - >>> preds = B.tensor([0, 1, 2, 3]) - >>> target = B.tensor([0, 1, 1, 1]) - >>> pos_label = 1 - >>> preds, target, num_classes, pos_label = _average_precision_update(preds, target, pos_label=pos_label) - >>> _average_precision_compute(preds, target, num_classes, pos_label) - tensor(1.) - - >>> # multiclass case - >>> preds = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05], - ... [0.05, 0.75, 0.05, 0.05, 0.05], - ... [0.05, 0.05, 0.75, 0.05, 0.05], - ... [0.05, 0.05, 0.05, 0.75, 0.05]]) - >>> target = B.tensor([0, 1, 3, 2]) - >>> num_classes = 5 - >>> preds, target, num_classes, pos_label = _average_precision_update(preds, target, num_classes) - >>> _average_precision_compute(preds, target, num_classes, average=None) - [tensor(1.), tensor(1.), tensor(0.2500), tensor(0.2500), tensor(nan)] - """ - - # todo: `sample_weights` is unused - precision, recall, _ = _precision_recall_curve_compute(preds, target, num_classes, pos_label) - if average == "weighted": - if preds.ndim == target.ndim and target.ndim > 1: - weights = target.sum(dim=0).float() - else: - weights = B.bincount(target, minlength=num_classes).float() - weights = weights / B.sum(weights) - else: - weights = None - return _average_precision_compute_with_precision_recall(precision, recall, num_classes, average, weights) - - -def _average_precision_compute_with_precision_recall( - precision: Tensor, - recall: Tensor, - num_classes: int, - average: Optional[str] = "macro", - weights: Optional[Tensor] = None, -) -> Union[List[Tensor], Tensor]: - """Computes the average precision score from precision and recall. - - Args: - precision: precision values - recall: recall values - num_classes: integer with number of classes. Not nessesary to provide - for binary problems. - average: reduction method for multi-class or multi-label problems - weights: weights to use when average='weighted' - - Example: - >>> # binary case - >>> preds = B.tensor([0, 1, 2, 3]) - >>> target = B.tensor([0, 1, 1, 1]) - >>> pos_label = 1 - >>> preds, target, num_classes, pos_label = _average_precision_update(preds, target, pos_label=pos_label) - >>> precision, recall, _ = _precision_recall_curve_compute(preds, target, num_classes, pos_label) - >>> _average_precision_compute_with_precision_recall(precision, recall, num_classes, average=None) - tensor(1.) - - >>> # multiclass case - >>> preds = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05], - ... [0.05, 0.75, 0.05, 0.05, 0.05], - ... [0.05, 0.05, 0.75, 0.05, 0.05], - ... 
[0.05, 0.05, 0.05, 0.75, 0.05]]) - >>> target = B.tensor([0, 1, 3, 2]) - >>> num_classes = 5 - >>> preds, target, num_classes, pos_label = _average_precision_update(preds, target, num_classes) - >>> precision, recall, _ = _precision_recall_curve_compute(preds, target, num_classes) - >>> _average_precision_compute_with_precision_recall(precision, recall, num_classes, average=None) - [tensor(1.), tensor(1.), tensor(0.2500), tensor(0.2500), tensor(nan)] - """ - - # Return the step function integral - # The following works because the last entry of precision is - # guaranteed to be 1, as returned by precision_recall_curve - if num_classes == 1: - return -B.sum((recall[1:] - recall[:-1]) * precision[:-1]) - - res = [] - for p, r in zip(precision, recall): - res.append(-B.sum((r[1:] - r[:-1]) * p[:-1])) - - # Reduce - if average in ("macro", "weighted"): - res = B.stack(res) - if B.isnan(res).any(): - warnings.warn( - "Average precision score for one or more classes was `nan`. Ignoring these classes in average", - UserWarning, - ) - if average == "macro": - return res[~B.isnan(res)].mean() - weights = B.ones_like(res) if weights is None else weights - return (res * weights)[~B.isnan(res)].sum() - if average is None: - return res - allowed_average = ("micro", "macro", "weighted", None) - raise ValueError(f"Expected argument `average` to be one of {allowed_average}" f" but got {average}") - - -def average_precision( - preds: Tensor, - target: Tensor, - num_classes: Optional[int] = None, - pos_label: Optional[int] = None, - average: Optional[str] = "macro", - sample_weights: Optional[Sequence] = None, -) -> Union[List[Tensor], Tensor]: - """Computes the average precision score. - - Args: - preds: predictions from model (logits or probabilities) - target: ground truth values - num_classes: integer with number of classes. Not nessesary to provide - for binary problems. - pos_label: integer determining the positive class. Default is ``None`` - which for binary problem is translate to 1. For multiclass problems - this argument should not be set as we iteratively change it in the - range [0,num_classes-1] - average: - defines the reduction that is applied in the case of multiclass and multilabel input. - Should be one of the following: - - - ``'macro'`` [default]: Calculate the metric for each class separately, and average the - metrics across classes (with equal weights for each class). - - ``'micro'``: Calculate the metric globally, across all samples and classes. Cannot be - used with multiclass input. - - ``'weighted'``: Calculate the metric for each class separately, and average the - metrics across classes, weighting each class by its support. - - ``'none'`` or ``None``: Calculate the metric for each class separately, and return - the metric for every class. - - sample_weights: sample weights for each data point - - Returns: - tensor with average precision. If multiclass will return list - of such tensors, one for each class - - Example (binary case): - >>> from paddlemetrics.functional import average_precision - >>> pred = B.tensor([0, 1, 2, 3]) - >>> target = B.tensor([0, 1, 1, 1]) - >>> average_precision(pred, target, pos_label=1) - tensor(1.) - - Example (multiclass case): - >>> pred = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05], - ... [0.05, 0.75, 0.05, 0.05, 0.05], - ... [0.05, 0.05, 0.75, 0.05, 0.05], - ... 
[0.05, 0.05, 0.05, 0.75, 0.05]]) - >>> target = B.tensor([0, 1, 3, 2]) - >>> average_precision(pred, target, num_classes=5, average=None) - [tensor(1.), tensor(1.), tensor(0.2500), tensor(0.2500), tensor(nan)] - """ - # fixme: `sample_weights` is unused - preds, target, num_classes, pos_label = _average_precision_update(preds, target, num_classes, pos_label, average) - return _average_precision_compute(preds, target, num_classes, pos_label, average, sample_weights) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/calibration_error.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/calibration_error.py deleted file mode 100644 index 132036417..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/calibration_error.py +++ /dev/null @@ -1,156 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Tuple - -import paddleext.torchapi as B -from paddleext.torchapi import FloatTensor, Tensor - -from paddlemetrics.utilities.checks import _input_format_classification -from paddlemetrics.utilities.enums import DataType - - -def _ce_compute( - confidences: FloatTensor, - accuracies: FloatTensor, - bin_boundaries: FloatTensor, - norm: str = "l1", - debias: bool = False, -) -> Tensor: - """Computes the calibration error given the provided bin boundaries and norm. - - Args: - confidences (FloatTensor): The confidence (i.e. predicted prob) of the top1 prediction. - accuracies (FloatTensor): 1.0 if the top-1 prediction was correct, 0.0 otherwise. - bin_boundaries (FloatTensor): Bin boundaries separating the linspace from 0 to 1. - norm (str, optional): Norm function to use when computing calibration error. Defaults to "l1". - debias (bool, optional): Apply debiasing to L2 norm computation as in - `Verified Uncertainty Calibration`_. Defaults to False. - - Raises: - ValueError: If an unsupported norm function is provided. - - Returns: - Tensor: Calibration error scalar. - """ - if norm not in {"l1", "l2", "max"}: - raise ValueError(f"Norm {norm} is not supported. Please select from l1, l2, or max. ") - - conf_bin = B.zeros_like(bin_boundaries) - acc_bin = B.zeros_like(bin_boundaries) - prop_bin = B.zeros_like(bin_boundaries) - for i, (bin_lower, bin_upper) in enumerate(zip(bin_boundaries[:-1], bin_boundaries[1:])): - # Calculated confidence and accuracy in each bin - in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item()) - prop_in_bin = in_bin.float().mean() - if prop_in_bin.item() > 0: - acc_bin[i] = accuracies[in_bin].float().mean() - conf_bin[i] = confidences[in_bin].mean() - prop_bin[i] = prop_in_bin - - if norm == "l1": - ce = B.sum(B.abs(acc_bin - conf_bin) * prop_bin) - elif norm == "max": - ce = B.max(B.abs(acc_bin - conf_bin)) - elif norm == "l2": - ce = B.sum(B.pow(acc_bin - conf_bin, 2) * prop_bin) - # NOTE: debiasing is disabled in the wrapper functions. This implementation differs from that in sklearn. 
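The reduction in `_average_precision_compute_with_precision_recall` above is a step-function integral over the precision-recall curve, AP = -sum((r[i+1] - r[i]) * p[i]). A minimal sketch with made-up curve values, assuming `paddleext.torchapi` mirrors the PyTorch tensor API as the doctests in this patch do:

```python
import paddleext.torchapi as B  # assumed to mirror the PyTorch tensor API

# Hypothetical values shaped like a precision-recall curve's output:
# recall falls from 1 to 0, and the last precision entry is 1.
precision = B.tensor([0.5, 1.0, 1.0])
recall = B.tensor([1.0, 0.5, 0.0])

# Step-function integral of the curve
ap = -B.sum((recall[1:] - recall[:-1]) * precision[:-1])
print(ap)  # tensor(0.7500)
```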
- if debias: - # the order here (acc_bin - 1) vs (1 - acc_bin) is flipped from - # the equation in Verified Uncertainty Calibration (Kumar et al., 2019). - debias_bins = (acc_bin * (acc_bin - 1) * prop_bin) / (prop_bin * accuracies.size()[0] - 1) - ce += B.sum(B.nan_to_num(debias_bins)) # replace nans with zeros if nothing appeared in a bin - ce = B.sqrt(ce) if ce > 0 else B.tensor(0) - return ce - - -def _ce_update(preds: Tensor, target: Tensor) -> Tuple[FloatTensor, FloatTensor]: - """Given a predictions and targets tensor, computes the confidences of the top-1 prediction and records their - correctness. - - Args: - preds (Tensor): Input softmaxed predictions. - target (Tensor): Labels. - - Raises: - ValueError: If the dataset shape is not binary, multiclass, or multidimensional-multiclass. - - Returns: - Tuple[FloatTensor, FloatTensor]: The top-1 confidences and the corresponding accuracies, both cast to float. - """ - _, _, mode = _input_format_classification(preds, target) - - if mode == DataType.BINARY: - confidences, accuracies = preds, target - elif mode == DataType.MULTICLASS: - confidences, predictions = preds.max(dim=1) - accuracies = predictions.eq(target) - elif mode == DataType.MULTIDIM_MULTICLASS: - # reshape tensors - # for preds, move the class dimension to the final axis and flatten the rest - confidences, predictions = B.transpose(preds, 1, -1).flatten(0, -2).max(dim=1) - # for targets, just flatten the target - accuracies = predictions.eq(target.flatten()) - else: - raise ValueError( - f"Calibration error is not well-defined for data with size {preds.size()} and targets {target.size()}." - ) - # must be cast to float for ddp allgather to work - return confidences.float(), accuracies.float() - - -def calibration_error(preds: Tensor, target: Tensor, n_bins: int = 15, norm: str = "l1") -> Tensor: - r""" - `Computes the Top-label Calibration Error`_ - - Three different norms are implemented, each corresponding to variations on the calibration error metric. - - L1 norm (Expected Calibration Error) - - .. math:: - \text{ECE} = \frac{1}{N}\sum_i^N \vert p_i - c_i \vert - - Infinity norm (Maximum Calibration Error) - - .. math:: - \text{MCE} = \max_{i} (p_i - c_i) - - L2 norm (Root Mean Square Calibration Error) - - .. math:: - \text{RMSCE} = \sqrt{\frac{1}{N}\sum_i^N (p_i - c_i)^2} - - Where :math:`p_i` is the top-1 prediction accuracy in - bin :math:`i` and :math:`c_i` is the average confidence of predictions in bin :math:`i`. - - .. note:: - L2-norm debiasing is not yet supported. - - Args: - preds (Tensor): Model output probabilities. - target (Tensor): Ground-truth target class labels. - n_bins (int, optional): Number of bins to use when computing the calibration error. Defaults to 15. - norm (str, optional): Norm used to compare empirical and expected probability bins. - Defaults to "l1", or Expected Calibration Error. - """ - if norm not in ("l1", "l2", "max"): - raise ValueError(f"Norm {norm} is not supported. Please select from l1, l2, or max. 
") - - if not isinstance(n_bins, int) or n_bins <= 0: - raise ValueError(f"Expected argument `n_bins` to be a int larger than 0 but got {n_bins}") - - confidences, accuracies = _ce_update(preds, target) - - bin_boundaries = B.linspace(0, 1, n_bins + 1, dtype=B.float, device=preds.device) - - return _ce_compute(confidences, accuracies, bin_boundaries, norm=norm) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/cohen_kappa.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/cohen_kappa.py deleted file mode 100644 index 2face7a5c..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/cohen_kappa.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.functional.classification.confusion_matrix import _confusion_matrix_compute, _confusion_matrix_update - -_cohen_kappa_update = _confusion_matrix_update - - -def _cohen_kappa_compute(confmat: Tensor, weights: Optional[str] = None) -> Tensor: - """Computes Cohen's kappa based on the weighting type. - - Args: - confmat: Confusion matrix without normalization - weights: Weighting type to calculate the score. Choose from - - ``None`` or ``'none'``: no weighting - - ``'linear'``: linear weighting - - ``'quadratic'``: quadratic weighting - - Example: - >>> target = B.tensor([1, 1, 0, 0]) - >>> preds = B.tensor([0, 1, 0, 0]) - >>> confmat = _cohen_kappa_update(preds, target, num_classes=2) - >>> _cohen_kappa_compute(confmat) - tensor(0.5000) - """ - - confmat = _confusion_matrix_compute(confmat) - confmat = confmat.float() if not confmat.is_floating_point() else confmat - n_classes = confmat.shape[0] - sum0 = confmat.sum(dim=0, keepdim=True) - sum1 = confmat.sum(dim=1, keepdim=True) - expected = sum1 @ sum0 / sum0.sum() # outer product - - if weights is None: - w_mat = B.ones_like(confmat).flatten() - w_mat[:: n_classes + 1] = 0 - w_mat = w_mat.reshape(n_classes, n_classes) - elif weights in ("linear", "quadratic"): - w_mat = B.zeros_like(confmat) - w_mat += B.arange(n_classes, dtype=w_mat.dtype, device=w_mat.device) - if weights == "linear": - w_mat = B.abs(w_mat - w_mat.T) - else: - w_mat = B.pow(w_mat - w_mat.T, 2.0) - else: - raise ValueError( - f"Received {weights} for argument ``weights`` but should be either" " None, 'linear' or 'quadratic'" - ) - - k = B.sum(w_mat * confmat) / B.sum(w_mat * expected) - return 1 - k - - -def cohen_kappa( - preds: Tensor, - target: Tensor, - num_classes: int, - weights: Optional[str] = None, - threshold: float = 0.5, -) -> Tensor: - r""" - Calculates `Cohen's kappa score`_ that measures inter-annotator agreement. - It is defined as - - .. math:: - \kappa = (p_o - p_e) / (1 - p_e) - - where :math:`p_o` is the empirical probability of agreement and :math:`p_e` isg - the expected agreement when both annotators assign labels randomly. 
Note that - :math:`p_e` is estimated using a per-annotator empirical prior over the - class labels. - - Args: - preds: (float or long tensor), Either a ``(N, ...)`` tensor with labels or - ``(N, C, ...)`` where C is the number of classes, tensor with labels/probabilities - - target: ``target`` (long tensor), tensor with shape ``(N, ...)`` with ground true labels - - num_classes: Number of classes in the dataset. - - weights: Weighting type to calculate the score. Choose from - - ``None`` or ``'none'``: no weighting - - ``'linear'``: linear weighting - - ``'quadratic'``: quadratic weighting - - threshold: - Threshold value for binary or multi-label probabilities. default: 0.5 - - Example: - >>> from paddlemetrics.functional import cohen_kappa - >>> target = B.tensor([1, 1, 0, 0]) - >>> preds = B.tensor([0, 1, 0, 0]) - >>> cohen_kappa(preds, target, num_classes=2) - tensor(0.5000) - """ - confmat = _cohen_kappa_update(preds, target, num_classes, threshold) - return _cohen_kappa_compute(confmat, weights) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/confusion_matrix.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/confusion_matrix.py deleted file mode 100644 index b4f3c12de..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/confusion_matrix.py +++ /dev/null @@ -1,184 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.utilities import rank_zero_warn -from paddlemetrics.utilities.checks import _input_format_classification -from paddlemetrics.utilities.enums import DataType - - -def _confusion_matrix_update( - preds: Tensor, target: Tensor, num_classes: int, threshold: float = 0.5, multilabel: bool = False -) -> Tensor: - """Updates and returns confusion matrix (without any normalization) based on the mode of the input. - - Args: - preds: Predicted tensor - target: Ground truth tensor - threshold: Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the - case of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. - multilabel: determines if data is multilabel or not. 
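The kappa doctest above can be checked by hand from its 2x2 confusion matrix. A small sketch, assuming `paddleext.torchapi` mirrors the PyTorch tensor API:

```python
import paddleext.torchapi as B  # assumed to mirror the PyTorch tensor API

confmat = B.tensor([[2.0, 0.0], [1.0, 1.0]])  # from the cohen_kappa doctest above
n = confmat.sum()
p_o = (confmat[0, 0] + confmat[1, 1]) / n  # observed agreement: 3/4
p_e = B.sum(confmat.sum(dim=0) * confmat.sum(dim=1)) / n ** 2  # chance agreement: 1/2
print((p_o - p_e) / (1 - p_e))  # tensor(0.5000), matching the doctest
```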
- """ - - preds, target, mode = _input_format_classification(preds, target, threshold) - if mode not in (DataType.BINARY, DataType.MULTILABEL): - preds = preds.argmax(dim=1) - target = target.argmax(dim=1) - if multilabel: - unique_mapping = ((2 * target + preds) + 4 * B.arange(num_classes, device=preds.device)).flatten() - minlength = 4 * num_classes - else: - unique_mapping = (target.view(-1) * num_classes + preds.view(-1)).to(B.long) - minlength = num_classes ** 2 - - bins = B.bincount(unique_mapping, minlength=minlength) - if multilabel: - confmat = bins.reshape(num_classes, 2, 2) - else: - confmat = bins.reshape(num_classes, num_classes) - return confmat - - -def _confusion_matrix_compute(confmat: Tensor, normalize: Optional[str] = None) -> Tensor: - """Computes confusion matrix based on the normalization mode. - - Args: - confmat: Confusion matrix without normalization - normalize: Normalization mode for confusion matrix. Choose from - - ``None`` or ``'none'``: no normalization (default) - - ``'true'``: normalization over the targets (most commonly used) - - ``'pred'``: normalization over the predictions - - ``'all'``: normalization over the whole matrix - - Example: - >>> # binary case - >>> target = B.tensor([1, 1, 0, 0]) - >>> preds = B.tensor([0, 1, 0, 0]) - >>> confmat = _confusion_matrix_update(preds, target, num_classes=2) - >>> _confusion_matrix_compute(confmat) - tensor([[2, 0], - [1, 1]]) - - >>> # multiclass case - >>> target = B.tensor([2, 1, 0, 0]) - >>> preds = B.tensor([2, 1, 0, 1]) - >>> confmat = _confusion_matrix_update(preds, target, num_classes=3) - >>> _confusion_matrix_compute(confmat) - tensor([[1, 1, 0], - [0, 1, 0], - [0, 0, 1]]) - - >>> # multilabel case - >>> target = B.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = B.tensor([[0, 0, 1], [1, 0, 1]]) - >>> confmat = _confusion_matrix_update(preds, target, num_classes=3, multilabel=True) - >>> _confusion_matrix_compute(confmat) # doctest: +NORMALIZE_WHITESPACE - tensor([[[1, 0], [0, 1]], - [[1, 0], [1, 0]], - [[0, 1], [0, 1]]]) - """ - - allowed_normalize = ("true", "pred", "all", "none", None) - if normalize not in allowed_normalize: - raise ValueError(f"Argument average needs to one of the following: {allowed_normalize}") - if normalize is not None and normalize != "none": - confmat = confmat.float() if not confmat.is_floating_point() else confmat - if normalize == "true": - confmat = confmat / confmat.sum(axis=1, keepdim=True) - elif normalize == "pred": - confmat = confmat / confmat.sum(axis=0, keepdim=True) - elif normalize == "all": - confmat = confmat / confmat.sum() - - nan_elements = confmat[B.isnan(confmat)].nelement() - if nan_elements != 0: - confmat[B.isnan(confmat)] = 0 - rank_zero_warn(f"{nan_elements} nan values found in confusion matrix have been replaced with zeros.") - return confmat - - -def confusion_matrix( - preds: Tensor, - target: Tensor, - num_classes: int, - normalize: Optional[str] = None, - threshold: float = 0.5, - multilabel: bool = False, -) -> Tensor: - r""" - Computes the `confusion matrix`_. Works with binary, - multiclass, and multilabel data. Accepts probabilities or logits from a model output or integer class - values in prediction. Works with multi-dimensional preds and target, but it should be noted that - additional dimensions will be flattened. - - If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument - to convert into integer labels. This is the case for binary and multi-label probabilities or logits. 
- - If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``. - - If working with multilabel data, setting the `is_multilabel` argument to `True` will make sure that a - `confusion matrix gets calculated per label`_. - - Args: - preds: (float or long tensor), Either a ``(N, ...)`` tensor with labels or - ``(N, C, ...)`` where C is the number of classes, tensor with labels/logits/probabilities - target: ``target`` (long tensor), tensor with shape ``(N, ...)`` with ground true labels - num_classes: Number of classes in the dataset. - normalize: Normalization mode for confusion matrix. Choose from - - - ``None`` or ``'none'``: no normalization (default) - - ``'true'``: normalization over the targets (most commonly used) - - ``'pred'``: normalization over the predictions - - ``'all'``: normalization over the whole matrix - - threshold: - Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case - of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. - - multilabel: - determines if data is multilabel or not. - - Example (binary data): - >>> from paddlemetrics import ConfusionMatrix - >>> target = B.tensor([1, 1, 0, 0]) - >>> preds = B.tensor([0, 1, 0, 0]) - >>> confmat = ConfusionMatrix(num_classes=2) - >>> confmat(preds, target) - tensor([[2., 0.], - [1., 1.]]) - - Example (multiclass data): - >>> target = B.tensor([2, 1, 0, 0]) - >>> preds = B.tensor([2, 1, 0, 1]) - >>> confmat = ConfusionMatrix(num_classes=3) - >>> confmat(preds, target) - tensor([[1., 1., 0.], - [0., 1., 0.], - [0., 0., 1.]]) - - Example (multilabel data): - >>> target = B.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = B.tensor([[0, 0, 1], [1, 0, 1]]) - >>> confmat = ConfusionMatrix(num_classes=3, multilabel=True) - >>> confmat(preds, target) # doctest: +NORMALIZE_WHITESPACE - tensor([[[1., 0.], [0., 1.]], - [[1., 0.], [1., 0.]], - [[0., 1.], [0., 1.]]]) - - """ - confmat = _confusion_matrix_update(preds, target, num_classes, threshold, multilabel) - return _confusion_matrix_compute(confmat, normalize) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/dice.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/dice.py deleted file mode 100644 index 5f90fe02b..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/dice.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Tuple - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.utilities.data import to_categorical -from paddlemetrics.utilities.distributed import reduce - - -def _stat_scores( - preds: Tensor, - target: Tensor, - class_index: int, - argmax_dim: int = 1, -) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: - """Calculates the number of true positive, false positive, true negative and false negative for a specific - class. 
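The update step above avoids an explicit double loop: each (target, pred) pair is encoded as one integer and the confusion matrix is recovered with a single ``bincount``. A minimal sketch reusing the multiclass doctest values, assuming `paddleext.torchapi` mirrors the PyTorch tensor API:

```python
import paddleext.torchapi as B  # assumed to mirror the PyTorch tensor API

target = B.tensor([2, 1, 0, 0])
preds = B.tensor([2, 1, 0, 1])
num_classes = 3

# Every (target, pred) pair maps to a unique bucket in [0, num_classes**2)
unique_mapping = target * num_classes + preds
confmat = B.bincount(unique_mapping, minlength=num_classes ** 2).reshape(num_classes, num_classes)
print(confmat)  # tensor([[1, 1, 0], [0, 1, 0], [0, 0, 1]]), as in the doctest
```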
- - Args: - preds: prediction tensor - target: target tensor - class_index: class to calculate over - argmax_dim: if pred is a tensor of probabilities, this indicates the - axis the argmax transformation will be applied over - - Return: - True Positive, False Positive, True Negative, False Negative, Support - - Example: - >>> x = B.tensor([1, 2, 3]) - >>> y = B.tensor([0, 2, 3]) - >>> tp, fp, tn, fn, sup = _stat_scores(x, y, class_index=1) - >>> tp, fp, tn, fn, sup - (tensor(0), tensor(1), tensor(2), tensor(0), tensor(0)) - """ - if preds.ndim == target.ndim + 1: - preds = to_categorical(preds, argmax_dim=argmax_dim) - - tp = ((preds == class_index) * (target == class_index)).to(B.long).sum() - fp = ((preds == class_index) * (target != class_index)).to(B.long).sum() - tn = ((preds != class_index) * (target != class_index)).to(B.long).sum() - fn = ((preds != class_index) * (target == class_index)).to(B.long).sum() - sup = (target == class_index).to(B.long).sum() - - return tp, fp, tn, fn, sup - - -def dice_score( - preds: Tensor, - target: Tensor, - bg: bool = False, - nan_score: float = 0.0, - no_fg_score: float = 0.0, - reduction: str = "elementwise_mean", -) -> Tensor: - """Compute dice score from prediction scores. - - Args: - preds: estimated probabilities - target: ground-truth labels - bg: whether to also compute dice for the background - nan_score: score to return, if a NaN occurs during computation - no_fg_score: score to return, if no foreground pixel was found in target - reduction: a method to reduce metric score over labels. - - - ``'elementwise_mean'``: takes the mean (default) - - ``'sum'``: takes the sum - - ``'none'``: no reduction will be applied - - Return: - Tensor containing dice score - - Example: - >>> from paddlemetrics.functional import dice_score - >>> pred = B.tensor([[0.85, 0.05, 0.05, 0.05], - ... [0.05, 0.85, 0.05, 0.05], - ... [0.05, 0.05, 0.85, 0.05], - ... [0.05, 0.05, 0.05, 0.85]]) - >>> target = B.tensor([0, 1, 3, 2]) - >>> dice_score(pred, target) - tensor(0.3333) - """ - num_classes = preds.shape[1] - bg_inv = 1 - int(bg) - scores = B.zeros(num_classes - bg_inv, device=preds.device, dtype=B.float32) - for i in range(bg_inv, num_classes): - if not (target == i).any(): - # no foreground class - scores[i - bg_inv] += no_fg_score - continue - - # TODO: rewrite to use general `stat_scores` - tp, fp, _, fn, _ = _stat_scores(preds=preds, target=target, class_index=i) - denom = (2 * tp + fp + fn).to(B.float) - # nan result - score_cls = (2 * tp).to(B.float) / denom if B.is_nonzero(denom) else nan_score - scores[i - bg_inv] += score_cls.item() - - return reduce(scores, reduction=reduction) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/f_beta.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/f_beta.py deleted file mode 100644 index 7b9b626ce..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/f_beta.py +++ /dev/null @@ -1,351 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
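The per-class Dice score computed in the loop above reduces to 2*TP / (2*TP + FP + FN), with a fallback score when the denominator is zero. A minimal sketch with hypothetical counts, assuming `paddleext.torchapi` mirrors the PyTorch tensor API:

```python
import paddleext.torchapi as B  # assumed to mirror the PyTorch tensor API

tp, fp, fn = B.tensor(1.0), B.tensor(1.0), B.tensor(1.0)  # hypothetical per-class counts
denom = 2 * tp + fp + fn
dice = (2 * tp / denom) if B.is_nonzero(denom) else B.tensor(0.0)  # nan_score fallback
print(dice)  # tensor(0.5000)
```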
-# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.functional.classification.stat_scores import _reduce_stat_scores, _stat_scores_update -from paddlemetrics.utilities.enums import AverageMethod as AvgMethod -from paddlemetrics.utilities.enums import MDMCAverageMethod - - -def _safe_divide(num: Tensor, denom: Tensor) -> Tensor: - """prevent zero division.""" - denom[denom == 0.0] = 1 - return num / denom - - -def _fbeta_compute( - tp: Tensor, - fp: Tensor, - tn: Tensor, - fn: Tensor, - beta: float, - ignore_index: Optional[int], - average: str, - mdmc_average: Optional[str], -) -> Tensor: - """Computes f_beta metric from stat scores: true positives, false positives, true negatives, false negatives. - - Args: - tp: True positives - fp: False positives - tn: True negatives - fn: False negatives - beta: The parameter `beta` (which determines the weight of recall in the combined score) - ignore_index: Integer specifying a target class to ignore. If given, this class index does not contribute - to the returned score, regardless of reduction method - average: Defines the reduction that is applied - mdmc_average: Defines how averaging is done for multi-dimensional multi-class inputs (on top of the - ``average`` parameter) - - Example: - >>> from paddlemetrics.functional.classification.stat_scores import _stat_scores_update - >>> target = B.tensor([0, 1, 2, 0, 1, 2]) - >>> preds = B.tensor([0, 2, 1, 0, 0, 1]) - >>> tp, fp, tn, fn = _stat_scores_update( - ... preds, - ... target, - ... reduce='micro', - ... num_classes=3, - ... ) - >>> _fbeta_compute(tp, fp, tn, fn, beta=0.5, ignore_index=None, average='micro', mdmc_average=None) - tensor(0.3333) - """ - if average == AvgMethod.MICRO and mdmc_average != MDMCAverageMethod.SAMPLEWISE: - mask = tp >= 0 - precision = _safe_divide(tp[mask].sum().float(), (tp[mask] + fp[mask]).sum()) - recall = _safe_divide(tp[mask].sum().float(), (tp[mask] + fn[mask]).sum()) - else: - precision = _safe_divide(tp.float(), tp + fp) - recall = _safe_divide(tp.float(), tp + fn) - - num = (1 + beta ** 2) * precision * recall - denom = beta ** 2 * precision + recall - denom[denom == 0.0] = 1.0 # avoid division by 0 - - # if classes matter and a given class is not present in both the preds and the target, - # computing the score for this class is meaningless, thus they should be ignored - if average == AvgMethod.NONE and mdmc_average != MDMCAverageMethod.SAMPLEWISE: - # a class is not present if there exists no TPs, no FPs, and no FNs - meaningless_indeces = B.nonzero((tp | fn | fp) == 0).cpu() - if ignore_index is None: - ignore_index = meaningless_indeces - else: - ignore_index = B.unique(B.cat((meaningless_indeces, B.tensor([[ignore_index]])))) - - if ignore_index is not None: - if average not in (AvgMethod.MICRO, AvgMethod.SAMPLES) and mdmc_average == MDMCAverageMethod.SAMPLEWISE: - num[..., ignore_index] = -1 - denom[..., ignore_index] = -1 - elif average not in (AvgMethod.MICRO, AvgMethod.SAMPLES): - num[ignore_index, ...] = -1 - denom[ignore_index, ...] 
= -1 - - if average == AvgMethod.MACRO and mdmc_average != MDMCAverageMethod.SAMPLEWISE: - cond = (tp + fp + fn == 0) | (tp + fp + fn == -3) - num = num[~cond] - denom = denom[~cond] - - return _reduce_stat_scores( - numerator=num, - denominator=denom, - weights=None if average != AvgMethod.WEIGHTED else tp + fn, - average=average, - mdmc_average=mdmc_average, - ) - - -def fbeta( - preds: Tensor, - target: Tensor, - beta: float = 1.0, - average: str = "micro", - mdmc_average: Optional[str] = None, - ignore_index: Optional[int] = None, - num_classes: Optional[int] = None, - threshold: float = 0.5, - top_k: Optional[int] = None, - multiclass: Optional[bool] = None, -) -> Tensor: - r""" - Computes f_beta metric. - - .. math:: - F_{\beta} = (1 + \beta^2) * \frac{\text{precision} * \text{recall}} - {(\beta^2 * \text{precision}) + \text{recall}} - - Works with binary, multiclass, and multilabel data. - Accepts probabilities or logits from a model output or integer class values in prediction. - Works with multi-dimensional preds and target. - - If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument - to convert into integer labels. This is the case for binary and multi-label logits or probabilities. - - If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``. - - The reduction method (how the precision scores are aggregated) is controlled by the - ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the - multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`. - - Args: - preds: Predictions from model (probabilities, logits or labels) - target: Ground truth values - average: - Defines the reduction that is applied. Should be one of the following: - - - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes. - - ``'macro'``: Calculate the metric for each class separately, and average the - metrics across classes (with equal weights for each class). - - ``'weighted'``: Calculate the metric for each class separately, and average the - metrics across classes, weighting each class by its support (``tp + fn``). - - ``'none'`` or ``None``: Calculate the metric for each class separately, and return - the metric for every class. - - ``'samples'``: Calculate the metric for each sample, and average the metrics - across samples (with equal weights for each sample). - - .. note:: What is considered a sample in the multi-dimensional multi-class case - depends on the value of ``mdmc_average``. - - .. note:: If ``'none'`` and a given class doesn't occur in the `preds` or `target`, - the value for the class will be ``nan``. - - mdmc_average: - Defines how averaging is done for multi-dimensional multi-class inputs (on top of the - ``average`` parameter). Should be one of the following: - - - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional - multi-class. - - ``'samplewise'``: In this case, the statistics are computed separately for each - sample on the ``N`` axis, and then averaged over samples. - The computation for each sample is done by treating the flattened extra axes ``...`` - (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample, - and computing the metric for the sample based on that. 
- - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs - (see :ref:`references/modules:input types`) - are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they - were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. - - ignore_index: - Integer specifying a target class to ignore. If given, this class index does not contribute - to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` - or ``'none'``, the score for the ignored class will be returned as ``nan``. - num_classes: - Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods. - threshold: - Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case - of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. - top_k: - Number of highest probability or logit score predictions considered to find the correct label, - relevant only for (multi-dimensional) multi-class inputs. The - default value (``None``) will be interpreted as 1 for these inputs. - - Should be left at default (``None``) for all other types of inputs. - multiclass: - Used only in certain special cases, where you want to treat inputs as a different type - than what they appear to be. See the parameter's - :ref:`documentation section ` - for a more detailed explanation and examples. - - Return: - The shape of the returned tensor depends on the ``average`` parameter - - - If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned - - If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number - of classes - - Example: - >>> from paddlemetrics.functional import fbeta - >>> target = B.tensor([0, 1, 2, 0, 1, 2]) - >>> preds = B.tensor([0, 2, 1, 0, 0, 1]) - >>> fbeta(preds, target, num_classes=3, beta=0.5) - tensor(0.3333) - - """ - allowed_average = list(AvgMethod) - if average not in allowed_average: - raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.") - - if mdmc_average is not None and MDMCAverageMethod.from_str(mdmc_average) is None: - raise ValueError(f"The `mdmc_average` has to be one of {list(MDMCAverageMethod)}, got {mdmc_average}.") - - if average in [AvgMethod.MACRO, AvgMethod.WEIGHTED, AvgMethod.NONE] and (not num_classes or num_classes < 1): - raise ValueError(f"When you set `average` as {average}, you have to provide the number of classes.") - - if num_classes and ignore_index is not None and (not 0 <= ignore_index < num_classes or num_classes == 1): - raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes") - - reduce = AvgMethod.MACRO if average in [AvgMethod.WEIGHTED, AvgMethod.NONE] else average - tp, fp, tn, fn = _stat_scores_update( - preds, - target, - reduce=reduce, - mdmc_reduce=mdmc_average, - threshold=threshold, - num_classes=num_classes, - top_k=top_k, - multiclass=multiclass, - ignore_index=ignore_index, - ) - - return _fbeta_compute(tp, fp, tn, fn, beta, ignore_index, average, mdmc_average) - - -def f1( - preds: Tensor, - target: Tensor, - beta: float = 1.0, - average: str = "micro", - mdmc_average: Optional[str] = None, - ignore_index: Optional[int] = None, - num_classes: Optional[int] = None, - threshold: float = 0.5, - top_k: Optional[int] = None, - multiclass: Optional[bool] = None, -) -> Tensor: - """Computes F1 metric. 
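The ``fbeta`` doctest above can be reproduced by hand from micro-averaged stat scores. A minimal sketch, assuming `paddleext.torchapi` mirrors the PyTorch tensor API:

```python
import paddleext.torchapi as B  # assumed to mirror the PyTorch tensor API

# Micro counts for the fbeta doctest above: 2 of the 6 predictions are correct.
tp, fp, fn = B.tensor(2.0), B.tensor(4.0), B.tensor(4.0)
beta = 0.5
precision = tp / (tp + fp)
recall = tp / (tp + fn)
print((1 + beta ** 2) * precision * recall / (beta ** 2 * precision + recall))  # tensor(0.3333)
```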
F1 metrics correspond to a equally weighted average of the precision and recall scores. - - Works with binary, multiclass, and multilabel data. - Accepts probabilities or logits from a model output or integer class values in prediction. - Works with multi-dimensional preds and target. - - If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument - to convert into integer labels. This is the case for binary and multi-label probabilities or logits. - - If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``. - - The reduction method (how the precision scores are aggregated) is controlled by the - ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the - multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`. - - Args: - preds: Predictions from model (probabilities, logits or labels) - target: Ground truth values - average: - Defines the reduction that is applied. Should be one of the following: - - - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes. - - ``'macro'``: Calculate the metric for each class separately, and average the - metrics across classes (with equal weights for each class). - - ``'weighted'``: Calculate the metric for each class separately, and average the - metrics across classes, weighting each class by its support (``tp + fn``). - - ``'none'`` or ``None``: Calculate the metric for each class separately, and return - the metric for every class. - - ``'samples'``: Calculate the metric for each sample, and average the metrics - across samples (with equal weights for each sample). - - .. note:: What is considered a sample in the multi-dimensional multi-class case - depends on the value of ``mdmc_average``. - - .. note:: If ``'none'`` and a given class doesn't occur in the `preds` or `target`, - the value for the class will be ``nan``. - - mdmc_average: - Defines how averaging is done for multi-dimensional multi-class inputs (on top of the - ``average`` parameter). Should be one of the following: - - - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional - multi-class. - - - ``'samplewise'``: In this case, the statistics are computed separately for each - sample on the ``N`` axis, and then averaged over samples. - The computation for each sample is done by treating the flattened extra axes ``...`` - (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample, - and computing the metric for the sample based on that. - - - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs - (see :ref:`references/modules:input types`) - are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they - were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. - - ignore_index: - Integer specifying a target class to ignore. If given, this class index does not contribute - to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` - or ``'none'``, the score for the ignored class will be returned as ``nan``. - - num_classes: - Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods. - - threshold: - Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case - of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. 
- top_k: - Number of highest probability or logit score predictions considered to find the correct label, - relevant only for (multi-dimensional) multi-class inputs. The - default value (``None``) will be interpreted as 1 for these inputs. - - Should be left at default (``None``) for all other types of inputs. - - multiclass: - Used only in certain special cases, where you want to treat inputs as a different type - than what they appear to be. See the parameter's - :ref:`documentation section ` - for a more detailed explanation and examples. - - Return: - The shape of the returned tensor depends on the ``average`` parameter - - - If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned - - If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number - of classes - - Example: - >>> from paddlemetrics.functional import f1 - >>> target = B.tensor([0, 1, 2, 0, 1, 2]) - >>> preds = B.tensor([0, 2, 1, 0, 0, 1]) - >>> f1(preds, target, num_classes=3) - tensor(0.3333) - """ - return fbeta(preds, target, 1.0, average, mdmc_average, ignore_index, num_classes, threshold, top_k, multiclass) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/hamming_distance.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/hamming_distance.py deleted file mode 100644 index e3f95bad4..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/hamming_distance.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Tuple, Union - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.utilities.checks import _input_format_classification - - -def _hamming_distance_update( - preds: Tensor, - target: Tensor, - threshold: float = 0.5, -) -> Tuple[Tensor, int]: - """Returns the number of positions where prediction equals target, and number of predictions. - - Args: - preds: Predicted tensor - target: Ground truth tensor - threshold: Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case - of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. - """ - - preds, target, _ = _input_format_classification(preds, target, threshold=threshold) - - correct = (preds == target).sum() - total = preds.numel() - - return correct, total - - -def _hamming_distance_compute(correct: Tensor, total: Union[int, Tensor]) -> Tensor: - """Computes the Hamming distance. 
- - Args: - correct: Number of positions where prediction equals target - total: Total number of predictions - - Example: - >>> target = B.tensor([[0, 1], [1, 1]]) - >>> preds = B.tensor([[0, 1], [0, 1]]) - >>> correct, total = _hamming_distance_update(preds, target) - >>> _hamming_distance_compute(correct, total) - tensor(0.2500) - """ - - return 1 - correct.float() / total - - -def hamming_distance(preds: Tensor, target: Tensor, threshold: float = 0.5) -> Tensor: - r""" - Computes the average `Hamming distance`_ (also - known as Hamming loss) between targets and predictions: - - .. math:: - \text{Hamming distance} = \frac{1}{N \cdot L} \sum_i^N \sum_l^L 1(y_{il} \neq \hat{y}_{il}) - - Where :math:`y` is a tensor of target values, :math:`\hat{y}` is a tensor of predictions, - and :math:`\bullet_{il}` refers to the :math:`l`-th label of the :math:`i`-th sample of that - tensor. - - This is the same as ``1-accuracy`` for binary data, while for all other types of inputs it - treats each possible label separately - meaning that, for example, multi-class data is - treated as if it were multi-label. - - Accepts all input types listed in :ref:`references/modules:input types`. - - Args: - preds: Predictions from model (probabilities, logits or labels) - target: Ground truth - threshold: - Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case - of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. - - Example: - >>> from paddlemetrics.functional import hamming_distance - >>> target = B.tensor([[0, 1], [1, 1]]) - >>> preds = B.tensor([[0, 1], [0, 1]]) - >>> hamming_distance(preds, target) - tensor(0.2500) - - """ - - correct, total = _hamming_distance_update(preds, target, threshold) - return _hamming_distance_compute(correct, total) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/hinge.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/hinge.py deleted file mode 100644 index 59d8be1af..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/hinge.py +++ /dev/null @@ -1,231 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Optional, Tuple, Union - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.utilities.checks import _input_squeeze -from paddlemetrics.utilities.data import to_onehot -from paddlemetrics.utilities.enums import DataType, EnumStr - - -class MulticlassMode(EnumStr): - """Enum to represent possible multiclass modes of hinge. - - >>> "Crammer-Singer" in list(MulticlassMode) - True - """ - - CRAMMER_SINGER = "crammer-singer" - ONE_VS_ALL = "one-vs-all" - - -def _check_shape_and_type_consistency_hinge( - preds: Tensor, - target: Tensor, -) -> DataType: - """Checks shape and type of `preds` and `target` and returns mode of the input tensors. 
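The hamming distance update/compute pair above amounts to one comparison and one division. A worked version of the doctest, assuming `paddleext.torchapi` mirrors the PyTorch tensor API:

```python
import paddleext.torchapi as B  # assumed to mirror the PyTorch tensor API

target = B.tensor([[0, 1], [1, 1]])
preds = B.tensor([[0, 1], [0, 1]])

correct = (preds == target).sum()  # 3 of the 4 positions agree
total = preds.numel()
print(1 - correct.float() / total)  # tensor(0.2500)
```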
- - Args: - preds: Predicted tensor - target: Ground truth tensor - - Raises: - `ValueError`: if `target` is not one dimensional - `ValueError`: if `preds` and `target` do not have the same shape in the first dimension - `ValueError`: if `pred` is neither one nor two dimensional - """ - - if target.ndim > 1: - raise ValueError( - f"The `target` should be one dimensional, got `target` with shape={target.shape}.", - ) - - if preds.ndim == 1: - if preds.shape != target.shape: - raise ValueError( - "The `preds` and `target` should have the same shape,", - f" got `preds` with shape={preds.shape} and `target` with shape={target.shape}.", - ) - mode = DataType.BINARY - elif preds.ndim == 2: - if preds.shape[0] != target.shape[0]: - raise ValueError( - "The `preds` and `target` should have the same shape in the first dimension,", - f" got `preds` with shape={preds.shape} and `target` with shape={target.shape}.", - ) - mode = DataType.MULTICLASS - else: - raise ValueError(f"The `preds` should be one or two dimensional, got `preds` with shape={preds.shape}.") - return mode - - -def _hinge_update( - preds: Tensor, - target: Tensor, - squared: bool = False, - multiclass_mode: Optional[Union[str, MulticlassMode]] = None, -) -> Tuple[Tensor, Tensor]: - """Updates and returns sum over Hinge loss scores for each observation and the total number of observations. - - Args: - preds: Predicted tensor - target: Ground truth tensor - squared: If True, this will compute the squared hinge loss. Otherwise, computes the regular hinge loss. - multiclass_mode: - Which approach to use for multi-class inputs (has no effect in the binary case). ``None`` (default), - ``MulticlassMode.CRAMMER_SINGER`` or ``"crammer-singer"``, uses the Crammer Singer multi-class hinge loss. - ``MulticlassMode.ONE_VS_ALL`` or ``"one-vs-all"`` computes the hinge loss in a one-vs-all fashion. - """ - preds, target = _input_squeeze(preds, target) - - mode = _check_shape_and_type_consistency_hinge(preds, target) - - if mode == DataType.MULTICLASS: - target = to_onehot(target, max(2, preds.shape[1])).bool() - - if mode == DataType.MULTICLASS and (multiclass_mode is None or multiclass_mode == MulticlassMode.CRAMMER_SINGER): - margin = preds[target] - margin -= B.max(preds[~target].view(preds.shape[0], -1), dim=1)[0] - elif mode == DataType.BINARY or multiclass_mode == MulticlassMode.ONE_VS_ALL: - target = target.bool() - margin = B.zeros_like(preds) - margin[target] = preds[target] - margin[~target] = -preds[~target] - else: - raise ValueError( - "The `multiclass_mode` should be either None / 'crammer-singer' / MulticlassMode.CRAMMER_SINGER" - "(default) or 'one-vs-all' / MulticlassMode.ONE_VS_ALL," - f" got {multiclass_mode}." - ) - - measures = 1 - margin - measures = B.clamp(measures, 0) - - if squared: - measures = measures.pow(2) - - total = tensor(target.shape[0], device=target.device) - return measures.sum(dim=0), total - - -def _hinge_compute(measure: Tensor, total: Tensor) -> Tensor: - """Computes mean Hinge loss. 
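For the Crammer-Singer branch of `_hinge_update` above, the margin of a sample is the true-class score minus the best score among the other classes. A single-sample sketch, assuming `paddleext.torchapi` mirrors the PyTorch tensor API:

```python
import paddleext.torchapi as B  # assumed to mirror the PyTorch tensor API

preds = B.tensor([-1.0, 0.9, 0.2])  # class scores for one sample, target class 0
margin = preds[0] - B.max(preds[1:])  # -1.0 - 0.9 = -1.9
print(B.clamp(1 - margin, 0))  # tensor(2.9000), the per-sample hinge loss
```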
- - Args: - measure: Sum over hinge losses for each observation - total: Number of observations - - Example: - >>> # binary case - >>> target = B.tensor([0, 1, 1]) - >>> preds = B.tensor([-2.2, 2.4, 0.1]) - >>> measure, total = _hinge_update(preds, target) - >>> _hinge_compute(measure, total) - tensor(0.3000) - - >>> # multiclass case - >>> target = B.tensor([0, 1, 2]) - >>> preds = B.tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]]) - >>> measure, total = _hinge_update(preds, target) - >>> _hinge_compute(measure, total) - tensor(2.9000) - - >>> # multiclass one-vs-all mode case - >>> target = B.tensor([0, 1, 2]) - >>> preds = B.tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]]) - >>> measure, total = _hinge_update(preds, target, multiclass_mode="one-vs-all") - >>> _hinge_compute(measure, total) - tensor([2.2333, 1.5000, 1.2333]) - """ - - return measure / total - - -def hinge( - preds: Tensor, - target: Tensor, - squared: bool = False, - multiclass_mode: Optional[Union[str, MulticlassMode]] = None, -) -> Tensor: - r""" - Computes the mean `Hinge loss`_ typically used for Support Vector Machines (SVMs). - - In the binary case it is defined as: - - .. math:: - \text{Hinge loss} = \max(0, 1 - y \times \hat{y}) - - Where :math:`y \in \{-1, 1\}` is the target, and :math:`\hat{y} \in \mathbb{R}` is the prediction. - - In the multi-class case, when ``multiclass_mode=None`` (default), ``multiclass_mode=MulticlassMode.CRAMMER_SINGER`` - or ``multiclass_mode="crammer-singer"``, this metric will compute the multi-class hinge loss defined by Crammer and - Singer as: - - .. math:: - \text{Hinge loss} = \max\left(0, 1 - \hat{y}_y + \max_{i \ne y} (\hat{y}_i)\right) - - Where :math:`y \in \{0, ..., \mathrm{C}-1\}` is the target class (where :math:`\mathrm{C}` is the number of classes), - and :math:`\hat{y} \in \mathbb{R}^\mathrm{C}` is the predicted output per class. - - In the multi-class case when ``multiclass_mode=MulticlassMode.ONE_VS_ALL`` or ``multiclass_mode='one-vs-all'``, this - metric will use a one-vs-all approach to compute the hinge loss, giving a vector of C outputs where each entry pits - that class against all remaining classes. - - This metric can optionally output the mean of the squared hinge loss by setting ``squared=True``. - - Only accepts inputs with preds shape of (N) (binary) or (N, C) (multi-class) and target shape of (N). - - Args: - preds: Predictions from model (as float outputs from decision function). - target: Ground truth labels. - squared: - If True, this will compute the squared hinge loss. Otherwise, computes the regular hinge loss (default). - multiclass_mode: - Which approach to use for multi-class inputs (has no effect in the binary case). ``None`` (default), - ``MulticlassMode.CRAMMER_SINGER`` or ``"crammer-singer"``, uses the Crammer Singer multi-class hinge loss. - ``MulticlassMode.ONE_VS_ALL`` or ``"one-vs-all"`` computes the hinge loss in a one-vs-all fashion. - - Raises: - ValueError: - If preds shape is not of size (N) or (N, C). - ValueError: - If target shape is not of size (N). - ValueError: - If ``multiclass_mode`` is not: None, ``MulticlassMode.CRAMMER_SINGER``, ``"crammer-singer"``, - ``MulticlassMode.ONE_VS_ALL`` or ``"one-vs-all"``. 
- - Example (binary case): - >>> import paddleext.torchapi as B - >>> from paddlemetrics.functional import hinge - >>> target = B.tensor([0, 1, 1]) - >>> preds = B.tensor([-2.2, 2.4, 0.1]) - >>> hinge(preds, target) - tensor(0.3000) - - Example (default / multiclass case): - >>> target = B.tensor([0, 1, 2]) - >>> preds = B.tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]]) - >>> hinge(preds, target) - tensor(2.9000) - - Example (multiclass example, one vs all mode): - >>> target = B.tensor([0, 1, 2]) - >>> preds = B.tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]]) - >>> hinge(preds, target, multiclass_mode="one-vs-all") - tensor([2.2333, 1.5000, 1.2333]) - """ - measure, total = _hinge_update(preds, target, squared=squared, multiclass_mode=multiclass_mode) - return _hinge_compute(measure, total) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/iou.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/iou.py deleted file mode 100644 index b7cf60774..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/iou.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.functional.classification.confusion_matrix import _confusion_matrix_update -from paddlemetrics.utilities.data import get_num_classes -from paddlemetrics.utilities.distributed import reduce - - -def _iou_from_confmat( - confmat: Tensor, - num_classes: int, - ignore_index: Optional[int] = None, - absent_score: float = 0.0, - reduction: str = "elementwise_mean", -) -> Tensor: - """Computes the intersection over union from confusion matrix. - - Args: - confmat: Confusion matrix without normalization - num_classes: Number of classes for a given prediction and target tensor - ignore_index: optional int specifying a target class to ignore. If given, this class index does not contribute - to the returned score, regardless of reduction method. - absent_score: score to use for an individual class, if no instances of the class index were present in `pred` - AND no instances of the class index were present in `target`. - reduction: a method to reduce metric score over labels. - - - ``'elementwise_mean'``: takes the mean (default) - - ``'sum'``: takes the sum - - ``'none'``: no reduction will be applied - """ - - # Remove the ignored class index from the scores. - if ignore_index is not None and 0 <= ignore_index < num_classes: - confmat[ignore_index] = 0.0 - - intersection = B.diag(confmat) - union = confmat.sum(0) + confmat.sum(1) - intersection - - # If this class is absent in both target AND pred (union == 0), then use the absent_score for this class. 
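Before the function continues below, the IoU-from-confusion-matrix idea in compact form: per-class intersection is the diagonal, and the union is row sum plus column sum minus the diagonal. A minimal sketch with a hypothetical matrix, assuming `paddleext.torchapi` mirrors the PyTorch tensor API:

```python
import paddleext.torchapi as B  # assumed to mirror the PyTorch tensor API

confmat = B.tensor([[2.0, 1.0], [0.0, 3.0]])  # hypothetical 2-class confusion matrix
intersection = B.diag(confmat)  # per-class true positives
union = confmat.sum(0) + confmat.sum(1) - intersection  # TP + FP + FN per class
print(intersection / union)  # tensor([0.6667, 0.7500])
```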
-    scores = intersection.float() / union.float()
-    scores[union == 0] = absent_score
-
-    if ignore_index is not None and 0 <= ignore_index < num_classes:
-        scores = B.cat(
-            [
-                scores[:ignore_index],
-                scores[ignore_index + 1 :],
-            ]
-        )
-
-    return reduce(scores, reduction=reduction)
-
-
-def iou(
-    preds: Tensor,
-    target: Tensor,
-    ignore_index: Optional[int] = None,
-    absent_score: float = 0.0,
-    threshold: float = 0.5,
-    num_classes: Optional[int] = None,
-    reduction: str = "elementwise_mean",
-) -> Tensor:
-    r"""
-    Computes `Jaccard index`_
-
-    .. math:: J(A,B) = \frac{|A\cap B|}{|A\cup B|}
-
-    Where: :math:`A` and :math:`B` are both tensors of the same size,
-    containing integer class values. They may be subject to conversion from
-    input data (see description below).
-
-    Note that it is different from box IoU.
-
-    If preds and target are the same shape and preds is a float tensor, we use the ``threshold`` argument
-    to convert into integer labels. This is the case for binary and multi-label probabilities.
-
-    If pred has an extra dimension as in the case of multi-class scores we
-    perform an argmax on ``dim=1``.
-
-    Args:
-        preds: tensor containing predictions from model (probabilities, or labels) with shape ``[N, d1, d2, ...]``
-        target: tensor containing ground truth labels with shape ``[N, d1, d2, ...]``
-        ignore_index: optional int specifying a target class to ignore. If given,
-            this class index does not contribute to the returned score, regardless
-            of reduction method. Has no effect if given an int that is not in the
-            range [0, num_classes-1], where num_classes is either given or derived
-            from pred and target. By default, no index is ignored, and all classes are used.
-        absent_score: score to use for an individual class, if no instances of
-            the class index were present in `pred` AND no instances of the class
-            index were present in `target`. For example, if we have 3 classes,
-            [0, 0] for `pred`, and [0, 2] for `target`, then class 1 would be
-            assigned the `absent_score`.
-        threshold:
-            Threshold value for binary or multi-label probabilities. default: 0.5
-        num_classes:
-            Optionally specify the number of classes
-        reduction: a method to reduce metric score over labels.
-
-            - ``'elementwise_mean'``: takes the mean (default)
-            - ``'sum'``: takes the sum
-            - ``'none'``: no reduction will be applied
-
-    Return:
-        IoU score: Tensor containing single value if reduction is
-        'elementwise_mean', or number of classes if reduction is 'none'
-
-    Example:
-        >>> from paddlemetrics.functional import iou
-        >>> target = B.randint(0, 2, (10, 25, 25))
-        >>> pred = B.tensor(target)
-        >>> pred[2:5, 7:13, 9:15] = 1 - pred[2:5, 7:13, 9:15]
-        >>> iou(pred, target)
-        tensor(0.9660)
-    """
-
-    num_classes = get_num_classes(preds=preds, target=target, num_classes=num_classes)
-    confmat = _confusion_matrix_update(preds, target, num_classes, threshold)
-    return _iou_from_confmat(confmat, num_classes, ignore_index, absent_score, reduction)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/kl_divergence.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/kl_divergence.py
deleted file mode 100644
index 0d7685c1e..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/kl_divergence.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from typing import Optional, Tuple
-
-import paddleext.torchapi as B
-from paddleext.torchapi import Tensor
-
-from paddlemetrics.utilities.checks import _check_same_shape
-from paddlemetrics.utilities.data import METRIC_EPS
-
-
-def _kld_update(p: Tensor, q: Tensor, log_prob: bool) -> Tuple[Tensor, int]:
-    """Updates and returns KL divergence scores for each observation and the total number of observations. Checks
-    same shape and 2D nature of the input tensors else raises ValueError.
-
-    Args:
-        p: data distribution with shape ``[N, d]``
-        q: prior or approximate distribution with shape ``[N, d]``
-        log_prob: bool indicating if input is log-probabilities or probabilities. If given as probabilities,
-            will normalize to make sure the distributions sum to 1
-    """
-    _check_same_shape(p, q)
-    if p.ndim != 2 or q.ndim != 2:
-        raise ValueError(f"Expected both p and q distribution to be 2D but got {p.ndim} and {q.ndim} respectively")
-
-    total = p.shape[0]
-    if log_prob:
-        measures = B.sum(p.exp() * (p - q), axis=-1)
-    else:
-        p = p / p.sum(axis=-1, keepdim=True)
-        q = q / q.sum(axis=-1, keepdim=True)
-        q = B.clamp(q, METRIC_EPS)
-        measures = B.sum(p * B.log(p / q), axis=-1)
-
-    return measures, total
-
-
-def _kld_compute(measures: Tensor, total: Tensor, reduction: Optional[str] = "mean") -> Tensor:
-    """Computes the KL divergence based on the type of reduction.
-
-    Args:
-        measures: Tensor of KL divergence scores for each observation
-        total: Number of observations
-        reduction:
-            Determines how to reduce over the ``N``/batch dimension:
-
-            - ``'mean'`` [default]: Averages score across samples
-            - ``'sum'``: Sum score across samples
-            - ``'none'`` or ``None``: Returns score per sample
-
-    Example:
-        >>> p = B.tensor([[0.36, 0.48, 0.16]])
-        >>> q = B.tensor([[1/3, 1/3, 1/3]])
-        >>> measures, total = _kld_update(p, q, log_prob=False)
-        >>> _kld_compute(measures, total)
-        tensor(0.0853)
-    """
-
-    if reduction == "sum":
-        return measures.sum()
-    if reduction == "mean":
-        return measures.sum() / total
-    if reduction is None or reduction == "none":
-        return measures
-    return measures / total
-
-
-def kl_divergence(p: Tensor, q: Tensor, log_prob: bool = False, reduction: Optional[str] = "mean") -> Tensor:
-    r"""Computes `KL divergence`_
-
-    .. math::
-        D_{KL}(P||Q) = \sum_{x\in\mathcal{X}} P(x) \log\frac{P(x)}{Q(x)}
-
-    Where :math:`P` and :math:`Q` are probability distributions where :math:`P` usually represents a distribution
-    over data and :math:`Q` is often a prior or approximation of :math:`P`. It should be noted that the KL divergence
-    is a non-symmetric metric i.e. :math:`D_{KL}(P||Q) \neq D_{KL}(Q||P)`.
-
-    Args:
-        p: data distribution with shape ``[N, d]``
-        q: prior or approximate distribution with shape ``[N, d]``
-        log_prob: bool indicating if input is log-probabilities or probabilities. If given as probabilities,
-            will normalize to make sure the distributions sum to 1
-        reduction:
-            Determines how to reduce over the ``N``/batch dimension:
-
-            - ``'mean'`` [default]: Averages score across samples
-            - ``'sum'``: Sum score across samples
-            - ``'none'`` or ``None``: Returns score per sample
-
-    Example:
-        >>> import paddleext.torchapi as B
-        >>> p = B.tensor([[0.36, 0.48, 0.16]])
-        >>> q = B.tensor([[1/3, 1/3, 1/3]])
-        >>> kl_divergence(p, q)
-        tensor(0.0853)
-    """
-    measures, total = _kld_update(p, q, log_prob)
-    return _kld_compute(measures, total, reduction)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/matthews_corrcoef.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/matthews_corrcoef.py
deleted file mode 100644
index 8532a358d..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/matthews_corrcoef.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import paddleext.torchapi as B
-from paddleext.torchapi import Tensor
-
-from paddlemetrics.functional.classification.confusion_matrix import _confusion_matrix_update
-
-_matthews_corrcoef_update = _confusion_matrix_update
-
-
-def _matthews_corrcoef_compute(confmat: Tensor) -> Tensor:
-    """Computes Matthews correlation coefficient.
-
-    Args:
-        confmat: Confusion matrix
-
-    Example:
-        >>> target = B.tensor([1, 1, 0, 0])
-        >>> preds = B.tensor([0, 1, 0, 0])
-        >>> confmat = _matthews_corrcoef_update(preds, target, num_classes=2)
-        >>> _matthews_corrcoef_compute(confmat)
-        tensor(0.5774)
-    """
-
-    tk = confmat.sum(dim=1).float()
-    pk = confmat.sum(dim=0).float()
-    c = B.trace(confmat).float()
-    s = confmat.sum().float()
-    return (c * s - sum(tk * pk)) / (B.sqrt(s ** 2 - sum(pk * pk)) * B.sqrt(s ** 2 - sum(tk * tk)))
-
-
-def matthews_corrcoef(
-    preds: Tensor,
-    target: Tensor,
-    num_classes: int,
-    threshold: float = 0.5,
-) -> Tensor:
-    r"""
-    Calculates `Matthews correlation coefficient`_ that measures
-    the general correlation or quality of a classification. In the binary case it
-    is defined as:
-
-    .. math::
-        MCC = \frac{TP*TN - FP*FN}{\sqrt{(TP+FP)*(TP+FN)*(TN+FP)*(TN+FN)}}
-
-    where TP, TN, FP and FN are respectively the true positives, true negatives,
-    false positives and false negatives. Also works in the case of multi-label or
-    multi-class input.
-
-    Args:
-        preds: (float or long tensor), Either a ``(N, ...)`` tensor with labels or
-            ``(N, C, ...)`` where C is the number of classes, tensor with labels/probabilities
-        target: ``target`` (long tensor), tensor with shape ``(N, ...)`` with ground truth labels
-        num_classes: Number of classes in the dataset.
-        threshold:
-            Threshold value for binary or multi-label probabilities. default: 0.5
-
-    Example:
-        >>> from paddlemetrics.functional import matthews_corrcoef
-        >>> target = B.tensor([1, 1, 0, 0])
-        >>> preds = B.tensor([0, 1, 0, 0])
-        >>> matthews_corrcoef(preds, target, num_classes=2)
-        tensor(0.5774)
-
-    """
-    confmat = _matthews_corrcoef_update(preds, target, num_classes, threshold)
-    return _matthews_corrcoef_compute(confmat)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/precision_recall.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/precision_recall.py
deleted file mode 100644
index 4b8528dc2..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/precision_recall.py
+++ /dev/null
@@ -1,568 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import Optional, Tuple
-
-import paddleext.torchapi as B
-from paddleext.torchapi import Tensor
-
-from paddlemetrics.functional.classification.stat_scores import _reduce_stat_scores, _stat_scores_update
-from paddlemetrics.utilities.enums import AverageMethod, MDMCAverageMethod
-
-
-def _precision_compute(
-    tp: Tensor,
-    fp: Tensor,
-    fn: Tensor,
-    average: str,
-    mdmc_average: Optional[str],
-) -> Tensor:
-    """Computes precision from the stat scores: true positives, false positives, true negatives, false negatives.
-
-    Args:
-        tp: True positives
-        fp: False positives
-        fn: False negatives
-        average: Defines the reduction that is applied
-        mdmc_average: Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
-            ``average`` parameter)
-
-    Example:
-        >>> from paddlemetrics.functional.classification.stat_scores import _stat_scores_update
-        >>> preds = B.tensor([2, 0, 2, 1])
-        >>> target = B.tensor([1, 1, 2, 0])
-        >>> tp, fp, tn, fn = _stat_scores_update(preds, target, reduce='macro', num_classes=3)
-        >>> _precision_compute(tp, fp, fn, average='macro', mdmc_average=None)
-        tensor(0.1667)
-        >>> tp, fp, tn, fn = _stat_scores_update(preds, target, reduce='micro')
-        >>> _precision_compute(tp, fp, fn, average='micro', mdmc_average=None)
-        tensor(0.2500)
-    """
-
-    numerator = tp
-    denominator = tp + fp
-
-    if average == AverageMethod.MACRO and mdmc_average != MDMCAverageMethod.SAMPLEWISE:
-        cond = tp + fp + fn == 0
-        numerator = numerator[~cond]
-        denominator = denominator[~cond]
-
-    if average == AverageMethod.NONE and mdmc_average != MDMCAverageMethod.SAMPLEWISE:
-        # a class is not present if there exists no TPs, no FPs, and no FNs
-        meaningless_indeces = B.nonzero((tp | fn | fp) == 0).cpu()
-        numerator[meaningless_indeces, ...] = -1
-        denominator[meaningless_indeces, ...] = -1
-
-    return _reduce_stat_scores(
-        numerator=numerator,
-        denominator=denominator,
-        weights=None if average != "weighted" else tp + fn,
-        average=average,
-        mdmc_average=mdmc_average,
-    )
-
-
-def precision(
-    preds: Tensor,
-    target: Tensor,
-    average: str = "micro",
-    mdmc_average: Optional[str] = None,
-    ignore_index: Optional[int] = None,
-    num_classes: Optional[int] = None,
-    threshold: float = 0.5,
-    top_k: Optional[int] = None,
-    multiclass: Optional[bool] = None,
-) -> Tensor:
-    r"""
-    Computes `Precision`_
-
-    .. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}}
-
-    Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and
-    false positives respectively. With the use of ``top_k`` parameter, this metric can
-    generalize to Precision@K.
-
-    The reduction method (how the precision scores are aggregated) is controlled by the
-    ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the
-    multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`.
-
-    Args:
-        preds: Predictions from model (probabilities, logits or labels)
-        target: Ground truth values
-        average:
-            Defines the reduction that is applied. Should be one of the following:
-
-            - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes.
-            - ``'macro'``: Calculate the metric for each class separately, and average the
-              metrics across classes (with equal weights for each class).
-            - ``'weighted'``: Calculate the metric for each class separately, and average the
-              metrics across classes, weighting each class by its support (``tp + fn``).
-            - ``'none'`` or ``None``: Calculate the metric for each class separately, and return
-              the metric for every class.
-            - ``'samples'``: Calculate the metric for each sample, and average the metrics
-              across samples (with equal weights for each sample).
-
-            .. note:: What is considered a sample in the multi-dimensional multi-class case
-                depends on the value of ``mdmc_average``.
-
-            .. note:: If ``'none'`` and a given class doesn't occur in the `preds` or `target`,
-                the value for the class will be ``nan``.
-
-        mdmc_average:
-            Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
-            ``average`` parameter). Should be one of the following:
-
-            - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional
-              multi-class.
-
-            - ``'samplewise'``: In this case, the statistics are computed separately for each
-              sample on the ``N`` axis, and then averaged over samples.
-              The computation for each sample is done by treating the flattened extra axes ``...``
-              (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample,
-              and computing the metric for the sample based on that.
-
-            - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs
-              (see :ref:`references/modules:input types`)
-              are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they
-              were ``(N_X, C)``. From here on the ``average`` parameter applies as usual.
-
-        ignore_index:
-            Integer specifying a target class to ignore. If given, this class index does not contribute
-            to the returned score, regardless of reduction method. If an index is ignored, and ``average=None``
-            or ``'none'``, the score for the ignored class will be returned as ``nan``.
-
-        num_classes:
-            Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods.
-
-        threshold:
-            Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
-            of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
-        top_k:
-            Number of highest probability or logit score predictions considered to find the correct label,
-            relevant only for (multi-dimensional) multi-class inputs. The
-            default value (``None``) will be interpreted as 1 for these inputs.
-
-            Should be left at default (``None``) for all other types of inputs.
-        multiclass:
-            Used only in certain special cases, where you want to treat inputs as a different type
-            than what they appear to be. See the parameter's
-            :ref:`documentation section `
-            for a more detailed explanation and examples.
-
-    Return:
-        The shape of the returned tensor depends on the ``average`` parameter
-
-        - If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned
-        - If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number
-          of classes
-
-    Raises:
-        ValueError:
-            If ``average`` is not one of ``"micro"``, ``"macro"``, ``"weighted"``,
-            ``"samples"``, ``"none"`` or ``None``.
-        ValueError:
-            If ``mdmc_average`` is not one of ``None``, ``"samplewise"``, ``"global"``.
-        ValueError:
-            If ``average`` is set but ``num_classes`` is not provided.
-        ValueError:
-            If ``num_classes`` is set
-            and ``ignore_index`` is not in the range ``[0, num_classes)``.
-
-    Example:
-        >>> from paddlemetrics.functional import precision
-        >>> preds = B.tensor([2, 0, 2, 1])
-        >>> target = B.tensor([1, 1, 2, 0])
-        >>> precision(preds, target, average='macro', num_classes=3)
-        tensor(0.1667)
-        >>> precision(preds, target, average='micro')
-        tensor(0.2500)
-
-    """
-    allowed_average = ["micro", "macro", "weighted", "samples", "none", None]
-    if average not in allowed_average:
-        raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.")
-
-    allowed_mdmc_average = [None, "samplewise", "global"]
-    if mdmc_average not in allowed_mdmc_average:
-        raise ValueError(f"The `mdmc_average` has to be one of {allowed_mdmc_average}, got {mdmc_average}.")
-
-    if average in ["macro", "weighted", "none", None] and (not num_classes or num_classes < 1):
-        raise ValueError(f"When you set `average` as {average}, you have to provide the number of classes.")
-
-    if num_classes and ignore_index is not None and (not 0 <= ignore_index < num_classes or num_classes == 1):
-        raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes")
-
-    reduce = "macro" if average in ["weighted", "none", None] else average
-    tp, fp, _, fn = _stat_scores_update(
-        preds,
-        target,
-        reduce=reduce,
-        mdmc_reduce=mdmc_average,
-        threshold=threshold,
-        num_classes=num_classes,
-        top_k=top_k,
-        multiclass=multiclass,
-        ignore_index=ignore_index,
-    )
-
-    return _precision_compute(tp, fp, fn, average, mdmc_average)
-
-
-def _recall_compute(
-    tp: Tensor,
-    fp: Tensor,
-    fn: Tensor,
-    average: str,
-    mdmc_average: Optional[str],
-) -> Tensor:
-    """Computes recall from the stat scores: true positives, false positives, true negatives, false negatives.
-
-    Args:
-        tp: True positives
-        fp: False positives
-        fn: False negatives
-        average: Defines the reduction that is applied
-        mdmc_average: Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
-            ``average`` parameter)
-
-    Example:
-        >>> from paddlemetrics.functional.classification.stat_scores import _stat_scores_update
-        >>> preds = B.tensor([2, 0, 2, 1])
-        >>> target = B.tensor([1, 1, 2, 0])
-        >>> tp, fp, tn, fn = _stat_scores_update(preds, target, reduce='macro', num_classes=3)
-        >>> _recall_compute(tp, fp, fn, average='macro', mdmc_average=None)
-        tensor(0.3333)
-        >>> tp, fp, tn, fn = _stat_scores_update(preds, target, reduce='micro')
-        >>> _recall_compute(tp, fp, fn, average='micro', mdmc_average=None)
-        tensor(0.2500)
-    """
-    numerator = tp
-    denominator = tp + fn
-
-    if average == AverageMethod.MACRO and mdmc_average != MDMCAverageMethod.SAMPLEWISE:
-        cond = tp + fp + fn == 0
-        numerator = numerator[~cond]
-        denominator = denominator[~cond]
-
-    if average == AverageMethod.NONE and mdmc_average != MDMCAverageMethod.SAMPLEWISE:
-        # a class is not present if there exists no TPs, no FPs, and no FNs
-        meaningless_indeces = ((tp | fn | fp) == 0).nonzero().cpu()
-        numerator[meaningless_indeces, ...] = -1
-        denominator[meaningless_indeces, ...] = -1
-
-    return _reduce_stat_scores(
-        numerator=numerator,
-        denominator=denominator,
-        weights=None if average != AverageMethod.WEIGHTED else tp + fn,
-        average=average,
-        mdmc_average=mdmc_average,
-    )
-
-
-def recall(
-    preds: Tensor,
-    target: Tensor,
-    average: str = "micro",
-    mdmc_average: Optional[str] = None,
-    ignore_index: Optional[int] = None,
-    num_classes: Optional[int] = None,
-    threshold: float = 0.5,
-    top_k: Optional[int] = None,
-    multiclass: Optional[bool] = None,
-) -> Tensor:
-    r"""
-    Computes `Recall`_
-
-    .. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}}
-
-    Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and
-    false negatives respectively. With the use of ``top_k`` parameter, this metric can
-    generalize to Recall@K.
-
-    The reduction method (how the recall scores are aggregated) is controlled by the
-    ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the
-    multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`.
-
-    Args:
-        preds: Predictions from model (probabilities, logits or labels)
-        target: Ground truth values
-        average:
-            Defines the reduction that is applied. Should be one of the following:
-
-            - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes.
-            - ``'macro'``: Calculate the metric for each class separately, and average the
-              metrics across classes (with equal weights for each class).
-            - ``'weighted'``: Calculate the metric for each class separately, and average the
-              metrics across classes, weighting each class by its support (``tp + fn``).
-            - ``'none'`` or ``None``: Calculate the metric for each class separately, and return
-              the metric for every class.
-            - ``'samples'``: Calculate the metric for each sample, and average the metrics
-              across samples (with equal weights for each sample).
-
-            .. note:: What is considered a sample in the multi-dimensional multi-class case
-                depends on the value of ``mdmc_average``.
-
-            .. note:: If ``'none'`` and a given class doesn't occur in the `preds` or `target`,
-                the value for the class will be ``nan``.
-
-        mdmc_average:
-            Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
-            ``average`` parameter). Should be one of the following:
-
-            - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional
-              multi-class.
-
-            - ``'samplewise'``: In this case, the statistics are computed separately for each
-              sample on the ``N`` axis, and then averaged over samples.
-              The computation for each sample is done by treating the flattened extra axes ``...``
-              (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample,
-              and computing the metric for the sample based on that.
-
-            - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs
-              (see :ref:`references/modules:input types`)
-              are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they
-              were ``(N_X, C)``. From here on the ``average`` parameter applies as usual.
-
-        ignore_index:
-            Integer specifying a target class to ignore. If given, this class index does not contribute
-            to the returned score, regardless of reduction method. If an index is ignored, and ``average=None``
-            or ``'none'``, the score for the ignored class will be returned as ``nan``.
-
-        num_classes:
-            Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods.
-
-        threshold:
-            Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
-            of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
-        top_k:
-            Number of highest probability or logit score predictions considered to find the correct label,
-            relevant only for (multi-dimensional) multi-class inputs. The
-            default value (``None``) will be interpreted as 1 for these inputs.
-
-            Should be left at default (``None``) for all other types of inputs.
-        multiclass:
-            Used only in certain special cases, where you want to treat inputs as a different type
-            than what they appear to be. See the parameter's
-            :ref:`documentation section `
-            for a more detailed explanation and examples.
-
-    Return:
-        The shape of the returned tensor depends on the ``average`` parameter
-
-        - If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned
-        - If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number
-          of classes
-
-    Raises:
-        ValueError:
-            If ``average`` is not one of ``"micro"``, ``"macro"``, ``"weighted"``,
-            ``"samples"``, ``"none"`` or ``None``.
-        ValueError:
-            If ``mdmc_average`` is not one of ``None``, ``"samplewise"``, ``"global"``.
-        ValueError:
-            If ``average`` is set but ``num_classes`` is not provided.
-        ValueError:
-            If ``num_classes`` is set
-            and ``ignore_index`` is not in the range ``[0, num_classes)``.
-
-    Example:
-        >>> from paddlemetrics.functional import recall
-        >>> preds = B.tensor([2, 0, 2, 1])
-        >>> target = B.tensor([1, 1, 2, 0])
-        >>> recall(preds, target, average='macro', num_classes=3)
-        tensor(0.3333)
-        >>> recall(preds, target, average='micro')
-        tensor(0.2500)
-
-    """
-    allowed_average = ["micro", "macro", "weighted", "samples", "none", None]
-    if average not in allowed_average:
-        raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.")
-
-    allowed_mdmc_average = [None, "samplewise", "global"]
-    if mdmc_average not in allowed_mdmc_average:
-        raise ValueError(f"The `mdmc_average` has to be one of {allowed_mdmc_average}, got {mdmc_average}.")
-
-    if average in ["macro", "weighted", "none", None] and (not num_classes or num_classes < 1):
-        raise ValueError(f"When you set `average` as {average}, you have to provide the number of classes.")
-
-    if num_classes and ignore_index is not None and (not 0 <= ignore_index < num_classes or num_classes == 1):
-        raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes")
-
-    reduce = "macro" if average in ["weighted", "none", None] else average
-    tp, fp, _, fn = _stat_scores_update(
-        preds,
-        target,
-        reduce=reduce,
-        mdmc_reduce=mdmc_average,
-        threshold=threshold,
-        num_classes=num_classes,
-        top_k=top_k,
-        multiclass=multiclass,
-        ignore_index=ignore_index,
-    )
-
-    return _recall_compute(tp, fp, fn, average, mdmc_average)
-
-
-def precision_recall(
-    preds: Tensor,
-    target: Tensor,
-    average: str = "micro",
-    mdmc_average: Optional[str] = None,
-    ignore_index: Optional[int] = None,
-    num_classes: Optional[int] = None,
-    threshold: float = 0.5,
-    top_k: Optional[int] = None,
-    multiclass: Optional[bool] = None,
-) -> Tuple[Tensor, Tensor]:
-    r"""
-    Computes `Precision`_
-
-    .. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}}
-
-
-    .. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}}
-
-    Where :math:`\text{TP}`, :math:`\text{FN}` and :math:`\text{FP}` represent the number
-    of true positives, false negatives and false positives respectively. With the use of
-    ``top_k`` parameter, this metric can generalize to Recall@K and Precision@K.
-
-    The reduction method (how the recall scores are aggregated) is controlled by the
-    ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the
-    multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`.
-
-    Args:
-        preds: Predictions from model (probabilities, logits or labels)
-        target: Ground truth values
-        average:
-            Defines the reduction that is applied. Should be one of the following:
-
-            - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes.
-            - ``'macro'``: Calculate the metric for each class separately, and average the
-              metrics across classes (with equal weights for each class).
-            - ``'weighted'``: Calculate the metric for each class separately, and average the
-              metrics across classes, weighting each class by its support (``tp + fn``).
-            - ``'none'`` or ``None``: Calculate the metric for each class separately, and return
-              the metric for every class.
-            - ``'samples'``: Calculate the metric for each sample, and average the metrics
-              across samples (with equal weights for each sample).
-
-            .. note:: What is considered a sample in the multi-dimensional multi-class case
-                depends on the value of ``mdmc_average``.
-
-            .. note:: If ``'none'`` and a given class doesn't occur in the `preds` or `target`,
-                the value for the class will be ``nan``.
-
-        mdmc_average:
-            Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
-            ``average`` parameter). Should be one of the following:
-
-            - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional
-              multi-class.
-
-            - ``'samplewise'``: In this case, the statistics are computed separately for each
-              sample on the ``N`` axis, and then averaged over samples.
-              The computation for each sample is done by treating the flattened extra axes ``...``
-              (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample,
-              and computing the metric for the sample based on that.
-
-            - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs
-              (see :ref:`references/modules:input types`)
-              are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they
-              were ``(N_X, C)``. From here on the ``average`` parameter applies as usual.
-
-        ignore_index:
-            Integer specifying a target class to ignore. If given, this class index does not contribute
-            to the returned score, regardless of reduction method. If an index is ignored, and ``average=None``
-            or ``'none'``, the score for the ignored class will be returned as ``nan``.
-
-        num_classes:
-            Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods.
-
-        threshold:
-            Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
-            of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
-        top_k:
-            Number of highest probability or logit score predictions considered to find the correct label,
-            relevant only for (multi-dimensional) multi-class inputs. The
-            default value (``None``) will be interpreted as 1 for these inputs.
-
-            Should be left at default (``None``) for all other types of inputs.
-        multiclass:
-            Used only in certain special cases, where you want to treat inputs as a different type
-            than what they appear to be. See the parameter's
-            :ref:`documentation section `
-            for a more detailed explanation and examples.
-
-    Return:
-        The function returns a tuple with two elements: precision and recall. Their shape
-        depends on the ``average`` parameter
-
-        - If ``average in ['micro', 'macro', 'weighted', 'samples']``, they are a single element tensor
-        - If ``average in ['none', None]``, they are a tensor of shape ``(C, )``, where ``C`` stands for
-          the number of classes
-
-    Raises:
-        ValueError:
-            If ``average`` is not one of ``"micro"``, ``"macro"``, ``"weighted"``,
-            ``"samples"``, ``"none"`` or ``None``.
-        ValueError:
-            If ``mdmc_average`` is not one of ``None``, ``"samplewise"``, ``"global"``.
-        ValueError:
-            If ``average`` is set but ``num_classes`` is not provided.
-        ValueError:
-            If ``num_classes`` is set
-            and ``ignore_index`` is not in the range ``[0, num_classes)``.
-
-    Example:
-        >>> from paddlemetrics.functional import precision_recall
-        >>> preds = B.tensor([2, 0, 2, 1])
-        >>> target = B.tensor([1, 1, 2, 0])
-        >>> precision_recall(preds, target, average='macro', num_classes=3)
-        (tensor(0.1667), tensor(0.3333))
-        >>> precision_recall(preds, target, average='micro')
-        (tensor(0.2500), tensor(0.2500))
-
-    """
-    allowed_average = ["micro", "macro", "weighted", "samples", "none", None]
-    if average not in allowed_average:
-        raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.")
-
-    allowed_mdmc_average = [None, "samplewise", "global"]
-    if mdmc_average not in allowed_mdmc_average:
-        raise ValueError(f"The `mdmc_average` has to be one of {allowed_mdmc_average}, got {mdmc_average}.")
-
-    if average in ["macro", "weighted", "none", None] and (not num_classes or num_classes < 1):
-        raise ValueError(f"When you set `average` as {average}, you have to provide the number of classes.")
-
-    if num_classes and ignore_index is not None and (not 0 <= ignore_index < num_classes or num_classes == 1):
-        raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes")
-
-    reduce = "macro" if average in ["weighted", "none", None] else average
-    tp, fp, _, fn = _stat_scores_update(
-        preds,
-        target,
-        reduce=reduce,
-        mdmc_reduce=mdmc_average,
-        threshold=threshold,
-        num_classes=num_classes,
-        top_k=top_k,
-        multiclass=multiclass,
-        ignore_index=ignore_index,
-    )
-
-    precision_ = _precision_compute(tp, fp, fn, average, mdmc_average)
-    recall_ = _recall_compute(tp, fp, fn, average, mdmc_average)
-
-    return precision_, recall_
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/precision_recall_curve.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/precision_recall_curve.py
deleted file mode 100644
index 11b32500b..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/precision_recall_curve.py
+++ /dev/null
@@ -1,332 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
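For orientation, a minimal usage sketch of the precision/recall family in this file (not part of the patch itself; it assumes `paddlemetrics` and `paddleext` are importable, and the expected values follow the doctests above). Because `precision_recall` derives both metrics from a single `_stat_scores_update` pass, it matches separate `precision()` and `recall()` calls:

    import paddleext.torchapi as B
    from paddlemetrics.functional import precision, precision_recall, recall

    preds = B.tensor([2, 0, 2, 1])
    target = B.tensor([1, 1, 2, 0])

    # One stat-scores pass, two metrics.
    p, r = precision_recall(preds, target, average="macro", num_classes=3)
    print(p, r)  # tensor(0.1667) tensor(0.3333), per the doctests
    print(precision(preds, target, average="macro", num_classes=3))  # tensor(0.1667)
    print(recall(preds, target, average="macro", num_classes=3))     # tensor(0.3333)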
-from typing import List, Optional, Sequence, Tuple, Union
-
-import paddleext.torchapi as B
-from paddleext.torchapi import Tensor, tensor
-
-from paddlemetrics.utilities import rank_zero_warn
-
-
-def _binary_clf_curve(
-    preds: Tensor,
-    target: Tensor,
-    sample_weights: Optional[Sequence] = None,
-    pos_label: int = 1,
-) -> Tuple[Tensor, Tensor, Tensor]:
-    """adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/_ranking.py."""
-    if sample_weights is not None and not isinstance(sample_weights, Tensor):
-        sample_weights = tensor(sample_weights, device=preds.device, dtype=B.float)
-
-    # remove class dimension if necessary
-    if preds.ndim > target.ndim:
-        preds = preds[:, 0]
-    desc_score_indices = B.argsort(preds, descending=True)
-
-    preds = preds[desc_score_indices]
-    target = target[desc_score_indices]
-
-    if sample_weights is not None:
-        weight = sample_weights[desc_score_indices]
-    else:
-        weight = 1.0
-
-    # pred typically has many tied values. Here we extract
-    # the indices associated with the distinct values. We also
-    # concatenate a value for the end of the curve.
-    distinct_value_indices = B.where(preds[1:] - preds[:-1])[0]
-    threshold_idxs = B.nn.functional.pad(distinct_value_indices, [0, 1], value=target.size(0) - 1)
-    target = (target == pos_label).to(B.long)
-    tps = B.cumsum(target * weight, dim=0)[threshold_idxs]
-
-    if sample_weights is not None:
-        # express fps as a cumsum to ensure fps is increasing even in
-        # the presence of floating point errors
-        fps = B.cumsum((1 - target) * weight, dim=0)[threshold_idxs]
-    else:
-        fps = 1 + threshold_idxs - tps
-
-    return fps, tps, preds[threshold_idxs]
-
-
-def _precision_recall_curve_update(
-    preds: Tensor,
-    target: Tensor,
-    num_classes: Optional[int] = None,
-    pos_label: Optional[int] = None,
-) -> Tuple[Tensor, Tensor, int, Optional[int]]:
-    """Updates and returns variables required to compute the precision-recall pairs for different thresholds.
-
-    Args:
-        preds: Predicted tensor
-        target: Ground truth tensor
-        num_classes: integer with number of classes for multi-label and multiclass problems.
-            Should be set to ``None`` for binary problems
-        pos_label: integer determining the positive class. Default is ``None``
-            which for binary problems is translated to 1. For multiclass problems
-            this argument should not be set as we iteratively change it in the
-            range [0,num_classes-1]
-    """
-
-    if len(preds.shape) == len(target.shape):
-        if pos_label is None:
-            pos_label = 1
-        if num_classes is not None and num_classes != 1:
-            # multilabel problem
-            if num_classes != preds.shape[1]:
-                raise ValueError(
-                    f"Argument `num_classes` was set to {num_classes} in"
-                    f" metric `precision_recall_curve` but detected {preds.shape[1]}"
-                    " number of classes from predictions"
-                )
-            preds = preds.transpose(0, 1).reshape(num_classes, -1).transpose(0, 1)
-            target = target.transpose(0, 1).reshape(num_classes, -1).transpose(0, 1)
-        else:
-            # binary problem
-            preds = preds.flatten()
-            target = target.flatten()
-            num_classes = 1
-
-    # multi class problem
-    elif len(preds.shape) == len(target.shape) + 1:
-        if pos_label is not None:
-            rank_zero_warn(
-                "Argument `pos_label` should be `None` when running"
-                f" multiclass precision recall curve. Got {pos_label}"
-            )
-        if num_classes != preds.shape[1]:
-            raise ValueError(
-                f"Argument `num_classes` was set to {num_classes} in"
-                f" metric `precision_recall_curve` but detected {preds.shape[1]}"
-                " number of classes from predictions"
-            )
-        preds = preds.transpose(0, 1).reshape(num_classes, -1).transpose(0, 1)
-        target = target.flatten()
-
-    else:
-        raise ValueError("preds and target must have same number of dimensions, or one additional dimension for preds")
-
-    return preds, target, num_classes, pos_label
-
-
-def _precision_recall_curve_compute_single_class(
-    preds: Tensor,
-    target: Tensor,
-    pos_label: int,
-    sample_weights: Optional[Sequence] = None,
-) -> Tuple[Tensor, Tensor, Tensor]:
-    """Computes precision-recall pairs for single class inputs.
-
-    Args:
-        preds: Predicted tensor
-        target: Ground truth tensor
-        pos_label: integer determining the positive class.
-        sample_weights: sample weights for each data point
-    """
-
-    fps, tps, thresholds = _binary_clf_curve(
-        preds=preds, target=target, sample_weights=sample_weights, pos_label=pos_label
-    )
-    precision = tps / (tps + fps)
-    recall = tps / tps[-1]
-
-    # stop when full recall attained and reverse the outputs so recall is decreasing
-    last_ind = B.where(tps == tps[-1])[0][0]
-    sl = slice(0, last_ind.item() + 1)
-
-    # need to call reversed explicitly, since including that to slice would
-    # introduce negative strides that are not yet supported in pytorch
-    precision = B.cat([reversed(precision[sl]), B.ones(1, dtype=precision.dtype, device=precision.device)])
-
-    recall = B.cat([reversed(recall[sl]), B.zeros(1, dtype=recall.dtype, device=recall.device)])
-
-    thresholds = reversed(thresholds[sl]).detach().clone()  # type: ignore
-
-    return precision, recall, thresholds
-
-
-def _precision_recall_curve_compute_multi_class(
-    preds: Tensor,
-    target: Tensor,
-    num_classes: int,
-    sample_weights: Optional[Sequence] = None,
-) -> Tuple[List[Tensor], List[Tensor], List[Tensor]]:
-    """Computes precision-recall pairs for multi class inputs.
-
-    Args:
-        preds: Predicted tensor
-        target: Ground truth tensor
-        num_classes: integer with number of classes for multi-label and multiclass problems.
-            Should be set to ``None`` for binary problems
-        sample_weights: sample weights for each data point
-    """
-
-    # Recursively call per class
-    precision, recall, thresholds = [], [], []
-    for cls in range(num_classes):
-        preds_cls = preds[:, cls]
-
-        prc_args = dict(
-            preds=preds_cls,
-            target=target,
-            num_classes=1,
-            pos_label=cls,
-            sample_weights=sample_weights,
-        )
-        if target.ndim > 1:
-            prc_args.update(
-                dict(
-                    target=target[:, cls],
-                    pos_label=1,
-                )
-            )
-        res = precision_recall_curve(**prc_args)
-        precision.append(res[0])
-        recall.append(res[1])
-        thresholds.append(res[2])
-
-    return precision, recall, thresholds
-
-
-def _precision_recall_curve_compute(
-    preds: Tensor,
-    target: Tensor,
-    num_classes: int,
-    pos_label: Optional[int] = None,
-    sample_weights: Optional[Sequence] = None,
-) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
-    """Computes precision-recall pairs based on the number of classes.
-
-    Args:
-        preds: Predicted tensor
-        target: Ground truth tensor
-        num_classes: integer with number of classes for multi-label and multiclass problems.
-            Should be set to ``None`` for binary problems
-        pos_label: integer determining the positive class. Default is ``None``
-            which for binary problems is translated to 1. For multiclass problems
-            this argument should not be set as we iteratively change it in the
-            range [0,num_classes-1]
-        sample_weights: sample weights for each data point
-
-    Example:
-        >>> # binary case
-        >>> preds = B.tensor([0, 1, 2, 3])
-        >>> target = B.tensor([0, 1, 1, 0])
-        >>> pos_label = 1
-        >>> preds, target, num_classes, pos_label = _precision_recall_curve_update(preds, target, pos_label=pos_label)
-        >>> precision, recall, thresholds = _precision_recall_curve_compute(preds, target, num_classes, pos_label)
-        >>> precision
-        tensor([0.6667, 0.5000, 0.0000, 1.0000])
-        >>> recall
-        tensor([1.0000, 0.5000, 0.0000, 0.0000])
-        >>> thresholds
-        tensor([1, 2, 3])
-
-        >>> # multiclass case
-        >>> preds = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
-        ...                   [0.05, 0.75, 0.05, 0.05, 0.05],
-        ...                   [0.05, 0.05, 0.75, 0.05, 0.05],
-        ...                   [0.05, 0.05, 0.05, 0.75, 0.05]])
-        >>> target = B.tensor([0, 1, 3, 2])
-        >>> num_classes = 5
-        >>> preds, target, num_classes, pos_label = _precision_recall_curve_update(preds, target, num_classes)
-        >>> precision, recall, thresholds = _precision_recall_curve_compute(preds, target, num_classes)
-        >>> precision  # doctest: +NORMALIZE_WHITESPACE
-        [tensor([1., 1.]), tensor([1., 1.]), tensor([0.2500, 0.0000, 1.0000]),
-         tensor([0.2500, 0.0000, 1.0000]), tensor([0., 1.])]
-        >>> recall
-        [tensor([1., 0.]), tensor([1., 0.]), tensor([1., 0., 0.]), tensor([1., 0., 0.]), tensor([nan, 0.])]
-        >>> thresholds
-        [tensor([0.7500]), tensor([0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500])]
-    """
-
-    with B.no_grad():
-        if num_classes == 1:
-            if pos_label is None:
-                pos_label = 1
-            return _precision_recall_curve_compute_single_class(preds, target, pos_label, sample_weights)
-        return _precision_recall_curve_compute_multi_class(preds, target, num_classes, sample_weights)
-
-
-def precision_recall_curve(
-    preds: Tensor,
-    target: Tensor,
-    num_classes: Optional[int] = None,
-    pos_label: Optional[int] = None,
-    sample_weights: Optional[Sequence] = None,
-) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
-    """Computes precision-recall pairs for different thresholds.
-
-    Args:
-        preds: predictions from model (probabilities)
-        target: ground truth labels
-        num_classes: integer with number of classes for multi-label and multiclass problems.
-            Should be set to ``None`` for binary problems
-        pos_label: integer determining the positive class. Default is ``None``
-            which for binary problems is translated to 1. For multiclass problems
-            this argument should not be set as we iteratively change it in the
-            range [0,num_classes-1]
-        sample_weights: sample weights for each data point
-
-    Returns:
-        3-element tuple containing
-
-        precision:
-            tensor where element i is the precision of predictions with
-            score >= thresholds[i] and the last element is 1.
-            If multiclass, this is a list of such tensors, one for each class.
-        recall:
-            tensor where element i is the recall of predictions with
-            score >= thresholds[i] and the last element is 0.
-            If multiclass, this is a list of such tensors, one for each class.
-        thresholds:
-            Thresholds used for computing precision/recall scores
-
-    Raises:
-        ValueError:
-            If ``preds`` and ``target`` don't have the same number of dimensions,
-            or one additional dimension for ``preds``.
-        ValueError:
-            If the number of classes deduced from ``preds`` is not the same as the
-            ``num_classes`` provided.
-
-    Example (binary case):
-        >>> from paddlemetrics.functional import precision_recall_curve
-        >>> pred = B.tensor([0, 1, 2, 3])
-        >>> target = B.tensor([0, 1, 1, 0])
-        >>> precision, recall, thresholds = precision_recall_curve(pred, target, pos_label=1)
-        >>> precision
-        tensor([0.6667, 0.5000, 0.0000, 1.0000])
-        >>> recall
-        tensor([1.0000, 0.5000, 0.0000, 0.0000])
-        >>> thresholds
-        tensor([1, 2, 3])
-
-    Example (multiclass case):
-        >>> pred = B.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
-        ...                  [0.05, 0.75, 0.05, 0.05, 0.05],
-        ...                  [0.05, 0.05, 0.75, 0.05, 0.05],
-        ...                  [0.05, 0.05, 0.05, 0.75, 0.05]])
-        >>> target = B.tensor([0, 1, 3, 2])
-        >>> precision, recall, thresholds = precision_recall_curve(pred, target, num_classes=5)
-        >>> precision  # doctest: +NORMALIZE_WHITESPACE
-        [tensor([1., 1.]), tensor([1., 1.]), tensor([0.2500, 0.0000, 1.0000]),
-         tensor([0.2500, 0.0000, 1.0000]), tensor([0., 1.])]
-        >>> recall
-        [tensor([1., 0.]), tensor([1., 0.]), tensor([1., 0., 0.]), tensor([1., 0., 0.]), tensor([nan, 0.])]
-        >>> thresholds
-        [tensor([0.7500]), tensor([0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500])]
-    """
-    preds, target, num_classes, pos_label = _precision_recall_curve_update(preds, target, num_classes, pos_label)
-    return _precision_recall_curve_compute(preds, target, num_classes, pos_label, sample_weights)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/roc.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/roc.py
deleted file mode 100644
index 86f4e2a4c..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/roc.py
+++ /dev/null
@@ -1,273 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import List, Optional, Sequence, Tuple, Union
-
-import paddleext.torchapi as B
-from paddleext.torchapi import Tensor
-
-from paddlemetrics.functional.classification.precision_recall_curve import (
-    _binary_clf_curve,
-    _precision_recall_curve_update,
-)
-
-
-def _roc_update(
-    preds: Tensor,
-    target: Tensor,
-    num_classes: Optional[int] = None,
-    pos_label: Optional[int] = None,
-) -> Tuple[Tensor, Tensor, int, Optional[int]]:
-    """Updates and returns variables required to compute the Receiver Operating Characteristic.
-
-    Args:
-        preds: Predicted tensor
-        target: Ground truth tensor
-        num_classes: integer with number of classes for multi-label and multiclass problems.
-            Should be set to ``None`` for binary problems
-        pos_label: integer determining the positive class. Default is ``None``
-            which for binary problems is translated to 1. For multiclass problems
-            this argument should not be set as we iteratively change it in the
-            range [0,num_classes-1]
-    """
-
-    return _precision_recall_curve_update(preds, target, num_classes, pos_label)
-
-
-def _roc_compute_single_class(
-    preds: Tensor,
-    target: Tensor,
-    pos_label: int,
-    sample_weights: Optional[Sequence] = None,
-) -> Tuple[Tensor, Tensor, Tensor]:
-    """Computes Receiver Operating Characteristic for single class inputs. Returns tensor with false positive
-    rates, tensor with true positive rates, tensor with thresholds used for computing false- and true positive
-    rates.
-
-    Args:
-        preds: Predicted tensor
-        target: Ground truth tensor
-        pos_label: integer determining the positive class. Default is ``None``
-            which for binary problems is translated to 1. For multiclass problems
-            this argument should not be set as we iteratively change it in the
-            range [0,num_classes-1]
-        sample_weights: sample weights for each data point
-    """
-
-    fps, tps, thresholds = _binary_clf_curve(
-        preds=preds, target=target, sample_weights=sample_weights, pos_label=pos_label
-    )
-    # Add an extra threshold position to make sure that the curve starts at (0, 0)
-    tps = B.cat([B.zeros(1, dtype=tps.dtype, device=tps.device), tps])
-    fps = B.cat([B.zeros(1, dtype=fps.dtype, device=fps.device), fps])
-    thresholds = B.cat([thresholds[0][None] + 1, thresholds])
-
-    if fps[-1] <= 0:
-        raise ValueError("No negative samples in targets, false positive value should be meaningless")
-    fpr = fps / fps[-1]
-
-    if tps[-1] <= 0:
-        raise ValueError("No positive samples in targets, true positive value should be meaningless")
-    tpr = tps / tps[-1]
-
-    return fpr, tpr, thresholds
-
-
-def _roc_compute_multi_class(
-    preds: Tensor,
-    target: Tensor,
-    num_classes: int,
-    sample_weights: Optional[Sequence] = None,
-) -> Tuple[List[Tensor], List[Tensor], List[Tensor]]:
-    """Computes Receiver Operating Characteristic for multi class inputs. Returns tensor with false positive rates,
-    tensor with true positive rates, tensor with thresholds used for computing false- and true positive rates.
-
-    Args:
-        preds: Predicted tensor
-        target: Ground truth tensor
-        num_classes: integer with number of classes for multi-label and multiclass problems
-        sample_weights: sample weights for each data point
-    """
-
-    fpr, tpr, thresholds = [], [], []
-    for cls in range(num_classes):
-        if preds.shape == target.shape:
-            target_cls = target[:, cls]
-            pos_label = 1
-        else:
-            target_cls = target
-            pos_label = cls
-        res = roc(
-            preds=preds[:, cls],
-            target=target_cls,
-            num_classes=1,
-            pos_label=pos_label,
-            sample_weights=sample_weights,
-        )
-        fpr.append(res[0])
-        tpr.append(res[1])
-        thresholds.append(res[2])
-
-    return fpr, tpr, thresholds
-
-
-def _roc_compute(
-    preds: Tensor,
-    target: Tensor,
-    num_classes: int,
-    pos_label: Optional[int] = None,
-    sample_weights: Optional[Sequence] = None,
-) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
-    """Computes Receiver Operating Characteristic based on the number of classes.
-
-    Args:
-        preds: Predicted tensor
-        target: Ground truth tensor
-        num_classes: integer with number of classes for multi-label and multiclass problems.
-            Should be set to ``None`` for binary problems
-        pos_label: integer determining the positive class. Default is ``None``
-            which for binary problems is translated to 1. For multiclass problems
-            this argument should not be set as we iteratively change it in the
-            range [0,num_classes-1]
-        sample_weights: sample weights for each data point
-
-    Example:
-        >>> # binary case
-        >>> preds = B.tensor([0, 1, 2, 3])
-        >>> target = B.tensor([0, 1, 1, 1])
-        >>> pos_label = 1
-        >>> preds, target, num_classes, pos_label = _roc_update(preds, target, pos_label=pos_label)
-        >>> fpr, tpr, thresholds = _roc_compute(preds, target, num_classes, pos_label)
-        >>> fpr
-        tensor([0., 0., 0., 0., 1.])
-        >>> tpr
-        tensor([0.0000, 0.3333, 0.6667, 1.0000, 1.0000])
-        >>> thresholds
-        tensor([4, 3, 2, 1, 0])
-
-        >>> # multiclass case
-        >>> preds = B.tensor([[0.75, 0.05, 0.05, 0.05],
-        ...                   [0.05, 0.75, 0.05, 0.05],
-        ...                   [0.05, 0.05, 0.75, 0.05],
-        ...                   [0.05, 0.05, 0.05, 0.75]])
-        >>> target = B.tensor([0, 1, 3, 2])
-        >>> num_classes = 4
-        >>> preds, target, num_classes, pos_label = _roc_update(preds, target, num_classes)
-        >>> fpr, tpr, thresholds = _roc_compute(preds, target, num_classes)
-        >>> fpr
-        [tensor([0., 0., 1.]), tensor([0., 0., 1.]), tensor([0.0000, 0.3333, 1.0000]), tensor([0.0000, 0.3333, 1.0000])]
-        >>> tpr
-        [tensor([0., 1., 1.]), tensor([0., 1., 1.]), tensor([0., 0., 1.]), tensor([0., 0., 1.])]
-        >>> thresholds  # doctest: +NORMALIZE_WHITESPACE
-        [tensor([1.7500, 0.7500, 0.0500]),
-         tensor([1.7500, 0.7500, 0.0500]),
-         tensor([1.7500, 0.7500, 0.0500]),
-         tensor([1.7500, 0.7500, 0.0500])]
-    """
-
-    with B.no_grad():
-        if num_classes == 1 and preds.ndim == 1:  # binary
-            if pos_label is None:
-                pos_label = 1
-            return _roc_compute_single_class(preds, target, pos_label, sample_weights)
-        return _roc_compute_multi_class(preds, target, num_classes, sample_weights)
-
-
-def roc(
-    preds: Tensor,
-    target: Tensor,
-    num_classes: Optional[int] = None,
-    pos_label: Optional[int] = None,
-    sample_weights: Optional[Sequence] = None,
-) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
-    """Computes the Receiver Operating Characteristic (ROC). Works with binary, multiclass and multilabel
-    input.
-
-    Args:
-        preds: predictions from model (logits or probabilities)
-        target: ground truth values
-        num_classes: integer with number of classes for multi-label and multiclass problems.
-            Should be set to ``None`` for binary problems
-        pos_label: integer determining the positive class. Default is ``None``
-            which for binary problems is translated to 1. For multiclass problems
-            this argument should not be set as we iteratively change it in the
-            range [0,num_classes-1]
-        sample_weights: sample weights for each data point
-
-    Returns:
-        3-element tuple containing
-
-        fpr:
-            tensor with false positive rates.
-            If multiclass or multilabel, this is a list of such tensors, one for each class/label.
-        tpr:
-            tensor with true positive rates.
-            If multiclass or multilabel, this is a list of such tensors, one for each class/label.
-        thresholds:
-            tensor with thresholds used for computing false- and true positive rates.
-            If multiclass or multilabel, this is a list of such tensors, one for each class/label.
-
-    Example (binary case):
-        >>> from paddlemetrics.functional import roc
-        >>> pred = B.tensor([0, 1, 2, 3])
-        >>> target = B.tensor([0, 1, 1, 1])
-        >>> fpr, tpr, thresholds = roc(pred, target, pos_label=1)
-        >>> fpr
-        tensor([0., 0., 0., 0., 1.])
-        >>> tpr
-        tensor([0.0000, 0.3333, 0.6667, 1.0000, 1.0000])
-        >>> thresholds
-        tensor([4, 3, 2, 1, 0])
-
-    Example (multiclass case):
-        >>> from paddlemetrics.functional import roc
-        >>> pred = B.tensor([[0.75, 0.05, 0.05, 0.05],
-        ...                  [0.05, 0.75, 0.05, 0.05],
-        ...                  [0.05, 0.05, 0.75, 0.05],
-        ...                  [0.05, 0.05, 0.05, 0.75]])
-        >>> target = B.tensor([0, 1, 3, 2])
-        >>> fpr, tpr, thresholds = roc(pred, target, num_classes=4)
-        >>> fpr
-        [tensor([0., 0., 1.]), tensor([0., 0., 1.]), tensor([0.0000, 0.3333, 1.0000]), tensor([0.0000, 0.3333, 1.0000])]
-        >>> tpr
-        [tensor([0., 1., 1.]), tensor([0., 1., 1.]), tensor([0., 0., 1.]), tensor([0., 0., 1.])]
-        >>> thresholds  # doctest: +NORMALIZE_WHITESPACE
-        [tensor([1.7500, 0.7500, 0.0500]),
-         tensor([1.7500, 0.7500, 0.0500]),
-         tensor([1.7500, 0.7500, 0.0500]),
-         tensor([1.7500, 0.7500, 0.0500])]
-
-    Example (multilabel case):
-        >>> from paddlemetrics.functional import roc
-        >>> pred = B.tensor([[0.8191, 0.3680, 0.1138],
-        ...                  [0.3584, 0.7576, 0.1183],
-        ...                  [0.2286, 0.3468, 0.1338],
-        ...                  [0.8603, 0.0745, 0.1837]])
-        >>> target = B.tensor([[1, 1, 0], [0, 1, 0], [0, 0, 0], [0, 1, 1]])
-        >>> fpr, tpr, thresholds = roc(pred, target, num_classes=3, pos_label=1)
-        >>> fpr  # doctest: +NORMALIZE_WHITESPACE
-        [tensor([0.0000, 0.3333, 0.3333, 0.6667, 1.0000]),
-         tensor([0., 0., 0., 1., 1.]),
-         tensor([0.0000, 0.0000, 0.3333, 0.6667, 1.0000])]
-        >>> tpr
-        [tensor([0., 0., 1., 1., 1.]), tensor([0.0000, 0.3333, 0.6667, 0.6667, 1.0000]), tensor([0., 1., 1., 1., 1.])]
-        >>> thresholds  # doctest: +NORMALIZE_WHITESPACE
-        [tensor([1.8603, 0.8603, 0.8191, 0.3584, 0.2286]),
-         tensor([1.7576, 0.7576, 0.3680, 0.3468, 0.0745]),
-         tensor([1.1837, 0.1837, 0.1338, 0.1183, 0.1138])]
-    """
-    preds, target, num_classes, pos_label = _roc_update(preds, target, num_classes, pos_label)
-    return _roc_compute(preds, target, num_classes, pos_label, sample_weights)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/specificity.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/specificity.py
deleted file mode 100644
index be87dce7d..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/specificity.py
+++ /dev/null
@@ -1,215 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
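For orientation, a minimal sketch contrasting the two curve functions in this patch (not part of the patch itself; it assumes the same environment as the doctests). Both are built on the shared `_binary_clf_curve` TP/FP counts; `roc` prepends an extra threshold so the curve starts at (0, 0), while `precision_recall_curve` instead appends the (recall = 0, precision = 1) endpoint:

    import paddleext.torchapi as B
    from paddlemetrics.functional import precision_recall_curve, roc

    pred = B.tensor([0, 1, 2, 3])
    target = B.tensor([0, 1, 1, 1])

    # ROC: thresholds are prefixed with max(pred) + 1, per the doctest above.
    fpr, tpr, roc_thresholds = roc(pred, target, pos_label=1)
    print(fpr)             # tensor([0., 0., 0., 0., 1.])
    print(tpr)             # tensor([0.0000, 0.3333, 0.6667, 1.0000, 1.0000])
    print(roc_thresholds)  # tensor([4, 3, 2, 1, 0])

    # Precision-recall on the same scores reuses the identical TP/FP counts.
    precision, recall, pr_thresholds = precision_recall_curve(pred, target, pos_label=1)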
-from typing import Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.functional.classification.stat_scores import _reduce_stat_scores, _stat_scores_update -from paddlemetrics.utilities.enums import AverageMethod, MDMCAverageMethod - - -def _specificity_compute( - tp: Tensor, - fp: Tensor, - tn: Tensor, - fn: Tensor, - average: str, - mdmc_average: Optional[str], -) -> Tensor: - """Computes specificity from the stat scores: true positives, false positives, true negatives, false negatives. - - Args: - tp: True positives - fp: False positives - tn: True negatives - fn: False negatives - average: Defines the reduction that is applied - mdmc_average: Defines how averaging is done for multi-dimensional multi-class inputs (on top of the - ``average`` parameter) - - Example: - >>> from paddlemetrics.functional.classification.stat_scores import _stat_scores_update - >>> preds = B.tensor([2, 0, 2, 1]) - >>> target = B.tensor([1, 1, 2, 0]) - >>> tp, fp, tn, fn = _stat_scores_update(preds, target, reduce='macro', num_classes=3) - >>> _specificity_compute(tp, fp, tn, fn, average='macro', mdmc_average=None) - tensor(0.6111) - >>> tp, fp, tn, fn = _stat_scores_update(preds, target, reduce='micro') - >>> _specificity_compute(tp, fp, tn, fn, average='micro', mdmc_average=None) - tensor(0.6250) - """ - - numerator = tn - denominator = tn + fp - if average == AverageMethod.NONE and mdmc_average != MDMCAverageMethod.SAMPLEWISE: - # a class is not present if there exists no TPs, no FPs, and no FNs - meaningless_indices = B.nonzero((tp | fn | fp) == 0).cpu() - numerator[meaningless_indices, ...] = -1 - denominator[meaningless_indices, ...] = -1 - return _reduce_stat_scores( - numerator=numerator, - denominator=denominator, - weights=None if average != AverageMethod.WEIGHTED else denominator, - average=average, - mdmc_average=mdmc_average, - ) - - -def specificity( - preds: Tensor, - target: Tensor, - average: str = "micro", - mdmc_average: Optional[str] = None, - ignore_index: Optional[int] = None, - num_classes: Optional[int] = None, - threshold: float = 0.5, - top_k: Optional[int] = None, - multiclass: Optional[bool] = None, -) -> Tensor: - r""" - Computes `Specificity`_ - - .. math:: \text{Specificity} = \frac{\text{TN}}{\text{TN} + \text{FP}} - - Where :math:`\text{TN}` and :math:`\text{FP}` represent the number of true negatives and - false positives respectively. With the use of the ``top_k`` parameter, this metric can - generalize to Specificity@K. - - The reduction method (how the specificity scores are aggregated) is controlled by the - ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the - multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`. - - Args: - preds: Predictions from model (probabilities, or labels) - target: Ground truth values - average: - Defines the reduction that is applied. Should be one of the following: - - - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes. - - ``'macro'``: Calculate the metric for each class separately, and average the - metrics across classes (with equal weights for each class). - - ``'weighted'``: Calculate the metric for each class separately, and average the - metrics across classes, weighting each class by its support (``tn + fp``). - - ``'none'`` or ``None``: Calculate the metric for each class separately, and return - the metric for every class.
- - ``'samples'``: Calculate the metric for each sample, and average the metrics - across samples (with equal weights for each sample). - - .. note:: What is considered a sample in the multi-dimensional multi-class case - depends on the value of ``mdmc_average``. - - .. note:: If ``'none'`` and a given class doesn't occur in the `preds` or `target`, - the value for the class will be ``nan``. - - mdmc_average: - Defines how averaging is done for multi-dimensional multi-class inputs (on top of the - ``average`` parameter). Should be one of the following: - - - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional - multi-class. - - - ``'samplewise'``: In this case, the statistics are computed separately for each - sample on the ``N`` axis, and then averaged over samples. - The computation for each sample is done by treating the flattened extra axes ``...`` - (see :ref:`references/modules:input types`) as the ``N`` dimension within the sample, - and computing the metric for the sample based on that. - - - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs - (see :ref:`references/modules:input types`) - are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they - were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. - - ignore_index: - Integer specifying a target class to ignore. If given, this class index does not contribute - to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` - or ``'none'``, the score for the ignored class will be returned as ``nan``. - - num_classes: - Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods. - - threshold: - Threshold probability value for transforming probability predictions to binary - (0,1) predictions, in the case of binary or multi-label inputs - top_k: - Number of highest probability entries for each sample to convert to 1s - relevant - only for inputs with probability predictions. If this parameter is set for multi-label - inputs, it will take precedence over ``threshold``. For (multi-dim) multi-class inputs, - this parameter defaults to 1. - - Should be left unset (``None``) for inputs with label predictions. - multiclass: - Used only in certain special cases, where you want to treat inputs as a different type - than what they appear to be. See the parameter's - :ref:`documentation section ` - for a more detailed explanation and examples. - - Return: - The shape of the returned tensor depends on the ``average`` parameter - - - If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned - - If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number - of classes - - Raises: - ValueError: - If ``average`` is not one of ``"micro"``, ``"macro"``, ``"weighted"``, - ``"samples"``, ``"none"`` or ``None``. - ValueError: - If ``mdmc_average`` is not one of ``None``, ``"samplewise"``, ``"global"``. - ValueError: - If ``average`` is set but ``num_classes`` is not provided. - ValueError: - If ``num_classes`` is set - and ``ignore_index`` is not in the range ``[0, num_classes)``. 
- - Example: - >>> from paddlemetrics.functional import specificity - >>> preds = B.tensor([2, 0, 2, 1]) - >>> target = B.tensor([1, 1, 2, 0]) - >>> specificity(preds, target, average='macro', num_classes=3) - tensor(0.6111) - >>> specificity(preds, target, average='micro') - tensor(0.6250) - - """ - - allowed_average = ["micro", "macro", "weighted", "samples", "none", None] - if average not in allowed_average: - raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.") - - allowed_mdmc_average = [None, "samplewise", "global"] - if mdmc_average not in allowed_mdmc_average: - raise ValueError(f"The `mdmc_average` has to be one of {allowed_mdmc_average}, got {mdmc_average}.") - - if average in ["macro", "weighted", "none", None] and (not num_classes or num_classes < 1): - raise ValueError(f"When you set `average` as {average}, you have to provide the number of classes.") - - if num_classes and ignore_index is not None and (not 0 <= ignore_index < num_classes or num_classes == 1): - raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes") - - reduce = "macro" if average in ["weighted", "none", None] else average - tp, fp, tn, fn = _stat_scores_update( - preds, - target, - reduce=reduce, - mdmc_reduce=mdmc_average, - threshold=threshold, - num_classes=num_classes, - top_k=top_k, - multiclass=multiclass, - ignore_index=ignore_index, - ) - - return _specificity_compute(tp, fp, tn, fn, average, mdmc_average) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/stat_scores.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/stat_scores.py deleted file mode 100644 index 33e1cafdd..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/classification/stat_scores.py +++ /dev/null @@ -1,396 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import List, Optional, Tuple, Union - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.utilities.checks import _input_format_classification -from paddlemetrics.utilities.enums import AverageMethod, MDMCAverageMethod - - -def _del_column(data: Tensor, idx: int) -> Tensor: - """Delete the column at index.""" - return B.cat([data[:, :idx], data[:, (idx + 1) :]], 1) - - -def _stat_scores( - preds: Tensor, - target: Tensor, - reduce: Optional[str] = "micro", -) -> Tuple[Tensor, Tensor, Tensor, Tensor]: - """Calculate the number of tp, fp, tn, fn. - - Args: - preds: - An ``(N, C)`` or ``(N, C, X)`` tensor of predictions (0 or 1) - target: - An ``(N, C)`` or ``(N, C, X)`` tensor of true labels (0 or 1) - reduce: - One of ``'micro'``, ``'macro'``, ``'samples'`` - - Return: - Returns a list of 4 tensors; tp, fp, tn, fn.
- The shape of the returned tensors depends on the shape of the inputs - and the ``reduce`` parameter: - - If inputs are of the shape ``(N, C)``, then - - If ``reduce='micro'``, the returned tensors are 1 element tensors - - If ``reduce='macro'``, the returned tensors are ``(C,)`` tensors - - If ``reduce='samples'``, the returned tensors are ``(N,)`` tensors - - If inputs are of the shape ``(N, C, X)``, then - - If ``reduce='micro'``, the returned tensors are ``(N,)`` tensors - - If ``reduce='macro'``, the returned tensors are ``(N,C)`` tensors - - If ``reduce='samples'``, the returned tensors are ``(N,X)`` tensors - """ - dim: Union[int, List[int]] = 1 # for "samples" - if reduce == "micro": - dim = [0, 1] if preds.ndim == 2 else [1, 2] - elif reduce == "macro": - dim = 0 if preds.ndim == 2 else 2 - - true_pred, false_pred = target == preds, target != preds - pos_pred, neg_pred = preds == 1, preds == 0 - - tp = (true_pred * pos_pred).sum(dim=dim) - fp = (false_pred * pos_pred).sum(dim=dim) - - tn = (true_pred * neg_pred).sum(dim=dim) - fn = (false_pred * neg_pred).sum(dim=dim) - return tp.long(), fp.long(), tn.long(), fn.long() - - -def _stat_scores_update( - preds: Tensor, - target: Tensor, - reduce: Optional[str] = "micro", - mdmc_reduce: Optional[str] = None, - num_classes: Optional[int] = None, - top_k: Optional[int] = None, - threshold: float = 0.5, - multiclass: Optional[bool] = None, - ignore_index: Optional[int] = None, -) -> Tuple[Tensor, Tensor, Tensor, Tensor]: - """Updates and returns the number of true positives, false positives, true negatives, false negatives. - Raises ValueError if: - - - The `ignore_index` is not valid - - When `ignore_index` is used with binary data - - When inputs are multi-dimensional multi-class, and the `mdmc_reduce` parameter is not set - - Args: - preds: Predicted tensor - target: Ground truth tensor - reduce: Defines the reduction that is applied - mdmc_reduce: Defines how the multi-dimensional multi-class inputs are handled - num_classes: Number of classes. Necessary for (multi-dimensional) multi-class or multi-label data. - top_k: Number of highest probability or logit score predictions considered to find the correct label, - relevant only for (multi-dimensional) multi-class inputs - threshold: Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case - of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities - multiclass: Used only in certain special cases, where you want to treat inputs as a different type - than what they appear to be - ignore_index: Specify a class (label) to ignore. If given, this class index does not contribute - to the returned score, regardless of reduction method. If an index is ignored, and - ``reduce='macro'``, the class statistics for the ignored class will all be returned - as ``-1``.
- """ - - preds, target, _ = _input_format_classification( - preds, target, threshold=threshold, num_classes=num_classes, multiclass=multiclass, top_k=top_k - ) - - if ignore_index is not None and not 0 <= ignore_index < preds.shape[1]: - raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {preds.shape[1]} classes") - - if ignore_index is not None and preds.shape[1] == 1: - raise ValueError("You can not use `ignore_index` with binary data.") - - if preds.ndim == 3: - if not mdmc_reduce: - raise ValueError( - "When your inputs are multi-dimensional multi-class, you have to set the `mdmc_reduce` parameter" - ) - if mdmc_reduce == "global": - preds = B.transpose(preds, 1, 2).reshape(-1, preds.shape[1]) - target = B.transpose(target, 1, 2).reshape(-1, target.shape[1]) - - # Delete what is in ignore_index, if applicable (and classes don't matter): - if ignore_index is not None and reduce != "macro": - preds = _del_column(preds, ignore_index) - target = _del_column(target, ignore_index) - - tp, fp, tn, fn = _stat_scores(preds, target, reduce=reduce) - - # Take care of ignore_index - if ignore_index is not None and reduce == "macro": - tp[..., ignore_index] = -1 - fp[..., ignore_index] = -1 - tn[..., ignore_index] = -1 - fn[..., ignore_index] = -1 - - return tp, fp, tn, fn - - -def _stat_scores_compute(tp: Tensor, fp: Tensor, tn: Tensor, fn: Tensor) -> Tensor: - """Computes the number of true positives, false positives, true negatives, false negatives. Concatenates the - input tensors along with the support into one output. - - Args: - tp: True positives - fp: False positives - tn: True negatives - fn: False negatives - - Example: - >>> preds = B.tensor([1, 0, 2, 1]) - >>> target = B.tensor([1, 1, 2, 0]) - >>> tp, fp, tn, fn = _stat_scores_update(preds, target, reduce='macro', num_classes=3) - >>> _stat_scores_compute(tp, fp, tn, fn) - tensor([[0, 1, 2, 1, 1], - [1, 1, 1, 1, 2], - [1, 0, 3, 0, 1]]) - >>> tp, fp, tn, fn = _stat_scores_update(preds, target, reduce='micro') - >>> _stat_scores_compute(tp, fp, tn, fn) - tensor([2, 2, 6, 2, 4]) - """ - stats = [ - tp.unsqueeze(-1), - fp.unsqueeze(-1), - tn.unsqueeze(-1), - fn.unsqueeze(-1), - tp.unsqueeze(-1) + fn.unsqueeze(-1), # support - ] - outputs: Tensor = B.cat(stats, -1) - outputs = B.where(outputs < 0, tensor(-1, device=outputs.device, dtype=outputs.dtype), outputs) - - return outputs - - -def _reduce_stat_scores( - numerator: Tensor, - denominator: Tensor, - weights: Optional[Tensor], - average: Optional[str], - mdmc_average: Optional[str], - zero_division: int = 0, -) -> Tensor: - """Reduces scores of type ``numerator/denominator`` or - ``weights * (numerator/denominator)``, if ``average='weighted'``. - - Args: - numerator: A tensor with numerator numbers. - denominator: A tensor with denominator numbers. If a denominator is - negative, the class will be ignored (if averaging), or its score - will be returned as ``nan`` (if ``average=None``). - If the denominator is zero, then ``zero_division`` score will be - used for those elements. - weights: A tensor of weights to be used if ``average='weighted'``. - average: The method to average the scores - mdmc_average: The method to average the scores if inputs were multi-dimensional multi-class (MDMC) - zero_division: The value to use for the score if denominator equals zero.
- """ - numerator, denominator = numerator.float(), denominator.float() - zero_div_mask = denominator == 0 - ignore_mask = denominator < 0 - - if weights is None: - weights = B.ones_like(denominator) - else: - weights = weights.float() - - numerator = B.where(zero_div_mask, tensor(float(zero_division), device=numerator.device), numerator) - denominator = B.where(zero_div_mask | ignore_mask, tensor(1.0, device=denominator.device), denominator) - weights = B.where(ignore_mask, tensor(0.0, device=weights.device), weights) - - if average not in (AverageMethod.MICRO, AverageMethod.NONE, None): - weights = weights / weights.sum(dim=-1, keepdim=True) - - scores = weights * (numerator / denominator) - - # This handles the case where sum(weights) = 0, which happens if we ignore the only present class with average='weighted' - scores = B.where(B.isnan(scores), tensor(float(zero_division), device=scores.device), scores) - - if mdmc_average == MDMCAverageMethod.SAMPLEWISE: - scores = scores.mean(dim=0) - ignore_mask = ignore_mask.sum(dim=0).bool() - - if average in (AverageMethod.NONE, None): - scores = B.where(ignore_mask, tensor(float("nan"), device=scores.device), scores) - else: - scores = scores.sum() - - return scores - - -def stat_scores( - preds: Tensor, - target: Tensor, - reduce: str = "micro", - mdmc_reduce: Optional[str] = None, - num_classes: Optional[int] = None, - top_k: Optional[int] = None, - threshold: float = 0.5, - multiclass: Optional[bool] = None, - ignore_index: Optional[int] = None, -) -> Tensor: - r"""Computes the number of true positives, false positives, true negatives, false negatives. - Related to `Type I and Type II errors`_ - and the `confusion matrix`_. - - The reduction method (how the statistics are aggregated) is controlled by the - ``reduce`` parameter, and additionally by the ``mdmc_reduce`` parameter in the - multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`. - - Args: - preds: Predictions from model (probabilities, logits or labels) - target: Ground truth values - threshold: - Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case - of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. - - top_k: - Number of highest probability or logit score predictions considered to find the correct label, - relevant only for (multi-dimensional) multi-class inputs. The - default value (``None``) will be interpreted as 1 for these inputs. - - Should be left at default (``None``) for all other types of inputs. - - reduce: - Defines the reduction that is applied. Should be one of the following: - - - ``'micro'`` [default]: Counts the statistics by summing over all [sample, class] - combinations (globally). Each statistic is represented by a single integer. - - ``'macro'``: Counts the statistics for each class separately (over all samples). - Each statistic is represented by a ``(C,)`` tensor. Requires ``num_classes`` - to be set. - - ``'samples'``: Counts the statistics for each sample separately (over all classes). - Each statistic is represented by a ``(N, )`` 1d tensor. - - .. note:: What is considered a sample in the multi-dimensional multi-class case - depends on the value of ``mdmc_reduce``. - - num_classes: - Number of classes. Necessary for (multi-dimensional) multi-class or multi-label data. - - ignore_index: - Specify a class (label) to ignore.
If given, this class index does not contribute - to the returned score, regardless of reduction method. If an index is ignored, and - ``reduce='macro'``, the class statistics for the ignored class will all be returned - as ``-1``. - - mdmc_reduce: - Defines how the multi-dimensional multi-class inputs are handled. Should be - one of the following: - - - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional - multi-class (see :ref:`references/modules:input types` for the definition of input types). - - - ``'samplewise'``: In this case, the statistics are computed separately for each - sample on the ``N`` axis, and then the outputs are concatenated together. In each - sample the extra axes ``...`` are flattened to become the sub-sample axis, and - statistics for each sample are computed by treating the sub-sample axis as the - ``N`` axis for that sample. - - - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs are - flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they - were ``(N_X, C)``. From here on the ``reduce`` parameter applies as usual. - - multiclass: - Used only in certain special cases, where you want to treat inputs as a different type - than what they appear to be. See the parameter's - :ref:`documentation section ` - for a more detailed explanation and examples. - - Return: - The metric returns a tensor of shape ``(..., 5)``, where the last dimension corresponds - to ``[tp, fp, tn, fn, sup]`` (``sup`` stands for support and equals ``tp + fn``). The - shape depends on the ``reduce`` and ``mdmc_reduce`` (in case of multi-dimensional - multi-class data) parameters: - - - If the data is not multi-dimensional multi-class, then - - - If ``reduce='micro'``, the shape will be ``(5, )`` - - If ``reduce='macro'``, the shape will be ``(C, 5)``, - where ``C`` stands for the number of classes - - If ``reduce='samples'``, the shape will be ``(N, 5)``, where ``N`` stands for - the number of samples - - - If the data is multi-dimensional multi-class and ``mdmc_reduce='global'``, then - - - If ``reduce='micro'``, the shape will be ``(5, )`` - - If ``reduce='macro'``, the shape will be ``(C, 5)`` - - If ``reduce='samples'``, the shape will be ``(N*X, 5)``, where ``X`` stands for - the product of sizes of all "extra" dimensions of the data (i.e. all dimensions - except for ``C`` and ``N``) - - - If the data is multi-dimensional multi-class and ``mdmc_reduce='samplewise'``, then - - - If ``reduce='micro'``, the shape will be ``(N, 5)`` - - If ``reduce='macro'``, the shape will be ``(N, C, 5)`` - - If ``reduce='samples'``, the shape will be ``(N, X, 5)`` - - Raises: - ValueError: - If ``reduce`` is none of ``"micro"``, ``"macro"`` or ``"samples"``. - ValueError: - If ``mdmc_reduce`` is none of ``None``, ``"samplewise"``, ``"global"``. - ValueError: - If ``reduce`` is set to ``"macro"`` and ``num_classes`` is not provided. - ValueError: - If ``num_classes`` is set - and ``ignore_index`` is not in the range ``[0, num_classes)``. - ValueError: - If ``ignore_index`` is used with ``binary data``. - ValueError: - If inputs are ``multi-dimensional multi-class`` and ``mdmc_reduce`` is not provided.
- - Example: - >>> from paddlemetrics.functional import stat_scores - >>> preds = B.tensor([1, 0, 2, 1]) - >>> target = B.tensor([1, 1, 2, 0]) - >>> stat_scores(preds, target, reduce='macro', num_classes=3) - tensor([[0, 1, 2, 1, 1], - [1, 1, 1, 1, 2], - [1, 0, 3, 0, 1]]) - >>> stat_scores(preds, target, reduce='micro') - tensor([2, 2, 6, 2, 4]) - - """ - if reduce not in ["micro", "macro", "samples"]: - raise ValueError(f"The `reduce` {reduce} is not valid.") - - if mdmc_reduce not in [None, "samplewise", "global"]: - raise ValueError(f"The `mdmc_reduce` {mdmc_reduce} is not valid.") - - if reduce == "macro" and (not num_classes or num_classes < 1): - raise ValueError("When you set `reduce` as 'macro', you have to provide the number of classes.") - - if num_classes and ignore_index is not None and (not 0 <= ignore_index < num_classes or num_classes == 1): - raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes") - - tp, fp, tn, fn = _stat_scores_update( - preds, - target, - reduce=reduce, - mdmc_reduce=mdmc_reduce, - top_k=top_k, - threshold=threshold, - num_classes=num_classes, - multiclass=multiclass, - ignore_index=ignore_index, - ) - return _stat_scores_compute(tp, fp, tn, fn) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/image/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/image/__init__.py deleted file mode 100644 index 9fe64120c..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/image/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from paddlemetrics.functional.image.gradients import image_gradients # noqa: F401 -from paddlemetrics.functional.image.psnr import psnr # noqa: F401 diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/image/gradients.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/image/gradients.py deleted file mode 100644 index abe1b08d5..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/image/gradients.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
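The relation between `specificity` and `stat_scores` above can be checked directly. A minimal sketch, not part of the deleted sources, assuming both functions are importable from the `paddlemetrics` package added by this patch: the last dimension of the `stat_scores` output is `[tp, fp, tn, fn, support]`, and averaging the per-class `tn / (tn + fp)` ratios reproduces the macro-averaged specificity.

import paddleext.torchapi as B
from paddlemetrics.functional import specificity, stat_scores

preds = B.tensor([2, 0, 2, 1])
target = B.tensor([1, 1, 2, 0])

# With reduce='macro' the result is a (C, 5) tensor of per-class counts.
counts = stat_scores(preds, target, reduce="macro", num_classes=3)
tp, fp, tn, fn = counts[:, 0], counts[:, 1], counts[:, 2], counts[:, 3]

# Specificity is tn / (tn + fp); the mean over classes is the macro average.
macro = (tn.float() / (tn + fp).float()).mean()
print(macro)                                                       # tensor(0.6111)
print(specificity(preds, target, average="macro", num_classes=3))  # tensor(0.6111)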
-from typing import Tuple - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - - -def _image_gradients_validate(img: Tensor) -> None: - """Validates whether img is a 4D tensor.""" - - if not isinstance(img, Tensor): - raise TypeError(f"The `img` expects a value of type Tensor but got {type(img)}") - if img.ndim != 4: - raise RuntimeError(f"The `img` expects a 4D tensor but got {img.ndim}D tensor") - - -def _compute_image_gradients(img: Tensor) -> Tuple[Tensor, Tensor]: - """Computes image gradients (dy/dx) for a given image.""" - - batch_size, channels, height, width = img.shape - - dy = img[..., 1:, :] - img[..., :-1, :] - dx = img[..., :, 1:] - img[..., :, :-1] - - shapey = [batch_size, channels, 1, width] - dy = B.cat([dy, B.zeros(shapey, device=img.device, dtype=img.dtype)], dim=2) - dy = dy.view(img.shape) - - shapex = [batch_size, channels, height, 1] - dx = B.cat([dx, B.zeros(shapex, device=img.device, dtype=img.dtype)], dim=3) - dx = dx.view(img.shape) - - return dy, dx - - -def image_gradients(img: Tensor) -> Tuple[Tensor, Tensor]: - """Computes `Gradient Computation of Image`_ of a given image using finite difference. - - Args: - img: An ``(N, C, H, W)`` input tensor where C is the number of image channels - - Return: - Tuple of (dy, dx) with each gradient of shape ``[N, C, H, W]`` - - Raises: - TypeError: - If ``img`` is not of type ``Tensor``. - RuntimeError: - If ``img`` is not a 4D tensor. - - Example: - >>> from paddlemetrics.functional import image_gradients - >>> image = B.arange(0, 1*1*5*5, dtype=B.float32) - >>> image = B.reshape(image, (1, 1, 5, 5)) - >>> dy, dx = image_gradients(image) - >>> dy[0, 0, :, :] - tensor([[5., 5., 5., 5., 5.], - [5., 5., 5., 5., 5.], - [5., 5., 5., 5., 5.], - [5., 5., 5., 5., 5.], - [0., 0., 0., 0., 0.]]) - - .. note:: The implementation follows the 1-step finite difference method as followed - by the TF implementation. The values are organized such that the gradient of - [I(x+1, y) - I(x, y)] is at the (x, y) location - """ - _image_gradients_validate(img) - - return _compute_image_gradients(img) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/image/psnr.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/image/psnr.py deleted file mode 100644 index 2ffd60461..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/image/psnr.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Optional, Tuple, Union - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.utilities import rank_zero_warn, reduce - - -def _psnr_compute( - sum_squared_error: Tensor, - n_obs: Tensor, - data_range: Tensor, - base: float = 10.0, - reduction: str = "elementwise_mean", -) -> Tensor: - """Computes peak signal-to-noise ratio.
- - Args: - sum_squared_error: Sum of squared errors over all observations - n_obs: Number of predictions or observations - data_range: - the range of the data. If None, it is determined from the data (max - min). ``data_range`` must be given - when ``dim`` is not None. - base: a base of a logarithm to use (default: 10) - reduction: a method to reduce metric score over labels. - - - ``'elementwise_mean'``: takes the mean (default) - - ``'sum'``: takes the sum - - ``'none'``: no reduction will be applied - - Example: - >>> preds = B.tensor([[0.0, 1.0], [2.0, 3.0]]) - >>> target = B.tensor([[3.0, 2.0], [1.0, 0.0]]) - >>> data_range = target.max() - target.min() - >>> sum_squared_error, n_obs = _psnr_update(preds, target) - >>> _psnr_compute(sum_squared_error, n_obs, data_range) - tensor(2.5527) - """ - - psnr_base_e = 2 * B.log(data_range) - B.log(sum_squared_error / n_obs) - psnr_vals = psnr_base_e * (10 / B.log(tensor(base))) - return reduce(psnr_vals, reduction=reduction) - - -def _psnr_update( - preds: Tensor, - target: Tensor, - dim: Optional[Union[int, Tuple[int, ...]]] = None, -) -> Tuple[Tensor, Tensor]: - """Updates and returns variables required to compute peak signal-to-noise ratio. - - Args: - preds: Predicted tensor - target: Ground truth tensor - dim: - Dimensions to reduce PSNR scores over provided as either an integer or a list of integers. Default is - None meaning scores will be reduced across all dimensions. - """ - - if dim is None: - sum_squared_error = B.sum(B.pow(preds - target, 2)) - n_obs = tensor(target.numel(), device=target.device) - return sum_squared_error, n_obs - - diff = preds - target - sum_squared_error = B.sum(diff * diff, dim=dim) - - if isinstance(dim, int): - dim_list = [dim] - else: - dim_list = list(dim) - if not dim_list: - n_obs = tensor(target.numel(), device=target.device) - else: - n_obs = tensor(target.size(), device=target.device)[dim_list].prod() - n_obs = n_obs.expand_as(sum_squared_error) - - return sum_squared_error, n_obs - - -def psnr( - preds: Tensor, - target: Tensor, - data_range: Optional[float] = None, - base: float = 10.0, - reduction: str = "elementwise_mean", - dim: Optional[Union[int, Tuple[int, ...]]] = None, -) -> Tensor: - """Computes the peak signal-to-noise ratio. - - Args: - preds: estimated signal - target: ground truth signal - data_range: - the range of the data. If None, it is determined from the data (max - min). ``data_range`` must be given - when ``dim`` is not None. - base: a base of a logarithm to use (default: 10) - reduction: a method to reduce metric score over labels. - - - ``'elementwise_mean'``: takes the mean (default) - - ``'sum'``: takes the sum - - ``'none'``: no reduction will be applied - - dim: - Dimensions to reduce PSNR scores over provided as either an integer or a list of integers. Default is - None meaning scores will be reduced across all dimensions. - Return: - Tensor with PSNR score - - Raises: - ValueError: - If ``dim`` is not ``None`` and ``data_range`` is not provided. - - Example: - >>> from paddlemetrics.functional import psnr - >>> pred = B.tensor([[0.0, 1.0], [2.0, 3.0]]) - >>> target = B.tensor([[3.0, 2.0], [1.0, 0.0]]) - >>> psnr(pred, target) - tensor(2.5527) - - .. 
note:: - Half precision is only supported on GPU for this metric - """ - if dim is None and reduction != "elementwise_mean": - rank_zero_warn(f"The `reduction={reduction}` will not have any effect when `dim` is None.") - - if data_range is None: - if dim is not None: - # Maybe we could use `B.amax(target, dim=dim) - B.amin(target, dim=dim)` in PyTorch 1.7 to calculate - # `data_range` in the future. - raise ValueError("The `data_range` must be given when `dim` is not None.") - - data_range = target.max() - target.min() - else: - data_range = tensor(float(data_range)) - sum_squared_error, n_obs = _psnr_update(preds, target, dim=dim) - return _psnr_compute(sum_squared_error, n_obs, data_range, base=base, reduction=reduction) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/image/ssim.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/image/ssim.py deleted file mode 100644 index 52af9b793..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/image/ssim.py +++ /dev/null @@ -1,225 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Optional, Sequence, Tuple - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.utilities.checks import _check_same_shape -from paddlemetrics.utilities.distributed import reduce - - -def _gaussian(kernel_size: int, sigma: float, dtype: B.dtype, device: B.device) -> Tensor: - """Computes 1D gaussian kernel. - - Args: - kernel_size: size of the gaussian kernel - sigma: Standard deviation of the gaussian kernel - dtype: data type of the output tensor - device: device of the output tensor - - Example: - >>> _gaussian(3, 1, B.float, 'cpu') - tensor([[0.2741, 0.4519, 0.2741]]) - """ - dist = B.arange(start=(1 - kernel_size) / 2, end=(1 + kernel_size) / 2, step=1, dtype=dtype, device=device) - gauss = B.exp(-B.pow(dist / sigma, 2) / 2) - return (gauss / gauss.sum()).unsqueeze(dim=0) # (1, kernel_size) - - -def _gaussian_kernel( - channel: int, kernel_size: Sequence[int], sigma: Sequence[float], dtype: B.dtype, device: B.device -) -> Tensor: - """Computes 2D gaussian kernel.
- - Args: - channel: number of channels in the image - kernel_size: size of the gaussian kernel as a tuple (h, w) - sigma: Standard deviation of the gaussian kernel - dtype: data type of the output tensor - device: device of the output tensor - - Example: - >>> _gaussian_kernel(1, (5,5), (1,1), B.float, "cpu") - tensor([[[[0.0030, 0.0133, 0.0219, 0.0133, 0.0030], - [0.0133, 0.0596, 0.0983, 0.0596, 0.0133], - [0.0219, 0.0983, 0.1621, 0.0983, 0.0219], - [0.0133, 0.0596, 0.0983, 0.0596, 0.0133], - [0.0030, 0.0133, 0.0219, 0.0133, 0.0030]]]]) - """ - - gaussian_kernel_x = _gaussian(kernel_size[0], sigma[0], dtype, device) - gaussian_kernel_y = _gaussian(kernel_size[1], sigma[1], dtype, device) - kernel = B.matmul(gaussian_kernel_x.t(), gaussian_kernel_y) # (kernel_size, 1) * (1, kernel_size) - - return kernel.expand(channel, 1, kernel_size[0], kernel_size[1]) - - -def _ssim_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]: - """Updates and returns variables required to compute Structural Similarity Index Measure. Checks for same shape - and type of the input tensors. - - Args: - preds: Predicted tensor - target: Ground truth tensor - """ - - if preds.dtype != target.dtype: - raise TypeError( - "Expected `preds` and `target` to have the same data type." - f" Got preds: {preds.dtype} and target: {target.dtype}." - ) - _check_same_shape(preds, target) - if len(preds.shape) != 4: - raise ValueError( - "Expected `preds` and `target` to have BxCxHxW shape." - f" Got preds: {preds.shape} and target: {target.shape}." - ) - return preds, target - - -def _ssim_compute( - preds: Tensor, - target: Tensor, - kernel_size: Sequence[int] = (11, 11), - sigma: Sequence[float] = (1.5, 1.5), - reduction: str = "elementwise_mean", - data_range: Optional[float] = None, - k1: float = 0.01, - k2: float = 0.03, -) -> Tensor: - """Computes Structural Similarity Index Measure. - - Args: - preds: estimated image - target: ground truth image - kernel_size: size of the gaussian kernel (default: (11, 11)) - sigma: Standard deviation of the gaussian kernel (default: (1.5, 1.5)) - reduction: a method to reduce metric score over labels. - - - ``'elementwise_mean'``: takes the mean (default) - - ``'sum'``: takes the sum - - ``'none'``: no reduction will be applied - - data_range: Range of the image. If ``None``, it is determined from the image (max - min) - k1: Parameter of SSIM. Default: 0.01 - k2: Parameter of SSIM. Default: 0.03 - - Example: - >>> preds = B.rand([16, 1, 16, 16]) - >>> target = preds * 0.75 - >>> preds, target = _ssim_update(preds, target) - >>> _ssim_compute(preds, target) - tensor(0.9219) - """ - if len(kernel_size) != 2 or len(sigma) != 2: - raise ValueError( - "Expected `kernel_size` and `sigma` to have the length of two." - f" Got kernel_size: {len(kernel_size)} and sigma: {len(sigma)}." - ) - - if any(x % 2 == 0 or x <= 0 for x in kernel_size): - raise ValueError(f"Expected `kernel_size` to have odd positive number. Got {kernel_size}.") - - if any(y <= 0 for y in sigma): - raise ValueError(f"Expected `sigma` to have positive number. 
Got {sigma}.") - - if data_range is None: - data_range = max(preds.max() - preds.min(), target.max() - target.min()) - - c1 = pow(k1 * data_range, 2) - c2 = pow(k2 * data_range, 2) - device = preds.device - - channel = preds.size(1) - dtype = preds.dtype - kernel = _gaussian_kernel(channel, kernel_size, sigma, dtype, device) - pad_h = (kernel_size[0] - 1) // 2 - pad_w = (kernel_size[1] - 1) // 2 - - preds = B.pad(preds, (pad_h, pad_h, pad_w, pad_w), mode="reflect") - target = B.pad(target, (pad_h, pad_h, pad_w, pad_w), mode="reflect") - - input_list = B.cat((preds, target, preds * preds, target * target, preds * target)) # (5 * B, C, H, W) - outputs = B.conv2d(input_list, kernel, groups=channel) - output_list = outputs.split(preds.shape[0]) - - mu_pred_sq = output_list[0].pow(2) - mu_target_sq = output_list[1].pow(2) - mu_pred_target = output_list[0] * output_list[1] - - sigma_pred_sq = output_list[2] - mu_pred_sq - sigma_target_sq = output_list[3] - mu_target_sq - sigma_pred_target = output_list[4] - mu_pred_target - - upper = 2 * sigma_pred_target + c2 - lower = sigma_pred_sq + sigma_target_sq + c2 - - ssim_idx = ((2 * mu_pred_target + c1) * upper) / ((mu_pred_sq + mu_target_sq + c1) * lower) - ssim_idx = ssim_idx[..., pad_h:-pad_h, pad_w:-pad_w] - - return reduce(ssim_idx, reduction) - - -def ssim( - preds: Tensor, - target: Tensor, - kernel_size: Sequence[int] = (11, 11), - sigma: Sequence[float] = (1.5, 1.5), - reduction: str = "elementwise_mean", - data_range: Optional[float] = None, - k1: float = 0.01, - k2: float = 0.03, -) -> Tensor: - """Computes Structural Similarity Index Measure. - - Args: - preds: estimated image - target: ground truth image - kernel_size: size of the gaussian kernel (default: (11, 11)) - sigma: Standard deviation of the gaussian kernel (default: (1.5, 1.5)) - reduction: a method to reduce metric score over labels. - - - ``'elementwise_mean'``: takes the mean (default) - - ``'sum'``: takes the sum - - ``'none'``: no reduction will be applied - - data_range: Range of the image. If ``None``, it is determined from the image (max - min) - k1: Parameter of SSIM. Default: 0.01 - k2: Parameter of SSIM. Default: 0.03 - - Return: - Tensor with SSIM score - - Raises: - TypeError: - If ``preds`` and ``target`` don't have the same data type. - ValueError: - If ``preds`` and ``target`` don't have ``BxCxHxW shape``. - ValueError: - If the length of ``kernel_size`` or ``sigma`` is not ``2``. - ValueError: - If one of the elements of ``kernel_size`` is not an ``odd positive number``. - ValueError: - If one of the elements of ``sigma`` is not a ``positive number``. - - Example: - >>> from paddlemetrics.functional import ssim - >>> preds = B.rand([16, 1, 16, 16]) - >>> target = preds * 0.75 - >>> ssim(preds, target) - tensor(0.9219) - """ - preds, target = _ssim_update(preds, target) - return _ssim_compute(preds, target, kernel_size, sigma, reduction, data_range, k1, k2) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/__init__.py deleted file mode 100644 index 1d28d0c4b..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from paddlemetrics.functional.pairwise.cosine import pairwise_cosine_similarity # noqa: F401 -from paddlemetrics.functional.pairwise.euclidean import pairwise_euclidean_distance # noqa: F401 -from paddlemetrics.functional.pairwise.linear import pairwise_linear_similarity # noqa: F401 -from paddlemetrics.functional.pairwise.manhatten import pairwise_manhatten_distance # noqa: F401 diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/cosine.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/cosine.py deleted file mode 100644 index cdd24e155..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/cosine.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix - - -def _pairwise_cosine_similarity_update( - x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None -) -> Tensor: - """Calculates the pairwise cosine similarity matrix. - - Args: - x: tensor of shape ``[N,d]`` - y: tensor of shape ``[M,d]`` - zero_diagonal: determines if the diagonal of the distance matrix should be set to zero - """ - x, y, zero_diagonal = _check_input(x, y, zero_diagonal) - - norm = B.norm(x, p=2, dim=1) - x /= norm.unsqueeze(1) - norm = B.norm(y, p=2, dim=1) - y /= norm.unsqueeze(1) - - distance = x @ y.T - if zero_diagonal: - distance.fill_diagonal_(0) - return distance - - -def pairwise_cosine_similarity( - x: Tensor, y: Optional[Tensor] = None, reduction: Optional[str] = None, zero_diagonal: Optional[bool] = None -) -> Tensor: - r""" - Calculates pairwise cosine similarity: - - .. math:: - s_{cos}(x,y) = \frac{\langle x, y \rangle}{||x|| \cdot ||y||} - = \frac{\sum_{d=1}^D x_d \cdot y_d }{\sqrt{\sum_{d=1}^D x_d^2} \cdot \sqrt{\sum_{d=1}^D y_d^2}} - - If both `x` and `y` are passed in, the calculation will be performed pairwise between the rows of `x` and `y`. - If only `x` is passed in, the calculation will be performed between the rows of `x`. - - Args: - x: Tensor with shape ``[N, d]`` - y: Tensor with shape ``[M, d]``, optional - reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'` - (applied along column dimension) or `'none'`, `None` for no reduction - zero_diagonal: if the diagonal of the distance matrix should be set to 0. 
If only `x` is given - this defaults to `True` else if `y` is also given it defaults to `False` - - Returns: - A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix - - Example: - >>> import paddleext.torchapi as B - >>> from paddlemetrics.functional import pairwise_cosine_similarity - >>> x = B.tensor([[2, 3], [3, 5], [5, 8]], dtype=B.float32) - >>> y = B.tensor([[1, 0], [2, 1]], dtype=B.float32) - >>> pairwise_cosine_similarity(x, y) - tensor([[0.5547, 0.8682], - [0.5145, 0.8437], - [0.5300, 0.8533]]) - >>> pairwise_cosine_similarity(x) - tensor([[0.0000, 0.9989, 0.9996], - [0.9989, 0.0000, 0.9998], - [0.9996, 0.9998, 0.0000]]) - - """ - distance = _pairwise_cosine_similarity_update(x, y, zero_diagonal) - return _reduce_distance_matrix(distance, reduction) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/euclidean.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/euclidean.py deleted file mode 100644 index fd31cd7f7..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/euclidean.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Optional - -from paddleext.torchapi import Tensor - -from paddlemetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix - - -def _pairwise_euclidean_distance_update( - x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None -) -> Tensor: - """Calculates the pairwise Euclidean distance matrix. - - Args: - x: tensor of shape ``[N,d]`` - y: tensor of shape ``[M,d]`` - zero_diagonal: determines if the diagonal of the distance matrix should be set to zero - """ - x, y, zero_diagonal = _check_input(x, y, zero_diagonal) - x_norm = x.norm(dim=1, keepdim=True) - y_norm = y.norm(dim=1).T - distance = x_norm * x_norm + y_norm * y_norm - 2 * x.mm(y.T) - if zero_diagonal: - distance.fill_diagonal_(0) - return distance.sqrt() - - -def pairwise_euclidean_distance( - x: Tensor, y: Optional[Tensor] = None, reduction: Optional[str] = None, zero_diagonal: Optional[bool] = None -) -> Tensor: - r""" - Calculates pairwise Euclidean distances: - - .. math:: - d_{euc}(x,y) = ||x - y||_2 = \sqrt{\sum_{d=1}^D (x_d - y_d)^2} - - If both `x` and `y` are passed in, the calculation will be performed pairwise between the rows of `x` and `y`. - If only `x` is passed in, the calculation will be performed between the rows of `x`. - - Args: - x: Tensor with shape ``[N, d]`` - y: Tensor with shape ``[M, d]``, optional - reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'` - (applied along column dimension) or `'none'`, `None` for no reduction - zero_diagonal: if the diagonal of the distance matrix should be set to 0. 
If only `x` is given - this defaults to `True` else if `y` is also given it defaults to `False` - - Returns: - A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix - - Example: - >>> import paddleext.torchapi as B - >>> from paddlemetrics.functional import pairwise_euclidean_distance - >>> x = B.tensor([[2, 3], [3, 5], [5, 8]], dtype=B.float32) - >>> y = B.tensor([[1, 0], [2, 1]], dtype=B.float32) - >>> pairwise_euclidean_distance(x, y) - tensor([[3.1623, 2.0000], - [5.3852, 4.1231], - [8.9443, 7.6158]]) - >>> pairwise_euclidean_distance(x) - tensor([[0.0000, 2.2361, 5.8310], - [2.2361, 0.0000, 3.6056], - [5.8310, 3.6056, 0.0000]]) - - """ - distance = _pairwise_euclidean_distance_update(x, y, zero_diagonal) - return _reduce_distance_matrix(distance, reduction) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/helpers.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/helpers.py deleted file mode 100644 index 2d38916af..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/helpers.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Optional, Tuple - -from paddleext.torchapi import Tensor - - -def _check_input( - x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None -) -> Tuple[Tensor, Tensor, bool]: - """Check that input has the right dimensionality and sets the ``zero_diagonal`` argument if the user has not provided - it. - - Args: - x: tensor of shape ``[N,d]`` - y: if provided, a tensor of shape ``[M,d]`` - zero_diagonal: determines if the diagonal of the distance matrix should be set to zero - """ - if x.ndim != 2: - raise ValueError(f"Expected argument `x` to be a 2D tensor of shape `[N, d]` but got {x.shape}") - - if y is not None: - if y.ndim != 2 or y.shape[1] != x.shape[1]: - raise ValueError( - "Expected argument `y` to be a 2D tensor of shape `[M, d]` where" - " `d` should be same as the last dimension of `x`" - ) - zero_diagonal = False if zero_diagonal is None else zero_diagonal - else: - y = x.clone() - zero_diagonal = True if zero_diagonal is None else zero_diagonal - return x, y, zero_diagonal - - -def _reduce_distance_matrix(distmat: Tensor, reduction: Optional[str] = None) -> Tensor: - """Final reduction of distance matrix. 
- - Args: - distmat: a ``[N,M]`` matrix - reduction: string determining how to reduce along last dimension - """ - if reduction == "mean": - return distmat.mean(dim=-1) - if reduction == "sum": - return distmat.sum(dim=-1) - if reduction is None or reduction == "none": - return distmat - raise ValueError(f"Expected reduction to be one of `['mean', 'sum', None]` but got {reduction}") diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/linear.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/linear.py deleted file mode 100644 index 08e793019..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/linear.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Optional - -from paddleext.torchapi import Tensor - -from paddlemetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix - - -def _pairwise_linear_similarity_update( - x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None -) -> Tensor: - """Calculates the pairwise linear similarity matrix. - - Args: - x: tensor of shape ``[N,d]`` - y: tensor of shape ``[M,d]`` - zero_diagonal: determines if the diagonal of the distance matrix should be set to zero - """ - x, y, zero_diagonal = _check_input(x, y, zero_diagonal) - - distance = x @ y.T - if zero_diagonal: - distance.fill_diagonal_(0) - return distance - - -def pairwise_linear_similarity( - x: Tensor, y: Optional[Tensor] = None, reduction: Optional[str] = None, zero_diagonal: Optional[bool] = None -) -> Tensor: - r""" - Calculates pairwise linear similarity: - - .. math:: - s_{lin}(x,y) = \langle x, y \rangle = \sum_{d=1}^D x_d \cdot y_d - - If both `x` and `y` are passed in, the calculation will be performed pairwise between the rows of `x` and `y`. - If only `x` is passed in, the calculation will be performed between the rows of `x`. - - Args: - x: Tensor with shape ``[N, d]`` - y: Tensor with shape ``[M, d]``, optional - reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'` - (applied along column dimension) or `'none'`, `None` for no reduction - zero_diagonal: if the diagonal of the distance matrix should be set to 0. 
If only `x` is given - this defaults to `True` else if `y` is also given it defaults to `False` - - Returns: - A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix - - Example: - >>> import paddleext.torchapi as B - >>> from paddlemetrics.functional import pairwise_linear_similarity - >>> x = B.tensor([[2, 3], [3, 5], [5, 8]], dtype=B.float32) - >>> y = B.tensor([[1, 0], [2, 1]], dtype=B.float32) - >>> pairwise_linear_similarity(x, y) - tensor([[ 2., 7.], - [ 3., 11.], - [ 5., 18.]]) - >>> pairwise_linear_similarity(x) - tensor([[ 0., 21., 34.], - [21., 0., 55.], - [34., 55., 0.]]) - - """ - distance = _pairwise_linear_similarity_update(x, y, zero_diagonal) - return _reduce_distance_matrix(distance, reduction) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/manhatten.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/manhatten.py deleted file mode 100644 index d0079bd62..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/pairwise/manhatten.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Optional - -from paddleext.torchapi import Tensor - -from paddlemetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix - - -def _pairwise_manhatten_distance_update( - x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None -) -> Tensor: - """Calculates the pairwise Manhattan distance matrix. - - Args: - x: tensor of shape ``[N,d]`` - y: if provided, a tensor of shape ``[M,d]`` - zero_diagonal: determines if the diagonal of the distance matrix should be set to zero - """ - x, y, zero_diagonal = _check_input(x, y, zero_diagonal) - - distance = (x.unsqueeze(1) - y.unsqueeze(0).repeat(x.shape[0], 1, 1)).abs().sum(dim=-1) - if zero_diagonal: - distance.fill_diagonal_(0) - return distance - - -def pairwise_manhatten_distance( - x: Tensor, y: Optional[Tensor] = None, reduction: Optional[str] = None, zero_diagonal: Optional[bool] = None -) -> Tensor: - r""" - Calculates pairwise Manhattan distance: - - .. math:: - d_{man}(x,y) = ||x-y||_1 = \sum_{d=1}^D |x_d - y_d| - - If both `x` and `y` are passed in, the calculation will be performed pairwise between the rows of `x` and `y`. - If only `x` is passed in, the calculation will be performed between the rows of `x`. - - Args: - x: Tensor with shape ``[N, d]`` - y: Tensor with shape ``[M, d]``, optional - reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'` - (applied along column dimension) or `'none'`, `None` for no reduction - zero_diagonal: if the diagonal of the distance matrix should be set to 0. 
If only `x` is given - this defaults to `True` else if `y` is also given it defaults to `False` - - Returns: - A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix - - Example: - >>> import torchapi as B - >>> from paddlemetrics.functional import pairwise_manhatten_distance - >>> x = B.tensor([[2, 3], [3, 5], [5, 8]], dtype=B.float32) - >>> y = B.tensor([[1, 0], [2, 1]], dtype=B.float32) - >>> pairwise_manhatten_distance(x, y) - tensor([[ 4., 2.], - [ 7., 5.], - [12., 10.]]) - >>> pairwise_manhatten_distance(x) - tensor([[0., 3., 8.], - [3., 0., 5.], - [8., 5., 0.]]) - - """ - distance = _pairwise_manhatten_distance_update(x, y, zero_diagonal) - return _reduce_distance_matrix(distance, reduction) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/__init__.py deleted file mode 100644 index 7ddc60404..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from paddlemetrics.functional.image.psnr import psnr # noqa: F401 -from paddlemetrics.functional.image.ssim import ssim # noqa: F401 -from paddlemetrics.functional.regression.cosine_similarity import cosine_similarity # noqa: F401 -from paddlemetrics.functional.regression.explained_variance import explained_variance # noqa: F401 -from paddlemetrics.functional.regression.mean_absolute_error import mean_absolute_error # noqa: F401 -from paddlemetrics.functional.regression.mean_absolute_percentage_error import ( # noqa: F401 - mean_absolute_percentage_error, -) -from paddlemetrics.functional.regression.mean_squared_error import mean_squared_error # noqa: F401 -from paddlemetrics.functional.regression.mean_squared_log_error import mean_squared_log_error # noqa: F401 -from paddlemetrics.functional.regression.pearson import pearson_corrcoef # noqa: F401 -from paddlemetrics.functional.regression.r2 import r2_score # noqa: F401 -from paddlemetrics.functional.regression.spearman import spearman_corrcoef # noqa: F401 -from paddlemetrics.functional.regression.tweedie_deviance import tweedie_deviance_score # noqa: F401 diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/cosine_similarity.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/cosine_similarity.py deleted file mode 100644 index ea0f77a3b..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/cosine_similarity.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Tuple - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.utilities.checks import _check_same_shape - - -def _cosine_similarity_update( - preds: Tensor, - target: Tensor, -) -> Tuple[Tensor, Tensor]: - """Updates and returns variables required to compute Cosine Similarity. Checks for same shape of input tensors. - - Args: - preds: Predicted tensor - target: Ground truth tensor - """ - - _check_same_shape(preds, target) - preds = preds.float() - target = target.float() - - return preds, target - - -def _cosine_similarity_compute(preds: Tensor, target: Tensor, reduction: str = "sum") -> Tensor: - """Computes Cosine Similarity. - - Args: - preds: Predicted tensor - target: Ground truth tensor - reduction: - The method of reducing along the batch dimension using sum, mean or taking the individual scores - - Example: - >>> target = B.tensor([[1, 2, 3, 4], [1, 2, 3, 4]]) - >>> preds = B.tensor([[1, 2, 3, 4], [-1, -2, -3, -4]]) - >>> preds, target = _cosine_similarity_update(preds, target) - >>> _cosine_similarity_compute(preds, target, 'none') - tensor([ 1.0000, -1.0000]) - """ - - dot_product = (preds * target).sum(dim=-1) - preds_norm = preds.norm(dim=-1) - target_norm = target.norm(dim=-1) - similarity = dot_product / (preds_norm * target_norm) - reduction_mapping = { - "sum": B.sum, - "mean": B.mean, - "none": lambda x: x, - None: lambda x: x, - } - return reduction_mapping[reduction](similarity) - - -def cosine_similarity(preds: Tensor, target: Tensor, reduction: str = "sum") -> Tensor: - r""" - Computes the `Cosine Similarity`_ - between targets and predictions: - - .. math:: - cos_{sim}(x,y) = \frac{x \cdot y}{||x|| \cdot ||y||} = - \frac{\sum_{i=1}^n x_i y_i}{\sqrt{\sum_{i=1}^n x_i^2}\sqrt{\sum_{i=1}^n y_i^2}} - - where :math:`y` is a tensor of target values, and :math:`x` is a tensor of predictions. - - Args: - preds: Predicted tensor with shape ``(N,d)`` - target: Ground truth tensor with shape ``(N,d)`` - reduction: - The method of reducing along the batch dimension using sum, mean or taking the individual scores - - Example: - >>> from paddlemetrics.functional.regression import cosine_similarity - >>> target = B.tensor([[1, 2, 3, 4], - ... [1, 2, 3, 4]]) - >>> preds = B.tensor([[1, 2, 3, 4], - ... [-1, -2, -3, -4]]) - >>> cosine_similarity(preds, target, 'none') - tensor([ 1.0000, -1.0000]) - - """ - preds, target = _cosine_similarity_update(preds, target) - return _cosine_similarity_compute(preds, target, reduction) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/explained_variance.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/explained_variance.py deleted file mode 100644 index 95ef6acf4..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/explained_variance.py +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Sequence, Tuple, Union - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.utilities.checks import _check_same_shape - - -def _explained_variance_update(preds: Tensor, target: Tensor) -> Tuple[int, Tensor, Tensor, Tensor, Tensor]: - """Updates and returns variables required to compute Explained Variance. Checks for same shape of input - tensors. - - Args: - preds: Predicted tensor - target: Ground truth tensor - """ - - _check_same_shape(preds, target) - - n_obs = preds.size(0) - sum_error = B.sum(target - preds, dim=0) - diff = target - preds - sum_squared_error = B.sum(diff * diff, dim=0) - - sum_target = B.sum(target, dim=0) - sum_squared_target = B.sum(target * target, dim=0) - - return n_obs, sum_error, sum_squared_error, sum_target, sum_squared_target - - -def _explained_variance_compute( - n_obs: Tensor, - sum_error: Tensor, - sum_squared_error: Tensor, - sum_target: Tensor, - sum_squared_target: Tensor, - multioutput: str = "uniform_average", -) -> Tensor: - """Computes Explained Variance. - - Args: - n_obs: Number of predictions or observations - sum_error: Sum of errors over all observations - sum_squared_error: Sum of square of errors over all observations - sum_target: Sum of target values - sum_squared_target: Sum of squares of target values - multioutput: Defines aggregation in the case of multiple output scores. Can be one - of the following strings (default is `'uniform_average'`.): - - * `'raw_values'` returns full set of scores - * `'uniform_average'` scores are uniformly averaged - * `'variance_weighted'` scores are weighted by their individual variances - - Example: - >>> target = B.tensor([[0.5, 1], [-1, 1], [7, -6]]) - >>> preds = B.tensor([[0, 2], [-1, 2], [8, -5]]) - >>> n_obs, sum_error, ss_error, sum_target, ss_target = _explained_variance_update(preds, target) - >>> _explained_variance_compute(n_obs, sum_error, ss_error, sum_target, ss_target, multioutput='raw_values') - tensor([0.9677, 1.0000]) - """ - - diff_avg = sum_error / n_obs - numerator = sum_squared_error / n_obs - (diff_avg * diff_avg) - - target_avg = sum_target / n_obs - denominator = sum_squared_target / n_obs - (target_avg * target_avg) - - # Take care of division by zero - nonzero_numerator = numerator != 0 - nonzero_denominator = denominator != 0 - valid_score = nonzero_numerator & nonzero_denominator - output_scores = B.ones_like(diff_avg) - output_scores[valid_score] = 1.0 - (numerator[valid_score] / denominator[valid_score]) - output_scores[nonzero_numerator & ~nonzero_denominator] = 0.0 - - # Decide what to do in multioutput case - # Todo: allow user to pass in tensor with weights - if multioutput == "raw_values": - return output_scores - if multioutput == "uniform_average": - return B.mean(output_scores) - if multioutput == "variance_weighted": - denom_sum = B.sum(denominator) - return B.sum(denominator / denom_sum * output_scores) - - -def explained_variance( - preds: Tensor, - target: Tensor, - multioutput: str = "uniform_average", -) -> Union[Tensor, Sequence[Tensor]]: - """Computes explained variance. 
- - Args: - preds: estimated labels - target: ground truth labels - multioutput: Defines aggregation in the case of multiple output scores. Can be one - of the following strings (default is `'uniform_average'`.): - - * `'raw_values'` returns full set of scores - * `'uniform_average'` scores are uniformly averaged - * `'variance_weighted'` scores are weighted by their individual variances - - Example: - >>> from paddlemetrics.functional import explained_variance - >>> target = B.tensor([3, -0.5, 2, 7]) - >>> preds = B.tensor([2.5, 0.0, 2, 8]) - >>> explained_variance(preds, target) - tensor(0.9572) - - >>> target = B.tensor([[0.5, 1], [-1, 1], [7, -6]]) - >>> preds = B.tensor([[0, 2], [-1, 2], [8, -5]]) - >>> explained_variance(preds, target, multioutput='raw_values') - tensor([0.9677, 1.0000]) - """ - n_obs, sum_error, sum_squared_error, sum_target, sum_squared_target = _explained_variance_update(preds, target) - return _explained_variance_compute( - n_obs, - sum_error, - sum_squared_error, - sum_target, - sum_squared_target, - multioutput, - ) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/mean_absolute_error.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/mean_absolute_error.py deleted file mode 100644 index 1ddb41533..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/mean_absolute_error.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Tuple - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.utilities.checks import _check_same_shape - - -def _mean_absolute_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]: - """Updates and returns variables required to compute Mean Absolute Error. Checks for same shape of input - tensors. - - Args: - preds: Predicted tensor - target: Ground truth tensor - """ - - _check_same_shape(preds, target) - sum_abs_error = B.sum(B.abs(preds - target)) - n_obs = target.numel() - return sum_abs_error, n_obs - - -def _mean_absolute_error_compute(sum_abs_error: Tensor, n_obs: int) -> Tensor: - """Computes Mean Absolute Error. - - Args: - sum_abs_error: Sum of absolute value of errors over all observations - n_obs: Number of predictions or observations - - Example: - >>> preds = B.tensor([0., 1, 2, 3]) - >>> target = B.tensor([0., 1, 2, 2]) - >>> sum_abs_error, n_obs = _mean_absolute_error_update(preds, target) - >>> _mean_absolute_error_compute(sum_abs_error, n_obs) - tensor(0.2500) - """ - - return sum_abs_error / n_obs - - -def mean_absolute_error(preds: Tensor, target: Tensor) -> Tensor: - """Computes mean absolute error. 
- - Args: - preds: estimated labels - target: ground truth labels - - Return: - Tensor with MAE - - Example: - >>> from paddlemetrics.functional import mean_absolute_error - >>> x = B.tensor([0., 1, 2, 3]) - >>> y = B.tensor([0., 1, 2, 2]) - >>> mean_absolute_error(x, y) - tensor(0.2500) - """ - sum_abs_error, n_obs = _mean_absolute_error_update(preds, target) - return _mean_absolute_error_compute(sum_abs_error, n_obs) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/mean_absolute_percentage_error.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/mean_absolute_percentage_error.py deleted file mode 100644 index 862617c01..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/mean_absolute_percentage_error.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Tuple - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.utilities.checks import _check_same_shape - - -def _mean_absolute_percentage_error_update( - preds: Tensor, - target: Tensor, - epsilon: float = 1.17e-06, -) -> Tuple[Tensor, int]: - """Updates and returns variables required to compute Mean Absolute Percentage Error. Checks for same shape of input - tensors. - - Args: - preds: Predicted tensor - target: Ground truth tensor - epsilon: Specifies the lower bound for target values. Any target value below epsilon - is set to epsilon (avoids ZeroDivisionError). default: 1.17e-06 - """ - - _check_same_shape(preds, target) - - abs_diff = B.abs(preds - target) - abs_per_error = abs_diff / B.clamp(B.abs(target), min=epsilon) - - sum_abs_per_error = B.sum(abs_per_error) - - num_obs = target.numel() - - return sum_abs_per_error, num_obs - - -def _mean_absolute_percentage_error_compute(sum_abs_per_error: Tensor, num_obs: int) -> Tensor: - """Computes Mean Absolute Percentage Error. - - Args: - sum_abs_per_error: Sum of absolute value of percentage errors over all observations - (percentage error = (target - prediction) / target) - num_obs: Number of predictions or observations - - Example: - >>> target = B.tensor([1, 10, 1e6]) - >>> preds = B.tensor([0.9, 15, 1.2e6]) - >>> sum_abs_per_error, num_obs = _mean_absolute_percentage_error_update(preds, target) - >>> _mean_absolute_percentage_error_compute(sum_abs_per_error, num_obs) - tensor(0.2667) - """ - - return sum_abs_per_error / num_obs - - -def mean_absolute_percentage_error(preds: Tensor, target: Tensor) -> Tensor: - """Computes mean absolute percentage error. - - Args: - preds: estimated labels - target: ground truth labels - - Return: - Tensor with MAPE - - Note: - The epsilon value is taken from `scikit-learn's implementation of MAPE`_.
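To make the epsilon clamp above concrete, here is a minimal plain-Python sketch of the same update/compute split (illustrative only; `mape_sketch` is a hypothetical name, not part of the paddlemetrics API):

def mape_sketch(preds, target, epsilon=1.17e-06):
    # Accumulate |error| / max(|target|, epsilon), then average; the clamp
    # keeps near-zero targets from blowing up the denominator.
    total = sum(abs(p - t) / max(abs(t), epsilon) for p, t in zip(preds, target))
    return total / len(target)

# mape_sketch([0.9, 15.0, 1.2e6], [1.0, 10.0, 1e6]) ~= 0.2667, matching the doctest below.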
- - Example: - >>> from paddlemetrics.functional import mean_absolute_percentage_error - >>> target = B.tensor([1, 10, 1e6]) - >>> preds = B.tensor([0.9, 15, 1.2e6]) - >>> mean_absolute_percentage_error(preds, target) - tensor(0.2667) - """ - sum_abs_per_error, num_obs = _mean_absolute_percentage_error_update(preds, target) - mean_ape = _mean_absolute_percentage_error_compute(sum_abs_per_error, num_obs) - - return mean_ape diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/mean_squared_error.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/mean_squared_error.py deleted file mode 100644 index 58af5d21b..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/mean_squared_error.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Tuple - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.utilities.checks import _check_same_shape - - -def _mean_squared_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]: - """Updates and returns variables required to compute Mean Squared Error. Checks for same shape of input - tensors. - - Args: - preds: Predicted tensor - target: Ground truth tensor - """ - _check_same_shape(preds, target) - diff = preds - target - sum_squared_error = B.sum(diff * diff) - n_obs = target.numel() - return sum_squared_error, n_obs - - -def _mean_squared_error_compute(sum_squared_error: Tensor, n_obs: int, squared: bool = True) -> Tensor: - """Computes Mean Squared Error. - - Args: - sum_squared_error: Sum of square of errors over all observations - n_obs: Number of predictions or observations - squared: Returns RMSE value if set to False. default: True - - Example: - >>> preds = B.tensor([0., 1, 2, 3]) - >>> target = B.tensor([0., 1, 2, 2]) - >>> sum_squared_error, n_obs = _mean_squared_error_update(preds, target) - >>> _mean_squared_error_compute(sum_squared_error, n_obs) - tensor(0.2500) - """ - return sum_squared_error / n_obs if squared else B.sqrt(sum_squared_error / n_obs) - - -def mean_squared_error(preds: Tensor, target: Tensor, squared: bool = True) -> Tensor: - """Computes mean squared error. 
- - Args: - preds: estimated labels - target: ground truth labels - squared: returns RMSE value if set to False - - Return: - Tensor with MSE - - Example: - >>> from paddlemetrics.functional import mean_squared_error - >>> x = B.tensor([0., 1, 2, 3]) - >>> y = B.tensor([0., 1, 2, 2]) - >>> mean_squared_error(x, y) - tensor(0.2500) - """ - sum_squared_error, n_obs = _mean_squared_error_update(preds, target) - return _mean_squared_error_compute(sum_squared_error, n_obs, squared=squared) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/mean_squared_log_error.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/mean_squared_log_error.py deleted file mode 100644 index 7270ffc00..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/mean_squared_log_error.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Tuple - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.utilities.checks import _check_same_shape - - -def _mean_squared_log_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]: - """Returns variables required to compute Mean Squared Log Error. Checks for same shape of tensors. - - Args: - preds: Predicted tensor - target: Ground truth tensor - """ - - _check_same_shape(preds, target) - sum_squared_log_error = B.sum(B.pow(B.log1p(preds) - B.log1p(target), 2)) - n_obs = target.numel() - return sum_squared_log_error, n_obs - - -def _mean_squared_log_error_compute(sum_squared_log_error: Tensor, n_obs: int) -> Tensor: - """Computes Mean Squared Log Error. - - Args: - sum_squared_log_error: Sum of square of log errors over all observations - (log error = log(target) - log(prediction)) - n_obs: Number of predictions or observations - - Example: - >>> preds = B.tensor([0., 1, 2, 3]) - >>> target = B.tensor([0., 1, 2, 2]) - >>> sum_squared_log_error, n_obs = _mean_squared_log_error_update(preds, target) - >>> _mean_squared_log_error_compute(sum_squared_log_error, n_obs) - tensor(0.0207) - """ - - return sum_squared_log_error / n_obs - - -def mean_squared_log_error(preds: Tensor, target: Tensor) -> Tensor: - """Computes mean squared log error. - - Args: - preds: estimated labels - target: ground truth labels - - Return: - Tensor with MSLE - - Example: - >>> from paddlemetrics.functional import mean_squared_log_error - >>> x = B.tensor([0., 1, 2, 3]) - >>> y = B.tensor([0., 1, 2, 2]) - >>> mean_squared_log_error(x, y) - tensor(0.0207) - - .. 
note:: - Half precision is only supported on GPU for this metric - """ - sum_squared_log_error, n_obs = _mean_squared_log_error_update(preds, target) - return _mean_squared_log_error_compute(sum_squared_log_error, n_obs) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/pearson.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/pearson.py deleted file mode 100644 index e1f7dd82f..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/pearson.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Tuple - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.utilities.checks import _check_same_shape - - -def _pearson_corrcoef_update( - preds: Tensor, - target: Tensor, - mean_x: Tensor, - mean_y: Tensor, - var_x: Tensor, - var_y: Tensor, - corr_xy: Tensor, - n_prior: Tensor, -) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]: - """Updates and returns variables required to compute Pearson Correlation Coefficient. Checks for same shape of - input tensors. - - Args: - preds: estimated scores - target: ground truth scores - mean_x: current mean estimate of x tensor - mean_y: current mean estimate of y tensor - var_x: current variance estimate of x tensor - var_y: current variance estimate of y tensor - corr_xy: current covariance estimate between x and y tensor - n_prior: current number of observed observations - """ - # Data checking - _check_same_shape(preds, target) - preds = preds.squeeze() - target = target.squeeze() - if preds.ndim > 1 or target.ndim > 1: - raise ValueError("Expected both predictions and target to be 1 dimensional tensors.") - - n_obs = preds.numel() - mx_new = (n_prior * mean_x + preds.mean() * n_obs) / (n_prior + n_obs) - my_new = (n_prior * mean_y + target.mean() * n_obs) / (n_prior + n_obs) - n_prior += n_obs - var_x += ((preds - mx_new) * (preds - mean_x)).sum() - var_y += ((target - my_new) * (target - mean_y)).sum() - corr_xy += ((preds - mx_new) * (target - mean_y)).sum() - mean_x = mx_new - mean_y = my_new - - return mean_x, mean_y, var_x, var_y, corr_xy, n_prior - - -def _pearson_corrcoef_compute( - var_x: Tensor, - var_y: Tensor, - corr_xy: Tensor, - nb: Tensor, -) -> Tensor: - """Computes the final Pearson correlation based on accumulated statistics. - - Args: - var_x: variance estimate of x tensor - var_y: variance estimate of y tensor - corr_xy: covariance estimate between x and y tensor - nb: number of observations - """ - var_x /= nb - 1 - var_y /= nb - 1 - corr_xy /= nb - 1 - corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze() - return B.clamp(corrcoef, -1.0, 1.0) - - -def pearson_corrcoef(preds: Tensor, target: Tensor) -> Tensor: - """Computes the Pearson correlation coefficient.
- - Args: - preds: estimated scores - target: ground truth scores - - Example: - >>> from paddlemetrics.functional import pearson_corrcoef - >>> target = B.tensor([3, -0.5, 2, 7]) - >>> preds = B.tensor([2.5, 0.0, 2, 8]) - >>> pearson_corrcoef(preds, target) - tensor(0.9849) - """ - _temp = B.zeros(1, dtype=preds.dtype, device=preds.device) - mean_x, mean_y, var_x = _temp.clone(), _temp.clone(), _temp.clone() - var_y, corr_xy, nb = _temp.clone(), _temp.clone(), _temp.clone() - _, _, var_x, var_y, corr_xy, nb = _pearson_corrcoef_update(preds, target, mean_x, mean_y, var_x, var_y, corr_xy, nb) - return _pearson_corrcoef_compute(var_x, var_y, corr_xy, nb) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/r2.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/r2.py deleted file mode 100644 index a83219122..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/r2.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Tuple - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.utilities import rank_zero_warn -from paddlemetrics.utilities.checks import _check_same_shape - - -def _r2_score_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor]: - """Updates and returns variables required to compute R2 score. Checks for same shape and 1D/2D input tensors. - - Args: - preds: Predicted tensor - target: Ground truth tensor - """ - - _check_same_shape(preds, target) - if preds.ndim > 2: - raise ValueError( - "Expected both prediction and target to be 1D or 2D tensors," - f" but received tensors with dimension {preds.shape}" - ) - - sum_obs = B.sum(target, dim=0) - sum_squared_obs = B.sum(target * target, dim=0) - residual = target - preds - rss = B.sum(residual * residual, dim=0) - n_obs = target.size(0) - - return sum_squared_obs, sum_obs, rss, n_obs - - -def _r2_score_compute( - sum_squared_obs: Tensor, - sum_obs: Tensor, - rss: Tensor, - n_obs: Tensor, - adjusted: int = 0, - multioutput: str = "uniform_average", -) -> Tensor: - """Computes R2 score. - - Args: - sum_squared_obs: Sum of square of all observations - sum_obs: Sum of all observations - rss: Residual sum of squares - n_obs: Number of predictions or observations - adjusted: number of independent regressors for calculating adjusted r2 score. - Default 0 (standard r2 score). - multioutput: Defines aggregation in the case of multiple output scores. 
Can be one - of the following strings (default is `'uniform_average'`.): - - * `'raw_values'` returns full set of scores - * `'uniform_average'` scores are uniformly averaged - * `'variance_weighted'` scores are weighted by their individual variances - - Example: - >>> target = B.tensor([[0.5, 1], [-1, 1], [7, -6]]) - >>> preds = B.tensor([[0, 2], [-1, 2], [8, -5]]) - >>> sum_squared_obs, sum_obs, rss, n_obs = _r2_score_update(preds, target) - >>> _r2_score_compute(sum_squared_obs, sum_obs, rss, n_obs, multioutput="raw_values") - tensor([0.9654, 0.9082]) - """ - if n_obs < 2: - raise ValueError("Needs at least two samples to calculate r2 score.") - - mean_obs = sum_obs / n_obs - tss = sum_squared_obs - sum_obs * mean_obs - raw_scores = 1 - (rss / tss) - - if multioutput == "raw_values": - r2 = raw_scores - elif multioutput == "uniform_average": - r2 = B.mean(raw_scores) - elif multioutput == "variance_weighted": - tss_sum = B.sum(tss) - r2 = B.sum(tss / tss_sum * raw_scores) - else: - raise ValueError( - "Argument `multioutput` must be either `raw_values`," - f" `uniform_average` or `variance_weighted`. Received {multioutput}." - ) - - if adjusted < 0 or not isinstance(adjusted, int): - raise ValueError("`adjusted` parameter should be an integer larger or" " equal to 0.") - - if adjusted != 0: - if adjusted > n_obs - 1: - rank_zero_warn( - "More independent regressions than data points in" - " adjusted r2 score. Falls back to standard r2 score.", - UserWarning, - ) - elif adjusted == n_obs - 1: - rank_zero_warn("Division by zero in adjusted r2 score. Falls back to" " standard r2 score.", UserWarning) - else: - r2 = 1 - (1 - r2) * (n_obs - 1) / (n_obs - adjusted - 1) - return r2 - - -def r2_score( - preds: Tensor, - target: Tensor, - adjusted: int = 0, - multioutput: str = "uniform_average", -) -> Tensor: - r""" - Computes r2 score also known as `R2 Score_Coefficient Determination`_: - - .. math:: R^2 = 1 - \frac{SS_{res}}{SS_{tot}} - - where :math:`SS_{res}=\sum_i (y_i - f(x_i))^2` is the sum of residual squares, and - :math:`SS_{tot}=\sum_i (y_i - \bar{y})^2` is the total sum of squares. Can also calculate - adjusted r2 score given by - - .. math:: R^2_{adj} = 1 - \frac{(1-R^2)(n-1)}{n-k-1} - - where the parameter :math:`k` (the number of independent regressors) should - be provided as the ``adjusted`` argument. - - Args: - preds: estimated labels - target: ground truth labels - adjusted: number of independent regressors for calculating adjusted r2 score. - Default 0 (standard r2 score). - multioutput: Defines aggregation in the case of multiple output scores. Can be one - of the following strings (default is ``'uniform_average'``.): - - * ``'raw_values'`` returns full set of scores - * ``'uniform_average'`` scores are uniformly averaged - * ``'variance_weighted'`` scores are weighted by their individual variances - - Raises: - ValueError: - If both ``preds`` and ``targets`` are not ``1D`` or ``2D`` tensors. - ValueError: - If ``len(preds)`` is less than ``2`` - since at least ``2`` samples are needed to calculate r2 score. - ValueError: - If ``multioutput`` is not one of ``raw_values``, - ``uniform_average`` or ``variance_weighted``. - ValueError: - If ``adjusted`` is not an ``integer`` greater than or equal to ``0``.
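As a quick illustration of the adjusted-r2 correction and its fallback rules described above, a plain-Python sketch (hypothetical helper, not the library API):

def adjusted_r2_sketch(r2, n_obs, adjusted):
    # adjusted == 0 keeps the standard score; adjusted >= n_obs - 1 would
    # divide by zero or flip the sign, so the library warns and falls back.
    if adjusted == 0 or adjusted >= n_obs - 1:
        return r2
    return 1 - (1 - r2) * (n_obs - 1) / (n_obs - adjusted - 1)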
- - Example: - >>> from paddlemetrics.functional import r2_score - >>> target = B.tensor([3, -0.5, 2, 7]) - >>> preds = B.tensor([2.5, 0.0, 2, 8]) - >>> r2_score(preds, target) - tensor(0.9486) - - >>> target = B.tensor([[0.5, 1], [-1, 1], [7, -6]]) - >>> preds = B.tensor([[0, 2], [-1, 2], [8, -5]]) - >>> r2_score(preds, target, multioutput='raw_values') - tensor([0.9654, 0.9082]) - - """ - sum_squared_obs, sum_obs, rss, n_obs = _r2_score_update(preds, target) - return _r2_score_compute(sum_squared_obs, sum_obs, rss, n_obs, adjusted, multioutput) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/spearman.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/spearman.py deleted file mode 100644 index 62f7a9d4a..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/spearman.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Tuple - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.utilities.checks import _check_same_shape - - -def _find_repeats(data: Tensor) -> Tensor: - """Find and return values which appear more than once in the tensor.""" - temp = data.detach().clone() - temp = temp.sort()[0] - - change = B.cat([B.tensor([True], device=temp.device), temp[1:] != temp[:-1]]) - unique = temp[change] - change_idx = B.cat([B.nonzero(change), B.tensor([[temp.numel()]], device=temp.device)]).flatten() - freq = change_idx[1:] - change_idx[:-1] - atleast2 = freq > 1 - return unique[atleast2] - - -def _rank_data(data: Tensor) -> Tensor: - """Calculate the rank for each element of a tensor. The rank refers to the indices of an element in the - corresponding sorted tensor (starting from 1). Duplicates of the same value will be assigned the mean of their - rank. - - Adapted from: `Rank of element tensor`_ - """ - n = data.numel() - rank = B.empty_like(data) - idx = data.argsort() - rank[idx[:n]] = B.arange(1, n + 1, dtype=data.dtype, device=data.device) - - repeats = _find_repeats(data) - for r in repeats: - condition = data == r - rank[condition] = rank[condition].mean() - return rank - - -def _spearman_corrcoef_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]: - """Updates and returns variables required to compute Spearman Correlation Coefficient. Checks for same shape - and type of input tensors. - - Args: - preds: Predicted tensor - target: Ground truth tensor - """ - - if preds.dtype != target.dtype: - raise TypeError( - "Expected `preds` and `target` to have the same data type." - f" Got preds: {preds.dtype} and target: {target.dtype}.
) - _check_same_shape(preds, target) - preds = preds.squeeze() - target = target.squeeze() - if preds.ndim > 1 or target.ndim > 1: - raise ValueError("Expected both predictions and target to be 1 dimensional tensors.") - return preds, target - - -def _spearman_corrcoef_compute(preds: Tensor, target: Tensor, eps: float = 1e-6) -> Tensor: - """Computes Spearman Correlation Coefficient. - - Args: - preds: Predicted tensor - target: Ground truth tensor - eps: Avoids ZeroDivisionError. default: 1e-6 - - Example: - >>> target = B.tensor([3, -0.5, 2, 7]) - >>> preds = B.tensor([2.5, 0.0, 2, 8]) - >>> preds, target = _spearman_corrcoef_update(preds, target) - >>> _spearman_corrcoef_compute(preds, target) - tensor(1.0000) - """ - - preds = _rank_data(preds) - target = _rank_data(target) - - preds_diff = preds - preds.mean() - target_diff = target - target.mean() - - cov = (preds_diff * target_diff).mean() - preds_std = B.sqrt((preds_diff * preds_diff).mean()) - target_std = B.sqrt((target_diff * target_diff).mean()) - - corrcoef = cov / (preds_std * target_std + eps) - return B.clamp(corrcoef, -1.0, 1.0) - - -def spearman_corrcoef(preds: Tensor, target: Tensor) -> Tensor: - r""" - Computes `spearmans rank correlation coefficient`_: - - .. math:: - r_s = \frac{cov(rg_x, rg_y)}{\sigma_{rg_x} \cdot \sigma_{rg_y}} - - where :math:`rg_x` and :math:`rg_y` are the ranks associated with the variables x and y. Spearman's correlation - coefficient corresponds to the standard Pearson's correlation coefficient calculated on the rank variables. - - Args: - preds: estimated scores - target: ground truth scores - - Example: - >>> from paddlemetrics.functional import spearman_corrcoef - >>> target = B.tensor([3, -0.5, 2, 7]) - >>> preds = B.tensor([2.5, 0.0, 2, 8]) - >>> spearman_corrcoef(preds, target) - tensor(1.0000) - - """ - preds, target = _spearman_corrcoef_update(preds, target) - return _spearman_corrcoef_compute(preds, target) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/symmetric_mean_absolute_percentage_error.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/symmetric_mean_absolute_percentage_error.py deleted file mode 100644 index 89eadf9e6..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/symmetric_mean_absolute_percentage_error.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Tuple - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.utilities.checks import _check_same_shape - - -def _symmetric_mean_absolute_percentage_error_update( - preds: Tensor, - target: Tensor, - epsilon: float = 1.17e-06, -) -> Tuple[Tensor, int]: - """Updates and returns variables required to compute Symmetric Mean Absolute Percentage Error. Checks for same - shape of input tensors. - - Args: - preds: Predicted tensor - target: Ground truth tensor - epsilon: Avoids ZeroDivisionError.
default: 1.17e-06 - """ - - _check_same_shape(preds, target) - - abs_diff = B.abs(preds - target) - abs_per_error = abs_diff / B.clamp(B.abs(target) + B.abs(preds), min=epsilon) - - sum_abs_per_error = 2 * B.sum(abs_per_error) - - num_obs = target.numel() - - return sum_abs_per_error, num_obs - - -def _symmetric_mean_absolute_percentage_error_compute(sum_abs_per_error: Tensor, num_obs: int) -> Tensor: - """Computes Symmetric Mean Absolute Percentage Error. - - Args: - sum_abs_per_error: Sum of values of symmetric absolute percentage errors over all observations - (symmetric absolute percentage error = 2 * |target - prediction| / (|target| + |prediction|)) - num_obs: Number of predictions or observations - - Example: - >>> target = B.tensor([1, 10, 1e6]) - >>> preds = B.tensor([0.9, 15, 1.2e6]) - >>> sum_abs_per_error, num_obs = _symmetric_mean_absolute_percentage_error_update(preds, target) - >>> _symmetric_mean_absolute_percentage_error_compute(sum_abs_per_error, num_obs) - tensor(0.2290) - """ - - return sum_abs_per_error / num_obs - - -def symmetric_mean_absolute_percentage_error(preds: Tensor, target: Tensor) -> Tensor: - r""" - Computes symmetric mean absolute percentage error (SMAPE_): - - .. math:: \text{SMAPE} = \frac{2}{n}\sum_{i=1}^n\frac{| y_i - \hat{y_i} |}{\max(| y_i | + | \hat{y_i} |, \epsilon)} - - Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions. - - Args: - preds: estimated labels - target: ground truth labels - - Return: - Tensor with SMAPE. - - Example: - >>> from paddlemetrics.functional import symmetric_mean_absolute_percentage_error - >>> target = B.tensor([1, 10, 1e6]) - >>> preds = B.tensor([0.9, 15, 1.2e6]) - >>> symmetric_mean_absolute_percentage_error(preds, target) - tensor(0.2290) - - """ - sum_abs_per_error, num_obs = _symmetric_mean_absolute_percentage_error_update( - preds, - target, - ) - mean_ape = _symmetric_mean_absolute_percentage_error_compute( - sum_abs_per_error, - num_obs, - ) - - return mean_ape diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/tweedie_deviance.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/tweedie_deviance.py deleted file mode 100644 index 7cb366a2c..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/regression/tweedie_deviance.py +++ /dev/null @@ -1,139 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Tuple - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.utilities.checks import _check_same_shape - - -def _tweedie_deviance_score_update(preds: Tensor, targets: Tensor, power: float = 0.0) -> Tuple[Tensor, Tensor]: - """Updates and returns variables required to compute Deviance Score for the given power. Checks for same shape - of input tensors.
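The per-power branches implemented just below reduce, element-wise, to the following plain-Python sketch (illustrative only; it assumes inputs already validated for the chosen power, and `tweedie_deviance_sketch` is a hypothetical name, not the library API):

import math

def tweedie_deviance_sketch(pred, target, power=0.0):
    if power == 0:  # Normal distribution: plain squared error
        return (pred - target) ** 2
    if power == 1:  # Poisson distribution
        return 2 * (target * math.log(target / pred) + pred - target)
    if power == 2:  # Gamma distribution
        return 2 * (math.log(pred / target) + target / pred - 1)
    # general Tweedie case (power < 0, 1 < power < 2, or power > 2)
    term_1 = max(target, 0.0) ** (2 - power) / ((1 - power) * (2 - power))
    term_2 = target * pred ** (1 - power) / (1 - power)
    term_3 = pred ** (2 - power) / (2 - power)
    return 2 * (term_1 - term_2 + term_3)

# Averaging the four element scores for power=2 on the doctest data below gives ~1.2083.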
- - Args: - preds: Predicted tensor - targets: Ground truth tensor - power: see :func:`tweedie_deviance_score` - - Example: - >>> targets = B.tensor([1.0, 2.0, 3.0, 4.0]) - >>> preds = B.tensor([4.0, 3.0, 2.0, 1.0]) - >>> _tweedie_deviance_score_update(preds, targets, power=2) - (tensor(4.8333), tensor(4)) - """ - _check_same_shape(preds, targets) - - zero_tensor = B.zeros(preds.shape, device=preds.device) - - if 0 < power < 1: - raise ValueError(f"Deviance Score is not defined for power={power}.") - - if power == 0: - deviance_score = B.pow(targets - preds, exponent=2) - elif power == 1: - # Poisson distribution - if B.any(preds <= 0) or B.any(targets < 0): - raise ValueError( - f"For power={power}, 'preds' has to be strictly positive and 'targets' cannot be negative." - ) - - deviance_score = 2 * (targets * B.log(targets / preds) + preds - targets) - elif power == 2: - # Gamma distribution - if B.any(preds <= 0) or B.any(targets <= 0): - raise ValueError(f"For power={power}, both 'preds' and 'targets' have to be strictly positive.") - - deviance_score = 2 * (B.log(preds / targets) + (targets / preds) - 1) - else: - if power < 0: - if B.any(preds <= 0): - raise ValueError(f"For power={power}, 'preds' has to be strictly positive.") - elif 1 < power < 2: - if B.any(preds <= 0) or B.any(targets < 0): - raise ValueError( - f"For power={power}, 'preds' has to be strictly positive and 'targets' cannot be negative." - ) - else: - if B.any(preds <= 0) or B.any(targets <= 0): - raise ValueError(f"For power={power}, both 'preds' and 'targets' have to be strictly positive.") - - term_1 = B.pow(B.max(targets, zero_tensor), 2 - power) / ((1 - power) * (2 - power)) - term_2 = targets * B.pow(preds, 1 - power) / (1 - power) - term_3 = B.pow(preds, 2 - power) / (2 - power) - deviance_score = 2 * (term_1 - term_2 + term_3) - - sum_deviance_score = B.sum(deviance_score) - num_observations = B.tensor(B.numel(deviance_score), device=preds.device) - - return sum_deviance_score, num_observations - - -def _tweedie_deviance_score_compute(sum_deviance_score: Tensor, num_observations: Tensor) -> Tensor: - """Computes Deviance Score. - - Args: - sum_deviance_score: Sum of deviance scores accumulated until now. - num_observations: Number of observations encountered until now. - - Example: - >>> targets = B.tensor([1.0, 2.0, 3.0, 4.0]) - >>> preds = B.tensor([4.0, 3.0, 2.0, 1.0]) - >>> sum_deviance_score, num_observations = _tweedie_deviance_score_update(preds, targets, power=2) - >>> _tweedie_deviance_score_compute(sum_deviance_score, num_observations) - tensor(1.2083) - """ - - return sum_deviance_score / num_observations - - -def tweedie_deviance_score(preds: Tensor, targets: Tensor, power: float = 0.0) -> Tensor: - r""" - Computes the `Tweedie Deviance Score`_ between targets and predictions: - - .. math:: - deviance\_score(\hat{y},y) = - \begin{cases} - (\hat{y} - y)^2, & \text{for }power=0\\ - 2 * (y * log(\frac{y}{\hat{y}}) + \hat{y} - y), & \text{for }power=1\\ - 2 * (log(\frac{\hat{y}}{y}) + \frac{y}{\hat{y}} - 1), & \text{for }power=2\\ - 2 * (\frac{(max(y,0))^{2}}{(1 - power)(2 - power)} - \frac{y(\hat{y})^{1 - power}}{1 - power} + \frac{(\hat{y}) - ^{2 - power}}{2 - power}), & \text{otherwise} - \end{cases} - - where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions. - - Args: - preds: Predicted tensor with shape ``(N,...)`` - targets: Ground truth tensor with shape ``(N,...)`` - power: - - power < 0 : Extreme stable distribution. (Requires: preds > 0.)
- - power = 0 : Normal distribution. (Requires: targets and preds can be any real numbers.) - - power = 1 : Poisson distribution. (Requires: targets >= 0 and preds > 0.) - - 1 < power < 2 : Compound Poisson distribution. (Requires: targets >= 0 and preds > 0.) - - power = 2 : Gamma distribution. (Requires: targets > 0 and preds > 0.) - - power = 3 : Inverse Gaussian distribution. (Requires: targets > 0 and preds > 0.) - - otherwise : Positive stable distribution. (Requires: targets > 0 and preds > 0.) - - Example: - >>> from paddlemetrics.functional import tweedie_deviance_score - >>> targets = B.tensor([1.0, 2.0, 3.0, 4.0]) - >>> preds = B.tensor([4.0, 3.0, 2.0, 1.0]) - >>> tweedie_deviance_score(preds, targets, power=2) - tensor(1.2083) - - """ - sum_deviance_score, num_observations = _tweedie_deviance_score_update(preds, targets, power=power) - return _tweedie_deviance_score_compute(sum_deviance_score, num_observations) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/__init__.py deleted file mode 100644 index d05abb6af..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from paddlemetrics.functional.retrieval.average_precision import retrieval_average_precision # noqa: F401 -from paddlemetrics.functional.retrieval.fall_out import retrieval_fall_out # noqa: F401 -from paddlemetrics.functional.retrieval.hit_rate import retrieval_hit_rate # noqa: F401 -from paddlemetrics.functional.retrieval.ndcg import retrieval_normalized_dcg # noqa: F401 -from paddlemetrics.functional.retrieval.precision import retrieval_precision # noqa: F401 -from paddlemetrics.functional.retrieval.r_precision import retrieval_r_precision # noqa: F401 -from paddlemetrics.functional.retrieval.recall import retrieval_recall # noqa: F401 -from paddlemetrics.functional.retrieval.reciprocal_rank import retrieval_reciprocal_rank # noqa: F401 diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/average_precision.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/average_precision.py deleted file mode 100644 index 0b067a892..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/average_precision.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -import paddleext.torchapi as B -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.utilities.checks import _check_retrieval_functional_inputs - - -def retrieval_average_precision(preds: Tensor, target: Tensor) -> Tensor: - """Computes average precision (for information retrieval), as explained in `IR Average precision`_. - - ``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``, - ``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be `float`, - otherwise an error is raised. - - Args: - preds: estimated probabilities of each document to be relevant. - target: ground truth about each document being relevant or not. - - Return: - a single-value tensor with the average precision (AP) of the predictions ``preds`` w.r.t. the labels ``target``. - - Example: - >>> from paddlemetrics.functional import retrieval_average_precision - >>> preds = tensor([0.2, 0.3, 0.5]) - >>> target = tensor([True, False, True]) - >>> retrieval_average_precision(preds, target) - tensor(0.8333) - """ - preds, target = _check_retrieval_functional_inputs(preds, target) - - if not target.sum(): - return tensor(0.0, device=preds.device) - - target = target[B.argsort(preds, dim=-1, descending=True)] - positions = B.arange(1, len(target) + 1, device=target.device, dtype=B.float32)[target > 0] - res = B.div((B.arange(len(positions), device=positions.device, dtype=B.float32) + 1), positions).mean() - return res diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/fall_out.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/fall_out.py deleted file mode 100644 index 10c5762b0..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/fall_out.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.utilities.checks import _check_retrieval_functional_inputs - - -def retrieval_fall_out(preds: Tensor, target: Tensor, k: Optional[int] = None) -> Tensor: - """Computes the Fall-out (for information retrieval), as explained in `IR Fall-out`_. Fall-out is the fraction - of non-relevant documents retrieved among all the non-relevant documents. - - ``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``, - ``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be `float`, - otherwise an error is raised. If you want to measure Fall-out@K, ``k`` must be a positive integer. - - Args: - preds: estimated probabilities of each document to be relevant. - target: ground truth about each document being relevant or not.
- k: consider only the top k elements (default: None, which considers them all) - - Returns: - a single-value tensor with the fall-out (at ``k``) of the predictions ``preds`` w.r.t. the labels ``target``. - - Raises: - ValueError: - If the ``k`` parameter is neither ``None`` nor a positive integer - - Example: - >>> from paddlemetrics.functional import retrieval_fall_out - >>> preds = tensor([0.2, 0.3, 0.5]) - >>> target = tensor([True, False, True]) - >>> retrieval_fall_out(preds, target, k=2) - tensor(1.) - """ - preds, target = _check_retrieval_functional_inputs(preds, target) - - k = preds.shape[-1] if k is None else k - - if not (isinstance(k, int) and k > 0): - raise ValueError("`k` has to be a positive integer or None") - - target = 1 - target - - if not target.sum(): - return tensor(0.0, device=preds.device) - - relevant = target[B.argsort(preds, dim=-1, descending=True)][:k].sum().float() - return relevant / target.sum() diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/hit_rate.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/hit_rate.py deleted file mode 100644 index 83336a50b..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/hit_rate.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.utilities.checks import _check_retrieval_functional_inputs - - -def retrieval_hit_rate(preds: Tensor, target: Tensor, k: Optional[int] = None) -> Tensor: - """Computes the hit rate (for information retrieval). The hit rate is 1.0 if there is at least one relevant - document among all the top `k` retrieved documents. - - ``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``, - ``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be `float`, - otherwise an error is raised. If you want to measure HitRate@K, ``k`` must be a positive integer. - - Args: - preds: estimated probabilities of each document to be relevant. - target: ground truth about each document being relevant or not. - k: consider only the top k elements (default: None, which considers them all) - - Returns: - a single-value tensor with the hit rate (at ``k``) of the predictions ``preds`` w.r.t. the labels ``target``. - - Raises: - ValueError: - If the ``k`` parameter is neither ``None`` nor a positive integer - - Example: - >>> preds = tensor([0.2, 0.3, 0.5]) - >>> target = tensor([True, False, True]) - >>> retrieval_hit_rate(preds, target, k=2) - tensor(1.)
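For intuition, the whole hit-rate computation fits in a few lines of plain Python, mirroring the tensor code that follows (hypothetical `hit_rate_sketch`, not the library API):

def hit_rate_sketch(preds, target, k=None):
    # Rank documents by predicted score, look at the top k, and report
    # 1.0 as soon as any of them is relevant.
    k = len(preds) if k is None else k
    ranked = sorted(zip(preds, target), key=lambda pair: pair[0], reverse=True)
    return 1.0 if any(rel for _, rel in ranked[:k]) else 0.0

# hit_rate_sketch([0.2, 0.3, 0.5], [True, False, True], k=2) -> 1.0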
- """ - preds, target = _check_retrieval_functional_inputs(preds, target) - - if k is None: - k = preds.shape[-1] - - if not (isinstance(k, int) and k > 0): - raise ValueError("`k` has to be a positive integer or None") - - relevant = target[B.argsort(preds, dim=-1, descending=True)][:k].sum() - return (relevant > 0).float() diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/ndcg.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/ndcg.py deleted file mode 100644 index 73fedad5e..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/ndcg.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.utilities.checks import _check_retrieval_functional_inputs - - -def _dcg(target: Tensor) -> Tensor: - """Computes Discounted Cumulative Gain for input tensor.""" - denom = B.log2(B.arange(target.shape[-1], device=target.device) + 2.0) - return (target / denom).sum(dim=-1) - - -def retrieval_normalized_dcg(preds: Tensor, target: Tensor, k: Optional[int] = None) -> Tensor: - """Computes `Normalized Discounted Cumulative Gain`_ (for information retrieval). - - ``preds`` and ``target`` should be of the same shape and live on the same device. - ``target`` must be either `bool` or `integers` and ``preds`` must be `float`, - otherwise an error is raised. - - Args: - preds: estimated probabilities of each document to be relevant. - target: ground truth about each document relevance. - k: consider only the top k elements (default: None, which considers them all) - - Return: - a single-value tensor with the nDCG of the predictions ``preds`` w.r.t. the labels ``target``. 
- - Raises: - ValueError: - If ``k`` parameter is not `None` or an integer larger than 0 - - Example: - >>> from paddlemetrics.functional import retrieval_normalized_dcg - >>> preds = B.tensor([.1, .2, .3, 4, 70]) - >>> target = B.tensor([10, 0, 0, 1, 5]) - >>> retrieval_normalized_dcg(preds, target) - tensor(0.6957) - """ - preds, target = _check_retrieval_functional_inputs(preds, target, allow_non_binary_target=True) - - k = preds.shape[-1] if k is None else k - - if not (isinstance(k, int) and k > 0): - raise ValueError("`k` has to be a positive integer or None") - - sorted_target = target[B.argsort(preds, dim=-1, descending=True)][:k] - ideal_target = B.sort(target, descending=True)[0][:k] - - ideal_dcg = _dcg(ideal_target) - target_dcg = _dcg(sorted_target) - - # filter undefined scores - all_irrelevant = ideal_dcg == 0 - target_dcg[all_irrelevant] = 0 - target_dcg[~all_irrelevant] /= ideal_dcg[~all_irrelevant] - - return target_dcg.mean() diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/precision.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/precision.py deleted file mode 100644 index 83bd11727..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/precision.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.utilities.checks import _check_retrieval_functional_inputs - - -def retrieval_precision(preds: Tensor, target: Tensor, k: Optional[int] = None) -> Tensor: - """Computes the precision metric (for information retrieval). Precision is the fraction of relevant documents - among all the retrieved documents. - - ``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``, - ``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be `float`, - otherwise an error is raised. If you want to measure Precision@K, ``k`` must be a positive integer. - - Args: - preds: estimated probabilities of each document to be relevant. - target: ground truth about each document being relevant or not. - k: consider only the top k elements (default: None, which considers them all) - - Returns: - a single-value tensor with the precision (at ``k``) of the predictions ``preds`` w.r.t. the labels ``target``. 
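- - Note: the score divides by ``k`` itself, not by the number of returned documents, so choosing ``k`` larger than the length of the document list lowers the score.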
- - Raises: - ValueError: - If ``k`` parameter is not `None` or an integer larger than 0 - - Example: - >>> preds = tensor([0.2, 0.3, 0.5]) - >>> target = tensor([True, False, True]) - >>> retrieval_precision(preds, target, k=2) - tensor(0.5000) - """ - preds, target = _check_retrieval_functional_inputs(preds, target) - - if k is None: - k = preds.shape[-1] - - if not (isinstance(k, int) and k > 0): - raise ValueError("`k` has to be a positive integer or None") - - if not target.sum(): - return tensor(0.0, device=preds.device) - - relevant = target[B.argsort(preds, dim=-1, descending=True)][:k].sum().float() - return relevant / k diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/r_precision.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/r_precision.py deleted file mode 100644 index d26e32f8b..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/r_precision.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import paddleext.torchapi as B -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.utilities.checks import _check_retrieval_functional_inputs - - -def retrieval_r_precision(preds: Tensor, target: Tensor) -> Tensor: - """Computes the r-precision metric (for information retrieval). R-Precision is the fraction of relevant - documents among all the top ``k`` retrieved documents where ``k`` is equal to the total number of relevant - documents. - - ``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``, - ``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be `float`, - otherwise an error is raised. - - Args: - preds: estimated probabilities of each document to be relevant. - target: ground truth about each document being relevant or not. - - Returns: - a single-value tensor with the r-precision of the predictions ``preds`` w.r.t. the labels ``target``. - - Example: - >>> preds = tensor([0.2, 0.3, 0.5]) - >>> target = tensor([True, False, True]) - >>> retrieval_r_precision(preds, target) - tensor(0.5000) - """ - preds, target = _check_retrieval_functional_inputs(preds, target) - - relevant_number = target.sum() - if not relevant_number: - return tensor(0.0, device=preds.device) - - relevant = target[B.argsort(preds, dim=-1, descending=True)][:relevant_number].sum().float() - return relevant / relevant_number diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/recall.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/recall.py deleted file mode 100644 index e00d450c3..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/recall.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright The PyTorch Lightning team.
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.utilities.checks import _check_retrieval_functional_inputs - - -def retrieval_recall(preds: Tensor, target: Tensor, k: Optional[int] = None) -> Tensor: - """Computes the recall metric (for information retrieval). Recall is the fraction of relevant documents - retrieved among all the relevant documents. - - ``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``, - ``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be `float`, - otherwise an error is raised. If you want to measure Recall@K, ``k`` must be a positive integer. - - Args: - preds: estimated probabilities of each document to be relevant. - target: ground truth about each document being relevant or not. - k: consider only the top k elements (default: None, which considers them all) - - Returns: - a single-value tensor with the recall (at ``k``) of the predictions ``preds`` w.r.t. the labels ``target``. - - Raises: - ValueError: - If ``k`` parameter is not `None` or an integer larger than 0 - - Example: - >>> from paddlemetrics.functional import retrieval_recall - >>> preds = tensor([0.2, 0.3, 0.5]) - >>> target = tensor([True, False, True]) - >>> retrieval_recall(preds, target, k=2) - tensor(0.5000) - """ - preds, target = _check_retrieval_functional_inputs(preds, target) - - if k is None: - k = preds.shape[-1] - - if not (isinstance(k, int) and k > 0): - raise ValueError("`k` has to be a positive integer or None") - - if not target.sum(): - return tensor(0.0, device=preds.device) - - relevant = target[B.argsort(preds, dim=-1, descending=True)][:k].sum().float() - return relevant / target.sum() diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/reciprocal_rank.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/reciprocal_rank.py deleted file mode 100644 index c92c223ec..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/retrieval/reciprocal_rank.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
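-# A sketch of the computation implemented below: documents are sorted by -# ``preds`` in descending order and RR = 1 / rank of the first relevant -# document. For preds = [0.2, 0.3, 0.5] and target = [False, True, False] the -# ranking is [0.5, 0.3, 0.2]; the first relevant document sits at rank 2, so -# RR = 1 / 2 = 0.5, matching the doctest below.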
-import paddleext.torchapi as B -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.utilities.checks import _check_retrieval_functional_inputs - - -def retrieval_reciprocal_rank(preds: Tensor, target: Tensor) -> Tensor: - """Computes reciprocal rank (for information retrieval). See `Mean Reciprocal Rank`_ - - ``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``, - 0 is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be `float`, - otherwise an error is raised. - - Args: - preds: estimated probabilities of each document to be relevant. - target: ground truth about each document being relevant or not. - - Return: - a single-value tensor with the reciprocal rank (RR) of the predictions ``preds`` w.r.t. the labels ``target``. - - Example: - >>> from paddlemetrics.functional import retrieval_reciprocal_rank - >>> preds = B.tensor([0.2, 0.3, 0.5]) - >>> target = B.tensor([False, True, False]) - >>> retrieval_reciprocal_rank(preds, target) - tensor(0.5000) - """ - preds, target = _check_retrieval_functional_inputs(preds, target) - - if not target.sum(): - return tensor(0.0, device=preds.device) - - target = target[B.argsort(preds, dim=-1, descending=True)] - position = B.nonzero(target).view(-1) - res = 1.0 / (position[0] + 1.0) - return res diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/self_supervised.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/self_supervised.py deleted file mode 100644 index 9af407aae..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/self_supervised.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from warnings import warn - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.functional.pairwise import pairwise_cosine_similarity, pairwise_linear_similarity - - -def embedding_similarity( - batch: Tensor, similarity: str = "cosine", reduction: str = "none", zero_diagonal: bool = True -) -> Tensor: - """Computes representation similarity. - - Example: - >>> from paddlemetrics.functional import embedding_similarity - >>> embeddings = B.tensor([[1., 2., 3., 4.], [1., 2., 3., 4.], [4., 5., 6., 7.]]) - >>> embedding_similarity(embeddings) - tensor([[0.0000, 1.0000, 0.9759], - [1.0000, 0.0000, 0.9759], - [0.9759, 0.9759, 0.0000]]) - - Args: - batch: (batch, dim) - similarity: 'dot' or 'cosine' - reduction: 'none', 'sum', 'mean' (all along dim -1) - zero_diagonal: if True, the diagonals are set to zero - - Return: - A square matrix (batch, batch) with the similarity scores between all elements - If sum or mean are used, then returns (b, 1) with the reduced value for each row - - .. deprecated:: v0.6 - Use :func:`paddlemetrics.functional.pairwise_cosine_similarity` when `similarity='cosine'` - else use :func:`paddlemetrics.functional.pairwise_linear_similarity`. Will be removed in v0.7.
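- - Example (dot-product variant; output omitted, shown for illustration only): - >>> embedding_similarity(embeddings, similarity='dot')  # doctest: +SKIP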
- """ - warn( - "Function `embedding_similarity` was deprecated v0.6 and will be removed in v0.7." - " Use `paddlemetrics.functional.pairwise_cosine_similarity` instead when argument" - " similarity='cosine' else use `paddlemetrics.functional.pairwise_linear_similarity", - DeprecationWarning, - ) - if similarity == "cosine": - return pairwise_cosine_similarity(batch, reduction=reduction, zero_diagonal=zero_diagonal) - return pairwise_linear_similarity(batch, reduction=reduction, zero_diagonal=zero_diagonal) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/__init__.py deleted file mode 100644 index 971708401..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from paddlemetrics.functional.text.bleu import bleu_score # noqa: F401 -from paddlemetrics.functional.text.sacre_bleu import sacre_bleu_score # noqa: F401 -from paddlemetrics.functional.text.wer import wer # noqa: F401 diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/bert.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/bert.py deleted file mode 100644 index 168be6eee..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/bert.py +++ /dev/null @@ -1,650 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import csv -import math -import urllib -import warnings -from collections import Counter, defaultdict -from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor, nn -from paddleext.torchapi.data import DataLoader, Dataset - -from paddlemetrics.utilities.imports import _TQDM_AVAILABLE, _TRANSFORMERS_AVAILABLE - -if _TRANSFORMERS_AVAILABLE: - from transformers import AutoModel, AutoTokenizer - -if _TQDM_AVAILABLE: - import tqdm - - -def _preprocess_text( - text: List[str], - tokenizer: Any, - max_length: int = 512, - truncation: bool = True, - sort_according_length: bool = True, - own_tokenizer: bool = False, -) -> Dict[str, Tensor]: - """Default text pre-processing function using `transformers` `AutoTokenizer` instance. - - Args: - text: - An iterable of sentences. - tokenizer: - Either `AutoTokenizer` instance from `transformers` package, or a user's own tokenizer. 
- max_length: - A maximum sequence length. - truncation: - An indication of whether tokenized sequences should be truncated to ``max_length``. - sort_according_length: - An indication of whether tokenized sequences should be sorted from shortest to longest. This enables - dynamic padding during the embedding calculation and thereby speeds up inference. - own_tokenizer: - An indication of whether a non-default user's own tokenizer is used. - - Return: - A dictionary of tokenized sentences including input_ids and attention_mask. - - Raises: - BaseException: - If a tokenization with a user's own tokenizer is not successful. - """ - if not own_tokenizer: - tokenized_data = tokenizer( - text, padding="max_length", max_length=max_length, truncation=truncation, return_tensors="pt" - ) - else: - try: - tokenized_data = tokenizer(text, max_length) - except BaseException as e: - raise BaseException(f"Tokenization was not successful: {e}") - - input_ids, attention_mask = ( - _sort_data_according_length(tokenized_data["input_ids"], tokenized_data["attention_mask"]) - if sort_according_length - else (tokenized_data["input_ids"], tokenized_data["attention_mask"]) - ) - return {"input_ids": input_ids, "attention_mask": attention_mask} - - -def _process_attention_mask_for_special_tokens(attention_mask: Tensor) -> Tensor: - """Process attention mask to be zero for special [CLS] and [SEP] tokens as they're not included in a - calculation for BERT score. - - Args: - attention_mask: An attention mask to be returned, for example, by a `transformers` tokenizer. - - Return: - A processed attention mask. - """ - # Make attention_mask zero for [CLS] token - attention_mask[:, 0] = 0 - # Make attention_mask zero for [SEP] token - sep_token_position = (attention_mask - 0.1).cumsum(-1).argmax(-1) - attention_mask[B.arange(attention_mask.size(0)).long(), sep_token_position] = 0 - return attention_mask - - -def _sort_data_according_length(input_ids: Tensor, attention_mask: Tensor) -> Tuple[Tensor, Tensor]: - """Sort tokenized sentences from the shortest to the longest one.""" - sorted_indices = attention_mask.sum(1).argsort() - input_ids = input_ids[sorted_indices] - attention_mask = attention_mask[sorted_indices] - return input_ids, attention_mask - - -def _input_data_collator( - batch: Dict[str, Tensor], device: Optional[Union[str, B.device]] = None -) -> Dict[str, Tensor]: - """Helper function that trims model inputs to the longest sequence within the batch and puts the input on the - proper device.""" - max_len = int(batch["attention_mask"].sum(1).max().item()) - input_ids = batch["input_ids"][:, :max_len].to(device) - attention_mask = batch["attention_mask"][:, :max_len].to(device) - batch.update({"input_ids": input_ids, "attention_mask": attention_mask}) - return batch - - -def _output_data_collator(model_output: Tensor, attention_mask: Tensor, target_len: int) -> Tuple[Tensor, Tensor]: - """Helper function that pads the model output and attention mask to the target length.""" - zeros_shape = list(model_output.shape) - zeros_shape[2] = target_len - zeros_shape[2] - model_output = B.cat( - [model_output, B.zeros(zeros_shape, dtype=model_output.dtype).to(model_output.device)], dim=2 - ) - zeros = B.zeros(zeros_shape[0], zeros_shape[2], dtype=attention_mask.dtype).to(attention_mask.device) - attention_mask = B.cat([attention_mask, zeros], dim=1) - return model_output, attention_mask - - -class TextDataset(Dataset): - """PyTorch dataset class for storing tokenized sentences
and other properties used for BERT score - calculation.""" - - def __init__( - self, - text: List[str], - tokenizer: Any, - max_length: int = 512, - preprocess_text_fn: Callable[[List[str], Any, int], Dict[str, Tensor]] = _preprocess_text, - idf: bool = False, - tokens_idf: Optional[Dict[int, float]] = None, - ) -> None: - """ - Args: - text: - An iterable of sentences. - tokenizer: - `AutoTokenizer` instance from `transformers` package. - max_length: - A maximum sequence length. - preprocess_text_fn: - A function used for processing the input sentences. - idf: - An indication of whether to calculate token inverse document frequencies to weight the model embeddings. - tokens_idf: - Inverse document frequencies (these should be calculated on reference sentences). - """ - self.text = preprocess_text_fn(text, tokenizer, max_length) - self.max_length = self.text["input_ids"].shape[1] - self.num_sentences = len(text) - self.idf = idf - self.tokens_idf = {} - if idf: - self.tokens_idf = tokens_idf if tokens_idf is not None else self._get_tokens_idf() - - def __getitem__(self, idx: int) -> Dict[str, Tensor]: - input_ids = self.text["input_ids"][idx, :] - attention_mask = self.text["attention_mask"][idx, :] - inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask} - if self.idf: - input_ids_idf = B.tensor([self.tokens_idf[input_idx] for input_idx in input_ids.tolist()]) - inputs_dict["input_ids_idf"] = input_ids_idf - return inputs_dict - - def __len__(self) -> int: - return self.num_sentences - - def _get_tokens_idf(self) -> Dict[int, float]: - """Calculate token inverse document frequencies. - - Return: - A python dictionary containing inverse document frequencies for token ids. - """ - token_counter: Counter = Counter() - for tokens in map(self._set_of_tokens, self.text["input_ids"]): - token_counter.update(tokens) - - tokens_idf: Dict[int, float] = defaultdict(self._get_tokens_idf_default_value) - tokens_idf.update( - {idx: math.log((self.num_sentences + 1) / (occurrence + 1)) for idx, occurrence in token_counter.items()} - ) - return tokens_idf - - def _get_tokens_idf_default_value(self) -> float: - """Helper function that ensures the `defaultdict` can be pickled.""" - return math.log((self.num_sentences + 1) / 1) - - @staticmethod - def _set_of_tokens(input_ids: Tensor) -> Set: - """Return set of tokens from the `input_ids` `B.Tensor`.""" - return set(input_ids.tolist()) - - -class TokenizedDataset(TextDataset): - """The child class of `TextDataset` class used with already tokenized data.""" - - def __init__( - self, - input_ids: Tensor, - attention_mask: Tensor, - idf: bool = False, - tokens_idf: Optional[Dict[int, float]] = None, - ) -> None: - """ - Args: - input_ids: - Input ids (`B.Tensor`). - attention_mask: - Attention mask (`B.Tensor`). - idf: - An indication of whether to calculate token inverse document frequencies to weight the model embeddings. - tokens_idf: - Inverse document frequencies (these should be calculated on reference sentences).
- """ - self.text = dict(zip(["input_ids", "attention_mask"], _sort_data_according_length(input_ids, attention_mask))) - self.text = _input_data_collator(self.text) - self.num_sentences = len(self.text["input_ids"]) - self.max_length = self.text["input_ids"].shape[1] - self.idf = idf - self.tokens_idf = {} - if idf: - self.tokens_idf = tokens_idf if tokens_idf is not None else self._get_tokens_idf() - - -def _get_progress_bar(dataloader: DataLoader, verbose: bool = False) -> Union[DataLoader, "tqdm.auto.tqdm"]: - """Helper function returning either the dataloader itself when `verbose = False`, or it wraps the dataloader with - `tqdm.auto.tqdm`, when `verbose = True` to display a progress bar during the embbeddings calculation.""" - return tqdm.auto.tqdm(dataloader) if verbose else dataloader - - -def _check_shape_of_model_output(output: Tensor, input_ids: Tensor) -> None: - """Check if the shape of the user's own model output.""" - bs, seq_len = input_ids.shape[:2] - invalid_out_shape = len(output.shape) != 3 or output.shape[0] != bs or output.shape[1] != seq_len - if invalid_out_shape: - raise ValueError( - "The model output must be `B.Tensor` of a shape `[batch_size, seq_len, model_dim]` " - f"i.e. [{bs}, {seq_len}. , `model_dim`], but got {output.shape}." - ) - - -def _get_embeddings_and_idf_scale( - dataloader: DataLoader, - target_len: int, - model: nn.Module, - device: Optional[Union[str, B.device]] = None, - num_layers: Optional[int] = None, - all_layers: bool = False, - idf: bool = False, - verbose: bool = False, - user_forward_fn: Callable[[nn.Module, Dict[str, Tensor]], Tensor] = None, -) -> Tuple[Tensor, Tensor]: - """Calculate sentence embeddings and the inverse-document-frequence scaling factor. - Args: - dataloader: - `B.utils.data.DataLoader` instance. - target_len: - A length of the longest sequence in the data. Used for padding the model output. - model: - BERT model. - device: - A device to be used for calculation. - num_layers: - The layer of representation to use. - all_layers: - An indication whether representation from all model layers should be used for BERTScore. - idf: - An Indication whether normalization using inverse document frequencies should be used. - verbose: - An indication of whether a progress bar to be displayed during the embeddings calculation. - user_forward_fn: - A user's own forward function used in a combination with `user_model`. This function must take `user_model` - and a python dictionary of containing `"input_ids"` and `"attention_mask"` represented by `B.Tensor` - as an input and return the model's output represented by the single `B.Tensor`. - - Return: - A tuple of B.Tensors containing the model's embeddings and the normalized tokens IDF. - When `idf = False`, tokens IDF is not calculated, and a matrix of mean weights is returned instead. - For a single sentence, `mean_weight = 1/seq_len`, where `seq_len` is a sum over the corresponding - `attention_mask`. - - Raises: - ValueError: - If `all_layers = True` and a model, which is not from the `transformers` package, is used. 
- """ - embeddings_list: List[Tensor] = [] - idf_scale_list: List[Tensor] = [] - for batch in _get_progress_bar(dataloader, verbose): - with B.no_grad(): - batch = _input_data_collator(batch, device) - # Output shape: batch_size x num_layers OR 1 x sequence_length x bert_dim - if not all_layers: - if not user_forward_fn: - out = model(batch["input_ids"], batch["attention_mask"], output_hidden_states=True) - out = out.hidden_states[num_layers if num_layers is not None else -1] - else: - out = user_forward_fn(model, batch) - _check_shape_of_model_output(out, batch["input_ids"]) - out = out.unsqueeze(1) - else: - if user_forward_fn: - raise ValueError( - "The option `all_layers=True` can be used only with default `transformers` models." - ) - out = model(batch["input_ids"], batch["attention_mask"], output_hidden_states=True) - out = B.cat([o.unsqueeze(1) for o in out.hidden_states], dim=1) - - out /= out.norm(dim=-1).unsqueeze(-1) # normalize embeddings - out, attention_mask = _output_data_collator(out, batch["attention_mask"], target_len) - processed_attention_mask = _process_attention_mask_for_special_tokens(attention_mask) - # Multiply embeddings with attention_mask (b=batch_size, l=num_layers, s=seq_len, d=emb_dim) - out = B.einsum("blsd, bs -> blsd", out, processed_attention_mask) - embeddings_list.append(out.cpu()) - - # Calculate weighted (w.r.t. sentence length) input_ids IDF matrix - input_ids_idf = ( - batch["input_ids_idf"] * processed_attention_mask if idf else processed_attention_mask.type(out.dtype) - ) - input_ids_idf /= input_ids_idf.sum(-1, keepdim=True) - idf_scale_list.append(input_ids_idf) - - embeddings = B.cat(embeddings_list) - idf_scale = B.cat(idf_scale_list) - - return embeddings, idf_scale - - -def _get_scaled_precision_or_recall(cos_sim: Tensor, metric: str, idf_scale: Tensor) -> Tensor: - """Helper function that calculates precision or recall, transpose it and scale it with idf_scale factor.""" - dim = 3 if metric == "precision" else 2 - res = cos_sim.max(dim=dim).values - res = B.einsum("bls, bs -> bls", res, idf_scale).sum(-1) - # We transpose the results and squeeze if possible to match the format of the original BERTScore implementation - res = res.transpose(0, 1).squeeze() - return res - - -def _get_precision_recall_f1( - pred_embeddings: Tensor, ref_embeddings: Tensor, pred_idf_scale: Tensor, ref_idf_scale: Tensor -) -> Tuple[Tensor, Tensor, Tensor]: - """Calculate precision, recall and F1 score over candidate and reference sentences. - - Args: - pred_embeddings: Embeddings of candidate sentenecs. - ref_embeddings: Embeddings of reference sentences. - pred_idf_scale: An IDF scale factor for candidate sentences. - ref_idf_scale: An IDF scale factor for reference sentences. - - Return: - Tensors containing precision, recall and F1 score, respectively. 
- """ - # Dimensions: b = batch_size, l = num_layers, p = predictions_seq_len, r = references_seq_len, d = bert_dim - cos_sim = B.einsum("blpd, blrd -> blpr", pred_embeddings, ref_embeddings) - # Final metrics shape = (batch_size * num_layers | batch_size) - precision = _get_scaled_precision_or_recall(cos_sim, "precision", pred_idf_scale) - recall = _get_scaled_precision_or_recall(cos_sim, "recall", ref_idf_scale) - - f1_score = 2 * precision * recall / (precision + recall) - f1_score = f1_score.masked_fill(B.isnan(f1_score), 0.0) - - return precision, recall, f1_score - - -def _get_hash(model_name_or_path: Optional[str] = None, num_layers: Optional[int] = None, idf: bool = False) -> str: - """Compute `BERT_score`_ (copied and adjusted)""" - msg = f"{model_name_or_path}_L{num_layers}{'_idf' if idf else '_no-idf'}" - return msg - - -def _read_csv_from_local_file(baseline_path: str) -> Tensor: - """Helper function which reads baseline the csv file from the local file. - - This method implemented to avoid `pandas` dependency. - """ - with open(baseline_path) as fname: - csv_file = csv.reader(fname) - baseline_list = [[float(item) for item in row] for idx, row in enumerate(csv_file) if idx > 0] - baseline = B.tensor(baseline_list)[:, 1:] - return baseline - - -def _read_csv_from_url(baseline_url: str) -> Tensor: - """Helper function which reads the baseline csv file from URL. - - This method is implemented to avoid `pandas` dependency. - """ - with urllib.request.urlopen(baseline_url) as http_request: # type: ignore - baseline_list = [ - [float(item) for item in row.strip().decode("utf-8").split(",")] - for idx, row in enumerate(http_request) - if idx > 0 - ] - baseline = B.tensor(baseline_list)[:, 1:] - return baseline - - -def _load_baseline( - lang: str = "en", - model_name_or_path: Optional[str] = None, - baseline_path: Optional[str] = None, - baseline_url: Optional[str] = None, -) -> Optional[Tensor]: - """Load a CSV file with the baseline values used for rescaling.""" - if baseline_path: - baseline: Optional[Tensor] = _read_csv_from_local_file(baseline_path) - elif baseline_url: - baseline = _read_csv_from_url(baseline_url) - # Read default baseline from the original `bert-score` package https://github.com/Tiiiger/bert_score - elif lang and model_name_or_path: - _URL_BASE = "https://raw.githubusercontent.com/Tiiiger/bert_score/master/bert_score/rescale_baseline" - baseline_url = f"{_URL_BASE}/{lang}/{model_name_or_path}.tsv" - baseline = _read_csv_from_url(baseline_url) - else: - baseline = None - warnings.warn("Baseline was not successfully loaded. 
No baseline is going to be used.") - - return baseline - - -def _rescale_metrics_with_baseline( - precision: Tensor, - recall: Tensor, - f1_score: Tensor, - baseline: Tensor, - num_layers: Optional[int] = None, - all_layers: bool = False, -) -> Tuple[Tensor, Tensor, Tensor]: - """Rescale the computed metrics with the pre-computed baseline.""" - if num_layers is None and all_layers is False: - num_layers = -1 - all_metrics = B.stack([precision, recall, f1_score], dim=-1) - baseline_scale = baseline.unsqueeze(1) if all_layers else baseline[num_layers] - all_metrics = (all_metrics - baseline_scale) / (1 - baseline_scale) - - return all_metrics[..., 0], all_metrics[..., 1], all_metrics[..., 2] - - -def bert_score( - predictions: Union[List[str], Dict[str, Tensor]], - references: Union[List[str], Dict[str, Tensor]], - model_name_or_path: Optional[str] = None, - num_layers: Optional[int] = None, - all_layers: bool = False, - model: Optional[nn.Module] = None, - user_tokenizer: Any = None, - user_forward_fn: Callable[[nn.Module, Dict[str, Tensor]], Tensor] = None, - verbose: bool = False, - idf: bool = False, - device: Optional[Union[str, B.device]] = None, - max_length: int = 512, - batch_size: int = 64, - num_threads: int = 4, - return_hash: bool = False, - lang: str = "en", - rescale_with_baseline: bool = False, - baseline_path: Optional[str] = None, - baseline_url: Optional[str] = None, -) -> Dict[str, Union[List[float], str]]: - """`Bert_score Evaluating Text Generation`_ leverages the pre-trained contextual embeddings from BERT and - matches words in candidate and reference sentences by cosine similarity. It has been shown to correlate with - human judgment on sentence-level and system-level evaluation. Moreover, BERTScore computes precision, recall, - and F1 measure, which can be useful for evaluating different language generation tasks. - - This implementation follows the original implementation from `BERT_score`_ - - Args: - predictions: - Either an iterable of predicted sentences or a `Dict[str, B.Tensor]` containing `input_ids` and - `attention_mask` `B.Tensor`. - references: - Either an iterable of target sentences or a `Dict[str, B.Tensor]` containing `input_ids` and - `attention_mask` `B.Tensor`. - model_name_or_path: - A name or a model path used to load `transformers` pretrained model. - num_layers: - A layer of representation to use. - all_layers: - An indication of whether representations from all of the model's layers should be used. - If `all_layers = True`, the argument `num_layers` is ignored. - model: - A user's own model. Must be an `nn.Module` instance. - user_tokenizer: - A user's own tokenizer used with the user's own model. This must be an instance with the `__call__` method. - This method must take an iterable of sentences (`List[str]`) and must return a python dictionary - containing `"input_ids"` and `"attention_mask"` represented by `B.Tensor`. It is up to the user's model - whether `"input_ids"` is a `B.Tensor` of input ids or embedding vectors. - This tokenizer must prepend an equivalent of `[CLS]` token and append an equivalent of `[SEP]` token - as `transformers` tokenizer does. - user_forward_fn: - A user's own forward function used in a combination with `user_model`. This function must take `user_model` - and a python dictionary containing `"input_ids"` and `"attention_mask"` represented by `B.Tensor` - as an input and return the model's output represented by a single `B.Tensor`.
- verbose: - An indication of whether a progress bar is to be displayed during the embeddings calculation. - idf: - An indication of whether normalization using inverse document frequencies should be used. - device: - A device to be used for calculation. - max_length: - A maximum length of input sequences. Sequences longer than `max_length` are to be trimmed. - batch_size: - A batch size used for model processing. - num_threads: - A number of threads to use for a dataloader. - return_hash: - An indication of whether the corresponding `hash_code` should be returned. - lang: - A language of input sentences. It is used when the scores are rescaled with a baseline. - rescale_with_baseline: - An indication of whether bertscore should be rescaled with a pre-computed baseline. - When a pretrained model from `transformers` is used, the corresponding baseline is downloaded - from the original `bert-score` package from `BERT_score`_ if available. - In other cases, please specify a path to the baseline csv/tsv file, which must follow the formatting - of the files from `BERT_score`_ - baseline_path: - A path to the user's own local csv/tsv file with the baseline scale. - baseline_url: - A URL to the user's own csv/tsv file with the baseline scale. - - Returns: - Python dictionary containing the keys `precision`, `recall` and `f1` with corresponding values. - - Raises: - ValueError: - If `len(predictions) != len(references)`. - ValueError: - If the `tqdm` package is required and not installed. - ValueError: - If the `transformers` package is required and not installed. - ValueError: - If `num_layers` is larger than the number of the model layers. - ValueError: - If invalid input is provided. - - Example: - >>> predictions = ["hello there", "general kenobi"] - >>> references = ["hello there", "master kenobi"] - >>> bert_score(predictions=predictions, references=references, lang="en") # doctest: +SKIP - {'precision': [0.99..., 0.99...], - 'recall': [0.99..., 0.99...], - 'f1': [0.99..., 0.99...]} - """ - if len(predictions) != len(references): - raise ValueError("Number of predicted and reference sentences must be the same!") - - if verbose and (not _TQDM_AVAILABLE): - raise ValueError( - "An argument `verbose = True` requires the `tqdm` package to be installed. Install with `pip install tqdm`." - ) - - if model is None: - if not _TRANSFORMERS_AVAILABLE: - raise ValueError( - "`bert_score` metric with default models requires the `transformers` package to be installed. " - "Either install with `pip install transformers>=4.0` or `pip install paddlemetrics[text]`" - ) - tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) - model = AutoModel.from_pretrained(model_name_or_path) - else: - tokenizer = user_tokenizer - model.eval() - model.to(device) - - try: - if num_layers and num_layers > model.config.num_hidden_layers: # type: ignore - raise ValueError( - f"num_layers={num_layers} is forbidden for {model_name_or_path}.
" # type: ignore - f"Please use num_layers <= {model.config.num_hidden_layers}" # type: ignore - ) - except AttributeError: - warnings.warn("It was not possible to retrieve the parameter `num_layers` from the model specification.") - - _are_empty_lists = all(isinstance(text, list) and len(text) == 0 for text in (predictions, references)) - _are_valid_lists = all( - isinstance(text, list) and len(text) > 0 and isinstance(text[0], str) for text in (predictions, references) - ) - _are_valid_tensors = all( - isinstance(text, dict) and isinstance(text["input_ids"], Tensor) for text in (predictions, references) - ) - if _are_empty_lists: - warnings.warn("Predictions and references are empty.") - output_dict: Dict[str, Union[List[float], str]] = { - "precision": [0.0], - "recall": [0.0], - "f1": [0.0], - } - if return_hash: - output_dict.update({"hash": _get_hash(model_name_or_path, num_layers, idf)}) - return output_dict - - # Load baselines if needed - baseline = _load_baseline(lang, model_name_or_path, baseline_path, baseline_url) if rescale_with_baseline else None - - # We ignore mypy typing below as the proper typing is ensured by conditions above, only mypy cannot infer that. - if _are_valid_lists: - ref_dataset = TextDataset(references, tokenizer, max_length, idf=idf) # type: ignore - pred_dataset = TextDataset( - predictions, # type: ignore - tokenizer, - max_length, - idf=idf, - tokens_idf=ref_dataset.tokens_idf, - ) - elif _are_valid_tensors: - ref_dataset = TokenizedDataset(**references, idf=idf) # type: ignore - pred_dataset = TokenizedDataset(**predictions, idf=idf, tokens_idf=ref_dataset.tokens_idf) # type: ignore - else: - raise ValueError("Invalid input provided.") - - ref_loader = DataLoader(ref_dataset, batch_size=batch_size, num_workers=num_threads) - pred_loader = DataLoader(pred_dataset, batch_size=batch_size, num_workers=num_threads) - - ref_embeddings, ref_idf_scale = _get_embeddings_and_idf_scale( - ref_loader, ref_dataset.max_length, model, device, num_layers, all_layers, idf, verbose, user_forward_fn - ) - pred_embeddings, pred_idf_scale = _get_embeddings_and_idf_scale( - pred_loader, pred_dataset.max_length, model, device, num_layers, all_layers, idf, verbose, user_forward_fn - ) - - precision, recall, f1_score = _get_precision_recall_f1( - pred_embeddings, ref_embeddings, pred_idf_scale, ref_idf_scale - ) - - if baseline is not None: - precision, recall, f1_score = _rescale_metrics_with_baseline( - precision, recall, f1_score, baseline, num_layers, all_layers - ) - - output_dict = { - "precision": precision.tolist(), - "recall": recall.tolist(), - "f1": f1_score.tolist(), - } - if return_hash: - output_dict.update({"hash": _get_hash(model_name_or_path, num_layers, idf)}) - return output_dict diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/bleu.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/bleu.py deleted file mode 100644 index 4d00946b7..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/bleu.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# referenced from -# Library Name: torchtext -# Authors: torchtext authors and @sluks -# Date: 2020-07-18 -# Link: https://pytorch.org/text/_modules/torchtext/data/metrics.html#bleu_score -from collections import Counter -from typing import Sequence, Tuple - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor, tensor - - -def _count_ngram(ngram_input_list: Sequence[str], n_gram: int) -> Counter: - """Count how many times each n-gram appears in a given text. - - Args: - ngram_input_list: A list of translated text or reference texts - n_gram: n-gram order, ranging from 1 to 4 - - Return: - ngram_counter: a collections.Counter object of n-gram counts - """ - - ngram_counter: Counter = Counter() - - for i in range(1, n_gram + 1): - for j in range(len(ngram_input_list) - i + 1): - ngram_key = tuple(ngram_input_list[j : (i + j)]) - ngram_counter[ngram_key] += 1 - - return ngram_counter - - -def _bleu_score_update( - reference_corpus: Sequence[Sequence[Sequence[str]]], - translate_corpus: Sequence[Sequence[str]], - numerator: Tensor, - denominator: Tensor, - trans_len: Tensor, - ref_len: Tensor, - n_gram: int = 4, -) -> Tuple[Tensor, Tensor]: - """Updates and returns variables required to compute the BLEU score. - - Args: - reference_corpus: An iterable of iterables of reference corpus - translate_corpus: An iterable of machine translated corpus - numerator: Numerator of precision score (true positives) - denominator: Denominator of precision score (true positives + false positives) - trans_len: count of words in a candidate translation - ref_len: count of words in a reference translation - n_gram: n-gram order, ranging from 1 to 4 - """ - - for (translation, references) in zip(translate_corpus, reference_corpus): - trans_len += len(translation) - ref_len_list = [len(ref) for ref in references] - ref_len_diff = [abs(len(translation) - x) for x in ref_len_list] - ref_len += ref_len_list[ref_len_diff.index(min(ref_len_diff))] - translation_counter: Counter = _count_ngram(translation, n_gram) - reference_counter: Counter = Counter() - - for ref in references: - reference_counter |= _count_ngram(ref, n_gram) - - ngram_counter_clip = translation_counter & reference_counter - - for counter_clip in ngram_counter_clip: - numerator[len(counter_clip) - 1] += ngram_counter_clip[counter_clip] - - for counter in translation_counter: - denominator[len(counter) - 1] += translation_counter[counter] - - return trans_len, ref_len - - -def _bleu_score_compute( - trans_len: Tensor, ref_len: Tensor, numerator: Tensor, denominator: Tensor, n_gram: int = 4, smooth: bool = False -) -> Tensor: - """Computes the BLEU score.
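- - The returned score follows ``BLEU = BP * exp(sum_n w_n * log p_n)`` with uniform weights ``w_n = 1 / n_gram``, clipped n-gram precisions ``p_n`` and brevity penalty ``BP = min(1, exp(1 - ref_len / trans_len))``.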
- - Args: - trans_len: count of words in a candidate translation - ref_len: count of words in a reference translation - numerator: Numerator of precision score (true positives) - denominator: Denominator of precision score (true positives + false positives) - n_gram: n-gram order, ranging from 1 to 4 - smooth: Whether or not to apply smoothing – see [2] - """ - device = numerator.device - if min(numerator) == 0.0: - return tensor(0.0, device=device) - - if smooth: - precision_scores = B.div( - B.add(numerator, B.ones(n_gram, device=device)), - B.add(denominator, B.ones(n_gram, device=device)), - ) - precision_scores[0] = numerator[0] / denominator[0] - else: - precision_scores = numerator / denominator - - log_precision_scores = tensor([1.0 / n_gram] * n_gram, device=device) * B.log(precision_scores) - geometric_mean = B.exp(B.sum(log_precision_scores)) - brevity_penalty = tensor(1.0, device=device) if trans_len > ref_len else B.exp(1 - (ref_len / trans_len)) - bleu = brevity_penalty * geometric_mean - - return bleu - - -def bleu_score( - reference_corpus: Sequence[Sequence[Sequence[str]]], - translate_corpus: Sequence[Sequence[str]], - n_gram: int = 4, - smooth: bool = False, -) -> Tensor: - """Calculate `BLEU score`_ of machine translated text with one or more references. - - Args: - reference_corpus: - An iterable of iterables of reference corpus - translate_corpus: - An iterable of machine translated corpus - n_gram: - N-gram order, ranging from 1 to 4 (default 4) - smooth: - Whether or not to apply smoothing – see [2] - - Return: - Tensor with BLEU Score - - Example: - >>> from paddlemetrics.functional import bleu_score - >>> translate_corpus = ['the cat is on the mat'.split()] - >>> reference_corpus = [['there is a cat on the mat'.split(), 'a cat is on the mat'.split()]] - >>> bleu_score(reference_corpus, translate_corpus) - tensor(0.7598) - - References: - [1] BLEU: a Method for Automatic Evaluation of Machine Translation by Papineni, - Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu `BLEU`_ - - [2] Automatic Evaluation of Machine Translation Quality Using Longest Common Subsequence - and Skip-Bigram Statistics by Chin-Yew Lin and Franz Josef Och `Machine Translation Evolution`_ - """ - - if len(translate_corpus) != len(reference_corpus): - raise ValueError(f"Corpus has different size {len(translate_corpus)} != {len(reference_corpus)}") - numerator = B.zeros(n_gram) - denominator = B.zeros(n_gram) - trans_len = tensor(0, dtype=B.float) - ref_len = tensor(0, dtype=B.float) - - trans_len, ref_len = _bleu_score_update( - reference_corpus, translate_corpus, numerator, denominator, trans_len, ref_len, n_gram - ) - - return _bleu_score_compute(trans_len, ref_len, numerator, denominator, n_gram, smooth) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/rouge.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/rouge.py deleted file mode 100644 index e83c00d0b..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/rouge.py +++ /dev/null @@ -1,325 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and -# limitations under the License. -import re -from collections import Counter -from typing import Any, Dict, List, Optional, Tuple, Union - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.utilities.imports import _NLTK_AVAILABLE - -ALLOWED_ROUGE_KEYS: Dict[str, Union[int, str]] = { - "rouge1": 1, - "rouge2": 2, - "rouge3": 3, - "rouge4": 4, - "rouge5": 5, - "rouge6": 6, - "rouge7": 7, - "rouge8": 8, - "rouge9": 9, - "rougeL": "L", - "rougeLsum": "Lsum", -} - - -def _add_newline_to_end_of_each_sentence(x: str) -> str: - """This was added to get rougeLsum scores matching published rougeL scores for BART and PEGASUS.""" - if not _NLTK_AVAILABLE: - raise ValueError("ROUGE-Lsum calculation requires that nltk is installed. Use `pip install nltk`.") - import nltk - - nltk.download("punkt", quiet=True, force=False) - - x = re.sub("<n>", "", x)  # remove pegasus newline char - return "\n".join(nltk.sent_tokenize(x)) - - -def _compute_metrics(hits_or_lcs: int, pred_len: int, target_len: int) -> Dict[str, Tensor]: - """This computes precision, recall and F1 score based on the number of hits (or the LCS length) and the - lengths of the tokenized predicted and target sentences. - - Args: - hits_or_lcs: - A number of matches or a length of the longest common subsequence. - pred_len: - A length of a tokenized predicted sentence. - target_len: - A length of a tokenized target sentence. - """ - precision = hits_or_lcs / pred_len - recall = hits_or_lcs / target_len - if precision == recall == 0.0: - return dict(precision=tensor(0.0), recall=tensor(0.0), fmeasure=tensor(0.0)) - - fmeasure = 2 * precision * recall / (precision + recall) - return dict(precision=tensor(precision), recall=tensor(recall), fmeasure=tensor(fmeasure)) - - -def _lcs(pred_tokens: List[str], target_tokens: List[str]) -> int: - """Common DP algorithm to compute the length of the longest common subsequence. - - Args: - pred_tokens: - A tokenized predicted sentence. - target_tokens: - A tokenized target sentence. - """ - LCS = [[0] * (len(pred_tokens) + 1) for _ in range(len(target_tokens) + 1)] - for i in range(1, len(target_tokens) + 1): - for j in range(1, len(pred_tokens) + 1): - if target_tokens[i - 1] == pred_tokens[j - 1]: - LCS[i][j] = LCS[i - 1][j - 1] + 1 - else: - LCS[i][j] = max(LCS[i - 1][j], LCS[i][j - 1]) - return LCS[-1][-1] - - -def _normalize_and_tokenize_text(text: str, stemmer: Optional[Any] = None) -> List[str]: - """Rouge score should be calculated only over lowercased words and digits. Optionally, Porter stemmer can be - used to strip word suffixes to improve matching. The text normalization follows the implementation from `Rouge - score_Text Normalizition`_ - - Args: - text: - An input sentence. - stemmer: - Porter stemmer instance to strip word suffixes to improve matching. - """ - # Replace any non-alpha-numeric characters with spaces. - text = re.sub(r"[^a-z0-9]+", " ", text.lower()) - - tokens = re.split(r"\s+", text) - if stemmer: - # Only stem words more than 3 characters long. - tokens = [stemmer.stem(x) if len(x) > 3 else x for x in tokens] - - # One final check to drop any empty or invalid tokens. - tokens = [x for x in tokens if (isinstance(x, str) and re.match(r"^[a-z0-9]+$", x))] - - return tokens - - -def _rouge_n_score(pred: List[str], target: List[str], n_gram: int) -> Dict[str, Tensor]: - """This computes precision, recall and F1 score for the Rouge-N metric. - - Args: - pred: - A predicted sentence.
- target: - A target sentence. - n_gram: - N-gram overlap. - """ - - def _create_ngrams(tokens: List[str], n: int) -> Counter: - ngrams: Counter = Counter() - for ngram in (tuple(tokens[i : i + n]) for i in range(len(tokens) - n + 1)): - ngrams[ngram] += 1 - return ngrams - - pred_ngrams, target_ngrams = _create_ngrams(pred, n_gram), _create_ngrams(target, n_gram) - pred_len, target_len = sum(pred_ngrams.values()), sum(target_ngrams.values()) - if 0 in (pred_len, target_len): - return dict(precision=tensor(0.0), recall=tensor(0.0), fmeasure=tensor(0.0)) - - # It is sufficient to iterate over set(pred_ngrams) for the hit count, as we consider the intersection of pred & target - hits = sum(min(pred_ngrams[w], target_ngrams[w]) for w in set(pred_ngrams)) - return _compute_metrics(hits, max(pred_len, 1), max(target_len, 1)) - - -def _rouge_l_score(pred: List[str], target: List[str]) -> Dict[str, Tensor]: - """This computes precision, recall and F1 score for the Rouge-L or Rouge-LSum metric. - - Args: - pred: - A predicted sentence. - target: - A target sentence. - """ - pred_len, target_len = len(pred), len(target) - if 0 in (pred_len, target_len): - return dict(precision=tensor(0.0), recall=tensor(0.0), fmeasure=tensor(0.0)) - - lcs = _lcs(pred, target) - return _compute_metrics(lcs, pred_len, target_len) - - -def _rouge_score_update( - preds: List[str], - targets: List[str], - rouge_keys_values: List[Union[int, str]], - stemmer: Optional[Any] = None, -) -> Dict[Union[int, str], List[Dict[str, Tensor]]]: - """Update the rouge score with the current set of predicted and target sentences. - - Args: - preds: - An iterable of predicted sentences. - targets: - An iterable of target sentences. - rouge_keys_values: - List of N-grams/'L'/'Lsum' arguments. - stemmer: - Porter stemmer instance to strip word suffixes to improve matching.
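- - For ROUGE-N the per-sentence scores use ``precision = hits / #pred n-grams`` and ``recall = hits / #target n-grams`` with clipped n-gram hits, while ROUGE-L/LSum use the length of the longest common subsequence instead; see the example below.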
- - Example: - >>> targets = "Is your name John".split() - >>> preds = "My name is John".split() - >>> from pprint import pprint - >>> score = _rouge_score_update(preds, targets, rouge_keys_values=[1, 2, 3, 'L']) - >>> pprint(score) # doctest: +NORMALIZE_WHITESPACE +SKIP - {1: [{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, - {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, - {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, - {'fmeasure': tensor(1.), 'precision': tensor(1.), 'recall': tensor(1.)}], - 2: [{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, - {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, - {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, - {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}], - 3: [{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, - {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, - {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, - {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}], - 'L': [{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, - {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, - {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}, - {'fmeasure': tensor(1.), 'precision': tensor(1.), 'recall': tensor(1.)}]} - """ - results: Dict[Union[int, str], List[Dict[str, Tensor]]] = {rouge_key: [] for rouge_key in rouge_keys_values} - for pred_raw, target_raw in zip(preds, targets): - pred = _normalize_and_tokenize_text(pred_raw, stemmer) - target = _normalize_and_tokenize_text(target_raw, stemmer) - - if "Lsum" in rouge_keys_values: - # rougeLsum expects "\n" separated sentences within a summary - pred_Lsum = _normalize_and_tokenize_text(_add_newline_to_end_of_each_sentence(pred_raw), stemmer) - target_Lsum = _normalize_and_tokenize_text(_add_newline_to_end_of_each_sentence(target_raw), stemmer) - - for rouge_key in rouge_keys_values: - if isinstance(rouge_key, int): - score = _rouge_n_score(pred, target, rouge_key) - else: - score = _rouge_l_score( - pred if rouge_key != "Lsum" else pred_Lsum, - target if rouge_key != "Lsum" else target_Lsum, - ) - results[rouge_key].append(score) - return results - - -def _rouge_score_compute(sentence_results: Dict[str, List[Tensor]]) -> Dict[str, Tensor]: - """Compute the combined ROUGE metric for all the input set of predicted and target sentences. - - Args: - sentence_results: - Rouge-N/Rouge-L/Rouge-LSum metrics calculated for single sentence. - """ - results: Dict[str, Tensor] = {} - # Obtain mean scores for individual rouge metrics - if sentence_results == {}: - return results - - for rouge_key, scores in sentence_results.items(): - results[rouge_key] = B.tensor(scores).mean() - - return results - - -def rouge_score( - preds: Union[str, List[str]], - targets: Union[str, List[str]], - use_stemmer: bool = False, - rouge_keys: Union[str, Tuple[str, ...]] = ("rouge1", "rouge2", "rougeL", "rougeLsum"), # type: ignore -) -> Dict[str, Tensor]: - """Calculate `Calculate Rouge Score`_ , used for automatic summarization. - - Args: - preds: - An iterable of predicted sentences. - targets: - An iterable of target sentences. - use_stemmer: - Use Porter stemmer to strip word suffixes to improve matching. - rouge_keys: - A list of rouge types to calculate. 
-            Keys that are allowed are ``rougeL``, ``rougeLsum``, and ``rouge1`` through ``rouge9``.
-
-    Return:
-        Python dictionary of rouge scores for each input rouge key.
-
-    Example:
-        >>> targets = "Is your name John".split()
-        >>> preds = "My name is John".split()
-        >>> from pprint import pprint
-        >>> pprint(rouge_score(preds, targets))  # doctest: +NORMALIZE_WHITESPACE +SKIP
-        {'rouge1_fmeasure': 0.25,
-         'rouge1_precision': 0.25,
-         'rouge1_recall': 0.25,
-         'rouge2_fmeasure': 0.0,
-         'rouge2_precision': 0.0,
-         'rouge2_recall': 0.0,
-         'rougeL_fmeasure': 0.25,
-         'rougeL_precision': 0.25,
-         'rougeL_recall': 0.25,
-         'rougeLsum_fmeasure': 0.25,
-         'rougeLsum_precision': 0.25,
-         'rougeLsum_recall': 0.25}
-
-    Raises:
-        ValueError:
-            If the python package ``nltk`` is not installed.
-        ValueError:
-            If any of the ``rouge_keys`` does not belong to the allowed set of keys.
-
-    References:
-        [1] ROUGE: A Package for Automatic Evaluation of Summaries by Chin-Yew Lin. https://aclanthology.org/W04-1013/
-    """
-
-    if use_stemmer:
-        if not _NLTK_AVAILABLE:
-            raise ValueError("Stemmer requires that nltk is installed. Use `pip install nltk`.")
-        import nltk
-
-    stemmer = nltk.stem.porter.PorterStemmer() if use_stemmer else None
-
-    if not isinstance(rouge_keys, tuple):
-        rouge_keys = tuple([rouge_keys])
-    for key in rouge_keys:
-        if key not in ALLOWED_ROUGE_KEYS.keys():
-            raise ValueError(f"Got unknown rouge key {key}. Expected to be one of {list(ALLOWED_ROUGE_KEYS.keys())}")
-    rouge_keys_values = [ALLOWED_ROUGE_KEYS[key] for key in rouge_keys]
-
-    if isinstance(preds, str):
-        preds = [preds]
-
-    if isinstance(targets, str):
-        targets = [targets]
-
-    sentence_results: Dict[Union[int, str], List[Dict[str, Tensor]]] = _rouge_score_update(
-        preds, targets, rouge_keys_values, stemmer=stemmer
-    )
-
-    output: Dict[str, List[Tensor]] = {}
-    for rouge_key in rouge_keys_values:
-        for type in ["fmeasure", "precision", "recall"]:
-            output[f"rouge{rouge_key}_{type}"] = []
-
-    for rouge_key, metrics in sentence_results.items():
-        for metric in metrics:
-            for type, value in metric.items():
-                output[f"rouge{rouge_key}_{type}"].append(value)
-
-    return _rouge_score_compute(output)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/sacre_bleu.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/sacre_bleu.py
deleted file mode 100644
index 1a59377f6..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/sacre_bleu.py
+++ /dev/null
@@ -1,355 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# referenced from
-# Library Name: torchtext
-# Authors: torchtext authors and @sluks
-# Date: 2020-07-18
-# Link: https://pytorch.org/text/_modules/torchtext/data/metrics.html#bleu_score
-
-##############
-
-# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"). You may not
-# use this file except in compliance with the License. A copy of the License
-# is located at
-#
-#     http://aws.amazon.com/apache2.0/
-#
-# or in the "license" file accompanying this file. This file is distributed on
-# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
-# express or implied. See the License for the specific language governing
-# permissions and limitations under the License.
-
-##############
-
-# MIT License
-# Copyright (c) 2017 - Shujian Huang
-
-
-import re
-from typing import Sequence
-
-import paddleext.torchapi as B
-from paddleext.torchapi import Tensor, tensor
-from typing_extensions import Literal
-
-from paddlemetrics.functional.text.bleu import _bleu_score_compute, _bleu_score_update
-from paddlemetrics.utilities.imports import _REGEX_AVAILABLE
-
-AVAILABLE_TOKENIZERS = ("none", "13a", "zh", "intl", "char")
-
-_UCODE_RANGES = (
-    ("\u3400", "\u4db5"),  # CJK Unified Ideographs Extension A, release 3.0
-    ("\u4e00", "\u9fa5"),  # CJK Unified Ideographs, release 1.1
-    ("\u9fa6", "\u9fbb"),  # CJK Unified Ideographs, release 4.1
-    ("\uf900", "\ufa2d"),  # CJK Compatibility Ideographs, release 1.1
-    ("\ufa30", "\ufa6a"),  # CJK Compatibility Ideographs, release 3.2
-    ("\ufa70", "\ufad9"),  # CJK Compatibility Ideographs, release 4.1
-    ("\u20000", "\u2a6d6"),  # (UTF16) CJK Unified Ideographs Extension B, release 3.1
-    ("\u2f800", "\u2fa1d"),  # (UTF16) CJK Compatibility Supplement, release 3.1
-    ("\uff00", "\uffef"),  # Full width ASCII, full width of English punctuation,
-    # half width Katakana, half wide half width kana, Korean alphabet
-    ("\u2e80", "\u2eff"),  # CJK Radicals Supplement
-    ("\u3000", "\u303f"),  # CJK punctuation mark
-    ("\u31c0", "\u31ef"),  # CJK stroke
-    ("\u2f00", "\u2fdf"),  # Kangxi Radicals
-    ("\u2ff0", "\u2fff"),  # Chinese character structure
-    ("\u3100", "\u312f"),  # Phonetic symbols
-    ("\u31a0", "\u31bf"),  # Phonetic symbols (Taiwanese and Hakka expansion)
-    ("\ufe10", "\ufe1f"),
-    ("\ufe30", "\ufe4f"),
-    ("\u2600", "\u26ff"),
-    ("\u2700", "\u27bf"),
-    ("\u3200", "\u32ff"),
-    ("\u3300", "\u33ff"),
-)
-
-
-class _SacreBLEUTokenizer:
-    """Tokenizer used for SacreBLEU calculation.
-
-    Source: https://github.com/mjpost/sacrebleu/tree/master/sacrebleu/tokenizers
-    """
-
-    _REGEX = (
-        # language-dependent part (assuming Western languages)
-        (re.compile(r"([\{-\~\[-\` -\&\(-\+\:-\@\/])"), r" \1 "),
-        # tokenize period and comma unless preceded by a digit
-        (re.compile(r"([^0-9])([\.,])"), r"\1 \2 "),
-        # tokenize period and comma unless followed by a digit
-        (re.compile(r"([\.,])([^0-9])"), r" \1 \2"),
-        # tokenize dash when preceded by a digit
-        (re.compile(r"([0-9])(-)"), r"\1 \2 "),
-        # one space only between words
-        # NOTE: Doing this in Python (below) is faster
-        # (re.compile(r'\s+'), r' '),
-    )
-
-    if _REGEX_AVAILABLE:
-        import regex
-
-        _INT_REGEX = (
-            # Separate out punctuations preceded by a non-digit
-            (regex.compile(r"(\P{N})(\p{P})"), r"\1 \2 "),
-            # Separate out punctuations followed by a non-digit
-            (regex.compile(r"(\p{P})(\P{N})"), r" \1 \2"),
-            # Separate out symbols
-            (regex.compile(r"(\p{S})"), r" \1 "),
-        )
-
-    _TOKENIZE_FN = {
-        "none": "_tokenize_base",
-        "13a": "_tokenize_13a",
-        "zh": "_tokenize_zh",
-        "intl": "_tokenize_international",
-        "char": "_tokenize_char",
-    }
-
-    def __init__(self, tokenize: Literal["none", "13a", "zh", "intl", "char"], lowercase: bool = False) -> None:
-        self.tokenize_fn = getattr(self, self._TOKENIZE_FN[tokenize])
-        self.lowercase = lowercase
-
-    def __call__(self, line: str) -> Sequence[str]:
-        tokenized_line = self.tokenize_fn(line)
-        return self._lower(tokenized_line, self.lowercase).split()
-
-    @classmethod
-    def tokenize(
-        cls, line: str, tokenize: Literal["none", "13a", "zh", "intl", "char"], lowercase: bool = False
-    ) -> Sequence[str]:
-        tokenize_fn = getattr(cls, cls._TOKENIZE_FN[tokenize])
-        tokenized_line = tokenize_fn(line)
-        return cls._lower(tokenized_line, lowercase).split()
-
-    @classmethod
-    def _tokenize_regex(cls, line: str) -> str:
-        """Common post-processing tokenizer for `13a` and `zh` tokenizers.
-        Args:
-            line: a segment to tokenize
-
-        Return:
-            the tokenized line
-        """
-        for (_re, repl) in cls._REGEX:
-            line = _re.sub(repl, line)
-        # no leading or trailing spaces, single space within words
-        return " ".join(line.split())
-
-    @staticmethod
-    def _is_chinese_char(uchar: str) -> bool:
-        """
-        Args:
-            uchar: input char in unicode
-
-        Return:
-            whether the input char is a Chinese character.
-        """
-        for start, end in _UCODE_RANGES:
-            if start <= uchar <= end:
-                return True
-        return False
-
-    @classmethod
-    def _tokenize_base(cls, line: str) -> str:
-        """Tokenizes an input line with the tokenizer.
-
-        Args:
-            line: a segment to tokenize
-
-        Return:
-            the tokenized line
-        """
-        return line
-
-    @classmethod
-    def _tokenize_13a(cls, line: str) -> str:
-        """Tokenizes an input line using a relatively minimal tokenization that is however equivalent to
-        mteval-v13a, used by WMT.
-
-        Args:
-            line: input sentence
-
-        Return:
-            tokenized sentence
-        """
-        # language-independent part:
-        line = line.replace("<skipped>", "")
-        line = line.replace("-\n", "")
-        line = line.replace("\n", " ")
-
-        if "&" in line:
-            line = line.replace("&quot;", '"')
-            line = line.replace("&amp;", "&")
-            line = line.replace("&lt;", "<")
-            line = line.replace("&gt;", ">")
-
-        return cls._tokenize_regex(line)
-
-    @classmethod
-    def _tokenize_zh(cls, line: str) -> str:
-        """The tokenization of Chinese text in this script contains two
-        steps: separate each Chinese characters (by utf-8 encoding); tokenize
-        the non Chinese part (following the `13a` i.e. mteval tokenizer).
-        Author: Shujian Huang huangsj@nju.edu.cn
-
-        Args:
-            line: input sentence
-
-        Return:
-            tokenized sentence
-        """
-
-        line = line.strip()
-        line_in_chars = ""
-
-        for char in line:
-            if cls._is_chinese_char(char):
-                line_in_chars += " "
-                line_in_chars += char
-                line_in_chars += " "
-            else:
-                line_in_chars += char
-
-        return cls._tokenize_regex(line_in_chars)
-
-    @classmethod
-    def _tokenize_international(cls, line: str) -> str:
-        """Tokenizes a string following the official BLEU implementation.
-
-        See github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/mteval-v14.pl#L954-L983
-
-        In our case, the input string is expected to be just one line.
-        We just tokenize on punctuation and symbols,
-        except when a punctuation is preceded and followed by a digit
-        (e.g. a comma/dot as a thousand/decimal separator).
-        We do not recover escaped forms of punctuations such as &apos; or &gt;
-        as these should never appear in MT system outputs (see issue #138)
-
-        Note that a number (e.g., a year) followed by a dot at the end of
-        sentence is NOT tokenized, i.e. the dot stays with the number because
-        `s/(\\p{P})(\\P{N})/ $1 $2/g` does not match this case (unless we add a
-        space after each sentence). However, this error is already in the
-        original mteval-v14.pl and we want to be consistent with it.
-        The error is not present in the non-international version,
-        which uses `$norm_text = " $norm_text "`.
-
-        Args:
-            line: the input string to tokenize.
-
-        Return:
-            The tokenized string.
-        """
-        for (_re, repl) in cls._INT_REGEX:
-            line = _re.sub(repl, line)
-
-        return " ".join(line.split())
-
-    @classmethod
-    def _tokenize_char(cls, line: str) -> str:
-        """Tokenizes all the characters in the input line.
-
-        Args:
-            line: a segment to tokenize
-
-        Return:
-            the tokenized line
-        """
-        return " ".join(char for char in line)
-
-    @staticmethod
-    def _lower(line: str, lowercase: bool) -> str:
-        if lowercase:
-            return line.lower()
-        return line
-
-
-def sacre_bleu_score(
-    reference_corpus: Sequence[Sequence[str]],
-    translate_corpus: Sequence[str],
-    n_gram: int = 4,
-    smooth: bool = False,
-    tokenize: Literal["none", "13a", "zh", "intl", "char"] = "13a",
-    lowercase: bool = False,
-) -> Tensor:
-    """Calculate `BLEU score`_ [1] of machine translated text with one or more references. This implementation
-    follows the behaviour of SacreBLEU [2] implementation from https://github.com/mjpost/sacrebleu.
-
-    Args:
-        reference_corpus:
-            An iterable of iterables of reference corpus
-        translate_corpus:
-            An iterable of machine translated corpus
-        n_gram:
-            Gram value ranged from 1 to 4 (Default 4)
-        smooth:
-            Whether or not to apply smoothing – see [2]
-        tokenize:
-            Tokenization technique to be used. (Default '13a')
-            Supported tokenization: ['none', '13a', 'zh', 'intl', 'char']
-        lowercase:
-            If ``True``, BLEU score over lowercased text is calculated.
-
-    Return:
-        Tensor with BLEU Score
-
-    Example:
-        >>> from paddlemetrics.functional import sacre_bleu_score
-        >>> translate_corpus = ['the cat is on the mat']
-        >>> reference_corpus = [['there is a cat on the mat', 'a cat is on the mat']]
-        >>> sacre_bleu_score(reference_corpus, translate_corpus)
-        tensor(0.7598)
-
-    References:
-        [1] BLEU: a Method for Automatic Evaluation of Machine Translation by Papineni,
-        Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu `BLEU`_
-
-        [2] A Call for Clarity in Reporting BLEU Scores by Matt Post.
-
-        [3] Automatic Evaluation of Machine Translation Quality Using Longest Common Subsequence
-        and Skip-Bigram Statistics by Chin-Yew Lin and Franz Josef Och `Machine Translation Evolution`_
-    """
-    if tokenize not in AVAILABLE_TOKENIZERS:
-        raise ValueError(f"Argument `tokenize` expected to be one of {AVAILABLE_TOKENIZERS} but got {tokenize}.")
-
-    if tokenize not in _SacreBLEUTokenizer._TOKENIZE_FN.keys():
-        raise ValueError(
-            f"Unsupported tokenizer selected. Please, choose one of {list(_SacreBLEUTokenizer._TOKENIZE_FN.keys())}"
-        )
-    if len(translate_corpus) != len(reference_corpus):
-        raise ValueError(f"Corpus has different size {len(translate_corpus)} != {len(reference_corpus)}")
-    if tokenize == "intl" and not _REGEX_AVAILABLE:
-        raise ValueError(
-            "`'intl'` tokenization requires `regex` installed. Use `pip install regex` or `pip install "
-            "paddlemetrics[text]`."
-        )
-
-    reference_corpus_: Sequence[Sequence[Sequence[str]]] = [
-        [_SacreBLEUTokenizer.tokenize(line, tokenize, lowercase) for line in reference]
-        for reference in reference_corpus
-    ]
-    translate_corpus_: Sequence[Sequence[str]] = [
-        _SacreBLEUTokenizer.tokenize(line, tokenize, lowercase) for line in translate_corpus
-    ]
-
-    numerator = B.zeros(n_gram)
-    denominator = B.zeros(n_gram)
-    trans_len = tensor(0, dtype=B.float)
-    ref_len = tensor(0, dtype=B.float)
-
-    trans_len, ref_len = _bleu_score_update(
-        reference_corpus_, translate_corpus_, numerator, denominator, trans_len, ref_len, n_gram
-    )
-
-    return _bleu_score_compute(trans_len, ref_len, numerator, denominator, n_gram, smooth)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/wer.py b/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/wer.py
deleted file mode 100644
index 4cd19b059..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/functional/text/wer.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from typing import List, Optional, Tuple, Union
-from warnings import warn
-
-import paddleext.torchapi as B
-from paddleext.torchapi import Tensor, tensor
-
-
-def _edit_distance(prediction_tokens: List[str], reference_tokens: List[str]) -> int:
-    """Standard dynamic programming algorithm to compute the edit distance.
-
-    Args:
-        prediction_tokens: A tokenized predicted sentence
-        reference_tokens: A tokenized reference sentence
-
-    Returns:
-        (int) Edit distance between the predicted sentence and the reference sentence
-    """
-    dp = [[0] * (len(reference_tokens) + 1) for _ in range(len(prediction_tokens) + 1)]
-    for i in range(len(prediction_tokens) + 1):
-        dp[i][0] = i
-    for j in range(len(reference_tokens) + 1):
-        dp[0][j] = j
-    for i in range(1, len(prediction_tokens) + 1):
-        for j in range(1, len(reference_tokens) + 1):
-            if prediction_tokens[i - 1] == reference_tokens[j - 1]:
-                dp[i][j] = dp[i - 1][j - 1]
-            else:
-                dp[i][j] = min(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1]) + 1
-    return dp[-1][-1]
-
-
-def _wer_update(
-    predictions: Union[str, List[str]],
-    references: Union[str, List[str]],
-) -> Tuple[Tensor, Tensor]:
-    """Update the wer score with the current set of references and predictions.
-
-    Args:
-        predictions: Transcription(s) to score as a string or list of strings
-        references: Reference(s) for each speech input as a string or list of strings
-
-    Returns:
-        (Tensor) Number of edit operations to get from the reference to the prediction, summed over all samples
-        (Tensor) Number of words over all references
-    """
-    if isinstance(predictions, str):
-        predictions = [predictions]
-    if isinstance(references, str):
-        references = [references]
-    errors = tensor(0, dtype=B.float)
-    total = tensor(0, dtype=B.float)
-    for prediction, reference in zip(predictions, references):
-        prediction_tokens = prediction.split()
-        reference_tokens = reference.split()
-        errors += _edit_distance(prediction_tokens, reference_tokens)
-        total += len(reference_tokens)
-    return errors, total
-
-
-def _wer_compute(errors: Tensor, total: Tensor) -> Tensor:
-    """Compute the word error rate.
-
-    Args:
-        errors: Number of edit operations to get from the reference to the prediction, summed over all samples
-        total: Number of words over all references
-
-    Returns:
-        (Tensor) Word error rate
-    """
-    return errors / total
-
-
-def wer(
-    predictions: Union[str, List[str]],
-    references: Union[str, List[str]],
-    concatenate_texts: Optional[bool] = None,  # TODO: remove in v0.7
-) -> Tensor:
-    """Word error rate (WER_) is a common metric of the performance of an automatic speech recognition system. This
-    value indicates the percentage of words that were incorrectly predicted. The lower the value, the better the
-    performance of the ASR system with a WER of 0 being a perfect score.
-
-    Args:
-        predictions: Transcription(s) to score as a string or list of strings
-        references: Reference(s) for each speech input as a string or list of strings
-        concatenate_texts: Whether to concatenate all input texts or compute WER iteratively
-            This argument is deprecated in v0.6 and it will be removed in v0.7.
-
-    Returns:
-        (Tensor) Word error rate
-
-    Examples:
-        >>> predictions = ["this is the prediction", "there is an other sample"]
-        >>> references = ["this is the reference", "there is another one"]
-        >>> wer(predictions=predictions, references=references)
-        tensor(0.5000)
-    """
-    if concatenate_texts is not None:
-        warn("`concatenate_texts` has been deprecated in v0.6 and it will be removed in v0.7", DeprecationWarning)
-    errors, total = _wer_update(predictions, references)
-    return _wer_compute(errors, total)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/image/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/image/__init__.py
deleted file mode 100644
index c3fb3568f..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/image/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#from paddlemetrics.image.fid import FID  # noqa: F401
-from paddlemetrics.image.inception import IS  # noqa: F401
-from paddlemetrics.image.kid import KID  # noqa: F401
-from paddlemetrics.image.lpip_similarity import LPIPS  # noqa: F401
-from paddlemetrics.image.psnr import PSNR  # noqa: F401
-from paddlemetrics.image.ssim import SSIM  # noqa: F401
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/image/fid.py b/EE/paddlemetric/src/build/lib/paddlemetrics/image/fid.py
deleted file mode 100644
index 6f2965db6..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/image/fid.py
+++ /dev/null
@@ -1,283 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
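The `wer` function deleted above is driven by the classic Levenshtein recurrence in `_edit_distance`. A quick illustration of the same computation with plain ints (a sketch, not part of the deleted module; the function name is illustrative):

```python
from typing import List


def edit_distance_example(pred: List[str], ref: List[str]) -> int:
    # dp[i][j] = edits needed to turn the first i predicted tokens into the first j reference tokens
    dp = [[0] * (len(ref) + 1) for _ in range(len(pred) + 1)]
    for i in range(len(pred) + 1):
        dp[i][0] = i  # delete all i tokens
    for j in range(len(ref) + 1):
        dp[0][j] = j  # insert all j tokens
    for i in range(1, len(pred) + 1):
        for j in range(1, len(ref) + 1):
            if pred[i - 1] == ref[j - 1]:
                dp[i][j] = dp[i - 1][j - 1]  # match: carry the diagonal over
            else:
                # 1 + best of deletion, insertion, substitution
                dp[i][j] = 1 + min(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1])
    return dp[-1][-1]


# The doctest above: (1 + 3) edits over (4 + 4) reference words -> WER = 0.5
# edit_distance_example("this is the prediction".split(), "this is the reference".split()) == 1
# edit_distance_example("there is an other sample".split(), "there is another one".split()) == 3
```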
-from typing import Any, Callable, List, Optional, Union
-
-import numpy as np
-import paddleext.torchapi as B
-from paddleext.torchapi import Tensor
-from paddleext.torchapi.autograd import Function
-
-from paddlemetrics.metric import Metric
-from paddlemetrics.utilities import rank_zero_info, rank_zero_warn
-from paddlemetrics.utilities.data import dim_zero_cat
-from paddlemetrics.utilities.imports import _SCIPY_AVAILABLE, _TORCH_FIDELITY_AVAILABLE
-
-if _TORCH_FIDELITY_AVAILABLE:
-    from torch_fidelity.feature_extractor_inceptionv3 import FeatureExtractorInceptionV3
-else:
-
-    class FeatureExtractorInceptionV3(B.nn.Module):  # type: ignore
-        pass
-
-
-if _SCIPY_AVAILABLE:
-    import scipy
-
-
-class NoTrainInceptionV3(FeatureExtractorInceptionV3):
-    def __init__(
-        self,
-        name: str,
-        features_list: List[str],
-        feature_extractor_weights_path: Optional[str] = None,
-    ) -> None:
-        super().__init__(name, features_list, feature_extractor_weights_path)
-        # put into evaluation mode
-        self.eval()
-
-    def train(self, mode: bool) -> "NoTrainInceptionV3":
-        """the inception network should not be able to be switched away from evaluation mode."""
-        return super().train(False)
-
-    def forward(self, x: Tensor) -> Tensor:
-        out = super().forward(x)
-        return out[0].reshape(x.shape[0], -1)
-
-
-class MatrixSquareRoot(Function):
-    """Square root of a positive definite matrix.
-
-    All credit to: `Square Root of a Positive Definite Matrix`_
-    """
-
-    @staticmethod
-    def forward(ctx: Any, input_data: Tensor) -> Tensor:
-        # TODO: update whenever pytorch gets a matrix square root function
-        # Issue: https://github.com/pytorch/pytorch/issues/9983
-        m = input_data.detach().cpu().numpy().astype(np.float_)
-        scipy_res, _ = scipy.linalg.sqrtm(m, disp=False)
-        sqrtm = B.from_numpy(scipy_res.real).to(input_data)
-        ctx.save_for_backward(sqrtm)
-        return sqrtm
-
-    @staticmethod
-    def backward(ctx: Any, grad_output: Tensor) -> Tensor:
-        grad_input = None
-        if ctx.needs_input_grad[0]:
-            (sqrtm,) = ctx.saved_tensors
-            sqrtm = sqrtm.data.cpu().numpy().astype(np.float_)
-            gm = grad_output.data.cpu().numpy().astype(np.float_)
-
-            # Given a positive semi-definite matrix X,
-            # since X = X^{1/2}X^{1/2}, we can compute the gradient of the
-            # matrix square root dX^{1/2} by solving the Sylvester equation:
-            # dX = (d(X^{1/2})X^{1/2} + X^{1/2}(dX^{1/2}).
-            grad_sqrtm = scipy.linalg.solve_sylvester(sqrtm, sqrtm, gm)
-
-            grad_input = B.from_numpy(grad_sqrtm).to(grad_output)
-        return grad_input
-
-
-sqrtm = MatrixSquareRoot.apply
-
-
-def _compute_fid(mu1: Tensor, sigma1: Tensor, mu2: Tensor, sigma2: Tensor, eps: float = 1e-6) -> Tensor:
-    r"""
-    Adjusted version of `Fid Score`_
-
-    The Frechet Inception Distance between two multivariate Gaussians X_x ~ N(mu_1, sigm_1)
-    and X_y ~ N(mu_2, sigm_2) is d^2 = ||mu_1 - mu_2||^2 + Tr(sigm_1 + sigm_2 - 2*sqrt(sigm_1*sigm_2)).
-
-    Args:
-        mu1: mean of activations calculated on predicted (x) samples
-        sigma1: covariance matrix over activations calculated on predicted (x) samples
-        mu2: mean of activations calculated on target (y) samples
-        sigma2: covariance matrix over activations calculated on target (y) samples
-        eps: offset constant. used if sigma_1 @ sigma_2 matrix is singular
-
-    Returns:
-        Scalar value of the distance between sets.
- """ - diff = mu1 - mu2 - - covmean = sqrtm(sigma1.mm(sigma2)) - # Product might be almost singular - if not B.isfinite(covmean).all(): - rank_zero_info(f"FID calculation produces singular product; adding {eps} to diagonal of covariance estimates") - offset = B.eye(sigma1.size(0), device=mu1.device, dtype=mu1.dtype) * eps - covmean = sqrtm((sigma1 + offset).mm(sigma2 + offset)) - - tr_covmean = B.trace(covmean) - return diff.dot(diff) + B.trace(sigma1) + B.trace(sigma2) - 2 * tr_covmean - - -class FID(Metric): - r""" - Calculates Fréchet inception distance (FID_) which is used to access the quality of generated images. Given by - - .. math:: - FID = |\mu - \mu_w| + tr(\Sigma + \Sigma_w - 2(\Sigma \Sigma_w)^{\frac{1}{2}}) - - where :math:`\mathcal{N}(\mu, \Sigma)` is the multivariate normal distribution estimated from Inception v3 [1] - features calculated on real life images and :math:`\mathcal{N}(\mu_w, \Sigma_w)` is the multivariate normal - distribution estimated from Inception v3 features calculated on generated (fake) images. The metric was - originally proposed in [1]. - - Using the default feature extraction (Inception v3 using the original weights from [2]), the input is - expected to be mini-batches of 3-channel RGB images of shape (3 x H x W) with dtype uint8. All images - will be resized to 299 x 299 which is the size of the original training data. The boolian flag ``real`` - determines if the images should update the statistics of the real distribution or the fake distribution. - - .. note:: using this metrics requires you to have ``scipy`` install. Either install as ``pip install - paddlemetrics[image]`` or ``pip install scipy`` - - .. note:: using this metric with the default feature extractor requires that ``torch-fidelity`` - is installed. Either install as ``pip install paddlemetrics[image]`` or - ``pip install torch-fidelity`` - - .. note:: the ``forward`` method can be used but ``compute_on_step`` is disabled by default (oppesit of - all other metrics) as this metric does not really make sense to calculate on a single batch. This - means that by default ``forward`` will just call ``update`` underneat. - - Args: - feature: - Either an integer or ``nn.Module``: - - - an integer will indicate the inceptionv3 feature layer to choose. Can be one of the following: - 64, 192, 768, 2048 - - an ``nn.Module`` for using a custom feature extractor. Expects that its forward method returns - an ``[N,d]`` matrix where ``N`` is the batch size and ``d`` is the feature size. - - compute_on_step: - Forward only calls ``update()`` and return ``None`` if this is set to ``False``. - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step - process_group: - Specify the process group on which synchronization is called. - default: ``None`` (which selects the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. 
-            will be used to perform the allgather
-
-    References:
-        [1] Rethinking the Inception Architecture for Computer Vision
-        Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens, Zbigniew Wojna
-        https://arxiv.org/abs/1512.00567
-
-        [2] GANs Trained by a Two Time-Scale Update Rule Converge to a Local Nash Equilibrium,
-        Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, Sepp Hochreiter
-        https://arxiv.org/abs/1706.08500
-
-    Raises:
-        ValueError:
-            If ``feature`` is set to an ``int`` (default settings) and ``torch-fidelity`` is not installed
-        ValueError:
-            If ``feature`` is set to an ``int`` not in [64, 192, 768, 2048]
-        TypeError:
-            If ``feature`` is not a ``str``, ``int`` or ``B.nn.Module``
-
-    Example:
-        >>> import paddleext.torchapi as B
-        >>> _ = B.manual_seed(123)
-        >>> from paddlemetrics import FID
-        >>> fid = FID(feature=64)  # doctest: +SKIP
-        >>> # generate two slightly overlapping image intensity distributions
-        >>> imgs_dist1 = B.randint(0, 200, (100, 3, 299, 299), dtype=B.uint8)  # doctest: +SKIP
-        >>> imgs_dist2 = B.randint(100, 255, (100, 3, 299, 299), dtype=B.uint8)  # doctest: +SKIP
-        >>> fid.update(imgs_dist1, real=True)  # doctest: +SKIP
-        >>> fid.update(imgs_dist2, real=False)  # doctest: +SKIP
-        >>> fid.compute()  # doctest: +SKIP
-        tensor(12.7202)
-
-    """
-    real_features: List[Tensor]
-    fake_features: List[Tensor]
-
-    def __init__(
-        self,
-        feature: Union[int, B.nn.Module] = 2048,
-        compute_on_step: bool = False,
-        dist_sync_on_step: bool = False,
-        process_group: Optional[Any] = None,
-        dist_sync_fn: Callable[[Tensor], List[Tensor]] = None,
-    ) -> None:
-        super().__init__(
-            compute_on_step=compute_on_step,
-            dist_sync_on_step=dist_sync_on_step,
-            process_group=process_group,
-            dist_sync_fn=dist_sync_fn,
-        )
-
-        rank_zero_warn(
-            "Metric `FID` will save all extracted features in buffer."
-            " For large datasets this may lead to large memory footprint.",
-            UserWarning,
-        )
-
-        if isinstance(feature, int):
-            if not _TORCH_FIDELITY_AVAILABLE:
-                raise ValueError(
-                    "FID metric requires that Torch-fidelity is installed."
-                    " Either install as `pip install paddlemetrics[image]` or `pip install torch-fidelity`"
-                )
-            valid_int_input = [64, 192, 768, 2048]
-            if feature not in valid_int_input:
-                raise ValueError(
-                    f"Integer input to argument `feature` must be one of {valid_int_input}, but got {feature}."
-                )
-
-            self.inception = NoTrainInceptionV3(name="inception-v3-compat", features_list=[str(feature)])
-        elif isinstance(feature, B.nn.Module):
-            self.inception = feature
-        else:
-            raise TypeError("Got unknown input to argument `feature`")
-
-        self.add_state("real_features", [], dist_reduce_fx=None)
-        self.add_state("fake_features", [], dist_reduce_fx=None)
-
-    def update(self, imgs: Tensor, real: bool) -> None:  # type: ignore
-        """Update the state with extracted features.
-
-        Args:
-            imgs: tensor with images fed to the feature extractor
-            real: bool indicating if imgs belong to the real or the fake distribution
-        """
-        features = self.inception(imgs)
-
-        if real:
-            self.real_features.append(features)
-        else:
-            self.fake_features.append(features)
-
-    def compute(self) -> Tensor:
-        """Calculate FID score based on accumulated extracted features from the two distributions."""
-        real_features = dim_zero_cat(self.real_features)
-        fake_features = dim_zero_cat(self.fake_features)
-        # computation is extremely sensitive so it needs to happen in double precision
-        orig_dtype = real_features.dtype
-        real_features = real_features.double()
-        fake_features = fake_features.double()
-
-        # calculate mean and covariance; normalize each set by its own sample count
-        n_real = real_features.shape[0]
-        n_fake = fake_features.shape[0]
-        mean1 = real_features.mean(dim=0)
-        mean2 = fake_features.mean(dim=0)
-        diff1 = real_features - mean1
-        diff2 = fake_features - mean2
-        cov1 = 1.0 / (n_real - 1) * diff1.t().mm(diff1)
-        cov2 = 1.0 / (n_fake - 1) * diff2.t().mm(diff2)
-
-        # compute fid
-        return _compute_fid(mean1, cov1, mean2, cov2).to(orig_dtype)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/image/inception.py b/EE/paddlemetric/src/build/lib/paddlemetrics/image/inception.py
deleted file mode 100644
index 6c05b9a4b..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/image/inception.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import Any, Callable, List, Optional, Tuple, Union
-
-import paddleext.torchapi as B
-from paddleext.torchapi import Tensor
-
-#from paddlemetrics.image.fid import NoTrainInceptionV3
-from paddlemetrics.metric import Metric
-from paddlemetrics.utilities import rank_zero_warn
-from paddlemetrics.utilities.data import dim_zero_cat
-from paddlemetrics.utilities.imports import _TORCH_FIDELITY_AVAILABLE
-
-
-class IS(Metric):
-    r"""
-    Calculates the Inception Score (IS) which is used to assess how realistic generated images are.
-    It is defined as
-
-    .. math::
-        IS = exp(\mathbb{E}_x KL(p(y | x ) || p(y)))
-
-    where :math:`KL(p(y | x) || p(y))` is the KL divergence between the conditional distribution :math:`p(y|x)`
-    and the marginal distribution :math:`p(y)`. Both the conditional and marginal distributions are calculated
-    from features extracted from the images. The score is calculated on random splits of the images such that
-    both a mean and standard deviation of the score are returned. The metric was originally proposed in [1].
-
-    Using the default feature extraction (Inception v3 using the original weights from [2]), the input is
-    expected to be mini-batches of 3-channel RGB images of shape (3 x H x W) with dtype uint8. All images
-    will be resized to 299 x 299 which is the size of the original training data.
-
-    .. note:: using this metric with the default feature extractor requires that ``torch-fidelity``
-        is installed. Either install as ``pip install paddlemetrics[image]`` or
-        ``pip install torch-fidelity``
-
-    .. note:: the ``forward`` method can be used but ``compute_on_step`` is disabled by default (opposite of
-        all other metrics) as this metric does not really make sense to calculate on a single batch. This
-        means that by default ``forward`` will just call ``update`` underneath.
-
-    Args:
-        feature:
-            Either a str, integer or ``nn.Module``:
-
-            - a str or integer will indicate the inceptionv3 feature layer to choose. Can be one of the following:
-              'logits_unbiased', 64, 192, 768, 2048
-            - an ``nn.Module`` for using a custom feature extractor. Expects that its forward method returns
-              an ``[N,d]`` matrix where ``N`` is the batch size and ``d`` is the feature size.
-
-        splits: integer determining how many splits the inception score calculation should be split among
-
-        compute_on_step:
-            Forward only calls ``update()`` and returns ``None`` if this is set to ``False``.
-        dist_sync_on_step:
-            Synchronize metric state across processes at each ``forward()``
-            before returning the value at the step
-        process_group:
-            Specify the process group on which synchronization is called.
-            default: ``None`` (which selects the entire world)
-        dist_sync_fn:
-            Callback that performs the allgather operation on the metric state. When ``None``, DDP
-            will be used to perform the allgather
-
-    References:
-        [1] Improved Techniques for Training GANs
-        Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, Xi Chen
-        https://arxiv.org/abs/1606.03498
-
-        [2] GANs Trained by a Two Time-Scale Update Rule Converge to a Local Nash Equilibrium,
-        Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, Sepp Hochreiter
-        https://arxiv.org/abs/1706.08500
-
-    Raises:
-        ValueError:
-            If ``feature`` is set to a ``str`` or ``int`` and ``torch-fidelity`` is not installed
-        ValueError:
-            If ``feature`` is set to a ``str`` or ``int`` and not one of ['logits_unbiased', 64, 192, 768, 2048]
-        TypeError:
-            If ``feature`` is not a ``str``, ``int`` or ``B.nn.Module``
-
-    Example:
-        >>> import paddleext.torchapi as B
-        >>> _ = B.manual_seed(123)
-        >>> from paddlemetrics import IS
-        >>> inception = IS()  # doctest: +SKIP
-        >>> # generate some images
-        >>> imgs = B.randint(0, 255, (100, 3, 299, 299), dtype=B.uint8)  # doctest: +SKIP
-        >>> inception.update(imgs)  # doctest: +SKIP
-        >>> inception.compute()  # doctest: +SKIP
-        (tensor(1.0569), tensor(0.0113))
-
-    """
-    features: List
-
-    def __init__(
-        self,
-        feature: Union[str, int, B.nn.Module] = "logits_unbiased",
-        splits: int = 10,
-        compute_on_step: bool = False,
-        dist_sync_on_step: bool = False,
-        process_group: Optional[Any] = None,
-        dist_sync_fn: Callable[[Tensor], List[Tensor]] = None,
-    ) -> None:
-        super().__init__(
-            compute_on_step=compute_on_step,
-            dist_sync_on_step=dist_sync_on_step,
-            process_group=process_group,
-            dist_sync_fn=dist_sync_fn,
-        )
-
-        rank_zero_warn(
-            "Metric `IS` will save all extracted features in buffer."
-            " For large datasets this may lead to large memory footprint.",
-            UserWarning,
-        )
-
-        if isinstance(feature, (str, int)):
-            if not _TORCH_FIDELITY_AVAILABLE:
-                raise ValueError(
-                    "IS metric requires that Torch-fidelity is installed."
-                    " Either install as `pip install paddlemetrics[image]`"
-                    " or `pip install torch-fidelity`"
-                )
-            valid_int_input = ("logits_unbiased", 64, 192, 768, 2048)
-            if feature not in valid_int_input:
-                raise ValueError(
-                    f"Integer input to argument `feature` must be one of {valid_int_input}," f" but got {feature}."
-                )
-
-            self.inception = NoTrainInceptionV3(name="inception-v3-compat", features_list=[str(feature)])
-        elif isinstance(feature, B.nn.Module):
-            self.inception = feature
-        else:
-            raise TypeError("Got unknown input to argument `feature`")
-
-        self.splits = splits
-        self.add_state("features", [], dist_reduce_fx=None)
-
-    def update(self, imgs: Tensor) -> None:  # type: ignore
-        """Update the state with extracted features.
-
-        Args:
-            imgs: tensor with images fed to the feature extractor
-        """
-        features = self.inception(imgs)
-        self.features.append(features)
-
-    def compute(self) -> Tuple[Tensor, Tensor]:
-        features = dim_zero_cat(self.features)
-        # randomly permute the features
-        idx = B.randperm(features.shape[0])
-        features = features[idx]
-
-        # calculate probs and logits
-        prob = features.softmax(dim=1)
-        log_prob = features.log_softmax(dim=1)
-
-        # split into groups
-        prob = prob.chunk(self.splits, dim=0)
-        log_prob = log_prob.chunk(self.splits, dim=0)
-
-        # calculate score per split
-        mean_prob = [p.mean(dim=0, keepdim=True) for p in prob]
-        kl_ = [p * (log_p - m_p.log()) for p, log_p, m_p in zip(prob, log_prob, mean_prob)]
-        kl_ = [k.sum(dim=1).mean().exp() for k in kl_]
-        kl = B.stack(kl_)
-
-        # return mean and std
-        return kl.mean(), kl.std()
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/image/kid.py b/EE/paddlemetric/src/build/lib/paddlemetrics/image/kid.py
deleted file mode 100644
index 2f3d3a6b7..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/image/kid.py
+++ /dev/null
@@ -1,277 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
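The split-and-KL computation in the `IS.compute` method above estimates the exponential of the mean KL divergence between per-image class distributions and the per-split marginal. An equivalent NumPy sketch, assuming logits have already been extracted (the function name is illustrative, not part of the module):

```python
import numpy as np


def inception_score_example(logits: np.ndarray, splits: int = 10):
    """IS = exp(E_x KL(p(y|x) || p(y))), estimated per split; returns (mean, std)."""
    probs = np.exp(logits - logits.max(axis=1, keepdims=True))
    probs /= probs.sum(axis=1, keepdims=True)  # softmax -> p(y|x) per image
    scores = []
    for chunk in np.array_split(probs, splits):
        marginal = chunk.mean(axis=0, keepdims=True)  # p(y) within this split
        kl = (chunk * (np.log(chunk) - np.log(marginal))).sum(axis=1)
        scores.append(np.exp(kl.mean()))  # exp of the mean KL for the split
    return float(np.mean(scores)), float(np.std(scores))


# rng = np.random.default_rng(0)
# inception_score_example(rng.normal(size=(100, 10)))  # close to 1.0 for unstructured logits
```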
-from typing import Any, Callable, List, Optional, Tuple, Union
-
-import paddleext.torchapi as B
-from paddleext.torchapi import Tensor
-from paddleext.torchapi.nn import Module
-
-from paddlemetrics.image.fid import NoTrainInceptionV3
-from paddlemetrics.metric import Metric
-from paddlemetrics.utilities import rank_zero_warn
-from paddlemetrics.utilities.data import dim_zero_cat
-from paddlemetrics.utilities.imports import _TORCH_FIDELITY_AVAILABLE
-
-
-def maximum_mean_discrepancy(k_xx: Tensor, k_xy: Tensor, k_yy: Tensor) -> Tensor:
-    """Adapted from `KID Score`_"""
-    m = k_xx.shape[0]
-
-    diag_x = B.diag(k_xx)
-    diag_y = B.diag(k_yy)
-
-    kt_xx_sums = k_xx.sum(dim=-1) - diag_x
-    kt_yy_sums = k_yy.sum(dim=-1) - diag_y
-    k_xy_sums = k_xy.sum(dim=0)
-
-    kt_xx_sum = kt_xx_sums.sum()
-    kt_yy_sum = kt_yy_sums.sum()
-    k_xy_sum = k_xy_sums.sum()
-
-    value = (kt_xx_sum + kt_yy_sum) / (m * (m - 1))
-    value -= 2 * k_xy_sum / (m ** 2)
-    return value
-
-
-def poly_kernel(f1: Tensor, f2: Tensor, degree: int = 3, gamma: Optional[float] = None, coef: float = 1.0) -> Tensor:
-    """Adapted from `KID Score`_"""
-    if gamma is None:
-        gamma = 1.0 / f1.shape[1]
-    kernel = (f1 @ f2.T * gamma + coef) ** degree
-    return kernel
-
-
-def poly_mmd(
-    f_real: Tensor, f_fake: Tensor, degree: int = 3, gamma: Optional[float] = None, coef: float = 1.0
-) -> Tensor:
-    """Adapted from `KID Score`_"""
-    k_11 = poly_kernel(f_real, f_real, degree, gamma, coef)
-    k_22 = poly_kernel(f_fake, f_fake, degree, gamma, coef)
-    k_12 = poly_kernel(f_real, f_fake, degree, gamma, coef)
-    return maximum_mean_discrepancy(k_11, k_12, k_22)
-
-
-class KID(Metric):
-    r"""
-    Calculates Kernel Inception Distance (KID) which is used to assess the quality of generated images. Given by
-
-    .. math::
-        KID = MMD(f_{real}, f_{fake})^2
-
-    where :math:`MMD` is the maximum mean discrepancy and :math:`I_{real}, I_{fake}` are extracted features
-    from real and fake images, see [1] for more details. In particular, calculating the MMD requires the
-    evaluation of a polynomial kernel function :math:`k`
-
-    .. math::
-        k(x,y) = (\gamma * x^T y + coef)^{degree}
-
-    which controls the distance between two features. In practice the MMD is calculated over a number of
-    subsets to be able to both get the mean and standard deviation of KID.
-
-    Using the default feature extraction (Inception v3 using the original weights from [2]), the input is
-    expected to be mini-batches of 3-channel RGB images of shape (3 x H x W) with dtype uint8. All images
-    will be resized to 299 x 299 which is the size of the original training data.
-
-    .. note:: using this metric with the default feature extractor requires that ``torch-fidelity``
-        is installed. Either install as ``pip install paddlemetrics[image]`` or
-        ``pip install torch-fidelity``
-
-    .. note:: the ``forward`` method can be used but ``compute_on_step`` is disabled by default (opposite of
-        all other metrics) as this metric does not really make sense to calculate on a single batch. This
-        means that by default ``forward`` will just call ``update`` underneath.
-
-    Args:
-        feature:
-            Either a str, integer or ``nn.Module``:
-
-            - a str or integer will indicate the inceptionv3 feature layer to choose. Can be one of the following:
-              'logits_unbiased', 64, 192, 768, 2048
-            - an ``nn.Module`` for using a custom feature extractor. Expects that its forward method returns
-              an ``[N,d]`` matrix where ``N`` is the batch size and ``d`` is the feature size.
-
-        subsets:
-            Number of subsets to calculate the mean and standard deviation scores over
-        subset_size:
-            Number of randomly picked samples in each subset
-        degree:
-            Degree of the polynomial kernel function
-        gamma:
-            Scale-length of polynomial kernel. If set to ``None`` will be automatically set to the feature size
-        coef:
-            Bias term in the polynomial kernel.
-        compute_on_step:
-            Forward only calls ``update()`` and returns ``None`` if this is set to ``False``.
-        dist_sync_on_step:
-            Synchronize metric state across processes at each ``forward()``
-            before returning the value at the step
-        process_group:
-            Specify the process group on which synchronization is called.
-            default: ``None`` (which selects the entire world)
-        dist_sync_fn:
-            Callback that performs the allgather operation on the metric state. When ``None``, DDP
-            will be used to perform the allgather
-
-    References:
-        [1] Demystifying MMD GANs
-        Mikołaj Bińkowski, Danica J. Sutherland, Michael Arbel, Arthur Gretton
-        https://arxiv.org/abs/1801.01401
-
-        [2] GANs Trained by a Two Time-Scale Update Rule Converge to a Local Nash Equilibrium,
-        Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, Sepp Hochreiter
-        https://arxiv.org/abs/1706.08500
-
-    Raises:
-        ValueError:
-            If ``feature`` is set to an ``int`` (default settings) and ``torch-fidelity`` is not installed
-        ValueError:
-            If ``feature`` is set to an ``int`` not in [64, 192, 768, 2048]
-        ValueError:
-            If ``subsets`` is not an integer larger than 0
-        ValueError:
-            If ``subset_size`` is not an integer larger than 0
-        ValueError:
-            If ``degree`` is not an integer larger than 0
-        ValueError:
-            If ``gamma`` is neither ``None`` nor a float larger than 0
-        ValueError:
-            If ``coef`` is not a float larger than 0
-
-    Example:
-        >>> import paddleext.torchapi as B
-        >>> _ = B.manual_seed(123)
-        >>> from paddlemetrics import KID
-        >>> kid = KID(subset_size=50)  # doctest: +SKIP
-        >>> # generate two slightly overlapping image intensity distributions
-        >>> imgs_dist1 = B.randint(0, 200, (100, 3, 299, 299), dtype=B.uint8)  # doctest: +SKIP
-        >>> imgs_dist2 = B.randint(100, 255, (100, 3, 299, 299), dtype=B.uint8)  # doctest: +SKIP
-        >>> kid.update(imgs_dist1, real=True)  # doctest: +SKIP
-        >>> kid.update(imgs_dist2, real=False)  # doctest: +SKIP
-        >>> kid_mean, kid_std = kid.compute()  # doctest: +SKIP
-        >>> print((kid_mean, kid_std))  # doctest: +SKIP
-        (tensor(0.0338), tensor(0.0025))
-
-    """
-    real_features: List[Tensor]
-    fake_features: List[Tensor]
-
-    def __init__(
-        self,
-        feature: Union[str, int, B.nn.Module] = 2048,
-        subsets: int = 100,
-        subset_size: int = 1000,
-        degree: int = 3,
-        gamma: Optional[float] = None,  # type: ignore
-        coef: float = 1.0,
-        compute_on_step: bool = False,
-        dist_sync_on_step: bool = False,
-        process_group: Optional[Any] = None,
-        dist_sync_fn: Callable = None,
-    ) -> None:
-        super().__init__(
-            compute_on_step=compute_on_step,
-            dist_sync_on_step=dist_sync_on_step,
-            process_group=process_group,
-            dist_sync_fn=dist_sync_fn,
-        )
-
-        rank_zero_warn(
-            "Metric `KID` will save all extracted features in buffer."
-            " For large datasets this may lead to large memory footprint.",
-            UserWarning,
-        )
-
-        if isinstance(feature, (str, int)):
-            if not _TORCH_FIDELITY_AVAILABLE:
-                raise RuntimeError(
-                    "KID metric requires that Torch-fidelity is installed."
- " Either install as `pip install paddlemetrics[image]`" - " or `pip install torch-fidelity`" - ) - valid_int_input = ("logits_unbiased", 64, 192, 768, 2048) - if feature not in valid_int_input: - raise ValueError( - f"Integer input to argument `feature` must be one of {valid_int_input}," f" but got {feature}." - ) - - self.inception: Module = NoTrainInceptionV3(name="inception-v3-compat", features_list=[str(feature)]) - elif isinstance(feature, Module): - self.inception = feature - else: - raise TypeError("Got unknown input to argument `feature`") - - if not (isinstance(subsets, int) and subsets > 0): - raise ValueError("Argument `subsets` expected to be integer larger than 0") - self.subsets = subsets - - if not (isinstance(subset_size, int) and subset_size > 0): - raise ValueError("Argument `subset_size` expected to be integer larger than 0") - self.subset_size = subset_size - - if not (isinstance(degree, int) and degree > 0): - raise ValueError("Argument `degree` expected to be integer larger than 0") - self.degree = degree - - if gamma is not None and not (isinstance(gamma, float) and gamma > 0): - raise ValueError("Argument `gamma` expected to be `None` or float larger than 0") - self.gamma = gamma - - if not (isinstance(coef, float) and coef > 0): - raise ValueError("Argument `coef` expected to be float larger than 0") - self.coef = coef - - # states for extracted features - self.add_state("real_features", [], dist_reduce_fx=None) - self.add_state("fake_features", [], dist_reduce_fx=None) - - def update(self, imgs: Tensor, real: bool) -> None: # type: ignore - """Update the state with extracted features. - - Args: - imgs: tensor with images feed to the feature extractor - real: bool indicating if imgs belong to the real or the fake distribution - """ - features = self.inception(imgs) - - if real: - self.real_features.append(features) - else: - self.fake_features.append(features) - - def compute(self) -> Tuple[Tensor, Tensor]: - """Calculate KID score based on accumulated extracted features from the two distributions. Returns a tuple - of mean and standard deviation of KID scores calculated on subsets of extracted features. - - Implementation inspired by `Fid Score`_ - """ - real_features = dim_zero_cat(self.real_features) - fake_features = dim_zero_cat(self.fake_features) - - n_samples_real = real_features.shape[0] - if n_samples_real < self.subset_size: - raise ValueError("Argument `subset_size` should be smaller than the number of samples") - n_samples_fake = fake_features.shape[0] - if n_samples_fake < self.subset_size: - raise ValueError("Argument `subset_size` should be smaller than the number of samples") - - kid_scores_ = [] - for _ in range(self.subsets): - perm = B.randperm(n_samples_real) - f_real = real_features[perm[: self.subset_size]] - perm = B.randperm(n_samples_fake) - f_fake = fake_features[perm[: self.subset_size]] - - o = poly_mmd(f_real, f_fake, self.degree, self.gamma, self.coef) - kid_scores_.append(o) - kid_scores = B.stack(kid_scores_) - return kid_scores.mean(), kid_scores.std(unbiased=False) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/image/lpip_similarity.py b/EE/paddlemetric/src/build/lib/paddlemetrics/image/lpip_similarity.py deleted file mode 100644 index 7cf6d03a6..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/image/lpip_similarity.py +++ /dev/null @@ -1,156 +0,0 @@ -# Copyright The PyTorch Lightning team. 
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import Any, Callable, List, Optional
-
-import paddleext.torchapi as B
-from paddleext.torchapi import Tensor
-
-from paddlemetrics.metric import Metric
-from paddlemetrics.utilities.imports import _LPIPS_AVAILABLE
-
-if _LPIPS_AVAILABLE:
-    from lpips import LPIPS as Lpips_backbone
-else:
-
-    class Lpips_backbone(B.nn.Module):  # type: ignore
-        pass
-
-
-class NoTrainLpips(Lpips_backbone):
-    def train(self, mode: bool) -> "NoTrainLpips":
-        """the network should not be able to be switched away from evaluation mode."""
-        return super().train(False)
-
-
-def _valid_img(img: Tensor) -> bool:
-    """check that input is a valid image to the network."""
-    return img.ndim == 4 and img.shape[1] == 3 and img.min() >= -1.0 and img.max() <= 1.0
-
-
-class LPIPS(Metric):
-    """The Learned Perceptual Image Patch Similarity (`LPIPS_`) is used to judge the perceptual similarity between
-    two images. LPIPS essentially computes the similarity between the activations of two image patches for some
-    pre-defined network. This measure has been shown to match human perception well. A low LPIPS score means that
-    image patches are perceptually similar.
-
-    Both input image patches are expected to have shape `[N, 3, H, W]` and be normalized to the [-1,1]
-    range. The minimum size of `H, W` depends on the chosen backbone (see `net_type` arg).
-
-    .. note:: using this metric requires you to have ``lpips`` package installed. Either install
-        as ``pip install paddlemetrics[image]`` or ``pip install lpips``
-
-    .. note:: this metric is not scriptable when using ``torch<1.8``. Please update your pytorch installation
-        if this is an issue.
-
-    Args:
-        net_type: str indicating backbone network type to use. Choose between `'alex'`, `'vgg'` or `'squeeze'`
-        reduction: str indicating how to reduce over the batch dimension. Choose between `'sum'` or `'mean'`.
-        compute_on_step:
-            Forward only calls ``update()`` and returns ``None`` if this is set to ``False``.
-        dist_sync_on_step:
-            Synchronize metric state across processes at each ``forward()``
-            before returning the value at the step
-        process_group:
-            Specify the process group on which synchronization is called.
-            default: ``None`` (which selects the entire world)
-        dist_sync_fn:
-            Callback that performs the allgather operation on the metric state. When ``None``, DDP
-            will be used to perform the allgather
-
-    Raises:
-        ValueError:
-            If ``lpips`` package is not installed
-        ValueError:
-            If ``net_type`` is not one of ``"vgg"``, ``"alex"`` or ``"squeeze"``
-        ValueError:
-            If ``reduction`` is not one of ``"mean"`` or ``"sum"``
-
-    Example:
-        >>> import paddleext.torchapi as B
-        >>> _ = B.manual_seed(123)
-        >>> from paddlemetrics import LPIPS
-        >>> lpips = LPIPS(net_type='vgg')
-        >>> img1 = B.rand(10, 3, 100, 100)
-        >>> img2 = B.rand(10, 3, 100, 100)
-        >>> lpips(img1, img2)
-        tensor([0.3566], grad_fn=<SqueezeBackward0>)
-    """
-
-    is_differentiable = True
-    real_features: List[Tensor]
-    fake_features: List[Tensor]
-
-    # due to the use of named tuple in the backbone the net variable cannot be scripted
-    __jit_ignored_attributes__ = ["net"]
-
-    def __init__(
-        self,
-        net_type: str = "alex",
-        reduction: str = "mean",
-        compute_on_step: bool = True,
-        dist_sync_on_step: bool = False,
-        process_group: Optional[Any] = None,
-        dist_sync_fn: Callable[[Tensor], List[Tensor]] = None,
-    ) -> None:
-        super().__init__(
-            compute_on_step=compute_on_step,
-            dist_sync_on_step=dist_sync_on_step,
-            process_group=process_group,
-            dist_sync_fn=dist_sync_fn,
-        )
-
-        if not _LPIPS_AVAILABLE:
-            raise ValueError(
-                "LPIPS metric requires that lpips is installed."
-                " Either install as `pip install paddlemetrics[image]` or `pip install lpips`"
-            )
-
-        valid_net_type = ("vgg", "alex", "squeeze")
-        if net_type not in valid_net_type:
-            raise ValueError(f"Argument `net_type` must be one of {valid_net_type}, but got {net_type}.")
-        self.net = NoTrainLpips(net=net_type, verbose=False)
-
-        valid_reduction = ("mean", "sum")
-        if reduction not in valid_reduction:
-            raise ValueError(f"Argument `reduction` must be one of {valid_reduction}, but got {reduction}")
-        self.reduction = reduction
-
-        self.add_state("sum_scores", B.zeros(1), dist_reduce_fx="sum")
-        self.add_state("total", B.zeros(1), dist_reduce_fx="sum")
-
-    def update(self, img1: Tensor, img2: Tensor) -> None:  # type: ignore
-        """Update internal states with lpips score.
-
-        Args:
-            img1: tensor with images of shape [N, 3, H, W]
-            img2: tensor with images of shape [N, 3, H, W]
-        """
-        if not (_valid_img(img1) and _valid_img(img2)):
-            raise ValueError(
-                "Expected both input arguments to be normalized tensors (all values in range [-1,1])"
-                f" and to have shape [N, 3, H, W] but `img1` has shape {img1.shape} with values in"
-                f" range {[img1.min(), img1.max()]} and `img2` has shape {img2.shape} with values"
-                f" in range {[img2.min(), img2.max()]}"
-            )
-
-        loss = self.net(img1, img2).squeeze()
-        self.sum_scores += loss.sum()
-        self.total += img1.shape[0]
-
-    def compute(self) -> Tensor:
-        """Compute final perceptual similarity metric."""
-        if self.reduction == "mean":
-            return self.sum_scores / self.total
-        if self.reduction == "sum":
-            return self.sum_scores
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/image/psnr.py b/EE/paddlemetric/src/build/lib/paddlemetrics/image/psnr.py
deleted file mode 100644
index 3226203d7..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/image/psnr.py
+++ /dev/null
@@ -1,147 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import Any, Optional, Sequence, Tuple, Union
-
-import paddleext.torchapi as B
-from paddleext.torchapi import Tensor, tensor
-
-from paddlemetrics.functional.image.psnr import _psnr_compute, _psnr_update
-from paddlemetrics.metric import Metric
-from paddlemetrics.utilities import rank_zero_warn
-
-
-class PSNR(Metric):
-    r"""
-    Computes `Computes Peak Signal-to-Noise Ratio`_ (PSNR):
-
-    .. math:: \text{PSNR}(I, J) = 10 * \log_{10} \left(\frac{\max(I)^2}{\text{MSE}(I, J)}\right)
-
-    Where :math:`\text{MSE}` denotes the `mean-squared-error`_ function.
-
-    Args:
-        data_range:
-            the range of the data. If None, it is determined from the data (max - min).
-            The ``data_range`` must be given when ``dim`` is not None.
-        base: a base of a logarithm to use (default: 10)
-        reduction: a method to reduce metric score over labels.
-
-            - ``'elementwise_mean'``: takes the mean (default)
-            - ``'sum'``: takes the sum
-            - ``'none'``: no reduction will be applied
-
-        dim:
-            Dimensions to reduce PSNR scores over, provided as either an integer or a list of integers. Default is
-            None meaning scores will be reduced across all dimensions and all batches.
-        compute_on_step:
-            Forward only calls ``update()`` and returns None if this is set to False. default: True
-        dist_sync_on_step:
-            Synchronize metric state across processes at each ``forward()``
-            before returning the value at the step. default: False
-        process_group:
-            Specify the process group on which synchronization is called. default: None (which selects the entire world)
-
-    Raises:
-        ValueError:
-            If ``dim`` is not ``None`` and ``data_range`` is not given.
-
-    Example:
-        >>> from paddlemetrics import PSNR
-        >>> psnr = PSNR()
-        >>> preds = B.tensor([[0.0, 1.0], [2.0, 3.0]])
-        >>> target = B.tensor([[3.0, 2.0], [1.0, 0.0]])
-        >>> psnr(preds, target)
-        tensor(2.5527)
-
-    .. note::
-        Half precision is only supported on GPU for this metric
-
-    """
-    min_target: Tensor
-    max_target: Tensor
-
-    def __init__(
-        self,
-        data_range: Optional[float] = None,
-        base: float = 10.0,
-        reduction: str = "elementwise_mean",
-        dim: Optional[Union[int, Tuple[int, ...]]] = None,
-        compute_on_step: bool = True,
-        dist_sync_on_step: bool = False,
-        process_group: Optional[Any] = None,
-    ) -> None:
-        super().__init__(
-            compute_on_step=compute_on_step,
-            dist_sync_on_step=dist_sync_on_step,
-            process_group=process_group,
-        )
-
-        if dim is None and reduction != "elementwise_mean":
-            rank_zero_warn(f"The `reduction={reduction}` will not have any effect when `dim` is None.")
-
-        if dim is None:
-            self.add_state("sum_squared_error", default=tensor(0.0), dist_reduce_fx="sum")
-            self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
-        else:
-            self.add_state("sum_squared_error", default=[])
-            self.add_state("total", default=[])
-
-        if data_range is None:
-            if dim is not None:
-                # Maybe we could use `B.amax(target, dim=dim) - B.amin(target, dim=dim)` in PyTorch 1.7 to
-                # calculate `data_range` in the future.
- raise ValueError("The `data_range` must be given when `dim` is not None.") - - self.data_range = None - self.add_state("min_target", default=tensor(0.0), dist_reduce_fx=B.min) - self.add_state("max_target", default=tensor(0.0), dist_reduce_fx=B.max) - else: - self.add_state("data_range", default=tensor(float(data_range)), dist_reduce_fx="mean") - self.base = base - self.reduction = reduction - self.dim = tuple(dim) if isinstance(dim, Sequence) else dim - - def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore - """Update state with predictions and targets. - - Args: - preds: Predictions from model - target: Ground truth values - """ - sum_squared_error, n_obs = _psnr_update(preds, target, dim=self.dim) - if self.dim is None: - if self.data_range is None: - # keep track of min and max target values - self.min_target = min(target.min(), self.min_target) - self.max_target = max(target.max(), self.max_target) - - self.sum_squared_error += sum_squared_error - self.total += n_obs - else: - self.sum_squared_error.append(sum_squared_error) - self.total.append(n_obs) - - def compute(self) -> Tensor: - """Compute peak signal-to-noise ratio over state.""" - if self.data_range is not None: - data_range = self.data_range - else: - data_range = self.max_target - self.min_target - - if self.dim is None: - sum_squared_error = self.sum_squared_error - total = self.total - else: - sum_squared_error = B.cat([values.flatten() for values in self.sum_squared_error]) - total = B.cat([values.flatten() for values in self.total]) - return _psnr_compute(sum_squared_error, total, data_range, base=self.base, reduction=self.reduction) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/image/ssim.py b/EE/paddlemetric/src/build/lib/paddlemetrics/image/ssim.py deleted file mode 100644 index f34a19b1c..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/image/ssim.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, List, Optional, Sequence - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.functional.image.ssim import _ssim_compute, _ssim_update -from paddlemetrics.metric import Metric -from paddlemetrics.utilities import rank_zero_warn -from paddlemetrics.utilities.data import dim_zero_cat - - -class SSIM(Metric): - """Computes Structural Similarity Index Measure (SSIM_). - - Args: - kernel_size: size of the gaussian kernel (default: (11, 11)) - sigma: Standard deviation of the gaussian kernel (default: (1.5, 1.5)) - reduction: a method to reduce metric score over labels. - - - ``'elementwise_mean'``: takes the mean (default) - - ``'sum'``: takes the sum - - ``'none'``: no reduction will be applied - - data_range: Range of the image. If ``None``, it is determined from the image (max - min) - k1: Parameter of SSIM. Default: 0.01 - k2: Parameter of SSIM.
Default: 0.03 - - Return: - Tensor with SSIM score - - Example: - >>> from paddlemetrics import SSIM - >>> preds = B.rand([16, 1, 16, 16]) - >>> target = preds * 0.75 - >>> ssim = SSIM() - >>> ssim(preds, target) - tensor(0.9219) - """ - - preds: List[Tensor] - target: List[Tensor] - - def __init__( - self, - kernel_size: Sequence[int] = (11, 11), - sigma: Sequence[float] = (1.5, 1.5), - reduction: str = "elementwise_mean", - data_range: Optional[float] = None, - k1: float = 0.01, - k2: float = 0.03, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - ) -> None: - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - ) - rank_zero_warn( - "Metric `SSIM` will save all targets and" - " predictions in buffer. For large datasets this may lead" - " to large memory footprint." - ) - - self.add_state("preds", default=[], dist_reduce_fx="cat") - self.add_state("target", default=[], dist_reduce_fx="cat") - self.kernel_size = kernel_size - self.sigma = sigma - self.data_range = data_range - self.k1 = k1 - self.k2 = k2 - self.reduction = reduction - - def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore - """Update state with predictions and targets. - - Args: - preds: Predictions from model - target: Ground truth values - """ - preds, target = _ssim_update(preds, target) - self.preds.append(preds) - self.target.append(target) - - def compute(self) -> Tensor: - """Computes SSIM over state.""" - preds = dim_zero_cat(self.preds) - target = dim_zero_cat(self.target) - return _ssim_compute( - preds, target, self.kernel_size, self.sigma, self.reduction, self.data_range, self.k1, self.k2 - ) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/metric.py b/EE/paddlemetric/src/build/lib/paddlemetrics/metric.py deleted file mode 100644 index 21c2148ba..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/metric.py +++ /dev/null @@ -1,775 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
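
Because SSIM above keeps every batch in list states (dist_reduce_fx="cat"), memory grows with dataset size, which is what the rank_zero_warn message is about. A hedged usage sketch, assuming the SSIM class from the file above:

import paddleext.torchapi as B
from paddlemetrics import SSIM

ssim = SSIM(kernel_size=(11, 11), sigma=(1.5, 1.5))
for _ in range(3):                    # three batches
    preds = B.rand([4, 1, 16, 16])
    ssim.update(preds, preds * 0.75)  # each call appends to the preds/target buffers
score = ssim.compute()                # concatenates all batches, then scores
ssim.reset()                          # frees the accumulated buffers
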
-import functools -import inspect -import operator as op -from abc import ABC, abstractmethod -from collections.abc import Sequence -from contextlib import contextmanager -from copy import deepcopy -from typing import Any, Callable, Dict, Generator, List, Optional, Union - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor, Module - -from paddlemetrics.utilities import apply_to_collection, rank_zero_warn -from paddlemetrics.utilities.data import _flatten, dim_zero_cat, dim_zero_max, dim_zero_mean, dim_zero_min, dim_zero_sum -from paddlemetrics.utilities.distributed import gather_all_tensors -from paddlemetrics.utilities.exceptions import paddlemetricsUserError -from paddlemetrics.utilities.imports import _LIGHTNING_AVAILABLE, _compare_version - - -def jit_distributed_available() -> bool: - return B.distributed.is_available() and B.distributed.is_initialized() - - -class Metric(Module): - """Base class for all metrics present in the Metrics API. - - Implements ``add_state()``, ``forward()``, ``reset()`` and a few other things to - handle distributed synchronization and per-step metric computation. - - Override ``update()`` and ``compute()`` functions to implement your own metric. Use - ``add_state()`` to register metric state variables which keep track of state on each - call of ``update()`` and are synchronized across processes when ``compute()`` is called. - - Note: - Metric state variables can either be ``B.Tensor``s or an empty list which can be used - to store ``B.Tensor``s. - - Note: - Different metrics only override ``update()`` and not ``forward()``. A call to ``update()`` - is valid, but it won't return the metric value at the current step. A call to ``forward()`` - automatically calls ``update()`` and also returns the metric value at the current step. - - Args: - compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. - process_group: - Specify the process group on which synchronization is called. default: None (which selects the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. When `None`, DDP - will be used to perform the allgather.
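
A minimal subclass sketch of the contract just described: register states with add_state(), accumulate in update(), reduce in compute(). The MeanError name and its states are hypothetical, not part of the library:

import paddleext.torchapi as B
from paddleext.torchapi import Tensor
from paddlemetrics.metric import Metric

class MeanError(Metric):
    def __init__(self) -> None:
        super().__init__()
        # states are synchronized across processes with the given reduction
        self.add_state("sum_error", default=B.tensor(0.0), dist_reduce_fx="sum")
        self.add_state("total", default=B.tensor(0), dist_reduce_fx="sum")

    def update(self, preds: Tensor, target: Tensor) -> None:
        self.sum_error += (preds - target).abs().sum()
        self.total += target.numel()

    def compute(self) -> Tensor:
        return self.sum_error / self.total
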
- """ - - __jit_ignored_attributes__ = ["device"] - __jit_unused_properties__ = ["is_differentiable"] - is_differentiable: Optional[bool] = None - higher_is_better: Optional[bool] = None - - def __init__( - self, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ) -> None: - super().__init__() - - # see (https://github.com/pytorch/pytorch/blob/3e6bb5233f9ca2c5aa55d9cda22a7ee85439aa6e/ - # B.nn/modules/module.py#L227) -# B._C._log_api_usage_once(f"paddlemetrics.metric.{self.__class__.__name__}") - -# self._LIGHTNING_GREATER_EQUAL_1_3 = _compare_version("pytorch_lightning", op.ge, "1.3.0") - self._device = B.device("cpu") - - self.dist_sync_on_step = dist_sync_on_step - self.compute_on_step = compute_on_step - self.process_group = process_group - self.dist_sync_fn = dist_sync_fn - self._to_sync = True - self._should_unsync = True - - self._update_signature = inspect.signature(self.update) - self.update: Callable = self._wrap_update(self.update) # type: ignore - self.compute: Callable = self._wrap_compute(self.compute) # type: ignore - self._computed = None - self._forward_cache = None - self._update_called = False - - # initialize state - self._defaults: Dict[str, Union[List, Tensor]] = {} - self._persistent: Dict[str, bool] = {} - self._reductions: Dict[str, Union[str, Callable[[Union[List[Tensor], Tensor]], Tensor], None]] = {} - - # state management - self._is_synced = False - self._cache: Optional[Dict[str, Union[List[Tensor], Tensor]]] = None - - def to(self, *args, **kwargs): - - return self - # result = super().to(*args, **kwargs) - # - # return result if result is not None else self - - def add_state( - self, - name: str, - default: Union[list, Tensor], - dist_reduce_fx: Optional[Union[str, Callable]] = None, - persistent: bool = False, - ) -> None: - """Adds metric state variable. Only used by subclasses. - - Args: - name: The name of the state variable. The variable will then be accessible at ``self.name``. - default: Default value of the state; can either be a ``B.Tensor`` or an empty list. The state will be - reset to this value when ``self.reset()`` is called. - dist_reduce_fx (Optional): Function to reduce state across multiple processes in distributed mode. - If value is ``"sum"``, ``"mean"``, ``"cat"``, ``"min"`` or ``"max"`` we will use ``B.sum``, - ``B.mean``, ``B.cat``, ``B.min`` and ``B.max``` respectively, each with argument - ``dim=0``. Note that the ``"cat"`` reduction only makes sense if the state is a list, and not - a tensor. The user can also pass a custom function in this parameter. - persistent (Optional): whether the state will be saved as part of the modules ``state_dict``. - Default is ``False``. - - Note: - Setting ``dist_reduce_fx`` to None will return the metric state synchronized across different processes. - However, there won't be any reduction function applied to the synchronized metric state. - - The metric states would be synced as follows - - - If the metric state is ``B.Tensor``, the synced value will be a stacked ``B.Tensor`` across - the process dimension if the metric state was a ``B.Tensor``. The original ``B.Tensor`` metric - state retains dimension and hence the synchronized output will be of shape ``(num_process, ...)``. - - - If the metric state is a ``list``, the synced value will be a ``list`` containing the - combined elements from all processes. 
- - Note: - When passing a custom function to ``dist_reduce_fx``, expect the synchronized metric state to follow - the format discussed in the above note. - - Raises: - ValueError: - If ``default`` is not a ``tensor`` or an ``empty list``. - ValueError: - If ``dist_reduce_fx`` is not callable or one of ``"mean"``, ``"sum"``, ``"cat"``, ``"min"``, ``"max"``, ``None``. - """ - if not isinstance(default, (Tensor, list)) or (isinstance(default, list) and default): - raise ValueError(f"state variable must be a tensor or an empty list (where you can append tensors): {type(default)}") - - if dist_reduce_fx == "sum": - dist_reduce_fx = dim_zero_sum - elif dist_reduce_fx == "mean": - dist_reduce_fx = dim_zero_mean - elif dist_reduce_fx == "max": - dist_reduce_fx = dim_zero_max - elif dist_reduce_fx == "min": - dist_reduce_fx = dim_zero_min - elif dist_reduce_fx == "cat": - dist_reduce_fx = dim_zero_cat - elif dist_reduce_fx is not None and not callable(dist_reduce_fx): - raise ValueError("`dist_reduce_fx` must be callable or one of ['mean', 'sum', 'cat', 'min', 'max', None]") - - if isinstance(default, Tensor): - default = default.contiguous() - - setattr(self, name, default) - - self._defaults[name] = deepcopy(default) - self._persistent[name] = persistent - self._reductions[name] = dist_reduce_fx - -# @B.jit.unused - def forward(self, *args: Any, **kwargs: Any) -> Any: - """Automatically calls ``update()``. - - Returns the metric value over inputs if ``compute_on_step`` is True. - """ - # add current step - if self._is_synced: - raise paddlemetricsUserError( - "The Metric shouldn't be synced when performing ``update``. " - "HINT: Did you forget to call ``unsync``?" - ) - - with B.no_grad(): - self.update(*args, **kwargs) - - if self.compute_on_step: - self._to_sync = self.dist_sync_on_step - # skip restore cache operation from compute as cache is stored below.
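# In words, the sequence below: (1) snapshot the accumulated states, (2) reset and
# run update()+compute() on just this batch to get the batch-level value, (3) restore
# the snapshot so global accumulation continues unaffected. The update() call above
# has already folded this batch into the global states.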
- self._should_unsync = False - - # save context before switch - cache = {attr: getattr(self, attr) for attr in self._defaults} - - # call reset, update, compute, on single batch - self.reset() - self.update(*args, **kwargs) - self._forward_cache = self.compute() - - # restore context - for attr, val in cache.items(): - setattr(self, attr, val) - self._is_synced = False - - self._should_unsync = True - self._to_sync = True - self._computed = None - - return self._forward_cache - - def _sync_dist(self, dist_sync_fn: Callable = gather_all_tensors, process_group: Optional[Any] = None) -> None: - input_dict = {attr: getattr(self, attr) for attr in self._reductions} - - for attr, reduction_fn in self._reductions.items(): - # pre-concatenate metric states that are lists to reduce number of all_gather operations - if reduction_fn == dim_zero_cat and isinstance(input_dict[attr], list) and len(input_dict[attr]) > 1: - input_dict[attr] = [dim_zero_cat(input_dict[attr])] - - output_dict = apply_to_collection( - input_dict, - Tensor, - dist_sync_fn, - group=process_group or self.process_group, - ) - - for attr, reduction_fn in self._reductions.items(): - # pre-processing ops (stack or flatten for inputs) - if isinstance(output_dict[attr][0], Tensor): - output_dict[attr] = B.stack(output_dict[attr]) - elif isinstance(output_dict[attr][0], list): - output_dict[attr] = _flatten(output_dict[attr]) - - if not (callable(reduction_fn) or reduction_fn is None): - raise TypeError("reduction_fn must be callable or None") - reduced = reduction_fn(output_dict[attr]) if reduction_fn is not None else output_dict[attr] - setattr(self, attr, reduced) - - def _wrap_update(self, update: Callable) -> Callable: - @functools.wraps(update) - def wrapped_func(*args: Any, **kwargs: Any) -> Optional[Any]: - self._computed = None - self._update_called = True - return update(*args, **kwargs) - - return wrapped_func - - def sync( - self, - dist_sync_fn: Optional[Callable] = None, - process_group: Optional[Any] = None, - should_sync: bool = True, - distributed_available: Optional[Callable] = jit_distributed_available, - ) -> None: - """Sync function for manually controlling when metrics states should be synced across processes. - - Args: - dist_sync_fn: Function to be used to perform states synchronization - process_group: - Specify the process group on which synchronization is called. - default: None (which selects the entire world) - should_sync: Whether to apply to state synchronization. This will have an impact - only when running in a distributed setting. - distributed_available: Function to determine if we are running inside a distributed setting - """ - if self._is_synced and should_sync: - raise paddlemetricsUserError("The Metric has already been synced.") - - is_distributed = distributed_available() if callable(distributed_available) else None - - if not should_sync or not is_distributed: - return - - if dist_sync_fn is None: - dist_sync_fn = gather_all_tensors - - # cache prior to syncing - self._cache = {attr: getattr(self, attr) for attr in self._defaults} - - # sync - self._sync_dist(dist_sync_fn, process_group=process_group) - self._is_synced = True - - def unsync(self, should_unsync: bool = True) -> None: - """Unsync function for manually controlling when metrics states should be reverted back to their local - states. 
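
A hedged sketch of the manual synchronization protocol described here; sync_context() is used because it is a no-op outside a distributed run, and the state names come from the MeanSquaredError metric later in this patch:

import paddleext.torchapi as B
from paddlemetrics import MeanSquaredError

metric = MeanSquaredError()
metric.update(B.tensor([3.0, 5.0]), B.tensor([2.5, 5.0]))
with metric.sync_context():                # gathers and reduces states if distributed
    synced_sse = metric.sum_squared_error  # states hold the synced values here
# on exit, unsync() restores the local un-synced states so accumulation continues
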
- - Args: - should_unsync: Whether to perform unsync - """ - if not should_unsync: - return - - if not self._is_synced: - raise paddlemetricsUserError("The Metric has already been un-synced.") - - if self._cache is None: - raise paddlemetricsUserError("The internal cache should exist to unsync the Metric.") - - # if we synced, restore to cache so that we can continue to accumulate un-synced state - for attr, val in self._cache.items(): - setattr(self, attr, val) - self._is_synced = False - self._cache = None - - @contextmanager - def sync_context( - self, - dist_sync_fn: Optional[Callable] = None, - process_group: Optional[Any] = None, - should_sync: bool = True, - should_unsync: bool = True, - distributed_available: Optional[Callable] = jit_distributed_available, - ) -> Generator: - """Context manager to synchronize the states between processes when running in a distributed setting and - restore the local cache states after yielding. - - Args: - dist_sync_fn: Function to be used to perform states synchronization - process_group: - Specify the process group on which synchronization is called. - default: None (which selects the entire world) - should_sync: Whether to apply to state synchronization. This will have an impact - only when running in a distributed setting. - should_unsync: Whether to restore the cache state so that the metrics can - continue to be accumulated. - distributed_available: Function to determine if we are running inside a distributed setting - """ - self.sync( - dist_sync_fn=dist_sync_fn, - process_group=process_group, - should_sync=should_sync, - distributed_available=distributed_available, - ) - - yield - - self.unsync(should_unsync=self._is_synced and should_unsync) - - def _wrap_compute(self, compute: Callable) -> Callable: - @functools.wraps(compute) - def wrapped_func(*args: Any, **kwargs: Any) -> Any: - if not self._update_called: - rank_zero_warn( - f"The ``compute`` method of metric {self.__class__.__name__}" - " was called before the ``update`` method which may lead to errors," - " as metric states have not yet been updated.", - UserWarning, - ) - - # return cached value - if self._computed is not None: - return self._computed - - # compute relies on the sync context manager to gather the states across processes and apply reduction - # if synchronization happened, the current rank accumulated states will be restored to keep - # accumulation going if ``should_unsync=True``, - with self.sync_context( - dist_sync_fn=self.dist_sync_fn, should_sync=self._to_sync, should_unsync=self._should_unsync - ): - self._computed = compute(*args, **kwargs) - - return self._computed - - return wrapped_func - - @abstractmethod - def update(self, *_: Any, **__: Any) -> None: - """Override this method to update the state variables of your metric class.""" - - @abstractmethod - def compute(self) -> Any: - """Override this method to compute the final metric value from state variables synchronized across the - distributed backend.""" - - def reset(self) -> None: - """This method automatically resets the metric state variables to their default value.""" - self._update_called = False - self._forward_cache = None - # lower lightning versions requires this implicitly to log metric objects correctly in self.log -# if not _LIGHTNING_AVAILABLE or self._LIGHTNING_GREATER_EQUAL_1_3: - self._computed = None - - for attr, default in self._defaults.items(): - current_val = getattr(self, attr) - if isinstance(default, Tensor): - setattr(self, attr, 
default.detach().clone().to(current_val.device)) - else: - setattr(self, attr, []) - - # reset internal states - self._cache = None - self._is_synced = False - - def clone(self) -> "Metric": - """Make a copy of the metric.""" - return deepcopy(self) - - def __getstate__(self) -> Dict[str, Any]: - # ignore update and compute functions for pickling - return {k: v for k, v in self.__dict__.items() if k not in ["update", "compute", "_update_signature"]} - - def __setstate__(self, state: Dict[str, Any]) -> None: - # manually restore update and compute functions for pickling - self.__dict__.update(state) - self._update_signature = inspect.signature(self.update) - self.update: Callable = self._wrap_update(self.update) # type: ignore - self.compute: Callable = self._wrap_compute(self.compute) # type: ignore - - def __setattr__(self, name: str, value: Any) -> None: - if name in ("higher_is_better", "is_differentiable"): - raise RuntimeError(f"Can't change const `{name}`.") - super().__setattr__(name, value) - - @property - def device(self) -> "B.device": - """Return the device of the metric.""" - return self._device - - def type(self, dst_type: Union[str, B.dtype]) -> "Metric": - """Overrides the default and prevents dtype casting. - - Please use `metric.set_dtype(dtype)` instead. - """ - return self - - def float(self) -> "Metric": - """Overrides the default and prevents dtype casting. - - Please use `metric.set_dtype(dtype)` instead. - """ - return self - - def double(self) -> "Metric": - """Overrides the default and prevents dtype casting. - - Please use `metric.set_dtype(dtype)` instead. - """ - return self - - def half(self) -> "Metric": - """Overrides the default and prevents dtype casting. - - Please use `metric.set_dtype(dtype)` instead. - """ - return self - - def set_dtype(self, dst_type: Union[str, B.dtype]) -> None: - """Special version of `type` for transferring all metric states to a specific dtype. - Arguments: - dst_type (type or string): the desired type - """ - return super().type(dst_type) - - def _apply(self, fn: Callable, *args, **kwargs) -> Module: - """Overwrite _apply function such that we can also move metric states to the correct device when `.to`, - `.cuda`, etc methods are called.""" - this = super()._apply(fn, *args, **kwargs) - if this is None: # for paddle - this = self - # Also apply fn to metric states and defaults - for key, value in this._defaults.items(): - if isinstance(value, Tensor): - this._defaults[key] = fn(value, *args, **kwargs) - elif isinstance(value, Sequence): - this._defaults[key] = [fn(v, *args, **kwargs) for v in value] - - current_val = getattr(this, key) - if isinstance(current_val, Tensor): - setattr(this, key, fn(current_val, *args, **kwargs)) - elif isinstance(current_val, Sequence): - setattr(this, key, [fn(cur_v, *args, **kwargs) for cur_v in current_val]) - else: - raise TypeError( - "Expected metric state to be either a Tensor " f"or a list of Tensor, but encountered {current_val}" - ) - - # make sure to update the device attribute - # if the dummy tensor moves device by fn function we should also update the attribute - self._device = fn(B.zeros(1, device=self.device), *args, **kwargs).device - - # Additional apply to forward cache and computed attributes (may be nested) - if this._computed is not None: - this._computed = apply_to_collection(this._computed, Tensor, fn) - if this._forward_cache is not None: - this._forward_cache = apply_to_collection(this._forward_cache, Tensor, fn) - - return this - - def persistent(self, mode: bool =
False) -> None: - """Method for post-init to change if metric states should be saved to its state_dict.""" - for key in self._persistent: - self._persistent[key] = mode - - def state_dict( - self, - destination: Dict[str, Any] = None, - prefix: str = "", - keep_vars: bool = False, - ) -> Optional[Dict[str, Any]]: - destination = super().state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars) - # Register metric states to be part of the state_dict - for key in self._defaults: - if not self._persistent[key]: - continue - current_val = getattr(self, key) - if not keep_vars: - if isinstance(current_val, Tensor): - current_val = current_val.detach() - elif isinstance(current_val, list): - current_val = [cur_v.detach() if isinstance(cur_v, Tensor) else cur_v for cur_v in current_val] - destination[prefix + key] = deepcopy(current_val) # type: ignore - return destination - - def _load_from_state_dict( - self, - state_dict: dict, - prefix: str, - local_metadata: dict, - strict: bool, - missing_keys: List[str], - unexpected_keys: List[str], - error_msgs: List[str], - ) -> None: - """Loads metric states from state_dict.""" - - for key in self._defaults: - name = prefix + key - if name in state_dict: - setattr(self, key, state_dict.pop(name)) - super()._load_from_state_dict( - state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs - ) - - def _filter_kwargs(self, **kwargs: Any) -> Dict[str, Any]: - """Filter kwargs such that they match the update signature of the metric.""" - - # filter all parameters based on update signature except those of - # type VAR_POSITIONAL (*args) and VAR_KEYWORD (**kwargs) - _params = (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD) - _sign_params = self._update_signature.parameters - filtered_kwargs = { - k: v for k, v in kwargs.items() if (k in _sign_params.keys() and _sign_params[k].kind not in _params) - } - - # if no kwargs filtered, return all kwargs as default - if not filtered_kwargs: - filtered_kwargs = kwargs - return filtered_kwargs - - def __hash__(self) -> int: - # we need to add the id here, since PyTorch requires a module hash to be unique. - # Internally, PyTorch nn.Module relies on that for children discovery - # (see https://github.com/pytorch/pytorch/blob/v1.9.0/B.nn/modules/module.py#L1544) - # For metrics that include tensors it is not a problem, - # since their hash is unique based on the memory location but we cannot rely on that for every metric.
- hash_vals = [self.__class__.__name__, id(self)] - - for key in self._defaults: - val = getattr(self, key) - # Special case: allow list values, so long - # as their elements are hashable - if hasattr(val, "__iter__") and not isinstance(val, Tensor): - hash_vals.extend(val) - else: - hash_vals.append(val) - - return hash(tuple(hash_vals)) - - def __add__(self, other: "Metric") -> "Metric": - return CompositionalMetric(B.add, self, other) - - def __and__(self, other: "Metric") -> "Metric": - return CompositionalMetric(B.bitwise_and, self, other) - - # Fixme: this shall return bool instead of Metric - def __eq__(self, other: "Metric") -> "Metric": # type: ignore - return CompositionalMetric(B.eq, self, other) - - def __floordiv__(self, other: "Metric") -> "Metric": - return CompositionalMetric(B.floor_divide, self, other) - - def __ge__(self, other: "Metric") -> "Metric": - return CompositionalMetric(B.ge, self, other) - - def __gt__(self, other: "Metric") -> "Metric": - return CompositionalMetric(B.gt, self, other) - - def __le__(self, other: "Metric") -> "Metric": - return CompositionalMetric(B.le, self, other) - - def __lt__(self, other: "Metric") -> "Metric": - return CompositionalMetric(B.lt, self, other) - - def __matmul__(self, other: "Metric") -> "Metric": - return CompositionalMetric(B.matmul, self, other) - - def __mod__(self, other: "Metric") -> "Metric": - return CompositionalMetric(B.fmod, self, other) - - def __mul__(self, other: "Metric") -> "Metric": - return CompositionalMetric(B.mul, self, other) - - # Fixme: this shall return bool instead of Metric - def __ne__(self, other: "Metric") -> "Metric": # type: ignore - return CompositionalMetric(B.ne, self, other) - - def __or__(self, other: "Metric") -> "Metric": - return CompositionalMetric(B.bitwise_or, self, other) - - def __pow__(self, other: "Metric") -> "Metric": - return CompositionalMetric(B.pow, self, other) - - def __radd__(self, other: "Metric") -> "Metric": - return CompositionalMetric(B.add, other, self) - - def __rand__(self, other: "Metric") -> "Metric": - # swap them since bitwise_and only supports that way and it's commutative - return CompositionalMetric(B.bitwise_and, self, other) - - def __rfloordiv__(self, other: "Metric") -> "Metric": - return CompositionalMetric(B.floor_divide, other, self) - - def __rmatmul__(self, other: "Metric") -> "Metric": - return CompositionalMetric(B.matmul, other, self) - - def __rmod__(self, other: "Metric") -> "Metric": - return CompositionalMetric(B.fmod, other, self) - - def __rmul__(self, other: "Metric") -> "Metric": - return CompositionalMetric(B.mul, other, self) - - def __ror__(self, other: "Metric") -> "Metric": - return CompositionalMetric(B.bitwise_or, other, self) - - def __rpow__(self, other: "Metric") -> "Metric": - return CompositionalMetric(B.pow, other, self) - - def __rsub__(self, other: "Metric") -> "Metric": - return CompositionalMetric(B.sub, other, self) - - def __rtruediv__(self, other: "Metric") -> "Metric": - return CompositionalMetric(B.true_divide, other, self) - - def __rxor__(self, other: "Metric") -> "Metric": - return CompositionalMetric(B.bitwise_xor, other, self) - - def __sub__(self, other: "Metric") -> "Metric": - return CompositionalMetric(B.sub, self, other) - - def __truediv__(self, other: "Metric") -> "Metric": - return CompositionalMetric(B.true_divide, self, other) - - def __xor__(self, other: "Metric") -> "Metric": - return CompositionalMetric(B.bitwise_xor, self, other) - - def __abs__(self) -> "Metric": - return 
CompositionalMetric(B.abs, self, None) - - def __inv__(self) -> "Metric": - return CompositionalMetric(B.bitwise_not, self, None) - - def __invert__(self) -> "Metric": - return self.__inv__() - - def __neg__(self) -> "Metric": - return CompositionalMetric(_neg, self, None) - - def __pos__(self) -> "Metric": - return CompositionalMetric(B.abs, self, None) - - def __getitem__(self, idx: int) -> "Metric": - return CompositionalMetric(lambda x: x[idx], self, None) - - -def _neg(x: Tensor) -> Tensor: - return -B.abs(x) - - -class CompositionalMetric(Metric): - """Composition of two metrics with a specific operator which will be executed upon metrics compute.""" - - def __init__( - self, - operator: Callable, - metric_a: Union[Metric, int, float, Tensor], - metric_b: Union[Metric, int, float, Tensor, None], - ) -> None: - """ - Args: - operator: the operator taking in one (if metric_b is None) - or two arguments. Will be applied to outputs of metric_a.compute() - and (optionally if metric_b is not None) metric_b.compute() - metric_a: first metric whose compute() result is the first argument of operator - metric_b: second metric whose compute() result is the second argument of operator. - For operators taking in only one input, this should be None - """ - super().__init__() - - self.op = operator - - if isinstance(metric_a, Tensor): - self.register_buffer("metric_a", metric_a) - else: - self.metric_a = metric_a - - if isinstance(metric_b, Tensor): - self.register_buffer("metric_b", metric_b) - else: - self.metric_b = metric_b - - def _sync_dist(self, dist_sync_fn: Optional[Callable] = None, process_group: Optional[Any] = None) -> None: - # No syncing required here. syncing will be done in metric_a and metric_b - pass - - def update(self, *args: Any, **kwargs: Any) -> None: - if isinstance(self.metric_a, Metric): - self.metric_a.update(*args, **self.metric_a._filter_kwargs(**kwargs)) - - if isinstance(self.metric_b, Metric): - self.metric_b.update(*args, **self.metric_b._filter_kwargs(**kwargs)) - - def compute(self) -> Any: - - # also some parsing for kwargs? - if isinstance(self.metric_a, Metric): - val_a = self.metric_a.compute() - else: - val_a = self.metric_a - - if isinstance(self.metric_b, Metric): - val_b = self.metric_b.compute() - else: - val_b = self.metric_b - - if val_b is None: - return self.op(val_a) - - return self.op(val_a, val_b) - - def reset(self) -> None: - if isinstance(self.metric_a, Metric): - self.metric_a.reset() - - if isinstance(self.metric_b, Metric): - self.metric_b.reset() - - def persistent(self, mode: bool = False) -> None: - if isinstance(self.metric_a, Metric): - self.metric_a.persistent(mode=mode) - if isinstance(self.metric_b, Metric): - self.metric_b.persistent(mode=mode) - - def __repr__(self) -> str: - _op_metrics = f"(\n {self.op.__name__}(\n {repr(self.metric_a)},\n {repr(self.metric_b)}\n )\n)" - repr_str = self.__class__.__name__ + _op_metrics - - return repr_str diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/__init__.py deleted file mode 100644 index aafc10247..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
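
The operator overloads above mean metrics compose lazily: arithmetic on Metric objects builds a CompositionalMetric whose op only runs at compute() time. A hedged sketch, assuming the paddlemetrics regression metrics defined later in this patch:

import paddleext.torchapi as B
from paddlemetrics import MeanAbsoluteError, MeanSquaredError

mae, mse = MeanAbsoluteError(), MeanSquaredError()
combined = mae + mse                      # builds CompositionalMetric(B.add, mae, mse)

preds = B.tensor([2.5, 0.0, 2.0, 8.0])
target = B.tensor([3.0, -0.5, 2.0, 7.0])
combined.update(preds, target)            # forwards update() to both operands
print(combined.compute())                 # mae.compute() + mse.compute(), ~0.5 + 0.375
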
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from paddlemetrics.regression.cosine_similarity import CosineSimilarity # noqa: F401 -from paddlemetrics.regression.explained_variance import ExplainedVariance # noqa: F401 -from paddlemetrics.regression.mean_absolute_error import MeanAbsoluteError # noqa: F401 -from paddlemetrics.regression.mean_absolute_percentage_error import MeanAbsolutePercentageError # noqa: F401 -from paddlemetrics.regression.mean_squared_error import MeanSquaredError # noqa: F401 -from paddlemetrics.regression.mean_squared_log_error import MeanSquaredLogError # noqa: F401 -from paddlemetrics.regression.pearson import PearsonCorrcoef # noqa: F401 -from paddlemetrics.regression.r2 import R2Score # noqa: F401 -from paddlemetrics.regression.spearman import SpearmanCorrcoef # noqa: F401 -from paddlemetrics.regression.symmetric_mean_absolute_percentage_error import ( # noqa: F401 - SymmetricMeanAbsolutePercentageError, -) -from paddlemetrics.regression.tweedie_deviance import TweedieDevianceScore # noqa: F401 diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/cosine_similarity.py b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/cosine_similarity.py deleted file mode 100644 index 3b2946e2c..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/cosine_similarity.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Callable, List, Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.functional.regression.cosine_similarity import _cosine_similarity_compute, _cosine_similarity_update -from paddlemetrics.metric import Metric -from paddlemetrics.utilities.data import dim_zero_cat - - -class CosineSimilarity(Metric): - r""" - Computes the `Cosine Similarity`_ - between targets and predictions: - - .. math:: - cos_{sim}(x,y) = \frac{x \cdot y}{||x|| \cdot ||y||} = - \frac{\sum_{i=1}^n x_i y_i}{\sqrt{\sum_{i=1}^n x_i^2}\sqrt{\sum_{i=1}^n y_i^2}} - - where :math:`y` is a tensor of target values, and :math:`x` is a tensor of predictions. - - Forward accepts - - - ``preds`` (float tensor): ``(N,d)`` - - ``target`` (float tensor): ``(N,d)`` - - Args: - reduction: how to reduce over the batch dimension using 'sum', 'mean' or 'none' - (taking the individual scores) - compute_on_step: - Forward only calls ``update()`` and return ``None`` if this is set to ``False``. - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. 
- process_group: - Specify the process group on which synchronization is called. - default: ``None`` (which selects the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. When ``None``, DDP - will be used to perform the all gather. - - Example: - >>> from paddlemetrics import CosineSimilarity - >>> target = B.tensor([[0, 1], [1, 1]]) - >>> preds = B.tensor([[0, 1], [0, 1]]) - >>> cosine_similarity = CosineSimilarity(reduction = 'mean') - >>> cosine_similarity(preds, target) - tensor(0.8536) - - """ - is_differentiable = True - preds: List[Tensor] - target: List[Tensor] - - def __init__( - self, - reduction: str = "sum", - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ) -> None: - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - allowed_reduction = ("sum", "mean", "none", None) - if reduction not in allowed_reduction: - raise ValueError(f"Expected argument `reduction` to be one of {allowed_reduction} but got {reduction}") - self.reduction = reduction - - self.add_state("preds", [], dist_reduce_fx="cat") - self.add_state("target", [], dist_reduce_fx="cat") - - def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore - """Update metric states with predictions and targets. - - Args: - preds: Predicted tensor with shape ``(N,d)`` - target: Ground truth tensor with shape ``(N,d)`` - """ - preds, target = _cosine_similarity_update(preds, target) - - self.preds.append(preds) - self.target.append(target) - - def compute(self) -> Tensor: - preds = dim_zero_cat(self.preds) - target = dim_zero_cat(self.target) - return _cosine_similarity_compute(preds, target, self.reduction) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/explained_variance.py b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/explained_variance.py deleted file mode 100644 index 226ac0760..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/explained_variance.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Callable, Optional, Sequence, Union - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.functional.regression.explained_variance import ( - _explained_variance_compute, - _explained_variance_update, -) -from paddlemetrics.metric import Metric - - -class ExplainedVariance(Metric): - r""" - Computes `explained variance`_: - - .. math:: \text{ExplainedVariance} = 1 - \frac{\text{Var}(y - \hat{y})}{\text{Var}(y)} - - Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a - tensor of predictions. 
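
A hand check of the CosineSimilarity docstring example above, done in plain Python:

import math

# row 1: preds [0, 1] vs target [0, 1] -> dot 1, norms 1 and 1        -> 1.0
# row 2: preds [0, 1] vs target [1, 1] -> dot 1, norms 1 and sqrt(2)  -> 0.7071
row2 = 1 / math.sqrt(2)
print(round((1.0 + row2) / 2, 4))  # reduction='mean' -> 0.8536, matching the doctest
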
- - Forward accepts - - - ``preds`` (float tensor): ``(N,)`` or ``(N, ...)`` (multioutput) - - ``target`` (long tensor): ``(N,)`` or ``(N, ...)`` (multioutput) - - In the case of multioutput, as default the variances will be uniformly - averaged over the additional dimensions. Please see argument `multioutput` - for changing this behavior. - - Args: - multioutput: - Defines aggregation in the case of multiple output scores. Can be one - of the following strings (default is `'uniform_average'`.): - - * `'raw_values'` returns full set of scores - * `'uniform_average'` scores are uniformly averaged - * `'variance_weighted'` scores are weighted by their individual variances - - compute_on_step: - Forward only calls ``update()`` and return None if this is set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. default: False - process_group: - Specify the process group on which synchronization is called. default: None (which selects the entire world) - - Raises: - ValueError: - If ``multioutput`` is not one of ``"raw_values"``, ``"uniform_average"`` or ``"variance_weighted"``. - - Example: - >>> from paddlemetrics import ExplainedVariance - >>> target = B.tensor([3, -0.5, 2, 7]) - >>> preds = B.tensor([2.5, 0.0, 2, 8]) - >>> explained_variance = ExplainedVariance() - >>> explained_variance(preds, target) - tensor(0.9572) - - >>> target = B.tensor([[0.5, 1], [-1, 1], [7, -6]]) - >>> preds = B.tensor([[0, 2], [-1, 2], [8, -5]]) - >>> explained_variance = ExplainedVariance(multioutput='raw_values') - >>> explained_variance(preds, target) - tensor([0.9677, 1.0000]) - - """ - is_differentiable = True - n_obs: Tensor - sum_error: Tensor - sum_squared_error: Tensor - sum_target: Tensor - sum_squared_target: Tensor - - def __init__( - self, - multioutput: str = "uniform_average", - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ) -> None: - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - allowed_multioutput = ("raw_values", "uniform_average", "variance_weighted") - if multioutput not in allowed_multioutput: - raise ValueError( - f"Invalid input to argument `multioutput`. Choose one of the following: {allowed_multioutput}" - ) - self.multioutput: str = multioutput - self.add_state("sum_error", default=tensor(0.0), dist_reduce_fx="sum") - self.add_state("sum_squared_error", default=tensor(0.0), dist_reduce_fx="sum") - self.add_state("sum_target", default=tensor(0.0), dist_reduce_fx="sum") - self.add_state("sum_squared_target", default=tensor(0.0), dist_reduce_fx="sum") - self.add_state("n_obs", default=tensor(0.0), dist_reduce_fx="sum") - - def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore - """Update state with predictions and targets. 
- - Args: - preds: Predictions from model - target: Ground truth values - """ - n_obs, sum_error, sum_squared_error, sum_target, sum_squared_target = _explained_variance_update(preds, target) - self.n_obs = self.n_obs + n_obs - self.sum_error = self.sum_error + sum_error - self.sum_squared_error = self.sum_squared_error + sum_squared_error - self.sum_target = self.sum_target + sum_target - self.sum_squared_target = self.sum_squared_target + sum_squared_target - - def compute(self) -> Union[Tensor, Sequence[Tensor]]: - """Computes explained variance over state.""" - return _explained_variance_compute( - self.n_obs, - self.sum_error, - self.sum_squared_error, - self.sum_target, - self.sum_squared_target, - self.multioutput, - ) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/mean_absolute_error.py b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/mean_absolute_error.py deleted file mode 100644 index 8614bed21..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/mean_absolute_error.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Callable, Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.functional.regression.mean_absolute_error import ( - _mean_absolute_error_compute, - _mean_absolute_error_update, -) -from paddlemetrics.metric import Metric - - -class MeanAbsoluteError(Metric): - r""" - `Computes Mean Absolute Error`_ (MAE): - - .. math:: \text{MAE} = \frac{1}{N}\sum_i^N | y_i - \hat{y_i} | - - Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions. - - Args: - compute_on_step: - Forward only calls ``update()`` and return None if this is set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. default: False - process_group: - Specify the process group on which synchronization is called. default: None (which selects the entire world) - - Example: - >>> from paddlemetrics import MeanAbsoluteError - >>> target = B.tensor([3.0, -0.5, 2.0, 7.0]) - >>> preds = B.tensor([2.5, 0.0, 2.0, 8.0]) - >>> mean_absolute_error = MeanAbsoluteError() - >>> mean_absolute_error(preds, target) - tensor(0.5000) - """ - is_differentiable = True - sum_abs_error: Tensor - total: Tensor - - def __init__( - self, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ) -> None: - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - - self.add_state("sum_abs_error", default=tensor(0.0), dist_reduce_fx="sum") - self.add_state("total", default=tensor(0), dist_reduce_fx="sum") - - def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore - """Update state with predictions and targets. 
- - Args: - preds: Predictions from model - target: Ground truth values - """ - sum_abs_error, n_obs = _mean_absolute_error_update(preds, target) - - self.sum_abs_error += sum_abs_error - self.total += n_obs - - def compute(self) -> Tensor: - """Computes mean absolute error over state.""" - return _mean_absolute_error_compute(self.sum_abs_error, self.total) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/mean_absolute_percentage_error.py b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/mean_absolute_percentage_error.py deleted file mode 100644 index 66d9c0916..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/mean_absolute_percentage_error.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Callable, Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.functional.regression.mean_absolute_percentage_error import ( - _mean_absolute_percentage_error_compute, - _mean_absolute_percentage_error_update, -) -from paddlemetrics.metric import Metric - - -class MeanAbsolutePercentageError(Metric): - r""" - Computes `Mean Absolute Percentage Error`_ (MAPE): - - .. math:: \text{MAPE} = \frac{1}{n}\sum_{i=1}^n\frac{| y_i - \hat{y_i} |}{\max(\epsilon, | y_i |)} - - Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions. - - Args: - compute_on_step: - Forward only calls ``update()`` and return None if this is set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. default: False - process_group: - Specify the process group on which synchronization is called. default: None (which selects the entire world) - - Note: - The epsilon value is taken from `scikit-learn's implementation of MAPE`_. - - Note: - MAPE output is a non-negative floating point. The best result is 0.0. Note that bad predictions - can lead to arbitrarily large values, especially when some ``target`` values are close to 0. - This `MAPE implementation returns`_ a very large number instead of ``inf``.
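
A worked illustration of the note above: with a target of 0, the max(eps, |y|) denominator turns a would-be inf into a very large finite value. The eps constant below follows numpy's float64 machine epsilon, which is an assumption about what the functional implementation uses:

eps = 2.220446049250313e-16      # assumed machine epsilon, per the scikit-learn note
target, pred = 0.0, 1.0
ape = abs(target - pred) / max(eps, abs(target))
print(ape)                        # ~4.5e15 rather than float('inf')
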
- - Example: - >>> from paddlemetrics import MeanAbsolutePercentageError - >>> target = B.tensor([1, 10, 1e6]) - >>> preds = B.tensor([0.9, 15, 1.2e6]) - >>> mean_abs_percentage_error = MeanAbsolutePercentageError() - >>> mean_abs_percentage_error(preds, target) - tensor(0.2667) - - """ - is_differentiable = True - sum_abs_per_error: Tensor - total: Tensor - - def __init__( - self, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ) -> None: - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - - self.add_state("sum_abs_per_error", default=tensor(0.0), dist_reduce_fx="sum") - self.add_state("total", default=tensor(0.0), dist_reduce_fx="sum") - - def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore - """Update state with predictions and targets. - - Args: - preds: Predictions from model - target: Ground truth values - """ - sum_abs_per_error, num_obs = _mean_absolute_percentage_error_update(preds, target) - - self.sum_abs_per_error += sum_abs_per_error - self.total += num_obs - - def compute(self) -> Tensor: - """Computes mean absolute percentage error over state.""" - return _mean_absolute_percentage_error_compute(self.sum_abs_per_error, self.total) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/mean_squared_error.py b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/mean_squared_error.py deleted file mode 100644 index 8c1c9245b..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/mean_squared_error.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Callable, Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.functional.regression.mean_squared_error import ( - _mean_squared_error_compute, - _mean_squared_error_update, -) -from paddlemetrics.metric import Metric - - -class MeanSquaredError(Metric): - r""" - Computes `mean squared error`_ (MSE): - - .. math:: \text{MSE} = \frac{1}{N}\sum_i^N(y_i - \hat{y_i})^2 - - Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions. - - Args: - compute_on_step: - Forward only calls ``update()`` and return None if this is set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. default: False - process_group: - Specify the process group on which synchronization is called. default: None (which selects the entire world) - squared: - If True returns MSE value, if False returns RMSE value. 
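
A hand check of the MeanSquaredError example above, plus what the squared=False (RMSE) variant would return:

import math

preds = [3.0, 5.0, 2.5, 7.0]
target = [2.5, 5.0, 4.0, 8.0]
mse = sum((p - t) ** 2 for p, t in zip(preds, target)) / len(preds)
print(mse)                        # 0.875, matching tensor(0.8750)
print(round(math.sqrt(mse), 4))   # 0.9354 -> the squared=False result
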
- - Example: - >>> from paddlemetrics import MeanSquaredError - >>> target = B.tensor([2.5, 5.0, 4.0, 8.0]) - >>> preds = B.tensor([3.0, 5.0, 2.5, 7.0]) - >>> mean_squared_error = MeanSquaredError() - >>> mean_squared_error(preds, target) - tensor(0.8750) - - """ - is_differentiable = True - sum_squared_error: Tensor - total: Tensor - - def __init__( - self, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - squared: bool = True, - ) -> None: - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - - self.add_state("sum_squared_error", default=tensor(0.0), dist_reduce_fx="sum") - self.add_state("total", default=tensor(0), dist_reduce_fx="sum") - self.squared = squared - - def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore - """Update state with predictions and targets. - - Args: - preds: Predictions from model - target: Ground truth values - """ - sum_squared_error, n_obs = _mean_squared_error_update(preds, target) - - self.sum_squared_error += sum_squared_error - self.total += n_obs - - def compute(self) -> Tensor: - """Computes mean squared error over state.""" - return _mean_squared_error_compute(self.sum_squared_error, self.total, squared=self.squared) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/mean_squared_log_error.py b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/mean_squared_log_error.py deleted file mode 100644 index e36773b0e..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/mean_squared_log_error.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Callable, Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.functional.regression.mean_squared_log_error import ( - _mean_squared_log_error_compute, - _mean_squared_log_error_update, -) -from paddlemetrics.metric import Metric - - -class MeanSquaredLogError(Metric): - r""" - Computes `mean squared logarithmic error`_ (MSLE): - - .. math:: \text{MSLE} = \frac{1}{N}\sum_i^N (\log_e(1 + y_i) - \log_e(1 + \hat{y_i}))^2 - - Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions. - - Args: - compute_on_step: - Forward only calls ``update()`` and return None if this is set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. default: False - process_group: - Specify the process group on which synchronization is called. 
default: None (which selects the entire world) - - Example: - >>> from paddlemetrics import MeanSquaredLogError - >>> target = B.tensor([2.5, 5, 4, 8]) - >>> preds = B.tensor([3, 5, 2.5, 7]) - >>> mean_squared_log_error = MeanSquaredLogError() - >>> mean_squared_log_error(preds, target) - tensor(0.0397) - - .. note:: - Half precision is only supported on GPU for this metric - - """ - is_differentiable = True - sum_squared_log_error: Tensor - total: Tensor - - def __init__( - self, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ) -> None: - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - - self.add_state("sum_squared_log_error", default=tensor(0.0), dist_reduce_fx="sum") - self.add_state("total", default=tensor(0), dist_reduce_fx="sum") - - def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore - """Update state with predictions and targets. - - Args: - preds: Predictions from model - target: Ground truth values - """ - sum_squared_log_error, n_obs = _mean_squared_log_error_update(preds, target) - - self.sum_squared_log_error += sum_squared_log_error - self.total += n_obs - - def compute(self) -> Tensor: - """Compute mean squared logarithmic error over state.""" - return _mean_squared_log_error_compute(self.sum_squared_log_error, self.total) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/pearson.py b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/pearson.py deleted file mode 100644 index 7927392a7..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/pearson.py +++ /dev/null @@ -1,140 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, List, Optional, Tuple - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.functional.regression.pearson import _pearson_corrcoef_compute, _pearson_corrcoef_update -from paddlemetrics.metric import Metric - - -def _final_aggregation( - means_x: Tensor, - means_y: Tensor, - vars_x: Tensor, - vars_y: Tensor, - corrs_xy: Tensor, - nbs: Tensor, -) -> Tuple[Tensor, Tensor, Tensor, Tensor]: - """Aggregate the statistics from multiple devices.
- - Formula taken from here: `Aggregate the statistics from multiple devices`_ - """ - # assert len(means_x) > 1 and len(means_y) > 1 and len(vars_x) > 1 and len(vars_y) > 1 and len(corrs_xy) > 1 - mx1, my1, vx1, vy1, cxy1, n1 = means_x[0], means_y[0], vars_x[0], vars_y[0], corrs_xy[0], nbs[0] - for i in range(1, len(means_x)): - mx2, my2, vx2, vy2, cxy2, n2 = means_x[i], means_y[i], vars_x[i], vars_y[i], corrs_xy[i], nbs[i] - - nb = n1 + n2 - mean_x = (n1 * mx1 + n2 * mx2) / nb - mean_y = (n1 * my1 + n2 * my2) / nb - var_x = 1 / (n1 + n2 - 1) * ((n1 - 1) * vx1 + (n2 - 1) * vx2 + ((n1 * n2) / (n1 + n2)) * (mx1 - mx2) ** 2) - var_y = 1 / (n1 + n2 - 1) * ((n1 - 1) * vy1 + (n2 - 1) * vy2 + ((n1 * n2) / (n1 + n2)) * (my1 - my2) ** 2) - - corr1 = n1 * cxy1 + n1 * (mx1 - mean_x) * (my1 - mean_y) - corr2 = n2 * cxy2 + n2 * (mx2 - mean_x) * (my2 - mean_y) - corr_xy = (corr1 + corr2) / (n1 + n2) - - mx1, my1, vx1, vy1, cxy1, n1 = mean_x, mean_y, var_x, var_y, corr_xy, nb - - return var_x, var_y, corr_xy, nb - - -class PearsonCorrcoef(Metric): - r""" - Computes `Pearson Correlation Coefficient`_: - - .. math:: - P_{corr}(x,y) = \frac{cov(x,y)}{\sigma_x \sigma_y} - - Where :math:`y` is a tensor of target values, and :math:`x` is a - tensor of predictions. - - Forward accepts - - - ``preds`` (float tensor): ``(N,)`` - - ``target``(float tensor): ``(N,)`` - - Args: - compute_on_step: - Forward only calls ``update()`` and return None if this is set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. default: False - process_group: - Specify the process group on which synchronization is called. default: None (which selects the entire world) - - Example: - >>> from paddlemetrics import PearsonCorrcoef - >>> target = B.tensor([3, -0.5, 2, 7]) - >>> preds = B.tensor([2.5, 0.0, 2, 8]) - >>> pearson = PearsonCorrcoef() - >>> pearson(preds, target) - tensor(0.9849) - - """ - is_differentiable = True - preds: List[Tensor] - target: List[Tensor] - mean_x: Tensor - mean_y: Tensor - var_x: Tensor - var_y: Tensor - corr_xy: Tensor - n_total: Tensor - - def __init__( - self, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - ) -> None: - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - ) - - self.add_state("mean_x", default=B.zeros(1), dist_reduce_fx=None) - self.add_state("mean_y", default=B.zeros(1), dist_reduce_fx=None) - self.add_state("var_x", default=B.zeros(1), dist_reduce_fx=None) - self.add_state("var_y", default=B.zeros(1), dist_reduce_fx=None) - self.add_state("corr_xy", default=B.zeros(1), dist_reduce_fx=None) - self.add_state("n_total", default=B.zeros(1), dist_reduce_fx=None) - - def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore - """Update state with predictions and targets. 
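The merge loop above is the standard pairwise update for running means and variances; a plain-Python sanity check (toy numbers, no framework required) that merging two shards reproduces the pooled variance:

    x1, x2 = [1.0, 2.0, 3.0], [4.0, 5.0]

    def stats(xs):
        n = len(xs)
        m = sum(xs) / n
        v = sum((x - m) ** 2 for x in xs) / (n - 1)  # unbiased sample variance
        return m, v, n

    (m1, v1, n1), (m2, v2, n2) = stats(x1), stats(x2)
    nb = n1 + n2
    # same merge formula as var_x / var_y in _final_aggregation above
    var = ((n1 - 1) * v1 + (n2 - 1) * v2 + (n1 * n2 / nb) * (m1 - m2) ** 2) / (nb - 1)
    assert abs(var - stats(x1 + x2)[1]) < 1e-9  # matches the single-pass result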
- - Args: - preds: Predictions from model - target: Ground truth values - """ - self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total = _pearson_corrcoef_update( - preds, target, self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total - ) - - def compute(self) -> Tensor: - """Computes pearson correlation coefficient over state.""" - if self.mean_x.numel() > 1: # multiple devices, need further reduction - var_x, var_y, corr_xy, n_total = _final_aggregation( - self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total - ) - else: - var_x = self.var_x - var_y = self.var_y - corr_xy = self.corr_xy - n_total = self.n_total - - return _pearson_corrcoef_compute(var_x, var_y, corr_xy, n_total) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/r2.py b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/r2.py deleted file mode 100644 index 36db3d8d5..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/r2.py +++ /dev/null @@ -1,149 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Callable, Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.functional.regression.r2 import _r2_score_compute, _r2_score_update -from paddlemetrics.metric import Metric - - -class R2Score(Metric): - r""" - Computes r2 score also known as `R2 Score_Coefficient Determination`_: - - .. math:: R^2 = 1 - \frac{SS_{res}}{SS_{tot}} - - where :math:`SS_{res}=\sum_i (y_i - f(x_i))^2` is the sum of residual squares, and - :math:`SS_{tot}=\sum_i (y_i - \bar{y})^2` is total sum of squares. Can also calculate - adjusted r2 score given by - - .. math:: R^2_{adj} = 1 - \frac{(1-R^2)(n-1)}{n-k-1} - - where the parameter :math:`k` (the number of independent regressors) should - be provided as the `adjusted` argument. - - Forward accepts - - - ``preds`` (float tensor): ``(N,)`` or ``(N, M)`` (multioutput) - - ``target`` (float tensor): ``(N,)`` or ``(N, M)`` (multioutput) - - In the case of multioutput, as default the variances will be uniformly - averaged over the additional dimensions. Please see argument `multioutput` - for changing this behavior. - - Args: - num_outputs: - Number of outputs in multioutput setting (default is 1) - adjusted: - number of independent regressors for calculating adjusted r2 score. - Default 0 (standard r2 score). - multioutput: - Defines aggregation in the case of multiple output scores. Can be one - of the following strings (default is ``'uniform_average'``.): - - * ``'raw_values'`` returns full set of scores - * ``'uniform_average'`` scores are uniformly averaged - * ``'variance_weighted'`` scores are weighted by their individual variances - - compute_on_step: - Forward only calls ``update()`` and return None if this is set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. 
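Because ``update()`` folds each batch into running means, variances, and covariance, splitting the docstring example into two batches should produce the same coefficient as a single call; a hedged sketch:

    import paddleext.torchapi as B
    from paddlemetrics import PearsonCorrcoef

    pearson = PearsonCorrcoef()
    # same data as the doctest, streamed in two batches of (preds, target)
    pearson.update(B.tensor([2.5, 0.0]), B.tensor([3.0, -0.5]))
    pearson.update(B.tensor([2.0, 8.0]), B.tensor([2.0, 7.0]))
    print(pearson.compute())  # expected tensor(0.9849), as in the one-shot call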
default: False - process_group: - Specify the process group on which synchronization is called. default: None (which selects the entire world) - - Raises: - ValueError: - If ``adjusted`` parameter is not an integer larger or equal to 0. - ValueError: - If ``multioutput`` is not one of ``"raw_values"``, ``"uniform_average"`` or ``"variance_weighted"``. - - Example: - >>> from paddlemetrics import R2Score - >>> target = B.tensor([3, -0.5, 2, 7]) - >>> preds = B.tensor([2.5, 0.0, 2, 8]) - >>> r2score = R2Score() - >>> r2score(preds, target) - tensor(0.9486) - - >>> target = B.tensor([[0.5, 1], [-1, 1], [7, -6]]) - >>> preds = B.tensor([[0, 2], [-1, 2], [8, -5]]) - >>> r2score = R2Score(num_outputs=2, multioutput='raw_values') - >>> r2score(preds, target) - tensor([0.9654, 0.9082]) - - """ - is_differentiable = True - sum_squared_error: Tensor - sum_error: Tensor - residual: Tensor - total: Tensor - - def __init__( - self, - num_outputs: int = 1, - adjusted: int = 0, - multioutput: str = "uniform_average", - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ) -> None: - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - - self.num_outputs = num_outputs - - if adjusted < 0 or not isinstance(adjusted, int): - raise ValueError("`adjusted` parameter should be an integer larger or equal to 0.") - self.adjusted = adjusted - - allowed_multioutput = ("raw_values", "uniform_average", "variance_weighted") - if multioutput not in allowed_multioutput: - raise ValueError( - f"Invalid input to argument `multioutput`. Choose one of the following: {allowed_multioutput}" - ) - self.multioutput = multioutput - - self.add_state("sum_squared_error", default=B.zeros(self.num_outputs), dist_reduce_fx="sum") - self.add_state("sum_error", default=B.zeros(self.num_outputs), dist_reduce_fx="sum") - self.add_state("residual", default=B.zeros(self.num_outputs), dist_reduce_fx="sum") - self.add_state("total", default=tensor(0), dist_reduce_fx="sum") - - def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore - """Update state with predictions and targets. - - Args: - preds: Predictions from model - target: Ground truth values - """ - sum_squared_error, sum_error, residual, total = _r2_score_update(preds, target) - - self.sum_squared_error += sum_squared_error - self.sum_error += sum_error - self.residual += residual - self.total += total - - def compute(self) -> Tensor: - """Computes r2 score over the metric states.""" - return _r2_score_compute( - self.sum_squared_error, self.sum_error, self.residual, self.total, self.adjusted, self.multioutput - ) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/spearman.py b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/spearman.py deleted file mode 100644 index 76249378f..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/spearman.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
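The adjusted variant is pure post-processing of the same state tensors; a sketch reusing the docstring data (the printed value is our arithmetic from the formula above, not a doctest from this patch):

    import paddleext.torchapi as B
    from paddlemetrics import R2Score

    target = B.tensor([3, -0.5, 2, 7])
    preds = B.tensor([2.5, 0.0, 2, 8])

    # k = 1 regressor: R^2_adj = 1 - (1 - 0.9486) * (4 - 1) / (4 - 1 - 1) ~= 0.9229
    r2_adj = R2Score(adjusted=1)
    print(r2_adj(preds, target))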
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import Any, Callable, List, Optional
-
-import paddleext.torchapi as B
-from paddleext.torchapi import Tensor
-
-from paddlemetrics.functional.regression.spearman import _spearman_corrcoef_compute, _spearman_corrcoef_update
-from paddlemetrics.metric import Metric
-from paddlemetrics.utilities import rank_zero_warn
-from paddlemetrics.utilities.data import dim_zero_cat
-
-
-class SpearmanCorrcoef(Metric):
-    r"""
-    Computes `spearmans rank correlation coefficient`_.
-
-    .. math::
-        r_s = \frac{cov(rg_x, rg_y)}{\sigma_{rg_x} * \sigma_{rg_y}}
-
-    where rg_x and rg_y are the ranks associated with the variables x and y. Spearman's rank correlation
-    coefficient corresponds to the standard Pearson correlation coefficient calculated on the rank variables.
-
-    Args:
-        compute_on_step:
-            Forward only calls ``update()`` and return None if this is set to False. default: True
-        dist_sync_on_step:
-            Synchronize metric state across processes at each ``forward()``
-            before returning the value at the step. default: False
-        process_group:
-            Specify the process group on which synchronization is called. default: None (which selects the entire world)
-        dist_sync_fn:
-            Callback that performs the allgather operation on the metric state. When ``None``, DDP
-            will be used to perform the allgather.
-
-    Example:
-        >>> from paddlemetrics import SpearmanCorrcoef
-        >>> target = B.tensor([3, -0.5, 2, 7])
-        >>> preds = B.tensor([2.5, 0.0, 2, 8])
-        >>> spearman = SpearmanCorrcoef()
-        >>> spearman(preds, target)
-        tensor(1.0000)
-
-    """
-    is_differentiable = False
-    preds: List[Tensor]
-    target: List[Tensor]
-
-    def __init__(
-        self,
-        compute_on_step: bool = True,
-        dist_sync_on_step: bool = False,
-        process_group: Optional[Any] = None,
-        dist_sync_fn: Optional[Callable] = None,
-    ) -> None:
-        super().__init__(
-            compute_on_step=compute_on_step,
-            dist_sync_on_step=dist_sync_on_step,
-            process_group=process_group,
-            dist_sync_fn=dist_sync_fn,
-        )
-        rank_zero_warn(
-            "Metric `SpearmanCorrcoef` will save all targets and predictions in the buffer."
-            " For large datasets, this may lead to large memory footprint."
-        )
-
-        self.add_state("preds", default=[], dist_reduce_fx="cat")
-        self.add_state("target", default=[], dist_reduce_fx="cat")
-
-    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
-        """Update state with predictions and targets.
-
-        Args:
-            preds: Predictions from model
-            target: Ground truth values
-        """
-        preds, target = _spearman_corrcoef_update(preds, target)
-        self.preds.append(preds)
-        self.target.append(target)
-
-    def compute(self) -> Tensor:
-        """Computes Spearman's correlation coefficient."""
-        preds = dim_zero_cat(self.preds)
-        target = dim_zero_cat(self.target)
-        return _spearman_corrcoef_compute(preds, target)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/symmetric_mean_absolute_percentage_error.py b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/symmetric_mean_absolute_percentage_error.py
deleted file mode 100644
index 3e545e08a..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/symmetric_mean_absolute_percentage_error.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import Any, Callable, Optional
-
-import paddleext.torchapi as B
-from paddleext.torchapi import Tensor, tensor
-
-from paddlemetrics.functional.regression.symmetric_mean_absolute_percentage_error import (
-    _symmetric_mean_absolute_percentage_error_compute,
-    _symmetric_mean_absolute_percentage_error_update,
-)
-from paddlemetrics.metric import Metric
-
-
-class SymmetricMeanAbsolutePercentageError(Metric):
-    r"""
-    Computes symmetric mean absolute percentage error (`SMAPE`_).
-
-    .. math:: \text{SMAPE} = \frac{2}{n}\sum_{i=1}^n \frac{|y_i - \hat{y_i}|}{\max(|y_i| + |\hat{y_i}|, \epsilon)}
-
-    Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
-
-    Args:
-        compute_on_step:
-            Forward only calls ``update()`` and return None if this is set to False.
-        dist_sync_on_step:
-            Synchronize metric state across processes at each ``forward()`` before returning the value at the step.
-        process_group:
-            Specify the process group on which synchronization is called. default: None (which selects the entire world)
-
-    Note:
-        The epsilon value is taken from `scikit-learn's implementation of SMAPE`_.
-
-    Note:
-        SMAPE output is a non-negative floating point between 0 and 1. The best result is 0.0.
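Evaluating the formula by hand on the doctest data that follows, as a sanity check:

    # term-by-term for target (1, 10, 1e6) and preds (0.9, 15, 1.2e6);
    # the epsilon guard is irrelevant here since no denominator is near zero
    pairs = [(1.0, 0.9), (10.0, 15.0), (1e6, 1.2e6)]
    smape = 2 / len(pairs) * sum(abs(y - p) / (abs(y) + abs(p)) for y, p in pairs)
    print(round(smape, 4))  # 0.229, matching tensor(0.2290) below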
- - - Example: - >>> from paddlemetrics import SymmetricMeanAbsolutePercentageError - >>> target = B.tensor([1, 10, 1e6]) - >>> preds = B.tensor([0.9, 15, 1.2e6]) - >>> smape = SymmetricMeanAbsolutePercentageError() - >>> smape(preds, target) - tensor(0.2290) - """ - is_differentiable = True - sum_abs_per_error: Tensor - total: Tensor - - def __init__( - self, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ) -> None: - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - - self.add_state("sum_abs_per_error", default=tensor(0.0), dist_reduce_fx="sum") - self.add_state("total", default=tensor(0.0), dist_reduce_fx="sum") - - def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore - """Update state with predictions and targets. - - Args: - preds: Predictions from model - target: Ground truth values - """ - sum_abs_per_error, num_obs = _symmetric_mean_absolute_percentage_error_update(preds, target) - - self.sum_abs_per_error += sum_abs_per_error - self.total += num_obs - - def compute(self) -> Tensor: - """Computes mean absolute percentage error over state.""" - return _symmetric_mean_absolute_percentage_error_compute(self.sum_abs_per_error, self.total) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/tweedie_deviance.py b/EE/paddlemetric/src/build/lib/paddlemetrics/regression/tweedie_deviance.py deleted file mode 100644 index 4687bdd5c..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/regression/tweedie_deviance.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Callable, Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.functional.regression.tweedie_deviance import ( - _tweedie_deviance_score_compute, - _tweedie_deviance_score_update, -) -from paddlemetrics.metric import Metric - - -class TweedieDevianceScore(Metric): - r""" - Computes the `Tweedie Deviance Score`_ between targets and predictions: - - .. math:: - deviance\_score(\hat{y},y) = - \begin{cases} - (\hat{y} - y)^2, & \text{for }power=0\\ - 2 * (y * log(\frac{y}{\hat{y}}) + \hat{y} - y), & \text{for }power=1\\ - 2 * (log(\frac{\hat{y}}{y}) + \frac{y}{\hat{y}} - 1), & \text{for }power=2\\ - 2 * (\frac{(max(y,0))^{2}}{(1 - power)(2 - power)} - \frac{y(\hat{y})^{1 - power}}{1 - power} + \frac{(\hat{y}) - ^{2 - power}}{2 - power}), & \text{otherwise} - \end{cases} - - where :math:`y` is a tensor of targets values, and :math:`\hat{y}` is a tensor of predictions. - - Forward accepts - - - ``preds`` (float tensor): ``(N,...)`` - - ``targets`` (float tensor): ``(N,...)`` - - Args: - power: - - power < 0 : Extreme stable distribution. (Requires: preds > 0.) - - power = 0 : Normal distribution. (Requires: targets and preds can be any real numbers.) 
- - power = 1 : Poisson distribution. (Requires: targets >= 0 and y_pred > 0.) - - 1 < p < 2 : Compound Poisson distribution. (Requires: targets >= 0 and preds > 0.) - - power = 2 : Gamma distribution. (Requires: targets > 0 and preds > 0.) - - power = 3 : Inverse Gaussian distribution. (Requires: targets > 0 and preds > 0.) - - otherwise : Positive stable distribution. (Requires: targets > 0 and preds > 0.) - compute_on_step: - Forward only calls ``update()`` and return ``None`` if this is set to ``False``. - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. - process_group: - Specify the process group on which synchronization is called. - default: ``None`` (which selects the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. When ``None``, DDP - will be used to perform the all gather. - - Example: - >>> from paddlemetrics import TweedieDevianceScore - >>> targets = B.tensor([1.0, 2.0, 3.0, 4.0]) - >>> preds = B.tensor([4.0, 3.0, 2.0, 1.0]) - >>> deviance_score = TweedieDevianceScore(power=2) - >>> deviance_score(preds, targets) - tensor(1.2083) - - """ - is_differentiable = True - sum_deviance_score: Tensor - num_observations: Tensor - - def __init__( - self, - power: float = 0.0, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ) -> None: - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - if 0 < power < 1: - raise ValueError(f"Deviance Score is not defined for power={power}.") - - self.power: float = power - - self.add_state("sum_deviance_score", B.tensor(0.0), dist_reduce_fx="sum") - self.add_state("num_observations", B.tensor(0), dist_reduce_fx="sum") - - def update(self, preds: Tensor, targets: Tensor) -> None: # type: ignore - """Update metric states with predictions and targets. - - Args: - preds: Predicted tensor with shape ``(N,d)`` - targets: Ground truth tensor with shape ``(N,d)`` - """ - sum_deviance_score, num_observations = _tweedie_deviance_score_update(preds, targets, self.power) - - self.sum_deviance_score += sum_deviance_score - self.num_observations += num_observations - - def compute(self) -> Tensor: - return _tweedie_deviance_score_compute(self.sum_deviance_score, self.num_observations) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/__init__.py deleted file mode 100644 index 208a02246..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
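Before the retrieval package listings, one consequence of the piecewise definition of ``TweedieDevianceScore`` above is worth spelling out: ``power=0`` selects the squared-error branch, so the score reduces to plain MSE. A minimal sketch, assuming the paddlemetrics package from this patch is importable:

    import paddleext.torchapi as B
    from paddlemetrics import TweedieDevianceScore

    targets = B.tensor([1.0, 2.0, 3.0, 4.0])
    preds = B.tensor([4.0, 3.0, 2.0, 1.0])

    # (9 + 1 + 1 + 9) / 4 = 5.0, i.e. the mean squared error
    mse_like = TweedieDevianceScore(power=0)
    print(mse_like(preds, targets))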
-from paddlemetrics.retrieval.mean_average_precision import RetrievalMAP # noqa: F401 -from paddlemetrics.retrieval.mean_reciprocal_rank import RetrievalMRR # noqa: F401 -from paddlemetrics.retrieval.retrieval_fallout import RetrievalFallOut # noqa: F401 -from paddlemetrics.retrieval.retrieval_hit_rate import RetrievalHitRate # noqa: F401 -from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric # noqa: F401 -from paddlemetrics.retrieval.retrieval_ndcg import RetrievalNormalizedDCG # noqa: F401 -from paddlemetrics.retrieval.retrieval_precision import RetrievalPrecision # noqa: F401 -from paddlemetrics.retrieval.retrieval_r_precision import RetrievalRPrecision # noqa: F401 -from paddlemetrics.retrieval.retrieval_recall import RetrievalRecall # noqa: F401 diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/mean_average_precision.py b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/mean_average_precision.py deleted file mode 100644 index ee7f9065b..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/mean_average_precision.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.functional.retrieval.average_precision import retrieval_average_precision -from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric - - -class RetrievalMAP(RetrievalMetric): - """Computes `Mean Average Precision`_. - - Works with binary target data. Accepts float predictions from a model output. - - Forward accepts - - - ``preds`` (float tensor): ``(N, ...)`` - - ``target`` (long or bool tensor): ``(N, ...)`` - - ``indexes`` (long tensor): ``(N, ...)`` - - ``indexes``, ``preds`` and ``target`` must have the same dimension. - ``indexes`` indicate to which query a prediction belongs. - Predictions will be first grouped by ``indexes`` and then `MAP` will be computed as the mean - of the `Average Precisions` over each query. - - Args: - empty_target_action: - Specify what to do with queries that do not have at least a positive ``target``. Choose from: - - - ``'neg'``: those queries count as ``0.0`` (default) - - ``'pos'``: those queries count as ``1.0`` - - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned - - ``'error'``: raise a ``ValueError`` - - compute_on_step: - Forward only calls ``update()`` and return None if this is set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. default: False - process_group: - Specify the process group on which synchronization is called. default: None (which selects - the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. When `None`, DDP - will be used to perform the allgather. 
default: None - - Example: - >>> from paddlemetrics import RetrievalMAP - >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) - >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) - >>> target = tensor([False, False, True, False, True, False, True]) - >>> rmap = RetrievalMAP() - >>> rmap(preds, target, indexes=indexes) - tensor(0.7917) - """ - - higher_is_better = True - - def _metric(self, preds: Tensor, target: Tensor) -> Tensor: - return retrieval_average_precision(preds, target) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/mean_reciprocal_rank.py b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/mean_reciprocal_rank.py deleted file mode 100644 index 76f15bde8..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/mean_reciprocal_rank.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.functional.retrieval.reciprocal_rank import retrieval_reciprocal_rank -from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric - - -class RetrievalMRR(RetrievalMetric): - """Computes `Mean Reciprocal Rank`_. - - Works with binary target data. Accepts float predictions from a model output. - - Forward accepts - - - ``preds`` (float tensor): ``(N, ...)`` - - ``target`` (long or bool tensor): ``(N, ...)`` - - ``indexes`` (long tensor): ``(N, ...)`` - - ``indexes``, ``preds`` and ``target`` must have the same dimension. - ``indexes`` indicate to which query a prediction belongs. - Predictions will be first grouped by ``indexes`` and then `MRR` will be computed as the mean - of the `Reciprocal Rank` over each query. - - Args: - empty_target_action: - Specify what to do with queries that do not have at least a positive ``target``. Choose from: - - - ``'neg'``: those queries count as ``0.0`` (default) - - ``'pos'``: those queries count as ``1.0`` - - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned - - ``'error'``: raise a ``ValueError`` - - compute_on_step: - Forward only calls ``update()`` and return None if this is set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. default: False - process_group: - Specify the process group on which synchronization is called. default: None (which selects - the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. When `None`, DDP - will be used to perform the allgather. 
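For reference, the ``RetrievalMAP`` doctest above decomposes query by query:

    # query 0: preds (0.2, 0.3, 0.5), target (F, F, T)
    #   ranked by score: 0.5(T) first -> AP = 1/1 = 1.0
    # query 1: preds (0.1, 0.3, 0.5, 0.2), target (F, T, F, T)
    #   ranked: 0.5(F), 0.3(T), 0.2(T), 0.1(F) -> AP = (1/2 + 2/3) / 2 = 7/12
    # MAP = (1.0 + 7/12) / 2 ~= 0.7917 -> tensor(0.7917)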
default: None - - Example: - >>> from paddlemetrics import RetrievalMRR - >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) - >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) - >>> target = tensor([False, False, True, False, True, False, True]) - >>> mrr = RetrievalMRR() - >>> mrr(preds, target, indexes=indexes) - tensor(0.7500) - """ - - higher_is_better = True - - def _metric(self, preds: Tensor, target: Tensor) -> Tensor: - return retrieval_reciprocal_rank(preds, target) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_fallout.py b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_fallout.py deleted file mode 100644 index 38b70f7c1..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_fallout.py +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Callable, Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.functional.retrieval.fall_out import retrieval_fall_out -from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric -from paddlemetrics.utilities.data import get_group_indexes - - -class RetrievalFallOut(RetrievalMetric): - """Computes `Fall-out`_. - - Works with binary target data. Accepts float predictions from a model output. - - Forward accepts: - - - ``preds`` (float tensor): ``(N, ...)`` - - ``target`` (long or bool tensor): ``(N, ...)`` - - ``indexes`` (long tensor): ``(N, ...)`` - - ``indexes``, ``preds`` and ``target`` must have the same dimension. - ``indexes`` indicate to which query a prediction belongs. - Predictions will be first grouped by ``indexes`` and then `Fall-out` will be computed as the mean - of the `Fall-out` over each query. - - Args: - empty_target_action: - Specify what to do with queries that do not have at least a negative ``target``. Choose from: - - - ``'neg'``: those queries count as ``0.0`` (default) - - ``'pos'``: those queries count as ``1.0`` - - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned - - ``'error'``: raise a ``ValueError`` - - k: consider only the top k elements for each query (default: None, which considers them all) - compute_on_step: - Forward only calls ``update()`` and return None if this is set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. default: False - process_group: - Specify the process group on which synchronization is called. default: None (which selects - the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. When `None`, DDP - will be used to perform the allgather. 
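Likewise for the ``RetrievalMRR`` doctest above:

    # query 0: first relevant document already at rank 1 -> RR = 1.0
    # query 1: ranked 0.5(F), 0.3(T), ... -> first relevant at rank 2 -> RR = 1/2
    # MRR = (1.0 + 0.5) / 2 = 0.75 -> tensor(0.7500)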
default: None
-
-    Raises:
-        ValueError:
-            If ``k`` parameter is not `None` or an integer larger than 0
-
-    Example:
-        >>> from paddlemetrics import RetrievalFallOut
-        >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1])
-        >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2])
-        >>> target = tensor([False, False, True, False, True, False, True])
-        >>> fo = RetrievalFallOut(k=2)
-        >>> fo(preds, target, indexes=indexes)
-        tensor(0.5000)
-    """
-
-    higher_is_better = False
-
-    def __init__(
-        self,
-        empty_target_action: str = "pos",
-        k: int = None,
-        compute_on_step: bool = True,
-        dist_sync_on_step: bool = False,
-        process_group: Optional[Any] = None,
-        dist_sync_fn: Callable = None,
-    ) -> None:
-        super().__init__(
-            empty_target_action=empty_target_action,
-            compute_on_step=compute_on_step,
-            dist_sync_on_step=dist_sync_on_step,
-            process_group=process_group,
-            dist_sync_fn=dist_sync_fn,
-        )
-
-        if (k is not None) and not (isinstance(k, int) and k > 0):
-            raise ValueError("`k` has to be a positive integer or None")
-        self.k = k
-
-    def compute(self) -> Tensor:
-        """First concat state `indexes`, `preds` and `target` since they were stored as lists.
-
-        After that, compute list of groups that will help in keeping together predictions about the same query.
-        Finally, for each group compute the `_metric` if the number of negative targets is at least 1, otherwise
-        behave as specified by `self.empty_target_action`.
-        """
-        indexes = B.cat(self.indexes, dim=0)
-        preds = B.cat(self.preds, dim=0)
-        target = B.cat(self.target, dim=0)
-
-        res = []
-        groups = get_group_indexes(indexes)
-
-        for group in groups:
-            mini_preds = preds[group]
-            mini_target = target[group]
-
-            if not (1 - mini_target).sum():
-                if self.empty_target_action == "error":
-                    raise ValueError("`compute` method was provided with a query with no negative target.")
-                if self.empty_target_action == "pos":
-                    res.append(tensor(1.0))
-                elif self.empty_target_action == "neg":
-                    res.append(tensor(0.0))
-            else:
-                # ensure list contains only float tensors
-                res.append(self._metric(mini_preds, mini_target))
-
-        return B.stack([x.to(preds) for x in res]).mean() if res else tensor(0.0).to(preds)
-
-    def _metric(self, preds: Tensor, target: Tensor) -> Tensor:
-        return retrieval_fall_out(preds, target, k=self.k)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_hit_rate.py b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_hit_rate.py
deleted file mode 100644
index 6a053b7b5..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_hit_rate.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import Any, Callable, Optional
-
-from paddleext.torchapi import Tensor, tensor
-
-from paddlemetrics.functional.retrieval.hit_rate import retrieval_hit_rate
-from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric
-
-
-class RetrievalHitRate(RetrievalMetric):
-    """Computes `IR HitRate`_.
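The fall-out doctest above works out the same way, remembering that lower is better and that the ratio is over the *negative* documents of each query:

    # query 0: 2 negatives total; top-2 = 0.5(T), 0.3(F) -> 1/2 retrieved
    # query 1: 2 negatives total; top-2 = 0.5(F), 0.3(T) -> 1/2 retrieved
    # fall-out@2 = (0.5 + 0.5) / 2 = 0.5 -> tensor(0.5000)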
- - Works with binary target data. Accepts float predictions from a model output. - - Forward accepts: - - - ``preds`` (float tensor): ``(N, ...)`` - - ``target`` (long or bool tensor): ``(N, ...)`` - - ``indexes`` (long tensor): ``(N, ...)`` - - ``indexes``, ``preds`` and ``target`` must have the same dimension. - ``indexes`` indicate to which query a prediction belongs. - Predictions will be first grouped by ``indexes`` and then the `Hit Rate` will be computed as the mean - of the `Hit Rate` over each query. - - Args: - empty_target_action: - Specify what to do with queries that do not have at least a positive ``target``. Choose from: - - - ``'neg'``: those queries count as ``0.0`` (default) - - ``'pos'``: those queries count as ``1.0`` - - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned - - ``'error'``: raise a ``ValueError`` - - k: consider only the top k elements for each query (default: None, which considers them all) - compute_on_step: - Forward only calls ``update()`` and return None if this is set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. default: False - process_group: - Specify the process group on which synchronization is called. default: None (which selects - the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. When `None`, DDP - will be used to perform the allgather. default: None - - Raises: - ValueError: - If ``k`` parameter is not `None` or an integer larger than 0 - - Example: - >>> from paddlemetrics import RetrievalHitRate - >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) - >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) - >>> target = tensor([True, False, False, False, True, False, True]) - >>> hr2 = RetrievalHitRate(k=2) - >>> hr2(preds, target, indexes=indexes) - tensor(0.5000) - """ - - higher_is_better = True - - def __init__( - self, - empty_target_action: str = "neg", - k: int = None, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ) -> None: - super().__init__( - empty_target_action=empty_target_action, - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - - if (k is not None) and not (isinstance(k, int) and k > 0): - raise ValueError("`k` has to be a positive integer or None") - self.k = k - - def _metric(self, preds: Tensor, target: Tensor) -> Tensor: - return retrieval_hit_rate(preds, target, k=self.k) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_metric.py b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_metric.py deleted file mode 100644 index ab43876fa..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_metric.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -from abc import ABC, abstractmethod -from typing import Any, Callable, List, Optional - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics import Metric -from paddlemetrics.utilities.checks import _check_retrieval_inputs -from paddlemetrics.utilities.data import get_group_indexes - -#: get_group_indexes is used to group predictions belonging to the same document - - -class RetrievalMetric(Metric): - """Works with binary target data. Accepts float predictions from a model output. - - Forward accepts - - - ``preds`` (float tensor): ``(N, ...)`` - - ``target`` (long or bool tensor): ``(N, ...)`` - - ``indexes`` (long tensor): ``(N, ...)`` - - `indexes`, `preds` and `target` must have the same dimension and will be flatten - to single dimension once provided. - - `indexes` indicate to which query a prediction belongs. - Predictions will be first grouped by indexes. Then the - real metric, defined by overriding the `_metric` method, - will be computed as the mean of the scores over each query. - - Args: - empty_target_action: - Specify what to do with queries that do not have at least a positive - or negative (depend on metric) target. Choose from: - - - ``'neg'``: those queries count as ``0.0`` (default) - - ``'pos'``: those queries count as ``1.0`` - - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned - - ``'error'``: raise a ``ValueError`` - - compute_on_step: - Forward only calls ``update()`` and return None if this is set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. default: False - process_group: - Specify the process group on which synchronization is called. default: None (which selects - the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. When `None`, DDP - will be used to perform the allgather. 
default: None - """ - - indexes: List[Tensor] - preds: List[Tensor] - target: List[Tensor] - higher_is_better = True - - def __init__( - self, - empty_target_action: str = "neg", - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ) -> None: - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - self.allow_non_binary_target = False - - empty_target_action_options = ("error", "skip", "neg", "pos") - if empty_target_action not in empty_target_action_options: - raise ValueError(f"Argument `empty_target_action` received a wrong value `{empty_target_action}`.") - - self.empty_target_action = empty_target_action - - self.add_state("indexes", default=[], dist_reduce_fx=None) - self.add_state("preds", default=[], dist_reduce_fx=None) - self.add_state("target", default=[], dist_reduce_fx=None) - - def update(self, preds: Tensor, target: Tensor, indexes: Tensor) -> None: # type: ignore - """Check shape, check and convert dtypes, flatten and add to accumulators.""" - if indexes is None: - raise ValueError("Argument `indexes` cannot be None") - - indexes, preds, target = _check_retrieval_inputs( - indexes, preds, target, allow_non_binary_target=self.allow_non_binary_target - ) - - self.indexes.append(indexes) - self.preds.append(preds) - self.target.append(target) - - def compute(self) -> Tensor: - """First concat state ``indexes``, ``preds`` and ``target`` since they were stored as lists. - - After that, compute list of groups that will help in keeping together predictions about the same query. Finally, - for each group compute the ``_metric`` if the number of positive targets is at least 1, otherwise behave as - specified by ``self.empty_target_action``. - """ - indexes = B.cat(self.indexes, dim=0) - preds = B.cat(self.preds, dim=0) - target = B.cat(self.target, dim=0) - - res = [] - groups = get_group_indexes(indexes) - - for group in groups: - mini_preds = preds[group] - mini_target = target[group] - - if not mini_target.sum(): - if self.empty_target_action == "error": - raise ValueError("`compute` method was provided with a query with no positive target.") - if self.empty_target_action == "pos": - res.append(tensor(1.0)) - elif self.empty_target_action == "neg": - res.append(tensor(0.0)) - else: - # ensure list contains only float tensors - res.append(self._metric(mini_preds, mini_target)) - - return B.stack([x.to(preds) for x in res]).mean() if res else tensor(0.0).to(preds) - - @abstractmethod - def _metric(self, preds: Tensor, target: Tensor) -> Tensor: - """Compute a metric over a predictions and target of a single group. - - This method should be overridden by subclasses. - """ diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_ndcg.py b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_ndcg.py deleted file mode 100644 index bb0740cac..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_ndcg.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
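Everything except ``_metric`` is boilerplate the base class handles, so a new retrieval metric is only a few lines. A hedged sketch of a hypothetical subclass (``RetrievalFirstRelevantRank`` is illustrative only, not part of this patch, and it assumes ``argsort``/``nonzero`` from the torch-compatible wrapper):

    import paddleext.torchapi as B
    from paddleext.torchapi import Tensor

    from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric


    class RetrievalFirstRelevantRank(RetrievalMetric):
        """Hypothetical metric: 1-based rank of the first relevant document."""

        higher_is_better = False

        def _metric(self, preds: Tensor, target: Tensor) -> Tensor:
            # preds/target are already the slice for a single query
            order = B.argsort(preds, descending=True)
            positions = B.nonzero(target[order]) + 1  # 1-based ranks of relevant docs
            return positions.min().float()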
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Callable, Optional - -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.functional.retrieval.ndcg import retrieval_normalized_dcg -from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric - - -class RetrievalNormalizedDCG(RetrievalMetric): - """Computes `Normalized Discounted Cumulative Gain`_. - - Works with binary or positive integer target data. Accepts float predictions from a model output. - - Forward accepts: - - - ``preds`` (float tensor): ``(N, ...)`` - - ``target`` (long, int, bool or float tensor): ``(N, ...)`` - - ``indexes`` (long tensor): ``(N, ...)`` - - ``indexes``, ``preds`` and ``target`` must have the same dimension. - ``indexes`` indicate to which query a prediction belongs. - Predictions will be first grouped by ``indexes`` and then `Normalized Discounted Cumulative Gain` - will be computed as the mean of the `Normalized Discounted Cumulative Gain` over each query. - - Args: - empty_target_action: - Specify what to do with queries that do not have at least a positive ``target``. Choose from: - - - ``'neg'``: those queries count as ``0.0`` (default) - - ``'pos'``: those queries count as ``1.0`` - - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned - - ``'error'``: raise a ``ValueError`` - - k: consider only the top k elements for each query (default: None, which considers them all) - compute_on_step: - Forward only calls ``update()`` and return None if this is set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. default: False - process_group: - Specify the process group on which synchronization is called. default: None (which selects - the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. When `None`, DDP - will be used to perform the allgather. 
default: None - - Raises: - ValueError: - If ``k`` parameter is not `None` or an integer larger than 0 - - Example: - >>> from paddlemetrics import RetrievalNormalizedDCG - >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) - >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) - >>> target = tensor([False, False, True, False, True, False, True]) - >>> ndcg = RetrievalNormalizedDCG() - >>> ndcg(preds, target, indexes=indexes) - tensor(0.8467) - """ - - higher_is_better = True - - def __init__( - self, - empty_target_action: str = "neg", - k: int = None, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ) -> None: - super().__init__( - empty_target_action=empty_target_action, - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - - if (k is not None) and not (isinstance(k, int) and k > 0): - raise ValueError("`k` has to be a positive integer or None") - self.k = k - self.allow_non_binary_target = True - - def _metric(self, preds: Tensor, target: Tensor) -> Tensor: - return retrieval_normalized_dcg(preds, target, k=self.k) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_precision.py b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_precision.py deleted file mode 100644 index f0f983a89..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_precision.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Callable, Optional - -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.functional.retrieval.precision import retrieval_precision -from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric - - -class RetrievalPrecision(RetrievalMetric): - """Computes `IR Precision`_. - - Works with binary target data. Accepts float predictions from a model output. - - Forward accepts: - - - ``preds`` (float tensor): ``(N, ...)`` - - ``target`` (long or bool tensor): ``(N, ...)`` - - ``indexes`` (long tensor): ``(N, ...)`` - - ``indexes``, ``preds`` and ``target`` must have the same dimension. - ``indexes`` indicate to which query a prediction belongs. - Predictions will be first grouped by ``indexes`` and then `Precision` will be computed as the mean - of the `Precision` over each query. - - Args: - empty_target_action: - Specify what to do with queries that do not have at least a positive ``target``. Choose from: - - - ``'neg'``: those queries count as ``0.0`` (default) - - ``'pos'``: those queries count as ``1.0`` - - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned - - ``'error'``: raise a ``ValueError`` - - k: consider only the top k elements for each query (default: None, which considers them all) - compute_on_step: - Forward only calls ``update()`` and return None if this is set to False. 
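The DCG arithmetic behind the ``RetrievalNormalizedDCG`` doctest above:

    # query 0: ranked relevances (1, 0, 0) -> DCG = IDCG = 1 -> nDCG = 1.0
    # query 1: ranked relevances (0, 1, 1, 0)
    #   DCG  = 1/log2(3) + 1/log2(4) ~= 0.6309 + 0.5000 = 1.1309
    #   IDCG = 1/log2(2) + 1/log2(3) ~= 1.0000 + 0.6309 = 1.6309
    #   nDCG ~= 1.1309 / 1.6309 ~= 0.6934
    # mean = (1.0 + 0.6934) / 2 ~= 0.8467 -> tensor(0.8467)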
default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. default: False - process_group: - Specify the process group on which synchronization is called. default: None (which selects - the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. When `None`, DDP - will be used to perform the allgather. default: None - - Raises: - ValueError: - If ``k`` parameter is not `None` or an integer larger than 0 - - Example: - >>> from paddlemetrics import RetrievalPrecision - >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) - >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) - >>> target = tensor([False, False, True, False, True, False, True]) - >>> p2 = RetrievalPrecision(k=2) - >>> p2(preds, target, indexes=indexes) - tensor(0.5000) - """ - - higher_is_better = True - - def __init__( - self, - empty_target_action: str = "neg", - k: int = None, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ) -> None: - super().__init__( - empty_target_action=empty_target_action, - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - - if (k is not None) and not (isinstance(k, int) and k > 0): - raise ValueError("`k` has to be a positive integer or None") - self.k = k - - def _metric(self, preds: Tensor, target: Tensor) -> Tensor: - return retrieval_precision(preds, target, k=self.k) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_r_precision.py b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_r_precision.py deleted file mode 100644 index 75373532a..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_r_precision.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.functional.retrieval.r_precision import retrieval_r_precision -from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric - - -class RetrievalRPrecision(RetrievalMetric): - """Computes `IR R-Precision`_. - - Works with binary target data. Accepts float predictions from a model output. - - Forward accepts: - - - ``preds`` (float tensor): ``(N, ...)`` - - ``target`` (long or bool tensor): ``(N, ...)`` - - ``indexes`` (long tensor): ``(N, ...)`` - - ``indexes``, ``preds`` and ``target`` must have the same dimension. - ``indexes`` indicate to which query a prediction belongs. - Predictions will be first grouped by ``indexes`` and then `R-Precision` will be computed as the mean - of the `R-Precision` over each query. - - Args: - empty_target_action: - Specify what to do with queries that do not have at least a positive ``target``. 
Choose from: - - - ``'neg'``: those queries count as ``0.0`` (default) - - ``'pos'``: those queries count as ``1.0`` - - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned - - ``'error'``: raise a ``ValueError`` - - compute_on_step: - Forward only calls ``update()`` and return None if this is set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. default: False - process_group: - Specify the process group on which synchronization is called. default: None (which selects - the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. When `None`, DDP - will be used to perform the allgather. default: None - - Example: - >>> from paddlemetrics import RetrievalRPrecision - >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) - >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) - >>> target = tensor([False, False, True, False, True, False, True]) - >>> p2 = RetrievalRPrecision() - >>> p2(preds, target, indexes=indexes) - tensor(0.7500) - """ - - higher_is_better = True - - def _metric(self, preds: Tensor, target: Tensor) -> Tensor: - return retrieval_r_precision(preds, target) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_recall.py b/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_recall.py deleted file mode 100644 index 26ace51c2..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/retrieval/retrieval_recall.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Callable, Optional - -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.functional.retrieval.recall import retrieval_recall -from paddlemetrics.retrieval.retrieval_metric import RetrievalMetric - - -class RetrievalRecall(RetrievalMetric): - """Computes `IR Recall`_. - - Works with binary target data. Accepts float predictions from a model output. - - Forward accepts: - - - ``preds`` (float tensor): ``(N, ...)`` - - ``target`` (long or bool tensor): ``(N, ...)`` - - ``indexes`` (long tensor): ``(N, ...)`` - - ``indexes``, ``preds`` and ``target`` must have the same dimension. - ``indexes`` indicate to which query a prediction belongs. - Predictions will be first grouped by ``indexes`` and then `Recall` will be computed as the mean - of the `Recall` over each query. - - Args: - empty_target_action: - Specify what to do with queries that do not have at least a positive ``target``. Choose from: - - - ``'neg'``: those queries count as ``0.0`` (default) - - ``'pos'``: those queries count as ``1.0`` - - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned - - ``'error'``: raise a ``ValueError`` - - k: consider only the top k elements for each query (default: None, which considers them all) - compute_on_step: - Forward only calls ``update()`` and return None if this is set to False. 
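And the ``RetrievalRPrecision`` doctest above:

    # query 0: R = 1 relevant doc; top-1 = 0.5(T) -> 1/1 = 1.0
    # query 1: R = 2 relevant docs; top-2 = 0.5(F), 0.3(T) -> 1/2 = 0.5
    # R-Precision = (1.0 + 0.5) / 2 = 0.75 -> tensor(0.7500)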
default: True
-        dist_sync_on_step:
-            Synchronize metric state across processes at each ``forward()``
-            before returning the value at the step. default: False
-        process_group:
-            Specify the process group on which synchronization is called. default: None (which selects
-            the entire world)
-        dist_sync_fn:
-            Callback that performs the allgather operation on the metric state. When `None`, DDP
-            will be used to perform the allgather. default: None
-
-    Raises:
-        ValueError:
-            If ``k`` is neither ``None`` nor a positive integer
-
-    Example:
-        >>> from paddlemetrics import RetrievalRecall
-        >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1])
-        >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2])
-        >>> target = tensor([False, False, True, False, True, False, True])
-        >>> r2 = RetrievalRecall(k=2)
-        >>> r2(preds, target, indexes=indexes)
-        tensor(0.7500)
-    """
-
-    higher_is_better = True
-
-    def __init__(
-        self,
-        empty_target_action: str = "neg",
-        k: int = None,
-        compute_on_step: bool = True,
-        dist_sync_on_step: bool = False,
-        process_group: Optional[Any] = None,
-        dist_sync_fn: Callable = None,
-    ) -> None:
-        super().__init__(
-            empty_target_action=empty_target_action,
-            compute_on_step=compute_on_step,
-            dist_sync_on_step=dist_sync_on_step,
-            process_group=process_group,
-            dist_sync_fn=dist_sync_fn,
-        )
-
-        if (k is not None) and not (isinstance(k, int) and k > 0):
-            raise ValueError("`k` has to be a positive integer or None")
-        self.k = k
-
-    def _metric(self, preds: Tensor, target: Tensor) -> Tensor:
-        return retrieval_recall(preds, target, k=self.k)
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/setup_tools.py b/EE/paddlemetric/src/build/lib/paddlemetrics/setup_tools.py
deleted file mode 100644
index e3233cef9..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/setup_tools.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import os
-import re
-from typing import List
-
-_PROJECT_ROOT = os.path.dirname(os.path.dirname(__file__))
-
-
-def _load_requirements(path_dir: str, file_name: str = "requirements.txt", comment_char: str = "#") -> List[str]:
-    """Load requirements from a file.
-
-    >>> _load_requirements(_PROJECT_ROOT)  # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
-    ['numpy...', 'B...']
-    """
-    with open(os.path.join(path_dir, file_name)) as file:
-        lines = [ln.strip() for ln in file.readlines()]
-    reqs = []
-    for ln in lines:
-        # filter out all comments
-        if comment_char in ln:
-            ln = ln[: ln.index(comment_char)].strip()
-        # skip directly installed dependencies
-        if ln.startswith("http"):
-            continue
-        if ln:  # if requirement is not empty
-            reqs.append(ln)
-    return reqs
-
-
-def _load_readme_description(path_dir: str, homepage: str, version: str) -> str:
-    """Load the readme as a long description.
-
-    >>> _load_readme_description(_PROJECT_ROOT, "", "")  # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
-    '...'
-    """
-    path_readme = os.path.join(path_dir, "README.md")
-    with open(path_readme, encoding="utf-8") as fp:
-        text = fp.read()
-
-    # https://github.com/PyTorchLightning/paddlemetrics/raw/master/docs/source/_static/images/lightning_module/pt_to_pl.png
-    github_source_url = os.path.join(homepage, "raw", version)
-    # replace relative repository paths with absolute links to the release
-    # do not replace all "docs", as in the readme we refer to some other sources with a particular path to docs
-    text = text.replace("docs/source/_static/", f"{os.path.join(github_source_url, 'docs/source/_static/')}")
-
-    # readthedocs badge
-    text = text.replace("badge/?version=stable", f"badge/?version={version}")
-    text = text.replace("paddlemetrics.readthedocs.io/en/stable/", f"paddlemetrics.readthedocs.io/en/{version}")
-    # codecov badge
-    text = text.replace("/branch/master/graph/badge.svg", f"/release/{version}/graph/badge.svg")
-    # replace GitHub badges with release ones
-    text = text.replace("badge.svg?branch=master&event=push", f"badge.svg?tag={version}")
-    # Azure...
-    text = text.replace("?branchName=master", f"?branchName=refs%2Ftags%2F{version}")
-    text = re.sub(r"\?definitionId=\d+&branchName=master", f"?definitionId=2&branchName=refs%2Ftags%2F{version}", text)
-
-    skip_begin = r""
-    skip_end = r""
-    # todo: wrap content as commented description
-    text = re.sub(rf"{skip_begin}.+?{skip_end}", "", text, flags=re.IGNORECASE + re.DOTALL)
-
-    return text
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/text/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/text/__init__.py
deleted file mode 100644
index 782ca2955..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/text/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# from paddlemetrics.text.bert import BERTScore  # noqa: F401
-from paddlemetrics.text.bleu import BLEUScore  # noqa: F401
-from paddlemetrics.text.rouge import ROUGEScore  # noqa: F401
-from paddlemetrics.text.sacre_bleu import SacreBLEUScore  # noqa: F401
-from paddlemetrics.text.wer import WER  # noqa: F401
diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/text/bert.py b/EE/paddlemetric/src/build/lib/paddlemetrics/text/bert.py
deleted file mode 100644
index 0f602f30a..000000000
--- a/EE/paddlemetric/src/build/lib/paddlemetrics/text/bert.py
+++ /dev/null
@@ -1,251 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
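# --- Editor's note: an illustrative sketch, not part of the original patch. It shows
# how the setup_tools helpers deleted above (_load_requirements and
# _load_readme_description) are typically wired into a setup.py; the version string
# and homepage URL below are hypothetical placeholders.
from setuptools import setup

from paddlemetrics.setup_tools import _load_requirements, _load_readme_description

setup(
    name="paddlemetrics",
    version="0.6.0",  # hypothetical version
    install_requires=_load_requirements(path_dir="."),
    long_description=_load_readme_description(
        path_dir=".", homepage="https://github.com/example/paddlemetrics", version="0.6.0"
    ),
    long_description_content_type="text/markdown",
)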
-import warnings
-from typing import Any, Callable, Dict, List, Optional, Union
-
-import paddleext.torchapi as B
-
-from paddlemetrics.functional import bert_score
-from paddlemetrics.functional.text.bert import _preprocess_text
-from paddlemetrics.metric import Metric
-from paddlemetrics.utilities.imports import _TRANSFORMERS_AVAILABLE
-
-if _TRANSFORMERS_AVAILABLE:
-    from transformers import AutoTokenizer
-
-
-# Default model recommended in the original implementation.
-_DEFAULT_MODEL = "roberta-large"
-
-
-def _concatenate(d: Dict[str, List[B.Tensor]]) -> Dict[str, B.Tensor]:
-    """Concatenate list of tensors within a given dictionary."""
-    output_dict: Dict[str, B.Tensor] = {}
-    for k, v in d.items():
-        output_dict[k] = B.cat(v)
-    return output_dict
-
-
-class BERTScore(Metric):
-    """`Bert_score Evaluating Text Generation`_ leverages the pre-trained contextual embeddings from BERT and
-    matches words in candidate and reference sentences by cosine similarity. It has been shown to correlate with
-    human judgment on sentence-level and system-level evaluation. Moreover, BERTScore computes precision, recall,
-    and F1 measure, which can be useful for evaluating different language generation tasks.
-
-    This implementation follows the original implementation from `BERT_score`_.
-
-    Args:
-        predictions:
-            An iterable of predicted sentences.
-        references:
-            An iterable of target sentences.
-        model_type:
-            A name or a model path used to load `transformers` pretrained model.
-        num_layers:
-            A layer of representation to use.
-        all_layers:
-            An indication of whether the representation from all model's layers should be used.
-            If `all_layers = True`, the argument `num_layers` is ignored.
-        model:
-            A user's own model. Must be an instance of `B.nn.Module`.
-        user_tokenizer:
-            A user's own tokenizer used with the user's own model. This must be an instance with the `__call__` method.
-            This method must take an iterable of sentences (`List[str]`) and must return a python dictionary
-            containing `"input_ids"` and `"attention_mask"` represented by `B.Tensor`. It is up to the user's model
-            whether `"input_ids"` is a `B.Tensor` of input ids or embedding vectors.
-            This tokenizer must prepend an equivalent of `[CLS]` token and append an equivalent of `[SEP]` token
-            as the `transformers` tokenizer does.
-        user_forward_fn:
-            A user's own forward function used in combination with `user_model`. This function must take `user_model`
-            and a python dictionary containing `"input_ids"` and `"attention_mask"` represented by `B.Tensor`
-            as an input and return the model's output represented by a single `B.Tensor`.
-        verbose:
-            An indication of whether a progress bar should be displayed during the embeddings calculation.
-        idf:
-            An indication of whether normalization using inverse document frequencies should be used.
-        device:
-            A device to be used for calculation.
-        max_length:
-            A maximum length of input sequences. Sequences longer than `max_length` are to be trimmed.
-        batch_size:
-            A batch size used for model processing.
-        num_threads:
-            A number of threads to use for a dataloader.
-        return_hash:
-            An indication of whether the corresponding `hash_code` should be returned.
-        lang:
-            A language of input sentences.
-        rescale_with_baseline:
-            An indication of whether bertscore should be rescaled with a pre-computed baseline.
-            When a pretrained model from `transformers` is used, the corresponding baseline is downloaded
-            from the original `bert-score` package from `BERT_score`_ if available.
- In other cases, please specify a path to the baseline csv/tsv file, which must follow the formatting - of the files from `BERT_score`_. - baseline_path: - A path to the user's own local csv/tsv file with the baseline scale. - baseline_url: - A url path to the user's own csv/tsv file with the baseline scale. - compute_on_step: - Forward only calls ``update()`` and return None if this is set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. default: False - process_group: - Specify the process group on which synchronization is called. default: None (which selects the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. When ``None``, DDP - will be used to perform the allgather - - Returns: - Python dictionary containing the keys `precision`, `recall` and `f1` with corresponding values. - - Example: - >>> predictions = ["hello there", "general kenobi"] - >>> references = ["hello there", "master kenobi"] - >>> bertscore = BERTScore() - >>> bertscore.update(predictions=predictions,references=references) - >>> bertscore.compute() # doctest: +SKIP - {'precision': [0.99..., 0.99...], - 'recall': [0.99..., 0.99...], - 'f1': [0.99..., 0.99...]} - """ - - higher_is_better = True - - def __init__( - self, - model_name_or_path: Optional[str] = None, - num_layers: Optional[int] = None, - all_layers: bool = False, - model: Optional[B.nn.Module] = None, - user_tokenizer: Optional[Any] = None, - user_forward_fn: Callable[[B.nn.Module, Dict[str, B.Tensor]], B.Tensor] = None, - verbose: bool = False, - idf: bool = False, - device: Optional[Union[str, B.device]] = None, - max_length: int = 512, - batch_size: int = 64, - num_threads: int = 4, - return_hash: bool = False, - lang: str = "en", - rescale_with_baseline: bool = False, - baseline_path: Optional[str] = None, - baseline_url: Optional[str] = None, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ): - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - self.model_name_or_path = model_name_or_path - self.num_layers = num_layers - self.all_layers = all_layers - self.model = model - self.user_forward_fn = user_forward_fn - self.verbose = verbose - self.idf = idf - self.embedding_device = device - self.max_length = max_length - self.batch_size = batch_size - self.num_threads = num_threads - self.return_hash = return_hash - self.lang = lang - self.rescale_with_baseline = rescale_with_baseline - self.baseline_path = baseline_path - self.baseline_url = baseline_url - self.predictions: Dict[str, List[B.Tensor]] = {"input_ids": [], "attention_mask": []} - self.references: Dict[str, List[B.Tensor]] = {"input_ids": [], "attention_mask": []} - - if user_tokenizer: - self.tokenizer = user_tokenizer - self.user_tokenizer = True - else: - if not _TRANSFORMERS_AVAILABLE: - raise ValueError( - "`BERTScore` metric with default tokenizers requires `transformers` package be installed. " - "Either install with `pip install transformers>=4.0` or `pip install paddlemetrics[text]`" - ) - if not model_name_or_path: - model_name_or_path = _DEFAULT_MODEL - warnings.warn( - "The argument `model_name_or_path` was not specified while it is required when default " - " `transformers` model are used." 
- f"It is, therefore, used the default recommended model - {_DEFAULT_MODEL}." - ) - self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) - self.user_tokenizer = False - - def update(self, predictions: List[str], references: List[str]) -> None: # type: ignore - """Store predictions/references for computing BERT scores. It is necessary to store sentences in a - tokenized form to ensure the DDP mode working. - - Args: - predictions: - An iterable of predicted sentences. - references: - An iterable of predicted sentences. - """ - predictions_dict = _preprocess_text( - predictions, - self.tokenizer, - self.max_length, - truncation=False, - sort_according_length=False, - own_tokenizer=self.user_tokenizer, - ) - references_dict = _preprocess_text( - references, - self.tokenizer, - self.max_length, - truncation=False, - sort_according_length=False, - own_tokenizer=self.user_tokenizer, - ) - self.predictions["input_ids"].append(predictions_dict["input_ids"]) - self.predictions["attention_mask"].append(predictions_dict["attention_mask"]) - self.references["input_ids"].append(references_dict["input_ids"]) - self.references["attention_mask"].append(references_dict["attention_mask"]) - - def compute(self) -> Dict[str, Union[List[float], str]]: - """Calculate BERT scores. - - Return: - Python dictionary containing the keys `precision`, `recall` and `f1` with corresponding values. - """ - return bert_score( - predictions=_concatenate(self.predictions), - references=_concatenate(self.references), - model_name_or_path=self.model_name_or_path, - num_layers=self.num_layers, - all_layers=self.all_layers, - model=self.model, - user_tokenizer=self.tokenizer if self.user_tokenizer else None, - user_forward_fn=self.user_forward_fn, - verbose=self.verbose, - idf=self.idf, - device=self.embedding_device, - max_length=self.max_length, - batch_size=self.batch_size, - num_threads=self.num_threads, - return_hash=self.return_hash, - lang=self.lang, - rescale_with_baseline=self.rescale_with_baseline, - baseline_path=self.baseline_path, - baseline_url=self.baseline_url, - ) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/text/bleu.py b/EE/paddlemetric/src/build/lib/paddlemetrics/text/bleu.py deleted file mode 100644 index 46937d98f..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/text/bleu.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# referenced from -# Library Name: torchtext -# Authors: torchtext authors and @sluks -# Date: 2020-07-18 -# Link: https://pyB.org/text/_modules/torchtext/data/metrics.html#bleu_score -from typing import Any, Callable, Optional, Sequence - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics import Metric -from paddlemetrics.functional.text.bleu import _bleu_score_compute, _bleu_score_update - - -class BLEUScore(Metric): - """Calculate `BLEU score`_ of machine translated text with one or more references. 
- - Args: - n_gram: - Gram value ranged from 1 to 4 (Default 4) - smooth: - Whether or not to apply smoothing – see [2] - compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. - process_group: - Specify the process group on which synchronization is called. default: None (which selects the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. When `None`, DDP - will be used to perform the allgather. - - Example: - >>> translate_corpus = ['the cat is on the mat'.split()] - >>> reference_corpus = [['there is a cat on the mat'.split(), 'a cat is on the mat'.split()]] - >>> metric = BLEUScore() - >>> metric(reference_corpus, translate_corpus) - tensor(0.7598) - - References: - [1] BLEU: a Method for Automatic Evaluation of Machine Translation by Papineni, - Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu `BLEU`_ - - [2] Automatic Evaluation of Machine Translation Quality Using Longest Common Subsequence - and Skip-Bigram Statistics by Chin-Yew Lin and Franz Josef Och `Machine Translation Evolution`_ - """ - - is_differentiable = False - higher_is_better = True - trans_len: Tensor - ref_len: Tensor - numerator: Tensor - denominator: Tensor - - def __init__( - self, - n_gram: int = 4, - smooth: bool = False, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Optional[Callable] = None, - ): - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - - self.n_gram = n_gram - self.smooth = smooth - - self.add_state("trans_len", tensor(0, dtype=B.float), dist_reduce_fx="sum") - self.add_state("ref_len", tensor(0, dtype=B.float), dist_reduce_fx="sum") - self.add_state("numerator", B.zeros(self.n_gram), dist_reduce_fx="sum") - self.add_state("denominator", B.zeros(self.n_gram), dist_reduce_fx="sum") - - def update( # type: ignore - self, reference_corpus: Sequence[Sequence[Sequence[str]]], translate_corpus: Sequence[Sequence[str]] - ) -> None: - """Compute Precision Scores. - - Args: - reference_corpus: An iterable of iterables of reference corpus - translate_corpus: An iterable of machine translated corpus - """ - self.trans_len, self.ref_len = _bleu_score_update( - reference_corpus, - translate_corpus, - self.numerator, - self.denominator, - self.trans_len, - self.ref_len, - self.n_gram, - ) - - def compute(self) -> Tensor: - """Calculate BLEU score. - - Return: - Tensor with BLEU Score - """ - return _bleu_score_compute( - self.trans_len, self.ref_len, self.numerator, self.denominator, self.n_gram, self.smooth - ) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/text/rouge.py b/EE/paddlemetric/src/build/lib/paddlemetrics/text/rouge.py deleted file mode 100644 index 254f366d7..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/text/rouge.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import warnings
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
-
-from paddleext.torchapi import Tensor
-
-from paddlemetrics import Metric
-from paddlemetrics.functional.text.rouge import ALLOWED_ROUGE_KEYS, _rouge_score_compute, _rouge_score_update
-from paddlemetrics.utilities.imports import _NLTK_AVAILABLE
-
-
-class ROUGEScore(Metric):
-    """`Calculate Rouge Score`_, used for automatic summarization. This implementation should imitate the
-    behaviour of the `rouge-score` package `Python ROUGE Implementation`_.
-
-    Args:
-        newline_sep:
-            Whether a new line separates the inputs.
-            This argument is no longer in use; it was deprecated in v0.6 and will be removed in v0.7.
-        use_stemmer:
-            Use Porter stemmer to strip word suffixes to improve matching.
-        rouge_keys:
-            A list of rouge types to calculate.
-            Keys that are allowed are ``rougeL``, ``rougeLsum``, and ``rouge1`` through ``rouge9``.
-        decimal_places:
-            The number of digits to round the computed values to.
-            This argument is no longer in use; it was deprecated in v0.6 and will be removed in v0.7.
-        compute_on_step:
-            Forward only calls ``update()`` and returns None if this is set to False. default: True
-        dist_sync_on_step:
-            Synchronize metric state across processes at each ``forward()``
-            before returning the value at the step.
-        process_group:
-            Specify the process group on which synchronization is called. default: None (which selects the entire world)
-        dist_sync_fn:
-            Callback that performs the allgather operation on the metric state. When `None`, DDP
-            will be used to perform the allgather.
-
-    Example:
-
-        >>> targets = "Is your name John".split()
-        >>> preds = "My name is John".split()
-        >>> rouge = ROUGEScore()   # doctest: +SKIP
-        >>> from pprint import pprint
-        >>> pprint(rouge(preds, targets))  # doctest: +NORMALIZE_WHITESPACE +SKIP
-        {'rouge1_fmeasure': 0.25,
-         'rouge1_precision': 0.25,
-         'rouge1_recall': 0.25,
-         'rouge2_fmeasure': 0.0,
-         'rouge2_precision': 0.0,
-         'rouge2_recall': 0.0,
-         'rougeL_fmeasure': 0.25,
-         'rougeL_precision': 0.25,
-         'rougeL_recall': 0.25,
-         'rougeLsum_fmeasure': 0.25,
-         'rougeLsum_precision': 0.25,
-         'rougeLsum_recall': 0.25}
-
-    Raises:
-        ValueError:
-            If the python package ``nltk`` is not installed.
-        ValueError:
-            If any of the ``rouge_keys`` does not belong to the allowed set of keys.
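# --- Editor's note: an illustrative sketch, not part of the original patch, showing
# how `rouge_keys` narrows the computed metrics; the keys are validated in
# ``__init__`` below, and string inputs are wrapped into lists by ``update()``.
from paddlemetrics.text.rouge import ROUGEScore

rouge = ROUGEScore(rouge_keys=("rouge1", "rougeL"))
rouge.update(preds="My name is John", targets="Is your name John")
scores = rouge.compute()  # rouge1_*/rougeL_* fmeasure, precision and recall keys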
- - References: - [1] ROUGE: A Package for Automatic Evaluation of Summaries by Chin-Yew Lin `Rouge Detail`_ - """ - - higher_is_better = True - - def __init__( - self, - newline_sep: Optional[bool] = None, # remove in v0.7 - use_stemmer: bool = False, - rouge_keys: Union[str, Tuple[str, ...]] = ("rouge1", "rouge2", "rougeL", "rougeLsum"), # type: ignore - decimal_places: Optional[bool] = None, # remove in v0.7 - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Optional[Callable] = None, - ): - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - if newline_sep is not None: - warnings.warn("Argument `newline_sep` is deprecated in v0.6 and will be removed in v0.7") - if decimal_places is not None: - warnings.warn("Argument `decimal_places` is deprecated in v0.6 and will be removed in v0.7") - - if use_stemmer or "rougeLsum" in rouge_keys: - if not _NLTK_AVAILABLE: - raise ValueError("Stemmer and/or `rougeLsum` requires that nltk is installed. Use `pip install nltk`.") - import nltk - - if not isinstance(rouge_keys, tuple): - rouge_keys = tuple([rouge_keys]) - for key in rouge_keys: - if key not in ALLOWED_ROUGE_KEYS: - raise ValueError(f"Got unknown rouge key {key}. Expected to be one of {ALLOWED_ROUGE_KEYS}") - - self.rouge_keys = rouge_keys - self.rouge_keys_values = [ALLOWED_ROUGE_KEYS[key] for key in rouge_keys] - self.stemmer = nltk.stem.porter.PorterStemmer() if use_stemmer else None - - # Adding stated dynamically to prevent IndexError during sync function as some lists can be empty. - for rouge_key in self.rouge_keys: - for score in ["fmeasure", "precision", "recall"]: - self.add_state(f"{rouge_key}_{score}", [], dist_reduce_fx=None) - - def update(self, preds: Union[str, List[str]], targets: Union[str, List[str]]) -> None: # type: ignore - """Compute rouge scores. - - Args: - preds: An iterable of predicted sentences. - targets: An iterable of target sentences. - """ - - if isinstance(preds, str): - preds = [preds] - - if isinstance(targets, str): - targets = [targets] - - output: Dict[Union[int, str], List[Dict[str, Tensor]]] = _rouge_score_update( - preds, targets, self.rouge_keys_values, stemmer=self.stemmer - ) - for rouge_key, metrics in output.items(): - for metric in metrics: - for type, value in metric.items(): - getattr(self, f"rouge{rouge_key}_{type}").append(value.to(self.device)) - - def compute(self) -> Dict[str, Tensor]: - """Calculate (Aggregate and provide confidence intervals) ROUGE score. - - Return: - Python dictionary of rouge scores for each input rouge key. - """ - update_output = {} - for rouge_key in self.rouge_keys_values: - for type in ["fmeasure", "precision", "recall"]: - update_output[f"rouge{rouge_key}_{type}"] = getattr(self, f"rouge{rouge_key}_{type}") - - return _rouge_score_compute(update_output) - - def __hash__(self) -> int: - # override to hash list objects. - # this is a bug in the upstream pytorch release. 
- hash_vals = [self.__class__.__name__] - - for key in self._defaults: - value = getattr(self, key) - if isinstance(value, list): - value = tuple(value) - hash_vals.append(value) - - return hash(tuple(hash_vals)) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/text/sacre_bleu.py b/EE/paddlemetric/src/build/lib/paddlemetrics/text/sacre_bleu.py deleted file mode 100644 index 4f4d99e8f..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/text/sacre_bleu.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# referenced from -# Library Name: torchtext -# Authors: torchtext authors and @sluks -# Date: 2020-07-18 -# Link: https://pyB.org/text/_modules/torchtext/data/metrics.html#bleu_score -from typing import Any, Callable, Optional, Sequence - -from typing_extensions import Literal - -from paddlemetrics.functional.text.bleu import _bleu_score_update -from paddlemetrics.functional.text.sacre_bleu import _SacreBLEUTokenizer -from paddlemetrics.text.bleu import BLEUScore -from paddlemetrics.utilities.imports import _REGEX_AVAILABLE - -AVAILABLE_TOKENIZERS = ("none", "13a", "zh", "intl", "char") - - -class SacreBLEUScore(BLEUScore): - """Calculate `BLEU score`_ [1] of machine translated text with one or more references. This implementation - follows the behaviour of SacreBLEU [2] implementation from https://github.com/mjpost/sacrebleu. - - The SacreBLEU implementation differs from the NLTK BLEU implementation in tokenization techniques. - - Args: - n_gram: - Gram value ranged from 1 to 4 (Default 4) - smooth: - Whether or not to apply smoothing – see [2] - tokenize: - Tokenization technique to be used. (Default '13a') - Supported tokenization: ['none', '13a', 'zh', 'intl', 'char'] - lowercase: - If ``True``, BLEU score over lowercased text is calculated. - compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. - process_group: - Specify the process group on which synchronization is called. default: None (which selects the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. When `None`, DDP - will be used to perform the allgather. - - Raises: - ValueError: - If ``tokenize`` not one of 'none', '13a', 'zh', 'intl' or 'char' - ValueError: - If ``tokenize`` is set to 'intl' and `regex` is not installed - - - Example: - >>> translate_corpus = ['the cat is on the mat'] - >>> reference_corpus = [['there is a cat on the mat', 'a cat is on the mat']] - >>> metric = SacreBLEUScore() - >>> metric(reference_corpus, translate_corpus) - tensor(0.7598) - - References: - [1] BLEU: a Method for Automatic Evaluation of Machine Translation by Papineni, - Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu `BLEU`_ - - [2] A Call for Clarity in Reporting BLEU Scores by Matt Post. 
- - [3] Automatic Evaluation of Machine Translation Quality Using Longest Common Subsequence - and Skip-Bigram Statistics by Chin-Yew Lin and Franz Josef Och `Machine Translation Evolution`_ - """ - - def __init__( - self, - n_gram: int = 4, - smooth: bool = False, - tokenize: Literal["none", "13a", "zh", "intl", "char"] = "13a", - lowercase: bool = False, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Optional[Callable] = None, - ): - super().__init__( - n_gram=n_gram, - smooth=smooth, - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - if tokenize not in AVAILABLE_TOKENIZERS: - raise ValueError(f"Argument `tokenize` expected to be one of {AVAILABLE_TOKENIZERS} but got {tokenize}.") - - if tokenize == "intl" and not _REGEX_AVAILABLE: - raise ValueError( - "`'intl'` tokenization requires `regex` installed. Use `pip install regex` or `pip install " - "paddlemetrics[text]`." - ) - self.tokenizer = _SacreBLEUTokenizer(tokenize, lowercase) - - def update( # type: ignore - self, reference_corpus: Sequence[Sequence[str]], translate_corpus: Sequence[str] - ) -> None: - """Compute Precision Scores. - - Args: - reference_corpus: An iterable of iterables of reference corpus - translate_corpus: An iterable of machine translated corpus - """ - reference_corpus_: Sequence[Sequence[Sequence[str]]] = [ - [self.tokenizer(line) for line in reference] for reference in reference_corpus - ] - translate_corpus_: Sequence[Sequence[str]] = [self.tokenizer(line) for line in translate_corpus] - - self.trans_len, self.ref_len = _bleu_score_update( - reference_corpus_, - translate_corpus_, - self.numerator, - self.denominator, - self.trans_len, - self.ref_len, - self.n_gram, - ) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/text/wer.py b/EE/paddlemetric/src/build/lib/paddlemetrics/text/wer.py deleted file mode 100644 index 7bb69740b..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/text/wer.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import Any, Callable, List, Optional, Union -from warnings import warn - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.functional.text.wer import _wer_compute, _wer_update -from paddlemetrics.metric import Metric - - -class WER(Metric): - r""" - Word error rate (WER_) is a common metric of the performance of an automatic speech recognition system. - This value indicates the percentage of words that were incorrectly predicted. - The lower the value, the better the performance of the ASR system with a WER of 0 being a perfect score. - Word error rate can then be computed as: - - .. 
math:: - WER = \frac{S + D + I}{N} = \frac{S + D + I}{S + D + C} - - where: - - S is the number of substitutions, - - D is the number of deletions, - - I is the number of insertions, - - C is the number of correct words, - - N is the number of words in the reference (N=S+D+C). - - Compute WER score of transcribed segments against references. - - Args: - concatenate_texts: Whether to concatenate all input texts or compute WER iteratively. - This argument is deprecated in v0.6 and it will be removed in v0.7. - compute_on_step: - Forward only calls ``update()`` and return None if this is set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. default: False - process_group: - Specify the process group on which synchronization is called. default: None (which selects the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. When ``None``, DDP - will be used to perform the allgather - - Returns: - (Tensor) Word error rate - - Examples: - >>> predictions = ["this is the prediction", "there is an other sample"] - >>> references = ["this is the reference", "there is another one"] - >>> metric = WER() - >>> metric(predictions, references) - tensor(0.5000) - """ - is_differentiable = False - higher_is_better = False - error: Tensor - total: Tensor - - def __init__( - self, - concatenate_texts: Optional[bool] = None, # TODO: remove in v0.7 - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ): - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - if concatenate_texts is not None: - warn("`concatenate_texts` has been deprecated in v0.6 and it will be removed in v0.7", DeprecationWarning) - self.add_state("errors", tensor(0, dtype=B.float), dist_reduce_fx="sum") - self.add_state("total", tensor(0, dtype=B.float), dist_reduce_fx="sum") - - def update(self, predictions: Union[str, List[str]], references: Union[str, List[str]]) -> None: # type: ignore - """Store references/predictions for computing Word Error Rate scores. - - Args: - predictions: Transcription(s) to score as a string or list of strings - references: Reference(s) for each speech input as a string or list of strings - """ - errors, total = _wer_update(predictions, references) - self.errors += errors - self.total += total - - def compute(self) -> Tensor: - """Calculate the word error rate. 
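# --- Editor's note: a worked example, not part of the original patch, tracing the
# doctest above. The two references hold N = 4 + 4 = 8 words in total:
#   "this is the prediction"   vs "this is the reference" -> 1 substitution
#   "there is an other sample" vs "there is another one"  -> 2 substitutions + 1 insertion
# so S + D + I = 4 and WER = 4 / 8 = 0.5, which is the tensor(0.5000) shown above.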
- - Returns: - (Tensor) Word error rate - """ - return _wer_compute(self.errors, self.total) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/__init__.py deleted file mode 100644 index b1b2a5067..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from paddlemetrics.utilities.data import apply_to_collection # noqa: F401 -from paddlemetrics.utilities.distributed import class_reduce, reduce # noqa: F401 -from paddlemetrics.utilities.prints import rank_zero_debug, rank_zero_info, rank_zero_warn # noqa: F401 diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/checks.py b/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/checks.py deleted file mode 100644 index b948f103b..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/checks.py +++ /dev/null @@ -1,582 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Optional, Tuple - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor - -from paddlemetrics.utilities.data import select_topk, to_onehot -from paddlemetrics.utilities.enums import DataType - - -def _check_same_shape(preds: Tensor, target: Tensor) -> None: - """Check that predictions and target have the same shape, else raise error.""" - if preds.shape != target.shape: - raise RuntimeError("Predictions and targets are expected to have the same shape") - - -def _basic_input_validation(preds: Tensor, target: Tensor, threshold: float, multiclass: Optional[bool]) -> None: - """Perform basic validation of inputs that does not require deducing any information of the type of inputs.""" - - if target.is_floating_point(): - raise ValueError("The `target` has to be an integer tensor.") - if target.min() < 0: - raise ValueError("The `target` has to be a non-negative tensor.") - - preds_float = preds.is_floating_point() - if not preds_float and preds.min() < 0: - raise ValueError("If `preds` are integers, they have to be non-negative.") - - if not preds.shape[0] == target.shape[0]: - raise ValueError("The `preds` and `target` should have the same first dimension.") - - if multiclass is False and target.max() > 1: - raise ValueError("If you set `multiclass=False`, then `target` should not exceed 1.") - - if multiclass is False and not preds_float and preds.max() > 1: - raise ValueError("If you set `multiclass=False` and `preds` are integers, then `preds` should not exceed 1.") - - -def _check_shape_and_type_consistency(preds: Tensor, target: Tensor) -> Tuple[DataType, int]: - """This checks that the shape and type of inputs are consistent with each other and fall into one of the - allowed input types (see the documentation of docstring of ``_input_format_classification``). It does not check - for consistency of number of classes, other functions take care of that. 
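# --- Editor's note: a summary sketch, not part of the original patch. The case
# deduction implemented below reads as a small decision table ("float" meaning
# floating-point ``preds``):
#   preds.ndim == target.ndim, ndim == 1, float -> binary
#   preds.ndim == target.ndim, ndim == 1, int   -> multi-class
#   preds.ndim == target.ndim, ndim > 1,  float -> multi-label
#   preds.ndim == target.ndim, ndim > 1,  int   -> multi-dim multi-class
#   preds.ndim == target.ndim + 1 (extra C dim) -> multi-class or multi-dim multi-class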
- - It returns the name of the case in which the inputs fall, and the implied number of classes (from the ``C`` dim for - multi-class data, or extra dim(s) for multi-label data). - """ - - preds_float = preds.is_floating_point() - - if preds.ndim == target.ndim: - if preds.shape != target.shape: - raise ValueError( - "The `preds` and `target` should have the same shape,", - f" got `preds` with shape={preds.shape} and `target` with shape={target.shape}.", - ) - if preds_float and target.max() > 1: - raise ValueError( - "If `preds` and `target` are of shape (N, ...) and `preds` are floats, `target` should be binary." - ) - - # Get the case - if preds.ndim == 1 and preds_float: - case = DataType.BINARY - elif preds.ndim == 1 and not preds_float: - case = DataType.MULTICLASS - elif preds.ndim > 1 and preds_float: - case = DataType.MULTILABEL - else: - case = DataType.MULTIDIM_MULTICLASS - - implied_classes = preds[0].numel() - - elif preds.ndim == target.ndim + 1: - if not preds_float: - raise ValueError("If `preds` have one dimension more than `target`, `preds` should be a float tensor.") - if preds.shape[2:] != target.shape[1:]: - raise ValueError( - "If `preds` have one dimension more than `target`, the shape of `preds` should be" - " (N, C, ...), and the shape of `target` should be (N, ...)." - ) - - implied_classes = preds.shape[1] - - if preds.ndim == 2: - case = DataType.MULTICLASS - else: - case = DataType.MULTIDIM_MULTICLASS - else: - raise ValueError( - "Either `preds` and `target` both should have the (same) shape (N, ...), or `target` should be (N, ...)" - " and `preds` should be (N, C, ...)." - ) - - return case, implied_classes - - -def _check_num_classes_binary(num_classes: int, multiclass: Optional[bool]) -> None: - """This checks that the consistency of `num_classes` with the data and `multiclass` param for binary data.""" - - if num_classes > 2: - raise ValueError("Your data is binary, but `num_classes` is larger than 2.") - if num_classes == 2 and not multiclass: - raise ValueError( - "Your data is binary and `num_classes=2`, but `multiclass` is not True." - " Set it to True if you want to transform binary data to multi-class format." - ) - if num_classes == 1 and multiclass: - raise ValueError( - "You have binary data and have set `multiclass=True`, but `num_classes` is 1." - " Either set `multiclass=None`(default) or set `num_classes=2`" - " to transform binary data to multi-class format." - ) - - -def _check_num_classes_mc( - preds: Tensor, - target: Tensor, - num_classes: int, - multiclass: Optional[bool], - implied_classes: int, -) -> None: - """This checks that the consistency of `num_classes` with the data and `multiclass` param for (multi- - dimensional) multi-class data.""" - - if num_classes == 1 and multiclass is not False: - raise ValueError( - "You have set `num_classes=1`, but predictions are integers." - " If you want to convert (multi-dimensional) multi-class data with 2 classes" - " to binary/multi-label, set `multiclass=False`." - ) - if num_classes > 1: - if multiclass is False and implied_classes != num_classes: - raise ValueError( - "You have set `multiclass=False`, but the implied number of classes " - " (from shape of inputs) does not match `num_classes`. If you are trying to" - " transform multi-dim multi-class data with 2 classes to multi-label, `num_classes`" - " should be either None or the product of the size of extra dimensions (...)." - " See Input Types in Metrics documentation." 
- ) - if num_classes <= target.max(): - raise ValueError("The highest label in `target` should be smaller than `num_classes`.") - if preds.shape != target.shape and num_classes != implied_classes: - raise ValueError("The size of C dimension of `preds` does not match `num_classes`.") - - -def _check_num_classes_ml(num_classes: int, multiclass: Optional[bool], implied_classes: int) -> None: - """This checks that the consistency of `num_classes` with the data and `multiclass` param for multi-label - data.""" - - if multiclass and num_classes != 2: - raise ValueError( - "Your have set `multiclass=True`, but `num_classes` is not equal to 2." - " If you are trying to transform multi-label data to 2 class multi-dimensional" - " multi-class, you should set `num_classes` to either 2 or None." - ) - if not multiclass and num_classes != implied_classes: - raise ValueError("The implied number of classes (from shape of inputs) does not match num_classes.") - - -def _check_top_k(top_k: int, case: str, implied_classes: int, multiclass: Optional[bool], preds_float: bool) -> None: - if case == DataType.BINARY: - raise ValueError("You can not use `top_k` parameter with binary data.") - if not isinstance(top_k, int) or top_k <= 0: - raise ValueError("The `top_k` has to be an integer larger than 0.") - if not preds_float: - raise ValueError("You have set `top_k`, but you do not have probability predictions.") - if multiclass is False: - raise ValueError("If you set `multiclass=False`, you can not set `top_k`.") - if case == DataType.MULTILABEL and multiclass: - raise ValueError( - "If you want to transform multi-label data to 2 class multi-dimensional" - "multi-class data using `multiclass=True`, you can not use `top_k`." - ) - if top_k >= implied_classes: - raise ValueError("The `top_k` has to be strictly smaller than the `C` dimension of `preds`.") - - -def _check_classification_inputs( - preds: Tensor, - target: Tensor, - threshold: float, - num_classes: Optional[int], - multiclass: Optional[bool], - top_k: Optional[int], -) -> DataType: - """Performs error checking on inputs for classification. - - This ensures that preds and target take one of the shape/type combinations that are - specified in ``_input_format_classification`` docstring. It also checks the cases of - over-rides with ``multiclass`` by checking (for multi-class and multi-dim multi-class - cases) that there are only up to 2 distinct labels. - - In case where preds are floats (probabilities), it is checked whether they are in [0,1] interval. - - When ``num_classes`` is given, it is checked that it is consistent with input cases (binary, - multi-label, ...), and that, if available, the implied number of classes in the ``C`` - dimension is consistent with it (as well as that max label in target is smaller than it). - - When ``num_classes`` is not specified in these cases, consistency of the highest target - value against ``C`` dimension is checked for (multi-dimensional) multi-class cases. - - If ``top_k`` is set (not None) for inputs that do not have probability predictions (and - are not binary), an error is raised. Similarly if ``top_k`` is set to a number that - is higher than or equal to the ``C`` dimension of ``preds``, an error is raised. - - Preds and target tensors are expected to be squeezed already - all dimensions should be - greater than 1, except perhaps the first one (``N``). 
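# --- Editor's note: an illustrative sketch, not part of the original patch, of a call
# that passes the checks described here; `B` is the paddleext.torchapi shim used
# throughout the package.
import paddleext.torchapi as B

from paddlemetrics.utilities.checks import _check_classification_inputs

preds = B.tensor([0.2, 0.7, 0.9])  # 1-d float predictions with binary targets
target = B.tensor([0, 1, 1])
case = _check_classification_inputs(
    preds, target, threshold=0.5, num_classes=None, multiclass=None, top_k=None
)  # returns DataType.BINARY; passing a `top_k` here would raise a ValueError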
- - Args: - preds: Tensor with predictions (labels or probabilities) - target: Tensor with ground truth labels, always integers (labels) - threshold: - Threshold value for transforming probability/logit predictions to binary - (0,1) predictions, in the case of binary or multi-label inputs. - num_classes: - Number of classes. If not explicitly set, the number of classes will be inferred - either from the shape of inputs, or the maximum label in the ``target`` and ``preds`` - tensor, where applicable. - top_k: - Number of highest probability entries for each sample to convert to 1s - relevant - only for inputs with probability predictions. The default value (``None``) will be - interpreted as 1 for these inputs. If this parameter is set for multi-label inputs, - it will take precedence over threshold. - - Should be left unset (``None``) for inputs with label predictions. - multiclass: - Used only in certain special cases, where you want to treat inputs as a different type - than what they appear to be. See the parameter's - :ref:`documentation section ` - for a more detailed explanation and examples. - - - Return: - case: The case the inputs fall in, one of 'binary', 'multi-class', 'multi-label' or - 'multi-dim multi-class' - """ - - # Basic validation (that does not need case/type information) - _basic_input_validation(preds, target, threshold, multiclass) - - # Check that shape/types fall into one of the cases - case, implied_classes = _check_shape_and_type_consistency(preds, target) - - # Check consistency with the `C` dimension in case of multi-class data - if preds.shape != target.shape: - if multiclass is False and implied_classes != 2: - raise ValueError( - "You have set `multiclass=False`, but have more than 2 classes in your data," - " based on the C dimension of `preds`." - ) - if target.max() >= implied_classes: - raise ValueError( - "The highest label in `target` should be smaller than the size of the `C` dimension of `preds`." - ) - - # Check that num_classes is consistent - if num_classes: - if case == DataType.BINARY: - _check_num_classes_binary(num_classes, multiclass) - elif case in (DataType.MULTICLASS, DataType.MULTIDIM_MULTICLASS): - _check_num_classes_mc(preds, target, num_classes, multiclass, implied_classes) - elif case.MULTILABEL: - _check_num_classes_ml(num_classes, multiclass, implied_classes) - - # Check that top_k is consistent - if top_k is not None: - _check_top_k(top_k, case, implied_classes, multiclass, preds.is_floating_point()) - - return case - - -def _input_squeeze( - preds: Tensor, - target: Tensor, -) -> Tuple[Tensor, Tensor]: - """Remove excess dimensions.""" - if preds.shape[0] == 1: - preds, target = preds.squeeze().unsqueeze(0), target.squeeze().unsqueeze(0) - else: - preds, target = preds.squeeze(), target.squeeze() - return preds, target - - -def _input_format_classification( - preds: Tensor, - target: Tensor, - threshold: float = 0.5, - top_k: Optional[int] = None, - num_classes: Optional[int] = None, - multiclass: Optional[bool] = None, -) -> Tuple[Tensor, Tensor, DataType]: - """Convert preds and target tensors into common format. 
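# --- Editor's note: an illustrative sketch, not part of the original patch, of the
# _input_squeeze helper above: trailing singleton dimensions are dropped while the
# batch dimension is preserved.
import paddleext.torchapi as B

from paddlemetrics.utilities.checks import _input_squeeze

preds = B.tensor([[0.2], [0.7], [0.9]])  # shape (3, 1)
target = B.tensor([[0], [1], [1]])
preds, target = _input_squeeze(preds, target)  # both now have shape (3,)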
-
-    Preds and targets are supposed to fall into one of these categories (and are
-    validated to make sure this is the case):
-
-    * Both preds and target are of shape ``(N,)``, and both are integers (multi-class)
-    * Both preds and target are of shape ``(N,)``, and target is binary, while preds
-      are a float (binary)
-    * preds are of shape ``(N, C)`` and are floats, and target is of shape ``(N,)`` and
-      is integer (multi-class)
-    * preds and target are of shape ``(N, ...)``, target is binary and preds is a float
-      (multi-label)
-    * preds are of shape ``(N, C, ...)`` and are floats, target is of shape ``(N, ...)``
-      and is integer (multi-dimensional multi-class)
-    * preds and target are of shape ``(N, ...)``, both are integers (multi-dimensional
-      multi-class)
-
-    To avoid ambiguities, all dimensions of size 1, except the first one, are squeezed out.
-
-    The returned output tensors will be binary tensors of the same shape, either ``(N, C)``
-    or ``(N, C, X)``; the details for each case are described below. The function also returns
-    a ``case`` string, which describes which of the above cases the inputs belonged to - regardless
-    of whether this was "overridden" by other settings (like ``multiclass``).
-
-    In binary case, targets are normally returned as ``(N,1)`` tensor, while preds are transformed
-    into a binary tensor (elements become 1 if the probability is greater than or equal to
-    ``threshold`` or 0 otherwise). If ``multiclass=True``, then both targets and preds
-    become ``(N, 2)`` tensors by a one-hot transformation, with the thresholding being applied to
-    preds first.
-
-    In multi-class case, normally both preds and targets become ``(N, C)`` binary tensors; targets
-    by a one-hot transformation and preds by selecting ``top_k`` largest entries (if their original
-    shape was ``(N,C)``). However, if ``multiclass=False``, then targets and preds will be
-    returned as ``(N,1)`` tensor.
-
-    In multi-label case, normally targets and preds are returned as ``(N, C)`` binary tensors, with
-    preds being binarized as in the binary case. Here the ``C`` dimension is obtained by flattening
-    all dimensions after the first one. However, if ``multiclass=True``, then both are returned as
-    ``(N, 2, C)``, by an equivalent transformation as in the binary case.
-
-    In multi-dimensional multi-class case, normally both target and preds are returned as
-    ``(N, C, X)`` tensors, with ``X`` resulting from flattening of all dimensions except ``N`` and
-    ``C``. The transformations performed here are equivalent to the multi-class case. However, if
-    ``multiclass=False`` (and there are up to two classes), then the data is returned as
-    ``(N, X)`` binary tensors (multi-label).
-
-    Note:
-        Where a one-hot transformation needs to be performed and the number of classes
-        is not implicitly given by a ``C`` dimension, the new ``C`` dimension will either be
-        equal to ``num_classes``, if it is given, or the maximum label value in preds and
-        target.
-
-    Args:
-        preds: Tensor with predictions (labels or probabilities)
-        target: Tensor with ground truth labels, always integers (labels)
-        threshold:
-            Threshold value for transforming probability/logit predictions to binary
-            (0 or 1) predictions, in the case of binary or multi-label inputs.
-        num_classes:
-            Number of classes. If not explicitly set, the number of classes will be inferred
-            either from the shape of inputs, or the maximum label in the ``target`` and ``preds``
-            tensor, where applicable.
-        top_k:
-            Number of highest probability entries for each sample to convert to 1s - relevant
-            only for (multi-dimensional) multi-class inputs with probability predictions. The
-            default value (``None``) will be interpreted as 1 for these inputs.
-
-            Should be left unset (``None``) for all other types of inputs.
-        multiclass:
-            Used only in certain special cases, where you want to treat inputs as a different type
-            than what they appear to be. See the parameter's
-            :ref:`documentation section `
-            for a more detailed explanation and examples.
-
-    Returns:
-        preds: binary tensor of shape ``(N, C)`` or ``(N, C, X)``
-        target: binary tensor of shape ``(N, C)`` or ``(N, C, X)``
-        case: The case the inputs fall in, one of ``'binary'``, ``'multi-class'``, ``'multi-label'`` or
-            ``'multi-dim multi-class'``
-    """
-    # Remove excess dimensions
-    preds, target = _input_squeeze(preds, target)
-
-    # Convert half precision tensors to full precision, as not all ops are supported
-    # for example, min() is not supported
-    if preds.dtype == B.float16:
-        preds = preds.float()
-
-    case = _check_classification_inputs(
-        preds,
-        target,
-        threshold=threshold,
-        num_classes=num_classes,
-        multiclass=multiclass,
-        top_k=top_k,
-    )
-
-    if case in (DataType.BINARY, DataType.MULTILABEL) and not top_k:
-        preds = (preds >= threshold).int()
-        num_classes = num_classes if not multiclass else 2
-
-    if case == DataType.MULTILABEL and top_k:
-        preds = select_topk(preds, top_k)
-
-    if case in (DataType.MULTICLASS, DataType.MULTIDIM_MULTICLASS) or multiclass:
-        if preds.is_floating_point():
-            num_classes = preds.shape[1]
-            preds = select_topk(preds, top_k or 1)
-        else:
-            num_classes = num_classes if num_classes else max(preds.max(), target.max()) + 1
-            preds = to_onehot(preds, max(2, num_classes))
-
-        target = to_onehot(target, max(2, num_classes))  # type: ignore
-
-        if multiclass is False:
-            preds, target = preds[:, 1, ...], target[:, 1, ...]
-
-    if (case in (DataType.MULTICLASS, DataType.MULTIDIM_MULTICLASS) and multiclass is not False) or multiclass:
-        target = target.reshape(target.shape[0], target.shape[1], -1)
-        preds = preds.reshape(preds.shape[0], preds.shape[1], -1)
-    else:
-        target = target.reshape(target.shape[0], -1)
-        preds = preds.reshape(preds.shape[0], -1)
-
-    # Some operations above create an extra dimension for MC/binary case - this removes it
-    if preds.ndim > 2:
-        preds, target = preds.squeeze(-1), target.squeeze(-1)
-
-    return preds.int(), target.int(), case
-
-
-def _input_format_classification_one_hot(
-    num_classes: int,
-    preds: Tensor,
-    target: Tensor,
-    threshold: float = 0.5,
-    multilabel: bool = False,
-) -> Tuple[Tensor, Tensor]:
-    """Convert preds and target tensors into one-hot sparse label tensors.
-
-    Args:
-        num_classes: number of classes
-        preds: either tensor with labels, tensor with probabilities/logits or multilabel tensor
-        target: tensor with ground truth labels
-        threshold: float used for thresholding multilabel input
-        multilabel: boolean flag indicating if input is multilabel
-
-    Raises:
-        ValueError:
-            If ``preds`` and ``target`` don't have the same number of dimensions,
-            or ``preds`` doesn't have one additional dimension over ``target``.
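# --- Editor's note: an illustrative sketch, not part of the original patch, tracing
# _input_format_classification above on binary probability inputs.
import paddleext.torchapi as B

from paddlemetrics.utilities.checks import _input_format_classification

preds = B.tensor([0.7, 0.3, 0.9])
target = B.tensor([1, 0, 1])
preds_fmt, target_fmt, case = _input_format_classification(preds, target, threshold=0.5)
# case == DataType.BINARY, and both outputs are (3, 1) int tensors:
#   preds_fmt -> [[1], [0], [1]], target_fmt -> [[1], [0], [1]]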
- - Returns: - preds: one hot tensor of shape [num_classes, -1] with predicted labels - target: one hot tensors of shape [num_classes, -1] with true labels - """ - if preds.ndim not in (target.ndim, target.ndim + 1): - raise ValueError("preds and target must have same number of dimensions, or one additional dimension for preds") - - if preds.ndim == target.ndim + 1: - # multi class probabilities - preds = B.argmax(preds, dim=1) - - if preds.ndim == target.ndim and preds.dtype in (B.long, B.int) and num_classes > 1 and not multilabel: - # multi-class - preds = to_onehot(preds, num_classes=num_classes) - target = to_onehot(target, num_classes=num_classes) - - elif preds.ndim == target.ndim and preds.is_floating_point(): - # binary or multilabel probabilities - preds = (preds >= threshold).long() - - # transpose class as first dim and reshape - if preds.ndim > 1: - preds = preds.transpose(1, 0) - target = target.transpose(1, 0) - - return preds.reshape(num_classes, -1), target.reshape(num_classes, -1) - - -def _check_retrieval_functional_inputs( - preds: Tensor, - target: Tensor, - allow_non_binary_target: bool = False, -) -> Tuple[Tensor, Tensor]: - """Check ``preds`` and ``target`` tensors are of the same shape and of the correct dtype. - - Args: - preds: either tensor with scores/logits - target: tensor with ground true labels - allow_non_binary_target: whether to allow target to contain non-binary values - - Raises: - ValueError: - If ``preds`` and ``target`` don't have the same shape, if they are empty - or not of the correct ``dtypes``. - - Returns: - preds: as B.float32 - target: as B.long if not floating point else B.float32 - """ - if preds.shape != target.shape: - raise ValueError("`preds` and `target` must be of the same shape") - - if not preds.numel() or not preds.size(): - raise ValueError("`preds` and `target` must be non-empty and non-scalar tensors") - - return _check_retrieval_target_and_prediction_types(preds, target, allow_non_binary_target=allow_non_binary_target) - - -def _check_retrieval_inputs( - indexes: Tensor, - preds: Tensor, - target: Tensor, - allow_non_binary_target: bool = False, -) -> Tuple[Tensor, Tensor, Tensor]: - """Check ``indexes``, ``preds`` and ``target`` tensors are of the same shape and of the correct dtype. - - Args: - indexes: tensor with queries indexes - preds: tensor with scores/logits - target: tensor with ground true labels - - Raises: - ValueError: - If ``preds`` and ``target`` don't have the same shape, if they are empty - or not of the correct ``dtypes``. - - Returns: - indexes: as B.long - preds: as B.float32 - target: as B.long - """ - if indexes.shape != preds.shape or preds.shape != target.shape: - raise ValueError("`indexes`, `preds` and `target` must be of the same shape") - - if not indexes.numel() or not indexes.size(): - raise ValueError( - "`indexes`, `preds` and `target` must be non-empty and non-scalar tensors", - ) - - if indexes.dtype is not B.long: - raise ValueError("`indexes` must be a tensor of long integers") - - preds, target = _check_retrieval_target_and_prediction_types( - preds, target, allow_non_binary_target=allow_non_binary_target - ) - - return indexes.long().flatten(), preds, target - - -def _check_retrieval_target_and_prediction_types( - preds: Tensor, - target: Tensor, - allow_non_binary_target: bool = False, -) -> Tuple[Tensor, Tensor]: - """Check ``preds`` and ``target`` tensors are of the same shape and of the correct dtype. 
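# --- Editor's note: an illustrative sketch, not part of the original patch, of
# _check_retrieval_inputs above: `indexes` must be a long tensor, `preds` a float
# tensor and `target` binary; all three come back flattened.
import paddleext.torchapi as B

from paddlemetrics.utilities.checks import _check_retrieval_inputs

indexes = B.tensor([0, 0, 1, 1])  # query ids (long)
preds = B.tensor([0.2, 0.8, 0.5, 0.1])  # float relevance scores
target = B.tensor([0, 1, 1, 0])  # binary relevance labels
indexes, preds, target = _check_retrieval_inputs(indexes, preds, target)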
- - Args: - preds: tensor with scores/logits - target: tensor with ground-truth labels - allow_non_binary_target: whether to allow target to contain non-binary values - - Raises: - ValueError: - If ``preds`` and ``target`` don't have the same shape, if they are empty - or not of the correct ``dtypes``. - """ - if target.dtype not in (B.bool, B.long, B.int) and not B.is_floating_point(target): - raise ValueError("`target` must be a tensor of booleans, integers or floats") - - if not preds.is_floating_point(): - raise ValueError("`preds` must be a tensor of floats") - - if not allow_non_binary_target and (target.max() > 1 or target.min() < 0): - raise ValueError("`target` must contain `binary` values") - - target = target.float().flatten() if target.is_floating_point() else target.long().flatten() - return preds.float().flatten(), target diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/data.py b/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/data.py deleted file mode 100644 index 13e43fb60..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/data.py +++ /dev/null @@ -1,240 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Callable, List, Mapping, Optional, Sequence, Union - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor, tensor - -from paddlemetrics.utilities.prints import rank_zero_warn - -METRIC_EPS = 1e-6 - - -def dim_zero_cat(x: Union[Tensor, List[Tensor]]) -> Tensor: - """concatenation along the zero dimension.""" - x = x if isinstance(x, (list, tuple)) else [x] - x = [y.unsqueeze(0) if y.numel() == 1 and y.ndim == 0 else y for y in x] - if not x: # empty list - raise ValueError("No samples to concatenate") - return B.cat(x, dim=0) - - -def dim_zero_sum(x: Tensor) -> Tensor: - """summation along the zero dimension.""" - return B.sum(x, dim=0) - - -def dim_zero_mean(x: Tensor) -> Tensor: - """average along the zero dimension.""" - return B.mean(x, dim=0) - - -def dim_zero_max(x: Tensor) -> Tensor: - """max along the zero dimension.""" - return B.max(x, dim=0).values - - -def dim_zero_min(x: Tensor) -> Tensor: - """min along the zero dimension.""" - return B.min(x, dim=0).values - - -def _flatten(x: Sequence) -> list: - return [item for sublist in x for item in sublist] - - -def to_onehot( - label_tensor: Tensor, - num_classes: Optional[int] = None, -) -> Tensor: - """Converts a dense label tensor to one-hot format. - - Args: - label_tensor: dense label tensor, with shape [N, d1, d2, ...] - num_classes: number of classes C - - Returns: - A sparse label tensor with shape [N, C, d1, d2, ...]
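A quick sketch of how the `dim_zero_*` reducers above behave (assuming the `paddleext.torchapi` shim; the import path mirrors the deleted module):

import paddleext.torchapi as B
from paddlemetrics.utilities.data import dim_zero_cat, dim_zero_mean, dim_zero_sum

# Metric states accumulate as lists of per-step tensors, then get reduced
# along dimension 0.
states = [B.tensor([1.0, 2.0]), B.tensor([3.0, 4.0])]
print(dim_zero_cat(states))            # tensor([1., 2., 3., 4.])
print(dim_zero_sum(B.stack(states)))   # tensor([4., 6.])
print(dim_zero_mean(B.stack(states)))  # tensor([2., 3.])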
- - Example: - >>> x = B.tensor([1, 2, 3]) - >>> to_onehot(x) - tensor([[0, 1, 0, 0], - [0, 0, 1, 0], - [0, 0, 0, 1]]) - """ - if num_classes is None: - num_classes = int(label_tensor.max().detach().item() + 1) - - tensor_onehot = B.zeros( - label_tensor.shape[0], - num_classes, - *label_tensor.shape[1:], - dtype=label_tensor.dtype, - device=label_tensor.device, - ) - index = label_tensor.long().unsqueeze(1).expand_as(tensor_onehot) - return (tensor_onehot.scatter_(1, index, 1.0) > 0).to(label_tensor.dtype) - - -def select_topk(prob_tensor: Tensor, topk: int = 1, dim: int = 1) -> Tensor: - """Convert a probability tensor to binary by selecting top-k highest entries. - - Args: - prob_tensor: dense tensor of shape ``[..., C, ...]``, where ``C`` is in the - position defined by the ``dim`` argument - topk: number of highest entries to turn into 1s - dim: dimension on which to compare entries - - Returns: - A binary tensor of the same shape as the input tensor of type B.int32 - - Example: - >>> x = B.tensor([[1.1, 2.0, 3.0], [2.0, 1.0, 0.5]]) - >>> select_topk(x, topk=2) - tensor([[0, 1, 1], - [1, 1, 0]], dtype=B.int32) - """ - zeros = B.zeros_like(prob_tensor) - if topk == 1: # argmax has better performance than topk - topk_tensor = zeros.scatter(dim, prob_tensor.argmax(dim=dim, keepdim=True), 1.0) - else: - topk_tensor = zeros.scatter(dim, prob_tensor.topk(k=topk, dim=dim).indices, 1.0) - return topk_tensor.int() - - -def to_categorical(x: Tensor, argmax_dim: int = 1) -> Tensor: - """Converts a tensor of probabilities to a dense label tensor. - - Args: - x: probabilities to get the categorical label [N, d1, d2, ...] - argmax_dim: dimension on which to apply the argmax - - Return: - A tensor with categorical labels [N, d2, ...] - - Example: - >>> x = B.tensor([[0.2, 0.5], [0.9, 0.1]]) - >>> to_categorical(x) - tensor([1, 0]) - """ - return B.argmax(x, dim=argmax_dim) - - -def get_num_classes( - preds: Tensor, - target: Tensor, - num_classes: Optional[int] = None, -) -> int: - """Calculates the number of classes for a given prediction and target tensor. - - Args: - preds: predicted values - target: true labels - num_classes: number of classes if known - - Return: - An integer that represents the number of classes. - """ - num_target_classes = int(target.max().detach().item() + 1) - num_pred_classes = int(preds.max().detach().item() + 1) - num_all_classes = max(num_target_classes, num_pred_classes) - - if num_classes is None: - num_classes = num_all_classes - elif num_classes != num_all_classes: - rank_zero_warn( - f"You have set {num_classes} classes, which is" - f" different from the predicted ({num_pred_classes}) and" - f" target ({num_target_classes}) number of classes", - RuntimeWarning, - ) - return num_classes - - -def apply_to_collection( - data: Any, - dtype: Union[type, tuple], - function: Callable, - *args: Any, - wrong_dtype: Optional[Union[type, tuple]] = None, - **kwargs: Any, -) -> Any: - """Recursively applies a function to all elements of a certain dtype.
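`get_num_classes` above has no doctest of its own; a minimal sketch (shim assumed):

import paddleext.torchapi as B
from paddlemetrics.utilities.data import get_num_classes

preds = B.tensor([0, 1, 2])
target = B.tensor([0, 3, 1])
# The largest label seen is 3, so 4 classes (0..3) are inferred; an explicit
# num_classes that disagrees only triggers a RuntimeWarning.
print(get_num_classes(preds, target))  # 4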
- - Args: - data: the collection to apply the function to - dtype: the given function will be applied to all elements of this dtype - function: the function to apply - *args: positional arguments (will be forwarded to calls of ``function``) - wrong_dtype: the given function won't be applied if this type is specified and the given collection is of - the :attr:`wrong_dtype` even if it is of type :attr:`dtype` - **kwargs: keyword arguments (will be forwarded to calls of ``function``) - - Returns: - the resulting collection - - Example: - >>> apply_to_collection(B.tensor([8, 0, 2, 6, 7]), dtype=Tensor, function=lambda x: x ** 2) - tensor([64, 0, 4, 36, 49]) - >>> apply_to_collection([8, 0, 2, 6, 7], dtype=int, function=lambda x: x ** 2) - [64, 0, 4, 36, 49] - >>> apply_to_collection(dict(abc=123), dtype=int, function=lambda x: x ** 2) - {'abc': 15129} - """ - elem_type = type(data) - - # Breaking condition - if isinstance(data, dtype) and (wrong_dtype is None or not isinstance(data, wrong_dtype)): - return function(data, *args, **kwargs) - - # Recursively apply to collection items - if isinstance(data, Mapping): - return elem_type({k: apply_to_collection(v, dtype, function, *args, **kwargs) for k, v in data.items()}) - - if isinstance(data, tuple) and hasattr(data, "_fields"): # named tuple - return elem_type(*(apply_to_collection(d, dtype, function, *args, **kwargs) for d in data)) - - if isinstance(data, Sequence) and not isinstance(data, str): - return elem_type([apply_to_collection(d, dtype, function, *args, **kwargs) for d in data]) - - # data is neither of dtype, nor a collection - return data - - -def get_group_indexes(indexes: Tensor) -> List[Tensor]: - """Given an integer `B.Tensor` `indexes`, return a list of `B.Tensor`s of indexes, one for each different value in - `indexes`. - - Args: - indexes: a `B.Tensor` - - Return: - A list of integer `B.Tensor`s - - Example: - >>> indexes = B.tensor([0, 0, 0, 1, 1, 1, 1]) - >>> get_group_indexes(indexes) - [tensor([0, 1, 2]), tensor([3, 4, 5, 6])] - """ - - res: dict = {} - for i, _id in enumerate(indexes): - _id = _id.item() - if _id in res: - res[_id] += [i] - else: - res[_id] = [i] - - return [tensor(x, dtype=B.long) for x in res.values()] diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/distributed.py b/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/distributed.py deleted file mode 100644 index aec42872a..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/distributed.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, List, Optional - -import paddleext.torchapi as B -#import torchapi.nn.functional as F -from paddleext.torchapi import Tensor - - -def reduce(to_reduce: Tensor, reduction: str) -> Tensor: - """Reduces a given tensor by a given reduction method.
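The doctests above cover tensors, lists and dicts; the namedtuple branch of `apply_to_collection` can be sketched like this (shim assumed):

from collections import namedtuple

import paddleext.torchapi as B
from paddleext.torchapi import Tensor
from paddlemetrics.utilities.data import apply_to_collection

Batch = namedtuple("Batch", ["preds", "target"])
batch = Batch(B.tensor([1.0, 2.0]), B.tensor([3.0, 4.0]))
# The element type is preserved: the result is again a Batch namedtuple,
# i.e. Batch(preds=tensor([2., 4.]), target=tensor([6., 8.]))
doubled = apply_to_collection(batch, Tensor, lambda t: t * 2)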
- - Args: - to_reduce: the tensor, which shall be reduced - reduction: a string specifying the reduction method ('elementwise_mean', 'none', 'sum') - - Return: - reduced Tensor - - Raise: - ValueError if an invalid reduction parameter was given - """ - if reduction == "elementwise_mean": - return B.mean(to_reduce) - if reduction == "none": - return to_reduce - if reduction == "sum": - return B.sum(to_reduce) - raise ValueError("Reduction parameter unknown.") - - -def class_reduce(num: Tensor, denom: Tensor, weights: Tensor, class_reduction: str = "none") -> Tensor: - """ - Function used to reduce classification metrics of the form `num / denom * weights`. - For example, for standard accuracy, ``num`` would be the number of - true positives per class, ``denom`` the support per class, and ``weights`` - a tensor of ones. - - Args: - num: numerator tensor - denom: denominator tensor - weights: weights for each class - class_reduction: reduction method for multiclass problems - - - ``'micro'``: calculate metrics globally - - ``'macro'``: calculate metrics for each label, and find their unweighted mean. - - ``'weighted'``: calculate metrics for each label, and find their weighted mean. - - ``'none'`` or ``None``: returns calculated metric per class - - Raises: - ValueError: - If ``class_reduction`` is none of ``"micro"``, ``"macro"``, ``"weighted"``, ``"none"`` or ``None``. - - """ - valid_reduction = ("micro", "macro", "weighted", "none", None) - if class_reduction == "micro": - fraction = B.sum(num) / B.sum(denom) - else: - fraction = num / denom - - # We need to take care of instances where the denom can be 0 - # for some (or all) classes which will produce nans - fraction[fraction != fraction] = 0 - - if class_reduction == "micro": - return fraction - if class_reduction == "macro": - return B.mean(fraction) - if class_reduction == "weighted": - return B.sum(fraction * (weights.float() / B.sum(weights))) - if class_reduction == "none" or class_reduction is None: - return fraction - - raise ValueError( - f"Reduction parameter {class_reduction} unknown." f" Choose between one of these: {valid_reduction}" - ) - - -def _simple_gather_all_tensors(result: Tensor, group: Any, world_size: int) -> List[Tensor]: - gathered_result = [B.zeros_like(result) for _ in range(world_size)] - B.distributed.all_gather(gathered_result, result, group) - return gathered_result - - -def gather_all_tensors(result: Tensor, group: Optional[Any] = None) -> List[Tensor]: - """Function to gather all tensors from several ddp processes onto a list that is broadcasted to all processes. - Works on tensors that have the same number of dimensions, but where each dimension may differ. In this case - tensors are padded, gathered and then trimmed to secure equal workload for all processes. - - Args: - result: the value to sync - group: the process group to gather results from. Defaults to all processes (world) - - Return: - gathered_result: list with size equal to the process group where - gathered_result[i] corresponds to result tensor from process i - """ - if group is None: - group = B.distributed.group.WORLD - - # convert tensors to contiguous format - result = result.contiguous() - - world_size = B.distributed.get_world_size(group) - B.distributed.barrier(group=group) - - # if the tensor is scalar, things are easy - if result.ndim == 0: - return _simple_gather_all_tensors(result, group, world_size) - - # 1.
Gather sizes of all tensors - local_size = B.tensor(result.shape, device=result.device) - local_sizes = [B.zeros_like(local_size) for _ in range(world_size)] - B.distributed.all_gather(local_sizes, local_size, group=group) - max_size = B.stack(local_sizes).max(dim=0).values - all_sizes_equal = all(all(ls == max_size) for ls in local_sizes) - - # 2. If shapes are all the same, then do a simple gather: - if all_sizes_equal: - return _simple_gather_all_tensors(result, group, world_size) - - # 3. If not, we need to pad each local tensor to maximum size, gather and then truncate - pad_dims = [] - pad_by = (max_size - local_size).detach().cpu() - for val in reversed(pad_by): - pad_dims.append(0) - pad_dims.append(val.item()) - result_padded = B.pad(result, pad_dims) - gathered_result = [B.zeros_like(result_padded) for _ in range(world_size)] - B.distributed.all_gather(gathered_result, result_padded, group) - for idx, item_size in enumerate(local_sizes): - slice_param = [slice(dim_size) for dim_size in item_size] - gathered_result[idx] = gathered_result[idx][slice_param] - return gathered_result diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/enums.py b/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/enums.py deleted file mode 100644 index 7476c051d..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/enums.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from enum import Enum -from typing import Optional, Union - - -class EnumStr(str, Enum): - """Type of any enumerator with allowed comparison to string invariant to cases. - - Example: - >>> class MyEnum(EnumStr): - ... ABC = 'abc' - >>> MyEnum.from_str('Abc') - <MyEnum.ABC: 'abc'> - >>> {MyEnum.ABC: 123} - {<MyEnum.ABC: 'abc'>: 123} - """ - - @classmethod - def from_str(cls, value: str) -> Optional["EnumStr"]: - statuses = [status for status in dir(cls) if not status.startswith("_")] - for st in statuses: - if st.lower() == value.lower(): - return getattr(cls, st) - return None - - def __eq__(self, other: Union[str, "EnumStr", None]) -> bool: # type: ignore - other = other.value if isinstance(other, Enum) else str(other) - return self.value.lower() == other.lower() - - def __hash__(self) -> int: - # re-enable hashing so it can be used as a dict key or in a set - # example: set(LightningEnum) - return hash(self.name) - - -class DataType(EnumStr): - """Enum to represent data type. - - >>> "Binary" in list(DataType) - True - """ - - BINARY = "binary" - MULTILABEL = "multi-label" - MULTICLASS = "multi-class" - MULTIDIM_MULTICLASS = "multi-dim multi-class" - - -class AverageMethod(EnumStr): - """Enum to represent average method.
- - >>> None in list(AverageMethod) - True - >>> AverageMethod.NONE == None - True - >>> AverageMethod.NONE == 'none' - True - """ - - MICRO = "micro" - MACRO = "macro" - WEIGHTED = "weighted" - NONE = None - SAMPLES = "samples" - - -class MDMCAverageMethod(EnumStr): - """Enum to represent multi-dim multi-class average method.""" - - GLOBAL = "global" - SAMPLEWISE = "samplewise" diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/exceptions.py b/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/exceptions.py deleted file mode 100644 index 767fe9014..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/exceptions.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -class paddlemetricsUserError(Exception): - """Error used to inform users of a wrong combination of Metric API calls.""" diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/imports.py b/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/imports.py deleted file mode 100644 index f3794801c..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/imports.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Import utilities.""" -import operator -from importlib import import_module -from importlib.util import find_spec -from typing import Callable, Optional - -from packaging.version import Version -from pkg_resources import DistributionNotFound, get_distribution - - -def _module_available(module_path: str) -> bool: - """Check if a path is available in your environment. - - >>> _module_available('os') - True - >>> _module_available('bla.bla') - False - """ - try: - return find_spec(module_path) is not None - except AttributeError: - # Python 3.6 - return False - except ModuleNotFoundError: - # Python 3.7+ - return False - - -def _compare_version(package: str, op: Callable, version: str) -> Optional[bool]: - """Compare package version with some requirements.
- - >>> import operator - >>> _compare_version("torch", operator.ge, "0.1") - True - >>> _compare_version("any_module", operator.ge, "0.0") # is None - """ - if not _module_available(package): - return None - try: - pkg = import_module(package) - pkg_version = pkg.__version__ # type: ignore - except (ModuleNotFoundError, DistributionNotFound): - return None - except AttributeError: - pkg_version = get_distribution(package).version - except ImportError: - # catches cyclic imports - the case with integrated libs - # see: https://stackoverflow.com/a/32965521 - pkg_version = get_distribution(package).version - try: - pkg_version = Version(pkg_version) - except TypeError: - # this is mocked by sphinx, so it shall return True to generate all summaries - return True - return op(pkg_version, Version(version)) - - -_TORCH_LOWER_1_4: Optional[bool] = False -_TORCH_LOWER_1_5: Optional[bool] = False -_TORCH_LOWER_1_6: Optional[bool] = False -_TORCH_GREATER_EQUAL_1_6: Optional[bool] = True -_TORCH_GREATER_EQUAL_1_7: Optional[bool] = True - -_LIGHTNING_AVAILABLE: bool = False -_JIWER_AVAILABLE: bool = _module_available("jiwer") -_NLTK_AVAILABLE: bool = _module_available("nltk") -_ROUGE_SCORE_AVAILABLE: bool = _module_available("rouge_score") -_BERTSCORE_AVAILABLE: bool = _module_available("bert_score") -_SCIPY_AVAILABLE: bool = _module_available("scipy") -_TORCH_FIDELITY_AVAILABLE: bool = _module_available("torch_fidelity") -_LPIPS_AVAILABLE: bool = _module_available("lpips") -_TQDM_AVAILABLE: bool = _module_available("tqdm") -_TRANSFORMERS_AVAILABLE: bool = _module_available("transformers") -_PESQ_AVAILABLE: bool = _module_available("pesq") -_SACREBLEU_AVAILABLE: bool = _module_available("sacrebleu") -_REGEX_AVAILABLE: bool = _module_available("regex") -_PYSTOI_AVAILABLE: bool = _module_available("pystoi") diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/prints.py b/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/prints.py deleted file mode 100644 index ff4b1b35e..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/utilities/prints.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
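A sketch of how the availability flags defined above are typically consumed by optional-dependency code paths (flag values naturally depend on the environment):

import operator

from paddlemetrics.utilities.imports import _SCIPY_AVAILABLE, _compare_version

if _SCIPY_AVAILABLE:
    import scipy  # only imported when the optional dependency is present

# _compare_version returns None (rather than raising) when the package
# cannot be found, so callers can treat None as "unavailable".
new_enough = _compare_version("numpy", operator.ge, "1.20")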
-import os -import warnings -from functools import wraps -from typing import Any, Callable - -from paddlemetrics import _logger as log - - -def rank_zero_only(fn: Callable) -> Callable: - @wraps(fn) - def wrapped_fn(*args: Any, **kwargs: Any) -> Any: - if rank_zero_only.rank == 0: # type: ignore - return fn(*args, **kwargs) - - return wrapped_fn - - -# add the attribute to the function but don't overwrite in case Trainer has already set it -rank_zero_only.rank = getattr(rank_zero_only, "rank", int(os.environ.get("LOCAL_RANK", 0))) # type: ignore - - -def _warn(*args: Any, **kwargs: Any) -> None: - warnings.warn(*args, **kwargs) - - -def _info(*args: Any, **kwargs: Any) -> None: - log.info(*args, **kwargs) - - -def _debug(*args: Any, **kwargs: Any) -> None: - log.debug(*args, **kwargs) - - -rank_zero_debug = rank_zero_only(_debug) -rank_zero_info = rank_zero_only(_info) -rank_zero_warn = rank_zero_only(_warn) diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/wrappers/__init__.py b/EE/paddlemetric/src/build/lib/paddlemetrics/wrappers/__init__.py deleted file mode 100644 index d74928f6a..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/wrappers/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from paddlemetrics.wrappers.bootstrapping import BootStrapper # noqa: F401 -from paddlemetrics.wrappers.multioutput import MultioutputWrapper # noqa: F401 -from paddlemetrics.wrappers.tracker import MetricTracker # noqa: F401 diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/wrappers/bootstrapping.py b/EE/paddlemetric/src/build/lib/paddlemetrics/wrappers/bootstrapping.py deleted file mode 100644 index 6a3e7b16b..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/wrappers/bootstrapping.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
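The `rank_zero_only` decorator above gates side effects to the process with `LOCAL_RANK == 0`; a minimal usage sketch (import path mirrors the deleted module):

from paddlemetrics.utilities.prints import rank_zero_only, rank_zero_warn

rank_zero_warn("emitted once per job, not once per DDP process")

@rank_zero_only
def report(msg: str) -> None:
    print(msg)

report("printed on rank 0; on other ranks the wrapper returns None")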
-from copy import deepcopy -from typing import Any, Callable, Dict, Optional, Union - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor, nn - -from paddlemetrics.metric import Metric -from paddlemetrics.utilities import apply_to_collection -from paddlemetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_7 - - -def _bootstrap_sampler( - size: int, - sampling_strategy: str = "poisson", -) -> Tensor: - """Resample a tensor along its first dimension with replacement. - - Args: - size: number of samples - sampling_strategy: the strategy to use for sampling, either ``'poisson'`` or ``'multinomial'`` - - Returns: - resampled tensor - - """ - if sampling_strategy == "poisson": - p = B.distributions.Poisson(1) - n = p.sample((size,)) - return B.arange(size).repeat_interleave(n.long(), dim=0) - if sampling_strategy == "multinomial": - idx = B.multinomial(B.ones(size), num_samples=size, replacement=True) - return idx - raise ValueError("Unknown sampling strategy") - - -class BootStrapper(Metric): - def __init__( - self, - base_metric: Metric, - num_bootstraps: int = 10, - mean: bool = True, - std: bool = True, - quantile: Optional[Union[float, Tensor]] = None, - raw: bool = False, - sampling_strategy: str = "poisson", - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ) -> None: - r""" - Using `Turn a Metric into a Bootstrapped`_, this wrapper can automate the process of getting - confidence intervals for metric values. It basically keeps multiple copies of the same base - metric in memory and whenever ``update`` or - ``forward`` is called, all input tensors are resampled (with replacement) along the first dimension. - - Args: - base_metric: - base metric class to wrap - num_bootstraps: - number of copies to make of the base metric for bootstrapping - mean: - if ``True`` return the mean of the bootstraps - std: - if ``True`` return the standard deviation of the bootstraps - quantile: - if given, returns the quantile of the bootstraps. Can only be used with - pytorch version 1.7 or higher - raw: - if ``True``, return all bootstrapped values - sampling_strategy: - Determines how to produce bootstrapped samples. Either ``'poisson'`` or ``'multinomial'``. - If ``'poisson'`` is chosen, the number of times each sample will be included in the bootstrap - will be given by :math:`n\sim Poisson(\lambda=1)`, which approximates the true bootstrap distribution - when the number of samples is large. If ``'multinomial'`` is chosen, we will apply true bootstrapping - at the batch level to approximate bootstrapping over the whole dataset. - compute_on_step: - Forward only calls ``update()`` and returns ``None`` if this is set to ``False``. - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step - process_group: - Specify the process group on which synchronization is called. - default: ``None`` (which selects the entire world) - dist_sync_fn: - Callback that performs the allgather operation on the metric state. When ``None``, DDP - will be used to perform the allgather.
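A sketch of the `_bootstrap_sampler` helper defined above (shim assumed; note that with the `'poisson'` strategy the resampled size only matches the input size in expectation):

import paddleext.torchapi as B
from paddlemetrics.wrappers.bootstrapping import _bootstrap_sampler

data = B.arange(5.0)
idx = _bootstrap_sampler(5, sampling_strategy="multinomial")
# Rows are drawn with replacement; 'multinomial' always yields exactly 5 indices.
resampled = B.index_select(data, dim=0, index=idx)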
- - Example:: - >>> from pprint import pprint - >>> from paddlemetrics import Accuracy, BootStrapper - >>> _ = B.manual_seed(123) - >>> base_metric = Accuracy() - >>> bootstrap = BootStrapper(base_metric, num_bootstraps=20) - >>> bootstrap.update(B.randint(5, (20,)), B.randint(5, (20,))) - >>> output = bootstrap.compute() - >>> pprint(output) - {'mean': tensor(0.2205), 'std': tensor(0.0859)} - - """ - super().__init__(compute_on_step, dist_sync_on_step, process_group, dist_sync_fn) - if not isinstance(base_metric, Metric): - raise ValueError( - "Expected base metric to be an instance of paddlemetrics.Metric" f" but received {base_metric}" - ) - - self.metrics = nn.ModuleList([deepcopy(base_metric) for _ in range(num_bootstraps)]) - self.num_bootstraps = num_bootstraps - - self.mean = mean - self.std = std - if quantile is not None and not _TORCH_GREATER_EQUAL_1_7: - raise ValueError("quantile argument can only be used with pytorch v1.7 or higher") - self.quantile = quantile - self.raw = raw - - allowed_sampling = ("poisson", "multinomial") - if sampling_strategy not in allowed_sampling: - raise ValueError( - f"Expected argument ``sampling_strategy`` to be one of {allowed_sampling}" - f" but received {sampling_strategy}" - ) - self.sampling_strategy = sampling_strategy - - def update(self, *args: Any, **kwargs: Any) -> None: - """Updates the state of the base metric. - - Any tensor passed in will be bootstrapped along dimension 0. - """ - for idx in range(self.num_bootstraps): - args_sizes = apply_to_collection(args, Tensor, len) - kwargs_sizes = list(apply_to_collection(kwargs, Tensor, len).values()) - if len(args_sizes) > 0: - size = args_sizes[0] - elif len(kwargs_sizes) > 0: - size = kwargs_sizes[0] - else: - raise ValueError("None of the inputs contained tensors, so could not determine the sampling size") - sample_idx = _bootstrap_sampler(size, sampling_strategy=self.sampling_strategy).to(self.device) - new_args = apply_to_collection(args, Tensor, B.index_select, dim=0, index=sample_idx) - new_kwargs = apply_to_collection(kwargs, Tensor, B.index_select, dim=0, index=sample_idx) - self.metrics[idx].update(*new_args, **new_kwargs) - - def compute(self) -> Dict[str, Tensor]: - """Computes the bootstrapped metric values.
- - Always returns a dict of tensors, which can contain the following keys: ``mean``, ``std``, ``quantile`` and - ``raw`` depending on how the class was initialized. - """ - computed_vals = B.stack([m.compute() for m in self.metrics], dim=0) - output_dict = {} - if self.mean: - output_dict["mean"] = computed_vals.mean(dim=0) - if self.std: - output_dict["std"] = computed_vals.std(dim=0) - if self.quantile is not None: - output_dict["quantile"] = B.quantile(computed_vals, self.quantile) - if self.raw: - output_dict["raw"] = computed_vals - return output_dict diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/wrappers/multioutput.py b/EE/paddlemetric/src/build/lib/paddlemetrics/wrappers/multioutput.py deleted file mode 100644 index 789445be2..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/wrappers/multioutput.py +++ /dev/null @@ -1,165 +0,0 @@ -from copy import deepcopy -from typing import Any, Callable, List, Optional, Tuple - -import paddleext.torchapi as B -from paddleext.torchapi import nn - -from paddlemetrics import Metric -from paddlemetrics.utilities import apply_to_collection - - -def _get_nan_indices(*tensors: B.Tensor) -> B.Tensor: - """Get indices of rows along dim 0 which have NaN values.""" - if len(tensors) == 0: - raise ValueError("Must pass at least one tensor as argument") - sentinel = tensors[0] - nan_idxs = B.zeros(len(sentinel), dtype=B.bool, device=sentinel.device) - for tensor in tensors: - permuted_tensor = tensor.flatten(start_dim=1) - nan_idxs |= B.any(B.isnan(permuted_tensor), dim=1) - return nan_idxs - - -class MultioutputWrapper(Metric): - """Wrap a base metric to enable it to support multiple outputs. - - Several paddlemetrics metrics, such as :class:`paddlemetrics.regression.spearman.SpearmanCorrcoef`, lack support for - multioutput mode. This class wraps such metrics to support computing one metric per output. - Unlike specific torchmetric metrics, it doesn't support any aggregation across outputs. - This means if you set `num_outputs` to 2, `compute()` will return a Tensor of dimension - (2, ...) where ... represents the dimensions the metric returns when not wrapped. - - In addition to enabling multioutput support for metrics that lack it, this class also supports, albeit in a crude - fashion, dealing with missing labels (or other data). When ``remove_nans`` is passed, the class will remove the - intersection of NaN containing "rows" upon each update for each output. For example, suppose a user uses - `MultioutputWrapper` to wrap :class:`paddlemetrics.regression.r2.R2Score` with 2 outputs, one of which occasionally - has missing labels. In that case, passing ``remove_nans`` makes the wrapper drop, for each output, all rows - containing NaN values before they reach the underlying metric. - - Args: - base_metric: - Metric being wrapped. - num_outputs: - Expected dimensionality of the output dimension. This parameter is - used to determine the number of distinct metrics we need to track. - output_dim: - Dimension on which output is expected. Note that while this provides some flexibility, the output dimension - must be the same for all inputs to update. This applies even for metrics such as `Accuracy` where the labels - can have a different number of dimensions than the predictions. This can be worked around if the output - dimension can be set to -1 for both, even if -1 corresponds to different dimensions in different inputs.
- remove_nans: - Whether to remove the intersection of rows containing NaNs from the values passed through to each underlying - metric. Proper operation requires all tensors passed to update to have dimension `(N, ...)` where N - represents the length of the batch or dataset being passed in. - squeeze_outputs: - If ``True``, will squeeze the 1-item dimensions left after `index_select` is applied. - This is sometimes unnecessary but harmless for metrics such as `R2Score` but useful - for certain classification metrics that can't handle additional 1-item dimensions. - compute_on_step: - Whether to recompute the metric value on each update step. - dist_sync_on_step: - Required for distributed training support. - process_group: - Specify the process group on which synchronization is called. - default: ``None`` (which selects the entire world) - dist_sync_fn: - Required for distributed training support. - - Example: - - >>> # Mimic R2Score in `multioutput`, `raw_values` mode: - >>> import paddleext.torchapi as B - >>> from paddlemetrics import MultioutputWrapper, R2Score - >>> target = B.tensor([[0.5, 1], [-1, 1], [7, -6]]) - >>> preds = B.tensor([[0, 2], [-1, 2], [8, -5]]) - >>> r2score = MultioutputWrapper(R2Score(), 2) - >>> r2score(preds, target) - [tensor(0.9654), tensor(0.9082)] - >>> # Classification metric where prediction and label tensors have different shapes. - >>> from paddlemetrics import BinnedAveragePrecision - >>> target = B.tensor([[1, 2], [2, 0], [1, 2]]) - >>> preds = B.tensor([ - ... [[.1, .8], [.8, .05], [.1, .15]], - ... [[.1, .1], [.2, .3], [.7, .6]], - ... [[.002, .4], [.95, .45], [.048, .15]] - ... ]) - >>> binned_avg_precision = MultioutputWrapper(BinnedAveragePrecision(3, thresholds=5), 2) - >>> binned_avg_precision(preds, target) - [[tensor(-0.), tensor(1.0000), tensor(1.0000)], [tensor(0.3333), tensor(-0.), tensor(0.6667)]] - """ - - is_differentiable = False - - def __init__( - self, - base_metric: Metric, - num_outputs: int, - output_dim: int = -1, - remove_nans: bool = True, - squeeze_outputs: bool = True, - compute_on_step: bool = True, - dist_sync_on_step: bool = False, - process_group: Optional[Any] = None, - dist_sync_fn: Callable = None, - ): - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - self.metrics = nn.ModuleList([deepcopy(base_metric) for _ in range(num_outputs)]) - self.output_dim = output_dim - self.remove_nans = remove_nans - self.squeeze_outputs = squeeze_outputs - - def _get_args_kwargs_by_output( - self, *args: B.Tensor, **kwargs: B.Tensor - ) -> List[Tuple[B.Tensor, B.Tensor]]: - """Get args and kwargs reshaped to be output-specific and (maybe) having NaNs stripped out.""" - args_kwargs_by_output = [] - for i in range(len(self.metrics)): - selected_args = apply_to_collection( - args, B.Tensor, B.index_select, dim=self.output_dim, index=B.tensor(i, device=self.device) - ) - selected_kwargs = apply_to_collection( - kwargs, B.Tensor, B.index_select, dim=self.output_dim, index=B.tensor(i, device=self.device) - ) - if self.remove_nans: - args_kwargs = selected_args + tuple(selected_kwargs.values()) - nan_idxs = _get_nan_indices(*args_kwargs) - selected_args = [arg[~nan_idxs] for arg in selected_args] - selected_kwargs = {k: v[~nan_idxs] for k, v in selected_kwargs.items()} - - if self.squeeze_outputs: - selected_args = [arg.squeeze(self.output_dim) for arg in selected_args] - args_kwargs_by_output.append((selected_args, selected_kwargs)) - return
args_kwargs_by_output - - def update(self, *args: Any, **kwargs: Any) -> None: - """Update each underlying metric with the corresponding output.""" - reshaped_args_kwargs = self._get_args_kwargs_by_output(*args, **kwargs) - for metric, (selected_args, selected_kwargs) in zip(self.metrics, reshaped_args_kwargs): - metric.update(*selected_args, **selected_kwargs) - - def compute(self) -> List[B.Tensor]: - """Compute metrics.""" - return [m.compute() for m in self.metrics] - - def forward(self, *args: Any, **kwargs: Any) -> Any: - """Call underlying forward methods and aggregate the results if they're non-null. - - We override this method to ensure that state variables get copied over on the underlying metrics. - """ - results = [] - reshaped_args_kwargs = self._get_args_kwargs_by_output(*args, **kwargs) - for metric, (selected_args, selected_kwargs) in zip(self.metrics, reshaped_args_kwargs): - results.append(metric(*selected_args, **selected_kwargs)) - if results[0] is None: - return None - return results - - def reset(self) -> None: - """Reset all underlying metrics.""" - for metric in self.metrics: - metric.reset() diff --git a/EE/paddlemetric/src/build/lib/paddlemetrics/wrappers/tracker.py b/EE/paddlemetric/src/build/lib/paddlemetrics/wrappers/tracker.py deleted file mode 100644 index b2b939d91..000000000 --- a/EE/paddlemetric/src/build/lib/paddlemetrics/wrappers/tracker.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from copy import deepcopy -from typing import Any, Tuple, Union - -import paddleext.torchapi as B -from paddleext.torchapi import Tensor, nn - -from paddlemetrics.metric import Metric - - -class MetricTracker(nn.ModuleList): - """A wrapper class that can help keep track of a metric over time and implements useful methods. The wrapper - implements the standard `update`, `compute`, `reset` methods that just call the corresponding method of the - currently tracked metric. However, the following additional methods are provided: - - - ``MetricTracker.n_steps``: number of metrics being tracked - - - ``MetricTracker.increment()``: initialize a new metric for being tracked - - - ``MetricTracker.compute_all()``: get the metric value for all steps - - - ``MetricTracker.best_metric()``: returns the best value - - Args: - metric: instance of a modular metric to keep track of at each timestep. - maximize: bool indicating if higher metric values are better (`True`) or lower - is better (`False`) - - Example: - - >>> from paddlemetrics import Accuracy, MetricTracker - >>> _ = B.manual_seed(42) - >>> tracker = MetricTracker(Accuracy(num_classes=10)) - >>> for epoch in range(5): - ... tracker.increment() - ... for batch_idx in range(5): - ... preds, target = B.randint(10, (100,)), B.randint(10, (100,)) - ... tracker.update(preds, target) - ...
print(f"current acc={tracker.compute()}") # doctest: +NORMALIZE_WHITESPACE - current acc=0.1120000034570694 - current acc=0.08799999952316284 - current acc=0.12600000202655792 - current acc=0.07999999821186066 - current acc=0.10199999809265137 - >>> best_acc, which_epoch = tracker.best_metric(return_step=True) - >>> tracker.compute_all() - tensor([0.1120, 0.0880, 0.1260, 0.0800, 0.1020]) - """ - - def __init__(self, metric: Metric, maximize: bool = True) -> None: - super().__init__() - if not isinstance(metric, Metric): - raise TypeError("metric arg need to be an instance of a paddlemetrics metric" f" but got {metric}") - self._base_metric = metric - self.maximize = maximize - - self._increment_called = False - - @property - def n_steps(self) -> int: - """Returns the number of times the tracker has been incremented.""" - return len(self) - 1 # subtract the base metric - - def increment(self) -> None: - """Creates a new instace of the input metric that will be updated next.""" - self._increment_called = True - self.append(deepcopy(self._base_metric)) - - def forward(self, *args, **kwargs) -> None: # type: ignore - """Calls forward of the current metric being tracked.""" - self._check_for_increment("forward") - return self[-1](*args, **kwargs) - - def update(self, *args, **kwargs) -> None: # type: ignore - """Updates the current metric being tracked.""" - self._check_for_increment("update") - self[-1].update(*args, **kwargs) - - def compute(self) -> Any: - """Call compute of the current metric being tracked.""" - self._check_for_increment("compute") - return self[-1].compute() - - def compute_all(self) -> Tensor: - """Compute the metric value for all tracked metrics.""" - self._check_for_increment("compute_all") - return B.stack([metric.compute() for i, metric in enumerate(self) if i != 0], dim=0) - - def reset(self) -> None: - """Resets the current metric being tracked.""" - self[-1].reset() - - def reset_all(self) -> None: - """Resets all metrics being tracked.""" - for metric in self: - metric.reset() - - def best_metric(self, return_step: bool = False) -> Union[float, Tuple[int, float]]: - """Returns the highest metric out of all tracked. - - Args: - return_step: If `True` will also return the step with the highest metric value. - - Returns: - The best metric value, and optionally the timestep. - """ - fn = B.max if self.maximize else B.min - idx, max = fn(self.compute_all(), 0) - if return_step: - return idx.item(), max.item() - return max.item() - - def _check_for_increment(self, method: str) -> None: - if not self._increment_called: - raise ValueError(f"`{method}` cannot be called before `.increment()` has been called") diff --git a/EE/paddlemetric/src/dist/paddlemetrics-1.0.0b0-py3-none-any.whl b/EE/paddlemetric/src/dist/paddlemetrics-1.0.0b0-py3-none-any.whl deleted file mode 100644 index e737dee2b0276c2b2e7cf108ce78c9f605845969..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 306551 zcmZs@Q;=p|z@+<@ZQHhO+qP|+U3OKMZQHhO+jf`he1A;D%!!z}UDp}8pOq_fE6RX? 
zHCc8Vr|6&TapwTp+Mz26wOxjYBA1OGaPk-JkmVE$1^U;I+eS@dn0#{-dbkbo)|vA; zWC#7I#D<5r?jmgO+O#i6!+UU6DZBdyAQ2UkrxR-pGzFLlDvIN<`@yc-7Y!=cQ$}Ah3R5dsR2KH*yrXP*~;s$w%7TvT&QaMcM_i0*WBp=F_PoHP5vTQ=8p&?{hSY zFJcfEy&m-KN45X`jQ@ylG!%kl)q9B`a1&H1lCK44q;O1t+u?T!uSq?ALz1F<% zUNJ}?a#5~sGc?2Ti_;Wsl)TM0jJQKrNj zF*OJWL4Gh}`rx+ZP|8{z3!tb_ant~e(2Ag;PyCy+TyFhGC)}y2B0?+qAL+AON0B)U zGJ^1@iJDS{3Q!ZsJJvN*itK1I$Z(F;#_~f5{L6bw5OCv?c?iRmMFFo z4qI$*<^On%)z9>%=m1Nlk`3&=N$IM|_D3EnA}SzUJG)xHPb%0~_OIS9R!&k@(CMoO zD^~W`Dke_QSI}$d_O6RKdOl@79qrzDkf4-67fR)v0`3E1Im_jXN*kz)cmV6eLFbz^ zPSm@Dosu$StrJ(3@Rfy4YLpu$Sg{oUgJw^8uf08~Z?9`^kAV1mJAsd2xP zM`S;Z21-eXL}%%>o<=scIoVu;;;Af3(~(pC5V$r#uxgCj=U;0J~&&Z9m}El^VU% zaIrz)TW*Yu3wSp#dwU`J2nXU#$K1>*iBA)zwAmqMokf9;%$(T(j$i@H7Kh@IoE^>D zTu8_5Mbq^Rhj^PNmQbe`Ewo=l=OX4W?uvC%gas_$d*vCb$TV|DUh$}<#^LR4Y+^l8MFc007Qb- zPol<;AwPkz1-}>I0snEunq9}fY^F!y5y3MUN;vgQGGm7uFo*n>3!lSb6ruu2kQS)*L1Ep^aDE#rS^!aa6e z7*!Y;ingC%v~kLZm}(i9La}p4<4u1rU3O9Ji;%k_Q)}mn8f!O~nf^7|&Z05T)f>kd3B1|g z7D5pHNcfErRAs1Ugyk)rUk`){NPvVWMFLj zwY8+B1JJp$uNjvlVK04&1QT^!@b}>FJJ%idlCun^I9@lL6wmF4mn{YmP8tFXF)=m3iDM$al)aECkvuxpGrKKuxMJJb*;K zl*>htxQ_`<{@@-*suJA`!=4?{MpVfoJZw;l432+H!Za#$WWW)g{-ioE_bFVO0=!hPTM z-u%Cqmhe&;6Av&iQLricwHTW`(aVUUkG18m5T*6PR}NJa7<7_!nzJrM(_gJ~bF_Xy z%CbtMg|dI2>2N14J(pauXi&b66s@ULFsQ9W`ljLOELR5sb1tf~lFL8jam}irY!}%i zktE~h2}|=nL9hQtup=FVg#i479gsj4r&_F{Z&w_rLY8GcJ-U~auTrN7ftL&lPt*c& zsU0Y(QM^Zkdj&XEFO&(dKm7{8$667^ck9L1OVL_}cwU$pyQSC`JiK!?506|W+ zuRgj}8i+x;XH*8?7S9dr?GdIL#BAh|qyh1-{`r05V4u_)$UI~5VPx7xgBNT$Q zJI*#wZ%_n@v?+!{t5n#oH*rQ{I=x<9Uf{lcf=M7#PKht3dMpc*3cH3HL*=Wn{((SC znDsh_Iy_4y(B{=q!aD@mLiBaBM#3LRx856a7sS`t_-YvP^A=lvoG{-1hQ2=s5OdEQ;iJF8!H?yB2S_cMI)Uqh{pSmsvM@O~}2~x~*x3b+E z;oIi2Rl>@NO+jo}=&d^1(vOsZS@c1dS`1o|w#vH`$DU|d<`5ey!IZh0 zAx#LSw)C-EC|NK{+a?6`VGmho5RO;FzKZpa)@LbEgt);nd&bN6>+`Mf4Ko(9FRM_o z3WtpJBB0c!p!}EjY1FV-PFuqPV~v&RoGL8v&3Xf#e?N2QO0J=F^(Mwm(D5YqCE{p` z+DKj%nAfh~xB~w8)16z*?pSl#_-9Nvzz~w|TG#IkYZ;!Gr3n^DGNz`IIB_t<#%Q47 zVF$!YHT3qO3UhH(T-N@Vimr)6DODN)GudUcda-La@>=N`4K(|rHB&l13tofyN*1=L zzx`+*SBn|E??qX$uz@&zLQqzy za`54#$5nBasbE3tD^=o?bG1chV)MfKI~>brjzt-Hu`cak-lT&{)*qvRn~H zVQ(Ct5GQ0&-TI)vR0N6LJ$a}cE*7`=sFw|HWN-g4=|CiZjOtw1Zobj(+vAy{$6u#I z8id>ru^2~BpyeFqbfO_*cFmNaSH|Om+_R1bah~wO198t6@8dZQlqYd~WGC@;bdiX? 
z+e?n2#8y$!SifOuJ2W33XJQBb%KQodKh3B(RAUswQWEdzazlKRx!gwuU*W(XXcjj@NMzXoD?V3MDPL z7;hn#nTNLfg|27}mMv?IpUoNPwyc>tl2G%`bMy~`LDLq{R8GZ}8s#V@b60&S$D8&y z{Y;y#U_5j`=ivbc8a_bbQZR*Hu1GAt?Ly{hcc!IR5x!2w#C>=PQmKFJZdwFpb-_tU z-uzW}`Jr}ELj4hQaHJaDe*VPxv3$~jZ{Rr`O!QF;bDQlA4#YxCEr-?PuYf%4T~Lls zK6?vHX_f@Cd&zvBSNDXMN|_rr2^BE;U-9C7@1bdt@KN-J=_EKt4p0^Vh2&j_dnr4(uU4 zr&O9X(^y_LTQ#$#Aw{QcgzLszXkCJ(cg5gv7VwFP5>2#LW)2@<-0AHXJH&y?B?uj9 z)FBmFY)KIw#I9^Ot-9jRdGp!<@1KI=a>N!}3I+8to`M|}=z;^y?v@+29)>C#Grc&F zV|Gv9x^JXC+q#v){Gj8V`YbuHwIvC=yDX6|vBJ#;eA!ovzhsMVzHCcuu{)Efw`U`9 zRV+sem4`a9L0Fu^=N&WSkUGv8>gx+N{k)@)-@&j z4gKHU`G4#^zx?t1$o(z;vHuqTi2uJ5DSaoyUt=Ei-}ol~Q6u*)xBNKm1MV!`igc2Y z=cW$NkNsW23LDQCRj6Ci?Ku%2OTJg2VL*uMOgV|3!MzE$c3i?}+Thl>A@wp_UN7`; zpW!Z|aycyI7%ZrmLw#0I*GQAsHkpqNc+E$m{B9yg{34E0ZZFi`&oS!PIVz7Okt?gjDT`O|Dup5)SDKe^>vCDZ0)%+_3posQM7FRM`Xm$wJb2)~#(fc}v#ScfI(yAK1 zd(~-F6Ro~BYW=wV)E3J{#Nea5TpVqnZut~ud~|Faxx4vY2Qv)f}mVnl#|8ZLhPy*6nWTVzcly1|jnRV_6OwwKqn4Ayp+MBE7rT=|(UQv&yhaVP9)C3cN1b^YMYrTCK^ulxFML`y zm#bv#6rK!lylX+qMX-AaNPK{~x-OYHil#GCBqqPOsQ?VT`vX+flyw4U6ZKq6n{Ai>0WAhdpe z>Mm{5M90NGSFw0K*jk;BFL#gXDD0o<`GejGqf0nt?lnebbMt8fGg$n1%-++vSzQ%( zIL6|G_X>SCnNu0!eWb4TV9G5VBw+q|E6K9)lJ$J7~di8$8uYx2* zlr{^WES5d3j3pAutZ^f~4L>HNxi!In$q((W!@P*zn*|c@2a3|mFX$nF$815bdajzO z+=*9lLCU;QdUYirM9=gp60zyD{VB-gk1EgNuJD!Q<8Xs^;&njFii;+H#q+^F$42Ub z)N?Sr(QYjGeO}4AX=uQ-ksxxc1$FZk0Aif%m?6_^TG~vbtnui1CHxW7j2_!66gHUt&-4CN zKTRO4+yMVMAYnQ)10R+4Mt&Ih96{nY2P2 zp3l_$c3ZUNVIh*typxG5nCjLQZ_%MmzPOfB&;LcQaqU z%hbwtSeET`KO^p-sMnc@-b!=0F}eZX7|5H&)s~9~8i0+SkjCt#Xks?(1kR_?tj6pG z!p0HUN%fuT_Ue;UH}7$)h!0TF`ic~*ryu!;g{UP48-qq16;Cqj4EkF$JBk_LAj<`O zU8{{I?FmfzbqAI-N{_S>2HiQD{^j(3bLiL#&P;?Y(&(9_&GUJ?v$FT_arOX#T1cJ| zp=&ge{FFC_`m23~K`)q-S|c!#X`7}LS(D_;N-V!qH4gJ?KH2zpDw!HkEB-HbQWe#L zeew#Nl?}3Qhg+Zj8!C4P)au9E(%KS+n-^!t_tX9Z>jzU$XJ(FWPL@txgj91Y2R~;A zUk67>3A~lhH}}WP+S^zvq#bG~B7Bk1m*5!876sDdS`q>-sDe4_d{JlvOGIC_f%aXK zo~1iIN>CSyMS?j@G1wE*ZoFy6q@A$sb0idaSvOfB_sbLVq&OfZX|-fBxTP2(Srda z_F?t&;b_RftO{2eTuqq@!av9vJA=9}&&EqgD_qSeRs_K;k0TjObJKpvkQjvopw(8s z%(E|8Y8R>#eZmX{X@OqBdnqxK8ZxM79IBeYk4+hl>LF>A5%H=(xl#!U$+AG$MJ4t! zRoG=Bv6a=yP^06-9TafQ#C=*tJbgrYx(+~9tAi8d94W2Vn+Y zLGTl4*zi(iy_?5if>cR6KShe|uj3f(EBpkoiSkM?mZ*nx)QtXsZ&lED&E%IB?Rj7y@S4Q!uKQ`f@^aWf}PMm%c(IY}Aoz zz^3=z)!qPK9US$;0FWeosm9HK?Ag6?O@*3mm(e>LDur*N2;5m;<~s-lTs#!&fb_zM z{ywu4K(Q;MMAZmf1`wpc^>z@}g+j}ncI8h=Z$Ap2VTYSss+PRYbAmf!(-#Ztx`&-z zrE(U?Ed?bDYY)RSOIeloAV~}TyQAuKGkr?6{!l|;eK(KTQf0(QbESz80k~VyP7FBW z1Zlmt%Sx3qx+a^P*~Z_$D`^vkbMrgRvy$Hiu6CQLS<2dA&g2?8z@7&Z&fXf#pyZ0ePV z@Vk=ifCLj(0lFu0HtyZfYa)R4EzX7(#(RVfG^*baszwC4X=*8oj}=4#E!eSpiG}{# zmv@^`gN8)zWE+3_nM{3F;tct09*nCa8k`~>s=F)!n)rlPn*X=b;tbG|{)^&EPDQRDg`z$w}}jl95{zDPt2^G_}{-oGJr zJ%*#&LmFZ=bw{D;`FvElKWJAlq^iM}6Sziy68y-?bGWwVreY*F4E+-3Q1wGbNN4Pn z7CZh4-(*vfc5uc>^k}uW2Y~?Hpq#>_H8-l!0b8*7;qUp;>$)NH$p%({&;x1E7lWe# zb=OzdIs-!hQk}duPrV~y0ZndB0yNgvT@ zc+e>$h%rajW&q43-Udz%~-KldL6}>#g{sdch1QVOuIEzsE&cA0 z+Mfr3*wo!J<}Weflfn`q0CAv>FWqA(R9l1`PCtvJzq>?aVTA~-EdZEi>)g3?xQb>B zQp;yOY~LG{#hUygtN=rHlYkH(CrU!a&HVZ@r;^73=%M^rPWa5K+Rg0*tLlDZ7ZlT? zCfi24p=B@cXgh@%lK}vPI5cE*(i}#MSWt#k_`6EaD*9?X99`m>eEmiQYhiT&pGq4G zU)@wXY>>{V0VdjkyAzPiDl_-}v}#DzNI2&saJ!&T$p^&VTN34&9n8h+6f{e|io8mX zI>@u|?mx>4Gpw__L`!3I0VD&^l`bA1s&~tf>jQswf$)9_#v?`wbyK1C=WKu`w9Wj; zIVT_sX2wAN;dDFOB@1iSQ=+*Lv9(c7>mUe$q&7qN`8BNFw63cs__l^B(?Wm65Kvp^ zur)$156x-c7hv}X$Dyyx?DAdIilnzwNm7w2m6KMbz+|~im;jc8pYBQkXGL+IkNxX? 
z&aPMP`|sx_CuP#4etO4W1#qgm(HoBPZpD#~94B z2h|TtyD#==L}~VL(kEU!Fg8`sPVr|;4%l}wIP5Chrtcj_+MYSxuj)u-_utyA+DWg zNetMlEx)Hb&6VxH97u&Y;Z+OHZ;rX|ld)QfT(s*aJMg@H3M004e*g8w>q|1W+<-^JKL z-_%&w&cWEw+|k_D=C`q}`VS&3iu7Hr%P7aE1Y|ZWBaf3C0K^gPNUmiGUbm3T0Ansf zXPl3c(jujy67g~KGI@QktgtR63taoL?ut$gX+?oM^~Eipgx)ipnd{57h3 z&$1&&ImCw3h1J#HB7Q77taMbjsO0}ykAV37n&j(~yk1CE!oJ5TovL@4GQ7|eFrcH{ zi0V16ERhta7l$P||2N(%%t&4#U9qrPBsrWXzENs|oO+n~cZo67Aa|kQtKr+-t2tjp zEU(e~=}9$V@cjN{(*hj?icQt!p2aEj@IErM^fa~ggbA=goAdA&OIYK(Ly}cV`$ht# z7debo&cr}WGGWy$tVw}!J4;4tm6D(!-hsRkB!z>uNM=QOjaelMj>{ENk1lTE^*$QS z)zHJ$y`>ep_h4fGJEtY*=4|4qL^g%}CE|j+{p*iv3;PZFOT-3tr+4l|hVH3uM8*rC zL@JcQq110=h(NKGC}zXdb>StTcp|V76lReGmSBySVpvpRlt=PUfp(oEUqByNe#mgZ?QcBfhOYO_4d58;tGcf0+kkSy;=^-fAXx3RR>A6~C8g2@fMY zbTe(X*iN~zqAH)A&IJBdGRoq_o=u*!;KkHrMh~|yREYnEs1&j=dA=m()1+E-pmMQh zw-@yXj538&eMJJy#9>jTB&u#SEfRXmzpD=Xev8$p^B!5pLN}a^W}3#v=e@t>ai*E9 zP{fAx571rsrHwhz$CtVhs=%cJgf_D}nsVB%lLqubLO5Zjdf3b{OvZF97HLU?3I)jC zD4n+)mT0uZA1&X=4>!>YRHuk33y}m8#r!{*hrSupwM=c06fgsn^jxgPnRYgN<=U2v zvR^I}nOfS0%0ufNaaYt$h17I#l{pPKmNuBZbF}per%^QOLd8~oe8H3BzQl)MR!nut zX)9L>wv&^6druNYiBC3RMxF_Fch-l9;I=@D(rS=7E1{yKo4@|O(aZXc4`c(5wTg`H0vIp zR}<(6wu;Is!B7x*Po(Qio!PL5#g>^r#%b$) zhz;^FsR7p=RiTaVaB)uv+%Y`Ur{yx-9a}5hEVJH}JD6 zS|&7&(R(p!>-qMr#$H&nKWi3#?5L*7A+*D3D96moeXs`*vp9RbGR;qu#v3hw%k`DB z9q(!(8+nV{C+^tx-6wQ&I?|L8J|>Gt8b_IKc37&QD?#L1&l+=sr_=C*tE6SD%*%QeoIbtJ4JNicw6+B;6a|4N1=(b)ciKEM6e{-ifDwOvTnA`Dc zng+uk^)B{JH?VA*4=@j1eKmHiNuBePv8&1nCApWcyhECejK*52-}$yF49JPu;IWYp z45(+;ZTHWN$qMFCNfJdOhZ_~SB1)#MSLoRs&t1%<<{Lqup06~ya17EZ?HO_(o9)qy zIlG{>Q!;X6=S2i;f!@FE*9)Aq?;vP@uxIh${2vB5%g&S^BKTK%^%MQXX6HRH(D!B~ zv;6G`6~L+ZGeiD;5avGIevbac2sU75#n)H?b2@zE@p>3|dpmp4I$Rj>_;%@wOE=CWgA5iZOyRYf=Ek%Q znM>o9;%zO6*|_$FMZ1KWPfeDJR6cUxH1{QrNm$nI+N!lqa1!v6E9T|v>T_GvlNaK-?P&>FQjiTj5Pg!Xc~T|2><0U1%JG6K``Jhf-%^kn zbED22hh;n8p0|CsEr-FZ>PZGK_LZr4rgkT(v9~VH=NQbjiZz}9Q@1Bq@h+_jvqjbC zBAnZqJM)`w_cMo))KEi})SBS!<@lnp#-}3nAC?;8mYMm-i;)#F4AuKJ`#(3BOzNvw z+LW;guSmrwzrf@GsUiL$gT>!%{Opa`)@s&*IV+7Rwd*gee=FWJP5Wj396?Bx>O>KI!^YDyrbL(Q! 
zIBG#|ncQ{P6mPz&mu6syQ-UFa#cMv1PxI%e*%ACil}=^JN*golb4(Lm9ZhUZR+S6H zT6t>%$Y%xPH?YLWq%&yTb#NZE<>G->1)9?WeXh-TRwqL`4v_U4EHV$qp~Ecnbu_<6 zoOxO>2h})Fv-TtmL}qKc!Rp?(N0NE|K#CML-tx~g*liwMtj&c-rt{SmeQ~!7{Su_F ztV=B3pzlQsT`-(lVB=|4y0ymJfH0U6uS}^>aX-l=M?a-j`nzfFLLJb6)6FqiRS4ID z0q+xz0$f(W_ynr!X#sy_O9k9rIQ_U+l6AG#W|r)%%5q+)P1sX1m$xO?o0K0f?ic$1 zT*>XvX7SnlisUyre*e4tZ(WRmxs8pn(f{AX=>B#y`c_uIz04K%?~N_T{Z@~^onJs8 zRe6?9eF%3#WgU!3$ zQK@_PmtqnkCeEIY5E*DnJ~Ia^Da5D0HSckf+cy!@0r?TNfl?T|$jxO=$xIo@6b@hb zPJo^%2}Ih_URX?hjfbG)9*ik7r^;IcST_J_wOI(i?pV{bUdG;DJsfZWqTv%V@VCFb zg!v+Cs)rXc#*SFsT}9}&rF(KV|87g?Jtaq$^u+&@F5UqT>{9{6e2zUDT=Gu z30pPhz%(mqBRDWZ2M{0xx~CfmV}To$P8%YiLU+vr*>F24BP@)RXkc-(a`c1LaX0z}s zuHI*P#8R9Fm@pDV_|m8oqOo!^_1o8g4ab0=GO3l&gCb?;(kl1b@;q3W04Wf%c76ob zNQiDnYIR#)`pEKG6x~BdNx7t*m~Ri!*ty_AwNcq3zyZ}8xDqT7vtpH_J^Gtv$XLCy z)-Nl-3Qa-QS}Nio<~lt?K7?F0nvBx)^d<7aZ@o8$}}p#qVd#R);(CM7Q;-wDl8!vyR& z#KS{F`l2xMcP}|FI^i4VbK&DNd3YDI*I*e11Qtd{r#>A-@;iT(RG$6G5~FQuA@C>I zN-6^?1uzKH5Onan5+Nu(c{f0|KI8NBV9U1Xf?Ql^$QtLA{8%aQ_K*o!xdjwwYY9jqW2V zGyote*?P}J&mMda;pY?h=hOR|`&KfE#T4eKM-Z580{y%@5-yEJ$}=*!S(TB?JfC~l zA0R3m=bfrTz_A@XNS|>U(eR-1n8iNh#_wrs^VGu}w^QXI zT2{+zCyRrpUBPJW2~NTSvL9wn;+X4uI;tL6Q;>_l0jd*vDgnVM&m9a1ed<$@^ut*o znqRYC7H7=o>xcw<9pSbZ5rbsbh`!{e+6U{0it&1on`;b#%{Y^(;yZHMv274F&j^f5=jVAl?EqGIQENJSYW=pL^K%cl=@~iM z_bmvM1YzL0h_QFtc!4C`NQG48b@3#2{K6^E zXOtj?nJ2H{N_qv$nmfhJ!BgFiAF?gQE0^&g+C_*wzR+tN{*&^muU z&5%KwI2%1gv=$aaJ`?&n*|>VH5L4^zBKk1LI|hJRZXwn2n_Wy&MSUw@zO0lKX%(0t zs-kLBkmh|H)~9ljj=ln|>LzF8R_fG@UWV;MdrGhk|A8z3X+L+2V7$)pK8}~*gQ79> zdU>IxSlynk8Xg5>jINGUh!o_hLd=|&8|=;BTfEXAPIpcs59rYyt4JS+_?xO%7nV~w zXxpr}?HevS0F>;g14I%<@UWnRmoD-1iW1nGO)~ek#Dh8I{8A~@_CuBgIEk&69@d}&-(y|9vRqLOI@(Z zlX+%7PGWM1){ekjl~-L&N+r|f=|}noyH+}&7tRg#C!SGnr-Z+rpFQfFz6J4IPD_H= zakII+NU9(X+r-G>avcqF;?A$JuxKPC6AtI0B}sA=#MF^o$?W6}+9}3^GQ~>NM7Str z1rVGXrE{I6#1;$b_+#?rjPF#9gu>UV-A7im%CAY+6q#|u3kf#ADz`w za-zH6^QfgY`2bI*)E8cP;WCON{2Eh$mL52Y7HFIu4} z#+2o5A;Y8%Tbl~?&&ZhovWi*o&FdeDcqKlFW5}-KENa zL-rm)*b_lTY!v&68gWwQfZ-E#Dapz!KV7CSlyQkyD5yvqhu4#tZ(PhC!YS>m*iGAN z8dfA<^}1%rwx`HFW6IrR)I)5SU1wc=9csr#e@n-a%}4j&3lSK7mdF9W$fV1hWYG7x zv2KHWychAm0bo6~=Tt5!SALVRgo3CjmD5Px5GMNL$0?ULh!lxpNUsvNIGCG|dUJOy zQ9X=7ZJ;;lZxfqu;%ea-{;@5i6|~s8c4$-Jtjc9PZA|LY?D(YX=u91}ngU$@@tI;) zPc}NtNn)~dZX%Zlkfl)(NDIR?7dEganXWYH_8S3lFFB~Tr988(!-tY zJ9!2Tlsa6@nKb9RS@;<9;)2gTcyPMw$+8oH&{RW+bBuevF+H>WRL zE00f52hsteOLTr+SHoy-s$&^@0i;W0%gyiy6U_wOV2wvFXELK(eSs@XgQg@L=Us=BLemunQTD`D^0U8V~r?6 zv^^hS9>+N(U64E18-}WQ)_p%+d!dV=e41&ozE9i#IpBH9uz$ z>peq1bh#-b?j?rb6-~mYv%S+JpK+^5bTH;bHm0GZqkdiVJO*>?ieD$itpgm}k=W#yjH#K=oGOzdut-0Ux zF<%U;P^vhYx}0f26^{jE%(ic(@kE9!x+xt|m=>jcyA3^V-c3HZ9OS~yh$oFYH>4fteX&5ASeWdUQqfYCCo=3E_tLg`q8 zcgF_~xM_E5fAm^VnNdLtSqa3TvW9wOD4#_DWrACj9X+*q+hZdu23^XuCb6}#!yu{H zM2XMjzHS&yU%yyf5-l=MM7~4LNK_nWHk;_lIlcM|S5mIeqO285D70WCqX`G&^1`S? znuk_o9`bzBU|1z#n{O`Mjfl!!>a!sPTMSXtxtX=okoackdWYu|VjD48$(CrJlRL-- zGq4!v@4cTjPFT#{sF<cX1=Q^z0(-a-L=gpW%f ze;@W&Nw(}_UV$7a&qMdC#M4sCL7j^%oF^+wI8kvU1s0AVuqGDI1uiHb{=(Z!p9H*K zPOm5U_0zt(dDdPswePMb=H6)xPzyN3!X0#4rt!A(0Ps!Ae0;o7qT+@Ehv(mAB_4S|3ZwR z$4kv@Gz|GADzKbM#C19i_$(Atj z3Pc;rT=_n&@$5*FSy7#;!B;G)$Ax#QAQW{6PA4=(Ug?v!*vWvR6wX@RgmjIk{}heG zB3 zi?=yX7cQ&(HQdAQmR%%R3v^MZDrABAq${k;NXu`x9Zi3JY?+TVR@8qR4<$bQE}Aw{ zpS>fW=5Kh7xcDv0uYLn=5HGidY#LZX_%VVPOXh?u6Jp*i?hBG0(z_1$@@hWg$LfyaOpf7;A1}s{PuD35^JnI;4B-)--=*? 
z$rw3KB&cxT1?}U;GrZ8oTVlX0!=UcG_Iy#q_EcpBy#P*33|>rvUk9&oEanlIzf8%Q zfP9EDt23>$uR%*i<+`lw`9Jh^IT~m8zRYj(y@(jMwlc(fKN2l<20l=^yrEjsxT+gH1Kgx zf#cKu`@0ukNq>NJ6Q@>+;H=J1ZTDAW+m91y@Av3RdEzg9-NQbExx!|9NqJ0qm{m{Z_xuQ^~)zu!Y{V+6$>Hfx z;-ZY?i|bhQ`mLGvjpcNZTT5Pl5)&l6G$Ovb6YuAWqRYrEMEd7La(9EK+#>Px#4^U@ zu1WTWHj70RkNPCjQ-DhCDPvfsp5#^5Y(w+RP>lmjF&kjz#fN1xJm@pM#uQ#{l)pA= z$P&U@!j=5PZ`229E+^5Ek8{&xO1?*`C?1LfEu=K*KbIAHO&qNc*#e}33KCG71#!MG zl{w9;{{&?sR+H{XsL&NPJ!r)TmG=_slYa}O6A5nGBU(M|{PT|@ojU)!fVz~HxxhNL zJ+!m8i3t+lcx&2%uvzZ_SI4vTyFwvd61^FeNg97kyvSt`eou&(^+@`DBvu>dxhc{w z%g+*Za9sf!LueKTrMfl>0|^xMrlO~`ZcNyw)j3#&khnm}wo6wh%hub5%VZ6#t~K#s zrmCR^p~d(c0N-u?kOAv#T0*7YyQ8ScbtHoPPrBzh1cC&Sa*pa`-57jZ9-@{!CO2XG z7chlm5!P@aD!N35m7%Q>&?Kx=z< zghcO~mVs#4UfPrF477PVVDrL*fk#SUlW zQ#=KHr-R+mDaz&;sb+mMqH1;t(c-|&{KuPJCq-zI{h+;HYlJ>W0AKKUDOMcHI#%Ux zk76OFAb)K8I`-20wv-!{0OGB*XxNCj^pI!!)6i1R zWZtWdo!a@KUby>(dMP4m6ZWx=H#>UFVP_)*#?X*l$59gHJRVB07U|qjrE}dkvpt>>@IAF4@Cd!vOz<(YnscS~?2VWmA zk6vKrAu&Gd7`UONP7?U@a(Jdaol+1v*j_$aF-F4PSmE29WalFVF3}h;TmcFVpVv*q z%(fI^s~%-$s3bjYZDBfQrliIVT~tskxNhUJI};dP0=MM?2f9v*dF0tN6cb8CTFFyf z#GCN(DN@AY|L!|qn~*Q)Q|JB#DZ}aC3(5D5k66H$7#^rX}^afwngc?*9n?S!_!u)=uPZI9B3j14okloIefC-;8YPdtQa~>1$%ka{8Izjm88NbQ`?ad6KLde3Da) zj!Oe4=s)B2V;$DL5We2^SM0UslFpoF@Z{|y=eM-~G+RwQ`b3+b;$HD$X0~JacfvBU zC9dvK8*z0CS?$wz-A)H{#3L4*B>VvX&;N6v%v-9wzm!o88UO&p|6ZeQOq~Bi7wKB- zJ2{xU{TAx1|Jy^jI80>`Kj3}W2fE49#aitgKDcE-4`HQIY-M09PcDDq2LG}1f*J48 zkk!1JTS{ur!R&RMhJJ>+b$S_FF||DFMSb?5U;SVvh5sf|J`amY>7sw5e1n=T!bE$R zKJG;8_#07WMARRn3wes3hAvs+gsU^}3;OQ!o40|^g5xB?ZJ(=zO$7~!TAr&$8h_|W z!YoWxBz{x>CyXV&QPsq9%8k@6z~_nTaU9~eRGYR&Y&eiaI;Z|L4;vM*+dhAIQ&cwO zO|#n~k*jsSadfq1@owt$>USJTrH|(4YJJxzaYnau#g6XElBnD=A$rbphDp=anXr6x z%p?KI0HSiSdB#bZsaNzLS}3Q(sU^8`IBEjeg(bcxfT#04o^2iKzat2YtekzGbkIA# zbG7y4Odc<6fVX#)WOS^3oOyY$4{U+|&bLKBe@$;4%G8C*xtRHWfTJ&d(x)nje8+<)2%_J6OF!e7J5`=@gI99lsLL%7* znx|o<=Y<7aF>MhgYEE)Y47y|rz1Z^f%ftT0QJiKe+L_GR7I0pkcCI9ENaSLm>T4Cp zH6V$G`L1vzKCwhPVWoJ556K8Tx6L`0!Gd#+qORU(hl z)SM(JGuurJ$j7*+{hD7q?GG-|(58&7GcvD^h)CYcA9kQpV5yVBKM_{5I+z&8)1f&2 zT=OAPry+czu5yHNR6vb8bRkhE`IgPc6VG7>ojm{XckzhgZD+9?R3K+{2zb@lT3);% zqO3!90`FNK_s4Dis&A%Wwg@3u6HpVB!et`b5l2Qil^K_-TQc^ajMoC=h-wyBWdkxw ziXd7FSM!dqwabJsG#Za=p2v5Jw$ep0NW7>!bOF z<3bSn%R73?@UP;;<83&kIZ4dhGy$QNFrY(dEQGJMtj>&qr>o4pfWY$nG++^FbC7ijWZj#7KV!u&jJkY(ww+crbMfA-(m04 zTQm4J*Cxx%$0F1LnTqElv4EI2K*^AlKG?+DLZz&Kf-lqX=!&v%7t4V{b-9BHnn-z#L=ph?_Ub$K58Ki0B{ zFRC-0Q)H^fXtPg>XfOa3D}~%~iKIMiFt)?gg59tb%(%e~2<_eh$-073q#UQK zzwd(ft7xL}!Td&cJHBOJIBU0i082Sdx@-@dgvkJAue`~2qlJBWOgux^Jhz86BSria z60sRQ<=zL1s%2ohu&5ZXfn`G?rbHlIp#awsX^TFbPjr`@6l62^S_3?Iqrv~<-{!~j zAUY_ap0NvPE{`*R+W7=tY2}zjq&WJr3)BE9Y4&T)Wa|`P|0^sb73pn9=YrSZimOc> zhI*D&H68*Dl%h*FSG(h|$fZ+pTa1>Wl`^8U{&v0_W<4|-ZtuKdi@Hg59>Bpwt!a$p z)Ha+wWV?Ixy8}ItWG}}wQK}AuRG8A}_C~|_Ah;?Umd9h0>GJ+amMKZK@ZwBYKVz@2m3H-~Y zKCb&ArPPgTrAlc-26EU(VfFTT%-wokR#hy>l20Ztp{{G>t4CZ*5NzHn@b!i^W>X#& z(sPm*g$|u@9M}9w7$G%rTvsUcr59-D!=9Z*E?xNZLz>>S+c83iAg!XUvN6I^JInbdB?&vosZ69zT zv0liRB7A!{M|XG>D7Wl}te5|ZhqGP`F)-x+9vQpOODeToH_83zoydVy!di6QQT7A zC}&b4t!cMa@U_h4G-{Q@CYY@SMm>#KIw7b3kX+7=&v4y(c3StAcKB?L_Oh{VVh;bJ znv1LS`ebjN8Gk?`+bRib)~o_jcnVJGK68Thv5V*FlScv>iR6b;G`=gbH|`YNQ3ZwW z8CntKwdm0K2gBt)Wr@()5^#c+0q%#}&&EyZrHiZ2I3i@$)jQa({qbfd#&P+l?G zP3anN3!63htg*Eg`>N`X&ff#O&H(m1lV5&;{?AqfnPybi@D~-*_~nGi{mYozNLwXoDd{OSPmwSe zp`}Xd_p-D06dwQf@9bb?JfKj6cal8I^ZkDQHuN+#1c`D?x`WonBx?3FL$3MgxbACde*U|4$BSNpJ?8dq~RwOpQd3tQT!aaWD&v(|BF1UhaNm& z+dDhbWM}PYZGr6Gy`Mi%r0i^mubkBAWa;kcXm5t5)C@kPtk756)ad9OoGE1L{t)~c zKz$G)Kotce+bdYk;7OU83M5BO*d^Ve3X7CSm8hoYX@Tk3){3f!-0ks0Pnc%L-2B|s=M2*DaocW^{>E#S 
zb#PTuo*E@Vnh!EhSj>pE{M{5XD2oYp!UpvmCW~~0{B?w=u&~Iv>F*YF2VM! zweVSo|AO}>Np#8O$+RgVlCscHDjfFsCB@SYe}b9vsM4j$zj45Vi+-xlozrk|aqCd* z-(WV$x-1sf5J78xCDh0GGgUam`4h<7GK~;iFMYi)4aCLJ!X#o{*=t9Su5GVzPuUS; zX|D~tA^&*N!uiAS6=@G%_?H>(JpV{%#x?NIg8Kxq>wWGCkDPrC(q4@__POr)U2IYH; z{w>oYsZS=9;);P3h#Z|zR)Ico7<4b1DAM9mg?LUt$zbZ+uP@VAnoi1Bp$2Jm9_CJC zN1+Ers~|aWs}%wpuR|i2XcnY#=wx6#2`??uDdTD+Yo%e8BSf61GSkftr7(D>KuTbt zC#{G}iEKgHT)?CtTLI+3k`WB7d46+$v6yJ~mU%Ax^Cy9B!{0a&va(tmyX8`?zNj4YAjg3KlY9Aoi zUY&pt;uTZ63VXGM?2q91-;ORduHAb1Iq?3lD3Iq4`*K(jHF9TzoNuTHorlLye zOB_JFZmnxj#}cLTwLfjk7qao(R7I?T7W}tgq@dxmE6Q|yh%XGree=!M0V`8FWXC@b zn8%MHRud#!IL8$99$m#VPa$lD%|7gti950qP3bj58*!LE48N4&{VsXUL5HldbzdOJk!hsEeV&C&5z7#L)x!IDnadOXS!5n zSeni9_SGOr`@06mYZI6NKPjMf<#L01oQJ zrpC~GZwqw0#{#r8YdoLyca5beI??o)6vK!YmoGeJEe5YZK-#0|H4WjYL^~5p!z3fq!A;D5P%YoK64;A{j== z^qXkhdfUnuRJiZ)Q$#9el29j=g-XS4f#*0QQ8uW~MH$*R;g+=p8a1*THk#2JFlQZw z`)Z^83s$A~J_s5qnkQ`6^%7nE_do6%>bWS$Op5B9ZucaA`d_wBClOhuK|^@!Hu$jL<9S=4<7{Q#vM}3f)v2F@uP4eHzV?qhDt!H^Pq`hd2Vb zYJj3GoQ5M6tei+#J!tG&x*|FY4g><9VYf2q4GipdGt85wi9Q3g2K3i?L>D#$a?+{- zIm#%JrOWQlUKU=26VK_^aZMuY{Gi}%3zY4AS-+U-5{I+x%Z#oa(`aqEudy`JG;z1h zYVynj=7yh3H1#^^d0ALniWE>Rq}pk^L%?iwm!H&2`~A?G)yZlQJ2AuUygKp2Ruy(+ zre5|vA0N!Re@*BoNY^JFuj>gh)msdM?To&Ae!LCJet6DtdwM=60V7ElM_v-4ipF%jgM!sq*mb}sr;q2 zUG=$c4JV0n6%;&Ym61Q%Tx&xqCRbJC=sa6kNe2}+jFiideJk+NN%=lI-i_w2Xz0k? z6qM7_-0Xtzsj4}{+|WI#k_cIh!Bzx14GHdCR;njzADF1~ivCjWv8TMZS>3;TK?Hv~ z*lB3QJ~BYGjK3SJ6k?0mW=s*;Imu2NbUvJUOi|V&^Ux=$;METdM?F40NkP(rHx>VV zBeH?^0QarW0D*+UkatkylXZOfLH7kf#*owQ(q)s$_k4s2OU{Zs0ug(@AuGRU+*lt( zL9nc|rOZi68$vT_XkfA89%fyKw@&8FAjF`jT;6`k=sa(UR9LqIqv^7LgPhe4S$llb z4r^%$E#h=w6Yb8Yo$a+@**N~IvJy(!U1dX&Fz_@_N?vv298IKrDf#ijA8SX@Ja4qgu60PxPB9C@QCg=pDu4+PJ#Xrx=j7nsohY+W|+>MV&L*AGc zxU2eF?-G&g)}`%odwrpv)c;h5)qC!&Z`a<-RES+jZfS8tUEwSpPMPCR1KttCUyp2* zw+?J0N27A<&ntDBH1%!4%xv*o?+%-OyZ5EiKP=ovdRvlGQ`t#ABg@{JzjAz+EF(H$ zcpVnVQOlnHFIz{zz4Pjhn&<(&q2 zrXgr{VODrp`Dm|!Kmrw~mxw*%UMS|%t#x3qo{78>`{V^S01>#^7`b&@UtwGTh$CIF zV{7}%{uEyLKq zcymTDZO(wl`)D*so;Q0vxbk(LKVxkm=#+)wg1ah4>DPdjc9%7cGukfPLd4>tgc64Q zv&4Q=1Y%Tw3KHk$@1lJSrC~to-O;8qmisN^#VU}Sd0l#fXX|-w4)46+L+Zx5=df?$ zKPaP})82sh%r~_=ta)4RO!EYqEiKLVA>yfZZQ@e%68XlOeu7oA>oWj6o!j#@uo)fv zS+TaEfBlU2D}=E=ufQ#T_l}r_N~Ez%55OkY4H93%u|RN5X2L#{40fH-+j9Qblkc#r z{Ky%d4QE&;)PCW{ZSzqgFsE_HYur(k?aTDr%~bk69tg){Y08nY@f4rfG4&@4@F&@R zM!)2tGyClG^7Wul6pY*QQPeGG0xv_Y$$DmxDL!R?FMm`lh8Pz++Uwuqfvcb{wrX_Fu-&wE}OPyxdIr?1I?4jiI6I8R;sI54Vjbp+8qt( zI5Z=ygDL@SBWgI*BZOeI-XAL3(59}LjuKqee7y25NEJ=2c5byn%t z9MTnWYiWA-ZY?Vgswfq0X=EKkJa;o-J-bXY zVd$r=!LcU-mf!Y+1z6n6-|Fy9jxX<;5!kfm$t2v|9CXC+U**>#iLes6+!@IA& zIkl?GehnVUZ+(T(I%0NFP1H7E3J5mqPX1Kd+R$7!MODdhJcuG2J%@f8JFb4Rf*(snQpS)^I&sE;;2O9x3lZM;fpQ+gylpY` zO---X9{_`D*c2s@n3h$~P`zEEmC)*;5$<1%;m1OqbFsF}(H3=xb1IKq6d2VPMGoUt z2GxvU>atIwr9@EPyNDC1bRR=DOS?G@V$FJUfQgG&n!3Tc4`Wra`4?`Q*9_MbZGnsr zfJ$#^W?8sjoE>a8_^`Naa#hD^_s0@eXP4b#7d;1uY`Pm;c&-$yy-NPqZ**6*T%y{c zpYgX)r`ov3Dy2(lu%T8Z0tFK;Cb~EQss+BCo=fxCp&jrZ70k*luo0~A)M|Av6vPrJ znQ@8>bnnFuoKuq)d@k%P!4!HTCrQ-{`6pt#(62FRNb$o)&$e?{J1yYC&6a90sh#@R zPG_zA39EdT-%5;MT&Zn?aV#~XAD+A6pX#P^Mc;cfHT$ygaVPmVivuWm!XeB#;4@|= zYv758TG7bWwt;E25y7{>tM`}SFqS-rN?uksR6vAF`0#mp8gcrFg}t*A4ZDk*-xr#1u(Wp>?Vrq1;_>4#`Ahc3a{J2uyvqa z&IvaQue-bc?7caaN)I-!y|${;-r8`B-Wgk}LyN9PNJv5U3Dt3fhQXFlVvf7@&u|Mo zYkkz|{k`|dKUMrA)Xiy0n@w<03#`NFpnvauz=DfWZaT$+e`>6>7CQa|_>UY(0UpD;y}}A3-`JJqfV9zPKVz&m;virq3@E zbwsvE>Ud~)I0(M;yJT|j*E4X?S^ea@Rz`tO3ZM5YbL3799i~vx2kDi*qHTZe1!-Zn zYATT^UQfu-biIr9-+YSE%p@P$$%E2oz&smw%}{YD(vpr6{<0Myip-#*Ylx={nUzrD zeih*_I>0(TN_$+3wauVk_3*DFzND!hj{DqRgu$F0CVN_k6;aK4&`0HNe~x;Sg54;v 
z&A`>NxHru6%(ibAJv?U}SEV)SV%GPCaT+nq8(hzO&BEQ?h&hvFENWqj>ad`z>DNqN zlX*)keC;(>U|WC37SxpRc*7@uIS%&BT45h~ZAWLCZxjoCZ38Eq24CD7B31%6``W^{ z-_4ytZT<6}ZsuyPX3%Gjz7iY%KJ?8X+cc}IpAtoGp}d}`Lb5;{h8&5;>Uanx^Y1+Q zKxI^0)k6mA%Uc*D;{PYTd{F_zO{&-VQttnoD4$7`5C=o485^V}Tn`QiSH=Y=;LVE)5{CAx};REh_` zrM^!WupLnO3EwdPMr-py7{xl{69%)c7O8=T@%;Tils%vN$Q1QoBhU%*|3NR8+3&pk z&zzi6->|{r_;t;yAt!}4#i?rAyZD8pLgtxqW9b{X>rudhwzP^guOj;Qz>Kv}qkq59 z=)BW%t!}xa_tz~N)zg;3R*w1rH8OKKv}{PJUr~JeXccp>4yv&v)ER*#B&=Ti$2eQ> zM;c&5AlH%nSC+!$EhLE5vNK0O!2PlZD zkQe)BintCqfO(owG{JAcf2^C;FgXwFHdH0vhFHKn;2wiF*qH-gk*kW=lcMo%ZEer) zHq2#N>g}4EF<}4l!po&zT?cwjzxz-VQGvj3fzNT6VQ~sUJmeeLC{RcA5P$-#;Q;x> zsN$2yjue3mO&D_=Xb-vBDm^aC+F=c#@)p*x1`1z`*#FxJ;=ve?S1$FTIFhqxMoqV- zr+W9}@!g|CeTpu{&Kah$a)|C>>C?hru6Mxc4(yXv_j4amq-+-2d$R2wFWU%Gv_mD0 zhQgX1zGRpne$FqLy00xkTh;~HEO#p_a_YlNiYyaY4TAcQYZll+n7RmePXuNgy*%{7_1It+lA=2%oy+b?!X7{Q8yqH5IPAL?6D^%aly&6PL# zTTo$4i(sXa6Scka_W{vdBdAS7>y=H)_x*{A@$8p2KsXTsXAp+{?@i;+ X{SGet2{=0o9EgIXk}7}tjAv>2_i#m^B5&zs7C0*ieeO+EiW#Q z$`O4hK+#mD>7~jt52_xds2N#*sV+S6Zsb}1%0yNHw5%R=iK1L#6AJ#@9~W`b_7jy{ z3H%h#-|XPuJr|Uh(Ken7;J=hj`890-5JRC26x`a_$LlZD{0PIWKz(!$WzDbDgS3QN zKdlUufkZ%lav5`FwPfRmt5r_xp;xg9{4_9iMjH!W8>oPaUCX>qD6^1x;~Da|iixnb zx=mICDx;AMP)>q>5dLp|3`Vs+ia3e@!kYJ{mIv<9{7rtX}vbDzt1<0payLIX2#| zA_M$$4F)67XnEn_Cd=!3=kx=ICaR;kS!81nD@1mTT|L_js|=|6G5SG|84XG!Pu;#( z)zU31@MxfX!WVV!^16*x9>!bYd-6iN8Pk=k)wlOzacIg?K!kU0@AUXh7xT6G;YRqZ z@ozDz-9o4@aCS&X;mtNm@PuB4=*KrUj`3@65Da?w10r8>=lpHkKsBp1{k9dll61sp z?6KIopwrKKS@X3Ak5!g3zNoYpLe z&78Zth(nt6DO7T@p<=W%G#Q0v|7iA16Y)vAuGn00xfWrYw~5zC6jz?lE9+4NcV%^3 zwPejza49lEQij?pQMy;PzvL_}=h;H+LRzicV)imu>uF8H)!Iba`~7@Kp>$|JpJs}v z>wb3sD98b7;&sCW%R5r2wqyMUlUk0uqH;O*SX)~~%SKIMH5A9-dGY>h%`&28Og;;n zOa#SDwsnEgoUEdP@i;iJP%4sQ?BKjQv?g%Kv3UhxyDSy-HSj7SNv(Seae;*dW{P#& zf_b7+vyHc0?68F=I}5T5CbYS7zo~bdg*0P6*zx#7+qy1bXc;6r5_VHh9wAXF{h{ay zbEF*0*)jsG`v>5^Z?md4x4=%nqZ#4Xqx-*hBAeSf|Gx|D=zlJ540gO(z_U@=LN)9t zkoa5>vMZI^|0Dy2Xya55CD(C>j9pVxmUMnkZs{Z>3Jp+;e%99xF30JurYW_j%c@+a zsu%nV%_Y58CCkQ^@4>E;N;TwW%@Urq9<_C;XY&_X=Zq^cfd594$8T%h6K8jtABIyN zNxKVpp|qH5X+eEMLb9vP3tg1g`I}ed^|@(e@+J6;J0OiD$1h7LqbQkVG?A7KN)!Om zA1w<9XafS80=$Bk?$u+I;rl)tVe>J2e7d)6lt}cHE>vqH$xn5E7~8+>UT*gg{f7k;Xu11lqO$|2ntn*qiJt;aYOsY>r?)rUKsN%3VN+&iH)kC>Ydhrvuyolk)) zMZj`nLEvds=7(N?qikqSOg?Gc#ItwCj9+Kp4@o^yeBPKlGGutTr{73h+dJL4(qu2L z>fS|nTeXj=V_P3V%z_|H@8u4^^~f-x3Mbf&VAWgzDXf6^$S zCbf0zs1qZPzKQ%|dMqU6xNN4<`R(~Nm|*RpQ2mV#{sV{9S??h+<+%%%&gAh2!Hd#3 zP+Arh48Nv{kPNcp|+J}D+mTFVgL4RUbqW=flluio)z z$BY!`bdj!7Nf^l5Ii|sfD7cwbM5{*+p7@gRuT8#sM)c=4*sYMls{Vq zL)iA42rAs9SzT{4loLr43VQ8688aa~9GN9$Uv;>!XD8q6?HIFRKAN1#Umvk|w&@k8 z$FV=7hBUEvlW2>Gq}WFB!zBX=?nK z-QLO~)!vjQ{J31^lOQomk-mL&Ht<>AR_p==){^XU?T``gxW?(0xg10!0g1s|u!ai=>Zi1n3q=&Gu zTUtkYO&_&YAT;0~KR*oy3aql02soT6EoR+Sd!}22(Wv(SJ?3(kmD~JFNH#%H3HW+s; ztF(}D{wZic_uJf-w5QLj30rcWPWe(6|- z?sR!?#`eBhe@36m?AK z92Ga16^Soo4#%az=3E^9_;~KH*j6Q zJX7`qx|aQTv1#leE1$6A&cDH!C-Y96zYlFfcaMCSPP%~BA~5Nx#bxYL4b>DeSETNY zLRdDZ&hqpV-lgv2srr^(Io3^-y_qwiW=T8F#PHh%K1#}g4+lNlSy_G4?ds-IG!Df) z=ONVs6b|H6?MXPZ#&@jrgW5x2+RlM1F`$~LT=Mb?Zt7)Jbph8*gO0d{dNlu6d5^unTsh0uZfMD@1@$y(YOsPySl+@$b4&{kN35S_GwXt?@Jr5oJ9)R0* z&0f>UKpTd=M{`FIKruv@L1;ZGT-S_(F)-m7c;e`FdA`I<+eTh_!?(uA7!CFibx8E< zafH>(Y^BKmWc>6V?eyn425~9D{$p5+rJOeI>A~24%?(-EA$vTyeZOME?hrlV`KWQs zxTz&-&%?DpJDu89F4KkZ%>gvC(=O)wz8zFSDtVT){Iv2FW4h00jz~ruiu;qHZ<0=* z`Sb$m{zx`6BLqzCu7B99i=<01*yE^}h(^w{%_ZuTtV1$$yEymJI_BsVD97B@&3+Ue zc{p@Ew6%!J&gA@rQ1MV>c|O zBJ7rS*`KnNJ0+1mMth3lkea2cU0uh<)k!#NM8J@g=W9uYg)s*!P)pPRrDH@OLe3kb ziV;E(R}(hYSx6Gl2Y!=_6e$t$)9~l+Az#nO(=Y3NQ%8+?l$KU5jiW59v#C5RwBaw7 zstrC&TzpI*prj$e@PkN^0z>M#&hKD8vaf 
zB}ooJ(I}3EBICFKu4;_qwN1j`CM&jWL_@n~Yl*X4?TFlhHUG4+AqjlQ`BQzJGw z;n~q%_>g%Kxo76)7`-6J-1Z%~4O>fx{ToJI9Ca>P^+Ct5M?Z=(bfGgI^rNSNo==A^ zpoiR5W1Jb%n&m(q6M3Q9ym9p~KyM9n)!SXL{%7Z+nL7KT z0B5p5&F=1Gz|+I?X|w(7@k~4Q{W0&M_@&7wECGxZTKMzv;5ArMVo^@=)y}&X(9#)>Y}Krw3dev(eh%ji-4)b^^>#+x%PD*^!m!&d`U{DZ{=ihK0&wm1NsydhIyM3+Q2G4 ztASawsFhR#DVc}Tq1-=ZlJ#U8k~W0JP+?wGeV>=J1?v^p$UK-ut(EXmk6^CO9MrTD zh3U~Uz8)7d*1Ip73B#MT*!$iven}o#nb;C@OrU&|7N9mNp)7g%OQtf}bKnBizlH}O zfEe@G{gQ1ISZdR%7{00s*hB#q6#J)iV`Xi~cz2cIz+ z>5B9}CHfk~)g}!fj1`Dma6T<}__84b^@EoI3DpT;D9G~OEht3plW!K>NQL4@Oy&#! z1OZ&>$;J(cBa|U`mw~`5oeM$GV*+|O!5#EQ0X-4_B{P8Jve$d&GIvN;mh`VB@zsCG z4x^7fCy!!H?^32znd-wQ>(MP~Y#E$?J%Md_9{ee;^N#e2;x}HdXfB>!kNnw~*;|E8p$Q#0^=ZP2gv}#P|JUve42B zueFdlnd51F4;@KXyD6X7ONHhu+$NnrgqJ=zv5d4)Ybe+%E;jpM%(Oj!%A9@@8ALGo6YVE|7hgB&gIjnSRl7<%+l);En zHVHfa)=~^rkk#<6cF6}TF|HP*<>_EAX$x?v%B^C7<>JJZXZ2L_bBzkw6&~n$1mAbh z6VeRawk1Bd1tYVz727OcG7jnY&d`ji8tEi>HPm&sxscR5*<`(ukoJv#$U~~h9Tp*By`&_;d z)Gl088JFKmxJ^sL2M&DkUk#+O!gY*)epH##CpnkibcJ*g=2yO|@n{Ve&v55axBX{% z7a6GI$3W*oD#NsH@cF|guYb~I2LrBuphBFh&L*ZGcel##T&tDHDxb&J61S~?5BpL; zz`xot064&ZI6m;60rjiD<1g`d{Qa-ZxW7piCo^MLM_of(2M0r2W0T(vHdswR7Lx<< zYr2*^o-Xd{%t*~uBOECnd~7p+gT4^D-K@D2aG|Y#y=1F?OMjL8=SyeJ<}}B@69Ln% zQ|n3V>3r=gprnHnvw4f-msqofhqi9S^&Xs?RI!S2~CW$xo};wO+LjzMLZ!k{Xc9U#_rl* zwvY6@KzlYEQZ@kS7pC@UGg2LDT`m3ih6`~P*rYGlNyS{HTIe7F3a(Yf)Uyf#^i`#< z$->J+=nEUbWvUk?lsOH?Td^vrrZ%30a$n+aD=z9j|9adiW;jPF#!WwYC^w0b^EK>% zxZ4`}MU=)$O=1-&&hsWKe}}Nd*BHJPIeWCjActpM0}%eopSWtH5{G1c z-J=#%O$p5}6-?b{$&Bs>Eomq>EH{cp9ge-vMAJ-3&-OV_hmi<0q88Q|(j$iaa&kyn z9YI$VI#iKbv=gfOHDpEJjzxwu;bL)h4=gS1w5SO)|FGXq1{cq8FB8_O3q~k~`6~Fn z*emzRGadF_o>k+L=&`0rEZ0EX2ctJDpD}6S-~`JD=Vkjg-pS~Uu=2j+AjH3NaR|Dc&s zNtp}J{SAHhEf>kpy1{;wXH#LO`kHU14CYWIO>3I=G`b$wO5UxmE6njtDLEBy#R7T! ze9`ADX86j%iucys?A4k)3$eM;-Jb6C{n+whxZcpS{~5gcx-I_6d3fQDIpO~P=5gco zTS!2CfuK2t;HF19bCEq|Y$9J+DX(o{_h_14OrW&R-VbGkfRk)T(ZA$5E~wk4M0XF} zTN|S?1uU}AMU&m~n`z)j>>jx#UsOrPm&UA{X-+(eqE^3 za~|$L*pFThoXcr0vIN7cA{FP@Ptl=XK}^uE)4pt;-Jp4Ce(|n^VFnu-Rp+yIa})aQ z;ng!#qryB4{?!`q1U*?!mN8=V82BX@m~jTc8}+$)cp%Gj9r`)`7fFoE{xK_ z9tN#Rj>Ol1f!CRExGVFX>8IBgr4rVU@Hw?VHulTfM*x2BvgQXyfrSbKP(^Y(bJ6RU zM#A9tnMNyvRtV-WX$#K0s^4i`9in>PE(s-jOYA>;dH+QvI>Hd@Tqa-Bs6ycs^~aZM zX+-KGF{@1<1P!uJ)MX*^J`hkINUltK@7R zYwW`D4rJmYntD#R)>JU$4?1itT1dQ#(~*D#J|MA)0zeBFhM_&OqG2E^TKAn16cqZ% zpqV}wwgY(XpR4sxexiErNh#@D9yS4WYBZC929^s%{kbU~|3n;)45`2_=H!7b?76|u z3Am(?3!k{kX2`^iz|nmzcyQ!J!^k&efy9BOZ2+1$u@(n&=3or5Y(>^qGS@W$V=7Ly zGHAQI^CpPBtUxcg(z;EV1!gWBVSV?ju&=qKxYBmJEB~bQ=4{p5VcI1{1O#(ogSl*C zw-pa*;Dm+O7n-Xje{Q3swlOIj5{=_aP#||(}n@QA&qGA)G7WR8DRcMF?pOOHBbpYW%aP(nqMvvY90~9bFv~% zD;Y$?QoAqggY69uW^>j3b}aduV~g5%F!}*booRvA9i(0EWiDnd&Sv{{Fa2)|C>+?} zqq{8arRk?pmQz%&wdLofgsXt5=fU)8#z_};E0}4cerbw%WzZI}YK2+8#@%VtjYixkHJ&oW-8-+rc&nmuVN`I=R*v zQ}L@Z7-vkYONqjo?x>PYpp0I^wx zKmKpLNju^>tEw>xBh9{n_Hp`RJz{2+1_MScuvOR&PlSPj1iw^Ys+-h@ouKL@{gNi`3XYdRzBON8@Vxb6sZ>yTu{ZQN+)G4u5DJ z+Yy2h9L$lID*|4((y9Wo0QDoA*2N3`NXD#;8$`5Wf|l$;nO^?j)0Pk2tu?o`@uJSm z)X>!c?YgF^OuK5 z87)DFQY4kAS+qt`HF9d0 z3~JhM7iu=0vqfUdQ%@`8-P91ssfRhx3NX(K$AF%d3Bo7eZG>j#NnbH zDZ^i7Cvu;>^J8LpJ>CLLr&Y!Thnx3e=j1dn+Lr<1JZnqK!Hv((I5;r$VR!Yc3ez|> z^YQp+sSmzUt6EyR9}AiAwkYU@%5*1YVe~mhv)T3b+3tz#dv{xVyQc2}W~s2BR4+oK zCtQK+<1L~aP1vZ|VUT>`(e3v+&UJb*STh%^a8%gjy#p?)#SwY-Uuapt>v^kseU=OA?!Cjt;XtHlIn{#YeG zY^-9HzFAdn01Ma)r3PA8NAh(q>64Mx$vA@BH78a(iYHGH=CUAv`zO z93^u{9|7pGMm~;2+0PY86hR9mBlaBnB3Y*|RiYvAiwTkoNwUqDp=HM?r@>vu4wtTv zi=>{Zpc!iJQa3@NU>Y+(i_(LNltzk7Oxzu7>P2-6Z{!98wioS%V9!28C{ZT*&78=W z|1-T)IA;0%QF}#x)*nrS%2r4F1w25p7%?ezKX?d0%Rz0Xhk&R81y-vz5E2~*1&-3x 
zsENRYQ}gTmUi*saXAFQMcuARi#j3I2H)&00X)szfZBkV&g2N@IH3n98@?qOC>+pj(W9}P`0xU-Lb!np22=p<8MyzY zXX4DIUF$cPrtO5z^f8H*%nC)u#rf-Nre;x-hB=cA*jE&q;cZ|>kUrH{a;UwtK1Wwr z6pCfVwjP9Ow)(~h^OfN!u0 zi3IeKf0Yd1^SDaHq)!3tpk4BK~)sQT*nRw|)70BGUo0~6=UZtOauh4Nm? zg{2of3h43c+qz`<2tD5BccO7}BDYH_G*~X`dq|v-6>DsV39BZ+3W0o0a}4_|WK(V; zb!4rxrmI*vpsgnZ6y^7+R4RL|M`**PZ>*?F%sgriVkkVL?mdMEDh6~+RWbebT8Q2% zz#I`W%-REoTSAMWP7N~TvTl}i_EShO`~!tkqm=I=A)ias?9ZC)s(>D%z)Z+cU$%6U zyjH4A<$7X%G_9S;=5v#72v>AFJz|D_>F(4jLtW&21wm5cP3b6MB&O|6;V9DwcE#Ql z&A_?w?E(=sWBgt)&5YQDtYICN`o`Dq&zGv?jatxm-nP3{#R5r~LX91{fe;R(GNr)$ zhu4W!30tPCfHa`;Y`N!I#fP?BQ&3wIxHE^lu~?Lcb7od)3pQ{hxMnfoG9Ig$rd%pQ zGNZV{YqOLvs5Nw-5JgIPb$w-8<`qCuz1V?w{E6lz8;G-SUlKtQ^H5P4A1r{$qr+LK~ZOC1AMw3? zGpzG`4TUkXVd>joe(pLosXGX84L^XaCZgz`t_D~wLa3FIQVoxUvqKmvF9{lQMgl3dv>IJ+ZQM`OwppLS&t<<5T_; zhj3~Khgm$CqQEF%Oo7^BRAOP%b1*Syt0vA(sb_R@G6l2q8ZVvvv5L$dC|p5IoZ#Q1 z23_Rh7`1&%Kg)c@gmA;g_|itpQW1g24I11hFY^eG$>d1Co_JIGem!Jow#ujkyNNa= zPE#U_qe1U$=XD9|{g=VOgYhHfG;z>jw&1bfl)+fB$0)A01C7yV>BXL;ahbbwSzSZY zr7}nl*Rog1X(U?_Rl01lzuVpZl%MdySF^Lg7!t<(?XWXRqk!!XR`_QzLomiss#9Eb zTiim=GS9Kj{VL1*$mSypos8pB*M~#7I|=sp8qToe)4V(X$Uvj%%)e_KEyw!h?HlM_ z3Q0`@lYMvfJC@zUFF1d82;gs@dwov7r}PH64`6v{PsV^Nn^2Q(%MQ}ew-UBf-Rw!9 zE}+36cWCZ6*wF_&MBcZv(q11NXPb-OTU{QBMWIs*aKzRX) zqRc{=gWyKwick9s#5^Uml`xV9BwHZ58$&*rdmq?gB;ei_I)ex1sruH@^c?9Sox_v= z$_rGOd}nzt^gh0JSHBt|MOHR&ozy`ZianaVD{%yyzv(S=MFEV`n7%#RX5sj{PY{r* zho(D52*h`!OukYHR!8$p3ttFsW3*>ndZ5{$FFje^I-wjZOutfLt7;vC%IlcSeilJk zeZwI7>!sT46-`sY6RzM*W_I%~BE7dX5@25?bD(I|3Kbqq7e}hftb4AOywtRyqUm}S zz^jecw&$CoVNE@$HkY}cTY1QMK30+t51zFO`Z3hOM zA>Q%$Y;N1;>%6fuYk7iH-ni1{Q%|4@PO{z}Bxg<5Y^R{Ig{oaxijOEOL(h!x6yKQ# zc;gjTD2~LV9333#Gv%iZiL{#UiXN^RZ07X+o1iU6J07%r)?nUh+&`TwNi+W29Q%$F zFWEQRK3`rp_nIf38OJ6+gKs%pZ#XHN(CCJKjZ69& zHnIEn>G}I0>K+uop`Ul`{jZM;QIBiMlg`P9&*0gr_K1%RiJXm(q4xFI*R9mkoIl$< z<=7>zp=UV$|Mj6=SUmW6{k_onZL~|@@!^ZR1@lwdS(~x(1@_+;f(l_lU+&+om)l>W z6zl(XHu|3nfv%ymgUf$>na#hAFPIXD-~T0^~0pA5# zMUxY`0GIn+A&;;SLEI=n1iIYwPgHX-4#iK1{$?0BSy&8@PUTN1|71iC&Em}HFm~iyTkOUa`JQT<*!D-KSj7wY}l`k*=nsB%C&KV%Y>FAY=gE7@8d%= zJx`@8`j}Q8{+Xo6pCZ?!1Uh&P%7rKYqBc)vc?Sk{W7A^ECM1l9(Xdykn_}ic+?RA^eQ`U z(~T-uaVTxa~xbTyAUwyvv{RE6?S`p&`~g6EfGSq(am zB1S;F|7hFN6-GbrRVw?oJvIMB*NIFkM|wlRo*xAdB~U;NJoh_HT#X?1g~Z?_4{?+@ zS$o-0Pa;ex<;;V%Mgrk7)X|86f^6! zsdEoup~FM;oPVC7?2;(gAGM3Nco?bsxo5Cg8=s`)+1S|Zat0rYwo-B}H7Shy!M8tX zNgijNcjp6FaJ&9KTPD#4H_h(v)CpvkRiVeMQwbxa6;2!vN38>?5q3%P@Y`@4;O;#$ zO3T%eZ=>3Mwc76V2sM2#*|8VhSM(szH{Ub3_l6g97d)D z@^aBEo^2&3Z{Pc#p=2^-3tgDTC$`qRBLuRt{s0AXW*)_I?qM~ZKxNk@5Q?es&A{AY zaJgd`00sH~w4Qwp)*i?u`u+}M-}K2FliZP3*P&sNGn2!Vi5@VyUX1tJ+V#!0rneUf zD&&~2`*!@Yeyp5+OV0;GTSL@NwTYXdnF8R?>x%M$w@m$#XDKxY;??dB02EGg4Q2h% zHicrd)#01uqwskW zOO`%x>o@-C8`cmN7fJ3Y;-p*R$X;qEY*nR@T4QfmGJeswGsK$+DpA6Q_4NMPgr?ya zi#Dwl^$KxydM@e75Ajs?q=abP%)*P(<$znPzE>urnhrn|`R?q8v~vvy+t0#9`EbZ$!>$LF;RMA}5YG0FJlCEMN;Mj9% zYt_dWWLr0M?2x6<`?Se!w~i!Q2VvH+u+hY`N;u^t-JjG6YkLCX6Tft8QOpp|=TcLD znxG%&RL0_RC+<*xhyZY!DMnJ1Wi18v4bzUQHt0tK`i?wGWyxLJR5=&%FyZ_t=D0YB zd!UCpbTqECUVwENEW|X9xx+j4{-;$;K0j5iqMnU;qQz2s9*V2%ee-4+yv?zmV7jo? 
z9b&2$lYjmGZ+OH$eFRGr%Q>(QVw57o`QZoI_z?MTN1cv-?3NBP&1Ll*;Y-Z>3uC9U zhH_&wm>eHX>dF$(cnwFxT%P@czKpJY8W>jwv)A#IpAwi{Dn2l9a@d&*B3PR2LlgGP zyUT?%s)h?ooFmd3iojaZF2Bx`g~J1r4rdX;xruW5AXL2*LS3n*w6!Dx=0+$JY~nFU zR3<8hu$kyUo)Wenwux40ewe5w*H*iK7w?vaW{>jOC9qGx@L+C@efWXah5K53iIcO4 zSJuWxTtI+J=5zZh=K*3IJbzfuhw8FP`7xRFu9NF2Od;-mM-P@c$XKHJ+4pW z5=Rc?PO2cBB!E7Ic(}q}0$KnJ*er1%PV66KZM0lf0v|xX+bS5Of$qNuc&6k9ndyWN z4OB9-p7!{8G;!r}0R?(N)$M_yIdFLtdNy>lbanIx&?H4pg1Qm9>C+Y0oY}b^D4sBf zpBmnnx+RY}EIFmk1bJ`kd^9hYdsNx1MNlfKA&RlEP{an?H2HM)nDkGF6!C2C=D=eM zoGn@Ff+n1?SI#f`>FW%ZSI)+URLyl^LbI*MKk}l|b)oa@35B;ibZp7?cIIq}3y!!0 za%r@-ob(f%k&tbiatiSh;Tz-6FAiu#TAIbxKsX7ZDJKzS*p&BGforn>rK?AQ6iPN4SboT?fBjik{4p6>hIDvVBZgivspw!00}HEC|V+ zppgjW0iFA88V>%~pW}l3_c=(anVb5C7sgGIf9^(A_!P;E$WTc`>vX7KpbohV-X=oiYR1U{m zgTwVEmT>AiIJ#KZlr712@0&F$xO5aD>CB6xxVSa+Heqo}q25~cs#)dZ$5ln3%}F7c z!CMrwuHpf*-btuAa5qNe@X+lo%6kB7HAI`%&&dwdbsCJ4bb##l6pkI#j)!+K0sUia zj0|yqnan#Wq=$4GhG4DQB5;knfUJndB6Jj(#E`JKr>bK=7Oa4h*SI0M=bnn!bj^gknIgS z2a{?Qq;J~%GqqpJP$WOqWe>k2m_fZnl^h7V=1}lVLUVi@5pO6U2+xu17mUUGvYVHeRu2e4EuU)J6ku)%UwT3;32P-Lkajv!^KU9>Cc7XwiVBFE=>q zr-F|)6$)EmZH3ePu$W(f<2?DUw1hiMkU}cKa)bZZJOXP_eP9yGJ;p&Gt($KbSMFKE z|Axo>6Z6G-CtD))g)oTdd-i#Bbd;Qa?e)2hrVO~9U4{kjDkYiVh_F z=846C)7f{nD-!k<_Uvmb?WlBj_5$h+?w6^2)s87#>G}5Y z_2~HUF>f6r*wTiwFMf|{O zVRnxCN@Jl}LyxD+2({QKx!IKGlXbNjdDvDXoLpS{lKlBOQ{=U;@)J3HCrD50*i z{e<{1j>C~ZDYw5zv-xeUNg$K9Nv+L&RPIl~?>Kb4ro8yd6!%w{E9u!*+8nQKqm_#= zDU0uQWH#E>Ju+)O4tqR$!{Q=pL7A36w(<-0)3Dt$^Ia=E60V;e*-lO@YYtrR;UKA*b1U(&AHo+ zRg4qLJ~~OEW^bK(cwj#MM)=frZA9PJYcb(@=Yl)p`Qj#dg~>tH{rSX!Oo4EtJRT{v zJ5{{%&l(8o?lL||sHGHfSxZ3Lj|Jkw9g2D)D%E`a{6&2Xj1GaiLyz!e)mpgc7e?4p zgX)F<27KG1=z0hi*L53`G7H0ALQ&f;YP*^sZZbYsY&6fh7%JAXcB1@*t3v8pQMIp8 z@z!@86lh0BT6T`j44C%5Z`B>!v00HCMLCYmtUBOECiNF zdm8d5ls1yBt=Fexqkqf-Ko0(qf6%1+r>;+fB)#I#N7Q@DCj>pLoMB#kd52!u^a*z2uUpQ29 zC78;qfwip3x;Myvhg%KkVh z*!Sfkd#QuPR?iRR+o1e+wMr3*Y5eHN|L?Qp{=emfH=lhoK>x0J?ePCEDEm(Kzxm*% z7Dg7%9>1fU?q4;v)rR=1rUcmW8=@#9eTJWdHeoN=AuYH1Aq`I#glN?rNG;RCQx_7H zS2hWLx@Y5;pqnpGGJ$lqI6N_DU`~%SFfR~0E3&#%FJnyHu={teZY{Iz)@GlCC7MlD z?`%9D&W;dsjs-Cl>Qqq$nV=sgLr>qxyCl}=1hF-!UW7bHOoi>SbhBrEpx{0`OtgB; zDq5;ofT)WMimO17U)#;8f3?DMOF4;JyFQl%o2;}x5X)L^fJk=(&JxZO?9BuN?%~QG z>E4*;>EXrF&TZOb+4ZKC%hD8bW;wr}9#0Ra&j&~pSE-$HNsPg!x9$`yEuCvjyF9rW zwYi1^E>)|MRn5KZYKh(KR5Vw~^m3`rLrs4v!kA^p<&81*Y}d8NbbynC$ZXx6hRaQ* zm#>GH4_3gq*HyP4_wSq7?q2WDmxq&^k7F9I?^K=c9uEf}x9<-yx7xMi>DsH(%Pe$A zWO~%0@K4dCfAWfnm?8qKov1{Lc~#wlC5Y|RCb2%{E|aT))t9q^T&-+^cE$f;=BfEh zk+v>4UgqFfQ}XyC0Y3}mTpC@GqYXjI*`Zma*QkigQAns9i-U4(BkfJk&CUS(@OE8 zfma7LlIXlurxBY?MQOb|9UqU-8nX>^YZN&7ItVUM%+1N^h@$G9sIwN%rJ;2=&{NVK1umX%d^I|`MKpQ_SFK>6kl5Lh)K1(2%Khp zrj4|B)Pw*wra;2LjOGb9lwD6>ow)dIc6*Gn5C59rjQdxGFOzr?8W+VSTH>erduv%g z9w)E_BjGOEXlc9`#cKuE86o7nrH^wkM(CUbhs-rq)S>Jf#CwJ20Y%d`QP44d1+WDq zoSe7NYU;0q6c&M?kU_V!FIgQP#`xMFEJPp{hCOwjcVp|8UWxiQ?|a_X5l!ORdDL8v zYNXCh%gVbTfZk+tG+dRaHXYZ!?|~(~7+!&u>@KMwO7&dgHHxlM^(3NlTrkyc04Qcs z1w~a06(K?fHpaJ)5`m0JQl4Se=VX(fJ~HSaZS6%%XFW$jPi=l4h4SnQGKc5CL(To0Gu&)3lKc#peLq?t+p?qcnXT@ z^vClB6yypC1hPJZtjxmf52SMKGll_BmvBY_;J{6N(eS+K&oLiYFUTy28oHbl6o#zkVi=59&f@F9 zyKK~SgW|q9JyXm`=OQ00%(omP#223( zd7O|akgr~WlI3;bEq<73d6=fdLfg4NL-tL+c5k>N)fTI?Tdlmnsvj4Py^z?2nll0D zn?(}ISZOe|*do&e8IdVS_Y;2kjN`du(ATrGvz#2nZ*0l}45_Qr#u+M>(Nldyb?fau z5NeSj?VZSRuckgl$HfgBmzi0I{$uT*FSPV)@n}gZR7iH=>Q(~nq@Q3=9o?;GMu7@e zlYM3ODughM2)phLIg#rUu-Q7y7k$Q5J%B`=*rUic!sQCUmFP>dRmhCv z9|q*@(LIOiDs?694pwKmn}aFGZZy_C-syN-kBJ(o7SY}+LGrDB@N^=|kXeP*@{V?f zL}1z;)cgX0KO)t(17)MjA=u{o^%iMYB!&-9LGR0-U>$f1Vl4Ozl3 zh#2Z2GFK#-Nz3AVvgd_uk@RaZAn-0F*ve=I)=MBz4I%6lY{sx_kQ-VZ+-wl8G?e!l 
zRM@wIpkt7TN0jTpIwef0kCSE9r42S>EPXEV2@tcG{g&N`)ITKQHndO^&~d6~e>!;A zKuO-FJ?V4s%gGoBq?@45ZUryB6Q9AK{hxAOQjC1{VRn(?rs*j&ic%LTU)H(cIq7L| zPx>I8r4Nw+QWEw;G-=Z8oUxh?Cp^YYY;z~x;O&(-pYF^V))@v|#kzru*^5i|Hl_i^ z3vcQQTgMJD^0}E&U3P$G1PSQ}$VYZ#2~@2-lds*l>HW~i!7$?+X^I62>mDbTqe~7Z zpbMsuUUgh9(n2-cs+N(Qbdj-9?(2lGys}Qt9{+f3fE8;^od6d4n8Nb-!}MbbqLhp` zs4*8RjkrXgBUoCzQ1a2fqS>)_s4{zp0SS?6JL3{ChJ*?a2wy(ZO-j~Moul4sMkHDv zEMHxnV>8`~h(3Ih20S}fU%FPLgel${%8lS)_P^Q^LxOn@chBo+6rS_mfD0CA!>DG3 zJOe~+eH=t1B*=TKT~?&y*K3y>KA$yN4L{2HVps>4oG&*HWU;uFIEenogX0a6~ zW%j2)dfgw$kQ4|luDmgg5s;)J242#n&A0tPXM7c}H0PjHFj9Ke5G&_f@VW?{m*;-V zZ9lDTJ1c3r;eXWJq`C)N^=9NPOXUIvLCxmy7=?v7d`E2Cha*=LXfsCBtUWGuAos6_ z;Ul@G(tq*%u*aDT&EqcS1WnB6$e53YiAJ5_)KWzlgvySXQAY9cjhUP<%LF0;Ys?){ ze^0)@zx&k&aZ$bD2miY}T;Jj<;)+Q&T_3D*Cf|WR(H~4AG+IOQiEWxZt}+XE{u36- z%xiKX=gpUw@d=@e0t$R?PZ8WwdvcWB_%yoj!xl$K$Gj85j8yUvtsHNs?W_Ma(F_eeNpk5KMu?vnP=;k<#@0f!Inh3>Nu^up+TQihs%gjlAHMX*xC zLvm0HUcq|syI+qF$1mgVGqa8b#OK_0aq(T=P6m4J@ATrJzUx4_0$NnveEo&|r%==N zy>3Px(;Yunt2wPA@TS9(EOvW5eAGWnLH>Ytd%r_;BMdAZx#TlqR|f47l0#~ZXgk9O z34S`VSB855Z92G|A?SiPU12wP2ij#rZ>IPdAGmbMFn>5T;JOf*sqmTN;GYTRAyWQ^ zA5EvH%>jD*Fy0nIx;7=RRsZ*#m=Z}E-pLM@W#1*>9+&R)~@q7zYoZX!ndcg z%d<01+^^B*9TDd;63ENR4g0Ui%Nv4wViiZ(_ZMOLn39<-D~9aboWDz7I06?MpztPmr2F`j;zvl%e zPQRLZT2squvklevwU$36OrB=AiB^QQ8rv()mA4lU0=! z?7siJJGVs>Gh!YbpSfB5Ki)eZ7aOA{8_o37TfJu#i<>Ey>0A^Am)>Q^fWD}YaI{Mt zsU8%^RLbjAkB4qsq$$t-pm4@jR#T(aQ6uZ5jCa*=o3LyNn4$w*BxYb_Yezm#Cb;`J zdiY`jjixTNnleqKQP1Y6-6Y6E7&Ln9)|8a+}eqok-y-kZX_QCi4B1-+b;y-q(wn^vjV zwz7<9H%Wx32(1=&BtWF|8p`?cp$~ME!rUdlux$n{RJBQ*w8%ex%A<&|UzyI)yu6>u zWk+WGQ$JhHY1}C0TUlT-Ycze{%q;4Y8|hSH_U>9U_vv3XqDho=3IV=ad`(CY-~x= z6W}^39MW5{yehhJw#GFC>GTM0;LC$nS>vQQM+P$$Ej(`Ls^rhh zAQ+ZFfrM6{t9Hd3#?)|yvbgETJDJ{!IPEBmL_)z+m4I?hQ$6{&vQ@fAcIo6z%6ztu z%|UB&#t1$(+zMmvB!|aZtRUaC8sq?!A{jJtZQTy{xN$pjzkxfm>#86XC-ezoF#Nhq zpM|o&xN1yQf_tH1#Ogto^u7cDU$kzYH0c}lgLSz~D2c`Cgjo{MejYMAUCGZ&NVoa6 zdyHg}GYd8GXes%hE@5(U*%6b&p$-zK<}wgy1kWPLlnOh>%J}eoilR;ZS>TZzXyymK zYM&4q9v_g#b&;I@$q@?UyLb*pKQJ}m)*~Wu2QeA4k^+5uJa-7XRS#G%BeRMFD3fZ^ zeFAcvqW}&yfETKTO+3NFr=CoMev-zF!l+40Sjlkq8gB|hIQ+bIlueBKbUTApqug_| z@P^VXV}%nrM1$4T!z?3bEEm;t-y&j0mu>^Ll7qnlZL5fj5bCLqp8GlI3xqMcnC>7I zs9yf?IAVv;kDYunNJ)`rd6N{HW5FO^i+(Rf$1*mcqZgTT= zwyjHtC8ICIuW8eM3--_*${Jh52Rb(*EGqG9rIN@YhRggC0Px%jEQIlfM~r-!vpVI} zr`zNUA{3I9`G$lkw@iwH5_w$wD7uyRzx=7NdaO!3fAhSk70uBi85M2mn=mWY5O8X6 zY<5#JuN+BeQINVyq~Ft@<|j+cX^Mo{G)%@Ult^e@E|GhVTw|Ur?m)WyrZiI7L=L#o zsFrT1nX6rrYWyXso!AjscC^LXU!UrLqsD<$bWG(rOIPIOL21*rP=pSAuT3M*bYUUo znWL%OfxOPLIC(6HshK0zgA;C}n)Deo0A}(QjORl|;CqW8JY0`5<`}N+H>|MYK5-{f zT`eV65TK6-WFmaWy|q1=%QvTEx~1d(p%$qwNenq#Qm#vh_Z#=~xO&~(Fyfmt|HNI0 zXin4p9s_VTyA1h97Y0b%2|8t&4?~`CH`b7!bFM~3XYx+6? 
zSD(K!wznCV#C+X6pO^TvEy|6D&8~fxj@Fhta_2a8)GG~YETu3?^#Vp0<&X%u-rJ(rN{}Vf=W4=yq1}!*7``K&NG8Nc zb{6I>Wp1@P-fi zht?E(#W9nF%}oaQ%;`=jmH&x-?(oJSN}MJQ#`hHvt1Ww7_p7C!EnXYDmr zLu%(*J=#T%={a=>T$GG6f}H7FXw4Ws(h$~R%Od5ChM{t%cNWT%7d(T~P05wIC(#C- z>FOJ3AF1C}AiX$C<%5X}uHxY){zkHRh?x{PvPQwy4yA=z)fg^B7&Ok8p5sQNg3jBu zsxK|7WUTtsSYMkWWB>Xbk486L1jqADwkWa-lVdx52TiO-97>z}Zo~{U*LL7&14|UF z9~pIG!I{y`c)gg5GA4(6Wyt<0OOQ(iK~b>>EzT2-dJSgA@TheoVzDh}FrwJ)ct43V zo8@U{SzAWDF;_YHBu1>dehX9b6)FVHzIa@X@TrYXwYU)T21JJ=d&Zvxc+zoi#JV4K zEO#@1l?RBGCA5l271!lkG2R}Yw~=f>oVBr8Y;b`y^8o(6z}`4MCWgG;2Vm*VHB_`^ z+L&*!ahJe_@`tN`aBP78Zjc+*e4R2*jII1-FGd7pWbk!ZiJEScOl`_B0niKycg0Qt z61VX}4I5DSLJZGOuLV^bM~vjbdkxiv@u<7$vpbC1`*LA-oQ90Yxg9{H&(1Z>1+aK) zz7y?!+oK~lYosSh)YBFWD9{Zy zN_E1=`%ml$d>zXHTEdLn(q-N$FBgiRm9Pt`O{X8ZOtB-A-h`2Z z&mtA<-`P;bC!Ll7@TFBz6a;=E~wcaO_PYPW7c91xZ z$@9HMbFbH~wp{OCS93!$c_EpqnC#CX#kRupWw-_;PN-mg5%SNGOEOStTpkPfvW1SF zXtuyOu=x;0pn{5g_~@5k%Kh$o*{S2-kvY=G=O(DR2#pi67dLu1nH4+p9a+tb!y18l zaEBX7>058=%$$)Y*J&rUk2bl$@KIJv&5^lXA5-s_qUoC79Mt=^*m}zx=O!+EzVxGg z;l%lbdY&Lg`Bf&wI;QM9I+gR$iTC;a=r~-03PnWs)ujA(gCpiXC)ew&CWQq)d1r}^ zhR`W)thpN9zfv^%v9*(|8 z-OSRiYQ4VU@?Lvp>$rOU>gB)=S*V2f=B-n_s2S=;fU8zSzZwI3xs%?gzBK+V#+o;0 z&;nCoz$4nG9u=BYS-F`Bb#0y0<;)nMDs>2C9MrAmsJB1alk?d)T)49<%@!E*wl0y| zrryoqK=hpV?h9(DMH9zv*b14htI5j`z+AQgfiJeXrOB)fEI8qo4ka9a2vH?8`y zkmibJ_Fg2}i6$vCVTK*FMiW**i9|QTn#s`MbqI}Ca!JFxd8O($27_7c;T7Zr-I;@M zWgXOWJ_m%Qn=XdYz3Yn%JMCRc1m8lJT>Zebnl&x$i2##Bdv%)gk#+!wNAC&C_-u|n!L#CpNBahTY5VE%oRuTnhTZ!~ z%m1gQ^7C$`x2}h;%DTb*5gyQ-{bk9UT0jRa>J83~)%IiD4DKBw;s2X8>HCT4G_?v- zYBMzeI^x$P(e3rhH&BHhS3eHy3L48d)|CIYA#|j6t;;lhY*h=5`8v5i*V+>hXYGCW zUp7YsLX65XjH6sipa1}&zkXnn|IH6<>SAl;{G0V?U`?l|XJKpMtf%+e-WIFN*d4MW zbbqQL%$T6tXWPcoc#1dvv2@-?acGGWQGj4(X^0Rn5L9&C(%a29PE0&oQ_d9P_GWry z|H6WE5xEg|iT7??KC*y5njK7vx0dR`C^|Y0HP0LNc4TQ6j20*-Aa%(IB*B`tqnfxo zEz3yg<)@^TbY%KT@TLb_EVTT%H#24r&qR6Rm&})_2KduPggG0~M-?QAtcyezNqO32 z%>SZomt`AEf&upKTZCXGGoM-D6H;AeC01q&#OkM2U71@``}+C)m$iQ;WJ~+BI=|yz zLtCd_&5d85KXzKJ37jM%Bppi{`_gm39MqwKbBw}?vs^HVGcllxX1e=pqSfD0Oa-Dv zFG@fS#hRw0OU+lZNzpK)8>~I&`9x_uHRJlsL_IsTsAU+A9-16qzqb$Jo^4*<9ok&9 zSE`3Q@f*C zVNnm=F{j@(Ssj5Cp<3w!3`uI&1T(meT=Qb*g^b!Ks#t}jpdwppbL!&ajAZW3HOYv{ zBOyJ`hhW$njA1um=|7o?GXO^ttSuDuWhJCntdKqvY{TP>MbzX>unxaCc1XnRP)6T* zPcfKpCSjdJ7lB9b_a6)yw<|%^#2EoIj$u#^-M2F&#r(g?Wb#A&-+HQ)`q`7fFNfmbLb#R$`&GKTkd3b~t|XbAjG#2baa5CKT1MHCQ+p5=HDsDsFq&k= zm!PzXIsT)w^snRY{f* z@9^;=$uWVaz0PCLAc^2mf_xA8+BVKF_mPk@8+D!T3XeL*8hqr*ZqCgs@nx*8hMyZROs z*&LM~hG>BmH!$92XQaZA(DIr6>CtK1NjWDq95g4~5Fa!yh}O)cZ>d$GKrW634gv1h*h$=29srw1ou; zp0`kW+t@zc56FmUzm4$CKA*7xx#ZXyoMV2zvN%~=V0tM;8*KetpA&?LYSG_gLc!tZ zkq(P!1emv-bOftu@vH=2fll`_aA~c+lXY_)kkL^UmG`N@=Md@GMNI`zVQ;RX~IRq(~AaA^lnsH4)_S$>A=;B#9~ER5b4$ zOLSwF^9oZ!paoIKd$Q>CT5a|WI#XzP>utF-Ki%zG1K&?CuAF)^HR$$E&}nRsmFteK zOr_i8=`+29r|Vx6z$6u8u(r?^cgW(l=0julE&Seq1ZXS-D*9tac#7H{iVYxyh_e(n z8_>y^YGfIadm_f9$C#_qn@MZ}@3Pb)U?{!q)zNN*&D&hEjepcpio><8t(1(a_WDq? 
zm)Kq*vaISzzy&#lx?v)E0OchOfNZa4pUc7v778wf+v@^<Hc{;bIH7ZoU{6pY)oy!x`qu}zK}I3@YKx0!q{9cG@bEG z9t^|~5sZ_ji@nIiaM27}z{n`T@Ss;TF-?Wh;0${BYkGGWGb2p;f&T=DxUh}FUj*w7G*M0#@wVO>cn|@Y=wtXrYHk5i0Ke48W*9T zS5~c2F>^%&1~md=>sO~>z9xodC|!ev5SgI}0VYpH&J3HSrn$rnVXhU3tN^?HM92Vx zz?7LL@IOWV5juT{#p0uP!L9P;Al#7+MpXt|ui(pQ`lIDSORvHjyZahslKv1*;m-KD zbp3k?BW$_Hmhg#L%*&RV$-_2^+iN*kO6}${PL`(fGdR;wzA?#{ z59zpRA_dT_-e18c+fNu(w!s$~B;maeSM?VI zG3MEvYAT8n!whxarxOb^@12L&X{jcvCb}_J$~on^qr|SpuItdD4bqcXCd*XWphs+knfes6^pPI&hM>*f~#CvMTt9usUG8C`@^V zI>0c!qrY5W-Sp`Bd}9Q@W%F~#wVs}BL}BUnK|kS8{)BSR&<>_EMYlsZ4Ya}ZrS3pF zc*nl|w;RAk6gcy=U(Jv5yWRTVLstKb=Kr6cguRKA!+%u2s*T;o@5?e%i!l$+l6XkM zqqBLKEM8PhX(_zHucd+*IE*()`LeOC6J}rfdCS(=(hp~`v6xS>_dM13_%PM@3A1DT zt~$BAG2k3?=fE}X+!iXOtDsojS0ElV(>TMV39Wz`ohOfIS!c{UCL*r4jE&!u$I7U5 zN5l>o2ivh+X?rUtxMm7kWC%^!D33t`VL^}z3A7WIt`!e8Ks}B8M>)JM!S%YOoSy>b zO;x~HifT+V-bYr>VG%Ca1@O_##%70)cgLDFz`h?WLl>94IG>{jryDbte2^Gwt=(G{ z5(^ls7hO@QVv;8O?*Zg+VuAD!7$dA35eY?-#8fE#_ld=WHy(75>l^2qA^)A#b}A8%Z3J@}Eud3zwW)w>hRFIQ$m4sS-*24LDGinbgmh0*7Lv|Sq105Lux#>G8q2)+8mK1+hci@O1QP>qr+e#; zWB>5Ar$k)aZW;Gxo?|R z8SS^7L^Ih7xM@W;3rIea$h>H(SmxZio^hdHTUH>mUWwct9xje&3AL%bM{j_m7x$L! zdVl6CJPKd?P z{4+1H4non5LMc{Oi)>3`C$F?cQBSfO0NZZ4BvxEitQkd_QMlsYed+7wEN#m{yPFtk z+jz+tqq>2+J$LM1#kLenC}`+A90Za_%r?GoDON1jp0^3V!AJWSJI%Mfy~9n?x$VL1 zy_f$so7HJxWp^7~i`Kb)W37JcXYbuAp^7N3%7|7bUSjVGR%!#Al@vOyA}ci4l?)Qw zbPq=zKDY3_bP4LLy5Q%Q@Zjki@?F@9V|%O5#b%GXj!l>x`YDy@Wbj7G-HMzrQ|#%i zd3&8>7b|PljURl!r%g3q(ew{qyVH{ro__jz#ROT=e?)Fcqaa$>JD+S z1&@ujLu1Bw;g8vokl)E|q#7UJJgn-4N5-Cj7#_wdpIfulO7(2WCgSaA>AjIV>!T%W zy#r|Bc7JE%#TWGddhP$yIdoMNJYW2F4Y>sWUnx8Pdug|D{;llS8kcg|5=cMZ|EMHS zfM0It*GP;xEtI~!@mC=sJ6q@74D?n zcsefBYG2^~lAd!at+H`Q+mUG3luDfJ_E~u%SH596{vm2@TwL#W>xpiL$qNV05QyCz zEx)^u({b4Ao@$abWVZ*OD}>eQT)O_j<)}&mBB~E%AJ3md(x8BqLY0hv?U8npXLyrH z4wW402~L@?Q;#+YwCn?)B+@tna)aLby`gSshZmlKT0AA2U7Y{-&$8JOluF=dRz5i)s(shQ#-wO;c2ZfY9VoEA~Y%^NhSlJ;+GEtwkJ!`m;_e>KZZ>CNQp z<@(K#lcP6~&2Il{dbo1j`QZd8_aES46#U)o_%IbXNhZ|r*zcs{TR}p^G#POHN^&9< zXmW_6Na?(?_~Bmr-i5G+RGZw5(WteQF)EqN%u<<-pEf^Inby&_(Sf2h=pm6Bya{0b za)wcgNWOjQfdrCe&BRbkoz_&UM^++72Y=KGd`1`}u^ZRD_-iY`B4J^$6Qi87I8PZw zfa2Ka<|J4>ff4~r7Iu*)Dix-r%r?;_^OGoI=B`=mh*H>0@|sBYYg5u94xfjH=-k4X zESHQ`ndNzT{pcZx&6h8y3oo~a_co8FTiFXR#|DHK9O0nVBm=i;t0q+a<4h)@8q&(N zRM3m$=rV?*a@blllT?J`J zVp^P;5}*R087kfslIYKXvk=8%k0hc#ggMN8*O;M@q*%Qt(1o)22sceV4&OL^#R3W3 z#25wafvfu@ggpqE)x=rn6@*nh^Wb@+>A#&&F$W88!3g zi7|U`YyY)e*wep^4%tY&PMI+{eSYW? 
z)Tcgdi9|QkFQIT(xa#jyDdn5vXdd+K8Kne?h zCsJ$=ecDEvFh4{n94A-LW`Qp2`aFRx0u9T?*a{D{6o+4#@6k&maPOERVz)+rf_#+V z@GnMZ|UEXGa9*fx5&O@UaF>q6VY>%r6kmHa5?P!ZjFk+Q zmGgZo2#P--G2qsSLp2~V~ZsZeH8**Mg`oTS8BTapovFR7y~hyo}@y;A;V zRDoi<;#IP$HPy)IvGg(la>6q1d4g@8`Fn*3h4iA5h{n4I8L?#Hj^50Xb{Oj#o&HmMc;}H0`=LP6X;8k3FeR5+F+A@Ife_9UW7ZJ z9x*kFGw|}Iqu{n|AtxuVJ7&yp%7xGYw&sGfI6fz^Qn+pE7k&KK)f_-LhvjG5*d4&d z{&hYEh|x9+rw3zSg3%^M2R4gH(8T^_VU7+D7WQ?#cV^F?yV+i7Y;(VM0xRTZ@BEgi z0jJ09H+~cb4o*jNdpZa>F{m?GcnaQiY0vK7(*K#@>^Y^8IIC>9UT^9El2jG*&9vn8CNHNvN+m!eHT)YpC_^TP*$b-t&$}NRt!k_K6IR;*?t8OV zPrL(SUQMy&E|;W__V;03wzk{yXtvRPE_?2>=336EX4iqaxanGfD7O5~KpJ76Wo#O9 zY%M7kjvr#LHel8FjI=och$0HJGX5^#`1WW`v`uz!IEdUcV|SO-bVDiyMJq1rqi%|a z2X0EJ#3i16IFZTa5b-rcPPS8yCJut=P=QxMFN{IFi3rI5W6DZV8qxtgNV5c=8HF_A z-nFaL&!f?~EfpsolM;;WKu(2N5-Uj?X|jlyC9CShS`j^xhT}ZMH@0n@DVDAk#{w5x z2>k2EW!nJo?T(XF@i8x5vDbY~)4pU40=L#6M?+mVCA4;Avoh&y>TlTfT*Tw0WU7EE z)C+wG@b!~T`G-t)07G5KtoPay2UQGR4?DA3&RwJI{NA3~hjoixc!qx`HC(p5r=)pw zN&@QDxiUjI+G1cb38Y7mlCKMNW9BY89uWU{XWH#5V)Qf4fF%FKDgsF!YeUWi$@Qx$E5pjo_;KLXMaA zuT2tx!^Ro!b-NaK!eaJ!dx|Yt)ywYZ)UD5n-kjNnf2AFH7c&t^q7;4Wdu#W4`6N1V z&PdfQ_9BPB;G*xc`W|pIcyD_Mn&g8`#RI&U7Fz`p$A6lq~1mBh%y ze;>3W@0AYl%Tdw}W^S&o;<6)T!YQqYa01B0F=&JDp_uOd(xK_a>hGZLr4J){({ zCGwLfEb37BD%|;MoydK;Wij$dGwT4Jw0WDhE}y-Mq0JTfM^^E_VNCIg5WpD}2zNC7 z6M^yZWet=dX^;bwYbHu1GJ?!|URi=P)TQRK_o&qk-|TxWgjYGm&CA2<>z2)iU4)a# zGqX_m<})`1S@$G(VZv@nHEm)OiI93tYD+JIl~CO0E_{AR2gq#&7=^8nO9h-^z~eWxY2VHt&LF$Ob7$rZ zkwc7|QhO)9_88Oo?_gCuedc#KKjEZJfcT;U#y3@q(OhvTa^e!%MvQxZ{t|9NB?BVH z1bIEiT94THO0}iwHL9gzdfW8Sv*P4pgyOm_R=pjTVNPW^3usiiMWFj>#>?m-D!{Ei z7xKjl?w0ideNm0%o2$_jaV*l%WmS|S1}xD-DcvbBCc9LpL8-sklA+nMFJ z;t@;QO%cGqQ!I10%U*>_j|gquKEZMz+I?s`m>0q>OCws&8UAhC*HCl`9s4M-gJ5(F zsP_pLk0r%w?FXVJN@rqH)tT>&a*%FzsKZT*-;~fQ4!wWaT@MAe(fnW&3I!)3O4Ni7 zpb}Co1afT#cLjMMTT58klWCSBLW)f?zLhMvNS+dzj==cmTkb12_dwQZHx|Z$r>9~g z>iQxPaH8p$THVv9&wm4n-U9uJg)izx`aC0<+Qf;O!m_E1n#MmGRaBFe_ zol`(>8L&AY-~_vqyeC^C@Y>G6?_Y2SQnTf31J|3*MzcRqyVGN7SiYvYVEEwQF`<|m zp|(>znD}9G_F?oY&sqSzNl>oiq}#~MVSxgx8TR8*)Y^CGZr~Q`gd8*~NE+w`_wFzW zP0r}{bcEkBV=dvGb)pd!F{CNL;Z#G$?bVkhIBa1+cuho-Bim%%XQn8l@ILJLFn$JE zzofg}>;t37X#4}wbLlbZcYxFlmk^)OlzYnXt8~KFLL0x}~ynRLZ?I8s^oBYL2R&`BIUq;aI`3Z4Hya!vSBo2V1EU{AqlU4l7;xc- zEL6(8I8QIHnmR5{dN_HU|2lL{(J0D5*_a0q4D(J2TKAW>585|Q1ED|x%RyCxx3xqLyZr%8T0-Q~e_NZh76 zEgI!$Vo)#Gwo9Enqh^1(BTgt)H)e%~11k1JD`rvWuZmXIkHl@^F3b0whR{>MDk#Bj z$^e6_Kj()P?OO0%2vK#w&v#Y}_Y= zFW9XqYcBg1Dm&?eV}A+SX=Y;M#EVzEYBkcWUgFGA!w-pWbJ4&E}ROboSA4$UK<%8yCa8sLQh3QX# zhA1fv63n7?fSN?=X~D>Q8l5kPR};Y9#CPrtngt6mH>b9qtnH$xC);0qHDLx%U+(!8 z83;)w1#A!7#yNSyz*V+55{n=|N;+wwDR*t<6w0C#^^=@s>$8F~U`b7MGiA{eG z*jfg|bR4PS^n87VjTVhCd8T%?Asn>C5R65T+HMD(Z4@0txS}o>RO-#d9NgSOl6dkupv^uTR)RU@|`}Xx_dWbI~kK}vRNAq=7_?0 z&fy3*wK+n9yVk;%^aKCMs5t%o*%cR6`6uFA!N4-Y8Sk(?J#XnqI~9v^Hra0*%CtUR z5~bnj(1%teH9$0{eNf{hu|q<~HmZcqOv405n8H`2*@N1@Q_+$DuGhrSLRX;ez`ogs zIwYZy@BojtC{&tA6%r<92K9<#3=l#?n&_SeVmyH`9)_BQ+FPRT z906*dWAsrIL;Zn?dGVCxM42X?YG6iC!FVm%Hsr*0nP7x@|9f1?M(I_N9IdqTFA$@& z;&6Hi%YjV-jAvECb4-i(stndWH4r^&^D#^$ImQN6W6gA;wys;s198CRJ~$gBam*ma zU${YKETplvh0FNxde&Et3(BGcR7 zY8rx(Xe$yiwU)&Di2-8X!kxoXWF$r1Q7!w_A~c3d9Zl&K&V_c&>=V(E1mI5rVNBrIxvdkE961v(zQb4wo;*;`*q8;Wn5eU)ZDV|*U4Gmxw=do4yA~ww-ifZ>FALq9FlSN57 zB)?_5tKHl@w~_kzzku1$5P&@5Pnk3YU;!kQc%x-ID4Hur65lcTAYGPZK6mwRj!Q5N^85yh{ zv^$w~M@?XyUr~wHWz5+WsPu4-V4AnJ#8>0~KzT7gN$hy}7}59zbsImOL;4ey1Q~~E z*O}OAcWZ4A883rbTRNY(YUREV#qTACw|7sU*W_?SLvDDS>%G<0RmY;pr`R>`&rCPaQ`hzK{_Seu&&f{3t74-35##AnaS!^9P~U$@A}6!d zv@jd7Q|@vKATQ!~l#hPKJ#$-%SG54>C)llZ$X@>Eyj=ogn(9@ceM)&^ z2C7PV?&*NO`~@MtNn=EsA>^Y+f}xu?Fdu2!^NXDNng9WA)bowLBmJFg4eayF@<1cQ 
z@XfM$?WNyE*cnaza{n8*o;_gmHhHEC9#c2rgfH|>X!Bx_J95TseDS~GmpHy^}W+@F)NZtvVc zW;9lu)@)!o(4}F!`;EcX9NO$rT`HbK!Xj?nrUzRGdlMc9@i-rin$A zFVuLisSwA^>+2UsUlyIDp=9#Z!q}8kIqA}gLpXl=BTzSMYr@R0Z+z>m?;pR<7Z8QW z=?KT-udMB*7NsJ)?Zi8Fl9@fv!tYUdH9cSd+d)AF__>iqAi1B~DOO_e3$s#ox*;rf z{AW4+`8H=?jwV`9P)Cb8iMTe3B=~o}*KyX3K?I8s zW;6JMPFbki$e6xEdaAghC3Y>?Q3czMh=-H*RtKXHsMhs@2p;vZww`(KvDVlSez3)rD7PfLIl$2yxF8W(j)9OhsWFO=s1xCQSDX$G3g+& zSxTU38%_C$oMO}Te7}QY*CGu~2n#W?Od|$lKxubV80dkV;}Lt$KHft`o>(GeTplAz zeS#&%%n!+Y+in5n@c=j^o4i4GW6gX#?I%}Qw62qr$Mb1?10!$Vf;xf>D3&Up4GgN$ zi$VepCtgGwIl)s7oh(7oQ>~1~iaDlK-UIEfc4L=*-z>1a0Kw~-kUxAHY)IcSd6dr3 znpzBBa&{CpO4@WGhMVD@+s4_5N=?+D>#UtXIrzJuQE9u|#Cy7YG?|g@X2Lbx{=m>4 zsF^IrY8Dd@le6gv_V1d(b#V)S8q_L{83)q2K3{)ANG5FtwtnJQ9M@@@W+WX(zi?RG zVdVmnE#BF0Yyu`e=qo~gPM#u=VR}=#CKgPfE!aPFPY0Nb<)?q)X@)7>?vQA;P!J} zuzh>{Nlo`Q7GN(4OnNe3%uAB=xOf>0?xJrP^?e(6uA|ALXC_Iy*1nPImmG%;#6em9 znmao?YZRj@>_g+891UijoPh?^kdG;ELeTz<;uAdf-J9OX3bRP(udGXkv# zKQX`Ni(6;+X+p-(+HdS}llEsjBL4;dPX1>mKEn3|#`W;|#8EWyX2sD}=5fnD2;_e_ z{|)`r4{wA0Z%L3tZ=u3iUT`lz%QseS+~V@nw0P`*d1k9%*GY>KnAO=FYgr^Q-!Oef z!@DORIML;)=4x{ec1`~G-t`K1F|5SE>PZ}1m=A+r9?avJL~t&i zp`Y;@-b`G&MGPS`qg)24tJS>a#f_8V>K4p5YvCaBg9*R)ciwDVrW>?{eicf~!uD1d za3uNQ{+L1AScCq4{r69DHwKE3A+P+5ao08f(lK_`ae5lK@a?VY;IHm1BmMaZI8Xl% z%UJk-Z>|2Vr#&XK$;i0#)-V6y|2o~OO^6w6=&yoy-*BX@29!r{@0HmUq-K{>J^l6+ zew;89r0UeCH&93sjJ9fzcxJZA^C!Z#Z=w(OpF_D9Yk%uR)aSJo#cca^*V9Yf9by@6 zud1fd9=70zy(a7S5ApxVcmE-~Mz6*VB0zwEV1DMktp9%vys53Ri_3o;pN#**^8bGk zui8|3hfOB9u$wP*G3RxlIY4-Br|Jsgv{oXVTW1laXd*zjg03S;kMr9v<)B@`1*gS; zCUN>9DMf5*zHNIZooEEBx!bcj75Gi0xN#v_~hLfG=A#i$J(j&X5fuI19nv{a~4Kk+3( zpWrf#6~eO8N9Q6bW!N!22X1N_0os_lLSrw6MhTj@&2}sFmpi#pJ(i$Eji2bRlUifn zljc981MT75`f-@<&}}=jjQ{ji({@7UD@y-t#wdhE|8v6|r;GzapDkI- z^2;q`{(#+8SHLHx=@d!jUrIJ2^*4bbsV;hwAGelhbhvp%dmdfxZtN_B%y>gyP#ggEGeKM+n&)lb0hX`_E z%lUa1B{&|O7+J#*FP&I_k6uaTWyDSR1OK0c{U3-yb<#+_|EEv1lKJl)Y-7_Oe|lrn z|G1i6@!7a+N+j(((ORS;Qd|>h-LIHmw3k(zX>Xg?ikH)Q^-zmT!UYmX>LBfc6aXYn zKCU*m9{Q(|$jDD}%x;Qg$oMS0-@h^duek2Y44mp5*_@Xa3~L_Q*-bLXm+%7AO(vr_ z1DFd7oRT~J&0=W3rPiyY#7A(j{CW?eKb0Lso;8)1wY3{87pO1x0!*LIxn43F& zcX2gR;B}Wlgv{8pEjo<$RY}s|r(n`jqRsQ}NRVxF*{3*sF&HHMGG? 
[GIT binary patch payload elided — base85-encoded binary data from the original patch, not human-readable]
zx_TX)zMk$rFfhugxe?lnO=bJFYgGZ|y8L0r;Lmi4mOae!m!cZs`y~n2)zqk4Z#0&E za`|OrDvr57(ejEkHh!)=G~a|3{|@}oj_9W?Ng`I7y1VN@J}Jk@$RO-@Gd8f0yZPZn z;q&9;dguOPVc@0M?Mbl1?dkDgU}2!W<@zj{)B6qk<|wj-QJBR4CsWjWV>ybi0JAXj zD2BNUb#Z7lenw6|2P4h@J2FF2mmDvhMz2+}A22;f2Vi1a{MU6z(`x2J& zV~|1GN-?@K?vGZv46Toj>RX=n#cn?tP6_Y2icz*I{hToha8*Rzq_u*06)L4aQqAs( zreh&^9LRVg@=e_IghDr@BaYfR3LQbFBzZskzP7AU3==|`yr12hHuNKN;oRKwZkEZ- zGo)Y2!*TatjT6I*uzv9U&%(HIE&A2IK5k{bUhhr@&gL4Hjz~H{Qd+$pF8ea?kW(MQ$1U0JtsVSk zd&sL23ZMwp1M6^wY%*_U)1`>9`E-+sOyWOeRpE*__yUy(GWF&j{{986N(}RVVpsVC z0TqRpCnj)lZ7yWV44#u4`mWTMJB>Em?tDu5LX%ND}&H0fZOnW>On+GX0YhRXES<03$5n~YQtU4x+(`|fq zfou2S9N7wIbEt2n*1)m^MxM>LV|8UqAuEKOi<{O|W;CJUt%$@)_(NfQ)@@h!JGbNfO!NsHg6AE9X(gO&o*cceG50Mcm+2zs%GYh{%9<$IlZlc)%>-}e*oH(M+?-!ThayR@C-9xPmHC3x-*{ovY z$es>i@N>M{#MeSX;*p-bHPA_cshmW$8H2`1%r^~){`oU50*w>g;FvPm3!~$(nR&jD?Ka|kLC`{|%F;qv z7G|>O6Ul%c|^2i@)>;#xG`%a{xaTQus?H66tgtM@r!Q@8f4 zhbHVcVG@1|tn_N>UFZ-xAWW;VYN&NeIg=p_BdcRiy9zmA+S za|vM3Q*kdQ6S2GrtCLO9FMMMJ7y9>6D1Wdx5IoaUj?yHQ?~5A|ZL!F37v)iaHi5UH zaE7v_gE5*i>gr8OCj3f0x;l~x!CirL8ta>rLIbb64-^vM$c1uf!t3Sc{<5R6AolHM zvo1s)t-|Insge-}qv5BpbikZ>u+@?nLP0WP6V*v?9=rv1(ce(MP_5xx!3;nY{6%VG zNtYF}=YjU&--J)00JKXx^qt|^j&hN`3pi+YA`jR*F~_8=Id|$nhhC@snee4$F_hb!UcAAu$igo{c`4ixuDbR-Jt{U5 z%;#i`ulpTG8EAlkB3dq-6=?*hZP>>HM1Z@ll$6^Uictg6lcb+qLn=P-tgi`$AK%l3 z_szXe57|ML)K=QCED_}0pqw*b2bV;Ib1?6tnJDSOd5;kvFd>q&>76?DSqEq6a_emy zkz@t*+bT2|N#O=K!HB#GqT1ohxwxf>f?W&5(u6y3uD89FOx{|cw_t-_Mb;4M3J|(B zwB1xg^(_dgTVzuu&79{;8AnKP__n=Qh+i?zE=bZkqd-5=RuBg8c-pQ=aYbV%+|&u; zjvvHSG=|vvFVKM5fuhq1$^yncm$?;|eRFQq>GV&vLkegVR{qEy*#?`@@sFrkArvrZ zq%gI+x_D)voyj)n{G`W%o+n{?cEU2`O6D&<@){z)zXFbOqgtcqs{EtxmN^ z1ObwXUajyV90hMi8VzYWPXm3(CE7B*R*1jX_LuOnu)J8mqL{Y^>Y~2xD21-Sz=_wg zmFduq%r}sC8WR6(H7wT{Roa&f zqqEvrf)Jx|5uH>7x08@~eY_uw6%f?BaC7q4CqG5*Stm)xXq4MVnF7a72Ww&iZI5sK zGRQ8V`h4qMyn6nS$q+1+<=U};Kz2nJYYpXbesI|+wzF_Y0!%aFjJF86xQiBP_vh9G zl;`;(IYH6KHE98J^!yW&NfYu2j))k?0zSjblAMi{`*DanF^4wmDDC5Wub=BnI?~&M zO)-rgbrFd!Qw&#JAevr=AWai~rB{0Am8J@r=`j~(cwYkjAJgjrq;|96WA!}~f^zss zV29RBgnZR+&QPHh(wos4$JW$&jUI64w>V~LEf5F4Vbx=Lz{AIqNfkMJ9`|xEvIte% zZxIZpYB+Tc*g*TU($7KV(tvshYPU6Mu7} z3I5Txp;euHwiriDS+we!7r@YZ{&A(q2_AJhSpxU)cTyP27os{ud!=+E;cLK5I290Me zGk>e&5X1lGjKrBNK=!^bX{Rx>>a@9Fw_IAY@2h@`Hh~yhLx^H~0!zY@>kIafokTd= zxRath3EWFAJothWX%PP4(Al{e7hdlye^L-v!)orujca z9)=#!CEY*i*C1+|FS!s?9$`h_P?FA8zD%%+yUdsMhw`e@DaR;21z}R1W!|oU-_N zPl$>SfTh?6*p@cGW+Q5IP?%9V$`UTuQg&8a6EwVtP+SP4Q@E#)hOpDaC>e>BSZ)w>3(bZkUO(b_5i2EiWE$T@;^iYyLn>*wAqqd^HNk>L(QQD)X#W_%O>eRJJ zvS7F&O%aM?@B>HKU*TYN){0jzES{ssB1x1Ptzl~E!H9~lk}x(NW}x?8({HmqF5@m)%zr5KN)j`h2m4$Gsh-2Aw8%L9XZVTan_BgK!7$OUFPqSSLe|XUcxe>H+)HS)SZ2QG2Bg$>bD(lK}8isn>GgaY}=1H!W-Hr`W zUZIQEX_VdXiPXx;%onm?rE%eI!+4Pio%fsU$}$VR;ae3wJw->_Y!+OcbcdjVk|j@^ zEHcuP+C(m`AeYT9VS7w@v0+Cg=0w$yQD9(xdB;6=_`V}!&U&{sZG?G7T?YM;mtmqB z)&7z{DWI47F76RNW-i<`&0NN3X~k}%@M=Tqy>MjR25Y7k(+yV9Ry1vfI?BYV_p2%- zboG7(E2}pW*NHp^SG+TCNZ4|;`d>BQ>UgbBok}`_KxWAd_1>&u&HtKlQN~W8Xl-1< zh4&Shho-5F@@s_8Gl7xpQ7wgxJqHjcw!4_s{VWjl4sMv1wRGjXG~+|i$*Rc3Dv~48Vv0O`zJKj>4m2L3!5x*KQc@^HGEdnH}f6FW5c}r2@k~V zww7QOl0n&@EJvqXZ);ZWl_y4yFyO%jC4u-m^T4P@BFtO>}tCO`Ztggo*sjsnm1D!3!!5D$}p*Dh1l3c%Iu7b zDSMd#H~c!`y12@`2>Ym_XY1r4nFol~ar=u=aui%!N^5?@R!U zwLAx&0HabBkA|t+EFDxy$S(!czi4Oi^-Z$yHzeiZc>>DqmUg2{fgj`Fl7^_A?|My zwvRAtsc_MBM>~L-hk^glX^X*RxfbF@?Uwk(aNH~Cr~rO0K)VpS-224UM}r%dvAB<7 zV8n_jS!i@oKWamqH)3rpQ3`*cIh2Cbd;K+xfR7&v=2X4dsGF@8+&*kGsLBG$DB8hg z@GS_rJdvXwUvWg`%JX!32gPQ~oj2>pO0pneI3&V8tfE$~RVb>{t**yBWUZ7Y=+uh; zezD5c)YC>3eUBGB>F;oHgZC_D*j5uZxUG69C(dSIf|G`h3H$pEFEKqB@?RWEQErj* za5z`xoFK!%Uy@hGpVJI~eA7YuX2VslSDfvuo1L%Mnq2T0(130F5*5wfm^ZB%{&#$$ 
zel>cv`|T8cc#RS6$@Mt46^hHcx9moi{yw5;Ws?zb*(pd_+k4PU6Q4xCJdEfiq_$+A zRJ!vkdsh>Ri_=wiECp^O6{Sg&FVwU9=|q%oq7bUYUZHvC<6+L%wklipTz-Av$MFB;L;})@bFFHm&|s*P%37PmSJmV#tP#y!5e`@^^Nf4IqRuJAMJoFE z#y!BC62$O9{twh#bUCI%U(^gKmcX)X$dD{kg#1@q^t zR#%WM-=ooZXmy;ED$7g~%WdE`Kq$%tQ-SQv_Z!tQu7Q}57$i6-R%@9ij$5xl3C-0} zGbfbKjx=l~8EsYfOkuRUGhg`8Zkmy)FnQ_jIG#(CIHVlqCJD+$o-b;Pij81TtgR*w zpKre19rjfza_yUH)hB)AU< zj7-_UD}|?ln=phu7zm*x6O_xBHPOcr;OrAI{Nr^ERXju9S`G0jRfJSm`*S?&mkk== z-avlM#=~!~=S^#{7@F%1$2E-SgxJr;jz7CBcDJo?+v!d979OpFHwbxpw8%}ywGa@E z-7Ua>5O)zOcaUHt1g)TC3xrNPm=paLIvG)O=&^y^K@ej%-Ci`70igHF?A zk`jYa6!Ua~#opPoiv9W1^Hn4Aes@#CcGpLB`-vWEqW89B1uyz-tkNDiUilCn_qbYg z1MRVoPlm(XzWmArnw#wMN|Vd0%)m}~dLkhKo85+=8)O3kU$K&TY)QJPVll_V7b3ethawh@z~<)F=51rPxwc zuGqUxh?Tz1b$^&iZmuT~mGgh#a3M(Ks}Ti!@I|Nw=DePreSU19w__o>I?ULGxlL9R zYGr*3M>#sqBzG&v$#%r>O@e$40s-yib;gUTiZ-;YAn)@wXD-gou-?U5s(&dB!0H%? z#D=6o{WDQQ;_0Tu)aQ7sw#H6Ky#;eC9B) zcELs6uh(*dxZs-axzv?;P%5$TM~K6T zX!Ofm^w|C5l&`d(|EVm)6I!Dj*TBGCUQKNrTlY_xBaMJ#$mbjb%XP#LCo=JuOaC%1 z_YdEg1BR!hGRA8>qdsTF<&ZwGwi;ONQ=4LZay(1kSqh zXP;7x-KSmM%;e2d9;gRvP^VJ&*O2 zo)tmHGf1N&P;AU99OZXiB{!ro=6fW%Wko^qq#RlXkDwtIC{o_LkU=JbD|2Z z$pgg4jng&>X`|#xu`U0+VKL*IL8sPHqQr{C_qN^fEEz$1iyET##I%E2Pnch7VZ(k$ zwiH~;!9AJmf{2MGAR&2`w)3Lg=Yh5%n!5w+@JFV2WFimSO}wP^8w8QQDR0ys-b)Vr zh)+Z+{VQr!tNe$qm1-Xo3xD;xAPKP5s zfxuN#4A?zsO+BAXFO5UwYVCOa{Lne*bPSvQ!{mAWBdk#DLh^Xo@1D8+Mh;YD4{K|e zNCm~8_lz%gbM&X}ZkooJ=qLctQFuVQl-OKi64SQlR&GQHHJ1l2eDMPgbYIq8ylCr+ z%QjY@Zxp(&hmsYX!{P7W^m^sWM9A2aJCtAh-T6h|VoiV8qi2uzy0Qy><5o4tCg+m< zr4Des1QqXpQ4+GlyIWr%J za-frv(XKu95+Sm7^>TM)vmfDUTBrrv+mAiH8?J#=;;0`}&@n0ISX--8-c{29JH0h0 zNzMf$lYKPp&({=7V*X5h7+&^{KJG2t9?;$WemCIBGI&%P5}Pa6f6n>ZN5pwFf;C`ZI1)^6Kv*cMN>YCN5!}WI)$DQi-CIwulSD?#@>Fn(D=j+&La` zX|oOZR&daeI$4MBTz|qvLxmLaX)}thv^cB4&RpSb1ILP(Z_!;j{MT}Rh95xpv)oHX zru(k#{)191QA=G;`xs52_An!|YQz2U6w0@2BjG%LwAQ#U$!?o3_Y;SJkxAk%!|iEu zipi2w?^(F120VY+{8SH-Xr?CZfykr6fDS`9sJ!!+&(&u2S_FaB`@NrRlX%*q@<7OY z?;n!*O%GSXw>AEv*o&znIIy%<6&K-O7b*jPCZCX1P|t@rD&q;PP8aux`+7-r=k)i# zKV9lX%*a8wg*}~P<@a64T=18|?QicBx`L~n^>Og$vY!$)nx!W#P9BLGJ@RafDjNsN zZ2pE?CC3~`Rq0B#XGS8wOZXh3oi}%Gss}u8{nob4fUa=K8&kXE0fDT!A_5w9r!!80rSqf~d^L{^rv~Z`GRCz;rBN^>xsv5PW#KYM>3LW>O~?YiA%)MlJI?%=svzThuq^>oHHLgEXsYC7nR}ar?N>jB<(xzz|7j_|C==1!y_&^Q48h(;2HD@pNXo0`z%EoEq6rPct*+0H|Kx5#v zEv4KsRCGHJh3gx~RR<>Aqql~)h+21EaMdZEoJ(hGpU)>4X+sE#Un!wYwU5z{G*wB1?-SgHcqdHSb@QJ) z`WIAxPG2Ulbbp)tK7xHMM#)XFhzH()7)s>IQvQzEQ5dh@Z^@J;+V?l|FFgL8dzCwg zIldhv)W{I*-A(+I+cmnH<}i*fqWCt-UtPYYBcDJR?sTDk7Zhpm@@BIHxh|0qOe8CJ*-dlsZ&psN}3OzQ9@y0;DrmfvFb|x z&$B=#vU%vM$*n9MnW;xbgE0q!P;EuR@I!7-y~mc zl9|lDBR6WN(<|dEg5^h}wUnFVR8Au;X@3`R{EhQCsLYvr`PZ0-H`gP~-jVO^?BD^x zW>8ISFLLE&1%sh>d*Wc=1Bti9x+Tc3KxlBZNQCQ9mmq`ez$E3>_Y+`875{i7W1jT)N<-TA^Cu$Bf!r zsGipJ^X2fXN}*BWM!!cAF;YrAf|Ap=V7LbgQy&s(%;MvNol(IxBp0)eg0Au;LCXQsp-)E9tRmBIhU@srT;qT+EW&>20?uB2R)+Ij zj8!5;U_Acu@tlL+fUG5r`vxg0tQ9Ke1VpPG49ZdJY7 zIYhd<#?&aUR91YzkboJ^4nCMfZ7`Hz22-Yn|D%r=UKwe)Lu^ub^!HNwF|CafW=rtf zi>5UR)d%Oete?s*8B#o7N=KbU$g&WXTTCR!(8Mv74#di!#Q{DRRw(1nbTtNPeJrNN z@LtT>G;i3vaV6`YJ#Gvesw%FmyI4C2q#GY6Y#U_9t|)idCO^j59VRXR`esWdtbEnt zK4w`at=7bb{YfE@N3dB3)&PRC0{xDP)!!>C<3XH2XVpyy(L+{xC+U?e;hC zz^}Dk(A2rp%Jhj?SlK*jtD!F0jhjE*AfO;@V!}H!T1`~5^TEbQDeESBIfW7rB}ofY zJKyn5YF2XbR^?XnMn?!zU(V}y!0EJ;UnWXqHK%sOyjia3)nwKuONk+FSTjzmf<^8B zK;2EV#O%e^a+Z?CBIpvRQtcnIX_~_dpg17`_ZT*iQY0W&s+VJ`g~x$g$Kc__L6-Ag zcEIaWIZqaV zXq%T)%iJ@j>GbB|GwI^VxXWH^bXGb`D=@^*U-LlZB+@Uhzh@zYd4$_K>_}=B#42R3@QBHf6>~Sp)Tp&nl`UUzxDCd=)l7B~RZP zPAT3&Y}Txs*|zeGS-5e*(V#4_Q2cfRj7BN zPW9kqrGrBc8{_Jis@f)}JtjgA`qD3BMhwju7tIeJDRxagTOjwQtkZ5&gG|-RoK~Iq 
z+`28c$V_~QFAncv>)=tN;L%hR(dKI#^Fo_v=`_#TAW7~Hi|WcFe~zx)J*0GI=G=!K z-=#>Qz7h-jLdx~M)p_e(s-|<%w-VnFRMN`*_-tynMzek|#-bY-QiVi}L(dtUKsg<$1M;_i}o^VACB^do&;MDwsk{R9k4%*E0~ge4cv=xw%vZ z$;im$J9f#dHB`+lXZEP6t2r!OXq~p34a{TS zvt$5J1=jwtn)V2Kf=ye3Ob#%;dT&IjKI)>c= zbISj5W@43%`eil3nIWS$*C_dZ#e%I?TD&;Qyb=ATw4kcg{pg;;oL}}O1`+yGrENDYY9el1ne-Z|C}cy?9Eccqc#7jK72INJO#R&hPYw@RCel zo21TTRRkeZ=9RW9uJx39r0Mj>kPWDir`cVwk;=uf z0Cr(c7`2FU+?pvCHgnimj(6c24E@>a?}&0Th*brn{x>liIB;QH@4R9{p==pPsW zfR$hG6zTtZ4Q6lSWcD9nAXk{?zl5&m4sTG^?X_4K8N4%;8&0P1gU0pJ<`Mh!v2sdK zso23(!ZvQg3XGo@uNy%8z5&&FnU<|Ii(ynyZtl(xfVu(MeAk562$SLJWPcsY>U`sN zV_!Qipv&1UGgKhQCFPZG zF8x(u4y`*FsDj9&G5}=n@ndiN)YhzUb|df6;yFrKPh440t&g?%W(9Fy=x5cu&}t^w zS?pb-R~EKC^qEloPwED0-t`TwniX0y!!2@+OHf4mEv!d5*_q&y4t{zXH=I9&G8%;6 zSsxI!?p+4lf82{dF-xIe(?uW-B7}RjQ3<*(250zXVWkS(nvcVN8CrZ(h(DY&l>PII zfHR~3p%EdK8(8(`-no5K+EaKi88q6|W5FhQm8^fAN&=G4XCn>j#wdxR%YeW-ID zak1-0ah0FQNohcCOudM&RD1^*QOv3KT(x?M#`ta1++nJ@$@uU}5LLyp`P`GjgeMI$ zksGCDx~+DrP(JA#zVtyqCk(VlegoC_>;Mz)IQ=)#+a$*g8u)`&$EJU)xt}rQ)*Qmx zO%!R2)cF4;vg;E{pbvKr5tvt&|k-~Uoro5vx0T|`PoeR*%<6GCeYB>|64f8wJzH~>I8N0A^KJ)`hbRD?P0V?rt(g^F8#;68+PlbudNs%M zk&P=ZW|si=M!XJS`y36>&LovcRM$~FqE(SKl*-F9LZ@7s49?#J&CAbTEQKF~j6 zO6mH;ov<47gx)8;Sxs{m`zO!{2eX&V1+{7bf1jxW>ts)6*$Lf}u>PQe7|a7~xOo4S z1WwUlLL|&~;dA}`o5=G|tpmjHfvmq3}w zOuqY~^-;<|D7D$i@Nv=^Cg+lA&+#If>j7gH4C4CKlWHsQclv0I5&{ItSjzA+3a#+w z!IxFOe`i%8g9AR=K-&dfn_wiRJN1oC>GLS)7%=RA$hBDzS~ZC@H5C!yi=i26yZrS{ zqzVf;r6t0jxqx@<>dsTIIzECwBnE{4Q~`af2b)6STUjA5)xAmj=8^SdtI+0``1T4t z2MG+Kr3dq#BQmmgpb9-9j@DNNe%}Udmaq|_1fooy<;dFLoYyq@j2F zD^?T+# z)Qp!iA%8@L(DQl@mu<;Dr06h{R!w^$&3Aua&33_rD506!6=P5bFfdsFB`E#Dz9{JafLC+8@ zed-wSR&c`v1FPevj!B9@_;=e&P>{KpXOKg@9K8Nb^KTkwd8QVtaFz3^Z<-#``16k- zS*ng{BDDI8_>#dJG%xV@h(iPmaW}d27lw#+q^f>-qAFo|q1mNPIM#X`s+4)v=WOEY zU!H1nDAB{{e~GdJ(V2}@Z#_*94yD25LGJn!af+?%rfLV$w{v#KEl@PTk)7vu;8&kF zNPi=r%cpjoM9eduPH(JcSLbn3roy*h47BpLJChd6%}3kGW6aw9VIU+Xi`dI3xW-zr4~G8sAKy`L@BheHRK+M`_utwOS)c> z!G;3Bjr*lvaA9!!@PrG+pydUl&`r;N zat%oWOIon0?hrp3%x0Kha?h>#hF)M}Ro*0PeMBc}Diq|!Zuu;U$pf=L@{aoK zGJVHR4|v`xfJm(NTbEwV`K~B#XJcRQz}=^XLU`=ZLtV5Bl35$TiL!vE3F!lo4KEHF z9wzYI1HOA?yV!PKYGgyux)PM0U0=(66!c$hZ3jj1aL@2>PeV}*01O8T2%{ItjET!4 zw>eO%zI}*zTH($Fs1(t=uu|0Fevr@21TDay#&QI|J_UnZ?@QGA)Bsv5l-FOW@^AgkwC^MER_-Hps_5w1cpOL72vvXm zW=m)r@QIJ;(oUhae!aIMpr9)8!)B1bja*Bh80Thng+?$3$zrij^###y|gc zB>q`KK`1lo=2;58VZpp#`i(REHj7S2w6tXph1yX~jJLg7N@Hq% zMJIDVJ3?ub*=`bw6n>iDZH*LFY*LRu6FJK&xmqSfW?bye&^#0<&2=)a#(Q-Cf!co= z;Zlzj^hx$4X2m0;zDB=Si?pfvO=;(5F;R_c08&|t?h9B&pcE3#iR!inAfOXrfWY6u zlPNe3IGYTqY;p%Z6WEO%0|uKJ3I*t57D@e8(eP_Vip*Kyx@&qRDtD}&LxwWHJKRRYP!4??}mEYTcJfu zBcAR7D@j{n$r z);A6oe1BJ#_?nid`_~y?bS`kuaM_nAPJXgGzc7*w82(RY8$ehU+b_DBF9g7+3Lj2e zya_4a*M%sG;ts~fiYc0o;Y4b=yWMvEH0j(T*O6YowAD)1aE?`tZWU^(7-byV{CZ3s zA(2#eDwr4+o1S)X>ou)Ht{d-=mY#tOdd(1%L{q%ooh&E=Of4=qtpiL%6Y;ToU{lhI zZJH}Cl6%pH0Gri}2~lP1rg|>g!hhnbB1WDtdKC!pd_0ZSRIn};RKKQC#Tbhx9-7{g zOv25N>>~WPJf@TU0&dlvM&+d(g=_)Ct`-^!sav@-fwL+cWK3|@^Yhb)Bm)HFGU-C~ z^{TD3c;u+#`fzGE`Rh)&(1+>>;i%!NruJ(--g5PkrgFFZY5!zZ^SMa&NOkJH^i|$$ z&n3bs@<}=M%pFYA8B79ruWW2LK3V1OTA=Uq?s>10#DAJ;VR2R%>23Zww=R z&y=hRav-<~`sWwE<`44bnQ$0bN>z)G$OtEC13@98g5#_6GhY>CcMI**%FWw2+mTR& zNqVfGhltafnwgoM+)q(EuiC-W61i|jtm%`kb{ye{R$_Pv5p`e4X;8mzR$eVS)0;x* zL1vJ2(-5KtbYj^<;hiOreuxOq!+18^T-R|Tpq~*H+~KfO2Lw*N#lHzmB2x7BCr#S% z^bL|p1u{5znSwsdI<(n9G}|7eX_A@%F|5{5MCcz9if@Y?4$mqjjP-`0aU<>;b}&Y7N+7BwCP{?wq(J1-UNaSJn00mc={kONc_VT2 zKK|j$>cXpq+j@AoKFf8P|9y(f*ULu}n^`Wdp&GSHk9R7MEiSJ$W1#YW#(gF9xKH?b z5*Ek|$7Qs9LSLl6e#Yd~D|R%O6FH=wl&j3GsBxUn^1=>k^9GWkW&sf=(_5({7RFXe 
zjnc%}2oTtrQ<>zcR1s;y+?w@Dyl7&xXw+1wIkxPT8G{Y@@kIi|Pw-X4-m$v&Bmyt- zBdKjYEj=si9-HUf3esN{L{8O+U|6(zrAQ>GS99F8NZ9&ZC+p2_lN%0zR$B9!Lk>1z zU}=$sD7%FgTuFSNv~2_5x+iuOK|gZpF4KHq-4F|w_1QLQl06K4fI3 zIM>BW2}>r%1g$r+eXlacp_XQOm+SNZWEHU{j{BP`23!x%@zxsRAUxV^A_yNf|t8ocI2vtIP&@V=ZQTSUN! z(zJT~e!V;z16O@H2uoKbswu@9INY_hi6IXe%j0=l{>zLp(Dwa_X%6fRqIJDSm@3Mh zDd8M{7U`71&M9ZBz704pJ$xx|Jeke;X>AG`5WO~rp}$@b*=Jl&5-@-Wd}2=OH}Gs7Mf8o&3U=%rHg%90_4O!VIi9h;voYb^G=sMmqbU0H5}=kP z7WE-_A)?WasC6PSy}XCx0lZP3`32GnID9dZfx#(T?J34i(V+%WKic1f(A)8Kpj$JR zX$Z3SF_`0Comm&FO|8&ZTE>1|i}8|~c*|nHjq>8ggok`I@;>*^6$=RNr5__Fv~{)A zHU3J8UG@xpx(x>XaBO}Wa%9xWyVgZMTNx2qg1Vj3)Unc1R4ma9p8-TIzwkxy%31UU zQ{g@#LrR&@*?wQ}IqiaofXUv*n!s`n5uz|$SR!5fd)k}zZVP&jP}Dtk;*FS`#$k4k zz>`` z-c`}fzBcE^u7~qv7vIM|HwPbbnL36jCi;Cd97OQ!12B5Xg)ib%T!tB%2#-M7u%at25{a%5)PTOLYHdftDI=9ZXyw~W6 zLlj=QRkYVcF^G<;&_Y;g^E7G|$n+1YH8PGJ^OV+0Nbh=?7#KHmqBxcRc(TslOMg&k$-0QU|A9X9Sb(+5N69Ms zxU~;rR0DcFEEDy?5-pne1~u9j?bcL>RMz}mdzoomd<0!6S0)*P%@?ku)Z%(%#T95X z#|jyZWpgT4+26mFt%P#^l73&v`|!ZZ2XS8jJSo_(r19Nj{090#j~*t`J@W+G2V^ppcKO zSx@Y&DF;6cZ?ne`bpu&5Q-w$f*nJ4?cvM*dtfLpkyw_e#ucSwjFI$l4u+%g?_Z$7# zHTR|W^s6Yzs3973S-kn~Q*KDWC!HuDjv=nJcMYH=ZdDdbzptpv;VcDpyNPBy@gaD* zu~YdeHwJ-(zcqKq&TLLoP=d|_AMo%HeN<=~?y^@O^#iD!gk5_nd(>}w=JU0KQL<$L zQ5Z{28+}cog7D=(4?>Z;T}W_bTFM2{5w2LAaO}-OcV)53_3&7BxxF9es^^29$mnBc zRzE?;#!JGWq2>r0481?Q60VL25104kQPZ0}5O~G6sDghbYnBDtFYLFG(=SZ$FZu4F z*tabH%P(_b^$X-+S?YTCoZfJTU${aT(e)QLw8pYf#9)D87Fb&$XvQhs@Tx>DU8JPJ zJ>TBdR@AO=#3IH=sryL>UiT3TO%}A9hLy_|6vhi1;>mcU)N&wZx^lGvlLwkMmz@GB z#psiPY}ZQ+)91#M0NM_zZWjE)@GwA#L%SH`H)mR(w4>Li5tQ&!1$D zEK8FCs_3jNIByHmTOY_nogqJ-z&^F9K#mStrCyRnxumDZgy!=Tnb8SK)JfksILHcs zbohqUhtE-c^p!(Z-w|4(pNY4#|A8*uYPT~$NL;@U`b*@D^7f^ohHu2GLP`rjmd22X08D8aXLi?csilr0UuT(-dRrs_lT4U3s5GUzw|dHb`>(T}#tCHM1%L^2EJP`D`UbB=l6 zK~V0HDa2iVIsiXgg-e!VhHr70(1v=wxTUc%oux?bD_g)j%rSEt2~e*5vTBh5w=QCu z!*Zr5Rw+>va-MW&oSw^%1ZuydReT${=JO0j;Ja;Rx(==*3~ z`iXIYHAS_IO_|5wN}Nh`LByJrW?{kvp6H{R(S$eQEDe-ZkLS5V-FkKhvSX2-QrdeE zTxeDdcc|0+T@>vi774HZU25-kFk;Q1?4yFFy#GjOy|NEM;dDgXJ;ge0Uh(tmWap=k z+(hVN=$R3!a(G7)kghWArhjn(T6n2VBeV%hNt=S=Y??Ea`I-_Pr!GRL{(7r8J0NC; z>0NGFCdYWIBj|5StM`n-0RxNo@Bt}RWGV$n;*120n#t|r;nkwCYC*0ug4IE|o?cJ2 z=!`6{9pjAu>+i|p<#dZ}{YKeRs?uJqg|h8>gbvD&Qw{NK9UgmOAzXT^#V)hLRAiv_ z1VX^P<=-S5L(QXhR6Mm5^J)q0b1SN+rI*Q(9o{MWT{m1@$SH+Tf+xjf_X@>;alBIX z0FELkXF=N7ltUXOHOxY&1^U{3E?mP!7pTGebx?g0fZ3C5F0&H>dldd%0f541KQVxC zkbGY7mM12TKAQ&Nk?y-v@kr#5_RG|I`Z34LyU=Y)0ZlG6$8_d8dKd}+&Lg|YZ35z? 
z`TL>1qk!j!lfa4=we1Sk^$dXS7)Ii&o|lBM$!*pN)ZDDcn$6m${v!JRcVuP~R3$=% zkKCsU^;xNm@JE#8_epQjuA^F}-u63E0@-R{x;z_s@Txcyua?#j1wc@4wyrZ3$nH&0 z)$gGFRS3V?b|ZXFGRWviCe?$bbZHZm5U;htk6;;JuVVF2Db}B1=5VTgl{eN-UWK-F zhMGaeWh|Fa?J(x(yX};UyvXgp2-)j?W7}nHQRx5zfQgwmLj!4xEy%@oO1_n%%?xs{ zPb@J;KJLZ1m7FI!A9j&OIAo@o+rUnX(ja7t>nTwco3o61tkn)i%*)I_mm1-+iG6b% zo8C4)EzMDNvLFY)TYN7L?4wW%zeu}j-;`{zb00Y8gISmchUEC!FJ-sZ^+X2Bybu5T z!J}2`oPJSP_Vqxx3jVm*H4y|DRtx zrubz{M<(RCJq$Q?dk;jNBVXcFFj%FOmUoh9ES;x#mlKGC@3UAZGAyjstJ4ezG>87E zP&DlEY`=^CX`wF0U{fMrE&cDbj33F&7|>tFYc2u+0P+8-YPgu#|E|TC)OBq3MUlNu z)U@N!PB{as7ON8}<5r1P6x0fT3D=N$m-=w@S{nz`kwU9_I}Rg%HuzaP6jjQ^5mp^e zHXVA*>;=_0H_)53InpbMzv|NdB2}LP_eiL;6O)O||I2EWy`@boOF}p@Cx+n&g=0k6 zuh51#l{2rLr}$dEPznKEKT_=YfP!b`ED^ROUy8P;0?ebPQKpvQ4PkUBRD^VjACfH0 zD{Qi-swoBmgn3sNJZ(dYMU9=gZ7iLLv(2XWQJpq9F*Cb(YA54f%VwQuXoYVy!wXhp zE4Z7Y0RNzQF{iE~A^Mq4@+*6Q34`_&WB{8`uav+-lp?A-W`5AYI@&Gqt<-I8qDLf; ztDi1)yT+VeuMe1V0&JBkT*QquXSZJX=yPR<^}VCd{B-u<%M0n-+R|sn*p57Vj;^*A zTNxO;bq*}iGpBzS?|~Cr0hDNkv^aDyP304(bS{WoH8Y~X0zqxZH)g^p4}vsMYo8oa zoLA(NI2g#5NFbl+Ys^IhBvB2s?|)+h6MfRG3YvpxlNs*F)5Z!Bxd3XPa!m*YPCwL$ zq;8YD(;gi#sGPQTxpk;e;WZ`AkXnXyw0myWA_>|S*&-};lOl=_#39%(67{b{oJN(k3f%mNTANLV=^W7qZfFFh_KBv)j{Pa>#%GDPnk4`+L>{RDJw`I_R-)J0^9#p)SM zk(`Ux74phsL%u$nCzm_61!H}UE@ogG#{gWHaxFlIipus)(){tI4>rpt=7wVMo@Fws z2Re{iuQ}8DhqPuPX5v(^luMteawf3S0;sjhg9&Uc+cdItD@*g}Tb2!pY#zzl zsW@3Ii#X)*PmAv2fWi;GnmH$sEvE=uTB7Du_t*zFoqfSz3x0OpOvU^g8+;y^Bc`SE*_L+~vTSU;)1HVA6 zJk=1XOq1}{ry{&8pTU%SpeQ3S9(u6kv=)gt9%|#i6%XaMJ5=&}HFi)^xsUR9Ak+L5=LlSPG3PerTzb?&qD7~w44QVF|H)(q=ylimX!!l0tuc_W9 zCMLejm!8?oO+Ai><4i; zDqu;dC{dB=-jsOv>}=&6f(e)se$Ieg|49mXMO-uY;f(zjrZQg0yrJ8{;a>IDx%MM~ z8)|CO$~fz@wpf8Hr<>Y!x8Y$dlB~79!23^e{6E6h`u)|d7k|Unihsk_@czHwh`H4- znezXQSc_7e_ywK9cb-t8J{N&%bI`a;%9i7iBdD=T7YWFM(o`Laki-*|QNDlBqF~8r zBG=Lar7$%#G-USoul6Z;LC4}DI15>8+Xc#0{+(w5QMR&Y=}Nb+;=ig3lubFsr{``e z-`zPg{j)RY#)!#FvT{ocL|RfY_MxSUE)Di;6wCm~5=+L1Ltz1b)y{?C$Any14^E_o zcZVFQo015$>OGL?$M!dDH0(T5dcypLF$EvEK=*A=o_Ot$SukgytIcGf<>uH78YYb` zfw6KcUONSB2Mm-%W5>9o^J_dSl>&EID#H!?mE+v&(9hZ2klV^K-4;W3*^g3^yzQRO z2C*4rZQV`Q0Ai=i(@T(mbNJDfA=?jQ!V&(ocg^e(Y~3tSqftR@@K86eDIOYT+?m6e zKZCmvYE1)hP+AAfEQ|bD#v4Ye4^I;BV*w|*F&xsMDl7CkoV6>l2wHjPVXautH60!> z@@!CP6y+uRLmD0BI|j*>n#>jEGqy9(_G(fC7I*%TIRRhHtl-sr zPY~kh342PDRmY|%`h1(ay0{Gt$A9g)jf&n$hBWk>k~yQfH0yRS*^xDT6>-D1^E?zH z?|kon{rS&=_z(VIbaoI7|Gp{Vr|>DoNUKYd zAe~DtS(eQd?!X{3;ToH~GbT?BB(`?N@^7q^sK05LvD3Zu+yZcovm`UK&@qu9{!Mnx zv+3ozW2Rd+=~P$U_beOCc={z}O5KHT8{0L*C&icbIM|%vZU<2=P$D{^mMJO&@KGCw z(s3~&-YAzHa3dM{$hm^h*9ryf?f4pGtq>;ti#tjxZ2}5ck8sN*&#M+vQ6`)gPAw5Z zoS;@VL{XE{9VP_C*jI}n?5QNqCE11Ip6aj_RF$UxqNylP-R$YntnwTie0OVW%|Yk) z`dNBByBk}(qXj>v_-)9|X(XS-a8mT2>}iYv-38qsD3RI_-6#(8mC-Bg^IcSiQ4X0A zIdjygG${4M40V$3OmviCobl4(0my41-+Zy#1bvQBq@& z+1og|-C$Oh3rxVx4J(oEf>JM$(SCR4g1Ry_emYWFfG_WPss;EfEyvowqJrHIhy03P zD$NV}TiFD6aE%+!DcgW(EsOk9Ty=ggY=2Q2%3#rDnh^;T$iJavfvx9H$PH{s4Qz}q zPK>ba_xt17#O~YU+Fiw&n;AG~>JBgGTAr8jq7)<4m{Wc*62Wy`U0GNtnntt? z*`*#JGgojP3?Mo$wF+ls1k3JZJ=vQZgw4Fp6|7|NAEm0ow0E=ehlepnqIj?r}^GZf! 
z9zs2NiQ zo$>e8Uf;tNajRQ+ti^8)gSoM`B{zC z@C*5;5Lc#y2EAx%lmSpcQW!t~LZ{GMr{VW>NkvV4s>UF50I$-jb}Pj}kAP^xEk*XS zAjM%Ql3U>e0RWaPqWVOC#1e~x{gi;i>;WT%{2*C~U+yQBk zTv+o|&AYl7{eVl6Opu--v|zaG8FeY}k5U9N~%CW04@1;>%tmy0kDQ&w0uclezL z;~;2QCJvo*x9vOPhE6Y!CC#H8Mfjliu&?o>B1q^SYJdoZoO+0roY=yBb<;~kM+v&4 z!T~AgZq2K9^n26H!^Aybmb4(Olei)|GPjuQ(jdMni7ge2JyGqp)1+{KHih$l)MeUyJ2O znbs4Tob=(ePN_kN4`Jg4WpL2Lzly`YR(}7cZNxn&ecF9r78^ZUb{{@FymM7krv_1O@WO4R)p}DsFIb&CDM}a$MfIU~dIkIl*@PR5h(`htY zAueZKk3IzFz<4`R{CGgJaMLT7npVO=r)dxo9lZeiW;CYht|)s5t|SwG#4_zvO?%tZ zSFt$GYLc?#Ntp`fqxe08UqWR14vF|P=zf$u8A`HF0v!jY-(d*8d_G`#N!=?pLGj;F z>(X7$_y!hXcLkO5C|GQqjrTsv5aL6yH_VVr;To&I&FD=2AT5&=jFv+4P324Fcy5-r z@S5c+q-DtghJ=%Mpkur@wBHC|#!&NiFNVHDz`S!1sYp;Jj}itPo9Bj-*%|=l&kNsW z*Sd0d9X%YnKS6vKItFfK5)GzitbzCxbiJtSKS~Y>JgCAzk{i@hLqE^aKRXjmaeg-m zdvs(~Cb0y)Az#VM`an5-=S;5^k~b|(`!GFIG5R9X?Jd_^w=Z%%Qv_jhA9}n|J)U*( zqNd^WTzg5TJ#%Q%R|{ikaIa#{*S4aY@NYahk?t%(<}5t;@j(rhqsr7(5n=>hbw7QH zyUnH>w&;nl;l$ZgRB+EHY)Gh;qBL4A2xcuWY>2K<237f;ZJ*#=F7NsJrdU)|=Fog} z2cSm~W9h6U_2h~+qLmw~cd8wipaz=$hAri_EL3y*Pow1-6tEq&Dph-XhM#~~i-{@P z%b1FbPI>I~sqTzHD{Ebe%1RcDJzyPsil%9jgJ%&$WcHmW_cE?xboU@m=7(K+0G#_yKSz%J+X*VqmN&@1JGgk+V}D&^&S<+1Lg zL8()t%%)5k(`t)z6Qx2Xx9am&1XfVAx{;EArCf{F60CLE9ILQ|XH9IoY##8sS~8?j zh`pg!8g0kr$IkSwx`Yj2b(u8N`bo&E@q=h1#Us2-RHT8~MR50DVpUd@Fod`$` zX;s61ql+%u8ok%6GlDh%cIGzOpyHwtYm?4pWp({Jwq%fWbdaf3C>(#0&C(vxlbqLwCgF-2$!;OVv>}}(=8wJzF zh;7BHoQ1WMd(WOAGODnWrY&E`Zt;gy;ze><@G;)#VP9n8t3IBOuEfJ0eB9f`#bUHA z-%)*12!fKf0b0W_OeSc5L2(j#nD3pQjpD((1Yqc#?HI#HjjgWD`wuOh8UOXeTr9aV zhbuQ|EJkx^{$1QTD-`CrxQy6qE25dvS1)wj`n!b%#6n}(fIACVH*fD?l%0r4Wf5Qy znnB;7p*0h)C)Af7#8%@G9A}bdHWzI=eO*q1+fAyfV;Z8qgVovKg2u0>jW8f=?@HiF$c#`i?Ow#8yz zbiDRPS+@*WFhm!dSXOp5#>)hBA_x_@<>Zx6^Mx+LSRd z+STjMyG&Chda%LF#$Bql^t@iYL)-^O4SdQVdo>Y9%r`Lrvd|>c%Q5_KL%2|@)*?Ou z7R6Oj$uOhT9qsbg_Q8UQ1Oc+v{-qpyz+WC9kXhp-S2Cu+&08W$Kc*l}8=7MA{kumv zW$abm)OzV%OtF0*+5-{PiX_*ZAF2cx>y_!mBPY+Hk+gfoogJT6zUP-`T;BJ(Ew)kH z!H5AQn$`j97^a~&O0*NZa_6#3rTwa~&k|dBeXtb7fmwI8YptjK-CjS8Q?wpm0OpQMk0k#kvdjQKE zk`5dLcPwmJnvHy4j|p7V7dhR|fIzfNN@;oAP>qUlEqvbemwuKx8mwdi3ua!U>_D95 zq%>q;ztwBc#wf-s8)CAP^X2ef%yy!yWMX?GMV7cj1S_9F5*Nf1Ra{^dKAu*LS)WmJ z4&#l+nY-t8ToL%pFttHMvpcX+G6P05KJ#XD8vaI;_HL7JaMkcd|3bz`Fy<;?d%-I8WKG~D z5A*^X^N~*d5d$;zIWe`bZ57ckI-%9J&YX^gRT~Rf_O)JAoc2oq59TOce3gPWzD0zf5ESUQy+TPM9IKsHp>lI`(N0O!P)}s5 zx}I;nobz-F7D%^U?C=Z2dhtb{Op4B_>}X#u;Ch#Pt(1qZYCu#5A(-RIpvJ+i5S4A| zp=rR?)_Pk8Jy%J9lFj0Q=&B6tA&l;{RS>JB1_f;!)23A*F~W1Tk}qHg9~Yk zmu5-o^$XAHWf}-l91PF~R_4aP=&kuqbEHR>h1n}k+)TGA3^q&7v9jdx4eNa?m_b^H zj6BVSH1Qb=c8N0&nPTIOS$B(KBE2M$Y00Ns*o=cU9vqt)Yvv0^ymZHdCUDy&?C{&R zz+xp`JBp?*2+Uw73$0T&VNwSu8}s-rUEB`4va$i{z~wICQ>%yqlb(^2C?pT~tq6nQ zr{V(s!GQuIl$A{9hpbTT!KB>?yy6=vdty4QBNm-qN=cOGiNEJgV8ub+vnS)3SzF_G zf2a>$tWO$hcK&ou=oYZ>>J+rs@}OJp)>jp6RaNUkIrubZHokHQUHC*IL04R28#bPHcC zp@KX{W46hy^aKe=N2XTCXg=65} zP{Znd=Y@-DAfvEw23Y@pMPg6Z3O7%3T z*iifCNW;ECBm8-Pk$6x_iG$Dy{#qI3N){|B5W`wMYKSIs_FRr#C2pDx+ekD`{dWyC z5%=fAG1G>-Hq>RcwQ~tefCsXagB3Kjga{+>N(F0U>ibNS{T9aY+;hVTS4cOYZr?

M3PYRyEE5%na;*)|ve2fMNN0thU*_e)P^ z3_0%ESMKcYA6F)m9I=&^f{L?O!+u`fhF=kEJF?^;M$OfILR77%7Ve;e4Ht6J7FbB3TNd%ebkFc(vsrL4aH(FXc0q!B2!e=a6XFpWIU@j>5u8ko^VEUfr z``!D~$J)kSM#RM3W6Ij;!e37g&YsTGCwGBK1o(q6BV^kd_e4~Uo}CrE-R#^*H^8&8 ztR{(7V)-=vY1jSwDNaEN{?+6*2oGEkWRrhQ0>!uJ-a%Qag5i-$!v8#oEExkq@6gdW zkB1oJ*^k`hdY-(9?dy1%fXrJVuoS_$e)0l__G2RW%wjGFiV`@ro`ZN8Q@q~#Z+F|P z^XddRN{m^%dPd_`?w}j+SRyOr%fYsz6v`kV1F>n6_e!3690} z(QbRPu7g9UtN|ZRkn6Ar6c|C1u*~O%5$hYuvfqqv#~}rhqB-8#Ym*&xyCGD#&@IST zxvRsDSKn763X(yGCHq7+PdTiWY)Ywu%vPGzLgvzKw<%NY0aXi++8G%kIVUN%f5JTX zY8$>=Ay0X3NmMFxG)&kcccVRXO12O$DC;1|4PnJPcm*UV%E+mE3zWsbaqx)lBrJ*0 zaN%;2WOBq}UvW5h6TG*OLgy#l zbAdgNI_PFHrJ3e0iC891(pTdE(xsT0wX_}}XmV1{AGm%F;~&ng@c~a)q(aH z{_x~ADxu=Z##qP#WSp z3N<$J^Zd*HvgYLn&&s^=cou1qdTNj*U#~`f`o-~wAP)7wu)XJD*R$G;EiTK4jMtdx z!Y`qVnLz#@zTUAtv~X+EO|p^|+qP}nwr$(aif!ArZQHhOv$Ics>D_(3=ReGAj(WzZ zx~m2<^K_bGPaG4lz$%FqdA_~Xou7AEyB1|U4738(AjtI9mnUWZM?A{qhYKhtvIS(U6zNcdxD_09WU7VohFXhL!}E?z`{Ts%K?@D_@DY^Zu_q zhuIfdr`YX?6Uf4(MO8-(KFT9oIHq}G@k#rp6pN8Okr0~QtTWh>HTzYbOM&dk{kD$IK zB%B1CeD6Mnq|l~g^|j^>F$kafsn=a}(xL(dyfe!>=s+l)ooNu8@v@TrMOToV?{HN5 zW-dBL&ZjG8MT{%H>=Jm@??YVEEqfy@d)r^r)7u&<=)5YIO~gZ0FRr(VjY>(hD7Hvr zFiFrt6?_ES+Ps4=nm`HtQum)NbzOEG{0X3@ueCmYwrz^uuf=2_)W0_H2)3olm$5Xc9}MknE=R> z?ZLEgu8Y?8B*n$`@}%^sI%%oyz2OkLv?E%db(O)v5!4N7C-d3Ya6**pjq@6ikg#Ck zT#2%4(Z{jyf_My7lsRQWrafeJ%ml49I6=}^AwRjPyQxxuTHgvD$GKYDgIN3(EM!Rl zusfE^zGl6%=wmCE&)pv2=m zzvJ>X$;YZ5pLR|TYh|b91W-7Hb4CZ$O?GQVb#a1ceB{p>BhAk z;%SN*G9!l6Wpd^%z^3pDx?AIX4j<(RL3-)NwQM#qm_K73x+;Jya;dz(g$hN>)xt45 zRLEHKQB%1PU103S)wyF#_}2+ff0oOVdX=xGYHgHeC@fg43V+qU6%Ou35vSYbxQ_ic zrgJJ-#j<l2h%{jsxZ@-nwMjVVNbY@`zmcF|hyT-pg&{3R#qX-tw{Xy;Mo)P7wTsar(OVm^uzjtJgx2VB!SMUPz%O zTI5_sG&)6ca|me z8ehP*7}T>wmmHRJGoR|BJt?dfYS8M9t&ri8vOVl_Q*4b0HcU&~vZ3ai^ql<-{Y&Hll9Brzwe}n&FS| zib<0lmvj&m<dM&#ErA`gZnz(_j0`Hp>V}mM7U?=bQ;6b?_(r`bkf-?Yt@V_rGG8`y%EgV&p z-zMS>rZ06z`*WfIY&JLl7bq=hh=VE=769Ol`hPyDTK<10)gg@y2h0)pUxStuKkQml zA2OdtbR=*&9|19bJUJ@3`jLT|o*y=V<@J;&d|2sY{CLgLZ<(#>GXsFwSa^AVyqKP8 zaf$7Mce^QWsHu4SnEz5cTtX89uQiEyvs%Q;BN5Dg5?NwoX87UhnzXS zzZz9M8T21?+Qe4yaAsS>P(SFq2>iH7$gnkp2SqQ<<`Zyrx`D?AnsBGv+r6o&DAF`_ z+A$SaL2i2ASC7xj$IJakIH-sC7W>kBEu-@nhEcq8FG3>-9~1`|8bNo$c&Sn4VmcL~ z!5e85q*+DP!?F6-xHKSS-q8rE)Rds2%5cun&1?YYF*PR z$C5Ap7u)7mK6Zo7G(n^9L`bDT|_042i` zJ;vI1I5YB*eAdJ4{oQPWz;kcOVDKn$lK$Z zq$6j`metAb`|R-U<>m11!S!a$@$Sy#!|n0@{43irfs3nwz(Q{*CJ}@05`*=O$4BX`48Mq;6EsuNh;Ko>u0*YfwWrrnE z>rRd>?lss4JG}d%c^M042wvnI`f<{BFfKc>EPv8~d!>~>Juu^pdEXBr%ebfR@xJu! 
zk}~|?AK+dfEo|2z36VIg#gY#Pbdvhk{#A8^gIHRGCM+F zao%@&#o)k+Zu{czq-7dN!E?>$p&h(X)_)R@>9%RCDN6Z478Sy@Ru{zKULmGbC28^c zlAH@gohrKWf}pyE`4{FrF;B_RX$#e*-xkwQf*uFs;4#rI%kfG{PA}D0H`P_oUxOM2 zm!AEyJv&F(?URJlgM9>vqmFWZ1%%ZV%8AMzeCz6oc~o#Vy5<)v`dXQI@btZQwcE(7 z7XYE4G|I;u6%#}L@nl6UJCb2);{b+521()yJfz{9h_wDM6n}+gMHe6N`0H?(WW=~Weypm)4FYnIdFGKMyyO*yB^H%eTDGU z?SyXVk{T#xZR+xxjl8R!B~-SF?Gs1}REhW0zi1(|LXc3}#B0!N@Xf2lR%j5c<{~8G zVaUN3hEAA8!;ZzXj!(E($7=1-Ir7Q$hdj~)nrN2^WW-8|mKT3{r@hUdf8;Jhehw^1 z^-NJZ6U>e>P*FTnNX|o)-0i-M*ezR#1HIzl<%ePlmvXO|baS;kba3GenuD8FX#;@C z*BP2M7vRCboYVu*O@|d72m%zRLOFf!m)XO3Oz$bNjqIy*4*s}S|2^RW$Uz`B*ff-l z?4PZi4F;3eug6cHxfxj?r4cI7c3FfM@jYTcSrTLd7pszkGto~14aY+5R-L(_`APR?#B5>x%bafnSD82Lmmgcxv<|_= zl6Nv(6OZNv$Lj8#i;W7Ea0Rtr5d@aZalflez2k^}jArRl#KbejdS~#{jf$pjDlR2s zMu4UTtV4*Z4#bvX0^Y~5iVW&^r4fR+KatO@D%sDMK1DHe?jdo$DD$x*33YZxAvtbc z-8xb~hq1H(EL%`p)}W`uSly*&2rI#jLX5};8nukopc8a#bkI8j!jRc2UO(&lVrGNB zaVhU=LW8=(K~+c*uo*`Z6qj@;mtF|$h2A)L48?N(G}zH(A+?4~O3JIQsRW~uXW0!?;{P?)|*zRaJN5^9>7fYer5I9 zXI9^mfQLigyi%vst|0}PM%*0?{m!x&_YL4g*QBg22Tum=f zcmeW@&|T$==%~HLy9SZ7&AY}Tn>f0`zkM}1P~7u?43{BPx#CYl`?eSaTF$N`tIb)r z`$O@i*+$L5D{w^{SP=%1FCJm*-#X0#!<4fd+he3}Lm9Z;T8`I@}n`lVq zJQE_hAQPE5?sPuv-tXRJIzY|f7R0u3?BOl71%tbKlT}E49sGT5e_Sm^=T)jS1 zE$-hvZ%*SZF*=oZ!UNM(y21c=(zMQTa--!J8`~_kTt}7^q)N!LBF+}g(S?E{luAw`)<2Sr4kfH?C%>@w`kmkvxb|2l|zv#7zFI zx^y&dVARi*rILa(`>qLVU|sc&agwZ`R_{3>*;??t2v)t{PoxifYj=2OrSJ(y|Mpti z#PAJN-Dq3hN2?8Fkv;=ScIG`@(Tq>Ht$xo-Ibxa_?bxgVIXlfHW3Cqaj~#gi2Y70v zBsQEJAxrrU#zRcJdihRxgRxef#U_q|GDo3_K$Jec5*qcU0hUQ0o%Mb zlcqv4cCF+1Fu$_2@vd)iMy0l>+gvzZ*FM&emD9z~6i89rs>6aPX;{KNyS#{L-3YIt z&SaOq5p!W)yC8j}e9EHo8&b2NJ zCh&+WSGTUKWX|WY4=VghJd<*WfK+EB3Nnn6CiXqSFuc|+v2?f!!Piq`t@bRXqb*~3!B>)9p3!` z&>9_x5kMUoI5F2y^Q7I;>&QJz04+5PtDTenG_Xv7j({ypH5pACZO)cEERIvI&h~x6 z$MNO^mHGlxQqp0x{!>8il$Axvo<*lo#p#=CdgEKKgz&S8pWzli-fGU&{vaBF7AFuzX85 z-Br$Mw#v8JY%4Dm0^$B{r7gg0ua8f5&INPb5z!Zk!R#*{tEm{IPJ`0A?9&V9MZ;?k zy)`=6YnB!hGDwP=v{~#O$yQXRX=XP9mLe18a+#+Qs6Mv9lGeIqy0Dh)Ys>LH*F?3n zQWd3Qe63mb1W?KUJw9(cim^HL78Xxh?+AuDS_RddhnsGcWE$gPqp3U_t zq)Qjlj}WWR9n#bQ^VUVTHVI_bDjF8m{c8Ky^gxgyx3#S<8j_pK^h-BoONKZg$Rk(H zxJN-ZO2~=kiP)d0|9#LqiYjvm0tWzq`30T-zYNm#Gq=-qG`BXl(swX-a{oQ+ zWvN-)Zj8czSL@Qt@u&%)54)@j?uq<891KA`cZ^&Wx1)n{5UwWDA&Va&rl#co_RL8; zsvu%D2O&yMZg^;Z;9{IWePo$xc5l=|c50b-YUq-D&=Xeej29Y&SqKFQUMkpo)t#-Cmei=ACN}3Unv;V^*AS{B z`9eCa%q7di(>Ap)jOSY*p-?d>Rsdo>jgb)MphYS~K4kpVa5@r0-v5nU3CCr1c{HtA zX%SDin|FI5Yw|ywTRcDCzdzp-VjNld({&%p!;sj~2MAKgKJ2hNN}?iKR%Gv2_)#UB%KAw=`&ia`4Pt z`*{A`Mf1q=dZp^@Owrh$eSvy>e7|p9+B|<_^!OB=J>fpytUYmtFe!jAzLQU^`Ldzg zV>pkpcPzD^^b$0|q)CC!5kSDH)>ADhEHHR4wqbb|KR6q`(m`)R<`G9P%jZF;uR)Gu zQ{6jB53w9f)n*-oNGDVY+A3o?-eZNP_vgK!SIFJz$%z|NKp9r$K(y@6E2;~P&ju;G z?dms0UF)$2~YQkaCK(vXnQKoDQ>7Z8(JdH z)V`$7hHmW@vd$D%Z%`j(3#Qb0I;MDKkVWL4t(!CbvxYP{e#wKY-73ESogHjkIbteA z%a-vTnQt7QUtaodw91YKRT+HoNCOHbG*?<gzoZH7hSa1*|c)=7n?Vdht=;1Ci zP#wIdZfMMgb$>?EzFPQ(%23f5DcE;~b!$MC`+m0sPynDTX7NW(g66RxT(S(2{#Zb{ zfMrEEjnqgYamDOXW5h>UY!Z#Wt_H_NG!L~Zh47QJLPp)oxTGrN7dG05s+4OmKRWI& zUnItIfYqA3r$-t%95oI?=9)mKv0xx2L{Xgf>Ru4b-wx7l07A1iKiU#>9K&8Z zuU7un0F{C=tX|RA*l`@Hcp)T3V2}ZPpt1y}Fe?rZ7@?`JMi*0uRa?TJtSl!KgS?ui zWf?@q%VSH3Wia1VB^f&{iKR1mxAZ2$`R~QDN-E7#qf8X~&up~9IuX{a#=cIHWi74% zgGT5-7LI7`bcIYCiXCaccT?%GxId@~-nM)lp!540wc3SdgvxQ3S3rj6xwYgdq(#7^ zJv}a`O@ck_k8gb`jG|3{LM@e6RVk<4+kM^lGr3m#fx;|Q+sPnEG*T>I@01(?OL;S(_Vfb4}O*0bT}avSTH52m;NS{j}ong^P9<%)?TrxE6MW!n5q<1 z%`=WA7aTOQ)))mIf{Nj^vou{jfta`IkW3L)9nY>PhS-@;Z)21ybE3UKcW_cB2s5Gy zA5_bpRJ{*fTWP5NJUhbQtmTs%QqQ{E%YN=ECw=Rh| zgT!H3a6D6E9z7N)3LVUQKLTpKz=`qE+)1hP_+@DXWSLcnpi1K6P5pP~Y3Q$D{q&qmM~%s}XH>@`X6Y 
zGkILE$2vi!sbzA(VTBUivpHF^3M~xnk7GmlZm>C4OB9qi7?q1hdarwVWJCjtWr0Oc zbRR{sewynZ8w4fH2q;5~*!%Te?;x^y2BsRceN7bwmpa8in{ej~4Plb4$M(mxI%UqI z*$!7$Zo~|*T#%flt|Er{7Jd9jeqpwpLzlNgI@Jr6y|*`a?w6;L zm1;jx#P;6a*j#pZy*E3j-)-I|;#!W3=Yf&zvP?#T#cJsSGdu^zjaxGp{Dzqfod*^7 z)#0n!_XZ;Om$-c0%)we1;l_GNkd$S?hvCZ;ug}|2A7HgoC`3-DJ1VzME(O~9$jN~R zUA=fzB&tY6(6q78-ZoucN9W>FXK*HUBhaOMh(lOwi(sC02jr75?~4|&9X4f#d8};~ z+ksG3+r1fJbtn6Ez)`YI4?_izP zgzcIWrh@hkUX8lDT+Tc@aO`e=7STzi4gD&g%}@08f35rPCf5lf^4k2;rqh_$l&7*j z40m64yNDQ@xG(SPoPFzrLrQ3R%!_n!ug%Uo(Tb6BDP?-|J!XdH3buA&2OC(1>xJFJ&kM7=D))Ny zlJg24eJuuEr<#jDUa!`so+RHQxfy;g_N;(gVCgpj-B9Dh33G;ndF~E|j9`!bU*+^? zQ8L5P!jW0-T|)~-q@W!Z;YdZ#iWnH-Rev=;vI9C4zn*(t%CT(&q8&hJ) z$eprplj64+o|6iL(t*#K!%lTs4z-3rHxRF6JjqWQYF8$&sU2M}&VK5eKB|lMGv$Ax z#>do(zrOd++&*~~=ZwAWvs+dym67wxwqcxaCJ@5&;1NS3uO^kA=e4%-M?Soo(8623 zI^0)_72NWCnL&EV-}V0tp^v4^(EXS6-aLejG|D!0+Eg`Ub0`(P6pWIrRP`+NcZTgVYx z*nHXjHr7a>0svtC56&z*N1Ojd<%|BK-b4`oquxrq$lOI4nR)|0Caq1=3I)_>J3R!*2r4-f0N<$&T_l4rUpp2`@WymL&-4P;~5*x#w#;p{|0ioU-6ym0u{2Kkd zqFYKSMFlW>KQ+y>VsyGUtysG0^uDC1bxBle-QGS9pN5Ve_rd(8;$O@OBK#9SwT2-? zvuwo=dlAFgA&Gh~W!oV-#uzdfG5lX)_?HZ!%iWoU|>oCRxYXm+G*NL5qZJS9z2eCK^h zQf#^6>5!Y_V%iKo0ni)JkNmA!lIlV6#;I}%QuMKg@TZe#BwT(-J_~Ds_slS!I-n%sfYvQO&V@%rB2X50Wq&TM^@scG-bl4O zLq`d+$BRj7ZylQlZ=Wb?lBPk$Ndz(s7*s7fW=$jfvGi4ofDuJhp-c+U{0eTzfo{Ob>w`~F6)}=bb%;;f-305z*<4l0%i*-%~-G=fe7>@pRhmi;^mRy zX3FHMx&ggvLSO;w4USi=@D<{RpFqbDLX*z>F1aebjqDK2XwVlu3olj04_(Ew82p&S z6rWI@!~=MvV@d$6h$(q@2`o$5#BB&lvfQ8wj1ete5iyM;MO;3u>Khwy6E?uWgnLOZehCoXVdH8iM-a8-B+G0(`nN+lqY9l3hbMV1*fsfdx!PNK{z(^0n#ZiPF88pWMc*Fix(=0*T^SK0yWM zzroYkNnEc;$l?MXhT1Wi;u(`p>7!Qc>{1~pX(e&@J2E+U&c&zmCCc?0SZC$k)tj*N zd8TILzN;BL4M?f1kLE>oSJ#p6I#JT^yDm}JlFv`V?3<5?MmfnrW!tS7Do=}*tlNjX zZ;rqFsoY%>qMW+q98bRQXOE^Wx-`Fpe-^YhFRF^~^Rj^VLm%19@HDernct;I#)PfIPZQ)MlP zzHck=J^Bp_MG1W-kM%!VG&t(op<0m# zN7ahBRmZ^(GTOJZG9FaudAsx6`HGI&o&v`o7{AI(Qk>^Bn%dhHCi@ z%6Z})LvyxvIqYZ1_*y{-`Q{U+*-Nr{!^4czo16HI1R?_=6ElElX~npgc2RX4dtsSc z2UVntD=u59(plK{b(OC=u!Z2-oVfKK!E0*=fF(V5&8a?x9Tz}SMhR{?^{H6CY|r)7 z-_-y)Y^htUXFE5?ivJL(>VJY9PK_&vQA_KpJLx-*Ps4<`CH`H@^vt`q?%Jtb{RQA& zxwseD_phiKvNFPq-`#+C$1{w83%!QX5?X!}1rgpUH=K94OfNwOvSisMCabt@@?%|( zw(82yAHq_at3`}x5=Twq!>l&YepzGJA z61qqLq7T{1E!^(|4HB9wM9oNeTpfq-vbDQqLWw{0uUCaYXp>2xNg%CRqxpfKx#X(X zbra2SNlC{RjZv}{+2J#jGoSOjy^A}uu2vUO|MHP!cD@4Mo#z;_wX-*X3S3TVRhaR( z3m#Ezl~BF8{_8c-N@58>;_4}Of|xu)b>rcMf@{2TLjiXSU?NR&0IPPd)m~r6nL#rc z+3?ERear=|h zhXfHRscDo@Y#_`%ureUS?mhY|%AI7Zh+5P< zOmc?Vkbbs>bpeai;}Aap2~az2G|}Y0LdTwAPVE8wK@@87g`u9<7KGd?c6t!oZGJOo zl&os=?E*0Ctx)6@br7t^I2}&{*>l|j>tZULA|`O?Y4Yy@7HTNq3$I#KFWQ{G^#vz( zMaSgifXdKGRKB4;xk&J31V5rFu_&fZx&ls>noL`6Q|HjH!srx>gl?q+&Cp;fACzp8APsq7osLsQ!hd?{Q-3Zz z*6ZWz1<#$pZV@+Y8I}xhMGF8#3ifWvL-#i#qrxD{g2sRmybe{Ue~8sy)ANIBH1|qh zB|L)-%k3pryJ|s9OW?}|C9Kt{=|skYmUE0y_Gi{K0-7= zgJdaAE>QqfThH)WTQHO)?^#rd1mx6l*Wqan{OB_U&q&ozx~1(Ne+s(ko_tbqVKVPN z_=5B}Y}(4DFxVCzgwHx#92`au>eoC+dcd*lMnpiKrtnvJ9&G>2LN6qb$^)+1KpJ4PRJo+RE`o>E_1m!d?zK zc{bG^ZtmM^=h|FzM?DzTqb_6!CCGNf7WPqOPqpzsiKw$lZsA&S9qgv*$W?V=wo8Ce z;lId-$fl%d_XRt|Mh!vbqMU2@X5EWo35Z^=45JcJ=NHwWo~RgBaNZR=jGI@B)Tb7b zC@rGWjGK~1FB8OhvBOm>*XR`pPp$4YqZC$eL|Y(UJmL&YL3LRLQ%rK&dK;+qWtoA? 
z!ioxjX&rH$&p6|7a+BpK_`7-@k)>HAx#)-rTp2m4TKz`$EOr&*!=}7Gv3cR4j{mT_ zb@`aDo{|AQ4#xwwpfWZg!H;hGp~pB=gQQLi)np1fLx*_JHv^RiDLxL}wJgF>4_gK= zCv$rK#H}f#1E5Tqy}1oSlTm#ln(;MQKskr`{rhCz|AC#OU{{c^Tnt)MAoTmXPEXgs z_he+#Ffq!Hi6)tuI&^ofZ{s78o?@XMCiOAg9u)XZ8UY~iqHm5BdKv8S43RS zQpHc|F#GuWoN#3twQ)SRe!8WLw}tCs2W+RqoF9Fccv?PhU2ILEtukVrEE_}T;aM`N zGZXU&(BRvvaVb|jic=H2uyF=@ePksh_MsUXyN7wXe!uzp z>N5OmE7MSTo93?t~Kv z78x+N0}}1l<0>2;(M_FA$B>xwXMwSZh?GNNP{!L07##mMk0-5;EMXQlOV)A`jgCk4 z#|AWTjiYbL|AJD-SoVUiRi@V0Wm-AMw4vA>Xf6yI>Zl=~wy`0?tXpk30+yM@1EUGw{N>RVrT?s)Bh^b)tbROhr;BOR%*w=w|NT z3Sv~{j{a3HZe`{-{0FaiqaOJx#b@Hd-z-qZK-@Uo#Iw-y+T$ftL%}QxK}7f zDp!(erMdrYfJ}@d&qFZR&Cu!>C}gp<;ySi*C}%!5=v|LAZg|fWh!eZ-a=c)mcA?m_}WomF+|YZ z`OtYz9<#H2KQYqaT5h|icU8EPEu@eRdhher1&!$n>!yC%c?9`W`6B+guGgK0j$=y2 z4$NQRHulDl6bym>_aq^4S4xDld=qc5Q6FRTZ4DglQ}Cgy-f8-2%u`%&z?#?^{G$!Q zO1{neVGz&b!xH#&xFY-C+mS~sax%f+k1_t&s`~3x1+ddMGO{wZHg<9_H*}=^KR!r9 zTSs#n{W2x?GYx0gTvJLOWs29w{V?5fAfI{jp_?S>(idsgfgla3g(sD3%1H_3?v-fV}#h4y_J>2T?XW-8C)sw0F?w>)a9Q2ma{A!E{1EL5Hh8^is+qKf98Y! zd5srC;I`hCL^7mA?I^i&Y1m4hxN2UtaiuF=!+t_ds_7?Xaclb+*GFU^6KEdTx>3N* z^7d}xw3k~A$ZBwidXmYrrOrZTTu=t?8($0z`g1|R70*;M-jbc%m_fMS1qUqiiiuyB zWQZ5{kNAj1x7W%rAsj(@Nw-(_9wS;)7{=nG>GO8nV5U;;)&u-N(-4jvu#luZGd+d9;`{ z!U)HB|8QUz30vB;=0br!!6Nfz+p%&WA~y}bKx^E4ij4 z6hUbusZA(>8_95UfqSKwj9PbMUTO0}?EuB9|(@dXf1z)d!Z_ULswBGF;AF7KCrsk0dEvlV88IvU_uy+#^ubeyZ z-8kolM=L5Q*@nj;&|v9nhg4YJ0dzpChdMt#Byn|kUhRJ7J8?azJvRDE#%N_@adj~Q z45g|&P++wozeC_=H1<-4SqF>6Q~GXtzV9_>Kq~y*iu+TE!SiuUcA^RYq>g90Xj$iX zc6S-vYz@8Yy=3_Z1jwkH!39S$Vh8J31E{F)=LisHs>K9+|Lc-Ib5>QXw`_9iJ zXI=UHr2VZR4f4z+#ED%`+$JwmyLw8TBe>{`pzN@MkUmmN)uCm3{tV;|2x!{5UKDKk zE6Qmdoa`B}e9k@wj(bUqQU6MssJ%Q*XO`YD-26by~FlR@A!zyJ$#QN}< z*T@np)^F$YX-0RiB2l?m=-I)4$>>O>q7j`EAFMvb#VF>N>t`{={ozF94a)ZCV6kkk zZ0+J%;|T52Q|0HT(HxFN08&Y3r5T9BGltqa;KXK@ZVjrPO&wce=fG*|UU9kIdAwu# zS#rs{y63a~Y_q*gz>kp;{{Xjn*X?OBSTDl2_Ff~kViR(w9z^Oxf}U)5ymqm?yB$#pDv-r^z^R zB>EoDIl3y}44j^vqGRQFl%(&dV5b@JY6G{kOXLZMN;Mlx5r@Ey{gLZOeC~OTm+VgG zMEAd^QS-{FjxZ5A8Blcx3ZIc&`d?gv>jBj8{g3$|}`l;-+e<_!f3M+&A8@H}!9a8e)Kdn(d61+o=&0yPYW z^-wS46nhbXfNv*~Lju!SlV8{^Qp-{35lU^FihH(nHrJXJ3r~ZdcUiJ*yn-C>o41RL zkG-=GRIkdBce-7b_?>K!oSkzWaWbInC<RT%xQVKuhqDKB$GzUc!R7nP#LLR#-Q!lrLd3<|8?ttH zLBmAEO2ft45lHs-?AcJxsDjdux-#x@INztYXcWX|?#& zU3aH6bmL^V-#Fp!sy^)9@T}f05(A1b_{mFKP6m}x8eZ|4vbpC>p<;n0U+lPGb@~s# zrq2}osoaNsT=3C-_r4?|5!Y*|sz1}dyxoXWn$z^LnO;5VDI_onf1Uj{|389=)HK|i z0r_v$ickHVzXANIbXFVg#+LyN%oHHQh#jdIFd@&!m4UhDI<&G&_YZmuwF2ES0%NKP z;9p6E7CD~7xMmWvXQ|)e$dJ4#0;!&8)>FITM>#F92O-mguX&AM@MFW~23IpJjtTRV z!aK_ZijvuX6cp^P(muYo;bYK)E2Wx*F(#9V8h1+x{~2hJYFdWpy~+jU26C!ui($)~P?H%#yRoo6vh10# zN3{oof{-|*x2R-mIu=jLlb+4bx8|*b=+<%KCqdqnX`C}OOfJpa{+)VC9}&a6TtRSd zXoS)WtvPZ28%*^i9)du8T=1%RP%Py>l3#G4MiRD(<`fLQP*g0BOyifu>oNzfrG+!| zRX1^U{tlxMZtc&ofHT9t>7K_YXa5ht@(?eXqVMt1V>t^?V0}H+L(${ixy!cIJeliU zqf|s1@?Qf`WTwczj1UM*!*t*QFNzsVyD#8t<#n>Po8NnZ_J^hmsV@jV>V|OVcq_5lzq}?M4mRQ9Gk!4}l?V%_Q=M{Z z+C~j%faWPBMZAt+43^@IoL2R2Y&o7LMx?wU$k@=HVIkzIb0;rQ);X*bJzj6|@7kpn zyj{t&bBkZU6+0<`F-a9X&`P5kl;HLWAX|qF|wO=W!Ctm^b5D%LTJGsPtoFZ)fuhY70U9ew6gzHR91wS zW8i1>ZoqaaWIc0>v4d?<6IPTszgjD*I^OzkG{&iSRy3yd*7czL&PjLG`pQ;Zfr=Z9 z;AzFpB-T}!RYG#L=xS$Um}$?zrc#=^caHs^mX?HPlNDXucU;_YR@ICwA5QAcjxki8 z5-0VRHU1jjZK7XAZqs~~Vt@8x{0B5>o%XHj`Wg1W2lC7#Rnt8%001@=008a(-$1rD z*0<5sH*mDIa&|J-HFj{Y{m(%?NX7b>JrDOiU4s@!E8>jhp}Z6xE=DxcREp~G%bq`T zV2{dVz9y8_Fpb_VNv6sa6knp`FY+MIkMMcViT^2FWZ*y0Q40Hy{`9@d=7v?MROpvp z0f>qrAq?+NKB^}3kB7K%wwH~_K=Cm0Hn2bWtO@#nKlZQJGn_#gxCUY)tz8oHm7kg9 
zDwkzdMBmVmOGi{FF(!Ch`)F2P`zu=;j;!f{BaxN%wYy9bkoXUI+F$_IYt01ZDVRGmkkYbi&y{9!TBGjNG6SNjpm9I2A^niuL*ZFUH=1J+C+g_H}x9|A+UzpNTQXBx`k$6J_bbo}|x@ z{VlQJ<(mG49qulzl$gY2KSv<0NCO`hFg&Q5-Mzk5>|2B zD6C#Mi19aC-hRh96T9Qwi9=kbR*p54YLlfP$6&-4kjjFB~&-;2{0f^y3w1 z2X`2_JgmQWpQ=1)j_U5Uc}nCk+~P&#KE8YwqOjRnj20ZPbRuyrzH~GQV=T#`gegYW zfy%9tiTVlq`1Kc&OR{-Vr#}d2kzSphIJ`$hP!w$$4ROdnB}p1K^^3e1GWH|zn0#UE zd}Lx9UawZN6MDasVI@U2P7KdLRuf9*r(_?xqCVkns(4{97McPP8C@mp9H0I0ueM2g zmP-va$#Blagr);j&(Y#JwvFP!7ufvd7r6eAa8-rG8ufLPYN}vXU`rFQNtdbEMA5OH35XV^Llf=FJkqr8KT{Mk%0 z&)DEx^6GTd-RcYU$c3&6gKf=vnj6NUWyNe+Cwq2dzr&YBSV!a-c~-RnsT=Szb=Qw{ zc*_ShNl%4CNI4y{_bQxM8F<*1Z+E3P@|-*MEuEp*4+f46Y>(dyXv71z5H`XPoivrN zgPnI^UCYm&1<-+(lu4MTvxA4rHaH>8kL*FtiDqT^ef%}nT6M54Z&WBRJi2JndehpD z?!Yzeaf~@N$nHHOPC^PkAC^&?v)!^0w5E4>w~XniJY#9p&p zz1P>QLU|-mG?lL-#|n7NGv_q}Enl-BAt4ahMxI&!eWl0j`*gJRv(hvCS?O{7|Hr4D zv4f$pjnhvI`~OYSB$aPlELN27te*X_54u^Jl)`zAs4y|2B5V!Gd;wKuY1h;Na25D> zi+2gN@$c?us{wmIB}@sB=AD$8_oq79T@6%A>SJi=1JIC-N3}C2KwC3WD0c9PH3pkZ zCK)^0qxoCRM2BH!HIy4vs-YS?q`n<3>^!2*(3w18peegWtM6#EX4diGjI5 zy4bKx;2v3beSR^qrTS6O1oDuiTIdrJ41SDT`cyf^3Ayw<_S(AnhlWk)^aj>|i!ZxHUg2Juqpry|`rOwQ1v41ub}*G+*@@DK#?vafOto#t zZYHRE+UEN7vWOT-Yp4r2Rg-~r8d8#)lFMkg!=1FgXFtc%yAWYSq+8FUzzQI&y)Yub zJKcGM;k#UKJO=J^dHpk7?YtSim@(jOf?GXcyY@t1Ea|*Z0Y{YbuzhsR`eS~97RAkw znG#N00x%o_ruml;Ad?RhpAcs_rdHT$b=0G4(FWxiU{u=ljo6%`u>0hsKLXK*-REb2w#B}LaV zpLNvH_8g|JEDoxZE8b8qd&#i(33nb!kyu$MMW@Nq2@TrNUk6%VDT+L4!dhe=j`Tx5 zlH&=@Akjo%60SYbSAKb3d{${PV?{BUs*SanwW42cUr#=bSxiJUbHe%Cuw-11G=ZDw z3AEQL2TNdQz7hE?Z){uRKW6FR9O$hHmjJt>Eqw8rVxRVF7puS7)w#!Ja4OOyMEoh- zd5e~~5b7o0q)zqj&B1=@c5l_Je_zI>jT}vi6wB|r!{gD5a5!b7O?V6h^9v$qhcxk+ zruH&)ACnf+Zb^E4Vg<)qy=2@ww=%fNFF6@L3E;^S35yS_>nw#keb6w+QBCjhf+cq$ zH?i7>Ub2uSneI7mUPKC(*k3ituDPB0C6)QLLNziSf;pp>wgDIEJmyMcHi+Did}e<` z$fa&ZJk*0D0F04Ib6BlJS?QUl9IhQUtv_O{EAUz7~0f;IM^G8Xjz2@q@Sk!D;k@OJ5kCgdpu`CmvZzWvt*eq zVKsWRzE*)Fr^y>IWc(fqSX(?VL4eO}yPj6dAGhjVk`337g1R}>ma6Qq?9g%XGngoi(gYWo;ZY1hU7xSm8;&j8CXsT>)51id zB~0f|sC`GTVxbCkEutfqKOHdrUtD2Bj+InvnDtWP5s}*j zf9~@mxg5cC%655Ff(^->Us$E_;Ph5@ zhulv6&L=We^;A1ot<*RSVo1M(q~n*Td1chmxW@wMlqukK3h?QcRi^KR|AOdWUO&pZ z?feljPk%C0VO(450<$o%X6tgAD6f$UgEVV87#NKnT<~s;w&(}fcZPR4hibZQZ(34J zW?b9SRWiu?yAeOgMjUqVs`8LKKV>$Nx~2V|*+O^&woxhN9>3)(Ubp5mUuhZlZECS1 zy7%NbbpT$a$la_JV!NU|1|V~~7Q#EIwa?rL9DMb)y#|e*;W>39pWC6=wRCkQo%05J zeX-}ed0oaau3nBgV;Qd*`A4s6w3TMA=w#Ge%~SiS6aGX1>+4B?8V>LenirWq6@9c9>*fS#%9@mW5M{_hd z<`w9?%?J2@*4#~2mbJJ)kH+jz&HXElEf0HaNycw%X_VWh|kHB}>N{i#24`r|qZ zb;b(liUge~Rfkp+DDuaWw54{jklmn7v`q|e#{zw=tKfG0_gbX1z7-0T30`z|@5JH@ zne(2g6w(|D48M@JH55fzvaqOT3DE%+6nns}U#W*WNf;)}3F@{5_*XbcTOnE!QR+T) z^poaOq4XCDtv!IHlUr#KEO`vF{7pg28Hhc0c!LMJEooCsm@#1DGxd^a=2kb?)6EpK zx1L3ZQ!wB{jz@xof9aQX1w6nFzcYZ{IPmbJ5@H3f;#e?Z^=0)ha_=q^3fZMV#&dnV zz0i4fCM>a-?3irxnA`$gvj}nT&WL_*NLucrxeMtVO;SBePO0oOSg%KhGArK(9HJ)q z6^l??N^?Ik#m12{4#nm<)1z6M8!pC3+D+2eg(pu1`)oV5@O1W$j$2o|)NnVGyD1!-F6jFN(5 zHLSjkS){r5dj>Dm-ili)-<`)E*1a{=o`M2*3Qme1bN&K@MBQhl=*fiAou%)h1I!se z;4Lvu4AZ@gm6~a6_0I&IoE4O5yrk=7G>#WjYs5D zobk-Vymhhbt);U?+~gaz8Yo^0IRN3QHGdI2zVh#!G`PmD6%%eH7oQiF=pPpyhTRq6 zM%)$Q6YMhHDb`k?}*@$i0WJ7v6(^1N|px!)+)W zNq<1Q`U6^q|Ih0Dzd&ncYx*CsT^IXb^Nr?n4sXuKXyiDv^yX6q1yl#+rACvw6%*nE zXV(>Ct;BDih6B(IK!=c6y{?D*ORky8H;`%LHhBU~tW;`tul32o@Dw}q9F+XgUb|)} zPtq=AFSG}N2as}tJt&y;7zwn4m<`BX+n;R%XeUIDpDFB{O~uW37$Qr(Kk5eGZ&;;t zLx{ZJsO;cA*bno*WWSOt7|<(2*(-z86|m4j^m|p&1!!OdErXwktLK|>=>&bROwFa( z*|@c9R(=!K<|(b2>tLTROx(Q=8F}2H047X0$2nH<-{mC9xh)QXsfD>2H1j30|1FAz zZclsX9$>m6IlM6>4R=E*)jR{QONUo(EZyn5=eUt1x_dQuht>c%l0oJ#)@4*aAmmn< z?go}DN1edGl9+#K->Vf83_GH#4>7={eGH+_={PhYt;d%k>iEk$)FB09depm?)}~Oj 
z0X2|UF&-z6_m-}L*iGMWqW!>jo};02#qph3GCuP`6B&z=Lj_6?YkM&d&WeLIRbQkG zD6#nSs_9A<=K|}3t}48B8jT`*Eq#|j%U(t3%`}LLSe~XKlu|imeE88rg>Fv2VDud6 z6$+2Jd^}Jc`x~M7%{AkSu*#!LCzH|Wk`OF&`HsfeMZdtkDU3Hav3%!dA?E=rG`I~E z5bh^sCs;wNnhw-2O?1~gQ%5J4_ciD@OSC#1n4Mv5rK`&g-cSBJ1=46YJ5J#nAq*C#P!fH$bVZ2zto^gXd?@eL9Ok-7&?Jp5PgSum8 z)@j!LLlh#0Q5d^SaR{0!L`|nGHRx#0O}`@hNN!Q%#_7eu)E>}8 z-y4BeCHZ2bM+9Q#?_dKq#$Sw_vCEQvwSTXFEj~4KHK(#ErF$vy&a6FTj)*9V>0sdG z)+STPex&zCXa+NevC>UXPwVx_rh~0rghrPvEFx?NHd%Gr()RBF_1;tf_H1%94cH+P0mMwrMD+&$@aAJwA zwo{bJy_|lQSh6nJJ06ilIB&th<`oDPlCzNC3%IV;&NW`WagPGCnvC^}4cu=bdjvxH zk|71r$sb!L%Eds95*3}>=t6_m|I|UAogN8wOT34E)LE88XKj2Lf}bUCAqoHh;s1nP zJ7awZ$A70~tI7XU)cQQvu(8oPRFH+W=vRS*+toWUE)(+~RFlo~6&sKLCXvJvA5w|< zdd7`OBwmwg2TIddJG?vC8acRgy9dlPbtfO%*BLm>9&lzKGjf8rjUqxl17b3r$aS1$ z=Tje496FIJbfn=Y^h#`iz_%F;>1kCWkj%r!2`~lFW?j7c^v4>n^d+YJJ;j6)UI3N% z3k?#yJKC+9zYxKAf(wxBE|sMJ*s;c$hmAv{ zN(GRxo}UC=Gz_%7pyl8pVj?3#kVr~#y981dis-up0V#!p((h=L)PT__trpb?X;r`h zXCZB`BVM2&$4gGVQdP=fkpW#olAh9OLLHS;%Msc4+JM|il$Eagtbgs)#Ot6S8vbDH zM)ijDR{!ty)~N$ScdK88tDTSY>xCObcl+QrHiouOAn$NW&s*=DvRQDAqxlP=xc=to z$*?JFW(&k%8gZcmUYMFtYqgVnEog}FQ!lZ#=k$>RSwkAs*_6B_L9zg&bbOXDXqK9Z z-dyV@(UgLeV{7`TztbT*Pd4uu=RfQ5m2f>V}*dBF9QjE1*_`+d%pVsx=3}iy2~z)l+Bgh4oUK zb0{|Df`^nWKeWhzyyv(Ar*B@lh(@=<;u^c?m}~vq=S&vP2Q3d^|I-U0#~*UsHAWOo zH+yNDtw9t_jNkkBeNlQK9E=;DjtoT-E$bEQYA6`Cd;=mg_M8zR4z~Tunwo@ZDSXNc67$NjylLw& zAiSs!#?CAo^E)?Nzq+-Ry5R(trj217N*OnQ-AQEs@65jlSD1d5ys`W>ctW%oe#~+4 z>lQvSB-r60$y*WVvo}pr$=^dpZPxWaK9qQ=Wb6d&jNv`fs{93r7F)1Ctxs8~J2nO0i9W^?fyql` zZ8S!2DCNk+Zk>DU#aoUk&fFQ&TlBHY6l^W#8hwj}%irlc^$$SWU zeYrQ_kM8YoJs3c`T}?5jJ`gzuV+1JNvAY*3vJzCuFop}v<25P^;0Y6YeCx$Wi&}@N z=VZ1Dp#6vbmRbf1c1MaX_Nz@p@$pzOs*hT;%1dH7YW$Oa!o^q$DsB)9Wq##nD4lF$ zsV*EIsVXJgMLQWXtb7xILotAm?D5ddW`W>sOoNoN=GiO2(h}{4=swL8o~< z8burX6I4U{B+bgWKByePmW8ZTeQyAao|AMF2U$eW-SqlUd2&hSjXQ6!(Ldobs5$Jd z9m`s{YJ0*A1F2)i!7ZR@4r=#+BlmD)L&DGe8SHdrUu?B8-(BPa5Knw=E4;8sdZ>U z$|MPGoW(Ah1jrqjLb_Jr{bd`1C^+Q2~as;x~Agk zfegOq7@-=dBsT>iDQ>rVFF-pIpAm3v z9zW6CLT>Ct=d)+`xvd^8^bRN(WngZF0uz}#tGBkQb#dGG)Nj)aN$zAkx{U+w6oj}U zR|Ff*w-?gKPiBSc{%VD12gOBfp2!^meR(QbPX~4wpZ-z;Z}ZSBo36Q9k+;}Jy(LD$ zHi$E5-$59#y379C1|9gNX|~zx6sKObWNpQBl`$Xu3$e{cs;P!z(GhM$@MG1pvZW()Jm;&yVU;J=L{^(4SGtL| zsL?N=SdfKS$Jt#d3W~OzlItfc@37Jn+oGyuwT$ zxzr3=Snrxl?G1d1lpDeF&P&kTqhd_6?Cq}YVpbD#t$0BScOOE*#OGM6`}5k6`sTFF z3mntI$@UWZ=WSrUeG9;Os`mz8{Z0Xcnu-gWjSY|d*)Y5;`;PvO%djR<%eGsEY-l#X zK~@6xai%)#rzlrF(lm?$v3p&%lZph;L)4g#eV3Gt|q`^HSA@KPzvi3T3^kEPP zwLysg5NQb;{w)s?R7~^K+gvK!k5+0t?D7k>Hfg`rl<2HOu|GElXqJJQvzX069qOl> zK`B0{l;CV^lm2;O*kpbXlzZ;9b4OjA1WqX z8#30GhPxqN0B0<`{y4!(J zxZy$S{@!3K#%aP-uqthiRiT1T_WLU6g<0{FhBVkP5bKA-@X?@qto>8TE+NPzksYlZ z3K^eLEvA-?g6W>k)^KzaRQy)@*@WcEH4@IXA8-mdhR>OODJaME%|^KCU}CVtP$RWI->G#9}p(FJ85z$73C+I z>iS6UV)^RsSuM(!4h0A#krc$OQyZH<+i<`3ImS^*n$hr4GjAX64a2QHaK!TVfb8Ex z>u?yye7aO6pQ*uICYvt`^a+z>O=zTGKEJ0H2%vhKF~wlX*t=iPpD7zrrs>WeR zr9Hrjt{5q%89Rug$-z&kwjY$ReGL=yANGGs8nE0S4J zoBNdoBGzK)qn_tnO4=2s@>CI3BG=C3No<*on9uJ`0OKt&zyg>x%AqLRtLE!vJx9q` zM+h;*&=qgkkd>#Wu%8E#$HrxhlN`vS8aQRW+J(u0$0eUd{W*llWZWj?yRW6_hTJU8 z4}*n8j@v#Nvp;j(RHsaf;AL<93W7AQT|^?Tv;OUpN1hJD+G{T)a5S%AIA1+r*{3M5 zudr4R0|`bK?6m3vlenX>NTJhUm!F>$Ka`j3g0_}Ys}=7@T<~hvf>xSb4QszvS;c5S zY<{Tz?H-+N%0*B7%a9rhszPaP9&n2p+03b6XvSzbNI}7wIiu(_7Hj4}Wq}XWEZ$g2 zEx5*xnD9dxq@In>pjhuzrvF=30Q-7BoLQaF+;tUYf-V|ZgPqJ@AM|=a$L1hea5Un_ zMfclwu*dxY?9EYzdABvJSWvqe!>Y-4puWScg(N|qPgj+4Wgb*F%ymY3Ij zi26(yw~@8xb~j=mz}r*9+B_W2EXaFZWU$A8CH>*;DB@dqjv?t{jFz!QfR!NP$%)^gb z?6;*dp)ijXD312esjAaj_+`QQ9(x1RT}29=3)bz7PBc!cAMvZVCgdsWgnR}uuef{k(8cL%HgXvhW#n$i+-HoFbr 
zDdlZ!0?>J5e~sCf-@z$P!jIz&F@nCabmbsr_y}0A2$#{?Y-EshSF^4HY(OOUWj1Cld`l?zbhPZ?{`@k9 zo~~!!iWmEn_B9~j$>7?Q0m?xzKtGJ|6KxcZ{%Y?_%md6`H?{UkB&FTf7*R( z{eO;QMg4Dkvt9RUK)DfaFf1lcuk`$s+6hWiN4^;F&kidsRmg$}Rg?N(J1nZ#qEuv} z-|n>iVA@N5q+T7Ly45>d_%|-`~@$K zMD$lra_PBVVHJ&(KgOco#FMv*>3A!?zc6Wj-QC(l<`m=eNS#cqeFEX}^j-~o z*feKyPuf4C6+g_}o>=*y0}TC$9oi|2`hHee;)K#a_U6PVfWm%21>z|6af8SRQ`9js zqy01q&nx02P{m^g)PBiXiD{@G-594Re^7u@IFOSzmhs;oTM>B;a4WI9O+0#T={z7; z;xNb z6hx$uKdQt+Cxvs8YI}NtUye4=hwA-> zb|#OZKLE0vKkxUgY~mUh$u42WDoOMp)lJmpFj7M?=c+d+pBUpNZ^iu<$_7{*mZJEb z18S!)^C&=Nl?hW(Mep|yb#61b`z_Mw}HR`Tz})5oQ*7!S`-Ao0khCqDgN zJ5e1hwV<m7)&qf{wV5XvdX^l8|dqZS_}KU9=5AhzRiDB+RP<0)2FY7)u4|v}cyn_aPtk zt|n`r6IF|m-?nrQKBJNrat4=UqzI*KZB2vUm373jG?v~UGDr!jigjn;iEB%I-b>YKCb{D+jr7xx7;J?$g_313@T~3i1|LE$ z%GgHWu_hbQ!V;@c25@z~@uX>{nxW4#;nEoQB=yFRwtVLF=gz+Vy}h~FYY%UpKCJ)e zw9K44^4u zqE9&c^S*ry?T>TdsI;aF>wt z;Q68U#RX=I1bknGeQQu5O$h97%taVpC1}{}5wvWIP*;^+gW_+2^TA9dl?1ZS^;5l9 zl#zQ&QQFsM`4mJeW;l2wZUpsXeH^Ed_@Hd;>t~dIfwKOpoKTf`&1+u{Jzbv8EckE< zvcl;*kM<^2unT8f2e}KZs>Tq*Mr^U^SEsKe<*Bh?EWbb@wGE>l23&=jw+kuosRaii%>n=w7y3x7N`Pujz>0aLwd+d^_vb?k@nKuf0ZgXawInYm|nH zCWbeBs!4pfIU6wg{BN2D>5@ulkDphd7#09P=znTR9o?<}Z9digPet+nQ4*(VizSNU zbE1l~7vf*9V<=gLLP4cJVlHoJ1v*a_zF@32-A^_#>0-i?%JFr>?WnE?Yjc1KVREe1 z?f7mIv#Wq+IdlRId;r>iyVjV0Qcl0VB9pfD-t_xsMLPvH%c z6!9ZW!jXKJ>Y8r?ifKT;;geEbZ2~FT3RVD%j5!{%`u71l&~}L$#x9UT866T7? zow>QiSoy5{WCu_p4N2}AexKfAV8oXhUHItmNZ-qI2E5_R>Ywj!UL5#p5xWOG8rxu7 zu!Fv-SQXnm-VcyJFu}G2d=jW_eeVM5A0Y7?O#73^R)K23vEuMz=&ARSB`7JWDOS;R zDLZm)_2{L^S{nuy{;Y(Q?79vEksE8!KY}<3n!ZZt3aRcQhFSCr2paY5jYDBOMK$AO zR;;~Fy$1DYH(LY4pM5z_YB03zd9S`)0KI!6MgDSQPn7m6v4V~wu4$22SBu)-=5Zie zoepgmU%e0s63Y@p2^J9wm9Rb;FeJrMf+=?sR&!81zOtm%%{>2Kfu{OY`idnUl(6qA z1nmyxf;llq5obJQn(8q@Ubfb))t>reO`Vc0=TfDHHK#u`+$ylxse1YXpvcX1eWX9j zaqVZ@8WM3%8#ZKnsRA>!Et*YCip2muJDHS;!@sor@z2yf-&sm4 zkRgVxnsg6*fAII-39UD#Zf?W7Il&P(_4C|mh7314KE<-4XPv7_FT%1&(LDMP0TmsY zZljKf&qrBoi7%>nuFYpBCtEGiaS1Le+MZtPWyvfFg!5t1Hh|wdlfsL)qwTwc6VtT^ zjP@zl|0R+WP^179BsY6wl=b{O2a+ImNIh`{pbSRMDt8n5x3&i{iz*T213yrXRoYOz zFoi;#TAnOTp+iwW4UlM+aHe+jwESUprU+UN<|X{YRaP z1V2n>>QUp8e}Zin|3&J6h_$0oCkW%DT3SZ6tR&^AVRYHtG!ZktJzZttxM|*BL36u^ zJ{`3@Rsj&B?b;f$Uf6k|k`GGO5q1V*pZRDc^XfoTdd=6_*}LBt*Dyb0!l!P-K$H=n zlfus^F{YGBw&FxB42cT$-|W||{yD>Te(Q3{%t2;NoKA%C*ruQ7+3G}^swXEWwY+aj zli#DUD^fv_r+vbg8J&->O#TaS-ghaqSLu~k-rC4lgEiu}rf`MvetebGxi$A-( zspyUT>HwlW{|q8?>rKTCHl-8eC<&L{rl=WLrc>=`?rCMx_@K$IKccm=4sTX_ zSe>dCVqmsHsB;HI>*ZJw`NZD}|)ZWT+^#yM}&NBE^q!vhIcdx$7{vuxp)@5xW}xZC3O7yd?9ke|rpI9`7>DQBt~qCcH4# zD77|dBu`kzrZM~7eGow;*_@##lCE~R

}1T%6qtIwSd$Kn2Qb33W4pO|#G3xj>0I zp^{6s7TA@r4Ouncv|kDG9Vu zT0HfSUXA!4z1ph{sN(65UQKo}86uoa@<*>0A<&emTsOdY1C`q$2KM=YDP`jA=FO8a z2{ib^)%N=Nb^z5&6H zg)h!2;95&&Nfp}i?$D+a_g=nnu z1c&;B)W@1Tu0>g3O@oqYY7Fq>3~71*YMpLk5clM6$w@e3DEg}tBms^)hKT@KL%rxH zT>{2x78kKyy&0U(YlulAjHXGMmrHpC?FF%fb`vpYCVpdO#7FT#Oz z7wnG@h58zAhWU+v{oYdlRN{XA=wUB$8*M|iTQrbi&NegtLTIS?_uc#9sGlKj_`tCQ zB8dcJ$sGi`T-+o#R88La8kP<(d9qa~+5?OVhTd<{!^rwn*11sAljO!ldQtF|o6H+y zMW6+`-C#{niq5)wr6P%nD`furg2^{$S7on=`xN6n`+*wupzq3>`VSOufa-otVom3U zP%K6gKF+8ses&*?9ze)#uy!X{1yKJVunJ26`4CY#*^W|`wT)QM8W^zd@09^^AWy+T zI;?J2jXz7h(`rYEi8aFVHLp4k<;0d9ZviGQi|IE$2b$H6-V;RD*L%q(6X!OQ`)I;s zk(1Ih6KymcAeL=1IEvKTp__4yxeRo9`+>L>+(}ttP*qW^T6%q&Kw-{z$u4*7lP4NV z(UHXRf4D4+ zLshmNsoGZ&@t7Tp)y*}H3#)Jod4)s|Uv5zRoFVtrn!iJWMm`H*JBc%3-GR;%_;m?R zi3WTEaWAOJYvU72YJE_8rtuDJ!nWS8?=akX9&v7w&A8IRNa!3F)+<3=)0{S zZs~eqOLNN+Fso(VMeL)9T?G2^#gnpGj~`fxl3LMOhOo&+>nHaITufUV(6|i0f9$mo zGv#zthJAI=NB$$L#C0ut=khqQvmyp<9?Iz|IiN$h=Vs|^2pe%_zckjQP|SMrj%y2& zc7;=tF}WoQ4hMTf&+N@QOhyHlC#Pofx^h2@MbotliKWHj%Hsh$D8>z%TSSb$1?9loHY^hMLD?>4x*{U@yUJ6=VKC$~xn60lFd?zNJ zsgX64i`%(S=A+}M#f*U~5mn7=t|6lx8-KgIxPZL_i!z(zU{Pz7E3?JyIh$QnJv(E- z0dcp2-zt22pu%;(QWIl8bWqe-m=~~aDw@MfmU`Nq+QYtxmS@xBDGg%FiyC8O!}++M zY1%3qA}7qn6GO5HbowOhi1DE;qt>rFnY76>tDRd@EhFlRQCwe+YT7<&&GMtvMV67> zThL(Gi_f0P>SbUpEByVL=mA1cD~`~-@LQ-l6hncCB30ov_Ur?E!4^3~;0X+s){HzP zr^3(7Od2BF)%OBCe%O64f=@GEbAew)Ug24Gw#=)T;o5F`a0F)>9c+b&REFVHRs(PS z+8U&@QU&053VA7v*hoUa&=>FFw!QEu+%cp_Q41!o6cxuFg?G_|ey72yn1$d()W_o_ zNS%r+!`J!4%lFQo;E5r_hM3N1V1hyxzZugcv%?#{8g+)YE#JtotKGsNZxg|vgyO8D z0xb;yn0eYF@X@cbW`o&J^l1j5X{@-O@|C5&;3NW$&$4YX!jIPYtj>Q*7 z8@nizIANn%(bj`%BA}2z`+ai@a#cRN3Ua2STB|B_JOAecZ;jlsPiE68zkYlUqcvu@NgOXL%!nWnEBSQ_(J>V%C0S?ab-t+%?4E~>#C-6u>9q6BwCxxGm z=l`T|`3cwg(T7?6N61sWtZfD#!qCkt3gP?D=1is{dmi~Q2aI_LEmnkG{lK`lf~LdB z71Ey98~jqJPOf742;*xqKVzDXBU!|jmtNl%tJDsD-OUV#UVwwFkRX?o zzX`mIKeb#1sp*$l(az%r&Tkyx>llcJ$59)jKT{R^P4KhNejTPK-rmev&DPj@_$+kI zndJd<(zf|)1TyG1Y!COqC*?Aiq;(;I0`O?g`FAd5>80DbRm4zB)_4_TWPgf z)Q}R7+Khh`wt)NWQL!emJsJ%vC79 z1Ky2)M7=QOcb#{Xk_A3*`YT?NBh0-RT!nww5X*sn1b~69nHSGXo-@;(Ir90rSEEZ| z>UV`dJN8{4R+EBd0Dn(@|KIMAfHc76Ff49pn#t#3u#_Sx7 z4bA__5&IdY2B~b=9IzmGKdZuIhBN^Rx1kK>>k#=~5S7kpkA!w%Q-$~E7u87%AvF=9 zj(omN$0pXCsUT2AWKQg*WlZbkmPaK|Bn6#8>3614WL57n)+E+z`3uVrRy8bRRu!pX zP3F=tF6xPQ>V_ra>67}~mPRQNYmKDL>ifB@TDbDY1x1zTAz+gw1+D)|iv>et5bbS2 zp@gmvR64y zb_?$<4bp~=G9peGwqUQh0{j`Iay)bc%n?hNJ(JR9Hts!t8L@?p4nNqvjU23yb3Nx| zzZIO_`T)hq(z)Sm!Ilh+Aa-%{{rQXgg%f>Ez?UDsxvN=($?KOWMQAd)266|`E8xyo z+^XO6Qt~Ef0=$Ic zvGK3YYF(}nB7NrsqS?D;uIDYhD<=`eLHkYV_^R5FzWKI%nA{(SQ_^=#e4&(^V8fD! 
z?t0h_q^RdAS;tcrdQ)9(e(W0xDyTveicu0!0s4Ulahc<1X&f1<=VTua zX8PUw0k_Bs!;E=~Ml4B*WYzij%11Z!u&A5_l+~Y1D`y3KTY@dNLQvr?c8D%A_A!72 zfE4sxGri#3$H(~>u6^i;aT}BDm^3>v*9p|P81-pGOVwYPNoS2`wM7c)T^^vSG>c^ZEO|ghPhz!xz(5 z1ZSI$6135a8EUP%AVuy60?LxnKX6UFepKrg=>&T|o7F8Y*KTAlj}3`|&z-@2b6uPPfUj(9WutFtgCKz}P#mAWfy6fq&D z{<>hK6-UFfFYz3F#W4p5R%3l9kI_v-RB<^H`+D}}*wsYtTbf*&dncb@@lT~?&GzZG znR@zVGfc-VMIv3B#XF))`qS2vKmJ5bTH#4bCSWg1oTxwJqM?f}G5sQSjO%`>&#M!~ zLFq0uHznWHQf|D~pS!B?7IlW1sph8RIJa_jJpcxm9w?3aHi4jlXe-<={#o!goJ!&;88O-o;~_ zPpUa5FpVk!1iZ}Mw-0-L5wv_z<ko6y++Bw)7>RahL=-XKSi>uZ#>umqh{?EaKl26rh zE0fAQ6Wi!Ioocv%HddTF5s+Z^t`o^vo2tKE*#1aOdY@>*A?i+!?ig*|fmh1c&D*xs zQf(3NTpWuQV7-o$6p}4#$--M7iXVcP6{uiMI^#rJ^ zewp*%qjpuTJN&8yo?IDTw{Wws+p;m->m8P;p>?1&qv46Eu_bNTaQ#oP(zyN$ti(TH zMf(Bk^1r}Z^>45yZ9)ATtZkK7i2s1qQPcMniP%Y$5o4zPcEpS&TiEEpp6mPJ>jDQ) zbm7d50e@$D*9U6lIU(cD%ZoR!g%d-%dqDSKOd3Q?DGuAKjOlrJk*8CG#sj{iUz7&7}W%#u9X#g zln^my$UK^!jDdt`qU^_I;}J9wg938RIfL;#K@2nPUc-CJxI~50AvE=P$?&$0o;|^B z`d)LB$kYw7zojz)deymk4xPEkUsSI5Qt;9HqV=P}Bmjjb;K<;_$-PghNC&w3I&^VJ_$z5 zRZTjs7abRjooQ6G-!E;u^u0hxi0fy`By`D~7f2UkhcxDA}=t zpEcc)52B+bZBcV!LhtF}vAm|YS!~Vevu8sw94UrLOBj(g%0z5x;YR#TFUUPWBp#Pb z$!`MQqSvxb+_&KyW4^@sIUwClZU4z49&D6Sghg9cE;&Ie8_& znyp6ACmzS*Dl&23IF^4v+JN}&%_`*woDERA&0@qIz+0MV=s8=WH7KtpEq}YK&F-_t zt+-8KOOM~90(Hq9^{Z7z56mfLQ#|0xl{)gsY0Hxv*5JpF1o(zg#MyV)i?;Bi_j1N5 z9t8I8ARM<3cR^<(KsQh>taQzonHDIM|8e+glQE%E4G?R}?;bi64e*7m=Xr=XIzoJY zBh93ZbD{HfCd`R{NyF4PR*g990bK;#$$+Rvs}9wXRc}Td^s2g>eGs;M0=F*+I@4z` zx5NEl|MK9+bCfEZYuc)Hr=o_nTBXP`{Zv)c%2HZjW>*jYJC*_~&AFd@`zXa}xi?&v z?+hPdqk9fsZs_LJ^BAGj$n)OE;^CQ7jz`RyN%i3<%0qDIY?Iem;VzyybO$ycmlBf zcL28kzw-kB>z``>H(>Oy2GTP%u(JB)8RccmJ~P{#Nu)&N)cQruV?*r4 z#Z8%MbF8LIQD-Ql8m`0W%H|ri=a!zs!pmueykIAyizSK4_ z+hp>e>|QzPAp~AoyzAV*f9$~&pvdlX#Fy}g}W>`gp?ZfwjgY+NL#52d$haeEb> zT;Shlr?1fw#o40H4=*X!f6y};v5k_voePX+0aG2oGZsqMt0e7e540j%N?1M+Ho|&P zJkT=XNQ2%b#l)GljRStKvIvQAs+t$Wc#@{UHia}Y5q+Um!ddlqqK;~e^}~<>HG1I( zjLEkPq)G9z37F7A*&QWJ>3)%Txe6!W6yv)fbzU1X(+v{|Igolq3yqtNbYFO@xm$Xo($onagKFgF#xDm4 z7EvtWz|gCK^zgTul_FMi`Q~J5jsnWnVf=@yC5eWTiMg+W^t|NmIPTJ4G6?A=he$iS zWgWTlh$VJk00C3Z^iEYVbi+eIiZ6Qf;P@=sN2R!KA+Gba@L}sJo#np^BVBJ64EkmB z=v`u9$cTsR<+C1CA4YN-yfk+B){67mRF(}R z(pIP@Hjs>D7Hn&(Yz(t*Hs@%m`aha(atl9&bZnIVP}oh}Nvdmvk5QE~79(Jl}{gBqvapjmq}C#42n zBW$dVmoZA^6>5upf(NyHd;}6=zy(!a8=}~TZWvB?>zT(d zXEsDC(oF@vV8&t4g>r9bu!+Yfqmn`sp-3sf~WhWeHDfi-Ueeku6KILJZqh^^SoslZ0~T!c+{VA z&B&r!lQL&dO^yeVqU}J;vq-momHQ`%f%e7Vcq=ZZErkPtb9Rj$7j)sM?iQKeonM)5kes= z-53#7Mm61q`YkO-j;o!E`@kWKqjEAv)*+YxyRo3dmj4R;+3AE1hfrVVNxo!nTr&8E zuPN4S1RHHPnAZ0)!URPWmlpn}6e9I25h3N}kJE2CNAdAorArszz@pjQ>zSX=eDr5K z?oX@Mvg}-(nG3nL`K+m;9mh|zE5M&q(43mBky*Omv9ATs;~o;fquyrk1zr^>j9(sM zZ8OOq1=8q2egoseLAYM=ocZk9vntJ=a^BEiKjGsT3N_`r(c!^+$Nx`JW>X?j@HZS^ z9Pz);Bmch#h@a+;07>TfHvl$B?SGSGG6l?1PmX#e>S1deWykJT07+I>7AgU!PHbnv zHD=%SX?ot?YlBGeXFE76!ON%u)2`hQCAztG@e1B*mbb+Rz?@ zG3&B?*@ns~*u%on%EgDL+ZO_86Q$l=!r%J^%AnqS8`*A{-(;rPzZp%$VqlhK?bey% zc6lfbkcxpC*3*cTa4Lxfu}N49PyAHGLd-5^MT#A0U0E(VC3`S#wXSX5)2M zZQHe~OCOc5d?4eY*}la)=9Mz)hEkqXELX2n+Pp4GLNC_uc{ae-S=CkgY)wgaPG+-Vv& zGNV;)=+C}h(oJbJ3|?vHH9rH$hX!1gI~z`Q|%l(mdOdEZ$1iJJjH-O+Q@h1qIV5vLki2)YDD^HwNS_kz~DA z{xW9yIBmv6@2la)83&6>B1*WZr)=95QFW4A$`=h(xb8O<^u>(4X7AuuxRC^yea1C~ZN zF#9(t*$>LLRO@MsKJCNd;3`&2swR2H@Gg;Z%KXOhrPb@a?7P;!$|sBmsKz?+(4hA+ z(G7`8K9C_}9VzeU86lu4wBLsP@|sfJV~?LY8_HR+i6;7!%Wt(C`+H(}Cz(r@&H``Y;?T`rV;;cE2M=HWQF$)~|6d3;Ssu&AJ<7?;O9zuj6{P z>e<-XY&ml1ifmcdM%ZqzH*}ZPWHUMZeFFk zu;wa(EpodTj$YJ$Mc_?ZT`aI*wKmJ+kS?LyXGL-MNM2ix2b%wK_ zKe_fCS_T+UnuvE5(^38Aob-)C8>io+k{=I zxI4JOqKjT*6P6v|BvoJYJFL9>B~NFgcX~vwXe)Ra0`y{Sw6&W>I~@5GB3-TV?+){Y-)t`6V(u59C6wy_XU+0& 
z;rdTZ5%B8#Z4KRHMFx~{G1&6XLo}@3^A_1EkhS%IFG-=LH$)IZQETQ|6V#Q&DdTMI z07^rJ6n{0vU+(yWw-bjj^|`s3ikdkVmUT=nVM`%mOXxi!N)}_kCc2EaYsV)VVnY{arAo-4?cYfjJOg0F58B2N6$zh+Df3>iJzM6iPcQ@zl{wY(QspEIq8>uF zhEZ6FTb9Jc!uA4X>vBSL6H4>swaD+^Q~2RuiE8AfsQ794^$Z$M=VEJQ4L;Y@wRAk+ zo7XN3cM>8Kb$zJ0b6=j%CKis?-lp(#YT9nKG{lX>$N0{)_e#fj!eCwt454XYYl#Xa zDjNG!RVuDzkICQu`m)tw11kQAu|=B%x`iH{W8qbIhfZ?2wg zZqIKUzYy=OKOfHCA9oJ#cs9HW%iGrE*bv!V{;2EBy+0zp4ozGXhZ0MJGu$@LR`4Z^ zul!~-O5c#;kk>D47c7c1?;KTXFV|i&q%p75EpWD}F4#N_&NR=_g`gGboT#j^IXYIj z`B}J-)X!+rBdL|9rz@hqmr2;DWzI%<$Ly?E>l<9LF6KxQ?hlXSms$I5LxoxtC0`+@ zOgITuv2%6V-Zh+(%;0#IiQ5jNY6jF%iBk~i?y5u%G0`zAE%-j$#?Ajnm9=j7z-)gg z;G(pn)^;q&l<|cyTtrv3k>#p>I1m*+ps^vMQPn9ZJ=HVOl$o=Qee=MzG%}jah5y5cbm4<)k~1?_VGuvRr**^qRmOAn$v~)sF*lx%5*D2U^q+C5 z+$ZCpV9I`&A!a!ap4jprWal4SZJAA!@!+MayGWsBuiIx4 zSr>`N2vRS7dE;Z&L9kq?Dz_3AIas1gie<4G~4!P@ve#m*sU>_hzJnMT`NgtjtOLcs z;Z`z z4IAc7mqrBw>R1|gjx9JNuo1M$`}N-BM~*;jQ4RPvA!x{GD~m+L9w2Mls`-d@>Lg^0D)Ynq30y&N%^b8N=h~EpU%t6&Gi^U0nCSBvd<}6r zQkgr`+}vikb~dJ4w!=3=tD1hf3#&bVSv0LuRN1<$IlvDW-S3$FIq&VsNje^3=56W; zdm2Yrobi1=)~L_V+zINrGYweK%1}F~=sF%Q0^J&L?O3Ie41M^Jz1B6_o$=_@DH&bb zz?2z8pdb1ciD~w-#E~)R`(#J<{n^<+mP#{)U^A9sTq=457f(*rC_!oIRKea~3S?c9 zgc5g@nH2Rhh})6mi&x+yn>ct>h%0l}-C1%cz~k5l9RZ4TFQ#;_MB->=PibE#Sh`ZF zMZIH^{TM-KzSfrF- zCY>(^K!MQ(yKwUC)2$BGcV5+J2@;7-kYeJnH(ho(o&Hl3xM=Cuo?h{q%uE-CZEhxF zcm@ytWm5j=I_NY0)maO&kDJmBDB3jK=y#oE1nJ94c>LG&zWL2OZaS=ijjMbj56&Ws z#>X%7Xi(LYq+SU>cBL|<{F-{^fK#TY9ZLN>lpV{E2-*2O#$VgG7=>FyNJrBy8g}?S zYAg!xl=nIV%YV)aP(Pqe+4mNNU zq+??YkYoS2%KP`ilSDCX<55>+Kyv-EDWqyh751X6tV|yn6fG=sb+T@~x(kzw4lJqc z8;I3ZdNwBCUc0ZQ%6X&uFY0kN9Nx<#NiytbekR3X7WK*7lBc{UkRxPTc)8pJiY0vo zPwg5opRzn7lEV_@&a zmATQ`>zg@BmTG6je|J?l-Mnysr|x${Y!#+pY6U3?s%05vsjASXBmKYDtLpkEZ`wOcayWU2BhO#@7 zK8%tCwa+Ov8PorLc!y{4i7?7t_CesJ@&?k7f_TnD(k}P6Z61izQUX%Z(z@U%b%cJ# zbZqZkB84gN86j%eRF;shK$T(^AUs!8xy7xRIms4g{0!maoJyTqe2W^5{`%0<)SsXV zu82Tb0fibZvL-s9fJsCQbPv{`R9RHv>o>69!$ZG}_0I2x?|euIh`ar1JaEKxkwNet z_mDj`8GD%HGn636xNAMG->6JEaBnt5f8cx5aTrb_=m(M%Pyb%}bOpMSHO8zWtTlIR zT`Z&Kj>l=ud8S8ssSnz2pHKR(Cz3+8_ZaVB91{-eGmAGZ0Xccvp#o07Xt@`Wh?j-# zjPHSDbl9{+JImw%S0@%jUI?RjV3{BdBipmXEsMOvb#Iqq7hF@*-SKEtLA zXHANtlxh$Zq-~I_0aZk+fHbI8e+h-`Q7}x#QFBGDXgm9n6b;5j!Ij7V1?kuN5AnU6 zd3`hmzEaZ9{Z0PQ({s8hQB^TAHyEk2)$u+oL?U+)TW!%^@VL?7O%VuJVLh`e#K6K# zY@v6%@yKGfjMZJJz@<5u+T!>I5z6FS&GjB;Q4ptpNO&``*knh`%d(w(yt4T_ZmIpu zgKdL^-qw@y8|3|5mhUARDV6jo8bEwwrLG(PLwrY%Y|I$Xp)whww^nYi7d|<@^6QTlV(D9}4utup|t0U_3`TB-0;DvZ= zzPzmku~bWc6Q?D)H|*f2)eqvwj;6TGj>mj>jZx8y&q_|M$T&ZwJA;Vm_Wca_2p2NT zgwsdr#vqVyyFhai)IAtE4f{&19omQMW`iStf~SQDt-G9x&5CMa+gcFO{}?5W$@h0w zDlC_z*pJ^)gE~9ksYS_{bb=5_^Cyop4ia&25j1OatOJ)-A=X~kzYF`FaWi8<5fLk* zb_yXhT|S;HHQ9nE3nWW^kmYek-T7+5E?vE7QOnw^Z^eD_4cPqp&&O$q zxg~=P@Hoi>9w*xW(c|`Fs4Wk2R#8D^2m_Az2av!{|eO47;&B8dwjdZcqHZcesgCEs z4pm=oLrH`ID8-f@ZZsgDknij+mQh1UsRf?1G?QlNY~{p3AgJ#NR%k!xAZd7e@v=RSRfs^|u!aSoCzIRlmYTB8o}5K(LfAVb)}@Q+1u&=1T= z5;iCp>ZIYRJ2|=@Rsr=6*kj0|(`&))ME|YDo*uljdO7M#CgGOL&MLY)IQIrdl>3JP zOk?)f0EU};VSBdxF9Wy>?cWA)55NG9{HFm-^w$8USxM?s?X>-G3}6l}fB{SnFo4ni zWdKM2cLp%ae;L4G|7idh{mTH>yGF<3MQZ}0lA2g7TWK|}Xq={O?pU2x`nLgmZ2D2# zbd?$U*Z$r4YyXZ8{s!2;a?`_N<+l1HS%s^XYIl|Z{}-%pKIvcn?ekn zn2-Gb<^M{U+}10r$Ntm*#s6RaFX-z3<^R@4EYv6c!~Z3@9d*ig(pU|52@H_6Qg_xl z(kO*LUydj@nP7~vPRLaOBj9cf$P9a??<g^#nvV(aejir{)9sS9*icoRoJvl7>p2H0-IN*^s+^Ap z>U(k25PqZ98#JZgYc8Ae?ix^JBO{+!gaKY2fo)D&I+x?0NfSo_(Y9&qrv}XVo;G!~jOe zoerj4obPoto8Jluy0|vj(`ta;OW=F_4}Z48Q8#;=Oe&H@mghi}-#p|fI3lpvfd%c)Q$#E8Dc-@dL8wA54>>eTg-RnTz&hGMkhK|}99yVh|YH(r?T4nEc^d~dm-$@D5w z*GQP1U-vg$UY_TCy4lBSMy)LRmQ22<5t^qLA#>_gGm}A51K##tl*cR00i#~>_+E-$ 
ze|A;WF+P4W-=3|73i*UQc~+?kRxhs0Kaxg)oRcnsSEv%An>OBJIjDc6ZznrITgJn| z+X=%3(CQdsZnqY8vO!O7&}d`y(3O-VIjDE`fK6oHaczqDWV~E9nmQ<5guPtv4DvRC zm)$VtaPx789K!RHqA&Vq%|b)P#~*cGW1_{^Cw2-V#!FnC(2S*f&U*bEcz6EXVZM2E zYWptSiOYV!Z?JGUEdNY5xu#@}_2*9rHpYt=jGa*v3C6F`Kz}t9nct0v8)p(SMB7opg9@zBHGO=cic5F3qsDSvct4L)fi5KKfstApiGc4H+IP zWr<-QWMBXWngPH->HkMhj{X0^*IWPkR|YhI10AswmtBxhj_f}W38^CaHZ8hv5{T~M zt72W<)`qa-{V{P~)q`|7Sx3K_nQWHrcE{BWxOp6V80_^7xNbU>^Vg>@!_5 zT^f=M&FKSL$K?Y^ZWKrq-2*eBI&)bS6K!!3bA~`RY8Ebhh+(-@gb1AFDPbE07zyAj zjH7(3DOAynp-U;tw8I%imL=QlZ2Z6Z18)UDH#YbefRCu%H(C4>u*bG6-#It{G+g}y zun6>0I(F2a=&m38qHSki#2R#y(aUwrkPQ z+@SaYL=h_1h$Ly`g1yEXQ5}|}<6+ZZ!2WN;RhHAvpBXFe2moO3`uVoE#K9e1G&f_+ z+nL?7hhl?I=Cb$lY$x~&kv`2cgl{mu4JxLDknLI1d{FKO$y_L=1FH}>NR1+1OeuXL zRqAZZs1wa1l;wl}YEXxuj(A@WGZ-BaRSR_i>SAR(hw{3Hxe7l?9krNeW9-8*1{lPr`f*SG9y`DvZSv|LxGcHUao^e*`g6r+iu*4d{+ji1!0X zT;PLD9RF1Mo##_X!(6&B(vTgW#A7cWq|{M(o@`jHGUXROiNuVLNc|Eg-M<)66Xky~ zpu2PP00vYLz0(WI&Z7X8*x}HvNkMb^0d*3N68v z;ju9fnX$}_Hu!6u)tZvaODDnL^THtC*e#U+yl8LmpoqIg($@(c!B|eTUvzBduRW{8!={_ zPLf4?K^ZMgHm3x{jqmp#eTgLwYd)5K?Vo>wsG6=-r`}@{mG=3DR`CU@_~qfSEaz=9 zF@oq%4OC@U4QK&4|I zQj>|PqMMiac09ux0)Q*9pA)+2#-XU%+=;(vauA+3g>4&cgB*EHRz#8R9tMe~(5qJP z`{B`w04uCor|YBVGjO+YhQ`BT`1Bz_$7XQltlJ`A-|lHan$5_FVgaA$%5()00*u~R zLp_>J3Z>>#&GDm&cMfN;*%CemZevqhTQL!&OfdkfmxrQ_;>p1Ii`AFsj6_1V1Ga$I z&P^Sd`XK^N1(@zwo-jJCPe(rhT>ZsW*Q*$3mDC@WT}#DKpPg*PcL%3;cdtDB;I$rK zsZ^-m0H%*EJ=t3A_)LenXXT0`d9fb1d7O78SG<1E5`p)#ke-~aaa+01U|tUex&jUA z8O>2YxlY;*QgGPbBcW(yJ5;y4;1IcgQ1yKSGS41RrpM4`UHVaS(^v1H|2e^z4`l8U z115MDzy$w4TciQddH@6Z-{Kp}nZkb^Ce@J3s7l}eW_(}15DDGB0q`4acMvuvc8Z37 z*lJcCo;@`sP_K4=RuD_@1)rc%=!#;NN2 z=GsqR(G>hCHg6k8VtfwP!YpqQMHv`eDb8_yl!6W+j8Hosu3W=?Xp3U{YeHFa8va?e zGzPmbCY`{?uiQErF{Kt^uAZ$3Lr(`cdMHN2OsLdZt<-2<&1cHy>S*lE<+uiL=v=IA zDgSOpfr97c0HT`1$FP=@Ou^-^-0d8`mleBzr2rkk4xV$ch?BZoLh@~uyp}=+v0{~r z+aVcv2arBlER{kNay{h8p#@R0XV>(4w=)KVR~#|m*qOFLw_^bO2l1|L#p02EXn`zc zva3h0{zV4}Cs&Z^y+wYXkCOsQGz-a8YSIJ2s?b3kof}Q-+g{79&WJOP^KBRv_M?Bf z^iP-~(I@2jxNvCbxb4_%LO1Mo?hk_`1e{+zal{Aj zD}lj^QK)%I0jOmOl-=w3F425@P}&uJ)~Q3XIwhg~+2_?5l?xVFud6e9igSsTEgQO( zCJJ+3wfjB#NX2jjHpj5#>s)(xL!gpfJGQP7(2;(NNE2w=Dfv$9HEYp|xD8`>k;AE+ zeg+mp@WkUaNJR`P;7Q_?slag7gofxq9w-p0(ylOd%s+D#UnD5Ud%;rTak);pLJ8xR z6u`2Sj=CR)n)fC^q57ty^MY4i1lCLYLk$P53Z%cp#Pv0~9X7-}#;sSl#s!Ahq#LVl zD&`>`&qofbO)-aNN93vek+RNuEQCE%c9c_xI=Wf^YjDSwtuiP3XaPM@&_A`R;S3ba z=OP*FW$~?ozD3j-At)nU2_2vvhiY${GzCx_q6_I*z?2Nz6^~UgYXE9P3e(+HHDzwK zr^a-#$2mT$kY?jMUOXWMK`d>Ac;WUw@NBGj=HhP1SOOC_hCcyI6Lmo&e~4{ca-1gU zp-Lj-gOHzB`rXDFIaa>zZdql2-8eihd${OMXm8dv4VBWKo&|q-*Wz^+v-apo0C~la zk7Fqxuv&1+XH9Hpy|G|FSgdTYMhB&`46@|a7DYvkhj8dL9P#(j*|@Da(aef_oWWyi zQal#k)M7GhWg=L1pknVhrOZ(qV{GO@`eY7P{^a;Zv~*I;m#sV%k{QiIOKaDeXg8ge zMa~1{TvfDX%H|`rmJ?Zvi*M}C-MeBA=_J|e;gwiee^kr%BcanIEWJkek}K)#%&LuP zKMU8Ja22sBS<$G%K577y`;3snKPGq1Xr3?X8f?JiZojC@h%`p#pBZRfn*R}%zjz}m z0!U-9ygz9Dfs2|>Wr9_Y;Io0#kHF*~(%r8B-Qnt|c4Opfl<#3G&LfIb4P~1^Df5ml zzn+yy4c`kNfmp?A*GUMGdWPCuZ{+MNRlxP)yqOAUgYpT6}bO7Pn65Q zenQNT)zjm{x%6nCa%edl34IFw9f#; z5u1b*Xx4hpqp`8&Ezp7JS>$`!3rcwI~E)7r+Brx&l?L%Zw zTZ?v90-ZkRa)wi5BlWUCB5C?M~~0H9~({~M&;H{@a7Rmdq%7BkKSiF2j;fl@)k%>+Z3SGs}Bm_t{Lb=BMX)YBB^8`e7kB>Be+}P{4g<4nL{vjTCsU zNO>fI>+xl(QSHcmX98R;^;12VqyGi@vX6Um>D(?fqWYxMS185AK(Su|!5m_Z*)24J zUFP`_%HS4kYj@uOez<&>)wPcKhN&QJG4!CJ9_I$0-$d6pL!^|F#En+xDNnRyg0A3Y zg+{S>{XQk?M$*|_rMUgtVLv;!Sw^G?lxnnY0Ktv(1SFaaA1C%**yy=Ax{fe87)gJ? 
zr>DL-XlRNJAN^~gY58mBGVg;$laWWx5zXx6Q_3-N6ij~{vI2W+&_gk4H(@pQ^9c?2mS?EdY37!zP694p5LY=XO@G6I#x*dT0caggF652| z_mEf+jjt8&hIkvuBTotkO#}4~DNckPYMzCgGs~a9JfBEUPEPi0JUVyhHAFwUJ9|N4_wLkvf4dw4e|vfV{Csz| z^I|b`c{=~txHx#&eZ54J!(V>6pgRvaTN|VJj)XcKI@K2U3>i-mra(+w--3h(THZ@f z>_RB3X8zmZ;4HivTnl|FZ{A`n%syRGu?Wlqy2ki~RYm)RtwRW$EJrONTmYm&FRNnI zaBSXNZ1{ZDtuwpsiF-^uMaemJ&;kYw`D^n6ieai~8mKd4{@9sYf{}o?oIh6QR%f)? z7@Z8FLt_JGm@%oRY}6>AkhB@45XMBCpF(U0R0IX8brVxCs;w@RU_1sbRpYFy~-(B`(^{+`fn=0Gqh1|ya?~E2iYjH=&W$V31 zSkP)^>L_xAaz5V=MxYyQP$AHhV_?ht1EpUk?V96^?{G%D#t^Q){oiQq3No5 zGrnBwq4+j+bT?`+u@k|=LbFZVwIjYwlY#sm6v1;Kv47{2oorCW*WVDWM?~)dc~nS< zj{ffaI;mleHXbXQKl6t6?Y9i&#R8V-seI0cS0--+&ZiNx+9jyfJXE;k1tse2=n4`h z&h`_Xs5|{vF@*;yG_gQgziJd&a0EhL2-&LhH{Z&eJ7J*&K^2or5CH_c!5v3*wWkOU z%(dm^-U1E{#~BR5Hupn~0Se<9aJYyMsr|fp!f3%H=x2N!1&f7l2)Wz?=8&czCUt!< z;mB)+;>h-lMo_|PZ{!MV_CY@&{ZLH^owx@i(_*A=z|X6NDae~8f2MVchuhf^2n0zq zvnQmNd39Hzu5~XYf$S!w?~)X|hI~&TBr+~QlCf2?{?_@aL}FQo8*a*SsKhB#G&0%| zm~sJYKmtT$ginAR&M~Lz&I$U1QIgXqLlG)1ZimoDnngtLd52~VhPE8pZr>!H&}b(^ zKWTQfY_uUtgzAJ?=fr`EY@Ws5EPRX%GvntztfWLERmy=%tjRzG_aTkww{Mv}B^)p` zop<&$T5G*w6(^i@fZeoyXyGIyy$3902b%>9*0X5HkRAE0B1<6ukUvtbh9go<#;>4B zTUOn3c1BwgnYuBUteRnXu5S#QS|yt^$Rp*}DK|Vht-|+OyD@?lF_CZrd3n-`LV`Pe z7`C#@j#kdVQ)8mX=VCCY+w<EJ!SZrGEQrY~u_=<#K~McUqLr3v+EI7df;%9TRo@74uFiPXvZa1P+pRIROm> zC2LHAgr~AAq0NarOuVbB50lStxOQHevKi2ryK*^}0x=|k_Pqw0hXUI@e^z?_++e9* z74QCDYNT>0a-*M3cw4ke*c^=vB3PAZw#M+)D~p;@um|;W#TRRknJ;uq5moICLNSsu zqw}nigNl&f;y>D+9VOsR_1~ffSej!Gl7h~I*M(}~#AqWLX%Cazi7kWj^Q@N@#wFAh z6I(ezC5mx;`vz4^&;Fzdr&UDFR6rAYTDv8E4p#CyZDa+QGs8)IOE3A8Hwc9nY51rQ zCDYI3?q7A2^5+p}Rg+uCeEguhN-;rNTLtb3bP~XBiu2u+DXxu zH6M+vBERB=KQY*1dB;m)L5X|^TW8iiXhw3ZV`~7&}rv7Bofy3FF~i|M+r|)F5je+W2^&(DNshe z@DP=-bl2`dvNCm{2@dbBhnynyv|j28^+2 zM%VRmtgVuyA&8e~VzlVZ6FA2C=-RtXMX8+-Lto!D;nQr>`0{opI$b@r z+{f1Qv;`?ZZ}K>{*Rb$rnIBZ`U%83k?^!CwFDjJo2!9e(3YRPxdoAr?j`qMz6DUza zkHc27gHoOpW4=);gBUR@yld)Q)<&gI{ATUK?&S)bdRT^da=&J!fr1I-$b2mCGR+Qe ziLX32>=MeR71lqs(qWq*?BdAQs^BnsiqQAeXUd*@g0lHbq0N;Lv*LUOOg?#h^M82@YG% zVkPve%Mo!6uKNpl-3Y1yx;Ht-Up&LBy2{(TcXygQ%}HS>&Z$Y@jF{AAYWlF?j}US{B&l zq{?k?*PHE)_L(%VrxbEmCzp46VaaHoj13cci$8LHjU5Bx1AA&S>(zoQhYnJ>rdP;%E#AkeKgsV^a*8#u9i@VY-gK2-KP-mg?PDnr%&%8)16Y6v^_0 zsPqI<=~Y=>T#C$|KPaFVwIY=n44ZJ{Ai@(F@EkcubP`rX3s%B1w^Y|k7W>dBL`xp4 z^c3=_V4-}03N15*t@!ZmA;(T?8_H6}+~8De|7f^`NBZtG|)R z3qgnHQ$zcq&?`j1D_b^d`Q>5Yc{;2}49-Tsa$^58kFtDSn#3#38kgT>yYeL?-xE!rxrCgJLVe$y^GFXm7gpBkgZeP$G1S8VT481 z!vHvEW!K+)=yL>*?#Hj|EH_nI&PcbDh*r{VWqWIsRLz3h`Rq@whweKb9>7HHwhM#b znTBsOKYVz`?3d3PIwuiZ5rP9#p8^W16r_tFGtBkVnJjNz9+(!+`F|Z;S#lL?6j$Ev ztR9*#o>V|Q+ls%HcRo#UuCVLClPZ@mWkfjPKCRHrM z%j-u1#SV{oOnq>+bLKc`R3)lY!^B4T;Kx!2m#<80%SXYvS9wM%~QlLaCl1%g~>%_aiPRRc>?A_!oB`%LtQLn z@Bv0Trk?PWImwhlCM7ChxL3vU7>B+e6F>BJX*QCGOmXzzw6K)2@Ng?t%VRhI-K!D# z&p#O@yVPG$=z^v1nJ4fBoVD z6AM*oyPpz)tdiqYbI^+rxSpG6{LShDA2)$jq+XoZa#f_jj_W!yUtV`^k}i8|Yc~(~ zr}lzue;mCXyzE#yT3ZI!Jh1e9a<{6g=s{q(bV9ewQRM*RB@f zIKw1W#+9F~&?&_P*#B9~B=hXSVkC}4V$gudaZ#Nj)SUB9^2us(=x4aA1Zqz0`PXKX zXQTts>rBVxH>kO?P@bA;cTCM}*5bUyo?+JS;nc^}swDm(eC~FG@>FK{w0v^sgWYS` zS&vqGfHcxTf-1)0*>RXyF5U>MKLA_PNtid`zu zR<&xn2sO!*{#-pi!KxlpjZtQZ8Y~_NS6laW41SCVIn*Jj6Gw<+>pH7+Cd0tnaG|*y&W9GQjEgzYXG~}x zP})!iA+$6JAe9!gznfpux0_+61tDMcQOKwaU}Gnt5_Zt;ejJi5Yfx&0{>tzqi248c zdWR^{qHakyZQFU$wr$(CZQCbp+qP}nI%(V1%m2Q{t-4jC#@Mah+nqJ%j#%*pD=XLU zb%}85WZ|@a%U!;GafG>IyoQj~d6tv%*%*-vbG+_LqnN=M9DP3PnWc_+8V#PXBX#2| zxMjn`A?n8J6d-ozpnrE^(P6OoYvs|w&}(@qCz!F(u9O}IeM3zNlv1Hyg2`w)>@995 z)V@Oo?m$>Uh3#q~*`N<10WR>Mey&zrIr-Jufu?Bl)OnGSa=5@$@g{$ByZip(%4eQx zmPLNuhX;guMBZ+7y8YHh@~CK7V9kF~d2xd22b%(ir>o_Ig=hBP<`?FjmD7OA205le 
zJ4HO3L!P2~jf<~J;wH?a3BbHSz~mG{y5d;Jddy(sgzYV3g6?RIgx4pBYVyE^gH6j7)z)Hfh}of<$lc1dreX^JdX&n>kZe zib3`%r4u`c@j`0D?>IQTR@y}T&{E0yG?uLza(TQG{OMJzGg*1m&`QQ9#*o2|((#Cy zmS9=Uj5IOo6HMcdxWAQFcPt9;c4@9h2hQRQ3mByHHn|ZXd&g_(nXgrRNV2tFya9H2 zQ!!2U2_#zqTify?c24@cWYP!0b{NGi(Y{48!e2uOG2PB!o~1CUPCKbE61n&;=~P)3 zn%(>hb?B5>){t>0a8htyxrk0G5Jwphp3ggS&ndPcyzl}BP6I4qyAt(5oztGMK9IcC z@!a{VQ5uNu{>pXUQMCpG{enS=_s0g(I9F9(m8vtHr2{SsExCz6ix5;o&mn=13j-cxqeoH1-)cQTlOpF0qB&GuVs|4yp zV2~D&1mpbmlvKljGI*1#pKM9v;5)v-c)0@u^rg>_+>j zmBh|WPH*&4!l1)`C+Qk$LpuI>HJkX#4*d#a5X^lN5&PBSq3TMCNJh7#&~i1*tm0X^ zZ+1yN=8Cwke7Uc8)g_d22X_{njuWlxZ*%8GrJ<6)*KtxecC>Uf$8ywXY0=Lb7vsFr9`c3| z4XJ8>k|?OcLJtcZljVN`$E*NLBuev`(nID8h`q3+g%bGeJyF-+ZQsshJutR*;A(D# zd&1f~5f2xYk@H)p?@)U9L%ZjdZ8$nQk0F5-{GvR^8Na$EV!1*{QDe9w9B||mqw*o; zUo-`P_5~A)R7vVa@Z$B$_5}&$I0ooCbRa1llKA*gwdde_S$?`jQGQ_O9(IjWYDCjS zxnE@zT#KjdUZ$4(0aw#5vIc|##qa! zeYhc}t3DE_3fKs&Ln1#sfLk;ZEcJ>UfI3qS3FU3-1*hVbKvxjgq8g2GMe>eY9Pt#va}$ZAAxX3>rDMrrl*A-0;|@f~ z*T>IE1_bH>+NLc4dI6dKk&XlATk&K=IfZ2m$g} zN}_pfGsB#V1Sq}g!m$;*!tsY#ukjtyb%Nr|uhlQ!N|^&9eEy_RLK&^T;qnb-fg8^? zqO<{k=Lukz0gnz@)dX!K3+vE>$Bv5BZ-z`3X2HV<;SpcF zN&c%_$z&p$L=I&D-$8RsOjA^Ket&Cqk;pIiIEV3u&or6ApG+Al+fQne#cTGxFlnN2 z1>G37qRTdaUTbYPFL@+|xrrn!k6X&1Mp0<3v0Ei_AxFPB21UX(tuaQ{(tX*~3I*b+ zbM#DRc4^;0+%$sd@8DMZe}kpuD8_kAMWC;UkFgselw>HBSZ!MBDd9ZetJ(%)D*qfA zYMbT4+Clt%3sa$5dxQ5}*Y`E+_k>W>^yw?~E>u8xBWO-oEBWz@c$e}-k*n%#L2C_B z;|c*tsAD{(9FW7hfh#nNXt|U{j>q^`4iu{Xm>Vh>m{GK2nX0!|?S{2%ygv@5qHz*?P&@wI87N0$7L;a$(@WBt}d&9m{`9$3Y$ESC_+k{@u&@=TuMGA}+ET1OJ z-7mU7ta|6&UsY(1$83$jP~lr%9cv#*lc<7vLytM62%OOw(@j7$2$+iY*(iDo;F=2v@GwATAz5e7stHu-WWxOJrj`3VaWhg(p$MPt+4{4+yVR zk2R?kt%#O=A$bUUwC1o}?UC0Vo!T~->z+b}dFHITs>u~_I(Y1M*1(B5H{Y(g`n&?O zud3U?aR6ttYK_v+&on*?0j(yWIWtnsRtRJVI^(tVp1D2GT)|v!Sgo>izT68}9M~Ov z&dJgo5I=GFY(g~+#L%x)jpqW4zPQhrH#@J|=^xD}CaeFgUrAGH?%u+lHgmpu$||sY zC&f#2kfJ}3n4}ztDpoU0O<7r_c{El7TyZ2g8q&RzBU!g1!hQjm#N|x-Hpa>u_+R@p zvf7v{QjRP@pA$l8X~O**=@oK_oVj5(lk6>C?;spv&%M1^7g`NXSy{V=V=Wy)N#3={ zU2@v|PU__mn>#+hh||+?pBeh%efHv)hPN_4C#>?hj)EP|)ty0?pWPOs5);5~J1B;d zh$kwEKS;(1CeVRw!-NxQ6Bxx*$gJCQRjUMt%2B|o5K}FMkea>~$1cFjhR|M!fXSxZ zQYqh(lSrlPC$q{-+RYB^h`O2rRJec24_Oa;{~4&vS@J#;oH8B1$(jf3~0> zHm9X2=ZkFQ3kK$8v1dBM+Nm|n@}>5$}>~8_3AGEE64q7 z+q9}M8JTCey&5LDlrh&OQ7D#6D8JOu!oL!&Y8F}2oxWQ1at)MvEIZz7cKg1=X|4vJ zv)RVIV$Av|_}mkK!3#W%DD@=uto{L&DU6;Oz3k(>IfR4?Hpyi|XvnM5`JV>Hbdw^F z-MVX7X|U3@6I|3q$48VM%?2OR)R*U}(@M-VaP!jLch8zE*`g!}JgInutsNg!#U|yS zN}-RMnMCb2rQVKZI|F#5YJ3RN==o$bI%`m;g5Ws98xO`=6)acGPBTW{y^wPPDCJsI z3KgZn&C5%#4n7xdAFiHDxD=`C6z&~*gCyDvheSZ`*8u5-uB5ocEnT(u;x~1ap?%te>zF zeqHFgoDvBJy3``V4!{(Se-h)twgsPbl+}i1)hwVU_lFco3~Nw5vxUw(qu8*@-o42J znk1`0YC;vbO6z?a%Y<;(P4cp;2SPq*xBL(*g9Sl z38QmvVKl3AW>AxC*^+JqOLgog3zI-QDptK$7p@?#2&SuZlv1Zl3Ber(DF?fwaPhqV ziB{pvNB=TgG6;07U%vi^MPlVFOkR^Oz%@}73~oRvS5NrFnOKUKBwwmzRwaZasZQ4N zWC=m&%Zm+x;Bf+40(zv_F3{mbngKqZU1)#2e01*CeF*{ITs3vTIthM$v~*_Z!h@Fs zSZnN(Gqj+=e3L4HiQ(Lg6AvImVakgd&S)a2r$_RVW!i35fO9>MUyj#;CL%DYsbaEB zY0s!{Iki95`6X2lFX2U5FxxI(^t&@(3ygf3-^XUYAa~&Z+k&kjE8I)7y*}MPes^TX z+|oX{f`zT&TiBd`GKu}gRIuDTSz_?I@*`{LZgU^E{MC^%Er%A;WIK3O@-DBOXl?JL(q`% z4RSG${Zqv5Y!oNdGjw@$!#0ycb50!yco=R(pOww=@w@*7r(^cue28hjweIk^(wQKX z4NA^2yWO>*cygxAV%L#K<7)aU)*2SOHAY*LA5?)!{6~6(JgC3Hjzye_r}LNHH1_56 z_f2FQIMsK;-JWKrHlXtE=QeqWKh;qYqJuz6#2a^t)|Wy`aC;YT&;?Txjh_4-R0B%g z5q`YB2*i6v76V;(4^~g^DOq|M-2yWt=iDsL!33qt`BXWE1Fa2TDXkp#KUhDUZz>FL zjHntggn7$ky`b^w&Aq47lfA99nes(hXTnV$OTHi3+k?r3pUR=(Pngt!rp{_rswhyP zHtry?C5m2`SaE611;hKDWm0)5(KtXdYpv z298w^<&dbO)KzXi-w>%DC5AAof+pmPn0QViUJ^J{8Id$DPfc168>p(c8fcGvDSTuC z^V_$BQmM{L@{XcDcKp7j(3&Azs5qc68)&T-+zC@S68}jYe?hG<>|eBL8R45zfLGS8 
zC+9JFvLB~$UmtP1du!bL#Yp%%O5>@`Apk7|4_q5yi!f))N|Tl@;;H0|YfDn)5Pl-) z?XlV`WVSN4o#kVKa6(okTIlp++svu8DfD^n#?YV~qZTY?%ldRPFPMO_oMs4ViFlX| z=O%C8+X+Wu$vpfMHXE{+!u=No)G0Aw`ebxT9c@}FuM0QEIm0znAzD!)O5`yoJ?|<( zc;!z!LBQ0NQI#HbpJmkK+rdwM5)Wp&lbF}rAVq-9P|@h4-s54ztt23t6NP>5S?)iW zG8VaXI}*#GDJkV1}x{C@F!yMW*N_s7j-vch`6CU!Yz59CJP{pnw<2Sy|FF3?8Y zViiQs-)+<@2SxH@lc|nYAR$=qWvZhd6*M7Cu>ATy0C6`?uLS_^`3H_IqG7M?pclpL z&!M-^2gb>dJ7#m0wK;5XULo$1NOgVFZTr%WMD5h{d30Gjrb+&7$dgkvn}%MZarr>yBq*St;2-NdL|WzTy}2c;s0R%K`tyaiB{Ebk<% z96p{i?9+EaJ_{nBIsw%SXS580&=_Vl|)36=B{ zD{?H0?b5l1h?sk;#LpdwR{eimxL+MdKh-_(vdWG0Mf8ER^E%>z0#OLMF{z%fRr#v5UUhHCj}lUsv<61MPc; ztLxV<6~M{pfDM2p?6el2RxpB=A6xfQ5;7bvGD`&aJPNSr zBRmY+9lfE{=8T$Kp4#zvmT7qBwAVv(J5MwTy$+%&mUv%|E(2TL`tT<+CYCpI(d0br zBhQ`Y&%-<|&wVDU_S_roUyy9_ zjZ4F}dWH?L&)RHK4C`Z#Tm!Mzf9O~(w3_Jbk;bNn5fRoTEE;j}j);4eU67}2RfoQH0IYi^9h7MLLGRWM>Kx z)45W`UwQZ)Hm+Rr?MFf-Yikwd%xkt|ZEr>vJK5;h>b<8F zEnqC(%;L@4I{qb&8ORZ*)G?xRDP|05O8wIqe!g}#L@f1>inx+Bhoq<+QqQJ_-h@y! z!S${|{-aG;_^dBkXpy-MS5jyJfC*o?KKp`i_DG*54R)}8YAgA>eRSi)i#5)FVc6Z6 z(|-&1MvV;m6AS(>92~3Gd{NTuQI5k5nUd@De^j^sBuDSwJ)TAt@Gw-#h2i812m0*Ip;RQ#DhVw z;&I~xjateKkKa_Qz9mF1QzqZ9holr%JwGz^)nu5 z8zZ!nHP;xCJ&*8-IpHWo)PVZbimwe$KKU&Ti+ECdS+Q&`Q2v7r@0J+DcmAviBe`YU zx-}TIK$W0LM1Yl04!q!zia5}>Z5t4+=ooHa1iHT2ao>mNPXtxjA}%#_mPD>YL)1Kj z^c*p@tsZ`FqQgYnLKct4uipYxP|chyeK_a%P2dQg_^SN}Qjy7~h}n3_+p!p7r$K!26FByoXWZ!ZvdD0Nle_z_|hG}$;$E7VK~wckyAvQNk) zMb%%V_NZ(SL1)0$lXp7?pY#3Et4ie2L^73m=;!nz^={Rvbot;)jm|b~KS4~x&U}Ky z(5%6*Gq|#D8%4?pS*r~S(z0n#X(L(s2y*K*iB`bk40&KvX@F6T*UM$&j-qAT=Fk)RA3&v5lc7;;gEWO<-s&+% zv$zRN2h5KXE$d=oShTYbbaj)f)G0+mtQ&TpQyk65X zrq4eG$}DFF3|5b$y=1<@@=ttZiDZtx}*-s-#v0Btan6rP* z$txIC(EDo^0ubkT1+E%z;k~-${wo?6l`>W=5 z<-bw2MuTUbG2vmE)%R>9v|H~lcZge~`}bi1Ar8G@qVH+z_9CbKe5nfFG9$Lh*V?US z@TRu5_RE2V4Vg)gq~54LTx>A><@Qu44 zC3}_&JP4qd{Z*B#QZ;i{s(3-H%%liJ7@fHjfBR*^ROu;0O$(NRtF4_3WXBu|HQR~S zy_hss)Bu}n&ACS(u2VCp%1bz#!v9Q`jXQc8lkc5wI|%G9oa-wwkyL1~+V|j!W!zz< zwijnS=8x3=qA+8_8yQqs-ny_m%~caR84mP@K9P=OQ`ir;qrCiJ0=S3gZKQoGU6TJ^a!yKj_LUrrRI7MulDLaU*Vk@@{gs?QX+db>a(h8qU|Zo9 z;#HkIS@`KIzG(Z1racR3iIxbTiGFo+MX}1Ou z0ZsC`Alm-D>26eFhp!-^Qh?`n#`s2X11 zFC->0UVn3mS!869YOHTp(EjPo@9wqHA~ZrcoT*d7+Q>~}f53*=8Dv~#8qi8c${toh zY5%kUR3BdC#yxcM%>LgrABDw@#raHA1cQN9f12kZAnzNzhD=1O3KMnft??p1_tQlk zX3Ib=r2gey8W{rs5d0tG`@g@!$lS!p%ISBEUuj?1ZHl)1T=yET zydj*?R@n93C5$!h5XVnEya zi!+nO;WTr@J&wL#qHSAY{k`@#OpL;JjXP=iP@!v>NgqLPye+JToHl?TVZ$`<#fHvp6MNRoOXntVdE$`Vq^xH-+F z9p(N(m=-%8)liW(#Ulj`nPNrbL42}I!XM@?GNS;8jUr0c;1kJr`kw=#&0qabFYAn> z;YziOC(k&f`k;o4-U$!=uk)*`>CDa#HZp#Co_nMC$quySPnTqsy5&33BB?yHgqVN{ zZk@;mq_eIB`&x?lZI@)0zEa_(qbiNkRp(174Vnfl z_(rh%5_+i|ng&vvQ%NhOph685TH!{G{lPj{4VL{aB(YKtLOj(;Z9Lrai%DnB>Eh3S zFjy!p>PzXsg^Jhb5lU+-m|4-M)xmOTA#^y`DBkVW`1N766?zNERKE*)`$SmKF1`Y%mA@=5(z7A&dJ z-&^QpwZpj7o({nU?%v+sL9@U+iuyp@=`vW;`cGx<(z&m7_J{(JeP_q^mMR(jekLl( z%K#+>6pfhEO7P}+uam_Yp?(hyucNa)dZ;ojad!3*+jsB^K)XAr6-0kU2O075PuprS z8D%f58K)wSDlmDAJy=Nq*JsdPzaSLf)sGjU873#2B!&!=+182WtlT9dr^6gHj`268 zKLu3(^g|0+L3dIX04OLjg_RapL0TrUMwVyQkdo(G)Ca_~hkGEfK`c;R5f2$pn+&g1 z;UV?YkK1dQiY#7)Nq?JV)G|jRDu{iaLcFRIag8D^UmSi z<}&eEcd%0&;+zf@GD*!TJ2Q7dJCqlZHPFbw-_QDhkc8lGL=J7HW6a+ojwf%pMic&F zlWh8Ho@y*q*~uIE7`7O@zmV$K)|4ob0rF>?9tOldSq}Iv$3z7-0a#P<&vSc{I-&^3 z&o30x;>om5oDY~wH?k4;k{??=s37*zJC%9;Z<=BggfLv52Z#;#W>^jl+4C?lBX}oV z6Mbuk_=`LWyd4BGoWe~gavwomL_?bOGrtNWIX*KtnfRy}FarFk>Kq{yGh*y<$9WlTvyxYc}Ra(i}4BI=le5j_}f z-a0ryAbw$dpiF2!K|SX0eMkK0cVLGLeGHqh72%63dgv3NL>4f`Jw4{g-~;`Avr0h% z??m4~n|eKU;*{td&q<0Mnq90fB=O5i?Nk?6K>?U-ZKa#$p3iLy?!}P2mVU z&yezfVr+$o6{w1G7YXLsb*}~EvU6sDm3qbORcbF5x!ja;ql~2rUje)agMCwMDWFJ( 
z3Lt)((QB(kqRpe7Qb~AVeAYLyi7VWVMz_;gOvsQFusxrtLRYxnyNxyj@R8L!7 zZ}i33dalNSR>9g1DGZExU#^iDeyeq8f(Zoq^MpnMrZLP!G;P2}`*o*g(f9pKjD*{L z1~lPjs73F4zGxEgw6XN;?R~u5p1#qUgpYBN!T7s;7H<5!KkU68p3lE~oc344kR+XY zx;uOyR?-_&ytp5?u3m(~2q!sLp7K}hi=FfX!SlWuS1Zu=!V9hUYim%Nv!ig4j-gP> z;R72O)(sU@%M{9+izLib$0Q6PUvTN=xYs(_dtWgE`@$L{WAzF^y^w6GRXMC~cH6o^ zDCS`=;)l(+M6`mMR$MORN5ic|y}8d-D<90nKb?vizYd=d*nJ249Z`}-NDNA1r+Gk5 zeEpS;>sxM@%+Bh;t-nt*R|v|0^(UpJ#Rn2^vnl3yeT>HH)|_Z{@*W)3<4~XSfm)3Y zsTms9u_F2`T3!1Qhc;CtkM;UlVIK^s%~<<79&BXK>5eM)UHm0;=n#$U`@Iu(O=C`! z>MgUm98x+lCMpE1!z4Z?l!CFgdmG1&-ozD78A*&zuJbj=1<2`M^gA2C28p_&nG5xL zWT892ViXmeFs#?Z*|bL^bfn~J_BYK2@4KoSGuo7QwVfGY2Eot3h;!3FescjYWQveSk6g-Hx=U#7B zpgqM4&dAAvdz9F-U|kNiL^823XY|%cUR*Ak z2XxN<65V7rpov5Q89~gAEFD$;q9H-XBTom|dwIz!cwU6037bYlRP*lXe%x=>$zc(# zj9vd!Sc%>(^*EE1y2ps5j}`UgL63-8F1Xoq9BahKW}p_-O( zKgYv27G(Km_BfOqu^97*Ni{6)QeQ0iWfQ07Z?98bK&kO=Z6&i}_lvtGqMmEiY0`63lR8*p;bi~5EJy|MxXQul;8 z0caWZ!Rq#$X|Q-Gq7VwkT@$;~3PS-j6xPNHV;iY6UBv7N+!p1*)3=Co-bBX$jDiTl zSLPP$-L4AkzzPnoN~V?+Arq{*f(1>N8Mf6rPMIhkR5BIT5UOAUqE+A6y5S+~UbCyV zW@-hy6w?xu_mNnDMG1~4pdqVf7nz}?=qMA7Rc(K??c?|#>u}v0ypI58TLG{nJMled; zi!we9Qs1r)Q{)n)e!Rj*p02?{fKEtfAvx@C`atuq(CLP4j=<253(1k)Ez>krx-8p1 z$mEDEyZJt4h8J8RmduStf!0Av0}zUl`yMSQNZYo#b6B$>=|x2b7wI??7F}|N z?hF%OuMrZ>b`cMMK@S)0{Dw>%I9b&%@42U{!*@zM`z)~&frnhAzZ-6iGA-UC71l@= z+iC?9=LbW<32cF+bo-FiP(#ZxuP<#e#ez< zBfF8{W~~tnN39SLgDMV(lg~1{Oo?Cf?09)M*No+_FNxC|2wW~3mj*RBZ&KBWj{dol z%9$PsTN!8?D&7-|Q}@|8Wj{(;BEYIy2VlbrF08ZvQk3N&EGVS`dLSp5-*;##Nh|P5 z`dInUw+i#8>La+=w$KY{dcq_odmd>@2TM1K=7Yt)o<{piHRn|UF@_TrNO1t=9gO$ z5;Jt7UYU}VCa$|H#_Y*=)P`G%#0%)>eTjRUT_aN8t4Q{H+M|qf<$)TfUEp)mi5Pr} zuuo~mh?eLaAGv1MN&c94d}eEqE4teoVXrhgOgUZB{bD1ID>=^gR6&-W)815t*6U)| zxb!mgeNJn~%eU@mP*yd9kjrgOo#?0@EiPsktK`M_vypEfnc&41OPw>$4YXlLldeV8 zKmqOR&7b0M=rVhsKZ#fmMx|Bs^7x`boOzp-!zXplA8+`H#-)HQJiGr~G}tXuvrL|vcIp=K5w36i%a?L?fIX`nb|%xn1!#D1HpFUm)>NIZ1`# zr=+;9Fog@n%gLEN=y#$El^S6vC;Rof0gasLEyLD){42wp2Yr5r7#V0#;#d5@LXaa5wtd{Qnxm@ zdrn-b*y`w#{lSs;9%*9TDaZ_1iG9_?oX$T)!(Y|$qYN)vZG@!v^L1EM%TL-2Kc`zO z5I;=ve^UtJBXcIDGb*X2mfoJsQU$;oNs8z0xm+}FN);zZ5)#5l= zINgRv>kzvXxnkF^%c~vnSU#n%7wN#Nr{GHjxk%SW+kwHlx__jOv_hFZOraJnAuI)I z>7A|%D9>4GuSzjAMz(*%grv~Ip$YJDfTb6SXL1oN zC6EJC#+zHlEZUP}O}}iAH9YlN z5O^x!?7hPz$oD5d*}t`(CoC2P&2c3MDlv*X0IJNmN4wQIC`nhtf$YwDLRug>Rz74h zs{#gV`qasgi;6l5%p|>*1pi}|YnaBT`u#IZ)yH)?@1+{bqw%H!!9hjbmIfMEw_X6j ztTaXG@YxDTuac!#sTmA^&rk;Y)q9l(Z7Ma{u2gI61yt!$j}a@{c^(25=+jY(f189B zZ@vO=amv5Wzvy&6eE_|HD&2fbh%6cXkeLgLL9!EnG3!1qF$yNXZb=aL~#?na=lx}M3z9&CsWaAIyP`@$hS&J`=O1DB= zXX;D?SzLUP%>k{=7qzJb?H08=2n21ma8vAPYF1xK?SFy}ces!6&=FT=qIbC(uh4nue)gSK7|@|+D1}h{ zR|o>nPt_>iN;5uRkO>I275QkGNhCyZLA6aD>wdT`xDVS8$bfD{!-NCZwW}4S8Q; za>w{7pT*gxIciYW*=4Ne8{XS*~SHKJbmx7K#2>T%6nF}wb}4n>=B3yS6y|x ziW~PFoH)ny9H=f_oVhA!4ztyEPanF$6vSHz?N{6dGc1nHX4YiIy>-U5aO~ArW~YBy zP5cU<>8;=CUFW7YfJ;xAM}&|0*=ua@o8RD37BZ%40KcuIPlj>Sn=P}c z89>*5&-O_<3eE}oy)g7-ZKaB;ZzI^`3Qs4Fm>s-?NRxvTCxgkq6o6i0M25Gv*dh3d zM|nSXCp0MwGRpk89hM>JMYlMjZ?SwgR+Tjz;l7rCr6<@4wdQB_DsHSSw#fw zC)}Thd_WI|C3qwrl%>iTRG{EABc>h{;A^Lc6}jwie=%ZzPfbRjWH0RjfxT(pG{EBS z$nwS6GRP~~BVR^^A`Jo~Q4wzBEp@kGP!HXJ^M16;=&hODZ@Ez*0R^Y62nIx8u7G%f z`J0qU?q*Z2B1tm30Ayg6J8y>$zP6RGhA=aqnhq^9lDH<;(M{iVvluH&W$Om|Y9ms% z$>O(&UkTXJ<-~Ac+-*bE`m0R=ydr^@Y5d2-&Ec>5h5epdd)~D^Td{?Kla%U)E6Rhe zE&wHk2@Ms9%5Awi6i0k3dBgPgD;g#HW1d^`ku%zv*{aFMlBfZU`7!xdO}m!2h>l z@E=Bv!+}Jh2owN7IvxN3{{KU2H#TrK_^*_Q&9Bgo^j)J@U+N8!&KPCFb|wN2UZ=B@ z?NF_>W*a1kP?X%10I5VQMN3mI@a>hGuG16Yn7tc3#Mu7O@^C+O`5w~tH&F+x>bkkD zQ*A$WODAjQW?1f#6sjP3PO|1}J8~PkE3=YZ2|8X3IvEP~zEODQ&P0v-E>p7cPs~ug zA2AN$>1J`y2c7uZaJ%|s_FqGqduajTxNs8$%KR3mWOGDeL%iBV`nY-RaGAUtjc8OL 
zrlA{EVeUnwC8S5loD>&M0v2rPT(9cHz2C7pJ(6l&gNi@;r3}qZ;Z+w68p-Qc+i8)Zmr~hUI%Nj7Ik)S^}krCQ!1d zpK=k}2z60+!U_0~glL|x6IHF%$HCXt%jH5~q_nd0$M^F|)|<1loujj-v!e?Y-)?Al z^a8*3x`tQx;B06A^4;yOxAFp?Cx4f+OktCl>R5MG2OHO-? zp7_M$&vX-+Zz$io9zJpci4gqrS3DKSzo3u1iq*!?aBXVA;+Booq$Ebj;HjZ@h2?tl z^=p0kEYC6R_v_{HRVdqBGvAsZ1Ouh?+#-RZ#z2uSjINh zuktLSRFKP0a=pqqhhd6mp@3qwIq_LB(V>_JBe8aykbrnfv=0=zU`;YlGI5sLd9q}{ zSKmZ~8rnr>+3ar@YQ}I7WlR-`W}RICr(($y3*s4{=Ug!_ULMEIndy5Lj#c*B$=E;Y ze2ZblqG+Dt2b$xj7RgAUg30&ZA)?0u(T*QYf(YKxj7Vyy_xKdV>p2sS&^iWj+i!77mzm(8XS`WN^1XYsft}u~HP#u)79ery_%mwL>BE-h(k! z6nwxvlT9}6QhOp&WKMdv?XFY`_`Kfw!W=KrC}tpdby-ncU^1wQHqG*NQ8Cj1s-4S=}4$iSN5j*tU5tPUEx3%A03H0#oYnfd_@t-U$AHxLhAA=)0-q z^3zYkqvJt|MR!#?ErF_`C{*ViFf>Pt$i)$oBo>aM-9BoSOBx8{&SH&O^HWAPSzW#e zFBnd!eWD_1(Dbm*s1AdIN`tD*5p&85&YXfz_ACmhGNt6!QASPn4B3jZK)wFO(Ot6p z)VtfDsUY#`ERc4kN|)%`j5_&hVk(&|y5#_QCfrv^2Ox68hH)z8g4a}lS;y1UKK_`w zTn|_n`A~@9@k5?<8*H($j@4Db^eOY1e80b$AU)Sdtv0j5|c_Y4t#3;fM ze|Yoo#lANkpq)L>IN^YyhW%BkqY7z92z~C!#dTas3jDcp= zxakO@mIiM23epP#^XD_Xz;GQNc})Eb&4t$`gBev-k}`t+cs_YQM%#P(O<2K8l04d{ z9#615jRjiNVf$u6xn2%Z_@tA@{`5(}L_6_o77qjp%dm3HJ5;*LFzr%~e9{bBe&1FF zkosyx|3=T&OBsj|SV8R=3?TWB*aL2<5SUW}-bh=%SiwN0y;o-iGmhj6nh<$s#`^?d%xS~8?1ViVizaABtHBQ5MEw4mk4EiN%_A>DxfiH-x zWQ{SaCx5q4z}i(L#qDvA2A9LUU&e|u^Geo3;i(L{f;c&;_tlTC-71Hw57OY@uE(?# zk6zL;TTY8!w-aIW?-N9@%?EtyL#R>l8c5+tqT-&T;wo{qp0O2(R76`;*N=92TkcY< zSxnNHx{f%xJ8aa}BMtTlZ0DeHS^I2JuPT}a)Q}wDdpRe!9b5Ebot-vbF3K$O#CV<3 zYkJp=e=Z;@EGh(5?H~n8YS1!30<2E42&c=;oU|bKv?Zl`Az7DTy_XR6*Z-zE)^~01 z#eq|KC3;rMSFrMQHnV6_~Y3m3ictmVM7{?^B%Pg~&uL z82n9Au%e}Xm5<=?gwdgHpvi92=kD__I|xLRoY`5>;@ZmPha}jHHf}L!q}ffdOOwea ztUcsAWwK~{mXWuF7%?HzW`uZkl`}mG0^CqFmgj!PPjnt1-Ev+HF3}n-7+6u!_8{7F zXoz8%wJsH$r+RLBUO>i?UU>tWZgyHn0%?Ub-=Uw(!DbhxCHwWtKeeJkqGGb9<-kYD zu2;`LK9REM!VGU+?LWVz3W*d+96A8FPbc%CS`54iW|TGJvJqARtUi? 
zh^1A)L~;wX+&aj%4l3t6Y^%R8ZfRJ}!rS$`8MUOiHuYPWPFj%y$}r8qWm;RWKQ^RJ z*mE7)E}XbWK%q8Z=#3nT_c03sihe^nUyhR^Bx_{|BQ|~q@z9xzoD=50RUE2 zRsA*F!Gc?cA*|V$>XAI)PQS&4D{=kUYWq|3BT{Hx`(v<{vfRYU^8!1qEM1*cu6COA zWRhk9>23t|U~HqfFMthFRhG209wlXTu@8^i`Kp_)(t0(W!+$m~_ij+x5kvjc{O2i8 z&k8{!QIriY_-?ooS#+iqV5k09-JKetFKj%{lAXwfUASXuCnE(k`&-a^SHzB%Oj!ww zI8O1^H0X6SX40RtB4%7^AKYC3daWE42sN7d3j>f8QkQEtY}`DL3ArYaF|@1J{a6JC z*ZN;;zi4o`m9eGR$_anNA~t9M0Hpt~4b;Nvw`1GT#o5I8KZ~gPx-AhKn$M~l{W-WA z=y^k;$6_DgrLJi(^JTzQ5h6yYu!;iG9%X}I12vVvx6cfTMkGmlBb2$6_ORXx-1%b<;T8CGr({`N|Zeu1O(VA=}zyNJD{sG-a>>+$x=A&H+`9^jhqI zdO7L`E0+D;wLZxCkL6Grdc@QCBE0ho0bMf?YamRBd!$`eM<|ia0o*OxiM4>yfCx6hSqU z<9ZwVjAe7s^dPi7x|+4O!=!1ek*kLTS7)~W=*jw)U#(x8Q7)aWEp5%UDZcIc6Rek; z&y%k!TW8O}IZOwrUshCRL`D$Ctq%4)zLbc=DozKdptlqM7z=tn3AN>fr?cfaGlGMgr*4kJ5tWJVL~{ne0lS%>}($j+LG z__RVro=Q)}WFel;NyJ?bTFll9rw6j!*3Jy2wF*T~f>Lh$UuDEFM5nkY^JXkqs74_Y zXtH!jrN?_KEO_z*ko7rCAjGEjy;0%|h=Aricsm|Yt24`Yk`zTixZW~DIkDiC!5bDH z8w)?S+^{+)%<-~#&Wc}d#0eH|gA<9gqAE=%WngE5IQ^ipu!tM4U7O*+w=|rH#bv=x z+LpszNIE$(s}b{3v#qujNh}r~#5%@{55+72^^ZBdgZ)Llcf|DAxBVCc%aH`sOUU%5 zjI#wtp~A=l#!&*E=*>M3J5H=^K7P|9@9VK)SP7vQOog9(ey5INCVvSX#a(~WM;v4b z8}D~8?MAzSBnHMaCt|!*-KuLGF}oAecD5$TWhH0*>JY`?6V-WM*6%TZrr?Up>z*)` zU+=~;C{#5)e4ulI*rr_e{YEBs8x;{w3iY6i#-)PvPp*}-M9@C0DQK~gNRi^x6J3<; z_wgJ@-5j8*1#Pg0mw(W%ZL3^CKBzjcVFIN63lLA!H}$IrB-cNK1?P zIuES`HL5fB=U4Y^f`%*XPa0%z2)l6%n~Jb72KMsCG9PIwqjoNak^EiEARSV()t&AL z-M>Ug2P!CY9@I5$S>B{kQ|$Qd*NhyM#y3xrhK<%ebzJ~;(!3(>>Wv|1NBZe_g=X|i zuNUcGm6H3LsU!Fux=DDzxdTb11}@?~y}wpozVY1kThXA;%%X--(MU*($}fvi6?Nhg z1Ya9Q&sH8n%GRJZpNYWoPv%K}_cdum`=X3Q4G+=37|dbK)%b??sBpQ3uu1zaWy4D$ zN)vrUMs?j5?sG?I@*56SSNeNny$7MSJsefX$0>?XSoirw(!fK*KlI5b4@&V9V5@`} zfPf*BxZwEX_=`C9PYUZLMoIxbPSA=a#P<*uIT;c=G1pi*)b?BOV%;Q+U^V>qc*WLjU?BBL+0_!by z&<=E@JVK5KY^&FQM%rtQ$Fsf9*2Xd%_L;J5$^oXhSBSL@!bBggxlXk%%<{rInyO|q z1zguB5TrWxCY{QA-n$ca5HO&U;Hn4mxuS2>51w<=`y-A>T7*~WjCBtyW|HtQxd1+M z%!D0%hk1&@LGBG?u?e%Ix5A+s3~`|wbM)=lxUu$z_oPhp!sNJVlbmNKCh!G@nf$AM zA>U-d#uhXeFuf(JphQGOPT9>06e-p{VA!K)FP5O0B@>;ql@0|2kBJ>2IPyy~OT6Hc z#A6=TKN1#C;0zpuCz=8NO?@=DzqCI21#cer^;w@#o!XFzO?eL$5_ceZB{IK4#2!RZ zh70~3EckaGV?#yIw3>lkVDxWnjc5RVqHCdF+l@zK0&`-)G@b=Tw=nKh*LFh~b0K3? 
zHn5xc=^!iS7rrM4PklKwdDkg`e()j-2xQ+uWNl*iWoGyme|)8IYKuxYHS zp|HWEf3rF6LcuqTsa(Z<2>ucLgxomxraW+OoLOQ2z1<2+z5?ao!W+P1%h_({vTq&@ zb^XbQWq|2t@>xydRXwvK+&?h&zHbs=LVr!$&X|alO_x1tt*G3N)pavw6h#?AtPH5w z4400wdG&T2zM>4cuF^FF7kPiZ+@}E&YFfK+lokC+P7vMl*~UNS;$Oa~Q1>j(5n08| zBWo?F{EEE&a_q=lHP371tswB;*sCZ44nziQId^g|7)|xHnH9)M;*Wz=iaCw!@%lkC z<&z6`FPYrS0DtNz#|`;OO$%Cdh$Yj9XM@pT%lm07rd;%_4$<)JTxFM};0RTbLzvCj zTS5!-wD@AKctc;CH6N>&v2_so2ZP+1sZ^8(&Y+=+;X#yt6l^X{)&+iYGnd_Ca!(om zJ^Qe~rX4bww75NIX|mf`_N@m^ax=PRO|;O^}>?3d?Ct(QR(j3$pf#u|zaR3H(b(J*xK z^H;+Iezr4zNdlsujp7gJ&e^-S3LyUbWJltmi z0RZ6t>1IOkf1nIgJ6GF(7^Nv{8+IFF2tW7&<^)_iK&JIHOAEW0PB#1uDcuQXkRceA zGZBr{J!31yl<=h2-Aqz3aUXzEUt-F3hLgGR2^7Yqo!zP1kzt3xXM3hulAXaUD$xoS zP89JUU5JUaqUNMQB@@do1rL>|RPFjg2)#wsA{1Y6uRs=X&^oG%Un!J#cZkr1pe49= zl!}m+#yAe3zA_E21ZX+kA04O^qKl%97vnP`5MaD>B(GBtQRqne)0Hz8xZwMM*EJ6H zr;SJ zX2CnoDGzrsuK99kwc1TNj3TOxrs=W6<(vpt0r6~kUf^ekpB!D+XBy!z~TZ9?@ zsjK4~gsm;n1y3F+12^zrYc3cw`)kR5pP+LOh+na#gLDLvqNBUkmKgkj$uz`fRhmH_ zU z_46iwBnbwokoY`>L^)yOx(|g>pY4-$-UvZYu&*DzbJ;y>IUQD3Sb)uq^PzRkYFR{D zD?Qtgm;00~{4J^94D58M-fa_wov$aGW_HPn?*H0(L8Sf`1 z+U99*0Cb-5qFfznS#Lj%5p=?Y5AKKbjL0S>MEQ=f^=)N%Rs%WeG1%MX>sh9{n`jPd z#ibh9QWLfTJ1Z`{3Y9C0MNrgkp~GuBIG3-9`R+ptY!sS=+C3PMOAc3N*Bkh&&%=K9 zN>O(u$nzLO=g!yrUCQp_K5^4y;77UEq!r|bLoEttN5{6gC4QA# zeuDoiv;I%SsW-H>YyF|&4+8-Jkp5p0$HUmv!NtI^6hgU=kCDrbY_78%tAb{-^?f% zdxJon`2YYooJ`tjGRhdSZVcXdH7mG#w+S`gOWPQ>bHalOtKTr=fDAoPo3ia^8d8~I zh@?JC)_0LWIwmX+1*lK54p1bRIZlP7MMQ%#(mX{rfN|3$na&1LgQjEuy4YI_jHtHz z5<|G!IcTVvDpOUL;W#MRjrKMu>`7D6@iNqRo)V>Y!prf&oV2gFK@Bm%qtql*J_Ixp zh=Joomp<0X0d=u}*L6Bbgwr4;&c|VOeA8dW&XxP>3%EhbQsfD1nlsSaMR(3?&66?} zw@@5d`r$KOSwdsQk+bJfaWzrGqw4bp=lKZ3KXK*d=U46n8f_&#Q zug}vlz`1W1>Duq}`a~DuExF(#JLF!cIj*71!-fA$xvDux@PVf_f3QXt`=#Yd=uaaE zRqQjuHO~L9ohReoRsj@IjG6nnYvy490EqsNowu}guy=C#XWNU_Y~}Yk5Wcqc8KLk~ z&!nI_!7I;Coz3Cx5+TlgsU_U7k`YV83VFGT%=|I|JzE z&VWXhLIqKVbb9uQvV`R7jccI&Dj;db29E*OiLxL&#fY?#0i_7$=8U+RxMqdB!RH<*ncpU7#VBS=WuVSq$NlP|DCoJnnfDZ(>_EY=V{5M6HR8SNOv zvb)x{ODu{Yi|~7Y!2}gtBqjLKyh3H$vempXLsqJR4c(EuJfLCgzQbtJ*K9>X-no;nX*4=?y-rf+t+^$`-8-%_jW*n~1K4)JHn0)RNeXl_Fll=u$j!*NK zt4Dfm0Y+617~h$Yqd1ZbIbz)N?APQP#PplU_({tlGMa(5hBgAmkn~Bk`)%?%epadPffOfk z0foV`qH?*42e=zn8N6PF$sHg#;^)q z-KRi_Vy0V`+IJAQZE|}$hkyAhQd=3fb^9br_o}Mus4Cf08x|AUqa`8C6!)l3Kak*{ zDzy|TYJbuwv55BC0}UirsNsl5Y0R%>jd;#f854lNlpHW;Llekq>h<}$ym)xHAOD6j zQXWMGk8_2mRnMvn1TR}rTRl{b^QRm&3~Q~JET!>YX(eL-b&A|lw=Rsr0ue9BsL|+} zc9ci6lmgMC%95hf>f^chNm>1ya!_ZsK9qm}_RH{@>;g7-)6fIyYkkea5vCy%54&%{ zH_)6PSPx#8T%gSRd-s8lfr9uQ;QNu-hxo#Cgc1NhROW-@fwP zKIfpNy(ymU#e%Wl+RF{26l991^`m49dU7~^dw$nEQt3(MbnOsEuZQSWyF?=IxFvwg*L~PuywNN z+6N2!`^c=p87eVH!y`fsf-iFVq#^(Z0zFjNWoVA3}V^~rp` zW-$DU*mR&iFf|4T0KfQHZ}<^~iK$l`z=E3;FiD{72#!gCdP>S@901EVD-xL#h!!{P z)|q%A#{)`pg=%PtS%|v|bp4`%&9wXNY^zik#*1ps1DcJ1%cCUM0H;jwrtHvfFG(To z;&{T5VgO@$scOoXT!Cd0a+9kiO~3;L zEz99P`oIu$rUwXO2&2I7T4RXcrgPvG9=zWX29Dl*@#&Dv*}_bCr_-XdDp6 zVuBr?2roX+w#%?3zRrZ=sUU9#wRH182r*{Tt2sowlPXgCGv7y7opI*eqF7_{8$y&h z6q8!oynTfbWugJ8RidQWJ5-f2D#(NZ_~F=7FO<`QELXi(aUuo9l=;uF%go{6;qj;0 zS^MeBd<8-Bjva1b6vd=kJBBWUZn`D;$=|>2+xFUIZ*C-Zx2-9_ts&>uZJ9tYx=|dU zdJvMpK;qJc)lC7q&BBB`_px{2s*A@6W3kJ=X77wngC{`MQ&*r$2uzfGvoW(N9fZSA zu8bYuA`1(I`)mg|)T|9vfWIEzs@z*3DP}vQ3KuRgz~~q-kn3?Jk@GzEv?2p`3;TUI zp^vGtwzz?GUVFZ$L$3zZ?Bu?T9)%*m2Hb>EapZC|mKB{6Cj3i^)*#`)r|V4HY9hZ2 zxVaqvxHHs+#5da?HAlUR3y@#JOozl7PRB7bpg(p08zT7U2N3bAZs`BPg0#T@2@$wE z89F$aI{i;oW}@Ife?Z9fE2@aoTEOIKXii5`R!MsOG^UZe;E&##d%#Ar_@w>oxe(Xg zn!%vY2T_EN-`8gzs%0A9fOtMe<3|S%C=d|4OchGH*_;E zw;^vHCVts6fDrhc!Hky;1N?!GQwvdGs{j|zvl2S3$M*DQqT#G2Y>V_3rJyT@lqorR 
zA*w>>%6M%S_^X7L>If#;5}d^$-O%iTlk{=_MHpu6!!cx;d%?7dZ_p`kpTx6P+R*un z!}mv{;?)V3Ty}ur^TcuoU4&^o%;>LIbEV zUg)8;;_!TX_Q~J9UV|7K>pMy>r~jQC`+Z3c@&-O{Ut9irW>`Aw-y+~YG@B*Q3ew~s znhp66&4&DcLBjt&07mxqEeuS~7zm zR&_hrXv4q$<{jnH-?HG(GuAW>JZMs>;&pqs>eUL@q`*d5-O@1!-))sfHQrBT-Q(xg zZ98R|3rVR(@SisYmBV9?NS`3zgIi?WvQkrxpr`a@Qc3K%rck|58Z@L=vetsMk(8^b zz9~>oXjZ$`c}f7d|5!rF+pP{yrRU^s>TH7S-X0pgO)Om|`*ODT@U>^_W$)BgTD!Tq zzU~=$Il4xa#$6oTxj)rLO~3;pJD?7QA2$$v=9@^_p+LS~h(SmPdK#i67Kbl*Vhz^# zDBgz{ME#Xid0UlMtX7f?qs-R>qSNe14e?LVibWu%A1ASOc`O@ z=$2>?U?Q(^5}~ zLR}GXXuz8H)2@#%HUHJF+N>>y(vBa1fCBS%$RFj>;0a?xTprGCQFL(#rx*whe2W zwX7+Ml3OEHbBR`?lsg!DZbs-IC*a_mI_jz7c>o|^v&My+V>d*d#YF00%bE(JB(W5+ z3kl{oKSJl>CHK z&x7s0XYloSuiZiFgR0qczi?tC9no-DMbjXDNP75F1z!HVh3t#>T54{GGl4x8&t)oj zweE$wu@Dv`V6G=Ju|pASE|}Eurize4&h#^NRC&jX-rFj)V^vj^?H4xx$tc(D^3~z7 zRdKT+Oz@f^u3 z39vfd2OhQofPCgc3g&QVTiCioz-!qQ@~~HyWH$KjwyptvX=I&p3ppK{ZK`NTnl@Nm ziyyP!XEKa0dfu0h!j&J3&Wei0o>K((oeTvS!lSbafLKrZS5Blfw>gXH-Fr=ONzy^? z=3KdgySUe2J8+N=GU`F{KyYv;xgvGj6q~4=$!JJ3M6m%dM>~ zWG=z~*OIB~!HK_CI1oag3Z6Pdeal_n?M*CPG2E;qKe{5QnrNZZ`fl@d`EqwrSo2iP zpPEpw=?x86g{1ilOCwn~F^tW#v*;||lO_(Lqq66cl^X;W*bD{H_WqB_WziJUFKE1+ zYXB=NBs`A~i#Hb(ofjih4I|Tj*AaBm=6ftXZJg^Zyasxdw`wO0I({ENMsijKq(uzD zmTW;q9{=TbXA|llG5+M_uDaSHUCVuipI4w02a(%?1I`L+93v9^yLOnMNrz+Z3QtrK zLWH}+OW}1q>>pOPrmk+>VBAERixICr!d0rRq`^;hj1bxNoX~VRB#wS>e1>ZWd}J6K zXZ?<+jD`{tx?4 zN%$;z%_R1Zx-9CJItqt9!a8Thx`A$W#1~w~mOYGPg{|WjLnq?beKq36epty|Mwz@Dh4&K=B_kIWJFGO)1d9Tn2*8 zNv0q?z3$S-@OU(LNm(Q7>ML%mWNEc*fF7*%%a%6~g<6#; zlIy}lu;-_TMcjv)P6s;(my&bX{x??6Js%Hu?W9>OM@QitqV#cq#AgSsJ2e9AIh&nw z{{HgbnMf@z##MFrd6=U6%xqqEkB)&oC#=ko*!|_ksOxi^aT93U=2>?`4*FsZqB%#* zVb)V-&5(oX+f<8}W=ws?gvAqb zR~UObDs47GnXwx(S1yR-jQ34xQHftM0j;iPH{_OKMCT<+=PWJ0#+mP9KaG9cgxFS` zAW0x%> z^hgU=QmAan1Jjn@H?XmDU&gVe?jT%*6;WwGHo-b3N$625EJ2k(=l622?8#t8w+$8^ zRBmg%sysE$?3BRfH`MRdEUu`2a=U|V*^IIDlu=JgRq{$}C9OC-(RtN?sqgrOzuhxr zr~c~}>N`~jn)A}lJGHCN>=Me=&YlKS!_{`RryTzR;}iW~Py2s>Eg(XVe$@}Kz5gMP z|L-jKe*;@vSDT*{x2ubT>pym;cdu-B(6|*6wp| zweX>Y3rM!e2^6%Fq>lF2Ur$+tl#0nIC2e8A)9?`{aAEiRS@sfM)aQ{0uFeEqR#xSb z{Tjcn8);)tga?OfusadMg1d@MCD}=($9CH1Azz#4y3m?r#*SMk*KCf0v9(Z0jpMkM znkS!|_JXOLtRLlIIxI!P*Az;OwC;3V2d}5jg9X;D^qi(N>7}wtcuYE`?>cBSW7Ve3 z8iQ;lTa3AoRZMhHc+9D|mSoDbDy*Hd{Mh0cIlUKpb!H%ZB^6RoRj&2OEREa{W% z28@pZkzXlC>>x|Ap#<3xinz$83s@l6+=@2G04jy71FR2OshmzyybfJu6dgj8Uikwx z0z$9>*v7|4FEw#})u1S&m# z%&R`+^#x!@+399J*O$~)V}Pc{0yN$ZJLi~NEcSg}aNr&tpvk$DStk&}wYJS2uAsm9&Md{tE!|CfYTogE&2<#1ckzjAT$eeL)}@a2O$k_x|o_>D2_4IvY%(qB+8F z5Gw6R-(ad>ccd7O5ToA^3ykv4aL0Jb2#B+;=!vqR_>37Sq3AHwu?W?HHqEx*CPlG1 z47DDmT=Oc>T7nXMtPdK2ff_Y}0cJOX1ayf-ZXDgkusgM?;u@1FTdNk5CYod*a>(P- z!ALmL>81LRJU>Gvm6EYb!qAhJ*?lcmx?a#Z*G3Qt%<4Xl^ zvcsEVToGdqu#upyBk;>>i;{hgIRl5JZXzcJpX$!UK4Dcsvft{)Jvlnvz|dekhw}sg z(D6~{NaimMjcGhrWL!KN^b&SEHUp!Qwwdv9?@(N^5Kx`4j;q0S`Ra-h$K@6=0}w+! 
zJ+VLupF!0e&#^rJiN$zWP^Eyb^~aACoI%6U&>_6iKAqYQR3_!TClAjcd5u#`NY^vz za(-1=DC&E8ElAPbqfRZzaI!rq5x&`sZJa=vDeFv3$O>8@xPrRRV;t`lj2$D)WPZb7 z3kv&rgsaHHR2oEi7&H`>x2}2LD3}{{vrRx45irb!*TgsJnD)g-gWE}X3az_;fD_g2 zS+Kw15{)DTPKaxlA3>B>AinN*f1K)g#B?J7lH71H*Dt2Q=Zs>i!dxE6G(OLTCB#D%{o{U zSTi(=0b3`5ZRKhju1D_%_BMMoITYChunhFQ?R-k&j)`q1w?fkk>a3b4 zeSs{Sz&_Y54#md4SIm_vtddF-ZHsU1LH9adv$zbSwtY4jvwmIzj1P=FM{OM}6iSF{ zgSu)p6}&MpDrGUbsRvuU zCl_%(ulubYl1kAM7V92BL2}MbYYdMBMPN}@J^nkr5;&sYdV7T-Duy~Dn&a$k{9@3%9 zBtH-~Huth+ot-QE;#LYJUlaI?ExueBBR8*!R1xqC5l3bUbIM7yNoRUtA~?Gisey;v z9iE@$;KHgoa&UZX--?Bu5PeJL{szlT4{s_N{f~$#qpOWmsF=f5M|v)LMhdd|r-0ch zOhzfco^B5i=$G%;8HGfoUp{Id9C!*c*{PJ1vigOIC`!rG^i#xso^Yp9x!Fd$ z@qg=&i#>r%iqz$0(`OWKQV>6Gk?=2>?$v9zg!Y>1sj-nuqqnacN}}dj%u0x{x`%R* zAF6$I(X1Rf1Ix-ub3@g`Y8ySz`d#7trjNz1Pb}ek({65&`X4;KTwNc9FN^}w*`G{` z8u0WtHxX#YZ99^-?*`A zxvkXC0r1ybhGDiaN)E@00>2DZCYZq!C|v0attXKhBZgJ3Ty86o!yiOs+hM`rO{|T*G(hh-2q@ z;RdORaH%V^WfYcvJt;B9Ks0FE*3E>QPe>?cGUral*Y*TDzL#>%XaOv(Yty@+M*Yd( ziU#-FxJ+MuWUdpr>M)?-2n4?dTt;4zBN7SG}`F@5m0?pp{-A+zVO1FGG zw}>i1hnJS}^%DPe&G@Irny%0luBotgXd%kz&gqiLcyZf3 zf+z|oa#B72FA~?4apiTMm^EZA6o!;n)X5~2E-b47H`JS%>n9iX4vv%OW)Ex2E93Wf z1ZR5tElekwQOz|(t{?e?#nYl#9y1FZ#5h(${PEwA_Lo0d0riQJQDQ7dH6l76a^e%& zxv}VXgx6P}j*>I4yrbZRe&=$lTdU9zmZ$*HLVWN$EGIic~H9 zqZF!L)YGI2*S)ExzEI?DVf?QwIoPVbyOJALe8mNDaFTzf+5%dA@r@KQofJ!Wv3M;| z#EyKopGoUK65@d0!&pRaJy(N0^9B4r z;cm?Y@0%Ss0Kho%f2w5tH{AV6;Tl{2SA*ed4Q=NQHZ;GLT8_#t1TcLmz$jTz~B9*snva_Dco}s-VJgPiFL@{k3F@KY?LtggOd|pDNNgGf?1@U>1 zPCAP1uCK%ECZJa9aEmfRg&czspo|h6cI63YSi?~UY?8=>iV4H2XRV#1fd#Q{Du0co zX6Ca9zvArtSYI;OLM${cbK!UUv~Sq~n~i~G=yc0e<$1k2dU?9~x<3LzX3_p$2&M*2 z{O&|RQq{c*I2)h`WC77lmOu1JWA``tPI(zNQpRCo)OC?)$bDk`LOPNNh!&gUM)*^sERtd7;njM-l>MqKhb5C&Q-W!PJ= z(yo_mk-BLgLKG@lFiVd_dPfrzUXZ8uHQgwGYSyjWh!HKxT=o)JWZkRh*v%mDOkaf!2OdYx;N)y_bqQ1AtC&YMesz zraX-zg~d{bml4WPjDg4V6)ujMxhkqqv@n{HrNvr9vp1Zv2fmGlR-H*r9gKHvSa)jh zW0Od`n_k)pp4isr0fYL!VDHMd?AM`ZDBI9FT|J%Cb(Q)%83fRMIsE>Ozz7Kb!k!mh zLFx72+^-aFrF3%{rQL*7in!N`ZowV!kA-J%-tU(gaX{(V76T93k3rmalF7DnDZn}g z9RG#O%^NR&7jJ;o-3A5YfSBE|^Q;Z|q__ot-Kk!m^y!)_zg!(X)m7OlKL!k#_lGVo z-=`A)uct@I`mf=v)+0{r9;|G5dbx4f&|@&WRs7uFIo+PzJR!%sbfLfchdsYPK2Y@h z4G!Pz5gl&MWU1+7_zLsjH<|^~)-f>E#q5qcWnGnzMv^(i_ZxNXsUU6N(%tQ+4uRM9 z2YeEHp1SRK6z{|I)^crPMR`JBvn%#|t|)Vd{8VLam|osP3}8YDyF9>GH2<8>s(`u? z)uO97u!>h$O` z__+~Y0Zq$tlv})*`46U7X%FP6^u|Ey@RKNz(b=bNDblhhs+4@mKuVg9;9-6?70*rDvIhrZ}gFwnp z3>pY%mPe8ruGWaMQK;5(EFV@Ftvzw!Ia6|`LLXw4J4QT0TW?XLM{15$F2dD-sI=yCJ(y6n~N z^yy#rxJFRcD}y~@K|di8)zF-+R$_yr+O~JVzwRzb`Ka#ICz$&ru(bfUnwhA>*uoyg zW;yzN;If)qu;ee9In7V<#g`}J^Ki9gp?Aw(8rd+vkk?<|7k>q@kCd(xceigKz)EthwBv>)v%P_$NSTfy4l zwuCL)&rUN%`^YE9RI)Yoh5|sf|l4xaGdS60rUlyTAg* z9k!7fwb;PKb^(iM546-P^EA!6x5n{D(dIWBbuHPHY)^ZuS$p@7;Xs1cQvYYcVbUV( zsXU73rQgI{UK828S9W;rq(;zBL*}{lMnpL7>a{}4Wa4F_E&+?4{2Yk$p6gO&<6^v= zSW{D0u{~P%V6{~7f@N8eQSGgI%M)~_YTU5DSAVlazl{aI6$l6}N^4~4I2-orEVew( z5&NYK>tB%nby?4;Y^&I{bM&+PRNX;-s3#Qvf6b5*{l$`EjMA9;0{psU z$H{SrVgXJG9NfBa{5j*CkZKFD%7Q0k@d=eKf~ZE4^f%-EWn5zv|EkvTTFtw2U6CF+ zGN(9dTtBlXTCMS9myEv7!O$pMHcuZ6y-e>d;MWT9fKBL3kGUx=kO=aeOl>TunkZt} z2o^1NwdewynjW*dyQwCcc5z($&|jj*4BaEV>8DcR2##$id>+U{anCcFY}7PCc)Bp) zfPeE1{b%J*EwBP~epdXS@xQWy|6%3TB}7GKpl2jxr{-X#X{n|sXBw3l7MXXQ6EW=XJN==Q+G$>I}Qpp}dNy@Y; zQIxSPPfg57FUd|;9{dFY_@4&@%HJ;y;b&@aKZEvXI2f9k*qGXy{^$#ho&Ur1TK0s#2n{10lz|L|!gQDJ!{kz()B z)XK3I+i$-p7rJd0J>WP52*N#)ut4N(2zdSPvGrpW%HqxT`>vIWO;)E#Nf8?62dBasY=G&KjfM1Y3fvUlwe`x83NSL5xbeA$LfM$L zOl}N7LGV7Mg4F? 
z4oKdDuxAC~LGZIzkST+WgG6Uv&9omG{Ah@W>50i{JdP(*CGNjO3VeiQ)7f?c0U)|- zzE=Ey160moM}HyC^FjYDD&pbQtd3Z+2F2hdGH9tX9g@YHW(YM*ZP2NgX|hdE)m(-^pz+*~Wu`?~aI*U))T~3A@L}+)-_fERyH;ASY0z+z&M`Kn;I%6-^Ei{%W zalu=f6qL?FhZL?~o^k#LA;XiGST8~6;^n{-YHC>e@%8DFdAJ|kyQ(_kc1wLEY})E z^Do60+9&^fzn}{Qh}yiDriWie{C~p}N|I14UqgnT1-y??Xx(EE%Qh=vk;LRs;(YP2Rv%Vw@Y4e#UkS+)?ygpCRf#@MZm!VV1fs$ZA z7(lX=i>n?J^&6M#PP8B%k$)Ua0r4xfnB|*d7XGNUFuXi|=wXH))8ta7-GYc-J@W!K zQ{r(p^ZjO7s|VGh1k{G2wdCM2gXXv}c%j|duG@IS3xPnAEwzrPXc=2h1EhS4}RfG}) zwo-M7eh5X}WVB2X*S}`K@Q@urC*!ZKp!f(I+w!}K{2M_pwn_?2I)1_RwU?-t_@|%) z->Pf>`O>s(w5zCWh~5KzH4q}Q2laF&7~AWt)&KKC%vPJXtu zIA6rIDYmOITS6{HLHiC7e)(55g@C_?mIArr;gQ~ghdhp)wpRv)kvjo^IXYOxB6VIn zCzRo4HZz&raDSkx7rt6pb*i90M_FFY)u8^nn%4&Xq`XM~uW>umnR`@J^adX&dUa-# z$K9icB<1W^>;7{y-;jc+24P5m@7$BCH^uKRIM)h{3~g~(K@H10H3{0$aaA0cqdblt zBlsdKDCy2OlU7{l@NdILq*|H8u}AUULHegPR%@bOku^#IkS-dQl@NH!e10=2z%9mIMh{!&kmdp_~xHs z^fOVm?kz*ng){HS8YwPpYNd!t71Z*p2&HGN2(-3G^!u(k!eY2hm>cf!hs0LjbwRYT zZzP#q>}4BlWrgp~3r%Z;3vEiM!-q!}Z77wrSJ*nyYOop;vznPuO9zq+pe;!PUkU>~ z%cKx}b>7Q;C1W-NW-TVyg+K2eRTkBL( znd2gqrGpUl)0LbT21oIBNA>xP~a*c*&B}`;R3gNw}Qm zTILlC+`Dx`4PYv5WWZu@GP;2Td=Rd*!dz_Gaetp^((`QwXpLsUxX>ZH!X% zT+n8tu*}O}W_GS=j`>-5?aDH5hMm!uU!dwgJ}E;nIJ;)DdiQ;7Hq}|Xj?s;nrYr;e zJ@m~Yd@J9h-JWvC2uAJ&zb^QD5d)>nJ+wo66|ZTarS6`G#r7 z8+b;KUM82|!+>l8PH12HF?6ea+qv#$S?Yd-B9($#Mt!!X(#E9%Sk_OF&Dsy)%3bc*zRkq<_ASH|NKX`cmO`cRy>mj7*0`elymCKE(dS{?6lo8YylKsSYoy zc=uwRD%%{1O0_zi!am2RBzEbfb&u(i0K&66OdrW7vH7b5LJj~lu!Q+9E$zHYKiP8L zU5bnW0Po~Y$$eauTgEc}_5m`Ih4>IpIF)My_F$SPV!iz=&Y4SsATyTNNP{NQf$T9) z|F@>p{?Rtzsf2ws<^r71m1twK7kDDTovfWdb)7WosqhFpt@M>IQ=k@I z-GBkFrE#`Xe~`oRU${!))}}1!O`kh|!Ta`t(ov5VS_W+cw0wbMEqd?9#(aY=4M`W7 z4g)a_KEB{Gs|$gf4VDjj7xwD%)l0clRyk|jH9#yVXAN(2s+qjSJ0R1HEMw3@iIu5r z0ESnpHouv-91^|)E?9>7bR;*$M_sS{dN(Ftc4=J5P8TSgd2xPgV>$Jf>F}?QiypO6 zBV~_wP3}<8QShzucDyItwBN{8Th8W$UNC_*G#E~2QyXxA_4ZzqGa*bzK(11Or6eP5 zWOP8Qz>61nTg{)7GZq6{mz(A44+jC1mbw+tH!>+diJVev^7np zr{h7TVYXhBm? za}>DgnJhcGUP$M4G*fs;o-z-&KcdR16AmiFaifs>;YIc?+)mMmd!l#doX2b$&Ce_L zdSPM@`2;#ueFEeJZqMWx17$G>2mT)2b5G=B*Mc>F3*VO%zobxVJRX8L<%5oc=#v~j z+=X?t*k-n05CM6jI`x--i|OEpQLE?&A3}t{?h~xwcL)QCoSca4j2^q=S*U`gS9dVD z;47iwuQl;UdDA#uv%Zd5VJ7?AucR<6B?Vq)?UdtMU@B>oj8qZqwqNZs__9HB`MmgB z8)PM;Wa{lQ54tK~$un=2g3$k~ngV}#ZxKdUK2$T&NxZq?)d+7glcINR=cV8SI zREUU*$dZyIC0p6EBw4e!h%p#UmSOB$nuFu<)f zazKJ@O#i&6(TTKxWP>9r56m){7-Y0+uGqW@Lk0BXKz?&;-1oH0bGVF+e<%q?hl*x8zMYB z#C+#y`DIh1xDVs&F^4rJKlNSTx$Y4h%Q2qVq0hN0p(c>+7RFmoU}&2D&5g>)_7^#< z$0AA&KcAGkKUm&Xaf--7ARK?$UDzfXq-pH@s7JZdj6W4q8Esgnxc>FnR?&`QiFuVv zwP#lLCm4H$9Ekba{Q8S^r=rl%4y}H(=M5W@KF+KgTJ&|R$=BzZ!J^x+Z_PhFwHFOz zW1VEWH!&U3wM+g%+?V}F^9ov4=&vcanCUz6UNiPlRK>{wshkq+UcTMw15JhX89_X$ z5zF}O4Y*fIx11QYf9Lis!Nae6a+Td+tR$}M!LyF!76wKX9%X3s5`d5bd5IG?f*Gr#-M$4hdwHo@)MuILTb#(l8BoZ#|V-pYMs z`D*bi!c$5F zdM(EoPKCyNaKrSxs(H+AG~~Tyh@m1Ws$>5H&F6h@pM>^Bo)fyK{>n$>ys)wEWs~iZ zUaTBuQobV2Zxxv~Jkc)S*kG?aF?enwTamx7SHSST;$1~?k1I0MMnbC0cHFm!PbYn| z_N$)0@{+4$^|$OWm+KpsOm+>?xj+3NEO+uniQ>7(acfRJN9B}DcYaR1!^8E4(^+=0 z2JxDv<3v`>unC9NQH2TzLy46iVox}EFuvKgQNMQE4b0|mTib0kC27;LewzcIdVh3&HLc$%)3I!W z@yA8dF;N*M2gjATyV<5z_Z72TUw_EtNFk_H1C)+$+sc(;L60JTc0CGan(+6%s<+i3 zafn&OyI7U|Twa|}kg?YJq(PHfAv*O^VJ^=R_T; z*PU`4d{R}vW^InF9Gi7vmVFcFWno_m*TuA{g2dVEUjJ08_DwgysOE9mkT8}h3t&9r=JUK7Zn%)Z;^WgaWt z=I1u+SkNIk3A{JGdNUuG>{7GP8t>J>JHLNh#->q>S09iV%@t6I^yw8Z__(oGQo}30 z;;HJ@i}JD`l!ErZgz=;}y%hXF&%_=~}WYgZ#d-@8|b? 
zO(_2QQQm^cs@xRmZ@F#5MdyR}YA4^{V4Q4Q-Y*fe$sg`qO5^9RQ%g|3(%#JT`MPcA zo|S6E&_wzb-m9CtK7DU}IqgWmD~?*|268s(9VKK4n7!w_yCHPVFi+7j$?KwQ{vFj} z@}rmUHAQ5t?`jknW*NV#f<6_kd(-0h8ZjTs!cX6ApVsWK9aPV{VY9jIg>}F=fknNZ z{*M{dVtkj4p)d9~3|xxK^RiZx;8_$E?Vwg5BhpTMp)Mr#W0L|}kg4FLn$APChx=l0 zIkqC#ZxWq9>MMD5UnRe6yXLi?F8<`%{^HEZPZz@06&DRpOlx~ZY~C6}pBE%PJN~SP z+rBpCWn)pN`!exn)QlzzeW><5`Uu(KOx{XabwSC8Z!a~44+@N$5o(JE^DA8_wMeOlVX>w zO#hN8o1LM4TE=ShJ%e7?k=m#1pWa_+zTNkuJrZ*{HNBp}c$>qWE331n89A7C+>Z)K z&Oa5)z^62F{hpR|2T^qBpyCn7l0w_Vds1bRBlo=ez`1S@vl!v97&8%%lPode%jG=w ze#D>g9DW~e=a^-8k4{s%&Qgc7$EDXrnk(Z(`V9EWq|Ge2ZPE2?;CYH}E@KagM z>T9?a&+h2@@MX;wn?Xkp`cK2~S7Z{wFgr{*3GlPW8S7Ylo!C7r%4hN~(2 zte#l-zT&p5I|X@5${rf=4iD*!K2Z6w!)mEY3I zEK9ez)?6XyEoj;xaTHXzz zQSY&zij8)r>oNk~xY1oq5wFEEj|U|2DMn>+?M-`paQJR(RV`-KSHtcv_9FqeO>bzf zzFOk08Q!^6YvXWCQ<>Fa!`(f8YpqR#7Axf6(Qs45@cC}x{~0_h`c=VlPg`aGo;@yG zYyCAd+-D^v8O~-jN90*3ES=`s(E6n{ddHJtX_K6%@3De@sml`@W7OS~vP+W^of~#q z5hs0jp1pD9d20IOH;RjbA07J9z|1pNz*Oe^P-H^JIBQCPxKB0X{2H73fcOY;g8~UA zKZ!^ByNmH56M~=77UPkO2X9LA3(4P2d){-f8s&Z9-aetF-@hyPjpH6q)s@JGh8{l7 zzpMM6Wq(NjMEYn>-TOAFnt;sH@3EFylkQ9xhY|(p<)yAC5_gpe^%?KP^Ij}#ck12w zbfRNtfuQ+7cKt)8^dQ4`)8C03Go~^Hw@31YO7ey<$(RtEx)VNbns$`6NGirpsG*|+ zF*#D$;6Y8>k>+ao{1%N$kv4Z;g~G>|y26e%Ur8U7a$qXsdiNngbt=5)%iF+haf=`D zn7kZ{`n-5Cbwz;bFz?q&l8*PVnyP2wfD51T3dZW;o|5z{)f|bZj()A zCHcDj+j8U2uXEwXCyb4YjcvYk@5-T~``H%sg>o^sEe6JpysT8~QN5gMY>CrcU2SkO zL$hZ2Fvqi<@)vfj*IJyxw?4+5JO6lOQ(ff4Px0HwiXvEVNw1l%@@(KMFOzF3DQj6i z>vJSs2y0@J+LFtzF?eD&UQ5;dPLskgd!g~T%(G^mk7~+JpN|}$vPrVL_(Xf=r3e@5 zM&QYjSeAX!Hbai0u_~FKn~Y@6WrmtnBs84}YpkEClHXv^ZPF^#a4upKeZn_w)11$% z)!jv2eL1=5)^uxrO_=R^#YKKjH_`8IjPS6!E4x(7NUoi+E-P3kW*#)qCS-0Q<8WI4 zu-eVFUGLa}f7+hwun-=IDwYh^e02K>*GO*aZHuk4MIT)Z%@2N4u|FwPsmrbW9^pGSZHqQWmagyO>*El7QNC)T%2z2>8d7i zUYl8+ly=vGy}{Vy;nk(>D!p1Ig*7@%fl7+R;GfS0@sjV}bns4PA2|4$cp_r^dUi$! zR+&pj`3z3yZrja6z}~Zy>u2J+6p;SRM|5`l^QhqWG~TL@%i^Pgyo>x3I?~T=u5Ml~ zEZ93^ezYbtng!Fw+_r7p?@qPg-pb6z=JFNae&8FwpFVPax5zF*V~*(VQ=$IirK=P( zs=V%D8g-TR_N`Q!RCeP}<)7UyFw3a4CE?+WXY-myC;3lT4erI&zei=e1|J++(abAr z9ww}z8sGQo)Rx<i&pQ~>p0+96F`u02w>MW9!uy#~WT6BaytO$Aec^L`!59;Qdv2i^V5yR#oXg_pGvt z7C$`weFtWA$JBV2+U%_uN45IQAV#OQ0!(!4>n~=8H}y>YoPxH6+X&X~|h-3%h)d-_MZ5 zyT?j&cgVZw*MeEo58WdiPlpGOdHnRzP(n?o>-A_#)pI0o)lL3%H;y}K;OL=s7Ps49 z8LTL1xYhpHK&q%S-|e(N#^=$`$tQ-obo473R>B?{6N>ah=VqrD&bF}KS`XKsz%JO_ zmqH}{6S2<*Z_r|F;3olSH2fmqPmZaEl<6ZKouf>^Fj@BR1Ma^-DZ~VzlXGycxW6&< za_y$y-kIGNgI_0HRzsoqelg&03h^Dh4v)lO92^K(2Mp;Z9x{VSzjJ695niFnfviY7}fHlP$~f^6ufae_oWc?0gNR5yCq3x z4$03JJ1@K3fZG)6;ffigIYkQO5D%=UmmAs>k9Q_rHS?!AB+|%?+qH7oB)(!j3CR5dsrC&P(Ct@%ZQzbV9P>Xcz_SPyqHt4N?N z^^vH>09pplMaB`2&O9I->6azs2DPoWcUuNcZ3bJt;PbgJg=lU>nd5;&d)N^up!z@Z zPY;00UqDDAQCIiPk8-7i+FL2ppafQ>fmO)bm08Y%qR6Rh-jaD9uFe%%Mec4nyLmXC zcpO>XNI~GTdb;v1bSzG|9glQU;@6i#T0z{k*;h*?GV+jO2r9QBcX>+p$^zc=XDmEkPz?eY|k9WknqMhKD1k#fv*qiwMN zJfcEw@#my4nI70S|P62eu%*YTZc%=;REQhd$$g zrCj~x0mrhM0DTV7$S_8Kq=JSZ@N~rbc%W_Z1cEIdYfr&)KZ#DJQqTpAisWqP8A^s+k3hGBkeHG^9_BZL5eTFGL!-IYYsRT=>sM4 z1&AJ4XM41V7hDG9jq~{Dp8G#@1!<&aJy1*c0_56PkZWQHQ^U3`0H&(k>Q}Nk#(@h~ zf`5^DEkTMZcODT}n4UED2^?^3EqUhgmZQ$3sOKU-qSMxZU(2CWh$5<=tWFgq)N>I)`S*80U@up=mUWC4l?XfI#^MOe*l z?`{OK+X5+mE20K^PXD0*isFz-i*0X4dj@`6a61bT2SM4XlU=9 zg>ID%*J;7PJNKm!W8(fx8C2z7fK1Ih6jX*4Dg&d=+?PV^Po%vdI~+x2c1+CPwFC0) zb`5fAvOSe{_`I^xz`|j(A#@8yC<5suyIk7wG$_p0KC|>5h^_O! 
ztW}A<1v&2(OmdOETeX6AI%VA%Xb`>TJGkgAG_TxT+N7#%bU?7kH zQH1P!$KTLi3N32%!T9kOV`y$J2l6I&Y^J>=8ic9#D!(ZqNQ=p`C={%%&3!4v6@CAI zLGxRrw6`JbHjGi^rb#M zMKA=WLr+JRv3)5$na2J@V3LioFvXik#FCVu;icWqnV<~2U@S)}!##Wpl!0@B)rN(n zY}P~}PccNVE<`U<%Bl)bG#6?C>ewp+<0Q6(~gH=oE;WJ zX({}Bt=Wq@@WprNkm7T#MoO9|RB2&5Vx391osxN)6cGYB2S?Mu>f*mbEpZnuWM^;& zhSFkF`)E*C7l66{a!0f_EnpXnt0OE1V9DJ<3Xj#LEAn-r&t*XMXb5ebGloEUVy$)D-JccACvD`w@0RAi6r!;? zZKYs+-JCJteLJ)_6f)0B)T~c|PcQ_IFmzDZrI`Cthz}0YR*VZoB*~=av)l5A%~GxW#?PX45gn`sD`SEE+zNi0qlP$(!<>g zLjWhvSByl7s^W7fMg)pcM${}2_kR@QjCYu~DATgQv?*{R2gnIz21#_GttdC>iyjm= z(|T%_H`qh=aNvRLkElj3fi^ZlibDD@d3&cb55o8gjI_v}crJi8ER5n90(5o?a0S+8 zvqJF4B&3kN2%F=M(Fpf*`FB4LjSOQN!mHrL%;9#Z*htR9V5TT`VjV89Moh|9MXmWwIe7kRa2ynf@W@LZxPoarBHxZx!uQ|xV_zhYV z2Q4azFk0^xO>n~RO(+U@{!(s>b71=;upKEfA~I;h&$q7?$XXs=3WmRfIVH?q=Drl7 z<9%AmI5(I}V4R^rU7g5`C-tPNTNVuY(33VoF`MQJAPqoAau+DV6YJ}_kQi__L>;z= zP0CU{`GR6lJ~gZjmOv@2O9zY^Nsq6xLMGmTuvDahD$*HCX)os}*E-gpAXge8Dnt;- zh<0j70^W<_hEq>pRX`-n(Zi(3b@Hpv)R-O^TLKnsL)qA$I+xEZhQyi(LxwQI*pwM+ zV4r_3`1!Nbk=p+0vHh+vt4H00wnv_)7+_!^o9a=WdqrIhS+*2RtA=!jJSgyEF=d!N zj`TY2pGJ{{b>B75-Swb`gkimGEuxw#+>|*u*iHNARVoU!3*`#pVeBvz!fXg>)!y}# zX-*Wa>WREX2!a012Em7n6n1IK9A`J2+rm8z zbsI)PQ#NuCQIj%gp6TN=9hb4aFhAG=VTKGbtS&i-wn@P#-zHHFpv6ENASa~b`U?-dWn(>M1*XKUg@p&+x&eJR9hzXgC4#rv?`5%)5v zyf-u;vYYHXwg77Z#jc-vhs7U=H(@Z^HD?>Cx|U%J5&wUgUaZ4X^b9&?ES!r#iWd2( z1xlnrn=_uKvtofHVhr^`Hl6N;zsWQR@pt;9mox%?-jJL7oIxYF2C|# z7%Z0s%aNYef+ZT{0o%EtkeK@y<T~SqK+Ma>ves7JCb#38|X``YCcK zNp+I`y?|FtQ>Y#tR1Z|*+?PU>{y~vOS(F=j zX3K=Y|I7%jhfJU1a7qW+a$cS|XPhSvi+GNUTu?W6)k%p%PqwxD_a4w+hQUz10MOPE zYwJX5;oJSacCseS{^KF6kdp>WIPEf@eRi0?juUR-Z#P||yWR0s^rgS|H8t7)~)k1e?6xCr}s!S@~3fs)AFPip#Aw!?w?3X zZ?67EX2G-H^Lg<13#@IPwSJe&b4&$Z>PG$S!cEhWtIT z-@G!Fd3ofoi~VUNX(St~epQd z!%?_v?brQ&V8wREvZxgxx0=WNq)oJZ=|smbtK}xIdU}l4RgQHIdmB`p#QyZ X&QO=M=mGK!{>y_iS?PW-FGKwg0hu`% diff --git a/EE/paddlemetric/src/paddle_extension.egg-info/PKG-INFO b/EE/paddlemetric/src/paddle_extension.egg-info/PKG-INFO deleted file mode 100644 index 8499b0025..000000000 --- a/EE/paddlemetric/src/paddle_extension.egg-info/PKG-INFO +++ /dev/null @@ -1,22 +0,0 @@ -Metadata-Version: 2.1 -Name: paddle-extension -Version: 1.0.0b0 -Summary: Metrics library for paddle, porting from torch metrics. 
-Home-page: UNKNOWN -Author: Mingming Sun -Author-email: sunmingming01@baidu.com -License: Apache -Keywords: Deep Learning,Paddlepaddle -Platform: UNKNOWN -Description-Content-Type: text/markdown - -# Paddle Metrics - -Metrics library for paddle, porting from torch metrics -## Install - -pip install http://public.bcc-bdbl.baidu.com:8000/Package/paddle_extension-1.0.0b0-py3-none-any.whl - -## Document - - diff --git a/EE/paddlemetric/src/paddle_extension.egg-info/SOURCES.txt b/EE/paddlemetric/src/paddle_extension.egg-info/SOURCES.txt deleted file mode 100644 index d359b6fb1..000000000 --- a/EE/paddlemetric/src/paddle_extension.egg-info/SOURCES.txt +++ /dev/null @@ -1,152 +0,0 @@ -README.md -setup.py -paddle_extension.egg-info/PKG-INFO -paddle_extension.egg-info/SOURCES.txt -paddle_extension.egg-info/dependency_links.txt -paddle_extension.egg-info/top_level.txt -paddlemetrics/__about__.py -paddlemetrics/__init__.py -paddlemetrics/aggregation.py -paddlemetrics/collections.py -paddlemetrics/metric.py -paddlemetrics/setup_tools.py -paddlemetrics/audio/__init__.py -paddlemetrics/audio/pesq.py -paddlemetrics/audio/pit.py -paddlemetrics/audio/si_sdr.py -paddlemetrics/audio/si_snr.py -paddlemetrics/audio/snr.py -paddlemetrics/audio/stoi.py -paddlemetrics/classification/__init__.py -paddlemetrics/classification/accuracy.py -paddlemetrics/classification/auc.py -paddlemetrics/classification/auroc.py -paddlemetrics/classification/average_precision.py -paddlemetrics/classification/binned_precision_recall.py -paddlemetrics/classification/calibration_error.py -paddlemetrics/classification/cohen_kappa.py -paddlemetrics/classification/confusion_matrix.py -paddlemetrics/classification/f_beta.py -paddlemetrics/classification/hamming_distance.py -paddlemetrics/classification/hinge.py -paddlemetrics/classification/iou.py -paddlemetrics/classification/kl_divergence.py -paddlemetrics/classification/matthews_corrcoef.py -paddlemetrics/classification/precision_recall.py -paddlemetrics/classification/precision_recall_curve.py -paddlemetrics/classification/roc.py -paddlemetrics/classification/specificity.py -paddlemetrics/classification/stat_scores.py -paddlemetrics/functional/__init__.py -paddlemetrics/functional/self_supervised.py -paddlemetrics/functional/audio/__init__.py -paddlemetrics/functional/audio/pesq.py -paddlemetrics/functional/audio/pit.py -paddlemetrics/functional/audio/si_sdr.py -paddlemetrics/functional/audio/si_snr.py -paddlemetrics/functional/audio/snr.py -paddlemetrics/functional/audio/stoi.py -paddlemetrics/functional/classification/__init__.py -paddlemetrics/functional/classification/accuracy.py -paddlemetrics/functional/classification/auc.py -paddlemetrics/functional/classification/auroc.py -paddlemetrics/functional/classification/average_precision.py -paddlemetrics/functional/classification/calibration_error.py -paddlemetrics/functional/classification/cohen_kappa.py -paddlemetrics/functional/classification/confusion_matrix.py -paddlemetrics/functional/classification/dice.py -paddlemetrics/functional/classification/f_beta.py -paddlemetrics/functional/classification/hamming_distance.py -paddlemetrics/functional/classification/hinge.py -paddlemetrics/functional/classification/iou.py -paddlemetrics/functional/classification/kl_divergence.py -paddlemetrics/functional/classification/matthews_corrcoef.py -paddlemetrics/functional/classification/precision_recall.py -paddlemetrics/functional/classification/precision_recall_curve.py -paddlemetrics/functional/classification/roc.py 
-paddlemetrics/functional/classification/specificity.py -paddlemetrics/functional/classification/stat_scores.py -paddlemetrics/functional/image/__init__.py -paddlemetrics/functional/image/gradients.py -paddlemetrics/functional/image/psnr.py -paddlemetrics/functional/image/ssim.py -paddlemetrics/functional/pairwise/__init__.py -paddlemetrics/functional/pairwise/cosine.py -paddlemetrics/functional/pairwise/euclidean.py -paddlemetrics/functional/pairwise/helpers.py -paddlemetrics/functional/pairwise/linear.py -paddlemetrics/functional/pairwise/manhatten.py -paddlemetrics/functional/regression/__init__.py -paddlemetrics/functional/regression/cosine_similarity.py -paddlemetrics/functional/regression/explained_variance.py -paddlemetrics/functional/regression/mean_absolute_error.py -paddlemetrics/functional/regression/mean_absolute_percentage_error.py -paddlemetrics/functional/regression/mean_squared_error.py -paddlemetrics/functional/regression/mean_squared_log_error.py -paddlemetrics/functional/regression/pearson.py -paddlemetrics/functional/regression/r2.py -paddlemetrics/functional/regression/spearman.py -paddlemetrics/functional/regression/symmetric_mean_absolute_percentage_error.py -paddlemetrics/functional/regression/tweedie_deviance.py -paddlemetrics/functional/retrieval/__init__.py -paddlemetrics/functional/retrieval/average_precision.py -paddlemetrics/functional/retrieval/fall_out.py -paddlemetrics/functional/retrieval/hit_rate.py -paddlemetrics/functional/retrieval/ndcg.py -paddlemetrics/functional/retrieval/precision.py -paddlemetrics/functional/retrieval/r_precision.py -paddlemetrics/functional/retrieval/recall.py -paddlemetrics/functional/retrieval/reciprocal_rank.py -paddlemetrics/functional/text/__init__.py -paddlemetrics/functional/text/bert.py -paddlemetrics/functional/text/bleu.py -paddlemetrics/functional/text/rouge.py -paddlemetrics/functional/text/sacre_bleu.py -paddlemetrics/functional/text/wer.py -paddlemetrics/image/__init__.py -paddlemetrics/image/fid.py -paddlemetrics/image/inception.py -paddlemetrics/image/kid.py -paddlemetrics/image/lpip_similarity.py -paddlemetrics/image/psnr.py -paddlemetrics/image/ssim.py -paddlemetrics/regression/__init__.py -paddlemetrics/regression/cosine_similarity.py -paddlemetrics/regression/explained_variance.py -paddlemetrics/regression/mean_absolute_error.py -paddlemetrics/regression/mean_absolute_percentage_error.py -paddlemetrics/regression/mean_squared_error.py -paddlemetrics/regression/mean_squared_log_error.py -paddlemetrics/regression/pearson.py -paddlemetrics/regression/r2.py -paddlemetrics/regression/spearman.py -paddlemetrics/regression/symmetric_mean_absolute_percentage_error.py -paddlemetrics/regression/tweedie_deviance.py -paddlemetrics/retrieval/__init__.py -paddlemetrics/retrieval/mean_average_precision.py -paddlemetrics/retrieval/mean_reciprocal_rank.py -paddlemetrics/retrieval/retrieval_fallout.py -paddlemetrics/retrieval/retrieval_hit_rate.py -paddlemetrics/retrieval/retrieval_metric.py -paddlemetrics/retrieval/retrieval_ndcg.py -paddlemetrics/retrieval/retrieval_precision.py -paddlemetrics/retrieval/retrieval_r_precision.py -paddlemetrics/retrieval/retrieval_recall.py -paddlemetrics/text/__init__.py -paddlemetrics/text/bert.py -paddlemetrics/text/bleu.py -paddlemetrics/text/rouge.py -paddlemetrics/text/sacre_bleu.py -paddlemetrics/text/wer.py -paddlemetrics/utilities/__init__.py -paddlemetrics/utilities/checks.py -paddlemetrics/utilities/data.py -paddlemetrics/utilities/distributed.py -paddlemetrics/utilities/enums.py 
-paddlemetrics/utilities/exceptions.py -paddlemetrics/utilities/imports.py -paddlemetrics/utilities/prints.py -paddlemetrics/wrappers/__init__.py -paddlemetrics/wrappers/bootstrapping.py -paddlemetrics/wrappers/multioutput.py -paddlemetrics/wrappers/tracker.py \ No newline at end of file diff --git a/EE/paddlemetric/src/paddle_extension.egg-info/dependency_links.txt b/EE/paddlemetric/src/paddle_extension.egg-info/dependency_links.txt deleted file mode 100644 index 8b1378917..000000000 --- a/EE/paddlemetric/src/paddle_extension.egg-info/dependency_links.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/EE/paddlemetric/src/paddle_extension.egg-info/top_level.txt b/EE/paddlemetric/src/paddle_extension.egg-info/top_level.txt deleted file mode 100644 index b722df99a..000000000 --- a/EE/paddlemetric/src/paddle_extension.egg-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -paddlemetrics diff --git a/EE/paddlemetric/src/paddlemetrics.egg-info/PKG-INFO b/EE/paddlemetric/src/paddlemetrics.egg-info/PKG-INFO deleted file mode 100644 index b6b4be7d9..000000000 --- a/EE/paddlemetric/src/paddlemetrics.egg-info/PKG-INFO +++ /dev/null @@ -1,22 +0,0 @@ -Metadata-Version: 2.1 -Name: paddlemetrics -Version: 1.0.0b0 -Summary: Metrics library for paddle, porting from torch metrics. -Home-page: UNKNOWN -Author: Mingming Sun -Author-email: sunmingming01@baidu.com -License: Apache -Keywords: Deep Learning,Paddlepaddle -Platform: UNKNOWN -Description-Content-Type: text/markdown - -# Paddle Metrics - -Metrics library for paddle, porting from torch metrics -## Install - -pip install http://public.bcc-bdbl.baidu.com:8000/Package/paddle_extension-1.0.0b0-py3-none-any.whl - -## Document - - diff --git a/EE/paddlemetric/src/paddlemetrics.egg-info/SOURCES.txt b/EE/paddlemetric/src/paddlemetrics.egg-info/SOURCES.txt deleted file mode 100644 index 4850ceca3..000000000 --- a/EE/paddlemetric/src/paddlemetrics.egg-info/SOURCES.txt +++ /dev/null @@ -1,152 +0,0 @@ -README.md -setup.py -paddlemetrics/__about__.py -paddlemetrics/__init__.py -paddlemetrics/aggregation.py -paddlemetrics/collections.py -paddlemetrics/metric.py -paddlemetrics/setup_tools.py -paddlemetrics.egg-info/PKG-INFO -paddlemetrics.egg-info/SOURCES.txt -paddlemetrics.egg-info/dependency_links.txt -paddlemetrics.egg-info/top_level.txt -paddlemetrics/audio/__init__.py -paddlemetrics/audio/pesq.py -paddlemetrics/audio/pit.py -paddlemetrics/audio/si_sdr.py -paddlemetrics/audio/si_snr.py -paddlemetrics/audio/snr.py -paddlemetrics/audio/stoi.py -paddlemetrics/classification/__init__.py -paddlemetrics/classification/accuracy.py -paddlemetrics/classification/auc.py -paddlemetrics/classification/auroc.py -paddlemetrics/classification/average_precision.py -paddlemetrics/classification/binned_precision_recall.py -paddlemetrics/classification/calibration_error.py -paddlemetrics/classification/cohen_kappa.py -paddlemetrics/classification/confusion_matrix.py -paddlemetrics/classification/f_beta.py -paddlemetrics/classification/hamming_distance.py -paddlemetrics/classification/hinge.py -paddlemetrics/classification/iou.py -paddlemetrics/classification/kl_divergence.py -paddlemetrics/classification/matthews_corrcoef.py -paddlemetrics/classification/precision_recall.py -paddlemetrics/classification/precision_recall_curve.py -paddlemetrics/classification/roc.py -paddlemetrics/classification/specificity.py -paddlemetrics/classification/stat_scores.py -paddlemetrics/functional/__init__.py -paddlemetrics/functional/self_supervised.py -paddlemetrics/functional/audio/__init__.py 
-paddlemetrics/functional/audio/pesq.py -paddlemetrics/functional/audio/pit.py -paddlemetrics/functional/audio/si_sdr.py -paddlemetrics/functional/audio/si_snr.py -paddlemetrics/functional/audio/snr.py -paddlemetrics/functional/audio/stoi.py -paddlemetrics/functional/classification/__init__.py -paddlemetrics/functional/classification/accuracy.py -paddlemetrics/functional/classification/auc.py -paddlemetrics/functional/classification/auroc.py -paddlemetrics/functional/classification/average_precision.py -paddlemetrics/functional/classification/calibration_error.py -paddlemetrics/functional/classification/cohen_kappa.py -paddlemetrics/functional/classification/confusion_matrix.py -paddlemetrics/functional/classification/dice.py -paddlemetrics/functional/classification/f_beta.py -paddlemetrics/functional/classification/hamming_distance.py -paddlemetrics/functional/classification/hinge.py -paddlemetrics/functional/classification/iou.py -paddlemetrics/functional/classification/kl_divergence.py -paddlemetrics/functional/classification/matthews_corrcoef.py -paddlemetrics/functional/classification/precision_recall.py -paddlemetrics/functional/classification/precision_recall_curve.py -paddlemetrics/functional/classification/roc.py -paddlemetrics/functional/classification/specificity.py -paddlemetrics/functional/classification/stat_scores.py -paddlemetrics/functional/image/__init__.py -paddlemetrics/functional/image/gradients.py -paddlemetrics/functional/image/psnr.py -paddlemetrics/functional/image/ssim.py -paddlemetrics/functional/pairwise/__init__.py -paddlemetrics/functional/pairwise/cosine.py -paddlemetrics/functional/pairwise/euclidean.py -paddlemetrics/functional/pairwise/helpers.py -paddlemetrics/functional/pairwise/linear.py -paddlemetrics/functional/pairwise/manhatten.py -paddlemetrics/functional/regression/__init__.py -paddlemetrics/functional/regression/cosine_similarity.py -paddlemetrics/functional/regression/explained_variance.py -paddlemetrics/functional/regression/mean_absolute_error.py -paddlemetrics/functional/regression/mean_absolute_percentage_error.py -paddlemetrics/functional/regression/mean_squared_error.py -paddlemetrics/functional/regression/mean_squared_log_error.py -paddlemetrics/functional/regression/pearson.py -paddlemetrics/functional/regression/r2.py -paddlemetrics/functional/regression/spearman.py -paddlemetrics/functional/regression/symmetric_mean_absolute_percentage_error.py -paddlemetrics/functional/regression/tweedie_deviance.py -paddlemetrics/functional/retrieval/__init__.py -paddlemetrics/functional/retrieval/average_precision.py -paddlemetrics/functional/retrieval/fall_out.py -paddlemetrics/functional/retrieval/hit_rate.py -paddlemetrics/functional/retrieval/ndcg.py -paddlemetrics/functional/retrieval/precision.py -paddlemetrics/functional/retrieval/r_precision.py -paddlemetrics/functional/retrieval/recall.py -paddlemetrics/functional/retrieval/reciprocal_rank.py -paddlemetrics/functional/text/__init__.py -paddlemetrics/functional/text/bert.py -paddlemetrics/functional/text/bleu.py -paddlemetrics/functional/text/rouge.py -paddlemetrics/functional/text/sacre_bleu.py -paddlemetrics/functional/text/wer.py -paddlemetrics/image/__init__.py -paddlemetrics/image/fid.py -paddlemetrics/image/inception.py -paddlemetrics/image/kid.py -paddlemetrics/image/lpip_similarity.py -paddlemetrics/image/psnr.py -paddlemetrics/image/ssim.py -paddlemetrics/regression/__init__.py -paddlemetrics/regression/cosine_similarity.py -paddlemetrics/regression/explained_variance.py 
-paddlemetrics/regression/mean_absolute_error.py -paddlemetrics/regression/mean_absolute_percentage_error.py -paddlemetrics/regression/mean_squared_error.py -paddlemetrics/regression/mean_squared_log_error.py -paddlemetrics/regression/pearson.py -paddlemetrics/regression/r2.py -paddlemetrics/regression/spearman.py -paddlemetrics/regression/symmetric_mean_absolute_percentage_error.py -paddlemetrics/regression/tweedie_deviance.py -paddlemetrics/retrieval/__init__.py -paddlemetrics/retrieval/mean_average_precision.py -paddlemetrics/retrieval/mean_reciprocal_rank.py -paddlemetrics/retrieval/retrieval_fallout.py -paddlemetrics/retrieval/retrieval_hit_rate.py -paddlemetrics/retrieval/retrieval_metric.py -paddlemetrics/retrieval/retrieval_ndcg.py -paddlemetrics/retrieval/retrieval_precision.py -paddlemetrics/retrieval/retrieval_r_precision.py -paddlemetrics/retrieval/retrieval_recall.py -paddlemetrics/text/__init__.py -paddlemetrics/text/bert.py -paddlemetrics/text/bleu.py -paddlemetrics/text/rouge.py -paddlemetrics/text/sacre_bleu.py -paddlemetrics/text/wer.py -paddlemetrics/utilities/__init__.py -paddlemetrics/utilities/checks.py -paddlemetrics/utilities/data.py -paddlemetrics/utilities/distributed.py -paddlemetrics/utilities/enums.py -paddlemetrics/utilities/exceptions.py -paddlemetrics/utilities/imports.py -paddlemetrics/utilities/prints.py -paddlemetrics/wrappers/__init__.py -paddlemetrics/wrappers/bootstrapping.py -paddlemetrics/wrappers/multioutput.py -paddlemetrics/wrappers/tracker.py \ No newline at end of file diff --git a/EE/paddlemetric/src/paddlemetrics.egg-info/dependency_links.txt b/EE/paddlemetric/src/paddlemetrics.egg-info/dependency_links.txt deleted file mode 100644 index 8b1378917..000000000 --- a/EE/paddlemetric/src/paddlemetrics.egg-info/dependency_links.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/EE/paddlemetric/src/paddlemetrics.egg-info/top_level.txt b/EE/paddlemetric/src/paddlemetrics.egg-info/top_level.txt deleted file mode 100644 index b722df99a..000000000 --- a/EE/paddlemetric/src/paddlemetrics.egg-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -paddlemetrics From a305356e1490b4c9035b065a96f7c512e1b70161 Mon Sep 17 00:00:00 2001 From: rudaoshi Date: Sat, 26 Nov 2022 11:27:20 +0800 Subject: [PATCH 3/4] remove unnecessary file --- EE/README.md | 4 ---- RE/README.md | 4 ++++ {EE => RE}/paddleext/CHANGELOG.md | 0 {EE => RE}/paddleext/README.md | 0 {EE => RE}/paddleext/__init__.py | 0 {EE => RE}/paddleext/paddleext/__init__.py | 0 {EE => RE}/paddleext/paddleext/torchapi/__init__.py | 0 {EE => RE}/paddleext/paddleext/torchapi/core.py | 0 {EE => RE}/paddleext/paddleext/torchapi/cuda.py | 0 {EE => RE}/paddleext/paddleext/torchapi/data.py | 0 .../paddleext/paddleext/torchapi/distributed.py | 0 .../paddleext/paddleext/torchapi/functional.py | 0 {EE => RE}/paddleext/paddleext/torchapi/machine.py | 0 {EE => RE}/paddleext/paddleext/torchapi/metrics.py | 0 .../paddleext/paddleext/torchapi/nn/__init__.py | 0 .../paddleext/paddleext/torchapi/nn/functional.py | 0 {EE => RE}/paddleext/paddleext/torchapi/nn/init.py | 0 .../paddleext/paddleext/torchapi/optim/__init__.py | 0 .../paddleext/torchapi/optim/lr_scheduler.py | 0 {EE => RE}/paddleext/paddleext/torchapi/sampler.py | 0 {EE => RE}/paddleext/paddleext/torchapi/tensor_.py | 0 {EE => RE}/paddleext/setup.py | 0 {EE => RE}/paddleext/test/__init__.py | 0 {EE => RE}/paddleext/test/test_diagonal.py | 0 {EE => RE}/paddleext/test/test_function.py | 0 {EE => RE}/paddleext/test/test_pad.py | 0 {EE => RE}/paddleext/test/test_scatter.py | 0 
 {EE => RE}/paddlemetric/.gitignore | 0
 {EE => RE}/paddlemetric/.ignore | 0
 {EE => RE}/paddlemetric/CHANGELOG.md | 0
 {EE => RE}/paddlemetric/src/README.md | 0
 .../paddlemetric/src/paddlemetrics/__about__.py | 0
 .../paddlemetric/src/paddlemetrics/__init__.py | 0
 .../paddlemetric/src/paddlemetrics/aggregation.py | 0
 .../src/paddlemetrics/audio/__init__.py | 0
 .../paddlemetric/src/paddlemetrics/audio/pesq.py | 0
 .../paddlemetric/src/paddlemetrics/audio/pit.py | 0
 .../paddlemetric/src/paddlemetrics/audio/si_sdr.py | 0
 .../paddlemetric/src/paddlemetrics/audio/si_snr.py | 0
 .../paddlemetric/src/paddlemetrics/audio/snr.py | 0
 .../paddlemetric/src/paddlemetrics/audio/stoi.py | 0
 .../src/paddlemetrics/classification/__init__.py | 0
 .../src/paddlemetrics/classification/accuracy.py | 0
 .../src/paddlemetrics/classification/auc.py | 0
 .../src/paddlemetrics/classification/auroc.py | 0
 .../classification/average_precision.py | 0
 .../classification/binned_precision_recall.py | 0
 .../classification/calibration_error.py | 0
 .../src/paddlemetrics/classification/cohen_kappa.py | 0
 .../classification/confusion_matrix.py | 0
 .../src/paddlemetrics/classification/f_beta.py | 0
 .../classification/hamming_distance.py | 0
 .../src/paddlemetrics/classification/hinge.py | 0
 .../src/paddlemetrics/classification/iou.py | 0
 .../paddlemetrics/classification/kl_divergence.py | 0
 .../classification/matthews_corrcoef.py | 0
 .../classification/precision_recall.py | 0
 .../classification/precision_recall_curve.py | 0
 .../src/paddlemetrics/classification/roc.py | 0
 .../src/paddlemetrics/classification/specificity.py | 0
 .../src/paddlemetrics/classification/stat_scores.py | 0
 .../paddlemetric/src/paddlemetrics/collections.py | 0
 .../src/paddlemetrics/functional/__init__.py | 0
 .../src/paddlemetrics/functional/audio/__init__.py | 0
 .../src/paddlemetrics/functional/audio/pesq.py | 0
 .../src/paddlemetrics/functional/audio/pit.py | 0
 .../src/paddlemetrics/functional/audio/si_sdr.py | 0
 .../src/paddlemetrics/functional/audio/si_snr.py | 0
 .../src/paddlemetrics/functional/audio/snr.py | 0
 .../src/paddlemetrics/functional/audio/stoi.py | 0
 .../functional/classification/__init__.py | 0
 .../functional/classification/accuracy.py | 0
 .../paddlemetrics/functional/classification/auc.py | 0
 .../functional/classification/auroc.py | 0
 .../functional/classification/average_precision.py | 0
 .../functional/classification/calibration_error.py | 0
 .../functional/classification/cohen_kappa.py | 0
 .../functional/classification/confusion_matrix.py | 0
 .../paddlemetrics/functional/classification/dice.py | 0
 .../functional/classification/f_beta.py | 0
 .../functional/classification/hamming_distance.py | 0
 .../functional/classification/hinge.py | 0
 .../paddlemetrics/functional/classification/iou.py | 0
 .../functional/classification/kl_divergence.py | 0
 .../functional/classification/matthews_corrcoef.py | 0
 .../functional/classification/precision_recall.py | 0
 .../classification/precision_recall_curve.py | 0
 .../paddlemetrics/functional/classification/roc.py | 0
 .../functional/classification/specificity.py | 0
 .../functional/classification/stat_scores.py | 0
 .../src/paddlemetrics/functional/image/__init__.py | 0
 .../src/paddlemetrics/functional/image/gradients.py | 0
 .../src/paddlemetrics/functional/image/psnr.py | 0
 .../src/paddlemetrics/functional/image/ssim.py | 0
 .../paddlemetrics/functional/pairwise/__init__.py | 0
 .../src/paddlemetrics/functional/pairwise/cosine.py | 0
 .../paddlemetrics/functional/pairwise/euclidean.py | 0
 .../paddlemetrics/functional/pairwise/helpers.py | 0
 .../src/paddlemetrics/functional/pairwise/linear.py | 0
 .../paddlemetrics/functional/pairwise/manhatten.py | 0
 .../paddlemetrics/functional/regression/__init__.py | 0
 .../functional/regression/cosine_similarity.py | 0
 .../functional/regression/explained_variance.py | 0
 .../functional/regression/mean_absolute_error.py | 0
 .../regression/mean_absolute_percentage_error.py | 0
 .../functional/regression/mean_squared_error.py | 0
 .../functional/regression/mean_squared_log_error.py | 0
 .../paddlemetrics/functional/regression/pearson.py | 0
 .../src/paddlemetrics/functional/regression/r2.py | 0
 .../paddlemetrics/functional/regression/spearman.py | 0
 .../symmetric_mean_absolute_percentage_error.py | 0
 .../functional/regression/tweedie_deviance.py | 0
 .../paddlemetrics/functional/retrieval/__init__.py | 0
 .../functional/retrieval/average_precision.py | 0
 .../paddlemetrics/functional/retrieval/fall_out.py | 0
 .../paddlemetrics/functional/retrieval/hit_rate.py | 0
 .../src/paddlemetrics/functional/retrieval/ndcg.py | 0
 .../paddlemetrics/functional/retrieval/precision.py | 0
 .../functional/retrieval/r_precision.py | 0
 .../paddlemetrics/functional/retrieval/recall.py | 0
 .../functional/retrieval/reciprocal_rank.py | 0
 .../src/paddlemetrics/functional/self_supervised.py | 0
 .../src/paddlemetrics/functional/text/__init__.py | 0
 .../src/paddlemetrics/functional/text/bert.py | 0
 .../src/paddlemetrics/functional/text/bleu.py | 0
 .../src/paddlemetrics/functional/text/rouge.py | 0
 .../src/paddlemetrics/functional/text/sacre_bleu.py | 0
 .../src/paddlemetrics/functional/text/wer.py | 0
 .../src/paddlemetrics/image/__init__.py | 0
 .../paddlemetric/src/paddlemetrics/image/fid.py | 0
 .../src/paddlemetrics/image/inception.py | 0
 .../paddlemetric/src/paddlemetrics/image/kid.py | 0
 .../src/paddlemetrics/image/lpip_similarity.py | 0
 .../paddlemetric/src/paddlemetrics/image/psnr.py | 0
 .../paddlemetric/src/paddlemetrics/image/ssim.py | 0
 {EE => RE}/paddlemetric/src/paddlemetrics/metric.py | 0
 {EE => RE}/paddlemetric/src/paddlemetrics/py.typed | 0
 .../src/paddlemetrics/regression/__init__.py | 0
 .../paddlemetrics/regression/cosine_similarity.py | 0
 .../paddlemetrics/regression/explained_variance.py | 0
 .../paddlemetrics/regression/mean_absolute_error.py | 0
 .../regression/mean_absolute_percentage_error.py | 0
 .../paddlemetrics/regression/mean_squared_error.py | 0
 .../regression/mean_squared_log_error.py | 0
 .../src/paddlemetrics/regression/pearson.py | 0
 .../paddlemetric/src/paddlemetrics/regression/r2.py | 0
 .../src/paddlemetrics/regression/spearman.py | 0
 .../symmetric_mean_absolute_percentage_error.py | 0
 .../paddlemetrics/regression/tweedie_deviance.py | 0
 .../src/paddlemetrics/retrieval/__init__.py | 0
 .../retrieval/mean_average_precision.py | 0
 .../paddlemetrics/retrieval/mean_reciprocal_rank.py | 0
 .../paddlemetrics/retrieval/retrieval_fallout.py | 0
 .../paddlemetrics/retrieval/retrieval_hit_rate.py | 0
 .../src/paddlemetrics/retrieval/retrieval_metric.py | 0
 .../src/paddlemetrics/retrieval/retrieval_ndcg.py | 0
 .../paddlemetrics/retrieval/retrieval_precision.py | 0
 .../retrieval/retrieval_r_precision.py | 0
 .../src/paddlemetrics/retrieval/retrieval_recall.py | 0
 .../paddlemetric/src/paddlemetrics/setup_tools.py | 0
 .../paddlemetric/src/paddlemetrics/text/__init__.py | 0
 .../paddlemetric/src/paddlemetrics/text/bert.py | 0
 .../paddlemetric/src/paddlemetrics/text/bleu.py | 0
 .../paddlemetric/src/paddlemetrics/text/rouge.py | 0
 .../src/paddlemetrics/text/sacre_bleu.py | 0
 .../paddlemetric/src/paddlemetrics/text/wer.py | 0
 .../src/paddlemetrics/utilities/__init__.py | 0
 .../src/paddlemetrics/utilities/checks.py | 0
 .../src/paddlemetrics/utilities/data.py | 0
 .../src/paddlemetrics/utilities/distributed.py | 0
 .../src/paddlemetrics/utilities/enums.py | 0
 .../src/paddlemetrics/utilities/exceptions.py | 0
 .../src/paddlemetrics/utilities/imports.py | 0
 .../src/paddlemetrics/utilities/prints.py | 0
 .../src/paddlemetrics/wrappers/__init__.py | 0
 .../src/paddlemetrics/wrappers/bootstrapping.py | 0
 .../src/paddlemetrics/wrappers/multioutput.py | 0
 .../src/paddlemetrics/wrappers/tracker.py | 0
 {EE => RE}/paddlemetric/src/setup.py | 0
 {EE => RE}/paddlemetric/src/tests/__init__.py | 0
 {EE => RE}/paddlemetric/src/tests/audio/__init__.py | 0
 .../src/tests/audio/examples/audio_speech.wav | Bin
 .../tests/audio/examples/audio_speech_bab_0dB.wav | Bin
 .../paddlemetric/src/tests/audio/test_pesq.py | 0
 {EE => RE}/paddlemetric/src/tests/audio/test_pit.py | 0
 .../paddlemetric/src/tests/audio/test_si_sdr.py | 0
 .../paddlemetric/src/tests/audio/test_si_snr.py | 0
 {EE => RE}/paddlemetric/src/tests/audio/test_snr.py | 0
 .../paddlemetric/src/tests/audio/test_stoi.py | 0
 {EE => RE}/paddlemetric/src/tests/bases/__init__.py | 0
 {EE => RE}/paddlemetric/src/tests/bases/test.log | 0
 .../src/tests/bases/test_aggregation.py | 0
 .../src/tests/bases/test_collections.py | 0
 .../src/tests/bases/test_composition.py | 0
 {EE => RE}/paddlemetric/src/tests/bases/test_ddp.py | 0
 .../paddlemetric/src/tests/bases/test_hashing.py | 0
 .../paddlemetric/src/tests/bases/test_metric.py | 0
 .../src/tests/classification/__init__.py | 0
 .../paddlemetric/src/tests/classification/inputs.py | 0
 .../paddlemetric/src/tests/classification/test.log | 0
 .../src/tests/classification/test_accuracy.py | 0
 .../src/tests/classification/test_auc.py | 0
 .../src/tests/classification/test_auroc.py | 0
 .../tests/classification/test_average_precision.py | 0
 .../classification/test_binned_precision_recall.py | 0
 .../tests/classification/test_calibration_error.py | 0
 .../src/tests/classification/test_cohen_kappa.py | 0
 .../tests/classification/test_confusion_matrix.py | 0
 .../src/tests/classification/test_f_beta.py | 0
 .../tests/classification/test_hamming_distance.py | 0
 .../src/tests/classification/test_hinge.py | 0
 .../src/tests/classification/test_inputs.py | 0
 .../src/tests/classification/test_iou.py | 0
 .../src/tests/classification/test_kl_divergence.py | 0
 .../tests/classification/test_matthews_corrcoef.py | 0
 .../tests/classification/test_precision_recall.py | 0
 .../classification/test_precision_recall_curve.py | 0
 .../src/tests/classification/test_roc.py | 0
 .../src/tests/classification/test_specificity.py | 0
 .../src/tests/classification/test_stat_scores.py | 0
 .../paddlemetric/src/tests/functional/__init__.py | 0
 .../src/tests/functional/test_classification.py | 0
 .../src/tests/functional/test_image_gradients.py | 0
 .../src/tests/functional/test_reduction.py | 0
 .../src/tests/functional/test_self_supervised.py | 0
 .../paddlemetric/src/tests/helpers/__init__.py | 0
 .../src/tests/helpers/non_sklearn_metrics.py | 0
 .../paddlemetric/src/tests/helpers/testers.py | 0
 {EE => RE}/paddlemetric/src/tests/image/__init__.py | 0
 {EE => RE}/paddlemetric/src/tests/image/test_fid.py | 0
 .../paddlemetric/src/tests/image/test_inception.py | 0
 {EE => RE}/paddlemetric/src/tests/image/test_kid.py | 0
 .../paddlemetric/src/tests/image/test_lpips.py | 0
 .../paddlemetric/src/tests/image/test_psnr.py | 0
 .../paddlemetric/src/tests/image/test_ssim.py | 0
 .../paddlemetric/src/tests/pairwise/__init__.py | 0
 .../src/tests/pairwise/test_pairwise_distance.py | 0
 .../paddlemetric/src/tests/regression/__init__.py | 0
 .../src/tests/regression/test_cosine_similarity.py | 0
 .../src/tests/regression/test_explained_variance.py | 0
 .../src/tests/regression/test_mean_error.py | 0
 .../src/tests/regression/test_pearson.py | 0
 .../paddlemetric/src/tests/regression/test_r2.py | 0
 .../src/tests/regression/test_spearman.py | 0
 .../src/tests/regression/test_tweedie_deviance.py | 0
 .../paddlemetric/src/tests/retrieval/__init__.py | 0
 .../paddlemetric/src/tests/retrieval/helpers.py | 0
 .../paddlemetric/src/tests/retrieval/inputs.py | 0
 .../src/tests/retrieval/test_fallout.py | 0
 .../src/tests/retrieval/test_hit_rate.py | 0
 .../paddlemetric/src/tests/retrieval/test_map.py | 0
 .../paddlemetric/src/tests/retrieval/test_mrr.py | 0
 .../paddlemetric/src/tests/retrieval/test_ndcg.py | 0
 .../src/tests/retrieval/test_precision.py | 0
 .../src/tests/retrieval/test_r_precision.py | 0
 .../paddlemetric/src/tests/retrieval/test_recall.py | 0
 {EE => RE}/paddlemetric/src/tests/test_utilities.py | 0
 {EE => RE}/paddlemetric/src/tests/text/__init__.py | 0
 {EE => RE}/paddlemetric/src/tests/text/helpers.py | 0
 .../paddlemetric/src/tests/text/test_bertscore.py | 0
 {EE => RE}/paddlemetric/src/tests/text/test_bleu.py | 0
 .../paddlemetric/src/tests/text/test_rouge.py | 0
 .../paddlemetric/src/tests/text/test_sacre_bleu.py | 0
 {EE => RE}/paddlemetric/src/tests/text/test_wer.py | 0
 .../paddlemetric/src/tests/wrappers/__init__.py | 0
 .../src/tests/wrappers/test_bootstrapping.py | 0
 .../src/tests/wrappers/test_multioutput.py | 0
 .../paddlemetric/src/tests/wrappers/test_tracker.py | 0
 README.md | 6 ++++++
 269 files changed, 10 insertions(+), 4 deletions(-)
 delete mode 100644 EE/README.md
 create mode 100644 RE/README.md
 rename {EE => RE}/paddleext/CHANGELOG.md (100%)
 rename {EE => RE}/paddleext/README.md (100%)
 rename {EE => RE}/paddleext/__init__.py (100%)
 rename {EE => RE}/paddleext/paddleext/__init__.py (100%)
 rename {EE => RE}/paddleext/paddleext/torchapi/__init__.py (100%)
 rename {EE => RE}/paddleext/paddleext/torchapi/core.py (100%)
 rename {EE => RE}/paddleext/paddleext/torchapi/cuda.py (100%)
 rename {EE => RE}/paddleext/paddleext/torchapi/data.py (100%)
 rename {EE => RE}/paddleext/paddleext/torchapi/distributed.py (100%)
 rename {EE => RE}/paddleext/paddleext/torchapi/functional.py (100%)
 rename {EE => RE}/paddleext/paddleext/torchapi/machine.py (100%)
 rename {EE => RE}/paddleext/paddleext/torchapi/metrics.py (100%)
 rename {EE => RE}/paddleext/paddleext/torchapi/nn/__init__.py (100%)
 rename {EE => RE}/paddleext/paddleext/torchapi/nn/functional.py (100%)
 rename {EE => RE}/paddleext/paddleext/torchapi/nn/init.py (100%)
 rename {EE => RE}/paddleext/paddleext/torchapi/optim/__init__.py (100%)
 rename {EE => RE}/paddleext/paddleext/torchapi/optim/lr_scheduler.py (100%)
 rename {EE => RE}/paddleext/paddleext/torchapi/sampler.py (100%)
 rename {EE => RE}/paddleext/paddleext/torchapi/tensor_.py (100%)
 rename {EE => RE}/paddleext/setup.py (100%)
 rename {EE => RE}/paddleext/test/__init__.py (100%)
 rename {EE => RE}/paddleext/test/test_diagonal.py (100%)
 rename {EE => RE}/paddleext/test/test_function.py (100%)
 rename {EE => RE}/paddleext/test/test_pad.py (100%)
 rename {EE => RE}/paddleext/test/test_scatter.py (100%)
 rename {EE => RE}/paddlemetric/.gitignore (100%)
 rename {EE => RE}/paddlemetric/.ignore (100%)
 rename {EE => RE}/paddlemetric/CHANGELOG.md (100%)
 rename {EE => RE}/paddlemetric/src/README.md (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/__about__.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/aggregation.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/audio/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/audio/pesq.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/audio/pit.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/audio/si_sdr.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/audio/si_snr.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/audio/snr.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/audio/stoi.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/classification/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/classification/accuracy.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/classification/auc.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/classification/auroc.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/classification/average_precision.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/classification/binned_precision_recall.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/classification/calibration_error.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/classification/cohen_kappa.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/classification/confusion_matrix.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/classification/f_beta.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/classification/hamming_distance.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/classification/hinge.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/classification/iou.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/classification/kl_divergence.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/classification/matthews_corrcoef.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/classification/precision_recall.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/classification/precision_recall_curve.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/classification/roc.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/classification/specificity.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/classification/stat_scores.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/collections.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/audio/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/audio/pesq.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/audio/pit.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/audio/si_sdr.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/audio/si_snr.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/audio/snr.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/audio/stoi.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/classification/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/classification/accuracy.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/classification/auc.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/classification/auroc.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/classification/average_precision.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/classification/calibration_error.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/classification/cohen_kappa.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/classification/confusion_matrix.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/classification/dice.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/classification/f_beta.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/classification/hamming_distance.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/classification/hinge.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/classification/iou.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/classification/kl_divergence.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/classification/matthews_corrcoef.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/classification/precision_recall.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/classification/precision_recall_curve.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/classification/roc.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/classification/specificity.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/classification/stat_scores.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/image/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/image/gradients.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/image/psnr.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/image/ssim.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/pairwise/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/pairwise/cosine.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/pairwise/euclidean.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/pairwise/helpers.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/pairwise/linear.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/pairwise/manhatten.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/regression/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/regression/cosine_similarity.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/regression/explained_variance.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/regression/mean_absolute_error.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/regression/mean_absolute_percentage_error.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/regression/mean_squared_error.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/regression/mean_squared_log_error.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/regression/pearson.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/regression/r2.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/regression/spearman.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/regression/symmetric_mean_absolute_percentage_error.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/regression/tweedie_deviance.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/retrieval/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/retrieval/average_precision.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/retrieval/fall_out.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/retrieval/hit_rate.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/retrieval/ndcg.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/retrieval/precision.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/retrieval/r_precision.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/retrieval/recall.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/retrieval/reciprocal_rank.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/self_supervised.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/text/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/text/bert.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/text/bleu.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/text/rouge.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/text/sacre_bleu.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/functional/text/wer.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/image/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/image/fid.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/image/inception.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/image/kid.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/image/lpip_similarity.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/image/psnr.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/image/ssim.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/metric.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/py.typed (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/regression/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/regression/cosine_similarity.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/regression/explained_variance.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/regression/mean_absolute_error.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/regression/mean_absolute_percentage_error.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/regression/mean_squared_error.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/regression/mean_squared_log_error.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/regression/pearson.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/regression/r2.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/regression/spearman.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/regression/symmetric_mean_absolute_percentage_error.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/regression/tweedie_deviance.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/retrieval/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/retrieval/mean_average_precision.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/retrieval/mean_reciprocal_rank.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/retrieval/retrieval_fallout.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/retrieval/retrieval_hit_rate.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/retrieval/retrieval_metric.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/retrieval/retrieval_ndcg.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/retrieval/retrieval_precision.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/retrieval/retrieval_r_precision.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/retrieval/retrieval_recall.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/setup_tools.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/text/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/text/bert.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/text/bleu.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/text/rouge.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/text/sacre_bleu.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/text/wer.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/utilities/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/utilities/checks.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/utilities/data.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/utilities/distributed.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/utilities/enums.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/utilities/exceptions.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/utilities/imports.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/utilities/prints.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/wrappers/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/wrappers/bootstrapping.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/wrappers/multioutput.py (100%)
 rename {EE => RE}/paddlemetric/src/paddlemetrics/wrappers/tracker.py (100%)
 rename {EE => RE}/paddlemetric/src/setup.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/audio/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/audio/examples/audio_speech.wav (100%)
 rename {EE => RE}/paddlemetric/src/tests/audio/examples/audio_speech_bab_0dB.wav (100%)
 rename {EE => RE}/paddlemetric/src/tests/audio/test_pesq.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/audio/test_pit.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/audio/test_si_sdr.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/audio/test_si_snr.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/audio/test_snr.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/audio/test_stoi.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/bases/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/bases/test.log (100%)
 rename {EE => RE}/paddlemetric/src/tests/bases/test_aggregation.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/bases/test_collections.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/bases/test_composition.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/bases/test_ddp.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/bases/test_hashing.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/bases/test_metric.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/classification/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/classification/inputs.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/classification/test.log (100%)
 rename {EE => RE}/paddlemetric/src/tests/classification/test_accuracy.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/classification/test_auc.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/classification/test_auroc.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/classification/test_average_precision.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/classification/test_binned_precision_recall.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/classification/test_calibration_error.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/classification/test_cohen_kappa.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/classification/test_confusion_matrix.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/classification/test_f_beta.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/classification/test_hamming_distance.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/classification/test_hinge.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/classification/test_inputs.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/classification/test_iou.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/classification/test_kl_divergence.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/classification/test_matthews_corrcoef.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/classification/test_precision_recall.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/classification/test_precision_recall_curve.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/classification/test_roc.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/classification/test_specificity.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/classification/test_stat_scores.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/functional/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/functional/test_classification.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/functional/test_image_gradients.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/functional/test_reduction.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/functional/test_self_supervised.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/helpers/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/helpers/non_sklearn_metrics.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/helpers/testers.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/image/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/image/test_fid.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/image/test_inception.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/image/test_kid.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/image/test_lpips.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/image/test_psnr.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/image/test_ssim.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/pairwise/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/pairwise/test_pairwise_distance.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/regression/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/regression/test_cosine_similarity.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/regression/test_explained_variance.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/regression/test_mean_error.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/regression/test_pearson.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/regression/test_r2.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/regression/test_spearman.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/regression/test_tweedie_deviance.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/retrieval/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/retrieval/helpers.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/retrieval/inputs.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/retrieval/test_fallout.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/retrieval/test_hit_rate.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/retrieval/test_map.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/retrieval/test_mrr.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/retrieval/test_ndcg.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/retrieval/test_precision.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/retrieval/test_r_precision.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/retrieval/test_recall.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/test_utilities.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/text/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/text/helpers.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/text/test_bertscore.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/text/test_bleu.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/text/test_rouge.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/text/test_sacre_bleu.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/text/test_wer.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/wrappers/__init__.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/wrappers/test_bootstrapping.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/wrappers/test_multioutput.py (100%)
 rename {EE => RE}/paddlemetric/src/tests/wrappers/test_tracker.py (100%)

diff --git a/EE/README.md b/EE/README.md
deleted file mode 100644
index 2cda11a78..000000000
--- a/EE/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# Engineer Efficiency
-
-
-
diff --git a/RE/README.md b/RE/README.md
new file mode 100644
index 000000000..b4966c382
--- /dev/null
+++ b/RE/README.md
@@ -0,0 +1,4 @@
+# Research Efficiency
+
+
+
diff --git a/EE/paddleext/CHANGELOG.md b/RE/paddleext/CHANGELOG.md
similarity index 100%
rename from EE/paddleext/CHANGELOG.md
rename to RE/paddleext/CHANGELOG.md
diff --git a/EE/paddleext/README.md b/RE/paddleext/README.md
similarity index 100%
rename from EE/paddleext/README.md
rename to RE/paddleext/README.md
diff --git a/EE/paddleext/__init__.py b/RE/paddleext/__init__.py
similarity index 100%
rename from EE/paddleext/__init__.py
rename to RE/paddleext/__init__.py
diff --git a/EE/paddleext/paddleext/__init__.py b/RE/paddleext/paddleext/__init__.py
similarity index 100%
rename from EE/paddleext/paddleext/__init__.py
rename to RE/paddleext/paddleext/__init__.py
diff --git a/EE/paddleext/paddleext/torchapi/__init__.py b/RE/paddleext/paddleext/torchapi/__init__.py
similarity index 100%
rename from EE/paddleext/paddleext/torchapi/__init__.py
rename to RE/paddleext/paddleext/torchapi/__init__.py
diff --git a/EE/paddleext/paddleext/torchapi/core.py b/RE/paddleext/paddleext/torchapi/core.py
similarity index 100%
rename from EE/paddleext/paddleext/torchapi/core.py
rename to RE/paddleext/paddleext/torchapi/core.py
diff --git a/EE/paddleext/paddleext/torchapi/cuda.py b/RE/paddleext/paddleext/torchapi/cuda.py
similarity index 100%
rename from EE/paddleext/paddleext/torchapi/cuda.py
rename to RE/paddleext/paddleext/torchapi/cuda.py
diff --git a/EE/paddleext/paddleext/torchapi/data.py b/RE/paddleext/paddleext/torchapi/data.py
similarity index 100%
rename from EE/paddleext/paddleext/torchapi/data.py
rename to RE/paddleext/paddleext/torchapi/data.py
diff --git a/EE/paddleext/paddleext/torchapi/distributed.py b/RE/paddleext/paddleext/torchapi/distributed.py
similarity index 100%
rename from EE/paddleext/paddleext/torchapi/distributed.py
rename to RE/paddleext/paddleext/torchapi/distributed.py
diff --git a/EE/paddleext/paddleext/torchapi/functional.py b/RE/paddleext/paddleext/torchapi/functional.py
similarity index 100%
rename from EE/paddleext/paddleext/torchapi/functional.py
rename to RE/paddleext/paddleext/torchapi/functional.py
diff --git a/EE/paddleext/paddleext/torchapi/machine.py b/RE/paddleext/paddleext/torchapi/machine.py
similarity index 100%
rename from EE/paddleext/paddleext/torchapi/machine.py
rename to RE/paddleext/paddleext/torchapi/machine.py
diff --git a/EE/paddleext/paddleext/torchapi/metrics.py b/RE/paddleext/paddleext/torchapi/metrics.py
similarity index 100%
rename from EE/paddleext/paddleext/torchapi/metrics.py
rename to RE/paddleext/paddleext/torchapi/metrics.py
diff --git a/EE/paddleext/paddleext/torchapi/nn/__init__.py b/RE/paddleext/paddleext/torchapi/nn/__init__.py
similarity index 100%
rename from EE/paddleext/paddleext/torchapi/nn/__init__.py
rename to RE/paddleext/paddleext/torchapi/nn/__init__.py
diff --git a/EE/paddleext/paddleext/torchapi/nn/functional.py b/RE/paddleext/paddleext/torchapi/nn/functional.py
similarity index 100%
rename from EE/paddleext/paddleext/torchapi/nn/functional.py
rename to RE/paddleext/paddleext/torchapi/nn/functional.py
diff --git a/EE/paddleext/paddleext/torchapi/nn/init.py b/RE/paddleext/paddleext/torchapi/nn/init.py
similarity index 100%
rename from EE/paddleext/paddleext/torchapi/nn/init.py
rename to RE/paddleext/paddleext/torchapi/nn/init.py
diff --git a/EE/paddleext/paddleext/torchapi/optim/__init__.py b/RE/paddleext/paddleext/torchapi/optim/__init__.py
similarity index 100%
rename from EE/paddleext/paddleext/torchapi/optim/__init__.py
rename to RE/paddleext/paddleext/torchapi/optim/__init__.py
diff --git a/EE/paddleext/paddleext/torchapi/optim/lr_scheduler.py b/RE/paddleext/paddleext/torchapi/optim/lr_scheduler.py
similarity index 100%
rename from EE/paddleext/paddleext/torchapi/optim/lr_scheduler.py
rename to RE/paddleext/paddleext/torchapi/optim/lr_scheduler.py
diff --git a/EE/paddleext/paddleext/torchapi/sampler.py b/RE/paddleext/paddleext/torchapi/sampler.py
similarity index 100%
rename from EE/paddleext/paddleext/torchapi/sampler.py
rename to RE/paddleext/paddleext/torchapi/sampler.py
diff --git a/EE/paddleext/paddleext/torchapi/tensor_.py b/RE/paddleext/paddleext/torchapi/tensor_.py
similarity index 100%
rename from EE/paddleext/paddleext/torchapi/tensor_.py
rename to RE/paddleext/paddleext/torchapi/tensor_.py
diff --git a/EE/paddleext/setup.py b/RE/paddleext/setup.py
similarity index 100%
rename from EE/paddleext/setup.py
rename to RE/paddleext/setup.py
diff --git a/EE/paddleext/test/__init__.py b/RE/paddleext/test/__init__.py
similarity index 100%
rename from EE/paddleext/test/__init__.py
rename to RE/paddleext/test/__init__.py
diff --git a/EE/paddleext/test/test_diagonal.py b/RE/paddleext/test/test_diagonal.py
similarity index 100%
rename from EE/paddleext/test/test_diagonal.py
rename to RE/paddleext/test/test_diagonal.py
diff --git a/EE/paddleext/test/test_function.py b/RE/paddleext/test/test_function.py
similarity index 100%
rename from EE/paddleext/test/test_function.py
rename to RE/paddleext/test/test_function.py
diff --git a/EE/paddleext/test/test_pad.py b/RE/paddleext/test/test_pad.py
similarity index 100%
rename from EE/paddleext/test/test_pad.py
rename to RE/paddleext/test/test_pad.py
diff --git a/EE/paddleext/test/test_scatter.py b/RE/paddleext/test/test_scatter.py
similarity index 100%
rename from EE/paddleext/test/test_scatter.py
rename to RE/paddleext/test/test_scatter.py
diff --git a/EE/paddlemetric/.gitignore b/RE/paddlemetric/.gitignore
similarity index 100%
rename from EE/paddlemetric/.gitignore
rename to RE/paddlemetric/.gitignore
diff --git a/EE/paddlemetric/.ignore b/RE/paddlemetric/.ignore
similarity index 100%
rename from EE/paddlemetric/.ignore
rename to RE/paddlemetric/.ignore
diff --git a/EE/paddlemetric/CHANGELOG.md b/RE/paddlemetric/CHANGELOG.md
similarity index 100%
rename from EE/paddlemetric/CHANGELOG.md
rename to RE/paddlemetric/CHANGELOG.md
diff --git a/EE/paddlemetric/src/README.md b/RE/paddlemetric/src/README.md
similarity index 100%
rename from EE/paddlemetric/src/README.md
rename to RE/paddlemetric/src/README.md
diff --git a/EE/paddlemetric/src/paddlemetrics/__about__.py b/RE/paddlemetric/src/paddlemetrics/__about__.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/__about__.py
rename to RE/paddlemetric/src/paddlemetrics/__about__.py
diff --git a/EE/paddlemetric/src/paddlemetrics/__init__.py b/RE/paddlemetric/src/paddlemetrics/__init__.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/__init__.py
rename to RE/paddlemetric/src/paddlemetrics/__init__.py
diff --git a/EE/paddlemetric/src/paddlemetrics/aggregation.py b/RE/paddlemetric/src/paddlemetrics/aggregation.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/aggregation.py
rename to RE/paddlemetric/src/paddlemetrics/aggregation.py
diff --git a/EE/paddlemetric/src/paddlemetrics/audio/__init__.py b/RE/paddlemetric/src/paddlemetrics/audio/__init__.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/audio/__init__.py
rename to RE/paddlemetric/src/paddlemetrics/audio/__init__.py
diff --git a/EE/paddlemetric/src/paddlemetrics/audio/pesq.py b/RE/paddlemetric/src/paddlemetrics/audio/pesq.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/audio/pesq.py
rename to RE/paddlemetric/src/paddlemetrics/audio/pesq.py
diff --git a/EE/paddlemetric/src/paddlemetrics/audio/pit.py b/RE/paddlemetric/src/paddlemetrics/audio/pit.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/audio/pit.py
rename to RE/paddlemetric/src/paddlemetrics/audio/pit.py
diff --git a/EE/paddlemetric/src/paddlemetrics/audio/si_sdr.py b/RE/paddlemetric/src/paddlemetrics/audio/si_sdr.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/audio/si_sdr.py
rename to RE/paddlemetric/src/paddlemetrics/audio/si_sdr.py
diff --git a/EE/paddlemetric/src/paddlemetrics/audio/si_snr.py b/RE/paddlemetric/src/paddlemetrics/audio/si_snr.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/audio/si_snr.py
rename to RE/paddlemetric/src/paddlemetrics/audio/si_snr.py
diff --git a/EE/paddlemetric/src/paddlemetrics/audio/snr.py b/RE/paddlemetric/src/paddlemetrics/audio/snr.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/audio/snr.py
rename to RE/paddlemetric/src/paddlemetrics/audio/snr.py
diff --git a/EE/paddlemetric/src/paddlemetrics/audio/stoi.py b/RE/paddlemetric/src/paddlemetrics/audio/stoi.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/audio/stoi.py
rename to RE/paddlemetric/src/paddlemetrics/audio/stoi.py
diff --git a/EE/paddlemetric/src/paddlemetrics/classification/__init__.py b/RE/paddlemetric/src/paddlemetrics/classification/__init__.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/classification/__init__.py
rename to RE/paddlemetric/src/paddlemetrics/classification/__init__.py
diff --git a/EE/paddlemetric/src/paddlemetrics/classification/accuracy.py b/RE/paddlemetric/src/paddlemetrics/classification/accuracy.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/classification/accuracy.py
rename to RE/paddlemetric/src/paddlemetrics/classification/accuracy.py
diff --git a/EE/paddlemetric/src/paddlemetrics/classification/auc.py b/RE/paddlemetric/src/paddlemetrics/classification/auc.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/classification/auc.py
rename to RE/paddlemetric/src/paddlemetrics/classification/auc.py
diff --git a/EE/paddlemetric/src/paddlemetrics/classification/auroc.py b/RE/paddlemetric/src/paddlemetrics/classification/auroc.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/classification/auroc.py
rename to RE/paddlemetric/src/paddlemetrics/classification/auroc.py
diff --git a/EE/paddlemetric/src/paddlemetrics/classification/average_precision.py b/RE/paddlemetric/src/paddlemetrics/classification/average_precision.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/classification/average_precision.py
rename to RE/paddlemetric/src/paddlemetrics/classification/average_precision.py
diff --git a/EE/paddlemetric/src/paddlemetrics/classification/binned_precision_recall.py b/RE/paddlemetric/src/paddlemetrics/classification/binned_precision_recall.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/classification/binned_precision_recall.py
rename to RE/paddlemetric/src/paddlemetrics/classification/binned_precision_recall.py
diff --git a/EE/paddlemetric/src/paddlemetrics/classification/calibration_error.py b/RE/paddlemetric/src/paddlemetrics/classification/calibration_error.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/classification/calibration_error.py
rename to RE/paddlemetric/src/paddlemetrics/classification/calibration_error.py
diff --git a/EE/paddlemetric/src/paddlemetrics/classification/cohen_kappa.py b/RE/paddlemetric/src/paddlemetrics/classification/cohen_kappa.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/classification/cohen_kappa.py
rename to RE/paddlemetric/src/paddlemetrics/classification/cohen_kappa.py
diff --git a/EE/paddlemetric/src/paddlemetrics/classification/confusion_matrix.py b/RE/paddlemetric/src/paddlemetrics/classification/confusion_matrix.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/classification/confusion_matrix.py
rename to RE/paddlemetric/src/paddlemetrics/classification/confusion_matrix.py
diff --git a/EE/paddlemetric/src/paddlemetrics/classification/f_beta.py b/RE/paddlemetric/src/paddlemetrics/classification/f_beta.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/classification/f_beta.py
rename to RE/paddlemetric/src/paddlemetrics/classification/f_beta.py
diff --git a/EE/paddlemetric/src/paddlemetrics/classification/hamming_distance.py b/RE/paddlemetric/src/paddlemetrics/classification/hamming_distance.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/classification/hamming_distance.py
rename to RE/paddlemetric/src/paddlemetrics/classification/hamming_distance.py
diff --git a/EE/paddlemetric/src/paddlemetrics/classification/hinge.py b/RE/paddlemetric/src/paddlemetrics/classification/hinge.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/classification/hinge.py
rename to RE/paddlemetric/src/paddlemetrics/classification/hinge.py
diff --git a/EE/paddlemetric/src/paddlemetrics/classification/iou.py b/RE/paddlemetric/src/paddlemetrics/classification/iou.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/classification/iou.py
rename to RE/paddlemetric/src/paddlemetrics/classification/iou.py
diff --git a/EE/paddlemetric/src/paddlemetrics/classification/kl_divergence.py b/RE/paddlemetric/src/paddlemetrics/classification/kl_divergence.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/classification/kl_divergence.py
rename to RE/paddlemetric/src/paddlemetrics/classification/kl_divergence.py
diff --git a/EE/paddlemetric/src/paddlemetrics/classification/matthews_corrcoef.py b/RE/paddlemetric/src/paddlemetrics/classification/matthews_corrcoef.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/classification/matthews_corrcoef.py
rename to RE/paddlemetric/src/paddlemetrics/classification/matthews_corrcoef.py
diff --git a/EE/paddlemetric/src/paddlemetrics/classification/precision_recall.py b/RE/paddlemetric/src/paddlemetrics/classification/precision_recall.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/classification/precision_recall.py
rename to RE/paddlemetric/src/paddlemetrics/classification/precision_recall.py
diff --git a/EE/paddlemetric/src/paddlemetrics/classification/precision_recall_curve.py b/RE/paddlemetric/src/paddlemetrics/classification/precision_recall_curve.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/classification/precision_recall_curve.py
rename to RE/paddlemetric/src/paddlemetrics/classification/precision_recall_curve.py
diff --git a/EE/paddlemetric/src/paddlemetrics/classification/roc.py b/RE/paddlemetric/src/paddlemetrics/classification/roc.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/classification/roc.py
rename to RE/paddlemetric/src/paddlemetrics/classification/roc.py
diff --git a/EE/paddlemetric/src/paddlemetrics/classification/specificity.py b/RE/paddlemetric/src/paddlemetrics/classification/specificity.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/classification/specificity.py
rename to RE/paddlemetric/src/paddlemetrics/classification/specificity.py
diff --git a/EE/paddlemetric/src/paddlemetrics/classification/stat_scores.py b/RE/paddlemetric/src/paddlemetrics/classification/stat_scores.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/classification/stat_scores.py
rename to RE/paddlemetric/src/paddlemetrics/classification/stat_scores.py
diff --git a/EE/paddlemetric/src/paddlemetrics/collections.py b/RE/paddlemetric/src/paddlemetrics/collections.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/collections.py
rename to RE/paddlemetric/src/paddlemetrics/collections.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/__init__.py b/RE/paddlemetric/src/paddlemetrics/functional/__init__.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/__init__.py
rename to RE/paddlemetric/src/paddlemetrics/functional/__init__.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/audio/__init__.py b/RE/paddlemetric/src/paddlemetrics/functional/audio/__init__.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/audio/__init__.py
rename to RE/paddlemetric/src/paddlemetrics/functional/audio/__init__.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/audio/pesq.py b/RE/paddlemetric/src/paddlemetrics/functional/audio/pesq.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/audio/pesq.py
rename to RE/paddlemetric/src/paddlemetrics/functional/audio/pesq.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/audio/pit.py b/RE/paddlemetric/src/paddlemetrics/functional/audio/pit.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/audio/pit.py
rename to RE/paddlemetric/src/paddlemetrics/functional/audio/pit.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/audio/si_sdr.py b/RE/paddlemetric/src/paddlemetrics/functional/audio/si_sdr.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/audio/si_sdr.py
rename to RE/paddlemetric/src/paddlemetrics/functional/audio/si_sdr.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/audio/si_snr.py b/RE/paddlemetric/src/paddlemetrics/functional/audio/si_snr.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/audio/si_snr.py
rename to RE/paddlemetric/src/paddlemetrics/functional/audio/si_snr.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/audio/snr.py b/RE/paddlemetric/src/paddlemetrics/functional/audio/snr.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/audio/snr.py
rename to RE/paddlemetric/src/paddlemetrics/functional/audio/snr.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/audio/stoi.py b/RE/paddlemetric/src/paddlemetrics/functional/audio/stoi.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/audio/stoi.py
rename to RE/paddlemetric/src/paddlemetrics/functional/audio/stoi.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/__init__.py b/RE/paddlemetric/src/paddlemetrics/functional/classification/__init__.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/classification/__init__.py
rename to RE/paddlemetric/src/paddlemetrics/functional/classification/__init__.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/accuracy.py b/RE/paddlemetric/src/paddlemetrics/functional/classification/accuracy.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/classification/accuracy.py
rename to RE/paddlemetric/src/paddlemetrics/functional/classification/accuracy.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/auc.py b/RE/paddlemetric/src/paddlemetrics/functional/classification/auc.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/classification/auc.py
rename to RE/paddlemetric/src/paddlemetrics/functional/classification/auc.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/auroc.py b/RE/paddlemetric/src/paddlemetrics/functional/classification/auroc.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/classification/auroc.py
rename to RE/paddlemetric/src/paddlemetrics/functional/classification/auroc.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/average_precision.py b/RE/paddlemetric/src/paddlemetrics/functional/classification/average_precision.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/classification/average_precision.py
rename to RE/paddlemetric/src/paddlemetrics/functional/classification/average_precision.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/calibration_error.py b/RE/paddlemetric/src/paddlemetrics/functional/classification/calibration_error.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/classification/calibration_error.py
rename to RE/paddlemetric/src/paddlemetrics/functional/classification/calibration_error.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/cohen_kappa.py b/RE/paddlemetric/src/paddlemetrics/functional/classification/cohen_kappa.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/classification/cohen_kappa.py
rename to RE/paddlemetric/src/paddlemetrics/functional/classification/cohen_kappa.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/confusion_matrix.py b/RE/paddlemetric/src/paddlemetrics/functional/classification/confusion_matrix.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/classification/confusion_matrix.py
rename to RE/paddlemetric/src/paddlemetrics/functional/classification/confusion_matrix.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/dice.py b/RE/paddlemetric/src/paddlemetrics/functional/classification/dice.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/classification/dice.py
rename to RE/paddlemetric/src/paddlemetrics/functional/classification/dice.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/f_beta.py b/RE/paddlemetric/src/paddlemetrics/functional/classification/f_beta.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/classification/f_beta.py
rename to RE/paddlemetric/src/paddlemetrics/functional/classification/f_beta.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/hamming_distance.py b/RE/paddlemetric/src/paddlemetrics/functional/classification/hamming_distance.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/classification/hamming_distance.py
rename to RE/paddlemetric/src/paddlemetrics/functional/classification/hamming_distance.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/hinge.py b/RE/paddlemetric/src/paddlemetrics/functional/classification/hinge.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/classification/hinge.py
rename to RE/paddlemetric/src/paddlemetrics/functional/classification/hinge.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/iou.py b/RE/paddlemetric/src/paddlemetrics/functional/classification/iou.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/classification/iou.py
rename to RE/paddlemetric/src/paddlemetrics/functional/classification/iou.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/kl_divergence.py b/RE/paddlemetric/src/paddlemetrics/functional/classification/kl_divergence.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/classification/kl_divergence.py
rename to RE/paddlemetric/src/paddlemetrics/functional/classification/kl_divergence.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/matthews_corrcoef.py b/RE/paddlemetric/src/paddlemetrics/functional/classification/matthews_corrcoef.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/classification/matthews_corrcoef.py
rename to RE/paddlemetric/src/paddlemetrics/functional/classification/matthews_corrcoef.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/precision_recall.py b/RE/paddlemetric/src/paddlemetrics/functional/classification/precision_recall.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/classification/precision_recall.py
rename to RE/paddlemetric/src/paddlemetrics/functional/classification/precision_recall.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/precision_recall_curve.py b/RE/paddlemetric/src/paddlemetrics/functional/classification/precision_recall_curve.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/classification/precision_recall_curve.py
rename to RE/paddlemetric/src/paddlemetrics/functional/classification/precision_recall_curve.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/roc.py b/RE/paddlemetric/src/paddlemetrics/functional/classification/roc.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/classification/roc.py
rename to RE/paddlemetric/src/paddlemetrics/functional/classification/roc.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/specificity.py b/RE/paddlemetric/src/paddlemetrics/functional/classification/specificity.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/classification/specificity.py
rename to RE/paddlemetric/src/paddlemetrics/functional/classification/specificity.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/classification/stat_scores.py b/RE/paddlemetric/src/paddlemetrics/functional/classification/stat_scores.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/classification/stat_scores.py
rename to RE/paddlemetric/src/paddlemetrics/functional/classification/stat_scores.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/image/__init__.py b/RE/paddlemetric/src/paddlemetrics/functional/image/__init__.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/image/__init__.py
rename to RE/paddlemetric/src/paddlemetrics/functional/image/__init__.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/image/gradients.py b/RE/paddlemetric/src/paddlemetrics/functional/image/gradients.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/image/gradients.py
rename to RE/paddlemetric/src/paddlemetrics/functional/image/gradients.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/image/psnr.py b/RE/paddlemetric/src/paddlemetrics/functional/image/psnr.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/image/psnr.py
rename to RE/paddlemetric/src/paddlemetrics/functional/image/psnr.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/image/ssim.py b/RE/paddlemetric/src/paddlemetrics/functional/image/ssim.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/image/ssim.py
rename to RE/paddlemetric/src/paddlemetrics/functional/image/ssim.py
diff --git a/EE/paddlemetric/src/paddlemetrics/functional/pairwise/__init__.py b/RE/paddlemetric/src/paddlemetrics/functional/pairwise/__init__.py
similarity index 100%
rename from EE/paddlemetric/src/paddlemetrics/functional/pairwise/__init__.py
rename to RE/paddlemetric/src/paddlemetrics/functional/pairwise/__init__.py
a/EE/paddlemetric/src/paddlemetrics/functional/pairwise/cosine.py b/RE/paddlemetric/src/paddlemetrics/functional/pairwise/cosine.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/pairwise/cosine.py rename to RE/paddlemetric/src/paddlemetrics/functional/pairwise/cosine.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/pairwise/euclidean.py b/RE/paddlemetric/src/paddlemetrics/functional/pairwise/euclidean.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/pairwise/euclidean.py rename to RE/paddlemetric/src/paddlemetrics/functional/pairwise/euclidean.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/pairwise/helpers.py b/RE/paddlemetric/src/paddlemetrics/functional/pairwise/helpers.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/pairwise/helpers.py rename to RE/paddlemetric/src/paddlemetrics/functional/pairwise/helpers.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/pairwise/linear.py b/RE/paddlemetric/src/paddlemetrics/functional/pairwise/linear.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/pairwise/linear.py rename to RE/paddlemetric/src/paddlemetrics/functional/pairwise/linear.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/pairwise/manhatten.py b/RE/paddlemetric/src/paddlemetrics/functional/pairwise/manhatten.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/pairwise/manhatten.py rename to RE/paddlemetric/src/paddlemetrics/functional/pairwise/manhatten.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/regression/__init__.py b/RE/paddlemetric/src/paddlemetrics/functional/regression/__init__.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/regression/__init__.py rename to RE/paddlemetric/src/paddlemetrics/functional/regression/__init__.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/regression/cosine_similarity.py b/RE/paddlemetric/src/paddlemetrics/functional/regression/cosine_similarity.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/regression/cosine_similarity.py rename to RE/paddlemetric/src/paddlemetrics/functional/regression/cosine_similarity.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/regression/explained_variance.py b/RE/paddlemetric/src/paddlemetrics/functional/regression/explained_variance.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/regression/explained_variance.py rename to RE/paddlemetric/src/paddlemetrics/functional/regression/explained_variance.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/regression/mean_absolute_error.py b/RE/paddlemetric/src/paddlemetrics/functional/regression/mean_absolute_error.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/regression/mean_absolute_error.py rename to RE/paddlemetric/src/paddlemetrics/functional/regression/mean_absolute_error.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/regression/mean_absolute_percentage_error.py b/RE/paddlemetric/src/paddlemetrics/functional/regression/mean_absolute_percentage_error.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/regression/mean_absolute_percentage_error.py rename to RE/paddlemetric/src/paddlemetrics/functional/regression/mean_absolute_percentage_error.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/regression/mean_squared_error.py 
b/RE/paddlemetric/src/paddlemetrics/functional/regression/mean_squared_error.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/regression/mean_squared_error.py rename to RE/paddlemetric/src/paddlemetrics/functional/regression/mean_squared_error.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/regression/mean_squared_log_error.py b/RE/paddlemetric/src/paddlemetrics/functional/regression/mean_squared_log_error.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/regression/mean_squared_log_error.py rename to RE/paddlemetric/src/paddlemetrics/functional/regression/mean_squared_log_error.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/regression/pearson.py b/RE/paddlemetric/src/paddlemetrics/functional/regression/pearson.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/regression/pearson.py rename to RE/paddlemetric/src/paddlemetrics/functional/regression/pearson.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/regression/r2.py b/RE/paddlemetric/src/paddlemetrics/functional/regression/r2.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/regression/r2.py rename to RE/paddlemetric/src/paddlemetrics/functional/regression/r2.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/regression/spearman.py b/RE/paddlemetric/src/paddlemetrics/functional/regression/spearman.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/regression/spearman.py rename to RE/paddlemetric/src/paddlemetrics/functional/regression/spearman.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/regression/symmetric_mean_absolute_percentage_error.py b/RE/paddlemetric/src/paddlemetrics/functional/regression/symmetric_mean_absolute_percentage_error.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/regression/symmetric_mean_absolute_percentage_error.py rename to RE/paddlemetric/src/paddlemetrics/functional/regression/symmetric_mean_absolute_percentage_error.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/regression/tweedie_deviance.py b/RE/paddlemetric/src/paddlemetrics/functional/regression/tweedie_deviance.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/regression/tweedie_deviance.py rename to RE/paddlemetric/src/paddlemetrics/functional/regression/tweedie_deviance.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/retrieval/__init__.py b/RE/paddlemetric/src/paddlemetrics/functional/retrieval/__init__.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/retrieval/__init__.py rename to RE/paddlemetric/src/paddlemetrics/functional/retrieval/__init__.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/retrieval/average_precision.py b/RE/paddlemetric/src/paddlemetrics/functional/retrieval/average_precision.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/retrieval/average_precision.py rename to RE/paddlemetric/src/paddlemetrics/functional/retrieval/average_precision.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/retrieval/fall_out.py b/RE/paddlemetric/src/paddlemetrics/functional/retrieval/fall_out.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/retrieval/fall_out.py rename to RE/paddlemetric/src/paddlemetrics/functional/retrieval/fall_out.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/retrieval/hit_rate.py 
b/RE/paddlemetric/src/paddlemetrics/functional/retrieval/hit_rate.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/retrieval/hit_rate.py rename to RE/paddlemetric/src/paddlemetrics/functional/retrieval/hit_rate.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/retrieval/ndcg.py b/RE/paddlemetric/src/paddlemetrics/functional/retrieval/ndcg.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/retrieval/ndcg.py rename to RE/paddlemetric/src/paddlemetrics/functional/retrieval/ndcg.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/retrieval/precision.py b/RE/paddlemetric/src/paddlemetrics/functional/retrieval/precision.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/retrieval/precision.py rename to RE/paddlemetric/src/paddlemetrics/functional/retrieval/precision.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/retrieval/r_precision.py b/RE/paddlemetric/src/paddlemetrics/functional/retrieval/r_precision.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/retrieval/r_precision.py rename to RE/paddlemetric/src/paddlemetrics/functional/retrieval/r_precision.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/retrieval/recall.py b/RE/paddlemetric/src/paddlemetrics/functional/retrieval/recall.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/retrieval/recall.py rename to RE/paddlemetric/src/paddlemetrics/functional/retrieval/recall.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/retrieval/reciprocal_rank.py b/RE/paddlemetric/src/paddlemetrics/functional/retrieval/reciprocal_rank.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/retrieval/reciprocal_rank.py rename to RE/paddlemetric/src/paddlemetrics/functional/retrieval/reciprocal_rank.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/self_supervised.py b/RE/paddlemetric/src/paddlemetrics/functional/self_supervised.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/self_supervised.py rename to RE/paddlemetric/src/paddlemetrics/functional/self_supervised.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/text/__init__.py b/RE/paddlemetric/src/paddlemetrics/functional/text/__init__.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/text/__init__.py rename to RE/paddlemetric/src/paddlemetrics/functional/text/__init__.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/text/bert.py b/RE/paddlemetric/src/paddlemetrics/functional/text/bert.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/text/bert.py rename to RE/paddlemetric/src/paddlemetrics/functional/text/bert.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/text/bleu.py b/RE/paddlemetric/src/paddlemetrics/functional/text/bleu.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/text/bleu.py rename to RE/paddlemetric/src/paddlemetrics/functional/text/bleu.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/text/rouge.py b/RE/paddlemetric/src/paddlemetrics/functional/text/rouge.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/text/rouge.py rename to RE/paddlemetric/src/paddlemetrics/functional/text/rouge.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/text/sacre_bleu.py b/RE/paddlemetric/src/paddlemetrics/functional/text/sacre_bleu.py similarity 
index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/text/sacre_bleu.py rename to RE/paddlemetric/src/paddlemetrics/functional/text/sacre_bleu.py diff --git a/EE/paddlemetric/src/paddlemetrics/functional/text/wer.py b/RE/paddlemetric/src/paddlemetrics/functional/text/wer.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/functional/text/wer.py rename to RE/paddlemetric/src/paddlemetrics/functional/text/wer.py diff --git a/EE/paddlemetric/src/paddlemetrics/image/__init__.py b/RE/paddlemetric/src/paddlemetrics/image/__init__.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/image/__init__.py rename to RE/paddlemetric/src/paddlemetrics/image/__init__.py diff --git a/EE/paddlemetric/src/paddlemetrics/image/fid.py b/RE/paddlemetric/src/paddlemetrics/image/fid.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/image/fid.py rename to RE/paddlemetric/src/paddlemetrics/image/fid.py diff --git a/EE/paddlemetric/src/paddlemetrics/image/inception.py b/RE/paddlemetric/src/paddlemetrics/image/inception.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/image/inception.py rename to RE/paddlemetric/src/paddlemetrics/image/inception.py diff --git a/EE/paddlemetric/src/paddlemetrics/image/kid.py b/RE/paddlemetric/src/paddlemetrics/image/kid.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/image/kid.py rename to RE/paddlemetric/src/paddlemetrics/image/kid.py diff --git a/EE/paddlemetric/src/paddlemetrics/image/lpip_similarity.py b/RE/paddlemetric/src/paddlemetrics/image/lpip_similarity.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/image/lpip_similarity.py rename to RE/paddlemetric/src/paddlemetrics/image/lpip_similarity.py diff --git a/EE/paddlemetric/src/paddlemetrics/image/psnr.py b/RE/paddlemetric/src/paddlemetrics/image/psnr.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/image/psnr.py rename to RE/paddlemetric/src/paddlemetrics/image/psnr.py diff --git a/EE/paddlemetric/src/paddlemetrics/image/ssim.py b/RE/paddlemetric/src/paddlemetrics/image/ssim.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/image/ssim.py rename to RE/paddlemetric/src/paddlemetrics/image/ssim.py diff --git a/EE/paddlemetric/src/paddlemetrics/metric.py b/RE/paddlemetric/src/paddlemetrics/metric.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/metric.py rename to RE/paddlemetric/src/paddlemetrics/metric.py diff --git a/EE/paddlemetric/src/paddlemetrics/py.typed b/RE/paddlemetric/src/paddlemetrics/py.typed similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/py.typed rename to RE/paddlemetric/src/paddlemetrics/py.typed diff --git a/EE/paddlemetric/src/paddlemetrics/regression/__init__.py b/RE/paddlemetric/src/paddlemetrics/regression/__init__.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/regression/__init__.py rename to RE/paddlemetric/src/paddlemetrics/regression/__init__.py diff --git a/EE/paddlemetric/src/paddlemetrics/regression/cosine_similarity.py b/RE/paddlemetric/src/paddlemetrics/regression/cosine_similarity.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/regression/cosine_similarity.py rename to RE/paddlemetric/src/paddlemetrics/regression/cosine_similarity.py diff --git a/EE/paddlemetric/src/paddlemetrics/regression/explained_variance.py b/RE/paddlemetric/src/paddlemetrics/regression/explained_variance.py similarity index 100% rename from 
EE/paddlemetric/src/paddlemetrics/regression/explained_variance.py rename to RE/paddlemetric/src/paddlemetrics/regression/explained_variance.py diff --git a/EE/paddlemetric/src/paddlemetrics/regression/mean_absolute_error.py b/RE/paddlemetric/src/paddlemetrics/regression/mean_absolute_error.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/regression/mean_absolute_error.py rename to RE/paddlemetric/src/paddlemetrics/regression/mean_absolute_error.py diff --git a/EE/paddlemetric/src/paddlemetrics/regression/mean_absolute_percentage_error.py b/RE/paddlemetric/src/paddlemetrics/regression/mean_absolute_percentage_error.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/regression/mean_absolute_percentage_error.py rename to RE/paddlemetric/src/paddlemetrics/regression/mean_absolute_percentage_error.py diff --git a/EE/paddlemetric/src/paddlemetrics/regression/mean_squared_error.py b/RE/paddlemetric/src/paddlemetrics/regression/mean_squared_error.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/regression/mean_squared_error.py rename to RE/paddlemetric/src/paddlemetrics/regression/mean_squared_error.py diff --git a/EE/paddlemetric/src/paddlemetrics/regression/mean_squared_log_error.py b/RE/paddlemetric/src/paddlemetrics/regression/mean_squared_log_error.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/regression/mean_squared_log_error.py rename to RE/paddlemetric/src/paddlemetrics/regression/mean_squared_log_error.py diff --git a/EE/paddlemetric/src/paddlemetrics/regression/pearson.py b/RE/paddlemetric/src/paddlemetrics/regression/pearson.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/regression/pearson.py rename to RE/paddlemetric/src/paddlemetrics/regression/pearson.py diff --git a/EE/paddlemetric/src/paddlemetrics/regression/r2.py b/RE/paddlemetric/src/paddlemetrics/regression/r2.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/regression/r2.py rename to RE/paddlemetric/src/paddlemetrics/regression/r2.py diff --git a/EE/paddlemetric/src/paddlemetrics/regression/spearman.py b/RE/paddlemetric/src/paddlemetrics/regression/spearman.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/regression/spearman.py rename to RE/paddlemetric/src/paddlemetrics/regression/spearman.py diff --git a/EE/paddlemetric/src/paddlemetrics/regression/symmetric_mean_absolute_percentage_error.py b/RE/paddlemetric/src/paddlemetrics/regression/symmetric_mean_absolute_percentage_error.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/regression/symmetric_mean_absolute_percentage_error.py rename to RE/paddlemetric/src/paddlemetrics/regression/symmetric_mean_absolute_percentage_error.py diff --git a/EE/paddlemetric/src/paddlemetrics/regression/tweedie_deviance.py b/RE/paddlemetric/src/paddlemetrics/regression/tweedie_deviance.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/regression/tweedie_deviance.py rename to RE/paddlemetric/src/paddlemetrics/regression/tweedie_deviance.py diff --git a/EE/paddlemetric/src/paddlemetrics/retrieval/__init__.py b/RE/paddlemetric/src/paddlemetrics/retrieval/__init__.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/retrieval/__init__.py rename to RE/paddlemetric/src/paddlemetrics/retrieval/__init__.py diff --git a/EE/paddlemetric/src/paddlemetrics/retrieval/mean_average_precision.py b/RE/paddlemetric/src/paddlemetrics/retrieval/mean_average_precision.py similarity index 100% rename 
from EE/paddlemetric/src/paddlemetrics/retrieval/mean_average_precision.py rename to RE/paddlemetric/src/paddlemetrics/retrieval/mean_average_precision.py diff --git a/EE/paddlemetric/src/paddlemetrics/retrieval/mean_reciprocal_rank.py b/RE/paddlemetric/src/paddlemetrics/retrieval/mean_reciprocal_rank.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/retrieval/mean_reciprocal_rank.py rename to RE/paddlemetric/src/paddlemetrics/retrieval/mean_reciprocal_rank.py diff --git a/EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_fallout.py b/RE/paddlemetric/src/paddlemetrics/retrieval/retrieval_fallout.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_fallout.py rename to RE/paddlemetric/src/paddlemetrics/retrieval/retrieval_fallout.py diff --git a/EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_hit_rate.py b/RE/paddlemetric/src/paddlemetrics/retrieval/retrieval_hit_rate.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_hit_rate.py rename to RE/paddlemetric/src/paddlemetrics/retrieval/retrieval_hit_rate.py diff --git a/EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_metric.py b/RE/paddlemetric/src/paddlemetrics/retrieval/retrieval_metric.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_metric.py rename to RE/paddlemetric/src/paddlemetrics/retrieval/retrieval_metric.py diff --git a/EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_ndcg.py b/RE/paddlemetric/src/paddlemetrics/retrieval/retrieval_ndcg.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_ndcg.py rename to RE/paddlemetric/src/paddlemetrics/retrieval/retrieval_ndcg.py diff --git a/EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_precision.py b/RE/paddlemetric/src/paddlemetrics/retrieval/retrieval_precision.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_precision.py rename to RE/paddlemetric/src/paddlemetrics/retrieval/retrieval_precision.py diff --git a/EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_r_precision.py b/RE/paddlemetric/src/paddlemetrics/retrieval/retrieval_r_precision.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_r_precision.py rename to RE/paddlemetric/src/paddlemetrics/retrieval/retrieval_r_precision.py diff --git a/EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_recall.py b/RE/paddlemetric/src/paddlemetrics/retrieval/retrieval_recall.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/retrieval/retrieval_recall.py rename to RE/paddlemetric/src/paddlemetrics/retrieval/retrieval_recall.py diff --git a/EE/paddlemetric/src/paddlemetrics/setup_tools.py b/RE/paddlemetric/src/paddlemetrics/setup_tools.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/setup_tools.py rename to RE/paddlemetric/src/paddlemetrics/setup_tools.py diff --git a/EE/paddlemetric/src/paddlemetrics/text/__init__.py b/RE/paddlemetric/src/paddlemetrics/text/__init__.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/text/__init__.py rename to RE/paddlemetric/src/paddlemetrics/text/__init__.py diff --git a/EE/paddlemetric/src/paddlemetrics/text/bert.py b/RE/paddlemetric/src/paddlemetrics/text/bert.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/text/bert.py rename to RE/paddlemetric/src/paddlemetrics/text/bert.py diff --git a/EE/paddlemetric/src/paddlemetrics/text/bleu.py 
b/RE/paddlemetric/src/paddlemetrics/text/bleu.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/text/bleu.py rename to RE/paddlemetric/src/paddlemetrics/text/bleu.py diff --git a/EE/paddlemetric/src/paddlemetrics/text/rouge.py b/RE/paddlemetric/src/paddlemetrics/text/rouge.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/text/rouge.py rename to RE/paddlemetric/src/paddlemetrics/text/rouge.py diff --git a/EE/paddlemetric/src/paddlemetrics/text/sacre_bleu.py b/RE/paddlemetric/src/paddlemetrics/text/sacre_bleu.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/text/sacre_bleu.py rename to RE/paddlemetric/src/paddlemetrics/text/sacre_bleu.py diff --git a/EE/paddlemetric/src/paddlemetrics/text/wer.py b/RE/paddlemetric/src/paddlemetrics/text/wer.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/text/wer.py rename to RE/paddlemetric/src/paddlemetrics/text/wer.py diff --git a/EE/paddlemetric/src/paddlemetrics/utilities/__init__.py b/RE/paddlemetric/src/paddlemetrics/utilities/__init__.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/utilities/__init__.py rename to RE/paddlemetric/src/paddlemetrics/utilities/__init__.py diff --git a/EE/paddlemetric/src/paddlemetrics/utilities/checks.py b/RE/paddlemetric/src/paddlemetrics/utilities/checks.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/utilities/checks.py rename to RE/paddlemetric/src/paddlemetrics/utilities/checks.py diff --git a/EE/paddlemetric/src/paddlemetrics/utilities/data.py b/RE/paddlemetric/src/paddlemetrics/utilities/data.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/utilities/data.py rename to RE/paddlemetric/src/paddlemetrics/utilities/data.py diff --git a/EE/paddlemetric/src/paddlemetrics/utilities/distributed.py b/RE/paddlemetric/src/paddlemetrics/utilities/distributed.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/utilities/distributed.py rename to RE/paddlemetric/src/paddlemetrics/utilities/distributed.py diff --git a/EE/paddlemetric/src/paddlemetrics/utilities/enums.py b/RE/paddlemetric/src/paddlemetrics/utilities/enums.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/utilities/enums.py rename to RE/paddlemetric/src/paddlemetrics/utilities/enums.py diff --git a/EE/paddlemetric/src/paddlemetrics/utilities/exceptions.py b/RE/paddlemetric/src/paddlemetrics/utilities/exceptions.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/utilities/exceptions.py rename to RE/paddlemetric/src/paddlemetrics/utilities/exceptions.py diff --git a/EE/paddlemetric/src/paddlemetrics/utilities/imports.py b/RE/paddlemetric/src/paddlemetrics/utilities/imports.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/utilities/imports.py rename to RE/paddlemetric/src/paddlemetrics/utilities/imports.py diff --git a/EE/paddlemetric/src/paddlemetrics/utilities/prints.py b/RE/paddlemetric/src/paddlemetrics/utilities/prints.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/utilities/prints.py rename to RE/paddlemetric/src/paddlemetrics/utilities/prints.py diff --git a/EE/paddlemetric/src/paddlemetrics/wrappers/__init__.py b/RE/paddlemetric/src/paddlemetrics/wrappers/__init__.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/wrappers/__init__.py rename to RE/paddlemetric/src/paddlemetrics/wrappers/__init__.py diff --git a/EE/paddlemetric/src/paddlemetrics/wrappers/bootstrapping.py 
b/RE/paddlemetric/src/paddlemetrics/wrappers/bootstrapping.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/wrappers/bootstrapping.py rename to RE/paddlemetric/src/paddlemetrics/wrappers/bootstrapping.py diff --git a/EE/paddlemetric/src/paddlemetrics/wrappers/multioutput.py b/RE/paddlemetric/src/paddlemetrics/wrappers/multioutput.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/wrappers/multioutput.py rename to RE/paddlemetric/src/paddlemetrics/wrappers/multioutput.py diff --git a/EE/paddlemetric/src/paddlemetrics/wrappers/tracker.py b/RE/paddlemetric/src/paddlemetrics/wrappers/tracker.py similarity index 100% rename from EE/paddlemetric/src/paddlemetrics/wrappers/tracker.py rename to RE/paddlemetric/src/paddlemetrics/wrappers/tracker.py diff --git a/EE/paddlemetric/src/setup.py b/RE/paddlemetric/src/setup.py similarity index 100% rename from EE/paddlemetric/src/setup.py rename to RE/paddlemetric/src/setup.py diff --git a/EE/paddlemetric/src/tests/__init__.py b/RE/paddlemetric/src/tests/__init__.py similarity index 100% rename from EE/paddlemetric/src/tests/__init__.py rename to RE/paddlemetric/src/tests/__init__.py diff --git a/EE/paddlemetric/src/tests/audio/__init__.py b/RE/paddlemetric/src/tests/audio/__init__.py similarity index 100% rename from EE/paddlemetric/src/tests/audio/__init__.py rename to RE/paddlemetric/src/tests/audio/__init__.py diff --git a/EE/paddlemetric/src/tests/audio/examples/audio_speech.wav b/RE/paddlemetric/src/tests/audio/examples/audio_speech.wav similarity index 100% rename from EE/paddlemetric/src/tests/audio/examples/audio_speech.wav rename to RE/paddlemetric/src/tests/audio/examples/audio_speech.wav diff --git a/EE/paddlemetric/src/tests/audio/examples/audio_speech_bab_0dB.wav b/RE/paddlemetric/src/tests/audio/examples/audio_speech_bab_0dB.wav similarity index 100% rename from EE/paddlemetric/src/tests/audio/examples/audio_speech_bab_0dB.wav rename to RE/paddlemetric/src/tests/audio/examples/audio_speech_bab_0dB.wav diff --git a/EE/paddlemetric/src/tests/audio/test_pesq.py b/RE/paddlemetric/src/tests/audio/test_pesq.py similarity index 100% rename from EE/paddlemetric/src/tests/audio/test_pesq.py rename to RE/paddlemetric/src/tests/audio/test_pesq.py diff --git a/EE/paddlemetric/src/tests/audio/test_pit.py b/RE/paddlemetric/src/tests/audio/test_pit.py similarity index 100% rename from EE/paddlemetric/src/tests/audio/test_pit.py rename to RE/paddlemetric/src/tests/audio/test_pit.py diff --git a/EE/paddlemetric/src/tests/audio/test_si_sdr.py b/RE/paddlemetric/src/tests/audio/test_si_sdr.py similarity index 100% rename from EE/paddlemetric/src/tests/audio/test_si_sdr.py rename to RE/paddlemetric/src/tests/audio/test_si_sdr.py diff --git a/EE/paddlemetric/src/tests/audio/test_si_snr.py b/RE/paddlemetric/src/tests/audio/test_si_snr.py similarity index 100% rename from EE/paddlemetric/src/tests/audio/test_si_snr.py rename to RE/paddlemetric/src/tests/audio/test_si_snr.py diff --git a/EE/paddlemetric/src/tests/audio/test_snr.py b/RE/paddlemetric/src/tests/audio/test_snr.py similarity index 100% rename from EE/paddlemetric/src/tests/audio/test_snr.py rename to RE/paddlemetric/src/tests/audio/test_snr.py diff --git a/EE/paddlemetric/src/tests/audio/test_stoi.py b/RE/paddlemetric/src/tests/audio/test_stoi.py similarity index 100% rename from EE/paddlemetric/src/tests/audio/test_stoi.py rename to RE/paddlemetric/src/tests/audio/test_stoi.py diff --git a/EE/paddlemetric/src/tests/bases/__init__.py 
b/RE/paddlemetric/src/tests/bases/__init__.py similarity index 100% rename from EE/paddlemetric/src/tests/bases/__init__.py rename to RE/paddlemetric/src/tests/bases/__init__.py diff --git a/EE/paddlemetric/src/tests/bases/test.log b/RE/paddlemetric/src/tests/bases/test.log similarity index 100% rename from EE/paddlemetric/src/tests/bases/test.log rename to RE/paddlemetric/src/tests/bases/test.log diff --git a/EE/paddlemetric/src/tests/bases/test_aggregation.py b/RE/paddlemetric/src/tests/bases/test_aggregation.py similarity index 100% rename from EE/paddlemetric/src/tests/bases/test_aggregation.py rename to RE/paddlemetric/src/tests/bases/test_aggregation.py diff --git a/EE/paddlemetric/src/tests/bases/test_collections.py b/RE/paddlemetric/src/tests/bases/test_collections.py similarity index 100% rename from EE/paddlemetric/src/tests/bases/test_collections.py rename to RE/paddlemetric/src/tests/bases/test_collections.py diff --git a/EE/paddlemetric/src/tests/bases/test_composition.py b/RE/paddlemetric/src/tests/bases/test_composition.py similarity index 100% rename from EE/paddlemetric/src/tests/bases/test_composition.py rename to RE/paddlemetric/src/tests/bases/test_composition.py diff --git a/EE/paddlemetric/src/tests/bases/test_ddp.py b/RE/paddlemetric/src/tests/bases/test_ddp.py similarity index 100% rename from EE/paddlemetric/src/tests/bases/test_ddp.py rename to RE/paddlemetric/src/tests/bases/test_ddp.py diff --git a/EE/paddlemetric/src/tests/bases/test_hashing.py b/RE/paddlemetric/src/tests/bases/test_hashing.py similarity index 100% rename from EE/paddlemetric/src/tests/bases/test_hashing.py rename to RE/paddlemetric/src/tests/bases/test_hashing.py diff --git a/EE/paddlemetric/src/tests/bases/test_metric.py b/RE/paddlemetric/src/tests/bases/test_metric.py similarity index 100% rename from EE/paddlemetric/src/tests/bases/test_metric.py rename to RE/paddlemetric/src/tests/bases/test_metric.py diff --git a/EE/paddlemetric/src/tests/classification/__init__.py b/RE/paddlemetric/src/tests/classification/__init__.py similarity index 100% rename from EE/paddlemetric/src/tests/classification/__init__.py rename to RE/paddlemetric/src/tests/classification/__init__.py diff --git a/EE/paddlemetric/src/tests/classification/inputs.py b/RE/paddlemetric/src/tests/classification/inputs.py similarity index 100% rename from EE/paddlemetric/src/tests/classification/inputs.py rename to RE/paddlemetric/src/tests/classification/inputs.py diff --git a/EE/paddlemetric/src/tests/classification/test.log b/RE/paddlemetric/src/tests/classification/test.log similarity index 100% rename from EE/paddlemetric/src/tests/classification/test.log rename to RE/paddlemetric/src/tests/classification/test.log diff --git a/EE/paddlemetric/src/tests/classification/test_accuracy.py b/RE/paddlemetric/src/tests/classification/test_accuracy.py similarity index 100% rename from EE/paddlemetric/src/tests/classification/test_accuracy.py rename to RE/paddlemetric/src/tests/classification/test_accuracy.py diff --git a/EE/paddlemetric/src/tests/classification/test_auc.py b/RE/paddlemetric/src/tests/classification/test_auc.py similarity index 100% rename from EE/paddlemetric/src/tests/classification/test_auc.py rename to RE/paddlemetric/src/tests/classification/test_auc.py diff --git a/EE/paddlemetric/src/tests/classification/test_auroc.py b/RE/paddlemetric/src/tests/classification/test_auroc.py similarity index 100% rename from EE/paddlemetric/src/tests/classification/test_auroc.py rename to 
RE/paddlemetric/src/tests/classification/test_auroc.py diff --git a/EE/paddlemetric/src/tests/classification/test_average_precision.py b/RE/paddlemetric/src/tests/classification/test_average_precision.py similarity index 100% rename from EE/paddlemetric/src/tests/classification/test_average_precision.py rename to RE/paddlemetric/src/tests/classification/test_average_precision.py diff --git a/EE/paddlemetric/src/tests/classification/test_binned_precision_recall.py b/RE/paddlemetric/src/tests/classification/test_binned_precision_recall.py similarity index 100% rename from EE/paddlemetric/src/tests/classification/test_binned_precision_recall.py rename to RE/paddlemetric/src/tests/classification/test_binned_precision_recall.py diff --git a/EE/paddlemetric/src/tests/classification/test_calibration_error.py b/RE/paddlemetric/src/tests/classification/test_calibration_error.py similarity index 100% rename from EE/paddlemetric/src/tests/classification/test_calibration_error.py rename to RE/paddlemetric/src/tests/classification/test_calibration_error.py diff --git a/EE/paddlemetric/src/tests/classification/test_cohen_kappa.py b/RE/paddlemetric/src/tests/classification/test_cohen_kappa.py similarity index 100% rename from EE/paddlemetric/src/tests/classification/test_cohen_kappa.py rename to RE/paddlemetric/src/tests/classification/test_cohen_kappa.py diff --git a/EE/paddlemetric/src/tests/classification/test_confusion_matrix.py b/RE/paddlemetric/src/tests/classification/test_confusion_matrix.py similarity index 100% rename from EE/paddlemetric/src/tests/classification/test_confusion_matrix.py rename to RE/paddlemetric/src/tests/classification/test_confusion_matrix.py diff --git a/EE/paddlemetric/src/tests/classification/test_f_beta.py b/RE/paddlemetric/src/tests/classification/test_f_beta.py similarity index 100% rename from EE/paddlemetric/src/tests/classification/test_f_beta.py rename to RE/paddlemetric/src/tests/classification/test_f_beta.py diff --git a/EE/paddlemetric/src/tests/classification/test_hamming_distance.py b/RE/paddlemetric/src/tests/classification/test_hamming_distance.py similarity index 100% rename from EE/paddlemetric/src/tests/classification/test_hamming_distance.py rename to RE/paddlemetric/src/tests/classification/test_hamming_distance.py diff --git a/EE/paddlemetric/src/tests/classification/test_hinge.py b/RE/paddlemetric/src/tests/classification/test_hinge.py similarity index 100% rename from EE/paddlemetric/src/tests/classification/test_hinge.py rename to RE/paddlemetric/src/tests/classification/test_hinge.py diff --git a/EE/paddlemetric/src/tests/classification/test_inputs.py b/RE/paddlemetric/src/tests/classification/test_inputs.py similarity index 100% rename from EE/paddlemetric/src/tests/classification/test_inputs.py rename to RE/paddlemetric/src/tests/classification/test_inputs.py diff --git a/EE/paddlemetric/src/tests/classification/test_iou.py b/RE/paddlemetric/src/tests/classification/test_iou.py similarity index 100% rename from EE/paddlemetric/src/tests/classification/test_iou.py rename to RE/paddlemetric/src/tests/classification/test_iou.py diff --git a/EE/paddlemetric/src/tests/classification/test_kl_divergence.py b/RE/paddlemetric/src/tests/classification/test_kl_divergence.py similarity index 100% rename from EE/paddlemetric/src/tests/classification/test_kl_divergence.py rename to RE/paddlemetric/src/tests/classification/test_kl_divergence.py diff --git a/EE/paddlemetric/src/tests/classification/test_matthews_corrcoef.py 
b/RE/paddlemetric/src/tests/classification/test_matthews_corrcoef.py similarity index 100% rename from EE/paddlemetric/src/tests/classification/test_matthews_corrcoef.py rename to RE/paddlemetric/src/tests/classification/test_matthews_corrcoef.py diff --git a/EE/paddlemetric/src/tests/classification/test_precision_recall.py b/RE/paddlemetric/src/tests/classification/test_precision_recall.py similarity index 100% rename from EE/paddlemetric/src/tests/classification/test_precision_recall.py rename to RE/paddlemetric/src/tests/classification/test_precision_recall.py diff --git a/EE/paddlemetric/src/tests/classification/test_precision_recall_curve.py b/RE/paddlemetric/src/tests/classification/test_precision_recall_curve.py similarity index 100% rename from EE/paddlemetric/src/tests/classification/test_precision_recall_curve.py rename to RE/paddlemetric/src/tests/classification/test_precision_recall_curve.py diff --git a/EE/paddlemetric/src/tests/classification/test_roc.py b/RE/paddlemetric/src/tests/classification/test_roc.py similarity index 100% rename from EE/paddlemetric/src/tests/classification/test_roc.py rename to RE/paddlemetric/src/tests/classification/test_roc.py diff --git a/EE/paddlemetric/src/tests/classification/test_specificity.py b/RE/paddlemetric/src/tests/classification/test_specificity.py similarity index 100% rename from EE/paddlemetric/src/tests/classification/test_specificity.py rename to RE/paddlemetric/src/tests/classification/test_specificity.py diff --git a/EE/paddlemetric/src/tests/classification/test_stat_scores.py b/RE/paddlemetric/src/tests/classification/test_stat_scores.py similarity index 100% rename from EE/paddlemetric/src/tests/classification/test_stat_scores.py rename to RE/paddlemetric/src/tests/classification/test_stat_scores.py diff --git a/EE/paddlemetric/src/tests/functional/__init__.py b/RE/paddlemetric/src/tests/functional/__init__.py similarity index 100% rename from EE/paddlemetric/src/tests/functional/__init__.py rename to RE/paddlemetric/src/tests/functional/__init__.py diff --git a/EE/paddlemetric/src/tests/functional/test_classification.py b/RE/paddlemetric/src/tests/functional/test_classification.py similarity index 100% rename from EE/paddlemetric/src/tests/functional/test_classification.py rename to RE/paddlemetric/src/tests/functional/test_classification.py diff --git a/EE/paddlemetric/src/tests/functional/test_image_gradients.py b/RE/paddlemetric/src/tests/functional/test_image_gradients.py similarity index 100% rename from EE/paddlemetric/src/tests/functional/test_image_gradients.py rename to RE/paddlemetric/src/tests/functional/test_image_gradients.py diff --git a/EE/paddlemetric/src/tests/functional/test_reduction.py b/RE/paddlemetric/src/tests/functional/test_reduction.py similarity index 100% rename from EE/paddlemetric/src/tests/functional/test_reduction.py rename to RE/paddlemetric/src/tests/functional/test_reduction.py diff --git a/EE/paddlemetric/src/tests/functional/test_self_supervised.py b/RE/paddlemetric/src/tests/functional/test_self_supervised.py similarity index 100% rename from EE/paddlemetric/src/tests/functional/test_self_supervised.py rename to RE/paddlemetric/src/tests/functional/test_self_supervised.py diff --git a/EE/paddlemetric/src/tests/helpers/__init__.py b/RE/paddlemetric/src/tests/helpers/__init__.py similarity index 100% rename from EE/paddlemetric/src/tests/helpers/__init__.py rename to RE/paddlemetric/src/tests/helpers/__init__.py diff --git a/EE/paddlemetric/src/tests/helpers/non_sklearn_metrics.py 
b/RE/paddlemetric/src/tests/helpers/non_sklearn_metrics.py similarity index 100% rename from EE/paddlemetric/src/tests/helpers/non_sklearn_metrics.py rename to RE/paddlemetric/src/tests/helpers/non_sklearn_metrics.py diff --git a/EE/paddlemetric/src/tests/helpers/testers.py b/RE/paddlemetric/src/tests/helpers/testers.py similarity index 100% rename from EE/paddlemetric/src/tests/helpers/testers.py rename to RE/paddlemetric/src/tests/helpers/testers.py diff --git a/EE/paddlemetric/src/tests/image/__init__.py b/RE/paddlemetric/src/tests/image/__init__.py similarity index 100% rename from EE/paddlemetric/src/tests/image/__init__.py rename to RE/paddlemetric/src/tests/image/__init__.py diff --git a/EE/paddlemetric/src/tests/image/test_fid.py b/RE/paddlemetric/src/tests/image/test_fid.py similarity index 100% rename from EE/paddlemetric/src/tests/image/test_fid.py rename to RE/paddlemetric/src/tests/image/test_fid.py diff --git a/EE/paddlemetric/src/tests/image/test_inception.py b/RE/paddlemetric/src/tests/image/test_inception.py similarity index 100% rename from EE/paddlemetric/src/tests/image/test_inception.py rename to RE/paddlemetric/src/tests/image/test_inception.py diff --git a/EE/paddlemetric/src/tests/image/test_kid.py b/RE/paddlemetric/src/tests/image/test_kid.py similarity index 100% rename from EE/paddlemetric/src/tests/image/test_kid.py rename to RE/paddlemetric/src/tests/image/test_kid.py diff --git a/EE/paddlemetric/src/tests/image/test_lpips.py b/RE/paddlemetric/src/tests/image/test_lpips.py similarity index 100% rename from EE/paddlemetric/src/tests/image/test_lpips.py rename to RE/paddlemetric/src/tests/image/test_lpips.py diff --git a/EE/paddlemetric/src/tests/image/test_psnr.py b/RE/paddlemetric/src/tests/image/test_psnr.py similarity index 100% rename from EE/paddlemetric/src/tests/image/test_psnr.py rename to RE/paddlemetric/src/tests/image/test_psnr.py diff --git a/EE/paddlemetric/src/tests/image/test_ssim.py b/RE/paddlemetric/src/tests/image/test_ssim.py similarity index 100% rename from EE/paddlemetric/src/tests/image/test_ssim.py rename to RE/paddlemetric/src/tests/image/test_ssim.py diff --git a/EE/paddlemetric/src/tests/pairwise/__init__.py b/RE/paddlemetric/src/tests/pairwise/__init__.py similarity index 100% rename from EE/paddlemetric/src/tests/pairwise/__init__.py rename to RE/paddlemetric/src/tests/pairwise/__init__.py diff --git a/EE/paddlemetric/src/tests/pairwise/test_pairwise_distance.py b/RE/paddlemetric/src/tests/pairwise/test_pairwise_distance.py similarity index 100% rename from EE/paddlemetric/src/tests/pairwise/test_pairwise_distance.py rename to RE/paddlemetric/src/tests/pairwise/test_pairwise_distance.py diff --git a/EE/paddlemetric/src/tests/regression/__init__.py b/RE/paddlemetric/src/tests/regression/__init__.py similarity index 100% rename from EE/paddlemetric/src/tests/regression/__init__.py rename to RE/paddlemetric/src/tests/regression/__init__.py diff --git a/EE/paddlemetric/src/tests/regression/test_cosine_similarity.py b/RE/paddlemetric/src/tests/regression/test_cosine_similarity.py similarity index 100% rename from EE/paddlemetric/src/tests/regression/test_cosine_similarity.py rename to RE/paddlemetric/src/tests/regression/test_cosine_similarity.py diff --git a/EE/paddlemetric/src/tests/regression/test_explained_variance.py b/RE/paddlemetric/src/tests/regression/test_explained_variance.py similarity index 100% rename from EE/paddlemetric/src/tests/regression/test_explained_variance.py rename to 
RE/paddlemetric/src/tests/regression/test_explained_variance.py diff --git a/EE/paddlemetric/src/tests/regression/test_mean_error.py b/RE/paddlemetric/src/tests/regression/test_mean_error.py similarity index 100% rename from EE/paddlemetric/src/tests/regression/test_mean_error.py rename to RE/paddlemetric/src/tests/regression/test_mean_error.py diff --git a/EE/paddlemetric/src/tests/regression/test_pearson.py b/RE/paddlemetric/src/tests/regression/test_pearson.py similarity index 100% rename from EE/paddlemetric/src/tests/regression/test_pearson.py rename to RE/paddlemetric/src/tests/regression/test_pearson.py diff --git a/EE/paddlemetric/src/tests/regression/test_r2.py b/RE/paddlemetric/src/tests/regression/test_r2.py similarity index 100% rename from EE/paddlemetric/src/tests/regression/test_r2.py rename to RE/paddlemetric/src/tests/regression/test_r2.py diff --git a/EE/paddlemetric/src/tests/regression/test_spearman.py b/RE/paddlemetric/src/tests/regression/test_spearman.py similarity index 100% rename from EE/paddlemetric/src/tests/regression/test_spearman.py rename to RE/paddlemetric/src/tests/regression/test_spearman.py diff --git a/EE/paddlemetric/src/tests/regression/test_tweedie_deviance.py b/RE/paddlemetric/src/tests/regression/test_tweedie_deviance.py similarity index 100% rename from EE/paddlemetric/src/tests/regression/test_tweedie_deviance.py rename to RE/paddlemetric/src/tests/regression/test_tweedie_deviance.py diff --git a/EE/paddlemetric/src/tests/retrieval/__init__.py b/RE/paddlemetric/src/tests/retrieval/__init__.py similarity index 100% rename from EE/paddlemetric/src/tests/retrieval/__init__.py rename to RE/paddlemetric/src/tests/retrieval/__init__.py diff --git a/EE/paddlemetric/src/tests/retrieval/helpers.py b/RE/paddlemetric/src/tests/retrieval/helpers.py similarity index 100% rename from EE/paddlemetric/src/tests/retrieval/helpers.py rename to RE/paddlemetric/src/tests/retrieval/helpers.py diff --git a/EE/paddlemetric/src/tests/retrieval/inputs.py b/RE/paddlemetric/src/tests/retrieval/inputs.py similarity index 100% rename from EE/paddlemetric/src/tests/retrieval/inputs.py rename to RE/paddlemetric/src/tests/retrieval/inputs.py diff --git a/EE/paddlemetric/src/tests/retrieval/test_fallout.py b/RE/paddlemetric/src/tests/retrieval/test_fallout.py similarity index 100% rename from EE/paddlemetric/src/tests/retrieval/test_fallout.py rename to RE/paddlemetric/src/tests/retrieval/test_fallout.py diff --git a/EE/paddlemetric/src/tests/retrieval/test_hit_rate.py b/RE/paddlemetric/src/tests/retrieval/test_hit_rate.py similarity index 100% rename from EE/paddlemetric/src/tests/retrieval/test_hit_rate.py rename to RE/paddlemetric/src/tests/retrieval/test_hit_rate.py diff --git a/EE/paddlemetric/src/tests/retrieval/test_map.py b/RE/paddlemetric/src/tests/retrieval/test_map.py similarity index 100% rename from EE/paddlemetric/src/tests/retrieval/test_map.py rename to RE/paddlemetric/src/tests/retrieval/test_map.py diff --git a/EE/paddlemetric/src/tests/retrieval/test_mrr.py b/RE/paddlemetric/src/tests/retrieval/test_mrr.py similarity index 100% rename from EE/paddlemetric/src/tests/retrieval/test_mrr.py rename to RE/paddlemetric/src/tests/retrieval/test_mrr.py diff --git a/EE/paddlemetric/src/tests/retrieval/test_ndcg.py b/RE/paddlemetric/src/tests/retrieval/test_ndcg.py similarity index 100% rename from EE/paddlemetric/src/tests/retrieval/test_ndcg.py rename to RE/paddlemetric/src/tests/retrieval/test_ndcg.py diff --git a/EE/paddlemetric/src/tests/retrieval/test_precision.py 
b/RE/paddlemetric/src/tests/retrieval/test_precision.py similarity index 100% rename from EE/paddlemetric/src/tests/retrieval/test_precision.py rename to RE/paddlemetric/src/tests/retrieval/test_precision.py diff --git a/EE/paddlemetric/src/tests/retrieval/test_r_precision.py b/RE/paddlemetric/src/tests/retrieval/test_r_precision.py similarity index 100% rename from EE/paddlemetric/src/tests/retrieval/test_r_precision.py rename to RE/paddlemetric/src/tests/retrieval/test_r_precision.py diff --git a/EE/paddlemetric/src/tests/retrieval/test_recall.py b/RE/paddlemetric/src/tests/retrieval/test_recall.py similarity index 100% rename from EE/paddlemetric/src/tests/retrieval/test_recall.py rename to RE/paddlemetric/src/tests/retrieval/test_recall.py diff --git a/EE/paddlemetric/src/tests/test_utilities.py b/RE/paddlemetric/src/tests/test_utilities.py similarity index 100% rename from EE/paddlemetric/src/tests/test_utilities.py rename to RE/paddlemetric/src/tests/test_utilities.py diff --git a/EE/paddlemetric/src/tests/text/__init__.py b/RE/paddlemetric/src/tests/text/__init__.py similarity index 100% rename from EE/paddlemetric/src/tests/text/__init__.py rename to RE/paddlemetric/src/tests/text/__init__.py diff --git a/EE/paddlemetric/src/tests/text/helpers.py b/RE/paddlemetric/src/tests/text/helpers.py similarity index 100% rename from EE/paddlemetric/src/tests/text/helpers.py rename to RE/paddlemetric/src/tests/text/helpers.py diff --git a/EE/paddlemetric/src/tests/text/test_bertscore.py b/RE/paddlemetric/src/tests/text/test_bertscore.py similarity index 100% rename from EE/paddlemetric/src/tests/text/test_bertscore.py rename to RE/paddlemetric/src/tests/text/test_bertscore.py diff --git a/EE/paddlemetric/src/tests/text/test_bleu.py b/RE/paddlemetric/src/tests/text/test_bleu.py similarity index 100% rename from EE/paddlemetric/src/tests/text/test_bleu.py rename to RE/paddlemetric/src/tests/text/test_bleu.py diff --git a/EE/paddlemetric/src/tests/text/test_rouge.py b/RE/paddlemetric/src/tests/text/test_rouge.py similarity index 100% rename from EE/paddlemetric/src/tests/text/test_rouge.py rename to RE/paddlemetric/src/tests/text/test_rouge.py diff --git a/EE/paddlemetric/src/tests/text/test_sacre_bleu.py b/RE/paddlemetric/src/tests/text/test_sacre_bleu.py similarity index 100% rename from EE/paddlemetric/src/tests/text/test_sacre_bleu.py rename to RE/paddlemetric/src/tests/text/test_sacre_bleu.py diff --git a/EE/paddlemetric/src/tests/text/test_wer.py b/RE/paddlemetric/src/tests/text/test_wer.py similarity index 100% rename from EE/paddlemetric/src/tests/text/test_wer.py rename to RE/paddlemetric/src/tests/text/test_wer.py diff --git a/EE/paddlemetric/src/tests/wrappers/__init__.py b/RE/paddlemetric/src/tests/wrappers/__init__.py similarity index 100% rename from EE/paddlemetric/src/tests/wrappers/__init__.py rename to RE/paddlemetric/src/tests/wrappers/__init__.py diff --git a/EE/paddlemetric/src/tests/wrappers/test_bootstrapping.py b/RE/paddlemetric/src/tests/wrappers/test_bootstrapping.py similarity index 100% rename from EE/paddlemetric/src/tests/wrappers/test_bootstrapping.py rename to RE/paddlemetric/src/tests/wrappers/test_bootstrapping.py diff --git a/EE/paddlemetric/src/tests/wrappers/test_multioutput.py b/RE/paddlemetric/src/tests/wrappers/test_multioutput.py similarity index 100% rename from EE/paddlemetric/src/tests/wrappers/test_multioutput.py rename to RE/paddlemetric/src/tests/wrappers/test_multioutput.py diff --git a/EE/paddlemetric/src/tests/wrappers/test_tracker.py 
similarity index 100%
rename from EE/paddlemetric/src/tests/wrappers/test_tracker.py
rename to RE/paddlemetric/src/tests/wrappers/test_tracker.py
diff --git a/README.md b/README.md
index b2b936ee6..a31abf504 100644
--- a/README.md
+++ b/README.md
@@ -7,6 +7,7 @@
 * [Natural Language Processing](#自然语言处理)
 * [Knowledge Graph](#知识图谱)
 * [Spatial-Temporal Data Mining](#时空数据挖掘)
+* [Research Efficiency](#研发效率)
 
 ## Computer Vision
 | Task Type | Directory | Description | Paper Link |
@@ -68,6 +69,11 @@
 | POI Generation |[P3AC](ST_DM/KDD2020-P3AC)| Automatic POI generation with personalized prefix embeddings. | - |
 | Region Generation |[P3AC](ST_DM/GenRegion)| A road-network-based region partitioning method that fully partitions a given area along the road network, with no overlap and no gaps between regions; the algorithm supports partitioning the whole globe. | - |
 
+## Research Efficiency
+| Package | Directory | Description |
+|--------------|------------------------|-------------------------------------------------|
+| paddleext | [RE](RE/paddleext) | An extension plugin for paddle that lets some pytorch code run unchanged on the paddle platform. |
+| paddlemetric | [P3AC](RE/paddlemetric) | A paddle port of torchmetric; classification metrics are supported so far. |
 ## License
 This guide is contributed by [PaddlePaddle](https://github.com/PaddlePaddle/Paddle) and is licensed under the [Apache-2.0 license](LICENSE).

From c8ce71ba26c0c590932f7ef40948dd46befc3441 Mon Sep 17 00:00:00 2001
From: rudaoshi
Date: Sat, 26 Nov 2022 11:29:39 +0800
Subject: [PATCH 4/4] revise readme

---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index a31abf504..30fcc4c19 100644
--- a/README.md
+++ b/README.md
@@ -72,8 +72,8 @@
 ## Research Efficiency
 | Package | Directory | Description |
 |--------------|------------------------|-------------------------------------------------|
-| paddleext | [RE](RE/paddleext) | An extension plugin for paddle that lets some pytorch code run unchanged on the paddle platform. |
-| paddlemetric | [P3AC](RE/paddlemetric) | A paddle port of torchmetric; classification metrics are supported so far. |
+| paddleext | [paddleext](RE/paddleext) | An extension plugin for paddle that lets some pytorch code run unchanged on the paddle platform. |
+| paddlemetric | [paddlemetric](RE/paddlemetric) | A paddle port of torchmetric; classification metrics are supported so far. |
 
 ## License
 This guide is contributed by [PaddlePaddle](https://github.com/PaddlePaddle/Paddle) and is licensed under the [Apache-2.0 license](LICENSE).
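
The README rows above describe paddleext as a compatibility layer that lets pytorch-style code execute on paddle. As a rough illustration of that idea only: the module path `paddleext.torchapi` and every call below are assumptions chosen for the sketch, not the package's documented API.

    # Minimal sketch, assuming paddleext exposes a torch-compatible namespace.
    # NOTE: module path and all calls below are assumptions, not confirmed API.

    # A pytorch script would normally begin with:  import torch
    # Here the assumed torch-style namespace is imported under the same name,
    # so the tensor code that follows runs on the paddle backend instead:
    from paddleext import torchapi as torch  # assumed drop-in replacement

    x = torch.ones(2, 3)   # create a 2x3 tensor through the torch-style API
    y = x * 2 + 1          # elementwise arithmetic executed by paddle
    print(y.sum())         # reduce to a scalar tensor and print it

If the package works as the table claims, the only change to an existing pytorch snippet is the import line; the rest of the code stays untouched.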