diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index cd23502..223ccc0 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -21,7 +21,7 @@ jobs: version: - '1.7' - '1.8' - - '~1.9.0-0' + - '1.9' - 'nightly' os: - ubuntu-latest diff --git a/README.md b/README.md index e283ea0..50454ba 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,4 @@ +# πŸƒ Quick Tour ![](dev/logo/wide_logo.png) @@ -5,8 +6,6 @@ `ConformalPrediction.jl` is a package for Predictive Uncertainty Quantification (UQ) through Conformal Prediction (CP) in Julia. It is designed to work with supervised models trained in [MLJ](https://alan-turing-institute.github.io/MLJ.jl/dev/) (Blaom et al. 2020). Conformal Prediction is easy-to-understand, easy-to-use and model-agnostic and it works under minimal distributional assumptions. -## πŸƒ Quick Tour - > First time here? Take a quick interactive [tour](https://binder.plutojl.org/v0.19.12/open?url=https%253A%252F%252Fraw.githubusercontent.com%252Fpat-alt%252FConformalPrediction.jl%252Fmain%252Fdocs%252Fpluto%252Fintro.jl) to see what this package can do: [![Binder](https://mybinder.org/badge_logo.svg)](https://binder.plutojl.org/v0.19.12/open?url=https%253A%252F%252Fraw.githubusercontent.com%252Fpat-alt%252FConformalPrediction.jl%252Fmain%252Fdocs%252Fpluto%252Fintro.jl) The button takes you to a [`Pluto.jl`](https://github.com/fonsp/Pluto.jl) 🎈 notebook hosted on [binder](https://mybinder.org/). In my own experience, this may take some time to load, certainly long enough to get yourself a hot beverage β˜•. Alternatively, you can run the notebook locally or skip the tour for now and read on below. @@ -106,11 +105,11 @@ yΜ‚[1:show_first] ``` 5-element Vector{Tuple{Float64, Float64}}: - (0.3514065102722679, 2.4948272235282696) - (-0.36580206168104035, 1.7780775120607) - (0.13671800582612756, 2.2792132778975933) - (0.15237308545277795, 2.2801138611534326) - (0.19080981472120032, 2.3863592104933966) + (0.3633641966158244, 2.4931870917039434) + (-0.3996500917580523, 1.7928089786632433) + (0.09653821719666224, 2.284119083077198) + (0.13354256573784634, 2.260005698592606) + (0.21655224395842643, 2.434258746076169) For simple models like this one, we can call a custom `Plots` recipe on our instance, fit result and data to generate the chart below: @@ -138,16 +137,16 @@ println("SSC: $(round(_eval.measurement[2], digits=3))") per_observation, fitted_params_per_fold, report_per_fold, train_test_rows Extract: - β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€ - β”‚ measure β”‚ operation β”‚ meas β‹― - β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€ - β”‚ emp_coverage (generic function with 1 method) β”‚ predict β”‚ 0.95 β‹― - β”‚ size_stratified_coverage (generic function with 1 method) β”‚ predict β”‚ 0.84 β‹― - └───────────────────────────────────────────────────────────┴───────────┴─────── - 3 columns omitted + β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€ + β”‚ measure β”‚ operation β”‚ 
measurement β”‚ 1.9 β‹― + β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€ + β”‚ ConformalPrediction.emp_coverage β”‚ predict β”‚ 0.95 β”‚ 0.0 β‹― + β”‚ ConformalPrediction.size_stratified_coverage β”‚ predict β”‚ 0.903 β”‚ 0.0 β‹― + └──────────────────────────────────────────────┴───────────┴─────────────┴────── + 2 columns omitted Empirical coverage: 0.95 - SSC: 0.841 + SSC: 0.903 ## πŸ“š Read on @@ -196,10 +195,11 @@ The package has been tested for the following supervised models offered by [MLJ] keys(tested_atomic_models[:regression]) ``` - KeySet for a Dict{Symbol, Expr} with 4 entries. Keys: - :nearest_neighbor + KeySet for a Dict{Symbol, Expr} with 5 entries. Keys: + :ridge + :lasso :evo_tree - :light_gbm + :nearest_neighbor :linear **Classification**: ``` julia keys(tested_atomic_models[:classification]) ``` - KeySet for a Dict{Symbol, Expr} with 4 entries. Keys: + KeySet for a Dict{Symbol, Expr} with 3 entries. Keys: :nearest_neighbor :evo_tree - :light_gbm :logistic ### Implemented Evaluation Metrics diff --git a/README.qmd b/README.qmd index 57f6bfa..feac2e7 100644 --- a/README.qmd +++ b/README.qmd @@ -12,7 +12,7 @@ crossref: fig-prefix: Figure tbl-prefix: Table bibliography: https://raw.githubusercontent.com/pat-alt/bib/main/bib.bib -jupyter: julia-1.8 +jupyter: julia-1.9 --- ![](dev/logo/wide_logo.png) diff --git a/README_files/figure-commonmark/cell-11-output-1.svg b/README_files/figure-commonmark/cell-11-output-1.svg index 7ccd4cd..2a280ec 100644 --- a/README_files/figure-commonmark/cell-11-output-1.svg +++ b/README_files/figure-commonmark/cell-11-output-1.svg @@ -1,55 +1,59 @@ [SVG chart regenerated by Quarto; only machine-generated plot markup changed, nothing human-readable to show] diff --git a/README_files/figure-commonmark/cell-7-output-1.svg b/README_files/figure-commonmark/cell-7-output-1.svg index 5336671..3e8ebcd 100644 --- a/README_files/figure-commonmark/cell-7-output-1.svg +++ b/README_files/figure-commonmark/cell-7-output-1.svg @@ -1,294 +1,294 @@ [SVG chart regenerated by Quarto; only machine-generated plot markup changed, nothing human-readable to show] diff --git a/_freeze/docs/src/how_to_guides/llm/execute-results/md.json 
b/_freeze/docs/src/how_to_guides/llm/execute-results/md.json new file mode 100644 index 0000000..3d1b5ed --- /dev/null +++ b/_freeze/docs/src/how_to_guides/llm/execute-results/md.json @@ -0,0 +1,10 @@ +{ + "hash": "9ace0a1ec38b37c490957e4b679ebce9", + "result": { + "markdown": "---\ntitle: How to Build a Conformal Chatbot\n---\n\n\n``` @meta\nCurrentModule = ConformalPrediction\n```\n\n\n\n\nLarge Language Models are all the buzz right now. They are used for a variety of tasks, including text classification, question answering, and text generation. In this tutorial, we will show how to conformalize a transformer language model for text classification. We will use the [Banking77](https://arxiv.org/abs/2003.04807) dataset [@casanueva2020efficient], which consists of 13,083 queries from 77 intents. On the model side, we will use the [DistilRoBERTa](https://huggingface.co/mrm8488/distilroberta-finetuned-banking77) model, which is a distilled version of [RoBERTa](https://arxiv.org/abs/1907.11692) [@liu2019roberta] finetuned on the Banking77 dataset.\n\n## Data\n\nThe data was downloaded from [HuggingFace](https://huggingface.co/datasets/PolyAI/banking77) πŸ€— (HF) and split into a proper training, calibration, and test set. All that's left to do is to load the data and preprocess it. We add 1 to the labels to make them 1-indexed (sorry Pythonistas 😜).\n\n\n::: {.cell execution_count=2}\n``` {.julia .cell-code}\n# Get labels:\ndf_labels = CSV.read(\"dev/artifacts/data/banking77/labels.csv\", DataFrame, drop=[1])\nlabels = df_labels[:,1]\n\n# Get data:\ndf_train = CSV.read(\"dev/artifacts/data/banking77/train.csv\", DataFrame, drop=[1])\ndf_cal = CSV.read(\"dev/artifacts/data/banking77/calibration.csv\", DataFrame, drop=[1])\ndf_full_train = vcat(df_train, df_cal)\ntrain_ratio = round(nrow(df_train)/nrow(df_full_train), digits=2)\ndf_test = CSV.read(\"dev/artifacts/data/banking77/test.csv\", DataFrame, drop=[1])\n\n# Preprocess data:\nqueries_train, y_train = collect(df_train.text), categorical(df_train.labels .+ 1)\nqueries_cal, y_cal = collect(df_cal.text), categorical(df_cal.labels .+ 1)\nqueries, y = collect(df_full_train.text), categorical(df_full_train.labels .+ 1)\nqueries_test, y_test = collect(df_test.text), categorical(df_test.labels .+ 1)\n```\n:::\n\n\n## HuggingFace Model\n\nThe model can be loaded from HF straight into our running Julia session using the [`Transformers.jl`](https://github.com/chengchingwen/Transformers.jl/tree/master) package. Below we load the tokenizer `tkr` and the model `mod`. The tokenizer is used to convert the text into a sequence of integers, which is then fed into the model. The model outputs a hidden state, which is then fed into a classifier to get the logits for each class. Finally, the logits are passed through a softmax function to get the corresponding predicted probabilities. 
Below we run a few queries through the model to see how it performs.\n\n::: {.cell execution_count=3}\n``` {.julia .cell-code}\n# Load model from HF πŸ€—:\ntkr = hgf\"mrm8488/distilroberta-finetuned-banking77:tokenizer\"\nmod = hgf\"mrm8488/distilroberta-finetuned-banking77:ForSequenceClassification\"\n\n# Test model:\nquery = [\n \"What is the base of the exchange rates?\",\n \"Why is my card not working?\",\n \"My Apple Pay is not working, what should I do?\",\n]\na = encode(tkr, query)\nb = mod.model(a)\nc = mod.cls(b.hidden_state)\nd = softmax(c.logit)\n[labels[i] for i in Flux.onecold(d)]\n```\n\n::: {.cell-output .cell-output-display execution_count=4}\n```\n3-element Vector{String}:\n \"exchange_rate\"\n \"card_not_working\"\n \"apple_pay_or_google_pay\"\n```\n:::\n:::\n\n\n## `MLJ` Interface\n\nSince our package is interfaced to [`MLJ.jl`](https://alan-turing-institute.github.io/MLJ.jl/dev/), we need to define a wrapper model that conforms to the `MLJ` interface. In order to add the model for general use, we would probably go through [`MLJFlux.jl`](https://github.com/FluxML/MLJFlux.jl), but for this tutorial, we will make our life easy and simply overload the `MLJBase.fit` and `MLJBase.predict` methods. Since the model from HF is already pre-trained and we are not interested in further fine-tuning, we will simply return the model object in the `MLJBase.fit` method. The `MLJBase.predict` method will then take the model object and the query and return the predicted probabilities. We also need to define the `MLJBase.target_scitype` and `MLJBase.predict_mode` methods. The former tells `MLJ` what the output type of the model is, and the latter can be used to retrieve the label with the highest predicted probability.\n\n::: {.cell execution_count=4}\n``` {.julia .cell-code}\nstruct IntentClassifier <: MLJBase.Probabilistic\n tkr::TextEncoders.AbstractTransformerTextEncoder\n mod::HuggingFace.HGFRobertaForSequenceClassification\nend\n\nfunction IntentClassifier(;\n tokenizer::TextEncoders.AbstractTransformerTextEncoder, \n model::HuggingFace.HGFRobertaForSequenceClassification,\n)\n IntentClassifier(tokenizer, model)\nend\n\nfunction get_hidden_state(clf::IntentClassifier, query::Union{AbstractString, Vector{<:AbstractString}})\n token = encode(clf.tkr, query)\n hidden_state = clf.mod.model(token).hidden_state\n return hidden_state\nend\n\n# This doesn't actually retrain the model, but it retrieves the classifier object\nfunction MLJBase.fit(clf::IntentClassifier, verbosity, X, y)\n cache=nothing\n report=nothing\n fitresult = (clf = clf.mod.cls, labels = levels(y))\n return fitresult, cache, report\nend\n\nfunction MLJBase.predict(clf::IntentClassifier, fitresult, Xnew)\n output = fitresult.clf(get_hidden_state(clf, Xnew))\n pΜ‚ = UnivariateFinite(fitresult.labels,softmax(output.logit)',pool=missing)\n return pΜ‚\nend\n\nMLJBase.target_scitype(clf::IntentClassifier) = AbstractVector{<:Finite}\n\nMLJBase.predict_mode(clf::IntentClassifier, fitresult, Xnew) = mode.(MLJBase.predict(clf, fitresult, Xnew))\n```\n:::\n\n\nTo test that everything is working as expected, we fit the model and generate predictions for a subset of the test data:\n\n::: {.cell execution_count=5}\n``` {.julia .cell-code}\nclf = IntentClassifier(tkr, mod)\ntop_n = 10\nfitresult, _, _ = MLJBase.fit(clf, 1, nothing, y_test[1:top_n])\n@time yΜ‚ = MLJBase.predict(clf, fitresult, queries_test[1:top_n]);\n```\n:::\n\n\n## Conformal Chatbot\n\nTo turn the wrapped, pre-trained model into a conformal intent classifier, we can now rely on standard API calls. We first wrap our atomic model, where we also specify the desired coverage rate and method. Since even simple forward passes are computationally expensive for our (small) LLM, we rely on Simple Inductive Conformal Classification.\n\n::: {.cell execution_count=6}\n``` {.julia .cell-code}\nconf_model = conformal_model(clf; coverage=0.95, method=:simple_inductive, train_ratio=train_ratio)\nmach = machine(conf_model, queries, y)\n@time fit!(mach)\nSerialization.serialize(\"dev/artifacts/models/banking77/simple_inductive.jls\", mach)\n```\n:::
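\n\nTo build some intuition for what the simple inductive (split conformal) method does under the hood, here is a minimal sketch under simplifying assumptions: `pΜ‚_cal` is a matrix of predicted probabilities on the calibration set, `y_cal` holds the corresponding integer labels, and the function names are our own. This is a toy illustration of the general recipe, not the package's actual implementation:\n\n``` julia\nusing Statistics\n\n# Toy sketch, not ConformalPrediction.jl's implementation.\n# Nonconformity score: one minus the probability assigned to the true label.\n# The threshold qΜ‚ is the finite-sample-adjusted quantile of these scores.\nfunction calibrate(pΜ‚_cal::AbstractMatrix, y_cal::AbstractVector{Int}; coverage=0.95)\n    n = length(y_cal)\n    scores = [1 - pΜ‚_cal[i, y_cal[i]] for i in 1:n]\n    return quantile(scores, min(1.0, ceil((n + 1) * coverage) / n))\nend\n\n# A new query's prediction set keeps every label whose score does not exceed qΜ‚:\nprediction_region(pΜ‚_new::AbstractVector, qΜ‚) = findall(p -> 1 - p <= qΜ‚, pΜ‚_new)\n```\n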
\n\nFinally, we use our conformal LLM to build a simple yet powerful chatbot that runs directly in the Julia REPL. Without dwelling on the details too much, the `conformal_chatbot` works as follows:\n\n1. Prompt user to explain their intent.\n2. Feed user input through conformal LLM and present the output to the user.\n3. If the conformal prediction set includes more than one label, prompt the user to either refine their input or choose one of the options included in the set.\n\n::: {.cell execution_count=7}\n``` {.julia .cell-code}\nmach = Serialization.deserialize(\"dev/artifacts/models/banking77/simple_inductive.jls\")\n\nfunction prediction_set(mach, query::String)\n pΜ‚ = MLJBase.predict(mach, query)[1]\n probs = pdf.(pΜ‚, collect(1:77))\n in_set = findall(probs .!= 0)\n labels_in_set = labels[in_set]\n probs_in_set = probs[in_set]\n _order = sortperm(-probs_in_set)\n plt = UnicodePlots.barplot(labels_in_set[_order], probs_in_set[_order], title=\"Possible Intents\")\n return labels_in_set, plt\nend\n\nfunction conformal_chatbot()\n println(\"πŸ‘‹ Hi, I'm Julia, your conformal chatbot. I'm here to help you with your banking query. Ask me anything or type 'exit' to exit ...\\n\")\n completed = false\n queries = \"\"\n while !completed\n query = readline()\n queries = queries * \",\" * query\n labels, plt = prediction_set(mach, queries)\n if length(labels) > 1\n println(\"πŸ€” Hmmm ... I can think of several options here. If any of these applies, simply type the corresponding number (e.g. '1' for the first option). Otherwise, can you refine your question, please?\\n\")\n println(plt)\n else\n println(\"πŸ₯³ I think you mean $(labels[1]). Correct?\")\n end\n\n # Exit:\n if query == \"exit\"\n println(\"πŸ‘‹ Bye!\")\n break\n end\n if query ∈ string.(collect(1:77))\n println(\"πŸ‘ Great! You've chosen '$(labels[parse(Int64, query)])'. I'm glad I could help you. Have a nice day!\")\n completed = true\n end\n end\nend\n```\n:::\n\n\nBelow we show the output for two example queries. The first one is very ambiguous. As expected, the resulting prediction set is large. \n\n::: {.cell execution_count=8}\n``` {.julia .cell-code}\nambiguous_query = \"transfer mondey?\"\nprediction_set(mach, ambiguous_query)[2]\n```\n:::\n\n\nThe more refined version of the prompt yields a smaller prediction set: less ambiguous prompts result in lower predictive uncertainty. \n\n::: {.cell execution_count=9}\n``` {.julia .cell-code}\nrefined_query = \"I tried to transfer money to my friend, but it failed.\"\nprediction_set(mach, refined_query)[2]\n```\n:::\n\n\nBelow we include a short demo video that shows the REPL-based chatbot in action.\n\n![](/docs/src/www/demo_llm.gif)\n\n## Final Remarks\n\nThis work was done in collaboration with colleagues at ING as part of the ING Analytics 2023 Experiment Week. Our team demonstrated that Conformal Prediction provides a powerful and principled alternative to top-*K* intent classification. 
We won the first prize by popular vote.\n\n", + "supporting": [ + "llm_files" + ], + "filters": [] + } +} \ No newline at end of file diff --git a/_freeze/docs/src/how_to_guides/mnist/execute-results/md.json b/_freeze/docs/src/how_to_guides/mnist/execute-results/md.json index a5e6403..7569fce 100644 --- a/_freeze/docs/src/how_to_guides/mnist/execute-results/md.json +++ b/_freeze/docs/src/how_to_guides/mnist/execute-results/md.json @@ -1,9 +1,9 @@ { - "hash": "70e7bfce4be9fa1fc08fda1cdc896820", + "hash": "ebfd9bdb3db0f57773605e73ddba4806", "result": { - "markdown": "```@meta\nCurrentModule = ConformalPrediction\n```\n\n# How to Conformalize a Deep Image Classifier\n\n\n\nDeep Learning is popular and --- for some tasks like image classification --- remarkably powerful. But it is also well-known that Deep Neural Networks (DNN) can be unstable [@goodfellow2014explaining] and poorly calibrated. Conformal Prediction can be used to mitigate these pitfalls. This how-to guide demonstrates how you can build an image classifier in `Flux.jl` and conformalize its predictions. For a formal treatment see @angelopoulos2022uncertainty.\n\n## The Task at Hand \n\nThe task at hand is to predict the labels of handwritten images of digits using the famous MNIST dataset [@lecun1998mnist]. Importing this popular machine learning dataset in Julia is made remarkably easy through `MLDatasets.jl`:\n\n::: {.cell execution_count=2}\n``` {.julia .cell-code}\nusing MLDatasets\nN = 1000\nXraw, yraw = MNIST(split=:train)[:]\nXraw = Xraw[:,:,1:N]\nyraw = yraw[1:N]\n```\n:::\n\n\nThe chart below shows a few random samples from the training data:\n\n::: {.cell execution_count=3}\n``` {.julia .cell-code}\nusing MLJ\nusing Images\nX = map(x -> convert2image(MNIST, x), eachslice(Xraw, dims=3))\ny = coerce(yraw, Multiclass)\n\nn_samples = 10\nmosaic(rand(X, n_samples)..., ncol=n_samples)\n```\n\n::: {.cell-output .cell-output-display execution_count=4}\n![Random samples from the MNIST dataset.](mnist_files/figure-commonmark/fig-samples-output-1.png){#fig-samples}\n:::\n:::\n\n\n## Building the Network\n\nTo model the mapping from image inputs to labels will rely on a simple Multi-Layer Perceptron (MLP). A great Julia library for Deep Learning is `Flux.jl`. But wait ... doesn't `ConformalPrediction.jl` work with models trained in `MLJ.jl`? That's right, but fortunately there exists a `Flux.jl` interface to `MLJ.jl`, namely `MLJFlux.jl`. The interface is still in its early stages, but already very powerful and easily accessible for anyone (like myself) who is used to building Neural Networks in `Flux.jl`. \n\nIn `Flux.jl`, you could build an MLP for this task as follows,\n\n::: {.cell execution_count=4}\n``` {.julia .cell-code}\nusing Flux\n\nmlp = Chain(\n Flux.flatten,\n Dense(prod((28,28)), 32, relu),\n Dense(32, 10)\n)\n```\n:::\n\n\nwhere `(28,28)` is just the input dimension (28x28 pixel images). Since we have ten digits, our output dimension is ten.^[For a full tutorial on how to build an MNIST image classifier relying solely on `Flux.jl`, check out this [tutorial](https://fluxml.ai/Flux.jl/stable/tutorials/2021-01-26-mlp/).]\n\nWe can do the exact same thing in `MLJFlux.jl` as follows,\n\n::: {.cell execution_count=5}\n``` {.julia .cell-code}\nusing MLJFlux\n\nbuilder = MLJFlux.@builder Chain(\n Flux.flatten,\n Dense(prod(n_in), 32, relu),\n Dense(32, n_out)\n)\n```\n:::\n\n\nwhere here we rely on the `@builder` macro to make the transition from `Flux.jl` to `MLJ.jl` as seamless as possible. 
Finally, `MLJFlux.jl` already comes with a number of helper functions to define plain-vanilla networks. In this case, we will use the `ImageClassifier` with our custom builder and cross-entropy loss:\n\n::: {.cell execution_count=6}\n``` {.julia .cell-code}\nImageClassifier = @load ImageClassifier\nclf = ImageClassifier(\n builder=builder,\n epochs=10,\n loss=Flux.crossentropy\n)\n```\n:::\n\n\nThe generated instance `clf` is a model (in the `MLJ.jl` sense) so from this point on we can rely on standard `MLJ.jl` workflows. For example, we can wrap our model in data to create a machine and then evaluate it on a holdout set as follows:\n\n::: {.cell execution_count=7}\n``` {.julia .cell-code}\nmach = machine(clf, X, y)\n\nevaluate!(\n mach,\n resampling=Holdout(rng=123, fraction_train=0.8),\n operation=predict_mode,\n measure=[accuracy]\n)\n```\n:::\n\n\nThe accuracy of our very simple model is not amazing, but good enough for the purpose of this tutorial. For each image, our MLP returns a softmax output for each possible digit: 0,1,2,3,...,9. Since each individual softmax output is valued between zero and one, $y_k\\in(0,1)$, this is commonly interpreted as a probability: $y_k \\coloneqq p(y=k|X)$. Edge cases -- that is values close to either zero or one -- indicate high predictive certainty. But this is only a heuristic notion of predictive uncertainty [@angelopoulos2021gentle]. Next, we will turn this heuristic notion of uncertainty into a rigorous one using Conformal Prediction.\n\n## Conformalizing the Network\n\nSince `clf` is a model, it is also compatible with our package: `ConformalPrediction.jl`. To conformalize our MLP, we therefore only need to call `conformal_model(clf)`. Since the generated instance `conf_model` is also just a model, we can still rely on standard `MLJ.jl` workflows. Below we first wrap it in data and then fit it. Aaaand ... we're done! Let's look at the results in the next section.\n\n::: {.cell execution_count=8}\n``` {.julia .cell-code}\nusing ConformalPrediction\nconf_model = conformal_model(clf; method=:simple_inductive)\nmach = machine(conf_model, X, y)\nfit!(mach)\n```\n:::\n\n\n## Results\n\n\n\nThe charts below present the results. The first row displays highly certain predictions, now defined in the rigorous sense of Conformal Prediction: in each case, the conformal set (just beneath the image) includes only one label. \n\nThe following two rows display increasingly uncertain predictions of set size two and three, respectively. They demonstrate that CP is well equipped to deal with samples characterized by high aleatoric uncertainty: digits four (4), seven (7) and nine (9) share certain similarities. So do digits five (5) and six (6) as well as three (3) and eight (8). These may be hard to distinguish from each other even after seeing many examples (and even for a human). It is therefore unsurprising to see that these digits often end up together in conformal sets. 
\n\n::: {#fig-plots .cell layout-nrow='3' execution_count=10}\n\n::: {.cell-output .cell-output-display}\n![Plot 1](mnist_files/figure-commonmark/fig-plots-output-1.svg){#fig-plots-1}\n:::\n\n::: {.cell-output .cell-output-display}\n![Plot 2](mnist_files/figure-commonmark/fig-plots-output-2.svg){#fig-plots-2}\n:::\n\n::: {.cell-output .cell-output-display}\n![Plot 3](mnist_files/figure-commonmark/fig-plots-output-3.svg){#fig-plots-3}\n:::\n\nConformalised predictions from an image classifier.\n:::\n\n\n## Evaluation\n\nAs always, we can also evaluate our conformal model in terms of coverage (correctness) and size-stratified coverage (adaptiveness).\n\n::: {.cell execution_count=11}\n``` {.julia .cell-code}\n_eval = evaluate!(\n mach,\n resampling=Holdout(rng=123, fraction_train=0.8),\n operation=predict,\n measure=[emp_coverage, ssc]\n)\ndisplay(_eval)\nprintln(\"Empirical coverage: $(round(_eval.measurement[1], digits=3))\")\nprintln(\"SSC: $(round(_eval.measurement[2], digits=3))\")\n```\n\n::: {.cell-output .cell-output-display}\n```\nPerformanceEvaluation object with these fields:\n measure, operation, measurement, per_fold,\n per_observation, fitted_params_per_fold,\n report_per_fold, train_test_rows\nExtract:\nβ”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€\nβ”‚ measure β”‚ operation β”‚ meas β‹―\nβ”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€\nβ”‚ emp_coverage (generic function with 1 method) β”‚ predict β”‚ 0.95 β‹―\nβ”‚ size_stratified_coverage (generic function with 1 method) β”‚ predict β”‚ 0.86 β‹―\n└───────────────────────────────────────────────────────────┴───────────┴───────\n 2 columns omitted\n```\n:::\n\n::: {.cell-output .cell-output-stdout}\n```\nEmpirical coverage: 0.955\nSSC: 0.867\n```\n:::\n:::\n\n\n\n\nUnsurprisingly, we can attain higher adaptivity (SSC) when using adaptive prediction sets:\n\n::: {.cell execution_count=13}\n``` {.julia .cell-code}\nconf_model = conformal_model(clf; method=:adaptive_inductive)\nmach = machine(conf_model, X, y)\nfit!(mach)\n_eval = evaluate!(\n mach,\n resampling=Holdout(rng=123, fraction_train=0.8),\n operation=predict,\n measure=[emp_coverage, ssc]\n)\nresults[:adaptive_inductive] = mach\ndisplay(_eval)\nprintln(\"Empirical coverage: $(round(_eval.measurement[1], digits=3))\")\nprintln(\"SSC: $(round(_eval.measurement[2], digits=3))\")\n```\n\n::: {.cell-output .cell-output-display}\n```\nPerformanceEvaluation object with these fields:\n measure, operation, measurement, per_fold,\n per_observation, fitted_params_per_fold,\n report_per_fold, train_test_rows\nExtract:\nβ”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€\nβ”‚ measure β”‚ operation β”‚ meas β‹―\nβ”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€\nβ”‚ emp_coverage (generic function with 1 method) β”‚ predict β”‚ 
0.99 β‹―\nβ”‚ size_stratified_coverage (generic function with 1 method) β”‚ predict β”‚ 0.96 β‹―\n└───────────────────────────────────────────────────────────┴───────────┴───────\n 2 columns omitted\n```\n:::\n\n::: {.cell-output .cell-output-stdout}\n```\nEmpirical coverage: 0.995\nSSC: 0.967\n```\n:::\n:::\n\n\nWe can also have a look at the resulting set size for both approaches:\n\n::: {.cell execution_count=14}\n``` {.julia .cell-code}\nplt_list = []\nfor (_mod, mach) in results\n push!(plt_list, bar(mach.model, mach.fitresult, X; title=String(_mod)))\nend\nplot(plt_list..., size=(800,300))\n```\n\n::: {.cell-output .cell-output-display execution_count=15}\n![Prediction interval width.](mnist_files/figure-commonmark/fig-setsize-output-1.svg){#fig-setsize}\n:::\n:::\n\n\n# References\n\n", + "markdown": "---\ntitle: How to Conformalize a Deep Image Classifier\n---\n\n\n\n\n\n\nDeep Learning is popular and --- for some tasks like image classification --- remarkably powerful. But it is also well-known that Deep Neural Networks (DNN) can be unstable [@goodfellow2014explaining] and poorly calibrated. Conformal Prediction can be used to mitigate these pitfalls. This how-to guide demonstrates how you can build an image classifier in `Flux.jl` and conformalize its predictions. For a formal treatment see @angelopoulos2022uncertainty.\n\n## The Task at Hand \n\nThe task at hand is to predict the labels of handwritten images of digits using the famous MNIST dataset [@lecun1998mnist]. Importing this popular machine learning dataset in Julia is made remarkably easy through `MLDatasets.jl`:\n\n::: {.cell execution_count=2}\n``` {.julia .cell-code}\nusing MLDatasets\nN = 1000\nXraw, yraw = MNIST(split=:train)[:]\nXraw = Xraw[:,:,1:N]\nyraw = yraw[1:N]\n```\n:::\n\n\nThe chart below shows a few random samples from the training data:\n\n::: {.cell execution_count=3}\n``` {.julia .cell-code}\nusing MLJ\nusing Images\nX = map(x -> convert2image(MNIST, x), eachslice(Xraw, dims=3))\ny = coerce(yraw, Multiclass)\n\nn_samples = 10\nmosaic(rand(X, n_samples)..., ncol=n_samples)\n```\n\n::: {.cell-output .cell-output-display execution_count=4}\n![Random samples from the MNIST dataset.](mnist_files/figure-commonmark/fig-samples-output-1.png){#fig-samples}\n:::\n:::\n\n\n## Building the Network\n\nTo model the mapping from image inputs to labels, we will rely on a simple Multi-Layer Perceptron (MLP). A great Julia library for Deep Learning is `Flux.jl`. But wait ... doesn't `ConformalPrediction.jl` work with models trained in `MLJ.jl`? That's right, but fortunately there exists a `Flux.jl` interface to `MLJ.jl`, namely `MLJFlux.jl`. The interface is still in its early stages, but already very powerful and easily accessible for anyone (like myself) who is used to building Neural Networks in `Flux.jl`. \n\nIn `Flux.jl`, you could build an MLP for this task as follows,\n\n::: {.cell execution_count=4}\n``` {.julia .cell-code}\nusing Flux\n\nmlp = Chain(\n Flux.flatten,\n Dense(prod((28,28)), 32, relu),\n Dense(32, 10)\n)\n```\n:::\n\n\nwhere `(28,28)` is just the input dimension (28x28 pixel images). 
Since we have ten digits, our output dimension is ten.^[For a full tutorial on how to build an MNIST image classifier relying solely on `Flux.jl`, check out this [tutorial](https://fluxml.ai/Flux.jl/stable/tutorials/2021-01-26-mlp/).]\n\nWe can do the exact same thing in `MLJFlux.jl` as follows,\n\n::: {.cell execution_count=5}\n``` {.julia .cell-code}\nusing MLJFlux\n\nbuilder = MLJFlux.@builder Chain(\n Flux.flatten,\n Dense(prod(n_in), 32, relu),\n Dense(32, n_out)\n)\n```\n:::\n\n\nwhere we rely on the `@builder` macro to make the transition from `Flux.jl` to `MLJ.jl` as seamless as possible. Finally, `MLJFlux.jl` already comes with a number of helper functions to define plain-vanilla networks. In this case, we will use the `ImageClassifier` with our custom builder and cross-entropy loss:\n\n::: {.cell execution_count=6}\n``` {.julia .cell-code}\nImageClassifier = @load ImageClassifier\nclf = ImageClassifier(\n builder=builder,\n epochs=10,\n loss=Flux.crossentropy\n)\n```\n:::\n\n\nThe generated instance `clf` is a model (in the `MLJ.jl` sense) so from this point on we can rely on standard `MLJ.jl` workflows. For example, we can wrap our model in data to create a machine and then evaluate it on a holdout set as follows:\n\n::: {.cell execution_count=7}\n``` {.julia .cell-code}\nmach = machine(clf, X, y)\n\nevaluate!(\n mach,\n resampling=Holdout(rng=123, fraction_train=0.8),\n operation=predict_mode,\n measure=[accuracy]\n)\n```\n:::\n\n\nThe accuracy of our very simple model is not amazing, but good enough for the purpose of this tutorial. For each image, our MLP returns a softmax output for each possible digit: 0,1,2,3,...,9. Since each individual softmax output is valued between zero and one, $y_k\in(0,1)$, this is commonly interpreted as a probability: $y_k \coloneqq p(y=k|X)$. Edge cases -- that is, values close to either zero or one -- indicate high predictive certainty. But this is only a heuristic notion of predictive uncertainty [@angelopoulos2021gentle]. Next, we will turn this heuristic notion of uncertainty into a rigorous one using Conformal Prediction.\n\n## Conformalizing the Network\n\nSince `clf` is a model, it is also compatible with our package: `ConformalPrediction.jl`. To conformalize our MLP, we therefore only need to call `conformal_model(clf)`. Since the generated instance `conf_model` is also just a model, we can still rely on standard `MLJ.jl` workflows. Below we first wrap it in data and then fit it. Aaaand ... we're done! Let's look at the results in the next section.\n\n::: {.cell execution_count=8}\n``` {.julia .cell-code}\nusing ConformalPrediction\nconf_model = conformal_model(clf; method=:simple_inductive)\nmach = machine(conf_model, X, y)\nfit!(mach)\n```\n:::\n\n\n## Results\n\n\n\nThe charts below present the results. The first row displays highly certain predictions, now defined in the rigorous sense of Conformal Prediction: in each case, the conformal set (just beneath the image) includes only one label. \n\nThe following two rows display increasingly uncertain predictions of set size two and three, respectively. They demonstrate that CP is well equipped to deal with samples characterized by high aleatoric uncertainty: digits four (4), seven (7) and nine (9) share certain similarities. So do digits five (5) and six (6) as well as three (3) and eight (8). These may be hard to distinguish from each other even after seeing many examples (and even for a human). 
It is therefore unsurprising to see that these digits often end up together in conformal sets. \n\n::: {#fig-plots .cell layout-nrow='3' execution_count=10}\n\n::: {.cell-output .cell-output-display}\n![Plot 1](mnist_files/figure-commonmark/fig-plots-output-1.svg){#fig-plots-1}\n:::\n\n::: {.cell-output .cell-output-display}\n![Plot 2](mnist_files/figure-commonmark/fig-plots-output-2.svg){#fig-plots-2}\n:::\n\n::: {.cell-output .cell-output-display}\n![Plot 3](mnist_files/figure-commonmark/fig-plots-output-3.svg){#fig-plots-3}\n:::\n\nConformalized predictions from an image classifier.\n:::\n\n\n## Evaluation\n\nAs always, we can also evaluate our conformal model in terms of coverage (correctness) and size-stratified coverage (adaptiveness).\n\n::: {.cell execution_count=11}\n``` {.julia .cell-code}\n_eval = evaluate!(\n mach,\n resampling=Holdout(rng=123, fraction_train=0.8),\n operation=predict,\n measure=[emp_coverage, ssc]\n)\ndisplay(_eval)\nprintln(\"Empirical coverage: $(round(_eval.measurement[1], digits=3))\")\nprintln(\"SSC: $(round(_eval.measurement[2], digits=3))\")\n```\n\n::: {.cell-output .cell-output-display}\n```\nPerformanceEvaluation object with these fields:\n measure, operation, measurement, per_fold,\n per_observation, fitted_params_per_fold,\n report_per_fold, train_test_rows\nExtract:\nβ”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€\nβ”‚ measure β”‚ operation β”‚ measurement β”‚ per β‹―\nβ”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€\nβ”‚ ConformalPrediction.emp_coverage β”‚ predict β”‚ 0.96 β”‚ [0. β‹―\nβ”‚ ConformalPrediction.size_stratified_coverage β”‚ predict β”‚ 0.885 β”‚ [0. 
β‹―\n└──────────────────────────────────────────────┴───────────┴─────────────┴──────\n 1 column omitted\n```\n:::\n\n::: {.cell-output .cell-output-stdout}\n```\nEmpirical coverage: 0.96\nSSC: 0.885\n```\n:::\n:::\n\n\n\n\nUnsurprisingly, we can attain higher adaptivity (SSC) when using adaptive prediction sets:\n\n::: {.cell execution_count=13}\n``` {.julia .cell-code}\nconf_model = conformal_model(clf; method=:adaptive_inductive)\nmach = machine(conf_model, X, y)\nfit!(mach)\n_eval = evaluate!(\n mach,\n resampling=Holdout(rng=123, fraction_train=0.8),\n operation=predict,\n measure=[emp_coverage, ssc]\n)\nresults[:adaptive_inductive] = mach\ndisplay(_eval)\nprintln(\"Empirical coverage: $(round(_eval.measurement[1], digits=3))\")\nprintln(\"SSC: $(round(_eval.measurement[2], digits=3))\")\n```\n\n::: {.cell-output .cell-output-display}\n```\nPerformanceEvaluation object with these fields:\n measure, operation, measurement, per_fold,\n per_observation, fitted_params_per_fold,\n report_per_fold, train_test_rows\nExtract:\nβ”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€\nβ”‚ measure β”‚ operation β”‚ measurement β”‚ per β‹―\nβ”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€\nβ”‚ ConformalPrediction.emp_coverage β”‚ predict β”‚ 1.0 β”‚ [1. β‹―\nβ”‚ ConformalPrediction.size_stratified_coverage β”‚ predict β”‚ 1.0 β”‚ [1. β‹―\n└──────────────────────────────────────────────┴───────────┴─────────────┴──────\n 1 column omitted\n```\n:::\n\n::: {.cell-output .cell-output-stdout}\n```\nEmpirical coverage: 1.0\nSSC: 1.0\n```\n:::\n:::
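\n\nFor intuition, an adaptive prediction set is assembled by adding labels in decreasing order of predicted probability until their cumulative mass reaches a threshold qΜ‚ calibrated on held-out data. The following is a minimal sketch of that general idea under simplifying assumptions (a single probability vector pΜ‚, a given qΜ‚, the calibration step omitted, and a function name of our own), not the package's actual implementation:\n\n``` julia\n# Toy sketch, not ConformalPrediction.jl's implementation.\n# Include labels by decreasing probability until the cumulative\n# probability mass reaches the calibrated threshold qΜ‚:\nfunction adaptive_region(pΜ‚::AbstractVector, qΜ‚::Real)\n    order = sortperm(pΜ‚, rev=true)\n    k = findfirst(>=(qΜ‚), cumsum(pΜ‚[order]))\n    return sort(order[1:something(k, length(pΜ‚))])\nend\n```\n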
\n\nWe can also have a look at the resulting set size for both approaches:\n\n::: {.cell execution_count=14}\n``` {.julia .cell-code}\nplt_list = []\nfor (_mod, mach) in results\n push!(plt_list, bar(mach.model, mach.fitresult, X; title=String(_mod)))\nend\nplot(plt_list..., size=(800,300))\n```\n\n::: {.cell-output .cell-output-display execution_count=15}\n![Prediction set size.](mnist_files/figure-commonmark/fig-setsize-output-1.svg){#fig-setsize}\n:::\n:::\n\n\n# References\n\n", "supporting": [ - "mnist_files" + "mnist_files/figure-commonmark" ], "filters": [] } } diff --git a/_freeze/docs/src/how_to_guides/mnist/figure-commonmark/fig-plots-output-1.svg b/_freeze/docs/src/how_to_guides/mnist/figure-commonmark/fig-plots-output-1.svg index 69e172c..3dd0a06 100644 --- a/_freeze/docs/src/how_to_guides/mnist/figure-commonmark/fig-plots-output-1.svg +++ b/_freeze/docs/src/how_to_guides/mnist/figure-commonmark/fig-plots-output-1.svg @@ -1,349 +1,283 @@ [SVG plot markup regenerated by Quarto; coordinate data only, nothing human-readable to show] diff --git a/_freeze/docs/src/how_to_guides/mnist/figure-commonmark/fig-plots-output-2.svg b/_freeze/docs/src/how_to_guides/mnist/figure-commonmark/fig-plots-output-2.svg index 07ddb40..8fe2a2b 100644 --- a/_freeze/docs/src/how_to_guides/mnist/figure-commonmark/fig-plots-output-2.svg +++ b/_freeze/docs/src/how_to_guides/mnist/figure-commonmark/fig-plots-output-2.svg @@ -1,356 +1,270 @@ [SVG plot markup regenerated by Quarto; coordinate data only, nothing human-readable to show] diff --git a/_freeze/docs/src/how_to_guides/mnist/figure-commonmark/fig-plots-output-3.svg b/_freeze/docs/src/how_to_guides/mnist/figure-commonmark/fig-plots-output-3.svg index 4231d68..799425f 100644 --- a/_freeze/docs/src/how_to_guides/mnist/figure-commonmark/fig-plots-output-3.svg +++ b/_freeze/docs/src/how_to_guides/mnist/figure-commonmark/fig-plots-output-3.svg @@ -1,356 +1,285 @@ [SVG plot markup regenerated by Quarto; coordinate data only, nothing human-readable to show] diff --git a/_freeze/docs/src/how_to_guides/mnist/figure-commonmark/fig-samples-output-1.png b/_freeze/docs/src/how_to_guides/mnist/figure-commonmark/fig-samples-output-1.png index e4de4a0..9b11cbd 100644 Binary files a/_freeze/docs/src/how_to_guides/mnist/figure-commonmark/fig-samples-output-1.png and b/_freeze/docs/src/how_to_guides/mnist/figure-commonmark/fig-samples-output-1.png differ diff --git a/_freeze/docs/src/how_to_guides/mnist/figure-commonmark/fig-setsize-output-1.svg b/_freeze/docs/src/how_to_guides/mnist/figure-commonmark/fig-setsize-output-1.svg index c217aad..9b77ec7 100644 --- a/_freeze/docs/src/how_to_guides/mnist/figure-commonmark/fig-setsize-output-1.svg +++ b/_freeze/docs/src/how_to_guides/mnist/figure-commonmark/fig-setsize-output-1.svg @@ -1,234 +1,97 @@ [SVG plot markup regenerated by Quarto; coordinate data only, nothing human-readable to show] diff --git a/_quarto.yml b/_quarto.yml index c5443d7..66efc23 100644 --- a/_quarto.yml +++ b/_quarto.yml @@ -5,7 +5,7 @@ project: crossref: fig-prefix: Figure tbl-prefix: Table -bibliography: https://raw.githubusercontent.com/pat-alt/bib/main/bib.bib +bibliography: bib.bib fig-format: png execute: @@ -14,5 +14,5 @@ execute: echo: true output: false -jupyter: julia-1.8 +jupyter: julia-1.9 diff --git a/bib.bib b/bib.bib new file mode 100644 index 0000000..b1535f4 --- /dev/null +++ b/bib.bib @@ -0,0 +1,2948 @@ +@TechReport{kingma2017adam, + author = {Kingma, Diederik P. and Ba, Jimmy}, + date = {2017-01}, + institution = {arXiv}, + title = {Adam: {A} {Method} for {Stochastic} {Optimization}}, + doi = {10.48550/arXiv.1412.6980}, + note = {arXiv:1412.6980 [cs] type: article}, + url = {http://arxiv.org/abs/1412.6980}, + urldate = {2023-05-17}, + abstract = {We introduce Adam, an algorithm for first-order gradient-based optimization of stochastic objective functions, based on adaptive estimates of lower-order moments. The method is straightforward to implement, is computationally efficient, has little memory requirements, is invariant to diagonal rescaling of the gradients, and is well suited for problems that are large in terms of data and/or parameters. The method is also appropriate for non-stationary objectives and problems with very noisy and/or sparse gradients. The hyper-parameters have intuitive interpretations and typically require little tuning. Some connections to related algorithms, on which Adam was inspired, are discussed. 
We also analyze the theoretical convergence properties of the algorithm and provide a regret bound on the convergence rate that is comparable to the best known results under the online convex optimization framework. Empirical results demonstrate that Adam works well in practice and compares favorably to other stochastic optimization methods. Finally, we discuss AdaMax, a variant of Adam based on the infinity norm.}, + annotation = {Comment: Published as a conference paper at the 3rd International Conference for Learning Representations, San Diego, 2015}, + file = {arXiv Fulltext PDF:https\://arxiv.org/pdf/1412.6980.pdf:application/pdf}, + keywords = {Computer Science - Machine Learning}, + shorttitle = {Adam}, +} + +@TechReport{xiao2017fashion, + author = {Xiao, Han and Rasul, Kashif and Vollgraf, Roland}, + date = {2017-09}, + institution = {arXiv}, + title = {Fashion-{MNIST}: a {Novel} {Image} {Dataset} for {Benchmarking} {Machine} {Learning} {Algorithms}}, + doi = {10.48550/arXiv.1708.07747}, + note = {arXiv:1708.07747 [cs, stat] type: article}, + url = {http://arxiv.org/abs/1708.07747}, + urldate = {2023-05-10}, + abstract = {We present Fashion-MNIST, a new dataset comprising of 28x28 grayscale images of 70,000 fashion products from 10 categories, with 7,000 images per category. The training set has 60,000 images and the test set has 10,000 images. Fashion-MNIST is intended to serve as a direct drop-in replacement for the original MNIST dataset for benchmarking machine learning algorithms, as it shares the same image size, data format and the structure of training and testing splits. The dataset is freely available at https://github.com/zalandoresearch/fashion-mnist}, + annotation = {Comment: Dataset is freely available at https://github.com/zalandoresearch/fashion-mnist Benchmark is available at http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/}, + file = {:xiao2017fashion - Fashion MNIST_ a Novel Image Dataset for Benchmarking Machine Learning Algorithms.pdf:PDF}, + keywords = {Computer Science - Machine Learning, Computer Science - Computer Vision and Pattern Recognition, Statistics - Machine Learning}, + shorttitle = {Fashion-{MNIST}}, +} + +@Online{mw2023fidelity, + author = {Merriam-Webster}, + title = {"Fidelity"}, + url = {https://www.merriam-webster.com/dictionary/fidelity}, + language = {en}, + organization = {Merriam-Webster}, + urldate = {2023-03-23}, + abstract = {the quality or state of being faithful; accuracy in details : exactness; the degree to which an electronic device (such as a record player, radio, or television) accurately reproduces its effect (such as sound or picture)… See the full definition}, +} + +@InProceedings{altmeyer2023endogenous, + author = {Altmeyer, Patrick and Angela, Giovan and Buszydlik, Aleksander and Dobiczek, Karol and van Deursen, Arie and Liem, Cynthia}, + booktitle = {First {IEEE} {Conference} on {Secure} and {Trustworthy} {Machine} {Learning}}, + title = {Endogenous {Macrodynamics} in {Algorithmic} {Recourse}}, + file = {:altmeyerendogenous - Endogenous Macrodynamics in Algorithmic Recourse.pdf:PDF}, + year = {2023}, +} + +%% This BibTeX bibliography file was created using BibDesk. 
+%% https://bibdesk.sourceforge.io/ + +%% Created for Patrick Altmeyer at 2022-12-13 12:58:22 +0100 + + +%% Saved with string encoding Unicode (UTF-8) + + + +@Article{abadie2002instrumental, + author = {Abadie, Alberto and Angrist, Joshua and Imbens, Guido}, + title = {Instrumental Variables Estimates of the Effect of Subsidized Training on the Quantiles of Trainee Earnings}, + number = {1}, + pages = {91--117}, + volume = {70}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Econometrica : journal of the Econometric Society}, + shortjournal = {Econometrica}, + year = {2002}, +} + +@Article{abadie2003economic, + author = {Abadie, Alberto and Gardeazabal, Javier}, + title = {The Economic Costs of Conflict: {{A}} Case Study of the {{Basque Country}}}, + number = {1}, + pages = {113--132}, + volume = {93}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {American economic review}, + year = {2003}, +} + +@InProceedings{ackerman2021machine, + author = {Ackerman, Samuel and Dube, Parijat and Farchi, Eitan and Raz, Orna and Zalmanovici, Marcel}, + booktitle = {2021 {{IEEE}}/{{ACM Third International Workshop}} on {{Deep Learning}} for {{Testing}} and {{Testing}} for {{Deep Learning}} ({{DeepTest}})}, + title = {Machine {{Learning Model Drift Detection Via Weak Data Slices}}}, + pages = {1--8}, + publisher = {{IEEE}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2021}, +} + +@Article{allen2017referencedependent, + author = {Allen, Eric J and Dechow, Patricia M and Pope, Devin G and Wu, George}, + title = {Reference-Dependent Preferences: {{Evidence}} from Marathon Runners}, + number = {6}, + pages = {1657--1672}, + volume = {63}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Management Science}, + year = {2017}, +} + +@Article{altmeyer2018option, + author = {Altmeyer, Patrick and Grapendal, Jacob Daniel and Pravosud, Makar and Quintana, Gand Derry}, + title = {Option Pricing in the {{Heston}} Stochastic Volatility Model: An Empirical Evaluation}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2018}, +} + +@Article{altmeyer2021deep, + author = {Altmeyer, Patrick and Agusti, Marc and Vidal-Quadras Costa, Ignacio}, + title = {Deep {{Vector Autoregression}} for {{Macroeconomic Data}}}, + url = {https://thevoice.bse.eu/wp-content/uploads/2021/07/ds21-project-agusti-et-al.pdf}, + bdsk-url-1 = {https://thevoice.bse.eu/wp-content/uploads/2021/07/ds21-project-agusti-et-al.pdf}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2021}, +} + +@Book{altmeyer2021deepvars, + author = {Altmeyer, Patrick}, + title = {Deepvars: {{Deep Vector Autoregession}}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2021}, +} + +@Misc{altmeyer2022counterfactualexplanations, + author = {Altmeyer, Patrick}, + title = {{{CounterfactualExplanations}}.Jl - a {{Julia}} Package for {{Counterfactual Explanations}} and {{Algorithmic Recourse}}}, + url = {https://github.com/pat-alt/CounterfactualExplanations.jl}, + bdsk-url-1 = {https://github.com/pat-alt/CounterfactualExplanations.jl}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2022}, +} + +@Software{altmeyerCounterfactualExplanationsJlJulia2022, + 
author = {Altmeyer, Patrick}, + title = {{{CounterfactualExplanations}}.Jl - a {{Julia}} Package for {{Counterfactual Explanations}} and {{Algorithmic Recourse}}}, + url = {https://github.com/pat-alt/CounterfactualExplanations.jl}, + version = {0.1.2}, + bdsk-url-1 = {https://github.com/pat-alt/CounterfactualExplanations.jl}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2022}, +} + +@Unpublished{angelopoulos2021gentle, + author = {Angelopoulos, Anastasios N. and Bates, Stephen}, + title = {A Gentle Introduction to Conformal Prediction and Distribution-Free Uncertainty Quantification}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {2107.07511}, + eprinttype = {arxiv}, + file = {:/Users/FA31DU/Zotero/storage/RKSUMYZG/Angelopoulos and Bates - 2021 - A gentle introduction to conformal prediction and .pdf:;:/Users/FA31DU/Zotero/storage/PRUEKRR3/2107.html:}, + year = {2021}, +} + +@Misc{angelopoulos2022uncertainty, + author = {Angelopoulos, Anastasios and Bates, Stephen and Malik, Jitendra and Jordan, Michael I.}, + title = {Uncertainty {{Sets}} for {{Image Classifiers}} Using {{Conformal Prediction}}}, + eprint = {2009.14193}, + eprinttype = {arxiv}, + abstract = {Convolutional image classifiers can achieve high predictive accuracy, but quantifying their uncertainty remains an unresolved challenge, hindering their deployment in consequential settings. Existing uncertainty quantification techniques, such as Platt scaling, attempt to calibrate the network's probability estimates, but they do not have formal guarantees. We present an algorithm that modifies any classifier to output a predictive set containing the true label with a user-specified probability, such as 90\%. The algorithm is simple and fast like Platt scaling, but provides a formal finite-sample coverage guarantee for every model and dataset. Our method modifies an existing conformal prediction algorithm to give more stable predictive sets by regularizing the small scores of unlikely classes after Platt scaling. In experiments on both Imagenet and Imagenet-V2 with ResNet-152 and other classifiers, our scheme outperforms existing approaches, achieving coverage with sets that are often factors of 5 to 10 smaller than a stand-alone Platt scaling baseline.}, + archiveprefix = {arXiv}, + bdsk-url-1 = {http://arxiv.org/abs/2009.14193}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + file = {:/Users/FA31DU/Zotero/storage/5BYIRBR2/Angelopoulos et al. 
- 2022 - Uncertainty Sets for Image Classifiers using Confo.pdf:;:/Users/FA31DU/Zotero/storage/2QJAKFKV/2009.html:}, + keywords = {Computer Science - Computer Vision and Pattern Recognition, Mathematics - Statistics Theory, Statistics - Machine Learning}, + month = sep, + number = {arXiv:2009.14193}, + primaryclass = {cs, math, stat}, + publisher = {{arXiv}}, + year = {2022}, +} + +@Article{angelucci2009indirect, + author = {Angelucci, Manuela and De Giorgi, Giacomo}, + title = {Indirect Effects of an Aid Program: How Do Cash Transfers Affect Ineligibles' Consumption?}, + number = {1}, + pages = {486--508}, + volume = {99}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {American economic review}, + year = {2009}, +} + +@Article{angrist1990lifetime, + author = {Angrist, Joshua D}, + title = {Lifetime Earnings and the {{Vietnam}} Era Draft Lottery: Evidence from Social Security Administrative Records}, + pages = {313--336}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {The American Economic Review}, + year = {1990}, +} + +@Unpublished{antoran2020getting, + author = {Antor{\'a}n, Javier and Bhatt, Umang and Adel, Tameem and Weller, Adrian and Hern{\'a}ndez-Lobato, Jos{\'e} Miguel}, + title = {Getting a Clue: {{A}} Method for Explaining Uncertainty Estimates}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {2006.06848}, + eprinttype = {arxiv}, + year = {2020}, +} + +@Article{arcones1992bootstrap, + author = {Arcones, Miguel A and Gine, Evarist}, + title = {On the Bootstrap of {{U}} and {{V}} Statistics}, + pages = {655--674}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {The Annals of Statistics}, + year = {1992}, +} + +@Article{ariely2003coherent, + author = {Ariely, Dan and Loewenstein, George and Prelec, Drazen}, + title = {``{{Coherent}} Arbitrariness'': {{Stable}} Demand Curves without Stable Preferences}, + number = {1}, + pages = {73--106}, + volume = {118}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {The Quarterly journal of economics}, + year = {2003}, +} + +@Article{ariely2006tom, + author = {Ariely, Dan and Loewenstein, George and Prelec, Drazen}, + title = {Tom {{Sawyer}} and the Construction of Value}, + number = {1}, + pages = {1--10}, + volume = {60}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Journal of Economic Behavior \& Organization}, + year = {2006}, +} + +@Article{arrieta2020explainable, + author = {Arrieta, Alejandro Barredo and Diaz-Rodriguez, Natalia and Del Ser, Javier and Bennetot, Adrien and Tabik, Siham and Barbado, Alberto and Garcia, Salvador and Gil-Lopez, Sergio and Molina, Daniel and Benjamins, Richard and others}, + title = {Explainable {{Artificial Intelligence}} ({{XAI}}): {{Concepts}}, Taxonomies, Opportunities and Challenges toward Responsible {{AI}}}, + pages = {82--115}, + volume = {58}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Information Fusion}, + year = {2020}, +} + +@Article{auer2002finitetime, + author = {Auer, Peter and Cesa-Bianchi, Nicolo and Fischer, Paul}, + title = {Finite-Time Analysis of the Multiarmed Bandit Problem}, + number = {2}, + pages = {235--256}, + volume = {47}, + date-added = {2022-12-13 12:58:01 
+0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Machine learning}, + year = {2002}, +} + +@Article{barabasi2016network, + author = {Barab{\'a}si, Albert-L{\'a}szl{\'o}}, + title = {Network {{Science}}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Network Science}, + year = {2016}, +} + +@Unpublished{bastounis2021mathematics, + author = {Bastounis, Alexander and Hansen, Anders C and Vla{\v c}i{\'c}, Verner}, + title = {The Mathematics of Adversarial Attacks in {{AI}}--{{Why}} Deep Learning Is Unstable despite the Existence of Stable Neural Networks}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {2109.06098}, + eprinttype = {arxiv}, + year = {2021}, +} + +@Article{bechara1997deciding, + author = {Bechara, Antoine and Damasio, Hanna and Tranel, Daniel and Damasio, Antonio R}, + title = {Deciding Advantageously before Knowing the Advantageous Strategy}, + number = {5304}, + pages = {1293--1295}, + volume = {275}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Science (New York, N.Y.)}, + shortjournal = {Science}, + year = {1997}, +} + +@Book{berlinet2011reproducing, + author = {Berlinet, Alain and Thomas-Agnan, Christine}, + title = {Reproducing Kernel {{Hilbert}} Spaces in Probability and Statistics}, + publisher = {{Springer Science \& Business Media}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2011}, +} + +@Misc{bernanke1990federal, + author = {Bernanke, Ben S}, + title = {The Federal Funds Rate and the Channels of Monetary Transnission}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + publisher = {{National Bureau of Economic Research Cambridge, Mass., USA}}, + year = {1990}, +} + +@Article{besbes2014stochastic, + author = {Besbes, Omar and Gur, Yonatan and Zeevi, Assaf}, + title = {Stochastic Multi-Armed-Bandit Problem with Non-Stationary Rewards}, + pages = {199--207}, + volume = {27}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Advances in neural information processing systems}, + year = {2014}, +} + +@Article{bholat2020impact, + author = {Bholat, D and Gharbawi, M and Thew, O}, + title = {The {{Impact}} of {{Covid}} on {{Machine Learning}} and {{Data Science}} in {{UK Banking}}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Bank of England Quarterly Bulletin, Q4}, + year = {2020}, +} + +@Book{bishop2006pattern, + author = {Bishop, Christopher M}, + title = {Pattern Recognition and Machine Learning}, + publisher = {{springer}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2006}, +} + +@Article{blaom2020mlj, + author = {Blaom, Anthony D. and Kiraly, Franz and Lienart, Thibaut and Simillides, Yiannis and Arenas, Diego and Vollmer, Sebastian J.}, + title = {{{MLJ}}: {{A Julia}} Package for Composable Machine Learning}, + doi = {10.21105/joss.02704}, + issn = {2475-9066}, + number = {55}, + pages = {2704}, + urldate = {2022-10-27}, + volume = {5}, + abstract = {Blaom et al., (2020). MLJ: A Julia package for composable machine learning. 
Journal of Open Source Software, 5(55), 2704, https://doi.org/10.21105/joss.02704}, + bdsk-url-1 = {https://joss.theoj.org/papers/10.21105/joss.02704}, + bdsk-url-2 = {https://doi.org/10.21105/joss.02704}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + file = {:/Users/FA31DU/Zotero/storage/7AY87FGP/Blaom et al. - 2020 - MLJ A Julia package for composable machine learni.pdf:;:/Users/FA31DU/Zotero/storage/D69YSMVF/joss.html:}, + journal = {Journal of Open Source Software}, + langid = {english}, + month = nov, + shorttitle = {{{MLJ}}}, + year = {2020}, +} + +@InProceedings{blundell2015weight, + author = {Blundell, Charles and Cornebise, Julien and Kavukcuoglu, Koray and Wierstra, Daan}, + booktitle = {International Conference on Machine Learning}, + title = {Weight Uncertainty in Neural Network}, + pages = {1613--1622}, + publisher = {{PMLR}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2015}, +} + +@Article{borch2022machine, + author = {Borch, Christian}, + title = {Machine Learning, Knowledge Risk, and Principal-Agent Problems in Automated Trading}, + pages = {101852}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Technology in Society}, + year = {2022}, +} + +@Unpublished{borisov2021deep, + author = {Borisov, Vadim and Leemann, Tobias and Se{\ss}ler, Kathrin and Haug, Johannes and Pawelczyk, Martin and Kasneci, Gjergji}, + title = {Deep Neural Networks and Tabular Data: {{A}} Survey}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {2110.01889}, + eprinttype = {arxiv}, + year = {2021}, +} + +@Article{bramoulle2009identification, + author = {Bramoull{\'e}, Yann and Djebbari, Habiba and Fortin, Bernard}, + title = {Identification of Peer Effects through Social Networks}, + number = {1}, + pages = {41--55}, + volume = {150}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Journal of econometrics}, + year = {2009}, +} + +@Article{bramoulle2020peer, + author = {Bramoull{\'e}, Yann and Djebbari, Habiba and Fortin, Bernard}, + title = {Peer Effects in Networks: {{A}} Survey}, + pages = {603--629}, + volume = {12}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Annual Review of Economics}, + year = {2020}, +} + +@Unpublished{branco2015survey, + author = {Branco, Paula and Torgo, Luis and Ribeiro, Rita}, + title = {A Survey of Predictive Modelling under Imbalanced Distributions}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {1505.01658}, + eprinttype = {arxiv}, + year = {2015}, +} + +@Book{brock1991nonlinear, + author = {Brock, William Allen and Hsieh, David Arthur and LeBaron, Blake Dean}, + title = {Nonlinear Dynamics, Chaos, and Instability: Statistical Theory and Economic Evidence}, + publisher = {{MIT press}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {1991}, +} + +@InProceedings{buolamwini2018gender, + author = {Buolamwini, Joy and Gebru, Timnit}, + booktitle = {Conference on Fairness, Accountability and Transparency}, + title = {Gender Shades: {{Intersectional}} Accuracy Disparities in Commercial Gender Classification}, + pages = {77--91}, + publisher = {{PMLR}},
+ date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2018}, +} + +@Unpublished{bussmann2020neural, + author = {Bussmann, Bart and Nys, Jannes and Latr{\'e}, Steven}, + title = {Neural {{Additive Vector Autoregression Models}} for {{Causal Discovery}} in {{Time Series Data}}}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {2010.09429}, + eprinttype = {arxiv}, + year = {2020}, +} + +@Report{card1993minimum, + author = {Card, David and Krueger, Alan B}, + title = {Minimum Wages and Employment: {{A}} Case Study of the Fast Food Industry in {{New Jersey}} and {{Pennsylvania}}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + school = {{National Bureau of Economic Research}}, + year = {1993}, +} + +@InProceedings{carlini2017evaluating, + author = {Carlini, Nicholas and Wagner, David}, + booktitle = {2017 Ieee Symposium on Security and Privacy (Sp)}, + title = {Towards Evaluating the Robustness of Neural Networks}, + pages = {39--57}, + publisher = {{IEEE}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2017}, +} + +@Article{carlisle2019racist, + author = {Carlisle, M.}, + title = {Racist Data Destruction? - a {{Boston}} Housing Dataset Controversy}, + url = {https://medium.com/@docintangible/racist-data-destruction-113e3eff54a8}, + bdsk-url-1 = {https://medium.com/@docintangible/racist-data-destruction-113e3eff54a8}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2019}, +} + +@Article{carrell2009does, + author = {Carrell, Scott E and Fullerton, Richard L and West, James E}, + title = {Does Your Cohort Matter? {{Measuring}} Peer Effects in College Achievement}, + number = {3}, + pages = {439--464}, + volume = {27}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Journal of Labor Economics}, + year = {2009}, +} + +@Article{carrell2013natural, + author = {Carrell, Scott E and Sacerdote, Bruce I and West, James E}, + title = {From Natural Variation to Optimal Policy? 
{{The}} Importance of Endogenous Peer Group Formation}, + number = {3}, + pages = {855--882}, + volume = {81}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Econometrica : journal of the Econometric Society}, + shortjournal = {Econometrica}, + year = {2013}, +} + +@Article{carrizosa2021generating, + author = {Carrizosa, Emilio and Ram{\'\i}rez-Ayerbe, Jasone and Romero, Dolores}, + title = {Generating {{Collective Counterfactual Explanations}} in {{Score-Based Classification}} via {{Mathematical Optimization}}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2021}, +} + +@Article{cascarino2022explainable, + author = {Cascarino, Giuseppe and Moscatelli, Mirko and Parlapiano, Fabio}, + title = {Explainable {{Artificial Intelligence}}: Interpreting Default Forecasting Models Based on {{Machine Learning}}}, + number = {674}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Bank of Italy Occasional Paper}, + year = {2022}, +} + +@Article{chandola2009anomaly, + author = {Chandola, Varun and Banerjee, Arindam and Kumar, Vipin}, + title = {Anomaly Detection: {{A}} Survey}, + number = {3}, + pages = {1--58}, + volume = {41}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {ACM computing surveys (CSUR)}, + year = {2009}, +} + +@Article{chapelle2011empirical, + author = {Chapelle, Olivier and Li, Lihong}, + title = {An Empirical Evaluation of Thompson Sampling}, + pages = {2249--2257}, + volume = {24}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Advances in neural information processing systems}, + year = {2011}, +} + +@Article{chetty2011adjustment, + author = {Chetty, Raj and Friedman, John N and Olsen, Tore and Pistaferri, Luigi}, + title = {Adjustment Costs, Firm Responses, and Micro vs.
Macro Labor Supply Elasticities: {{Evidence}} from {{Danish}} Tax Records}, + number = {2}, + pages = {749--804}, + volume = {126}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {The quarterly journal of economics}, + year = {2011}, +} + +@Article{cortes1995supportvector, + author = {Cortes, Corinna and Vapnik, Vladimir}, + title = {Support-Vector Networks}, + number = {3}, + pages = {273--297}, + volume = {20}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Machine learning}, + year = {1995}, +} + +@Article{crawford2019variable, + author = {Crawford, Lorin and Flaxman, Seth R and Runcie, Daniel E and West, Mike}, + title = {Variable Prioritization in Nonlinear Black Box Methods: {{A}} Genetic Association Case Study}, + number = {2}, + pages = {958}, + volume = {13}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {The annals of applied statistics}, + year = {2019}, +} + +@InProceedings{dai2022counterfactual, + author = {Dai, Xinyue and Keane, Mark T and Shalloo, Laurence and Ruelle, Elodie and Byrne, Ruth MJ}, + title = {Counterfactual Explanations for Prediction and Diagnosis in Xai}, + eventtitle = {Proceedings of the 2022 {{AAAI}}/{{ACM Conference}} on {{AI}}, {{Ethics}}, and {{Society}}}, + pages = {215--226}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2022}, +} + +@Article{danielsson2021artificial, + author = {Danielsson, Jon and Macrae, Robert and Uthemann, Andreas}, + title = {Artificial Intelligence and Systemic Risk}, + pages = {106290}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Journal of Banking \& Finance}, + year = {2021}, +} + +@Article{daxberger2021laplace, + author = {Daxberger, Erik and Kristiadi, Agustinus and Immer, Alexander and Eschenhagen, Runa and Bauer, Matthias and Hennig, Philipp}, + title = {Laplace {{Redux-Effortless Bayesian Deep Learning}}}, + volume = {34}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Advances in Neural Information Processing Systems}, + year = {2021}, +} + +@Article{dehejia1999causal, + author = {Dehejia, Rajeev H and Wahba, Sadek}, + title = {Causal Effects in Nonexperimental Studies: {{Reevaluating}} the Evaluation of Training Programs}, + number = {448}, + pages = {1053--1062}, + volume = {94}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Journal of the American statistical Association}, + year = {1999}, +} + +@Article{dell2010persistent, + author = {Dell, Melissa}, + title = {The Persistent Effects of {{Peru}}'s Mining Mita}, + number = {6}, + pages = {1863--1903}, + volume = {78}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Econometrica : journal of the Econometric Society}, + shortjournal = {Econometrica}, + year = {2010}, +} + +@Article{denhengst2020reinforcement, + author = {den Hengst, Floris and Grua, Eoin Martino and el Hassouni, Ali and Hoogendoorn, Mark}, + title = {Reinforcement Learning for Personalization: {{A}} Systematic Literature Review}, + issue = {Preprint}, + pages = {1--41}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Data Science}, + options = {useprefix=true}, + year = {2020}, +} + 
+@Article{deoliveira2021framework, + author = {de Oliveira, Raphael Mazzine Barbosa and Martens, David}, + title = {A Framework and Benchmarking Study for Counterfactual Generating Methods on Tabular Data}, + number = {16}, + pages = {7274}, + volume = {11}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Applied Sciences}, + options = {useprefix=true}, + year = {2021}, +} + +@Article{dhurandhar2018explanations, + author = {Dhurandhar, Amit and Chen, Pin-Yu and Luss, Ronny and Tu, Chun-Chen and Ting, Paishun and Shanmugam, Karthikeyan and Das, Payel}, + title = {Explanations Based on the Missing: {{Towards}} Contrastive Explanations with Pertinent Negatives}, + volume = {31}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Advances in neural information processing systems}, + year = {2018}, +} + +@InProceedings{dombrowski2021diffeomorphic, + author = {Dombrowski, Ann-Kathrin and Gerken, Jan E and Kessel, Pan}, + booktitle = {{{ICML Workshop}} on {{Invertible Neural Networks}}, {{Normalizing Flows}}, and {{Explicit Likelihood Models}}}, + title = {Diffeomorphic Explanations with Normalizing Flows}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2021}, +} + +@InProceedings{dorffner1996neural, + author = {Dorffner, Georg}, + booktitle = {Neural Network World}, + title = {Neural Networks for Time Series Processing}, + publisher = {{Citeseer}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {1996}, +} + +@Article{epstein1979stability, + author = {Epstein, Seymour}, + title = {The Stability of Behavior: {{I}}. {{On}} Predicting Most of the People Much of the Time.}, + number = {7}, + pages = {1097}, + volume = {37}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Journal of personality and social psychology}, + year = {1979}, +} + +@Online{barocas2022fairness, + author = {Solon Barocas and Moritz Hardt and Arvind Narayanan}, + title = {Fairness and Machine Learning}, + url = {https://fairmlbook.org/index.html}, + urldate = {2022-11-08}, + bdsk-url-1 = {https://fairmlbook.org/index.html}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + month = dec, + year = {2022}, +} + +@Article{falk2006clean, + author = {Falk, Armin and Ichino, Andrea}, + title = {Clean Evidence on Peer Effects}, + number = {1}, + pages = {39--57}, + volume = {24}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Journal of labor economics}, + year = {2006}, +} + +@Unpublished{fan2020interpretability, + author = {Fan, Fenglei and Xiong, Jinjun and Wang, Ge}, + title = {On Interpretability of Artificial Neural Networks}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {2001.02522}, + eprinttype = {arxiv}, + year = {2020}, +} + +@Article{fang2011dynamic, + author = {Fang, Hanming and Gavazza, Alessandro}, + title = {Dynamic Inefficiencies in an Employment-Based Health Insurance System: {{Theory}} and Evidence}, + number = {7}, + pages = {3047--77}, + volume = {101}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {American Economic Review}, + year = {2011}, +} + +@Article{fehr2000cooperation, + author = {Fehr, Ernst and Gachter, 
Simon}, + title = {Cooperation and Punishment in Public Goods Experiments}, + number = {4}, + pages = {980--994}, + volume = {90}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {American Economic Review}, + year = {2000}, +} + +@Article{fix1951important, + author = {Fix, E and Hodges, J}, + title = {An Important Contribution to Nonparametric Discriminant Analysis and Density Estimation}, + number = {57}, + pages = {233--238}, + volume = {3}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {International Statistical Review}, + year = {1951}, +} + +@Book{friedman2008monetary, + author = {Friedman, Milton and Schwartz, Anna Jacobson}, + title = {A Monetary History of the {{United States}}, 1867-1960}, + publisher = {{Princeton University Press}}, + volume = {14}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2008}, +} + +@InProceedings{gal2016dropout, + author = {Gal, Yarin and Ghahramani, Zoubin}, + booktitle = {International Conference on Machine Learning}, + title = {Dropout as a Bayesian Approximation: {{Representing}} Model Uncertainty in Deep Learning}, + pages = {1050--1059}, + publisher = {{PMLR}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2016}, +} + +@InProceedings{gal2017deep, + author = {Gal, Yarin and Islam, Riashat and Ghahramani, Zoubin}, + booktitle = {International {{Conference}} on {{Machine Learning}}}, + title = {Deep Bayesian Active Learning with Image Data}, + pages = {1183--1192}, + publisher = {{PMLR}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2017}, +} + +@Article{galizzi2019external, + author = {Galizzi, Matteo M and Navarro-Martinez, Daniel}, + title = {On the External Validity of Social Preference Games: A Systematic Lab-Field Study}, + number = {3}, + pages = {976--1002}, + volume = {65}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Management Science}, + year = {2019}, +} + +@Article{gama2014survey, + author = {Gama, Jo{\~a}o and {\v Z}liobait{\.e}, Indr{\.e} and Bifet, Albert and Pechenizkiy, Mykola and Bouchachia, Abdelhamid}, + title = {A Survey on Concept Drift Adaptation}, + number = {4}, + pages = {1--37}, + volume = {46}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {ACM computing surveys (CSUR)}, + year = {2014}, +} + +@Unpublished{garivier2008upperconfidence, + author = {Garivier, Aur{\'e}lien and Moulines, Eric}, + title = {On Upper-Confidence Bound Policies for Non-Stationary Bandit Problems}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {0805.3415}, + eprinttype = {arxiv}, + year = {2008}, +} + +@Book{gelman2013bayesian, + author = {Gelman, Andrew and Carlin, John B and Stern, Hal S and Dunson, David B and Vehtari, Aki and Rubin, Donald B}, + title = {Bayesian Data Analysis}, + publisher = {{CRC press}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2013}, +} + +@Article{gilbert1998immune, + author = {Gilbert, Daniel T and Pinel, Elizabeth C and Wilson, Timothy D and Blumberg, Stephen J and Wheatley, Thalia P}, + title = {Immune Neglect: A Source of Durability Bias in Affective Forecasting.}, + number = {3}, + pages = 
{617}, + volume = {75}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Journal of personality and social psychology}, + year = {1998}, +} + +@Article{gneezy2006uncertainty, + author = {Gneezy, Uri and List, John A and Wu, George}, + title = {The Uncertainty Effect: {{When}} a Risky Prospect Is Valued Less than Its Worst Possible Outcome}, + number = {4}, + pages = {1283--1309}, + volume = {121}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {The Quarterly Journal of Economics}, + year = {2006}, +} + +@InCollection{goan2020bayesian, + author = {Goan, Ethan and Fookes, Clinton}, + booktitle = {Case {{Studies}} in {{Applied Bayesian Data Science}}}, + title = {Bayesian {{Neural Networks}}: {{An Introduction}} and {{Survey}}}, + pages = {45--87}, + publisher = {{Springer}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2020}, +} + +@Article{goldsmith-pinkham2013social, + author = {Goldsmith-Pinkham, Paul and Imbens, Guido W}, + title = {Social Networks and the Identification of Peer Effects}, + number = {3}, + pages = {253--264}, + volume = {31}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Journal of Business \& Economic Statistics}, + year = {2013}, +} + +@Unpublished{goodfellow2014explaining, + author = {Goodfellow, Ian J and Shlens, Jonathon and Szegedy, Christian}, + title = {Explaining and Harnessing Adversarial Examples}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {1412.6572}, + eprinttype = {arxiv}, + year = {2014}, +} + +@Book{goodfellow2016deep, + author = {Goodfellow, Ian and Bengio, Yoshua and Courville, Aaron}, + title = {Deep {{Learning}}}, + publisher = {{MIT Press}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2016}, +} + +@Article{goodfriend2005incredible, + author = {Goodfriend, Marvin and King, Robert G}, + title = {The Incredible {{Volcker}} Disinflation}, + number = {5}, + pages = {981--1015}, + volume = {52}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Journal of Monetary Economics}, + year = {2005}, +} + +@Article{graham2017econometric, + author = {Graham, Bryan S}, + title = {An Econometric Model of Network Formation with Degree Heterogeneity}, + number = {4}, + pages = {1033--1063}, + volume = {85}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Econometrica : journal of the Econometric Society}, + shortjournal = {Econometrica}, + year = {2017}, +} + +@Article{greene2012econometric, + author = {Greene, William H}, + title = {Econometric Analysis, 7e}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Stern School of Business, New York University}, + year = {2012}, +} + +@Article{grether1979economic, + author = {Grether, David M and Plott, Charles R}, + title = {Economic Theory of Choice and the Preference Reversal Phenomenon}, + number = {4}, + pages = {623--638}, + volume = {69}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {The American Economic Review}, + year = {1979}, +} + +@Article{gretton2012kernel, + author = {Gretton, Arthur and Borgwardt, Karsten M and Rasch, Malte J
and Sch{\"o}lkopf, Bernhard and Smola, Alexander}, + title = {A Kernel Two-Sample Test}, + number = {1}, + pages = {723--773}, + volume = {13}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {The Journal of Machine Learning Research}, + year = {2012}, +} + +@Unpublished{griffith2020name, + author = {Griffith, Alan}, + title = {Name {{Your Friends}}, but {{Only Five}}? {{The Importance}} of {{Censoring}} in {{Peer Effects Estimates}} Using {{Social Network Data}}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2020}, +} + +@Unpublished{grinsztajn2022why, + author = {Grinsztajn, L{\'e}o and Oyallon, Edouard and Varoquaux, Ga{\"e}l}, + title = {Why Do Tree-Based Models Still Outperform Deep Learning on Tabular Data?}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {2207.08815}, + eprinttype = {arxiv}, + year = {2022}, +} + +@Misc{group2020detailed, + author = {Group, Open COVID-19 Data Working}, + title = {Detailed {{Epidemiological Data}} from the {{COVID-19 Outbreak}}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2020}, +} + +@InProceedings{gupta2011thompson, + author = {Gupta, Neha and Granmo, Ole-Christoffer and Agrawala, Ashok}, + booktitle = {2011 10th {{International Conference}} on {{Machine Learning}} and {{Applications}} and {{Workshops}}}, + title = {Thompson Sampling for Dynamic Multi-Armed Bandits}, + pages = {484--489}, + publisher = {{IEEE}}, + volume = {1}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2011}, +} + +@Book{hamilton2020time, + author = {Hamilton, James Douglas}, + title = {Time Series Analysis}, + publisher = {{Princeton university press}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2020}, +} + +@Article{hamon2020robustness, + author = {Hamon, Ronan and Junklewitz, Henrik and Sanchez, Ignacio}, + title = {Robustness and Explainability of Artificial Intelligence}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Publications Office of the European Union}, + year = {2020}, +} + +@Article{hamzacebi2008improving, + author = {Hamza{\c c}ebi, Co{\c s}kun}, + title = {Improving Artificial Neural Networks' Performance in Seasonal Time Series Forecasting}, + number = {23}, + pages = {4550--4559}, + volume = {178}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Information Sciences}, + year = {2008}, +} + +@InProceedings{hanneke2007bound, + author = {Hanneke, Steve}, + booktitle = {Proceedings of the 24th International Conference on {{Machine}} Learning}, + title = {A Bound on the Label Complexity of Agnostic Active Learning}, + pages = {353--360}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2007}, +} + +@Article{hansen2020virtue, + author = {Hansen, Kristian Bondo}, + title = {The Virtue of Simplicity: {{On}} Machine Learning Models in Algorithmic Trading}, + number = {1}, + pages = {2053951720926558}, + volume = {7}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Big Data \& Society}, + year = {2020}, +} + +@Article{hartland2006multiarmed, + author = {Hartland, C{\'e}dric and Gelly, Sylvain 
and Baskiotis, Nicolas and Teytaud, Olivier and Sebag, Michele}, + title = {Multi-Armed Bandit, Dynamic Environments and Meta-Bandits}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2006}, +} + +@Article{heckman1985alternative, + author = {Heckman, James J and Robb Jr, Richard}, + title = {Alternative Methods for Evaluating the Impact of Interventions: {{An}} Overview}, + number = {1-2}, + pages = {239--267}, + volume = {30}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Journal of econometrics}, + year = {1985}, +} + +@Article{hershfield2011increasing, + author = {Hershfield, Hal E and Goldstein, Daniel G and Sharpe, William F and Fox, Jesse and Yeykelis, Leo and Carstensen, Laura L and Bailenson, Jeremy N}, + title = {Increasing Saving Behavior through Age-Progressed Renderings of the Future Self}, + issue = {SPL}, + pages = {S23--S37}, + volume = {48}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Journal of Marketing Research}, + year = {2011}, +} + +@InProceedings{ho1995random, + author = {Ho, Tin Kam}, + booktitle = {Proceedings of 3rd International Conference on Document Analysis and Recognition}, + title = {Random Decision Forests}, + pages = {278--282}, + publisher = {{IEEE}}, + volume = {1}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {1995}, +} + +@Article{hochreiter1997long, + author = {Hochreiter, Sepp and Schmidhuber, J{\"u}rgen}, + title = {Long Short-Term Memory}, + number = {8}, + pages = {1735--1780}, + volume = {9}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Neural computation}, + year = {1997}, +} + +@Unpublished{hoff2021bayesoptimal, + author = {Hoff, Peter}, + title = {Bayes-Optimal Prediction with Frequentist Coverage Control}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {2105.14045}, + eprinttype = {arxiv}, + file = {:/Users/FA31DU/Zotero/storage/IQK27WVA/Hoff - 2021 - Bayes-optimal prediction with frequentist coverage.pdf:;:/Users/FA31DU/Zotero/storage/K8EAZA25/2105.html:}, + year = {2021}, +} + +@Misc{hoffman1994german, + author = {Hoffman, Hans}, + title = {German {{Credit Data}}}, + url = {https://archive.ics.uci.edu/ml/datasets/statlog+(german+credit+data)}, + bdsk-url-1 = {https://archive.ics.uci.edu/ml/datasets/statlog+(german+credit+data)}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {1994}, +} + +@Unpublished{houlsby2011bayesian, + author = {Houlsby, Neil and Husz{\'a}r, Ferenc and Ghahramani, Zoubin and Lengyel, M{\'a}t{\'e}}, + title = {Bayesian Active Learning for Classification and Preference Learning}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {1112.5745}, + eprinttype = {arxiv}, + year = {2011}, +} + +@Article{hsee1996evaluability, + author = {Hsee, Christopher K}, +
title = {The Evaluability Hypothesis: {{An}} Explanation for Preference Reversals between Joint and Separate Evaluations of Alternatives}, + number = {3}, + pages = {247--257}, + volume = {67}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Organizational behavior and human decision processes}, + year = {1996}, +} + +@Article{hsee2004music, + author = {Hsee, Christopher K and Rottenstreich, Yuval}, + title = {Music, Pandas, and Muggers: On the Affective Psychology of Value.}, + number = {1}, + pages = {23}, + volume = {133}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Journal of Experimental Psychology: General}, + year = {2004}, +} + +@Article{hsieh2016social, + author = {Hsieh, Chih-Sheng and Lee, Lung Fei}, + title = {A Social Interactions Model with Endogenous Friendship Formation and Selectivity}, + number = {2}, + pages = {301--319}, + volume = {31}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Journal of Applied Econometrics}, + year = {2016}, +} + +@Unpublished{immer2020improving, + author = {Immer, Alexander and Korzepa, Maciej and Bauer, Matthias}, + title = {Improving Predictions of Bayesian Neural Networks via Local Linearization}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {2008.08400}, + eprinttype = {arxiv}, + year = {2020}, +} + +@Unpublished{innes2018fashionable, + author = {Innes, Michael and Saba, Elliot and Fischer, Keno and Gandhi, Dhairya and Rudilosso, Marco Concetto and Joy, Neethu Mariya and Karmali, Tejan and Pal, Avik and Shah, Viral}, + title = {Fashionable Modelling with Flux}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {1811.01457}, + eprinttype = {arxiv}, + year = {2018}, +} + +@Article{innes2018flux, + author = {Innes, Mike}, + title = {Flux: {{Elegant}} Machine Learning with {{Julia}}}, + number = {25}, + pages = {602}, + volume = {3}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Journal of Open Source Software}, + year = {2018}, +} + +@Unpublished{ish-horowicz2019interpreting, + author = {Ish-Horowicz, Jonathan and Udwin, Dana and Flaxman, Seth and Filippi, Sarah and Crawford, Lorin}, + title = {Interpreting Deep Neural Networks through Variable Importance}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {1901.09839}, + eprinttype = {arxiv}, + year = {2019}, +} + +@InProceedings{jabbari2017fairness, + author = {Jabbari, Shahin and Joseph, Matthew and Kearns, Michael and Morgenstern, Jamie and Roth, Aaron}, + booktitle = {International {{Conference}} on {{Machine Learning}}}, + title = {Fairness in Reinforcement Learning}, + pages = {1617--1626}, + publisher = {{PMLR}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2017}, +} + +@Article{jackson2007meeting, + author = {Jackson, Matthew O and Rogers, Brian W}, + title = {Meeting Strangers and Friends of Friends: {{How}} Random Are Social Networks?}, + number = {3}, + pages = {890--915}, + volume = {97}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {American Economic Review}, + year = {2007}, +} + 
+@Unpublished{jeanneret2022diffusion, + author = {Jeanneret, Guillaume and Simon, Lo{\"\i}c and Jurie, Fr{\'e}d{\'e}ric}, + title = {Diffusion {{Models}} for {{Counterfactual Explanations}}}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {2203.15636}, + eprinttype = {arxiv}, + year = {2022}, +} + +@Article{johansson2005failure, + author = {Johansson, Petter and Hall, Lars and Sikstr{\"o}m, Sverker and Olsson, Andreas}, + title = {Failure to Detect Mismatches between Intention and Outcome in a Simple Decision Task}, + number = {5745}, + pages = {116--119}, + volume = {310}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Science (New York, N.Y.)}, + shortjournal = {Science}, + year = {2005}, +} + +@Article{johnsson2021estimation, + author = {Johnsson, Ida and Moon, Hyungsik Roger}, + title = {Estimation of Peer Effects in Endogenous Social Networks: {{Control}} Function Approach}, + number = {2}, + pages = {328--345}, + volume = {103}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Review of Economics and Statistics}, + year = {2021}, +} + +@Article{jolliffe2003modified, + author = {Jolliffe, Ian T and Trendafilov, Nickolay T and Uddin, Mudassir}, + title = {A Modified Principal Component Technique Based on the {{LASSO}}}, + number = {3}, + pages = {531--547}, + volume = {12}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Journal of computational and Graphical Statistics}, + year = {2003}, +} + +@Article{joseph2021forecasting, + author = {Joseph, Andreas and Kalamara, Eleni and Kapetanios, George and Potjagailo, Galina}, + title = {Forecasting {{UK}} Inflation Bottom Up}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2021}, +} + +@Unpublished{joshi2019realistic, + author = {Joshi, Shalmali and Koyejo, Oluwasanmi and Vijitbenjaronk, Warut and Kim, Been and Ghosh, Joydeep}, + title = {Towards Realistic Individual Recourse and Actionable Explanations in Black-Box Decision Making Systems}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {1907.09615}, + eprinttype = {arxiv}, + year = {2019}, +} + +@Unpublished{jospin2020handson, + author = {Jospin, Laurent Valentin and Buntine, Wray and Boussaid, Farid and Laga, Hamid and Bennamoun, Mohammed}, + title = {Hands-on {{Bayesian Neural Networks}}--a {{Tutorial}} for {{Deep Learning Users}}}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {2007.06823}, + eprinttype = {arxiv}, + year = {2020}, +} + +@Misc{kaggle2011give, + author = {Kaggle}, + title = {Give Me Some Credit, {{Improve}} on the State of the Art in Credit Scoring by Predicting the Probability That Somebody Will Experience Financial Distress in the next Two Years.}, + url = {https://www.kaggle.com/c/GiveMeSomeCredit}, + bdsk-url-1 = {https://www.kaggle.com/c/GiveMeSomeCredit}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + publisher = {{Kaggle}}, + year = {2011}, +} + +@Article{kahneman1979prospect, + author = {Kahneman, Daniel and Tversky, Amos}, + title = {Prospect {{Theory}}: {{An Analysis}} of {{Decision}} under {{Risk}}}, + pages = {263--291}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Econometrica: Journal of the Econometric Society}, + year = {1979}, +} + +@Article{kahneman1990experimental, + author = {Kahneman, Daniel and Knetsch, Jack L and Thaler, Richard H}, + title = {Experimental Tests of the Endowment Effect and the {{Coase}} Theorem}, + number = {6}, + pages = {1325--1348}, + volume = {98}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Journal of political Economy}, + year = {1990}, +} + +@Article{kahneman1992reference, + author = {Kahneman, Daniel}, + title = {Reference Points, Anchors, Norms, and Mixed Feelings}, + number = {2}, + pages = {296--312}, + volume = {51}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Organizational behavior and human decision processes}, + year = {1992}, +} + +@Unpublished{karimi2020algorithmic, + author = {Karimi, Amir-Hossein and Von K{\"u}gelgen, Julius and Sch{\"o}lkopf, Bernhard and Valera, Isabel}, + title = {Algorithmic Recourse under Imperfect Causal Knowledge: A Probabilistic Approach}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {2006.06831}, + eprinttype = {arxiv}, + year = {2020}, +} + +@Unpublished{karimi2020survey, + author = {Karimi, Amir-Hossein and Barthe, Gilles and Sch{\"o}lkopf, Bernhard and Valera, Isabel}, + title = {A Survey of Algorithmic Recourse: Definitions, Formulations, Solutions, and Prospects}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {2010.04050}, + eprinttype = {arxiv}, + year = {2020}, +} + +@InProceedings{karimi2021algorithmic, + author = {Karimi, Amir-Hossein and Sch{\"o}lkopf, Bernhard and Valera, Isabel}, + booktitle = {Proceedings of the 2021 {{ACM Conference}} on {{Fairness}}, {{Accountability}}, and {{Transparency}}}, + title = {Algorithmic Recourse: From Counterfactual Explanations to Interventions}, + pages = {353--362}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2021}, +} + +@InProceedings{kaur2020interpreting, + author = {Kaur, Harmanpreet and Nori, Harsha and Jenkins, Samuel and Caruana, Rich and Wallach, Hanna and Wortman Vaughan, Jennifer}, + booktitle = {Proceedings of the 2020 {{CHI}} Conference on Human Factors in Computing Systems}, + title = {Interpreting Interpretability: Understanding Data Scientists' Use of Interpretability Tools for Machine Learning}, + pages = {1--14}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2020}, +} + +@Article{kehoe2021defence, + author = {Kehoe, Aidan and Wittek, Peter and Xue, Yanbo and Pozas-Kerstjens, Alejandro}, + title = {Defence against Adversarial Attacks Using Classical and Quantum-Enhanced {{Boltzmann}} Machines}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = 
{Machine Learning: Science and Technology}, + year = {2021}, +} + +@Unpublished{kendall2017what, + author = {Kendall, Alex and Gal, Yarin}, + title = {What Uncertainties Do We Need in Bayesian Deep Learning for Computer Vision?}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {1703.04977}, + eprinttype = {arxiv}, + year = {2017}, +} + +@Article{kihoro2004seasonal, + author = {Kihoro, J and Otieno, RO and Wafula, C}, + title = {Seasonal Time Series Forecasting: {{A}} Comparative Study of {{ARIMA}} and {{ANN}} Models}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2004}, +} + +@Book{kilian2017structural, + author = {Kilian, Lutz and L{\"u}tkepohl, Helmut}, + title = {Structural Vector Autoregressive Analysis}, + publisher = {{Cambridge University Press}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2017}, +} + +@Unpublished{kingma2014adam, + author = {Kingma, Diederik P and Ba, Jimmy}, + title = {Adam: {{A}} Method for Stochastic Optimization}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {1412.6980}, + eprinttype = {arxiv}, + year = {2014}, +} + +@Article{kirsch2019batchbald, + author = {Kirsch, Andreas and Van Amersfoort, Joost and Gal, Yarin}, + title = {Batchbald: {{Efficient}} and Diverse Batch Acquisition for Deep Bayesian Active Learning}, + pages = {7026--7037}, + volume = {32}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Advances in neural information processing systems}, + year = {2019}, +} + +@Unpublished{kuiper2021exploring, + author = {Kuiper, Ouren and van den Berg, Martin and van den Burgt, Joost and Leijnen, Stefan}, + title = {Exploring {{Explainable AI}} in the {{Financial Sector}}: {{Perspectives}} of {{Banks}} and {{Supervisory Authorities}}}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {2111.02244}, + eprinttype = {arxiv}, + year = {2021}, +} + +@Article{kydland1982time, + author = {Kydland, Finn E and Prescott, Edward C}, + title = {Time to Build and Aggregate Fluctuations}, + pages = {1345--1370}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Econometrica: Journal of the Econometric Society}, + year = {1982}, +} + +@Unpublished{lachapelle2019gradientbased, + author = {Lachapelle, S{\'e}bastien and Brouillard, Philippe and Deleu, Tristan and Lacoste-Julien, Simon}, + title = {Gradient-Based Neural Dag Learning}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {1906.02226}, + eprinttype = {arxiv}, + year = {2019}, +} + +@InProceedings{lakkaraju2020how, + author = {Lakkaraju, Himabindu and Bastani, Osbert}, + booktitle = {Proceedings of the {{AAAI}}/{{ACM Conference}} on {{AI}}, {{Ethics}}, and {{Society}}}, + title = {" {{How}} Do {{I}} Fool You?" 
{{Manipulating User Trust}} via {{Misleading Black Box Explanations}}}, + pages = {79--85}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2020}, +} + +@Unpublished{lakshminarayanan2016simple, + author = {Lakshminarayanan, Balaji and Pritzel, Alexander and Blundell, Charles}, + title = {Simple and Scalable Predictive Uncertainty Estimation Using Deep Ensembles}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {1612.01474}, + eprinttype = {arxiv}, + year = {2016}, +} + +@Unpublished{laugel2017inverse, + author = {Laugel, Thibault and Lesot, Marie-Jeanne and Marsala, Christophe and Renard, Xavier and Detyniecki, Marcin}, + title = {Inverse Classification for Comparison-Based Interpretability in Machine Learning}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {1712.08443}, + eprinttype = {arxiv}, + shortjournal = {arXiv preprint arXiv:1712.08443}, + year = {2017}, +} + +@Thesis{lawrence2001variational, + author = {Lawrence, Neil David}, + title = {Variational Inference in Probabilistic Models}, + type = {phdthesis}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + school = {{University of Cambridge}}, + year = {2001}, +} + +@Article{lecun1998mnist, + author = {LeCun, Yann}, + title = {The {{MNIST}} Database of Handwritten Digits}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + shortjournal = {http://yann.lecun.com/exdb/mnist/}, + year = {1998}, +} + +@Article{lee2003best, + author = {Lee, Lung-fei}, + title = {Best Spatial Two-Stage Least Squares Estimators for a Spatial Autoregressive Model with Autoregressive Disturbances}, + number = {4}, + pages = {307--335}, + volume = {22}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Econometric Reviews}, + year = {2003}, +} + +@Article{lerner2013financial, + author = {Lerner, Jennifer S and Li, Ye and Weber, Elke U}, + title = {The Financial Costs of Sadness}, + number = {1}, + pages = {72--79}, + volume = {24}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Psychological science}, + year = {2013}, +} + +@Article{list2004neoclassical, + author = {List, John A}, + title = {Neoclassical Theory versus Prospect Theory: {{Evidence}} from the Marketplace}, + number = {2}, + pages = {615--625}, + volume = {72}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Econometrica : journal of the Econometric Society}, + shortjournal = {Econometrica}, + year = {2004}, +} + +@Article{lucas1976econometric, + author = {Lucas, Jr., Robert E.}, + title = {Econometric Policy Evaluation: {{A}} Critique}, + note = {In K. Brunner and A. Meltzer (eds.), The Phillips Curve and Labor Markets, North Holland}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {1976}, +} + +@InProceedings{lundberg2017unified, + author = {Lundberg, Scott M and Lee, Su-In}, + booktitle = {Proceedings of the 31st International Conference on Neural Information Processing Systems}, + title = {A Unified Approach to Interpreting Model Predictions}, + pages = {4768--4777}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2017}, +} + +@Book{lutkepohl2005new, + author = {L{\"u}tkepohl, Helmut}, + title = {New Introduction to Multiple Time Series Analysis}, + publisher = {{Springer Science \& Business Media}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2005}, +} + +@Article{madrian2001power, + author = {Madrian, Brigitte C and Shea, Dennis F}, + title = {The Power of Suggestion: {{Inertia}} in 401 (k) Participation and Savings Behavior}, + number = {4}, + pages = {1149--1187}, + volume = {116}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {The Quarterly journal of economics}, + year = {2001}, +} + +@Book{manning2008introduction, + author = {Manning, Christopher D and Sch{\"u}tze, Hinrich and Raghavan, Prabhakar}, + title = {Introduction to Information Retrieval}, + publisher = {{Cambridge university press}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2008}, +} + +@Misc{manokhin2022awesome, + author = {Manokhin, Valery}, + title = {Awesome Conformal Prediction}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2022}, +} + +@Article{manski1993identification, + author = {Manski, Charles F}, + title = {Identification of Endogenous Social Effects: {{The}} Reflection Problem}, + number = {3}, + pages = {531--542}, + volume = {60}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {The review of economic studies}, + year = {1993}, +} + +@Article{markle2018goals, + author = {Markle, Alex and Wu, George and White, Rebecca and Sackett, Aaron}, + title = {Goals as Reference Points in Marathon Running: {{A}} Novel Test of Reference Dependence}, + number = {1}, + pages = {19--50}, + volume = {56}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Journal of Risk and Uncertainty}, + year = {2018}, +} + +@Article{masini2021machine, + author = {Masini, Ricardo P and Medeiros, Marcelo C and Mendes, Eduardo F}, + title = {Machine Learning Advances for Time Series Forecasting}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Journal of Economic Surveys}, + year = {2021}, +} + +@Article{mccracken2016fredmd, + author = {McCracken, Michael W and Ng, Serena}, + title = {{{FRED-MD}}: {{A}} Monthly Database for Macroeconomic Research}, + number = {4}, + pages = {574--589}, + volume = {34}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Journal of Business \& Economic Statistics}, + year = {2016}, +} + +@Article{mcculloch1990logical, + author = {McCulloch, Warren S and Pitts, Walter}, + title = {A Logical Calculus of the Ideas Immanent in Nervous Activity}, + number = {1}, + pages = {99--115}, + volume = {52}, + date-added = {2022-12-13
12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Bulletin of mathematical biology}, + year = {1990}, +} + +@Article{migut2015visualizing, + author = {Migut, MA and Worring, Marcel and Veenman, Cor J}, + title = {Visualizing Multi-Dimensional Decision Boundaries in {{2D}}}, + number = {1}, + pages = {273--295}, + volume = {29}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Data Mining and Knowledge Discovery}, + year = {2015}, +} + +@Article{miller2019explanation, + author = {Miller, Tim}, + title = {Explanation in Artificial Intelligence: {{Insights}} from the Social Sciences}, + pages = {1--38}, + volume = {267}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Artificial intelligence}, + year = {2019}, +} + +@InProceedings{miller2020strategic, + author = {Miller, John and Milli, Smitha and Hardt, Moritz}, + booktitle = {Proceedings of the 37th {{International Conference}} on {{Machine Learning}}}, + title = {Strategic {{Classification}} Is {{Causal Modeling}} in {{Disguise}}}, + eventtitle = {International {{Conference}} on {{Machine Learning}}}, + pages = {6917--6926}, + publisher = {{PMLR}}, + url = {https://proceedings.mlr.press/v119/miller20b.html}, + urldate = {2022-11-03}, + abstract = {Consequential decision-making incentivizes individuals to strategically adapt their behavior to the specifics of the decision rule. While a long line of work has viewed strategic adaptation as gaming and attempted to mitigate its effects, recent work has instead sought to design classifiers that incentivize individuals to improve a desired quality. Key to both accounts is a cost function that dictates which adaptations are rational to undertake. In this work, we develop a causal framework for strategic adaptation. Our causal perspective clearly distinguishes between gaming and improvement and reveals an important obstacle to incentive design. We prove any procedure for designing classifiers that incentivize improvement must inevitably solve a non-trivial causal inference problem. We show a similar result holds for designing cost functions that satisfy the requirements of previous work. With the benefit of hindsight, our results show much of the prior work on strategic classification is causal modeling in disguise.}, + bdsk-url-1 = {https://proceedings.mlr.press/v119/miller20b.html}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + file = {:/Users/FA31DU/Zotero/storage/46I2QMPI/Miller et al. - 2020 - Strategic Classification is Causal Modeling in Dis.pdf:;:/Users/FA31DU/Zotero/storage/NWREET6B/Miller et al. 
- 2020 - Strategic Classification is Causal Modeling in Dis.pdf:}, + issn = {2640-3498}, + langid = {english}, + month = nov, + year = {2020}, +} + +@Article{mischel1988nature, + author = {Mischel, Walter and Shoda, Yuichi and Peake, Philip K}, + title = {The Nature of Adolescent Competencies Predicted by Preschool Delay of Gratification.}, + number = {4}, + pages = {687}, + volume = {54}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Journal of personality and social psychology}, + year = {1988}, +} + +@InProceedings{mittelstadt2019explaining, + author = {Mittelstadt, Brent and Russell, Chris and Wachter, Sandra}, + booktitle = {Proceedings of the Conference on Fairness, Accountability, and Transparency}, + title = {Explaining Explanations in {{AI}}}, + pages = {279--288}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2019}, +} + +@Book{molnar2020interpretable, + author = {Molnar, Christoph}, + title = {Interpretable Machine Learning}, + publisher = {{Lulu. com}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2020}, +} + +@Book{morgan2015counterfactuals, + author = {Morgan, Stephen L and Winship, Christopher}, + title = {Counterfactuals and Causal Inference}, + publisher = {{Cambridge University Press}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2015}, +} + +@Article{mosteller1951experimental, + author = {Mosteller, Frederick and Nogee, Philip}, + title = {An Experimental Measurement of Utility}, + number = {5}, + pages = {371--404}, + volume = {59}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Journal of Political Economy}, + year = {1951}, +} + +@InProceedings{mothilal2020explaining, + author = {Mothilal, Ramaravind K and Sharma, Amit and Tan, Chenhao}, + booktitle = {Proceedings of the 2020 {{Conference}} on {{Fairness}}, {{Accountability}}, and {{Transparency}}}, + title = {Explaining Machine Learning Classifiers through Diverse Counterfactual Explanations}, + pages = {607--617}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2020}, +} + +@Book{murphy2012machine, + author = {Murphy, Kevin P}, + title = {Machine Learning: {{A}} Probabilistic Perspective}, + publisher = {{MIT press}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2012}, +} + +@Book{murphy2022probabilistic, + author = {Murphy, Kevin P}, + title = {Probabilistic {{Machine Learning}}: {{An}} Introduction}, + publisher = {{MIT Press}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2022}, +} + +@Article{nagel1995unraveling, + author = {Nagel, Rosemarie}, + title = {Unraveling in Guessing Games: {{An}} Experimental Study}, + number = {5}, + pages = {1313--1326}, + volume = {85}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {The American Economic Review}, + year = {1995}, +} + +@Unpublished{navarro-martinez2021bridging, + author = {Navarro-Martinez, Daniel and Wang, Xinghua},
+ title = {Bridging the Gap between the Lab and the Field: {{Dictator}} Games and Donations}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2021}, +} + +@InProceedings{nelson2015evaluating, + author = {Nelson, Kevin and Corbin, George and Anania, Mark and Kovacs, Matthew and Tobias, Jeremy and Blowers, Misty}, + booktitle = {2015 {{IEEE Symposium}} on {{Computational Intelligence}} for {{Security}} and {{Defense Applications}} ({{CISDA}})}, + title = {Evaluating Model Drift in Machine Learning Algorithms}, + pages = {1--8}, + publisher = {{IEEE}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2015}, +} + +@Book{nocedal2006numerical, + author = {Nocedal, Jorge and Wright, Stephen}, + title = {Numerical Optimization}, + publisher = {{Springer Science \& Business Media}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2006}, +} + +@Misc{oecd2021artificial, + author = {{OECD}}, + title = {Artificial {{Intelligence}}, {{Machine Learning}} and {{Big Data}} in {{Finance}}: {{Opportunities}}, {{Challenges}} and {{Implications}} for {{Policy Makers}}}, + url = {https://www.oecd.org/finance/financial-markets/Artificial-intelligence-machine-learning-big-data-in-finance.pdf}, + bdsk-url-1 = {https://www.oecd.org/finance/financial-markets/Artificial-intelligence-machine-learning-big-data-in-finance.pdf}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2021}, +} + +@Book{oneil2016weapons, + author = {O'Neil, Cathy}, + title = {Weapons of Math Destruction: {{How}} Big Data Increases Inequality and Threatens Democracy}, + publisher = {{Crown}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2016}, +} + +@Article{pace1997sparse, + author = {Pace, R Kelley and Barry, Ronald}, + title = {Sparse Spatial Autoregressions}, + number = {3}, + pages = {291--297}, + volume = {33}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Statistics \& Probability Letters}, + year = {1997}, +} + +@Unpublished{parr2018matrix, + author = {Parr, Terence and Howard, Jeremy}, + title = {The Matrix Calculus You Need for Deep Learning}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {1802.01528}, + eprinttype = {arxiv}, + year = {2018}, +} + +@Unpublished{pawelczyk2021carla, + author = {Pawelczyk, Martin and Bielawski, Sascha and van den Heuvel, Johannes and Richter, Tobias and Kasneci, Gjergji}, + title = {Carla: A Python Library to Benchmark Algorithmic Recourse and Counterfactual Explanation Algorithms}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13
12:58:01 +0100}, + eprint = {2108.00783}, + eprinttype = {arxiv}, + year = {2021}, +} + +@Book{pearl2018book, + author = {Pearl, Judea and Mackenzie, Dana}, + title = {The Book of Why: The New Science of Cause and Effect}, + publisher = {{Basic books}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2018}, +} + +@Article{pearl2019seven, + author = {Pearl, Judea}, + title = {The Seven Tools of Causal Inference, with Reflections on Machine Learning}, + number = {3}, + pages = {54--60}, + volume = {62}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Communications of the ACM}, + year = {2019}, +} + +@Article{pedregosa2011scikitlearn, + author = {Pedregosa, Fabian and Varoquaux, Ga{\"e}l and Gramfort, Alexandre and Michel, Vincent and Thirion, Bertrand and Grisel, Olivier and Blondel, Mathieu and Prettenhofer, Peter and Weiss, Ron and Dubourg, Vincent and others}, + title = {Scikit-Learn: {{Machine}} Learning in {{Python}}}, + pages = {2825--2830}, + volume = {12}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {the Journal of machine Learning research}, + year = {2011}, +} + +@Book{perry2010economic, + author = {Perry, George L and Tobin, James}, + title = {Economic {{Events}}, {{Ideas}}, and {{Policies}}: The 1960s and After}, + publisher = {{Brookings Institution Press}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2010}, +} + +@Article{pfaff2008var, + author = {Pfaff, Bernhard and others}, + title = {{{VAR}}, {{SVAR}} and {{SVEC}} Models: {{Implementation}} within {{R}} Package Vars}, + number = {4}, + pages = {1--32}, + volume = {27}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Journal of Statistical Software}, + year = {2008}, +} + +@Book{pindyck2014microeconomics, + author = {Pindyck, Robert S and Rubinfeld, Daniel L}, + title = {Microeconomics}, + publisher = {{Pearson Education}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2014}, +} + +@Article{pope2011numbers, + author = {Pope, Devin and Simonsohn, Uri}, + title = {Round Numbers as Goals: {{Evidence}} from Baseball, {{SAT}} Takers, and the Lab}, + number = {1}, + pages = {71--79}, + volume = {22}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Psychological science}, + year = {2011}, +} + +@InProceedings{poyiadzi2020face, + author = {Poyiadzi, Rafael and Sokol, Kacper and Santos-Rodriguez, Raul and De Bie, Tijl and Flach, Peter}, + booktitle = {Proceedings of the {{AAAI}}/{{ACM Conference}} on {{AI}}, {{Ethics}}, and {{Society}}}, + title = {{{FACE}}: {{Feasible}} and Actionable Counterfactual Explanations}, + pages = {344--350}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2020}, +} + +@Article{qu2015estimating, + author = {Qu, Xi and Lee, Lung-fei}, + title = {Estimating a Spatial Autoregressive Model with an Endogenous Spatial Weight Matrix}, + number = {2}, + pages = {209--232}, + volume = {184}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Journal of Econometrics}, + year = {2015}, +} + +@Article{rabanser2019failing, + author = {Rabanser, Stephan and G{\"u}nnemann, Stephan and Lipton, Zachary}, + title = {Failing Loudly: 
{{An}} Empirical Study of Methods for Detecting Dataset Shift}, + volume = {32}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Advances in Neural Information Processing Systems}, + year = {2019}, +} + +@Unpublished{raghunathan2019adversarial, + author = {Raghunathan, Aditi and Xie, Sang Michael and Yang, Fanny and Duchi, John C and Liang, Percy}, + title = {Adversarial Training Can Hurt Generalization}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {1906.06032}, + eprinttype = {arxiv}, + year = {2019}, +} + +@Unpublished{raj2017taming, + author = {Raj, Vishnu and Kalyani, Sheetal}, + title = {Taming Non-Stationary Bandits: {{A Bayesian}} Approach}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {1707.09727}, + eprinttype = {arxiv}, + year = {2017}, +} + +@InProceedings{rasmussen2003gaussian, + author = {Rasmussen, Carl Edward}, + booktitle = {Summer School on Machine Learning}, + title = {Gaussian Processes in Machine Learning}, + pages = {63--71}, + publisher = {{Springer}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2003}, +} + +@InProceedings{ribeiro2016why, + author = {Ribeiro, Marco Tulio and Singh, Sameer and Guestrin, Carlos}, + booktitle = {Proceedings of the 22nd {{ACM SIGKDD}} International Conference on Knowledge Discovery and Data Mining}, + title = {"{{Why}} Should i Trust You?" {{Explaining}} the Predictions of Any Classifier}, + pages = {1135--1144}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2016}, +} + +@Article{romer1989does, + author = {Romer, Christina D and Romer, David H}, + title = {Does Monetary Policy Matter? {{A}} New Test in the Spirit of {{Friedman}} and {{Schwartz}}}, + pages = {121--170}, + volume = {4}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {NBER macroeconomics annual}, + year = {1989}, +} + +@Article{rudin2019stop, + author = {Rudin, Cynthia}, + title = {Stop Explaining Black Box Machine Learning Models for High Stakes Decisions and Use Interpretable Models Instead}, + number = {5}, + pages = {206--215}, + volume = {1}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Nature Machine Intelligence}, + year = {2019}, +} + +@Article{sacerdote2001peer, + author = {Sacerdote, Bruce}, + title = {Peer Effects with Random Assignment: {{Results}} for {{Dartmouth}} Roommates}, + number = {2}, + pages = {681--704}, + volume = {116}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {The Quarterly journal of economics}, + year = {2001}, +} + +@Article{sadinle2019least, + author = {Sadinle, Mauricio and Lei, Jing and Wasserman, Larry}, + title = {Least Ambiguous Set-Valued Classifiers with Bounded Error Levels}, + number = {525}, + pages = {223--234}, + volume = {114}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + file = {:/Users/FA31DU/Zotero/storage/YXQ8N76A/Sadinle et al. 
- 2019 - Least ambiguous set-valued classifiers with bounde.pdf:;:/Users/FA31DU/Zotero/storage/ZHB56F3V/01621459.2017.html:}, + journal = {Journal of the American Statistical Association}, + publisher = {{Taylor \& Francis}}, + year = {2019}, +} + +@InProceedings{satopaa2011finding, + author = {Satopaa, Ville and Albrecht, Jeannie and Irwin, David and Raghavan, Barath}, + booktitle = {2011 31st International Conference on Distributed Computing Systems Workshops}, + title = {Finding a" Kneedle" in a Haystack: {{Detecting}} Knee Points in System Behavior}, + pages = {166--171}, + publisher = {{IEEE}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2011}, +} + +@InProceedings{schut2021generating, + author = {Schut, Lisa and Key, Oscar and Mc Grath, Rory and Costabello, Luca and Sacaleanu, Bogdan and Gal, Yarin and others}, + booktitle = {International {{Conference}} on {{Artificial Intelligence}} and {{Statistics}}}, + title = {Generating {{Interpretable Counterfactual Explanations By Implicit Minimisation}} of {{Epistemic}} and {{Aleatoric Uncertainties}}}, + pages = {1756--1764}, + publisher = {{PMLR}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2021}, +} + +@Book{schutze2008introduction, + author = {Sch{\"u}tze, Hinrich and Manning, Christopher D and Raghavan, Prabhakar}, + title = {Introduction to Information Retrieval}, + publisher = {{Cambridge University Press Cambridge}}, + volume = {39}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2008}, +} + +@Article{shafir1993reasonbased, + author = {Shafir, Eldar and Simonson, Itamar and Tversky, Amos}, + title = {Reason-Based Choice}, + number = {1-2}, + pages = {11--36}, + volume = {49}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Cognition}, + year = {1993}, +} + +@Article{simonson1989choice, + author = {Simonson, Itamar}, + title = {Choice Based on Reasons: {{The}} Case of Attraction and Compromise Effects}, + number = {2}, + pages = {158--174}, + volume = {16}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Journal of consumer research}, + year = {1989}, +} + +@Article{sims1986are, + author = {Sims, Christopher A and others}, + title = {Are Forecasting Models Usable for Policy Analysis?}, + issue = {Win}, + pages = {2--16}, + volume = {10}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Quarterly Review}, + year = {1986}, +} + +@InProceedings{slack2020fooling, + author = {Slack, Dylan and Hilgard, Sophie and Jia, Emily and Singh, Sameer and Lakkaraju, Himabindu}, + booktitle = {Proceedings of the {{AAAI}}/{{ACM Conference}} on {{AI}}, {{Ethics}}, and {{Society}}}, + title = {Fooling Lime and Shap: {{Adversarial}} Attacks on Post Hoc Explanation Methods}, + pages = {180--186}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2020}, +} + +@Article{slack2021counterfactual, + author = {Slack, Dylan and Hilgard, Anna and Lakkaraju, Himabindu and Singh, Sameer}, + title = {Counterfactual Explanations Can Be Manipulated}, + volume = {34}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Advances in Neural Information Processing Systems}, + year = {2021}, +} + +@Article{slovic1974who, + author = {Slovic, 
Paul and Tversky, Amos}, + title = {Who Accepts {{Savage}}'s Axiom?}, + number = {6}, + pages = {368--373}, + volume = {19}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Behavioral science}, + year = {1974}, +} + +@Unpublished{spooner2021counterfactual, + author = {Spooner, Thomas and Dervovic, Danial and Long, Jason and Shepard, Jon and Chen, Jiahao and Magazzeni, Daniele}, + title = {Counterfactual {{Explanations}} for {{Arbitrary Regression Models}}}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {2106.15212}, + eprinttype = {arxiv}, + shortjournal = {arXiv preprint arXiv:2106.15212}, + year = {2021}, +} + +@Article{srivastava2014dropout, + author = {Srivastava, Nitish and Hinton, Geoffrey and Krizhevsky, Alex and Sutskever, Ilya and Salakhutdinov, Ruslan}, + title = {Dropout: A Simple Way to Prevent Neural Networks from Overfitting}, + number = {1}, + pages = {1929--1958}, + volume = {15}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {The journal of machine learning research}, + year = {2014}, +} + +@Unpublished{stanton2022bayesian, + author = {Stanton, Samuel and Maddox, Wesley and Wilson, Andrew Gordon}, + title = {Bayesian {{Optimization}} with {{Conformal Coverage Guarantees}}}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {2210.12496}, + eprinttype = {arxiv}, + file = {:/Users/FA31DU/Zotero/storage/XFGZAB9J/Stanton et al. - 2022 - Bayesian Optimization with Conformal Coverage Guar.pdf:;:/Users/FA31DU/Zotero/storage/RPWYDPVW/2210.html:}, + year = {2022}, +} + +@Article{sturm2014simple, + author = {Sturm, Bob L}, + title = {A Simple Method to Determine If a Music Information Retrieval System Is a ``Horse''}, + number = {6}, + pages = {1636--1644}, + volume = {16}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {IEEE Transactions on Multimedia}, + year = {2014}, +} + +@Article{sunstein2003libertarian, + author = {Sunstein, Cass R and Thaler, Richard H}, + title = {Libertarian Paternalism Is Not an Oxymoron}, + pages = {1159--1202}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {The University of Chicago Law Review}, + year = {2003}, +} + +@Book{sutton2018reinforcement, + author = {Sutton, Richard S and Barto, Andrew G}, + title = {Reinforcement Learning: {{An}} Introduction}, + publisher = {{MIT press}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2018}, +} + +@Unpublished{szegedy2013intriguing, + author = {Szegedy, Christian and Zaremba, Wojciech and Sutskever, Ilya and Bruna, Joan and Erhan, Dumitru and Goodfellow, Ian and Fergus, Rob}, + title = {Intriguing Properties of Neural Networks}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {1312.6199}, + eprinttype = {arxiv}, + year = {2013}, +} + +@Article{thaler1981empirical, + author = {Thaler, Richard}, + title = {Some Empirical Evidence on Dynamic Inconsistency}, + number = {3}, + pages = {201--207}, + volume = {8}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Economics letters}, + year = {1981}, +} + +@Article{thaler2004more, + author = {Thaler, 
Richard H and Benartzi, Shlomo}, + title = {Save More Tomorrow{\texttrademark}: {{Using}} Behavioral Economics to Increase Employee Saving}, + number = {S1}, + pages = {S164--S187}, + volume = {112}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Journal of political Economy}, + year = {2004}, +} + +@Article{tversky1981framing, + author = {Tversky, Amos and Kahneman, Daniel}, + title = {The Framing of Decisions and the Psychology of Choice}, + number = {4481}, + pages = {453--458}, + volume = {211}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Science (New York, N.Y.)}, + shortjournal = {science}, + year = {1981}, +} + +@Article{ungemach2011how, + author = {Ungemach, Christoph and Stewart, Neil and Reimers, Stian}, + title = {How Incidental Values from the Environment Affect Decisions about Money, Risk, and Delay}, + number = {2}, + pages = {253--260}, + volume = {22}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Psychological Science}, + year = {2011}, +} + +@Unpublished{upadhyay2021robust, + author = {Upadhyay, Sohini and Joshi, Shalmali and Lakkaraju, Himabindu}, + title = {Towards {{Robust}} and {{Reliable Algorithmic Recourse}}}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {2102.13620}, + eprinttype = {arxiv}, + year = {2021}, +} + +@InProceedings{ustun2019actionable, + author = {Ustun, Berk and Spangher, Alexander and Liu, Yang}, + booktitle = {Proceedings of the {{Conference}} on {{Fairness}}, {{Accountability}}, and {{Transparency}}}, + title = {Actionable Recourse in Linear Classification}, + pages = {10--19}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2019}, +} + +@Article{vanboven2000egocentric, + author = {Van Boven, Leaf and Dunning, David and Loewenstein, George}, + title = {Egocentric Empathy Gaps between Owners and Buyers: Misperceptions of the Endowment Effect.}, + number = {1}, + pages = {66}, + volume = {79}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Journal of personality and social psychology}, + year = {2000}, +} + +@Book{varshney2022trustworthy, + author = {Varshney, Kush R.}, + title = {Trustworthy {{Machine Learning}}}, + publisher = {{Independently Published}}, + address = {{Chappaqua, NY, USA}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2022}, +} + +@Unpublished{verma2020counterfactual, + author = {Verma, Sahil and Dickerson, John and Hines, Keegan}, + title = {Counterfactual Explanations for Machine Learning: {{A}} Review}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {2010.10596}, + eprinttype = {arxiv}, + year = {2020}, +} + +@Article{verstyuk2020modeling, + author = {Verstyuk, Sergiy}, + title = {Modeling Multivariate Time Series in Economics: {{From}} Auto-Regressions to Recurrent Neural Networks}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Available at SSRN 3589337}, + year = {2020}, +} + +@Article{wachter2017counterfactual, + author = {Wachter, Sandra and Mittelstadt, Brent and Russell, Chris}, + title = {Counterfactual Explanations without Opening the Black Box: {{Automated}} 
Decisions and the {{GDPR}}}, + pages = {841}, + volume = {31}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Harv. JL \& Tech.}, + year = {2017}, +} + +@Article{wang2018optimal, + author = {Wang, HaiYing and Zhu, Rong and Ma, Ping}, + title = {Optimal Subsampling for Large Sample Logistic Regression}, + number = {522}, + pages = {829--844}, + volume = {113}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Journal of the American Statistical Association}, + year = {2018}, +} + +@Book{wasserman2006all, + author = {Wasserman, Larry}, + title = {All of Nonparametric Statistics}, + publisher = {{Springer Science \& Business Media}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2006}, +} + +@Book{wasserman2013all, + author = {Wasserman, Larry}, + title = {All of Statistics: A Concise Course in Statistical Inference}, + publisher = {{Springer Science \& Business Media}}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + year = {2013}, +} + +@Article{widmer1996learning, + author = {Widmer, Gerhard and Kubat, Miroslav}, + title = {Learning in the Presence of Concept Drift and Hidden Contexts}, + number = {1}, + pages = {69--101}, + volume = {23}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Machine learning}, + year = {1996}, +} + +@Unpublished{wilson2020case, + author = {Wilson, Andrew Gordon}, + title = {The Case for {{Bayesian}} Deep Learning}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {2001.10995}, + eprinttype = {arxiv}, + year = {2020}, +} + +@Article{witten2009penalized, + author = {Witten, Daniela M and Tibshirani, Robert and Hastie, Trevor}, + title = {A Penalized Matrix Decomposition, with Applications to Sparse Principal Components and Canonical Correlation Analysis}, + number = {3}, + pages = {515--534}, + volume = {10}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Biostatistics (Oxford, England)}, + shortjournal = {Biostatistics}, + year = {2009}, +} + +@Article{xu2020epidemiological, + author = {Xu, Bo and Gutierrez, Bernardo and Mekaru, Sumiko and Sewalk, Kara and Goodwin, Lauren and Loskill, Alyssa and Cohn, Emily and Hswen, Yulin and Hill, Sarah C. and Cobo, Maria M and Zarebski, Alexander and Li, Sabrina and Wu, Chieh-Hsi and Hulland, Erin and Morgan, Julia and Wang, Lin and O'Brien, Katelynn and Scarpino, Samuel V. and Brownstein, John S. and Pybus, Oliver G. and Pigott, David M. and Kraemer, Moritz U. 
G.}, + title = {Epidemiological Data from the {{COVID-19}} Outbreak, Real-Time Case Information}, + doi = {doi.org/10.1038/s41597-020-0448-0}, + number = {106}, + volume = {7}, + bdsk-url-1 = {https://doi.org/10.1038/s41597-020-0448-0}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Scientific Data}, + year = {2020}, +} + +@Article{yeh2009comparisons, + author = {Yeh, I-Cheng and Lien, Che-hui}, + title = {The Comparisons of Data Mining Techniques for the Predictive Accuracy of Probability of Default of Credit Card Clients}, + number = {2}, + pages = {2473--2480}, + volume = {36}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Expert systems with applications}, + year = {2009}, +} + +@Article{zhang1998forecasting, + author = {Zhang, Guoqiang and Patuwo, B Eddy and Hu, Michael Y}, + title = {Forecasting with Artificial Neural Networks:: {{The}} State of the Art}, + number = {1}, + pages = {35--62}, + volume = {14}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {International journal of forecasting}, + year = {1998}, +} + +@Article{zhang2003time, + author = {Zhang, G Peter}, + title = {Time Series Forecasting Using a Hybrid {{ARIMA}} and Neural Network Model}, + pages = {159--175}, + volume = {50}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {Neurocomputing}, + year = {2003}, +} + +@Unpublished{zheng2018dags, + author = {Zheng, Xun and Aragam, Bryon and Ravikumar, Pradeep and Xing, Eric P}, + title = {Dags with No Tears: {{Continuous}} Optimization for Structure Learning}, + archiveprefix = {arXiv}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + eprint = {1803.01422}, + eprinttype = {arxiv}, + year = {2018}, +} + +@Article{zhu2015optimal, + author = {Zhu, Rong and Ma, Ping and Mahoney, Michael W and Yu, Bin}, + title = {Optimal Subsampling Approaches for Large Sample Linear Regression}, + pages = {arXiv--1509}, + date-added = {2022-12-13 12:58:01 +0100}, + date-modified = {2022-12-13 12:58:01 +0100}, + journal = {arXiv}, + year = {2015}, +} + +@Article{barber2021predictive, + author = {Barber, Rina Foygel and CandΓ¨s, Emmanuel J. and Ramdas, Aaditya and Tibshirani, Ryan J.}, + title = {Predictive inference with the jackknife+}, + doi = {10.1214/20-AOS1965}, + issn = {0090-5364, 2168-8966}, + number = {1}, + pages = {486--507}, + urldate = {2022-12-13}, + volume = {49}, + abstract = {This paper introduces the jackknife+, which is a novel method for constructing predictive confidence intervals. Whereas the jackknife outputs an interval centered at the predicted response of a test point, with the width of the interval determined by the quantiles of leave-one-out residuals, the jackknife+ also uses the leave-one-out predictions at the test point to account for the variability in the fitted regression function. Assuming exchangeable training samples, we prove that this crucial modification permits rigorous coverage guarantees regardless of the distribution of the data points, for any algorithm that treats the training points symmetrically. Such guarantees are not possible for the original jackknife and we demonstrate examples where the coverage rate may actually vanish. 
Our theoretical and empirical analysis reveals that the jackknife and the jackknife+ intervals achieve nearly exact coverage and have similar lengths whenever the fitting algorithm obeys some form of stability. Further, we extend the jackknife+ to \$K\$-fold cross validation and similarly establish rigorous coverage properties. Our methods are related to cross-conformal prediction proposed by Vovk (Ann. Math. Artif. Intell. 74 (2015) 9–28) and we discuss connections.}, + file = {:Barber2021 - Predictive Inference with the Jackknife+.pdf:PDF}, + journal = {The Annals of Statistics}, + keywords = {62F40, 62G08, 62G09, conformal inference, cross-validation, distribution-free, jackknife, leave-one-out, stability}, + month = feb, + publisher = {Institute of Mathematical Statistics}, + year = {2021}, +} + +@TechReport{chouldechova2018frontiers, + author = {Chouldechova, Alexandra and Roth, Aaron}, + title = {The {Frontiers} of {Fairness} in {Machine} {Learning}}, + doi = {10.48550/arXiv.1810.08810}, + eprint = {1810.08810}, + note = {arXiv:1810.08810 [cs, stat] type: article}, + abstract = {The last few years have seen an explosion of academic and popular interest in algorithmic fairness. Despite this interest and the volume and velocity of work that has been produced recently, the fundamental science of fairness in machine learning is still in a nascent state. In March 2018, we convened a group of experts as part of a CCC visioning workshop to assess the state of the field, and distill the most promising research directions going forward. This report summarizes the findings of that workshop. Along the way, it surveys recent theoretical work in the field and points towards promising directions for research.}, + archiveprefix = {arxiv}, + file = {:chouldechova2018frontiers - The Frontiers of Fairness in Machine Learning.pdf:PDF}, + keywords = {Computer Science - Machine Learning, Computer Science - Data Structures and Algorithms, Computer Science - Computer Science and Game Theory, Statistics - Machine Learning}, + month = oct, + school = {arXiv}, + year = {2018}, +} + +@Article{pawelczyk2022probabilistically, + author = {Pawelczyk, Martin and Datta, Teresa and van-den-Heuvel, Johannes and Kasneci, Gjergji and Lakkaraju, Himabindu}, + title = {Probabilistically {Robust} {Recourse}: {Navigating} the {Trade}-offs between {Costs} and {Robustness} in {Algorithmic} {Recourse}}, + file = {:pawelczyk2022probabilistically - Probabilistically Robust Recourse_ Navigating the Trade Offs between Costs and Robustness in Algorithmic Recourse.pdf:PDF}, + journal = {arXiv preprint arXiv:2203.06768}, + shorttitle = {Probabilistically {Robust} {Recourse}}, + year = {2022}, +} + +@InProceedings{stutz2022learning, + author = {Stutz, David and Dvijotham, Krishnamurthy Dj and Cemgil, Ali Taylan and Doucet, Arnaud}, + title = {Learning {Optimal} {Conformal} {Classifiers}}, + language = {en}, + url = {https://openreview.net/forum?id=t8O-4LKFVx}, + urldate = {2023-02-13}, + abstract = {Modern deep learning based classifiers show very high accuracy on test data but this does not provide sufficient guarantees for safe deployment, especially in high-stake AI applications such as medical diagnosis. Usually, predictions are obtained without a reliable uncertainty estimate or a formal guarantee. Conformal prediction (CP) addresses these issues by using the classifier's predictions, e.g., its probability estimates, to predict confidence sets containing the true class with a user-specified probability. 
However, using CP as a separate processing step after training prevents the underlying model from adapting to the prediction of confidence sets. Thus, this paper explores strategies to differentiate through CP during training with the goal of training model with the conformal wrapper end-to-end. In our approach, conformal training (ConfTr), we specifically "simulate" conformalization on mini-batches during training. Compared to standard training, ConfTr reduces the average confidence set size (inefficiency) of state-of-the-art CP methods applied after training. Moreover, it allows to "shape" the confidence sets predicted at test time, which is difficult for standard CP. On experiments with several datasets, we show ConfTr can influence how inefficiency is distributed across classes, or guide the composition of confidence sets in terms of the included classes, while retaining the guarantees offered by CP.}, + file = {:stutz2022learning - Learning Optimal Conformal Classifiers.pdf:PDF}, + month = may, + year = {2022}, +} + +@InProceedings{grathwohl2020your, + author = {Grathwohl, Will and Wang, Kuan-Chieh and Jacobsen, Joern-Henrik and Duvenaud, David and Norouzi, Mohammad and Swersky, Kevin}, + title = {Your classifier is secretly an energy based model and you should treat it like one}, + language = {en}, + url = {https://openreview.net/forum?id=Hkxzx0NtDB}, + urldate = {2023-02-13}, + abstract = {We propose to reinterpret a standard discriminative classifier of p(y{\textbar}x) as an energy based model for the joint distribution p(x, y). In this setting, the standard class probabilities can be easily computed as well as unnormalized values of p(x) and p(x{\textbar}y). Within this framework, standard discriminative architectures may be used and the model can also be trained on unlabeled data. We demonstrate that energy based training of the joint distribution improves calibration, robustness, and out-of-distribution detection while also enabling our models to generate samples rivaling the quality of recent GAN approaches. We improve upon recently proposed techniques for scaling up the training of energy based models and present an approach which adds little overhead compared to standard classification training. Our approach is the first to achieve performance rivaling the state-of-the-art in both generative and discriminative learning within one hybrid model.}, + file = {:grathwohl2020your - Your Classifier Is Secretly an Energy Based Model and You Should Treat It like One.pdf:PDF}, + month = mar, + year = {2020}, +} + +@Book{murphy2023probabilistic, + author = {Murphy, Kevin P.}, + date = {2023}, + title = {Probabilistic machine learning: {Advanced} topics}, + publisher = {MIT Press}, + shorttitle = {Probabilistic machine learning}, +} + +@TechReport{artelt2021evaluating, + author = {Artelt, AndrΓ© and Vaquet, Valerie and Velioglu, Riza and Hinder, Fabian and Brinkrolf, Johannes and Schilling, Malte and Hammer, Barbara}, + date = {2021-07}, + institution = {arXiv}, + title = {Evaluating {Robustness} of {Counterfactual} {Explanations}}, + note = {arXiv:2103.02354 [cs] type: article}, + url = {http://arxiv.org/abs/2103.02354}, + urldate = {2023-03-24}, + abstract = {Transparency is a fundamental requirement for decision making systems when these should be deployed in the real world. It is usually achieved by providing explanations of the system's behavior. A prominent and intuitive type of explanations are counterfactual explanations. 
Counterfactual explanations explain a behavior to the user by proposing actions -- as changes to the input -- that would cause a different (specified) behavior of the system. However, such explanation methods can be unstable with respect to small changes to the input -- i.e. even a small change in the input can lead to huge or arbitrary changes in the output and of the explanation. This could be problematic for counterfactual explanations, as two similar individuals might get very different explanations. Even worse, if the recommended actions differ considerably in their complexity, one would consider such unstable (counterfactual) explanations as individually unfair. In this work, we formally and empirically study the robustness of counterfactual explanations in general, as well as under different models and different kinds of perturbations. Furthermore, we propose that plausible counterfactual explanations can be used instead of closest counterfactual explanations to improve the robustness and consequently the individual fairness of counterfactual explanations.}, + annotation = {Comment: Rewrite paper to make things more clear; Remove one theorem \& corollary due to buggy proof}, + file = {:artelt2021evaluating - Evaluating Robustness of Counterfactual Explanations.pdf:PDF}, + keywords = {Computer Science - Machine Learning, Computer Science - Artificial Intelligence}, +} + +@Article{guidotti2022counterfactual, + author = {Guidotti, Riccardo}, + date = {2022-04}, + journaltitle = {Data Mining and Knowledge Discovery}, + title = {Counterfactual explanations and how to find them: literature review and benchmarking}, + doi = {10.1007/s10618-022-00831-6}, + issn = {1573-756X}, + language = {en}, + url = {https://doi.org/10.1007/s10618-022-00831-6}, + urldate = {2023-03-24}, + abstract = {Interpretable machine learning aims at unveiling the reasons behind predictions returned by uninterpretable classifiers. One of the most valuable types of explanation consists of counterfactuals. A counterfactual explanation reveals what should have been different in an instance to observe a diverse outcome. For instance, a bank customer asks for a loan that is rejected. The counterfactual explanation consists of what should have been different for the customer in order to have the loan accepted. Recently, there has been an explosion of proposals for counterfactual explainers. The aim of this work is to survey the most recent explainers returning counterfactual explanations. We categorize explainers based on the approach adopted to return the counterfactuals, and we label them according to characteristics of the method and properties of the counterfactuals returned. In addition, we visually compare the explanations, and we report quantitative benchmarking assessing minimality, actionability, stability, diversity, discriminative power, and running time. 
The results make evident that the current state of the art does not provide a counterfactual explainer able to guarantee all these properties simultaneously.}, + file = {Full Text PDF:https\://link.springer.com/content/pdf/10.1007%2Fs10618-022-00831-6.pdf:application/pdf}, + keywords = {Explainable AI, Counterfactual explanations, Contrastive explanations, Interpretable machine learning}, + shorttitle = {Counterfactual explanations and how to find them}, +} + +@TechReport{mahajan2020preserving, + author = {Mahajan, Divyat and Tan, Chenhao and Sharma, Amit}, + date = {2020-06}, + institution = {arXiv}, + title = {Preserving {Causal} {Constraints} in {Counterfactual} {Explanations} for {Machine} {Learning} {Classifiers}}, + doi = {10.48550/arXiv.1912.03277}, + note = {arXiv:1912.03277 [cs, stat] type: article}, + url = {http://arxiv.org/abs/1912.03277}, + urldate = {2023-03-24}, + abstract = {To construct interpretable explanations that are consistent with the original ML model, counterfactual examples---showing how the model's output changes with small perturbations to the input---have been proposed. This paper extends the work in counterfactual explanations by addressing the challenge of feasibility of such examples. For explanations of ML models in critical domains such as healthcare and finance, counterfactual examples are useful for an end-user only to the extent that perturbation of feature inputs is feasible in the real world. We formulate the problem of feasibility as preserving causal relationships among input features and present a method that uses (partial) structural causal models to generate actionable counterfactuals. When feasibility constraints cannot be easily expressed, we consider an alternative mechanism where people can label generated CF examples on feasibility: whether it is feasible to intervene and realize the candidate CF example from the original input. To learn from this labelled feasibility data, we propose a modified variational auto encoder loss for generating CF examples that optimizes for feasibility as people interact with its output. Our experiments on Bayesian networks and the widely used ''Adult-Income'' dataset show that our proposed methods can generate counterfactual explanations that better satisfy feasibility constraints than existing methods.. Code repository can be accessed here: {\textbackslash}textit\{https://github.com/divyat09/cf-feasibility\}}, + annotation = {Comment: 2019 NeurIPS Workshop on Do the right thing: Machine learning and Causal Inference for improved decision making}, + file = {:mahajan2020preserving - Preserving Causal Constraints in Counterfactual Explanations for Machine Learning Classifiers.pdf:PDF}, + keywords = {Computer Science - Machine Learning, Computer Science - Artificial Intelligence, Statistics - Machine Learning}, +} + +@TechReport{antoran2023sampling, + author = {AntorΓ‘n, Javier and Padhy, Shreyas and Barbano, Riccardo and Nalisnick, Eric and Janz, David and HernΓ‘ndez-Lobato, JosΓ© Miguel}, + date = {2023-03}, + institution = {arXiv}, + title = {Sampling-based inference for large linear models, with application to linearised {Laplace}}, + note = {arXiv:2210.04994 [cs, stat] type: article}, + url = {http://arxiv.org/abs/2210.04994}, + urldate = {2023-03-25}, + abstract = {Large-scale linear models are ubiquitous throughout machine learning, with contemporary application as surrogate models for neural network uncertainty quantification; that is, the linearised Laplace method. 
Alas, the computational cost associated with Bayesian linear models constrains this method's application to small networks, small output spaces and small datasets. We address this limitation by introducing a scalable sample-based Bayesian inference method for conjugate Gaussian multi-output linear models, together with a matching method for hyperparameter (regularisation) selection. Furthermore, we use a classic feature normalisation method (the g-prior) to resolve a previously highlighted pathology of the linearised Laplace method. Together, these contributions allow us to perform linearised neural network inference with ResNet-18 on CIFAR100 (11M parameters, 100 outputs x 50k datapoints), with ResNet-50 on Imagenet (50M parameters, 1000 outputs x 1.2M datapoints) and with a U-Net on a high-resolution tomographic reconstruction task (2M parameters, 251k output{\textasciitilde}dimensions).}, + annotation = {Comment: Published at ICLR 2023. This latest Arxiv version is extended with a demonstration of the proposed methods on the Imagenet dataset}, + file = {arXiv Fulltext PDF:https\://arxiv.org/pdf/2210.04994.pdf:application/pdf}, + keywords = {Statistics - Machine Learning, Computer Science - Artificial Intelligence, Computer Science - Machine Learning}, +} + +@Misc{altmeyer2022conformal, + author = {Altmeyer, Patrick}, + date = {2022-10}, + title = {{Conformal} {Prediction} in {Julia}}, + language = {en}, + url = {https://www.paltmeyer.com/blog/posts/conformal-prediction/}, + urldate = {2023-03-27}, + abstract = {A (very) gentle introduction to Conformal Prediction in Julia using my new package ConformalPrediction.jl.}, +} + +@InProceedings{welling2011bayesian, + author = {Welling, M. and Teh, Y.}, + date = {2011-06}, + title = {Bayesian {Learning} via {Stochastic} {Gradient} {Langevin} {Dynamics}}, + url = {https://www.semanticscholar.org/paper/Bayesian-Learning-via-Stochastic-Gradient-Langevin-Welling-Teh/aeed631d6a84100b5e9a021ec1914095c66de415}, + urldate = {2023-05-15}, + abstract = {In this paper we propose a new framework for learning from large scale datasets based on iterative learning from small mini-batches. By adding the right amount of noise to a standard stochastic gradient optimization algorithm we show that the iterates will converge to samples from the true posterior distribution as we anneal the stepsize. This seamless transition between optimization and Bayesian posterior sampling provides an inbuilt protection against overfitting. We also propose a practical method for Monte Carlo estimates of posterior statistics which monitors a "sampling threshold" and collects samples after it has been surpassed. 
We apply the method to three models: a mixture of Gaussians, logistic regression and ICA with natural gradients.}, + annotation = {[TLDR] This paper proposes a new framework for learning from large scale datasets based on iterative learning from small mini-batches by adding the right amount of noise to a standard stochastic gradient optimization algorithm and shows that the iterates will converge to samples from the true posterior distribution as the authors anneal the stepsize.}, + file = {:welling_bayesian_2011 - Bayesian Learning Via Stochastic Gradient Langevin Dynamics.html:URL;:welling2011bayesian - Bayesian Learning Via Stochastic Gradient Langevin Dynamics.pdf:PDF}, +} + +@Article{gill2010circular, + author = {Gill, Jeff and Hangartner, Dominik}, + date = {2010}, + journaltitle = {Political Analysis}, + title = {Circular {Data} in {Political} {Science} and {How} to {Handle} {It}}, + doi = {10.1093/pan/mpq009}, + issn = {1047-1987, 1476-4989}, + language = {en}, + number = {3}, + pages = {316--336}, + url = {https://www.cambridge.org/core/journals/political-analysis/article/circular-data-in-political-science-and-how-to-handle-it/6DF2D9DA60C455E6A48FFB0FF011F747}, + urldate = {2023-05-15}, + volume = {18}, + abstract = {There has been no attention to circular (purely cyclical) data in political science research. We show that such data exist and are mishandled by models that do not take into account the inherently recycling nature of some phenomenon. Clock and calendar effects are the obvious cases, but directional data are observed as well. We describe a standard maximum likelihood regression modeling framework based on the von Mises distribution, then develop a general Bayesian regression procedure for the first time, providing an easy-to-use Metropolis-Hastings sampler for this approach. Applications include a chronographic analysis of U.S. domestic terrorism and directional party preferences in a two-dimensional ideological space for German Bundestag elections. The results demonstrate the importance of circular models to handle periodic and directional data in political science.}, + file = {Full Text PDF:https\://www.cambridge.org/core/services/aop-cambridge-core/content/view/6DF2D9DA60C455E6A48FFB0FF011F747/S1047198700012493a.pdf/div-class-title-circular-data-in-political-science-and-how-to-handle-it-div.pdf:application/pdf}, + publisher = {Cambridge University Press}, +} + +@InProceedings{liu2023goggle, + author = {Liu, Tennison and Qian, Zhaozhi and Berrevoets, Jeroen and Schaar, Mihaela van der}, + date = {2023-02}, + title = {{GOGGLE}: {Generative} {Modelling} for {Tabular} {Data} by {Learning} {Relational} {Structure}}, + language = {en}, + url = {https://openreview.net/forum?id=fPVRcJqspu}, + urldate = {2023-05-15}, + abstract = {Deep generative models learn highly complex and non-linear representations to generate realistic synthetic data. While they have achieved notable success in computer vision and natural language processing, similar advances have been less demonstrable in the tabular domain. This is partially because generative modelling of tabular data entails a particular set of challenges, including heterogeneous relationships, limited number of samples, and difficulties in incorporating prior knowledge. Additionally, unlike their counterparts in image and sequence domain, deep generative models for tabular data almost exclusively employ fully-connected layers, which encode weak inductive biases about relationships between inputs. 
Real-world data generating processes can often be represented using relational structures, which encode sparse, heterogeneous relationships between variables. In this work, we learn and exploit relational structure underlying tabular data to better model variable dependence, and as a natural means to introduce regularization on relationships and include prior knowledge. Specifically, we introduce GOGGLE, an end-to-end message passing scheme that jointly learns the relational structure and corresponding functional relationships as the basis of generating synthetic samples. Using real-world datasets, we provide empirical evidence that the proposed method is effective in generating realistic synthetic data and exploiting domain knowledge for downstream tasks.}, + file = {Full Text PDF:https\://openreview.net/pdf?id=fPVRcJqspu:application/pdf}, + shorttitle = {{GOGGLE}}, +} + +@TechReport{du2020implicit, + author = {Du, Yilun and Mordatch, Igor}, + date = {2020-06}, + institution = {arXiv}, + title = {Implicit {Generation} and {Generalization} in {Energy}-{Based} {Models}}, + doi = {10.48550/arXiv.1903.08689}, + note = {arXiv:1903.08689 [cs, stat] type: article}, + url = {http://arxiv.org/abs/1903.08689}, + urldate = {2023-05-16}, + abstract = {Energy based models (EBMs) are appealing due to their generality and simplicity in likelihood modeling, but have been traditionally difficult to train. We present techniques to scale MCMC based EBM training on continuous neural networks, and we show its success on the high-dimensional data domains of ImageNet32x32, ImageNet128x128, CIFAR-10, and robotic hand trajectories, achieving better samples than other likelihood models and nearing the performance of contemporary GAN approaches, while covering all modes of the data. We highlight some unique capabilities of implicit generation such as compositionality and corrupt image reconstruction and inpainting. Finally, we show that EBMs are useful models across a wide variety of tasks, achieving state-of-the-art out-of-distribution classification, adversarially robust classification, state-of-the-art continual online class learning, and coherent long term predicted trajectory rollouts.}, + file = {arXiv Fulltext PDF:https\://arxiv.org/pdf/1903.08689.pdf:application/pdf}, + keywords = {Computer Science - Machine Learning, Computer Science - Computer Vision and Pattern Recognition, Statistics - Machine Learning}, +} + +@InProceedings{krizhevsky2009learning, + author = {Krizhevsky, A.}, + date = {2009}, + title = {Learning {Multiple} {Layers} of {Features} from {Tiny} {Images}}, + url = {https://www.semanticscholar.org/paper/Learning-Multiple-Layers-of-Features-from-Tiny-Krizhevsky/5d90f06bb70a0a3dced62413346235c02b1aa086}, + urldate = {2023-06-21}, + abstract = {Groups at MIT and NYU have collected a dataset of millions of tiny colour images from the web. It is, in principle, an excellent dataset for unsupervised training of deep generative models, but previous researchers who have tried this have found it dicult to learn a good set of lters from the images. We show how to train a multi-layer generative model that learns to extract meaningful features which resemble those found in the human visual cortex. Using a novel parallelization algorithm to distribute the work among multiple machines connected on a network, we show how training such a model can be done in reasonable time. 
A second problematic aspect of the tiny images dataset is that there are no reliable class labels which makes it hard to use for object recognition experiments. We created two sets of reliable labels. The CIFAR-10 set has 6000 examples of each of 10 classes and the CIFAR-100 set has 600 examples of each of 100 non-overlapping classes. Using these labels, we show that object recognition is signicantly improved by pre-training a layer of features on a large set of unlabeled tiny images.}, + annotation = {[TLDR] It is shown how to train a multi-layer generative model that learns to extract meaningful features which resemble those found in the human visual cortex, using a novel parallelization algorithm to distribute the work among multiple machines connected on a network.}, + file = {Semantic Scholar Link:https\://www.semanticscholar.org/paper/Learning-Multiple-Layers-of-Features-from-Tiny-Krizhevsky/5d90f06bb70a0a3dced62413346235c02b1aa086:text/html;Full Text PDF:http\://www.cs.toronto.edu/~kriz/learning-features-2009-TR.pdf:application/pdf}, +} + +@Misc{becker1996adult, + author = {Barry Becker, Ronny Kohavi}, + date = {1996}, + title = {Adult}, + doi = {10.24432/C5XW20}, + note = {Type: dataset}, + url = {https://archive.ics.uci.edu/dataset/2}, + urldate = {2023-06-21}, + publisher = {UCI Machine Learning Repository}, +} + +@InProceedings{tolomei2017interpretable, + author = {Tolomei, Gabriele and Silvestri, Fabrizio and Haines, Andrew and Lalmas, Mounia}, + booktitle = {Proceedings of the 23rd {ACM} {SIGKDD} {International} {Conference} on {Knowledge} {Discovery} and {Data} {Mining}}, + date = {2017-08}, + title = {Interpretable {Predictions} of {Tree}-based {Ensembles} via {Actionable} {Feature} {Tweaking}}, + doi = {10.1145/3097983.3098039}, + note = {arXiv:1706.06691 [stat]}, + pages = {465--474}, + url = {http://arxiv.org/abs/1706.06691}, + urldate = {2023-06-21}, + abstract = {Machine-learned models are often described as "black boxes". In many real-world applications however, models may have to sacrifice predictive power in favour of human-interpretability. When this is the case, feature engineering becomes a crucial task, which requires significant and time-consuming human effort. Whilst some features are inherently static, representing properties that cannot be influenced (e.g., the age of an individual), others capture characteristics that could be adjusted (e.g., the daily amount of carbohydrates taken). Nonetheless, once a model is learned from the data, each prediction it makes on new instances is irreversible - assuming every instance to be a static point located in the chosen feature space. There are many circumstances however where it is important to understand (i) why a model outputs a certain prediction on a given instance, (ii) which adjustable features of that instance should be modified, and finally (iii) how to alter such a prediction when the mutated instance is input back to the model. In this paper, we present a technique that exploits the internals of a tree-based ensemble classifier to offer recommendations for transforming true negative instances into positively predicted ones. We demonstrate the validity of our approach using an online advertising application. First, we design a Random Forest classifier that effectively separates between two types of ads: low (negative) and high (positive) quality ads (instances). 
Then, we introduce an algorithm that provides recommendations that aim to transform a low quality ad (negative instance) into a high quality one (positive instance). Finally, we evaluate our approach on a subset of the active inventory of a large ad network, Yahoo Gemini.}, + annotation = {Comment: 10 pages, KDD 2017}, + file = {arXiv Fulltext PDF:https\://arxiv.org/pdf/1706.06691.pdf:application/pdf}, + keywords = {Statistics - Machine Learning, 68T01, I.2.0, I.5.1}, +} + +@TechReport{dandl2023counterfactuals, + author = {Dandl, Susanne and Hofheinz, Andreas and Binder, Martin and Bischl, Bernd and Casalicchio, Giuseppe}, + date = {2023-04}, + institution = {arXiv}, + title = {counterfactuals: {An} {R} {Package} for {Counterfactual} {Explanation} {Methods}}, + note = {arXiv:2304.06569 [cs, stat] type: article}, + url = {http://arxiv.org/abs/2304.06569}, + urldate = {2023-06-21}, + abstract = {Counterfactual explanation methods provide information on how feature values of individual observations must be changed to obtain a desired prediction. Despite the increasing amount of proposed methods in research, only a few implementations exist whose interfaces and requirements vary widely. In this work, we introduce the counterfactuals R package, which provides a modular and unified R6-based interface for counterfactual explanation methods. We implemented three existing counterfactual explanation methods and propose some optional methodological extensions to generalize these methods to different scenarios and to make them more comparable. We explain the structure and workflow of the package using real use cases and show how to integrate additional counterfactual explanation methods into the package. In addition, we compared the implemented methods for a variety of models and datasets with regard to the quality of their counterfactual explanations and their runtime behavior.}, + file = {arXiv Fulltext PDF:https\://arxiv.org/pdf/2304.06569.pdf:application/pdf}, + keywords = {Statistics - Machine Learning, Computer Science - Machine Learning, Statistics - Computation}, + shorttitle = {counterfactuals}, +} + +@TechReport{laugel2017inversea, + author = {Laugel, Thibault and Lesot, Marie-Jeanne and Marsala, Christophe and Renard, Xavier and Detyniecki, Marcin}, + date = {2017-12}, + institution = {arXiv}, + title = {Inverse {Classification} for {Comparison}-based {Interpretability} in {Machine} {Learning}}, + doi = {10.48550/arXiv.1712.08443}, + note = {arXiv:1712.08443 [cs, stat] type: article}, + url = {http://arxiv.org/abs/1712.08443}, + urldate = {2023-06-21}, + abstract = {In the context of post-hoc interpretability, this paper addresses the task of explaining the prediction of a classifier, considering the case where no information is available, neither on the classifier itself, nor on the processed data (neither the training nor the test data). It proposes an instance-based approach whose principle consists in determining the minimal changes needed to alter a prediction: given a data point whose classification must be explained, the proposed method consists in identifying a close neighbour classified differently, where the closeness definition integrates a sparsity constraint. This principle is implemented using observation generation in the Growing Spheres algorithm. 
Experimental results on two datasets illustrate the relevance of the proposed approach that can be used to gain knowledge about the classifier.}, + annotation = {Comment: preprint}, + file = {arXiv Fulltext PDF:https\://arxiv.org/pdf/1712.08443.pdf:application/pdf}, + keywords = {Statistics - Machine Learning, Computer Science - Artificial Intelligence, Computer Science - Machine Learning}, +} + +@TechReport{delaney2021uncertainty, + author = {Delaney, Eoin and Greene, Derek and Keane, Mark T.}, + date = {2021-07}, + institution = {arXiv}, + title = {Uncertainty {Estimation} and {Out}-of-{Distribution} {Detection} for {Counterfactual} {Explanations}: {Pitfalls} and {Solutions}}, + note = {arXiv:2107.09734 [cs] type: article}, + url = {http://arxiv.org/abs/2107.09734}, + urldate = {2023-06-23}, + abstract = {Whilst an abundance of techniques have recently been proposed to generate counterfactual explanations for the predictions of opaque black-box systems, markedly less attention has been paid to exploring the uncertainty of these generated explanations. This becomes a critical issue in high-stakes scenarios, where uncertain and misleading explanations could have dire consequences (e.g., medical diagnosis and treatment planning). Moreover, it is often difficult to determine if the generated explanations are well grounded in the training data and sensitive to distributional shifts. This paper proposes several practical solutions that can be leveraged to solve these problems by establishing novel connections with other research works in explainability (e.g., trust scores) and uncertainty estimation (e.g., Monte Carlo Dropout). Two experiments demonstrate the utility of our proposed solutions.}, + file = {arXiv Fulltext PDF:https\://arxiv.org/pdf/2107.09734.pdf:application/pdf}, + keywords = {Computer Science - Machine Learning, Computer Science - Artificial Intelligence}, + shorttitle = {Uncertainty {Estimation} and {Out}-of-{Distribution} {Detection} for {Counterfactual} {Explanations}}, +} + +@InProceedings{casanueva2020efficient, + author = {Casanueva, IΓ±igo and Temčinas, Tadas and Gerz, Daniela and Henderson, Matthew and VuliΔ‡, Ivan}, + booktitle = {Proceedings of the 2nd {Workshop} on {Natural} {Language} {Processing} for {Conversational} {AI}}, + date = {2020-07}, + title = {Efficient {Intent} {Detection} with {Dual} {Sentence} {Encoders}}, + doi = {10.18653/v1/2020.nlp4convai-1.5}, + location = {Online}, + pages = {38--45}, + publisher = {Association for Computational Linguistics}, + url = {https://aclanthology.org/2020.nlp4convai-1.5}, + urldate = {2023-06-27}, + abstract = {Building conversational systems in new domains and with added functionality requires resource-efficient models that work under low-data regimes (i.e., in few-shot setups). Motivated by these requirements, we introduce intent detection methods backed by pretrained dual sentence encoders such as USE and ConveRT. We demonstrate the usefulness and wide applicability of the proposed intent detectors, showing that: 1) they outperform intent detectors based on fine-tuning the full BERT-Large model or using BERT as a fixed black-box encoder on three diverse intent detection data sets; 2) the gains are especially pronounced in few-shot setups (i.e., with only 10 or 30 annotated examples per intent); 3) our intent detectors can be trained in a matter of minutes on a single CPU; and 4) they are stable across different hyperparameter settings. 
In hope of facilitating and democratizing research focused on intention detection, we release our code, as well as a new challenging single-domain intent detection dataset comprising 13,083 annotated examples over 77 intents.}, + file = {Full Text PDF:https\://aclanthology.org/2020.nlp4convai-1.5.pdf:application/pdf}, +} + +@TechReport{liu2019roberta, + author = {Liu, Yinhan and Ott, Myle and Goyal, Naman and Du, Jingfei and Joshi, Mandar and Chen, Danqi and Levy, Omer and Lewis, Mike and Zettlemoyer, Luke and Stoyanov, Veselin}, + date = {2019-07}, + institution = {arXiv}, + title = {{RoBERTa}: {A} {Robustly} {Optimized} {BERT} {Pretraining} {Approach}}, + doi = {10.48550/arXiv.1907.11692}, + note = {arXiv:1907.11692 [cs] type: article}, + url = {http://arxiv.org/abs/1907.11692}, + urldate = {2023-06-27}, + abstract = {Language model pretraining has led to significant performance gains but careful comparison between different approaches is challenging. Training is computationally expensive, often done on private datasets of different sizes, and, as we will show, hyperparameter choices have significant impact on the final results. We present a replication study of BERT pretraining (Devlin et al., 2019) that carefully measures the impact of many key hyperparameters and training data size. We find that BERT was significantly undertrained, and can match or exceed the performance of every model published after it. Our best model achieves state-of-the-art results on GLUE, RACE and SQuAD. These results highlight the importance of previously overlooked design choices, and raise questions about the source of recently reported improvements. We release our models and code.}, + file = {arXiv Fulltext PDF:https\://arxiv.org/pdf/1907.11692.pdf:application/pdf}, + keywords = {Computer Science - Computation and Language}, + shorttitle = {{RoBERTa}}, +} + +@Comment{jabref-meta: databaseType:biblatex;} diff --git a/dev/ing_experiment/notebook.qmd b/dev/ing_experiment/notebook.qmd new file mode 100644 index 0000000..35ff961 --- /dev/null +++ b/dev/ing_experiment/notebook.qmd @@ -0,0 +1,360 @@ +```@meta +CurrentModule = ConformalPrediction +``` + +```{julia} +#| echo: false +include("$(pwd())/docs/setup_docs.jl") +eval(setup_docs) +``` + +# How to Conformalize a Transformer Language Model + +Large Language Models are all the buzz right now. They are used for a variety of tasks, including text classification, question answering, and text generation. In this tutorial, we will show how to conformalize a transformer language model for text classification. We will use the [Banking77](https://arxiv.org/abs/2005.00796) dataset, which consists of 13,083 queries from 77 intents. We will use the [DistilRoBERTa](https://huggingface.co/mrm8488/distilroberta-finetuned-banking77) model, which is a distilled version of [RoBERTa](https://arxiv.org/abs/1907.11692) trained on the Banking77 dataset. 
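+
+All of the code below assumes a working environment with the relevant packages loaded. As a minimal, hypothetical sketch (the package and module names listed here are assumptions; the notebook's own setup may differ):
+
+```{julia}
+#| eval: false
+# Hypothetical environment sketch (not executed):
+using CSV, DataFrames                  # data loading
+using MLJBase                          # fit/predict machinery and machines
+using Flux                             # softmax, onecold
+using Transformers                     # HuggingFace models and tokenizers
+using Transformers.HuggingFace         # the hgf"..." string macro
+using Transformers.TextEncoders        # encode
+using ConformalPrediction              # conformal_model and friends
+```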
+
+## Data
+
+```{julia}
+# Get labels:
+df_labels = CSV.read("dev/artifacts/data/banking77/labels.csv", DataFrame, drop=[1])
+labels = df_labels[:,1]
+
+# Get data:
+df_train = CSV.read("dev/artifacts/data/banking77/train.csv", DataFrame, drop=[1])
+df_cal = CSV.read("dev/artifacts/data/banking77/calibration.csv", DataFrame, drop=[1])
+df_full_train = vcat(df_train, df_cal)
+train_ratio = round(nrow(df_train)/nrow(df_full_train), digits=2)
+df_test = CSV.read("dev/artifacts/data/banking77/test.csv", DataFrame, drop=[1])
+
+# Preprocess data:
+queries_train, y_train = collect(df_train.text), categorical(df_train.labels .+ 1)
+queries_cal, y_cal = collect(df_cal.text), categorical(df_cal.labels .+ 1)
+queries, y = collect(df_full_train.text), categorical(df_full_train.labels .+ 1)
+queries_test, y_test = collect(df_test.text), categorical(df_test.labels .+ 1)
+```
+
+## HuggingFace Model
+
+```{julia}
+tkr = hgf"mrm8488/distilroberta-finetuned-banking77:tokenizer"
+mod = hgf"mrm8488/distilroberta-finetuned-banking77:ForSequenceClassification"
+```
+
+```{julia}
+query = [
+    "What is the base of the exchange rates?",
+    "Exchange rates for the US dollar.",
+]
+a = encode(tkr, query)          # tokenize both queries
+b = mod.model(a)                # transformer forward pass
+c = mod.cls(b.hidden_state)     # classification head
+d = softmax(c.logit)            # class probabilities
+[labels[i] for i in Flux.onecold(d)]
+```
+
+## `MLJ` Models
+
+### Full Model
+
+To make the transformer compatible with `ConformalPrediction.jl`, we wrap the tokenizer and model in a custom `MLJBase.Probabilistic` type:
+
+```{julia}
+struct IntentClassifier <: MLJBase.Probabilistic
+    tkr::TextEncoders.AbstractTransformerTextEncoder
+    mod::HuggingFace.HGFRobertaForSequenceClassification
+end
+
+function IntentClassifier(;
+    tokenizer::TextEncoders.AbstractTransformerTextEncoder,
+    model::HuggingFace.HGFRobertaForSequenceClassification,
+)
+    IntentClassifier(tokenizer, model)
+end
+
+function get_hidden_state(clf::IntentClassifier, query::Union{AbstractString, Vector{<:AbstractString}})
+    token = encode(clf.tkr, query)
+    hidden_state = clf.mod.model(token).hidden_state
+    return hidden_state
+end
+
+# This doesn't actually retrain the model, but it retrieves the classifier object
+function MLJBase.fit(clf::IntentClassifier, verbosity, X, y)
+    cache = nothing
+    report = nothing
+    fitresult = (clf = clf.mod.cls, labels = levels(y))
+    return fitresult, cache, report
+end
+
+function MLJBase.predict(clf::IntentClassifier, fitresult, Xnew)
+    output = fitresult.clf(get_hidden_state(clf, Xnew))
+    pΜ‚ = UnivariateFinite(fitresult.labels, softmax(output.logit)', pool=missing)
+    return pΜ‚
+end
+
+MLJBase.target_scitype(clf::IntentClassifier) = AbstractVector{<:Finite}
+
+MLJBase.predict_mode(clf::IntentClassifier, fitresult, Xnew) = mode.(MLJBase.predict(clf, fitresult, Xnew))
+```
+
+```{julia}
+clf = IntentClassifier(tkr, mod)
+top_n = 1000
+fitresult, _, _ = fit(clf, 1, nothing, y_test[1:top_n])
+@time yΜ‚ = predict(clf, fitresult, queries_test[1:top_n]);
+```
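+
+Since `predict_mode` is defined above, a quick sanity check one might add here is to compare point predictions against the held-out labels. This is a hypothetical sketch, not part of the original pipeline, and assumes `Statistics.mean` is in scope:
+
+```{julia}
+#| eval: false
+# Hypothetical sanity check: top-1 accuracy of the wrapped classifier
+# on the same subsample used above.
+yΜ‚_mode = predict_mode(clf, fitresult, queries_test[1:top_n])
+println("Top-1 accuracy: ", mean(yΜ‚_mode .== y_test[1:top_n]))
+```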
+
+### Omniscient Model
+
+The omniscient model skips the transformer altogether: it simply wraps pre-computed softmax outputs in a `UnivariateFinite` distribution, which makes calibration and evaluation cheap.
+
+```{julia}
+struct OmniscientClassifier <: MLJBase.Probabilistic end
+
+# This doesn't actually retrain anything, it just retrieves the class labels
+function MLJBase.fit(clf::OmniscientClassifier, verbosity, X, y)
+    cache = nothing
+    report = nothing
+    fitresult = (labels = levels(y),)
+    return fitresult, cache, report
+end
+
+function MLJBase.predict(clf::OmniscientClassifier, fitresult, pΜ‚)
+    pΜ‚ = UnivariateFinite(fitresult.labels, pΜ‚, pool=missing)
+    return pΜ‚
+end
+
+MLJBase.target_scitype(clf::OmniscientClassifier) = AbstractVector{<:Finite}
+
+MLJBase.predict_mode(clf::OmniscientClassifier, fitresult, Xnew) = mode.(MLJBase.predict(clf, fitresult, Xnew))
+```
+
+```{julia}
+# Get predictions:
+pΜ‚_train = Matrix(CSV.read("dev/artifacts/data/banking77/train_softmax.csv", DataFrame, header=false))
+pΜ‚_cal = Matrix(CSV.read("dev/artifacts/data/banking77/calibration_softmax.csv", DataFrame, header=false))
+pΜ‚_full_train = vcat(pΜ‚_train, pΜ‚_cal)
+pΜ‚_test = Matrix(CSV.read("dev/artifacts/data/banking77/test_softmax.csv", DataFrame, header=false))
+
+clf_omni = OmniscientClassifier()
+top_n = 1000
+fitresult, _, _ = fit(clf_omni, 1, nothing, y_test[1:top_n])
+@time yΜ‚ = predict(clf_omni, fitresult, pΜ‚_full_train);
+```
+
+## Conformal Prediction
+
+```{julia}
+cov = 0.95
+```
+
+### Simple Inductive Conformal Prediction
+
+```{julia}
+conf_model = conformal_model(clf; coverage=cov, method=:simple_inductive, train_ratio=train_ratio)
+mach = machine(conf_model, queries, y)
+@time fit!(mach)
+Serialization.serialize("dev/artifacts/models/banking77/simple_inductive.jls", mach)
+```
+
+### Adaptive Inductive Conformal Prediction
+
+```{julia}
+conf_model = conformal_model(clf; coverage=cov, method=:adaptive_inductive, train_ratio=train_ratio)
+mach = machine(conf_model, queries, y)
+@time fit!(mach)
+Serialization.serialize("dev/artifacts/models/banking77/adaptive_inductive.jls", mach)
+```
+
+## Evaluation
+
+### RoBERTa
+
+```{julia}
+# Get all test predictions:
+using ConformalPrediction: reformat_mlj_prediction
+pΜ‚_test = reformat_mlj_prediction(
+    predict(mach.model.model, mach.fitresult, MMI.reformat(mach.model.model, queries_test)...),
+)
+Serialization.serialize("dev/artifacts/results/banking77/roberta_cp.jls", pΜ‚_test)
+```
+
+```{julia}
+# Helper functions:
+using ConformalPrediction: SimpleInductiveClassifier, AdaptiveInductiveClassifier
+
+# Simple Inductive:
+function MLJBase.predict(conf_model::SimpleInductiveClassifier, fitresult, pΜ‚; cov=0.9)
+    v = conf_model.scores[:calibration]
+    n = length(v)
+    q_level = ceil((n+1)*(cov))/n
+    qΜ‚ = StatsBase.quantile(v, q_level)
+    pΜ‚ = map(pΜ‚) do pp
+        L = pΜ‚.decoder.classes
+        probas = pdf.(pp, L)
+        is_in_set = 1.0 .- probas .<= qΜ‚
+        if any(is_in_set)
+            pp = UnivariateFinite(L[is_in_set], probas[is_in_set])
+        else
+            pp = missing
+        end
+        return pp
+    end
+    return pΜ‚
+end
+
+# Adaptive Inductive:
+function MLJBase.predict(conf_model::AdaptiveInductiveClassifier, fitresult, pΜ‚; cov=0.9)
+    v = conf_model.scores[:calibration]
+    n = length(v)
+    q_level = ceil((n+1)*(cov))/n
+    qΜ‚ = StatsBase.quantile(v, q_level)
+    pΜ‚ = map(pΜ‚) do pp
+        L = pΜ‚.decoder.classes
+        probas = pdf.(pp, L)
+        Ξ  = sortperm(.-probas) # rank in descending order
+        # one past the first class at which cumulative probability exceeds qΜ‚ (capped at the number of classes):
+        k = min(findall(cumsum(probas[Ξ ]) .> qΜ‚)[1] + 1, length(probas))
+        pp = UnivariateFinite(L[Ξ ][1:k], probas[Ξ ][1:k])
+        return pp
+    end
+    return pΜ‚
+end
+```
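+
+In both helpers, the cut-off `qΜ‚` is the standard split conformal quantile of the calibration scores: for `n` calibration scores $v$ and target coverage `cov`,
+
+$$
+\hat{q} = \mathrm{Quantile}\left(v;\; \frac{\lceil (n+1)\,\mathrm{cov} \rceil}{n}\right),
+$$
+
+which is what `q_level = ceil((n+1)*(cov))/n` computes above: the usual finite-sample correction for marginal coverage of at least `cov`.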
+
+```{julia}
+using ConformalPrediction: emp_coverage, size_stratified_coverage, set_size
+function evaluation_plots(
+    mach::Machine, pΜ‚, y;
+    cov_rates=0.01:0.01:0.99, height=300,
+    plot_ec=true,
+    plot_ssc=true,
+    plot_avg_size=true,
+    margin=5mm,
+    dpi=300
+)
+
+    conf_model = mach.model
+    fitresult = mach.fitresult
+    ec = []
+    ssc = []
+    avg_size = []
+
+    # Compute metrics:
+    for cov in cov_rates
+        yΜ‚ = predict(conf_model, fitresult, pΜ‚; cov=cov)
+        !plot_ec || push!(ec, emp_coverage(yΜ‚, y))
+        !plot_ssc || push!(ssc, size_stratified_coverage(yΜ‚, y))
+        !plot_avg_size || push!(avg_size, mean(set_size.(yΜ‚)))
+    end
+
+    # Plot metrics:
+    plts = []
+    if plot_ec
+        plt = plot([0,1],[0,1],label="", color=:black)
+        scatter!(plt, cov_rates, ec, label="", xlabel="Coverage", ylabel="Observed", title="Empirical Coverage")
+        push!(plts, plt)
+    end
+    if plot_ssc
+        plt = plot([0,1],[0,1],label="", color=:black)
+        scatter!(plt, cov_rates, ssc, label="", xlabel="Coverage", ylabel="Observed", title="Size-Stratified Coverage")
+        push!(plts, plt)
+    end
+    !plot_avg_size || push!(plts, scatter(cov_rates, avg_size, label="", xlabel="Coverage", ylabel="Size", title="Average Set Size"))
+
+    return plot(plts..., layout=(1,length(plts)), size=(length(plts)*height,height), margin=margin, dpi=dpi)
+
+end
+```
+
+```{julia}
+pΜ‚_test = Serialization.deserialize("dev/artifacts/results/banking77/roberta_cp.jls")
+```
+
+#### Simple Inductive Conformal Prediction
+
+```{julia}
+mach = Serialization.deserialize("dev/artifacts/models/banking77/simple_inductive.jls")
+plt = evaluation_plots(mach, pΜ‚_test, y_test)
+plt
+savefig(plt, "dev/artifacts/figures/banking77/roberta_simple_inductive.png")
+```
+
+#### Adaptive Inductive Conformal Prediction
+
+```{julia}
+mach = Serialization.deserialize("dev/artifacts/models/banking77/adaptive_inductive.jls")
+plt = evaluation_plots(mach, pΜ‚_test, y_test; plot_ssc=true, plot_avg_size=true)
+plt
+savefig(plt, "dev/artifacts/figures/banking77/roberta_adaptive_inductive.png")
+```
+
+### BERT Banking77
+
+#### Simple Inductive Conformal Prediction
+
+```{julia}
+conf_model = conformal_model(clf_omni; coverage=cov, method=:simple_inductive, train_ratio=train_ratio)
+mach = machine(conf_model, pΜ‚_full_train, y)
+@time fit!(mach)
+pΜ‚_test = Matrix(CSV.read("dev/artifacts/data/banking77/test_softmax.csv", DataFrame, header=false))
+pΜ‚_test = predict(mach.model.model, mach.fitresult, MMI.reformat(mach.model.model, pΜ‚_test)...)
+plt = evaluation_plots(mach, pΜ‚_test, y_test; plot_ssc=true, plot_avg_size=true)
+savefig(plt, "dev/artifacts/figures/banking77/bert_simple_inductive.png")
+```
+
+#### Adaptive Inductive Conformal Prediction
+
+```{julia}
+conf_model = conformal_model(clf_omni; coverage=cov, method=:adaptive_inductive, train_ratio=train_ratio)
+mach = machine(conf_model, pΜ‚_full_train, y)
+@time fit!(mach)
+pΜ‚_test = Matrix(CSV.read("dev/artifacts/data/banking77/test_softmax.csv", DataFrame, header=false))
+pΜ‚_test = predict(mach.model.model, mach.fitresult, MMI.reformat(mach.model.model, pΜ‚_test)...)
+plt = evaluation_plots(mach, pΜ‚_test, y_test; plot_ssc=true, plot_avg_size=true)
+savefig(plt, "dev/artifacts/figures/banking77/bert_adaptive_inductive.png")
+```
+
+### DFCX
+
+```{julia}
+
+```
+
+## Demo
+
+```{julia}
+mach = Serialization.deserialize("dev/artifacts/models/banking77/simple_inductive.jls")
+
+function prediction_set(mach, query::String)
+    pΜ‚ = predict(mach, query)[1]
+    probs = pdf.(pΜ‚, collect(1:77))
+    in_set = findall(probs .!= 0)
+    labels_in_set = labels[in_set]
+    probs_in_set = probs[in_set]
+    _order = sortperm(-probs_in_set)
+    plt = UnicodePlots.barplot(labels_in_set[_order], probs_in_set[_order], title="Possible Intents")
+    return labels_in_set, plt
+end
+
+function conformal_chatbot()
+    println("πŸ‘‹ Hi, I'm Julia, your conformal chatbot. I'm here to help you with your banking query. Ask me anything or type 'exit' to exit ...\n")
+    completed = false
+    queries = ""
+    while !completed
+        query = readline()
+        queries = queries * "," * query
+        labels, plt = prediction_set(mach, queries)
+        if length(labels) > 1
+            println("πŸ€” Hmmm ... I can think of several options here. If any of these applies, simply type the corresponding number (e.g. '1' for the first option). 
Otherwise, can you refine your question, please?\n") + println(plt) + else + println("πŸ₯³ I think you mean $(labels[1]). Correct?") + end + + # Exit: + if query == "exit" + println("πŸ‘‹ Bye!") + break + end + if query ∈ string.(collect(1:77)) + println("πŸ‘ Great! You've chosen '$(labels[parse(Int64, query)])'. I'm glad I could help you. Have a nice day!") + completed = true + end + end +end +``` \ No newline at end of file diff --git a/dev/juliacon-2023/presentation.qmd b/dev/juliacon-2023/presentation.qmd new file mode 100644 index 0000000..e69de29 diff --git a/docs/Manifest.toml b/docs/Manifest.toml index b0a58c2..903c672 100644 --- a/docs/Manifest.toml +++ b/docs/Manifest.toml @@ -1,8 +1,8 @@ # This file is machine-generated - editing it directly is not advised -julia_version = "1.8.5" +julia_version = "1.9.1" manifest_format = "2.0" -project_hash = "21ec928789df9b31a4467d156f527d23ae504347" +project_hash = "347af1ad749e1c928f82064592bd19f36512aeff" [[deps.ANSIColoredPrinters]] git-tree-sha1 = "574baf8110975760d391c710b6341da1afa48d8c" @@ -16,10 +16,14 @@ uuid = "da404889-ca92-49ff-9e8b-0aa6b4d38dc8" version = "1.4.1" [[deps.AbstractFFTs]] -deps = ["ChainRulesCore", "LinearAlgebra"] -git-tree-sha1 = "16b6dbc4cf7caee4e1e75c49485ec67b667098a0" +deps = ["LinearAlgebra"] +git-tree-sha1 = "8bc0aaec0ca548eb6cf5f0d7d16351650c1ee956" uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c" -version = "1.3.1" +version = "1.3.2" +weakdeps = ["ChainRulesCore"] + + [deps.AbstractFFTs.extensions] + AbstractFFTsChainRulesCoreExt = "ChainRulesCore" [[deps.AbstractPlutoDingetjes]] deps = ["Pkg"] @@ -32,17 +36,15 @@ git-tree-sha1 = "faa260e4cb5aba097a73fab382dd4b5819d8ec8c" uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" version = "0.4.4" -[[deps.Accessors]] -deps = ["Compat", "CompositionsBase", "ConstructionBase", "Dates", "InverseFunctions", "LinearAlgebra", "MacroTools", "Requires", "StaticArrays", "Test"] -git-tree-sha1 = "beabc31fa319f9de4d16372bff31b4801e43d32c" -uuid = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697" -version = "0.1.28" - [[deps.Adapt]] deps = ["LinearAlgebra", "Requires"] -git-tree-sha1 = "cc37d689f599e8df4f464b2fa3870ff7db7492ef" +git-tree-sha1 = "76289dc51920fdc6e0013c872ba9551d54961c24" uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" -version = "3.6.1" +version = "3.6.2" +weakdeps = ["StaticArrays"] + + [deps.Adapt.extensions] + AdaptStaticArraysExt = "StaticArrays" [[deps.ArgCheck]] git-tree-sha1 = "a3a402a35a2f7e0b87828ccabbd5ebfbebe356b4" @@ -72,10 +74,26 @@ uuid = "68821587-b530-5797-8361-c406ea357684" version = "3.5.1+1" [[deps.ArrayInterface]] -deps = ["Adapt", "LinearAlgebra", "Requires", "SnoopPrecompile", "SparseArrays", "SuiteSparse"] -git-tree-sha1 = "38911c7737e123b28182d89027f4216cfc8a9da7" +deps = ["Adapt", "LinearAlgebra", "Requires", "SparseArrays", "SuiteSparse"] +git-tree-sha1 = "f83ec24f76d4c8f525099b2ac475fc098138ec31" uuid = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9" -version = "7.4.3" +version = "7.4.11" + + [deps.ArrayInterface.extensions] + ArrayInterfaceBandedMatricesExt = "BandedMatrices" + ArrayInterfaceBlockBandedMatricesExt = "BlockBandedMatrices" + ArrayInterfaceCUDAExt = "CUDA" + ArrayInterfaceGPUArraysCoreExt = "GPUArraysCore" + ArrayInterfaceStaticArraysCoreExt = "StaticArraysCore" + ArrayInterfaceTrackerExt = "Tracker" + + [deps.ArrayInterface.weakdeps] + BandedMatrices = "aae01518-5342-5314-be14-df237901396f" + BlockBandedMatrices = "ffab5731-97b5-5995-9138-79e8c1846df0" + CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" + GPUArraysCore = 
"46192b85-c4d5-4398-a991-12ede77f4527" + StaticArraysCore = "1e83bf80-4336-4d27-bf5d-d5a4f845583c" + Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c" [[deps.ArrayInterfaceCore]] deps = ["LinearAlgebra", "SnoopPrecompile", "SparseArrays", "SuiteSparse"] @@ -116,10 +134,24 @@ uuid = "fbb218c0-5317-5bc6-957e-2ee96dd4b1f0" version = "0.3.7" [[deps.BangBang]] -deps = ["Compat", "ConstructionBase", "Future", "InitialValues", "LinearAlgebra", "Requires", "Setfield", "Tables", "ZygoteRules"] -git-tree-sha1 = "7fe6d92c4f281cf4ca6f2fba0ce7b299742da7ca" +deps = ["Compat", "ConstructionBase", "InitialValues", "LinearAlgebra", "Requires", "Setfield", "Tables"] +git-tree-sha1 = "e28912ce94077686443433c2800104b061a827ed" uuid = "198e06fe-97b7-11e9-32a5-e1d131e6ad66" -version = "0.3.37" +version = "0.3.39" + + [deps.BangBang.extensions] + BangBangChainRulesCoreExt = "ChainRulesCore" + BangBangDataFramesExt = "DataFrames" + BangBangStaticArraysExt = "StaticArrays" + BangBangStructArraysExt = "StructArrays" + BangBangTypedTablesExt = "TypedTables" + + [deps.BangBang.weakdeps] + ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" + DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" + StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" + StructArrays = "09ab397b-f2b6-538f-b94a-2f83cf4a842a" + TypedTables = "9d95f2ec-7b3d-5a63-8d20-e2491e220bb9" [[deps.Base64]] uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" @@ -129,6 +161,12 @@ git-tree-sha1 = "aebf55e6d7795e02ca500a689d326ac979aaf89e" uuid = "9718e550-a3fa-408a-8086-8db961cd8217" version = "0.1.1" +[[deps.BetaML]] +deps = ["AbstractTrees", "CategoricalArrays", "Combinatorics", "DelimitedFiles", "Distributions", "DocStringExtensions", "ForceImport", "JLD2", "LinearAlgebra", "LoopVectorization", "MLJModelInterface", "PDMats", "PrecompileTools", "Printf", "ProgressMeter", "Random", "Reexport", "StableRNGs", "StaticArrays", "Statistics", "StatsBase", "Test", "Zygote"] +git-tree-sha1 = "bca5bbed67662e6018215d6e46419e3bbeba45fd" +uuid = "024491cd-cc6b-443e-8034-08ea7eb7db2b" +version = "0.10.1" + [[deps.BitFlags]] git-tree-sha1 = "43b1a4a8f797c1cddadf60499a8a077d4af2cd2d" uuid = "d1d4a3ce-64b1-5f1a-9ba4-7e7e69966f35" @@ -141,9 +179,15 @@ uuid = "62783981-4cbd-42fc-bca8-16325de8dc4b" version = "0.1.5" [[deps.BufferedStreams]] -git-tree-sha1 = "bb065b14d7f941b8617bc323063dbe79f55d16ea" +git-tree-sha1 = "5bcb75a2979e40b29eb250cb26daab67aa8f97f5" uuid = "e1450e63-4bb3-523b-b2a4-4ffa8c0fd77d" -version = "1.1.0" +version = "1.2.0" + +[[deps.BytePairEncoding]] +deps = ["StructWalk", "TextEncodeBase", "Unicode"] +git-tree-sha1 = "40ee2783de5efc5b478e1bb828b750ad8ce1714f" +uuid = "a4280ba5-8788-555a-8ca8-4a8c3d966a71" +version = "0.3.1" [[deps.Bzip2_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] @@ -157,22 +201,22 @@ uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82" version = "0.4.2" [[deps.CPUSummary]] -deps = ["CpuId", "IfElse", "Static"] -git-tree-sha1 = "2c144ddb46b552f72d7eafe7cc2f50746e41ea21" +deps = ["CpuId", "IfElse", "PrecompileTools", "Static"] +git-tree-sha1 = "89e0654ed8c7aebad6d5ad235d6242c2d737a928" uuid = "2a0fbf3d-bb9c-48f3-b0a9-814d99fd7ab9" -version = "0.2.2" +version = "0.2.3" [[deps.CSV]] -deps = ["CodecZlib", "Dates", "FilePathsBase", "InlineStrings", "Mmap", "Parsers", "PooledArrays", "SentinelArrays", "SnoopPrecompile", "Tables", "Unicode", "WeakRefStrings", "WorkerUtilities"] -git-tree-sha1 = "c700cce799b51c9045473de751e9319bdd1c6e94" +deps = ["CodecZlib", "Dates", "FilePathsBase", "InlineStrings", "Mmap", "Parsers", "PooledArrays", 
"PrecompileTools", "SentinelArrays", "Tables", "Unicode", "WeakRefStrings", "WorkerUtilities"] +git-tree-sha1 = "44dbf560808d49041989b8a96cae4cffbeb7966a" uuid = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b" -version = "0.10.9" +version = "0.10.11" [[deps.CUDA]] -deps = ["AbstractFFTs", "Adapt", "BFloat16s", "CEnum", "CUDA_Driver_jll", "CUDA_Runtime_Discovery", "CUDA_Runtime_jll", "CompilerSupportLibraries_jll", "ExprTools", "GPUArrays", "GPUCompiler", "KernelAbstractions", "LLVM", "LazyArtifacts", "Libdl", "LinearAlgebra", "Logging", "Preferences", "Printf", "Random", "Random123", "RandomNumbers", "Reexport", "Requires", "SparseArrays", "SpecialFunctions", "UnsafeAtomicsLLVM"] -git-tree-sha1 = "8547829ee0da896ce48a24b8d2f4bf929cf3e45e" +deps = ["AbstractFFTs", "Adapt", "BFloat16s", "CEnum", "CUDA_Driver_jll", "CUDA_Runtime_Discovery", "CUDA_Runtime_jll", "ExprTools", "GPUArrays", "GPUCompiler", "KernelAbstractions", "LLVM", "LazyArtifacts", "Libdl", "LinearAlgebra", "Logging", "Preferences", "Printf", "Random", "Random123", "RandomNumbers", "Reexport", "Requires", "SparseArrays", "SpecialFunctions", "UnsafeAtomicsLLVM"] +git-tree-sha1 = "35160ef0f03b14768abfd68b830f8e3940e8e0dc" uuid = "052768ef-5323-5732-b1bb-66c8b64840ba" -version = "4.1.4" +version = "4.4.0" [[deps.CUDA_Driver_jll]] deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl", "Pkg"] @@ -188,15 +232,15 @@ version = "0.2.2" [[deps.CUDA_Runtime_jll]] deps = ["Artifacts", "CUDA_Driver_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"] -git-tree-sha1 = "81eed046f28a0cdd0dc1f61d00a49061b7cc9433" +git-tree-sha1 = "5248d9c45712e51e27ba9b30eebec65658c6ce29" uuid = "76a88914-d11a-5bdc-97e0-2f5a05c973a2" -version = "0.5.0+2" +version = "0.6.0+0" [[deps.CUDNN_jll]] deps = ["Artifacts", "CUDA_Runtime_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"] -git-tree-sha1 = "2918fbffb50e3b7a0b9127617587afa76d4276e8" +git-tree-sha1 = "c30b29597102341a1ea4c2175c4acae9ae522c9d" uuid = "62b44479-cb7b-5706-934f-f13b2eb2e645" -version = "8.8.1+0" +version = "8.9.2+0" [[deps.Cairo_jll]] deps = ["Artifacts", "Bzip2_jll", "CompilerSupportLibraries_jll", "Fontconfig_jll", "FreeType2_jll", "Glib_jll", "JLLWrappers", "LZO_jll", "Libdl", "Pixman_jll", "Pkg", "Xorg_libXext_jll", "Xorg_libXrender_jll", "Zlib_jll", "libpng_jll"] @@ -218,45 +262,50 @@ version = "0.2.2" [[deps.CategoricalArrays]] deps = ["DataAPI", "Future", "Missings", "Printf", "Requires", "Statistics", "Unicode"] -git-tree-sha1 = "5084cc1a28976dd1642c9f337b28a3cb03e0f7d2" +git-tree-sha1 = "1568b28f91293458345dabba6a5ea3f183250a61" uuid = "324d7699-5711-5eae-9e2f-1d82baa6b597" -version = "0.10.7" +version = "0.10.8" +weakdeps = ["JSON", "RecipesBase", "SentinelArrays", "StructTypes"] + + [deps.CategoricalArrays.extensions] + CategoricalArraysJSONExt = "JSON" + CategoricalArraysRecipesBaseExt = "RecipesBase" + CategoricalArraysSentinelArraysExt = "SentinelArrays" + CategoricalArraysStructTypesExt = "StructTypes" [[deps.CategoricalDistributions]] -deps = ["CategoricalArrays", "Distributions", "Missings", "OrderedCollections", "Random", "ScientificTypes", "UnicodePlots"] +deps = ["CategoricalArrays", "Distributions", "Missings", "OrderedCollections", "Random", "ScientificTypes"] git-tree-sha1 = "da68989f027dcefa74d44a452c9e36af9730a70d" uuid = "af321ab8-2d2e-40a6-b165-3d674595d28e" version = "0.1.10" +weakdeps = ["UnicodePlots"] + + [deps.CategoricalDistributions.extensions] + UnivariateFiniteDisplayExt = "UnicodePlots" [[deps.ChainRules]] deps = ["Adapt", "ChainRulesCore", "Compat", 
"Distributed", "GPUArraysCore", "IrrationalConstants", "LinearAlgebra", "Random", "RealDot", "SparseArrays", "Statistics", "StructArrays"] -git-tree-sha1 = "7d20c2fb8ab838e41069398685e7b6b5f89ed85b" +git-tree-sha1 = "2afc496e94d15a1af5502625246d172361542133" uuid = "082447d4-558c-5d27-93f4-14fc19e9eca2" -version = "1.48.0" +version = "1.52.0" [[deps.ChainRulesCore]] deps = ["Compat", "LinearAlgebra", "SparseArrays"] -git-tree-sha1 = "c6d890a52d2c4d55d326439580c3b8d0875a77d9" +git-tree-sha1 = "e30f2f4e20f7f186dc36529910beaedc60cfa644" uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" -version = "1.15.7" - -[[deps.ChangesOfVariables]] -deps = ["ChainRulesCore", "LinearAlgebra", "Test"] -git-tree-sha1 = "485193efd2176b88e6622a39a246f8c5b600e74e" -uuid = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0" -version = "0.1.6" +version = "1.16.0" [[deps.Chemfiles]] deps = ["Chemfiles_jll", "DocStringExtensions"] -git-tree-sha1 = "9126d0271c337ca5ed02ba92f2dec087c4260d4a" +git-tree-sha1 = "6951fe6a535a07041122a3a6860a63a7a83e081e" uuid = "46823bd8-5fb3-5f92-9aa0-96921f3dd015" -version = "0.10.31" +version = "0.10.40" [[deps.Chemfiles_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "d4e54b053fc584e7a0f37e9d3a5c4500927b343a" +deps = ["Artifacts", "JLLWrappers", "Libdl"] +git-tree-sha1 = "f3743181e30d87c23d9c8ebd493b77f43d8f1890" uuid = "78a364fa-1a3c-552a-b4bb-8fa0f9c1fcca" -version = "0.10.3+0" +version = "0.10.4+0" [[deps.CloseOpenIntervals]] deps = ["Static", "StaticArrayInterface"] @@ -266,9 +315,9 @@ version = "0.1.12" [[deps.Clustering]] deps = ["Distances", "LinearAlgebra", "NearestNeighbors", "Printf", "Random", "SparseArrays", "Statistics", "StatsBase"] -git-tree-sha1 = "7ebbd653f74504447f1c33b91cd706a69a1b189f" +git-tree-sha1 = "42fe66dbc8f1d09a44aa87f18d26926d06a35f84" uuid = "aaaa29a8-35af-508c-8bc3-b662a17a0fe5" -version = "0.14.4" +version = "0.15.3" [[deps.CodecZlib]] deps = ["TranscodingStreams", "Zlib_jll"] @@ -277,10 +326,10 @@ uuid = "944b1d66-785c-5afd-91f1-9de20f533193" version = "0.7.1" [[deps.ColorSchemes]] -deps = ["ColorTypes", "ColorVectorSpace", "Colors", "FixedPointNumbers", "Random", "SnoopPrecompile"] -git-tree-sha1 = "aa3edc8f8dea6cbfa176ee12f7c2fc82f0608ed3" +deps = ["ColorTypes", "ColorVectorSpace", "Colors", "FixedPointNumbers", "PrecompileTools", "Random"] +git-tree-sha1 = "be6ab11021cd29f0344d5c4357b163af05a48cba" uuid = "35d6a980-a343-548e-a6ea-1d62b119f2f4" -version = "3.20.0" +version = "3.21.0" [[deps.ColorTypes]] deps = ["FixedPointNumbers", "Random"] @@ -312,37 +361,58 @@ uuid = "bbf7d656-a473-5ed7-a52c-81e309532950" version = "0.3.0" [[deps.Compat]] -deps = ["Dates", "LinearAlgebra", "UUIDs"] -git-tree-sha1 = "7a60c856b9fa189eb34f5f8a6f6b5529b7942957" +deps = ["UUIDs"] +git-tree-sha1 = "4e88377ae7ebeaf29a047aa1ee40826e0b708a5d" uuid = "34da2185-b29b-5c13-b0c7-acf172513d20" -version = "4.6.1" +version = "4.7.0" +weakdeps = ["Dates", "LinearAlgebra"] + + [deps.Compat.extensions] + CompatLinearAlgebraExt = "LinearAlgebra" [[deps.CompilerSupportLibraries_jll]] deps = ["Artifacts", "Libdl"] uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae" -version = "1.0.1+0" +version = "1.0.2+0" [[deps.CompositionsBase]] -git-tree-sha1 = "455419f7e328a1a2493cabc6428d79e951349769" +git-tree-sha1 = "802bb88cd69dfd1509f6670416bd4434015693ad" uuid = "a33af91c-f02d-484b-be07-31d278c5ca2b" -version = "0.1.1" +version = "0.1.2" + + [deps.CompositionsBase.extensions] + CompositionsBaseInverseFunctionsExt = "InverseFunctions" + + [deps.CompositionsBase.weakdeps] + 
InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112" [[deps.ComputationalResources]] git-tree-sha1 = "52cb3ec90e8a8bea0e62e275ba577ad0f74821f7" uuid = "ed09eef8-17a6-5b46-8889-db040fac31e3" version = "0.3.2" +[[deps.ConcurrentUtilities]] +deps = ["Serialization", "Sockets"] +git-tree-sha1 = "96d823b94ba8d187a6d8f0826e731195a74b90e9" +uuid = "f0e56b4a-5159-44fe-b623-3e5288b988bb" +version = "2.2.0" + [[deps.ConformalPrediction]] -deps = ["CategoricalArrays", "ChainRules", "Flux", "LinearAlgebra", "MLJBase", "MLJFlux", "MLJModelInterface", "NaturalSort", "Plots", "StatsBase"] +deps = ["CategoricalArrays", "ChainRules", "Flux", "LinearAlgebra", "MLJBase", "MLJEnsembles", "MLJFlux", "MLJModelInterface", "MLUtils", "NaturalSort", "Plots", "StatsBase"] path = ".." uuid = "98bfc277-1877-43dc-819b-a3e38c30242f" -version = "0.1.6" +version = "0.1.7" [[deps.ConstructionBase]] deps = ["LinearAlgebra"] -git-tree-sha1 = "89a9db8d28102b094992472d333674bd1a83ce2a" +git-tree-sha1 = "738fec4d684a9a6ee9598a8bfee305b26831f28c" uuid = "187b0558-2788-49d3-abe0-74a17ed4e7c9" -version = "1.5.1" +version = "1.5.2" +weakdeps = ["IntervalSets", "StaticArrays"] + + [deps.ConstructionBase.extensions] + ConstructionBaseIntervalSetsExt = "IntervalSets" + ConstructionBaseStaticArraysExt = "StaticArrays" [[deps.ContextVariablesX]] deps = ["Compat", "Logging", "UUIDs"] @@ -357,9 +427,9 @@ version = "0.6.2" [[deps.CoordinateTransformations]] deps = ["LinearAlgebra", "StaticArrays"] -git-tree-sha1 = "681ea870b918e7cff7111da58791d7f718067a19" +git-tree-sha1 = "f9d7112bfff8a19a3a4ea4e03a8e6a91fe8456bf" uuid = "150eb455-5306-5404-9cee-2592286d6298" -version = "0.6.2" +version = "0.6.3" [[deps.CpuId]] deps = ["Markdown"] @@ -378,15 +448,15 @@ uuid = "dc8bdbbb-1ca9-579f-8c36-e416f6a65cce" version = "1.0.2" [[deps.DataAPI]] -git-tree-sha1 = "e8119c1a33d267e16108be441a287a6981ba1630" +git-tree-sha1 = "8da84edb865b0b5b0100c0666a9bc9a0b71c553c" uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a" -version = "1.14.0" +version = "1.15.0" [[deps.DataDeps]] deps = ["HTTP", "Libdl", "Reexport", "SHA", "p7zip_jll"] -git-tree-sha1 = "bc0a264d3e7b3eeb0b6fc9f6481f970697f29805" +git-tree-sha1 = "6e8d74545d34528c30ccd3fa0f3c00f8ed49584c" uuid = "124859b0-ceae-595e-8997-d05f6a7a8dfe" -version = "0.7.10" +version = "0.7.11" [[deps.DataFrames]] deps = ["Compat", "DataAPI", "Future", "InlineStrings", "InvertedIndices", "IteratorInterfaceExtensions", "LinearAlgebra", "Markdown", "Missings", "PooledArrays", "PrettyTables", "Printf", "REPL", "Random", "Reexport", "SentinelArrays", "SnoopPrecompile", "SortingAlgorithms", "Statistics", "TableTraits", "Tables", "Unicode"] @@ -396,9 +466,9 @@ version = "1.5.0" [[deps.DataStructures]] deps = ["Compat", "InteractiveUtils", "OrderedCollections"] -git-tree-sha1 = "d1fff3a548102f48987a52a2e0d114fa97d730f0" +git-tree-sha1 = "cf25ccb972fec4e4817764d01c82386ae94f77b4" uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" -version = "0.18.13" +version = "0.18.14" [[deps.DataValueInterfaces]] git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6" @@ -422,13 +492,9 @@ version = "0.1.2" [[deps.DelimitedFiles]] deps = ["Mmap"] +git-tree-sha1 = "9e2f36d3c96a820c678f2f1f1782582fcf685bae" uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab" - -[[deps.DensityInterface]] -deps = ["InverseFunctions", "Test"] -git-tree-sha1 = "80c3e8639e3353e5d2912fb3a1916b8455e2494b" -uuid = "b429d917-457f-4dbc-8f4c-0cc954292b1d" -version = "0.4.0" +version = "1.9.1" [[deps.DiffResults]] deps = ["StaticArraysCore"] @@ -438,9 +504,9 @@ version = "1.1.0" 
[[deps.DiffRules]] deps = ["IrrationalConstants", "LogExpFunctions", "NaNMath", "Random", "SpecialFunctions"] -git-tree-sha1 = "a4ad7ef19d2cdc2eff57abbbe68032b1cd0bd8f8" +git-tree-sha1 = "23163d55f885173722d1e4cf0f6110cdbaf7e272" uuid = "b552c78f-8df3-52c6-915a-8e097449b14b" -version = "1.13.0" +version = "1.15.1" [[deps.Distances]] deps = ["LinearAlgebra", "SparseArrays", "Statistics", "StatsAPI"] @@ -453,10 +519,18 @@ deps = ["Random", "Serialization", "Sockets"] uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b" [[deps.Distributions]] -deps = ["ChainRulesCore", "DensityInterface", "FillArrays", "LinearAlgebra", "PDMats", "Printf", "QuadGK", "Random", "SparseArrays", "SpecialFunctions", "Statistics", "StatsBase", "StatsFuns", "Test"] -git-tree-sha1 = "13027f188d26206b9e7b863036f87d2f2e7d013a" +deps = ["FillArrays", "LinearAlgebra", "PDMats", "Printf", "QuadGK", "Random", "SparseArrays", "SpecialFunctions", "Statistics", "StatsAPI", "StatsBase", "StatsFuns", "Test"] +git-tree-sha1 = "e76a3281de2719d7c81ed62c6ea7057380c87b1d" uuid = "31c24e10-a181-5473-b8eb-7969acd0382f" -version = "0.25.87" +version = "0.25.98" + + [deps.Distributions.extensions] + DistributionsChainRulesCoreExt = "ChainRulesCore" + DistributionsDensityInterfaceExt = "DensityInterface" + + [deps.Distributions.weakdeps] + ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" + DensityInterface = "b429d917-457f-4dbc-8f4c-0cc954292b1d" [[deps.DocStringExtensions]] deps = ["LibGit2"] @@ -466,9 +540,15 @@ version = "0.9.3" [[deps.Documenter]] deps = ["ANSIColoredPrinters", "Base64", "Dates", "DocStringExtensions", "IOCapture", "InteractiveUtils", "JSON", "LibGit2", "Logging", "Markdown", "REPL", "Test", "Unicode"] -git-tree-sha1 = "58fea7c536acd71f3eef6be3b21c0df5f3df88fd" +git-tree-sha1 = "39fd748a73dce4c05a9655475e437170d8fb1b67" uuid = "e30172f5-a6a5-5a46-863b-614d45cd2de4" -version = "0.27.24" +version = "0.27.25" + +[[deps.DoubleArrayTries]] +deps = ["OffsetArrays", "Preferences", "StringViews"] +git-tree-sha1 = "9667af23bda5ce51bad3dd759812c398a58d8b9d" +uuid = "abbaa0e5-f788-499c-92af-c35ff4258c82" +version = "0.1.0" [[deps.Downloads]] deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"] @@ -494,16 +574,22 @@ uuid = "792122b4-ca99-40de-a6bc-6742525f08b6" version = "0.3.0" [[deps.EvoTrees]] -deps = ["BSON", "CUDA", "CategoricalArrays", "Distributions", "LoopVectorization", "MLJModelInterface", "NetworkLayout", "Random", "RecipesBase", "Statistics", "StatsBase", "Tables"] -git-tree-sha1 = "38f8075dc39ddc7044ee7ae0cae08e1dacd86bd6" +deps = ["BSON", "CUDA", "CategoricalArrays", "Distributions", "MLJModelInterface", "NetworkLayout", "Random", "RecipesBase", "Statistics", "StatsBase", "Tables"] +git-tree-sha1 = "1b63fdc0acad47c3203398171c138835c1c40d69" uuid = "f6006082-12f8-11e9-0c9c-0d5d367ab1e5" -version = "0.14.8" +version = "0.15.0" + +[[deps.ExceptionUnwrapping]] +deps = ["Test"] +git-tree-sha1 = "e90caa41f5a86296e014e148ee061bd6c3edec96" +uuid = "460bff9d-24e4-43bc-9d9f-a8973cb893f4" +version = "0.1.9" [[deps.Expat_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "bad72f730e9e91c08d9427d5e8db95478a3c323d" +deps = ["Artifacts", "JLLWrappers", "Libdl"] +git-tree-sha1 = "4558ab818dcceaab612d1bb8c19cee87eda2b83c" uuid = "2e619515-83b5-522b-bb60-26c02a35a201" -version = "2.4.8+0" +version = "2.5.0+0" [[deps.ExprTools]] git-tree-sha1 = "c1d06d129da9f55715c6c212866f5b1bddc5fa00" @@ -535,9 +621,9 @@ version = "0.3.2" [[deps.FFTW]] deps = ["AbstractFFTs", "FFTW_jll", "LinearAlgebra", 
"MKL_jll", "Preferences", "Reexport"] -git-tree-sha1 = "f9818144ce7c8c41edf5c4c179c684d92aa4d9fe" +git-tree-sha1 = "b4fbdd20c889804969571cc589900803edda16b7" uuid = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341" -version = "1.6.0" +version = "1.7.1" [[deps.FFTW_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] @@ -557,11 +643,17 @@ git-tree-sha1 = "656f7a6859be8673bf1f35da5670246b923964f7" uuid = "b9860ae5-e623-471e-878b-f6a53c775ea6" version = "0.1.1" +[[deps.Fetch]] +deps = ["Base64", "HTTP", "JSON3", "Random", "StructTypes", "p7zip_jll"] +git-tree-sha1 = "781292162fd5bfe8d001210f9dddbb6baa509bf4" +uuid = "bb354801-46f6-40b6-9c3d-d42d7a74c775" +version = "0.1.4" + [[deps.FileIO]] deps = ["Pkg", "Requires", "UUIDs"] -git-tree-sha1 = "7be5f99f7d15578798f338f5433b6c432ea8037b" +git-tree-sha1 = "299dc33549f68299137e51e6d49a13b5b1da9673" uuid = "5789e2e9-d7fb-5bc7-8068-2c6fae9b9549" -version = "1.16.0" +version = "1.16.1" [[deps.FilePathsBase]] deps = ["Compat", "Dates", "Mmap", "Printf", "Test", "UUIDs"] @@ -574,15 +666,25 @@ uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee" [[deps.FillArrays]] deps = ["LinearAlgebra", "Random", "SparseArrays", "Statistics"] -git-tree-sha1 = "7072f1e3e5a8be51d525d64f63d3ec1287ff2790" +git-tree-sha1 = "2250347838b28a108d1967663cba57bfb3c02a58" uuid = "1a297f60-69ca-5386-bcde-b61e274b549b" -version = "0.13.11" +version = "1.3.0" [[deps.FiniteDiff]] -deps = ["ArrayInterface", "LinearAlgebra", "Requires", "Setfield", "SparseArrays", "StaticArrays"] -git-tree-sha1 = "03fcb1c42ec905d15b305359603888ec3e65f886" +deps = ["ArrayInterface", "LinearAlgebra", "Requires", "Setfield", "SparseArrays"] +git-tree-sha1 = "c6e4a1fbe73b31a3dea94b1da449503b8830c306" uuid = "6a86dc24-6348-571c-b903-95158fe2bd41" -version = "2.19.0" +version = "2.21.1" + + [deps.FiniteDiff.extensions] + FiniteDiffBandedMatricesExt = "BandedMatrices" + FiniteDiffBlockBandedMatricesExt = "BlockBandedMatrices" + FiniteDiffStaticArraysExt = "StaticArrays" + + [deps.FiniteDiff.weakdeps] + BandedMatrices = "aae01518-5342-5314-be14-df237901396f" + BlockBandedMatrices = "ffab5731-97b5-5995-9138-79e8c1846df0" + StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" [[deps.FixedPointNumbers]] deps = ["Statistics"] @@ -591,16 +693,18 @@ uuid = "53c48c17-4a7d-5ca2-90c5-79b7896eea93" version = "0.8.4" [[deps.Flux]] -deps = ["Adapt", "CUDA", "ChainRulesCore", "Functors", "LinearAlgebra", "MLUtils", "MacroTools", "NNlib", "NNlibCUDA", "OneHotArrays", "Optimisers", "Preferences", "ProgressLogging", "Random", "Reexport", "SparseArrays", "SpecialFunctions", "Statistics", "StatsBase", "Zygote", "cuDNN"] -git-tree-sha1 = "3f6f32ec0bfd80be0cb65907cf74ec796a632012" +deps = ["Adapt", "CUDA", "ChainRulesCore", "Functors", "LinearAlgebra", "MLUtils", "MacroTools", "NNlib", "NNlibCUDA", "OneHotArrays", "Optimisers", "Preferences", "ProgressLogging", "Random", "Reexport", "SparseArrays", "SpecialFunctions", "Statistics", "Zygote", "cuDNN"] +git-tree-sha1 = "3e2c3704c2173ab4b1935362384ca878b53d4c34" uuid = "587475ba-b771-5e3f-ad9e-33799f191a9c" -version = "0.13.15" +version = "0.13.17" -[[deps.FoldsThreads]] -deps = ["Accessors", "FunctionWrappers", "InitialValues", "SplittablesBase", "Transducers"] -git-tree-sha1 = "eb8e1989b9028f7e0985b4268dabe94682249025" -uuid = "9c68100b-dfe1-47cf-94c8-95104e173443" -version = "0.1.1" + [deps.Flux.extensions] + AMDGPUExt = "AMDGPU" + FluxMetalExt = "Metal" + + [deps.Flux.weakdeps] + AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e" + Metal = "dde4c033-4e86-420c-a63e-0dd931031962" 
[[deps.Fontconfig_jll]] deps = ["Artifacts", "Bzip2_jll", "Expat_jll", "FreeType2_jll", "JLLWrappers", "Libdl", "Libuuid_jll", "Pkg", "Zlib_jll"] @@ -608,6 +712,12 @@ git-tree-sha1 = "21efd19106a55620a188615da6d3d06cd7f6ee03" uuid = "a3f928ae-7b40-5064-980b-68af3947d34b" version = "2.13.93+0" +[[deps.ForceImport]] +deps = ["Test"] +git-tree-sha1 = "7ac07d5194360af910146abd33af89bb69541194" +uuid = "9dda63f9-cce7-5873-89fa-eccbb2fffcde" +version = "0.0.3" + [[deps.Formatting]] deps = ["Printf"] git-tree-sha1 = "8339d61043228fdd3eb658d86c926cb282ae72a8" @@ -615,10 +725,14 @@ uuid = "59287772-0a20-5a39-b81b-1366585eb4c0" version = "0.4.2" [[deps.ForwardDiff]] -deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions", "StaticArrays"] +deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions"] git-tree-sha1 = "00e252f4d706b3d55a8863432e742bf5717b498d" uuid = "f6369f11-7733-5829-9624-2563aa707210" version = "0.10.35" +weakdeps = ["StaticArrays"] + + [deps.ForwardDiff.extensions] + ForwardDiffStaticArraysExt = "StaticArrays" [[deps.FreeType2_jll]] deps = ["Artifacts", "Bzip2_jll", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"] @@ -632,10 +746,10 @@ git-tree-sha1 = "aa31987c2ba8704e23c6c8ba8a4f769d5d7e4f91" uuid = "559328eb-81f9-559d-9380-de523a88c83c" version = "1.0.10+0" -[[deps.FunctionWrappers]] -git-tree-sha1 = "d62485945ce5ae9c0c48f124a84998d755bae00e" -uuid = "069b7b12-0de2-55c6-9aab-29f3d0a68a2e" -version = "1.1.3" +[[deps.FuncPipelines]] +git-tree-sha1 = "6484a27c35ecc680948c7dc7435c97f12c2bfaf7" +uuid = "9ed96fbb-10b6-44d4-99a6-7e2a3dc8861b" +version = "0.2.3" [[deps.Functors]] deps = ["LinearAlgebra"] @@ -655,39 +769,39 @@ version = "3.3.8+0" [[deps.GLM]] deps = ["Distributions", "LinearAlgebra", "Printf", "Reexport", "SparseArrays", "SpecialFunctions", "Statistics", "StatsAPI", "StatsBase", "StatsFuns", "StatsModels"] -git-tree-sha1 = "cd3e314957dc11c4c905d54d1f5a65c979e4748a" +git-tree-sha1 = "97829cfda0df99ddaeaafb5b370d6cab87b7013e" uuid = "38e38edf-8417-5370-95a0-9cbb8c7f171a" -version = "1.8.2" +version = "1.8.3" [[deps.GPUArrays]] deps = ["Adapt", "GPUArraysCore", "LLVM", "LinearAlgebra", "Printf", "Random", "Reexport", "Serialization", "Statistics"] -git-tree-sha1 = "9ade6983c3dbbd492cf5729f865fe030d1541463" +git-tree-sha1 = "2e57b4a4f9cc15e85a24d603256fe08e527f48d1" uuid = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7" -version = "8.6.6" +version = "8.8.1" [[deps.GPUArraysCore]] deps = ["Adapt"] -git-tree-sha1 = "1cd7f0af1aa58abc02ea1d872953a97359cb87fa" +git-tree-sha1 = "2d6ca471a6c7b536127afccfa7564b5b39227fe0" uuid = "46192b85-c4d5-4398-a991-12ede77f4527" -version = "0.1.4" +version = "0.1.5" [[deps.GPUCompiler]] deps = ["ExprTools", "InteractiveUtils", "LLVM", "Libdl", "Logging", "Scratch", "TimerOutputs", "UUIDs"] -git-tree-sha1 = "e9a9173cd77e16509cdf9c1663fda19b22a518b7" +git-tree-sha1 = "d60b5fe7333b5fa41a0378ead6614f1ab51cf6d0" uuid = "61eb1bfa-7361-4325-ad38-22787b887f55" -version = "0.19.3" +version = "0.21.3" [[deps.GR]] deps = ["Artifacts", "Base64", "DelimitedFiles", "Downloads", "GR_jll", "HTTP", "JSON", "Libdl", "LinearAlgebra", "Pkg", "Preferences", "Printf", "Random", "Serialization", "Sockets", "TOML", "Tar", "Test", "UUIDs", "p7zip_jll"] -git-tree-sha1 = "0635807d28a496bb60bc15f465da0107fb29649c" +git-tree-sha1 = "8b8a2fd4536ece6e554168c21860b6820a8a83db" uuid = 
"28b8d3ca-fb5f-59d9-8090-bfdbd6d07a71" -version = "0.72.0" +version = "0.72.7" [[deps.GR_jll]] deps = ["Artifacts", "Bzip2_jll", "Cairo_jll", "FFMPEG_jll", "Fontconfig_jll", "GLFW_jll", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Libtiff_jll", "Pixman_jll", "Qt5Base_jll", "Zlib_jll", "libpng_jll"] -git-tree-sha1 = "99e248f643b052a77d2766fe1a16fb32b661afd4" +git-tree-sha1 = "19fad9cd9ae44847fe842558a744748084a722d1" uuid = "d2c73de3-f751-5644-a686-071e5b155ba9" -version = "0.72.0+0" +version = "0.72.7+0" [[deps.GZip]] deps = ["Libdl"] @@ -697,15 +811,15 @@ version = "0.5.1" [[deps.GeoInterface]] deps = ["Extents"] -git-tree-sha1 = "0eb6de0b312688f852f347171aba888658e29f20" +git-tree-sha1 = "bb198ff907228523f3dee1070ceee63b9359b6ab" uuid = "cf35fbd7-0cd7-5166-be24-54bfbe79505f" -version = "1.3.0" +version = "1.3.1" [[deps.GeometryBasics]] deps = ["EarCut_jll", "GeoInterface", "IterTools", "LinearAlgebra", "StaticArrays", "StructArrays", "Tables"] -git-tree-sha1 = "303202358e38d2b01ba46844b92e48a3c238fd9e" +git-tree-sha1 = "659140c9375afa2f685e37c1a0b9c9a60ef56b40" uuid = "5c1252a2-5f33-56bf-86c9-59e7332b4326" -version = "0.4.6" +version = "0.4.7" [[deps.Gettext_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Libiconv_jll", "Pkg", "XML2_jll"] @@ -755,9 +869,9 @@ version = "1.0.2" [[deps.HDF5]] deps = ["Compat", "HDF5_jll", "Libdl", "Mmap", "Random", "Requires", "UUIDs"] -git-tree-sha1 = "3dab31542b3da9f25a6a1d11159d4af8fdce7d67" +git-tree-sha1 = "c73fdc3d9da7700691848b78c61841274076932a" uuid = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f" -version = "0.16.14" +version = "0.16.15" [[deps.HDF5_jll]] deps = ["Artifacts", "JLLWrappers", "LibCURL_jll", "Libdl", "OpenSSL_jll", "Pkg", "Zlib_jll"] @@ -765,11 +879,17 @@ git-tree-sha1 = "4cc2bb72df6ff40b055295fdef6d92955f9dede8" uuid = "0234f1f7-429e-5d53-9886-15a909be8d59" version = "1.12.2+2" +[[deps.HTML_Entities]] +deps = ["StrTables"] +git-tree-sha1 = "c4144ed3bc5f67f595622ad03c0e39fa6c70ccc7" +uuid = "7693890a-d069-55fe-a829-b4a6d304f0ee" +version = "1.0.1" + [[deps.HTTP]] -deps = ["Base64", "CodecZlib", "Dates", "IniFile", "Logging", "LoggingExtras", "MbedTLS", "NetworkOptions", "OpenSSL", "Random", "SimpleBufferStream", "Sockets", "URIs", "UUIDs"] -git-tree-sha1 = "37e4657cd56b11abe3d10cd4a1ec5fbdb4180263" +deps = ["Base64", "CodecZlib", "ConcurrentUtilities", "Dates", "ExceptionUnwrapping", "Logging", "LoggingExtras", "MbedTLS", "NetworkOptions", "OpenSSL", "Random", "SimpleBufferStream", "Sockets", "URIs", "UUIDs"] +git-tree-sha1 = "7f5ef966a02a8fdf3df2ca03108a88447cb3c6f0" uuid = "cd3eb016-35fb-5094-929b-558a96fad6f3" -version = "1.7.4" +version = "1.9.8" [[deps.HarfBuzz_jll]] deps = ["Artifacts", "Cairo_jll", "Fontconfig_jll", "FreeType2_jll", "Glib_jll", "Graphite2_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Pkg"] @@ -779,15 +899,21 @@ version = "2.8.1+1" [[deps.HostCPUFeatures]] deps = ["BitTwiddlingConvenienceFunctions", "IfElse", "Libdl", "Static"] -git-tree-sha1 = "734fd90dd2f920a2f1921d5388dcebe805b262dc" +git-tree-sha1 = "d38bd0d9759e3c6cfa19bdccc314eccf8ce596cc" uuid = "3e5b6fbb-0976-4d2c-9146-d79de83f2fb0" -version = "0.1.14" +version = "0.1.15" + +[[deps.HuggingFaceApi]] +deps = ["Dates", "Downloads", "JSON3", "LibGit2", "OhMyArtifacts", "Pkg", "SHA"] +git-tree-sha1 = "bcf9b0ee12839d9bbee389ec13cd926845a2d39f" +uuid = "3cc741c3-0c9d-4fbe-84fa-cdec264173de" +version = "0.1.0" [[deps.HypergeometricFunctions]] deps = ["DualNumbers", "LinearAlgebra", "OpenLibm_jll", "SpecialFunctions"] -git-tree-sha1 = 
"432b5b03176f8182bd6841fbfc42c718506a2d5f" +git-tree-sha1 = "0ec02c648befc2f94156eaef13b0f38106212f3f" uuid = "34004b35-14d8-5ef3-9330-4cdb6864b03a" -version = "0.3.15" +version = "0.3.17" [[deps.Hyperscript]] deps = ["Test"] @@ -803,15 +929,15 @@ version = "0.9.4" [[deps.IOCapture]] deps = ["Logging", "Random"] -git-tree-sha1 = "f7be53659ab06ddc986428d3a9dcc95f6fa6705a" +git-tree-sha1 = "d75853a0bdbfb1ac815478bacd89cd27b550ace6" uuid = "b5f81e59-6552-4d32-b1f0-c071b021bf89" -version = "0.2.2" +version = "0.2.3" [[deps.IRTools]] deps = ["InteractiveUtils", "MacroTools", "Test"] -git-tree-sha1 = "0ade27f0c49cebd8db2523c4eeccf779407cf12c" +git-tree-sha1 = "eac00994ce3229a464c2847e956d77a2c64ad3a5" uuid = "7869d1d1-7146-5819-86e3-90919afe41df" -version = "0.4.9" +version = "0.4.10" [[deps.IfElse]] git-tree-sha1 = "debdd00ffef04665ccbb3e150747a77560e8fad1" @@ -850,9 +976,9 @@ version = "0.2.16" [[deps.ImageFiltering]] deps = ["CatIndices", "ComputationalResources", "DataStructures", "FFTViews", "FFTW", "ImageBase", "ImageCore", "LinearAlgebra", "OffsetArrays", "Reexport", "SnoopPrecompile", "SparseArrays", "StaticArrays", "Statistics", "TiledIteration"] -git-tree-sha1 = "f265e53558fbbf23e0d54e4fab7106c0f2a9e576" +git-tree-sha1 = "d90867cbe037730a73c9a9499b3591eedbe387a0" uuid = "6a3955dd-da59-5b1f-98d4-e7296123deb5" -version = "0.7.3" +version = "0.7.5" [[deps.ImageIO]] deps = ["FileIO", "IndirectArrays", "JpegTurbo", "LazyModules", "Netpbm", "OpenEXR", "PNGFiles", "QOI", "Sixel", "TiffImages", "UUIDs"] @@ -867,10 +993,10 @@ uuid = "6218d12a-5da1-5696-b52f-db25d2ecc6d1" version = "1.2.1" [[deps.ImageMagick_jll]] -deps = ["Artifacts", "Ghostscript_jll", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Libtiff_jll", "Pkg", "Zlib_jll", "libpng_jll"] -git-tree-sha1 = "124626988534986113cfd876e3093e4a03890f58" +deps = ["Artifacts", "Ghostscript_jll", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Libtiff_jll", "OpenJpeg_jll", "Pkg", "Zlib_jll", "libpng_jll"] +git-tree-sha1 = "7607ad4100c75908a79ff31fabb792cd37711d70" uuid = "c73af94c-d91f-53ed-93a7-00f77d67a9d7" -version = "6.9.12+3" +version = "6.9.12+4" [[deps.ImageMetadata]] deps = ["AxisArrays", "ImageAxes", "ImageBase", "ImageCore"] @@ -885,16 +1011,16 @@ uuid = "787d08f9-d448-5407-9aad-5290dd7ab264" version = "0.3.2" [[deps.ImageQualityIndexes]] -deps = ["ImageContrastAdjustment", "ImageCore", "ImageDistances", "ImageFiltering", "LazyModules", "OffsetArrays", "SnoopPrecompile", "Statistics"] -git-tree-sha1 = "5985d467623f106523ed8351f255642b5141e7be" +deps = ["ImageContrastAdjustment", "ImageCore", "ImageDistances", "ImageFiltering", "LazyModules", "OffsetArrays", "PrecompileTools", "Statistics"] +git-tree-sha1 = "bfb3a198ef5c96582b8095f8a6eece8937c8ceb3" uuid = "2996bd0c-7a13-11e9-2da2-2f5ce47296a9" -version = "0.3.4" +version = "0.3.6" [[deps.ImageSegmentation]] deps = ["Clustering", "DataStructures", "Distances", "Graphs", "ImageCore", "ImageFiltering", "ImageMorphology", "LinearAlgebra", "MetaGraphs", "RegionTrees", "SimpleWeightedGraphs", "StaticArrays", "Statistics"] -git-tree-sha1 = "fb0b597b4928e29fed0597724cfbb5940974f8ca" +git-tree-sha1 = "44664eea5408828c03e5addb84fa4f916132fc26" uuid = "80713f31-8817-5129-9cf8-209ff8fb23e1" -version = "1.8.0" +version = "1.8.1" [[deps.ImageShow]] deps = ["Base64", "ColorSchemes", "FileIO", "ImageBase", "ImageCore", "OffsetArrays", "StackViews"] @@ -910,9 +1036,9 @@ version = "0.9.5" [[deps.Images]] deps = ["Base64", "FileIO", "Graphics", "ImageAxes", "ImageBase", "ImageContrastAdjustment", "ImageCore", 
"ImageDistances", "ImageFiltering", "ImageIO", "ImageMagick", "ImageMetadata", "ImageMorphology", "ImageQualityIndexes", "ImageSegmentation", "ImageShow", "ImageTransformations", "IndirectArrays", "IntegralArrays", "Random", "Reexport", "SparseArrays", "StaticArrays", "Statistics", "StatsBase", "TiledIteration"] -git-tree-sha1 = "03d1301b7ec885b266c0f816f338368c6c0b81bd" +git-tree-sha1 = "5fa9f92e1e2918d9d1243b1131abe623cdf98be7" uuid = "916415d5-f1e6-5110-898d-aaa5f9f070e0" -version = "0.25.2" +version = "0.25.3" [[deps.Imath_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl"] @@ -930,11 +1056,6 @@ git-tree-sha1 = "5cd07aab533df5170988219191dfad0519391428" uuid = "d25df0c9-e2be-5dd7-82c8-3ad0b3e990b9" version = "0.1.3" -[[deps.IniFile]] -git-tree-sha1 = "f550e6e32074c939295eb5ea6de31849ac2c9625" -uuid = "83e8ac13-25f8-5344-8a64-a9f2b223428f" -version = "0.5.1" - [[deps.InitialValues]] git-tree-sha1 = "4da0f88e9a39111c2fa3add390ab15f3a44f3ca3" uuid = "22cec73e-a1b8-11e9-2c92-598750a2cf9c" @@ -980,12 +1101,6 @@ git-tree-sha1 = "16c0cc91853084cb5f58a78bd209513900206ce6" uuid = "8197267c-284f-5f27-9208-e0e47529a953" version = "0.7.4" -[[deps.InverseFunctions]] -deps = ["Test"] -git-tree-sha1 = "49510dfcb407e572524ba94aeae2fced1f3feb0f" -uuid = "3587e190-3f89-42d0-90ee-14403ec27112" -version = "0.1.8" - [[deps.InvertedIndices]] git-tree-sha1 = "0dc7b50b8d436461be01300fd8cd45aa0274b038" uuid = "41ab1584-1d38-5bbf-9106-f11c6c58b48f" @@ -997,9 +1112,9 @@ uuid = "92d709cd-6900-40b7-9082-c6be49f344b6" version = "0.2.2" [[deps.IterTools]] -git-tree-sha1 = "fa6287a4469f5e048d763df38279ee729fbd44e5" +git-tree-sha1 = "4ced6667f9974fc5c5943fa5e2ef1ca43ea9e450" uuid = "c8e1da08-722c-5040-9ed9-7db0dc04731e" -version = "1.4.0" +version = "1.8.0" [[deps.IterationControl]] deps = ["EarlyStopping", "InteractiveUtils"] @@ -1043,10 +1158,10 @@ uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" version = "0.21.4" [[deps.JSON3]] -deps = ["Dates", "Mmap", "Parsers", "SnoopPrecompile", "StructTypes", "UUIDs"] -git-tree-sha1 = "84b10656a41ef564c39d2d477d7236966d2b5683" +deps = ["Dates", "Mmap", "Parsers", "PrecompileTools", "StructTypes", "UUIDs"] +git-tree-sha1 = "5b62d93f2582b09e469b3099d839c2d2ebf5066d" uuid = "0f8b85d8-7281-11e9-16c2-39a750bddbf1" -version = "1.12.0" +version = "1.13.1" [[deps.JpegTurbo]] deps = ["CEnum", "FileIO", "ImageCore", "JpegTurbo_jll", "TOML"] @@ -1067,16 +1182,16 @@ uuid = "b14d175d-62b4-44ba-8fb7-3064adc8c3ec" version = "0.2.4" [[deps.KernelAbstractions]] -deps = ["Adapt", "Atomix", "InteractiveUtils", "LinearAlgebra", "MacroTools", "SnoopPrecompile", "SparseArrays", "StaticArrays", "UUIDs", "UnsafeAtomics", "UnsafeAtomicsLLVM"] -git-tree-sha1 = "976231af02176082fb266a9f96a59da51fcacf20" +deps = ["Adapt", "Atomix", "InteractiveUtils", "LinearAlgebra", "MacroTools", "PrecompileTools", "SparseArrays", "StaticArrays", "UUIDs", "UnsafeAtomics", "UnsafeAtomicsLLVM"] +git-tree-sha1 = "b48617c5d764908b5fac493cd907cf33cc11eec1" uuid = "63c18a36-062a-441e-b654-da1e3ab1ce7c" -version = "0.9.2" +version = "0.9.6" [[deps.KernelDensity]] deps = ["Distributions", "DocStringExtensions", "FFTW", "Interpolations", "StatsBase"] -git-tree-sha1 = "9816b296736292a80b9a3200eb7fbb57aaa3917a" +git-tree-sha1 = "90442c50e202a5cdf21a7899c66b240fdef14035" uuid = "5ab0869b-81aa-558d-bb23-cbf5423bbe9b" -version = "0.6.5" +version = "0.6.7" [[deps.LAME_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] @@ -1092,15 +1207,21 @@ version = "3.0.0+1" [[deps.LLVM]] deps = ["CEnum", "LLVMExtra_jll", "Libdl", "Printf", 
"Unicode"] -git-tree-sha1 = "a8960cae30b42b66dd41808beb76490519f6f9e2" +git-tree-sha1 = "7d5788011dd273788146d40eb5b1fbdc199d0296" uuid = "929cbde3-209d-540e-8aea-75f648917ca0" -version = "5.0.0" +version = "6.0.1" [[deps.LLVMExtra_jll]] deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"] -git-tree-sha1 = "09b7505cc0b1cee87e5d4a26eea61d2e1b0dcd35" +git-tree-sha1 = "1222116d7313cdefecf3d45a2bc1a89c4e7c9217" uuid = "dad2f222-ce93-54a1-a47d-0025e8a3acab" -version = "0.0.21+0" +version = "0.0.22+0" + +[[deps.LLVMOpenMP_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "f689897ccbe049adb19a065c495e75f372ecd42b" +uuid = "1d63c593-3942-5779-bab2-d838dc0a180e" +version = "15.0.4+0" [[deps.LZO_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] @@ -1115,15 +1236,23 @@ version = "1.3.0" [[deps.Latexify]] deps = ["Formatting", "InteractiveUtils", "LaTeXStrings", "MacroTools", "Markdown", "OrderedCollections", "Printf", "Requires"] -git-tree-sha1 = "2422f47b34d4b127720a18f86fa7b1aa2e141f29" +git-tree-sha1 = "f428ae552340899a935973270b8d98e5a31c49fe" uuid = "23fbe1c1-3f47-55db-b15f-69d7ec21a316" -version = "0.15.18" +version = "0.16.1" + + [deps.Latexify.extensions] + DataFramesExt = "DataFrames" + SymEngineExt = "SymEngine" + + [deps.Latexify.weakdeps] + DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" + SymEngine = "123dc426-2d89-5057-bbad-38513e3affd8" [[deps.LatinHypercubeSampling]] deps = ["Random", "StableRNGs", "StatsBase", "Test"] -git-tree-sha1 = "42938ab65e9ed3c3029a8d2c58382ca75bdab243" +git-tree-sha1 = "825289d43c753c7f1bf9bed334c253e9913997f8" uuid = "a5e1c1ea-c99a-51d3-a14d-a9a37257b02d" -version = "1.8.0" +version = "1.9.0" [[deps.LayoutPointers]] deps = ["ArrayInterface", "LinearAlgebra", "ManualMemory", "SIMDTypes", "Static", "StaticArrayInterface"] @@ -1212,9 +1341,15 @@ version = "2.36.0+0" [[deps.LightGBM]] deps = ["Dates", "Libdl", "MLJModelInterface", "SparseArrays", "Statistics"] -git-tree-sha1 = "658faa6a229fb5bb4aea5cc897cd99db66aafb51" +git-tree-sha1 = "ce5f0bbb93610549e94dc1b1d6a1e238ae021d7d" uuid = "7acf609c-83a4-11e9-1ffb-b912bcd3b04a" -version = "0.6.0" +version = "0.6.1" + +[[deps.LightXML]] +deps = ["Libdl", "XML2_jll"] +git-tree-sha1 = "e129d9391168c677cd4800f5c0abb1ed8cb3794f" +uuid = "9c8b4983-aa76-5018-a973-4c85ecc9e179" +version = "0.9.0" [[deps.LineSearches]] deps = ["LinearAlgebra", "NLSolversBase", "NaNMath", "Parameters", "Printf"] @@ -1223,20 +1358,42 @@ uuid = "d3d80556-e9d4-5f37-9878-2ab0fcc64255" version = "7.2.0" [[deps.LinearAlgebra]] -deps = ["Libdl", "libblastrampoline_jll"] +deps = ["Libdl", "OpenBLAS_jll", "libblastrampoline_jll"] uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" [[deps.LinearMaps]] -deps = ["ChainRulesCore", "LinearAlgebra", "SparseArrays", "Statistics"] -git-tree-sha1 = "4af48c3585177561e9f0d24eb9619ad3abf77cc7" +deps = ["LinearAlgebra"] +git-tree-sha1 = "62f9b2762cc107667b137af621e951f52e020a0f" uuid = "7a12625a-238d-50fd-b39a-03d52299707e" -version = "3.10.0" +version = "3.10.2" +weakdeps = ["ChainRulesCore", "SparseArrays", "Statistics"] + + [deps.LinearMaps.extensions] + LinearMapsChainRulesCoreExt = "ChainRulesCore" + LinearMapsSparseArraysExt = "SparseArrays" + LinearMapsStatisticsExt = "Statistics" + +[[deps.LittleCMS_jll]] +deps = ["Artifacts", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Libtiff_jll", "Pkg"] +git-tree-sha1 = "110897e7db2d6836be22c18bffd9422218ee6284" +uuid = "d3a379c0-f9a3-5b72-a4c0-6bf4d2e8af0f" +version = "2.12.0+0" [[deps.LogExpFunctions]] -deps = 
["ChainRulesCore", "ChangesOfVariables", "DocStringExtensions", "InverseFunctions", "IrrationalConstants", "LinearAlgebra"] -git-tree-sha1 = "0a1b7c2863e44523180fdb3146534e265a91870b" +deps = ["DocStringExtensions", "IrrationalConstants", "LinearAlgebra"] +git-tree-sha1 = "c3ce8e7420b3a6e071e0fe4745f5d4300e37b13f" uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688" -version = "0.3.23" +version = "0.3.24" + + [deps.LogExpFunctions.extensions] + LogExpFunctionsChainRulesCoreExt = "ChainRulesCore" + LogExpFunctionsChangesOfVariablesExt = "ChangesOfVariables" + LogExpFunctionsInverseFunctionsExt = "InverseFunctions" + + [deps.LogExpFunctions.weakdeps] + ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" + ChangesOfVariables = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0" + InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112" [[deps.Logging]] uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" @@ -1248,22 +1405,27 @@ uuid = "e6f89c97-d47a-5376-807f-9c37f3926c36" version = "1.0.0" [[deps.LoopVectorization]] -deps = ["ArrayInterface", "ArrayInterfaceCore", "CPUSummary", "ChainRulesCore", "CloseOpenIntervals", "DocStringExtensions", "ForwardDiff", "HostCPUFeatures", "IfElse", "LayoutPointers", "LinearAlgebra", "OffsetArrays", "PolyesterWeave", "SIMDTypes", "SLEEFPirates", "SnoopPrecompile", "SpecialFunctions", "Static", "StaticArrayInterface", "ThreadingUtilities", "UnPack", "VectorizationBase"] -git-tree-sha1 = "defbfba8ddbccdc8ca3edb4a96a6d6fd3cd33ebd" +deps = ["ArrayInterface", "ArrayInterfaceCore", "CPUSummary", "CloseOpenIntervals", "DocStringExtensions", "HostCPUFeatures", "IfElse", "LayoutPointers", "LinearAlgebra", "OffsetArrays", "PolyesterWeave", "PrecompileTools", "SIMDTypes", "SLEEFPirates", "Static", "StaticArrayInterface", "ThreadingUtilities", "UnPack", "VectorizationBase"] +git-tree-sha1 = "e4eed22d70ac91d7a4bf9e0f6902383061d17105" uuid = "bdcacae8-1622-11e9-2a5c-532679323890" -version = "0.12.157" +version = "0.12.162" +weakdeps = ["ChainRulesCore", "ForwardDiff", "SpecialFunctions"] + + [deps.LoopVectorization.extensions] + ForwardDiffExt = ["ChainRulesCore", "ForwardDiff"] + SpecialFunctionsExt = "SpecialFunctions" [[deps.LossFunctions]] -deps = ["InteractiveUtils", "Markdown", "RecipesBase"] -git-tree-sha1 = "f27330f931944ecee340f004302db724c1985955" +deps = ["CategoricalArrays", "Markdown", "Statistics"] +git-tree-sha1 = "44a7bfeb7b5eb9386a62b9cccc6e21f406c15bea" uuid = "30fc2ffe-d236-52d8-8643-a9d8f7c094a7" -version = "0.8.1" +version = "0.10.0" [[deps.MAT]] deps = ["BufferedStreams", "CodecZlib", "HDF5", "SparseArrays"] -git-tree-sha1 = "6eff5740c8ab02c90065719579c7aa0eb40c9f69" +git-tree-sha1 = "79fd0b5ee384caf8ebba6c8fb3f365ca3e2c5493" uuid = "23992714-dd62-5051-b70f-ba57cb901cac" -version = "0.10.4" +version = "0.10.5" [[deps.MIMEs]] git-tree-sha1 = "65f28ad4b594aebe22157d6fac869786a255b7eb" @@ -1272,27 +1434,27 @@ version = "0.1.4" [[deps.MKL_jll]] deps = ["Artifacts", "IntelOpenMP_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "Pkg"] -git-tree-sha1 = "2ce8695e1e699b68702c03402672a69f54b8aca9" +git-tree-sha1 = "154d7aaa82d24db6d8f7e4ffcfe596f40bff214b" uuid = "856f044c-d86e-5d09-b602-aeab76dc8ba7" -version = "2022.2.0+0" +version = "2023.1.0+0" [[deps.MLDatasets]] -deps = ["CSV", "Chemfiles", "DataDeps", "DataFrames", "DelimitedFiles", "FileIO", "FixedPointNumbers", "GZip", "Glob", "HDF5", "ImageShow", "JLD2", "JSON3", "LazyModules", "MAT", "MLUtils", "NPZ", "Pickle", "Printf", "Requires", "SparseArrays", "Tables"] -git-tree-sha1 = "498b37aa3ebb4407adea36df1b244fa4e397de5e" +deps 
= ["CSV", "Chemfiles", "DataDeps", "DataFrames", "DelimitedFiles", "FileIO", "FixedPointNumbers", "GZip", "Glob", "HDF5", "ImageShow", "JLD2", "JSON3", "LazyModules", "MAT", "MLUtils", "NPZ", "Pickle", "Printf", "Requires", "SparseArrays", "Statistics", "Tables"] +git-tree-sha1 = "a03a093b03824f07fe00931df76b18d99398ebb9" uuid = "eb30cadb-4394-5ae3-aed4-317e484a6458" -version = "0.7.9" +version = "0.7.11" [[deps.MLJ]] deps = ["CategoricalArrays", "ComputationalResources", "Distributed", "Distributions", "LinearAlgebra", "MLJBase", "MLJEnsembles", "MLJIteration", "MLJModels", "MLJTuning", "OpenML", "Pkg", "ProgressMeter", "Random", "ScientificTypes", "Statistics", "StatsBase", "Tables"] -git-tree-sha1 = "80149328ca780b522b5a95e402450d10df7904f2" +git-tree-sha1 = "d26cd777c711c332019b39445823cbb1f6cdb7e5" uuid = "add582a8-e3ab-11e8-2d5e-e98b27df1bc7" -version = "0.19.1" +version = "0.19.2" [[deps.MLJBase]] deps = ["CategoricalArrays", "CategoricalDistributions", "ComputationalResources", "Dates", "DelimitedFiles", "Distributed", "Distributions", "InteractiveUtils", "InvertedIndices", "LinearAlgebra", "LossFunctions", "MLJModelInterface", "Missings", "OrderedCollections", "Parameters", "PrettyTables", "ProgressMeter", "Random", "ScientificTypes", "Serialization", "StatisticalTraits", "Statistics", "StatsBase", "Tables"] -git-tree-sha1 = "37a311b0cd581764fc460f6632e6219dc32f9427" +git-tree-sha1 = "4cc167b6c0a3ab25d7050e4ac38fe119e97cd1ab" uuid = "a7f614a8-145f-11e9-1d2a-a57a1082229d" -version = "0.21.8" +version = "0.21.11" [[deps.MLJDecisionTreeInterface]] deps = ["CategoricalArrays", "DecisionTree", "MLJModelInterface", "Random", "Tables"] @@ -1302,21 +1464,21 @@ version = "0.4.0" [[deps.MLJEnsembles]] deps = ["CategoricalArrays", "CategoricalDistributions", "ComputationalResources", "Distributed", "Distributions", "MLJBase", "MLJModelInterface", "ProgressMeter", "Random", "ScientificTypesBase", "StatsBase"] -git-tree-sha1 = "bb8a1056b1d8b40f2f27167fc3ef6412a6719fbf" +git-tree-sha1 = "95b306ef8108067d26dfde9ff3457d59911cc0d6" uuid = "50ed68f4-41fd-4504-931a-ed422449fee0" -version = "0.3.2" +version = "0.3.3" [[deps.MLJFlux]] deps = ["CategoricalArrays", "ColorTypes", "ComputationalResources", "Flux", "MLJModelInterface", "Metalhead", "ProgressMeter", "Random", "Statistics", "Tables"] -git-tree-sha1 = "2ecdce4dd9214789ee1796103d29eaee7619ebd0" +git-tree-sha1 = "b27c3b96cc2a602a1e91eba36b8ca3d796f30ae0" uuid = "094fc8d1-fd35-5302-93ea-dabda2abf845" -version = "0.2.9" +version = "0.2.10" [[deps.MLJGLMInterface]] -deps = ["Distributions", "GLM", "MLJModelInterface", "Tables"] -git-tree-sha1 = "87b665f79b082e15acb8def31ede049386dcb0c6" +deps = ["Distributions", "GLM", "MLJModelInterface", "StatsModels", "Tables"] +git-tree-sha1 = "06aba1c96b19f31744f7e97d96fcf66b79739e05" uuid = "caf8df21-4939-456d-ac9c-5fefbfb04c0c" -version = "0.3.4" +version = "0.3.5" [[deps.MLJIteration]] deps = ["IterationControl", "MLJBase", "Random", "Serialization"] @@ -1326,9 +1488,9 @@ version = "0.5.1" [[deps.MLJLinearModels]] deps = ["DocStringExtensions", "IterativeSolvers", "LinearAlgebra", "LinearMaps", "MLJModelInterface", "Optim", "Parameters"] -git-tree-sha1 = "c811b3877f1328179cef6662388d200c78b95c09" +git-tree-sha1 = "c92bf0ea37bf51e1ef0160069c572825819748b8" uuid = "6ee0df7b-362f-4a72-a706-9e79364fb692" -version = "0.9.1" +version = "0.9.2" [[deps.MLJModelInterface]] deps = ["Random", "ScientificTypesBase", "StatisticalTraits"] @@ -1338,15 +1500,15 @@ version = "1.8.0" [[deps.MLJModels]] deps = 
["CategoricalArrays", "CategoricalDistributions", "Combinatorics", "Dates", "Distances", "Distributions", "InteractiveUtils", "LinearAlgebra", "MLJModelInterface", "Markdown", "OrderedCollections", "Parameters", "Pkg", "PrettyPrinting", "REPL", "Random", "RelocatableFolders", "ScientificTypes", "StatisticalTraits", "Statistics", "StatsBase", "Tables"] -git-tree-sha1 = "21acf47dc53ccc3d68e38ac7629756cd09b599f5" +git-tree-sha1 = "38c3b4af6e52edcd94144c32dc1bea335dbfaec7" uuid = "d491faf4-2d78-11e9-2867-c94bc002c0b7" -version = "0.16.6" +version = "0.16.8" [[deps.MLJMultivariateStatsInterface]] deps = ["CategoricalDistributions", "Distances", "LinearAlgebra", "MLJModelInterface", "MultivariateStats", "StatsBase"] -git-tree-sha1 = "1a63598ce4d10800be601c6a759cce4bc9984383" +git-tree-sha1 = "0d76e36bf83926235dcd3eaeafa7f47d3e7f32ea" uuid = "1b6a4a23-ba22-4f51-9698-8599985d3728" -version = "0.5.2" +version = "0.5.3" [[deps.MLJNaiveBayesInterface]] deps = ["LogExpFunctions", "MLJModelInterface", "NaiveBayes"] @@ -1366,10 +1528,10 @@ uuid = "d8e11817-5142-5d16-987a-aa16d5891078" version = "0.4.17" [[deps.MLUtils]] -deps = ["ChainRulesCore", "Compat", "DataAPI", "DelimitedFiles", "FLoops", "FoldsThreads", "NNlib", "Random", "ShowCases", "SimpleTraits", "Statistics", "StatsBase", "Tables", "Transducers"] -git-tree-sha1 = "f69cdbb5b7c630c02481d81d50eac43697084fe0" +deps = ["ChainRulesCore", "Compat", "DataAPI", "DelimitedFiles", "FLoops", "NNlib", "Random", "ShowCases", "SimpleTraits", "Statistics", "StatsBase", "Tables", "Transducers"] +git-tree-sha1 = "3504cdb8c2bc05bde4d4b09a81b01df88fcbbba0" uuid = "f1d291b0-491e-4a28-83b9-f70985020b54" -version = "0.4.1" +version = "0.4.3" [[deps.MacroTools]] deps = ["Markdown", "Random"] @@ -1377,27 +1539,21 @@ git-tree-sha1 = "42324d08725e200c23d4dfb549e0d5d89dede2d2" uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" version = "0.5.10" -[[deps.MakieCore]] -deps = ["Observables"] -git-tree-sha1 = "9926529455a331ed73c19ff06d16906737a876ed" -uuid = "20f20a25-4f0e-4fdf-b5d1-57303727442b" -version = "0.6.3" - [[deps.ManualMemory]] git-tree-sha1 = "bcaef4fc7a0cfe2cba636d84cda54b5e4e4ca3cd" uuid = "d125e4d3-2237-4719-b19c-fa641b8a4667" version = "0.1.8" [[deps.MappedArrays]] -git-tree-sha1 = "e8b359ef06ec72e8c030463fe02efe5527ee5142" +git-tree-sha1 = "2dab0221fe2b0f2cb6754eaa743cc266339f527e" uuid = "dbb5928d-eab1-5f90-85c2-b9b0edb7c900" -version = "0.4.1" +version = "0.4.2" [[deps.MarchingCubes]] -deps = ["SnoopPrecompile", "StaticArrays"] -git-tree-sha1 = "b198463d1a631e8771709bc8e011ba329da9ad38" +deps = ["PrecompileTools", "StaticArrays"] +git-tree-sha1 = "c8e29e2bacb98c9b6f10445227a8b0402f2f173a" uuid = "299715c1-40a9-479a-aaf9-4a633d36f717" -version = "0.1.7" +version = "0.1.8" [[deps.Markdown]] deps = ["Base64"] @@ -1412,7 +1568,7 @@ version = "1.1.7" [[deps.MbedTLS_jll]] deps = ["Artifacts", "Libdl"] uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1" -version = "2.28.0+0" +version = "2.28.2+0" [[deps.Measures]] git-tree-sha1 = "c13304c81eec1ed3af7fc20e75fb6b26092a1102" @@ -1454,19 +1610,13 @@ version = "0.3.4" [[deps.MozillaCACerts_jll]] uuid = "14a3606d-f60d-562e-9121-12d972cd8159" -version = "2022.2.1" +version = "2022.10.11" [[deps.MultivariateStats]] deps = ["Arpack", "LinearAlgebra", "SparseArrays", "Statistics", "StatsAPI", "StatsBase"] -git-tree-sha1 = "91a48569383df24f0fd2baf789df2aade3d0ad80" +git-tree-sha1 = "68bf5103e002c44adfd71fea6bd770b3f0586843" uuid = "6f286f6a-111f-5878-ab1e-185364afe411" -version = "0.10.1" - -[[deps.MutableArithmetics]] -deps = 
["LinearAlgebra", "SparseArrays", "Test"] -git-tree-sha1 = "3295d296288ab1a0a2528feb424b854418acff57" -uuid = "d8a4904e-b15c-11e9-3269-09a3773c0cb0" -version = "1.2.3" +version = "0.10.2" [[deps.NLSolversBase]] deps = ["DiffResults", "Distributed", "FiniteDiff", "ForwardDiff"] @@ -1476,9 +1626,15 @@ version = "7.8.3" [[deps.NNlib]] deps = ["Adapt", "Atomix", "ChainRulesCore", "GPUArraysCore", "KernelAbstractions", "LinearAlgebra", "Pkg", "Random", "Requires", "Statistics"] -git-tree-sha1 = "99e6dbb50d8a96702dc60954569e9fe7291cc55d" +git-tree-sha1 = "72240e3f5ca031937bd536182cb2c031da5f46dd" uuid = "872c559c-99b0-510c-b3b7-b6c96a88d5cd" -version = "0.8.20" +version = "0.8.21" + + [deps.NNlib.extensions] + NNlibAMDGPUExt = "AMDGPU" + + [deps.NNlib.weakdeps] + AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e" [[deps.NNlibCUDA]] deps = ["Adapt", "CUDA", "LinearAlgebra", "NNlib", "Random", "Statistics", "cuDNN"] @@ -1500,9 +1656,9 @@ version = "1.0.2" [[deps.NaiveBayes]] deps = ["Distributions", "HDF5", "Interpolations", "KernelDensity", "LinearAlgebra", "Random", "SparseArrays", "StatsBase"] -git-tree-sha1 = "830c601de91378e773e7286c3a3e8964d6248657" +git-tree-sha1 = "3e8f66cad75d84820bf146ad3ae3785836497258" uuid = "9bbee03b-0db5-5f46-924f-b5c9c21b8c60" -version = "0.5.4" +version = "0.5.5" [[deps.NameResolution]] deps = ["PrettyPrint"] @@ -1517,9 +1673,9 @@ version = "1.0.0" [[deps.NearestNeighborModels]] deps = ["Distances", "FillArrays", "InteractiveUtils", "LinearAlgebra", "MLJModelInterface", "NearestNeighbors", "Statistics", "StatsBase", "Tables"] -git-tree-sha1 = "c2179f9d8de066c481b889a1426068c5831bb10b" +git-tree-sha1 = "e411143a8362926e4284a54e745972e939fbab78" uuid = "636a865e-7cf4-491e-846c-de09b730eb36" -version = "0.2.2" +version = "0.2.3" [[deps.NearestNeighbors]] deps = ["Distances", "StaticArrays"] @@ -1543,16 +1699,17 @@ version = "0.4.5" uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908" version = "1.2.0" -[[deps.Observables]] -git-tree-sha1 = "6862738f9796b3edc1c09d0890afce4eca9e7e93" -uuid = "510215fc-4207-5dde-b226-833fc4488ee2" -version = "0.5.4" +[[deps.NeuralAttentionlib]] +deps = ["Adapt", "CUDA", "ChainRulesCore", "GPUArrays", "GPUArraysCore", "LinearAlgebra", "NNlib", "NNlibCUDA", "Requires", "Static"] +git-tree-sha1 = "5ee110f3d54e0f29daacc3bdde01b638bf05b9bc" +uuid = "12afc1b8-fad6-47e1-9132-84abc478905f" +version = "0.2.10" [[deps.OffsetArrays]] deps = ["Adapt"] -git-tree-sha1 = "82d7c9e310fe55aa54996e6f7f94674e2a38fcb4" +git-tree-sha1 = "2ac17d29c523ce1cd38e27785a7d23024853a4bb" uuid = "6fe1bfb0-de20-5000-8ca7-80f57d26f881" -version = "1.12.9" +version = "1.12.10" [[deps.Ogg_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] @@ -1560,16 +1717,22 @@ git-tree-sha1 = "887579a3eb005446d514ab7aeac5d1d027658b8f" uuid = "e7412a2a-1a6e-54c0-be00-318e2571c051" version = "1.3.5+1" +[[deps.OhMyArtifacts]] +deps = ["Dates", "Downloads", "Pidfile", "Pkg", "Printf", "SHA", "Scratch", "TOML"] +git-tree-sha1 = "1ae208c3919548b9e7e6783ba294289cd204b4cb" +uuid = "cf8be1f4-309d-442e-839d-29d2a0af6cb7" +version = "0.3.1" + [[deps.OneHotArrays]] deps = ["Adapt", "ChainRulesCore", "Compat", "GPUArraysCore", "LinearAlgebra", "NNlib"] -git-tree-sha1 = "f511fca956ed9e70b80cd3417bb8c2dde4b68644" +git-tree-sha1 = "5e4029759e8699ec12ebdf8721e51a659443403c" uuid = "0b1bfda6-eb8a-41d2-88d8-f5af5cad476f" -version = "0.2.3" +version = "0.2.4" [[deps.OpenBLAS_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"] uuid = "4536629a-c528-5b80-bd46-f80d51c5b363" -version = "0.3.20+0" 
+version = "0.3.21+4" [[deps.OpenEXR]] deps = ["Colors", "FileIO", "OpenEXR_jll"] @@ -1583,6 +1746,12 @@ git-tree-sha1 = "a4ca623df1ae99d09bc9868b008262d0c0ac1e4f" uuid = "18a262bb-aa17-5467-a713-aee519bc75cb" version = "3.1.4+0" +[[deps.OpenJpeg_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Libtiff_jll", "LittleCMS_jll", "Pkg", "libpng_jll"] +git-tree-sha1 = "76374b6e7f632c130e78100b166e5a48464256f8" +uuid = "643b3616-a352-519d-856d-80112ee9badc" +version = "2.4.0+0" + [[deps.OpenLibm_jll]] deps = ["Artifacts", "Libdl"] uuid = "05823500-19ac-5b8b-9628-191a04bc5112" @@ -1596,15 +1765,15 @@ version = "0.3.1" [[deps.OpenSSL]] deps = ["BitFlags", "Dates", "MozillaCACerts_jll", "OpenSSL_jll", "Sockets"] -git-tree-sha1 = "5b3e170ea0724f1e3ed6018c5b006c190f80e87d" +git-tree-sha1 = "51901a49222b09e3743c65b8847687ae5fc78eb2" uuid = "4d8831e6-92b7-49fb-bdf8-b643e874388c" -version = "1.3.5" +version = "1.4.1" [[deps.OpenSSL_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "9ff31d101d987eb9d66bd8b176ac7c277beccd09" +deps = ["Artifacts", "JLLWrappers", "Libdl"] +git-tree-sha1 = "1aa4b74f80b01c6bc2b89992b861b5f210e665b5" uuid = "458c3c95-2e84-50aa-8efc-19380b2a3a95" -version = "1.1.20+0" +version = "1.1.21+0" [[deps.OpenSpecFun_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"] @@ -1614,9 +1783,9 @@ version = "0.5.5+0" [[deps.Optim]] deps = ["Compat", "FillArrays", "ForwardDiff", "LineSearches", "LinearAlgebra", "NLSolversBase", "NaNMath", "Parameters", "PositiveFactorizations", "Printf", "SparseArrays", "StatsBase"] -git-tree-sha1 = "a89b11f0f354f06099e4001c151dffad7ebab015" +git-tree-sha1 = "e3a6546c1577bfd701771b477b794a52949e7594" uuid = "429524aa-4258-5aef-a3af-852621145aeb" -version = "1.7.5" +version = "1.7.6" [[deps.Optimisers]] deps = ["ChainRulesCore", "Functors", "LinearAlgebra", "Random", "Statistics"] @@ -1638,7 +1807,7 @@ version = "1.6.0" [[deps.PCRE2_jll]] deps = ["Artifacts", "Libdl"] uuid = "efcefdf7-47ab-520b-bdef-62a2eaa19f15" -version = "10.40.0+0" +version = "10.42.0+0" [[deps.PDMats]] deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"] @@ -1665,16 +1834,27 @@ uuid = "d96e819e-fc66-5662-9728-84c9c7592b0a" version = "0.12.3" [[deps.Parsers]] -deps = ["Dates", "SnoopPrecompile"] -git-tree-sha1 = "478ac6c952fddd4399e71d4779797c538d0ff2bf" +deps = ["Dates", "PrecompileTools", "UUIDs"] +git-tree-sha1 = "4b2e829ee66d4218e0cef22c0a64ee37cf258c29" uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" -version = "2.5.8" +version = "2.7.1" + +[[deps.PartialFunctions]] +git-tree-sha1 = "b3901ea034cfd8aae57a2fa0dde0b0ea18bad1cb" +uuid = "570af359-4316-4cb7-8c74-252c00c2016b" +version = "1.1.1" [[deps.Pickle]] -deps = ["DataStructures", "InternedStrings", "Serialization", "SparseArrays", "Strided", "StringEncodings", "ZipFile"] -git-tree-sha1 = "e6a34eb1dc0c498f0774bbfbbbeff2de101f4235" +deps = ["BFloat16s", "DataStructures", "InternedStrings", "Serialization", "SparseArrays", "Strided", "StringEncodings", "ZipFile"] +git-tree-sha1 = "2e71d7dbcab8dc47306c0ed6ac6018fbc1a7070f" uuid = "fbb45041-c46e-462f-888f-7c521cafbc2c" -version = "0.3.2" +version = "0.3.3" + +[[deps.Pidfile]] +deps = ["FileWatching", "Test"] +git-tree-sha1 = "2d8aaf8ee10df53d0dfb9b8ee44ae7c04ced2b03" +uuid = "fa939f87-e72e-5be4-a000-7fc836dbe307" +version = "1.3.0" [[deps.Pipe]] git-tree-sha1 = "6842804e7867b115ca9de748a0cf6b364523c16d" @@ -1682,15 +1862,15 @@ uuid = "b98c9c47-44ae-5843-9183-064241ee97a0" version = "1.3.0" [[deps.Pixman_jll]] -deps = 
["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "b4f5d02549a10e20780a24fce72bea96b6329e29" +deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LLVMOpenMP_jll", "Libdl"] +git-tree-sha1 = "64779bc4c9784fee475689a1752ef4d5747c5e87" uuid = "30392449-352a-5448-841d-b1acce4e97dc" -version = "0.40.1+0" +version = "0.42.2+0" [[deps.Pkg]] -deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"] +deps = ["Artifacts", "Dates", "Downloads", "FileWatching", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"] uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" -version = "1.8.0" +version = "1.9.0" [[deps.PkgVersion]] deps = ["Pkg"] @@ -1705,22 +1885,36 @@ uuid = "ccf2f8ad-2431-5c83-bf29-c5338b663b6a" version = "3.1.0" [[deps.PlotUtils]] -deps = ["ColorSchemes", "Colors", "Dates", "Printf", "Random", "Reexport", "SnoopPrecompile", "Statistics"] -git-tree-sha1 = "c95373e73290cf50a8a22c3375e4625ded5c5280" +deps = ["ColorSchemes", "Colors", "Dates", "PrecompileTools", "Printf", "Random", "Reexport", "Statistics"] +git-tree-sha1 = "f92e1315dadf8c46561fb9396e525f7200cdc227" uuid = "995b91a9-d308-5afd-9ec6-746e21dbc043" -version = "1.3.4" +version = "1.3.5" [[deps.Plots]] -deps = ["Base64", "Contour", "Dates", "Downloads", "FFMPEG", "FixedPointNumbers", "GR", "JLFzf", "JSON", "LaTeXStrings", "Latexify", "LinearAlgebra", "Measures", "NaNMath", "Pkg", "PlotThemes", "PlotUtils", "Preferences", "Printf", "REPL", "Random", "RecipesBase", "RecipesPipeline", "Reexport", "RelocatableFolders", "Requires", "Scratch", "Showoff", "SnoopPrecompile", "SparseArrays", "Statistics", "StatsBase", "UUIDs", "UnicodeFun", "Unzip"] -git-tree-sha1 = "5434b0ee344eaf2854de251f326df8720f6a7b55" +deps = ["Base64", "Contour", "Dates", "Downloads", "FFMPEG", "FixedPointNumbers", "GR", "JLFzf", "JSON", "LaTeXStrings", "Latexify", "LinearAlgebra", "Measures", "NaNMath", "Pkg", "PlotThemes", "PlotUtils", "PrecompileTools", "Preferences", "Printf", "REPL", "Random", "RecipesBase", "RecipesPipeline", "Reexport", "RelocatableFolders", "Requires", "Scratch", "Showoff", "SparseArrays", "Statistics", "StatsBase", "UUIDs", "UnicodeFun", "UnitfulLatexify", "Unzip"] +git-tree-sha1 = "75ca67b2c6512ad2d0c767a7cfc55e75075f8bbc" uuid = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" -version = "1.38.10" +version = "1.38.16" + + [deps.Plots.extensions] + FileIOExt = "FileIO" + GeometryBasicsExt = "GeometryBasics" + IJuliaExt = "IJulia" + ImageInTerminalExt = "ImageInTerminal" + UnitfulExt = "Unitful" + + [deps.Plots.weakdeps] + FileIO = "5789e2e9-d7fb-5bc7-8068-2c6fae9b9549" + GeometryBasics = "5c1252a2-5f33-56bf-86c9-59e7332b4326" + IJulia = "7073ff75-c697-5162-941a-fcdaad2a7d2a" + ImageInTerminal = "d8c32880-2388-543b-8c61-d9f865259254" + Unitful = "1986cc42-f94f-5a68-af5c-568840ba703d" [[deps.PlutoUI]] deps = ["AbstractPlutoDingetjes", "Base64", "ColorTypes", "Dates", "FixedPointNumbers", "Hyperscript", "HypertextLiteral", "IOCapture", "InteractiveUtils", "JSON", "Logging", "MIMEs", "Markdown", "Random", "Reexport", "URIs", "UUIDs"] -git-tree-sha1 = "5bb5129fdd62a2bbbe17c2756932259acf467386" +git-tree-sha1 = "b478a748be27bd2f2c73a7690da219d0844db305" uuid = "7f904dfe-b85e-4ff6-b463-dae2292396a8" -version = "0.7.50" +version = "0.7.51" [[deps.PolyesterWeave]] deps = ["BitTwiddlingConvenienceFunctions", "CPUSummary", "IfElse", "Static", 
"ThreadingUtilities"] @@ -1729,10 +1923,20 @@ uuid = "1d0040c9-8b98-4ee7-8388-3f51789ca0ad" version = "0.2.1" [[deps.Polynomials]] -deps = ["ChainRulesCore", "LinearAlgebra", "MakieCore", "MutableArithmetics", "RecipesBase"] -git-tree-sha1 = "66443538efd80fac4962b74523ec0b35c9464a21" +deps = ["LinearAlgebra", "RecipesBase"] +git-tree-sha1 = "3aa2bb4982e575acd7583f01531f241af077b163" uuid = "f27b6e38-b328-58d1-80ce-0feddd5e7a45" -version = "3.2.9" +version = "3.2.13" + + [deps.Polynomials.extensions] + PolynomialsChainRulesCoreExt = "ChainRulesCore" + PolynomialsMakieCoreExt = "MakieCore" + PolynomialsMutableArithmeticsExt = "MutableArithmetics" + + [deps.Polynomials.weakdeps] + ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" + MakieCore = "20f20a25-4f0e-4fdf-b5d1-57303727442b" + MutableArithmetics = "d8a4904e-b15c-11e9-3269-09a3773c0cb0" [[deps.PooledArrays]] deps = ["DataAPI", "Future"] @@ -1746,11 +1950,17 @@ git-tree-sha1 = "17275485f373e6673f7e7f97051f703ed5b15b20" uuid = "85a6dd25-e78a-55b7-8502-1745935b8125" version = "0.2.4" +[[deps.PrecompileTools]] +deps = ["Preferences"] +git-tree-sha1 = "9673d39decc5feece56ef3940e5dafba15ba0f81" +uuid = "aea7be01-6a6a-4083-8856-8a6e6704d82a" +version = "1.1.2" + [[deps.Preferences]] deps = ["TOML"] -git-tree-sha1 = "47e5f437cc0e7ef2ce8406ce1e7e24d44915f88d" +git-tree-sha1 = "7eb1686b4f04b82f96ed7a4ea5890a4f0c7a09f1" uuid = "21216c6a-2e73-6563-6e65-726566657250" -version = "1.3.0" +version = "1.4.0" [[deps.PrettyPrint]] git-tree-sha1 = "632eb4abab3449ab30c5e1afaa874f0b98b586e4" @@ -1758,15 +1968,21 @@ uuid = "8162dcfd-2161-5ef2-ae6c-7681170c5f98" version = "0.2.0" [[deps.PrettyPrinting]] -git-tree-sha1 = "4be53d093e9e37772cc89e1009e8f6ad10c4681b" +git-tree-sha1 = "22a601b04a154ca38867b991d5017469dc75f2db" uuid = "54e16d92-306c-5ea0-a30b-337be88ac337" -version = "0.4.0" +version = "0.4.1" [[deps.PrettyTables]] deps = ["Crayons", "Formatting", "LaTeXStrings", "Markdown", "Reexport", "StringManipulation", "Tables"] -git-tree-sha1 = "548793c7859e28ef026dba514752275ee871169f" +git-tree-sha1 = "213579618ec1f42dea7dd637a42785a608b1ea9c" uuid = "08abe8d2-0d0c-5749-adfa-8a2ac140af0d" -version = "2.2.3" +version = "2.2.4" + +[[deps.PrimitiveOneHot]] +deps = ["Adapt", "ChainRulesCore", "NNlib", "Requires"] +git-tree-sha1 = "b744f8f1cb9a700472529fc58986ab00d86996ae" +uuid = "13d12f88-f12b-451e-9b9f-13b97e01cc85" +version = "0.1.3" [[deps.Printf]] deps = ["Unicode"] @@ -1818,9 +2034,9 @@ uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" [[deps.Random123]] deps = ["Random", "RandomNumbers"] -git-tree-sha1 = "7a1a306b72cfa60634f03a911405f4e64d1b718b" +git-tree-sha1 = "552f30e847641591ba3f39fd1bed559b9deb0ef3" uuid = "74087812-796a-5b5d-8853-05524746bad3" -version = "1.6.0" +version = "1.6.1" [[deps.RandomNumbers]] deps = ["Random", "Requires"] @@ -1835,9 +2051,13 @@ version = "0.3.2" [[deps.Ratios]] deps = ["Requires"] -git-tree-sha1 = "dc84268fe0e3335a62e315a3a7cf2afa7178a734" +git-tree-sha1 = "1342a47bf3260ee108163042310d26f2be5ec90b" uuid = "c84ed2f1-dad5-54f0-aa8e-dbefe2724439" -version = "0.4.3" +version = "0.4.5" +weakdeps = ["FixedPointNumbers"] + + [deps.Ratios.extensions] + RatiosFixedPointNumbersExt = "FixedPointNumbers" [[deps.RealDot]] deps = ["LinearAlgebra"] @@ -1846,16 +2066,16 @@ uuid = "c1ae055f-0cd5-4b69-90a6-9a35b1a98df9" version = "0.1.0" [[deps.RecipesBase]] -deps = ["SnoopPrecompile"] -git-tree-sha1 = "261dddd3b862bd2c940cf6ca4d1c8fe593e457c8" +deps = ["PrecompileTools"] +git-tree-sha1 = "5c3d09cc4f31f5fc6af001c250bf1278733100ff" uuid 
= "3cdcf5f2-1ef4-517c-9805-6587b60abb01" -version = "1.3.3" +version = "1.3.4" [[deps.RecipesPipeline]] -deps = ["Dates", "NaNMath", "PlotUtils", "RecipesBase", "SnoopPrecompile"] -git-tree-sha1 = "e974477be88cb5e3040009f3767611bc6357846f" +deps = ["Dates", "NaNMath", "PlotUtils", "PrecompileTools", "RecipesBase"] +git-tree-sha1 = "45cf9fd0ca5839d06ef333c8201714e888486342" uuid = "01d81517-befc-4cb6-b9ec-a95719d0359c" -version = "0.6.11" +version = "0.6.12" [[deps.Reexport]] git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b" @@ -1893,10 +2113,10 @@ uuid = "f50d1b31-88e8-58de-be2c-1cc44531875f" version = "0.4.0+0" [[deps.Rotations]] -deps = ["LinearAlgebra", "Quaternions", "Random", "StaticArrays", "Statistics"] -git-tree-sha1 = "72a6abdcd088764878b473102df7c09bbc0548de" +deps = ["LinearAlgebra", "Quaternions", "Random", "StaticArrays"] +git-tree-sha1 = "54ccb4dbab4b1f69beb255a2c0ca5f65a9c82f08" uuid = "6038ab10-8711-5258-84ad-4b1120ba62dc" -version = "1.4.0" +version = "1.5.1" [[deps.SHA]] uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce" @@ -1909,9 +2129,9 @@ version = "0.1.0" [[deps.SLEEFPirates]] deps = ["IfElse", "Static", "VectorizationBase"] -git-tree-sha1 = "cda0aece8080e992f6370491b08ef3909d1c04e7" +git-tree-sha1 = "4b8586aece42bee682399c4c4aee95446aa5cd19" uuid = "476501e8-09a2-5ece-8869-fb82de89a1fa" -version = "0.6.38" +version = "0.6.39" [[deps.ScientificTypes]] deps = ["CategoricalArrays", "ColorTypes", "Dates", "Distributions", "PrettyTables", "Reexport", "ScientificTypesBase", "StatisticalTraits", "Tables"] @@ -1938,9 +2158,9 @@ version = "1.2.0" [[deps.SentinelArrays]] deps = ["Dates", "Random"] -git-tree-sha1 = "77d3c4726515dca71f6d80fbb5e251088defe305" +git-tree-sha1 = "04bdff0b09c65ff3e06a05e3eb7b120223da3d39" uuid = "91c51154-3ec4-41a3-a24f-3f23e20d615c" -version = "1.3.18" +version = "1.4.0" [[deps.Serialization]] uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" @@ -1983,10 +2203,10 @@ uuid = "699a6c99-e7fa-54fc-8d76-47d257e15c1d" version = "0.9.4" [[deps.SimpleWeightedGraphs]] -deps = ["Graphs", "LinearAlgebra", "Markdown", "SparseArrays", "Test"] -git-tree-sha1 = "7d0b07df35fccf9b866a94bcab98822a87a3cb6f" +deps = ["Graphs", "LinearAlgebra", "Markdown", "SparseArrays"] +git-tree-sha1 = "4b33e0e081a825dbfaf314decf58fa47e53d6acb" uuid = "47aef6b3-ad0c-573a-a1e2-d07658019622" -version = "1.3.0" +version = "1.4.0" [[deps.Sixel]] deps = ["Dates", "FileIO", "ImageCore", "IndirectArrays", "OffsetArrays", "REPL", "libsixel_jll"] @@ -2005,19 +2225,23 @@ uuid = "6462fe0b-24de-5631-8697-dd941f90decc" [[deps.SortingAlgorithms]] deps = ["DataStructures"] -git-tree-sha1 = "a4ada03f999bd01b3a25dcaa30b2d929fe537e00" +git-tree-sha1 = "c60ec5c62180f27efea3ba2908480f8055e17cee" uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c" -version = "1.1.0" +version = "1.1.1" [[deps.SparseArrays]] -deps = ["LinearAlgebra", "Random"] +deps = ["Libdl", "LinearAlgebra", "Random", "Serialization", "SuiteSparse_jll"] uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" [[deps.SpecialFunctions]] -deps = ["ChainRulesCore", "IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"] -git-tree-sha1 = "ef28127915f4229c971eb43f3fc075dd3fe91880" +deps = ["IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"] +git-tree-sha1 = "7beb031cf8145577fbccacd94b8a8f4ce78428d3" uuid = "276daf66-3868-5448-9aa4-cd146d93841b" -version = "2.2.0" +version = "2.3.0" +weakdeps = ["ChainRulesCore"] + + [deps.SpecialFunctions.extensions] + SpecialFunctionsChainRulesCoreExt = "ChainRulesCore" 
[[deps.SplittablesBase]] deps = ["Setfield", "Test"] @@ -2039,21 +2263,30 @@ version = "0.1.1" [[deps.Static]] deps = ["IfElse"] -git-tree-sha1 = "08be5ee09a7632c32695d954a602df96a877bf0d" +git-tree-sha1 = "dbde6766fc677423598138a5951269432b0fcc90" uuid = "aedffcd0-7271-4cad-89d0-dc628f76c6d3" -version = "0.8.6" +version = "0.8.7" [[deps.StaticArrayInterface]] deps = ["ArrayInterface", "Compat", "IfElse", "LinearAlgebra", "Requires", "SnoopPrecompile", "SparseArrays", "Static", "SuiteSparse"] git-tree-sha1 = "33040351d2403b84afce74dae2e22d3f5b18edcb" uuid = "0d7ed370-da01-4f52-bd93-41d350b8b718" version = "1.4.0" +weakdeps = ["OffsetArrays", "StaticArrays"] + + [deps.StaticArrayInterface.extensions] + StaticArrayInterfaceOffsetArraysExt = "OffsetArrays" + StaticArrayInterfaceStaticArraysExt = "StaticArrays" [[deps.StaticArrays]] -deps = ["LinearAlgebra", "Random", "StaticArraysCore", "Statistics"] -git-tree-sha1 = "63e84b7fdf5021026d0f17f76af7c57772313d99" +deps = ["LinearAlgebra", "Random", "StaticArraysCore"] +git-tree-sha1 = "0da7e6b70d1bb40b1ace3b576da9ea2992f76318" uuid = "90137ffa-7385-5640-81b9-e52037218182" -version = "1.5.21" +version = "1.6.0" +weakdeps = ["Statistics"] + + [deps.StaticArrays.extensions] + StaticArraysStatisticsExt = "Statistics" [[deps.StaticArraysCore]] git-tree-sha1 = "6b7ba252635a5eff6a0b0664a41ee140a1c9e72a" @@ -2069,6 +2302,7 @@ version = "3.2.0" [[deps.Statistics]] deps = ["LinearAlgebra", "SparseArrays"] uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" +version = "1.9.0" [[deps.StatsAPI]] deps = ["LinearAlgebra"] @@ -2078,21 +2312,35 @@ version = "1.6.0" [[deps.StatsBase]] deps = ["DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"] -git-tree-sha1 = "d1bf48bfcc554a3761a133fe3a9bb01488e06916" +git-tree-sha1 = "75ebe04c5bed70b91614d684259b661c9e6274a4" uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" -version = "0.33.21" +version = "0.34.0" [[deps.StatsFuns]] -deps = ["ChainRulesCore", "HypergeometricFunctions", "InverseFunctions", "IrrationalConstants", "LogExpFunctions", "Reexport", "Rmath", "SpecialFunctions"] +deps = ["HypergeometricFunctions", "IrrationalConstants", "LogExpFunctions", "Reexport", "Rmath", "SpecialFunctions"] git-tree-sha1 = "f625d686d5a88bcd2b15cd81f18f98186fdc0c9a" uuid = "4c63d2b9-4356-54db-8cca-17b64c39e42c" version = "1.3.0" + [deps.StatsFuns.extensions] + StatsFunsChainRulesCoreExt = "ChainRulesCore" + StatsFunsInverseFunctionsExt = "InverseFunctions" + + [deps.StatsFuns.weakdeps] + ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" + InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112" + [[deps.StatsModels]] deps = ["DataAPI", "DataStructures", "LinearAlgebra", "Printf", "REPL", "ShiftedArrays", "SparseArrays", "StatsBase", "StatsFuns", "Tables"] -git-tree-sha1 = "51cdf1afd9d78552e7a08536930d7abc3b288a5c" +git-tree-sha1 = "8cc7a5385ecaa420f0b3426f9b0135d0df0638ed" uuid = "3eaba693-59b7-5ba5-a881-562e759f1c8d" -version = "0.7.1" +version = "0.7.2" + +[[deps.StrTables]] +deps = ["Dates"] +git-tree-sha1 = "5998faae8c6308acc25c25896562a1e66a3bb038" +uuid = "9700d1a9-a7c8-5760-9816-a99fda30bb8f" +version = "1.0.1" [[deps.Strided]] deps = ["LinearAlgebra", "TupleTools"] @@ -2111,6 +2359,11 @@ git-tree-sha1 = "46da2434b41f41ac3594ee9816ce5541c6096123" uuid = "892a3eda-7b42-436c-8928-eab12a02cf0e" version = "0.3.0" +[[deps.StringViews]] +git-tree-sha1 = "1b099e645a8e7ebb16eb8abc1b718cf71316b913" +uuid = 
"354b36f9-a18e-4713-926e-db85100087ba" +version = "1.3.2" + [[deps.StructArrays]] deps = ["Adapt", "DataAPI", "GPUArraysCore", "StaticArraysCore", "Tables"] git-tree-sha1 = "521a0e828e98bb69042fec1809c1b5a680eb7389" @@ -2123,14 +2376,25 @@ git-tree-sha1 = "ca4bccb03acf9faaf4137a9abc1881ed1841aa70" uuid = "856f2bd8-1eba-4b0a-8007-ebc267875bd4" version = "1.10.0" +[[deps.StructWalk]] +deps = ["ConstructionBase"] +git-tree-sha1 = "ef626534f40a9d99b3dafdbd54cfe411ad86e3b8" +uuid = "31cdf514-beb7-4750-89db-dda9d2eb8d3d" +version = "0.2.1" + [[deps.SuiteSparse]] deps = ["Libdl", "LinearAlgebra", "Serialization", "SparseArrays"] uuid = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9" +[[deps.SuiteSparse_jll]] +deps = ["Artifacts", "Libdl", "Pkg", "libblastrampoline_jll"] +uuid = "bea87d4a-7f5b-5778-9afe-8cc45184846c" +version = "5.10.1+6" + [[deps.TOML]] deps = ["Dates"] uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76" -version = "1.0.0" +version = "1.0.3" [[deps.TableTraits]] deps = ["IteratorInterfaceExtensions"] @@ -2147,7 +2411,7 @@ version = "1.10.1" [[deps.Tar]] deps = ["ArgTools", "SHA"] uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e" -version = "1.10.1" +version = "1.10.0" [[deps.TensorCore]] deps = ["LinearAlgebra"] @@ -2159,11 +2423,17 @@ version = "0.1.1" deps = ["InteractiveUtils", "Logging", "Random", "Serialization"] uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40" +[[deps.TextEncodeBase]] +deps = ["FuncPipelines", "PartialFunctions", "PrimitiveOneHot", "StaticArrays", "StructWalk", "Unicode", "WordTokenizers"] +git-tree-sha1 = "1304ca2c65d9b28c1e2a78cdf5032348c0c405e5" +uuid = "f92c20c0-9f2a-4705-8116-881385faba05" +version = "0.6.0" + [[deps.ThreadingUtilities]] deps = ["ManualMemory"] -git-tree-sha1 = "c97f60dd4f2331e1a495527f80d242501d2f9865" +git-tree-sha1 = "eda08f7e9818eb53661b3deb74e3159460dfbc27" uuid = "8290d209-cae3-49c0-8002-c8c24d57dab5" -version = "0.5.1" +version = "0.5.2" [[deps.TiffImages]] deps = ["ColorTypes", "DataStructures", "DocStringExtensions", "FileIO", "FixedPointNumbers", "IndirectArrays", "Inflate", "Mmap", "OffsetArrays", "PkgVersion", "ProgressMeter", "UUIDs"] @@ -2179,21 +2449,41 @@ version = "0.3.1" [[deps.TimerOutputs]] deps = ["ExprTools", "Printf"] -git-tree-sha1 = "f2fd3f288dfc6f507b0c3a2eb3bac009251e548b" +git-tree-sha1 = "f548a9e9c490030e545f72074a41edfd0e5bcdd7" uuid = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f" -version = "0.5.22" +version = "0.5.23" [[deps.TranscodingStreams]] deps = ["Random", "Test"] -git-tree-sha1 = "0b829474fed270a4b0ab07117dce9b9a2fa7581a" +git-tree-sha1 = "9a6ae7ed916312b41236fcef7e0af564ef934769" uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa" -version = "0.9.12" +version = "0.9.13" [[deps.Transducers]] deps = ["Adapt", "ArgCheck", "BangBang", "Baselet", "CompositionsBase", "DefineSingletons", "Distributed", "InitialValues", "Logging", "Markdown", "MicroCollections", "Requires", "Setfield", "SplittablesBase", "Tables"] -git-tree-sha1 = "c42fa452a60f022e9e087823b47e5a5f8adc53d5" +git-tree-sha1 = "a66fb81baec325cf6ccafa243af573b031e87b00" uuid = "28d57a85-8fef-5791-bfe6-a80928e7c999" -version = "0.4.75" +version = "0.4.77" + + [deps.Transducers.extensions] + TransducersBlockArraysExt = "BlockArrays" + TransducersDataFramesExt = "DataFrames" + TransducersLazyArraysExt = "LazyArrays" + TransducersOnlineStatsBaseExt = "OnlineStatsBase" + TransducersReferenceablesExt = "Referenceables" + + [deps.Transducers.weakdeps] + BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" + DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" + LazyArrays = 
"5078a376-72f3-5289-bfd5-ec5146d43c02" + OnlineStatsBase = "925886fa-5bf2-5e8e-b522-a9147a512338" + Referenceables = "42d2dcc6-99eb-4e98-b66c-637b7d73030e" + +[[deps.Transformers]] +deps = ["Base64", "BytePairEncoding", "CUDA", "ChainRulesCore", "DataDeps", "DataStructures", "Dates", "DelimitedFiles", "DoubleArrayTries", "Fetch", "FillArrays", "Flux", "FuncPipelines", "Functors", "HTTP", "HuggingFaceApi", "JSON3", "LightXML", "LinearAlgebra", "Mmap", "NNlib", "NNlibCUDA", "NeuralAttentionlib", "Pickle", "Pkg", "PrimitiveOneHot", "Random", "SHA", "Static", "Statistics", "StringViews", "StructWalk", "TextEncodeBase", "Unicode", "ValSplit", "WordTokenizers", "Zygote"] +git-tree-sha1 = "35b63543a154cea7e9068f45e67c5fdb7467f2ed" +uuid = "21ca0261-441d-5938-ace7-c90938fde4d4" +version = "0.2.6" [[deps.Tricks]] git-tree-sha1 = "aadb748be58b492045b4f56166b5188aa63ce549" @@ -2229,10 +2519,45 @@ uuid = "1cfade01-22cf-5700-b092-accc4b62d6e1" version = "0.4.1" [[deps.UnicodePlots]] -deps = ["ColorSchemes", "ColorTypes", "Contour", "Crayons", "Dates", "LinearAlgebra", "MarchingCubes", "NaNMath", "Printf", "Requires", "SnoopPrecompile", "SparseArrays", "StaticArrays", "StatsBase"] -git-tree-sha1 = "2825e58f6ec3cab889dfa2c824f8d89b9f7ee731" +deps = ["ColorSchemes", "ColorTypes", "Contour", "Crayons", "Dates", "LinearAlgebra", "MarchingCubes", "NaNMath", "PrecompileTools", "Printf", "Requires", "SparseArrays", "StaticArrays", "StatsBase"] +git-tree-sha1 = "b96de03092fe4b18ac7e4786bee55578d4b75ae8" uuid = "b8865327-cd53-5732-bb35-84acbb429228" -version = "3.5.1" +version = "3.6.0" + + [deps.UnicodePlots.extensions] + FreeTypeExt = ["FileIO", "FreeType"] + ImageInTerminalExt = "ImageInTerminal" + IntervalSetsExt = "IntervalSets" + TermExt = "Term" + UnitfulExt = "Unitful" + + [deps.UnicodePlots.weakdeps] + FileIO = "5789e2e9-d7fb-5bc7-8068-2c6fae9b9549" + FreeType = "b38be410-82b0-50bf-ab77-7b57e271db43" + ImageInTerminal = "d8c32880-2388-543b-8c61-d9f865259254" + IntervalSets = "8197267c-284f-5f27-9208-e0e47529a953" + Term = "22787eb5-b846-44ae-b979-8e399b8463ab" + Unitful = "1986cc42-f94f-5a68-af5c-568840ba703d" + +[[deps.Unitful]] +deps = ["Dates", "LinearAlgebra", "Random"] +git-tree-sha1 = "c4d2a349259c8eba66a00a540d550f122a3ab228" +uuid = "1986cc42-f94f-5a68-af5c-568840ba703d" +version = "1.15.0" + + [deps.Unitful.extensions] + ConstructionBaseUnitfulExt = "ConstructionBase" + InverseFunctionsUnitfulExt = "InverseFunctions" + + [deps.Unitful.weakdeps] + ConstructionBase = "187b0558-2788-49d3-abe0-74a17ed4e7c9" + InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112" + +[[deps.UnitfulLatexify]] +deps = ["LaTeXStrings", "Latexify", "Unitful"] +git-tree-sha1 = "e2d817cc500e960fdbafcf988ac8436ba3208bfd" +uuid = "45397f5d-5981-4c77-b2b3-fc36d6e9b728" +version = "1.6.3" [[deps.UnsafeAtomics]] git-tree-sha1 = "6331ac3440856ea1988316b46045303bef658278" @@ -2241,15 +2566,21 @@ version = "0.2.1" [[deps.UnsafeAtomicsLLVM]] deps = ["LLVM", "UnsafeAtomics"] -git-tree-sha1 = "ea37e6066bf194ab78f4e747f5245261f17a7175" +git-tree-sha1 = "323e3d0acf5e78a56dfae7bd8928c989b4f3083e" uuid = "d80eeb9a-aca5-4d75-85e5-170c8b632249" -version = "0.1.2" +version = "0.1.3" [[deps.Unzip]] git-tree-sha1 = "ca0969166a028236229f63514992fc073799bb78" uuid = "41fe7b60-77ed-43a1-b4f0-825fd5a5650d" version = "0.2.0" +[[deps.ValSplit]] +deps = ["ExprTools", "Tricks"] +git-tree-sha1 = "0d087f8ddc8eced370cc968eeb3b01db32cb2c01" +uuid = "0625e100-946b-11ec-09cd-6328dd093154" +version = "0.1.0" + [[deps.VectorizationBase]] deps = 
["ArrayInterface", "CPUSummary", "HostCPUFeatures", "IfElse", "LayoutPointers", "Libdl", "LinearAlgebra", "SIMDTypes", "Static", "StaticArrayInterface"] git-tree-sha1 = "b182207d4af54ac64cbc71797765068fdeff475d" @@ -2280,6 +2611,12 @@ git-tree-sha1 = "de67fa59e33ad156a590055375a30b23c40299d3" uuid = "efce3f68-66dc-5838-9240-27a6d6f5f9b6" version = "0.5.5" +[[deps.WordTokenizers]] +deps = ["DataDeps", "HTML_Entities", "StrTables", "Unicode"] +git-tree-sha1 = "01dd4068c638da2431269f49a5964bf42ff6c9d2" +uuid = "796a5d58-b03d-544a-977e-18100b691f6e" +version = "0.5.6" + [[deps.WorkerUtilities]] git-tree-sha1 = "cd1659ba0d57b71a464a29e64dbc67cfe83d54e7" uuid = "76eceee3-57b5-4d4a-8e66-0e911cebbf60" @@ -2298,16 +2635,16 @@ uuid = "aed1982a-8fda-507f-9586-7b0439959a61" version = "1.1.34+0" [[deps.Xorg_libX11_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libxcb_jll", "Xorg_xtrans_jll"] -git-tree-sha1 = "5be649d550f3f4b95308bf0183b82e2582876527" +deps = ["Artifacts", "JLLWrappers", "Libdl", "Xorg_libxcb_jll", "Xorg_xtrans_jll"] +git-tree-sha1 = "afead5aba5aa507ad5a3bf01f58f82c8d1403495" uuid = "4f6342f7-b3d2-589e-9d20-edeb45f2b2bc" -version = "1.6.9+4" +version = "1.8.6+0" [[deps.Xorg_libXau_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "4e490d5c960c314f33885790ed410ff3a94ce67e" +deps = ["Artifacts", "JLLWrappers", "Libdl"] +git-tree-sha1 = "6035850dcc70518ca32f012e46015b9beeda49d8" uuid = "0c0b7dd1-d40b-584c-a123-a41640f87eec" -version = "1.0.9+4" +version = "1.0.11+0" [[deps.Xorg_libXcursor_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXfixes_jll", "Xorg_libXrender_jll"] @@ -2316,10 +2653,10 @@ uuid = "935fb764-8cf2-53bf-bb30-45bb1f8bf724" version = "1.2.0+4" [[deps.Xorg_libXdmcp_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "4fe47bd2247248125c428978740e18a681372dd4" +deps = ["Artifacts", "JLLWrappers", "Libdl"] +git-tree-sha1 = "34d526d318358a859d7de23da945578e8e8727b7" uuid = "a3789734-cfe1-5b06-b2d0-1dd0d9d62d05" -version = "1.1.3+4" +version = "1.1.4+0" [[deps.Xorg_libXext_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"] @@ -2358,22 +2695,22 @@ uuid = "ea2f1a96-1ddc-540d-b46f-429655e07cfa" version = "0.9.10+4" [[deps.Xorg_libpthread_stubs_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "6783737e45d3c59a4a4c4091f5f88cdcf0908cbb" +deps = ["Artifacts", "JLLWrappers", "Libdl"] +git-tree-sha1 = "8fdda4c692503d44d04a0603d9ac0982054635f9" uuid = "14d82f49-176c-5ed1-bb49-ad3f5cbd8c74" -version = "0.1.0+3" +version = "0.1.1+0" [[deps.Xorg_libxcb_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "XSLT_jll", "Xorg_libXau_jll", "Xorg_libXdmcp_jll", "Xorg_libpthread_stubs_jll"] -git-tree-sha1 = "daf17f441228e7a3833846cd048892861cff16d6" +deps = ["Artifacts", "JLLWrappers", "Libdl", "XSLT_jll", "Xorg_libXau_jll", "Xorg_libXdmcp_jll", "Xorg_libpthread_stubs_jll"] +git-tree-sha1 = "b4bfde5d5b652e22b9c790ad00af08b6d042b97d" uuid = "c7cfdc94-dc32-55de-ac96-5a1b8d977c5b" -version = "1.13.0+3" +version = "1.15.0+0" [[deps.Xorg_libxkbfile_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"] -git-tree-sha1 = "926af861744212db0eb001d9e40b5d16292080b2" +deps = ["Artifacts", "JLLWrappers", "Libdl", "Xorg_libX11_jll"] +git-tree-sha1 = "730eeca102434283c50ccf7d1ecdadf521a765a4" uuid = "cc61e674-0454-545c-8b26-ed2c68acab7a" -version = "1.1.0+4" +version = "1.1.2+0" [[deps.Xorg_xcb_util_image_jll]] deps = ["Artifacts", "JLLWrappers", 
"Libdl", "Pkg", "Xorg_xcb_util_jll"] @@ -2406,22 +2743,22 @@ uuid = "c22f9ab0-d5fe-5066-847c-f4bb1cd4e361" version = "0.4.1+1" [[deps.Xorg_xkbcomp_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libxkbfile_jll"] -git-tree-sha1 = "4bcbf660f6c2e714f87e960a171b119d06ee163b" +deps = ["Artifacts", "JLLWrappers", "Libdl", "Xorg_libxkbfile_jll"] +git-tree-sha1 = "330f955bc41bb8f5270a369c473fc4a5a4e4d3cb" uuid = "35661453-b289-5fab-8a00-3d9160c6a3a4" -version = "1.4.2+4" +version = "1.4.6+0" [[deps.Xorg_xkeyboard_config_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xkbcomp_jll"] -git-tree-sha1 = "5c8424f8a67c3f2209646d4425f3d415fee5931d" +deps = ["Artifacts", "JLLWrappers", "Libdl", "Xorg_xkbcomp_jll"] +git-tree-sha1 = "691634e5453ad362044e2ad653e79f3ee3bb98c3" uuid = "33bec58e-1273-512f-9401-5d533626f822" -version = "2.27.0+4" +version = "2.39.0+0" [[deps.Xorg_xtrans_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "79c31e7844f6ecf779705fbc12146eb190b7d845" +deps = ["Artifacts", "JLLWrappers", "Libdl"] +git-tree-sha1 = "e92a1a012a10506618f10b7047e478403a046c77" uuid = "c5fb5394-a638-5e4d-96e5-b29de1b5cf10" -version = "1.4.0+3" +version = "1.5.0+0" [[deps.ZipFile]] deps = ["Libdl", "Printf", "Zlib_jll"] @@ -2432,7 +2769,7 @@ version = "0.10.1" [[deps.Zlib_jll]] deps = ["Libdl"] uuid = "83775a58-1f1d-513f-b197-d71354ab007a" -version = "1.2.12+3" +version = "1.2.13+0" [[deps.Zstd_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl"] @@ -2441,10 +2778,20 @@ uuid = "3161d3a3-bdf6-5164-811a-617609db77b4" version = "1.5.5+0" [[deps.Zygote]] -deps = ["AbstractFFTs", "ChainRules", "ChainRulesCore", "DiffRules", "Distributed", "FillArrays", "ForwardDiff", "GPUArrays", "GPUArraysCore", "IRTools", "InteractiveUtils", "LinearAlgebra", "LogExpFunctions", "MacroTools", "NaNMath", "Random", "Requires", "SnoopPrecompile", "SparseArrays", "SpecialFunctions", "Statistics", "ZygoteRules"] -git-tree-sha1 = "987ae5554ca90e837594a0f30325eeb5e7303d1e" +deps = ["AbstractFFTs", "ChainRules", "ChainRulesCore", "DiffRules", "Distributed", "FillArrays", "ForwardDiff", "GPUArrays", "GPUArraysCore", "IRTools", "InteractiveUtils", "LinearAlgebra", "LogExpFunctions", "MacroTools", "NaNMath", "PrecompileTools", "Random", "Requires", "SparseArrays", "SpecialFunctions", "Statistics", "ZygoteRules"] +git-tree-sha1 = "5be3ddb88fc992a7d8ea96c3f10a49a7e98ebc7b" uuid = "e88e6eb3-aa80-5325-afca-941959d7151f" -version = "0.6.60" +version = "0.6.62" + + [deps.Zygote.extensions] + ZygoteColorsExt = "Colors" + ZygoteDistancesExt = "Distances" + ZygoteTrackerExt = "Tracker" + + [deps.Zygote.weakdeps] + Colors = "5ae59095-9a9b-59fe-a467-6f913c188581" + Distances = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7" + Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c" [[deps.ZygoteRules]] deps = ["ChainRulesCore", "MacroTools"] @@ -2454,9 +2801,9 @@ version = "0.2.3" [[deps.cuDNN]] deps = ["CEnum", "CUDA", "CUDNN_jll"] -git-tree-sha1 = "3aa15aba7aad5be8b9b3c1b77a9b81e3e1357280" +git-tree-sha1 = "ee79f97d07bf875231559f9b3f2649f34fac140b" uuid = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd" -version = "1.0.2" +version = "1.1.0" [[deps.fzf_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] @@ -2477,9 +2824,9 @@ uuid = "0ac62f75-1d6f-5e53-bd7c-93b484bb37c0" version = "0.15.1+0" [[deps.libblastrampoline_jll]] -deps = ["Artifacts", "Libdl", "OpenBLAS_jll"] +deps = ["Artifacts", "Libdl"] uuid = "8e850b90-86db-534c-a0d3-1478176c7d93" -version = "5.1.1+0" +version = "5.8.0+0" [[deps.libfdk_aac_jll]] deps = 
["Artifacts", "JLLWrappers", "Libdl", "Pkg"] diff --git a/docs/Project.toml b/docs/Project.toml index 18b07f3..423a49a 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -1,4 +1,6 @@ [deps] +BetaML = "024491cd-cc6b-443e-8034-08ea7eb7db2b" +CSV = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b" CategoricalArrays = "324d7699-5711-5eae-9e2f-1d82baa6b597" ConformalPrediction = "98bfc277-1877-43dc-819b-a3e38c30242f" DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" @@ -20,6 +22,7 @@ MLJLinearModels = "6ee0df7b-362f-4a72-a706-9e79364fb692" MLJModelInterface = "e80e1ace-859a-464e-9ed9-23947d8ae3ea" MLJMultivariateStatsInterface = "1b6a4a23-ba22-4f51-9698-8599985d3728" MLJNaiveBayesInterface = "33e4bacb-b9e2-458e-9a13-5d9a90b235fa" +Measures = "442fdcdd-2543-5da2-b0f3-8c86c306513e" NaiveBayes = "9bbee03b-0db5-5f46-924f-b5c9c21b8c60" NaturalSort = "c020b1a1-e9b0-503a-9c33-f039bfc54a85" NearestNeighborModels = "636a865e-7cf4-491e-846c-de09b730eb36" @@ -28,4 +31,7 @@ Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" PlutoUI = "7f904dfe-b85e-4ff6-b463-dae2292396a8" Polynomials = "f27b6e38-b328-58d1-80ce-0feddd5e7a45" PrettyTables = "08abe8d2-0d0c-5749-adfa-8a2ac140af0d" +Serialization = "9e88b42a-f829-5b0c-bbe9-9e923198166b" StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" +Transformers = "21ca0261-441d-5938-ace7-c90938fde4d4" +UnicodePlots = "b8865327-cd53-5732-bb35-84acbb429228" diff --git a/docs/setup_docs.jl b/docs/setup_docs.jl new file mode 100644 index 0000000..6a1563b --- /dev/null +++ b/docs/setup_docs.jl @@ -0,0 +1,32 @@ +setup_docs = quote + + # Environment: + using Pkg + Pkg.activate("docs") + + # Dependencies: + using ConformalPrediction + using CSV + using DataFrames + using Flux + using MLJBase + using MLJFlux + using Plots + using Plots.PlotMeasures + using Random + using Serialization + using StatsBase + using Transformers + using Transformers.TextEncoders + using Transformers.HuggingFace + + # Explicit imports: + import MLJModelInterface as MMI + import UnicodePlots + + # Setup: + theme(:wong) + Random.seed!(2023) + www_path = "$(pwd())/docs/src/www" + +end; diff --git a/docs/src/how_to_guides/llm.md b/docs/src/how_to_guides/llm.md new file mode 100644 index 0000000..efb6e06 --- /dev/null +++ b/docs/src/how_to_guides/llm.md @@ -0,0 +1,193 @@ +# How to Build a Conformal Chatbot + +``` @meta +CurrentModule = ConformalPrediction +``` + +Large Language Models are all the buzz right now. They are used for a variety of tasks, including text classification, question answering, and text generation. In this tutorial, we will show how to conformalize a transformer language model for text classification. We will use the [Banking77](https://arxiv.org/abs/2003.04807) dataset (Casanueva et al. 2020), which consists of 13,083 queries from 77 intents. On the model side, we will use the [DistilRoBERTa](https://huggingface.co/mrm8488/distilroberta-finetuned-banking77) model, which is a distilled version of [RoBERTa](https://arxiv.org/abs/1907.11692) (Liu et al. 2019) finetuned on the Banking77 dataset. + +## Data + +The data was downloaded from [HuggingFace](https://huggingface.co/datasets/PolyAI/banking77) πŸ€— (HF) and split into a proper training, calibration, and test set. All that’s left to do is to load the data and preprocess it. 
We add 1 to the labels to make them 1-indexed (sorry, Pythonistas 😜).
+
+``` julia
+# Get labels:
+df_labels = CSV.read("dev/artifacts/data/banking77/labels.csv", DataFrame, drop=[1])
+labels = df_labels[:,1]
+
+# Get data:
+df_train = CSV.read("dev/artifacts/data/banking77/train.csv", DataFrame, drop=[1])
+df_cal = CSV.read("dev/artifacts/data/banking77/calibration.csv", DataFrame, drop=[1])
+df_full_train = vcat(df_train, df_cal)
+train_ratio = round(nrow(df_train)/nrow(df_full_train), digits=2)
+df_test = CSV.read("dev/artifacts/data/banking77/test.csv", DataFrame, drop=[1])
+
+# Preprocess data:
+queries_train, y_train = collect(df_train.text), categorical(df_train.labels .+ 1)
+queries_cal, y_cal = collect(df_cal.text), categorical(df_cal.labels .+ 1)
+queries, y = collect(df_full_train.text), categorical(df_full_train.labels .+ 1)
+queries_test, y_test = collect(df_test.text), categorical(df_test.labels .+ 1)
+```
+
+## HuggingFace Model
+
+The model can be loaded from HF straight into our running Julia session using the [`Transformers.jl`](https://github.com/chengchingwen/Transformers.jl/tree/master) package. Below we load the tokenizer `tkr` and the model `mod`. The tokenizer is used to convert the text into a sequence of integers, which is then fed into the model. The model outputs a hidden state, which in turn is fed into a classifier to get the logits for each class. Finally, the logits are passed through a softmax function to get the corresponding predicted probabilities. Below we run a few queries through the model to see how it performs.
+
+``` julia
+# Load model from HF 🤗:
+tkr = hgf"mrm8488/distilroberta-finetuned-banking77:tokenizer"
+mod = hgf"mrm8488/distilroberta-finetuned-banking77:ForSequenceClassification"
+
+# Test model:
+query = [
+    "What is the base of the exchange rates?",
+    "Why is my card not working?",
+    "My Apple Pay is not working, what should I do?",
+]
+a = encode(tkr, query)
+b = mod.model(a)
+c = mod.cls(b.hidden_state)
+d = softmax(c.logit)
+[labels[i] for i in Flux.onecold(d)]
+```
+
+    3-element Vector{String}:
+     "exchange_rate"
+     "card_not_working"
+     "apple_pay_or_google_pay"
+
+## `MLJ` Interface
+
+Since our package is interfaced to [`MLJ.jl`](https://alan-turing-institute.github.io/MLJ.jl/dev/), we need to define a wrapper model that conforms to the `MLJ` interface. In order to add the model for general use, we would probably go through [`MLJFlux.jl`](https://github.com/FluxML/MLJFlux.jl), but for this tutorial, we will make our life easy and simply overload the `MLJBase.fit` and `MLJBase.predict` methods. Since the model from HF is already pre-trained and we are not interested in further fine-tuning, we simply return the model object in the `MLJBase.fit` method. The `MLJBase.predict` method then takes the model object and the query and returns the predicted probabilities. We also need to define the `MLJBase.target_scitype` and `MLJBase.predict_mode` methods. The former tells `MLJ` what the output type of the model is, and the latter can be used to retrieve the label with the highest predicted probability.
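+
+Before implementing these methods, it may help to see what `predict` will return: a (vector of) `UnivariateFinite` distribution(s) over the class labels. Below is a minimal toy sketch of that type with made-up labels and probabilities; it assumes the packages from our docs setup (e.g. `MLJBase`) are loaded and is not part of the chatbot itself:
+
+``` julia
+# A discrete distribution over three hypothetical class labels:
+d = UnivariateFinite(["a", "b", "c"], [0.1, 0.7, 0.2], pool=missing)
+mode(d)      # "b", the label with the highest probability
+pdf(d, "a")  # 0.1
+```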
+
+``` julia
+struct IntentClassifier <: MLJBase.Probabilistic
+    tkr::TextEncoders.AbstractTransformerTextEncoder
+    mod::HuggingFace.HGFRobertaForSequenceClassification
+end
+
+# Keyword constructor:
+function IntentClassifier(;
+    tokenizer::TextEncoders.AbstractTransformerTextEncoder,
+    model::HuggingFace.HGFRobertaForSequenceClassification,
+)
+    IntentClassifier(tokenizer, model)
+end
+
+function get_hidden_state(clf::IntentClassifier, query::Union{AbstractString, Vector{<:AbstractString}})
+    token = encode(clf.tkr, query)
+    hidden_state = clf.mod.model(token).hidden_state
+    return hidden_state
+end
+
+# This doesn't actually retrain the model, but it retrieves the classifier object:
+function MLJBase.fit(clf::IntentClassifier, verbosity, X, y)
+    cache = nothing
+    report = nothing
+    fitresult = (clf = clf.mod.cls, labels = levels(y))
+    return fitresult, cache, report
+end
+
+function MLJBase.predict(clf::IntentClassifier, fitresult, Xnew)
+    output = fitresult.clf(get_hidden_state(clf, Xnew))
+    p̂ = UnivariateFinite(fitresult.labels, softmax(output.logit)', pool=missing)
+    return p̂
+end
+
+MLJBase.target_scitype(clf::IntentClassifier) = AbstractVector{<:Finite}
+
+MLJBase.predict_mode(clf::IntentClassifier, fitresult, Xnew) = mode.(MLJBase.predict(clf, fitresult, Xnew))
+```
+
+To test that everything is working as expected, we fit the model and generate predictions for a subset of the test data:
+
+``` julia
+clf = IntentClassifier(tkr, mod)
+top_n = 10
+fitresult, _, _ = MLJBase.fit(clf, 1, nothing, y_test[1:top_n])
+@time ŷ = MLJBase.predict(clf, fitresult, queries_test[1:top_n]);
+```
+
+## Conformal Chatbot
+
+To turn the wrapped, pre-trained model into a conformal intent classifier, we can now rely on standard API calls. We first wrap our atomic model, specifying the desired coverage rate and method. Since even simple forward passes are computationally expensive for our (small) LLM, we rely on Simple Inductive Conformal Classification.
+
+``` julia
+conf_model = conformal_model(clf; coverage=0.95, method=:simple_inductive, train_ratio=train_ratio)
+mach = machine(conf_model, queries, y)
+@time fit!(mach)
+Serialization.serialize("dev/artifacts/models/banking77/simple_inductive.jls", mach)
+```
+
+Finally, we use our conformal LLM to build a simple yet powerful chatbot that runs directly in the Julia REPL. Without dwelling on the details too much, the `conformal_chatbot` works as follows:
+
+1. Prompt the user to explain their intent.
+2. Feed the user input through the conformal LLM and present the output to the user.
+3. If the conformal prediction set includes more than one label, prompt the user to either refine their input or choose one of the options included in the set.
+
+``` julia
+mach = Serialization.deserialize("dev/artifacts/models/banking77/simple_inductive.jls")
+
+function prediction_set(mach, query::String)
+    p̂ = MLJBase.predict(mach, query)[1]
+    probs = pdf.(p̂, collect(1:77))
+    in_set = findall(probs .!= 0)
+    labels_in_set = labels[in_set]
+    probs_in_set = probs[in_set]
+    _order = sortperm(-probs_in_set)
+    plt = UnicodePlots.barplot(labels_in_set[_order], probs_in_set[_order], title="Possible Intents")
+    # Return labels sorted by predicted probability so that option numbers match the plot:
+    return labels_in_set[_order], plt
+end
+
+function conformal_chatbot()
+    println("👋 Hi, I'm Julia, your conformal chatbot. I'm here to help you with your banking query. 
Ask me anything or type 'exit' to exit ...\n")
+    completed = false
+    queries = ""
+    labels = String[]
+    while !completed
+        query = readline()
+
+        # Exit before running the input through the model:
+        if query == "exit"
+            println("👋 Bye!")
+            break
+        end
+
+        # The user picked one of the suggested options by number:
+        if query ∈ string.(collect(1:77)) && !isempty(labels)
+            println("👍 Great! You've chosen '$(labels[parse(Int64, query)])'. I'm glad I could help you. Have a nice day!")
+            completed = true
+            continue
+        end
+
+        # Feed the accumulated user input through the conformal LLM:
+        queries = queries * "," * query
+        labels, plt = prediction_set(mach, queries)
+        if length(labels) > 1
+            println("🤔 Hmmm ... I can think of several options here. If any of these applies, simply type the corresponding number (e.g. '1' for the first option). Otherwise, can you refine your question, please?\n")
+            println(plt)
+        else
+            println("🥳 I think you mean $(labels[1]). Correct?")
+        end
+    end
+end
+```
+
+Below we show the output for two example queries. The first one is deliberately ambiguous. As expected, the resulting prediction set is large.
+
+``` julia
+ambiguous_query = "transfer mondey?"
+prediction_set(mach, ambiguous_query)[2]
+```
+
+The more refined version of the prompt yields a smaller prediction set: less ambiguous prompts result in lower predictive uncertainty.
+
+``` julia
+refined_query = "I tried to transfer money to my friend, but it failed."
+prediction_set(mach, refined_query)[2]
+```
+
+Below we include a short demo video that shows the REPL-based chatbot in action.
+
+![](../../../docs/src/www/demo_llm.gif)
+
+## Final Remarks
+
+This work was done in collaboration with colleagues at ING as part of the ING Analytics 2023 Experiment Week. Our team demonstrated that Conformal Prediction provides a powerful and principled alternative to top-*K* intent classification. We won the first prize by popular vote.
+
+Casanueva, Iñigo, Tadas Temčinas, Daniela Gerz, Matthew Henderson, and Ivan Vulić. 2020. “Efficient Intent Detection with Dual Sentence Encoders.” In *Proceedings of the 2nd Workshop on Natural Language Processing for Conversational AI*, 38–45. Online: Association for Computational Linguistics.
+
+Liu, Yinhan, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. “RoBERTa: A Robustly Optimized BERT Pretraining Approach.” arXiv.
diff --git a/docs/src/how_to_guides/llm.qmd b/docs/src/how_to_guides/llm.qmd
new file mode 100644
index 0000000..36857e0
--- /dev/null
+++ b/docs/src/how_to_guides/llm.qmd
@@ -0,0 +1,193 @@
+
+```{julia}
+#| echo: false
+include("$(pwd())/docs/setup_docs.jl")
+eval(setup_docs)
+```
+
+# How to Build a Conformal Chatbot
+
+Large Language Models are all the rage right now. They are used for a variety of tasks, including text classification, question answering, and text generation. In this tutorial, we will show how to conformalize a transformer language model for text classification. We will use the [Banking77](https://arxiv.org/abs/2003.04807) dataset [@casanueva2020efficient], which consists of 13,083 queries from 77 intents. On the model side, we will use the [DistilRoBERTa](https://huggingface.co/mrm8488/distilroberta-finetuned-banking77) model, which is a distilled version of [RoBERTa](https://arxiv.org/abs/1907.11692) [@liu2019roberta] fine-tuned on the Banking77 dataset.
+
+## Data
+
+The data was downloaded from [HuggingFace](https://huggingface.co/datasets/PolyAI/banking77) 🤗 (HF) and split into a proper training, calibration, and test set. All that's left to do is to load the data and preprocess it.
We add 1 to the labels to make them 1-indexed (sorry, Pythonistas 😜).
+
+```{julia}
+# Get labels:
+df_labels = CSV.read("dev/artifacts/data/banking77/labels.csv", DataFrame, drop=[1])
+labels = df_labels[:,1]
+
+# Get data:
+df_train = CSV.read("dev/artifacts/data/banking77/train.csv", DataFrame, drop=[1])
+df_cal = CSV.read("dev/artifacts/data/banking77/calibration.csv", DataFrame, drop=[1])
+df_full_train = vcat(df_train, df_cal)
+train_ratio = round(nrow(df_train)/nrow(df_full_train), digits=2)
+df_test = CSV.read("dev/artifacts/data/banking77/test.csv", DataFrame, drop=[1])
+
+# Preprocess data:
+queries_train, y_train = collect(df_train.text), categorical(df_train.labels .+ 1)
+queries_cal, y_cal = collect(df_cal.text), categorical(df_cal.labels .+ 1)
+queries, y = collect(df_full_train.text), categorical(df_full_train.labels .+ 1)
+queries_test, y_test = collect(df_test.text), categorical(df_test.labels .+ 1)
+```
+
+## HuggingFace Model
+
+The model can be loaded from HF straight into our running Julia session using the [`Transformers.jl`](https://github.com/chengchingwen/Transformers.jl/tree/master) package. Below we load the tokenizer `tkr` and the model `mod`. The tokenizer is used to convert the text into a sequence of integers, which is then fed into the model. The model outputs a hidden state, which in turn is fed into a classifier to get the logits for each class. Finally, the logits are passed through a softmax function to get the corresponding predicted probabilities. Below we run a few queries through the model to see how it performs.
+
+```{julia}
+#| output: true
+
+# Load model from HF 🤗:
+tkr = hgf"mrm8488/distilroberta-finetuned-banking77:tokenizer"
+mod = hgf"mrm8488/distilroberta-finetuned-banking77:ForSequenceClassification"
+
+# Test model:
+query = [
+    "What is the base of the exchange rates?",
+    "Why is my card not working?",
+    "My Apple Pay is not working, what should I do?",
+]
+a = encode(tkr, query)
+b = mod.model(a)
+c = mod.cls(b.hidden_state)
+d = softmax(c.logit)
+[labels[i] for i in Flux.onecold(d)]
+```
+
+## `MLJ` Interface
+
+Since our package is interfaced to [`MLJ.jl`](https://alan-turing-institute.github.io/MLJ.jl/dev/), we need to define a wrapper model that conforms to the `MLJ` interface. In order to add the model for general use, we would probably go through [`MLJFlux.jl`](https://github.com/FluxML/MLJFlux.jl), but for this tutorial, we will make our life easy and simply overload the `MLJBase.fit` and `MLJBase.predict` methods. Since the model from HF is already pre-trained and we are not interested in further fine-tuning, we simply return the model object in the `MLJBase.fit` method. The `MLJBase.predict` method then takes the model object and the query and returns the predicted probabilities. We also need to define the `MLJBase.target_scitype` and `MLJBase.predict_mode` methods. The former tells `MLJ` what the output type of the model is, and the latter can be used to retrieve the label with the highest predicted probability.
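+
+Before implementing these methods, it may help to see what `predict` will return: a (vector of) `UnivariateFinite` distribution(s) over the class labels. Below is a minimal toy sketch of that type with made-up labels and probabilities; it assumes the packages from our docs setup (e.g. `MLJBase`) are loaded and is not part of the chatbot itself:
+
+```{.julia}
+# A discrete distribution over three hypothetical class labels:
+d = UnivariateFinite(["a", "b", "c"], [0.1, 0.7, 0.2], pool=missing)
+mode(d)      # "b", the label with the highest probability
+pdf(d, "a")  # 0.1
+```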
+
+```{julia}
+struct IntentClassifier <: MLJBase.Probabilistic
+    tkr::TextEncoders.AbstractTransformerTextEncoder
+    mod::HuggingFace.HGFRobertaForSequenceClassification
+end
+
+function IntentClassifier(;
+    tokenizer::TextEncoders.AbstractTransformerTextEncoder,
+    model::HuggingFace.HGFRobertaForSequenceClassification,
+)
+    IntentClassifier(tokenizer, model)
+end
+
+function get_hidden_state(clf::IntentClassifier, query::Union{AbstractString, Vector{<:AbstractString}})
+    token = encode(clf.tkr, query)
+    hidden_state = clf.mod.model(token).hidden_state
+    return hidden_state
+end
+
+# This doesn't actually retrain the model, but it retrieves the classifier object:
+function MLJBase.fit(clf::IntentClassifier, verbosity, X, y)
+    cache = nothing
+    report = nothing
+    fitresult = (clf = clf.mod.cls, labels = levels(y))
+    return fitresult, cache, report
+end
+
+function MLJBase.predict(clf::IntentClassifier, fitresult, Xnew)
+    output = fitresult.clf(get_hidden_state(clf, Xnew))
+    pΜ‚ = UnivariateFinite(fitresult.labels, softmax(output.logit)', pool=missing)
+    return pΜ‚
+end
+
+MLJBase.target_scitype(clf::IntentClassifier) = AbstractVector{<:Finite}
+
+MLJBase.predict_mode(clf::IntentClassifier, fitresult, Xnew) = mode.(MLJBase.predict(clf, fitresult, Xnew))
+```
+
+To test that everything is working as expected, we fit the model and generate predictions for a subset of the test data:
+
+```{julia}
+clf = IntentClassifier(tkr, mod)
+top_n = 10
+fitresult, _, _ = MLJBase.fit(clf, 1, nothing, y_test[1:top_n])
+@time yΜ‚ = MLJBase.predict(clf, fitresult, queries_test[1:top_n]);
+```
+
+## Conformal Chatbot
+
+To turn the wrapped, pre-trained model into a conformal intent classifier, we can now rely on standard API calls. We first wrap our atomic model, specifying the desired coverage rate and method. Since even simple forward passes are computationally expensive for our (small) LLM, we rely on Simple Inductive Conformal Classification.
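
For intuition, the simple inductive procedure boils down to a quantile of calibration nonconformity scores: score each calibration example with the default heuristic `s = 1 - pΜ‚(y)`, and include in the prediction set every label whose score stays below the adjusted quantile. The sketch below uses hypothetical numbers purely for illustration; `conformal_model` and `fit!` handle all of this internally:

``` julia
using Statistics: quantile

# Hypothetical predicted probabilities of the true label on five calibration queries:
cal_probs = [0.9, 0.8, 0.95, 0.3, 0.85]
scores = 1 .- cal_probs                  # nonconformity scores: [0.1, 0.2, 0.05, 0.7, 0.15]
n = length(scores)
coverage = 0.95
qΜ‚ = quantile(scores, min(1.0, ceil((n + 1) * coverage) / n))  # finite-sample corrected quantile

# Prediction set for a new query: all labels y with 1 - pΜ‚(y) <= qΜ‚.
new_probs = [0.05, 0.45, 0.35, 0.15]     # hypothetical probabilities over four labels
pred_set = findall(1 .- new_probs .<= qΜ‚) # here: labels 2 and 3, i.e. a set of size two
```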
+
+```{.julia}
+#| eval: false
+
+conf_model = conformal_model(clf; coverage=0.95, method=:simple_inductive, train_ratio=train_ratio)
+mach = machine(conf_model, queries, y)
+@time fit!(mach)
+Serialization.serialize("dev/artifacts/models/banking77/simple_inductive.jls", mach)
+```
+
+Finally, we use our conformal LLM to build a simple and yet powerful chatbot that runs directly in the Julia REPL. Without dwelling on the details too much, the `conformal_chatbot` works as follows:
+
+1. Prompt the user to explain their intent.
+2. Feed the user input through the conformal LLM and present the output to the user.
+3. If the conformal prediction set includes more than one label, prompt the user to either refine their input or choose one of the options included in the set.
+
+```{julia}
+mach = Serialization.deserialize("dev/artifacts/models/banking77/simple_inductive.jls")
+
+function prediction_set(mach, query::String)
+    pΜ‚ = MLJBase.predict(mach, query)[1]
+    probs = pdf.(pΜ‚, collect(1:77))
+    in_set = findall(probs .!= 0)
+    labels_in_set = labels[in_set]
+    probs_in_set = probs[in_set]
+    _order = sortperm(-probs_in_set)
+    plt = UnicodePlots.barplot(labels_in_set[_order], probs_in_set[_order], title="Possible Intents")
+    return labels_in_set, plt
+end
+
+function conformal_chatbot()
+    println("πŸ‘‹ Hi, I'm Julia, your conformal chatbot. I'm here to help you with your banking query. Ask me anything or type 'exit' to exit ...\n")
+    completed = false
+    queries = ""
+    while !completed
+        query = readline()
+        queries = queries * "," * query
+        labels, plt = prediction_set(mach, queries)
+        if length(labels) > 1
+            println("πŸ€” Hmmm ... I can think of several options here. If any of these applies, simply type the corresponding number (e.g. '1' for the first option). Otherwise, can you refine your question, please?\n")
+            println(plt)
+        else
+            println("πŸ₯³ I think you mean $(labels[1]). Correct?")
+        end
+
+        # Exit:
+        if query == "exit"
+            println("πŸ‘‹ Bye!")
+            break
+        end
+        if query ∈ string.(collect(1:77))
+            println("πŸ‘ Great! You've chosen '$(labels[parse(Int64, query)])'. I'm glad I could help you. Have a nice day!")
+            completed = true
+        end
+    end
+end
+```
+
+Below we show the output for two example queries. The first one is very ambiguous. As expected, the size of the prediction set is therefore large.
+
+```{julia}
+ambiguous_query = "transfer mondey?"
+prediction_set(mach, ambiguous_query)[2]
+```
+
+The more refined version of the prompt yields a smaller prediction set: less ambiguous prompts result in lower predictive uncertainty.
+
+```{julia}
+refined_query = "I tried to transfer money to my friend, but it failed."
+prediction_set(mach, refined_query)[2]
+```
+
+Below we include a short demo video that shows the REPL-based chatbot in action.
+
+![](/docs/src/www/demo_llm.gif)
+
+## Final Remarks
+
+This work was done in collaboration with colleagues at ING as part of the ING Analytics 2023 Experiment Week. Our team demonstrated that Conformal Prediction provides a powerful and principled alternative to top-*K* intent classification. We won the first prize by popular vote.
+
+## References
\ No newline at end of file
diff --git a/docs/src/how_to_guides/mnist.md b/docs/src/how_to_guides/mnist.md
index a7d2b59..cc72727 100644
--- a/docs/src/how_to_guides/mnist.md
+++ b/docs/src/how_to_guides/mnist.md
@@ -1,8 +1,3 @@
-
-``` @meta
-CurrentModule = ConformalPrediction
-```
-
 # How to Conformalize a Deep Image Classifier
 
 Deep Learning is popular and β€” for some tasks like image classification β€” remarkably powerful. But it is also well-known that Deep Neural Networks (DNN) can be unstable (Goodfellow, Shlens, and Szegedy 2014) and poorly calibrated. Conformal Prediction can be used to mitigate these pitfalls. This how-to guide demonstrates how you can build an image classifier in `Flux.jl` and conformalize its predictions. For a formal treatment see A. Angelopoulos et al. (2022).
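
The hunks below update this guide's reported evaluation output. For orientation, here is a minimal sketch of the evaluation being re-run, assuming `clf` denotes the Flux-based image classifier built in the guide and `(X, y)` the MNIST data already in scope:

``` julia
using ConformalPrediction, MLJ

# Wrap the atomic classifier and fit the conformal machine:
conf_model = conformal_model(clf; coverage=0.95, method=:simple_inductive)
mach = machine(conf_model, X, y)
fit!(mach)

# Evaluate empirical coverage and size-stratified coverage (SSC):
_eval = evaluate!(
    mach;
    operation=MLJ.predict,
    measure=[emp_coverage, size_stratified_coverage],
)
println("Empirical coverage: $(round(_eval.measurement[1], digits=3))")
println("SSC: $(round(_eval.measurement[2], digits=3))")
```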
@@ -135,16 +130,16 @@ println("SSC: $(round(_eval.measurement[2], digits=3))")
     per_observation, fitted_params_per_fold, report_per_fold, train_test_rows
     Extract:
-    β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€
-    β”‚ measure                                                   β”‚ operation β”‚ meas β‹―
-    β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€
-    β”‚ emp_coverage (generic function with 1 method)             β”‚ predict   β”‚ 0.95 β‹―
-    β”‚ size_stratified_coverage (generic function with 1 method) β”‚ predict   β”‚ 0.86 β‹―
-    └───────────────────────────────────────────────────────────┴───────────┴───────
-    2 columns omitted
+    β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€
+    β”‚ measure                                      β”‚ operation β”‚ measurement β”‚ per β‹―
+    β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€
+    β”‚ ConformalPrediction.emp_coverage             β”‚ predict   β”‚ 0.96        β”‚ [0. β‹―
+    β”‚ ConformalPrediction.size_stratified_coverage β”‚ predict   β”‚ 0.885       β”‚ [0. β‹―
+    └──────────────────────────────────────────────┴───────────┴─────────────┴──────
+    1 column omitted
 
-    Empirical coverage: 0.955
-    SSC: 0.867
+    Empirical coverage: 0.96
+    SSC: 0.885
 
 Unsurprisingly, we can attain higher adaptivity (SSC) when using adaptive prediction sets:
 
@@ -169,16 +164,16 @@ println("SSC: $(round(_eval.measurement[2], digits=3))")
     per_observation, fitted_params_per_fold, report_per_fold, train_test_rows
     Extract:
-    β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€
-    β”‚ measure                                                   β”‚ operation β”‚ meas β‹―
-    β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€
-    β”‚ emp_coverage (generic function with 1 method)             β”‚ predict   β”‚ 0.99 β‹―
-    β”‚ size_stratified_coverage (generic function with 1 method) β”‚ predict   β”‚ 0.96 β‹―
-    └───────────────────────────────────────────────────────────┴───────────┴───────
-    2 columns omitted
+    β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€
+    β”‚ measure                                      β”‚ operation β”‚ measurement β”‚ per β‹―
+    β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€
+    β”‚ ConformalPrediction.emp_coverage             β”‚ predict   β”‚ 1.0         β”‚ [1. β‹―
+    β”‚ ConformalPrediction.size_stratified_coverage β”‚ predict   β”‚ 1.0         β”‚ [1. β‹―
+    └──────────────────────────────────────────────┴───────────┴─────────────┴──────
+    1 column omitted
 
-    Empirical coverage: 0.995
-    SSC: 0.967
+    Empirical coverage: 1.0
+    SSC: 1.0
 
 We can also have a look at the resulting set size for both approaches:
 
@@ -196,7 +191,7 @@ plot(plt_list..., size=(800,300))
 
 Angelopoulos, Anastasios N., and Stephen Bates. 2021. β€œA Gentle Introduction to Conformal Prediction and Distribution-Free Uncertainty Quantification.” .
 
-Angelopoulos, Anastasios, Stephen Bates, Jitendra Malik, and Michael I. Jordan. 2022. β€œUncertainty Sets for Image Classifiers Using Conformal Prediction.” arXiv. .
+Angelopoulos, Anastasios, Stephen Bates, Jitendra Malik, and Michael I. Jordan. 2022. β€œUncertainty Sets for Image Classifiers Using Conformal Prediction.” arXiv. .
 
 Goodfellow, Ian J, Jonathon Shlens, and Christian Szegedy. 2014. β€œExplaining and Harnessing Adversarial Examples.” .
diff --git a/docs/src/how_to_guides/mnist.qmd b/docs/src/how_to_guides/mnist.qmd
index 29319bd..f065578 100644
--- a/docs/src/how_to_guides/mnist.qmd
+++ b/docs/src/how_to_guides/mnist.qmd
@@ -1,7 +1,3 @@
-```@meta
-CurrentModule = ConformalPrediction
-```
-
 # How to Conformalize a Deep Image Classifier
 
 ```{julia}
diff --git a/docs/src/how_to_guides/mnist_files/figure-commonmark/fig-plots-output-1.svg b/docs/src/how_to_guides/mnist_files/figure-commonmark/fig-plots-output-1.svg
index 69e172c..3dd0a06 100644
--- a/docs/src/how_to_guides/mnist_files/figure-commonmark/fig-plots-output-1.svg
+++ b/docs/src/how_to_guides/mnist_files/figure-commonmark/fig-plots-output-1.svg
@@ -1,349 +1,283 @@
[SVG markup diff omitted: regenerated figure output, no recoverable text content]
diff --git a/docs/src/how_to_guides/mnist_files/figure-commonmark/fig-plots-output-2.svg b/docs/src/how_to_guides/mnist_files/figure-commonmark/fig-plots-output-2.svg
index 07ddb40..8fe2a2b 100644
--- a/docs/src/how_to_guides/mnist_files/figure-commonmark/fig-plots-output-2.svg
+++ b/docs/src/how_to_guides/mnist_files/figure-commonmark/fig-plots-output-2.svg
@@ -1,356 +1,270 @@
[SVG markup diff omitted: regenerated figure output, no recoverable text content]
diff --git a/docs/src/how_to_guides/mnist_files/figure-commonmark/fig-plots-output-3.svg b/docs/src/how_to_guides/mnist_files/figure-commonmark/fig-plots-output-3.svg
index 4231d68..799425f 100644
--- a/docs/src/how_to_guides/mnist_files/figure-commonmark/fig-plots-output-3.svg
+++ b/docs/src/how_to_guides/mnist_files/figure-commonmark/fig-plots-output-3.svg
@@ -1,356 +1,285 @@
[SVG markup diff omitted: regenerated figure output, no recoverable text content]
diff --git a/docs/src/how_to_guides/mnist_files/figure-commonmark/fig-samples-output-1.png b/docs/src/how_to_guides/mnist_files/figure-commonmark/fig-samples-output-1.png
index e4de4a0..9b11cbd 100644
Binary files a/docs/src/how_to_guides/mnist_files/figure-commonmark/fig-samples-output-1.png and b/docs/src/how_to_guides/mnist_files/figure-commonmark/fig-samples-output-1.png differ
diff --git a/docs/src/how_to_guides/mnist_files/figure-commonmark/fig-setsize-output-1.svg b/docs/src/how_to_guides/mnist_files/figure-commonmark/fig-setsize-output-1.svg
index c217aad..9b77ec7 100644
--- a/docs/src/how_to_guides/mnist_files/figure-commonmark/fig-setsize-output-1.svg
+++ b/docs/src/how_to_guides/mnist_files/figure-commonmark/fig-setsize-output-1.svg
@@ -1,234 +1,97 @@
[SVG markup diff omitted: regenerated figure output, no recoverable text content]
diff --git a/docs/src/tutorials/classification.qmd b/docs/src/tutorials/classification.qmd
index b0180bd..8471574 100644
--- a/docs/src/tutorials/classification.qmd
+++ b/docs/src/tutorials/classification.qmd
@@ -1,9 +1,5 @@
 # Classification
 
-```@meta
-CurrentModule = ConformalPrediction
-```
-
 ```{julia}
 #| echo: false
 using Pkg; Pkg.activate("docs")
@@ -28,6 +24,7 @@ Random.seed!(123)
 # Data:
 X, y = make_moons(500; noise=0.15)
+X = MLJ.table(convert.(Float32, MLJ.matrix(X)))
 train, test = partition(eachindex(y), 0.8, shuffle=true)
 ```
 
@@ -45,7 +42,7 @@ $$ {#eq-set}
 
 This is the default procedure used for classification and regression in [`ConformalPrediction.jl`](https://github.com/juliatrustworthyai/ConformalPrediction.jl).
 
-Now let's take this to our πŸŒ™ data. To illustrate the package functionality we will demonstrate the envisioned workflow. We first define our atomic machine learning model following standard [`MLJ.jl`](https://alan-turing-institute.github.io/MLJ.jl/v0.18/) conventions. Using [`ConformalPrediction.jl`](https://github.com/juliatrustworthyai/ConformalPrediction.jl) we then wrap our atomic model in a conformal model using the standard API call `conformal_model(model::Supervised; kwargs...)`. To train and predict from our conformal model we can then rely on the conventional [`MLJ.jl`](https://alan-turing-institute.github.io/MLJ.jl/v0.18/) procedure again. In particular, we wrap our conformal model in data (turning it into a machine) and then fit it on the training set. Finally, we use our machine to predict the label for a new test sample `Xtest`:
+Now let's take this to our πŸŒ™ data. To illustrate the package functionality we will demonstrate the envisioned workflow. We first define our atomic machine learning model following standard [`MLJ.jl`](https://alan-turing-institute.github.io/MLJ.jl/v0.18/) conventions. Using [`ConformalPrediction.jl`](https://github.com/juliatrustworthyai/ConformalPrediction.jl) we then wrap our atomic model in a conformal model using the standard API call `conformal_model(model::Supervised; kwargs...)`. To train and predict from our conformal model we can then rely on the conventional [`MLJ.jl`](https://alan-turing-institute.github.io/MLJ.jl/v0.18/) procedure again. In particular, we wrap our conformal model in data (turning it into a machine) and then fit it to the training data. Finally, we use our machine to predict the label for a new test sample `Xtest`:
 
 ```{julia}
 #| output: true
@@ -83,7 +80,7 @@ When specifying a coverage rate very close to one, the prediction set will typic
 ```{julia}
 #| output: true
 
-conf_model = conformal_model(model; coverage=coverage, method=:adaptive_inductive)
+conf_model = conformal_model(model; coverage=coverage, method=:simple_inductive)
 mach = machine(conf_model, X, y)
 fit!(mach, rows=train)
 
@@ -132,7 +129,7 @@ The following chart shows the resulting predicted probabilities for ``y=1`` (lef
 using Plots
 p_proba = contourf(mach.model, mach.fitresult, X, y)
 p_set_size = contourf(mach.model, mach.fitresult, X, y; plot_set_size=true)
-contourf(p_proba, p_set_size, size=(800,250))
+plot(p_proba, p_set_size, size=(800,250))
 ```
 
 ```{julia}
@@ -207,6 +204,13 @@ fit!(mach, rows=train)
 results[:adaptive_inductive] = mach
 ```
 
+```{julia}
+using Plots
+p_proba = contourf(mach.model, mach.fitresult, X, y)
+p_set_size = contourf(mach.model, mach.fitresult, X, y; plot_set_size=true)
+plot(p_proba, p_set_size, size=(800,250))
+```
+
 ## Evaluation
 
 For evaluation of conformal predictors we follow @angelopoulos2021gentle (Section 3). As a first step towards assessing adaptiveness (adaptivity), the authors recommend inspecting the set size of conformal predictions. The chart below shows the set sizes for the different methods:
diff --git a/docs/src/tutorials/regression.qmd b/docs/src/tutorials/regression.qmd
index cbdab37..68d45a0 100644
--- a/docs/src/tutorials/regression.qmd
+++ b/docs/src/tutorials/regression.qmd
@@ -1,9 +1,5 @@
 # Regression
 
-```@meta
-CurrentModule = ConformalPrediction
-```
-
 ```{julia}
 #| echo: false
 using Pkg; Pkg.activate("docs")
@@ -72,7 +68,7 @@ results = Dict()
 for _mod in keys(conformal_models)
     conf_model = conformal_model(pipe; method=_mod, coverage=0.95)
     global mach = machine(conf_model, X, y)
-    fit!(mach, rows=train)
+    MLJ.fit!(mach, rows=train)
     results[_mod] = mach
 end
 ```
diff --git a/docs/src/www/classification.gif b/docs/src/www/classification.gif
index 70f6164..f2ac25a 100644
Binary files a/docs/src/www/classification.gif and b/docs/src/www/classification.gif differ
diff --git a/docs/src/www/demo_llm.gif b/docs/src/www/demo_llm.gif
new file mode 100644
index 0000000..26c5d29
Binary files /dev/null and b/docs/src/www/demo_llm.gif differ
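
The source changes below refactor the duplicated train/calibration split in the inductive classifiers into a shared `split_data` helper. Conceptually, the split amounts to the following sketch (`split_sketch` is a hypothetical stand-in name; the package additionally passes both parts through `MMI.reformat` to bring them into model-specific form):

``` julia
using MLJBase: partition, selectrows

# `train_ratio` of the rows fit the atomic model; the remainder is held out
# to compute nonconformity scores on the calibration set.
function split_sketch(X, y; train_ratio=0.5)
    train, calibration = partition(eachindex(y), train_ratio)
    return selectrows(X, train), y[train], selectrows(X, calibration), y[calibration]
end
```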
diff --git a/src/conformal_models/conformal_models.jl b/src/conformal_models/conformal_models.jl
index 7ff73d0..4289769 100644
--- a/src/conformal_models/conformal_models.jl
+++ b/src/conformal_models/conformal_models.jl
@@ -110,16 +110,18 @@ const available_models = Dict(
 const tested_atomic_models = Dict(
     :regression => Dict(
         :linear => :(@load LinearRegressor pkg = MLJLinearModels),
+        :ridge => :(@load RidgeRegressor pkg = MLJLinearModels),
+        :lasso => :(@load LassoRegressor pkg = MLJLinearModels),
         :evo_tree => :(@load EvoTreeRegressor pkg = EvoTrees),
         :nearest_neighbor => :(@load KNNRegressor pkg = NearestNeighborModels),
-        :light_gbm => :(@load LGBMRegressor pkg = LightGBM),
+        # :light_gbm => :(@load LGBMRegressor pkg = LightGBM),
         # :neural_network => :(@load NeuralNetworkRegressor pkg = MLJFlux),
     ),
     :classification => Dict(
         :logistic => :(@load LogisticClassifier pkg = MLJLinearModels),
         :evo_tree => :(@load EvoTreeClassifier pkg = EvoTrees),
         :nearest_neighbor => :(@load KNNClassifier pkg = NearestNeighborModels),
-        :light_gbm => :(@load LGBMClassifier pkg = LightGBM),
+        # :light_gbm => :(@load LGBMClassifier pkg = LightGBM),
         # :neural_network => :(@load NeuralNetworkClassifier pkg = MLJFlux),
     ),
 )
diff --git a/src/conformal_models/inductive_classification.jl b/src/conformal_models/inductive_classification.jl
index 1b637f1..9334519 100644
--- a/src/conformal_models/inductive_classification.jl
+++ b/src/conformal_models/inductive_classification.jl
@@ -7,6 +7,22 @@ function score(conf_model::ConformalProbabilisticSet, fitresult, X, y::Union{Not
     score(conf_model, typeof(conf_model.model), fitresult, X, y)
 end
 
+"""
+    split_data(conf_model::ConformalProbabilisticSet, X, y)
+
+Splits the data into a proper training and calibration set.
+"""
+function split_data(conf_model::ConformalProbabilisticSet, X, y)
+    train, calibration = partition(eachindex(y), conf_model.train_ratio)
+    Xtrain = selectrows(X, train)
+    ytrain = y[train]
+    Xtrain, ytrain = MMI.reformat(conf_model.model, Xtrain, ytrain)
+    Xcal = selectrows(X, calibration)
+    ycal = y[calibration]
+    Xcal, ycal = MMI.reformat(conf_model.model, Xcal, ycal)
+    return Xtrain, ytrain, Xcal, ycal
+end
+
 # Simple
 "The `SimpleInductiveClassifier` is the simplest approach to Inductive Conformal Classification. Contrary to the [`NaiveClassifier`](@ref) it computes nonconformity scores using a designated calibration dataset."
 mutable struct SimpleInductiveClassifier{Model<:Supervised} <: ConformalProbabilisticSet
@@ -21,7 +37,7 @@ function SimpleInductiveClassifier(
     model::Supervised;
     coverage::AbstractFloat=0.95,
     heuristic::Function=f(pΜ‚) = 1.0 - pΜ‚,
-    train_ratio::AbstractFloat=0.5
+    train_ratio::AbstractFloat=0.5,
 )
     return SimpleInductiveClassifier(model, coverage, nothing, heuristic, train_ratio)
 end
@@ -58,13 +74,7 @@ A typical choice for the heuristic function is ``h(\hat\mu(X_i), Y_i)=1-\hat\mu(
 function MMI.fit(conf_model::SimpleInductiveClassifier, verbosity, X, y)
 
     # Data Splitting:
-    train, calibration = partition(eachindex(y), conf_model.train_ratio)
-    Xtrain = selectrows(X, train)
-    ytrain = y[train]
-    Xtrain, ytrain = MMI.reformat(conf_model.model, Xtrain, ytrain)
-    Xcal = selectrows(X, calibration)
-    ycal = y[calibration]
-    Xcal, ycal = MMI.reformat(conf_model.model, Xcal, ycal)
+    Xtrain, ytrain, Xcal, ycal = split_data(conf_model, X, y)
 
     # Training:
     fitresult, cache, report = MMI.fit(conf_model.model, verbosity, Xtrain, ytrain)
@@ -124,7 +134,7 @@ function AdaptiveInductiveClassifier(
     model::Supervised;
     coverage::AbstractFloat=0.95,
     heuristic::Function=f(y, yΜ‚) = 1.0 - yΜ‚,
-    train_ratio::AbstractFloat=0.5
+    train_ratio::AbstractFloat=0.5,
 )
     return AdaptiveInductiveClassifier(model, coverage, nothing, heuristic, train_ratio)
 end
@@ -141,13 +151,7 @@ S_i^{\text{CAL}} = s(X_i,Y_i) = \sum_{j=1}^k \hat\mu(X_i)_{\pi_j} \ \text{where
 function MMI.fit(conf_model::AdaptiveInductiveClassifier, verbosity, X, y)
 
     # Data Splitting:
-    train, calibration = partition(eachindex(y), conf_model.train_ratio)
-    Xtrain = selectrows(X, train)
-    ytrain = y[train]
-    Xtrain, ytrain = MMI.reformat(conf_model.model, Xtrain, ytrain)
-    Xcal = selectrows(X, calibration)
-    ycal = y[calibration]
-    Xcal, ycal = MMI.reformat(conf_model.model, Xcal, ycal)
+    Xtrain, ytrain, Xcal, ycal = split_data(conf_model, X, y)
 
     # Training:
     fitresult, cache, report = MMI.fit(conf_model.model, verbosity, Xtrain, ytrain)
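
The next hunk renames the local variables in the adaptive score so they match the docstring notation: `Ξ ` is the ranking of classes by descending probability and `Ο€β‚–` the position of the true class within that ranking. As a quick illustration of what this score computes, here is a toy example with hypothetical probabilities and integer-coded classes:

``` julia
# One observation, four classes, true class yβ‚– = 3:
probasα΅’ = [0.10, 0.50, 0.30, 0.10]
yβ‚– = 3
Ξ  = sortperm(.-probasα΅’)                  # classes ranked by descending probability: [2, 3, 1, 4]
Ο€β‚– = findall(Ξ  .== yβ‚–)[1]                 # position of the true class in the ranking: 2
scoreα΅’ = last(cumsum(probasα΅’[Ξ ][1:Ο€β‚–]))   # probability mass accumulated up to the true class: 0.8
```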
@@ -173,9 +177,9 @@ function score(conf_model::AdaptiveInductiveClassifier, ::Type{<:Supervised}, fi
     probas = pdf(pΜ‚, L) # compute probabilities for all classes
     scores = map(Base.Iterators.product(eachrow(probas), L)) do Z
         probasα΅’, yβ‚– = Z
-        ranks = sortperm(.-probasα΅’) # rank in descending order
-        index_y = findall(L[ranks] .== yβ‚–)[1] # index of true y in sorted array
-        scoresα΅’ = last(cumsum(probasα΅’[ranks][1:index_y])) # sum up until true y is reached
+        Ξ  = sortperm(.-probasα΅’) # rank in descending order
+        Ο€β‚– = findall(L[Ξ ] .== yβ‚–)[1] # index of true y in sorted array
+        scoresα΅’ = last(cumsum(probasα΅’[Ξ ][1:Ο€β‚–])) # sum up until true y is reached
         return scoresα΅’
     end
     if isnothing(y)
@@ -206,12 +210,16 @@ function MMI.predict(conf_model::AdaptiveInductiveClassifier, fitresult, Xnew)
     pΜ‚ = map(pΜ‚) do pp
         L = pΜ‚.decoder.classes
         probas = pdf.(pp, L)
-        is_in_set = 1.0 .- probas .<= qΜ‚
-        if !all(is_in_set .== false)
-            pp = UnivariateFinite(L[is_in_set], probas[is_in_set])
+        Ξ  = sortperm(.-probas) # rank in descending order
+        in_set = findall(cumsum(probas[Ξ ]) .> qΜ‚)
+        if length(in_set) > 0
+            k = in_set[1] # index of first class with cumulative probability > qΜ‚ (supremum)
         else
-            pp = missing
+            k = 0
         end
+        k += 1
+        final_idx = minimum([k, length(Ξ )])
+        pp = UnivariateFinite(L[Ξ ][1:final_idx], probas[Ξ ][1:final_idx])
         return pp
     end
     return pΜ‚
diff --git a/src/conformal_models/training/inductive_classification.jl b/src/conformal_models/training/inductive_classification.jl
index 03a927d..c25101c 100644
--- a/src/conformal_models/training/inductive_classification.jl
+++ b/src/conformal_models/training/inductive_classification.jl
@@ -1,5 +1,5 @@
 using MLJEnsembles: EitherEnsembleModel
-using MLJFlux: MLJFluxModel
+using MLJFlux: MLJFluxModel, reformat
 using MLUtils
 
 """
@@ -8,7 +8,8 @@ using MLUtils
 Overloads the `score` function for the `MLJFluxModel` type.
 """
 function score(conf_model::SimpleInductiveClassifier, ::Type{<:MLJFluxModel}, fitresult, X, y::Union{Nothing,AbstractArray}=nothing)
-    X = permutedims(matrix(X))
+    X = reformat(X)
+    X = typeof(X) <: AbstractArray ? X : permutedims(matrix(X))
     probas = permutedims(fitresult[1](X))
     scores = @.(conf_model.heuristic(probas))
     if isnothing(y)
@@ -25,7 +26,8 @@ end
 Overloads the `score` function for ensembles of `MLJFluxModel` types.
 """
 function score(conf_model::SimpleInductiveClassifier, ::Type{<:EitherEnsembleModel{<:MLJFluxModel}}, fitresult, X, y::Union{Nothing,AbstractArray}=nothing)
-    X = permutedims(matrix(X))
+    X = reformat(X)
+    X = typeof(X) <: AbstractArray ? X : permutedims(matrix(X))
     _chains = map(res -> res[1], fitresult.ensemble)
     probas = MLUtils.stack(map(chain -> chain(X), _chains)) |>
         p -> mean(p, dims=ndims(p)) |>
@@ -47,7 +49,8 @@ Overloads the `score` function for the `MLJFluxModel` type.
 """
 function score(conf_model::AdaptiveInductiveClassifier, ::Type{<:MLJFluxModel}, fitresult, X, y::Union{Nothing,AbstractArray}=nothing)
     L = levels(fitresult[2])
-    X = permutedims(matrix(X))
+    X = reformat(X)
+    X = typeof(X) <: AbstractArray ? X : permutedims(matrix(X))
     probas = permutedims(fitresult[1](X)) # compute probabilities for all classes
     scores = map(Base.Iterators.product(eachrow(probas), L)) do Z
         probasα΅’, yβ‚– = Z
@@ -71,7 +74,8 @@ Overloads the `score` function for ensembles of `MLJFluxModel` types.
 """
 function score(conf_model::AdaptiveInductiveClassifier, ::Type{<:EitherEnsembleModel{<:MLJFluxModel}}, fitresult, X, y::Union{Nothing,AbstractArray}=nothing)
     L = levels(fitresult.ensemble[1][2])
-    X = permutedims(matrix(X))
+    X = reformat(X)
+    X = typeof(X) <: AbstractArray ?
X : permutedims(matrix(X)) _chains = map(res -> res[1], fitresult.ensemble) probas = MLUtils.stack(map(chain -> chain(X), _chains)) |> p -> mean(p, dims=ndims(p)) |> diff --git a/src/conformal_models/transductive_classification.jl b/src/conformal_models/transductive_classification.jl index 9227dd5..ea58387 100644 --- a/src/conformal_models/transductive_classification.jl +++ b/src/conformal_models/transductive_classification.jl @@ -77,4 +77,4 @@ function MMI.predict(conf_model::NaiveClassifier, fitresult, Xnew) return pp end return pΜ‚ -end +end \ No newline at end of file diff --git a/test/Manifest.toml b/test/Manifest.toml index c8b6fce..54a7cd2 100644 --- a/test/Manifest.toml +++ b/test/Manifest.toml @@ -1,6 +1,6 @@ # This file is machine-generated - editing it directly is not advised -julia_version = "1.8.5" +julia_version = "1.9.0" manifest_format = "2.0" project_hash = "35130c42d0ed70ece3ae50bdfeedecf590b3fb1d" @@ -16,27 +16,29 @@ uuid = "da404889-ca92-49ff-9e8b-0aa6b4d38dc8" version = "1.4.1" [[deps.AbstractFFTs]] -deps = ["ChainRulesCore", "LinearAlgebra"] +deps = ["LinearAlgebra"] git-tree-sha1 = "16b6dbc4cf7caee4e1e75c49485ec67b667098a0" uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c" version = "1.3.1" +weakdeps = ["ChainRulesCore"] + + [deps.AbstractFFTs.extensions] + AbstractFFTsChainRulesCoreExt = "ChainRulesCore" [[deps.AbstractTrees]] git-tree-sha1 = "faa260e4cb5aba097a73fab382dd4b5819d8ec8c" uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" version = "0.4.4" -[[deps.Accessors]] -deps = ["Compat", "CompositionsBase", "ConstructionBase", "Dates", "InverseFunctions", "LinearAlgebra", "MacroTools", "Requires", "Test"] -git-tree-sha1 = "2b301c2388067d655fe5e4ca6d4aa53b61f895b4" -uuid = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697" -version = "0.1.31" - [[deps.Adapt]] deps = ["LinearAlgebra", "Requires"] git-tree-sha1 = "76289dc51920fdc6e0013c872ba9551d54961c24" uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" version = "3.6.2" +weakdeps = ["StaticArrays"] + + [deps.Adapt.extensions] + AdaptStaticArraysExt = "StaticArrays" [[deps.ArgCheck]] git-tree-sha1 = "a3a402a35a2f7e0b87828ccabbd5ebfbebe356b4" @@ -49,15 +51,25 @@ version = "1.1.1" [[deps.ArrayInterface]] deps = ["Adapt", "LinearAlgebra", "Requires", "SparseArrays", "SuiteSparse"] -git-tree-sha1 = "917286faa2abb288796e75b88ca67edc016f3219" +git-tree-sha1 = "f83ec24f76d4c8f525099b2ac475fc098138ec31" uuid = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9" -version = "7.4.5" - -[[deps.ArrayInterfaceCore]] -deps = ["LinearAlgebra", "SnoopPrecompile", "SparseArrays", "SuiteSparse"] -git-tree-sha1 = "e5f08b5689b1aad068e01751889f2f615c7db36d" -uuid = "30b0a656-2188-435a-8636-2ec0e6a096e2" -version = "0.1.29" +version = "7.4.11" + + [deps.ArrayInterface.extensions] + ArrayInterfaceBandedMatricesExt = "BandedMatrices" + ArrayInterfaceBlockBandedMatricesExt = "BlockBandedMatrices" + ArrayInterfaceCUDAExt = "CUDA" + ArrayInterfaceGPUArraysCoreExt = "GPUArraysCore" + ArrayInterfaceStaticArraysCoreExt = "StaticArraysCore" + ArrayInterfaceTrackerExt = "Tracker" + + [deps.ArrayInterface.weakdeps] + BandedMatrices = "aae01518-5342-5314-be14-df237901396f" + BlockBandedMatrices = "ffab5731-97b5-5995-9138-79e8c1846df0" + CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" + GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527" + StaticArraysCore = "1e83bf80-4336-4d27-bf5d-d5a4f845583c" + Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c" [[deps.Artifacts]] uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" @@ -81,9 +93,23 @@ version = "0.3.7" [[deps.BangBang]] deps = ["Compat", 
"ConstructionBase", "InitialValues", "LinearAlgebra", "Requires", "Setfield", "Tables"] -git-tree-sha1 = "54b00d1b93791f8e19e31584bd30f2cb6004614b" +git-tree-sha1 = "e28912ce94077686443433c2800104b061a827ed" uuid = "198e06fe-97b7-11e9-32a5-e1d131e6ad66" -version = "0.3.38" +version = "0.3.39" + + [deps.BangBang.extensions] + BangBangChainRulesCoreExt = "ChainRulesCore" + BangBangDataFramesExt = "DataFrames" + BangBangStaticArraysExt = "StaticArrays" + BangBangStructArraysExt = "StructArrays" + BangBangTypedTablesExt = "TypedTables" + + [deps.BangBang.weakdeps] + ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" + DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" + StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" + StructArrays = "09ab397b-f2b6-538f-b94a-2f83cf4a842a" + TypedTables = "9d95f2ec-7b3d-5a63-8d20-e2491e220bb9" [[deps.Base64]] uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" @@ -98,12 +124,6 @@ git-tree-sha1 = "43b1a4a8f797c1cddadf60499a8a077d4af2cd2d" uuid = "d1d4a3ce-64b1-5f1a-9ba4-7e7e69966f35" version = "0.1.7" -[[deps.BitTwiddlingConvenienceFunctions]] -deps = ["Static"] -git-tree-sha1 = "0c5f81f47bbbcf4aea7b2959135713459170798b" -uuid = "62783981-4cbd-42fc-bca8-16325de8dc4b" -version = "0.1.5" - [[deps.Bzip2_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "19a35467a82e236ff51bc17a3a44b69ef35185a2" @@ -115,17 +135,11 @@ git-tree-sha1 = "eb4cb44a499229b3b8426dcfb5dd85333951ff90" uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82" version = "0.4.2" -[[deps.CPUSummary]] -deps = ["CpuId", "IfElse", "Static"] -git-tree-sha1 = "2c144ddb46b552f72d7eafe7cc2f50746e41ea21" -uuid = "2a0fbf3d-bb9c-48f3-b0a9-814d99fd7ab9" -version = "0.2.2" - [[deps.CUDA]] deps = ["AbstractFFTs", "Adapt", "BFloat16s", "CEnum", "CUDA_Driver_jll", "CUDA_Runtime_Discovery", "CUDA_Runtime_jll", "CompilerSupportLibraries_jll", "ExprTools", "GPUArrays", "GPUCompiler", "KernelAbstractions", "LLVM", "LazyArtifacts", "Libdl", "LinearAlgebra", "Logging", "Preferences", "Printf", "Random", "Random123", "RandomNumbers", "Reexport", "Requires", "SparseArrays", "SpecialFunctions", "UnsafeAtomicsLLVM"] -git-tree-sha1 = "280893f920654ebfaaaa1999fbd975689051f890" +git-tree-sha1 = "442d989978ed3ff4e174c928ee879dc09d1ef693" uuid = "052768ef-5323-5732-b1bb-66c8b64840ba" -version = "4.2.0" +version = "4.3.2" [[deps.CUDA_Driver_jll]] deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl", "Pkg"] @@ -169,17 +183,35 @@ git-tree-sha1 = "1568b28f91293458345dabba6a5ea3f183250a61" uuid = "324d7699-5711-5eae-9e2f-1d82baa6b597" version = "0.10.8" + [deps.CategoricalArrays.extensions] + CategoricalArraysJSONExt = "JSON" + CategoricalArraysRecipesBaseExt = "RecipesBase" + CategoricalArraysSentinelArraysExt = "SentinelArrays" + CategoricalArraysStructTypesExt = "StructTypes" + + [deps.CategoricalArrays.weakdeps] + JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" + RecipesBase = "3cdcf5f2-1ef4-517c-9805-6587b60abb01" + SentinelArrays = "91c51154-3ec4-41a3-a24f-3f23e20d615c" + StructTypes = "856f2bd8-1eba-4b0a-8007-ebc267875bd4" + [[deps.CategoricalDistributions]] -deps = ["CategoricalArrays", "Distributions", "Missings", "OrderedCollections", "Random", "ScientificTypes", "UnicodePlots"] +deps = ["CategoricalArrays", "Distributions", "Missings", "OrderedCollections", "Random", "ScientificTypes"] git-tree-sha1 = "da68989f027dcefa74d44a452c9e36af9730a70d" uuid = "af321ab8-2d2e-40a6-b165-3d674595d28e" version = "0.1.10" + [deps.CategoricalDistributions.extensions] + UnivariateFiniteDisplayExt = "UnicodePlots" 
+ + [deps.CategoricalDistributions.weakdeps] + UnicodePlots = "b8865327-cd53-5732-bb35-84acbb429228" + [[deps.ChainRules]] deps = ["Adapt", "ChainRulesCore", "Compat", "Distributed", "GPUArraysCore", "IrrationalConstants", "LinearAlgebra", "Random", "RealDot", "SparseArrays", "Statistics", "StructArrays"] -git-tree-sha1 = "8bae903893aeeb429cf732cf1888490b93ecf265" +git-tree-sha1 = "61549d9b52c88df34d21bd306dba1d43bb039c87" uuid = "082447d4-558c-5d27-93f4-14fc19e9eca2" -version = "1.49.0" +version = "1.51.0" [[deps.ChainRulesCore]] deps = ["Compat", "LinearAlgebra", "SparseArrays"] @@ -187,18 +219,6 @@ git-tree-sha1 = "e30f2f4e20f7f186dc36529910beaedc60cfa644" uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" version = "1.16.0" -[[deps.ChangesOfVariables]] -deps = ["LinearAlgebra", "Test"] -git-tree-sha1 = "f84967c4497e0e1955f9a582c232b02847c5f589" -uuid = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0" -version = "0.1.7" - -[[deps.CloseOpenIntervals]] -deps = ["Static", "StaticArrayInterface"] -git-tree-sha1 = "70232f82ffaab9dc52585e0dd043b5e0c6b714f1" -uuid = "fb6a15b2-703c-40df-9091-08a04967cfa9" -version = "0.1.12" - [[deps.CodecZlib]] deps = ["TranscodingStreams", "Zlib_jll"] git-tree-sha1 = "9c209fb7536406834aa938fb149964b985de6c83" @@ -241,10 +261,14 @@ uuid = "bbf7d656-a473-5ed7-a52c-81e309532950" version = "0.3.0" [[deps.Compat]] -deps = ["Dates", "LinearAlgebra", "UUIDs"] -git-tree-sha1 = "7a60c856b9fa189eb34f5f8a6f6b5529b7942957" +deps = ["UUIDs"] +git-tree-sha1 = "4e88377ae7ebeaf29a047aa1ee40826e0b708a5d" uuid = "34da2185-b29b-5c13-b0c7-acf172513d20" -version = "4.6.1" +version = "4.7.0" +weakdeps = ["Dates", "LinearAlgebra"] + + [deps.Compat.extensions] + CompatLinearAlgebraExt = "LinearAlgebra" [[deps.CompatHelperLocal]] deps = ["DocStringExtensions", "Pkg", "UUIDs"] @@ -255,13 +279,19 @@ version = "0.1.25" [[deps.CompilerSupportLibraries_jll]] deps = ["Artifacts", "Libdl"] uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae" -version = "1.0.1+0" +version = "1.0.2+0" [[deps.CompositionsBase]] git-tree-sha1 = "802bb88cd69dfd1509f6670416bd4434015693ad" uuid = "a33af91c-f02d-484b-be07-31d278c5ca2b" version = "0.1.2" + [deps.CompositionsBase.extensions] + CompositionsBaseInverseFunctionsExt = "InverseFunctions" + + [deps.CompositionsBase.weakdeps] + InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112" + [[deps.ComputationalResources]] git-tree-sha1 = "52cb3ec90e8a8bea0e62e275ba577ad0f74821f7" uuid = "ed09eef8-17a6-5b46-8889-db040fac31e3" @@ -279,6 +309,14 @@ git-tree-sha1 = "738fec4d684a9a6ee9598a8bfee305b26831f28c" uuid = "187b0558-2788-49d3-abe0-74a17ed4e7c9" version = "1.5.2" + [deps.ConstructionBase.extensions] + ConstructionBaseIntervalSetsExt = "IntervalSets" + ConstructionBaseStaticArraysExt = "StaticArrays" + + [deps.ConstructionBase.weakdeps] + IntervalSets = "8197267c-284f-5f27-9208-e0e47529a953" + StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" + [[deps.ContextVariablesX]] deps = ["Compat", "Logging", "UUIDs"] git-tree-sha1 = "25cc3803f1030ab855e383129dcd3dc294e322cc" @@ -290,12 +328,6 @@ git-tree-sha1 = "d05d9e7b7aedff4e5b51a029dced05cfb6125781" uuid = "d38c429a-6771-53c6-b99e-75d170b6e991" version = "0.6.2" -[[deps.CpuId]] -deps = ["Markdown"] -git-tree-sha1 = "fcbb72b032692610bfbdb15018ac16a36cf2e406" -uuid = "adafc99b-e345-5852-983c-f28acb93d879" -version = "0.3.1" - [[deps.Crayons]] git-tree-sha1 = "249fe38abf76d48563e2f4556bebd215aa317e15" uuid = "a8cc5b0e-0ffa-5ad4-8c14-923d3ee1735f" @@ -334,13 +366,9 @@ version = "0.1.2" [[deps.DelimitedFiles]] deps = ["Mmap"] 
+git-tree-sha1 = "9e2f36d3c96a820c678f2f1f1782582fcf685bae" uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab" - -[[deps.DensityInterface]] -deps = ["InverseFunctions", "Test"] -git-tree-sha1 = "80c3e8639e3353e5d2912fb3a1916b8455e2494b" -uuid = "b429d917-457f-4dbc-8f4c-0cc954292b1d" -version = "0.4.0" +version = "1.9.1" [[deps.DiffResults]] deps = ["StaticArraysCore"] @@ -350,9 +378,9 @@ version = "1.1.0" [[deps.DiffRules]] deps = ["IrrationalConstants", "LogExpFunctions", "NaNMath", "Random", "SpecialFunctions"] -git-tree-sha1 = "a4ad7ef19d2cdc2eff57abbbe68032b1cd0bd8f8" +git-tree-sha1 = "23163d55f885173722d1e4cf0f6110cdbaf7e272" uuid = "b552c78f-8df3-52c6-915a-8e097449b14b" -version = "1.13.0" +version = "1.15.1" [[deps.Distances]] deps = ["LinearAlgebra", "SparseArrays", "Statistics", "StatsAPI"] @@ -365,10 +393,18 @@ deps = ["Random", "Serialization", "Sockets"] uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b" [[deps.Distributions]] -deps = ["ChainRulesCore", "DensityInterface", "FillArrays", "LinearAlgebra", "PDMats", "Printf", "QuadGK", "Random", "SparseArrays", "SpecialFunctions", "Statistics", "StatsAPI", "StatsBase", "StatsFuns", "Test"] -git-tree-sha1 = "5eeb2bd01e5065090ad591a205d8cad432ae6cb6" +deps = ["FillArrays", "LinearAlgebra", "PDMats", "Printf", "QuadGK", "Random", "SparseArrays", "SpecialFunctions", "Statistics", "StatsAPI", "StatsBase", "StatsFuns", "Test"] +git-tree-sha1 = "db40d3aff76ea6a3619fdd15a8c78299221a2394" uuid = "31c24e10-a181-5473-b8eb-7969acd0382f" -version = "0.25.93" +version = "0.25.97" + + [deps.Distributions.extensions] + DistributionsChainRulesCoreExt = "ChainRulesCore" + DistributionsDensityInterfaceExt = "DensityInterface" + + [deps.Distributions.weakdeps] + ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" + DensityInterface = "b429d917-457f-4dbc-8f4c-0cc954292b1d" [[deps.DocStringExtensions]] deps = ["LibGit2"] @@ -406,16 +442,22 @@ uuid = "792122b4-ca99-40de-a6bc-6742525f08b6" version = "0.3.0" [[deps.EvoTrees]] -deps = ["BSON", "CUDA", "CategoricalArrays", "Distributions", "LoopVectorization", "MLJModelInterface", "NetworkLayout", "Random", "RecipesBase", "Statistics", "StatsBase", "Tables"] -git-tree-sha1 = "4d640db07bae2a6cf6ec126bb02b4606e931f9bf" +deps = ["BSON", "CUDA", "CategoricalArrays", "Distributions", "MLJModelInterface", "NetworkLayout", "Random", "RecipesBase", "Statistics", "StatsBase", "Tables"] +git-tree-sha1 = "1b63fdc0acad47c3203398171c138835c1c40d69" uuid = "f6006082-12f8-11e9-0c9c-0d5d367ab1e5" -version = "0.14.10" +version = "0.15.0" + +[[deps.ExceptionUnwrapping]] +deps = ["Test"] +git-tree-sha1 = "e90caa41f5a86296e014e148ee061bd6c3edec96" +uuid = "460bff9d-24e4-43bc-9d9f-a8973cb893f4" +version = "0.1.9" [[deps.Expat_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "bad72f730e9e91c08d9427d5e8db95478a3c323d" +deps = ["Artifacts", "JLLWrappers", "Libdl"] +git-tree-sha1 = "4558ab818dcceaab612d1bb8c19cee87eda2b83c" uuid = "2e619515-83b5-522b-bb60-26c02a35a201" -version = "2.4.8+0" +version = "2.5.0+0" [[deps.ExprTools]] git-tree-sha1 = "c1d06d129da9f55715c6c212866f5b1bddc5fa00" @@ -456,15 +498,25 @@ uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee" [[deps.FillArrays]] deps = ["LinearAlgebra", "Random", "SparseArrays", "Statistics"] -git-tree-sha1 = "3cce72ec679a5e8e6a84ff09dd03b721de420cfe" +git-tree-sha1 = "0b3b52afd0f87b0a3f5ada0466352d125c9db458" uuid = "1a297f60-69ca-5386-bcde-b61e274b549b" -version = "1.0.1" +version = "1.2.1" [[deps.FiniteDiff]] -deps = ["ArrayInterface", "LinearAlgebra", "Requires", 
"Setfield", "SparseArrays", "StaticArrays"] -git-tree-sha1 = "6604e18a0220650dbbea7854938768f15955dd8e" +deps = ["ArrayInterface", "LinearAlgebra", "Requires", "Setfield", "SparseArrays"] +git-tree-sha1 = "c6e4a1fbe73b31a3dea94b1da449503b8830c306" uuid = "6a86dc24-6348-571c-b903-95158fe2bd41" -version = "2.20.0" +version = "2.21.1" + + [deps.FiniteDiff.extensions] + FiniteDiffBandedMatricesExt = "BandedMatrices" + FiniteDiffBlockBandedMatricesExt = "BlockBandedMatrices" + FiniteDiffStaticArraysExt = "StaticArrays" + + [deps.FiniteDiff.weakdeps] + BandedMatrices = "aae01518-5342-5314-be14-df237901396f" + BlockBandedMatrices = "ffab5731-97b5-5995-9138-79e8c1846df0" + StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" [[deps.FixedPointNumbers]] deps = ["Statistics"] @@ -474,15 +526,17 @@ version = "0.8.4" [[deps.Flux]] deps = ["Adapt", "CUDA", "ChainRulesCore", "Functors", "LinearAlgebra", "MLUtils", "MacroTools", "NNlib", "NNlibCUDA", "OneHotArrays", "Optimisers", "Preferences", "ProgressLogging", "Random", "Reexport", "SparseArrays", "SpecialFunctions", "Statistics", "Zygote", "cuDNN"] -git-tree-sha1 = "64005071944bae14fc145661f617eb68b339189c" +git-tree-sha1 = "3e2c3704c2173ab4b1935362384ca878b53d4c34" uuid = "587475ba-b771-5e3f-ad9e-33799f191a9c" -version = "0.13.16" +version = "0.13.17" -[[deps.FoldsThreads]] -deps = ["Accessors", "FunctionWrappers", "InitialValues", "SplittablesBase", "Transducers"] -git-tree-sha1 = "eb8e1989b9028f7e0985b4268dabe94682249025" -uuid = "9c68100b-dfe1-47cf-94c8-95104e173443" -version = "0.1.1" + [deps.Flux.extensions] + AMDGPUExt = "AMDGPU" + FluxMetalExt = "Metal" + + [deps.Flux.weakdeps] + AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e" + Metal = "dde4c033-4e86-420c-a63e-0dd931031962" [[deps.Fontconfig_jll]] deps = ["Artifacts", "Bzip2_jll", "Expat_jll", "FreeType2_jll", "JLLWrappers", "Libdl", "Libuuid_jll", "Pkg", "Zlib_jll"] @@ -497,10 +551,14 @@ uuid = "59287772-0a20-5a39-b81b-1366585eb4c0" version = "0.4.2" [[deps.ForwardDiff]] -deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions", "StaticArrays"] +deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions"] git-tree-sha1 = "00e252f4d706b3d55a8863432e742bf5717b498d" uuid = "f6369f11-7733-5829-9624-2563aa707210" version = "0.10.35" +weakdeps = ["StaticArrays"] + + [deps.ForwardDiff.extensions] + ForwardDiffStaticArraysExt = "StaticArrays" [[deps.FreeType2_jll]] deps = ["Artifacts", "Bzip2_jll", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"] @@ -514,11 +572,6 @@ git-tree-sha1 = "aa31987c2ba8704e23c6c8ba8a4f769d5d7e4f91" uuid = "559328eb-81f9-559d-9380-de523a88c83c" version = "1.0.10+0" -[[deps.FunctionWrappers]] -git-tree-sha1 = "d62485945ce5ae9c0c48f124a84998d755bae00e" -uuid = "069b7b12-0de2-55c6-9aab-29f3d0a68a2e" -version = "1.1.3" - [[deps.Functors]] deps = ["LinearAlgebra"] git-tree-sha1 = "478f8c3145bb91d82c2cf20433e8c1b30df454cc" @@ -537,33 +590,33 @@ version = "3.3.8+0" [[deps.GPUArrays]] deps = ["Adapt", "GPUArraysCore", "LLVM", "LinearAlgebra", "Printf", "Random", "Reexport", "Serialization", "Statistics"] -git-tree-sha1 = "9ade6983c3dbbd492cf5729f865fe030d1541463" +git-tree-sha1 = "2e57b4a4f9cc15e85a24d603256fe08e527f48d1" uuid = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7" -version = "8.6.6" +version = "8.8.1" [[deps.GPUArraysCore]] deps = ["Adapt"] -git-tree-sha1 = 
"1cd7f0af1aa58abc02ea1d872953a97359cb87fa" +git-tree-sha1 = "2d6ca471a6c7b536127afccfa7564b5b39227fe0" uuid = "46192b85-c4d5-4398-a991-12ede77f4527" -version = "0.1.4" +version = "0.1.5" [[deps.GPUCompiler]] deps = ["ExprTools", "InteractiveUtils", "LLVM", "Libdl", "Logging", "Scratch", "TimerOutputs", "UUIDs"] -git-tree-sha1 = "5737dc242dadd392d934ee330c69ceff47f0259c" +git-tree-sha1 = "cb090aea21c6ca78d59672a7e7d13bd56d09de64" uuid = "61eb1bfa-7361-4325-ad38-22787b887f55" -version = "0.19.4" +version = "0.20.3" [[deps.GR]] deps = ["Artifacts", "Base64", "DelimitedFiles", "Downloads", "GR_jll", "HTTP", "JSON", "Libdl", "LinearAlgebra", "Pkg", "Preferences", "Printf", "Random", "Serialization", "Sockets", "TOML", "Tar", "Test", "UUIDs", "p7zip_jll"] -git-tree-sha1 = "d014972cd6f5afb1f8cd7adf000b7a966d62c304" +git-tree-sha1 = "8b8a2fd4536ece6e554168c21860b6820a8a83db" uuid = "28b8d3ca-fb5f-59d9-8090-bfdbd6d07a71" -version = "0.72.5" +version = "0.72.7" [[deps.GR_jll]] deps = ["Artifacts", "Bzip2_jll", "Cairo_jll", "FFMPEG_jll", "Fontconfig_jll", "GLFW_jll", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Libtiff_jll", "Pixman_jll", "Qt5Base_jll", "Zlib_jll", "libpng_jll"] -git-tree-sha1 = "f670f269909a9114df1380cc0fcaa316fff655fb" +git-tree-sha1 = "19fad9cd9ae44847fe842558a744748084a722d1" uuid = "d2c73de3-f751-5644-a686-071e5b155ba9" -version = "0.72.5+0" +version = "0.72.7+0" [[deps.GeoInterface]] deps = ["Extents"] @@ -601,10 +654,10 @@ uuid = "42e2da0e-8278-4e71-bc24-59509adca0fe" version = "1.0.2" [[deps.HTTP]] -deps = ["Base64", "CodecZlib", "ConcurrentUtilities", "Dates", "Logging", "LoggingExtras", "MbedTLS", "NetworkOptions", "OpenSSL", "Random", "SimpleBufferStream", "Sockets", "URIs", "UUIDs"] -git-tree-sha1 = "41f7dfb2b20e7e8bf64f6b6fae98f4d2df027b06" +deps = ["Base64", "CodecZlib", "ConcurrentUtilities", "Dates", "ExceptionUnwrapping", "Logging", "LoggingExtras", "MbedTLS", "NetworkOptions", "OpenSSL", "Random", "SimpleBufferStream", "Sockets", "URIs", "UUIDs"] +git-tree-sha1 = "2613d054b0e18a3dea99ca1594e9a3960e025da4" uuid = "cd3eb016-35fb-5094-929b-558a96fad6f3" -version = "1.9.4" +version = "1.9.7" [[deps.HarfBuzz_jll]] deps = ["Artifacts", "Cairo_jll", "Fontconfig_jll", "FreeType2_jll", "Glib_jll", "Graphite2_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Pkg"] @@ -612,17 +665,11 @@ git-tree-sha1 = "129acf094d168394e80ee1dc4bc06ec835e510a3" uuid = "2e76f6c2-a576-52d4-95c1-20adfe4de566" version = "2.8.1+1" -[[deps.HostCPUFeatures]] -deps = ["BitTwiddlingConvenienceFunctions", "IfElse", "Libdl", "Static"] -git-tree-sha1 = "734fd90dd2f920a2f1921d5388dcebe805b262dc" -uuid = "3e5b6fbb-0976-4d2c-9146-d79de83f2fb0" -version = "0.1.14" - [[deps.HypergeometricFunctions]] deps = ["DualNumbers", "LinearAlgebra", "OpenLibm_jll", "SpecialFunctions"] -git-tree-sha1 = "84204eae2dd237500835990bcade263e27674a93" +git-tree-sha1 = "0ec02c648befc2f94156eaef13b0f38106212f3f" uuid = "34004b35-14d8-5ef3-9330-4cdb6864b03a" -version = "0.3.16" +version = "0.3.17" [[deps.IOCapture]] deps = ["Logging", "Random"] @@ -636,11 +683,6 @@ git-tree-sha1 = "eac00994ce3229a464c2847e956d77a2c64ad3a5" uuid = "7869d1d1-7146-5819-86e3-90919afe41df" version = "0.4.10" -[[deps.IfElse]] -git-tree-sha1 = "debdd00ffef04665ccbb3e150747a77560e8fad1" -uuid = "615f187c-cbe4-4ef1-ba3b-2fcf58d6d173" -version = "0.1.1" - [[deps.InitialValues]] git-tree-sha1 = "4da0f88e9a39111c2fa3add390ab15f3a44f3ca3" uuid = "22cec73e-a1b8-11e9-2c92-598750a2cf9c" @@ -650,12 +692,6 @@ version = "0.3.1" deps = ["Markdown"] uuid = 
"b77e0a4c-d291-57a0-90e8-8db25a27a240" -[[deps.InverseFunctions]] -deps = ["Test"] -git-tree-sha1 = "6667aadd1cdee2c6cd068128b3d226ebc4fb0c67" -uuid = "3587e190-3f89-42d0-90ee-14403ec27112" -version = "0.1.9" - [[deps.InvertedIndices]] git-tree-sha1 = "0dc7b50b8d436461be01300fd8cd45aa0274b038" uuid = "41ab1584-1d38-5bbf-9106-f11c6c58b48f" @@ -667,9 +703,9 @@ uuid = "92d709cd-6900-40b7-9082-c6be49f344b6" version = "0.2.2" [[deps.IterTools]] -git-tree-sha1 = "fa6287a4469f5e048d763df38279ee729fbd44e5" +git-tree-sha1 = "4ced6667f9974fc5c5943fa5e2ef1ca43ea9e450" uuid = "c8e1da08-722c-5040-9ed9-7db0dc04731e" -version = "1.4.0" +version = "1.8.0" [[deps.IterationControl]] deps = ["EarlyStopping", "InteractiveUtils"] @@ -720,9 +756,9 @@ version = "0.2.4" [[deps.KernelAbstractions]] deps = ["Adapt", "Atomix", "InteractiveUtils", "LinearAlgebra", "MacroTools", "PrecompileTools", "SparseArrays", "StaticArrays", "UUIDs", "UnsafeAtomics", "UnsafeAtomicsLLVM"] -git-tree-sha1 = "47be64f040a7ece575c2b5f53ca6da7b548d69f4" +git-tree-sha1 = "b48617c5d764908b5fac493cd907cf33cc11eec1" uuid = "63c18a36-062a-441e-b654-da1e3ab1ce7c" -version = "0.9.4" +version = "0.9.6" [[deps.LAME_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] @@ -738,15 +774,21 @@ version = "3.0.0+1" [[deps.LLVM]] deps = ["CEnum", "LLVMExtra_jll", "Libdl", "Printf", "Unicode"] -git-tree-sha1 = "26a31cdd9f1f4ea74f649a7bf249703c687a953d" +git-tree-sha1 = "5007c1421563108110bbd57f63d8ad4565808818" uuid = "929cbde3-209d-540e-8aea-75f648917ca0" -version = "5.1.0" +version = "5.2.0" [[deps.LLVMExtra_jll]] deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"] -git-tree-sha1 = "09b7505cc0b1cee87e5d4a26eea61d2e1b0dcd35" +git-tree-sha1 = "1222116d7313cdefecf3d45a2bc1a89c4e7c9217" uuid = "dad2f222-ce93-54a1-a47d-0025e8a3acab" -version = "0.0.21+0" +version = "0.0.22+0" + +[[deps.LLVMOpenMP_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "f689897ccbe049adb19a065c495e75f372ecd42b" +uuid = "1d63c593-3942-5779-bab2-d838dc0a180e" +version = "15.0.4+0" [[deps.LZO_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] @@ -761,9 +803,17 @@ version = "1.3.0" [[deps.Latexify]] deps = ["Formatting", "InteractiveUtils", "LaTeXStrings", "MacroTools", "Markdown", "OrderedCollections", "Printf", "Requires"] -git-tree-sha1 = "099e356f267354f46ba65087981a77da23a279b7" +git-tree-sha1 = "f428ae552340899a935973270b8d98e5a31c49fe" uuid = "23fbe1c1-3f47-55db-b15f-69d7ec21a316" -version = "0.16.0" +version = "0.16.1" + + [deps.Latexify.extensions] + DataFramesExt = "DataFrames" + SymEngineExt = "SymEngine" + + [deps.Latexify.weakdeps] + DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" + SymEngine = "123dc426-2d89-5057-bbad-38513e3affd8" [[deps.LatinHypercubeSampling]] deps = ["Random", "StableRNGs", "StatsBase", "Test"] @@ -771,12 +821,6 @@ git-tree-sha1 = "825289d43c753c7f1bf9bed334c253e9913997f8" uuid = "a5e1c1ea-c99a-51d3-a14d-a9a37257b02d" version = "1.9.0" -[[deps.LayoutPointers]] -deps = ["ArrayInterface", "LinearAlgebra", "ManualMemory", "SIMDTypes", "Static", "StaticArrayInterface"] -git-tree-sha1 = "88b8f66b604da079a627b6fb2860d3704a6729a1" -uuid = "10f19ff3-798f-405d-979b-55457f8fc047" -version = "0.1.14" - [[deps.LazyArtifacts]] deps = ["Artifacts", "Pkg"] uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3" @@ -853,9 +897,9 @@ version = "2.36.0+0" [[deps.LightGBM]] deps = ["Dates", "Libdl", "MLJModelInterface", "SparseArrays", "Statistics"] -git-tree-sha1 = "658faa6a229fb5bb4aea5cc897cd99db66aafb51" 
+git-tree-sha1 = "ce5f0bbb93610549e94dc1b1d6a1e238ae021d7d" uuid = "7acf609c-83a4-11e9-1ffb-b912bcd3b04a" -version = "0.6.0" +version = "0.6.1" [[deps.LineSearches]] deps = ["LinearAlgebra", "NLSolversBase", "NaNMath", "Parameters", "Printf"] @@ -864,20 +908,34 @@ uuid = "d3d80556-e9d4-5f37-9878-2ab0fcc64255" version = "7.2.0" [[deps.LinearAlgebra]] -deps = ["Libdl", "libblastrampoline_jll"] +deps = ["Libdl", "OpenBLAS_jll", "libblastrampoline_jll"] uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" [[deps.LinearMaps]] -deps = ["ChainRulesCore", "LinearAlgebra", "SparseArrays", "Statistics"] -git-tree-sha1 = "4af48c3585177561e9f0d24eb9619ad3abf77cc7" +deps = ["LinearAlgebra", "SparseArrays", "Statistics"] +git-tree-sha1 = "a1348b9b7c87d45fa859314d56e8a87ace20561e" uuid = "7a12625a-238d-50fd-b39a-03d52299707e" -version = "3.10.0" +version = "3.10.1" +weakdeps = ["ChainRulesCore"] + + [deps.LinearMaps.extensions] + LinearMapsChainRulesCoreExt = "ChainRulesCore" [[deps.LogExpFunctions]] -deps = ["ChainRulesCore", "ChangesOfVariables", "DocStringExtensions", "InverseFunctions", "IrrationalConstants", "LinearAlgebra"] -git-tree-sha1 = "0a1b7c2863e44523180fdb3146534e265a91870b" +deps = ["DocStringExtensions", "IrrationalConstants", "LinearAlgebra"] +git-tree-sha1 = "c3ce8e7420b3a6e071e0fe4745f5d4300e37b13f" uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688" -version = "0.3.23" +version = "0.3.24" + + [deps.LogExpFunctions.extensions] + LogExpFunctionsChainRulesCoreExt = "ChainRulesCore" + LogExpFunctionsChangesOfVariablesExt = "ChangesOfVariables" + LogExpFunctionsInverseFunctionsExt = "InverseFunctions" + + [deps.LogExpFunctions.weakdeps] + ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" + ChangesOfVariables = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0" + InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112" [[deps.Logging]] uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" @@ -888,12 +946,6 @@ git-tree-sha1 = "cedb76b37bc5a6c702ade66be44f831fa23c681e" uuid = "e6f89c97-d47a-5376-807f-9c37f3926c36" version = "1.0.0" -[[deps.LoopVectorization]] -deps = ["ArrayInterface", "ArrayInterfaceCore", "CPUSummary", "ChainRulesCore", "CloseOpenIntervals", "DocStringExtensions", "ForwardDiff", "HostCPUFeatures", "IfElse", "LayoutPointers", "LinearAlgebra", "OffsetArrays", "PolyesterWeave", "PrecompileTools", "SIMDTypes", "SLEEFPirates", "SpecialFunctions", "Static", "StaticArrayInterface", "ThreadingUtilities", "UnPack", "VectorizationBase"] -git-tree-sha1 = "3bb62b5003bc7d2d49f26663484267dc49fa1bf5" -uuid = "bdcacae8-1622-11e9-2a5c-532679323890" -version = "0.12.159" - [[deps.LossFunctions]] deps = ["CategoricalArrays", "Markdown", "Statistics"] git-tree-sha1 = "44a7bfeb7b5eb9386a62b9cccc6e21f406c15bea" @@ -902,9 +954,9 @@ version = "0.10.0" [[deps.MLJ]] deps = ["CategoricalArrays", "ComputationalResources", "Distributed", "Distributions", "LinearAlgebra", "MLJBase", "MLJEnsembles", "MLJIteration", "MLJModels", "MLJTuning", "OpenML", "Pkg", "ProgressMeter", "Random", "ScientificTypes", "Statistics", "StatsBase", "Tables"] -git-tree-sha1 = "80149328ca780b522b5a95e402450d10df7904f2" +git-tree-sha1 = "d26cd777c711c332019b39445823cbb1f6cdb7e5" uuid = "add582a8-e3ab-11e8-2d5e-e98b27df1bc7" -version = "0.19.1" +version = "0.19.2" [[deps.MLJBase]] deps = ["CategoricalArrays", "CategoricalDistributions", "ComputationalResources", "Dates", "DelimitedFiles", "Distributed", "Distributions", "InteractiveUtils", "InvertedIndices", "LinearAlgebra", "LossFunctions", "MLJModelInterface", "Missings", "OrderedCollections", 
"Parameters", "PrettyTables", "ProgressMeter", "Random", "ScientificTypes", "Serialization", "StatisticalTraits", "Statistics", "StatsBase", "Tables"] @@ -938,9 +990,9 @@ version = "0.5.1" [[deps.MLJLinearModels]] deps = ["DocStringExtensions", "IterativeSolvers", "LinearAlgebra", "LinearMaps", "MLJModelInterface", "Optim", "Parameters"] -git-tree-sha1 = "c811b3877f1328179cef6662388d200c78b95c09" +git-tree-sha1 = "c92bf0ea37bf51e1ef0160069c572825819748b8" uuid = "6ee0df7b-362f-4a72-a706-9e79364fb692" -version = "0.9.1" +version = "0.9.2" [[deps.MLJModelInterface]] deps = ["Random", "ScientificTypesBase", "StatisticalTraits"] @@ -950,9 +1002,9 @@ version = "1.8.0" [[deps.MLJModels]] deps = ["CategoricalArrays", "CategoricalDistributions", "Combinatorics", "Dates", "Distances", "Distributions", "InteractiveUtils", "LinearAlgebra", "MLJModelInterface", "Markdown", "OrderedCollections", "Parameters", "Pkg", "PrettyPrinting", "REPL", "Random", "RelocatableFolders", "ScientificTypes", "StatisticalTraits", "Statistics", "StatsBase", "Tables"] -git-tree-sha1 = "6a1166e463cf0210364e84f334c79ecf9ac6f51f" +git-tree-sha1 = "38c3b4af6e52edcd94144c32dc1bea335dbfaec7" uuid = "d491faf4-2d78-11e9-2867-c94bc002c0b7" -version = "0.16.7" +version = "0.16.8" [[deps.MLJTuning]] deps = ["ComputationalResources", "Distributed", "Distributions", "LatinHypercubeSampling", "MLJBase", "ProgressMeter", "Random", "RecipesBase"] @@ -966,10 +1018,10 @@ uuid = "d8e11817-5142-5d16-987a-aa16d5891078" version = "0.4.17" [[deps.MLUtils]] -deps = ["ChainRulesCore", "Compat", "DataAPI", "DelimitedFiles", "FLoops", "FoldsThreads", "NNlib", "Random", "ShowCases", "SimpleTraits", "Statistics", "StatsBase", "Tables", "Transducers"] -git-tree-sha1 = "ca31739905ddb08c59758726e22b9e25d0d1521b" +deps = ["ChainRulesCore", "Compat", "DataAPI", "DelimitedFiles", "FLoops", "NNlib", "Random", "ShowCases", "SimpleTraits", "Statistics", "StatsBase", "Tables", "Transducers"] +git-tree-sha1 = "3504cdb8c2bc05bde4d4b09a81b01df88fcbbba0" uuid = "f1d291b0-491e-4a28-83b9-f70985020b54" -version = "0.4.2" +version = "0.4.3" [[deps.MacroTools]] deps = ["Markdown", "Random"] @@ -977,17 +1029,6 @@ git-tree-sha1 = "42324d08725e200c23d4dfb549e0d5d89dede2d2" uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" version = "0.5.10" -[[deps.ManualMemory]] -git-tree-sha1 = "bcaef4fc7a0cfe2cba636d84cda54b5e4e4ca3cd" -uuid = "d125e4d3-2237-4719-b19c-fa641b8a4667" -version = "0.1.8" - -[[deps.MarchingCubes]] -deps = ["PrecompileTools", "StaticArrays"] -git-tree-sha1 = "c8e29e2bacb98c9b6f10445227a8b0402f2f173a" -uuid = "299715c1-40a9-479a-aaf9-4a633d36f717" -version = "0.1.8" - [[deps.Markdown]] deps = ["Base64"] uuid = "d6f4376e-aef5-505a-96c1-9c027394607a" @@ -1001,7 +1042,7 @@ version = "1.1.7" [[deps.MbedTLS_jll]] deps = ["Artifacts", "Libdl"] uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1" -version = "2.28.0+0" +version = "2.28.2+0" [[deps.Measures]] git-tree-sha1 = "c13304c81eec1ed3af7fc20e75fb6b26092a1102" @@ -1031,7 +1072,7 @@ uuid = "a63ad114-7e13-5084-954f-fe012c677804" [[deps.MozillaCACerts_jll]] uuid = "14a3606d-f60d-562e-9121-12d972cd8159" -version = "2022.2.1" +version = "2022.10.11" [[deps.NLSolversBase]] deps = ["DiffResults", "Distributed", "FiniteDiff", "ForwardDiff"] @@ -1041,9 +1082,15 @@ version = "7.8.3" [[deps.NNlib]] deps = ["Adapt", "Atomix", "ChainRulesCore", "GPUArraysCore", "KernelAbstractions", "LinearAlgebra", "Pkg", "Random", "Requires", "Statistics"] -git-tree-sha1 = "99e6dbb50d8a96702dc60954569e9fe7291cc55d" +git-tree-sha1 = 
"72240e3f5ca031937bd536182cb2c031da5f46dd" uuid = "872c559c-99b0-510c-b3b7-b6c96a88d5cd" -version = "0.8.20" +version = "0.8.21" + + [deps.NNlib.extensions] + NNlibAMDGPUExt = "AMDGPU" + + [deps.NNlib.weakdeps] + AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e" [[deps.NNlibCUDA]] deps = ["Adapt", "CUDA", "LinearAlgebra", "NNlib", "Random", "Statistics", "cuDNN"] @@ -1085,12 +1132,6 @@ version = "0.4.5" uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908" version = "1.2.0" -[[deps.OffsetArrays]] -deps = ["Adapt"] -git-tree-sha1 = "82d7c9e310fe55aa54996e6f7f94674e2a38fcb4" -uuid = "6fe1bfb0-de20-5000-8ca7-80f57d26f881" -version = "1.12.9" - [[deps.Ogg_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "887579a3eb005446d514ab7aeac5d1d027658b8f" @@ -1099,14 +1140,14 @@ version = "1.3.5+1" [[deps.OneHotArrays]] deps = ["Adapt", "ChainRulesCore", "Compat", "GPUArraysCore", "LinearAlgebra", "NNlib"] -git-tree-sha1 = "f511fca956ed9e70b80cd3417bb8c2dde4b68644" +git-tree-sha1 = "5e4029759e8699ec12ebdf8721e51a659443403c" uuid = "0b1bfda6-eb8a-41d2-88d8-f5af5cad476f" -version = "0.2.3" +version = "0.2.4" [[deps.OpenBLAS_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"] uuid = "4536629a-c528-5b80-bd46-f80d51c5b363" -version = "0.3.20+0" +version = "0.3.21+4" [[deps.OpenLibm_jll]] deps = ["Artifacts", "Libdl"] @@ -1126,10 +1167,10 @@ uuid = "4d8831e6-92b7-49fb-bdf8-b643e874388c" version = "1.4.1" [[deps.OpenSSL_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "9ff31d101d987eb9d66bd8b176ac7c277beccd09" +deps = ["Artifacts", "JLLWrappers", "Libdl"] +git-tree-sha1 = "1aa4b74f80b01c6bc2b89992b861b5f210e665b5" uuid = "458c3c95-2e84-50aa-8efc-19380b2a3a95" -version = "1.1.20+0" +version = "1.1.21+0" [[deps.OpenSpecFun_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"] @@ -1139,9 +1180,9 @@ version = "0.5.5+0" [[deps.Optim]] deps = ["Compat", "FillArrays", "ForwardDiff", "LineSearches", "LinearAlgebra", "NLSolversBase", "NaNMath", "Parameters", "PositiveFactorizations", "Printf", "SparseArrays", "StatsBase"] -git-tree-sha1 = "a89b11f0f354f06099e4001c151dffad7ebab015" +git-tree-sha1 = "e3a6546c1577bfd701771b477b794a52949e7594" uuid = "429524aa-4258-5aef-a3af-852621145aeb" -version = "1.7.5" +version = "1.7.6" [[deps.Optimisers]] deps = ["ChainRulesCore", "Functors", "LinearAlgebra", "Random", "Statistics"] @@ -1163,7 +1204,7 @@ version = "1.6.0" [[deps.PCRE2_jll]] deps = ["Artifacts", "Libdl"] uuid = "efcefdf7-47ab-520b-bdef-62a2eaa19f15" -version = "10.40.0+0" +version = "10.42.0+0" [[deps.PDMats]] deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"] @@ -1179,9 +1220,9 @@ version = "0.12.3" [[deps.Parsers]] deps = ["Dates", "PrecompileTools", "UUIDs"] -git-tree-sha1 = "a5aef8d4a6e8d81f171b2bd4be5265b01384c74c" +git-tree-sha1 = "4b2e829ee66d4218e0cef22c0a64ee37cf258c29" uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" -version = "2.5.10" +version = "2.7.1" [[deps.Pipe]] git-tree-sha1 = "6842804e7867b115ca9de748a0cf6b364523c16d" @@ -1189,15 +1230,15 @@ uuid = "b98c9c47-44ae-5843-9183-064241ee97a0" version = "1.3.0" [[deps.Pixman_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "b4f5d02549a10e20780a24fce72bea96b6329e29" +deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LLVMOpenMP_jll", "Libdl"] +git-tree-sha1 = "64779bc4c9784fee475689a1752ef4d5747c5e87" uuid = "30392449-352a-5448-841d-b1acce4e97dc" -version = "0.40.1+0" +version = "0.42.2+0" [[deps.Pkg]] -deps = 
["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"] +deps = ["Artifacts", "Dates", "Downloads", "FileWatching", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"] uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" -version = "1.8.0" +version = "1.9.0" [[deps.PlotThemes]] deps = ["PlotUtils", "Statistics"] @@ -1212,16 +1253,24 @@ uuid = "995b91a9-d308-5afd-9ec6-746e21dbc043" version = "1.3.5" [[deps.Plots]] -deps = ["Base64", "Contour", "Dates", "Downloads", "FFMPEG", "FixedPointNumbers", "GR", "JLFzf", "JSON", "LaTeXStrings", "Latexify", "LinearAlgebra", "Measures", "NaNMath", "Pkg", "PlotThemes", "PlotUtils", "PrecompileTools", "Preferences", "Printf", "REPL", "Random", "RecipesBase", "RecipesPipeline", "Reexport", "RelocatableFolders", "Requires", "Scratch", "Showoff", "SparseArrays", "Statistics", "StatsBase", "UUIDs", "UnicodeFun", "Unzip"] -git-tree-sha1 = "d03ef538114b38f89d66776f2d8fdc0280f90621" +deps = ["Base64", "Contour", "Dates", "Downloads", "FFMPEG", "FixedPointNumbers", "GR", "JLFzf", "JSON", "LaTeXStrings", "Latexify", "LinearAlgebra", "Measures", "NaNMath", "Pkg", "PlotThemes", "PlotUtils", "PrecompileTools", "Preferences", "Printf", "REPL", "Random", "RecipesBase", "RecipesPipeline", "Reexport", "RelocatableFolders", "Requires", "Scratch", "Showoff", "SparseArrays", "Statistics", "StatsBase", "UUIDs", "UnicodeFun", "UnitfulLatexify", "Unzip"] +git-tree-sha1 = "75ca67b2c6512ad2d0c767a7cfc55e75075f8bbc" uuid = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" -version = "1.38.12" - -[[deps.PolyesterWeave]] -deps = ["BitTwiddlingConvenienceFunctions", "CPUSummary", "IfElse", "Static", "ThreadingUtilities"] -git-tree-sha1 = "240d7170f5ffdb285f9427b92333c3463bf65bf6" -uuid = "1d0040c9-8b98-4ee7-8388-3f51789ca0ad" -version = "0.2.1" +version = "1.38.16" + + [deps.Plots.extensions] + FileIOExt = "FileIO" + GeometryBasicsExt = "GeometryBasics" + IJuliaExt = "IJulia" + ImageInTerminalExt = "ImageInTerminal" + UnitfulExt = "Unitful" + + [deps.Plots.weakdeps] + FileIO = "5789e2e9-d7fb-5bc7-8068-2c6fae9b9549" + GeometryBasics = "5c1252a2-5f33-56bf-86c9-59e7332b4326" + IJulia = "7073ff75-c697-5162-941a-fcdaad2a7d2a" + ImageInTerminal = "d8c32880-2388-543b-8c61-d9f865259254" + Unitful = "1986cc42-f94f-5a68-af5c-568840ba703d" [[deps.PositiveFactorizations]] deps = ["LinearAlgebra"] @@ -1231,9 +1280,9 @@ version = "0.2.4" [[deps.PrecompileTools]] deps = ["Preferences"] -git-tree-sha1 = "259e206946c293698122f63e2b513a7c99a244e8" +git-tree-sha1 = "9673d39decc5feece56ef3940e5dafba15ba0f81" uuid = "aea7be01-6a6a-4083-8856-8a6e6704d82a" -version = "1.1.1" +version = "1.1.2" [[deps.Preferences]] deps = ["TOML"] @@ -1356,17 +1405,6 @@ version = "0.4.0+0" uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce" version = "0.7.0" -[[deps.SIMDTypes]] -git-tree-sha1 = "330289636fb8107c5f32088d2741e9fd7a061a5c" -uuid = "94e857df-77ce-4151-89e5-788b33177be4" -version = "0.1.0" - -[[deps.SLEEFPirates]] -deps = ["IfElse", "Static", "VectorizationBase"] -git-tree-sha1 = "cda0aece8080e992f6370491b08ef3909d1c04e7" -uuid = "476501e8-09a2-5ece-8869-fb82de89a1fa" -version = "0.6.38" - [[deps.ScientificTypes]] deps = ["CategoricalArrays", "ColorTypes", "Dates", "Distributions", "PrettyTables", "Reexport", "ScientificTypesBase", "StatisticalTraits", "Tables"] git-tree-sha1 = "75ccd10ca65b939dab03b812994e571bf1e3e1da" @@ -1421,30 +1459,28 @@ 
git-tree-sha1 = "5d7e3f4e11935503d3ecaf7186eac40602e7d231" uuid = "699a6c99-e7fa-54fc-8d76-47d257e15c1d" version = "0.9.4" -[[deps.SnoopPrecompile]] -deps = ["Preferences"] -git-tree-sha1 = "e760a70afdcd461cf01a575947738d359234665c" -uuid = "66db9d55-30c0-4569-8b51-7e840670fc0c" -version = "1.0.3" - [[deps.Sockets]] uuid = "6462fe0b-24de-5631-8697-dd941f90decc" [[deps.SortingAlgorithms]] deps = ["DataStructures"] -git-tree-sha1 = "a4ada03f999bd01b3a25dcaa30b2d929fe537e00" +git-tree-sha1 = "c60ec5c62180f27efea3ba2908480f8055e17cee" uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c" -version = "1.1.0" +version = "1.1.1" [[deps.SparseArrays]] -deps = ["LinearAlgebra", "Random"] +deps = ["Libdl", "LinearAlgebra", "Random", "Serialization", "SuiteSparse_jll"] uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" [[deps.SpecialFunctions]] -deps = ["ChainRulesCore", "IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"] -git-tree-sha1 = "ef28127915f4229c971eb43f3fc075dd3fe91880" +deps = ["IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"] +git-tree-sha1 = "7beb031cf8145577fbccacd94b8a8f4ce78428d3" uuid = "276daf66-3868-5448-9aa4-cd146d93841b" -version = "2.2.0" +version = "2.3.0" +weakdeps = ["ChainRulesCore"] + + [deps.SpecialFunctions.extensions] + SpecialFunctionsChainRulesCoreExt = "ChainRulesCore" [[deps.SplittablesBase]] deps = ["Setfield", "Test"] @@ -1458,23 +1494,11 @@ git-tree-sha1 = "3be7d49667040add7ee151fefaf1f8c04c8c8276" uuid = "860ef19b-820b-49d6-a774-d7a799459cd3" version = "1.0.0" -[[deps.Static]] -deps = ["IfElse"] -git-tree-sha1 = "dbde6766fc677423598138a5951269432b0fcc90" -uuid = "aedffcd0-7271-4cad-89d0-dc628f76c6d3" -version = "0.8.7" - -[[deps.StaticArrayInterface]] -deps = ["ArrayInterface", "Compat", "IfElse", "LinearAlgebra", "Requires", "SnoopPrecompile", "SparseArrays", "Static", "SuiteSparse"] -git-tree-sha1 = "33040351d2403b84afce74dae2e22d3f5b18edcb" -uuid = "0d7ed370-da01-4f52-bd93-41d350b8b718" -version = "1.4.0" - [[deps.StaticArrays]] deps = ["LinearAlgebra", "Random", "StaticArraysCore", "Statistics"] -git-tree-sha1 = "8982b3607a212b070a5e46eea83eb62b4744ae12" +git-tree-sha1 = "832afbae2a45b4ae7e831f86965469a24d1d8a83" uuid = "90137ffa-7385-5640-81b9-e52037218182" -version = "1.5.25" +version = "1.5.26" [[deps.StaticArraysCore]] git-tree-sha1 = "6b7ba252635a5eff6a0b0664a41ee140a1c9e72a" @@ -1490,6 +1514,7 @@ version = "3.2.0" [[deps.Statistics]] deps = ["LinearAlgebra", "SparseArrays"] uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" +version = "1.9.0" [[deps.StatsAPI]] deps = ["LinearAlgebra"] @@ -1499,16 +1524,24 @@ version = "1.6.0" [[deps.StatsBase]] deps = ["DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"] -git-tree-sha1 = "d1bf48bfcc554a3761a133fe3a9bb01488e06916" +git-tree-sha1 = "75ebe04c5bed70b91614d684259b661c9e6274a4" uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" -version = "0.33.21" +version = "0.34.0" [[deps.StatsFuns]] -deps = ["ChainRulesCore", "HypergeometricFunctions", "InverseFunctions", "IrrationalConstants", "LogExpFunctions", "Reexport", "Rmath", "SpecialFunctions"] +deps = ["HypergeometricFunctions", "IrrationalConstants", "LogExpFunctions", "Reexport", "Rmath", "SpecialFunctions"] git-tree-sha1 = "f625d686d5a88bcd2b15cd81f18f98186fdc0c9a" uuid = "4c63d2b9-4356-54db-8cca-17b64c39e42c" version = "1.3.0" + [deps.StatsFuns.extensions] + StatsFunsChainRulesCoreExt = "ChainRulesCore" + 
+    StatsFunsInverseFunctionsExt = "InverseFunctions"
+
+    [deps.StatsFuns.weakdeps]
+    ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
+    InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112"
 
 [[deps.StringManipulation]]
 git-tree-sha1 = "46da2434b41f41ac3594ee9816ce5541c6096123"
 uuid = "892a3eda-7b42-436c-8928-eab12a02cf0e"
@@ -1524,10 +1557,15 @@ version = "0.6.15"
 deps = ["Libdl", "LinearAlgebra", "Serialization", "SparseArrays"]
 uuid = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9"
 
+[[deps.SuiteSparse_jll]]
+deps = ["Artifacts", "Libdl", "Pkg", "libblastrampoline_jll"]
+uuid = "bea87d4a-7f5b-5778-9afe-8cc45184846c"
+version = "5.10.1+6"
+
 [[deps.TOML]]
 deps = ["Dates"]
 uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
-version = "1.0.0"
+version = "1.0.3"
 
 [[deps.TableTraits]]
 deps = ["IteratorInterfaceExtensions"]
@@ -1544,7 +1582,7 @@ version = "1.10.1"
 [[deps.Tar]]
 deps = ["ArgTools", "SHA"]
 uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
-version = "1.10.1"
+version = "1.10.0"
 
 [[deps.TensorCore]]
 deps = ["LinearAlgebra"]
@@ -1556,12 +1594,6 @@ version = "0.1.1"
 deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
 uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 
-[[deps.ThreadingUtilities]]
-deps = ["ManualMemory"]
-git-tree-sha1 = "c97f60dd4f2331e1a495527f80d242501d2f9865"
-uuid = "8290d209-cae3-49c0-8002-c8c24d57dab5"
-version = "0.5.1"
-
 [[deps.TimerOutputs]]
 deps = ["ExprTools", "Printf"]
 git-tree-sha1 = "f548a9e9c490030e545f72074a41edfd0e5bcdd7"
@@ -1576,9 +1608,23 @@ version = "0.9.13"
 
 [[deps.Transducers]]
 deps = ["Adapt", "ArgCheck", "BangBang", "Baselet", "CompositionsBase", "DefineSingletons", "Distributed", "InitialValues", "Logging", "Markdown", "MicroCollections", "Requires", "Setfield", "SplittablesBase", "Tables"]
-git-tree-sha1 = "25358a5f2384c490e98abd565ed321ffae2cbb37"
+git-tree-sha1 = "a66fb81baec325cf6ccafa243af573b031e87b00"
 uuid = "28d57a85-8fef-5791-bfe6-a80928e7c999"
-version = "0.4.76"
+version = "0.4.77"
+
+    [deps.Transducers.extensions]
+    TransducersBlockArraysExt = "BlockArrays"
+    TransducersDataFramesExt = "DataFrames"
+    TransducersLazyArraysExt = "LazyArrays"
+    TransducersOnlineStatsBaseExt = "OnlineStatsBase"
+    TransducersReferenceablesExt = "Referenceables"
+
+    [deps.Transducers.weakdeps]
+    BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e"
+    DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
+    LazyArrays = "5078a376-72f3-5289-bfd5-ec5146d43c02"
+    OnlineStatsBase = "925886fa-5bf2-5e8e-b522-a9147a512338"
+    Referenceables = "42d2dcc6-99eb-4e98-b66c-637b7d73030e"
 
 [[deps.URIs]]
 git-tree-sha1 = "074f993b0ca030848b897beff716d93aca60f06a"
@@ -1603,11 +1649,23 @@ git-tree-sha1 = "53915e50200959667e78a92a418594b428dffddf"
 uuid = "1cfade01-22cf-5700-b092-accc4b62d6e1"
 version = "0.4.1"
 
-[[deps.UnicodePlots]]
-deps = ["ColorSchemes", "ColorTypes", "Contour", "Crayons", "Dates", "LinearAlgebra", "MarchingCubes", "NaNMath", "PrecompileTools", "Printf", "Requires", "SparseArrays", "StaticArrays", "StatsBase"]
-git-tree-sha1 = "5e3a9796dfae26edbe5a2cc436b230c86a8ab0c4"
-uuid = "b8865327-cd53-5732-bb35-84acbb429228"
-version = "3.5.3"
+[[deps.Unitful]]
+deps = ["ConstructionBase", "Dates", "LinearAlgebra", "Random"]
+git-tree-sha1 = "ba4aa36b2d5c98d6ed1f149da916b3ba46527b2b"
+uuid = "1986cc42-f94f-5a68-af5c-568840ba703d"
+version = "1.14.0"
+
+    [deps.Unitful.extensions]
+    InverseFunctionsUnitfulExt = "InverseFunctions"
+
+    [deps.Unitful.weakdeps]
+    InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112"
+
+[[deps.UnitfulLatexify]]
+deps = ["LaTeXStrings", "Latexify", "Unitful"]
"Latexify", "Unitful"] +git-tree-sha1 = "e2d817cc500e960fdbafcf988ac8436ba3208bfd" +uuid = "45397f5d-5981-4c77-b2b3-fc36d6e9b728" +version = "1.6.3" [[deps.UnsafeAtomics]] git-tree-sha1 = "6331ac3440856ea1988316b46045303bef658278" @@ -1625,12 +1683,6 @@ git-tree-sha1 = "ca0969166a028236229f63514992fc073799bb78" uuid = "41fe7b60-77ed-43a1-b4f0-825fd5a5650d" version = "0.2.0" -[[deps.VectorizationBase]] -deps = ["ArrayInterface", "CPUSummary", "HostCPUFeatures", "IfElse", "LayoutPointers", "Libdl", "LinearAlgebra", "SIMDTypes", "Static", "StaticArrayInterface"] -git-tree-sha1 = "b182207d4af54ac64cbc71797765068fdeff475d" -uuid = "3d5dd08c-fd9d-11e8-17fa-ed2836048c2f" -version = "0.21.64" - [[deps.Wayland_jll]] deps = ["Artifacts", "Expat_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Pkg", "XML2_jll"] git-tree-sha1 = "ed8d92d9774b077c53e1da50fd81a36af3744c1c" @@ -1784,7 +1836,7 @@ version = "1.4.0+3" [[deps.Zlib_jll]] deps = ["Libdl"] uuid = "83775a58-1f1d-513f-b197-d71354ab007a" -version = "1.2.12+3" +version = "1.2.13+0" [[deps.Zstd_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl"] @@ -1794,9 +1846,19 @@ version = "1.5.5+0" [[deps.Zygote]] deps = ["AbstractFFTs", "ChainRules", "ChainRulesCore", "DiffRules", "Distributed", "FillArrays", "ForwardDiff", "GPUArrays", "GPUArraysCore", "IRTools", "InteractiveUtils", "LinearAlgebra", "LogExpFunctions", "MacroTools", "NaNMath", "PrecompileTools", "Random", "Requires", "SparseArrays", "SpecialFunctions", "Statistics", "ZygoteRules"] -git-tree-sha1 = "ebac1ae9f048c669317ad48c9bed815790a468d8" +git-tree-sha1 = "5be3ddb88fc992a7d8ea96c3f10a49a7e98ebc7b" uuid = "e88e6eb3-aa80-5325-afca-941959d7151f" -version = "0.6.61" +version = "0.6.62" + + [deps.Zygote.extensions] + ZygoteColorsExt = "Colors" + ZygoteDistancesExt = "Distances" + ZygoteTrackerExt = "Tracker" + + [deps.Zygote.weakdeps] + Colors = "5ae59095-9a9b-59fe-a467-6f913c188581" + Distances = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7" + Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c" [[deps.ZygoteRules]] deps = ["ChainRulesCore", "MacroTools"] @@ -1806,9 +1868,9 @@ version = "0.2.3" [[deps.cuDNN]] deps = ["CEnum", "CUDA", "CUDNN_jll"] -git-tree-sha1 = "ec954b59f6b0324543f2e3ed8118309ac60cb75b" +git-tree-sha1 = "f65490d187861d6222cb38bcbbff3fd949a7ec3e" uuid = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd" -version = "1.0.3" +version = "1.0.4" [[deps.fzf_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] @@ -1829,9 +1891,9 @@ uuid = "0ac62f75-1d6f-5e53-bd7c-93b484bb37c0" version = "0.15.1+0" [[deps.libblastrampoline_jll]] -deps = ["Artifacts", "Libdl", "OpenBLAS_jll"] +deps = ["Artifacts", "Libdl"] uuid = "8e850b90-86db-534c-a0d3-1478176c7d93" -version = "5.1.1+0" +version = "5.7.0+0" [[deps.libfdk_aac_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]