diff --git a/.gitignore b/.gitignore index 5ff6fb08..edcf29f8 100644 --- a/.gitignore +++ b/.gitignore @@ -1,124 +1,52 @@ -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so - # Distribution / packaging -.Python +*.egg +*.egg-info/ +*.manifest +*.spec +.eggs/ +.installed.cfg build/ -develop-eggs/ dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ +MANIFEST sdist/ -var/ wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST -*build/ - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. -*.manifest -*.spec -*.sublime-workspace -*.sublime-project -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -.hypothesis/ -.pytest_cache/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation +# Documentation docs/_build/ +_build/ -# PyBuilder -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# pyenv -.python-version - -# celery beat schedule file -celerybeat-schedule - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site +# IDE +.idea/ +.vscode/ -# mypy -.mypy_cache/ +# Jupyter +.ipynb_checkpoints/ -*notes/ +# macOS +.DS_Store -.idea/ +# pixi +.pixi/ -*.bak +# Python +__pycache__/ +*.py[cod] +*.so +*$py.class +# Ruff +.ruff_cache/ -*.db +# Testing +.cache/ +.coverage +.coverage.* +.hypothesis/ +.pytest_cache/ +coverage.xml +htmlcov/ +# Version file (generated by hatch-vcs) +src/*/_version.py -mixed_documents/ -src/skillmodels/_version.py -.pixi -.vscode -*.py.*.bin 
-*.py.*.html +.claude diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ee3f39be..300eaf14 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,7 +4,10 @@ repos: hooks: - id: check-hooks-apply - id: check-useless-excludes - # - id: identity # Prints all files passed to pre-commits. Debugging. + - repo: https://github.com/tox-dev/tox-toml-fmt + rev: v1.2.2 + hooks: + - id: tox-toml-fmt - repo: https://github.com/lyz-code/yamlfix rev: 1.19.1 hooks: @@ -15,53 +18,51 @@ repos: - id: check-added-large-files args: - --maxkb=50000 + - id: check-ast - id: check-case-conflict + - id: check-docstring-first - id: check-merge-conflict + - id: check-toml - id: check-vcs-permalinks - id: check-yaml - - id: check-toml - id: debug-statements - id: end-of-file-fixer - id: fix-byte-order-marker types: - text - - id: forbid-submodules - id: mixed-line-ending args: - --fix=lf description: Forces to replace line ending by the UNIX 'lf' character. + - id: name-tests-test + args: + - --pytest-test-first - id: no-commit-to-branch args: - --branch - main - - id: name-tests-test - args: - - --pytest-test-first - id: trailing-whitespace - - id: check-ast - - id: check-docstring-first - repo: https://github.com/adrienverge/yamllint.git - rev: v1.37.1 + rev: v1.38.0 hooks: - id: yamllint - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.14.10 + rev: v0.14.14 hooks: - id: ruff-check - types_or: - - python - - pyi - - jupyter args: - --fix - # - --unsafe-fixes - - id: ruff-format types_or: - - python + - jupyter - pyi + - python + - id: ruff-format + types_or: - jupyter + - pyi + - python - repo: https://github.com/kynan/nbstripout - rev: 0.8.2 + rev: 0.9.0 hooks: - id: nbstripout args: @@ -72,10 +73,12 @@ repos: hooks: - id: mdformat additional_dependencies: - - mdformat-myst + - mdformat-gfm + - mdformat-gfm-alerts - mdformat-ruff args: - --wrap - '88' + files: (CLAUDE\.md|README\.md) ci: autoupdate_schedule: monthly diff --git 
a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 00000000..f0038d2b --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,20 @@ +--- +version: 2 +build: + os: ubuntu-24.04 + tools: + python: '3.14' + nodejs: '22' + jobs: + create_environment: + - asdf plugin add pixi + - asdf install pixi latest + - asdf global pixi latest + install: + - pixi install -e docs + post_build: + # Jupyter Book 2.0 builds site content to _build/html. + # For ReadTheDocs, we build and then copy to the expected output location. + - mkdir --parents $READTHEDOCS_OUTPUT/html/ + - BASE_URL="/$READTHEDOCS_LANGUAGE/$READTHEDOCS_VERSION" pixi run -e docs docs + - cp -a docs/_build/html/. "$READTHEDOCS_OUTPUT/html" && rm -r docs/_build diff --git a/.yamllint.yml b/.yamllint.yml index 72f64be1..631965cd 100644 --- a/.yamllint.yml +++ b/.yamllint.yml @@ -1,8 +1,4 @@ --- -yaml-files: - - '*.yaml' - - '*.yml' - - .yamllint rules: braces: enable brackets: enable @@ -23,9 +19,9 @@ rules: key-duplicates: enable key-ordering: disable line-length: - max: 88 + allow-non-breakable-inline-mappings: true allow-non-breakable-words: true - allow-non-breakable-inline-mappings: false + max: 88 new-line-at-end-of-file: enable new-lines: type: unix @@ -33,4 +29,11 @@ rules: quoted-strings: disable trailing-spaces: enable truthy: + check-keys: false level: warning +yaml-files: + - '*.yaml' + - '*.yml' + - .yamllint +ignore: + - src/skillmodels/test_data/simplest_augmented_model.yaml diff --git a/CLAUDE.md b/CLAUDE.md index 25ca239e..c6a1547c 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -27,13 +27,29 @@ pixi run -e test-cpu pytest tests/test_kalman_filters.py::test_function_name # Type checking pixi run ty -# Install pre-commit hooks (required before committing) -pre-commit install +# Quality checks (linting, formatting) +prek run --all-files # Build documentation (from docs/ directory) make html ``` +## Command Rules + +Always use these command mappings: + +- **Python**: Use `pixi run python` instead of 
`python` or `python3` +- **Type checker**: Use `pixi run ty` instead of running ty/mypy/pyright directly +- **Tests**: Use `pixi run tests` instead of `pytest` directly +- **Linting/formatting**: Use `prek run --all-files` instead of `ruff` directly +- **All quality checks**: Use `prek run --all-files` + +Before finishing any task that modifies code, always run: + +1. `pixi run ty` (type checker) +1. `pixi run tests` (tests) +1. `prek run --all-files` (quality checks) + ## Architecture ### Core Pipeline Flow @@ -82,14 +98,19 @@ The main package exports three functions: - `get_maximization_inputs()`: Prepare optimization problem for parameter estimation - `get_filtered_states()`: Extract filtered latent factor estimates -- `simulate_dataset()`: Generate synthetic data from model specification +- `simulate_dataset()`: Generate synthetic data from model specification (accepts + optional `seed` parameter for reproducibility) ## Code Style -- Uses Ruff for linting (target: Python 3.13, line length: 88) +- Require Python 3.14 +- Uses Ruff for linting (target: Python 3.14, line length: 88) - Google-style docstrings - Pre-commit hooks enforce formatting and linting - Type checking via `ty` with strict rules +- Do not use `from __future__ import annotations` +- Use modern numpy random API: `rng = np.random.default_rng(seed)` instead of + `np.random.seed()` or legacy functions like `np.random.randn()` ## Testing diff --git a/docs/Makefile b/docs/Makefile deleted file mode 100644 index be6e0795..00000000 --- a/docs/Makefile +++ /dev/null @@ -1,216 +0,0 @@ -# Makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -PAPER = -BUILDDIR = build - -# User-friendly check for sphinx-build -ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) -$(error The '$(SPHINXBUILD)' command was not found. 
Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) -endif - -# Internal variables. -PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source -# the i18n builder cannot share the environment and doctrees with the others -I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source - -.PHONY: help -help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " singlehtml to make a single large HTML file" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " applehelp to make an Apple Help Book" - @echo " devhelp to make HTML files and a Devhelp project" - @echo " epub to make an epub" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " latexpdf to make LaTeX files and run them through pdflatex" - @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" - @echo " text to make text files" - @echo " man to make manual pages" - @echo " texinfo to make Texinfo files" - @echo " info to make Texinfo files and run them through makeinfo" - @echo " gettext to make PO message catalogs" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " xml to make Docutils-native XML files" - @echo " pseudoxml to make pseudoxml-XML files for display purposes" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - 
@echo " coverage to run coverage check of the documentation (if enabled)" - -.PHONY: clean -clean: - rm -rf $(BUILDDIR)/* - -.PHONY: html -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -.PHONY: dirhtml -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -.PHONY: singlehtml -singlehtml: - $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml - @echo - @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." - -.PHONY: pickle -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -.PHONY: json -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -.PHONY: htmlhelp -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -.PHONY: qthelp -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/skillmodels.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/skillmodels.qhc" - -.PHONY: applehelp -applehelp: - $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp - @echo - @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." - @echo "N.B. You won't be able to view it unless you put it in" \ - "~/Library/Documentation/Help or install it in your application" \ - "bundle." 
- -.PHONY: devhelp -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." - @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/skillmodels" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/skillmodels" - @echo "# devhelp" - -.PHONY: epub -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." - -.PHONY: latex -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make' in that directory to run these through (pdf)latex" \ - "(use \`make latexpdf' here to do that automatically)." - -.PHONY: latexpdf -latexpdf: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -.PHONY: latexpdfja -latexpdfja: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through platex and dvipdfmx..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -.PHONY: text -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -.PHONY: man -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -.PHONY: texinfo -texinfo: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo - @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." - @echo "Run \`make' in that directory to run these through makeinfo" \ - "(use \`make info' here to do that automatically)." 
- -.PHONY: info -info: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo "Running Texinfo files through makeinfo..." - make -C $(BUILDDIR)/texinfo info - @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." - -.PHONY: gettext -gettext: - $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale - @echo - @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." - -.PHONY: changes -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." - -.PHONY: linkcheck -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." - -.PHONY: doctest -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." - -.PHONY: coverage -coverage: - $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage - @echo "Testing of coverage in the sources finished, look at the " \ - "results in $(BUILDDIR)/coverage/python.txt." - -.PHONY: xml -xml: - $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml - @echo - @echo "Build finished. The XML files are in $(BUILDDIR)/xml." - -.PHONY: pseudoxml -pseudoxml: - $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml - @echo - @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 
diff --git a/docs/source/_static/css/custom.css b/docs/_static/css/custom.css similarity index 100% rename from docs/source/_static/css/custom.css rename to docs/_static/css/custom.css diff --git a/docs/source/_static/images/book.svg b/docs/_static/images/book.svg similarity index 100% rename from docs/source/_static/images/book.svg rename to docs/_static/images/book.svg diff --git a/docs/source/_static/images/books.svg b/docs/_static/images/books.svg similarity index 100% rename from docs/source/_static/images/books.svg rename to docs/_static/images/books.svg diff --git a/docs/source/_static/images/coding.svg b/docs/_static/images/coding.svg similarity index 100% rename from docs/source/_static/images/coding.svg rename to docs/_static/images/coding.svg diff --git a/docs/source/_static/images/light-bulb.svg b/docs/_static/images/light-bulb.svg similarity index 100% rename from docs/source/_static/images/light-bulb.svg rename to docs/_static/images/light-bulb.svg diff --git a/docs/source/_static/images/logo.svg b/docs/_static/images/logo.svg similarity index 100% rename from docs/source/_static/images/logo.svg rename to docs/_static/images/logo.svg diff --git a/docs/explanations/names_and_concepts.md b/docs/explanations/names_and_concepts.md new file mode 100644 index 00000000..75a23060 --- /dev/null +++ b/docs/explanations/names_and_concepts.md @@ -0,0 +1,83 @@ +# Names and Concepts + +This section explains key concepts and variable names used throughout skillmodels. +Understanding these is helpful if you want to understand the implementation or extend +the package. 
+ +## Dimensions + +The `Dimensions` dataclass contains integer values for model dimensions: + +- **n_latent_factors**: Number of latent factors (states) in the model +- **n_observed_factors**: Number of observed factors +- **n_periods**: Number of periods (one more than transition equations) +- **n_aug_periods**: Number of augmented periods (includes sub-periods for endogenous + factors) +- **n_mixtures**: Number of elements in the finite mixture of normals distribution +- **n_controls**: Number of control variables (always >= 1 due to constant) + +## Labels + +The `Labels` dataclass contains names and indices: + +- **latent_factors**: Tuple of latent factor names +- **observed_factors**: Tuple of observed factor names +- **controls**: Tuple of control variable names (first is always "constant") +- **periods**: Tuple of period indices (0, 1, 2, ...) +- **aug_periods**: Tuple of augmented period indices +- **stagemap**: Tuple mapping periods to stages +- **stages**: Tuple of stage indices + +## Development Stages vs Periods + +A **development stage** is a group of consecutive periods where the skill formation +technology (transition function parameters) remains constant. Stages are just equality +constraints on parameters. + +Example: With 5 periods, you can estimate at most 4 different transition functions. +The stagemap `[0, 0, 1, 1]` means: +- Periods 0→1 and 1→2 share the same parameters (stage 0) +- Periods 2→3 and 3→4 share the same parameters (stage 1) + +## Augmented Periods + +When models include endogenous factors (factors that depend on other factors in the +same period), skillmodels internally expands periods into "augmented periods" to handle +the sequential updating. Each regular period may contain multiple augmented periods. + +## Anchoring + +Anchoring links latent factors to observable outcomes, allowing identification and +interpretation of the factor scale. 
The `Anchoring` dataclass contains: + +- **outcomes**: Which factors are anchored to which outcome variables +- **free_controls**: Whether anchoring equations have their own control coefficients +- **free_constant**: Whether anchoring equations have a free constant +- **free_loadings**: Whether anchoring loadings are estimated (vs fixed to 1) +- **ignore_constant_when_anchoring**: Skip constant in anchoring transformation + +## Update Info + +A DataFrame specifying each Kalman update step: + +- One row per measurement equation evaluation +- Columns indicate which factors have free loadings for each measurement +- Used internally to structure the Kalman filter passes + +## Normalizations + +Settings for identifying the model scale and location: + +- **loadings**: Fixed factor loading values (cannot be zero) +- **intercepts**: Fixed intercept values for measurement equations + +Without normalizations, latent factor models are not identified (the scale and location +of factors are arbitrary). + +## Estimation Options + +The `EstimationOptions` dataclass controls numerical aspects: + +- **sigma_points_scale**: Controls spread of sigma points in unscented Kalman filter +- **robust_bounds**: Tightens parameter bounds to avoid numerical issues +- **clipping_***: Parameters for soft-clipping the log-likelihood to prevent infinities diff --git a/docs/explanations/notes_on_factor_scales.md b/docs/explanations/notes_on_factor_scales.md new file mode 100644 index 00000000..e938faf4 --- /dev/null +++ b/docs/explanations/notes_on_factor_scales.md @@ -0,0 +1,84 @@ +# Notes on Scales and Normalizations + +This section discusses factor scales and normalization, building on the +[critique by Wiswall and Agostinelli](https://tinyurl.com/y3wl43kz) of the original +CHS estimator. + +Wiswall and Agostinelli define a class of transition functions with Known Location and +Scale (KLS) that require fewer normalizations. 
Their critique potentially invalidates +certain empirical estimates from CHS, but not the general estimation approach. + +To get estimates that avoid renormalization issues, you can either: +1. Use fewer normalizations with KLS transition functions, or +2. Use non-KLS transition functions with one normalization per period and factor + +As there is no natural scale for skills, neither approach is inherently better. +However, we prefer using flexible non-KLS transition functions with explicit +normalizations because: +1. They are more compatible with development stages spanning multiple periods +2. Suitable normalizations can give latent factors a more meaningful interpretation + +## Why KLS Functions Don't Keep Scales Constant + +After reading the Wiswall-Agostinelli critique, one might think that using KLS +transition functions identifies some sort of "natural" scale. This is not the case. + +Consider a simple model of financial investments with two latent factors: +- **w**: wealth (stock variable) +- **i**: investment (flow variable) + +Suppose periods are one year and the annual interest rate is 10%. The most intuitive +representation measures everything in dollars: + +$$ +w_{t+1} = 1.1 w_t + i_t +$$ + +However, we could measure w in period t in dollars, i in 1000 dollars, and w in period +t+1 in cents. The transition equation becomes: + +$$ +w_{t+1} = 110 w_t + 100000 i_t +$$ + +This describes the exact same system in different scales. Any linear function could +describe this system—just with different scale combinations. + +The CES function is KLS and contains all linear functions (without intercept) whose +parameters sum to 1. If we set both factor scales to dollars initially, the CES +function would choose: + +$$ +w_{t+1} = \frac{1}{2.1}(1.1 w_t + i_t) \approx 0.524 w_t + 0.476 i_t +$$ + +This means wealth in period t+1 is measured in approximately 0.476 dollars—an +arbitrary choice made by the functional form, not something "natural." 
+ +## Why CES and log_CES Functions are Problematic + +The KLS definition refers only to the scale of the output. But CES and log_CES +functions may also impose restrictions on input scales. + +Simulations suggest that with log_CES: +- You need initial location normalizations for all factors +- You only need to normalize the scale of one factor initially + +However, we don't have formal identification results for this. **We advise caution** +when using CES or log_CES functions—think carefully about your normalizations rather +than relying on automatic generation. + +## Normalizations and Development Stages + +When using development stages (periods with identical transition parameters), the +normalization requirements change. + +The key insight: you can identify scale from the first period of a stage, so no later +normalizations are needed until the next stage begins. + +**Recommendations:** +- Normalize only in the first period of each stage +- For the initial stage, normalize the first two periods +- Use automatic normalizations when working with stages to avoid confusion + +This reveals another type of over-normalization in the original CHS paper. diff --git a/docs/getting_started/tutorial.ipynb b/docs/getting_started/tutorial.ipynb new file mode 100644 index 00000000..b123b437 --- /dev/null +++ b/docs/getting_started/tutorial.ipynb @@ -0,0 +1,272 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "0", + "metadata": {}, + "source": [ + "# Skillmodels Quickstart\n", + "\n", + "This tutorial demonstrates the basic workflow for estimating a latent factor model\n", + "using skillmodels. We'll use Example 2 from the Cunha, Heckman, and Schennach (2010)\n", + "replication files." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "\n", + "from skillmodels import get_maximization_inputs\n", + "from skillmodels.config import TEST_DATA_DIR\n", + "from skillmodels.test_data.model2 import MODEL2" + ] + }, + { + "cell_type": "markdown", + "id": "2", + "metadata": {}, + "source": [ + "" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "model = MODEL2\n", + "\n", + "# Show the structure\n", + "print(\"Factors:\", list(model.factors.keys()))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "data = pd.read_stata(TEST_DATA_DIR / \"model2_simulated_data.dta\")\n", + "data = data.set_index([\"caseid\", \"period\"])\n", + "data.head()" + ] + }, + { + "cell_type": "markdown", + "id": "5", + "metadata": {}, + "source": [ + "## Getting Maximization Inputs\n", + "\n", + "The main entry point is `get_maximization_inputs()`. 
It takes a model specification\n", + "and dataset, and returns everything needed to maximize the likelihood using optimagic:\n", + "\n", + "- `loglike`: The compiled log-likelihood function\n", + "- `gradient`: The gradient of the log-likelihood\n", + "- `loglike_and_gradient`: Combined function (more efficient)\n", + "- `debug_loglike`: Uncompiled version for debugging\n", + "- `params_template`: Parameter DataFrame with bounds and starting values\n", + "- `constraints`: Parameter constraints for optimization" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "max_inputs = get_maximization_inputs(model, data)\n", + "print(\"Available keys:\", list(max_inputs.keys()))" + ] + }, + { + "cell_type": "markdown", + "id": "7", + "metadata": {}, + "source": [ + "## Parameter Template\n", + "\n", + "The `params_template` is a pandas DataFrame with:\n", + "- A MultiIndex identifying each parameter (category, period, name1, name2)\n", + "- Columns for `value` (to be filled with starting values), `lower_bound`, `upper_bound`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "params_template = max_inputs[\"params_template\"]\n", + "params_template.head(10)" + ] + }, + { + "cell_type": "markdown", + "id": "9", + "metadata": {}, + "source": [ + "## Choosing Starting Values\n", + "\n", + "Good starting values are important for optimization. 
As a rule of thumb:\n", + "\n", + "- If measurements are standardized, use 1.0 for free loadings and 0.0 for free intercepts\n", + "- Start measurement and shock standard deviations slightly larger than expected\n", + "- Initial state means can often start at 0\n", + "\n", + "Here we set reasonable defaults:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [ + "params = params_template.copy()\n", + "\n", + "# Set starting values by category\n", + "for category in params.index.get_level_values(\"category\").unique():\n", + " if category == \"loadings\":\n", + " params.loc[category, \"value\"] = 1.0\n", + " elif category == \"controls\":\n", + " params.loc[category, \"value\"] = 0.0\n", + " elif category in (\"meas_sds\", \"shock_sds\") or category == \"initial_cholcovs\":\n", + " params.loc[category, \"value\"] = 0.5\n", + " elif category == \"initial_states\":\n", + " params.loc[category, \"value\"] = 0.0\n", + " elif category == \"mixture_weights\":\n", + " params.loc[category, \"value\"] = 1.0\n", + " elif category == \"transition\":\n", + " # Set transition parameters to reasonable defaults\n", + " params.loc[category, \"value\"] = 0.5\n", + "\n", + "params.head(10)" + ] + }, + { + "cell_type": "markdown", + "id": "11", + "metadata": {}, + "source": [ + "## JAX Compilation\n", + "\n", + "Skillmodels uses JAX for just-in-time compilation and automatic differentiation.\n", + "The first call to `loglike` or `gradient` triggers compilation, which takes a few\n", + "seconds. Subsequent calls are very fast." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "loglike = max_inputs[\"loglike\"]\n", + "gradient = max_inputs[\"gradient\"]\n", + "loglike_and_gradient = max_inputs[\"loglike_and_gradient\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "# First call includes compilation time\n", + "loglike_value = loglike(params)\n", + "print(f\"Log-likelihood at starting values: {loglike_value:.2f}\")" + ] + }, + { + "cell_type": "markdown", + "id": "14", + "metadata": {}, + "source": [ + "## Constraints\n", + "\n", + "Skillmodels automatically generates constraints from the model specification:\n", + "- Fixed parameters (normalized loadings and intercepts)\n", + "- Stagemap equality constraints\n", + "- Bound constraints\n", + "\n", + "You can add additional constraints for your specific model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": {}, + "outputs": [], + "source": [ + "constraints = max_inputs[\"constraints\"]\n", + "print(f\"Number of auto-generated constraints: {len(constraints)}\")" + ] + }, + { + "cell_type": "markdown", + "id": "16", + "metadata": {}, + "source": [ + "## Estimation with optimagic\n", + "\n", + "To estimate the model, use optimagic's `maximize` function:\n", + "\n", + "```python\n", + "import optimagic as om\n", + "\n", + "result = om.maximize(\n", + " fun=loglike,\n", + " params=params,\n", + " algorithm=\"scipy_lbfgsb\",\n", + " fun_and_jac=loglike_and_gradient,\n", + " constraints=constraints,\n", + ")\n", + "```\n", + "\n", + "The `fun_and_jac` argument is important: it uses the combined function that\n", + "computes both the likelihood and gradient efficiently." 
+ ] + }, + { + "cell_type": "markdown", + "id": "17", + "metadata": {}, + "source": [ + "## Next Steps\n", + "\n", + "- See the [Model Specifications](../how_to_guides/model_specs.md) guide for details\n", + " on writing model specifications\n", + "- See the [Simulation](../how_to_guides/how_to_simulate_dataset.ipynb) guide for\n", + " generating synthetic data\n", + "- After estimation, use `get_filtered_states()` to extract latent factor estimates" + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/source/how_to_guides/how_to_simulate_dataset.ipynb b/docs/how_to_guides/how_to_simulate_dataset.ipynb similarity index 85% rename from docs/source/how_to_guides/how_to_simulate_dataset.ipynb rename to docs/how_to_guides/how_to_simulate_dataset.ipynb index bad7647c..e15340fa 100644 --- a/docs/source/how_to_guides/how_to_simulate_dataset.ipynb +++ b/docs/how_to_guides/how_to_simulate_dataset.ipynb @@ -7,10 +7,10 @@ "outputs": [], "source": [ "import pandas as pd\n", - "import yaml\n", "\n", - "from skillmodels.config import TEST_DIR\n", - "from skillmodels.simulate_data import simulate_dataset" + "from skillmodels.config import REGRESSION_VAULT, TEST_DATA_DIR\n", + "from skillmodels.simulate_data import simulate_dataset\n", + "from skillmodels.test_data.model2 import MODEL2" ] }, { @@ -34,13 +34,12 @@ "metadata": {}, "outputs": [], "source": [ - "with open(TEST_DIR / \"model2.yaml\") as y:\n", - " model_dict = yaml.load(y, Loader=yaml.FullLoader)\n", + "model = MODEL2\n", "\n", - "data = pd.read_stata(TEST_DIR / \"model2_simulated_data.dta\")\n", + "data = pd.read_stata(TEST_DATA_DIR / \"model2_simulated_data.dta\")\n", "data = data.set_index([\"caseid\", \"period\"])\n", "\n", - "params = pd.read_csv(TEST_DIR / \"regression_vault\" / \"one_stage_anchoring.csv\")\n", + "params = pd.read_csv(REGRESSION_VAULT / \"one_stage_anchoring.csv\")\n", "params = params.set_index([\"category\", 
\"period\", \"name1\", \"name2\"])" ] }, @@ -58,7 +57,7 @@ "outputs": [], "source": [ "initial_data = simulate_dataset(\n", - " model_dict=model_dict,\n", + " model_spec=model,\n", " params=params,\n", " data=data,\n", ")\n", @@ -102,7 +101,7 @@ "outputs": [], "source": [ "data_after_policies = simulate_dataset(\n", - " model_dict=model_dict,\n", + " model_spec=model,\n", " params=params,\n", " data=data,\n", ")\n", diff --git a/docs/source/how_to_guides/how_to_visualize_correlations.ipynb b/docs/how_to_guides/how_to_visualize_correlations.ipynb similarity index 81% rename from docs/source/how_to_guides/how_to_visualize_correlations.ipynb rename to docs/how_to_guides/how_to_visualize_correlations.ipynb index 299f3207..57e2c484 100644 --- a/docs/source/how_to_guides/how_to_visualize_correlations.ipynb +++ b/docs/how_to_guides/how_to_visualize_correlations.ipynb @@ -14,17 +14,15 @@ "outputs": [], "source": [ "import pandas as pd\n", - "import yaml\n", "\n", - "from skillmodels.config import TEST_DIR\n", + "from skillmodels.config import REGRESSION_VAULT, TEST_DATA_DIR\n", "from skillmodels.correlation_heatmap import (\n", " get_measurements_corr,\n", " get_quasi_scores_corr,\n", " get_scores_corr,\n", " plot_correlation_heatmap,\n", ")\n", - "\n", - "%load_ext nb_black" + "from skillmodels.test_data.model2 import MODEL2" ] }, { @@ -40,8 +38,7 @@ "metadata": {}, "outputs": [], "source": [ - "with open(TEST_DIR / \"model2.yaml\") as y:\n", - " model_dict = yaml.load(y, Loader=yaml.FullLoader)" + "model = MODEL2" ] }, { @@ -50,10 +47,10 @@ "metadata": {}, "outputs": [], "source": [ - "params = pd.read_csv(TEST_DIR / \"regression_vault\" / \"one_stage_anchoring.csv\")\n", + "params = pd.read_csv(REGRESSION_VAULT / \"one_stage_anchoring.csv\")\n", "params = params.set_index([\"category\", \"period\", \"name1\", \"name2\"])\n", "\n", - "data = pd.read_stata(TEST_DIR / \"model2_simulated_data.dta\")\n", + "data = pd.read_stata(TEST_DATA_DIR / 
\"model2_simulated_data.dta\")\n", "data = data.set_index([\"caseid\", \"period\"])" ] }, @@ -71,7 +68,7 @@ "outputs": [], "source": [ "corr_meas = get_measurements_corr(\n", - " periods=0, data=data, model_dict=model_dict, factors=[\"fac1\", \"fac2\"]\n", + " periods=0, data=data, model_spec=model, factors=[\"fac1\", \"fac2\"]\n", ")" ] }, @@ -109,7 +106,7 @@ "outputs": [], "source": [ "corr_score = get_scores_corr(\n", - " periods=None, params=params, data=data, model_dict=model_dict, factors=\"fac1\"\n", + " periods=None, params=params, data=data, model_spec=model, factors=\"fac1\"\n", ")" ] }, @@ -140,7 +137,7 @@ "outputs": [], "source": [ "quasi_corr_score = get_quasi_scores_corr(\n", - " periods=None, data=data, model_dict=model_dict, factors=\"fac1\"\n", + " periods=None, data=data, model_spec=model, factors=\"fac1\"\n", ")" ] }, @@ -170,7 +167,10 @@ "metadata": {}, "outputs": [], "source": [ - "from skillmodels.visualize_transition_equations import _get_pardict, _set_index_params" + "from skillmodels.visualize_transition_equations import (\n", + " _get_parsed_params,\n", + " _set_index_params,\n", + ")" ] }, { @@ -188,10 +188,10 @@ "metadata": {}, "outputs": [], "source": [ - "_get_pardict(\n", - " params=_set_index_params(process_model(model_dict), params),\n", - " model=process_model(model_dict),\n", - ")[\"loadings\"]" + "_get_parsed_params(\n", + " params=_set_index_params(process_model(model), params),\n", + " model=process_model(model),\n", + ").loadings" ] }, { diff --git a/docs/source/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb b/docs/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb similarity index 89% rename from docs/source/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb rename to docs/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb index db9ec045..73831c95 100644 --- a/docs/source/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb +++ 
b/docs/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb @@ -6,24 +6,14 @@ "id": "0", "metadata": {}, "outputs": [], - "source": [ - "%load_ext nb_black" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], "source": [ "import numpy as np\n", "import pandas as pd\n", - "import yaml\n", "\n", - "from skillmodels.config import TEST_DIR\n", + "from skillmodels.config import REGRESSION_VAULT, TEST_DATA_DIR\n", "from skillmodels.maximization_inputs import get_maximization_inputs\n", "from skillmodels.simulate_data import simulate_dataset\n", + "from skillmodels.test_data.model2 import MODEL2\n", "from skillmodels.visualize_factor_distributions import (\n", " bivariate_density_contours,\n", " bivariate_density_surfaces,\n", @@ -34,7 +24,7 @@ }, { "cell_type": "markdown", - "id": "2", + "id": "1", "metadata": {}, "source": [ "# How to visualize the distribution of latent factors\n", @@ -53,22 +43,21 @@ { "cell_type": "code", "execution_count": null, - "id": "3", + "id": "2", "metadata": {}, "outputs": [], "source": [ - "with open(TEST_DIR / \"model2.yaml\") as y:\n", - " model_dict = yaml.load(y, Loader=yaml.FullLoader)\n", - "params = pd.read_csv(TEST_DIR / \"regression_vault\" / \"one_stage_anchoring.csv\")\n", + "model = MODEL2\n", + "params = pd.read_csv(REGRESSION_VAULT / \"one_stage_anchoring.csv\")\n", "params = params.set_index([\"category\", \"period\", \"name1\", \"name2\"])\n", "\n", - "data = pd.read_stata(TEST_DIR / \"model2_simulated_data.dta\")\n", + "data = pd.read_stata(TEST_DATA_DIR / \"model2_simulated_data.dta\")\n", "data = data.set_index([\"caseid\", \"period\"])" ] }, { "cell_type": "markdown", - "id": "4", + "id": "3", "metadata": {}, "source": [ "## Plotting one dataset of states" @@ -77,18 +66,18 @@ { "cell_type": "code", "execution_count": null, - "id": "5", + "id": "4", "metadata": {}, "outputs": [], "source": [ "kde_plots = univariate_densities(\n", - " 
model_dict=model_dict,\n", + " model_spec=model,\n", " data=data,\n", " params=params,\n", " period=1,\n", ")\n", "contour_plots = bivariate_density_contours(\n", - " model_dict=model_dict,\n", + " model_spec=model,\n", " data=data,\n", " params=params,\n", " period=1,\n", @@ -98,12 +87,12 @@ { "cell_type": "code", "execution_count": null, - "id": "6", + "id": "5", "metadata": {}, "outputs": [], "source": [ "surface_plots = bivariate_density_surfaces(\n", - " model_dict=model_dict,\n", + " model_spec=model,\n", " data=data,\n", " params=params,\n", " period=1,\n", @@ -113,7 +102,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "6", "metadata": {}, "outputs": [], "source": [ @@ -127,7 +116,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ -136,7 +125,7 @@ }, { "cell_type": "markdown", - "id": "9", + "id": "8", "metadata": {}, "source": [ "## (Outdated) Optional arguments of the plotting function\n", @@ -152,7 +141,7 @@ }, { "cell_type": "markdown", - "id": "10", + "id": "9", "metadata": {}, "source": [ "## Getting simulated datasets (with and without policy)\n", @@ -163,11 +152,11 @@ { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "10", "metadata": {}, "outputs": [], "source": [ - "sim_states = simulate_dataset(model_dict=model_dict, params=params, data=data)[\n", + "sim_states = simulate_dataset(model_spec=model, params=params, data=data)[\n", " \"anchored_states\"\n", "][\"states\"]" ] @@ -175,7 +164,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "11", "metadata": {}, "outputs": [], "source": [ @@ -188,12 +177,12 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "12", "metadata": {}, "outputs": [], "source": [ "sim_states_policy = simulate_dataset(\n", - " model_dict=model_dict,\n", + " model_spec=model,\n", " params=params,\n", " data=data,\n", " policies=policies,\n", @@ -202,7 +191,7 @@ }, { 
"cell_type": "markdown", - "id": "14", + "id": "13", "metadata": {}, "source": [ "## Plotting differences in distributions" @@ -211,19 +200,19 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "14", "metadata": {}, "outputs": [], "source": [ "kde_plots = univariate_densities(\n", - " model_dict=model_dict,\n", + " model_spec=model,\n", " states={\"baseline\": sim_states, \"subsidy\": sim_states_policy},\n", " data=data,\n", " params=params,\n", " period=1,\n", ")\n", "contour_plots = bivariate_density_contours(\n", - " model_dict=model_dict,\n", + " model_spec=model,\n", " states={\"baseline\": sim_states, \"subsidy\": sim_states_policy},\n", " data=data,\n", " params=params,\n", @@ -234,7 +223,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -244,7 +233,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -253,7 +242,7 @@ }, { "cell_type": "markdown", - "id": "18", + "id": "17", "metadata": {}, "source": [ "All the optional arguments stay the same. The only difference ist that 3d plots do not work for several datasets." 
@@ -261,7 +250,7 @@ }, { "cell_type": "markdown", - "id": "19", + "id": "18", "metadata": {}, "source": [ "# Plotting with observed factors" @@ -270,50 +259,51 @@ { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "19", "metadata": {}, "outputs": [], "source": [ - "model_dict[\"observed_factors\"] = [\"obs1\"]" + "model = MODEL2.with_added_observed_factors(\"obs1\")" ] }, { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "20", "metadata": {}, "outputs": [], "source": [ - "data[\"obs1\"] = np.random.rand(data.shape[0])" + "rng = np.random.default_rng(42)\n", + "data[\"obs1\"] = rng.random(data.shape[0])" ] }, { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "21", "metadata": {}, "outputs": [], "source": [ - "params = get_maximization_inputs(model_dict=model_dict, data=data)[\"params_template\"]\n", + "params = get_maximization_inputs(model_spec=model, data=data)[\"params_template\"]\n", "params[\"value\"] = 0.1" ] }, { "cell_type": "code", "execution_count": null, - "id": "23", + "id": "22", "metadata": {}, "outputs": [], "source": [ "kde_plots = univariate_densities(\n", - " model_dict=model_dict,\n", + " model_spec=model,\n", " data=data,\n", " params=params,\n", " period=1,\n", " observed_factors=True,\n", ")\n", "contour_plots = bivariate_density_contours(\n", - " model_dict=model_dict,\n", + " model_spec=model,\n", " data=data,\n", " params=params,\n", " period=1,\n", @@ -324,7 +314,7 @@ { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "23", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/source/how_to_guides/how_to_visualize_transition_equations.ipynb b/docs/how_to_guides/how_to_visualize_transition_equations.ipynb similarity index 94% rename from docs/source/how_to_guides/how_to_visualize_transition_equations.ipynb rename to docs/how_to_guides/how_to_visualize_transition_equations.ipynb index f67e28b4..d4581df2 100644 --- 
a/docs/source/how_to_guides/how_to_visualize_transition_equations.ipynb +++ b/docs/how_to_guides/how_to_visualize_transition_equations.ipynb @@ -8,9 +8,9 @@ "outputs": [], "source": [ "import pandas as pd\n", - "import yaml\n", "\n", - "from skillmodels.config import TEST_DIR\n", + "from skillmodels.config import REGRESSION_VAULT, TEST_DATA_DIR\n", + "from skillmodels.test_data.model2 import MODEL2\n", "from skillmodels.visualize_transition_equations import (\n", " combine_transition_plots,\n", " get_transition_plots,\n", @@ -47,13 +47,12 @@ "metadata": {}, "outputs": [], "source": [ - "with open(TEST_DIR / \"model2.yaml\") as y:\n", - " model_dict = yaml.load(y, Loader=yaml.FullLoader)\n", + "model = MODEL2\n", "\n", - "params = pd.read_csv(TEST_DIR / \"regression_vault\" / \"one_stage_anchoring.csv\")\n", + "params = pd.read_csv(REGRESSION_VAULT / \"one_stage_anchoring.csv\")\n", "params = params.set_index([\"category\", \"period\", \"name1\", \"name2\"])\n", "\n", - "data = pd.read_stata(TEST_DIR / \"model2_simulated_data.dta\")\n", + "data = pd.read_stata(TEST_DATA_DIR / \"model2_simulated_data.dta\")\n", "data = data.set_index([\"caseid\", \"period\"])" ] }, @@ -73,7 +72,7 @@ "outputs": [], "source": [ "subplots = get_transition_plots(\n", - " model_dict=model_dict,\n", + " model_spec=model,\n", " params=params,\n", " data=data,\n", " period=0,\n", @@ -109,7 +108,7 @@ "outputs": [], "source": [ "subplots = get_transition_plots(\n", - " model_dict=model_dict,\n", + " model_spec=model,\n", " params=params,\n", " data=data,\n", " period=0,\n", @@ -145,7 +144,7 @@ "outputs": [], "source": [ "subplots = get_transition_plots(\n", - " model_dict=model_dict,\n", + " model_spec=model,\n", " params=params,\n", " data=data,\n", " period=1,\n", diff --git a/docs/how_to_guides/model_specs.md b/docs/how_to_guides/model_specs.md new file mode 100644 index 00000000..39680e14 --- /dev/null +++ b/docs/how_to_guides/model_specs.md @@ -0,0 +1,123 @@ +# Model Specifications + 
+Models are specified using Python dataclasses. + +## Defining a Model + +```python +from skillmodels import ( + AnchoringSpec, + EstimationOptionsSpec, + FactorSpec, + ModelSpec, + Normalizations, +) + +# Define factors +fac1 = FactorSpec( + measurements=( + ("y1", "y2", "y3"), # period 0 + ("y1", "y2", "y3"), # period 1 + # ... + ), + normalizations=Normalizations( + loadings=( + {"y1": 1.0}, # fix loading of y1 to 1 in period 0 + {}, + ), + intercepts=({}, {}), + ), + transition_function="log_ces", +) + +# Create model +model = ModelSpec( + factors={"fac1": fac1, "fac2": fac2, "fac3": fac3}, + anchoring=AnchoringSpec( + outcomes={"fac1": "Q1"}, + free_loadings=True, + ), + controls=("x1", "x2"), + stagemap=(0, 0, 1, 1, 2, 2, 3), + estimation_options=EstimationOptionsSpec(), +) +``` + +## Factor Specification + +Each factor requires: + +- **measurements**: A nested tuple with measurement variable names for each period. + Empty tuples indicate no measurements in that period. +- **transition_function**: Name of a transition function (`linear`, `log_ces`, + `constant`, `translog`) or a custom function. +- **normalizations** (optional): Fixed values for loadings and intercepts to identify + the model. + +## Anchoring + +Anchoring links latent factors to observable outcomes. Options: + +- **outcomes**: Dictionary mapping factor names to anchoring outcome variables +- **free_controls**: Whether to estimate control coefficients in anchoring equations + (default: false) +- **free_constant**: Whether to estimate a constant in anchoring equations + (default: false) +- **free_loadings**: Whether to estimate loadings in anchoring equations + (default: false) +- **ignore_constant_when_anchoring**: Skip constant when anchoring (default: false) + +## Controls + +A tuple of variable names used as control variables in measurement equations. A constant +is always included automatically. + +## Stagemap + +Maps periods to development stages. 
Has one entry less than the number of periods. +Parameters are constrained to be equal within a stage. + +Example: `(0, 0, 1, 1)` means periods 0-1 share stage 0 parameters, and periods 2-3 +share stage 1 parameters. + +## Observed Factors + +Variables in the dataset that represent observed (not latent) factors. These don't need +transition equations or multiple measurements. + +```python +model = ModelSpec( + factors={...}, + observed_factors=("income", "treatment"), +) +``` + +## Estimation Options + +Fine-tune the estimation: + +- **sigma_points_scale**: Scaling for Julier sigma points (default: 2) +- **robust_bounds**: Make bounds stricter to avoid numerical issues (default: true) +- **bounds_distance**: How much stricter to make bounds (default: 0.001) +- **clipping_lower_bound**: Clip log-likelihood from below (default: -1e250) +- **clipping_upper_bound**: Clip log-likelihood from above (default: null) +- **clipping_lower_hardness**: Hardness of lower clipping (default: 1) +- **clipping_upper_hardness**: Hardness of upper clipping (default: 1) + +## Custom Transition Functions + +Define custom transition equations using the `@register_params` decorator: + +```python +from skillmodels.decorators import register_params + +@register_params(params=["lincoeff"]) +def my_linear(fac, params): + return params["lincoeff"] * fac +``` + +Custom functions must: +- Accept `params` as a required argument (dictionary with registered parameters) +- Accept factor values as floats or use `states` for a JAX array of all states +- Return a float +- Be JAX jit and vmap compatible diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 00000000..7ad49e93 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,52 @@ +# skillmodels + +Welcome to skillmodels, a Python implementation of estimators for nonlinear dynamic +latent factor models. 
The package implements the Kalman filter-based maximum likelihood +estimator proposed by Cunha, Heckman and Schennach +([Econometrica 2010](http://onlinelibrary.wiley.com/doi/10.3982/ECTA6551/abstract)). + +## Overview + +Skillmodels was developed for skill formation models but can be applied to any dynamic +nonlinear latent factor model. Key features: + +- **Kalman filter estimation**: Uses square-root implementations for numerical stability +- **Flexible model specification**: Define models using Python dataclasses or dictionaries +- **JAX-powered**: Automatic differentiation and JIT compilation for fast optimization +- **GPU support**: Optional CUDA acceleration + +## Public API + +The main package exports three functions: + +- `get_maximization_inputs()`: Prepare optimization problem for parameter estimation +- `get_filtered_states()`: Extract filtered latent factor estimates +- `simulate_dataset()`: Generate synthetic data from model specification + +And dataclasses for model specification: + +- `ModelSpec`: Main model specification container +- `FactorSpec`: Specification for individual factors +- `AnchoringSpec`: Anchoring settings +- `EstimationOptionsSpec`: Options for estimation +- `Normalizations`: Normalization settings for loadings and intercepts + +## Implementation Notes + +The CHS estimator implemented here differs from the original +[replication files](https://tinyurl.com/yyuq2sa4) in two ways: + +1. Uses different normalizations that account for the + [critique](https://tinyurl.com/y3wl43kz) of Wiswall and Agostinelli +2. Uses robust square-root implementations of the Kalman filters + +## Citation + +If you find skillmodels helpful for research, please cite it. See the +[GitHub repository](https://github.com/OpenSourceEconomics/skillmodels) for citation +information. + +## Feedback + +If you encounter any problems or have suggestions, please open an issue on +[GitHub](https://github.com/OpenSourceEconomics/skillmodels/issues). 
diff --git a/docs/make.bat b/docs/make.bat deleted file mode 100644 index efa432b9..00000000 --- a/docs/make.bat +++ /dev/null @@ -1,263 +0,0 @@ -@ECHO OFF - -REM Command file for Sphinx documentation - -if "%SPHINXBUILD%" == "" ( - set SPHINXBUILD=sphinx-build -) -set BUILDDIR=build -set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source -set I18NSPHINXOPTS=%SPHINXOPTS% source -if NOT "%PAPER%" == "" ( - set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% - set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% -) - -if "%1" == "" goto help - -if "%1" == "help" ( - :help - echo.Please use `make ^` where ^ is one of - echo. html to make standalone HTML files - echo. dirhtml to make HTML files named index.html in directories - echo. singlehtml to make a single large HTML file - echo. pickle to make pickle files - echo. json to make JSON files - echo. htmlhelp to make HTML files and a HTML help project - echo. qthelp to make HTML files and a qthelp project - echo. devhelp to make HTML files and a Devhelp project - echo. epub to make an epub - echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter - echo. text to make text files - echo. man to make manual pages - echo. texinfo to make Texinfo files - echo. gettext to make PO message catalogs - echo. changes to make an overview over all changed/added/deprecated items - echo. xml to make Docutils-native XML files - echo. pseudoxml to make pseudoxml-XML files for display purposes - echo. linkcheck to check all external links for integrity - echo. doctest to run all doctests embedded in the documentation if enabled - echo. 
coverage to run coverage check of the documentation if enabled - goto end -) - -if "%1" == "clean" ( - for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i - del /q /s %BUILDDIR%\* - goto end -) - - -REM Check if sphinx-build is available and fallback to Python version if any -%SPHINXBUILD% 1>NUL 2>NUL -if errorlevel 9009 goto sphinx_python -goto sphinx_ok - -:sphinx_python - -set SPHINXBUILD=python -m sphinx.__init__ -%SPHINXBUILD% 2> nul -if errorlevel 9009 ( - echo. - echo.The 'sphinx-build' command was not found. Make sure you have Sphinx - echo.installed, then set the SPHINXBUILD environment variable to point - echo.to the full path of the 'sphinx-build' executable. Alternatively you - echo.may add the Sphinx directory to PATH. - echo. - echo.If you don't have Sphinx installed, grab it from - echo.http://sphinx-doc.org/ - exit /b 1 -) - -:sphinx_ok - - -if "%1" == "html" ( - %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/html. - goto end -) - -if "%1" == "dirhtml" ( - %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. - goto end -) - -if "%1" == "singlehtml" ( - %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. - goto end -) - -if "%1" == "pickle" ( - %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can process the pickle files. - goto end -) - -if "%1" == "json" ( - %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can process the JSON files. - goto end -) - -if "%1" == "htmlhelp" ( - %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp - if errorlevel 1 exit /b 1 - echo. 
- echo.Build finished; now you can run HTML Help Workshop with the ^ -.hhp project file in %BUILDDIR%/htmlhelp. - goto end -) - -if "%1" == "qthelp" ( - %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can run "qcollectiongenerator" with the ^ -.qhcp project file in %BUILDDIR%/qthelp, like this: - echo.^> qcollectiongenerator %BUILDDIR%\qthelp\skillmodels.qhcp - echo.To view the help file: - echo.^> assistant -collectionFile %BUILDDIR%\qthelp\skillmodels.ghc - goto end -) - -if "%1" == "devhelp" ( - %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. - goto end -) - -if "%1" == "epub" ( - %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The epub file is in %BUILDDIR%/epub. - goto end -) - -if "%1" == "latex" ( - %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. - goto end -) - -if "%1" == "latexpdf" ( - %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex - cd %BUILDDIR%/latex - make all-pdf - cd %~dp0 - echo. - echo.Build finished; the PDF files are in %BUILDDIR%/latex. - goto end -) - -if "%1" == "latexpdfja" ( - %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex - cd %BUILDDIR%/latex - make all-pdf-ja - cd %~dp0 - echo. - echo.Build finished; the PDF files are in %BUILDDIR%/latex. - goto end -) - -if "%1" == "text" ( - %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The text files are in %BUILDDIR%/text. - goto end -) - -if "%1" == "man" ( - %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The manual pages are in %BUILDDIR%/man. 
- goto end -) - -if "%1" == "texinfo" ( - %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. - goto end -) - -if "%1" == "gettext" ( - %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The message catalogs are in %BUILDDIR%/locale. - goto end -) - -if "%1" == "changes" ( - %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes - if errorlevel 1 exit /b 1 - echo. - echo.The overview file is in %BUILDDIR%/changes. - goto end -) - -if "%1" == "linkcheck" ( - %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck - if errorlevel 1 exit /b 1 - echo. - echo.Link check complete; look for any errors in the above output ^ -or in %BUILDDIR%/linkcheck/output.txt. - goto end -) - -if "%1" == "doctest" ( - %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest - if errorlevel 1 exit /b 1 - echo. - echo.Testing of doctests in the sources finished, look at the ^ -results in %BUILDDIR%/doctest/output.txt. - goto end -) - -if "%1" == "coverage" ( - %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage - if errorlevel 1 exit /b 1 - echo. - echo.Testing of coverage in the sources finished, look at the ^ -results in %BUILDDIR%/coverage/python.txt. - goto end -) - -if "%1" == "xml" ( - %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The XML files are in %BUILDDIR%/xml. - goto end -) - -if "%1" == "pseudoxml" ( - %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. 
- goto end -) - -:end diff --git a/docs/myst.yml b/docs/myst.yml new file mode 100644 index 00000000..90508429 --- /dev/null +++ b/docs/myst.yml @@ -0,0 +1,53 @@ +--- +# Jupyter Book 2.0 configuration +# See: https://jupyterbook.org/ +version: 1 +project: + id: skillmodels + title: skillmodels + description: >- + Python implementation of estimators for nonlinear dynamic latent factor models, + primarily used for skill formation research in economics. + authors: + - name: Janos Gabler + email: janos.gabler@gmail.com + - name: Hans-Martin von Gaudecker + email: hmgaudecker@uni-bonn.de + keywords: + - skill formation + - latent factor models + - kalman filter + - economics + - econometrics + - python + github: https://github.com/OpenSourceEconomics/skillmodels + jupyter: true + execute: + enabled: true + toc: + - file: index.md + - title: Getting Started + children: + - file: getting_started/tutorial.ipynb + - title: How-to Guides + children: + - file: how_to_guides/model_specs.md + - file: how_to_guides/how_to_simulate_dataset.ipynb + - file: how_to_guides/how_to_visualize_transition_equations.ipynb + - file: how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb + - file: how_to_guides/how_to_visualize_correlations.ipynb + - title: Explanations + children: + - file: explanations/names_and_concepts.md + - file: explanations/notes_on_factor_scales.md + - title: Reference Guides + children: + - file: reference_guides/transition_functions.md + - file: reference_guides/endogeneity_corrections.md + error_rules: + - rule: link-resolves + severity: ignore +site: + template: book-theme + options: + logo_text: skillmodels diff --git a/docs/reference_guides/endogeneity_corrections.md b/docs/reference_guides/endogeneity_corrections.md new file mode 100644 index 00000000..11ad21c2 --- /dev/null +++ b/docs/reference_guides/endogeneity_corrections.md @@ -0,0 +1,50 @@ +# Endogeneity Corrections + +This page discusses endogeneity correction methods from the CHS paper and 
their +limitations. Note that skillmodels does not currently implement these methods—this is +background for users considering extensions. + +## CHS Methods + +CHS use two endogeneity correction methods, both requiring strong assumptions about +factor scales. + +### Time-Invariant Heterogeneity (Section 4.2.4) + +This method adds a time-invariant individual fixed effect. The assumption of time +invariance is only valid if factor scales remain constant throughout the model. + +**Requirements:** +- Age-invariant measurements for normalization in all periods for all factors +- Three adult outcomes +- Constant factor scales (highly unlikely with KLS transition functions) + +If your dataset meets these requirements, consider using the original CHS Fortran code. + +### Time-Varying Heterogeneity (Section 4.2.5) + +This method uses heterogeneity that follows an AR(1) process. It also relies on: +- Constant factor scales +- A time-invariant investment equation +- Exclusion restrictions (e.g., income affects investment but not skill transitions) + +To adapt this for models with changing factor scales, you would need: +- A linear transition function with period-specific parameters (instead of AR(1)) +- Period-specific investment functions + +Identification of such a model is an open question. + +## Wiswall-Agostinelli Approach + +Wiswall and Agostinelli propose a simpler endogeneity model (Section 6.1.2 of their +[paper](https://tinyurl.com/y5ezloh2)) that could work with both the CHS and WA +estimators. + +## Implementation Status + +None of these correction methods are currently implemented in skillmodels. Users +interested in endogeneity corrections should consider: + +1. The Wiswall-Agostinelli approach as a starting point +2. The original CHS Fortran code for their specific methods +3. 
Contributing an implementation to skillmodels diff --git a/docs/reference_guides/transition_functions.md b/docs/reference_guides/transition_functions.md new file mode 100644 index 00000000..63d3d775 --- /dev/null +++ b/docs/reference_guides/transition_functions.md @@ -0,0 +1,128 @@ +# Transition Functions + +Transition functions describe how latent factors evolve over time. skillmodels provides +several pre-built functions and supports custom functions. + +## Pre-built Transition Functions + +### linear + +Linear transition function with a constant term: + +$$ +f_{t+1} = \sum_j \beta_j \cdot s_j + c +$$ + +where $s_j$ are the state values and $c$ is a constant. + +**Parameters**: One coefficient per factor plus a constant. + +### translog + +Linear-in-parameters function with squares and interaction terms: + +$$ +f_{t+1} = \sum_j \beta_j s_j + \sum_j \gamma_j s_j^2 + \sum_{j < k} \delta_{jk} s_j s_k + c +$$ + +Despite the name (convention in skill formation literature), this is not a true +translog function. + +**Parameters**: Linear terms, squared terms, interaction terms, and constant. + +### robust_translog + +Same as `translog` but clips state values at ±10^12 before computation. Use this when +states might grow very large and cause numerical overflow. + +### linear_and_squares + +Like `translog` but without interaction terms: + +$$ +f_{t+1} = \sum_j \beta_j s_j + \sum_j \gamma_j s_j^2 + c +$$ + +### log_ces + +Log CES (Constant Elasticity of Substitution) in the Known Location and Scale version: + +$$ +f_{t+1} = \frac{1}{\phi} \ln\left(\sum_j \gamma_j e^{\phi \cdot s_j}\right) +$$ + +This is a KLS function—see [Notes on Factor Scales](../explanations/notes_on_factor_scales.md) +for implications. + +**Parameters**: One weight $\gamma_j$ per factor (constrained to sum to 1) plus $\phi$. 
+ +### log_ces_general + +Generalized log CES without known location and scale: + +$$ +f_{t+1} = \text{tfp} \cdot \ln\left(\sum_j \gamma_j e^{\sigma_j \cdot s_j}\right) +$$ + +**Parameters**: Weights $\gamma_j$, factor-specific elasticities $\sigma_j$, and total +factor productivity. + +### constant + +The factor value does not change: + +$$ +f_{t+1} = f_t +$$ + +**Parameters**: None. + +## Custom Transition Functions + +Define custom functions using the `@register_params` decorator: + +```python +from skillmodels.decorators import register_params + +@register_params(params=["alpha", "beta"]) +def my_transition(fac1, fac2, params): + return params["alpha"] * fac1 + params["beta"] * fac2**2 +``` + +### Requirements + +Custom transition functions must: + +1. Accept `params` as a mandatory argument (dictionary with registered parameter names) +2. Accept factor values as floats or use `states` for a JAX array of all factors +3. Return a float (or scalar JAX array) +4. Be JAX jit and vmap compatible (no Python control flow on state values) + +### Using Custom Functions + +```python +from skillmodels import FactorSpec + +factor = FactorSpec( + measurements=[...], + transition_function=my_transition, # Pass the function object +) +``` + +Or with a dictionary-based model: + +```python +model["factors"]["fac1"]["transition_function"] = my_transition +``` + +### Advanced: Accessing All States + +If your transition function needs access to all states at once: + +```python +@register_params(params=["weights"]) +def weighted_sum(states, params): + return jnp.dot(states, params["weights"]) +``` + +The `states` argument is a 1D JAX array with all factor values in order.
diff --git a/docs/source/chs_test_params.csv b/docs/source/chs_test_params.csv deleted file mode 100644 index ea204de7..00000000 --- a/docs/source/chs_test_params.csv +++ /dev/null @@ -1,209 +0,0 @@ -category,period,name1,name2,lower,upper,chs_value,good_start_value,bad_start_value -control_coeffs,0,y1,constant,-inf,inf,1.00161847,1.0,0.0 -control_coeffs,0,y1,x1,-inf,inf,1.00545482,1.0,0.0 -control_coeffs,0,y2,constant,-inf,inf,1.03143922,1.0,0.0 -control_coeffs,0,y2,x1,-inf,inf,0.97599155,1.0,0.0 -control_coeffs,0,y3,constant,-inf,inf,0.99409082,1.0,0.0 -control_coeffs,0,y3,x1,-inf,inf,0.99413941,1.0,0.0 -control_coeffs,0,y4,constant,-inf,inf,1.01833579,1.0,0.0 -control_coeffs,0,y4,x1,-inf,inf,1.00601018,1.0,0.0 -control_coeffs,0,y5,constant,-inf,inf,1.02354619,1.0,0.0 -control_coeffs,0,y5,x1,-inf,inf,1.00099227,1.0,0.0 -control_coeffs,0,y6,constant,-inf,inf,1.04884144,1.0,0.0 -control_coeffs,0,y6,x1,-inf,inf,0.96857115,1.0,0.0 -control_coeffs,0,y7,constant,-inf,inf,1.04176152,1.0,0.0 -control_coeffs,0,y7,x1,-inf,inf,0.97482723,1.0,0.0 -control_coeffs,0,y8,constant,-inf,inf,1.00566791,1.0,0.0 -control_coeffs,0,y8,x1,-inf,inf,0.98188173,1.0,0.0 -control_coeffs,0,y9,constant,-inf,inf,1.04848607,1.0,0.0 -control_coeffs,0,y9,x1,-inf,inf,0.92514421,0.9,0.0 -control_coeffs,1,y1,constant,-inf,inf,0.9565773999999999,1.0,0.0 -control_coeffs,1,y1,x1,-inf,inf,1.0528786,1.1,0.0 -control_coeffs,1,y2,constant,-inf,inf,0.94514768,0.9,0.0 -control_coeffs,1,y2,x1,-inf,inf,1.10002752,1.1,0.0 -control_coeffs,1,y3,constant,-inf,inf,0.94921197,0.9,0.0 -control_coeffs,1,y3,x1,-inf,inf,1.11220924,1.1,0.0 -control_coeffs,1,y4,constant,-inf,inf,0.97927901,1.0,0.0 -control_coeffs,1,y4,x1,-inf,inf,1.05321768,1.1,0.0 -control_coeffs,1,y5,constant,-inf,inf,0.95632487,1.0,0.0 -control_coeffs,1,y5,x1,-inf,inf,1.07984824,1.1,0.0 -control_coeffs,1,y6,constant,-inf,inf,0.98720524,1.0,0.0 -control_coeffs,1,y6,x1,-inf,inf,1.04155956,1.0,0.0 -control_coeffs,2,y1,constant,-inf,inf,0.98278806,1.0,0.0 
-control_coeffs,2,y1,x1,-inf,inf,1.01759386,1.0,0.0 -control_coeffs,2,y2,constant,-inf,inf,1.00015093,1.0,0.0 -control_coeffs,2,y2,x1,-inf,inf,0.99449381,1.0,0.0 -control_coeffs,2,y3,constant,-inf,inf,0.93918533,0.9,0.0 -control_coeffs,2,y3,x1,-inf,inf,1.04145063,1.0,0.0 -control_coeffs,2,y4,constant,-inf,inf,1.00539897,1.0,0.0 -control_coeffs,2,y4,x1,-inf,inf,0.98808056,1.0,0.0 -control_coeffs,2,y5,constant,-inf,inf,1.03995916,1.0,0.0 -control_coeffs,2,y5,x1,-inf,inf,0.93727871,0.9,0.0 -control_coeffs,2,y6,constant,-inf,inf,1.02370063,1.0,0.0 -control_coeffs,2,y6,x1,-inf,inf,0.97863335,1.0,0.0 -control_coeffs,3,y1,constant,-inf,inf,0.95263385,1.0,0.0 -control_coeffs,3,y1,x1,-inf,inf,1.07747808,1.1,0.0 -control_coeffs,3,y2,constant,-inf,inf,0.97511705,1.0,0.0 -control_coeffs,3,y2,x1,-inf,inf,1.01595775,1.0,0.0 -control_coeffs,3,y3,constant,-inf,inf,0.99671239,1.0,0.0 -control_coeffs,3,y3,x1,-inf,inf,1.00409134,1.0,0.0 -control_coeffs,3,y4,constant,-inf,inf,0.97463783,1.0,0.0 -control_coeffs,3,y4,x1,-inf,inf,1.00265983,1.0,0.0 -control_coeffs,3,y5,constant,-inf,inf,1.00354587,1.0,0.0 -control_coeffs,3,y5,x1,-inf,inf,0.98936892,1.0,0.0 -control_coeffs,3,y6,constant,-inf,inf,1.00220065,1.0,0.0 -control_coeffs,3,y6,x1,-inf,inf,1.01816115,1.0,0.0 -control_coeffs,4,y1,constant,-inf,inf,1.01871361,1.0,0.0 -control_coeffs,4,y1,x1,-inf,inf,0.97390947,1.0,0.0 -control_coeffs,4,y2,constant,-inf,inf,0.96884594,1.0,0.0 -control_coeffs,4,y2,x1,-inf,inf,1.01276643,1.0,0.0 -control_coeffs,4,y3,constant,-inf,inf,0.96348822,1.0,0.0 -control_coeffs,4,y3,x1,-inf,inf,1.0639416000000002,1.1,0.0 -control_coeffs,4,y4,constant,-inf,inf,0.97249741,1.0,0.0 -control_coeffs,4,y4,x1,-inf,inf,1.05030944,1.1,0.0 -control_coeffs,4,y5,constant,-inf,inf,1.01263275,1.0,0.0 -control_coeffs,4,y5,x1,-inf,inf,0.95867367,1.0,0.0 -control_coeffs,4,y6,constant,-inf,inf,0.97561054,1.0,0.0 -control_coeffs,4,y6,x1,-inf,inf,1.02067436,1.0,0.0 -control_coeffs,5,y1,constant,-inf,inf,1.02885338,1.0,0.0 
-control_coeffs,5,y1,x1,-inf,inf,0.92274679,0.9,0.0 -control_coeffs,5,y2,constant,-inf,inf,0.99430379,1.0,0.0 -control_coeffs,5,y2,x1,-inf,inf,0.97278707,1.0,0.0 -control_coeffs,5,y3,constant,-inf,inf,0.97922354,1.0,0.0 -control_coeffs,5,y3,x1,-inf,inf,1.02364392,1.0,0.0 -control_coeffs,5,y4,constant,-inf,inf,1.01557986,1.0,0.0 -control_coeffs,5,y4,x1,-inf,inf,0.9576603000000001,1.0,0.0 -control_coeffs,5,y5,constant,-inf,inf,0.96898918,1.0,0.0 -control_coeffs,5,y5,x1,-inf,inf,1.04015694,1.0,0.0 -control_coeffs,5,y6,constant,-inf,inf,0.97658414,1.0,0.0 -control_coeffs,5,y6,x1,-inf,inf,1.00635915,1.0,0.0 -control_coeffs,6,y1,constant,-inf,inf,0.98368467,1.0,0.0 -control_coeffs,6,y1,x1,-inf,inf,0.99929141,1.0,0.0 -control_coeffs,6,y2,constant,-inf,inf,0.93183755,0.9,0.0 -control_coeffs,6,y2,x1,-inf,inf,1.04782772,1.0,0.0 -control_coeffs,6,y3,constant,-inf,inf,0.95146637,1.0,0.0 -control_coeffs,6,y3,x1,-inf,inf,1.00920751,1.0,0.0 -control_coeffs,6,y4,constant,-inf,inf,0.99101302,1.0,0.0 -control_coeffs,6,y4,x1,-inf,inf,1.05400193,1.1,0.0 -control_coeffs,6,y5,constant,-inf,inf,0.9781293,1.0,0.0 -control_coeffs,6,y5,x1,-inf,inf,1.06296891,1.1,0.0 -control_coeffs,6,y6,constant,-inf,inf,0.9949447,1.0,0.0 -control_coeffs,6,y6,x1,-inf,inf,1.01391456,1.0,0.0 -control_coeffs,7,y1,constant,-inf,inf,0.94997421,0.9,0.0 -control_coeffs,7,y1,x1,-inf,inf,1.03694603,1.0,0.0 -control_coeffs,7,y2,constant,-inf,inf,0.94683688,0.9,0.0 -control_coeffs,7,y2,x1,-inf,inf,1.05734328,1.1,0.0 -control_coeffs,7,y3,constant,-inf,inf,0.97187261,1.0,0.0 -control_coeffs,7,y3,x1,-inf,inf,1.04305552,1.0,0.0 -control_coeffs,7,y4,constant,-inf,inf,0.94043886,0.9,0.0 -control_coeffs,7,y4,x1,-inf,inf,1.06978168,1.1,0.0 -control_coeffs,7,y5,constant,-inf,inf,0.9762738999999999,1.0,0.0 -control_coeffs,7,y5,x1,-inf,inf,1.01505491,1.0,0.0 -control_coeffs,7,y6,constant,-inf,inf,0.9811178,1.0,0.0 -control_coeffs,7,y6,x1,-inf,inf,1.02936846,1.0,0.0 -control_coeffs,7,Q1,constant,-inf,inf,0.94351157,0.9,0.0 
-control_coeffs,7,Q1,x1,-inf,inf,1.03648928,1.0,0.0 -loading,0,y2,fac1,-inf,inf,1.24759799,1.2,1.0 -loading,0,y3,fac1,-inf,inf,1.55939677,1.6,1.0 -loading,0,y5,fac2,-inf,inf,0.9908065,1.0,1.0 -loading,0,y6,fac2,-inf,inf,0.68554932,0.7,1.0 -loading,0,y8,fac3,-inf,inf,1.1132403,1.1,1.0 -loading,0,y9,fac3,-inf,inf,0.70906931,0.7,1.0 -loading,1,y2,fac1,-inf,inf,1.21745308,1.2,1.0 -loading,1,y3,fac1,-inf,inf,1.34547457,1.3,1.0 -loading,1,y5,fac2,-inf,inf,0.82645675,0.8,1.0 -loading,1,y6,fac2,-inf,inf,0.60128174,0.6,1.0 -loading,2,y2,fac1,-inf,inf,1.18091396,1.2,1.0 -loading,2,y3,fac1,-inf,inf,1.41494654,1.4,1.0 -loading,2,y5,fac2,-inf,inf,0.78412623,0.8,1.0 -loading,2,y6,fac2,-inf,inf,0.6095229,0.6,1.0 -loading,3,y2,fac1,-inf,inf,1.28957256,1.3,1.0 -loading,3,y3,fac1,-inf,inf,1.42483912,1.4,1.0 -loading,3,y5,fac2,-inf,inf,0.75831051,0.8,1.0 -loading,3,y6,fac2,-inf,inf,0.60278636,0.6,1.0 -loading,4,y2,fac1,-inf,inf,1.24216652,1.2,1.0 -loading,4,y3,fac1,-inf,inf,1.4194579999999999,1.4,1.0 -loading,4,y5,fac2,-inf,inf,0.83681772,0.8,1.0 -loading,4,y6,fac2,-inf,inf,0.5249411999999999,0.5,1.0 -loading,5,y2,fac1,-inf,inf,1.20739978,1.2,1.0 -loading,5,y3,fac1,-inf,inf,1.43586124,1.4,1.0 -loading,5,y5,fac2,-inf,inf,0.76476822,0.8,1.0 -loading,5,y6,fac2,-inf,inf,0.60105183,0.6,1.0 -loading,6,y2,fac1,-inf,inf,1.22402329,1.2,1.0 -loading,6,y3,fac1,-inf,inf,1.42265974,1.4,1.0 -loading,6,y5,fac2,-inf,inf,0.68885845,0.7,1.0 -loading,6,y6,fac2,-inf,inf,0.61882325,0.6,1.0 -loading,7,y2,fac1,-inf,inf,1.23608389,1.2,1.0 -loading,7,y3,fac1,-inf,inf,1.47859872,1.5,1.0 -loading,7,y5,fac2,-inf,inf,0.81524559,0.8,1.0 -loading,7,y6,fac2,-inf,inf,0.57084593,0.6,1.0 -loading,7,Q1,fac1,-inf,inf,0.93520167,0.9,1.0 -meas_sd,0,y1,-,-inf,inf,0.50497719,0.5,1.0 -meas_sd,0,y2,-,-inf,inf,0.50088168,0.5,1.0 -meas_sd,0,y3,-,-inf,inf,0.48136282,0.5,1.0 -meas_sd,0,y4,-,-inf,inf,0.53215346,0.5,1.0 -meas_sd,0,y5,-,-inf,inf,0.47039143,0.5,1.0 -meas_sd,0,y6,-,-inf,inf,0.48344469,0.5,1.0 
-meas_sd,0,y7,-,-inf,inf,0.47943359,0.5,1.0 -meas_sd,0,y8,-,-inf,inf,0.53421227,0.5,1.0 -meas_sd,0,y9,-,-inf,inf,0.50146093,0.5,1.0 -meas_sd,1,y1,-,-inf,inf,0.49105567,0.5,1.0 -meas_sd,1,y2,-,-inf,inf,0.49870431,0.5,1.0 -meas_sd,1,y3,-,-inf,inf,0.50514084,0.5,1.0 -meas_sd,1,y4,-,-inf,inf,0.49743526,0.5,1.0 -meas_sd,1,y5,-,-inf,inf,0.49941779999999997,0.5,1.0 -meas_sd,1,y6,-,-inf,inf,0.50424182,0.5,1.0 -meas_sd,2,y1,-,-inf,inf,0.50427244,0.5,1.0 -meas_sd,2,y2,-,-inf,inf,0.51856939,0.5,1.0 -meas_sd,2,y3,-,-inf,inf,0.50392617,0.5,1.0 -meas_sd,2,y4,-,-inf,inf,0.49161026,0.5,1.0 -meas_sd,2,y5,-,-inf,inf,0.50441808,0.5,1.0 -meas_sd,2,y6,-,-inf,inf,0.48482939,0.5,1.0 -meas_sd,3,y1,-,-inf,inf,0.49476345,0.5,1.0 -meas_sd,3,y2,-,-inf,inf,0.49363682,0.5,1.0 -meas_sd,3,y3,-,-inf,inf,0.49918763,0.5,1.0 -meas_sd,3,y4,-,-inf,inf,0.49728617,0.5,1.0 -meas_sd,3,y5,-,-inf,inf,0.5116465,0.5,1.0 -meas_sd,3,y6,-,-inf,inf,0.48035036,0.5,1.0 -meas_sd,4,y1,-,-inf,inf,0.50529312,0.5,1.0 -meas_sd,4,y2,-,-inf,inf,0.50706948,0.5,1.0 -meas_sd,4,y3,-,-inf,inf,0.47849704,0.5,1.0 -meas_sd,4,y4,-,-inf,inf,0.49962829,0.5,1.0 -meas_sd,4,y5,-,-inf,inf,0.49001347,0.5,1.0 -meas_sd,4,y6,-,-inf,inf,0.48723789,0.5,1.0 -meas_sd,5,y1,-,-inf,inf,0.51551926,0.5,1.0 -meas_sd,5,y2,-,-inf,inf,0.52331776,0.5,1.0 -meas_sd,5,y3,-,-inf,inf,0.48326815,0.5,1.0 -meas_sd,5,y4,-,-inf,inf,0.47910757,0.5,1.0 -meas_sd,5,y5,-,-inf,inf,0.50327233,0.5,1.0 -meas_sd,5,y6,-,-inf,inf,0.49705186,0.5,1.0 -meas_sd,6,y1,-,-inf,inf,0.52223919,0.5,1.0 -meas_sd,6,y2,-,-inf,inf,0.48641122,0.5,1.0 -meas_sd,6,y3,-,-inf,inf,0.47597189,0.5,1.0 -meas_sd,6,y4,-,-inf,inf,0.52595048,0.5,1.0 -meas_sd,6,y5,-,-inf,inf,0.51187305,0.5,1.0 -meas_sd,6,y6,-,-inf,inf,0.52425668,0.5,1.0 -meas_sd,7,y1,-,-inf,inf,0.52163477,0.5,1.0 -meas_sd,7,y2,-,-inf,inf,0.52112353,0.5,1.0 -meas_sd,7,y3,-,-inf,inf,0.47545353,0.5,1.0 -meas_sd,7,y4,-,-inf,inf,0.5150197,0.5,1.0 -meas_sd,7,y5,-,-inf,inf,0.48993218,0.5,1.0 -meas_sd,7,y6,-,-inf,inf,0.52777721,0.5,1.0 
-meas_sd,7,Q1,-,-inf,inf,1.03957418,1.0,1.0 -shock_sd,0,fac1,-,-inf,inf,0.321936173798472,0.31622776601683794,1.0 -shock_sd,0,fac2,-,-inf,inf,0.3131064355774247,0.31622776601683794,1.0 -initial_mean,0,mixture_0,fac1,-inf,inf,0.0,0.0,0.0 -initial_mean,0,mixture_0,fac2,-inf,inf,0.0,0.0,0.0 -initial_mean,0,mixture_0,fac3,-inf,inf,0.0,0.0,0.0 -initial_cov,0,mixture_0,fac1-fac1,-inf,inf,0.17647290000000002,0.2,1.0 -initial_cov,0,mixture_0,fac2-fac1,-inf,inf,0.00524114,0.0,0.0 -initial_cov,0,mixture_0,fac2-fac2,-inf,inf,0.18362641,0.2,1.0 -initial_cov,0,mixture_0,fac3-fac1,-inf,inf,0.005665399999999999,0.0,0.0 -initial_cov,0,mixture_0,fac3-fac2,-inf,inf,-0.00067522,0.0,0.0 -initial_cov,0,mixture_0,fac3-fac3,-inf,inf,0.23194739,0.2,1.0 -trans,0,fac1,fac1,-inf,inf,0.65978837,0.7,0.4 -trans,0,fac1,fac2,-inf,inf,0.174038,0.2,0.3 -trans,0,fac1,fac3,-inf,inf,0.16617363,0.1,0.3 -trans,0,fac1,phi,-inf,inf,-0.40701787,-0.4,-0.5 -trans,0,fac2,fac2,-inf,inf,0.60887112,0.6,0.5 -trans,0,fac2,constant,-inf,inf,0.0,0.0,0.0 diff --git a/docs/source/conf.py b/docs/source/conf.py deleted file mode 100644 index 8c9b2d42..00000000 --- a/docs/source/conf.py +++ /dev/null @@ -1,153 +0,0 @@ -# -# Documentation build configuration file, created by sphinx-quickstart -# -# This file is execfile()d with the current directory set to its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. -import os -import sys - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. 
-sys.path.insert(0, os.path.abspath("../..")) - - -# -- General configuration ---------------------------------------------------- - -# If your documentation needs a minimal Sphinx, state it here. -needs_sphinx = "1.6" - -# Add any Sphinx extension module names here, as strings. -# They can be extensions coming with Sphinx (named "sphinx.ext.*") -# or your custom ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.viewcode", - "sphinx.ext.mathjax", - "sphinx.ext.napoleon", - "sphinx.ext.todo", - "nbsphinx", -] - -# Mock imports. -autodoc_mock_imports = [ - "optimagic", - "matplotlib", - "jax", - "numpy", - "pandas", - "scipy", - "filterpy", - "dags", - "plotly", -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# The suffix of source filenames. -source_suffix = ".rst" - -# The encoding of source files. -source_encoding = "utf-8" - -# The master toctree document. -master_doc = "index" - -# General information about the project. -project = "skillmodels" -copyright = "2016-2021, Janos Gabler" - -# The version info for the project you"re documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = "0.2" -# The full version, including alpha/beta/rc tags. -release = "0.2.2" - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = "" -# Else, today_fmt is used as the format for a strftime call. -today_fmt = "%d %B %Y" - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all documents. 
-# default_role = None - -# If true, "()" will be appended to :func: etc. cross-reference text. -add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = False - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -modindex_common_prefix = ["src."] - - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = "pydata_sphinx_theme" - -html_logo = "_static/images/logo.svg" - -html_theme_options = { - "github_url": "https://github.com/OpenSourceEconomics/skillmodels" -} - -html_css_files = ["css/custom.css"] - -html_sidebars = { - "**": [ - "relations.html", # needs 'show_related': True theme option to display - "searchbox.html", - ], -} - -templates_path = ["_templates"] -html_static_path = ["_static"] - - -html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = "" - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -html_file_suffix = ".html" - -# Output file base name for HTML help builder. 
-htmlhelp_basename = "somedoc" - -# Other settings - -autodoc_member_order = "bysource" -napoleon_use_rtype = False -napoleon_include_private_with_doc = False -todo_include_todos = True diff --git a/docs/source/explanations/index.rst b/docs/source/explanations/index.rst deleted file mode 100644 index 4102eda2..00000000 --- a/docs/source/explanations/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -Explanations -============ - - -.. toctree:: - :maxdepth: 1 - - names_and_concepts - notes_on_factor_scales diff --git a/docs/source/explanations/names_and_concepts.rst b/docs/source/explanations/names_and_concepts.rst deleted file mode 100644 index 0e55e298..00000000 --- a/docs/source/explanations/names_and_concepts.rst +++ /dev/null @@ -1,95 +0,0 @@ -.. _names_and_concepts: - - -================== -Names and concepts -================== - -This section contains an overview of frequently used variable names and -concepts. It's not necessary to read this section if you are only interested in -using the code, but you might want to skim it if you are interested in what the -code actually does or plan to adapt it to your use case. - -Most of those quantities are generated once during the :ref:`model_processing` -and appear as arguments of many other functions. - -.. _dimensions: - -``dimensions`` -============== - -Dimensions of the model quantities. All of them are integers. - -- n_states: Number of latent factors or states in the model. Note that the terms - state and factor are used interchangeably throughout the documentation. -- n_periods: Number of periods of the model. There is one more period than - transition equations of the model. -- n_mixtures: Number of elements in the finite mixture of normals distribution. -- n_controls: Number of control variables in the measurement equations. This - includes the intercept of the measurement equation. Thus n_controls is always - 1 or larger. - - -.. _labels: - -``labels`` -========== - -Labels for the model quantities. 
All of them are lists. - - -- factors: Names of the latent factors. -- controls: Names of the control variables. The first entry is always "constant". -- periods: List of integers, starting at zero. The indices of the periods. -- stagemap: Maps periods to stages. Has one entry less than the number of periods. -- stages: The indices of the stages of the model. - - -.. _stages_vs_periods: - - -Development-Stages vs Periods -============================= - -A development is a group of consecutive periods for which the technology of skill -formation remains the same. Thus the number of stages is always <= the number of -periods of a model. - -Thus development stages are just equality constraints on the estimated parameter -vector. Because they are very frequently used, skillmodels can generate the -constraints automatically if you specify a stagemap in your model dictionary. - - -Example: If you have a model with 5 periods you can estimate at most 4 different -production functions (one for each transition between periods). If you want to -keep the parameters of the technology of skill formation constant between two -consecutive periods, you would specify the following stagemap: ``[0, 0, 1, 1]`` - - -.. _anchoring: - -``anchoring`` -============= - - - - -.. _update_info: - - -``update_info`` -=============== - - - -.. _normalizations: - -``normalizations`` -================== - - -.. _estimation_options: - - -``estimation_options`` -====================== diff --git a/docs/source/explanations/notes_on_factor_scales.rst b/docs/source/explanations/notes_on_factor_scales.rst deleted file mode 100644 index 3dd39ead..00000000 --- a/docs/source/explanations/notes_on_factor_scales.rst +++ /dev/null @@ -1,158 +0,0 @@ -********************************** -Notes on Scales and Normalizations -********************************** - -Here I collect Notes on different aspects of the discussion about factor -scales and re-normalization. 
This discussion originates in the `critique`_ by -Wiswall and Agostinelli but I argue below, that this critique is not yet -complete. - -Wiswall and Agostinelli define a class of transition functions with Known -Location and Scale (KLS) that require less normalizations. You should read -this definition in their paper. - -The critique by Wiswall and Agostinelli potentially invalidates the empirical -estimates of CHS, but not their general estimation routine. To get estimates -that don't suffer from renormalization you can either use less normalizations -or non-KLS transition functions. As there is no natural scale of skills, none -of the approaches is better or worse. Nevertheless, I prefer using flexible -Non-KLS transition functions with one normalization per period and factor. -Firstly, because they are more compatible with using development stages that -span more than one period. Secondly, because picking suitable normalizations -might help to give the latent factors a more meaningful scale. - - -.. _KLS_not_constant: - -Why KLS functions don't keep the scales constant -************************************************ - -Skills have no natural scale, but after reading the critique paper by Wiswall -and Agostinelli one could easily get the impression that using KLS transition -functions and less normalizations is better, because it identifies some sort -of natural scale. Moreover in their `estimation`_ paper (p. 7), they write: -"We argue that our limited normalization is appropriate for the dynamic -setting of child development we analyze. With our normalization for the -initial period only, latent skills in all periods share a common location -and scale with respect to the one chosen normalizing measure." - -The following example intuitively shows firstly that the scale identified with -KLS functions is as arbitrary as a scale identified through normalizations and -secondly that this scale is not constant over time in general. 
- -The example completely abstracts from measurement and estimation problems and -thereby allows to focus essential on the aspects of the problem. - -Consider a simple model of financial investments with two latent factors: a -stock variable wealth (w) and a flow variable investment (i). Suppose periods -last one year and annual interest rate on wealth is 10 percent. New -investments are deposited at the end of the year (get interests only in the -next year). - -The most intuitive scales to describe the system would be to measure all -latent factors in all periods in the same currency, say Dollars. In this case -the transition equation of wealth is given by: - -.. math:: - - w_{t + 1} = 1.1 w_t + i_t - -However, it would also be possible to measure w in period t in Dollars, i in -period t in 1000 Dollars and w in period t + 1 in Dollar cents. The transition -equation -- that still describes the exactly same system -- is then: - -.. math:: - - w_{t + 1} = 110 w_t + 100000 i_t - -The parameters now reflect the actual technology and scale changes between -periods. They are much harder to interpret than before. In fact any linear -function - -.. math:: - - f: \mathbb{R}^2 \rightarrow \mathbb{R} - -could describe the example system -- just in different combinations of scales. - -When latent factor models are estimated, the scales of each factor are usually -set through normalizations in each period. The main point of the first paper -by Wiswall and Agostinelli is that a KLS transition function prohibits to make -such normalizations except for the initial period. One could say that after -that, the transition function chooses the scale. - -The CES function has KLS and contains the subset of all linear functions -without intercept whose parameters sum to 1 as special cases. It can therefore -be used to describe the example system. 
After setting the scale of both -factors to Dollars in the initial period, the CES function would then choose -the scales for all other periods. - -The linear function that is a CES function and describes the system is: - -.. math:: - w_{t + 1} = \frac{1}{2.1} (1.1 w_t + i_t) \approx 0.524 w_t + 0.476 i_t - -The scale of w in period t + 1 chosen by this function is thus 1 / 2.1 or -approximately 0.476 Dollars which means that wealth in period t + 1 is -approximately measured in 100 Philippine Pesos. - - -.. _log_ces_problem: - -Why the CES and log_CES functions are problematic -************************************************* - -The definition of Known Location and Scale refers only to the scale of the -(always one-dimensional) output of a transition function. After reading the -Wiswall and Agostinelli critique, I wondered if the CES and log_CES functions -also pose restrictions on the scales of their inputs, i.e. can describe a system -only at a certain location or scale of inputs. - -According to Wiswall and Agostinelli, when using a log_CES function (which -belongs to the KLS class), one needs initial normalizations of location and -scale for all factors in the model. I made some pen-and-paper-calculations and -estimated models with simulated data and the results suggest that less -normalizations are needed with the log_CES function. - -While one does need to make initial normalizations for the location of all -factors, it is sufficient to normalize the scale of only one factor in the -initial period and the model is still identified. However, these are only -simulations and I do not have a formal result that shows that the restrictions -the log_CES function poses on the scale of its inputs are always enough for -identification. - -I would therefore currently advise not to use the CES or log_CES function -without thinking deeply about the normalizations you need. The automatic -generation of normalizations treats the log_ces function simply as a KLS -function. 
- - -.. _normalization_and_stages: - -Normalizations and Development stages -************************************* - -CHS use development stages, i.e. several periods of childhood in which the -parameters of the technology of skill formation remain the same. Wiswall and -Agostinelli do not use or analyze this case, but development stages do change -the normalization requirements. - -I always had the intuition that with development stages it is possible to -identify a scale from the first period of the stage, such that no later -normalizations are necessary until the next stage. When extending the WA -estimator to be compatible with development stages, I could confirm this -intuition as one nice feature of this estimator is that its identification -strategy has to be very explicit. - -If development stages are used, one only has to make normalizations in the first -period of each stage, except for the initial stage where the first two periods -have to be normalized. My recommendation is to use automatic normalizations if -you use development stages because it is very easy to get confused. - -This shows another type of over-normalization in the original CHS paper. - -.. _critique: - https://tinyurl.com/y3wl43kz - -.. _estimation: - https://tinyurl.com/y5ezloh2 diff --git a/docs/source/getting_started/index.rst b/docs/source/getting_started/index.rst deleted file mode 100644 index a70e7102..00000000 --- a/docs/source/getting_started/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -Getting Started -=============== - -.. 
toctree:: - :maxdepth: 1 - - tutorial.ipynb diff --git a/docs/source/getting_started/tutorial.ipynb b/docs/source/getting_started/tutorial.ipynb deleted file mode 100644 index 15906e69..00000000 --- a/docs/source/getting_started/tutorial.ipynb +++ /dev/null @@ -1,384 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Skillmodels Quickstart" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from time import time\n", - "\n", - "import optimagic as om\n", - "import pandas as pd\n", - "import yaml\n", - "\n", - "from skillmodels.config import TEST_DIR\n", - "from skillmodels.maximization_inputs import get_maximization_inputs" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Loading Model Specification and Data\n", - "\n", - "Model specifications are python dictionaries that can be safed in yaml or json files. For a moment, just assume you know how to write a model specification and have a skillmodels compatible dataset. Both are \n", - "explained in different tutorials.\n", - "\n", - "Next we load the model specification and the dataset. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "with open(TEST_DIR / \"model2.yaml\") as y:\n", - " model_dict = yaml.load(y, Loader=yaml.SafeLoader)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "data = pd.read_stata(TEST_DIR / \"model2_simulated_data.dta\")\n", - "data = data.set_index([\"caseid\", \"period\"])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Getting the inputs for ``optimagic.maximize``\n", - "\n", - "Skillmodels basically just has one public function called ``get_maximization_inputs``. 
When called with a model specification and a dataset it contains a dictionary with everything you need to maximize the likelihood function using optimagic. \n", - "\n", - "By everything you need I mean everything model-specific. You should still use the optional arguments of ``maximize`` to tune the optimization." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "max_inputs = get_maximization_inputs(model_dict, data)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Filling the Params Template\n", - "\n", - "Often you can greatly reduce estimation time by choosing good start parameters. What are good start parameters depends strongly on the model specifications, the scaling of your variables and the normalizations you make. \n", - "\n", - "If you have strong difficulties to pick good start values, you probably want to think again about the interpretability of your model parameters and possibly change the normalizations and scaling of your \n", - "measurements. \n", - "\n", - "As a rule of thumb: If all measurements are standardized and, all fixed loadings are 1 and all fixed intercepts are 0 then one is a good start value for all free loadings and 0 is a good start value for all free intercepts. \n", - "\n", - "Measurement and shock standard deviations are better started slightly larger than you would expect them. \n", - "\n", - "Below I just load start parameters for the CHS example model that I filled out manually. 
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params_template = max_inputs[\"params_template\"]\n", - "params_template.head()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "index_cols = [\"category\", \"period\", \"name1\", \"name2\"]\n", - "chs_path = TEST_DIR / \"regression_vault\" / \"chs_results.csv\"\n", - "chs_values = pd.read_csv(chs_path)\n", - "chs_values = chs_values.set_index(index_cols)\n", - "chs_values = chs_values[[\"chs_value\", \"good_start_value\", \"bad_start_value\"]]\n", - "chs_values.head()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params = params_template.copy()\n", - "params[\"value\"] = chs_values[\"chs_value\"]\n", - "params.head()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Time compilation speed\n", - "\n", - "Skillmodels uses jax to just-in-time compile the numerical code and get a gradient of the likelihood function by automatic differentiation. \n", - "\n", - "There are several versions of the log likelihood function and its gradient:\n", - "\n", - "- **debug_loglike**: Is not compiled, can be debugged with a debugger, returns a lot of intermediate outputs and is slow. \n", - "- **loglike**: Is compiled and fast but does not return intermediate outputs\n", - "- **gradient**: Is compiled and fast, returns the gradient of loglike\n", - "- **loglike_and_gradient**: Is compiled and fast and exploits synergies between loglike and gradient calculation. This is the most important one for estimation. 
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "debug_loglike = max_inputs[\"debug_loglike\"]\n", - "loglike = max_inputs[\"loglike\"]\n", - "gradient = max_inputs[\"gradient\"]\n", - "loglike_and_gradient = max_inputs[\"loglike_and_gradient\"]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "start = time()\n", - "debug_loglike_value = debug_loglike(params)\n", - "print(time() - start)\n", - "debug_loglike_value" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "start = time()\n", - "loglike_value = loglike(params)\n", - "print(time() - start)\n", - "loglike_value" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%timeit loglike(params)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "start = time()\n", - "gradient_value = gradient(params)\n", - "print(time() - start)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%timeit gradient(params)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "start = time()\n", - "loglike_and_gradient_value = loglike_and_gradient(params)\n", - "print(time() - start)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%timeit loglike_and_gradient(params)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "## A few additional constraints\n", - "\n", - "To get the same values as CHS we will have to do a little more work. The reason is that on top of the many constraints skillmodels generates atuomatically from the model specification, CHS impose two more constraints:\n", - "\n", - "1. 
All but the self productivity paramet in the linear transition equaltion are fixed to zero\n", - "2. The initial mean of the states is not estimated but assumed to be zero.\n", - "3. The anchoring parameters (intercepts, control variables, loadings and SDs of measurement error are pairwise equal across periods).\n", - "\n", - "Fortunately, optimagic makes it easy to express such constraints:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "constraints = max_inputs[\"constraints\"]\n", - "\n", - "additional_constraints = [\n", - " {\n", - " \"query\": \"category == 'transition' & name1 == 'fac2' & name2 != 'fac2'\",\n", - " \"type\": \"fixed\",\n", - " \"value\": 0,\n", - " },\n", - " {\"loc\": \"initial_states\", \"type\": \"fixed\", \"value\": 0},\n", - " {\n", - " \"queries\": [f\"period == {i} & name1 == 'Q1_fac1'\" for i in range(8)],\n", - " \"type\": \"pairwise_equality\",\n", - " },\n", - "]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "constraints = constraints + additional_constraints" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Generating a group column for better dashboard output" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "pc, pp = om.process_constraints(constraints, params)\n", - "params[\"group\"] = params.index.get_level_values(\"category\")\n", - "params.loc[\"controls\", \"group\"] = params.loc[\"controls\"].index.get_level_values(\"name2\")\n", - "\n", - "params[\"group\"] = (\n", - " params[\"group\"].astype(str)\n", - " + \"_\"\n", - " + params.index.get_level_values(\"period\").astype(str)\n", - ")\n", - "params[\"group\"] = params[\"group\"].str.replace(\"_\", \"-\")\n", - "params[\"group\"] = params[\"group\"].astype(\"O\")\n", - "params.loc[~pp[\"_internal_free\"], \"group\"] = None\n", - "params" - ] - 
}, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Estimating the model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params[\"value\"] = chs_values[\"good_start_value\"]\n", - "loc = params.query(\"category == 'shock_sds' & name1 == 'fac3'\").index\n", - "params.loc[loc, \"lower_bound\"] = 0.00\n", - "loglike(params)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "res = om.maximize(\n", - " criterion=loglike,\n", - " params=params,\n", - " algorithm=\"scipy_lbfgsb\",\n", - " fun_and_jac=loglike_and_gradient,\n", - " constraints=constraints,\n", - " logging=False,\n", - " algo_options={\"convergence.relative_criterion_tolerance\": 1e-9},\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "res[\"message\"]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "res[\"success\"]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/source/how_to_guides/index.rst b/docs/source/how_to_guides/index.rst deleted file mode 100644 index 94ecff7b..00000000 --- a/docs/source/how_to_guides/index.rst +++ /dev/null @@ -1,13 +0,0 @@ -How-To Guides -============= - - -.. 
toctree:: - :maxdepth: 1 - - model_specs - utilities - how_to_visualize_transition_equations.ipynb - how_to_simulate_dataset.ipynb - how_to_visualize_pairwise_factor_distribution.ipynb - how_to_visualize_correlations.ipynb diff --git a/docs/source/how_to_guides/model_specs.rst b/docs/source/how_to_guides/model_specs.rst deleted file mode 100644 index ecd7ff05..00000000 --- a/docs/source/how_to_guides/model_specs.rst +++ /dev/null @@ -1,236 +0,0 @@ - -.. _model_specs: - -******************** -Model specifications -******************** - -Models are specified as -`Python dictionaries `_. -To improve reuse of the model specifications these dictionaries can be stored in json -or yaml files. - -Example 2 from the CHS replication files -**************************************** - -Below the model-specification is illustrated using Example 2 from the CHS -`replication files`_. If you want you can read it in section 4.1 of their -readme file but I briefly reproduce it here for convenience. - -There are three latent factors fac1, fac2 and fac3 and 8 periods that all belong to the -same development stage. fac1 evolves according to a log_ces production function and -depends on its past values as well as the past values of all other factors. -Moreover, it is linearly anchored with anchoring outcome Q1. This results in the -following transition equation: - -.. math:: - - fac1_{t + 1} = \frac{1}{\phi \lambda_1} ln\big(\gamma_{1,t}e^{\phi - \lambda_1 fac1_t} + \gamma_{2,t}e^{\phi \lambda_2 fac2_t} + - \gamma_{3,t}e^{\phi \lambda_3 fac3_t}\big) + \eta_{1, t} - -where the lambdas are anchoring parameters from a linear anchoring equation. -fac1 is measured by measurements y1, y2 and y3 in all periods. To sum up: -fac1 has the same properties as cognitive and non-cognitive skills in the CHS -paper. - -The evolution of fac2 is described by a linear function and fac2 only depends -on its own past values, not on other factors, i.e. has the following -transition equation: - -.. 
math:: - - fac2_{t + 1} = lincoeff \cdot fac2_t + \eta_{2, t} - -It is measured by y4, y5 and y6 in all periods. Thus fac2 has the same -properties as parental investments in the CHS paper. - -fac3 is constant over time. It is measured by y7, y8 and y9 in the first -period and has no measurements in other periods. This makes it similar to -parental skills in the CHS paper. - -In all periods and for all measurement equations the control variables x1 and -x2 are used, where x2 is a constant. - -What has to be specified? -************************* - -Before thinking about how to translate the above example into a model -specification it is helpful to recall what information is needed to define a -general latent factor model: - - #. What are the latent factors of the model and how are they related over time? - (transition equations) - #. What are the measurement variables of each factor in each period and how are - measurements and factors related? (measurement equations) - #. What are the normalizations of scale (normalized factor loadings) - and location (normalized intercepts or means)? - #. What are the control variables in each period? - #. If development stages are used: Which periods belong to which stage? - #. If anchoring is used: Which factors are anchored and what is the anchoring - outcome? - #. Are there any observed factors? - -Translating the model to a dictionary -************************************* - -before explaining how the model dictionary is written, here a full specification of the -example model as yaml file: - - -.. literalinclude:: ../../../skillmodels/tests/model2.yaml - :language: yaml - :linenos: - -The model specification is a nested dictionary. The outer keys (which I call sections) -are ``"factors"``, ``"anchoring"``, ``"controls"``, ``"stagemap"`` and -``"estimation_options"``. All but the first are optional, but typically you will use at -least some of them. - - -``factors`` ------------ - -The factors are described as a dictionary. 
The keys are the names of the factors. -Any python string is possible as factor name. The values are dictionaries with three -entries: - -- measurements: A nested list that is as long as the number of periods of the model. - Each sublist contains the names of the measurements in that period. If a factor has - no measurements in a period, it has to be an empty list. - -- normalizations: This entry is optional. It is a dictionary that can have the keys - ``"loadings"`` and ``"intercepts"``. The values are lists of dictionaries. The list - needs to contain one dictionary per period of the model. The keys of the dictionaries - are names of measurements. The values are the value they are normalized to. Note that - loadings cannot be normalized to zero. - -- transition_equation: A string with the name of a pre-implemented transition equation - or a custom transition equation. Pre-implemented transition equations are - linear, log_ces (in the known location and scale version), constant and translog. - The example model dictionary only uses pre-implemented transition functions. - - To see how to use custom transition functions, assume that the yaml file shown above - has been loaded into a python dictionary called ``model`` and look at the following - code: - - .. code-block:: - - from skillmodels.decorators import register_params - - @register_params(params=[]) - def constant(fac3, params): - return fac3 - - @register_params(params=["fac1", "fac2", "fac3", "constant"]) - def linear(fac1, fac2, fac3, params): - p = params - out = p["constant"] + fac1 * p["fac1"] + fac2 * p["fac2"] + fac3 * p["fac3"] - return out - - model["factors"]["fac2"]["transition_function"] = linear - model["factors"]["fac3"]["transition_function"] = constant - - The modified model_dict describes the exact same model but this time it is expressed - in terms of custom transition functions. 
- - The ``@register_params`` decorator is necessary to tell skillmodels which parameters - are required for the transition function. Custom transition functions can take the - following arguments: - - - **params** (mandatory): A dictionary with the parameters described in the decorator. - - The observed and unobserved factors as floats - - **states**: A 1d jax array with states in the factor order specified in the - model dictionary. - - The order of arguments is irrelevant. All functions need to return a float. - The functions need to be jax jit and vmap compatible. We vmap over all arguments - except for params. - - - - -``"anchoring"`` ---------------- - -The specification for anchoring is a dictionary. It has the following entries: - -- ``"outcomes"``: a dictionary that maps names of factors to variables that are used - as anchoring outcome. Factors that are not anchored can simply be left out. -- ``"free_controls"``: Whether the control variables used in the measurement equations - should also be used in the anchoring equations. Default False. This is mainly there - to support the CHS example model and will probably not be set to True in any real - application. -- ``"free_constant"``: Whether the anchoring equation should have a constant. Default - False. This should be set to True if there are normalizations of location (i.e. - normalized intercepts) in the measurement equations. -- ``"free_loadings"``: If true, the loadings are estimated, otherwise they are fixed to - one. Default False. This should be set to True if there are normalizations of scale - (i.e. normalized loadings) in the measurement equations. -- ``"ignore_constant_when_anchoring"``: If true, no constant is used when anchoring the - latent factors, even if one was estimated. Default False. This is mainly there - to support the CHS example model and will probably not be set to True in any real - application. 
- - - -``"controls"`` --------------- - -A list of variables that are used as controls in the measurement equations. You do not -have to specify as constant as control variable, because it is always included. If you -want to get rid of controls in some periods, you have to normalize their coefficients -to zero. - -``"stagemap"`` --------------- - - -A list that has one entry less than the number of periods of the model. It maps periods -to development stages. See :ref:`stages_vs_periods` for the meaning of development -stages. - - -``"observed_factors"`` ----------------------- - -A list with variable names. Those variable names must be present in the dataset and -contain information about observed factors. An example of an observed factor could -be income, a treatment assignment or age. - - -Observed factors do not have transition equations, do not require multiple measurements -per period and are not part of the covariance matrix of the latent factors. As such, -adding an observed factor is computationally much less demanding than adding an -unobserved factor. - - -``"estimation_options"`` ------------------------- - -Another dictionary. It has the following entries. - -- ``"sigma_points_scale"``: The scaling factor of Julier sigma points. Default 2 which - was shown to work well for the example models by Cunha, Heckman and Schennach. -- ``"robust_bounds"``: Bool. If true, bound constraints are made stricter. This avoids - exploding likelihoods when the standard deviation of the measurement error is zero. - Default True. -- ``"bounds_distance"``: By how much the bounds are made stricter. Only relevant when - robust bounds are used. Default ``0.001``. -- ``"clipping_lower_bound": Strongly negative value at which the log likelihood is - clipped a log likelihood of -infinity. The clipping is done using a soft maximum - to avoid non-differentiable points in the likelihood. Default ``-1e-250``. Set to - ``None`` to disable this completely. -- ``"clipping_upper_bound". 
Same as ``"clipping_lower_bound"`` but from above. Default - None because typically the better way of avoiding upwards exploding likelihoods is to - set bounds strictly above zero for the measurement error standard deviations. -- ``"clipping_lower_hardness"`` and ``"clipping_upper_hardness"``. How closely the soft - maximum or minimum we use for clipping approximates its hard counterpart. Default 1 - which is an extremely close approximation of the hard maximum or minimum. If you want - to make the likelihood function smoother you should set it to a much lower value. - - - - -.. _replication files: - https://tinyurl.com/yyuq2sa4 diff --git a/docs/source/how_to_guides/utilities.rst b/docs/source/how_to_guides/utilities.rst deleted file mode 100644 index 7b372f30..00000000 --- a/docs/source/how_to_guides/utilities.rst +++ /dev/null @@ -1,14 +0,0 @@ -How to modify model specifications -================================== - - -``skillmodels.utilities`` contains functions to construct a model dictionary by varying -an existing one and to update the parameters of a larger model from estimated parameters -from smaller models. - -All functions that modify model dictionaries can can also modify a params DataFrame -that was constructed for the original model accordingly. - - -.. automodule:: skillmodels.utilities - :members: diff --git a/docs/source/index.rst b/docs/source/index.rst deleted file mode 100644 index 37219c78..00000000 --- a/docs/source/index.rst +++ /dev/null @@ -1,142 +0,0 @@ -Welcome to the documentation of skillmodels! -============================================ - - - -Structure of the Documentation -============================== - - -.. raw:: html - - - - -Welcome to skillmodels, a Python implementation of estimators for skill -formation models. The econometrics of skill formation models is a very active -field and several estimators were proposed. None of them is implemented in -standard econometrics packages. 
- - -Skillmodels implements the Kalman filter based maximum likelihood estimator -proposed by Cunha, Heckman and Schennach (CHS), (`Econometrica 2010`_) - - -Skillmodels was developed for skill formation models but is by no means -limited to this particular application. It can be applied to any dynamic -nonlinear latent factor model. - -The CHS estimator implemented here differs in two points from the one -implemented in their `replication files`_: 1) It uses different normalizations -that take into account the `critique`_ of Wiswall and Agostinelli. 2) It can -optionally use more robust square-root implementations of the Kalman filters. - - -Most of the code is unit tested. Furthermore, the results have been compared -to the Fortran code by CHS for two basic models with hypothetical data from -their `replication files`_. - - -**Citation** - -It took countless hours to write skillmodels. I make it available under a very -permissive license in the hope that it helps other people to do great research -that advances our knowledge about the formation of cognitive and noncognitive -siklls. If you find skillmodels helpful, please don't forget to cite it. You -can find a suggested citation in the README file on `GitHub`_. - - -**Feedback** - -If you find skillmodels helpful for research or teaching, please let me know. -If you encounter any problems with the installation or while using -skillmodels, please complain or open an issue at `GitHub`_. - - - -.. _critique: - https://tinyurl.com/y3wl43kz - -.. _replication files: - https://tinyurl.com/yyuq2sa4 - -.. _GitHub: - https://github.com/OpenSourceEconomics/skillmodels - - -.. _Econometrica 2010: - http://onlinelibrary.wiley.com/doi/10.3982/ECTA6551/abstract - - -.. 
toctree:: - :maxdepth: 1 - - getting_started/index - how_to_guides/index - explanations/index - reference_guides/index diff --git a/docs/source/reference_guides/endogeneity_corrections.rst b/docs/source/reference_guides/endogeneity_corrections.rst deleted file mode 100644 index 15eda667..00000000 --- a/docs/source/reference_guides/endogeneity_corrections.rst +++ /dev/null @@ -1,47 +0,0 @@ -A note on endogeneity correction methods: -***************************************** - -In the empirical part of their paper, CHS use two methods for endogeneity -correction. Both require very strong assumptions on the scale of factors. -Below I give an overview of the proposed endogeneity correction methods that -can serve as a starting point for someone who wants to extend skillmodels in -that direction: - -In secton 4.2.4 CHS extend their basic model with a time invariant individual -specific heterogeneity component, i.e. a fixed effect. The time invariance -assumption can only be valid if the scale of all factors remains the same -throughout the model. This is highly unlikely, unless age invariant -measurements (as defined by Wiswall and Agostinelli) are available and used -for normalization in all periods for all factors. With KLS transition -functions the assumption of the factor scales remaining constant in all -periods is highly unlikely (see: :ref:`KLS_not_constant`). Moreover, this -approach requires 3 adult outcomes. If you have a dataset with enough time -invariant measurements and enough adult outcomes, this method is suitable for -you and you could use the Fortran code by CHS as a starting point. - -In 4.2.5 they make a endogeneity correction with time varying heterogeneity. -However, this heterogeneity follows the same AR1 process in each period and -relies on an estimated time invariant investment equation, so it also requires -the factor scales to be constant. This might not be a good assumption in many -applications. 
Moreover, this correction method relies on a exclusion -restriction (Income is an argument of the investment function but not of the -transition functions of other latent factors) or suitable functional form -assumptions for identification. - -To use this correction method in models where not enough age invariant -measurements are available to ensure constant factor scales, one would have to -replace the AR1 process by a linear transition function with different -estimated parameters in each period and also estimate a different investment -function in each period. I don't know if this model is identified. - -I don't know if these methods could be used in the WA estimator. - -Wiswall and Agostinelli use a simpler model of endegeneity of investments that -could be used with both estimators. See section 6.1.2 of their `paper`_. - -.. _paper: - https://tinyurl.com/y5ezloh2 - - -.. _replication files: - https://tinyurl.com/yyuq2sa4 diff --git a/docs/source/reference_guides/estimation.rst b/docs/source/reference_guides/estimation.rst deleted file mode 100644 index c1b4201f..00000000 --- a/docs/source/reference_guides/estimation.rst +++ /dev/null @@ -1,42 +0,0 @@ -============================= -Modules Related to Estimation -============================= - -.. _likelihood_function: - -The Likelihood Function -======================= - -.. automodule:: skillmodels.likelihood_function - :members: - -.. _kalman_filters: - -The Kalman Filters -================== - - -.. automodule:: skillmodels.kalman_filters - :members: - - -The Index of the Parameter DataFrame -==================================== - - -.. _params_index: - - -.. automodule:: skillmodels.params_index - :members: - - - -.. _parse_params: - -Parsing the Parameter Vector -============================ - - -.. 
automodule:: skillmodels.parse_params - :members: diff --git a/docs/source/reference_guides/index.rst b/docs/source/reference_guides/index.rst deleted file mode 100644 index 97224906..00000000 --- a/docs/source/reference_guides/index.rst +++ /dev/null @@ -1,12 +0,0 @@ -Reference Guides -================ - - -.. toctree:: - :maxdepth: 1 - - pre_processing - estimation - simulation - transition_functions - endogeneity_corrections diff --git a/docs/source/reference_guides/pre_processing.rst b/docs/source/reference_guides/pre_processing.rst deleted file mode 100644 index 2b1b1e21..00000000 --- a/docs/source/reference_guides/pre_processing.rst +++ /dev/null @@ -1,37 +0,0 @@ -================================= -How the User Inputs are Processed -================================= - - - - -.. _model_processing: - -Model Processing -================ - - -.. automodule:: skillmodels.process_model - :members: - - - -.. _data_processing: - -Data Processing -=============== - - -.. automodule:: skillmodels.process_data - :members: - - - -.. _model_checking: - -Model Checking -============== - - -.. automodule:: skillmodels.check_model - :members: diff --git a/docs/source/reference_guides/simulation.rst b/docs/source/reference_guides/simulation.rst deleted file mode 100644 index f44cea30..00000000 --- a/docs/source/reference_guides/simulation.rst +++ /dev/null @@ -1,12 +0,0 @@ -============================= -Modules Related to Simulation -============================= - -.. _simulate_data: - - -Simulating a Dataset -==================== - -.. automodule:: skillmodels.simulate_data - :members: diff --git a/docs/source/reference_guides/transition_functions.rst b/docs/source/reference_guides/transition_functions.rst deleted file mode 100644 index 72c46fe9..00000000 --- a/docs/source/reference_guides/transition_functions.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. _transition_functions: - -Transition Equations -==================== - - -.. 
automodule:: skillmodels.transition_functions - :members: diff --git a/docs/source/rtd_environment.yml b/docs/source/rtd_environment.yml deleted file mode 100644 index 66fd0464..00000000 --- a/docs/source/rtd_environment.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -name: skillmodels_rtd -channels: - - conda-forge -dependencies: - - python=3.12 - - skillmodels - - sphinxcontrib-bibtex - - pydata-sphinx-theme>=0.3 - - sphinx - - nbsphinx diff --git a/docs/source/start_params.csv b/docs/source/start_params.csv deleted file mode 100644 index bce6c807..00000000 --- a/docs/source/start_params.csv +++ /dev/null @@ -1,237 +0,0 @@ -category,period,name1,name2,value,lower,upper,chs_value,good_start_value,bad_start_value -control_coeffs,0,y1,constant,1.00545482,-inf,inf,1.00545482,1.0,0.0 -control_coeffs,0,y1,x1,1.00161847,-inf,inf,1.00161847,1.0,0.0 -control_coeffs,0,y2,constant,0.97599155,-inf,inf,0.97599155,1.0,0.0 -control_coeffs,0,y2,x1,1.03143922,-inf,inf,1.03143922,1.0,0.0 -control_coeffs,0,y3,constant,0.99413941,-inf,inf,0.99413941,1.0,0.0 -control_coeffs,0,y3,x1,0.99409082,-inf,inf,0.99409082,1.0,0.0 -control_coeffs,0,y4,constant,1.00601018,-inf,inf,1.00601018,1.0,0.0 -control_coeffs,0,y4,x1,1.01833579,-inf,inf,1.01833579,1.0,0.0 -control_coeffs,0,y5,constant,1.00099227,-inf,inf,1.00099227,1.0,0.0 -control_coeffs,0,y5,x1,1.02354619,-inf,inf,1.02354619,1.0,0.0 -control_coeffs,0,y6,constant,0.96857115,-inf,inf,0.96857115,1.0,0.0 -control_coeffs,0,y6,x1,1.04884144,-inf,inf,1.04884144,1.0,0.0 -control_coeffs,0,y7,constant,0.97482723,-inf,inf,0.97482723,1.0,0.0 -control_coeffs,0,y7,x1,1.04176152,-inf,inf,1.04176152,1.0,0.0 -control_coeffs,0,y8,constant,0.98188173,-inf,inf,0.98188173,1.0,0.0 -control_coeffs,0,y8,x1,1.00566791,-inf,inf,1.00566791,1.0,0.0 -control_coeffs,0,y9,constant,0.92514421,-inf,inf,0.92514421,0.9,0.0 -control_coeffs,0,y9,x1,1.04848607,-inf,inf,1.04848607,1.0,0.0 -control_coeffs,0,Q1_fac1,constant,1.03648928,-inf,inf,1.03648928,1.0,0.0 
-control_coeffs,0,Q1_fac1,x1,0.94351157,-inf,inf,0.94351157,0.9,0.0 -control_coeffs,1,y1,constant,1.0528786,-inf,inf,1.0528786,1.1,0.0 -control_coeffs,1,y1,x1,0.9565774,-inf,inf,0.9565774,1.0,0.0 -control_coeffs,1,y2,constant,1.10002752,-inf,inf,1.10002752,1.1,0.0 -control_coeffs,1,y2,x1,0.94514768,-inf,inf,0.94514768,0.9,0.0 -control_coeffs,1,y3,constant,1.11220924,-inf,inf,1.11220924,1.1,0.0 -control_coeffs,1,y3,x1,0.94921197,-inf,inf,0.94921197,0.9,0.0 -control_coeffs,1,y4,constant,1.05321768,-inf,inf,1.05321768,1.1,0.0 -control_coeffs,1,y4,x1,0.97927901,-inf,inf,0.97927901,1.0,0.0 -control_coeffs,1,y5,constant,1.07984824,-inf,inf,1.07984824,1.1,0.0 -control_coeffs,1,y5,x1,0.95632487,-inf,inf,0.95632487,1.0,0.0 -control_coeffs,1,y6,constant,1.04155956,-inf,inf,1.04155956,1.0,0.0 -control_coeffs,1,y6,x1,0.98720524,-inf,inf,0.98720524,1.0,0.0 -control_coeffs,1,Q1_fac1,constant,1.03648928,-inf,inf,1.03648928,1.0,0.0 -control_coeffs,1,Q1_fac1,x1,0.94351157,-inf,inf,0.94351157,0.9,0.0 -control_coeffs,2,y1,constant,1.01759386,-inf,inf,1.01759386,1.0,0.0 -control_coeffs,2,y1,x1,0.98278806,-inf,inf,0.98278806,1.0,0.0 -control_coeffs,2,y2,constant,0.99449381,-inf,inf,0.99449381,1.0,0.0 -control_coeffs,2,y2,x1,1.00015093,-inf,inf,1.00015093,1.0,0.0 -control_coeffs,2,y3,constant,1.04145063,-inf,inf,1.04145063,1.0,0.0 -control_coeffs,2,y3,x1,0.93918533,-inf,inf,0.93918533,0.9,0.0 -control_coeffs,2,y4,constant,0.98808056,-inf,inf,0.98808056,1.0,0.0 -control_coeffs,2,y4,x1,1.00539897,-inf,inf,1.00539897,1.0,0.0 -control_coeffs,2,y5,constant,0.93727871,-inf,inf,0.93727871,0.9,0.0 -control_coeffs,2,y5,x1,1.03995916,-inf,inf,1.03995916,1.0,0.0 -control_coeffs,2,y6,constant,0.97863335,-inf,inf,0.97863335,1.0,0.0 -control_coeffs,2,y6,x1,1.02370063,-inf,inf,1.02370063,1.0,0.0 -control_coeffs,2,Q1_fac1,constant,1.03648928,-inf,inf,1.03648928,1.0,0.0 -control_coeffs,2,Q1_fac1,x1,0.94351157,-inf,inf,0.94351157,0.9,0.0 
-control_coeffs,3,y1,constant,1.07747808,-inf,inf,1.07747808,1.1,0.0 -control_coeffs,3,y1,x1,0.95263385,-inf,inf,0.95263385,1.0,0.0 -control_coeffs,3,y2,constant,1.01595775,-inf,inf,1.01595775,1.0,0.0 -control_coeffs,3,y2,x1,0.97511705,-inf,inf,0.97511705,1.0,0.0 -control_coeffs,3,y3,constant,1.00409134,-inf,inf,1.00409134,1.0,0.0 -control_coeffs,3,y3,x1,0.99671239,-inf,inf,0.99671239,1.0,0.0 -control_coeffs,3,y4,constant,1.00265983,-inf,inf,1.00265983,1.0,0.0 -control_coeffs,3,y4,x1,0.97463783,-inf,inf,0.97463783,1.0,0.0 -control_coeffs,3,y5,constant,0.98936892,-inf,inf,0.98936892,1.0,0.0 -control_coeffs,3,y5,x1,1.00354587,-inf,inf,1.00354587,1.0,0.0 -control_coeffs,3,y6,constant,1.01816115,-inf,inf,1.01816115,1.0,0.0 -control_coeffs,3,y6,x1,1.00220065,-inf,inf,1.00220065,1.0,0.0 -control_coeffs,3,Q1_fac1,constant,1.03648928,-inf,inf,1.03648928,1.0,0.0 -control_coeffs,3,Q1_fac1,x1,0.94351157,-inf,inf,0.94351157,0.9,0.0 -control_coeffs,4,y1,constant,0.97390947,-inf,inf,0.97390947,1.0,0.0 -control_coeffs,4,y1,x1,1.01871361,-inf,inf,1.01871361,1.0,0.0 -control_coeffs,4,y2,constant,1.01276643,-inf,inf,1.01276643,1.0,0.0 -control_coeffs,4,y2,x1,0.96884594,-inf,inf,0.96884594,1.0,0.0 -control_coeffs,4,y3,constant,1.0639416000000002,-inf,inf,1.0639416000000002,1.1,0.0 -control_coeffs,4,y3,x1,0.96348822,-inf,inf,0.96348822,1.0,0.0 -control_coeffs,4,y4,constant,1.05030944,-inf,inf,1.05030944,1.1,0.0 -control_coeffs,4,y4,x1,0.97249741,-inf,inf,0.97249741,1.0,0.0 -control_coeffs,4,y5,constant,0.95867367,-inf,inf,0.95867367,1.0,0.0 -control_coeffs,4,y5,x1,1.01263275,-inf,inf,1.01263275,1.0,0.0 -control_coeffs,4,y6,constant,1.02067436,-inf,inf,1.02067436,1.0,0.0 -control_coeffs,4,y6,x1,0.97561054,-inf,inf,0.97561054,1.0,0.0 -control_coeffs,4,Q1_fac1,constant,1.03648928,-inf,inf,1.03648928,1.0,0.0 -control_coeffs,4,Q1_fac1,x1,0.94351157,-inf,inf,0.94351157,0.9,0.0 -control_coeffs,5,y1,constant,0.92274679,-inf,inf,0.92274679,0.9,0.0 
-control_coeffs,5,y1,x1,1.02885338,-inf,inf,1.02885338,1.0,0.0 -control_coeffs,5,y2,constant,0.97278707,-inf,inf,0.97278707,1.0,0.0 -control_coeffs,5,y2,x1,0.99430379,-inf,inf,0.99430379,1.0,0.0 -control_coeffs,5,y3,constant,1.02364392,-inf,inf,1.02364392,1.0,0.0 -control_coeffs,5,y3,x1,0.97922354,-inf,inf,0.97922354,1.0,0.0 -control_coeffs,5,y4,constant,0.9576603,-inf,inf,0.9576603,1.0,0.0 -control_coeffs,5,y4,x1,1.01557986,-inf,inf,1.01557986,1.0,0.0 -control_coeffs,5,y5,constant,1.04015694,-inf,inf,1.04015694,1.0,0.0 -control_coeffs,5,y5,x1,0.96898918,-inf,inf,0.96898918,1.0,0.0 -control_coeffs,5,y6,constant,1.00635915,-inf,inf,1.00635915,1.0,0.0 -control_coeffs,5,y6,x1,0.97658414,-inf,inf,0.97658414,1.0,0.0 -control_coeffs,5,Q1_fac1,constant,1.03648928,-inf,inf,1.03648928,1.0,0.0 -control_coeffs,5,Q1_fac1,x1,0.94351157,-inf,inf,0.94351157,0.9,0.0 -control_coeffs,6,y1,constant,0.99929141,-inf,inf,0.99929141,1.0,0.0 -control_coeffs,6,y1,x1,0.98368467,-inf,inf,0.98368467,1.0,0.0 -control_coeffs,6,y2,constant,1.04782772,-inf,inf,1.04782772,1.0,0.0 -control_coeffs,6,y2,x1,0.93183755,-inf,inf,0.93183755,0.9,0.0 -control_coeffs,6,y3,constant,1.00920751,-inf,inf,1.00920751,1.0,0.0 -control_coeffs,6,y3,x1,0.95146637,-inf,inf,0.95146637,1.0,0.0 -control_coeffs,6,y4,constant,1.05400193,-inf,inf,1.05400193,1.1,0.0 -control_coeffs,6,y4,x1,0.99101302,-inf,inf,0.99101302,1.0,0.0 -control_coeffs,6,y5,constant,1.06296891,-inf,inf,1.06296891,1.1,0.0 -control_coeffs,6,y5,x1,0.9781293,-inf,inf,0.9781293,1.0,0.0 -control_coeffs,6,y6,constant,1.01391456,-inf,inf,1.01391456,1.0,0.0 -control_coeffs,6,y6,x1,0.9949447,-inf,inf,0.9949447,1.0,0.0 -control_coeffs,6,Q1_fac1,constant,1.03648928,-inf,inf,1.03648928,1.0,0.0 -control_coeffs,6,Q1_fac1,x1,0.94351157,-inf,inf,0.94351157,0.9,0.0 -control_coeffs,7,y1,constant,1.03694603,-inf,inf,1.03694603,1.0,0.0 -control_coeffs,7,y1,x1,0.94997421,-inf,inf,0.94997421,0.9,0.0 -control_coeffs,7,y2,constant,1.05734328,-inf,inf,1.05734328,1.1,0.0 
-control_coeffs,7,y2,x1,0.94683688,-inf,inf,0.94683688,0.9,0.0 -control_coeffs,7,y3,constant,1.04305552,-inf,inf,1.04305552,1.0,0.0 -control_coeffs,7,y3,x1,0.97187261,-inf,inf,0.97187261,1.0,0.0 -control_coeffs,7,y4,constant,1.06978168,-inf,inf,1.06978168,1.1,0.0 -control_coeffs,7,y4,x1,0.94043886,-inf,inf,0.94043886,0.9,0.0 -control_coeffs,7,y5,constant,1.01505491,-inf,inf,1.01505491,1.0,0.0 -control_coeffs,7,y5,x1,0.9762739,-inf,inf,0.9762739,1.0,0.0 -control_coeffs,7,y6,constant,1.02936846,-inf,inf,1.02936846,1.0,0.0 -control_coeffs,7,y6,x1,0.9811178,-inf,inf,0.9811178,1.0,0.0 -control_coeffs,7,Q1_fac1,constant,1.03648928,-inf,inf,1.03648928,1.0,0.0 -control_coeffs,7,Q1_fac1,x1,0.94351157,-inf,inf,0.94351157,0.9,0.0 -loading,0,y2,fac1,1.24759799,-inf,inf,1.24759799,1.2,1.0 -loading,0,y3,fac1,1.55939677,-inf,inf,1.55939677,1.6,1.0 -loading,0,y5,fac2,0.9908065,-inf,inf,0.9908065,1.0,1.0 -loading,0,y6,fac2,0.68554932,-inf,inf,0.68554932,0.7,1.0 -loading,0,y8,fac3,1.1132403,-inf,inf,1.1132403,1.1,1.0 -loading,0,y9,fac3,0.70906931,-inf,inf,0.70906931,0.7,1.0 -loading,0,Q1_fac1,fac1,0.93520167,-inf,inf,0.93520167,0.9,1.0 -loading,1,y2,fac1,1.21745308,-inf,inf,1.21745308,1.2,1.0 -loading,1,y3,fac1,1.34547457,-inf,inf,1.34547457,1.3,1.0 -loading,1,y5,fac2,0.82645675,-inf,inf,0.82645675,0.8,1.0 -loading,1,y6,fac2,0.60128174,-inf,inf,0.60128174,0.6,1.0 -loading,1,Q1_fac1,fac1,0.93520167,-inf,inf,0.93520167,0.9,1.0 -loading,2,y2,fac1,1.18091396,-inf,inf,1.18091396,1.2,1.0 -loading,2,y3,fac1,1.41494654,-inf,inf,1.41494654,1.4,1.0 -loading,2,y5,fac2,0.78412623,-inf,inf,0.78412623,0.8,1.0 -loading,2,y6,fac2,0.6095229,-inf,inf,0.6095229,0.6,1.0 -loading,2,Q1_fac1,fac1,0.93520167,-inf,inf,0.93520167,0.9,1.0 -loading,3,y2,fac1,1.28957256,-inf,inf,1.28957256,1.3,1.0 -loading,3,y3,fac1,1.42483912,-inf,inf,1.42483912,1.4,1.0 -loading,3,y5,fac2,0.75831051,-inf,inf,0.75831051,0.8,1.0 -loading,3,y6,fac2,0.60278636,-inf,inf,0.60278636,0.6,1.0 
-loading,3,Q1_fac1,fac1,0.93520167,-inf,inf,0.93520167,0.9,1.0 -loading,4,y2,fac1,1.24216652,-inf,inf,1.24216652,1.2,1.0 -loading,4,y3,fac1,1.419458,-inf,inf,1.419458,1.4,1.0 -loading,4,y5,fac2,0.83681772,-inf,inf,0.83681772,0.8,1.0 -loading,4,y6,fac2,0.5249411999999999,-inf,inf,0.5249411999999999,0.5,1.0 -loading,4,Q1_fac1,fac1,0.93520167,-inf,inf,0.93520167,0.9,1.0 -loading,5,y2,fac1,1.20739978,-inf,inf,1.20739978,1.2,1.0 -loading,5,y3,fac1,1.43586124,-inf,inf,1.43586124,1.4,1.0 -loading,5,y5,fac2,0.76476822,-inf,inf,0.76476822,0.8,1.0 -loading,5,y6,fac2,0.60105183,-inf,inf,0.60105183,0.6,1.0 -loading,5,Q1_fac1,fac1,0.93520167,-inf,inf,0.93520167,0.9,1.0 -loading,6,y2,fac1,1.22402329,-inf,inf,1.22402329,1.2,1.0 -loading,6,y3,fac1,1.42265974,-inf,inf,1.42265974,1.4,1.0 -loading,6,y5,fac2,0.68885845,-inf,inf,0.68885845,0.7,1.0 -loading,6,y6,fac2,0.61882325,-inf,inf,0.61882325,0.6,1.0 -loading,6,Q1_fac1,fac1,0.93520167,-inf,inf,0.93520167,0.9,1.0 -loading,7,y2,fac1,1.23608389,-inf,inf,1.23608389,1.2,1.0 -loading,7,y3,fac1,1.47859872,-inf,inf,1.47859872,1.5,1.0 -loading,7,y5,fac2,0.81524559,-inf,inf,0.81524559,0.8,1.0 -loading,7,y6,fac2,0.57084593,-inf,inf,0.57084593,0.6,1.0 -loading,7,Q1_fac1,fac1,0.93520167,-inf,inf,0.93520167,0.9,1.0 -meas_sd,0,y1,-,0.50497719,-inf,inf,0.50497719,0.5,0.8 -meas_sd,0,y2,-,0.50088168,-inf,inf,0.50088168,0.5,0.8 -meas_sd,0,y3,-,0.48136282,-inf,inf,0.48136282,0.5,0.8 -meas_sd,0,y4,-,0.53215346,-inf,inf,0.53215346,0.5,0.8 -meas_sd,0,y5,-,0.47039143,-inf,inf,0.47039143,0.5,0.8 -meas_sd,0,y6,-,0.48344469,-inf,inf,0.48344469,0.5,0.8 -meas_sd,0,y7,-,0.47943359,-inf,inf,0.47943359,0.5,0.8 -meas_sd,0,y8,-,0.53421227,-inf,inf,0.53421227,0.5,0.8 -meas_sd,0,y9,-,0.50146093,-inf,inf,0.50146093,0.5,0.8 -meas_sd,0,Q1_fac1,-,1.03957418,-inf,inf,1.03957418,1.0,0.8 -meas_sd,1,y1,-,0.49105567,-inf,inf,0.49105567,0.5,0.8 -meas_sd,1,y2,-,0.49870431,-inf,inf,0.49870431,0.5,0.8 -meas_sd,1,y3,-,0.50514084,-inf,inf,0.50514084,0.5,0.8 
-meas_sd,1,y4,-,0.49743526,-inf,inf,0.49743526,0.5,0.8 -meas_sd,1,y5,-,0.4994178,-inf,inf,0.4994178,0.5,0.8 -meas_sd,1,y6,-,0.50424182,-inf,inf,0.50424182,0.5,0.8 -meas_sd,1,Q1_fac1,-,1.03957418,-inf,inf,1.03957418,1.0,0.8 -meas_sd,2,y1,-,0.50427244,-inf,inf,0.50427244,0.5,0.8 -meas_sd,2,y2,-,0.51856939,-inf,inf,0.51856939,0.5,0.8 -meas_sd,2,y3,-,0.50392617,-inf,inf,0.50392617,0.5,0.8 -meas_sd,2,y4,-,0.49161026,-inf,inf,0.49161026,0.5,0.8 -meas_sd,2,y5,-,0.50441808,-inf,inf,0.50441808,0.5,0.8 -meas_sd,2,y6,-,0.48482939,-inf,inf,0.48482939,0.5,0.8 -meas_sd,2,Q1_fac1,-,1.03957418,-inf,inf,1.03957418,1.0,0.8 -meas_sd,3,y1,-,0.49476345,-inf,inf,0.49476345,0.5,0.8 -meas_sd,3,y2,-,0.49363682,-inf,inf,0.49363682,0.5,0.8 -meas_sd,3,y3,-,0.49918763,-inf,inf,0.49918763,0.5,0.8 -meas_sd,3,y4,-,0.49728617,-inf,inf,0.49728617,0.5,0.8 -meas_sd,3,y5,-,0.5116465,-inf,inf,0.5116465,0.5,0.8 -meas_sd,3,y6,-,0.48035036,-inf,inf,0.48035036,0.5,0.8 -meas_sd,3,Q1_fac1,-,1.03957418,-inf,inf,1.03957418,1.0,0.8 -meas_sd,4,y1,-,0.50529312,-inf,inf,0.50529312,0.5,0.8 -meas_sd,4,y2,-,0.50706948,-inf,inf,0.50706948,0.5,0.8 -meas_sd,4,y3,-,0.47849704,-inf,inf,0.47849704,0.5,0.8 -meas_sd,4,y4,-,0.49962829,-inf,inf,0.49962829,0.5,0.8 -meas_sd,4,y5,-,0.49001347,-inf,inf,0.49001347,0.5,0.8 -meas_sd,4,y6,-,0.48723789,-inf,inf,0.48723789,0.5,0.8 -meas_sd,4,Q1_fac1,-,1.03957418,-inf,inf,1.03957418,1.0,0.8 -meas_sd,5,y1,-,0.51551926,-inf,inf,0.51551926,0.5,0.8 -meas_sd,5,y2,-,0.52331776,-inf,inf,0.52331776,0.5,0.8 -meas_sd,5,y3,-,0.48326815,-inf,inf,0.48326815,0.5,0.8 -meas_sd,5,y4,-,0.47910757,-inf,inf,0.47910757,0.5,0.8 -meas_sd,5,y5,-,0.50327233,-inf,inf,0.50327233,0.5,0.8 -meas_sd,5,y6,-,0.49705186,-inf,inf,0.49705186,0.5,0.8 -meas_sd,5,Q1_fac1,-,1.03957418,-inf,inf,1.03957418,1.0,0.8 -meas_sd,6,y1,-,0.52223919,-inf,inf,0.52223919,0.5,0.8 -meas_sd,6,y2,-,0.48641122,-inf,inf,0.48641122,0.5,0.8 -meas_sd,6,y3,-,0.47597189,-inf,inf,0.47597189,0.5,0.8 
-meas_sd,6,y4,-,0.52595048,-inf,inf,0.52595048,0.5,0.8 -meas_sd,6,y5,-,0.51187305,-inf,inf,0.51187305,0.5,0.8 -meas_sd,6,y6,-,0.52425668,-inf,inf,0.52425668,0.5,0.8 -meas_sd,6,Q1_fac1,-,1.03957418,-inf,inf,1.03957418,1.0,0.8 -meas_sd,7,y1,-,0.52163477,-inf,inf,0.52163477,0.5,0.8 -meas_sd,7,y2,-,0.52112353,-inf,inf,0.52112353,0.5,0.8 -meas_sd,7,y3,-,0.47545353,-inf,inf,0.47545353,0.5,0.8 -meas_sd,7,y4,-,0.5150197,-inf,inf,0.5150197,0.5,0.8 -meas_sd,7,y5,-,0.48993218,-inf,inf,0.48993218,0.5,0.8 -meas_sd,7,y6,-,0.52777721,-inf,inf,0.52777721,0.5,0.8 -meas_sd,7,Q1_fac1,-,1.03957418,-inf,inf,1.03957418,1.0,0.8 -shock_sd,0,fac1,-,0.321936173798472,-inf,inf,0.5673941961268832,0.31622776601683794,0.7071067811865476 -shock_sd,0,fac2,-,0.3131064355774247,-inf,inf,0.5595591439494352,0.31622776601683794,0.7071067811865476 -initial_mean,0,mixture_0,fac1,0.0,-inf,inf,0.0,0.0,0.0 -initial_mean,0,mixture_0,fac2,0.0,-inf,inf,0.0,0.0,0.0 -initial_mean,0,mixture_0,fac3,0.0,-inf,inf,0.0,0.0,0.0 -initial_cov,0,mixture_0,fac1-fac1,0.17647290000000002,-inf,inf,0.17647290000000002,0.2,0.5 -initial_cov,0,mixture_0,fac2-fac1,0.00524114,-inf,inf,0.00524114,0.0,0.0 -initial_cov,0,mixture_0,fac2-fac2,0.18362641,-inf,inf,0.18362641,0.2,0.5 -initial_cov,0,mixture_0,fac3-fac1,0.005665399999999999,-inf,inf,0.005665399999999999,0.0,0.0 -initial_cov,0,mixture_0,fac3-fac2,-0.00067522,-inf,inf,-0.00067522,0.0,0.0 -initial_cov,0,mixture_0,fac3-fac3,0.23194739,-inf,inf,0.23194739,0.2,0.5 -trans,0,fac1,fac1,0.65978837,-inf,inf,0.65978837,0.7,0.5 -trans,0,fac1,fac2,0.174038,-inf,inf,0.174038,0.2,0.25 -trans,0,fac1,fac3,0.16617363,-inf,inf,0.16617363,0.1,0.25 -trans,0,fac1,phi,-0.40701787,-inf,inf,-0.40701787,-0.4,-0.2 -trans,0,fac2,fac2,0.60887112,-inf,inf,0.60887112,0.6,0.5 -trans,0,fac2,constant,0.0,-inf,inf,0.0,0.0,0.0 diff --git a/docs/source/start_params_template.csv b/docs/source/start_params_template.csv deleted file mode 100644 index 7b219d81..00000000 --- a/docs/source/start_params_template.csv 
+++ /dev/null @@ -1,237 +0,0 @@ -category,period,name1,name2,value,lower_bound,upper_bound -delta,0,y1,constant,,-inf,inf -delta,0,y1,x1,,-inf,inf -delta,0,y2,constant,,-inf,inf -delta,0,y2,x1,,-inf,inf -delta,0,y3,constant,,-inf,inf -delta,0,y3,x1,,-inf,inf -delta,0,y4,constant,,-inf,inf -delta,0,y4,x1,,-inf,inf -delta,0,y5,constant,,-inf,inf -delta,0,y5,x1,,-inf,inf -delta,0,y6,constant,,-inf,inf -delta,0,y6,x1,,-inf,inf -delta,0,y7,constant,,-inf,inf -delta,0,y7,x1,,-inf,inf -delta,0,y8,constant,,-inf,inf -delta,0,y8,x1,,-inf,inf -delta,0,y9,constant,,-inf,inf -delta,0,y9,x1,,-inf,inf -delta,0,Q1_fac1,constant,,-inf,inf -delta,0,Q1_fac1,x1,,-inf,inf -delta,1,y1,constant,,-inf,inf -delta,1,y1,x1,,-inf,inf -delta,1,y2,constant,,-inf,inf -delta,1,y2,x1,,-inf,inf -delta,1,y3,constant,,-inf,inf -delta,1,y3,x1,,-inf,inf -delta,1,y4,constant,,-inf,inf -delta,1,y4,x1,,-inf,inf -delta,1,y5,constant,,-inf,inf -delta,1,y5,x1,,-inf,inf -delta,1,y6,constant,,-inf,inf -delta,1,y6,x1,,-inf,inf -delta,1,Q1_fac1,constant,,-inf,inf -delta,1,Q1_fac1,x1,,-inf,inf -delta,2,y1,constant,,-inf,inf -delta,2,y1,x1,,-inf,inf -delta,2,y2,constant,,-inf,inf -delta,2,y2,x1,,-inf,inf -delta,2,y3,constant,,-inf,inf -delta,2,y3,x1,,-inf,inf -delta,2,y4,constant,,-inf,inf -delta,2,y4,x1,,-inf,inf -delta,2,y5,constant,,-inf,inf -delta,2,y5,x1,,-inf,inf -delta,2,y6,constant,,-inf,inf -delta,2,y6,x1,,-inf,inf -delta,2,Q1_fac1,constant,,-inf,inf -delta,2,Q1_fac1,x1,,-inf,inf -delta,3,y1,constant,,-inf,inf -delta,3,y1,x1,,-inf,inf -delta,3,y2,constant,,-inf,inf -delta,3,y2,x1,,-inf,inf -delta,3,y3,constant,,-inf,inf -delta,3,y3,x1,,-inf,inf -delta,3,y4,constant,,-inf,inf -delta,3,y4,x1,,-inf,inf -delta,3,y5,constant,,-inf,inf -delta,3,y5,x1,,-inf,inf -delta,3,y6,constant,,-inf,inf -delta,3,y6,x1,,-inf,inf -delta,3,Q1_fac1,constant,,-inf,inf -delta,3,Q1_fac1,x1,,-inf,inf -delta,4,y1,constant,,-inf,inf -delta,4,y1,x1,,-inf,inf -delta,4,y2,constant,,-inf,inf -delta,4,y2,x1,,-inf,inf 
-delta,4,y3,constant,,-inf,inf -delta,4,y3,x1,,-inf,inf -delta,4,y4,constant,,-inf,inf -delta,4,y4,x1,,-inf,inf -delta,4,y5,constant,,-inf,inf -delta,4,y5,x1,,-inf,inf -delta,4,y6,constant,,-inf,inf -delta,4,y6,x1,,-inf,inf -delta,4,Q1_fac1,constant,,-inf,inf -delta,4,Q1_fac1,x1,,-inf,inf -delta,5,y1,constant,,-inf,inf -delta,5,y1,x1,,-inf,inf -delta,5,y2,constant,,-inf,inf -delta,5,y2,x1,,-inf,inf -delta,5,y3,constant,,-inf,inf -delta,5,y3,x1,,-inf,inf -delta,5,y4,constant,,-inf,inf -delta,5,y4,x1,,-inf,inf -delta,5,y5,constant,,-inf,inf -delta,5,y5,x1,,-inf,inf -delta,5,y6,constant,,-inf,inf -delta,5,y6,x1,,-inf,inf -delta,5,Q1_fac1,constant,,-inf,inf -delta,5,Q1_fac1,x1,,-inf,inf -delta,6,y1,constant,,-inf,inf -delta,6,y1,x1,,-inf,inf -delta,6,y2,constant,,-inf,inf -delta,6,y2,x1,,-inf,inf -delta,6,y3,constant,,-inf,inf -delta,6,y3,x1,,-inf,inf -delta,6,y4,constant,,-inf,inf -delta,6,y4,x1,,-inf,inf -delta,6,y5,constant,,-inf,inf -delta,6,y5,x1,,-inf,inf -delta,6,y6,constant,,-inf,inf -delta,6,y6,x1,,-inf,inf -delta,6,Q1_fac1,constant,,-inf,inf -delta,6,Q1_fac1,x1,,-inf,inf -delta,7,y1,constant,,-inf,inf -delta,7,y1,x1,,-inf,inf -delta,7,y2,constant,,-inf,inf -delta,7,y2,x1,,-inf,inf -delta,7,y3,constant,,-inf,inf -delta,7,y3,x1,,-inf,inf -delta,7,y4,constant,,-inf,inf -delta,7,y4,x1,,-inf,inf -delta,7,y5,constant,,-inf,inf -delta,7,y5,x1,,-inf,inf -delta,7,y6,constant,,-inf,inf -delta,7,y6,x1,,-inf,inf -delta,7,Q1_fac1,constant,,-inf,inf -delta,7,Q1_fac1,x1,,-inf,inf -loading,0,y2,fac1,,-inf,inf -loading,0,y3,fac1,,-inf,inf -loading,0,y5,fac2,,-inf,inf -loading,0,y6,fac2,,-inf,inf -loading,0,y8,fac3,,-inf,inf -loading,0,y9,fac3,,-inf,inf -loading,0,Q1_fac1,fac1,,-inf,inf -loading,1,y2,fac1,,-inf,inf -loading,1,y3,fac1,,-inf,inf -loading,1,y5,fac2,,-inf,inf -loading,1,y6,fac2,,-inf,inf -loading,1,Q1_fac1,fac1,,-inf,inf -loading,2,y2,fac1,,-inf,inf -loading,2,y3,fac1,,-inf,inf -loading,2,y5,fac2,,-inf,inf -loading,2,y6,fac2,,-inf,inf 
-loading,2,Q1_fac1,fac1,,-inf,inf -loading,3,y2,fac1,,-inf,inf -loading,3,y3,fac1,,-inf,inf -loading,3,y5,fac2,,-inf,inf -loading,3,y6,fac2,,-inf,inf -loading,3,Q1_fac1,fac1,,-inf,inf -loading,4,y2,fac1,,-inf,inf -loading,4,y3,fac1,,-inf,inf -loading,4,y5,fac2,,-inf,inf -loading,4,y6,fac2,,-inf,inf -loading,4,Q1_fac1,fac1,,-inf,inf -loading,5,y2,fac1,,-inf,inf -loading,5,y3,fac1,,-inf,inf -loading,5,y5,fac2,,-inf,inf -loading,5,y6,fac2,,-inf,inf -loading,5,Q1_fac1,fac1,,-inf,inf -loading,6,y2,fac1,,-inf,inf -loading,6,y3,fac1,,-inf,inf -loading,6,y5,fac2,,-inf,inf -loading,6,y6,fac2,,-inf,inf -loading,6,Q1_fac1,fac1,,-inf,inf -loading,7,y2,fac1,,-inf,inf -loading,7,y3,fac1,,-inf,inf -loading,7,y5,fac2,,-inf,inf -loading,7,y6,fac2,,-inf,inf -loading,7,Q1_fac1,fac1,,-inf,inf -meas_sd,0,y1,-,,-inf,inf -meas_sd,0,y2,-,,-inf,inf -meas_sd,0,y3,-,,-inf,inf -meas_sd,0,y4,-,,-inf,inf -meas_sd,0,y5,-,,-inf,inf -meas_sd,0,y6,-,,-inf,inf -meas_sd,0,y7,-,,-inf,inf -meas_sd,0,y8,-,,-inf,inf -meas_sd,0,y9,-,,-inf,inf -meas_sd,0,Q1_fac1,-,,-inf,inf -meas_sd,1,y1,-,,-inf,inf -meas_sd,1,y2,-,,-inf,inf -meas_sd,1,y3,-,,-inf,inf -meas_sd,1,y4,-,,-inf,inf -meas_sd,1,y5,-,,-inf,inf -meas_sd,1,y6,-,,-inf,inf -meas_sd,1,Q1_fac1,-,,-inf,inf -meas_sd,2,y1,-,,-inf,inf -meas_sd,2,y2,-,,-inf,inf -meas_sd,2,y3,-,,-inf,inf -meas_sd,2,y4,-,,-inf,inf -meas_sd,2,y5,-,,-inf,inf -meas_sd,2,y6,-,,-inf,inf -meas_sd,2,Q1_fac1,-,,-inf,inf -meas_sd,3,y1,-,,-inf,inf -meas_sd,3,y2,-,,-inf,inf -meas_sd,3,y3,-,,-inf,inf -meas_sd,3,y4,-,,-inf,inf -meas_sd,3,y5,-,,-inf,inf -meas_sd,3,y6,-,,-inf,inf -meas_sd,3,Q1_fac1,-,,-inf,inf -meas_sd,4,y1,-,,-inf,inf -meas_sd,4,y2,-,,-inf,inf -meas_sd,4,y3,-,,-inf,inf -meas_sd,4,y4,-,,-inf,inf -meas_sd,4,y5,-,,-inf,inf -meas_sd,4,y6,-,,-inf,inf -meas_sd,4,Q1_fac1,-,,-inf,inf -meas_sd,5,y1,-,,-inf,inf -meas_sd,5,y2,-,,-inf,inf -meas_sd,5,y3,-,,-inf,inf -meas_sd,5,y4,-,,-inf,inf -meas_sd,5,y5,-,,-inf,inf -meas_sd,5,y6,-,,-inf,inf -meas_sd,5,Q1_fac1,-,,-inf,inf 
-meas_sd,6,y1,-,,-inf,inf -meas_sd,6,y2,-,,-inf,inf -meas_sd,6,y3,-,,-inf,inf -meas_sd,6,y4,-,,-inf,inf -meas_sd,6,y5,-,,-inf,inf -meas_sd,6,y6,-,,-inf,inf -meas_sd,6,Q1_fac1,-,,-inf,inf -meas_sd,7,y1,-,,-inf,inf -meas_sd,7,y2,-,,-inf,inf -meas_sd,7,y3,-,,-inf,inf -meas_sd,7,y4,-,,-inf,inf -meas_sd,7,y5,-,,-inf,inf -meas_sd,7,y6,-,,-inf,inf -meas_sd,7,Q1_fac1,-,,-inf,inf -shock_variance,0,fac1,-,,-inf,inf -shock_variance,0,fac2,-,,-inf,inf -initial_mean,0,mixture_0,fac1,,-inf,inf -initial_mean,0,mixture_0,fac2,,-inf,inf -initial_mean,0,mixture_0,fac3,,-inf,inf -initial_cov,0,mixture_0,fac1-fac1,,-inf,inf -initial_cov,0,mixture_0,fac2-fac1,,-inf,inf -initial_cov,0,mixture_0,fac2-fac2,,-inf,inf -initial_cov,0,mixture_0,fac3-fac1,,-inf,inf -initial_cov,0,mixture_0,fac3-fac2,,-inf,inf -initial_cov,0,mixture_0,fac3-fac3,,-inf,inf -trans,0,fac1,fac1,,-inf,inf -trans,0,fac1,fac2,,-inf,inf -trans,0,fac1,fac3,,-inf,inf -trans,0,fac1,phi,,-inf,inf -trans,0,fac2,fac2,,-inf,inf -trans,0,fac2,constant,,-inf,inf diff --git a/pixi.lock b/pixi.lock index ea662f25..761c5859 100644 --- a/pixi.lock +++ b/pixi.lock @@ -14,13 +14,13 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/_python_abi3_support-1.0-hd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/anyio-4.12.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/argon2-cffi-25.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-bindings-25.1.0-py313h07c4f96_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-bindings-25.1.0-py314h5bd0f2a_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.0.5-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda - 
conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.17.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/backports.zstd-1.3.0-py313h18e8e13_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/binutils_impl_linux-64-2.45-default_hfdba357_105.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/binutils_linux-64-2.45-default_h4852527_105.conda @@ -28,20 +28,19 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-with-css-6.3.0-h5f6438b_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-1.2.0-hed03a55_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-bin-1.2.0-hb03c661_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py313hf159716_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py314h3de4e8d_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-hda65f42_8.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.1.4-hbd8a1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py313hf46b229_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py314h4a8dc5f_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/comm-0.2.3-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.3.3-py313h7037e92_3.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.13.11-py313hd8ed1ab_100.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.3.3-py314h9891dd4_3.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.2-py314hd8ed1ab_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-cccl_linux-64-12.9.27-ha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-crt-dev_linux-64-12.9.86-ha770c72_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-crt-tools-12.9.86-ha770c72_2.conda @@ -62,15 +61,13 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-nvvm-tools-12.9.86-h4bc722e_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-version-12.9-h4f385c5_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cycler-0.12.1-pyhcf101f3_2.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py313h5d5ffb9_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py314h42812f9_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/exceptiongroup-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - 
conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/fonttools-4.61.1-py313h3dea7bd_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/freetype-2.14.1-ha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/gcc_impl_linux-64-14.3.0-he8b2097_16.conda @@ -84,7 +81,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/icu-78.1-h33c6efd_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/identify-2.6.15-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda @@ -110,7 +106,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_server-2.28.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/kernel-headers_linux-64-4.18.0-he073ed8_9.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/keyutils-1.6.3-hb9d3cd8_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.4.9-py313hc8edb43_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.4.9-py314h97ea11e_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/krb5-1.21.3-h659f571_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/lcms2-2.17-h717163a_0.conda @@ -153,8 
+149,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/libxcb-1.17.0-h8a09558_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.3.1-hb9d3cd8_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/logistro-2.0.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/markupsafe-3.0.3-py313h3dea7bd_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py313h683a580_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/markupsafe-3.0.3-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py314h1194b4b_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/munkres-1.1.4-pyhd8ed1ab_1.conda @@ -165,25 +161,24 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/ncurses-6.5-h2d0b736_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/nest-asyncio-1.6.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py313hf6604e3_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py314h2b28147_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/openjpeg-2.5.4-h55fea9a_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/openssl-3.6.0-h26f9b46_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/orjson-3.11.5-py313h541fbb8_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/linux-64/orjson-3.11.5-py314h3b757c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/packaging-25.0-pyh29332c3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pandocfilters-1.5.0-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/parso-0.8.5-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pexpect-4.9.0-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/pillow-12.1.0-py313h80991f8_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/pillow-12.1.0-py314h8ec4b1a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/prek-0.3.0-hb17b654_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py313h54dd161_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py314h0f05182_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/pthread-stubs-0.4-hb9d3cd8_1002.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ptyprocess-0.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pure_eval-0.2.3-pyhd8ed1ab_1.conda @@ -194,16 +189,16 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/pysocks-1.7.1-pyha55dd90_7.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/pytest-9.0.2-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-timeout-2.4.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.13.11-hc97d973_100_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.14.2-h32b2ec7_100_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhe01879c_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-fastjsonschema-2.21.2-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.13.11-h4df99d1_100.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.2-h4df99d1_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-kaleido-1.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2025.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.13-8_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytz-2025.2-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/pyyaml-6.0.3-py313h3dea7bd_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyyaml-6.0.3-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/pyzmq-27.1.0-py312hfb55c3c_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/qhull-2020.2-h434a139_5.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/readline-8.3-h853b02a_0.conda @@ -212,11 +207,11 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3339-validator-0.1.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 - 
conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py313h843e2db_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.16.3-py313h4b8bb8b_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py314h2e6c369_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.16.3-py314hf07bd8e_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.0.0-pyha191276_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-80.9.0-pyhff2d567_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/simplejson-3.20.2-py313h07c4f96_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/simplejson-3.20.2-py314h5bd0f2a_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.1-pyhd8ed1ab_0.conda @@ -226,16 +221,15 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_ha0e22de_103.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/tornado-6.5.3-py313h07c4f96_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/tornado-6.5.3-py314h5bd0f2a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing-extensions-4.15.0-h396c80c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/ukkonen-1.0.1-py313h7037e92_6.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/unicodedata2-17.0.0-py314h5bd0f2a_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -251,13 +245,13 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/fd/8e/424b8c6e78bd9837d14ff7df01a9829fc883ba2ab4ea787d4f848435f23f/greenlet-3.3.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/b8/14/bab308fc2c1b5228c3224ec2bf928ce2e4d21d8046c161e44a2012b5203e/greenlet-3.3.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: 
https://files.pythonhosted.org/packages/53/f2/44ad0ce1d115f0f6be10f4af0ca05a18afb838b06e6ca6b01ba4b0137421/jax_cuda12_pjrt-0.8.2-py3-none-manylinux_2_27_x86_64.whl - - pypi: https://files.pythonhosted.org/packages/1c/38/4ba2486f95fcf2120723932feacdded438e785258148b18a703cd1177e41/jax_cuda12_plugin-0.8.2-cp313-cp313-manylinux_2_27_x86_64.whl - - pypi: https://files.pythonhosted.org/packages/6b/e0/91e5762a7ddb6351b07c742ca407cd28e26043d6945d6228b6c1b0881a45/jaxlib-0.8.2-cp313-cp313-manylinux_2_27_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/27/58/a5a27d4677d6890570f7e58cecd51891469cb620e6f64c8faed4935d93d0/jax_cuda12_plugin-0.8.2-cp314-cp314-manylinux_2_27_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/5e/27/2e6032727e41ce74914277478021140947af59127d68aa9e6f3776b428fd/jaxlib-0.8.2-cp314-cp314-manylinux_2_27_x86_64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/eb/33/40cd74219417e78b97c47802037cf2d87b91973e18bb968a7da48a96ea44/ml_dtypes-0.5.4-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/c6/bb/82c7dcf38070b46172a517e2334e665c5bf374a262f99a283ea454bece7c/ml_dtypes-0.5.4-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/77/3c/aa88abe01f3be3d1f8f787d1d33dc83e76fec05945f9a28fbb41cfb99cd5/nvidia_cublas_cu12-12.9.1.4-py3-none-manylinux_2_27_x86_64.whl - pypi: https://files.pythonhosted.org/packages/18/2a/d4cd8506d2044e082f8cd921be57392e6a9b5ccd3ffdf050362430a3d5d5/nvidia_cuda_cccl_cu12-12.9.27-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl - pypi: https://files.pythonhosted.org/packages/c1/2e/b84e32197e33f39907b455b83395a017e697c07a449a2b15fd07fc1c9981/nvidia_cuda_cupti_cu12-12.9.79-py3-none-manylinux_2_25_x86_64.whl @@ -272,10 +266,10 @@ environments: - pypi: 
https://files.pythonhosted.org/packages/46/0c/c75bbfb967457a0b7670b8ad267bfc4fffdf341c074e0a80db06c24ccfd4/nvidia_nvjitlink_cu12-12.9.86-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl - pypi: https://files.pythonhosted.org/packages/64/b9/6ab941001c23cfb43499b5b0b7417b0bb4dfba3a29ffa2b06985422dad50/nvidia_nvshmem_cu12-3.5.19-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - - pypi: https://files.pythonhosted.org/packages/15/07/284f757f63f8a8d69ed4472bfd85122bd086e637bf4ed09de572d575a693/pandas-2.3.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#522b8c9a21226569ffd25e950e44f0c5de308c9d + - pypi: https://files.pythonhosted.org/packages/15/b2/0e62f78c0c5ba7e3d2c5945a82456f4fac76c480940f805e0b97fcbc2f65/pandas-2.3.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/0e/50/80a8d080ac7d3d321e5e5d420c9a522b0aa770ec7013ea91f9a8b7d36e4a/sqlalchemy-2.0.45-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/b3/27/caf606ee924282fe4747ee4fd454b335a72a6e018f97eab5ff7f28199e16/sqlalchemy-2.0.45-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl - pypi: ./ default: @@ -292,42 +286,39 @@ environments: - conda: 
https://conda.anaconda.org/conda-forge/noarch/_python_abi3_support-1.0-hd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/anyio-4.12.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/argon2-cffi-25.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-bindings-25.1.0-py313h07c4f96_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-bindings-25.1.0-py314h5bd0f2a_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.0.5-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.17.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/backports.zstd-1.3.0-py313h18e8e13_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-6.3.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-with-css-6.3.0-h5f6438b_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-1.2.0-hed03a55_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-bin-1.2.0-hb03c661_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py313hf159716_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py314h3de4e8d_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-hda65f42_8.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.1.4-hbd8a1cb_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py313hf46b229_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py314h4a8dc5f_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/comm-0.2.3-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.3.3-py313h7037e92_3.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.13.11-py313hd8ed1ab_100.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.3.3-py314h9891dd4_3.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.2-py314hd8ed1ab_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cycler-0.12.1-pyhcf101f3_2.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py313h5d5ffb9_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py314h42812f9_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/exceptiongroup-1.3.1-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/fonttools-4.61.1-py313h3dea7bd_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/freetype-2.14.1-ha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda @@ -337,7 +328,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/icu-78.1-h33c6efd_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/identify-2.6.15-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda @@ -362,7 +352,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_pygments-0.3.0-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_server-2.28.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/keyutils-1.6.3-hb9d3cd8_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.4.9-py313hc8edb43_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.4.9-py314h97ea11e_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/krb5-1.21.3-h659f571_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/lcms2-2.17-h717163a_0.conda @@ -400,8 +390,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/libxcb-1.17.0-h8a09558_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.3.1-hb9d3cd8_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/logistro-2.0.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/markupsafe-3.0.3-py313h3dea7bd_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py313h683a580_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/markupsafe-3.0.3-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py314h1194b4b_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/munkres-1.1.4-pyhd8ed1ab_1.conda @@ -412,25 +402,24 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/ncurses-6.5-h2d0b736_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/nest-asyncio-1.6.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py313hf6604e3_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py314h2b28147_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/openjpeg-2.5.4-h55fea9a_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/openssl-3.6.0-h26f9b46_0.conda - - conda: 
https://conda.anaconda.org/conda-forge/linux-64/orjson-3.11.5-py313h541fbb8_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/orjson-3.11.5-py314h3b757c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/packaging-25.0-pyh29332c3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pandocfilters-1.5.0-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/parso-0.8.5-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pexpect-4.9.0-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/pillow-12.1.0-py313h80991f8_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/pillow-12.1.0-py314h8ec4b1a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/prek-0.3.0-hb17b654_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py313h54dd161_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py314h0f05182_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/pthread-stubs-0.4-hb9d3cd8_1002.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ptyprocess-0.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pure_eval-0.2.3-pyhd8ed1ab_1.conda @@ -441,16 +430,16 @@ environments: - conda: 
https://conda.anaconda.org/conda-forge/noarch/pysocks-1.7.1-pyha55dd90_7.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-9.0.2-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-timeout-2.4.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.13.11-hc97d973_100_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.14.2-h32b2ec7_100_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhe01879c_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-fastjsonschema-2.21.2-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.13.11-h4df99d1_100.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.2-h4df99d1_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-kaleido-1.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2025.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.13-8_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytz-2025.2-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/pyyaml-6.0.3-py313h3dea7bd_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyyaml-6.0.3-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/pyzmq-27.1.0-py312hfb55c3c_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/qhull-2020.2-h434a139_5.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/readline-8.3-h853b02a_0.conda @@ -459,11 +448,11 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3339-validator-0.1.4-pyhd8ed1ab_1.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py313h843e2db_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.16.3-py313h4b8bb8b_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py314h2e6c369_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.16.3-py314hf07bd8e_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.0.0-pyha191276_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-80.9.0-pyhff2d567_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/simplejson-3.20.2-py313h07c4f96_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/simplejson-3.20.2-py314h5bd0f2a_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.1-pyhd8ed1ab_0.conda @@ -472,16 +461,15 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_ha0e22de_103.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/tornado-6.5.3-py313h07c4f96_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/tornado-6.5.3-py314h5bd0f2a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing-extensions-4.15.0-h396c80c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/ukkonen-1.0.1-py313h7037e92_6.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/unicodedata2-17.0.0-py314h5bd0f2a_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -497,16 +485,16 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/fd/8e/424b8c6e78bd9837d14ff7df01a9829fc883ba2ab4ea787d4f848435f23f/greenlet-3.3.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/b8/14/bab308fc2c1b5228c3224ec2bf928ce2e4d21d8046c161e44a2012b5203e/greenlet-3.3.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - - pypi: 
https://files.pythonhosted.org/packages/6b/e0/91e5762a7ddb6351b07c742ca407cd28e26043d6945d6228b6c1b0881a45/jaxlib-0.8.2-cp313-cp313-manylinux_2_27_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/5e/27/2e6032727e41ce74914277478021140947af59127d68aa9e6f3776b428fd/jaxlib-0.8.2-cp314-cp314-manylinux_2_27_x86_64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/eb/33/40cd74219417e78b97c47802037cf2d87b91973e18bb968a7da48a96ea44/ml_dtypes-0.5.4-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/c6/bb/82c7dcf38070b46172a517e2334e665c5bf374a262f99a283ea454bece7c/ml_dtypes-0.5.4-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - - pypi: https://files.pythonhosted.org/packages/15/07/284f757f63f8a8d69ed4472bfd85122bd086e637bf4ed09de572d575a693/pandas-2.3.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#522b8c9a21226569ffd25e950e44f0c5de308c9d + - pypi: https://files.pythonhosted.org/packages/15/b2/0e62f78c0c5ba7e3d2c5945a82456f4fac76c480940f805e0b97fcbc2f65/pandas-2.3.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/0e/50/80a8d080ac7d3d321e5e5d420c9a522b0aa770ec7013ea91f9a8b7d36e4a/sqlalchemy-2.0.45-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl + - pypi: 
https://files.pythonhosted.org/packages/b3/27/caf606ee924282fe4747ee4fd454b335a72a6e018f97eab5ff7f28199e16/sqlalchemy-2.0.45-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl - pypi: ./ osx-arm64: @@ -515,42 +503,39 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/anyio-4.12.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/appnope-0.1.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/argon2-cffi-25.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/argon2-cffi-bindings-25.1.0-py313h6535dbc_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/argon2-cffi-bindings-25.1.0-py314h0612a62_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.0.5-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.17.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/backports.zstd-1.3.0-py313h48bb75e_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-6.3.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-with-css-6.3.0-h5f6438b_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-1.2.0-h7d5ae5b_1.conda - conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/brotli-bin-1.2.0-hc919400_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-python-1.2.0-py313hde1f3bb_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-python-1.2.0-py314h3daef5d_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/bzip2-1.0.8-hd037594_8.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.1.4-hbd8a1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/cffi-2.0.0-py313h224173a_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/cffi-2.0.0-py314h44086f9_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/comm-0.2.3-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/contourpy-1.3.3-py313ha61f8ec_3.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.13.11-py313hd8ed1ab_100.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/contourpy-1.3.3-py314h784bc60_3.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.2-py314hd8ed1ab_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cycler-0.12.1-pyhcf101f3_2.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/debugpy-1.8.19-py313hc37fe24_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/debugpy-1.8.19-py314hf820bb6_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/exceptiongroup-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/fonttools-4.61.1-py313h7d74516_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/freetype-2.14.1-hce30654_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda @@ -559,7 +544,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/identify-2.6.15-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda @@ -583,7 +567,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab-4.5.1-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/jupyterlab_pygments-0.3.0-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_server-2.28.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/kiwisolver-1.4.9-py313h7add70c_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/kiwisolver-1.4.9-py314h42813c9_2.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/krb5-1.21.3-h237132a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/lcms2-2.17-h7eeda09_0.conda @@ -617,8 +601,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libzlib-1.3.1-h8359307_2.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/llvm-openmp-21.1.8-h4a912ad_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/logistro-2.0.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/markupsafe-3.0.3-py313h7d74516_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/matplotlib-base-3.10.8-py313h58042b9_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/markupsafe-3.0.3-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/matplotlib-base-3.10.8-py314hd63e3f0_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/munkres-1.1.4-pyhd8ed1ab_1.conda @@ -629,47 +613,46 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ncurses-6.5-h5e97a16_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/nest-asyncio-1.6.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - - conda: 
https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/numpy-2.3.5-py313h16eae64_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/numpy-2.3.5-py314hae46ccb_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/openjpeg-2.5.4-hbfb3c88_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/openssl-3.6.0-h5503f6c_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/orjson-3.11.5-py313hfea8034_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/orjson-3.11.5-py314hda6d10a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/packaging-25.0-pyh29332c3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pandocfilters-1.5.0-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/parso-0.8.5-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pexpect-4.9.0-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pillow-12.1.0-py313h45e5a15_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pillow-12.1.0-py314hab283cf_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/prek-0.3.0-h6fdd925_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/psutil-7.2.1-py313h6688731_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/psutil-7.2.1-py314ha14b1ff_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pthread-stubs-0.4-hd74edd7_1002.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ptyprocess-0.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pure_eval-0.2.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pybaum-0.1.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pycparser-2.22-pyh29332c3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pygments-2.19.2-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-core-12.1-py313h40b429f_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-framework-cocoa-12.1-py313hcc5defa_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-core-12.1-py314h3a4d195_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-framework-cocoa-12.1-py314h36abed7_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pyparsing-3.3.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pysocks-1.7.1-pyha55dd90_7.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-9.0.2-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-timeout-2.4.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.13.11-hfc2f54d_100_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.14.2-h40d2674_100_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhe01879c_2.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/python-fastjsonschema-2.21.2-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.13.11-h4df99d1_100.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.2-h4df99d1_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-kaleido-1.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2025.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.13-8_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytz-2025.2-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyyaml-6.0.3-py313h7d74516_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyyaml-6.0.3-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyzmq-27.1.0-py312hd65ceae_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/qhull-2020.2-h420ef59_5.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/readline-8.3-h46df422_0.conda @@ -678,11 +661,11 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3339-validator-0.1.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/rpds-py-0.30.0-py313h2c089d5_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/scipy-1.16.3-py313h29d7d31_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/rpds-py-0.30.0-py314haad56a0_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/scipy-1.16.3-py314h725efaa_2.conda - 
conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.0.0-pyh5552912_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-80.9.0-pyhff2d567_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/simplejson-3.20.2-py313h6535dbc_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/simplejson-3.20.2-py314h0612a62_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.1-pyhd8ed1ab_0.conda @@ -691,16 +674,15 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tk-8.6.13-h892fb3f_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tornado-6.5.4-py313h6535dbc_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tornado-6.5.4-py314h0612a62_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing-extensions-4.15.0-h396c80c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ukkonen-1.0.1-py313hc50a443_6.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/unicodedata2-17.0.0-py314h0612a62_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - - conda: 
https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -717,12 +699,12 @@ environments: - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/c5/22/c0ec75e43a13b2457d78d509f49b49a57fa302ffced4f4a2778e428cb0a6/jaxlib-0.8.2-cp313-cp313-macosx_11_0_arm64.whl + - pypi: https://files.pythonhosted.org/packages/d8/9d/dca93d916bf8664d7a2bb73ea3d219028dabbe382c31774348963287356a/jaxlib-0.8.2-cp314-cp314-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/d9/a1/4008f14bbc616cfb1ac5b39ea485f9c63031c4634ab3f4cf72e7541f816a/ml_dtypes-0.5.4-cp313-cp313-macosx_10_13_universal2.whl + - pypi: https://files.pythonhosted.org/packages/72/4e/1339dc6e2557a344f5ba5590872e80346f76f6cb2ac3dd16e4666e88818c/ml_dtypes-0.5.4-cp314-cp314-macosx_10_13_universal2.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - - pypi: 
https://files.pythonhosted.org/packages/31/94/72fac03573102779920099bcac1c3b05975c2cb5f01eac609faf34bed1ca/pandas-2.3.3-cp313-cp313-macosx_11_0_arm64.whl + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#522b8c9a21226569ffd25e950e44f0c5de308c9d + - pypi: https://files.pythonhosted.org/packages/21/00/266d6b357ad5e6d3ad55093a7e8efc7dd245f5a842b584db9f30b0f0a287/pandas-2.3.3-cp314-cp314-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/bf/e1/3ccb13c643399d22289c6a9786c1a91e3dcbb68bce4beb44926ac2c557bf/sqlalchemy-2.0.45-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl @@ -732,42 +714,39 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/_python_abi3_support-1.0-hd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/anyio-4.12.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/argon2-cffi-25.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/argon2-cffi-bindings-25.1.0-py313h5ea7bf4_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/argon2-cffi-bindings-25.1.0-py314h5a2d7ad_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.0.5-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.17.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/backports.zstd-1.3.0-py313h2a31948_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-6.3.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-with-css-6.3.0-h5f6438b_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/brotli-1.2.0-h2d644bc_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/brotli-bin-1.2.0-hfd05255_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/brotli-python-1.2.0-py313h3ebfc14_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/brotli-python-1.2.0-py314he701e3d_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/bzip2-1.0.8-h0ad9c76_8.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.1.4-h4c7d964_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/cffi-2.0.0-py313h5ea7bf4_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/cffi-2.0.0-py314h5a2d7ad_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/comm-0.2.3-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/contourpy-1.3.3-py313hf069bd2_3.conda - - conda: 
https://conda.anaconda.org/conda-forge/noarch/cpython-3.13.11-py313hd8ed1ab_100.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/contourpy-1.3.3-py314h909e829_3.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.2-py314hd8ed1ab_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cycler-0.12.1-pyhcf101f3_2.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/debugpy-1.8.19-py313h927ade5_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/debugpy-1.8.19-py314hb98de8c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/exceptiongroup-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/fonttools-4.61.1-py313hd650c13_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/freetype-2.14.1-h57928b3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda @@ -777,7 +756,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/icu-78.1-h637d24d_0.conda - - conda: 
https://conda.anaconda.org/conda-forge/noarch/identify-2.6.15-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda @@ -801,7 +779,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab-4.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_pygments-0.3.0-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_server-2.28.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/kiwisolver-1.4.9-py313h1a38498_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/kiwisolver-1.4.9-py314hf309875_2.conda - conda: https://conda.anaconda.org/conda-forge/win-64/krb5-1.21.3-hdf4eb48_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/lcms2-2.17-hbcf6048_0.conda @@ -818,7 +796,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/libfreetype6-2.14.1-hdbac1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libgcc-15.2.0-h8ee18e1_16.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libgomp-15.2.0-h8ee18e1_16.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/libhwloc-2.12.1-default_h4379cf1_1003.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libhwloc-2.12.2-default_h4379cf1_1000.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libiconv-1.18-hc1393d2_2.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libjpeg-turbo-3.1.2-hfd05255_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/liblapack-3.11.0-5_hf9ab0e9_mkl.conda @@ -836,8 +814,8 @@ environments: - conda: 
https://conda.anaconda.org/conda-forge/win-64/libzlib-1.3.1-h2466b09_2.conda - conda: https://conda.anaconda.org/conda-forge/win-64/llvm-openmp-21.1.8-h4fa8253_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/logistro-2.0.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/markupsafe-3.0.3-py313hd650c13_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/matplotlib-base-3.10.8-py313he1ded55_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/markupsafe-3.0.3-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/matplotlib-base-3.10.8-py314hfa45d96_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/mkl-2025.3.0-hac47afa_455.conda @@ -848,24 +826,23 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/nbformat-5.10.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/nest-asyncio-1.6.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/numpy-2.3.5-py313hce7ae62_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/numpy-2.3.5-py314h06c3c77_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/openjpeg-2.5.4-h24db6dd_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/openssl-3.6.0-h725018a_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/orjson-3.11.4-py313hfbe8231_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/orjson-3.11.5-py314h64f83cb_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/packaging-25.0-pyh29332c3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pandocfilters-1.5.0-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/parso-0.8.5-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/pillow-12.1.0-py313h38f99e1_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/pillow-12.1.0-py314h61b30b5_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/prek-0.3.0-h18a1a76_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/psutil-7.2.1-py313h5fd188c_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/psutil-7.2.1-py314hc5dbbe4_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/pthread-stubs-0.4-h0e40799_1002.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pure_eval-0.2.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pybaum-0.1.3-pyhd8ed1ab_1.conda @@ -875,18 +852,18 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/pysocks-1.7.1-pyh09c184e_7.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-9.0.2-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-timeout-2.4.0-pyhd8ed1ab_0.conda - - conda: 
https://conda.anaconda.org/conda-forge/win-64/python-3.13.11-h09917c8_100_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/python-3.14.2-h4b44e0e_100_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhe01879c_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-fastjsonschema-2.21.2-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.13.11-h4df99d1_100.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.2-h4df99d1_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-kaleido-1.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2025.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.13-8_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytz-2025.2-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/pywin32-311-py313h40c08fc_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/pywinpty-2.0.15-py313h5813708_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/pyyaml-6.0.3-py313hd650c13_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/pywin32-311-py314h8f8f202_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/pywinpty-2.0.15-py314h51f0985_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyyaml-6.0.3-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/pyzmq-27.1.0-py312hbb5da91_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/qhull-2020.2-hc790b64_5.conda - conda: https://conda.anaconda.org/conda-forge/noarch/referencing-0.37.0-pyhcf101f3_0.conda @@ -894,34 +871,33 @@ environments: - conda: 
https://conda.anaconda.org/conda-forge/noarch/rfc3339-validator-0.1.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/rpds-py-0.30.0-py313hfbe8231_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/scipy-1.16.3-py313he51e9a2_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/rpds-py-0.30.0-py314h9f07db2_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/scipy-1.16.3-py314h221f224_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.0.0-pyh6dadd2b_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-80.9.0-pyhff2d567_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/simplejson-3.20.2-py313h5ea7bf4_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/simplejson-3.20.2-py314h5a2d7ad_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/tbb-2022.3.0-hd094cb3_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/tbb-2022.3.0-h3155e25_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyh6dadd2b_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/tk-8.6.13-h2c6b04d_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda - - conda: 
https://conda.anaconda.org/conda-forge/win-64/tornado-6.5.4-py313h5ea7bf4_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/tornado-6.5.4-py314h5a2d7ad_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing-extensions-4.15.0-h396c80c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/ucrt-10.0.26100.0-h57928b3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/ukkonen-1.0.1-py313hf069bd2_6.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/unicodedata2-17.0.0-py314h5a2d7ad_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/vc-14.3-h41ae7f8_34.conda - conda: https://conda.anaconda.org/conda-forge/win-64/vc14_runtime-14.44.35208-h818238b_34.conda - conda: https://conda.anaconda.org/conda-forge/win-64/vcomp14-14.44.35208-h818238b_34.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -939,17 +915,686 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: 
https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/7e/71/ba21c3fb8c5dce83b8c01f458a42e99ffdb1963aeec08fff5a18588d8fd7/greenlet-3.3.0-cp313-cp313-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/7c/9a/9030e6f9aa8fd7808e9c31ba4c38f87c4f8ec324ee67431d181fe396d705/greenlet-3.3.0-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/85/68/25b38673b07a808616ce7b6efb3eed491f983f3373a09cbbd03f67178563/jaxlib-0.8.2-cp313-cp313-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/b3/8c/af5a00b07a446414edf6b84a7397eab02cf01ba44b6ae1fce7798ce4c127/jaxlib-0.8.2-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/e1/8b/200088c6859d8221454825959df35b5244fa9bdf263fd0249ac5fb75e281/ml_dtypes-0.5.4-cp313-cp313-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/e9/93/2bfed22d2498c468f6bcd0d9f56b033eaa19f33320389314c19ef6766413/ml_dtypes-0.5.4-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - - pypi: https://files.pythonhosted.org/packages/4f/c7/e54682c96a895d0c808453269e0b5928a07a127a15704fedb643e9b0a4c8/pandas-2.3.3-cp313-cp313-win_amd64.whl + - pypi: 
git+https://github.com/optimagic-dev/optimagic.git?branch=main#522b8c9a21226569ffd25e950e44f0c5de308c9d + - pypi: https://files.pythonhosted.org/packages/a6/3d/124ac75fcd0ecc09b8fdccb0246ef65e35b012030defb0e0eba2cbbbe948/pandas-2.3.3-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/5a/dc/491b7661614ab97483abf2056be1deee4dc2490ecbf7bff9ab5cdbac86e1/pyreadline3-3.5.4-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/c0/c5/d17113020b2d43073412aeca09b60d2009442420372123b8d49cc253f8b8/sqlalchemy-2.0.45-cp313-cp313-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/89/a2/0e1590e9adb292b1d576dbcf67ff7df8cf55e56e78d2c927686d01080f4b/sqlalchemy-2.0.45-cp314-cp314-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl + - pypi: ./ + docs: + channels: + - url: https://conda.anaconda.org/conda-forge/ + indexes: + - https://pypi.org/simple + options: + pypi-prerelease-mode: if-necessary-or-explicit + packages: + linux-64: + - conda: https://conda.anaconda.org/conda-forge/linux-64/_libgcc_mutex-0.1-conda_forge.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/linux-64/_openmp_mutex-4.5-2_gnu.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/noarch/_python_abi3_support-1.0-hd8ed1ab_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/anyio-4.12.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/argon2-cffi-25.1.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-bindings-25.1.0-py314h5bd0f2a_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda + - 
conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.0.5-pyh29332c3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.17.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-6.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-with-css-6.3.0-h5f6438b_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-1.2.0-hed03a55_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-bin-1.2.0-hb03c661_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py314h3de4e8d_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-hda65f42_8.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.1.4-hbd8a1cb_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py314h4a8dc5f_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/comm-0.2.3-pyhe01879c_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.3.3-py314h9891dd4_3.conda + - conda: 
https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.2-py314hd8ed1ab_100.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cycler-0.12.1-pyhcf101f3_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py314h42812f9_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/noarch/exceptiongroup-1.3.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/freetype-2.14.1-ha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/h2-4.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/hpack-4.1.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/icu-78.2-h33c6efd_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/ipykernel-7.1.0-pyha191276_0.conda 
+ - conda: https://conda.anaconda.org/conda-forge/noarch/ipython-9.9.0-pyh53cf698_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/ipython_pygments_lexers-1.1.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/isoduration-20.11.0-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jedi-0.19.2-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jinja2-3.1.6-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/json5-0.13.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jsonpointer-3.0.0-pyhcf101f3_3.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jsonschema-4.26.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jsonschema-specifications-2025.9.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jsonschema-with-format-nongpl-4.26.0-hcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter-book-2.1.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter-lsp-2.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter_client-8.7.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter_core-5.9.1-pyhc90fa1f_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter_events-0.12.0-pyh29332c3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter_server-2.17.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter_server_terminals-0.5.3-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab-4.5.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_pygments-0.3.0-pyhd8ed1ab_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_server-2.28.0-pyhcf101f3_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/linux-64/keyutils-1.6.3-hb9d3cd8_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.4.9-py314h97ea11e_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/krb5-1.21.3-h659f571_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/lcms2-2.17-h717163a_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.45-default_hbd61a6d_105.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/lerc-4.0.0-h0aef613_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libblas-3.11.0-5_h4a7cf45_openblas.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libbrotlicommon-1.2.0-hb03c661_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libbrotlidec-1.2.0-hb03c661_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libbrotlienc-1.2.0-hb03c661_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libcblas-3.11.0-5_h0358290_openblas.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libdeflate-1.25-h17f619e_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libedit-3.1.20250104-pl5321h7949ede_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libexpat-2.7.3-hecca717_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libffi-3.5.2-h9ec8514_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libfreetype-2.14.1-ha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libfreetype6-2.14.1-h73754d4_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-15.2.0-he0feb66_16.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-15.2.0-h69a702a_16.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libgfortran-15.2.0-h69a702a_16.conda + - conda: 
https://conda.anaconda.org/conda-forge/linux-64/libgfortran5-15.2.0-h68bc16d_16.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libgomp-15.2.0-he0feb66_16.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libjpeg-turbo-3.1.2-hb03c661_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/liblapack-3.11.0-5_h47877c9_openblas.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/liblzma-5.8.1-hb9d3cd8_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libmpdec-4.0.0-hb9d3cd8_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libopenblas-0.3.30-pthreads_h94d23a6_4.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libpng-1.6.53-h421ea60_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libsodium-1.0.20-h4ab18f5_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libsqlite-3.51.1-hf4e2dac_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-15.2.0-h934c35e_16.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-ng-15.2.0-hdf11a46_16.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libtiff-4.7.1-h9d88235_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libuuid-2.41.3-h5347b49_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libuv-1.51.0-hb03c661_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libwebp-base-1.6.0-hd42ef1d_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libxcb-1.17.0-h8a09558_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.3.1-hb9d3cd8_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/logistro-2.0.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/markupsafe-3.0.3-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py314h1194b4b_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/munkres-1.1.4-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/narwhals-2.15.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/nbclient-0.10.4-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/nbconvert-core-7.16.6-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/nbformat-5.10.4-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/ncurses-6.5-h2d0b736_3.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/nest-asyncio-1.6.0-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/nodejs-22.21.1-h273caaf_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py314h2b28147_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/openjpeg-2.5.4-h55fea9a_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/openssl-3.6.0-h26f9b46_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/orjson-3.11.5-py314h3b757c3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/packaging-25.0-pyh29332c3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pandocfilters-1.5.0-pyhd8ed1ab_0.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/noarch/parso-0.8.5-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pexpect-4.9.0-pyhd8ed1ab_1.conda + - conda: 
https://conda.anaconda.org/conda-forge/linux-64/pillow-12.1.0-py314h8ec4b1a_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/prek-0.3.0-hb17b654_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py314h0f05182_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/pthread-stubs-0.4-hb9d3cd8_1002.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/ptyprocess-0.7.0-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pure_eval-0.2.3-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pybaum-0.1.3-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pycparser-2.22-pyh29332c3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pygments-2.19.2-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyparsing-3.3.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pysocks-1.7.1-pyha55dd90_7.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-9.0.2-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-timeout-2.4.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.14.2-h32b2ec7_100_cp314.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhe01879c_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-fastjsonschema-2.21.2-pyhe01879c_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.2-h4df99d1_100.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-kaleido-1.2.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2025.3-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pytz-2025.2-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyyaml-6.0.3-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/pyzmq-27.1.0-py312hfb55c3c_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/qhull-2020.2-h434a139_5.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/readline-8.3-h853b02a_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/referencing-0.37.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/requests-2.32.5-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3339-validator-0.1.4-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py314h2e6c369_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.16.3-py314hf07bd8e_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.0.0-pyha191276_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-80.9.0-pyhff2d567_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/simplejson-3.20.2-py314h5bd0f2a_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda + - conda: 
https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyhc90fa1f_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_ha0e22de_103.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/tornado-6.5.3-py314h5bd0f2a_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/typing-extensions-4.15.0-h396c80c_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/unicodedata2-17.0.0-py314h5bd0f2a_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/websocket-client-1.9.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/xorg-libxau-1.0.12-hb03c661_1.conda + - conda: 
https://conda.anaconda.org/conda-forge/linux-64/xorg-libxdmcp-1.1.5-hb03c661_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/yaml-0.2.5-h280c20c_3.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/zeromq-4.3.5-h387f397_9.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/zipp-3.23.0-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/zlib-ng-2.3.2-hceb46e0_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/zstd-1.5.7-hb78ec9c_6.conda + - pypi: https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/b8/14/bab308fc2c1b5228c3224ec2bf928ce2e4d21d8046c161e44a2012b5203e/greenlet-3.3.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/5e/27/2e6032727e41ce74914277478021140947af59127d68aa9e6f3776b428fd/jaxlib-0.8.2-cp314-cp314-manylinux_2_27_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/c6/bb/82c7dcf38070b46172a517e2334e665c5bf374a262f99a283ea454bece7c/ml_dtypes-0.5.4-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl + - pypi: 
https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#522b8c9a21226569ffd25e950e44f0c5de308c9d + - pypi: https://files.pythonhosted.org/packages/15/b2/0e62f78c0c5ba7e3d2c5945a82456f4fac76c480940f805e0b97fcbc2f65/pandas-2.3.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/b3/27/caf606ee924282fe4747ee4fd454b335a72a6e018f97eab5ff7f28199e16/sqlalchemy-2.0.45-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl + - pypi: ./ + osx-arm64: + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/_openmp_mutex-4.5-7_kmp_llvm.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/_python_abi3_support-1.0-hd8ed1ab_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/anyio-4.12.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/appnope-0.1.4-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/argon2-cffi-25.1.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/argon2-cffi-bindings-25.1.0-py314h0612a62_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.0.5-pyh29332c3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda + - conda: 
https://conda.anaconda.org/conda-forge/noarch/babel-2.17.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-6.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-with-css-6.3.0-h5f6438b_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-1.2.0-h7d5ae5b_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-bin-1.2.0-hc919400_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-python-1.2.0-py314h3daef5d_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/bzip2-1.0.8-hd037594_8.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/c-ares-1.34.6-hc919400_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.1.4-hbd8a1cb_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/cffi-2.0.0-py314h44086f9_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/comm-0.2.3-pyhe01879c_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/contourpy-1.3.3-py314h784bc60_3.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.2-py314hd8ed1ab_100.conda + - conda: 
https://conda.anaconda.org/conda-forge/noarch/cycler-0.12.1-pyhcf101f3_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/debugpy-1.8.19-py314hf820bb6_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/noarch/exceptiongroup-1.3.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/freetype-2.14.1-hce30654_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/h2-4.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/hpack-4.1.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/icu-75.1-hfee45f7_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/ipykernel-7.1.0-pyh5552912_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/ipython-9.9.0-pyh53cf698_0.conda + - 
conda: https://conda.anaconda.org/conda-forge/noarch/ipython_pygments_lexers-1.1.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/isoduration-20.11.0-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jedi-0.19.2-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jinja2-3.1.6-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/json5-0.13.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jsonpointer-3.0.0-pyhcf101f3_3.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jsonschema-4.26.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jsonschema-specifications-2025.9.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jsonschema-with-format-nongpl-4.26.0-hcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter-book-2.1.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter-lsp-2.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter_client-8.7.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter_core-5.9.1-pyhc90fa1f_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter_events-0.12.0-pyh29332c3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter_server-2.17.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter_server_terminals-0.5.3-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab-4.5.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_pygments-0.3.0-pyhd8ed1ab_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_server-2.28.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/kiwisolver-1.4.9-py314h42813c9_2.conda + - conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/krb5-1.21.3-h237132a_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/lcms2-2.17-h7eeda09_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/lerc-4.0.0-hd64df32_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libabseil-20250512.1-cxx17_hd41c47c_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libblas-3.11.0-5_h51639a9_openblas.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libbrotlicommon-1.2.0-hc919400_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libbrotlidec-1.2.0-hc919400_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libbrotlienc-1.2.0-hc919400_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcblas-3.11.0-5_hb0561ab_openblas.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcxx-21.1.8-hf598326_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libdeflate-1.25-hc11a715_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libedit-3.1.20250104-pl5321hafb1f1b_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libev-4.33-h93a5062_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libexpat-2.7.3-haf25636_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libffi-3.5.2-he5f378a_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libfreetype-2.14.1-hce30654_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libfreetype6-2.14.1-h6da58f4_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libgcc-15.2.0-hcbb3090_16.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libgfortran-15.2.0-h07b0088_16.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libgfortran5-15.2.0-hdae7583_16.conda + - conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/libjpeg-turbo-3.1.2-hc919400_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/liblapack-3.11.0-5_hd9741b5_openblas.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/liblzma-5.8.1-h39f12f2_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libmpdec-4.0.0-h5505292_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libnghttp2-1.67.0-hc438710_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libopenblas-0.3.30-openmp_ha158390_3.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libpng-1.6.53-hfab5511_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libsodium-1.0.20-h99b78c6_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libsqlite-3.51.1-h1b79a29_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libtiff-4.7.1-h4030677_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libuv-1.51.0-h6caf38d_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libwebp-base-1.6.0-h07db88b_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libxcb-1.17.0-hdb1d25a_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libzlib-1.3.1-h8359307_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/llvm-openmp-21.1.8-h4a912ad_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/logistro-2.0.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/markupsafe-3.0.3-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/matplotlib-base-3.10.8-py314hd63e3f0_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/munkres-1.1.4-pyhd8ed1ab_1.conda + - conda: 
https://conda.anaconda.org/conda-forge/noarch/narwhals-2.15.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/nbclient-0.10.4-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/nbconvert-core-7.16.6-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/nbformat-5.10.4-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ncurses-6.5-h5e97a16_3.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/nest-asyncio-1.6.0-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/nodejs-25.2.1-h5230ea7_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/numpy-2.3.5-py314hae46ccb_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/openjpeg-2.5.4-hbfb3c88_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/openssl-3.6.0-h5503f6c_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/orjson-3.11.5-py314hda6d10a_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/packaging-25.0-pyh29332c3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pandocfilters-1.5.0-pyhd8ed1ab_0.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/noarch/parso-0.8.5-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pexpect-4.9.0-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pillow-12.1.0-py314hab283cf_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/prek-0.3.0-h6fdd925_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/psutil-7.2.1-py314ha14b1ff_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pthread-stubs-0.4-hd74edd7_1002.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/ptyprocess-0.7.0-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pure_eval-0.2.3-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pybaum-0.1.3-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pycparser-2.22-pyh29332c3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pygments-2.19.2-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-core-12.1-py314h3a4d195_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-framework-cocoa-12.1-py314h36abed7_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyparsing-3.3.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pysocks-1.7.1-pyha55dd90_7.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-9.0.2-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-timeout-2.4.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.14.2-h40d2674_100_cp314.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhe01879c_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-fastjsonschema-2.21.2-pyhe01879c_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.2-h4df99d1_100.conda + - 
conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-kaleido-1.2.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2025.3-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pytz-2025.2-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyyaml-6.0.3-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyzmq-27.1.0-py312hd65ceae_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/qhull-2020.2-h420ef59_5.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/readline-8.3-h46df422_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/referencing-0.37.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/requests-2.32.5-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3339-validator-0.1.4-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/rpds-py-0.30.0-py314haad56a0_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/scipy-1.16.3-py314h725efaa_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.0.0-pyh5552912_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-80.9.0-pyhff2d567_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/simplejson-3.20.2-py314h0612a62_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda + - conda: 
https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyhc90fa1f_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tk-8.6.13-h892fb3f_3.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tornado-6.5.4-py314h0612a62_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/typing-extensions-4.15.0-h396c80c_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/unicodedata2-17.0.0-py314h0612a62_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/websocket-client-1.9.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/xorg-libxau-1.0.12-hc919400_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/xorg-libxdmcp-1.1.5-hc919400_1.conda + - conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/yaml-0.2.5-h925e9cb_3.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/zeromq-4.3.5-h888dc83_9.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/zipp-3.23.0-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/zlib-ng-2.3.2-hed4e4f5_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/zstd-1.5.7-hbf9d68e_6.conda + - pypi: https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/d8/9d/dca93d916bf8664d7a2bb73ea3d219028dabbe382c31774348963287356a/jaxlib-0.8.2-cp314-cp314-macosx_11_0_arm64.whl + - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/72/4e/1339dc6e2557a344f5ba5590872e80346f76f6cb2ac3dd16e4666e88818c/ml_dtypes-0.5.4-cp314-cp314-macosx_10_13_universal2.whl + - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#522b8c9a21226569ffd25e950e44f0c5de308c9d + - pypi: 
https://files.pythonhosted.org/packages/21/00/266d6b357ad5e6d3ad55093a7e8efc7dd245f5a842b584db9f30b0f0a287/pandas-2.3.3-cp314-cp314-macosx_11_0_arm64.whl + - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/bf/e1/3ccb13c643399d22289c6a9786c1a91e3dcbb68bce4beb44926ac2c557bf/sqlalchemy-2.0.45-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl + - pypi: ./ + win-64: + - conda: https://conda.anaconda.org/conda-forge/win-64/_openmp_mutex-4.5-2_gnu.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/_python_abi3_support-1.0-hd8ed1ab_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/anyio-4.12.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/argon2-cffi-25.1.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/argon2-cffi-bindings-25.1.0-py314h5a2d7ad_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.0.5-pyh29332c3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.17.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-6.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-with-css-6.3.0-h5f6438b_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/win-64/brotli-1.2.0-h2d644bc_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/brotli-bin-1.2.0-hfd05255_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/brotli-python-1.2.0-py314he701e3d_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/bzip2-1.0.8-h0ad9c76_8.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.1.4-h4c7d964_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/cffi-2.0.0-py314h5a2d7ad_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/comm-0.2.3-pyhe01879c_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/contourpy-1.3.3-py314h909e829_3.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.2-py314hd8ed1ab_100.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cycler-0.12.1-pyhcf101f3_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/debugpy-1.8.19-py314hb98de8c_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/noarch/exceptiongroup-1.3.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/freetype-2.14.1-h57928b3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/h2-4.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/hpack-4.1.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/icu-78.1-h637d24d_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/ipykernel-7.1.0-pyh6dadd2b_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/ipython-9.9.0-pyhe2676ad_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/ipython_pygments_lexers-1.1.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/isoduration-20.11.0-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jedi-0.19.2-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jinja2-3.1.6-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/json5-0.13.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jsonpointer-3.0.0-pyhcf101f3_3.conda + - conda: 
https://conda.anaconda.org/conda-forge/noarch/jsonschema-4.26.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jsonschema-specifications-2025.9.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jsonschema-with-format-nongpl-4.26.0-hcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter-book-2.1.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter-lsp-2.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter_client-8.7.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter_core-5.9.1-pyh6dadd2b_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter_events-0.12.0-pyh29332c3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter_server-2.17.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter_server_terminals-0.5.3-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab-4.5.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_pygments-0.3.0-pyhd8ed1ab_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_server-2.28.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/kiwisolver-1.4.9-py314hf309875_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/krb5-1.21.3-hdf4eb48_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/lcms2-2.17-hbcf6048_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/lerc-4.0.0-h6470a55_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libblas-3.11.0-5_hf2e6a31_mkl.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libbrotlicommon-1.2.0-hfd05255_1.conda + - conda: 
https://conda.anaconda.org/conda-forge/win-64/libbrotlidec-1.2.0-hfd05255_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libbrotlienc-1.2.0-hfd05255_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libcblas-3.11.0-5_h2a3cdd5_mkl.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libdeflate-1.25-h51727cc_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libexpat-2.7.3-hac47afa_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libffi-3.5.2-h52bdfb6_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libfreetype-2.14.1-h57928b3_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libfreetype6-2.14.1-hdbac1cb_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libgcc-15.2.0-h8ee18e1_16.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libgomp-15.2.0-h8ee18e1_16.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libhwloc-2.12.2-default_h4379cf1_1000.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libiconv-1.18-hc1393d2_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libjpeg-turbo-3.1.2-hfd05255_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/liblapack-3.11.0-5_hf9ab0e9_mkl.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/liblzma-5.8.1-h2466b09_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libmpdec-4.0.0-h2466b09_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libpng-1.6.53-h7351971_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libsodium-1.0.20-hc70643c_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libsqlite-3.51.1-hf5d6505_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libtiff-4.7.1-h8f73337_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libwebp-base-1.6.0-h4d5522a_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/win-64/libwinpthread-12.0.0.r4.gg4f2fc60ca-h57928b3_10.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libxcb-1.17.0-h0e4246c_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libxml2-16-2.15.1-h3cfd58e_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libxml2-2.15.1-h779ef1b_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libzlib-1.3.1-h2466b09_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/llvm-openmp-21.1.8-h4fa8253_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/logistro-2.0.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/markupsafe-3.0.3-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/matplotlib-base-3.10.8-py314hfa45d96_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/mkl-2025.3.0-hac47afa_455.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/munkres-1.1.4-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/narwhals-2.15.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/nbclient-0.10.4-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/nbconvert-core-7.16.6-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/nbformat-5.10.4-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/nest-asyncio-1.6.0-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/nodejs-25.2.1-he453025_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda + - conda: 
https://conda.anaconda.org/conda-forge/win-64/numpy-2.3.5-py314h06c3c77_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/openjpeg-2.5.4-h24db6dd_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/openssl-3.6.0-h725018a_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/orjson-3.11.5-py314h64f83cb_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/packaging-25.0-pyh29332c3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pandocfilters-1.5.0-pyhd8ed1ab_0.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/noarch/parso-0.8.5-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/pillow-12.1.0-py314h61b30b5_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/prek-0.3.0-h18a1a76_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/psutil-7.2.1-py314hc5dbbe4_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/pthread-stubs-0.4-h0e40799_1002.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pure_eval-0.2.3-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pybaum-0.1.3-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pycparser-2.22-pyh29332c3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pygments-2.19.2-pyhd8ed1ab_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/noarch/pyparsing-3.3.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pysocks-1.7.1-pyh09c184e_7.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-9.0.2-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-timeout-2.4.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/python-3.14.2-h4b44e0e_100_cp314.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhe01879c_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-fastjsonschema-2.21.2-pyhe01879c_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.2-h4df99d1_100.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-kaleido-1.2.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2025.3-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pytz-2025.2-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/pywin32-311-py314h8f8f202_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/pywinpty-2.0.15-py314h51f0985_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyyaml-6.0.3-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/pyzmq-27.1.0-py312hbb5da91_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/qhull-2020.2-hc790b64_5.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/referencing-0.37.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/requests-2.32.5-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3339-validator-0.1.4-pyhd8ed1ab_1.conda + - conda: 
https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/rpds-py-0.30.0-py314h9f07db2_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/scipy-1.16.3-py314h221f224_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.0.0-pyh6dadd2b_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-80.9.0-pyhff2d567_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/simplejson-3.20.2-py314h5a2d7ad_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/tbb-2022.3.0-h3155e25_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyh6dadd2b_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/tk-8.6.13-h2c6b04d_3.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/tornado-6.5.4-py314h5a2d7ad_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/typing-extensions-4.15.0-h396c80c_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda + - conda: 
https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/ucrt-10.0.26100.0-h57928b3_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/unicodedata2-17.0.0-py314h5a2d7ad_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/vc-14.3-h41ae7f8_34.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/vc14_runtime-14.44.35208-h818238b_34.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/vcomp14-14.44.35208-h818238b_34.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/websocket-client-1.9.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/win_inet_pton-1.1.0-pyh7428d3b_8.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/winpty-0.4.3-4.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/win-64/xorg-libxau-1.0.12-hba3369d_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/xorg-libxdmcp-1.1.5-hba3369d_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/yaml-0.2.5-h6a83c73_3.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/zeromq-4.3.5-h5bddc39_9.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/zipp-3.23.0-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/zlib-ng-2.3.2-h0261ad2_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/zstd-1.5.7-h534d264_6.conda + - pypi: 
https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/7c/9a/9030e6f9aa8fd7808e9c31ba4c38f87c4f8ec324ee67431d181fe396d705/greenlet-3.3.0-cp314-cp314-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/b3/8c/af5a00b07a446414edf6b84a7397eab02cf01ba44b6ae1fce7798ce4c127/jaxlib-0.8.2-cp314-cp314-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/e9/93/2bfed22d2498c468f6bcd0d9f56b033eaa19f33320389314c19ef6766413/ml_dtypes-0.5.4-cp314-cp314-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#522b8c9a21226569ffd25e950e44f0c5de308c9d + - pypi: https://files.pythonhosted.org/packages/a6/3d/124ac75fcd0ecc09b8fdccb0246ef65e35b012030defb0e0eba2cbbbe948/pandas-2.3.3-cp314-cp314-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl + - pypi: 
https://files.pythonhosted.org/packages/5a/dc/491b7661614ab97483abf2056be1deee4dc2490ecbf7bff9ab5cdbac86e1/pyreadline3-3.5.4-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/89/a2/0e1590e9adb292b1d576dbcf67ff7df8cf55e56e78d2c927686d01080f4b/sqlalchemy-2.0.45-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl - pypi: ./ test-cpu: @@ -966,46 +1611,43 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/_python_abi3_support-1.0-hd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/anyio-4.12.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/argon2-cffi-25.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-bindings-25.1.0-py313h07c4f96_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-bindings-25.1.0-py314h5bd0f2a_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.0.5-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.17.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/backports.zstd-1.3.0-py313h18e8e13_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-6.3.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-with-css-6.3.0-h5f6438b_0.conda - conda: 
https://conda.anaconda.org/conda-forge/linux-64/brotli-1.2.0-hed03a55_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-bin-1.2.0-hb03c661_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py313hf159716_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py314h3de4e8d_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-hda65f42_8.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/c-ares-1.34.6-hb03c661_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.1.4-hbd8a1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py313hf46b229_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py314h4a8dc5f_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/comm-0.2.3-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.3.3-py313h7037e92_3.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/coverage-7.13.1-py313h3dea7bd_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.13.11-py313hd8ed1ab_100.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.3.3-py314h9891dd4_3.conda + - conda: 
https://conda.anaconda.org/conda-forge/linux-64/coverage-7.13.1-py314h67df5f8_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.2-py314hd8ed1ab_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cycler-0.12.1-pyhcf101f3_2.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py313h5d5ffb9_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py314h42812f9_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/elfutils-0.194-h849f50c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/exceptiongroup-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/execnet-2.1.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/fonttools-4.61.1-py313h3dea7bd_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/freetype-2.14.1-ha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/gmp-6.3.0-hac33072_2.conda @@ -1016,8 +1658,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/icu-78.1-h33c6efd_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/identify-2.6.15-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/icu-78.2-h33c6efd_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda @@ -1042,7 +1683,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_pygments-0.3.0-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_server-2.28.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/keyutils-1.6.3-hb9d3cd8_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.4.9-py313hc8edb43_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.4.9-py314h97ea11e_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/krb5-1.21.3-h659f571_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/lcms2-2.17-h717163a_0.conda @@ -1096,12 +1737,12 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/lz4-c-1.10.0-h5888daf_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/lzo-2.10-h280c20c_1002.conda - conda: https://conda.anaconda.org/conda-forge/noarch/markdown-it-py-4.0.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/markupsafe-3.0.3-py313h3dea7bd_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py313h683a580_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/markupsafe-3.0.3-pyh7db6752_0.conda + - 
conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py314h1194b4b_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mdit-py-plugins-0.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mdurl-0.1.2-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/memray-1.19.1-py313h422961c_3.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/memray-1.19.1-py314hef15ded_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/munkres-1.1.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/narwhals-2.15.0-pyhcf101f3_0.conda @@ -1112,26 +1753,25 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/nest-asyncio-1.6.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/nettle-3.10.1-h4a9d5aa_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py313hf6604e3_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py314h2b28147_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/openjpeg-2.5.4-h55fea9a_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/openssl-3.6.0-h26f9b46_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/orjson-3.11.5-py313h541fbb8_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/orjson-3.11.5-py314h3b757c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda - conda: 
https://conda.anaconda.org/conda-forge/linux-64/p11-kit-0.25.10-h3435931_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/packaging-25.0-pyh29332c3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pandocfilters-1.5.0-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/parso-0.8.5-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pexpect-4.9.0-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/pillow-12.1.0-py313h80991f8_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/pillow-12.1.0-py314h8ec4b1a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/prek-0.3.0-hb17b654_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py313h54dd161_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py314h0f05182_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/pthread-stubs-0.4-hb9d3cd8_1002.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ptyprocess-0.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pure_eval-0.2.3-pyhd8ed1ab_1.conda @@ -1145,16 +1785,16 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-memray-1.8.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-timeout-2.4.0-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/pytest-xdist-3.8.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.13.11-hc97d973_100_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.14.2-h32b2ec7_100_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhe01879c_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-fastjsonschema-2.21.2-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.13.11-h4df99d1_100.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.2-h4df99d1_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-kaleido-1.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2025.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.13-8_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytz-2025.2-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/pyyaml-6.0.3-py313h3dea7bd_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyyaml-6.0.3-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/pyzmq-27.1.0-py312hfb55c3c_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/qhull-2020.2-h434a139_5.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/readline-8.3-h853b02a_0.conda @@ -1164,31 +1804,30 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rich-14.2.0-pyhcf101f3_0.conda - - 
conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py313h843e2db_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.16.3-py313h4b8bb8b_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py314h2e6c369_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.16.3-py314hf07bd8e_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.0.0-pyha191276_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-80.9.0-pyhff2d567_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/simplejson-3.20.2-py313h07c4f96_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/simplejson-3.20.2-py314h5bd0f2a_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/snakeviz-2.2.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyhc90fa1f_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/textual-7.0.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/textual-7.0.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_ha0e22de_103.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/tornado-6.5.3-py313h07c4f96_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/tornado-6.5.3-py314h5bd0f2a_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing-extensions-4.15.0-h396c80c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/ukkonen-1.0.1-py313h7037e92_6.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/unicodedata2-17.0.0-py314h5bd0f2a_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -1204,16 +1843,16 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/fd/8e/424b8c6e78bd9837d14ff7df01a9829fc883ba2ab4ea787d4f848435f23f/greenlet-3.3.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + - pypi: 
https://files.pythonhosted.org/packages/b8/14/bab308fc2c1b5228c3224ec2bf928ce2e4d21d8046c161e44a2012b5203e/greenlet-3.3.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/6b/e0/91e5762a7ddb6351b07c742ca407cd28e26043d6945d6228b6c1b0881a45/jaxlib-0.8.2-cp313-cp313-manylinux_2_27_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/5e/27/2e6032727e41ce74914277478021140947af59127d68aa9e6f3776b428fd/jaxlib-0.8.2-cp314-cp314-manylinux_2_27_x86_64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/eb/33/40cd74219417e78b97c47802037cf2d87b91973e18bb968a7da48a96ea44/ml_dtypes-0.5.4-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/c6/bb/82c7dcf38070b46172a517e2334e665c5bf374a262f99a283ea454bece7c/ml_dtypes-0.5.4-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - - pypi: https://files.pythonhosted.org/packages/15/07/284f757f63f8a8d69ed4472bfd85122bd086e637bf4ed09de572d575a693/pandas-2.3.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#522b8c9a21226569ffd25e950e44f0c5de308c9d + - pypi: https://files.pythonhosted.org/packages/15/b2/0e62f78c0c5ba7e3d2c5945a82456f4fac76c480940f805e0b97fcbc2f65/pandas-2.3.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: 
https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/0e/50/80a8d080ac7d3d321e5e5d420c9a522b0aa770ec7013ea91f9a8b7d36e4a/sqlalchemy-2.0.45-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/b3/27/caf606ee924282fe4747ee4fd454b335a72a6e018f97eab5ff7f28199e16/sqlalchemy-2.0.45-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl - pypi: ./ osx-arm64: @@ -1222,44 +1861,41 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/anyio-4.12.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/appnope-0.1.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/argon2-cffi-25.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/argon2-cffi-bindings-25.1.0-py313h6535dbc_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/argon2-cffi-bindings-25.1.0-py314h0612a62_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.0.5-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.17.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/backports.zstd-1.3.0-py313h48bb75e_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-6.3.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-with-css-6.3.0-h5f6438b_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-1.2.0-h7d5ae5b_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-bin-1.2.0-hc919400_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-python-1.2.0-py313hde1f3bb_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-python-1.2.0-py314h3daef5d_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/bzip2-1.0.8-hd037594_8.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.1.4-hbd8a1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/cffi-2.0.0-py313h224173a_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/cffi-2.0.0-py314h44086f9_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/comm-0.2.3-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/contourpy-1.3.3-py313ha61f8ec_3.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/coverage-7.13.1-py313h65a2061_0.conda - - conda: 
https://conda.anaconda.org/conda-forge/noarch/cpython-3.13.11-py313hd8ed1ab_100.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/contourpy-1.3.3-py314h784bc60_3.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/coverage-7.13.1-py314h6e9b3f0_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.2-py314hd8ed1ab_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cycler-0.12.1-pyhcf101f3_2.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/debugpy-1.8.19-py313hc37fe24_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/debugpy-1.8.19-py314hf820bb6_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/exceptiongroup-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/execnet-2.1.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/fonttools-4.61.1-py313h7d74516_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/freetype-2.14.1-hce30654_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda @@ -1268,7 +1904,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/identify-2.6.15-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda @@ -1292,7 +1927,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab-4.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_pygments-0.3.0-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_server-2.28.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/kiwisolver-1.4.9-py313h7add70c_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/kiwisolver-1.4.9-py314h42813c9_2.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/krb5-1.21.3-h237132a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/lcms2-2.17-h7eeda09_0.conda @@ -1328,12 +1963,12 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/logistro-2.0.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/lz4-c-1.10.0-h286801f_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/markdown-it-py-4.0.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/markupsafe-3.0.3-py313h7d74516_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/matplotlib-base-3.10.8-py313h58042b9_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/markupsafe-3.0.3-pyh7db6752_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/matplotlib-base-3.10.8-py314hd63e3f0_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mdit-py-plugins-0.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mdurl-0.1.2-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/memray-1.19.1-py313h78c9487_3.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/memray-1.19.1-py314habef2a7_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/munkres-1.1.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/narwhals-2.15.0-pyhcf101f3_0.conda @@ -1343,33 +1978,32 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ncurses-6.5-h5e97a16_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/nest-asyncio-1.6.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/numpy-2.3.5-py313h16eae64_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/numpy-2.3.5-py314hae46ccb_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/openjpeg-2.5.4-hbfb3c88_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/openssl-3.6.0-h5503f6c_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/orjson-3.11.5-py313hfea8034_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/orjson-3.11.5-py314hda6d10a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/packaging-25.0-pyh29332c3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pandocfilters-1.5.0-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/parso-0.8.5-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pexpect-4.9.0-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pillow-12.1.0-py313h45e5a15_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pillow-12.1.0-py314hab283cf_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/prek-0.3.0-h6fdd925_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/psutil-7.2.1-py313h6688731_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/psutil-7.2.1-py314ha14b1ff_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pthread-stubs-0.4-hd74edd7_1002.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ptyprocess-0.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pure_eval-0.2.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pybaum-0.1.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pycparser-2.22-pyh29332c3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pygments-2.19.2-pyhd8ed1ab_0.conda - - conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-core-12.1-py313h40b429f_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-framework-cocoa-12.1-py313hcc5defa_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-core-12.1-py314h3a4d195_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-framework-cocoa-12.1-py314h36abed7_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pyparsing-3.3.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pysocks-1.7.1-pyha55dd90_7.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-9.0.2-pyhcf101f3_0.conda @@ -1377,16 +2011,16 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-memray-1.8.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-timeout-2.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-xdist-3.8.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.13.11-hfc2f54d_100_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.14.2-h40d2674_100_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhe01879c_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-fastjsonschema-2.21.2-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.13.11-h4df99d1_100.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.2-h4df99d1_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-kaleido-1.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2025.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.13-8_cp313.conda + - conda: 
https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytz-2025.2-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyyaml-6.0.3-py313h7d74516_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyyaml-6.0.3-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyzmq-27.1.0-py312hd65ceae_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/qhull-2020.2-h420ef59_5.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/readline-8.3-h46df422_0.conda @@ -1396,31 +2030,30 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rich-14.2.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/rpds-py-0.30.0-py313h2c089d5_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/scipy-1.16.3-py313h29d7d31_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/rpds-py-0.30.0-py314haad56a0_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/scipy-1.16.3-py314h725efaa_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.0.0-pyh5552912_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-80.9.0-pyhff2d567_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/simplejson-3.20.2-py313h6535dbc_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/simplejson-3.20.2-py314h0612a62_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/snakeviz-2.2.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyhc90fa1f_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/textual-7.0.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/textual-7.0.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tk-8.6.13-h892fb3f_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tornado-6.5.4-py313h6535dbc_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tornado-6.5.4-py314h0612a62_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing-extensions-4.15.0-h396c80c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ukkonen-1.0.1-py313hc50a443_6.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/unicodedata2-17.0.0-py314h0612a62_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -1437,12 +2070,12 @@ environments: - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/c5/22/c0ec75e43a13b2457d78d509f49b49a57fa302ffced4f4a2778e428cb0a6/jaxlib-0.8.2-cp313-cp313-macosx_11_0_arm64.whl + - pypi: https://files.pythonhosted.org/packages/d8/9d/dca93d916bf8664d7a2bb73ea3d219028dabbe382c31774348963287356a/jaxlib-0.8.2-cp314-cp314-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/d9/a1/4008f14bbc616cfb1ac5b39ea485f9c63031c4634ab3f4cf72e7541f816a/ml_dtypes-0.5.4-cp313-cp313-macosx_10_13_universal2.whl + - pypi: https://files.pythonhosted.org/packages/72/4e/1339dc6e2557a344f5ba5590872e80346f76f6cb2ac3dd16e4666e88818c/ml_dtypes-0.5.4-cp314-cp314-macosx_10_13_universal2.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - - pypi: https://files.pythonhosted.org/packages/31/94/72fac03573102779920099bcac1c3b05975c2cb5f01eac609faf34bed1ca/pandas-2.3.3-cp313-cp313-macosx_11_0_arm64.whl + - pypi: 
git+https://github.com/optimagic-dev/optimagic.git?branch=main#522b8c9a21226569ffd25e950e44f0c5de308c9d + - pypi: https://files.pythonhosted.org/packages/21/00/266d6b357ad5e6d3ad55093a7e8efc7dd245f5a842b584db9f30b0f0a287/pandas-2.3.3-cp314-cp314-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/bf/e1/3ccb13c643399d22289c6a9786c1a91e3dcbb68bce4beb44926ac2c557bf/sqlalchemy-2.0.45-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl @@ -1452,44 +2085,41 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/_python_abi3_support-1.0-hd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/anyio-4.12.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/argon2-cffi-25.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/argon2-cffi-bindings-25.1.0-py313h5ea7bf4_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/argon2-cffi-bindings-25.1.0-py314h5a2d7ad_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.0.5-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.17.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/backports.zstd-1.3.0-py313h2a31948_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/bleach-6.3.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-with-css-6.3.0-h5f6438b_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/brotli-1.2.0-h2d644bc_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/brotli-bin-1.2.0-hfd05255_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/brotli-python-1.2.0-py313h3ebfc14_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/brotli-python-1.2.0-py314he701e3d_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/bzip2-1.0.8-h0ad9c76_8.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.1.4-h4c7d964_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/cffi-2.0.0-py313h5ea7bf4_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/cffi-2.0.0-py314h5a2d7ad_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/comm-0.2.3-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/contourpy-1.3.3-py313hf069bd2_3.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/coverage-7.13.1-py313hd650c13_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.13.11-py313hd8ed1ab_100.conda + - conda: 
https://conda.anaconda.org/conda-forge/win-64/contourpy-1.3.3-py314h909e829_3.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/coverage-7.13.1-py314h2359020_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.2-py314hd8ed1ab_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cycler-0.12.1-pyhcf101f3_2.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/debugpy-1.8.19-py313h927ade5_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/debugpy-1.8.19-py314hb98de8c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/exceptiongroup-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/execnet-2.1.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/fonttools-4.61.1-py313hd650c13_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/freetype-2.14.1-h57928b3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda @@ -1499,7 +2129,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/win-64/icu-78.1-h637d24d_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/identify-2.6.15-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda @@ -1523,7 +2152,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab-4.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_pygments-0.3.0-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_server-2.28.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/kiwisolver-1.4.9-py313h1a38498_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/kiwisolver-1.4.9-py314hf309875_2.conda - conda: https://conda.anaconda.org/conda-forge/win-64/krb5-1.21.3-hdf4eb48_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/lcms2-2.17-hbcf6048_0.conda @@ -1540,7 +2169,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/libfreetype6-2.14.1-hdbac1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libgcc-15.2.0-h8ee18e1_16.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libgomp-15.2.0-h8ee18e1_16.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/libhwloc-2.12.1-default_h4379cf1_1003.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libhwloc-2.12.2-default_h4379cf1_1000.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libiconv-1.18-hc1393d2_2.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libjpeg-turbo-3.1.2-hfd05255_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/liblapack-3.11.0-5_hf9ab0e9_mkl.conda @@ -1558,8 
+2187,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/libzlib-1.3.1-h2466b09_2.conda - conda: https://conda.anaconda.org/conda-forge/win-64/llvm-openmp-21.1.8-h4fa8253_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/logistro-2.0.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/markupsafe-3.0.3-py313hd650c13_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/matplotlib-base-3.10.8-py313he1ded55_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/markupsafe-3.0.3-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/matplotlib-base-3.10.8-py314hfa45d96_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/mkl-2025.3.0-hac47afa_455.conda @@ -1570,24 +2199,23 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/nbformat-5.10.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/nest-asyncio-1.6.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/numpy-2.3.5-py313hce7ae62_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/numpy-2.3.5-py314h06c3c77_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/openjpeg-2.5.4-h24db6dd_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/openssl-3.6.0-h725018a_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/orjson-3.11.4-py313hfbe8231_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/orjson-3.11.5-py314h64f83cb_0.conda - 
conda: https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/packaging-25.0-pyh29332c3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pandocfilters-1.5.0-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/parso-0.8.5-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/pillow-12.1.0-py313h38f99e1_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/pillow-12.1.0-py314h61b30b5_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/prek-0.3.0-h18a1a76_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/psutil-7.2.1-py313h5fd188c_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/psutil-7.2.1-py314hc5dbbe4_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/pthread-stubs-0.4-h0e40799_1002.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pure_eval-0.2.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pybaum-0.1.3-pyhd8ed1ab_1.conda @@ -1599,18 +2227,18 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-cov-7.0.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-timeout-2.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-xdist-3.8.0-pyhd8ed1ab_0.conda - - conda: 
https://conda.anaconda.org/conda-forge/win-64/python-3.13.11-h09917c8_100_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/python-3.14.2-h4b44e0e_100_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhe01879c_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-fastjsonschema-2.21.2-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.13.11-h4df99d1_100.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.2-h4df99d1_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-kaleido-1.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2025.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.13-8_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytz-2025.2-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/pywin32-311-py313h40c08fc_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/pywinpty-2.0.15-py313h5813708_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/pyyaml-6.0.3-py313hd650c13_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/pywin32-311-py314h8f8f202_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/pywinpty-2.0.15-py314h51f0985_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyyaml-6.0.3-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/pyzmq-27.1.0-py312hbb5da91_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/qhull-2020.2-hc790b64_5.conda - conda: https://conda.anaconda.org/conda-forge/noarch/referencing-0.37.0-pyhcf101f3_0.conda @@ -1618,35 +2246,34 @@ environments: - conda: 
https://conda.anaconda.org/conda-forge/noarch/rfc3339-validator-0.1.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/rpds-py-0.30.0-py313hfbe8231_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/scipy-1.16.3-py313he51e9a2_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/rpds-py-0.30.0-py314h9f07db2_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/scipy-1.16.3-py314h221f224_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.0.0-pyh6dadd2b_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-80.9.0-pyhff2d567_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/simplejson-3.20.2-py313h5ea7bf4_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/simplejson-3.20.2-py314h5a2d7ad_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/snakeviz-2.2.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/tbb-2022.3.0-hd094cb3_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/tbb-2022.3.0-h3155e25_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyh6dadd2b_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/tk-8.6.13-h2c6b04d_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda - 
- conda: https://conda.anaconda.org/conda-forge/win-64/tornado-6.5.4-py313h5ea7bf4_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/tornado-6.5.4-py314h5a2d7ad_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing-extensions-4.15.0-h396c80c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/ucrt-10.0.26100.0-h57928b3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/ukkonen-1.0.1-py313hf069bd2_6.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/unicodedata2-17.0.0-py314h5a2d7ad_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/vc-14.3-h41ae7f8_34.conda - conda: https://conda.anaconda.org/conda-forge/win-64/vc14_runtime-14.44.35208-h818238b_34.conda - conda: https://conda.anaconda.org/conda-forge/win-64/vcomp14-14.44.35208-h818238b_34.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -1664,17 +2291,17 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: 
https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/7e/71/ba21c3fb8c5dce83b8c01f458a42e99ffdb1963aeec08fff5a18588d8fd7/greenlet-3.3.0-cp313-cp313-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/7c/9a/9030e6f9aa8fd7808e9c31ba4c38f87c4f8ec324ee67431d181fe396d705/greenlet-3.3.0-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/85/68/25b38673b07a808616ce7b6efb3eed491f983f3373a09cbbd03f67178563/jaxlib-0.8.2-cp313-cp313-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/b3/8c/af5a00b07a446414edf6b84a7397eab02cf01ba44b6ae1fce7798ce4c127/jaxlib-0.8.2-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/e1/8b/200088c6859d8221454825959df35b5244fa9bdf263fd0249ac5fb75e281/ml_dtypes-0.5.4-cp313-cp313-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/e9/93/2bfed22d2498c468f6bcd0d9f56b033eaa19f33320389314c19ef6766413/ml_dtypes-0.5.4-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - - pypi: https://files.pythonhosted.org/packages/4f/c7/e54682c96a895d0c808453269e0b5928a07a127a15704fedb643e9b0a4c8/pandas-2.3.3-cp313-cp313-win_amd64.whl + - pypi: 
git+https://github.com/optimagic-dev/optimagic.git?branch=main#522b8c9a21226569ffd25e950e44f0c5de308c9d + - pypi: https://files.pythonhosted.org/packages/a6/3d/124ac75fcd0ecc09b8fdccb0246ef65e35b012030defb0e0eba2cbbbe948/pandas-2.3.3-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/5a/dc/491b7661614ab97483abf2056be1deee4dc2490ecbf7bff9ab5cdbac86e1/pyreadline3-3.5.4-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/c0/c5/d17113020b2d43073412aeca09b60d2009442420372123b8d49cc253f8b8/sqlalchemy-2.0.45-cp313-cp313-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/89/a2/0e1590e9adb292b1d576dbcf67ff7df8cf55e56e78d2c927686d01080f4b/sqlalchemy-2.0.45-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl - pypi: ./ test-gpu: @@ -1691,13 +2318,13 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/_python_abi3_support-1.0-hd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/anyio-4.12.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/argon2-cffi-25.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-bindings-25.1.0-py313h07c4f96_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-bindings-25.1.0-py314h5bd0f2a_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.0.5-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/babel-2.17.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/backports.zstd-1.3.0-py313h18e8e13_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/binutils_impl_linux-64-2.45-default_hfdba357_105.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/binutils_linux-64-2.45-default_h4852527_105.conda @@ -1705,22 +2332,21 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-with-css-6.3.0-h5f6438b_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-1.2.0-hed03a55_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-bin-1.2.0-hb03c661_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py313hf159716_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py314h3de4e8d_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-hda65f42_8.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/c-ares-1.34.6-hb03c661_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.1.4-hbd8a1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py313hf46b229_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py314h4a8dc5f_1.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/comm-0.2.3-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.3.3-py313h7037e92_3.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/coverage-7.13.1-py313h3dea7bd_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.13.11-py313hd8ed1ab_100.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.3.3-py314h9891dd4_3.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/coverage-7.13.1-py314h67df5f8_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.2-py314hd8ed1ab_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-cccl_linux-64-12.9.27-ha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-crt-dev_linux-64-12.9.86-ha770c72_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-crt-tools-12.9.86-ha770c72_2.conda @@ -1741,17 +2367,15 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-nvvm-tools-12.9.86-h4bc722e_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-version-12.9-h4f385c5_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cycler-0.12.1-pyhcf101f3_2.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py313h5d5ffb9_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py314h42812f9_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - - conda: 
https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/elfutils-0.194-h849f50c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/exceptiongroup-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/execnet-2.1.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/fonttools-4.61.1-py313h3dea7bd_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/freetype-2.14.1-ha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/gcc_impl_linux-64-14.3.0-he8b2097_16.conda @@ -1767,7 +2391,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/icu-78.1-h33c6efd_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/identify-2.6.15-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda @@ -1793,7 +2416,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_server-2.28.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/kernel-headers_linux-64-4.18.0-he073ed8_9.conda - conda: 
https://conda.anaconda.org/conda-forge/linux-64/keyutils-1.6.3-hb9d3cd8_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.4.9-py313hc8edb43_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.4.9-py314h97ea11e_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/krb5-1.21.3-h659f571_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/lcms2-2.17-h717163a_0.conda @@ -1852,12 +2475,12 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/lz4-c-1.10.0-h5888daf_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/lzo-2.10-h280c20c_1002.conda - conda: https://conda.anaconda.org/conda-forge/noarch/markdown-it-py-4.0.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/markupsafe-3.0.3-py313h3dea7bd_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py313h683a580_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/markupsafe-3.0.3-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py314h1194b4b_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mdit-py-plugins-0.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mdurl-0.1.2-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/memray-1.19.1-py313h422961c_3.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/memray-1.19.1-py314hef15ded_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/munkres-1.1.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/narwhals-2.15.0-pyhcf101f3_0.conda @@ -1868,26 +2491,25 @@ 
environments: - conda: https://conda.anaconda.org/conda-forge/noarch/nest-asyncio-1.6.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/nettle-3.10.1-h4a9d5aa_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py313hf6604e3_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py314h2b28147_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/openjpeg-2.5.4-h55fea9a_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/openssl-3.6.0-h26f9b46_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/orjson-3.11.5-py313h541fbb8_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/orjson-3.11.5-py314h3b757c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/p11-kit-0.25.10-h3435931_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/packaging-25.0-pyh29332c3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pandocfilters-1.5.0-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/parso-0.8.5-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pexpect-4.9.0-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/pillow-12.1.0-py313h80991f8_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/pillow-12.1.0-py314h8ec4b1a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/prek-0.3.0-hb17b654_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py313h54dd161_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py314h0f05182_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/pthread-stubs-0.4-hb9d3cd8_1002.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ptyprocess-0.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pure_eval-0.2.3-pyhd8ed1ab_1.conda @@ -1901,16 +2523,16 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-memray-1.8.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-timeout-2.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-xdist-3.8.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.13.11-hc97d973_100_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.14.2-h32b2ec7_100_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhe01879c_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-fastjsonschema-2.21.2-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.13.11-h4df99d1_100.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.2-h4df99d1_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/python-kaleido-1.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2025.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.13-8_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytz-2025.2-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/pyyaml-6.0.3-py313h3dea7bd_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyyaml-6.0.3-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/pyzmq-27.1.0-py312hfb55c3c_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/qhull-2020.2-h434a139_5.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/readline-8.3-h853b02a_0.conda @@ -1920,11 +2542,11 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rich-14.2.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py313h843e2db_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.16.3-py313h4b8bb8b_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py314h2e6c369_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.16.3-py314hf07bd8e_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.0.0-pyha191276_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-80.9.0-pyhff2d567_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/simplejson-3.20.2-py313h07c4f96_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/simplejson-3.20.2-py314h5bd0f2a_1.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/snakeviz-2.2.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda @@ -1932,20 +2554,19 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/sysroot_linux-64-2.28-h4ee821c_9.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyhc90fa1f_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/textual-7.0.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/textual-7.0.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_ha0e22de_103.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/tornado-6.5.3-py313h07c4f96_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/tornado-6.5.3-py314h5bd0f2a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing-extensions-4.15.0-h396c80c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/ukkonen-1.0.1-py313h7037e92_6.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/unicodedata2-17.0.0-py314h5bd0f2a_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -1961,13 +2582,13 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/fd/8e/424b8c6e78bd9837d14ff7df01a9829fc883ba2ab4ea787d4f848435f23f/greenlet-3.3.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/b8/14/bab308fc2c1b5228c3224ec2bf928ce2e4d21d8046c161e44a2012b5203e/greenlet-3.3.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/53/f2/44ad0ce1d115f0f6be10f4af0ca05a18afb838b06e6ca6b01ba4b0137421/jax_cuda12_pjrt-0.8.2-py3-none-manylinux_2_27_x86_64.whl - - pypi: https://files.pythonhosted.org/packages/1c/38/4ba2486f95fcf2120723932feacdded438e785258148b18a703cd1177e41/jax_cuda12_plugin-0.8.2-cp313-cp313-manylinux_2_27_x86_64.whl - - pypi: https://files.pythonhosted.org/packages/6b/e0/91e5762a7ddb6351b07c742ca407cd28e26043d6945d6228b6c1b0881a45/jaxlib-0.8.2-cp313-cp313-manylinux_2_27_x86_64.whl + - pypi: 
https://files.pythonhosted.org/packages/27/58/a5a27d4677d6890570f7e58cecd51891469cb620e6f64c8faed4935d93d0/jax_cuda12_plugin-0.8.2-cp314-cp314-manylinux_2_27_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/5e/27/2e6032727e41ce74914277478021140947af59127d68aa9e6f3776b428fd/jaxlib-0.8.2-cp314-cp314-manylinux_2_27_x86_64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/eb/33/40cd74219417e78b97c47802037cf2d87b91973e18bb968a7da48a96ea44/ml_dtypes-0.5.4-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/c6/bb/82c7dcf38070b46172a517e2334e665c5bf374a262f99a283ea454bece7c/ml_dtypes-0.5.4-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/77/3c/aa88abe01f3be3d1f8f787d1d33dc83e76fec05945f9a28fbb41cfb99cd5/nvidia_cublas_cu12-12.9.1.4-py3-none-manylinux_2_27_x86_64.whl - pypi: https://files.pythonhosted.org/packages/18/2a/d4cd8506d2044e082f8cd921be57392e6a9b5ccd3ffdf050362430a3d5d5/nvidia_cuda_cccl_cu12-12.9.27-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl - pypi: https://files.pythonhosted.org/packages/c1/2e/b84e32197e33f39907b455b83395a017e697c07a449a2b15fd07fc1c9981/nvidia_cuda_cupti_cu12-12.9.79-py3-none-manylinux_2_25_x86_64.whl @@ -1982,10 +2603,10 @@ environments: - pypi: https://files.pythonhosted.org/packages/46/0c/c75bbfb967457a0b7670b8ad267bfc4fffdf341c074e0a80db06c24ccfd4/nvidia_nvjitlink_cu12-12.9.86-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl - pypi: https://files.pythonhosted.org/packages/64/b9/6ab941001c23cfb43499b5b0b7417b0bb4dfba3a29ffa2b06985422dad50/nvidia_nvshmem_cu12-3.5.19-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl - pypi: 
https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - - pypi: https://files.pythonhosted.org/packages/15/07/284f757f63f8a8d69ed4472bfd85122bd086e637bf4ed09de572d575a693/pandas-2.3.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#522b8c9a21226569ffd25e950e44f0c5de308c9d + - pypi: https://files.pythonhosted.org/packages/15/b2/0e62f78c0c5ba7e3d2c5945a82456f4fac76c480940f805e0b97fcbc2f65/pandas-2.3.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/0e/50/80a8d080ac7d3d321e5e5d420c9a522b0aa770ec7013ea91f9a8b7d36e4a/sqlalchemy-2.0.45-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/b3/27/caf606ee924282fe4747ee4fd454b335a72a6e018f97eab5ff7f28199e16/sqlalchemy-2.0.45-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl - pypi: ./ ty: @@ -2002,46 +2623,43 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/_python_abi3_support-1.0-hd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/anyio-4.12.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/argon2-cffi-25.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-bindings-25.1.0-py313h07c4f96_2.conda + - conda: 
https://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-bindings-25.1.0-py314h5bd0f2a_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.0.5-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.17.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/backports.zstd-1.3.0-py313h18e8e13_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-6.3.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-with-css-6.3.0-h5f6438b_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-1.2.0-hed03a55_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-bin-1.2.0-hb03c661_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py313hf159716_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py314h3de4e8d_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-hda65f42_8.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/c-ares-1.34.6-hb03c661_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.1.4-hbd8a1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - - conda: 
https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py313hf46b229_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py314h4a8dc5f_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/comm-0.2.3-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.3.3-py313h7037e92_3.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/coverage-7.13.1-py313h3dea7bd_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.13.11-py313hd8ed1ab_100.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.3.3-py314h9891dd4_3.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/coverage-7.13.1-py314h67df5f8_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.2-py314hd8ed1ab_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cycler-0.12.1-pyhcf101f3_2.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py313h5d5ffb9_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py314h42812f9_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/elfutils-0.194-h849f50c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/exceptiongroup-1.3.1-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/execnet-2.1.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/fonttools-4.61.1-py313h3dea7bd_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/freetype-2.14.1-ha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/gmp-6.3.0-hac33072_2.conda @@ -2052,8 +2670,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/icu-78.1-h33c6efd_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/identify-2.6.15-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/icu-78.2-h33c6efd_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda @@ -2078,7 +2695,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_pygments-0.3.0-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_server-2.28.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/keyutils-1.6.3-hb9d3cd8_0.conda - - conda: 
https://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.4.9-py313hc8edb43_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.4.9-py314h97ea11e_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/krb5-1.21.3-h659f571_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/lcms2-2.17-h717163a_0.conda @@ -2132,12 +2749,12 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/lz4-c-1.10.0-h5888daf_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/lzo-2.10-h280c20c_1002.conda - conda: https://conda.anaconda.org/conda-forge/noarch/markdown-it-py-4.0.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/markupsafe-3.0.3-py313h3dea7bd_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py313h683a580_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/markupsafe-3.0.3-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py314h1194b4b_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mdit-py-plugins-0.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mdurl-0.1.2-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/memray-1.19.1-py313h422961c_3.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/memray-1.19.1-py314hef15ded_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/munkres-1.1.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/narwhals-2.15.0-pyhcf101f3_0.conda @@ -2148,26 +2765,25 @@ environments: - conda: 
https://conda.anaconda.org/conda-forge/noarch/nest-asyncio-1.6.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/nettle-3.10.1-h4a9d5aa_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py313hf6604e3_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py314h2b28147_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/openjpeg-2.5.4-h55fea9a_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/openssl-3.6.0-h26f9b46_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/orjson-3.11.5-py313h541fbb8_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/orjson-3.11.5-py314h3b757c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/p11-kit-0.25.10-h3435931_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/packaging-25.0-pyh29332c3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pandocfilters-1.5.0-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/parso-0.8.5-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pexpect-4.9.0-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/pillow-12.1.0-py313h80991f8_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/pillow-12.1.0-py314h8ec4b1a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - - 
conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/prek-0.3.0-hb17b654_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py313h54dd161_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py314h0f05182_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/pthread-stubs-0.4-hb9d3cd8_1002.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ptyprocess-0.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pure_eval-0.2.3-pyhd8ed1ab_1.conda @@ -2181,16 +2797,16 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-memray-1.8.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-timeout-2.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-xdist-3.8.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.13.11-hc97d973_100_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.14.2-h32b2ec7_100_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhe01879c_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-fastjsonschema-2.21.2-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.13.11-h4df99d1_100.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.2-h4df99d1_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-kaleido-1.2.0-pyhcf101f3_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2025.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.13-8_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytz-2025.2-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/pyyaml-6.0.3-py313h3dea7bd_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyyaml-6.0.3-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/pyzmq-27.1.0-py312hfb55c3c_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/qhull-2020.2-h434a139_5.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/readline-8.3-h853b02a_0.conda @@ -2200,31 +2816,30 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rich-14.2.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py313h843e2db_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.16.3-py313h4b8bb8b_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py314h2e6c369_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.16.3-py314hf07bd8e_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.0.0-pyha191276_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-80.9.0-pyhff2d567_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/simplejson-3.20.2-py313h07c4f96_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/simplejson-3.20.2-py314h5bd0f2a_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/snakeviz-2.2.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyhc90fa1f_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/textual-7.0.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/textual-7.0.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_ha0e22de_103.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/tornado-6.5.3-py313h07c4f96_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/tornado-6.5.3-py314h5bd0f2a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing-extensions-4.15.0-h396c80c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/ukkonen-1.0.1-py313h7037e92_6.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/unicodedata2-17.0.0-py314h5bd0f2a_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - - conda: 
https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -2240,21 +2855,20 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/fd/8e/424b8c6e78bd9837d14ff7df01a9829fc883ba2ab4ea787d4f848435f23f/greenlet-3.3.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/b8/14/bab308fc2c1b5228c3224ec2bf928ce2e4d21d8046c161e44a2012b5203e/greenlet-3.3.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/6b/e0/91e5762a7ddb6351b07c742ca407cd28e26043d6945d6228b6c1b0881a45/jaxlib-0.8.2-cp313-cp313-manylinux_2_27_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/5e/27/2e6032727e41ce74914277478021140947af59127d68aa9e6f3776b428fd/jaxlib-0.8.2-cp314-cp314-manylinux_2_27_x86_64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - - pypi: 
https://files.pythonhosted.org/packages/eb/33/40cd74219417e78b97c47802037cf2d87b91973e18bb968a7da48a96ea44/ml_dtypes-0.5.4-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/c6/bb/82c7dcf38070b46172a517e2334e665c5bf374a262f99a283ea454bece7c/ml_dtypes-0.5.4-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - - pypi: https://files.pythonhosted.org/packages/15/07/284f757f63f8a8d69ed4472bfd85122bd086e637bf4ed09de572d575a693/pandas-2.3.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#522b8c9a21226569ffd25e950e44f0c5de308c9d + - pypi: https://files.pythonhosted.org/packages/15/b2/0e62f78c0c5ba7e3d2c5945a82456f4fac76c480940f805e0b97fcbc2f65/pandas-2.3.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/64/20/69f2a39792a653fd64d916cd563ed79ec6e5dcfa6408c4674021d810afcf/pandas_stubs-2.3.3.251219-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/0e/50/80a8d080ac7d3d321e5e5d420c9a522b0aa770ec7013ea91f9a8b7d36e4a/sqlalchemy-2.0.45-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/b3/27/caf606ee924282fe4747ee4fd454b335a72a6e018f97eab5ff7f28199e16/sqlalchemy-2.0.45-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl - pypi: 
https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/9e/4c/2f9ac5edbd0e67bf82f5cd04275c4e87cbbf69a78f43e5dcf90c1573d44e/ty-0.0.10-py3-none-manylinux_2_24_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/74/18/8dd4fe6df1fd66f3e83b4798eddb1d8482d9d9b105f25099b76703402ebb/ty-0.0.11-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl - pypi: https://files.pythonhosted.org/packages/e7/c1/56ef16bf5dcd255155cc736d276efa6ae0a5c26fd685e28f0412a4013c01/types_pytz-2025.2.0.20251108-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/bd/e0/1eed384f02555dde685fff1a1ac805c1c7dcb6dd019c916fe659b1c1f9ec/types_pyyaml-6.0.12.20250915-py3-none-any.whl - pypi: ./ osx-arm64: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/_openmp_mutex-4.5-7_kmp_llvm.conda @@ -2262,44 +2876,41 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/anyio-4.12.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/appnope-0.1.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/argon2-cffi-25.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/argon2-cffi-bindings-25.1.0-py313h6535dbc_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/argon2-cffi-bindings-25.1.0-py314h0612a62_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.0.5-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.17.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/backports.zstd-1.3.0-py313h48bb75e_0.conda + - 
conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-6.3.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-with-css-6.3.0-h5f6438b_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-1.2.0-h7d5ae5b_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-bin-1.2.0-hc919400_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-python-1.2.0-py313hde1f3bb_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-python-1.2.0-py314h3daef5d_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/bzip2-1.0.8-hd037594_8.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.1.4-hbd8a1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/cffi-2.0.0-py313h224173a_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/cffi-2.0.0-py314h44086f9_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/comm-0.2.3-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/contourpy-1.3.3-py313ha61f8ec_3.conda - - conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/coverage-7.13.1-py313h65a2061_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.13.11-py313hd8ed1ab_100.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/contourpy-1.3.3-py314h784bc60_3.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/coverage-7.13.1-py314h6e9b3f0_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.2-py314hd8ed1ab_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cycler-0.12.1-pyhcf101f3_2.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/debugpy-1.8.19-py313hc37fe24_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/debugpy-1.8.19-py314hf820bb6_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/exceptiongroup-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/execnet-2.1.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/fonttools-4.61.1-py313h7d74516_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/freetype-2.14.1-hce30654_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda @@ -2308,7 +2919,6 @@ environments: - conda: 
https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/identify-2.6.15-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda @@ -2332,7 +2942,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab-4.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_pygments-0.3.0-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_server-2.28.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/kiwisolver-1.4.9-py313h7add70c_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/kiwisolver-1.4.9-py314h42813c9_2.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/krb5-1.21.3-h237132a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/lcms2-2.17-h7eeda09_0.conda @@ -2368,12 +2978,12 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/logistro-2.0.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/lz4-c-1.10.0-h286801f_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/markdown-it-py-4.0.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/markupsafe-3.0.3-py313h7d74516_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/matplotlib-base-3.10.8-py313h58042b9_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/noarch/markupsafe-3.0.3-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/matplotlib-base-3.10.8-py314hd63e3f0_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mdit-py-plugins-0.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mdurl-0.1.2-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/memray-1.19.1-py313h78c9487_3.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/memray-1.19.1-py314habef2a7_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/munkres-1.1.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/narwhals-2.15.0-pyhcf101f3_0.conda @@ -2383,33 +2993,32 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ncurses-6.5-h5e97a16_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/nest-asyncio-1.6.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/numpy-2.3.5-py313h16eae64_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/numpy-2.3.5-py314hae46ccb_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/openjpeg-2.5.4-hbfb3c88_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/openssl-3.6.0-h5503f6c_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/orjson-3.11.5-py313hfea8034_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/orjson-3.11.5-py314hda6d10a_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/packaging-25.0-pyh29332c3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pandocfilters-1.5.0-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/parso-0.8.5-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pexpect-4.9.0-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pillow-12.1.0-py313h45e5a15_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pillow-12.1.0-py314hab283cf_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/prek-0.3.0-h6fdd925_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/psutil-7.2.1-py313h6688731_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/psutil-7.2.1-py314ha14b1ff_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pthread-stubs-0.4-hd74edd7_1002.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ptyprocess-0.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pure_eval-0.2.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pybaum-0.1.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pycparser-2.22-pyh29332c3_1.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/pygments-2.19.2-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-core-12.1-py313h40b429f_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-framework-cocoa-12.1-py313hcc5defa_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-core-12.1-py314h3a4d195_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-framework-cocoa-12.1-py314h36abed7_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pyparsing-3.3.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pysocks-1.7.1-pyha55dd90_7.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-9.0.2-pyhcf101f3_0.conda @@ -2417,16 +3026,16 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-memray-1.8.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-timeout-2.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-xdist-3.8.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.13.11-hfc2f54d_100_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.14.2-h40d2674_100_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhe01879c_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-fastjsonschema-2.21.2-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.13.11-h4df99d1_100.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.2-h4df99d1_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-kaleido-1.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2025.3-pyhd8ed1ab_0.conda - - conda: 
https://conda.anaconda.org/conda-forge/noarch/python_abi-3.13-8_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytz-2025.2-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyyaml-6.0.3-py313h7d74516_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyyaml-6.0.3-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyzmq-27.1.0-py312hd65ceae_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/qhull-2020.2-h420ef59_5.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/readline-8.3-h46df422_0.conda @@ -2436,31 +3045,30 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rich-14.2.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/rpds-py-0.30.0-py313h2c089d5_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/scipy-1.16.3-py313h29d7d31_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/rpds-py-0.30.0-py314haad56a0_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/scipy-1.16.3-py314h725efaa_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.0.0-pyh5552912_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-80.9.0-pyhff2d567_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/simplejson-3.20.2-py313h6535dbc_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/simplejson-3.20.2-py314h0612a62_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/snakeviz-2.2.2-pyhd8ed1ab_1.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyhc90fa1f_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/textual-7.0.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/textual-7.0.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tk-8.6.13-h892fb3f_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tornado-6.5.4-py313h6535dbc_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tornado-6.5.4-py314h0612a62_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing-extensions-4.15.0-h396c80c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ukkonen-1.0.1-py313hc50a443_6.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/unicodedata2-17.0.0-py314h0612a62_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -2477,63 +3085,59 @@ environments: - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/c5/22/c0ec75e43a13b2457d78d509f49b49a57fa302ffced4f4a2778e428cb0a6/jaxlib-0.8.2-cp313-cp313-macosx_11_0_arm64.whl + - pypi: https://files.pythonhosted.org/packages/d8/9d/dca93d916bf8664d7a2bb73ea3d219028dabbe382c31774348963287356a/jaxlib-0.8.2-cp314-cp314-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/d9/a1/4008f14bbc616cfb1ac5b39ea485f9c63031c4634ab3f4cf72e7541f816a/ml_dtypes-0.5.4-cp313-cp313-macosx_10_13_universal2.whl + - pypi: https://files.pythonhosted.org/packages/72/4e/1339dc6e2557a344f5ba5590872e80346f76f6cb2ac3dd16e4666e88818c/ml_dtypes-0.5.4-cp314-cp314-macosx_10_13_universal2.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - - pypi: https://files.pythonhosted.org/packages/31/94/72fac03573102779920099bcac1c3b05975c2cb5f01eac609faf34bed1ca/pandas-2.3.3-cp313-cp313-macosx_11_0_arm64.whl + - pypi: 
git+https://github.com/optimagic-dev/optimagic.git?branch=main#522b8c9a21226569ffd25e950e44f0c5de308c9d + - pypi: https://files.pythonhosted.org/packages/21/00/266d6b357ad5e6d3ad55093a7e8efc7dd245f5a842b584db9f30b0f0a287/pandas-2.3.3-cp314-cp314-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/64/20/69f2a39792a653fd64d916cd563ed79ec6e5dcfa6408c4674021d810afcf/pandas_stubs-2.3.3.251219-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/bf/e1/3ccb13c643399d22289c6a9786c1a91e3dcbb68bce4beb44926ac2c557bf/sqlalchemy-2.0.45-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/e8/cd/9dd49e6d40e54d4b7d563f9e2a432c4ec002c0673a81266e269c4bc194ce/ty-0.0.10-py3-none-macosx_11_0_arm64.whl + - pypi: https://files.pythonhosted.org/packages/ad/01/3a563dba8b1255e474c35e1c3810b7589e81ae8c41df401b6a37c8e2cde9/ty-0.0.11-py3-none-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/e7/c1/56ef16bf5dcd255155cc736d276efa6ae0a5c26fd685e28f0412a4013c01/types_pytz-2025.2.0.20251108-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/bd/e0/1eed384f02555dde685fff1a1ac805c1c7dcb6dd019c916fe659b1c1f9ec/types_pyyaml-6.0.12.20250915-py3-none-any.whl - pypi: ./ win-64: - conda: https://conda.anaconda.org/conda-forge/win-64/_openmp_mutex-4.5-2_gnu.conda - conda: https://conda.anaconda.org/conda-forge/noarch/_python_abi3_support-1.0-hd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/anyio-4.12.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/argon2-cffi-25.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/argon2-cffi-bindings-25.1.0-py313h5ea7bf4_2.conda + 
- conda: https://conda.anaconda.org/conda-forge/win-64/argon2-cffi-bindings-25.1.0-py314h5a2d7ad_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.0.5-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.17.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/backports.zstd-1.3.0-py313h2a31948_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-6.3.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-with-css-6.3.0-h5f6438b_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/brotli-1.2.0-h2d644bc_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/brotli-bin-1.2.0-hfd05255_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/brotli-python-1.2.0-py313h3ebfc14_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/brotli-python-1.2.0-py314he701e3d_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/bzip2-1.0.8-h0ad9c76_8.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.1.4-h4c7d964_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/cffi-2.0.0-py313h5ea7bf4_1.conda - - conda: 
https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/cffi-2.0.0-py314h5a2d7ad_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/comm-0.2.3-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/contourpy-1.3.3-py313hf069bd2_3.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/coverage-7.13.1-py313hd650c13_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.13.11-py313hd8ed1ab_100.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/contourpy-1.3.3-py314h909e829_3.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/coverage-7.13.1-py314h2359020_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.2-py314hd8ed1ab_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cycler-0.12.1-pyhcf101f3_2.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/debugpy-1.8.19-py313h927ade5_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/debugpy-1.8.19-py314hb98de8c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/exceptiongroup-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/execnet-2.1.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - - conda: 
https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/fonttools-4.61.1-py313hd650c13_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/freetype-2.14.1-h57928b3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda @@ -2543,7 +3147,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/icu-78.1-h637d24d_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/identify-2.6.15-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda @@ -2567,7 +3170,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab-4.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_pygments-0.3.0-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_server-2.28.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/kiwisolver-1.4.9-py313h1a38498_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/kiwisolver-1.4.9-py314hf309875_2.conda - conda: https://conda.anaconda.org/conda-forge/win-64/krb5-1.21.3-hdf4eb48_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/win-64/lcms2-2.17-hbcf6048_0.conda @@ -2584,7 +3187,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/libfreetype6-2.14.1-hdbac1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libgcc-15.2.0-h8ee18e1_16.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libgomp-15.2.0-h8ee18e1_16.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/libhwloc-2.12.1-default_h4379cf1_1003.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libhwloc-2.12.2-default_h4379cf1_1000.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libiconv-1.18-hc1393d2_2.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libjpeg-turbo-3.1.2-hfd05255_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/liblapack-3.11.0-5_hf9ab0e9_mkl.conda @@ -2602,8 +3205,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/libzlib-1.3.1-h2466b09_2.conda - conda: https://conda.anaconda.org/conda-forge/win-64/llvm-openmp-21.1.8-h4fa8253_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/logistro-2.0.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/markupsafe-3.0.3-py313hd650c13_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/matplotlib-base-3.10.8-py313he1ded55_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/markupsafe-3.0.3-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/matplotlib-base-3.10.8-py314hfa45d96_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/mkl-2025.3.0-hac47afa_455.conda @@ -2614,24 +3217,23 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/nbformat-5.10.4-pyhd8ed1ab_1.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/nest-asyncio-1.6.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/numpy-2.3.5-py313hce7ae62_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/numpy-2.3.5-py314h06c3c77_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/openjpeg-2.5.4-h24db6dd_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/openssl-3.6.0-h725018a_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/orjson-3.11.4-py313hfbe8231_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/orjson-3.11.5-py314h64f83cb_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/packaging-25.0-pyh29332c3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pandocfilters-1.5.0-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/parso-0.8.5-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/pillow-12.1.0-py313h38f99e1_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/pillow-12.1.0-py314h61b30b5_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/prek-0.3.0-h18a1a76_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/psutil-7.2.1-py313h5fd188c_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/psutil-7.2.1-py314hc5dbbe4_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/pthread-stubs-0.4-h0e40799_1002.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pure_eval-0.2.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pybaum-0.1.3-pyhd8ed1ab_1.conda @@ -2643,18 +3245,18 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-cov-7.0.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-timeout-2.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-xdist-3.8.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/python-3.13.11-h09917c8_100_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/python-3.14.2-h4b44e0e_100_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhe01879c_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-fastjsonschema-2.21.2-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.13.11-h4df99d1_100.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.2-h4df99d1_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-kaleido-1.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2025.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.13-8_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytz-2025.2-pyhd8ed1ab_0.conda - - 
conda: https://conda.anaconda.org/conda-forge/win-64/pywin32-311-py313h40c08fc_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/pywinpty-2.0.15-py313h5813708_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/pyyaml-6.0.3-py313hd650c13_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/pywin32-311-py314h8f8f202_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/pywinpty-2.0.15-py314h51f0985_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyyaml-6.0.3-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/pyzmq-27.1.0-py312hbb5da91_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/qhull-2020.2-hc790b64_5.conda - conda: https://conda.anaconda.org/conda-forge/noarch/referencing-0.37.0-pyhcf101f3_0.conda @@ -2662,35 +3264,34 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3339-validator-0.1.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/rpds-py-0.30.0-py313hfbe8231_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/scipy-1.16.3-py313he51e9a2_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/rpds-py-0.30.0-py314h9f07db2_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/scipy-1.16.3-py314h221f224_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.0.0-pyh6dadd2b_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-80.9.0-pyhff2d567_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/simplejson-3.20.2-py313h5ea7bf4_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/simplejson-3.20.2-py314h5a2d7ad_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/snakeviz-2.2.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/tbb-2022.3.0-hd094cb3_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/tbb-2022.3.0-h3155e25_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyh6dadd2b_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/tk-8.6.13-h2c6b04d_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/tornado-6.5.4-py313h5ea7bf4_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/tornado-6.5.4-py314h5a2d7ad_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing-extensions-4.15.0-h396c80c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/ucrt-10.0.26100.0-h57928b3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/ukkonen-1.0.1-py313hf069bd2_6.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/unicodedata2-17.0.0-py314h5a2d7ad_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/win-64/vc-14.3-h41ae7f8_34.conda - conda: https://conda.anaconda.org/conda-forge/win-64/vc14_runtime-14.44.35208-h818238b_34.conda - conda: https://conda.anaconda.org/conda-forge/win-64/vcomp14-14.44.35208-h818238b_34.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -2708,22 +3309,21 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/7e/71/ba21c3fb8c5dce83b8c01f458a42e99ffdb1963aeec08fff5a18588d8fd7/greenlet-3.3.0-cp313-cp313-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/7c/9a/9030e6f9aa8fd7808e9c31ba4c38f87c4f8ec324ee67431d181fe396d705/greenlet-3.3.0-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/85/68/25b38673b07a808616ce7b6efb3eed491f983f3373a09cbbd03f67178563/jaxlib-0.8.2-cp313-cp313-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/b3/8c/af5a00b07a446414edf6b84a7397eab02cf01ba44b6ae1fce7798ce4c127/jaxlib-0.8.2-cp314-cp314-win_amd64.whl - pypi: 
https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/e1/8b/200088c6859d8221454825959df35b5244fa9bdf263fd0249ac5fb75e281/ml_dtypes-0.5.4-cp313-cp313-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/e9/93/2bfed22d2498c468f6bcd0d9f56b033eaa19f33320389314c19ef6766413/ml_dtypes-0.5.4-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - - pypi: https://files.pythonhosted.org/packages/4f/c7/e54682c96a895d0c808453269e0b5928a07a127a15704fedb643e9b0a4c8/pandas-2.3.3-cp313-cp313-win_amd64.whl + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#522b8c9a21226569ffd25e950e44f0c5de308c9d + - pypi: https://files.pythonhosted.org/packages/a6/3d/124ac75fcd0ecc09b8fdccb0246ef65e35b012030defb0e0eba2cbbbe948/pandas-2.3.3-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/64/20/69f2a39792a653fd64d916cd563ed79ec6e5dcfa6408c4674021d810afcf/pandas_stubs-2.3.3.251219-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/5a/dc/491b7661614ab97483abf2056be1deee4dc2490ecbf7bff9ab5cdbac86e1/pyreadline3-3.5.4-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/c0/c5/d17113020b2d43073412aeca09b60d2009442420372123b8d49cc253f8b8/sqlalchemy-2.0.45-cp313-cp313-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/89/a2/0e1590e9adb292b1d576dbcf67ff7df8cf55e56e78d2c927686d01080f4b/sqlalchemy-2.0.45-cp314-cp314-win_amd64.whl - pypi: 
https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/42/36/82e66b9753a76964d26fd9bc3514ea0abce0a5ba5ad7d5f084070c6981da/ty-0.0.10-py3-none-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/df/04/5a5dfd0aec0ea99ead1e824ee6e347fb623c464da7886aa1e3660fb0f36c/ty-0.0.11-py3-none-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/e7/c1/56ef16bf5dcd255155cc736d276efa6ae0a5c26fd685e28f0412a4013c01/types_pytz-2025.2.0.20251108-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/bd/e0/1eed384f02555dde685fff1a1ac805c1c7dcb6dd019c916fe659b1c1f9ec/types_pyyaml-6.0.12.20250915-py3-none-any.whl - pypi: ./ packages: - conda: https://conda.anaconda.org/conda-forge/linux-64/_libgcc_mutex-0.1-conda_forge.tar.bz2 @@ -2835,43 +3435,43 @@ packages: - pkg:pypi/argon2-cffi?source=hash-mapping size: 18715 timestamp: 1749017288144 -- conda: https://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-bindings-25.1.0-py313h07c4f96_2.conda - sha256: ad188ccc06a06c633dc124b09e9e06fb9df4c32ffc38acc96ecc86e506062090 - md5: 27bbec9f2f3a15d32b60ec5734f5b41c +- conda: https://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-bindings-25.1.0-py314h5bd0f2a_2.conda + sha256: 39234a99df3d2e3065383808ed8bfda36760de5ef590c54c3692bb53571ef02b + md5: 3cca1b74b2752917b5b65b81f61f0553 depends: - __glibc >=2.17,<3.0.a0 - - cffi >=1.0.1 + - cffi >=2.0.0b1 - libgcc >=14 - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 license: MIT license_family: MIT purls: - pkg:pypi/argon2-cffi-bindings?source=hash-mapping - size: 35943 - timestamp: 1762509452935 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/argon2-cffi-bindings-25.1.0-py313h6535dbc_2.conda - sha256: 05ea6fa7109235cfb4fc24526bae1fe82d88bbb5e697ab3945c313f5f041af5b - md5: e23e087109b2096db4cf9a3985bab329 
+ size: 35598 + timestamp: 1762509505285 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/argon2-cffi-bindings-25.1.0-py314h0612a62_2.conda + sha256: aab60bbaea5cc49dff37438d1ad469d64025cda2ce58103cf68da61701ed2075 + md5: a240a79a49a95b388ef81ccda27a5e51 depends: - __osx >=11.0 - - cffi >=1.0.1 - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 - - python_abi 3.13.* *_cp313 + - cffi >=2.0.0b1 + - python >=3.14,<3.15.0a0 + - python >=3.14,<3.15.0a0 *_cp314 + - python_abi 3.14.* *_cp314 license: MIT license_family: MIT purls: - pkg:pypi/argon2-cffi-bindings?source=hash-mapping - size: 33947 - timestamp: 1762510144907 -- conda: https://conda.anaconda.org/conda-forge/win-64/argon2-cffi-bindings-25.1.0-py313h5ea7bf4_2.conda - sha256: 3f8a1affdfeb2be5289d709e365fc6e386d734773895215cf8cbc5100fa6af9a - md5: eabb4b677b54874d7d6ab775fdaa3d27 - depends: - - cffi >=1.0.1 - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + size: 34218 + timestamp: 1762509977830 +- conda: https://conda.anaconda.org/conda-forge/win-64/argon2-cffi-bindings-25.1.0-py314h5a2d7ad_2.conda + sha256: a742e7cd0d5534bfff3fd550a0c1e430411fad60a24f88930d261056ab08096f + md5: ffa247e46f47e157851dc547f4c513e4 + depends: + - cffi >=2.0.0b1 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 - ucrt >=10.0.20348.0 - vc >=14.3,<15 - vc14_runtime >=14.44.35208 @@ -2879,8 +3479,8 @@ packages: license_family: MIT purls: - pkg:pypi/argon2-cffi-bindings?source=hash-mapping - size: 38779 - timestamp: 1762509796090 + size: 38653 + timestamp: 1762509771011 - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda sha256: 792da8131b1b53ff667bd6fc617ea9087b570305ccb9913deb36b8e12b3b5141 md5: 85c4f19f377424eafc4ed7911b291642 @@ -2945,49 +3545,16 @@ packages: - pkg:pypi/babel?source=hash-mapping size: 6938256 timestamp: 1738490268466 -- conda: https://conda.anaconda.org/conda-forge/linux-64/backports.zstd-1.3.0-py313h18e8e13_0.conda - sha256: 
9552afbec37c4d8d0e83a5c4c6b3c7f4b8785f935094ce3881e0a249045909ce - md5: d9e90792551a527200637e23a915dd79 - depends: - - python - - libgcc >=14 - - __glibc >=2.17,<3.0.a0 - - python_abi 3.13.* *_cp313 - - zstd >=1.5.7,<1.6.0a0 - license: BSD-3-Clause AND MIT AND EPL-2.0 - purls: - - pkg:pypi/backports-zstd?source=hash-mapping - size: 240943 - timestamp: 1767044981366 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/backports.zstd-1.3.0-py313h48bb75e_0.conda - sha256: f3047ca3b41bb444b4b5a71a6eee182623192c77019746dd4685fd260becb249 - md5: 54008c5cc8928e5cb5a0f9206b829451 - depends: - - python - - python 3.13.* *_cp313 - - __osx >=11.0 - - zstd >=1.5.7,<1.6.0a0 - - python_abi 3.13.* *_cp313 - license: BSD-3-Clause AND MIT AND EPL-2.0 - purls: - - pkg:pypi/backports-zstd?source=hash-mapping - size: 244371 - timestamp: 1767045003420 -- conda: https://conda.anaconda.org/conda-forge/win-64/backports.zstd-1.3.0-py313h2a31948_0.conda - sha256: 1e76ed9bcf07ef1df9c964d73e9cda08a0380845d09c8da1678a1687dc087c34 - md5: cdcdfe68c5bc9af9e908e35ebffc9fe1 +- conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda + noarch: generic + sha256: c31ab719d256bc6f89926131e88ecd0f0c5d003fe8481852c6424f4ec6c7eb29 + md5: a2ac7763a9ac75055b68f325d3255265 depends: - - python - - vc >=14.3,<15 - - vc14_runtime >=14.44.35208 - - ucrt >=10.0.20348.0 - - python_abi 3.13.* *_cp313 - - zstd >=1.5.7,<1.6.0a0 + - python >=3.14 license: BSD-3-Clause AND MIT AND EPL-2.0 - purls: - - pkg:pypi/backports-zstd?source=hash-mapping - size: 240406 - timestamp: 1767045016907 + purls: [] + size: 7514 + timestamp: 1767044983590 - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda sha256: bf1e71c3c0a5b024e44ff928225a0874fc3c3356ec1a0b6fe719108e6d1288f6 md5: 5267bef8efea4127aacd1f4e1f149b6e @@ -3128,46 +3695,46 @@ packages: purls: [] size: 22714 timestamp: 1764017952449 -- conda: 
https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py313hf159716_1.conda - sha256: dadec2879492adede0a9af0191203f9b023f788c18efd45ecac676d424c458ae - md5: 6c4d3597cf43f3439a51b2b13e29a4ba +- conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py314h3de4e8d_1.conda + sha256: 3ad3500bff54a781c29f16ce1b288b36606e2189d0b0ef2f67036554f47f12b0 + md5: 8910d2c46f7e7b519129f486e0fe927a depends: - __glibc >=2.17,<3.0.a0 - libgcc >=14 - libstdcxx >=14 - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 constrains: - libbrotlicommon 1.2.0 hb03c661_1 license: MIT license_family: MIT purls: - pkg:pypi/brotli?source=hash-mapping - size: 367721 - timestamp: 1764017371123 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-python-1.2.0-py313hde1f3bb_1.conda - sha256: 2e21dccccd68bedd483300f9ab87a425645f6776e6e578e10e0dd98c946e1be9 - md5: b03732afa9f4f54634d94eb920dfb308 + size: 367376 + timestamp: 1764017265553 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-python-1.2.0-py314h3daef5d_1.conda + sha256: 5c2e471fd262fcc3c5a9d5ea4dae5917b885e0e9b02763dbd0f0d9635ed4cb99 + md5: f9501812fe7c66b6548c7fcaa1c1f252 depends: - __osx >=11.0 - libcxx >=19 - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python >=3.14,<3.15.0a0 *_cp314 + - python_abi 3.14.* *_cp314 constrains: - libbrotlicommon 1.2.0 hc919400_1 license: MIT license_family: MIT purls: - pkg:pypi/brotli?source=hash-mapping - size: 359568 - timestamp: 1764018359470 -- conda: https://conda.anaconda.org/conda-forge/win-64/brotli-python-1.2.0-py313h3ebfc14_1.conda - sha256: 3558006cd6e836de8dff53cbe5f0b9959f96ea6a6776b4e14f1c524916dd956c - md5: 916a39a0261621b8c33e9db2366dd427 - depends: - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + size: 359854 + timestamp: 1764018178608 +- conda: 
https://conda.anaconda.org/conda-forge/win-64/brotli-python-1.2.0-py314he701e3d_1.conda + sha256: 6854ee7675135c57c73a04849c29cbebc2fb6a3a3bfee1f308e64bf23074719b + md5: 1302b74b93c44791403cbeee6a0f62a3 + depends: + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 - ucrt >=10.0.20348.0 - vc >=14.3,<15 - vc14_runtime >=14.44.35208 @@ -3177,8 +3744,8 @@ packages: license_family: MIT purls: - pkg:pypi/brotli?source=hash-mapping - size: 335605 - timestamp: 1764018132514 + size: 335782 + timestamp: 1764018443683 - conda: https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-hda65f42_8.conda sha256: c30daba32ddebbb7ded490f0e371eae90f51e72db620554089103b4a6934b0d5 md5: 51a19bba1b8ebfb60df25cde030b7ebc @@ -3223,6 +3790,16 @@ packages: purls: [] size: 207882 timestamp: 1765214722852 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/c-ares-1.34.6-hc919400_0.conda + sha256: 2995f2aed4e53725e5efbc28199b46bf311c3cab2648fc4f10c2227d6d5fa196 + md5: bcb3cba70cf1eec964a03b4ba7775f01 + depends: + - __osx >=11.0 + license: MIT + license_family: MIT + purls: [] + size: 180327 + timestamp: 1765215064054 - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.1.4-h4c7d964_0.conda sha256: 4ddcb01be03f85d3db9d881407fb13a673372f1b9fac9c836ea441893390e049 md5: 84d389c9eee640dda3d26fc5335c67d8 @@ -3273,45 +3850,45 @@ packages: - pkg:pypi/certifi?source=compressed-mapping size: 150969 timestamp: 1767500900768 -- conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py313hf46b229_1.conda - sha256: 2162a91819945c826c6ef5efe379e88b1df0fe9a387eeba23ddcf7ebeacd5bd6 - md5: d0616e7935acab407d1543b28c446f6f +- conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py314h4a8dc5f_1.conda + sha256: c6339858a0aaf5d939e00d345c98b99e4558f285942b27232ac098ad17ac7f8e + md5: cf45f4278afd6f4e6d03eda0f435d527 depends: - __glibc >=2.17,<3.0.a0 - libffi >=3.5.2,<3.6.0a0 - libgcc >=14 - pycparser - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* 
*_cp313 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 license: MIT license_family: MIT purls: - pkg:pypi/cffi?source=hash-mapping - size: 298357 - timestamp: 1761202966461 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/cffi-2.0.0-py313h224173a_1.conda - sha256: 1fa69651f5e81c25d48ac42064db825ed1a3e53039629db69f86b952f5ce603c - md5: 050374657d1c7a4f2ea443c0d0cbd9a0 + size: 300271 + timestamp: 1761203085220 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/cffi-2.0.0-py314h44086f9_1.conda + sha256: 5b5ee5de01eb4e4fd2576add5ec9edfc654fbaf9293e7b7ad2f893a67780aa98 + md5: 10dd19e4c797b8f8bdb1ec1fbb6821d7 depends: - __osx >=11.0 - libffi >=3.5.2,<3.6.0a0 - pycparser - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python >=3.14,<3.15.0a0 *_cp314 + - python_abi 3.14.* *_cp314 license: MIT license_family: MIT purls: - pkg:pypi/cffi?source=hash-mapping - size: 291376 - timestamp: 1761203583358 -- conda: https://conda.anaconda.org/conda-forge/win-64/cffi-2.0.0-py313h5ea7bf4_1.conda - sha256: f867a11f42bb64a09b232e3decf10f8a8fe5194d7e3a216c6bac9f40483bd1c6 - md5: 55b44664f66a2caf584d72196aa98af9 + size: 292983 + timestamp: 1761203354051 +- conda: https://conda.anaconda.org/conda-forge/win-64/cffi-2.0.0-py314h5a2d7ad_1.conda + sha256: 924f2f01fa7a62401145ef35ab6fc95f323b7418b2644a87fea0ea68048880ed + md5: c360170be1c9183654a240aadbedad94 depends: - pycparser - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 - ucrt >=10.0.20348.0 - vc >=14.3,<15 - vc14_runtime >=14.44.35208 @@ -3319,19 +3896,8 @@ packages: license_family: MIT purls: - pkg:pypi/cffi?source=hash-mapping - size: 292681 - timestamp: 1761203203673 -- conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda - sha256: aa589352e61bb221351a79e5946d56916e3c595783994884accdb3b97fe9d449 - md5: 
381bd45fb7aa032691f3063aff47e3a1 - depends: - - python >=3.10 - license: MIT - license_family: MIT - purls: - - pkg:pypi/cfgv?source=hash-mapping - size: 13589 - timestamp: 1763607964133 + size: 294731 + timestamp: 1761203441365 - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda sha256: b32f8362e885f1b8417bac2b3da4db7323faa12d5db62b7fd6691c02d60d6f59 md5: a22d1fd9bf98827e280a02875d9a007a @@ -3385,45 +3951,45 @@ packages: - pkg:pypi/comm?source=hash-mapping size: 14690 timestamp: 1753453984907 -- conda: https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.3.3-py313h7037e92_3.conda - sha256: c545751fd48f119f2c28635514e6aa6ae784d9a1d4eb0e10be16c776e961f333 - md5: 6186382cb34a9953bf2a18fc763dc346 +- conda: https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.3.3-py314h9891dd4_3.conda + sha256: 54c79736927c787e535db184bb7f3bce13217cb7d755c50666cfc0da7c6c86f3 + md5: 72d57382d0f63c20a16b1d514fcde6ff depends: - __glibc >=2.17,<3.0.a0 - libgcc >=14 - libstdcxx >=14 - numpy >=1.25 - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 license: BSD-3-Clause license_family: BSD purls: - pkg:pypi/contourpy?source=hash-mapping - size: 297459 - timestamp: 1762525479137 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/contourpy-1.3.3-py313ha61f8ec_3.conda - sha256: a0e69aa3a039f0dab4af8c30933bcc6b718404263a002936c21c274b1f460958 - md5: 5643cff3e9ab77999fba139465156e35 + size: 299226 + timestamp: 1762525516589 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/contourpy-1.3.3-py314h784bc60_3.conda + sha256: e5ca7f079f9bd49a9fce837dfe9014d96603600a29e5575cce19895d3639182c + md5: d75fae59fe0c8863de391e95959b2c65 depends: - __osx >=11.0 - libcxx >=19 - numpy >=1.25 - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python >=3.14,<3.15.0a0 *_cp314 + - python_abi 3.14.* 
*_cp314 license: BSD-3-Clause license_family: BSD purls: - pkg:pypi/contourpy?source=hash-mapping - size: 259519 - timestamp: 1762526242160 -- conda: https://conda.anaconda.org/conda-forge/win-64/contourpy-1.3.3-py313hf069bd2_3.conda - sha256: f5acc168a1f5eedd159bd1a89dc1dd4d901dc0502b769b4fca2bc5bdb4293fcf - md5: a1d5292683730418cd19b6e0cefcfc76 + size: 262199 + timestamp: 1762525837746 +- conda: https://conda.anaconda.org/conda-forge/win-64/contourpy-1.3.3-py314h909e829_3.conda + sha256: f014eb687eb8dd25cec124594f4e48cf85803ff1db85a2a1f95719f9ec6434d2 + md5: 3647d90eea49efc6076729ef0ae81075 depends: - numpy >=1.25 - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 - ucrt >=10.0.20348.0 - vc >=14.3,<15 - vc14_runtime >=14.44.35208 @@ -3431,44 +3997,44 @@ packages: license_family: BSD purls: - pkg:pypi/contourpy?source=hash-mapping - size: 225553 - timestamp: 1762525633181 -- conda: https://conda.anaconda.org/conda-forge/linux-64/coverage-7.13.1-py313h3dea7bd_0.conda - sha256: 4275280f4fcef6cd0a0e5cd236120d7454a11390dd4c271378bf90bc563f6780 - md5: 82315acb438e857f809f556e2dcdb822 + size: 227536 + timestamp: 1762525688384 +- conda: https://conda.anaconda.org/conda-forge/linux-64/coverage-7.13.1-py314h67df5f8_0.conda + sha256: 63b91c7308704819bc35747ed88097c391a75502921f7f3c9422d42e1ed07909 + md5: a4525263f2fa741bffa4af1e40aec245 depends: - __glibc >=2.17,<3.0.a0 - libgcc >=14 - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 - tomli license: Apache-2.0 license_family: APACHE purls: - pkg:pypi/coverage?source=hash-mapping - size: 393234 - timestamp: 1766951417242 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/coverage-7.13.1-py313h65a2061_0.conda - sha256: 46e4af43bd60580fda7955cc6c21b3a40465ef25a98c2a256419dc74caae56b0 - md5: 3283d95f985c7f293cb13bb7e33500a5 + size: 410205 + timestamp: 1766951484026 +- conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/coverage-7.13.1-py314h6e9b3f0_0.conda + sha256: 06311a6cb704c7c2db910ef4bda5f4d4f2c3a9e8bdffe4cc5c4481fc253a47d6 + md5: 39869c1b0010c430849a7c2585c65f47 depends: - __osx >=11.0 - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python >=3.14,<3.15.0a0 *_cp314 + - python_abi 3.14.* *_cp314 - tomli license: Apache-2.0 license_family: APACHE purls: - pkg:pypi/coverage?source=hash-mapping - size: 393649 - timestamp: 1766951606379 -- conda: https://conda.anaconda.org/conda-forge/win-64/coverage-7.13.1-py313hd650c13_0.conda - sha256: d41807f993eb1c097594f6481dc4a3ea1080ed57cfd1f0721216a3d7f7f3f949 - md5: 6799738f6603dfddd97389ee3e65e891 - depends: - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + size: 409230 + timestamp: 1766951563419 +- conda: https://conda.anaconda.org/conda-forge/win-64/coverage-7.13.1-py314h2359020_0.conda + sha256: fd24db3e7d3407ae7a15cd636722c84ca26e4c274f639084cdd18afa6612fe5b + md5: c5cb6c314f63b0bd76c67775a515364d + depends: + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 - tomli - ucrt >=10.0.20348.0 - vc >=14.3,<15 @@ -3477,19 +4043,19 @@ packages: license_family: APACHE purls: - pkg:pypi/coverage?source=hash-mapping - size: 418313 - timestamp: 1766951491957 -- conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.13.11-py313hd8ed1ab_100.conda + size: 434074 + timestamp: 1766951384017 +- conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.2-py314hd8ed1ab_100.conda noarch: generic - sha256: 63f677762304e6f8dc55e11dff6aafe71129cbbd0a77d176b99ba1f6a5053b77 - md5: 5bf347916a543bcb290c780fa449bf73 + sha256: 9e345f306446500956ffb1414b773f5476f497d7a2b5335a59edd2c335209dbb + md5: 30f999d06f347b0116f0434624b6e559 depends: - - python >=3.13,<3.14.0a0 - - python_abi * *_cp313 + - python >=3.14,<3.15.0a0 + - python_abi * *_cp314 license: Python-2.0 purls: [] - size: 48369 - timestamp: 
1765019689213 + size: 49298 + timestamp: 1765020324943 - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-cccl_linux-64-12.9.27-ha770c72_0.conda sha256: 2ee3b9564ca326226e5cda41d11b251482df8e7c757e333d28ec75213c75d126 md5: 87ff6381e33b76e5b9b179a2cdd005ec @@ -3732,51 +4298,51 @@ packages: - flatten-dict - networkx requires_python: '>=3.10' -- conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py313h5d5ffb9_0.conda - sha256: 29d10b4520846d3cbc511545552c11b726199013354e7517a53679272629c20d - md5: 80fd7ff9877570d12cabb5c5037dac89 +- conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py314h42812f9_0.conda + sha256: 2803e9285da433a5d704a63ac9c64c87b5df9aaa1e2d48cc333e65d5a945912e + md5: 69635aa34b45d84c2599ff8b48094978 depends: - python + - libgcc >=14 - __glibc >=2.17,<3.0.a0 - libstdcxx >=14 - - libgcc >=14 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 license: MIT license_family: MIT purls: - pkg:pypi/debugpy?source=hash-mapping - size: 2870642 - timestamp: 1765704059389 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/debugpy-1.8.19-py313hc37fe24_0.conda - sha256: 1eb7c9f5a994e273d714e945253fff40413fd63de9f6d5e01989d6d96199dad0 - md5: 95287e5abbe8a588d2a8d234f3d591a7 + size: 2888322 + timestamp: 1765704065377 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/debugpy-1.8.19-py314hf820bb6_0.conda + sha256: 5c263dafa3660660087443ad37e32e0597067cf098b351230a76adf83e462e12 + md5: 45961f5d077fca30eeff1a1973aca63d depends: - python - - python 3.13.* *_cp313 - - libcxx >=19 - __osx >=11.0 - - python_abi 3.13.* *_cp313 + - python 3.14.* *_cp314 + - libcxx >=19 + - python_abi 3.14.* *_cp314 license: MIT license_family: MIT purls: - - pkg:pypi/debugpy?source=compressed-mapping - size: 2759061 - timestamp: 1765840814720 -- conda: https://conda.anaconda.org/conda-forge/win-64/debugpy-1.8.19-py313h927ade5_0.conda - sha256: d6d62b00c9a81cf9f183b9f3929455f11e1906e37891a28b953237245df6a5f3 - md5: 
a7e77991e54b031328253da027e2f3e1 + - pkg:pypi/debugpy?source=hash-mapping + size: 2776268 + timestamp: 1765840821598 +- conda: https://conda.anaconda.org/conda-forge/win-64/debugpy-1.8.19-py314hb98de8c_0.conda + sha256: 0ad7f50f664ede3aafcd23458ce4f669f63e32f7efb74c0938260bdb829679df + md5: 3361deac30d356844406fbe6def54d5b depends: - python - vc >=14.3,<15 - vc14_runtime >=14.44.35208 - ucrt >=10.0.20348.0 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 license: MIT license_family: MIT purls: - pkg:pypi/debugpy?source=hash-mapping - size: 4002629 - timestamp: 1765840845981 + size: 4021751 + timestamp: 1765840833937 - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda sha256: c17c6b9937c08ad63cb20a26f403a3234088e57d4455600974a0ce865cb14017 md5: 9ce473d1d1be1cc3810856a48b3fab32 @@ -3799,17 +4365,6 @@ packages: - pkg:pypi/defusedxml?source=hash-mapping size: 24062 timestamp: 1615232388757 -- conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda - sha256: 6d977f0b2fc24fee21a9554389ab83070db341af6d6f09285360b2e09ef8b26e - md5: 003b8ba0a94e2f1e117d0bd46aebc901 - depends: - - python >=3.9 - license: Apache-2.0 - license_family: APACHE - purls: - - pkg:pypi/distlib?source=hash-mapping - size: 275642 - timestamp: 1752823081585 - conda: https://conda.anaconda.org/conda-forge/linux-64/elfutils-0.194-h849f50c_0.conda sha256: f71eae7dc8ff9392d225d2d529691b2db16289b7d8009646eeb1adf0caf3937b md5: 6da1f998c8ea85ba7692afbb5db72fb9 @@ -3863,16 +4418,6 @@ packages: - pkg:pypi/executing?source=hash-mapping size: 30753 timestamp: 1756729456476 -- conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - sha256: 8c4210ed4dc439e87528635e226042ddab9bf458d4d0a12e7ba48d6c5babd0f8 - md5: 7e7cf4d6c2be6991e6ae2b3f4331701c - depends: - - python >=3.10 - license: Unlicense - purls: - - pkg:pypi/filelock?source=compressed-mapping - size: 18646 - timestamp: 1767377337824 - conda: 
https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda sha256: dc81e6283bd2cdc5e8a3e5c88527870b2992a8f71f25ddec9dd995223c08aed8 md5: 261bd75b03d09c5eeea5aedf7365e811 @@ -3896,55 +4441,22 @@ packages: - pathlib2>=2.3,<3.0 ; python_full_version < '3.4' - six>=1.12,<2.0 requires_python: '>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*' -- conda: https://conda.anaconda.org/conda-forge/linux-64/fonttools-4.61.1-py313h3dea7bd_0.conda - sha256: 97f225199e6e5dfb93f551087c0951fee92db2d29a9dcb6a0346d66bff06fea4 - md5: c0f36dfbb130da4f6ce2df31f6b25ea8 - depends: - - __glibc >=2.17,<3.0.a0 - - brotli - - libgcc >=14 - - munkres - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 - license: MIT - license_family: MIT - purls: - - pkg:pypi/fonttools?source=hash-mapping - size: 2988776 - timestamp: 1765633043435 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/fonttools-4.61.1-py313h7d74516_0.conda - sha256: 52d4aacd7c154adff1f0e86609bf1b0e63b7049c947c4df1e78eedb9f2913091 - md5: 894eb0c3e9a17643906a6da3209bf045 - depends: - - __osx >=11.0 - - brotli - - munkres - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 - - python_abi 3.13.* *_cp313 - license: MIT - license_family: MIT - purls: - - pkg:pypi/fonttools?source=hash-mapping - size: 2897709 - timestamp: 1765632961717 -- conda: https://conda.anaconda.org/conda-forge/win-64/fonttools-4.61.1-py313hd650c13_0.conda - sha256: da82b8e843103bf4aaab470e4b8025286357dc8c34cd47817350dcb14ad307fb - md5: c6fbf3a96192c26a75ed5755bd904fea +- conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda + sha256: bb74f1732065eb95c3ea4ae7f7ab29d6ddaafe6da32f009106bf9a335147cb77 + md5: d5da976e963e70364b9e3ff270842b9f depends: - brotli - munkres - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 - - ucrt >=10.0.20348.0 - - vc >=14.3,<15 - - vc14_runtime >=14.44.35208 + - python >=3.10 + - unicodedata2 >=15.1.0 + track_features: + - fonttools_no_compile license: 
MIT license_family: MIT purls: - pkg:pypi/fonttools?source=hash-mapping - size: 2523451 - timestamp: 1765632913315 + size: 834764 + timestamp: 1765632669874 - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda sha256: 2509992ec2fd38ab27c7cdb42cf6cadc566a1cc0d1021a2673475d9fa87c6276 md5: d3549fd50d450b6d9e7dddff25dd2110 @@ -4043,10 +4555,10 @@ packages: purls: [] size: 2009354 timestamp: 1765814947748 -- pypi: https://files.pythonhosted.org/packages/7e/71/ba21c3fb8c5dce83b8c01f458a42e99ffdb1963aeec08fff5a18588d8fd7/greenlet-3.3.0-cp313-cp313-win_amd64.whl +- pypi: https://files.pythonhosted.org/packages/7c/9a/9030e6f9aa8fd7808e9c31ba4c38f87c4f8ec324ee67431d181fe396d705/greenlet-3.3.0-cp314-cp314-win_amd64.whl name: greenlet version: 3.3.0 - sha256: 9ee1942ea19550094033c35d25d20726e4f1c40d59545815e1128ac58d416d38 + sha256: 73f51dd0e0bdb596fb0417e475fa3c5e32d4c83638296e560086b8d7da7c4170 requires_dist: - sphinx ; extra == 'docs' - furo ; extra == 'docs' @@ -4054,10 +4566,10 @@ packages: - psutil ; extra == 'test' - setuptools ; extra == 'test' requires_python: '>=3.10' -- pypi: https://files.pythonhosted.org/packages/fd/8e/424b8c6e78bd9837d14ff7df01a9829fc883ba2ab4ea787d4f848435f23f/greenlet-3.3.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl +- pypi: https://files.pythonhosted.org/packages/b8/14/bab308fc2c1b5228c3224ec2bf928ce2e4d21d8046c161e44a2012b5203e/greenlet-3.3.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl name: greenlet version: 3.3.0 - sha256: 087ea5e004437321508a8d6f20efc4cfec5e3c30118e1417ea96ed1d93950527 + sha256: 5773edda4dc00e173820722711d043799d3adb4f01731f40619e07ea2750b955 requires_dist: - sphinx ; extra == 'docs' - furo ; extra == 'docs' @@ -4184,6 +4696,28 @@ packages: purls: [] size: 12722920 timestamp: 1766299101259 +- conda: https://conda.anaconda.org/conda-forge/linux-64/icu-78.2-h33c6efd_0.conda + sha256: 142a722072fa96cf16ff98eaaf641f54ab84744af81754c292cb81e0881c0329 + md5: 
186a18e3ba246eccfc7cff00cd19a870 + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + - libstdcxx >=14 + license: MIT + license_family: MIT + purls: [] + size: 12728445 + timestamp: 1767969922681 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/icu-75.1-hfee45f7_0.conda + sha256: 9ba12c93406f3df5ab0a43db8a4b4ef67a5871dfd401010fbe29b218b2cbe620 + md5: 5eb22c1d7b3fc4abb50d92d621583137 + depends: + - __osx >=11.0 + license: MIT + license_family: MIT + purls: [] + size: 11857802 + timestamp: 1720853997952 - conda: https://conda.anaconda.org/conda-forge/win-64/icu-78.1-h637d24d_0.conda sha256: bee083d5a0f05c380fcec1f30a71ef5518b23563aeb0a21f6b60b792645f9689 md5: cb8048bed35ef01431184d6a88e46b3e @@ -4196,18 +4730,6 @@ packages: purls: [] size: 13849749 timestamp: 1766299627069 -- conda: https://conda.anaconda.org/conda-forge/noarch/identify-2.6.15-pyhd8ed1ab_0.conda - sha256: 32d5007d12e5731867908cbf5345f5cd44a6c8755a2e8e63e15a184826a51f82 - md5: 25f954b7dae6dd7b0dc004dab74f1ce9 - depends: - - python >=3.10 - - ukkonen - license: MIT - license_family: MIT - purls: - - pkg:pypi/identify?source=hash-mapping - size: 79151 - timestamp: 1759437561529 - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda sha256: ae89d0299ada2a3162c2614a9d26557a92aa6a77120ce142f8e0109bbf0342b0 md5: 53abe63df7e10a6ba605dc5f9f961d36 @@ -4431,10 +4953,10 @@ packages: name: jax-cuda12-pjrt version: 0.8.2 sha256: e3bab41ca7c48e4163db9e7efd271b3aa85f0fe45f5ed0708d6bbed93a59f977 -- pypi: https://files.pythonhosted.org/packages/1c/38/4ba2486f95fcf2120723932feacdded438e785258148b18a703cd1177e41/jax_cuda12_plugin-0.8.2-cp313-cp313-manylinux_2_27_x86_64.whl +- pypi: https://files.pythonhosted.org/packages/27/58/a5a27d4677d6890570f7e58cecd51891469cb620e6f64c8faed4935d93d0/jax_cuda12_plugin-0.8.2-cp314-cp314-manylinux_2_27_x86_64.whl name: jax-cuda12-plugin version: 0.8.2 - sha256: 82c6798be66bf8c773386918e4c8e5cd8119753f3bfb3ca4bbc46818283750c6 + sha256: 
a5898bac1d8ab6020b54546440256409f2c66bcbbb3a1099ca473c84843addad requires_dist: - jax-cuda12-pjrt==0.8.2 - nvidia-cublas-cu12>=12.1.3.1 ; sys_platform == 'linux' and extra == 'with-cuda' @@ -4450,28 +4972,28 @@ packages: - nvidia-cuda-nvrtc-cu12>=12.1.55 ; sys_platform == 'linux' and extra == 'with-cuda' - nvidia-nvshmem-cu12>=3.2.5 ; sys_platform == 'linux' and extra == 'with-cuda' requires_python: '>=3.11' -- pypi: https://files.pythonhosted.org/packages/6b/e0/91e5762a7ddb6351b07c742ca407cd28e26043d6945d6228b6c1b0881a45/jaxlib-0.8.2-cp313-cp313-manylinux_2_27_x86_64.whl +- pypi: https://files.pythonhosted.org/packages/5e/27/2e6032727e41ce74914277478021140947af59127d68aa9e6f3776b428fd/jaxlib-0.8.2-cp314-cp314-manylinux_2_27_x86_64.whl name: jaxlib version: 0.8.2 - sha256: 1bfbcf6c3de221784fa4cdb6765a09d71cb4298b15626b3d0409b3dfcd8a8667 + sha256: e6a97dfb0232eed9a2bb6e3828e4f682dbac1a7fea840bfda574cae2dbf5faf9 requires_dist: - scipy>=1.13 - numpy>=2.0 - ml-dtypes>=0.5.0 requires_python: '>=3.11' -- pypi: https://files.pythonhosted.org/packages/85/68/25b38673b07a808616ce7b6efb3eed491f983f3373a09cbbd03f67178563/jaxlib-0.8.2-cp313-cp313-win_amd64.whl +- pypi: https://files.pythonhosted.org/packages/b3/8c/af5a00b07a446414edf6b84a7397eab02cf01ba44b6ae1fce7798ce4c127/jaxlib-0.8.2-cp314-cp314-win_amd64.whl name: jaxlib version: 0.8.2 - sha256: f205e91c3a152a2a76c0bc59a6a2de03e87ec261b91e8812922777185e7b08f5 + sha256: 05b958f497e49824c432e734bb059723b7dfe69e2ad696a9f9c8ad82fff7c3f8 requires_dist: - scipy>=1.13 - numpy>=2.0 - ml-dtypes>=0.5.0 requires_python: '>=3.11' -- pypi: https://files.pythonhosted.org/packages/c5/22/c0ec75e43a13b2457d78d509f49b49a57fa302ffced4f4a2778e428cb0a6/jaxlib-0.8.2-cp313-cp313-macosx_11_0_arm64.whl +- pypi: https://files.pythonhosted.org/packages/d8/9d/dca93d916bf8664d7a2bb73ea3d219028dabbe382c31774348963287356a/jaxlib-0.8.2-cp314-cp314-macosx_11_0_arm64.whl name: jaxlib version: 0.8.2 - sha256: 
4d006db96be020c8165212a1216372f8acac4ff4f8fb067743d694ef2b301ace + sha256: beffb004e7eeb5c9afb24439e2b2cf45a4ee3e3e8adf45e355edf2af62acf8b8 requires_dist: - scipy>=1.13 - numpy>=2.0 @@ -4540,6 +5062,7 @@ packages: - rpds-py >=0.25.0 - python license: MIT + license_family: MIT purls: - pkg:pypi/jsonschema?source=compressed-mapping size: 82356 @@ -4572,9 +5095,27 @@ packages: - uri-template - webcolors >=24.6.0 license: MIT + license_family: MIT purls: [] size: 4740 timestamp: 1767839954258 +- conda: https://conda.anaconda.org/conda-forge/noarch/jupyter-book-2.1.1-pyhcf101f3_0.conda + sha256: efea291760fba57a8abaf5b3a05c57f99d60cf11c8950fe8499f4d2eaa4473bb + md5: 29cc201b7334408707a8866d6baa35cc + depends: + - ipykernel + - jupyter_core + - jupyter_server + - nodejs >=20 + - platformdirs >=4.2.2 + - python >=3.10 + - python + license: BSD-3-Clause + license_family: BSD + purls: + - pkg:pypi/jupyter-book?source=hash-mapping + size: 2175135 + timestamp: 1769203439705 - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter-lsp-2.3.0-pyhcf101f3_0.conda sha256: 897ad2e2c2335ef3c2826d7805e16002a1fd0d509b4ae0bc66617f0e0ff07bc2 md5: 62b7c96c6cd77f8173cc5cada6a9acaa @@ -4782,39 +5323,39 @@ packages: purls: [] size: 134088 timestamp: 1754905959823 -- conda: https://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.4.9-py313hc8edb43_2.conda - sha256: 60d7d525db89401f88f5c91bdbb79d3afbf005e7d7c1326318659fa097607e51 - md5: 3e0e65595330e26515e31b7fc6d933c7 +- conda: https://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.4.9-py314h97ea11e_2.conda + sha256: a707d08c095d02148201f2da9fba465054fb750e33117e215892a4fefcc1b54a + md5: 57f1ce4f7ba6bcd460be8f83c8f04c69 depends: - python - - __glibc >=2.17,<3.0.a0 - libstdcxx >=14 - libgcc >=14 - - python_abi 3.13.* *_cp313 + - __glibc >=2.17,<3.0.a0 + - python_abi 3.14.* *_cp314 license: BSD-3-Clause license_family: BSD purls: - pkg:pypi/kiwisolver?source=hash-mapping - size: 77616 - timestamp: 1762488778882 -- conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/kiwisolver-1.4.9-py313h7add70c_2.conda - sha256: adc6b89070b6858b81fbe24dd034a73295e8fa9ccb68ed871bf04f1ed498f51c - md5: 9583687276aaa393e723f3b7970be69f + size: 78071 + timestamp: 1762488742381 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/kiwisolver-1.4.9-py314h42813c9_2.conda + sha256: c4d7e6653d343e768110ec77ac1c6c89f313f77a19a1f2cd60b7c7b8b0758bdf + md5: 9aa431bf603c231e8c77a1b0842a85ed depends: - python - - libcxx >=19 - - python 3.13.* *_cp313 + - python 3.14.* *_cp314 - __osx >=11.0 - - python_abi 3.13.* *_cp313 + - libcxx >=19 + - python_abi 3.14.* *_cp314 license: BSD-3-Clause license_family: BSD purls: - pkg:pypi/kiwisolver?source=hash-mapping - size: 68438 - timestamp: 1762488945877 -- conda: https://conda.anaconda.org/conda-forge/win-64/kiwisolver-1.4.9-py313h1a38498_2.conda - sha256: 40eafae7e9cdbe97eeb56ab0882816d3f68a2af4080a822f7349f986de2adeb6 - md5: f77249adfa3f0091e016610346affd09 + size: 68534 + timestamp: 1762489024029 +- conda: https://conda.anaconda.org/conda-forge/win-64/kiwisolver-1.4.9-py314hf309875_2.conda + sha256: ded907ab1ce24abcff20bc239e770ae7ef4cff6fdcfb8cc24ca59ebe736a1d3f + md5: e9d93271b021332f5492ff5478601614 depends: - python - vc >=14.3,<15 @@ -4823,13 +5364,13 @@ packages: - vc >=14.3,<15 - vc14_runtime >=14.44.35208 - ucrt >=10.0.20348.0 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 license: BSD-3-Clause license_family: BSD purls: - pkg:pypi/kiwisolver?source=hash-mapping - size: 73825 - timestamp: 1762488792613 + size: 73670 + timestamp: 1762488752873 - conda: https://conda.anaconda.org/conda-forge/linux-64/krb5-1.21.3-h659f571_0.conda sha256: 99df692f7a8a5c27cd14b5fb1374ee55e756631b9c3d659ed3ee60830249b238 md5: 3f43953b7d3fb3aaa1d0d0723d91e368 @@ -4970,6 +5511,20 @@ packages: purls: [] size: 164701 timestamp: 1745264384716 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/libabseil-20250512.1-cxx17_hd41c47c_0.conda + sha256: 
7f0ee9ae7fa2cf7ac92b0acf8047c8bac965389e48be61bf1d463e057af2ea6a + md5: 360dbb413ee2c170a0a684a33c4fc6b8 + depends: + - __osx >=11.0 + - libcxx >=18 + constrains: + - libabseil-static =20250512.1=cxx17* + - abseil-cpp =20250512.1 + license: Apache-2.0 + license_family: Apache + purls: [] + size: 1174081 + timestamp: 1750194620012 - conda: https://conda.anaconda.org/conda-forge/linux-64/libarchive-3.8.5-gpl_hc2c16d8_100.conda sha256: ee2cf1499a5a5fd5f03c6203597fe14bf28c6ca2a8fffb761e41f3cf371e768e md5: 5fdaa8b856683a5598459dead3976578 @@ -5287,6 +5842,14 @@ packages: purls: [] size: 112766 timestamp: 1702146165126 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/libev-4.33-h93a5062_2.conda + sha256: 95cecb3902fbe0399c3a7e67a5bed1db813e5ab0e22f4023a5e0f722f2cc214f + md5: 36d33e440c31857372a72137f78bacf5 + license: BSD-2-Clause + license_family: BSD + purls: [] + size: 107458 + timestamp: 1702146414478 - conda: https://conda.anaconda.org/conda-forge/linux-64/libexpat-2.7.3-hecca717_0.conda sha256: 1e1b08f6211629cbc2efe7a5bca5953f8f6b3cae0eeb04ca4dacee1bd4e2db2f md5: 8b09ae86839581147ef2e5c5e229d164 @@ -5561,9 +6124,9 @@ packages: purls: [] size: 663567 timestamp: 1765260367147 -- conda: https://conda.anaconda.org/conda-forge/win-64/libhwloc-2.12.1-default_h4379cf1_1003.conda - sha256: 2d534c09f92966b885acb3f4a838f7055cea043165a03079a539b06c54e20a49 - md5: d1699ce4fe195a9f61264a1c29b87035 +- conda: https://conda.anaconda.org/conda-forge/win-64/libhwloc-2.12.2-default_h4379cf1_1000.conda + sha256: 8cdf11333a81085468d9aa536ebb155abd74adc293576f6013fc0c85a7a90da3 + md5: 3b576f6860f838f950c570f4433b086e depends: - libwinpthread >=12.0.0.r4.gg4f2fc60ca - libxml2 @@ -5574,8 +6137,8 @@ packages: license: BSD-3-Clause license_family: BSD purls: [] - size: 2412642 - timestamp: 1765090345611 + size: 2411241 + timestamp: 1765104337762 - conda: https://conda.anaconda.org/conda-forge/linux-64/libiconv-1.18-h3b78370_2.conda sha256: 
c467851a7312765447155e071752d7bf9bf44d610a5687e32706f480aad2833f md5: 915f5995e94f60e9a4826e0b0920ee88 @@ -5788,6 +6351,22 @@ packages: purls: [] size: 666600 timestamp: 1756834976695 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/libnghttp2-1.67.0-hc438710_0.conda + sha256: a07cb53b5ffa2d5a18afc6fd5a526a5a53dd9523fbc022148bd2f9395697c46d + md5: a4b4dd73c67df470d091312ab87bf6ae + depends: + - __osx >=11.0 + - c-ares >=1.34.5,<2.0a0 + - libcxx >=19 + - libev >=4.33,<4.34.0a0 + - libev >=4.33,<5.0a0 + - libzlib >=1.3.1,<2.0a0 + - openssl >=3.5.2,<4.0a0 + license: MIT + license_family: MIT + purls: [] + size: 575454 + timestamp: 1756835746393 - conda: https://conda.anaconda.org/conda-forge/linux-64/libnvptxcompiler-dev-12.9.86-ha770c72_2.conda sha256: 1e7a7b34f8639a5feb75ba864127059e4d83edfe1a516547f0dbb9941e7b8f8b md5: 3fd926c321c6dbf386aa14bd8b125bfb @@ -6084,6 +6663,27 @@ packages: purls: [] size: 40311 timestamp: 1766271528534 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libuv-1.51.0-hb03c661_1.conda + sha256: c180f4124a889ac343fc59d15558e93667d894a966ec6fdb61da1604481be26b + md5: 0f03292cc56bf91a077a134ea8747118 + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + license: MIT + license_family: MIT + purls: [] + size: 895108 + timestamp: 1753948278280 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/libuv-1.51.0-h6caf38d_1.conda + sha256: 042c7488ad97a5629ec0a991a8b2a3345599401ecc75ad6a5af73b60e6db9689 + md5: c0d87c3c8e075daf1daf6c31b53e8083 + depends: + - __osx >=11.0 + license: MIT + license_family: MIT + purls: [] + size: 421195 + timestamp: 1753948426421 - conda: https://conda.anaconda.org/conda-forge/linux-64/libwebp-base-1.6.0-hd42ef1d_0.conda sha256: 3aed21ab28eddffdaf7f804f49be7a7d701e8f0e46c856d801270b470820a37b md5: aea31d2e5b1091feca96fcfe945c3cf9 @@ -6370,58 +6970,24 @@ packages: - pkg:pypi/markdown-it-py?source=hash-mapping size: 64736 timestamp: 1754951288511 -- conda: 
https://conda.anaconda.org/conda-forge/linux-64/markupsafe-3.0.3-py313h3dea7bd_0.conda - sha256: a530a411bdaaf0b1e4de8869dfaca46cb07407bc7dc0702a9e231b0e5ce7ca85 - md5: c14389156310b8ed3520d84f854be1ee - depends: - - __glibc >=2.17,<3.0.a0 - - libgcc >=14 - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 - constrains: - - jinja2 >=3.0.0 - license: BSD-3-Clause - license_family: BSD - purls: - - pkg:pypi/markupsafe?source=hash-mapping - size: 25909 - timestamp: 1759055357045 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/markupsafe-3.0.3-py313h7d74516_0.conda - sha256: e06902a1bf370fdd4ada0a8c81c504868fdb7e9971b72c6bd395aa4e5a497bd2 - md5: 3df5979cc0b761dda0053ffdb0bca3ea +- conda: https://conda.anaconda.org/conda-forge/noarch/markupsafe-3.0.3-pyh7db6752_0.conda + sha256: e0cbfea51a19b3055ca19428bd9233a25adca956c208abb9d00b21e7259c7e03 + md5: fab1be106a50e20f10fe5228fd1d1651 depends: - - __osx >=11.0 - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 - - python_abi 3.13.* *_cp313 - constrains: - - jinja2 >=3.0.0 - license: BSD-3-Clause - license_family: BSD - purls: - - pkg:pypi/markupsafe?source=hash-mapping - size: 25778 - timestamp: 1759055530601 -- conda: https://conda.anaconda.org/conda-forge/win-64/markupsafe-3.0.3-py313hd650c13_0.conda - sha256: 988d14095c1392e055fd75e24544da2db01ade73b0c2f99ddc8e2b8678ead4cc - md5: 47eaaa4405741beb171ea6edc6eaf874 - depends: - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 - - ucrt >=10.0.20348.0 - - vc >=14.3,<15 - - vc14_runtime >=14.44.35208 + - python >=3.10 constrains: - jinja2 >=3.0.0 + track_features: + - markupsafe_no_compile license: BSD-3-Clause license_family: BSD purls: - pkg:pypi/markupsafe?source=hash-mapping - size: 28959 - timestamp: 1759055685616 -- conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py313h683a580_0.conda - sha256: b1117aa2c1d11ca70d1704054cdc8801cbcf2dfb846c565531edd417ddd82559 - md5: ffe67570e1a9192d2f4c189b27f75f89 + 
size: 15499 + timestamp: 1759055275624 +- conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py314h1194b4b_0.conda + sha256: ee773261fbd6c76fc8174b0e4e1ce272b0bbaa56610f130e9d3d1f575106f04f + md5: b8683e6068099b69c10dbfcf7204203f depends: - __glibc >=2.17,<3.0.a0 - contourpy >=1.0.1 @@ -6438,20 +7004,20 @@ packages: - packaging >=20.0 - pillow >=8 - pyparsing >=2.3.1 - - python >=3.13,<3.14.0a0 + - python >=3.14,<3.15.0a0 - python-dateutil >=2.7 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 - qhull >=2020.2,<2020.3.0a0 - tk >=8.6.13,<8.7.0a0 license: PSF-2.0 license_family: PSF purls: - - pkg:pypi/matplotlib?source=compressed-mapping - size: 8405862 - timestamp: 1763055358671 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/matplotlib-base-3.10.8-py313h58042b9_0.conda - sha256: 24767ca32ea9db74a4a5965d2df8c69c83c82583e8ba32b683123d406092e205 - md5: 745c18472bc6d3dc9146c3dec18bb740 + - pkg:pypi/matplotlib?source=hash-mapping + size: 8473358 + timestamp: 1763055439346 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/matplotlib-base-3.10.8-py314hd63e3f0_0.conda + sha256: 198dcc0ed83e78bc7bf48e6ef8d4ecd220e9cf1f07db98508251b2bc0be067f9 + md5: c84152e510d41378b8758826655b6ed7 depends: - __osx >=11.0 - contourpy >=1.0.1 @@ -6467,20 +7033,20 @@ packages: - packaging >=20.0 - pillow >=8 - pyparsing >=2.3.1 - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 + - python >=3.14,<3.15.0a0 + - python >=3.14,<3.15.0a0 *_cp314 - python-dateutil >=2.7 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 - qhull >=2020.2,<2020.3.0a0 license: PSF-2.0 license_family: PSF purls: - pkg:pypi/matplotlib?source=hash-mapping - size: 8197793 - timestamp: 1763056104477 -- conda: https://conda.anaconda.org/conda-forge/win-64/matplotlib-base-3.10.8-py313he1ded55_0.conda - sha256: f63c4a5ded62cfb216c9d107a3c4527940036eef19cf481418080a0bd9bc11d8 - md5: 05f96c429201a64ea752decf4b910a7c + size: 8286510 + timestamp: 
1763055937766 +- conda: https://conda.anaconda.org/conda-forge/win-64/matplotlib-base-3.10.8-py314hfa45d96_0.conda + sha256: 82a50284275e8a1818cd3323846f3032dc89bd23a3f80dcf44e34a62b016256b + md5: 9d491a60700e0e90e92607fcc4e2566c depends: - contourpy >=1.0.1 - cycler >=0.10 @@ -6494,9 +7060,9 @@ packages: - packaging >=20.0 - pillow >=8 - pyparsing >=2.3.1 - - python >=3.13,<3.14.0a0 + - python >=3.14,<3.15.0a0 - python-dateutil >=2.7 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 - qhull >=2020.2,<2020.3.0a0 - ucrt >=10.0.20348.0 - vc >=14.3,<15 @@ -6505,8 +7071,8 @@ packages: license_family: PSF purls: - pkg:pypi/matplotlib?source=hash-mapping - size: 8007333 - timestamp: 1763055517579 + size: 8185296 + timestamp: 1763055983613 - conda: https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.1-pyhd8ed1ab_0.conda sha256: 9d690334de0cd1d22c51bc28420663f4277cfa60d34fa5cad1ce284a13f1d603 md5: 00e120ce3e40bad7bfc78861ce3c4a25 @@ -6542,44 +7108,44 @@ packages: - pkg:pypi/mdurl?source=hash-mapping size: 14465 timestamp: 1733255681319 -- conda: https://conda.anaconda.org/conda-forge/linux-64/memray-1.19.1-py313h422961c_3.conda - sha256: 1a752d45a2c5da1289afac51ea5b89bde0a80f290708505b487f38d47b4e3267 - md5: 6f9810aa09fbdab0c6b941d48a3b72bb +- conda: https://conda.anaconda.org/conda-forge/linux-64/memray-1.19.1-py314hef15ded_3.conda + sha256: 43801200d3b8dcaa1f9ab47f527c9fe94028780b2760173a240e132e25be2194 + md5: cc1bee6de727d07ce2dad51a5e8364b9 depends: - python - rich >=11.2.0 - jinja2 - textual >=0.34.0 - - libgcc >=14 - libstdcxx >=14 + - libgcc >=14 - __glibc >=2.17,<3.0.a0 - - python_abi 3.13.* *_cp313 - lz4-c >=1.10.0,<1.11.0a0 - elfutils >=0.194,<0.195.0a0 - libunwind >=1.8.3,<1.9.0a0 + - python_abi 3.14.* *_cp314 license: Apache-2.0 AND BSD-3-Clause purls: - pkg:pypi/memray?source=hash-mapping - size: 1816303 - timestamp: 1765821582847 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/memray-1.19.1-py313h78c9487_3.conda - sha256: 
eece155fd7c5f59226e24015ae08e5d8eb9a3e453f6c97bf16d04348e7f94c97 - md5: f1dcaa6d7f501b2b8bd6294610c3982a + size: 1824670 + timestamp: 1765821568349 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/memray-1.19.1-py314habef2a7_3.conda + sha256: 912a462c888a867a22e6ebf0607ad3a42d078260cafc7d4f25654989a17f41ac + md5: f8c08fd9eb42146b0489d42069fb270e depends: - python - rich >=11.2.0 - jinja2 - textual >=0.34.0 + - python 3.14.* *_cp314 - __osx >=11.0 - - python 3.13.* *_cp313 - libcxx >=19 - lz4-c >=1.10.0,<1.11.0a0 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 license: Apache-2.0 AND BSD-3-Clause purls: - pkg:pypi/memray?source=hash-mapping - size: 1712578 - timestamp: 1765821632543 + size: 1721669 + timestamp: 1765821674618 - conda: https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.0-pyhcf101f3_0.conda sha256: d3fb4beb5e0a52b6cc33852c558e077e1bfe44df1159eb98332d69a264b14bae md5: b11e360fc4de2b0035fc8aaa74f17fd6 @@ -6590,7 +7156,7 @@ packages: license: BSD-3-Clause license_family: BSD purls: - - pkg:pypi/mistune?source=compressed-mapping + - pkg:pypi/mistune?source=hash-mapping size: 74250 timestamp: 1766504456031 - conda: https://conda.anaconda.org/conda-forge/win-64/mkl-2025.3.0-hac47afa_455.conda @@ -6607,10 +7173,10 @@ packages: purls: [] size: 100224829 timestamp: 1767634557029 -- pypi: https://files.pythonhosted.org/packages/d9/a1/4008f14bbc616cfb1ac5b39ea485f9c63031c4634ab3f4cf72e7541f816a/ml_dtypes-0.5.4-cp313-cp313-macosx_10_13_universal2.whl +- pypi: https://files.pythonhosted.org/packages/72/4e/1339dc6e2557a344f5ba5590872e80346f76f6cb2ac3dd16e4666e88818c/ml_dtypes-0.5.4-cp314-cp314-macosx_10_13_universal2.whl name: ml-dtypes version: 0.5.4 - sha256: 8c760d85a2f82e2bed75867079188c9d18dae2ee77c25a54d60e9cc79be1bc48 + sha256: 2b857d3af6ac0d39db1de7c706e69c7f9791627209c3d6dedbfca8c7e5faec22 requires_dist: - numpy>=1.21 - numpy>=1.21.2 ; python_full_version >= '3.10' @@ -6623,10 +7189,10 @@ packages: - pylint>=2.6.0 ; extra == 
'dev' - pyink ; extra == 'dev' requires_python: '>=3.9' -- pypi: https://files.pythonhosted.org/packages/e1/8b/200088c6859d8221454825959df35b5244fa9bdf263fd0249ac5fb75e281/ml_dtypes-0.5.4-cp313-cp313-win_amd64.whl +- pypi: https://files.pythonhosted.org/packages/c6/bb/82c7dcf38070b46172a517e2334e665c5bf374a262f99a283ea454bece7c/ml_dtypes-0.5.4-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl name: ml-dtypes version: 0.5.4 - sha256: f21c9219ef48ca5ee78402d5cc831bd58ea27ce89beda894428bc67a52da5328 + sha256: 14a4fd3228af936461db66faccef6e4f41c1d82fcc30e9f8d58a08916b1d811f requires_dist: - numpy>=1.21 - numpy>=1.21.2 ; python_full_version >= '3.10' @@ -6639,10 +7205,10 @@ packages: - pylint>=2.6.0 ; extra == 'dev' - pyink ; extra == 'dev' requires_python: '>=3.9' -- pypi: https://files.pythonhosted.org/packages/eb/33/40cd74219417e78b97c47802037cf2d87b91973e18bb968a7da48a96ea44/ml_dtypes-0.5.4-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl +- pypi: https://files.pythonhosted.org/packages/e9/93/2bfed22d2498c468f6bcd0d9f56b033eaa19f33320389314c19ef6766413/ml_dtypes-0.5.4-cp314-cp314-win_amd64.whl name: ml-dtypes version: 0.5.4 - sha256: 533ce891ba774eabf607172254f2e7260ba5f57bdd64030c9a4fcfbd99815d0d + sha256: 8c6a2dcebd6f3903e05d51960a8058d6e131fe69f952a5397e5dbabc841b6d56 requires_dist: - numpy>=1.21 - numpy>=1.21.2 ; python_full_version >= '3.10' @@ -6796,18 +7362,54 @@ packages: - pkg:pypi/networkx?source=compressed-mapping size: 1587439 timestamp: 1765215107045 -- conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - sha256: 4fa40e3e13fc6ea0a93f67dfc76c96190afd7ea4ffc1bac2612d954b42cdc3ee - md5: eb52d14a901e23c39e9e7b4a1a5c015f +- conda: https://conda.anaconda.org/conda-forge/linux-64/nodejs-22.21.1-h273caaf_1.conda + sha256: cff5f9e02bdb2be15e25b3fd1ea0f5d933cb68ff2da5983ec6962cfe86f50b89 + md5: 2306549f0179b16be2e9e40e5396456e depends: - - python >=3.10 - - setuptools - license: BSD-3-Clause - 
license_family: BSD - purls: - - pkg:pypi/nodeenv?source=hash-mapping - size: 40866 - timestamp: 1766261270149 + - libstdcxx >=14 + - libgcc >=14 + - __glibc >=2.28,<3.0.a0 + - libzlib >=1.3.1,<2.0a0 + - openssl >=3.5.4,<4.0a0 + - icu >=78.2,<79.0a0 + - libuv >=1.51.0,<2.0a0 + license: MIT + license_family: MIT + purls: [] + size: 24191530 + timestamp: 1769159735495 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/nodejs-25.2.1-h5230ea7_1.conda + sha256: acb4a33a096fa89d0ec0eea5d5f19988594d4e5c8d482ac60d2b0365d16dd984 + md5: 0b6dfe96bcfb469afe82885b3fecbd56 + depends: + - __osx >=11.0 + - libcxx >=19 + - libsqlite >=3.51.1,<4.0a0 + - libbrotlicommon >=1.2.0,<1.3.0a0 + - libbrotlienc >=1.2.0,<1.3.0a0 + - libbrotlidec >=1.2.0,<1.3.0a0 + - openssl >=3.5.4,<4.0a0 + - c-ares >=1.34.6,<2.0a0 + - icu >=75.1,<76.0a0 + - zstd >=1.5.7,<1.6.0a0 + - libabseil >=20250512.1,<20250513.0a0 + - libabseil * cxx17* + - libnghttp2 >=1.67.0,<2.0a0 + - libuv >=1.51.0,<2.0a0 + - libzlib >=1.3.1,<2.0a0 + license: MIT + license_family: MIT + purls: [] + size: 16202237 + timestamp: 1765482731453 +- conda: https://conda.anaconda.org/conda-forge/win-64/nodejs-25.2.1-he453025_2.conda + sha256: abe64c5dce6d7024919807f9d5ac72729862848238e6ad6bf9ed4e721c8cc232 + md5: b965c8d527c0a5b4781e39339abc808a + license: MIT + license_family: MIT + purls: [] + size: 30449041 + timestamp: 1769159661802 - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda sha256: 7b920e46b9f7a2d2aa6434222e5c8d739021dbc5cc75f32d124a8191d86f9056 md5: e7f89ea5f7ea9401642758ff50a2d9c1 @@ -6820,49 +7422,49 @@ packages: - pkg:pypi/notebook-shim?source=hash-mapping size: 16817 timestamp: 1733408419340 -- conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py313hf6604e3_1.conda - sha256: 2f8aff2a17e4d43012e9863ef4392e6d5de3ae9da0c3e322831f8c5c3d86df71 - md5: dce261869f78ba9b81b9091b084d328d +- conda: 
https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py314h2b28147_1.conda + sha256: 81425306df4f0ddba159e80c8d91323a34df335079ca93a194201e57b337231c + md5: ab17cb5f388fa17c08937cb9cc24e7b6 depends: - python + - __glibc >=2.17,<3.0.a0 - libgcc >=14 - libstdcxx >=14 - - __glibc >=2.17,<3.0.a0 - - python_abi 3.13.* *_cp313 - - libcblas >=3.9.0,<4.0a0 - liblapack >=3.9.0,<4.0a0 + - libcblas >=3.9.0,<4.0a0 - libblas >=3.9.0,<4.0a0 + - python_abi 3.14.* *_cp314 constrains: - numpy-base <0a0 license: BSD-3-Clause license_family: BSD purls: - pkg:pypi/numpy?source=hash-mapping - size: 8919234 - timestamp: 1766383469748 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/numpy-2.3.5-py313h16eae64_1.conda - sha256: d759e7fee853d8e18709a15b8fc8a6db90c96986cb9d316c4d5ccdf5a1d3f61f - md5: c72599556b49dc853839f4439c1eea32 + size: 8983076 + timestamp: 1766383421113 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/numpy-2.3.5-py314hae46ccb_1.conda + sha256: bc9dfe41ba4898365a82c485416fd4a572f86d94e606d89379766de70d34fc79 + md5: d421394cf6758a6f27ead1530cfdfa6a depends: - python - libcxx >=19 - __osx >=11.0 - - python 3.13.* *_cp313 - - liblapack >=3.9.0,<4.0a0 - - python_abi 3.13.* *_cp313 + - python 3.14.* *_cp314 - libcblas >=3.9.0,<4.0a0 + - python_abi 3.14.* *_cp314 - libblas >=3.9.0,<4.0a0 + - liblapack >=3.9.0,<4.0a0 constrains: - numpy-base <0a0 license: BSD-3-Clause license_family: BSD purls: - pkg:pypi/numpy?source=hash-mapping - size: 6792353 - timestamp: 1766383288679 -- conda: https://conda.anaconda.org/conda-forge/win-64/numpy-2.3.5-py313hce7ae62_1.conda - sha256: c02d9587864174146bf0024051c76d368b2de18c94421e2f4e611fbb18576dd1 - md5: 78749843445581c6dcc0cb80d146982d + size: 6861028 + timestamp: 1766383292611 +- conda: https://conda.anaconda.org/conda-forge/win-64/numpy-2.3.5-py314h06c3c77_1.conda + sha256: 111a7af69521dce54ce6b4d89ef767ade9f3769576353a526174792de8702b5d + md5: 71dabea9914329c08b4864955c3793fc depends: - python - vc >=14.3,<15 
@@ -6870,16 +7472,16 @@ packages: - ucrt >=10.0.20348.0 - liblapack >=3.9.0,<4.0a0 - libblas >=3.9.0,<4.0a0 + - python_abi 3.14.* *_cp314 - libcblas >=3.9.0,<4.0a0 - - python_abi 3.13.* *_cp313 constrains: - numpy-base <0a0 license: BSD-3-Clause license_family: BSD purls: - - pkg:pypi/numpy?source=hash-mapping - size: 7524105 - timestamp: 1766383318405 + - pkg:pypi/numpy?source=compressed-mapping + size: 7584934 + timestamp: 1766383321713 - pypi: https://files.pythonhosted.org/packages/77/3c/aa88abe01f3be3d1f8f787d1d33dc83e76fec05945f9a28fbb41cfb99cd5/nvidia_cublas_cu12-12.9.1.4-py3-none-manylinux_2_27_x86_64.whl name: nvidia-cublas-cu12 version: 12.9.1.4 @@ -7042,9 +7644,9 @@ packages: version: 3.4.0 sha256: 69bb92469f86a1565195ece4ac0323943e83477171b91d24c35afe028a90d7cd requires_python: '>=3.8' -- pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#e02ea4743cac9f861a5813f3b4b1283fd2ade730 +- pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#522b8c9a21226569ffd25e950e44f0c5de308c9d name: optimagic - version: 0.5.3.dev30+ge02ea4743 + version: 0.5.3.dev31+g522b8c9a2 requires_dist: - annotated-types - cloudpickle @@ -7057,53 +7659,53 @@ packages: - sqlalchemy>=1.3 - typing-extensions requires_python: '>=3.10' -- conda: https://conda.anaconda.org/conda-forge/linux-64/orjson-3.11.5-py313h541fbb8_0.conda - sha256: 6bb36f180ea4ba4f13f5e6ef8ec0b2fdd010d73430af53a05986ffc312091e8f - md5: 5dd1f02f38d71a29f3cfaf13c4cbf3dd +- conda: https://conda.anaconda.org/conda-forge/linux-64/orjson-3.11.5-py314h3b757c3_0.conda + sha256: f8da6a925be44a867c172dd945049d7690ba6ae3a7905b61b1d5a4ba81fe0554 + md5: 15ae5e4f52f2d9a98997e8859d35aa21 depends: - python - - __glibc >=2.17,<3.0.a0 - libgcc >=14 - - python_abi 3.13.* *_cp313 + - __glibc >=2.17,<3.0.a0 + - python_abi 3.14.* *_cp314 constrains: - __glibc >=2.17 license: Apache-2.0 license_family: APACHE purls: - pkg:pypi/orjson?source=hash-mapping - size: 317253 - timestamp: 1765811463186 -- conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/orjson-3.11.5-py313hfea8034_0.conda - sha256: 259cf50b358d2c1915123f0bf889db27d277efab7a3388c287f0dd4797764fe5 - md5: d80421fc2b6f692925c82351f1c98407 + size: 317280 + timestamp: 1765811464445 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/orjson-3.11.5-py314hda6d10a_0.conda + sha256: 08f70edd4fc9f684083d18350c8a33c5a092e05aaaab7a97b09b381f8ca19eb7 + md5: 21db7b1b5c5c04461bf40c33953f8cf7 depends: - python + - python 3.14.* *_cp314 - __osx >=11.0 - - python 3.13.* *_cp313 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 constrains: - __osx >=11.0 license: Apache-2.0 license_family: APACHE purls: - pkg:pypi/orjson?source=hash-mapping - size: 288912 - timestamp: 1765811468774 -- conda: https://conda.anaconda.org/conda-forge/win-64/orjson-3.11.4-py313hfbe8231_1.conda - sha256: cf55c2f55f7c0e8973da287217315c4b8652ca29dbcbcecfd0b3b8e48e784422 - md5: db9e91caa5ee3f4891d340f8e323cc79 + size: 288991 + timestamp: 1765811524857 +- conda: https://conda.anaconda.org/conda-forge/win-64/orjson-3.11.5-py314h64f83cb_0.conda + sha256: 32014651690ee74eb65d4ef3f42f1ff679b216274f85c52b3be701cf16c6dff3 + md5: 84b27320349d7fbf9fb6ad06141eec5b depends: - python - vc >=14.3,<15 - vc14_runtime >=14.44.35208 - ucrt >=10.0.20348.0 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 license: Apache-2.0 license_family: APACHE purls: - pkg:pypi/orjson?source=hash-mapping - size: 197832 - timestamp: 1764441550892 + size: 197828 + timestamp: 1765811532648 - conda: https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda sha256: 1840bd90d25d4930d60f57b4f38d4e0ae3f5b8db2819638709c36098c6ba770c md5: e51f1e4089cad105b6cac64bd8166587 @@ -7141,10 +7743,10 @@ packages: - pkg:pypi/packaging?source=hash-mapping size: 62477 timestamp: 1745345660407 -- pypi: 
https://files.pythonhosted.org/packages/15/07/284f757f63f8a8d69ed4472bfd85122bd086e637bf4ed09de572d575a693/pandas-2.3.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl +- pypi: https://files.pythonhosted.org/packages/15/b2/0e62f78c0c5ba7e3d2c5945a82456f4fac76c480940f805e0b97fcbc2f65/pandas-2.3.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl name: pandas version: 2.3.3 - sha256: 318d77e0e42a628c04dc56bcef4b40de67918f7041c2b061af1da41dcff670ac + sha256: ee67acbbf05014ea6c763beb097e03cd629961c8a632075eeb34247120abcb4b requires_dist: - numpy>=1.22.4 ; python_full_version < '3.11' - numpy>=1.23.2 ; python_full_version == '3.11.*' @@ -7232,10 +7834,10 @@ packages: - xlsxwriter>=3.0.5 ; extra == 'all' - zstandard>=0.19.0 ; extra == 'all' requires_python: '>=3.9' -- pypi: https://files.pythonhosted.org/packages/31/94/72fac03573102779920099bcac1c3b05975c2cb5f01eac609faf34bed1ca/pandas-2.3.3-cp313-cp313-macosx_11_0_arm64.whl +- pypi: https://files.pythonhosted.org/packages/21/00/266d6b357ad5e6d3ad55093a7e8efc7dd245f5a842b584db9f30b0f0a287/pandas-2.3.3-cp314-cp314-macosx_11_0_arm64.whl name: pandas version: 2.3.3 - sha256: bdcd9d1167f4885211e401b3036c0c8d9e274eee67ea8d0758a256d60704cfe8 + sha256: 1611aedd912e1ff81ff41c745822980c49ce4a7907537be8692c8dbc31924593 requires_dist: - numpy>=1.22.4 ; python_full_version < '3.11' - numpy>=1.23.2 ; python_full_version == '3.11.*' @@ -7323,10 +7925,10 @@ packages: - xlsxwriter>=3.0.5 ; extra == 'all' - zstandard>=0.19.0 ; extra == 'all' requires_python: '>=3.9' -- pypi: https://files.pythonhosted.org/packages/4f/c7/e54682c96a895d0c808453269e0b5928a07a127a15704fedb643e9b0a4c8/pandas-2.3.3-cp313-cp313-win_amd64.whl +- pypi: https://files.pythonhosted.org/packages/a6/3d/124ac75fcd0ecc09b8fdccb0246ef65e35b012030defb0e0eba2cbbbe948/pandas-2.3.3-cp314-cp314-win_amd64.whl name: pandas version: 2.3.3 - sha256: f8bfc0e12dc78f777f323f55c58649591b2cd0c43534e8355c51d3fede5f4dee + sha256: 
1b07204a219b3b7350abaae088f451860223a52cfb8a6c53358e7948735158e5 requires_dist: - numpy>=1.22.4 ; python_full_version < '3.11' - numpy>=1.23.2 ; python_full_version == '3.11.*' @@ -7465,76 +8067,76 @@ packages: - pkg:pypi/pexpect?source=hash-mapping size: 53561 timestamp: 1733302019362 -- conda: https://conda.anaconda.org/conda-forge/linux-64/pillow-12.1.0-py313h80991f8_0.conda - sha256: bdad1e21cadd64154c45fa554247dd672288ad51982ca7d54b3fab63e40938df - md5: 183fe6b9e99e5c2b464c1573ec78eac8 +- conda: https://conda.anaconda.org/conda-forge/linux-64/pillow-12.1.0-py314h8ec4b1a_0.conda + sha256: 6d8e32dc44165cff96ec9c00383e998fd035983d971c5f35ebed6f5f51c4022a + md5: f9b6a8fbb8dcb840a0c1c052dc5092e4 depends: - python - - libgcc >=14 - __glibc >=2.17,<3.0.a0 - - tk >=8.6.13,<8.7.0a0 - - python_abi 3.13.* *_cp313 - - libtiff >=4.7.1,<4.8.0a0 - - libjpeg-turbo >=3.1.2,<4.0a0 + - libgcc >=14 - lcms2 >=2.17,<3.0a0 - - libxcb >=1.17.0,<2.0a0 + - libfreetype >=2.14.1 + - libfreetype6 >=2.14.1 + - libjpeg-turbo >=3.1.2,<4.0a0 - zlib-ng >=2.3.2,<2.4.0a0 + - libxcb >=1.17.0,<2.0a0 - libwebp-base >=1.6.0,<2.0a0 - openjpeg >=2.5.4,<3.0a0 - - libfreetype >=2.14.1 - - libfreetype6 >=2.14.1 + - python_abi 3.14.* *_cp314 + - libtiff >=4.7.1,<4.8.0a0 + - tk >=8.6.13,<8.7.0a0 license: HPND purls: - pkg:pypi/pillow?source=hash-mapping - size: 1043309 - timestamp: 1767353193450 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/pillow-12.1.0-py313h45e5a15_0.conda - sha256: e5eaa7f00fca189848a0454303c56cc4edefd3e58a70bfd490d2cfe0d0aa525d - md5: 78a39731fd50dbd511de305934fe7e62 + size: 1072995 + timestamp: 1767353193452 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/pillow-12.1.0-py314hab283cf_0.conda + sha256: 3f88f2600862583c8bed3d37f4b95f0f96a459e9fdd36ca680472bc89a46e7bb + md5: 1f9dae6213643ac883e300c11df611eb depends: - python - __osx >=11.0 - - python 3.13.* *_cp313 - - libxcb >=1.17.0,<2.0a0 + - python 3.14.* *_cp314 + - libjpeg-turbo >=3.1.2,<4.0a0 - openjpeg 
>=2.5.4,<3.0a0 - - libtiff >=4.7.1,<4.8.0a0 + - python_abi 3.14.* *_cp314 - zlib-ng >=2.3.2,<2.4.0a0 - - tk >=8.6.13,<8.7.0a0 - - libjpeg-turbo >=3.1.2,<4.0a0 - - python_abi 3.13.* *_cp313 + - libxcb >=1.17.0,<2.0a0 - lcms2 >=2.17,<3.0a0 - libfreetype >=2.14.1 - libfreetype6 >=2.14.1 + - libtiff >=4.7.1,<4.8.0a0 + - tk >=8.6.13,<8.7.0a0 - libwebp-base >=1.6.0,<2.0a0 license: HPND purls: - pkg:pypi/pillow?source=hash-mapping - size: 966296 - timestamp: 1767353279679 -- conda: https://conda.anaconda.org/conda-forge/win-64/pillow-12.1.0-py313h38f99e1_0.conda - sha256: 181b4d169e7a671c387427ceb398d931802adace8808836b44295b07c3484abd - md5: 1927a42726a4ca0e94d5e8cb94c7a06d + size: 995543 + timestamp: 1767353279681 +- conda: https://conda.anaconda.org/conda-forge/win-64/pillow-12.1.0-py314h61b30b5_0.conda + sha256: b30a83db337dab8579a46e3da7906851f53d6cf8c09695aef6d2a38b17636c1c + md5: 17dbdfedee39f31166b7e548f3ccc58a depends: - python - vc >=14.3,<15 - vc14_runtime >=14.44.35208 - ucrt >=10.0.20348.0 - - lcms2 >=2.17,<3.0a0 - - libwebp-base >=1.6.0,<2.0a0 - - python_abi 3.13.* *_cp313 - libfreetype >=2.14.1 - libfreetype6 >=2.14.1 - - openjpeg >=2.5.4,<3.0a0 + - tk >=8.6.13,<8.7.0a0 + - libwebp-base >=1.6.0,<2.0a0 + - lcms2 >=2.17,<3.0a0 + - libtiff >=4.7.1,<4.8.0a0 - zlib-ng >=2.3.2,<2.4.0a0 + - openjpeg >=2.5.4,<3.0a0 - libjpeg-turbo >=3.1.2,<4.0a0 - - libtiff >=4.7.1,<4.8.0a0 - libxcb >=1.17.0,<2.0a0 - - tk >=8.6.13,<8.7.0a0 + - python_abi 3.14.* *_cp314 license: HPND purls: - pkg:pypi/pillow?source=hash-mapping - size: 946833 - timestamp: 1767353195062 + size: 973387 + timestamp: 1767353195064 - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda sha256: 04c64fb78c520e5c396b6e07bc9082735a5cc28175dbe23138201d0a9441800b md5: 1bd2e65c8c7ef24f4639ae6e850dacc2 @@ -7557,6 +8159,7 @@ packages: constrains: - ipywidgets >=7.6 license: MIT + license_family: MIT purls: - pkg:pypi/plotly?source=hash-mapping size: 4455861 @@ -7573,22 
+8176,43 @@ packages: - pkg:pypi/pluggy?source=compressed-mapping size: 25877 timestamp: 1764896838868 -- conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda - sha256: 5b81b7516d4baf43d0c185896b245fa7384b25dc5615e7baa504b7fa4e07b706 - md5: 7f3ac694319c7eaf81a0325d6405e974 +- conda: https://conda.anaconda.org/conda-forge/linux-64/prek-0.3.0-hb17b654_0.conda + sha256: 5a97802244394fa59b9868cbeeeb7b88102608f4e9b70a386672e3634f04c578 + md5: 22a0109b98aa8c0b324f4a8b68dce7b5 depends: - - cfgv >=2.0.0 - - identify >=1.0.0 - - nodeenv >=0.11.1 - - python >=3.10 - - pyyaml >=5.1 - - virtualenv >=20.10.0 + - libgcc >=14 + - __glibc >=2.17,<3.0.a0 + constrains: + - __glibc >=2.17 license: MIT license_family: MIT - purls: - - pkg:pypi/pre-commit?source=hash-mapping - size: 200827 - timestamp: 1765937577534 + purls: [] + size: 4691139 + timestamp: 1769236430772 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/prek-0.3.0-h6fdd925_0.conda + sha256: 375344751361cf4c110fa9f712f86b953fb327a5d91016e60c24d115cc76ee8a + md5: ff0b1270caf4965286d1a12b7e2c1f94 + depends: + - __osx >=11.0 + constrains: + - __osx >=11.0 + license: MIT + license_family: MIT + purls: [] + size: 4302853 + timestamp: 1769236582783 +- conda: https://conda.anaconda.org/conda-forge/win-64/prek-0.3.0-h18a1a76_0.conda + sha256: 5ad8041ded09bbc8ac863a440a5cd855706508c99af8615dfc63237c466fe73e + md5: b28e209b4c161efb64af7260c05097e3 + depends: + - vc >=14.3,<15 + - vc14_runtime >=14.44.35208 + - ucrt >=10.0.20348.0 + license: MIT + license_family: MIT + purls: [] + size: 4974031 + timestamp: 1769236447810 - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda sha256: 13dc67de68db151ff909f2c1d2486fa7e2d51355b25cee08d26ede1b62d48d40 md5: a1e91db2d17fd258c64921cb38e6745a @@ -7614,49 +8238,49 @@ packages: - pkg:pypi/prompt-toolkit?source=hash-mapping size: 273927 timestamp: 1756321848365 -- conda: 
https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py313h54dd161_0.conda - sha256: 8a5f773e22ccd08fbda57c92f1d094533474db75f70db35311912cdcdb2f18ad - md5: d362949a1ed1ad4693b3928ad1d32c93 +- conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py314h0f05182_0.conda + sha256: 324455a702ef721290de6e51d9af4f7ca057546d6398bbc6e88454db17cdaf6b + md5: 28af9719e28f0054e9aee68153899293 depends: - python - - libgcc >=14 - __glibc >=2.17,<3.0.a0 - - python_abi 3.13.* *_cp313 + - libgcc >=14 + - python_abi 3.14.* *_cp314 license: BSD-3-Clause license_family: BSD purls: - pkg:pypi/psutil?source=hash-mapping - size: 225429 - timestamp: 1767012386804 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/psutil-7.2.1-py313h6688731_0.conda - sha256: 2abd12a0371836075a72e12fde44f63ea08b3781e5b6ec997233d50b9c9832d9 - md5: c3a1b24571871fec4498a0226a3c22c1 + size: 228170 + timestamp: 1767012382363 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/psutil-7.2.1-py314ha14b1ff_0.conda + sha256: 686b643b97df8e7076b971820fb9b5d2ed0ea8a5a82922910da1600a6f462b79 + md5: 6d799fc0d0178eb63202bf99ff7bc24f depends: - python - - python 3.13.* *_cp313 + - python 3.14.* *_cp314 - __osx >=11.0 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 license: BSD-3-Clause license_family: BSD purls: - pkg:pypi/psutil?source=hash-mapping - size: 238851 - timestamp: 1767012473931 -- conda: https://conda.anaconda.org/conda-forge/win-64/psutil-7.2.1-py313h5fd188c_0.conda - sha256: 025574efd6e9d5b90d89ec1da8423132ab9c6131e21be7ec91b9fd7a14665a57 - md5: 8732097a02c66f6b260dd15b705a014e + size: 241751 + timestamp: 1767012600474 +- conda: https://conda.anaconda.org/conda-forge/win-64/psutil-7.2.1-py314hc5dbbe4_0.conda + sha256: d776855d47e14d8b1521a3949c1d1dc3848c690170253ecc439264e219859e22 + md5: 65df3730bedf9c24f54414c8316f8e72 depends: - python - vc >=14.3,<15 - vc14_runtime >=14.44.35208 - ucrt >=10.0.20348.0 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* 
*_cp314 license: BSD-3-Clause license_family: BSD purls: - - pkg:pypi/psutil?source=hash-mapping - size: 243141 - timestamp: 1767012395730 + - pkg:pypi/psutil?source=compressed-mapping + size: 245991 + timestamp: 1767012412984 - conda: https://conda.anaconda.org/conda-forge/linux-64/pthread-stubs-0.4-hb9d3cd8_1002.conda sha256: 9c88f8c64590e9567c6c80823f0328e58d3b1efb0e1c539c0315ceca764e0973 md5: b3c17d95b5a10c6e64a21fa17573e70e @@ -7745,38 +8369,38 @@ packages: - pkg:pypi/pygments?source=hash-mapping size: 889287 timestamp: 1750615908735 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-core-12.1-py313h40b429f_0.conda - sha256: 307ca29ebf2317bd2561639b1ee0290fd8c03c3450fa302b9f9437d8df6a5280 - md5: 31a0a72f3466682d0ea2ebcbd7d319b8 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-core-12.1-py314h3a4d195_0.conda + sha256: df5af268c5a74b7160d772c263ece6f43257faff571783443e34b5f1d5a61cf2 + md5: 75a84fc8337557347252cc4fd3ba2a93 depends: - __osx >=11.0 - libffi >=3.5.2,<3.6.0a0 - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python >=3.14,<3.15.0a0 *_cp314 + - python_abi 3.14.* *_cp314 - setuptools license: MIT license_family: MIT purls: - pkg:pypi/pyobjc-core?source=hash-mapping - size: 481508 - timestamp: 1763152124940 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-framework-cocoa-12.1-py313hcc5defa_0.conda - sha256: 194e188d8119befc952d04157079733e2041a7a502d50340ddde632658799fdc - md5: a6d28c8fc266a3d3c3dae183e25c4d31 + size: 483374 + timestamp: 1763151489724 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-framework-cocoa-12.1-py314h36abed7_0.conda + sha256: aa76ee4328d0514d7c1c455dcd2d3b547db1c59797e54ce0a3f27de5b970e508 + md5: 4219bb3408016e22316cf8b443b5ef93 depends: - __osx >=11.0 - libffi >=3.5.2,<3.6.0a0 - pyobjc-core 12.1.* - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 - - python_abi 3.13.* 
*_cp313 + - python >=3.14,<3.15.0a0 + - python >=3.14,<3.15.0a0 *_cp314 + - python_abi 3.14.* *_cp314 license: MIT license_family: MIT purls: - pkg:pypi/pyobjc-framework-cocoa?source=hash-mapping - size: 376136 - timestamp: 1763160678792 + size: 374792 + timestamp: 1763160601898 - conda: https://conda.anaconda.org/conda-forge/noarch/pyparsing-3.3.1-pyhcf101f3_0.conda sha256: 0c70bc577f5efa87501bdc841b88f594f4d3f3a992dfb851e2130fa5c817835b md5: d837065e4e0de4962c3462079c23f969 @@ -7901,10 +8525,10 @@ packages: - pkg:pypi/pytest-xdist?source=hash-mapping size: 39300 timestamp: 1751452761594 -- conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.13.11-hc97d973_100_cp313.conda +- conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.14.2-h32b2ec7_100_cp314.conda build_number: 100 - sha256: 9cf014cf28e93ee242bacfbf664e8b45ae06e50b04291e640abeaeb0cba0364c - md5: 0cbb0010f1d8ecb64a428a8d4214609e + sha256: a120fb2da4e4d51dd32918c149b04a08815fd2bd52099dad1334647984bb07f1 + md5: 1cef1236a05c3a98f68c33ae9425f656 depends: - __glibc >=2.17,<3.0.a0 - bzip2 >=1.0.8,<2.0a0 @@ -7919,19 +8543,20 @@ packages: - libzlib >=1.3.1,<2.0a0 - ncurses >=6.5,<7.0a0 - openssl >=3.5.4,<4.0a0 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 - readline >=8.2,<9.0a0 - tk >=8.6.13,<8.7.0a0 - tzdata + - zstd >=1.5.7,<1.6.0a0 license: Python-2.0 purls: [] - size: 37226336 - timestamp: 1765021889577 - python_site_packages_path: lib/python3.13/site-packages -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.13.11-hfc2f54d_100_cp313.conda + size: 36790521 + timestamp: 1765021515427 + python_site_packages_path: lib/python3.14/site-packages +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.14.2-h40d2674_100_cp314.conda build_number: 100 - sha256: c476f4e9b6d97c46b496b442878924868a54e5727251549ebfc82027aa52af68 - md5: 18a8c69608151098a8fb75eea64cc266 + sha256: 1a93782e90b53e04c2b1a50a0f8bf0887936649d19dba6a05b05c4b44dae96b7 + md5: 
14f15ab0d31a2ee5635aa56e77132594 depends: - __osx >=11.0 - bzip2 >=1.0.8,<2.0a0 @@ -7943,19 +8568,20 @@ packages: - libzlib >=1.3.1,<2.0a0 - ncurses >=6.5,<7.0a0 - openssl >=3.5.4,<4.0a0 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 - readline >=8.2,<9.0a0 - tk >=8.6.13,<8.7.0a0 - tzdata + - zstd >=1.5.7,<1.6.0a0 license: Python-2.0 purls: [] - size: 12920650 - timestamp: 1765020887340 - python_site_packages_path: lib/python3.13/site-packages -- conda: https://conda.anaconda.org/conda-forge/win-64/python-3.13.11-h09917c8_100_cp313.conda + size: 13575758 + timestamp: 1765021280625 + python_site_packages_path: lib/python3.14/site-packages +- conda: https://conda.anaconda.org/conda-forge/win-64/python-3.14.2-h4b44e0e_100_cp314.conda build_number: 100 - sha256: 0ee0402368783e1fad10025719530499c517a3dbbdfbe18351841d9b7aef1d6a - md5: 9e4c9a7ee9c4ab5b3778ab73e583283e + sha256: 6857d7c97cc71fe9ba298dcb1d3b66cc7df425132ab801babd655faa3df48f32 + md5: c3c73414d5ae3f543c531c978d9cc8b8 depends: - bzip2 >=1.0.8,<2.0a0 - libexpat >=2.7.3,<3.0a0 @@ -7965,16 +8591,17 @@ packages: - libsqlite >=3.51.1,<4.0a0 - libzlib >=1.3.1,<2.0a0 - openssl >=3.5.4,<4.0a0 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 - tk >=8.6.13,<8.7.0a0 - tzdata - ucrt >=10.0.20348.0 - vc >=14.3,<15 - vc14_runtime >=14.44.35208 + - zstd >=1.5.7,<1.6.0a0 license: Python-2.0 purls: [] - size: 16617922 - timestamp: 1765019627175 + size: 16833248 + timestamp: 1765020224759 python_site_packages_path: Lib/site-packages - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhe01879c_2.conda sha256: d6a17ece93bbd5139e02d2bd7dbfa80bee1a4261dced63f65f679121686bf664 @@ -8001,16 +8628,16 @@ packages: - pkg:pypi/fastjsonschema?source=hash-mapping size: 244628 timestamp: 1755304154927 -- conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.13.11-h4df99d1_100.conda - sha256: 4b08d4c2c4b956d306b4868d3faf724eebb5d6e6b170fad2eb0f2d4eb227f1af - md5: 
d1461b2e63b1909f4f5b41c823bd90ae +- conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.2-h4df99d1_100.conda + sha256: 8203dc90a5cb6687f5bfcf332eeaf494ec95d24ed13fca3c82ef840f0bb92a5d + md5: 0064ab66736c4814864e808169dc7497 depends: - - cpython 3.13.11.* - - python_abi * *_cp313 + - cpython 3.14.2.* + - python_abi * *_cp314 license: Python-2.0 purls: [] - size: 48352 - timestamp: 1765019767640 + size: 49287 + timestamp: 1765020424843 - conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda sha256: 4790787fe1f4e8da616edca4acf6a4f8ed4e7c6967aa31b920208fc8f95efcca md5: a61bf9ec79426938ff785eb69dbb1960 @@ -8050,17 +8677,17 @@ packages: - pkg:pypi/tzdata?source=compressed-mapping size: 143542 timestamp: 1765719982349 -- conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.13-8_cp313.conda +- conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda build_number: 8 - sha256: 210bffe7b121e651419cb196a2a63687b087497595c9be9d20ebe97dd06060a7 - md5: 94305520c52a4aa3f6c2b1ff6008d9f8 + sha256: ad6d2e9ac39751cc0529dd1566a26751a0bf2542adb0c232533d32e176e21db5 + md5: 0539938c55b6b1a59b560e843ad864a4 constrains: - - python 3.13.* *_cp313 + - python 3.14.* *_cp314 license: BSD-3-Clause license_family: BSD purls: [] - size: 7002 - timestamp: 1752805902938 + size: 6989 + timestamp: 1752805904792 - conda: https://conda.anaconda.org/conda-forge/noarch/pytz-2025.2-pyhd8ed1ab_0.conda sha256: 8d2a8bf110cc1fc3df6904091dead158ba3e614d8402a83e51ed3a8aa93cdeb0 md5: bc8e3267d44011051f2eb14d22fb0960 @@ -8072,9 +8699,9 @@ packages: - pkg:pypi/pytz?source=hash-mapping size: 189015 timestamp: 1742920947249 -- conda: https://conda.anaconda.org/conda-forge/win-64/pywin32-311-py313h40c08fc_1.conda - sha256: 87eaeb79b5961e0f216aa840bc35d5f0b9b123acffaecc4fda4de48891901f20 - md5: 1ce4f826332dca56c76a5b0cc89fb19e +- conda: https://conda.anaconda.org/conda-forge/win-64/pywin32-311-py314h8f8f202_1.conda + 
sha256: 6918a8067f296f3c65d43e84558170c9e6c3f4dd735cfe041af41a7fdba7b171 + md5: 2d7b7ba21e8a8ced0eca553d4d53f773 depends: - python - vc >=14.3,<15 @@ -8083,19 +8710,19 @@ packages: - vc >=14.3,<15 - vc14_runtime >=14.44.35208 - ucrt >=10.0.20348.0 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 license: PSF-2.0 license_family: PSF purls: - pkg:pypi/pywin32?source=hash-mapping - size: 6695114 - timestamp: 1756487139550 -- conda: https://conda.anaconda.org/conda-forge/win-64/pywinpty-2.0.15-py313h5813708_1.conda - sha256: d34a7cd0a4a7dc79662cb6005e01d630245d9a942e359eb4d94b2fb464ed2552 - md5: 8f01ed27e2baa455e753301218e054fd - depends: - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + size: 6713155 + timestamp: 1756487145487 +- conda: https://conda.anaconda.org/conda-forge/win-64/pywinpty-2.0.15-py314h51f0985_1.conda + sha256: 048e20641da680aedaab285640a2aca56b7b5baf7a18f8f164f2796e13628c1f + md5: dd84e8748bd3c85a5c751b0576488080 + depends: + - python >=3.14.0rc3,<3.15.0a0 + - python_abi 3.14.* *_cp314 - ucrt >=10.0.20348.0 - vc >=14.2,<15 - vc14_runtime >=14.29.30139 @@ -8104,54 +8731,22 @@ packages: license_family: MIT purls: - pkg:pypi/pywinpty?source=hash-mapping - size: 216075 - timestamp: 1759556799508 -- conda: https://conda.anaconda.org/conda-forge/linux-64/pyyaml-6.0.3-py313h3dea7bd_0.conda - sha256: 40dcd6718dce5fbee8aabdd0519f23d456d8feb2e15ac352eaa88bbfd3a881af - md5: 4794ea0adaebd9f844414e594b142cb2 - depends: - - __glibc >=2.17,<3.0.a0 - - libgcc >=14 - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 - - yaml >=0.2.5,<0.3.0a0 - license: MIT - license_family: MIT - purls: - - pkg:pypi/pyyaml?source=hash-mapping - size: 207109 - timestamp: 1758892173548 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyyaml-6.0.3-py313h7d74516_0.conda - sha256: f5be0d84f72a567b7333b9efa74a65bfa44a25658cf107ffa3fc65d3ae6660d7 - md5: 0e8e3235217b4483a7461b63dca5826b - depends: - - __osx >=11.0 - - python >=3.13,<3.14.0a0 - - python 
>=3.13,<3.14.0a0 *_cp313 - - python_abi 3.13.* *_cp313 - - yaml >=0.2.5,<0.3.0a0 - license: MIT - license_family: MIT - purls: - - pkg:pypi/pyyaml?source=hash-mapping - size: 191630 - timestamp: 1758892258120 -- conda: https://conda.anaconda.org/conda-forge/win-64/pyyaml-6.0.3-py313hd650c13_0.conda - sha256: 5d9fd32d318b9da615524589a372b33a6f3d07db2708de16570d70360bf638c2 - md5: c067122d76f8dcbe0848822942ba07be - depends: - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 - - ucrt >=10.0.20348.0 - - vc >=14.3,<15 - - vc14_runtime >=14.44.35208 - - yaml >=0.2.5,<0.3.0a0 + size: 216325 + timestamp: 1759557436167 +- conda: https://conda.anaconda.org/conda-forge/noarch/pyyaml-6.0.3-pyh7db6752_0.conda + sha256: 828af2fd7bb66afc9ab1c564c2046be391aaf66c0215f05afaf6d7a9a270fe2a + md5: b12f41c0d7fb5ab81709fcc86579688f + depends: + - python >=3.10.* + - yaml + track_features: + - pyyaml_no_compile license: MIT license_family: MIT purls: - pkg:pypi/pyyaml?source=hash-mapping - size: 182043 - timestamp: 1758892011955 + size: 45223 + timestamp: 1758891992558 - conda: https://conda.anaconda.org/conda-forge/linux-64/pyzmq-27.1.0-py312hfb55c3c_0.conda noarch: python sha256: a00a41b66c12d9c60e66b391e9a4832b7e28743348cf4b48b410b91927cd7819 @@ -8347,56 +8942,56 @@ packages: - pkg:pypi/rich?source=hash-mapping size: 200840 timestamp: 1760026188268 -- conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py313h843e2db_0.conda - sha256: 076d26e51c62c8ecfca6eb19e3c1febdd7632df1990a7aa53da5df5e54482b1c - md5: 779e3307a0299518713765b83a36f4b1 +- conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py314h2e6c369_0.conda + sha256: e53b0cbf3b324eaa03ca1fe1a688fdf4ab42cea9c25270b0a7307d8aaaa4f446 + md5: c1c368b5437b0d1a68f372ccf01cb133 depends: - python - libgcc >=14 - __glibc >=2.17,<3.0.a0 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 constrains: - __glibc >=2.17 license: MIT license_family: MIT purls: - 
pkg:pypi/rpds-py?source=hash-mapping - size: 383230 - timestamp: 1764543223529 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/rpds-py-0.30.0-py313h2c089d5_0.conda - sha256: db63344f91e8bfe77703c6764aa9eeafb44d165e286053214722814eabda0264 - md5: 190c2d0d4e98ec97df48cdb74caf44d8 + size: 376121 + timestamp: 1764543122774 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/rpds-py-0.30.0-py314haad56a0_0.conda + sha256: e161dd97403b8b8a083d047369a5cf854557dba1204d29e2f0250f5ac4403925 + md5: 76a4f88d1b7748c477abf3c341edc64c depends: - python - __osx >=11.0 - - python 3.13.* *_cp313 - - python_abi 3.13.* *_cp313 + - python 3.14.* *_cp314 + - python_abi 3.14.* *_cp314 constrains: - __osx >=11.0 license: MIT license_family: MIT purls: - pkg:pypi/rpds-py?source=hash-mapping - size: 358961 - timestamp: 1764543165314 -- conda: https://conda.anaconda.org/conda-forge/win-64/rpds-py-0.30.0-py313hfbe8231_0.conda - sha256: 27bd383787c0df7a0a926b11014fd692d60d557398dcf1d50c55aa2378507114 - md5: 58ae648b12cfa6df3923b5fd219931cb + size: 350976 + timestamp: 1764543169524 +- conda: https://conda.anaconda.org/conda-forge/win-64/rpds-py-0.30.0-py314h9f07db2_0.conda + sha256: e4435368c5c25076dc0f5918ba531c5a92caee8e0e2f9912ef6810049cf00db2 + md5: e86531e278ad304438e530953cd55d14 depends: - python - vc >=14.3,<15 - vc14_runtime >=14.44.35208 - ucrt >=10.0.20348.0 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 license: MIT license_family: MIT purls: - pkg:pypi/rpds-py?source=hash-mapping - size: 243419 - timestamp: 1764543047271 -- conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.16.3-py313h4b8bb8b_2.conda - sha256: a5ddc728be0589e770f59e45e3c6c670c56d96a801ddf76a304cc0af7bcef5c4 - md5: 0be9bd58abfb3e8f97260bd0176d5331 + size: 235780 + timestamp: 1764543046065 +- conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.16.3-py314hf07bd8e_2.conda + sha256: 652f9a235051c1d39ccd2fe7e9326792b046a1d93de42171977fa1ba9668a0e8 + md5: 
ee95e8bb52e35c3267a53d3ee1347cc4 depends: - __glibc >=2.17,<3.0.a0 - libblas >=3.9.0,<4.0a0 @@ -8409,17 +9004,17 @@ packages: - numpy <2.6 - numpy >=1.23,<3 - numpy >=1.25.2 - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 license: BSD-3-Clause license_family: BSD purls: - pkg:pypi/scipy?source=compressed-mapping - size: 16785487 - timestamp: 1766108773270 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/scipy-1.16.3-py313h29d7d31_2.conda - sha256: ee3cbddb7d598c78b592fafbfa3eaf8c89df353bbed56a1a9f32e9f7daa49bb4 - md5: a3324bd937a39cbbf1cbe0940160e19e + size: 16982488 + timestamp: 1766108668132 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/scipy-1.16.3-py314h725efaa_2.conda + sha256: 282f8b244f31d8c2e0ce401b0473e8090de4b59326018a360419693b629e6b87 + md5: 6333b784ddfcccd3f5569f812f66c352 depends: - __osx >=11.0 - libblas >=3.9.0,<4.0a0 @@ -8431,18 +9026,18 @@ packages: - numpy <2.6 - numpy >=1.23,<3 - numpy >=1.25.2 - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python >=3.14,<3.15.0a0 *_cp314 + - python_abi 3.14.* *_cp314 license: BSD-3-Clause license_family: BSD purls: - - pkg:pypi/scipy?source=hash-mapping - size: 13929516 - timestamp: 1766109298759 -- conda: https://conda.anaconda.org/conda-forge/win-64/scipy-1.16.3-py313he51e9a2_2.conda - sha256: 997a2202126425438a16de7ef1e5e924bd66feb43bda5b71326e281c7331489d - md5: a49556572438d5477f1eca06bb6d0770 + - pkg:pypi/scipy?source=compressed-mapping + size: 13880523 + timestamp: 1766109018710 +- conda: https://conda.anaconda.org/conda-forge/win-64/scipy-1.16.3-py314h221f224_2.conda + sha256: 99d6198dc05171610073083c9d218d2a9adfa756659b391183d21cca55f888f1 + md5: b600c47282ee91e492b89f65708a5c9a depends: - libblas >=3.9.0,<4.0a0 - libcblas >=3.9.0,<4.0a0 @@ -8450,8 +9045,8 @@ packages: - numpy <2.6 - numpy >=1.23,<3 - numpy >=1.25.2 - - python 
>=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 - ucrt >=10.0.20348.0 - vc >=14.3,<15 - vc14_runtime >=14.44.35208 @@ -8459,8 +9054,8 @@ packages: license_family: BSD purls: - pkg:pypi/scipy?source=hash-mapping - size: 15066293 - timestamp: 1766109539389 + size: 15082636 + timestamp: 1766109482825 - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.0.0-pyh5552912_0.conda sha256: 5893e203cb099c784bf5b08d29944b5402beebcc361d55e54b676e9b355c7844 md5: dcff6f8ea9e86a0bda978b88f89f2310 @@ -8513,40 +9108,40 @@ packages: - pkg:pypi/setuptools?source=hash-mapping size: 748788 timestamp: 1748804951958 -- conda: https://conda.anaconda.org/conda-forge/linux-64/simplejson-3.20.2-py313h07c4f96_1.conda - sha256: cf44d6bd3dc3be6b683fac251d6b53d508d041506a2101fd7cdb404468cf8be3 - md5: 1cc1de04373b633177f4d367b8b75270 +- conda: https://conda.anaconda.org/conda-forge/linux-64/simplejson-3.20.2-py314h5bd0f2a_1.conda + sha256: fde24560898ecbb63edb6580fbf09fa07e10f55a89f8ae35f891f712f1d07872 + md5: b2f9edf27e434edc6072e6f7c076015f depends: - __glibc >=2.17,<3.0.a0 - libgcc >=14 - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 license: MIT license_family: MIT purls: - pkg:pypi/simplejson?source=hash-mapping - size: 133692 - timestamp: 1762506927030 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/simplejson-3.20.2-py313h6535dbc_1.conda - sha256: ef09659f0248066e8c06a0bd8bd1a360b8158cd2d73c65c969897e20344c6a2a - md5: 27a8bc65b5f0aecb87a01568e573e6ae + size: 135289 + timestamp: 1762507017143 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/simplejson-3.20.2-py314h0612a62_1.conda + sha256: e6a6a2aab805c4c50464aecff3f752a78ce15bb1b9de006b1d929d0673f3a386 + md5: 82c463d19f1d85e60d520d129c67b483 depends: - __osx >=11.0 - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 - - python_abi 3.13.* *_cp313 + - python 
>=3.14,<3.15.0a0 + - python >=3.14,<3.15.0a0 *_cp314 + - python_abi 3.14.* *_cp314 license: MIT license_family: MIT purls: - pkg:pypi/simplejson?source=hash-mapping - size: 133717 - timestamp: 1762507593463 -- conda: https://conda.anaconda.org/conda-forge/win-64/simplejson-3.20.2-py313h5ea7bf4_1.conda - sha256: b1ea3625e7dcda6ea6121dc61461da9bc9be54a99aa20ed26a5ee5b43663b5c4 - md5: bcdc4785e018f4325845f8217333a17e - depends: - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + size: 134426 + timestamp: 1762507464057 +- conda: https://conda.anaconda.org/conda-forge/win-64/simplejson-3.20.2-py314h5a2d7ad_1.conda + sha256: 28f67233b03f8f1ebdcd5b35d1700d75101be0e9decf4975b8dc867609d4a507 + md5: f5d14f3ecb62b185cb571b79034df477 + depends: + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 - ucrt >=10.0.20348.0 - vc >=14.3,<15 - vc14_runtime >=14.44.35208 @@ -8554,8 +9149,8 @@ packages: license_family: MIT purls: - pkg:pypi/simplejson?source=hash-mapping - size: 132684 - timestamp: 1762507090611 + size: 134026 + timestamp: 1762507518751 - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda sha256: 458227f759d5e3fcec5d9b7acce54e10c9e1f4f4b7ec978f3bfd54ce4ee9853d md5: 3339e3b65d58accf4ca4fb8748ab16b3 @@ -8570,14 +9165,14 @@ packages: timestamp: 1753199211006 - pypi: ./ name: skillmodels - version: 0.0.53.dev37+gcc710a63e.d20260108 - sha256: 167bd74677526ae18b099ed73f00727ec8b124bba6895ba9afe3862d04fbcd84 + version: 0.0.24.dev262+g7e7784e59.d20260130 + sha256: d7b8e677f24dc24e6a4c7b146f578efbc96c4494d1242caaf376ae8a7453f5f4 requires_dist: - dags - jax>=0.8 - numpy - pandas - requires_python: '>=3.13,<3.14' + requires_python: '>=3.14,<3.15' - conda: https://conda.anaconda.org/conda-forge/noarch/snakeviz-2.2.2-pyhd8ed1ab_1.conda sha256: 833326122c18887b338262c13365cb146b6702c79d72da74a1c6b8af4c50e162 md5: 421b7a950e384949ca1b0f04f0751ce0 @@ -8612,10 +9207,10 @@ packages: - pkg:pypi/soupsieve?source=compressed-mapping size: 37951 
timestamp: 1766075884412 -- pypi: https://files.pythonhosted.org/packages/0e/50/80a8d080ac7d3d321e5e5d420c9a522b0aa770ec7013ea91f9a8b7d36e4a/sqlalchemy-2.0.45-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl +- pypi: https://files.pythonhosted.org/packages/89/a2/0e1590e9adb292b1d576dbcf67ff7df8cf55e56e78d2c927686d01080f4b/sqlalchemy-2.0.45-cp314-cp314-win_amd64.whl name: sqlalchemy version: 2.0.45 - sha256: 672c45cae53ba88e0dad74b9027dddd09ef6f441e927786b05bec75d949fbb2e + sha256: 4748601c8ea959e37e03d13dcda4a44837afcd1b21338e637f7c935b8da06177 requires_dist: - importlib-metadata ; python_full_version < '3.8' - greenlet>=1 ; platform_machine == 'AMD64' or platform_machine == 'WIN32' or platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'ppc64le' or platform_machine == 'win32' or platform_machine == 'x86_64' @@ -8650,10 +9245,10 @@ packages: - typing-extensions!=3.10.0.1 ; extra == 'aiosqlite' - sqlcipher3-binary ; extra == 'sqlcipher' requires_python: '>=3.7' -- pypi: https://files.pythonhosted.org/packages/bf/e1/3ccb13c643399d22289c6a9786c1a91e3dcbb68bce4beb44926ac2c557bf/sqlalchemy-2.0.45-py3-none-any.whl +- pypi: https://files.pythonhosted.org/packages/b3/27/caf606ee924282fe4747ee4fd454b335a72a6e018f97eab5ff7f28199e16/sqlalchemy-2.0.45-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl name: sqlalchemy version: 2.0.45 - sha256: 5225a288e4c8cc2308dbdd874edad6e7d0fd38eac1e9e5f23503425c8eee20d0 + sha256: 883c600c345123c033c2f6caca18def08f1f7f4c3ebeb591a63b6fceffc95cce requires_dist: - importlib-metadata ; python_full_version < '3.8' - greenlet>=1 ; platform_machine == 'AMD64' or platform_machine == 'WIN32' or platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'ppc64le' or platform_machine == 'win32' or platform_machine == 'x86_64' @@ -8688,10 +9283,10 @@ packages: - typing-extensions!=3.10.0.1 ; extra == 'aiosqlite' - sqlcipher3-binary ; 
extra == 'sqlcipher' requires_python: '>=3.7' -- pypi: https://files.pythonhosted.org/packages/c0/c5/d17113020b2d43073412aeca09b60d2009442420372123b8d49cc253f8b8/sqlalchemy-2.0.45-cp313-cp313-win_amd64.whl +- pypi: https://files.pythonhosted.org/packages/bf/e1/3ccb13c643399d22289c6a9786c1a91e3dcbb68bce4beb44926ac2c557bf/sqlalchemy-2.0.45-py3-none-any.whl name: sqlalchemy version: 2.0.45 - sha256: afbf47dc4de31fa38fd491f3705cac5307d21d4bb828a4f020ee59af412744ee + sha256: 5225a288e4c8cc2308dbdd874edad6e7d0fd38eac1e9e5f23503425c8eee20d0 requires_dist: - importlib-metadata ; python_full_version < '3.8' - greenlet>=1 ; platform_machine == 'AMD64' or platform_machine == 'WIN32' or platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'ppc64le' or platform_machine == 'win32' or platform_machine == 'x86_64' @@ -8759,19 +9354,19 @@ packages: requires_dist: - pyreadline3 ; sys_platform == 'win32' requires_python: '>=3.8' -- conda: https://conda.anaconda.org/conda-forge/win-64/tbb-2022.3.0-hd094cb3_1.conda - sha256: c31cac57913a699745d124cdc016a63e31c5749f16f60b3202414d071fc50573 - md5: 17c38aaf14c640b85c4617ccb59c1146 +- conda: https://conda.anaconda.org/conda-forge/win-64/tbb-2022.3.0-h3155e25_2.conda + sha256: abd9a489f059fba85c8ffa1abdaa4d515d6de6a3325238b8e81203b913cf65a9 + md5: 0f9817ffbe25f9e69ceba5ea70c52606 depends: - - libhwloc >=2.12.1,<2.12.2.0a0 + - libhwloc >=2.12.2,<2.12.3.0a0 - ucrt >=10.0.20348.0 - vc >=14.3,<15 - vc14_runtime >=14.44.35208 license: Apache-2.0 license_family: APACHE purls: [] - size: 155714 - timestamp: 1762510341121 + size: 155869 + timestamp: 1767886839029 - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyh6dadd2b_1.conda sha256: b375e8df0d5710717c31e7c8e93c025c37fa3504aea325c7a55509f64e5d4340 md5: e43ca10d61e55d0a8ec5d8c62474ec9e @@ -8802,9 +9397,9 @@ packages: - pkg:pypi/terminado?source=hash-mapping size: 24749 timestamp: 1766513766867 -- conda: 
https://conda.anaconda.org/conda-forge/noarch/textual-7.0.0-pyhcf101f3_0.conda - sha256: 50ea42e243d349b8218168c06bfd408f4dcda68d4364de1f5866507e009e3cfd - md5: ca39d364b4f1b395bb6a70312d455c28 +- conda: https://conda.anaconda.org/conda-forge/noarch/textual-7.0.1-pyhcf101f3_0.conda + sha256: b601d7f7d200465547ed76fd6b95701d94b0bbf0ab1d9dae4beb2f7012947cdd + md5: 13e92b552eb58a0c243a967a7d9e4d78 depends: - pygments >=2.19.2,<3.0.0 - typing_extensions >=4.4.0,<5.0.0 @@ -8821,8 +9416,8 @@ packages: license_family: MIT purls: - pkg:pypi/textual?source=hash-mapping - size: 526014 - timestamp: 1767448924135 + size: 525875 + timestamp: 1767859034631 - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda sha256: 7c803480dbfb8b536b9bf6287fa2aa0a4f970f8c09075694174eb4550a4524cd md5: c0d0b883e97906f7524e2aac94be0e0d @@ -8885,40 +9480,40 @@ packages: - pkg:pypi/tomli?source=compressed-mapping size: 20973 timestamp: 1760014679845 -- conda: https://conda.anaconda.org/conda-forge/linux-64/tornado-6.5.3-py313h07c4f96_0.conda - sha256: 6006d4e5a6ff99be052c939e43adee844a38f2dc148f44a7c11aa0011fd3d811 - md5: 82da2dcf1ea3e298f2557b50459809e0 +- conda: https://conda.anaconda.org/conda-forge/linux-64/tornado-6.5.3-py314h5bd0f2a_0.conda + sha256: b8f9f9ae508d79c9c697eb01b6a8d2ed4bc1899370f44aa6497c8abbd15988ea + md5: e35f08043f54d26a1be93fdbf90d30c3 depends: - __glibc >=2.17,<3.0.a0 - libgcc >=14 - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 license: Apache-2.0 license_family: Apache purls: - pkg:pypi/tornado?source=hash-mapping - size: 878109 - timestamp: 1765458900582 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/tornado-6.5.4-py313h6535dbc_0.conda - sha256: a8130a361b7bc21190836ba8889276cc263fcb09f52bf22efcaed1de98179948 - md5: 67a85c1b5c17124eaf9194206afd5159 + size: 905436 + timestamp: 1765458949518 +- conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/tornado-6.5.4-py314h0612a62_0.conda + sha256: affbc6300e1baef5848f6e69569733a3e7a118aa642487c853f53d6f2bd23b89 + md5: 83e1a2d7b0c1352870bbe9d9406135cf depends: - __osx >=11.0 - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python >=3.14,<3.15.0a0 *_cp314 + - python_abi 3.14.* *_cp314 license: Apache-2.0 license_family: Apache purls: - pkg:pypi/tornado?source=hash-mapping - size: 877647 - timestamp: 1765836696426 -- conda: https://conda.anaconda.org/conda-forge/win-64/tornado-6.5.4-py313h5ea7bf4_0.conda - sha256: 81b131db1bebed88f11a5f9891c0c0a7c6998dfd96cd96f54839f3a0cbebd5a0 - md5: 1402782887fafaa117a8d76d2cfa4761 - depends: - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + size: 909298 + timestamp: 1765836779269 +- conda: https://conda.anaconda.org/conda-forge/win-64/tornado-6.5.4-py314h5a2d7ad_0.conda + sha256: 40fde32a4992ab0f875618f97d9aadf263d39c6c92ace7572c6b0a71c655abe1 + md5: 00157f40fd3ea957a2616e9ffda6b84f + depends: + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 - ucrt >=10.0.20348.0 - vc >=14.3,<15 - vc14_runtime >=14.44.35208 @@ -8926,8 +9521,8 @@ packages: license_family: Apache purls: - pkg:pypi/tornado?source=hash-mapping - size: 880049 - timestamp: 1765836649731 + size: 908399 + timestamp: 1765836848636 - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_1.conda sha256: f39a5620c6e8e9e98357507262a7869de2ae8cc07da8b7f84e517c9fd6c2b959 md5: 019a7385be9af33791c989871317e1ed @@ -8939,31 +9534,26 @@ packages: - pkg:pypi/traitlets?source=hash-mapping size: 110051 timestamp: 1733367480074 -- pypi: https://files.pythonhosted.org/packages/42/36/82e66b9753a76964d26fd9bc3514ea0abce0a5ba5ad7d5f084070c6981da/ty-0.0.10-py3-none-win_amd64.whl +- pypi: 
https://files.pythonhosted.org/packages/74/18/8dd4fe6df1fd66f3e83b4798eddb1d8482d9d9b105f25099b76703402ebb/ty-0.0.11-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl name: ty - version: 0.0.10 - sha256: 16deb77a72cf93b89b4d29577829613eda535fbe030513dfd9fba70fe38bc9f5 + version: 0.0.11 + sha256: 25f88e8789072830348cb59b761d5ced70642ed5600673b4bf6a849af71eca8b requires_python: '>=3.8' -- pypi: https://files.pythonhosted.org/packages/9e/4c/2f9ac5edbd0e67bf82f5cd04275c4e87cbbf69a78f43e5dcf90c1573d44e/ty-0.0.10-py3-none-manylinux_2_24_x86_64.whl +- pypi: https://files.pythonhosted.org/packages/ad/01/3a563dba8b1255e474c35e1c3810b7589e81ae8c41df401b6a37c8e2cde9/ty-0.0.11-py3-none-macosx_11_0_arm64.whl name: ty - version: 0.0.10 - sha256: e206a23bd887574302138b33383ae1edfcc39d33a06a12a5a00803b3f0287a45 + version: 0.0.11 + sha256: 121987c906e02264c3b511b95cb9f8a3cdd66f3283b8bbab678ca3525652e304 requires_python: '>=3.8' -- pypi: https://files.pythonhosted.org/packages/e8/cd/9dd49e6d40e54d4b7d563f9e2a432c4ec002c0673a81266e269c4bc194ce/ty-0.0.10-py3-none-macosx_11_0_arm64.whl +- pypi: https://files.pythonhosted.org/packages/df/04/5a5dfd0aec0ea99ead1e824ee6e347fb623c464da7886aa1e3660fb0f36c/ty-0.0.11-py3-none-win_amd64.whl name: ty - version: 0.0.10 - sha256: e4832f8879cb95fc725f7e7fcab4f22be0cf2550f3a50641d5f4409ee04176d4 + version: 0.0.11 + sha256: 1bb205db92715d4a13343bfd5b0c59ce8c0ca0daa34fb220ec9120fc66ccbda7 requires_python: '>=3.8' - pypi: https://files.pythonhosted.org/packages/e7/c1/56ef16bf5dcd255155cc736d276efa6ae0a5c26fd685e28f0412a4013c01/types_pytz-2025.2.0.20251108-py3-none-any.whl name: types-pytz version: 2025.2.0.20251108 sha256: 0f1c9792cab4eb0e46c52f8845c8f77cf1e313cb3d68bf826aa867fe4717d91c requires_python: '>=3.9' -- pypi: https://files.pythonhosted.org/packages/bd/e0/1eed384f02555dde685fff1a1ac805c1c7dcb6dd019c916fe659b1c1f9ec/types_pyyaml-6.0.12.20250915-py3-none-any.whl - name: types-pyyaml - version: 6.0.12.20250915 - sha256: 
e7d4d9e064e89a3b3cae120b4990cd370874d2bf12fa5f46c97018dd5d3c9ab6 - requires_python: '>=3.9' - conda: https://conda.anaconda.org/conda-forge/noarch/typing-extensions-4.15.0-h396c80c_0.conda sha256: 7c2df5721c742c2a47b2c8f960e718c930031663ac1174da67c1ed5999f7938c md5: edd329d7d3a4ab45dcf905899a7a6115 @@ -9014,54 +9604,49 @@ packages: purls: [] size: 694692 timestamp: 1756385147981 -- conda: https://conda.anaconda.org/conda-forge/linux-64/ukkonen-1.0.1-py313h7037e92_6.conda - sha256: bd1f3d159b204be5aeeb3dd165fad447d3a1c5df75fec64407a68f210a0cb722 - md5: 1fa8d662361896873a165b051322073e +- conda: https://conda.anaconda.org/conda-forge/linux-64/unicodedata2-17.0.0-py314h5bd0f2a_1.conda + sha256: d1dafc15fc5d2b1dd5b0a525e8a815028de20dd53b2c775a1b56e8e4839fb736 + md5: 58e2ee530005067c5db23f33c6ab43d2 depends: - __glibc >=2.17,<3.0.a0 - - cffi - libgcc >=14 - - libstdcxx >=14 - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 - license: MIT - license_family: MIT + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 + license: Apache-2.0 + license_family: Apache purls: - - pkg:pypi/ukkonen?source=hash-mapping - size: 14648 - timestamp: 1761594865380 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/ukkonen-1.0.1-py313hc50a443_6.conda - sha256: 66596db68cd50d61af97b01de4fd6ba5b08c4f5c779c331888196253b4daf353 - md5: 8e87b6fff522cabf8c02878c24d44312 + - pkg:pypi/unicodedata2?source=hash-mapping + size: 409745 + timestamp: 1763055060898 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/unicodedata2-17.0.0-py314h0612a62_1.conda + sha256: 48c51dd2ef696f7a1a3635716585a8e383a8c00e719305cfda2b480c36ee1283 + md5: c673decfe1f120b0717d0aa193b10060 depends: - __osx >=11.0 - - cffi - - libcxx >=19 - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 - - python_abi 3.13.* *_cp313 - license: MIT - license_family: MIT + - python >=3.14,<3.15.0a0 + - python >=3.14,<3.15.0a0 *_cp314 + - python_abi 3.14.* *_cp314 + license: Apache-2.0 + license_family: 
Apache purls: - - pkg:pypi/ukkonen?source=hash-mapping - size: 14535 - timestamp: 1761595088230 -- conda: https://conda.anaconda.org/conda-forge/win-64/ukkonen-1.0.1-py313hf069bd2_6.conda - sha256: f42cd55bd21746274d7074b93b53fb420b4ae0f8f1b6161cb2cc5004c20c7ec7 - md5: 77444fe3f3004fe52c5ee70626d11d66 + - pkg:pypi/unicodedata2?source=hash-mapping + size: 416770 + timestamp: 1763055099322 +- conda: https://conda.anaconda.org/conda-forge/win-64/unicodedata2-17.0.0-py314h5a2d7ad_1.conda + sha256: 47e061aec1487519c398e1c999ac3680f068f9e1d8574c8b365eac4787773250 + md5: 1f90bb13fa5ced89ca4dcc0af3bbebf3 depends: - - cffi - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 - ucrt >=10.0.20348.0 - vc >=14.3,<15 - vc14_runtime >=14.44.35208 - license: MIT - license_family: MIT + license: Apache-2.0 + license_family: Apache purls: - - pkg:pypi/ukkonen?source=hash-mapping - size: 18266 - timestamp: 1761595426854 + - pkg:pypi/unicodedata2?source=hash-mapping + size: 405783 + timestamp: 1763054877424 - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda sha256: e0eb6c8daf892b3056f08416a96d68b0a358b7c46b99c8a50481b22631a4dfc0 md5: e7cb0f5745e4c5035a460248334af7eb @@ -9125,21 +9710,6 @@ packages: purls: [] size: 115235 timestamp: 1767320173250 -- conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda - sha256: 77193c99c6626c58446168d3700f9643d8c0dab1f6deb6b9dd039e6872781bfb - md5: cfccfd4e8d9de82ed75c8e2c91cab375 - depends: - - distlib >=0.3.7,<1 - - filelock >=3.12.2,<4 - - platformdirs >=3.9.1,<5 - - python >=3.10 - - typing_extensions >=4.13.2 - license: MIT - license_family: MIT - purls: - - pkg:pypi/virtualenv?source=hash-mapping - size: 4401341 - timestamp: 1761726489722 - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda sha256: e311b64e46c6739e2a35ab8582c20fa30eb608da130625ed379f4467219d4813 md5: 
7e1e5ff31239f9cd5855714df8a3783d diff --git a/pyproject.toml b/pyproject.toml index 0b00efd9..6144dd45 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,89 +3,82 @@ # ====================================================================================== [project] -name = "skillmodels" authors = [ - { name = "Janoś Gabler", email = "janos.gabler@gmail.com" }, + { name = "Janoś Gabler", email = "janos.gabler@gmail.com" }, ] -maintainers = [ - { name = "Janoś Gabler", email = "janos.gabler@gmail.com" }, - { name = "Hans-Martin von Gaudecker", email = "hmgaudecker@uni-bonn.de" }, +classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: MIT License", + "Operating System :: MacOS :: MacOS X", + "Operating System :: Microsoft :: Windows", + "Operating System :: POSIX", + "Operating System :: Unix", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3 :: Only", + "Topic :: Scientific/Engineering", +] +dependencies = [ + "dags", + "jax>=0.8", + "numpy", + "pandas", ] description = "Estimators for skill formation models" -dynamic = ["version"] +dynamic = [ "version" ] keywords = [ - "Skill formation", - "Econometrics", - "Economics", - "Estimation", - "Statistics", -] -classifiers = [ - "Development Status :: 4 - Beta", - "Intended Audience :: Science/Research", - "License :: OSI Approved :: MIT License", - "Operating System :: MacOS :: MacOS X", - "Operating System :: Microsoft :: Windows", - "Operating System :: POSIX", - "Operating System :: Unix", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3 :: Only", - "Topic :: Scientific/Engineering", + "Skill formation", + "Econometrics", + "Economics", + "Estimation", + "Statistics", ] -requires-python = ">=3.13,<3.14" -dependencies = [ - "dags", - "jax>=0.8", - "numpy", - "pandas", +license = { text = "MIT" } +maintainers = [ + { name = "Janoś Gabler", email = "janos.gabler@gmail.com" }, + { name = 
"Hans-Martin von Gaudecker", email = "hmgaudecker@uni-bonn.de" }, ] - -[project.readme] -file = "README.md" -content-type = "text/markdown" - -[project.license] -text = "MIT" +name = "skillmodels" +readme = { file = "README.md", content-type = "text/markdown" } +requires-python = ">=3.14,<3.15" [project.urls] -Repository = "https://github.com/OpenSourceEconomics/skillmodels" Github = "https://github.com/OpenSourceEconomics/skillmodels" +Repository = "https://github.com/OpenSourceEconomics/skillmodels" Tracker = "https://github.com/OpenSourceEconomics/skillmodels/issues" - # ====================================================================================== # Build system configuration # ====================================================================================== [build-system] -requires = ["hatchling", "hatch-vcs"] build-backend = "hatchling.build" +requires = [ "hatchling", "hatch-vcs" ] [tool.hatch.build.hooks.vcs] version-file = "src/skillmodels/_version.py" [tool.hatch.build.targets.sdist] -exclude = ["tests"] +exclude = [ "tests" ] only-packages = true [tool.hatch.build.targets.wheel] -only-include = ["src"] -sources = ["src"] - -[tool.hatch.version] -source = "vcs" +only-include = [ "src" ] +sources = [ "src" ] [tool.hatch.metadata] allow-direct-references = true +[tool.hatch.version] +source = "vcs" # ====================================================================================== -# Pixi +# Pixi configuration # ====================================================================================== [tool.pixi.workspace] -channels = ["conda-forge"] -platforms = ["linux-64", "osx-arm64", "win-64"] +channels = [ "conda-forge" ] +platforms = [ "linux-64", "osx-arm64", "win-64" ] # Development Dependencies (conda) # -------------------------------------------------------------------------------------- @@ -99,28 +92,29 @@ networkx = "*" numpy = ">=2.3,<2.4" # optimagic = "*" plotly = ">=6.2" -pre-commit = "*" +prek = "*" pybaum = "*" +python = 
"~=3.14.0" python-kaleido = ">=1.0" scipy = "*" [tool.pixi.pypi-dependencies] -skillmodels = {path = ".", editable = true} -optimagic = { git = "https://github.com/optimagic-dev/optimagic.git", branch = "main"} +optimagic = { git = "https://github.com/optimagic-dev/optimagic.git", branch = "main" } pdbp = "*" +skillmodels = { path = ".", editable = true } # Features and Tasks # -------------------------------------------------------------------------------------- [tool.pixi.feature.cuda] -platforms = ["linux-64"] -system-requirements = {cuda = "12"} +platforms = [ "linux-64" ] +system-requirements = { cuda = "12" } [tool.pixi.feature.cuda.dependencies] cuda-nvcc = ">=12" [tool.pixi.feature.cuda.pypi-dependencies] -jax = {version = ">=0.7", extras = ["cuda12"]} +jax = { version = ">=0.7", extras = [ "cuda12" ] } [tool.pixi.feature.test.dependencies] pytest = "*" @@ -131,7 +125,6 @@ snakeviz = "*" [tool.pixi.feature.test.target.unix.dependencies] pytest-memray = "*" - [tool.pixi.feature.test.tasks] tests = "pytest tests" tests-with-cov = "pytest tests --cov-report=xml --cov=./" @@ -142,161 +135,97 @@ mem-on-clean-repo = "git status --porcelain && git diff-index --quiet HEAD -- && mem-cuda = "pytest -x -s --pdb --memray --fail-on-increase tests/test_likelihood_regression.py::test_likelihood_contributions_large_nobs" [tool.pixi.feature.ty.pypi-dependencies] -matplotlib = "*" # required because of pandas +matplotlib = "*" # required because of pandas pandas-stubs = "*" ty = "*" -types-PyYAML = "*" types-pytz = "*" [tool.pixi.feature.ty.tasks] -ty = "ty check" +ty = "ty check src tests docs" + +[tool.pixi.feature.docs.dependencies] +jupyter-book = ">=2.0" + +[tool.pixi.feature.docs.tasks] +docs = { cmd = "jupyter book build --html", cwd = "docs" } +view-docs = { cmd = "jupyter book start", cwd = "docs" } # Environments # -------------------------------------------------------------------------------------- [tool.pixi.environments] -cuda = {features = ["cuda"], 
solve-group = "cuda"} -test-cpu = {features = ["test"], solve-group = "default"} -test-gpu = {features = ["test", "cuda"], solve-group = "cuda"} -ty = {features = ["test", "ty"], solve-group = "default"} +cuda = { features = [ "cuda" ], solve-group = "cuda" } +docs = { features = [ "docs" ], solve-group = "default" } +test-cpu = { features = [ "test" ], solve-group = "default" } +test-gpu = { features = [ "test", "cuda" ], solve-group = "cuda" } +ty = { features = [ "test", "ty" ], solve-group = "default" } # ====================================================================================== # Ruff configuration # ====================================================================================== [tool.ruff] -target-version = "py313" fix = true line-length = 88 - +target-version = "py314" +unsafe-fixes = false [tool.ruff.lint] -select = ["ALL"] extend-ignore = [ - # missing type annotation - "ANN001", - - # missing type annotation for `*args` - "ANN002", - - # missing type annotation for `**kwargs` - "ANN003", - - # missing return type annotation for public function - "ANN201", - - # missing return type annotation for private function - "ANN202", - - # No explicit `stacklevel` keyword argument found - "B028", - - # In conflict with formatter - "COM812", - - # Missing docstring in public module - "D100", - - # missing docstring in public function - "D103", - - # missing docstring in public package - "D104", - - # exception must not use a string literal - "EM101", - - # exception must not use an f-string literal - "EM102", - - # Boolean-typed positional arguments. - "FBT001", - - # Boolean default positional argument in function definition - "FBT002", - - # line contains a todo - "FIX002", - - # In conflict with formatter - "ISC001", - - # Leave Numpy's legacy RNG - "NPY002", - - # array.at is perfectly valid Jax, but linter thinks it's Pandas... 
- "PD008", - - # pd.merge is fine - "PD015", - - # Many suggestions to use list comprehension are not helpful - "PERF401", - - # Magic values are fine - "PLR2004", - - # Too many arguments to function call - "PLR0913", - - # Assignment before return statement is fine. - "RET504", - - # use of `assert` detected - "S101", - - # `pickle` module is unsafe - "S301", - - # Private member accessed: `_stochastic_info` - "SLF001", - - # long messages outside the exception class - "TRY003", + "ANN401", # Dynamically typed expressions (typing.Any) are disallowed - too strict + "COM812", # Conflicts with ruff-format + "EM101", # Exception must not use a string literal + "EM102", # Exception must not use an f-string literal + "FBT002", # Boolean default positional argument in function definition + "FIX002", # Line contains TODO + "ISC001", # Conflicts with ruff-format + "PD015", # pd.merge is fine + "PERF401", # Many suggestions to use list comprehension are not helpful + "PLR0913", # Too many arguments to function call + "PLR2004", # Magic values are fine + "S301", # `pickle` module is unsafe + "TC001", # Move application import into a type-checking block + "TC002", # Move third-party import into a type-checking block + "TC003", # Move standard library import into a type-checking block + "TRY003", # Long messages outside exception class ] +select = [ "ALL" ] [tool.ruff.lint.per-file-ignores] -"src/skillmodels/constraints.py" = ["D417"] -"src/skillmodels/decorators.py" = ["D417"] -"src/skillmodels/kalman_filters.py" = ["D417"] -"src/skillmodels/likelihood_function.py" = ["D417"] -"src/skillmodels/likelihood_function_debug.py" = ["D417"] -"src/skillmodels/params_index.py" = ["D417"] -"src/skillmodels/parse_params.py" = ["D417"] -"src/skillmodels/process_data.py" = ["D417"] -"src/skillmodels/simulate_data.py" = ["D417"] -"src/skillmodels/visualize_*.py" = ["BLE001", "D417"] -"src/skillmodels/*_heatmap*.py" = ["D417"] -"**/*.ipynb" = ["B018", "T201", "E402", "PLR2004", "INP001", 
"PTH100"] -"docs/**/*" = ["A001", "ERA001", "INP001", "PTH100", "PTH123", "S506"] +"src/skillmodels/types.py" = [ "TC" ] # Dataclasses need types at runtime +"src/skillmodels/visualize_*.py" = [ "BLE001" ] +"**/*.ipynb" = [ + "B018", # Seemingly useless expression for printing. + "INP001", # No need for a namespace. + "T201", # Printing is fine here. +] "tests/*" = [ - "ARG001", - "E712", - "FBT003", - "INP001", - "PD002", - "PT011", - "NPY002", - "PTH123", - "S506" + "ANN", # No type annotations needed for tests + "ARG001", # Unused arguments are common in fixture-heavy tests + "D100", # No module docstrings needed for tests + "D103", # No function docstrings needed for tests + "E712", # Comparison to True/False using == might be necessary for arrays. + "FBT003", # Boolean positional values are common in test setup + "INP001", # No need for a namespace. + "PT011", # Broad pytest.raises() blocks are okay + "S101", # use of `assert` detected ] [tool.ruff.lint.pydocstyle] convention = "google" - # ====================================================================================== # ty configuration # ====================================================================================== [tool.ty.rules] -invalid-return-type = "error" ambiguous-protocol-member = "error" deprecated = "error" division-by-zero = "error" ignore-comment-unknown-rule = "error" invalid-argument-type = "error" invalid-ignore-comment = "error" +invalid-return-type = "error" possibly-missing-attribute = "error" possibly-missing-implicit-call = "error" possibly-missing-import = "error" @@ -308,16 +237,20 @@ unsupported-base = "error" unused-ignore-comment = "error" useless-overload-body = "error" +# ====================================================================================== +# pytest configuration +# ====================================================================================== [tool.pytest.ini_options] +addopts = [ "--pdbcls=pdbp:Pdb" ] +filterwarnings = [ ] markers = [ - 
"wip: Tests that are work-in-progress.", - "unit: Flag for unit tests which target mainly a single function.", - "integration: Flag for integration tests which may comprise of multiple unit tests.", - "end_to_end: Flag for tests that cover the whole program.", + "end_to_end: Flag for tests that cover the whole program.", + "integration: Flag for integration tests which may comprise of multiple unit tests.", + "unit: Flag for unit tests which target mainly a single function.", + "wip: Tests that are work-in-progress.", ] -norecursedirs = ["docs", ".envs"] - +norecursedirs = [ "docs", ".envs" ] # ====================================================================================== # yamlfix configuration @@ -325,5 +258,5 @@ norecursedirs = ["docs", ".envs"] [tool.yamlfix] line_length = 88 -sequence_style = "block_style" none_representation = "null" +sequence_style = "block_style" diff --git a/src/skillmodels/__init__.py b/src/skillmodels/__init__.py index e55229ad..0b15d218 100644 --- a/src/skillmodels/__init__.py +++ b/src/skillmodels/__init__.py @@ -1,12 +1,28 @@ +"""Skillmodels: A Python package for estimating latent factor models.""" + import contextlib -try: +with contextlib.suppress(ImportError): import pdbp # noqa: F401 -except ImportError: - contextlib.suppress(Exception) from skillmodels.filtered_states import get_filtered_states from skillmodels.maximization_inputs import get_maximization_inputs +from skillmodels.model_spec import ( + AnchoringSpec, + EstimationOptionsSpec, + FactorSpec, + ModelSpec, + Normalizations, +) from skillmodels.simulate_data import simulate_dataset -__all__ = ["get_filtered_states", "get_maximization_inputs", "simulate_dataset"] +__all__ = [ + "AnchoringSpec", + "EstimationOptionsSpec", + "FactorSpec", + "ModelSpec", + "Normalizations", + "get_filtered_states", + "get_maximization_inputs", + "simulate_dataset", +] diff --git a/src/skillmodels/check_model.py b/src/skillmodels/check_model.py index 41dc1e4c..18f85af7 100644 --- 
a/src/skillmodels/check_model.py +++ b/src/skillmodels/check_model.py @@ -1,61 +1,83 @@ +"""Functions to validate model specifications.""" + +from collections.abc import Mapping + import numpy as np +from skillmodels.model_spec import ModelSpec +from skillmodels.types import Anchoring, Dimensions, Labels -def check_model(model_dict, labels, dimensions, anchoring, has_endogenous_factors): + +def check_model( + model_spec: ModelSpec, + labels: Labels, + dimensions: Dimensions, + anchoring: Anchoring, + *, + has_endogenous_factors: bool, +) -> None: """Check consistency and validity of the model specification. labels, dimensions and anchoring information are done before the model checking because processing them will not raise any errors except for easy to understand KeyErrors. - Other specifications are checked in the model dict before processing to make sure + Other specifications are checked in the model spec before processing to make sure that the assumptions we make during the processing are fulfilled. Args: - model_dict (dict): The model specification. See: :ref:`model_specs` - dimensions (dict): Dimensional information like n_states, n_periods, n_controls, - n_mixtures. See :ref:`dimensions`. - labels (dict): Dict of lists with labels for the model quantities like - factors, periods, controls, stagemap and stages. See :ref:`labels` - anchoring (dict): Dictionary with information about anchoring. - See :ref:`anchoring` - has_endogenous_factors (bool): Whether the model has any endogenous factors + model_spec: The model specification. See: :ref:`model_specs` + dimensions: Dimensional information. + labels: Labels for model quantities. + anchoring: Information about anchoring. 
+ has_endogenous_factors: Whether the model has any endogenous factors Raises: ValueError """ report = check_stagemap( - stagemap=labels["aug_stagemap"], - stages=labels["aug_stages"], - n_periods=dimensions["n_aug_periods"], + stagemap=labels.aug_stagemap, + stages=labels.aug_stages, + n_periods=dimensions.n_aug_periods, is_augmented=has_endogenous_factors, ) report += _check_anchoring(anchoring) - invalid_measurements = _check_measurements(model_dict, labels["latent_factors"]) + invalid_measurements = _check_measurements( + model_spec=model_spec, factors=labels.latent_factors + ) if invalid_measurements: report += invalid_measurements elif has_endogenous_factors: # Make this conditional because the check only works for valid meas. report += _check_no_overlap_in_measurements_of_states_and_inv( - model_dict, labels + model_spec=model_spec, labels=labels ) - report += _check_normalizations(model_dict, labels["latent_factors"]) + report += _check_normalizations( + model_spec=model_spec, factors=labels.latent_factors + ) report = "\n".join(report) if report != "": raise ValueError(f"Invalid model specification: {report}") -def check_stagemap(stagemap, stages, n_periods, is_augmented): - report = [] +def check_stagemap( + stagemap: tuple[int, ...], + stages: tuple[int, ...], + n_periods: int, + *, + is_augmented: bool, +) -> list[str]: + """Validate the stagemap configuration against model dimensions.""" + report: list[str] = [] step_size = 2 if is_augmented else 1 if len(stagemap) != n_periods - step_size: report.append( f"The stagemap needs to be of length n_periods - {step_size}. " f" n_periods is {n_periods}, the stagemap has length {len(stagemap)}.", ) - if stages != list(range(len(stages))): + if stages != tuple(range(len(stages))): report.append("Stages need to be integers, start at zero and increase by 1.") # Hijacking the stagemap for endogenous factors leads to interleaved elements. 
@@ -68,34 +90,39 @@ def check_stagemap(stagemap, stages, n_periods, is_augmented): return report -def _check_anchoring(anchoring): +def _check_anchoring(anchoring: Anchoring) -> list[str]: report = [] - if not isinstance(anchoring["anchoring"], bool): - report.append("anchoring['anchoring'] must be a bool.") - if not isinstance(anchoring["outcomes"], dict): - report.append("anchoring['outcomes'] must be a dict") + if not isinstance(anchoring.anchoring, bool): + report.append("anchoring.anchoring must be a bool.") + + if not isinstance(anchoring.outcomes, Mapping): + report.append("anchoring.outcomes must be a Mapping") else: - variables = list(anchoring["outcomes"].values()) + variables = list(anchoring.outcomes.values()) for var in variables: if not isinstance(var, str | int | tuple): report.append("Outcomes variables have to be valid variable names.") - if not isinstance(anchoring["free_controls"], bool): - report.append("anchoring['use_controls'] must be a bool") - if not isinstance(anchoring["free_constant"], bool): - report.append("anchoring['use_constant'] must be a bool.") - if not isinstance(anchoring["free_loadings"], bool): - report.append("anchoring['free_loadings'] must be a bool.") + if not isinstance(anchoring.free_controls, bool): + report.append("anchoring.free_controls must be a bool") + if not isinstance(anchoring.free_constant, bool): + report.append("anchoring.free_constant must be a bool.") + if not isinstance(anchoring.free_loadings, bool): + report.append("anchoring.free_loadings must be a bool.") return report -def _check_measurements(model_dict, factors): - report = [] +def _check_measurements( + model_spec: ModelSpec, + factors: tuple[str, ...], +) -> list[str]: + report: list[str] = [] for factor in factors: - candidate = model_dict["factors"][factor]["measurements"] - if not _is_list_of(candidate, list): + candidate = model_spec.factors[factor].measurements + if not _is_sequence_of(candidate=candidate, type_=tuple): report.append( - 
f"measurements must be lists of lists. Check measurements of {factor}.", + "measurements must be tuples of tuples. " + f"Check measurements of {factor}.", ) else: for period, meas_list in enumerate(candidate): @@ -108,16 +135,18 @@ def _check_measurements(model_dict, factors): return report -def _check_no_overlap_in_measurements_of_states_and_inv(model_dict, labels): +def _check_no_overlap_in_measurements_of_states_and_inv( + model_spec: ModelSpec, labels: Labels +) -> list[str]: report = [] - for period in labels["periods"]: - meas = {} - for factor in labels["latent_factors"]: - props = model_dict["factors"][factor] - if props.get("is_endogenous", False): - meas["endogenous_factors"] = set(props["measurements"][period]) + for period in labels.periods: + meas: dict[str, set] = {} + for factor in labels.latent_factors: + fspec = model_spec.factors[factor] + if fspec.is_endogenous: + meas["endogenous_factors"] = set(fspec.measurements[period]) else: - meas["states"] = set(props["measurements"][period]) + meas["states"] = set(fspec.measurements[period]) if overlap := meas["states"].intersection(meas["endogenous_factors"]): report.append( "Measurements for exogenous and endogenous latent factors must not " @@ -126,37 +155,47 @@ def _check_no_overlap_in_measurements_of_states_and_inv(model_dict, labels): return report -def _check_normalizations(model_dict, factors): - report = [] +def _check_normalizations( + model_spec: ModelSpec, + factors: tuple[str, ...], +) -> list[str]: + report: list[str] = [] for factor in factors: - norminfo = model_dict["factors"][factor].get("normalizations", {}) + fspec = model_spec.factors[factor] + if fspec.normalizations is None: + continue for norm_type in ["loadings", "intercepts"]: - candidate = norminfo.get(norm_type, []) - if not _is_list_of(candidate, dict): + norms = getattr(fspec.normalizations, norm_type) + candidate = [dict(m) for m in norms] + if not _is_sequence_of(candidate=candidate, type_=dict): report.append( - 
f"normalizations must be lists of dicts. Check {norm_type} " + f"normalizations must be sequences of dicts. Check {norm_type} " f"normalizations for {factor}.", ) else: report += _check_normalized_variables_are_present( - candidate, - model_dict, - factor, + list_of_normdicts=candidate, + model_spec=model_spec, + factor=factor, ) if norm_type == "loadings": report += _check_loadings_are_not_normalized_to_zero( - candidate, - factor, + list_of_normdicts=candidate, + factor=factor, ) return report -def _check_normalized_variables_are_present(list_of_normdicts, model_dict, factor): - report = [] +def _check_normalized_variables_are_present( + list_of_normdicts: list[dict], + model_spec: ModelSpec, + factor: str, +) -> list[str]: + report: list[str] = [] for period, norm_dict in enumerate(list_of_normdicts): for var in norm_dict: - if var not in model_dict["factors"][factor]["measurements"][period]: + if var not in model_spec.factors[factor].measurements[period]: report.append( "You can only normalize variables that are specified as " f"measurements. Check {var} for {factor} in period " @@ -166,8 +205,11 @@ def _check_normalized_variables_are_present(list_of_normdicts, model_dict, facto return report -def _check_loadings_are_not_normalized_to_zero(list_of_normdicts, factor): - report = [] +def _check_loadings_are_not_normalized_to_zero( + list_of_normdicts: list[dict], + factor: str, +) -> list[str]: + report: list[str] = [] for period, norm_dict in enumerate(list_of_normdicts): for var, val in norm_dict.items(): if val == 0: @@ -178,19 +220,22 @@ def _check_loadings_are_not_normalized_to_zero(list_of_normdicts, factor): return report -def _is_list_of(candidate, type_): - """Check if candidate is a list that only contains elements of type. +def _is_sequence_of(candidate: object, type_: type) -> bool: + """Check if candidate is a sequence that only contains elements of type. 
- Note that this is always falls if candidate is not a list and always true if - it is an empty list. + Works with both lists and tuples. Examples: - >>> _is_list_of([["a"], ["b"]], list) + >>> _is_sequence_of([["a"], ["b"]], list) + True + >>> _is_sequence_of((("a",), ("b",)), tuple) True - >>> _is_list_of([{}], list) + >>> _is_sequence_of([{}], list) False - >>> _is_list_of([], dict) + >>> _is_sequence_of([], dict) True """ - return isinstance(candidate, list) and all(isinstance(i, type_) for i in candidate) + return isinstance(candidate, list | tuple) and all( + isinstance(i, type_) for i in candidate + ) diff --git a/src/skillmodels/clipping.py b/src/skillmodels/clipping.py index 6c327997..ce8dd000 100644 --- a/src/skillmodels/clipping.py +++ b/src/skillmodels/clipping.py @@ -1,8 +1,17 @@ +"""Soft clipping utilities for constraining values to bounded ranges.""" + import jax import jax.numpy as jnp +from jax import Array -def soft_clipping(arr, lower=None, upper=None, lower_hardness=1, upper_hardness=1): +def soft_clipping( + arr: Array, + lower: float | None = None, + upper: float | None = None, + lower_hardness: float = 1, + upper_hardness: float = 1, +) -> Array: """Clip values in an array elementwise using a soft maximum to avoid kinks. Clipping from below is taking a maximum between two values. Clipping @@ -19,14 +28,13 @@ def soft_clipping(arr, lower=None, upper=None, lower_hardness=1, upper_hardness= ``scipy.special.logsumexp``. ``scipy.special.softmax`` is the gradient of ``scipy.special.logsumexp``. - Args: - arr (jax.numpy.array): Array that is clipped elementwise. - lower (float): The value at which the array is clipped from below. - upper (float): The value at which the array is clipped from above. - lower_hardness (float): Scaling factor that is applied inside the soft maximum. + arr: Array that is clipped elementwise. + lower: The value at which the array is clipped from below. + upper: The value at which the array is clipped from above. 
+ lower_hardness: Scaling factor that is applied inside the soft maximum. High values imply a closer approximation of the real maximum. - upper_hardness (float): Scaling factor that is applied inside the soft maximum. + upper_hardness: Scaling factor that is applied inside the soft maximum. High values imply a closer approximation of the real maximum. """ diff --git a/src/skillmodels/config.py b/src/skillmodels/config.py index ddc66a44..cd7eb32b 100644 --- a/src/skillmodels/config.py +++ b/src/skillmodels/config.py @@ -1,3 +1,8 @@ +"""Configuration constants and paths for skillmodels.""" + from pathlib import Path -TEST_DIR = Path(__file__).resolve().parent / "tests" +TEST_DATA_DIR = Path(__file__).resolve().parent / "test_data" +REGRESSION_VAULT = ( + Path(__file__).resolve().parent.parent.parent / "tests" / "regression_vault" +) diff --git a/src/skillmodels/constraints.py b/src/skillmodels/constraints.py index 2fb1c704..bbb60283 100644 --- a/src/skillmodels/constraints.py +++ b/src/skillmodels/constraints.py @@ -10,31 +10,38 @@ import pandas as pd import skillmodels.transition_functions as t_f_module +from skillmodels.types import ( + Anchoring, + Dimensions, + EndogenousFactorsInfo, + Labels, + MeasurementType, +) def get_constraints_dicts( - dimensions, - labels, - anchoring_info, - update_info, - normalizations, - endogenous_factors_info, + dimensions: Dimensions, + labels: Labels, + anchoring_info: Anchoring, + update_info: pd.DataFrame, + normalizations: dict[str, dict[str, list]], + endogenous_factors_info: EndogenousFactorsInfo, ) -> list[dict]: """Generate constraints implied by the model specification. The result can easily be converted to optimagic-style constraints. Args: - model_dict (dict): The model specification. See: :ref:`model_specs` - dimensions (dict): Dimensional information like n_states, n_periods, n_controls, + dimensions: Dimensional information like n_states, n_periods, n_controls, n_mixtures. See :ref:`dimensions`. 
- labels (dict): Dict of lists with labels for the model quantities like + labels: Dict of lists with labels for the model quantities like factors, periods, controls, stagemap and stages. See :ref:`labels` - anchoring (dict): Information about anchoring. See :ref:`anchoring` - update_info (pandas.DataFrame): DataFrame with one row per Kalman update needed + anchoring_info: Information about anchoring. See :ref:`anchoring` + update_info: DataFrame with one row per Kalman update needed in the likelihood function. See :ref:`update_info`. - normalizations (dict): Nested dictionary with information on normalized factor + normalizations: Nested dictionary with information on normalized factor loadings and intercepts for each factor. See :ref:`normalizations`. + endogenous_factors_info: Information about endogenous factors in the model. Returns: A list of constraints dictionaries with entries: @@ -49,26 +56,26 @@ def get_constraints_dicts( constraints_dicts = [] constraints_dicts += _get_normalization_constraints( - normalizations, labels["latent_factors"] + normalizations=normalizations, factors=labels.latent_factors ) - constraints_dicts += _get_mixture_weights_constraints(dimensions["n_mixtures"]) + constraints_dicts += _get_mixture_weights_constraints(dimensions.n_mixtures) constraints_dicts += _get_stage_constraints( - stagemap=labels["aug_stagemap"], - stages=labels["aug_stages"], + stagemap=labels.aug_stagemap, + stages=labels.aug_stages, ) constraints_dicts += _get_constant_factors_constraints(labels=labels) constraints_dicts += _get_initial_states_constraints( - n_mixtures=dimensions["n_mixtures"], - factors=labels["latent_factors"], + n_mixtures=dimensions.n_mixtures, + factors=labels.latent_factors, ) constraints_dicts += _get_transition_constraints(labels=labels) constraints_dicts += _get_anchoring_constraints( update_info=update_info, - controls=labels["controls"], + controls=labels.controls, anchoring_info=anchoring_info, - periods=labels["aug_periods"], + 
periods=labels.aug_periods, ) - if endogenous_factors_info["has_endogenous_factors"]: + if endogenous_factors_info.has_endogenous_factors: constraints_dicts += _get_constraints_for_augmented_periods( labels=labels, endogenous_factors_info=endogenous_factors_info, @@ -116,7 +123,7 @@ def add_bounds(params: pd.DataFrame, bounds_distance: float) -> pd.DataFrame: return df -def _is_diagonal_entry(ind_tup): +def _is_diagonal_entry(ind_tup: tuple[str, ...]) -> bool: name2 = ind_tup[-1] middle_pos = int(len(name2) // 2) if ( @@ -130,12 +137,16 @@ def _is_diagonal_entry(ind_tup): return is_diag -def _get_normalization_constraints(normalizations, factors) -> list[dict]: +def _get_normalization_constraints( + normalizations: dict[str, dict[str, list]], + factors: tuple[str, ...], +) -> list[dict]: """List of constraints to enforce normalizations. Args: - normalizations (dict): Nested dictionary with information on normalized factor - loadings and intercepts for each factor. See :ref:`normalizations`. + normalizations: Nested dictionary with information on normalized factor + loadings and intercepts for each factor. See :ref:`normalizations`. + factors: Tuple of factor names to process. Returns: constraints_dicts @@ -171,7 +182,7 @@ def _get_normalization_constraints(normalizations, factors) -> list[dict]: return constraints_dicts -def _get_mixture_weights_constraints(n_mixtures) -> list[dict]: +def _get_mixture_weights_constraints(n_mixtures: int) -> list[dict]: """Constrain mixture weights to be between 0 and 1 and sum to 1.""" if n_mixtures == 1: msg = "Set the mixture weight to 1 if there is only one mixture element." @@ -191,12 +202,15 @@ def _get_mixture_weights_constraints(n_mixtures) -> list[dict]: return constraints_dicts -def _get_stage_constraints(stagemap, stages) -> list[dict]: +def _get_stage_constraints( + stagemap: tuple[int, ...], + stages: tuple[int, ...], +) -> list[dict]: """Equality constraints for transition and shock parameters within stages. 
Args: - stagemap (list): map aug_periods to aug_stages - stages (list): aug_stages + stagemap: map aug_periods to aug_stages + stages: aug_stages Returns: constraints_dicts @@ -232,11 +246,11 @@ def _get_stage_constraints(stagemap, stages) -> list[dict]: return constraints_dicts -def _get_constant_factors_constraints(labels) -> list[dict]: +def _get_constant_factors_constraints(labels: Labels) -> list[dict]: """Fix shock variances of constant factors to `bounds_distance`. Args: - labels (dict): Dict of lists with labels for the model quantities like + labels: Dict of lists with labels for the model quantities like factors, periods, controls, stagemap and stages. See :ref:`labels` Returns: @@ -244,10 +258,10 @@ def _get_constant_factors_constraints(labels) -> list[dict]: """ constraints_dicts = [] - for f, factor in enumerate(labels["latent_factors"]): - if labels["transition_names"][f] == "constant": + for f, factor in enumerate(labels.latent_factors): + if labels.transition_names[f] == "constant": msg = f"This constraint was generated because {factor} is constant." - for aug_period in labels["aug_periods"][:-1]: + for aug_period in labels.aug_periods[:-1]: constraints_dicts.append( { "loc": ("shock_sds", aug_period, factor, "-"), @@ -259,14 +273,17 @@ def _get_constant_factors_constraints(labels) -> list[dict]: return constraints_dicts -def _get_initial_states_constraints(n_mixtures, factors) -> list[dict]: +def _get_initial_states_constraints( + n_mixtures: int, + factors: tuple[str, ...], +) -> list[dict]: """Enforce that the x values of the first factor are increasing. Otherwise the model would only be identified up to the order of the start factors. Args: - n_mixtures (int): number of elements in the mixture of normal of the factors. - factors (list): the latent factors of the model + n_mixtures: number of elements in the mixture of normal of the factors. 
+ factors: the latent factors of the model Returns: constraints_dicts @@ -290,11 +307,11 @@ def _get_initial_states_constraints(n_mixtures, factors) -> list[dict]: return constraints_dicts -def _get_transition_constraints(labels) -> list[dict]: +def _get_transition_constraints(labels: Labels) -> list[dict]: """Collect possible constraints on transition parameters. Args: - labels (dict): Dict of lists with labels for the model quantities like + labels: Dict of lists with labels for the model quantities like factors, periods, controls, stagemap and stages. See :ref:`labels` Returns: @@ -302,14 +319,14 @@ def _get_transition_constraints(labels) -> list[dict]: """ constraints_dicts = [] - for f, factor in enumerate(labels["latent_factors"]): - tname = labels["transition_names"][f] + for f, factor in enumerate(labels.latent_factors): + tname = labels.transition_names[f] msg = f"This constraint is inherent to the {tname} production function." - for aug_period in labels["aug_periods"][:-1]: + for aug_period in labels.aug_periods[:-1]: funcname = f"constraints_{tname}" if func := getattr(t_f_module, funcname, False): c = func( # ty: ignore[call-non-callable] - factor=factor, factors=labels["all_factors"], aug_period=aug_period + factor=factor, factors=labels.all_factors, aug_period=aug_period ) if "description" not in c: c["description"] = msg @@ -318,16 +335,19 @@ def _get_transition_constraints(labels) -> list[dict]: def _get_anchoring_constraints( - update_info, controls, anchoring_info, periods + update_info: pd.DataFrame, + controls: tuple[str, ...], + anchoring_info: Anchoring, + periods: tuple[int, ...], ) -> list[dict]: """Constraints on anchoring parameters. Args: - update_info (pandas.DataFrame): DataFrame with one row per Kalman update needed + update_info: DataFrame with one row per Kalman update needed in the likelihood function. See :ref:`update_info`. - controls (list): List of control variables - anchoring_info (dict): Information about anchoring. 
See :ref:`anchoring` - periods (list): Period of the model + controls: List of control variables + anchoring_info: Information about anchoring. See :ref:`anchoring` + periods: Period of the model Returns: constraints_dicts @@ -336,7 +356,7 @@ def _get_anchoring_constraints( anchoring_updates = update_info[update_info["purpose"] == "anchoring"].index constraints_dicts = [] - if not anchoring_info["free_constant"]: + if not anchoring_info.free_constant: msg = ( "This constraint was generated because free_constant in the anchoring " "section of the model specification is set to False." @@ -348,7 +368,7 @@ def _get_anchoring_constraints( {"loc": locs, "type": "fixed", "value": 0, "description": msg}, ) - if not anchoring_info["free_controls"]: + if not anchoring_info.free_controls: msg = ( "This constraint was generated because free_controls in the anchoring " "section of the model specification is set to False." @@ -361,15 +381,15 @@ def _get_anchoring_constraints( {"loc": ind_tups, "type": "fixed", "value": 0, "description": msg}, ) - if not anchoring_info["free_loadings"]: + if not anchoring_info.free_loadings: msg = ( "This constraint was generated because free_loadings in the anchoring " "section of the model specification is set to False." 
) ind_tups = [] for period in periods: - for factor in anchoring_info["factors"]: - outcome = anchoring_info["outcomes"][factor] + for factor in anchoring_info.factors: + outcome = anchoring_info.outcomes[factor] meas = f"{outcome}_{factor}" ind_tups.append(("loadings", period, meas, factor)) @@ -377,13 +397,12 @@ def _get_anchoring_constraints( {"loc": ind_tups, "type": "fixed", "value": 1, "description": msg}, ) - constraints_dicts = [c for c in constraints_dicts if c["loc"] != []] - - return constraints_dicts + return [c for c in constraints_dicts if c["loc"] != []] def _get_constraints_for_augmented_periods( - labels, endogenous_factors_info + labels: Labels, + endogenous_factors_info: EndogenousFactorsInfo, ) -> list[dict]: """Constraints for augmented periods. @@ -394,30 +413,33 @@ def _get_constraints_for_augmented_periods( Both depend on the transition function. Args: - labels (dict): Dict of lists with labels for the model quantities like + labels: Dict of lists with labels for the model quantities like factors, periods, controls, stagemap and stages. See :ref:`labels` + endogenous_factors_info: Information about endogenous factors and their + relationship to augmented periods. Returns: constraints_dicts """ constraints_dicts = [] - for f, factor in enumerate(labels["latent_factors"]): - tname = labels["transition_names"][f] + for f, factor in enumerate(labels.latent_factors): + tname = labels.transition_names[f] if tname == "constant": continue # We are restricting transitions and shocks, not measurements. So this might # look counterintuitive... 
aug_period_meas_type_to_constrain = ( - "states" - if endogenous_factors_info[factor]["is_state"] - else "endogenous_factors" + MeasurementType.STATES + if endogenous_factors_info.factor_info[factor].is_state + else MeasurementType.ENDOGENOUS_FACTORS + ) + aug_period_meas_types = ( + endogenous_factors_info.aug_periods_to_aug_period_meas_types ) aug_periods_to_constrain = [ k - for k, v in endogenous_factors_info[ - "aug_periods_to_aug_period_meas_types" - ].items() + for k, v in aug_period_meas_types.items() if v == aug_period_meas_type_to_constrain ] for aug_period in aug_periods_to_constrain: @@ -425,14 +447,14 @@ def _get_constraints_for_augmented_periods( constraints_dicts += func( # ty: ignore[call-non-callable] factor=factor, aug_period=aug_period, - all_factors=labels["all_factors"], + all_factors=labels.all_factors, ) for aug_period in aug_periods_to_constrain[:-1]: constraints_dicts.append( { "loc": ("shock_sds", aug_period, factor, "-"), "type": "fixed", - "value": endogenous_factors_info["bounds_distance"], + "value": endogenous_factors_info.bounds_distance, "description": "Identity constraint.", } ) @@ -440,7 +462,7 @@ def _get_constraints_for_augmented_periods( return constraints_dicts -def _sel(params, loc): +def _sel(params: pd.DataFrame, loc: Any) -> pd.DataFrame: return params.loc[loc] @@ -516,7 +538,7 @@ def constraints_dicts_to_om( """Convert constraints provided in dictionary form to optimagic constraints. Args: - constraints_dicts (list): see :ref:`get_constraints_dicts`. + constraints_dicts: see :ref:`get_constraints_dicts`. Returns: List of optimagic constraints. @@ -555,8 +577,8 @@ def enforce_fixed_constraints( This means that any robust bounds will be overridden for fixed parameters. Args: - params_template (pd.DataFrame): see :ref:`params_df`. - constraints_dicts (list): see :ref:`get_constraints_dicts`. + params_template: see :ref:`params_df`. + constraints_dicts: see :ref:`get_constraints_dicts`. 
Returns: pd.DataFrame: modified copy of params_template diff --git a/src/skillmodels/correlation_heatmap.py b/src/skillmodels/correlation_heatmap.py index 9ea5d12c..b1f98d89 100644 --- a/src/skillmodels/correlation_heatmap.py +++ b/src/skillmodels/correlation_heatmap.py @@ -1,60 +1,70 @@ +"""Functions for creating correlation heatmap visualizations.""" + +from typing import Any + import numpy as np import pandas as pd +from numpy.typing import NDArray from plotly import graph_objects as go +from skillmodels.model_spec import ModelSpec from skillmodels.process_data import pre_process_data from skillmodels.process_model import process_model +from skillmodels.types import ProcessedModel def plot_correlation_heatmap( - corr, - heatmap_kwargs=None, - layout_kwargs=None, - rounding=2, - zmax=None, - zmin=None, - zmid=None, - colorscale="RdBu_r", - show_color_bar=True, - show_diagonal=True, - show_upper_triangle=True, - trim_heatmap=False, - annotate=True, - annotation_fontsize=13, - annotation_text_color="black", - annotation_text_angle=0, - axes_tick_fontsize=(12, 12), - axes_tick_label_angle=(90, 0), - axes_tick_label_color=("black", "black"), -): + corr: pd.DataFrame, + heatmap_kwargs: dict[str, Any] | None = None, + layout_kwargs: dict[str, Any] | None = None, + rounding: int = 2, + zmax: float | None = None, + zmin: float | None = None, + zmid: float | None = None, + colorscale: str = "RdBu_r", + *, + show_color_bar: bool = True, + show_diagonal: bool = True, + show_upper_triangle: bool = True, + trim_heatmap: bool = False, + annotate: bool = True, + annotation_fontsize: int = 13, + annotation_text_color: str = "black", + annotation_text_angle: float = 0, + axes_tick_fontsize: tuple[int, int] = (12, 12), + axes_tick_label_angle: tuple[float, float] = (90, 0), + axes_tick_label_color: tuple[str, str] = ("black", "black"), +) -> go.Figure: """Plot correlation heatmaps for factor measurements. 
Args: - corr (DataFrame): Data frame of measurement or factor score correlations. - heatmap_kwargs (dct): Dictionary of key word arguments to pass to go.Heatmap (). + corr: Data frame of measurement or factor score correlations. + heatmap_kwargs: Dictionary of key word arguments to pass to go.Heatmap (). If None, the default kwargs defined in the function will be used. - layout_kwargs (dct): Dictionary of key word arguments used to update layout of + layout_kwargs: Dictionary of key word arguments used to update layout of go.Figure object. If None, the default kwargs defined in the function will be used. Through layout_kwargs, you can edit figure properties such as - template - title - figsize - rounding (int): Number of digits after the decimal point to round the + rounding: Number of digits after the decimal point to round the correlation values to. Default 2. - zmax (float ot NoneType): Upper bound to set on correlation color map. If None, + zmax: Upper bound to set on correlation color map. If None, is set to maximum absolute correlation value. - zmin (float or NoneType): Lower bound to set on correlation color map. If None, + zmin: Lower bound to set on correlation color map. If None, is set to -zmax. - zmid (float or NoneType): Midpoint to set on correlation color map. If None, + zmid: Midpoint to set on correlation color map. If None, is set to 0. - colorscale (str): Name of the color palette to use in the heatmap. + colorscale: Name of the color palette to use in the heatmap. Default 'RdBu_r'. - show_color_bar (bool): A boolean variable for displaying heatmap colorbar. + show_color_bar: A boolean variable for displaying heatmap colorbar. Default True. - show_diagonal (bool): A boolean for displaying the correlations on the diagonal. + show_diagonal: A boolean for displaying the correlations on the diagonal. Default False. 
- show_upper_triangle (bool): A boolean for displaying upper triangular part + show_upper_triangle: A boolean for displaying upper triangular part of the correlation heatmap. Default False. + trim_heatmap: If True, trim empty rows/columns from the heatmap. + Default False. The following arguments are processed into dictionaries or special plotly objects and passed to layout_kwargs. Defining them as additional arguments @@ -67,38 +77,38 @@ def plot_correlation_heatmap( defined in layout_kwargs will overwrite values passed via the individual arguments. - annotate (bool): If True, annotate the heatmap figure with correlation values. + annotate: If True, annotate the heatmap figure with correlation values. Default False. - annotation_font_size (int): Font size of the annotation text. Default 13. - annotation_font_color (str): Collor of the annotation text. Default 'black'. - annotation_text_angle (float): The angle at which to rotate annotation text. + annotation_fontsize: Font size of the annotation text. Default 13. + annotation_text_color: Color of the annotation text. Default 'black'. + annotation_text_angle: The angle at which to rotate annotation text. Default 0. - axes_tick_fontsize (list, tuple, other iterable or dict): Fontsize of axes + axes_tick_fontsize: Fontsize of axes ticks. Default (12,12) - axes_tick_label_angle (list, tuple, other iterable or dict): Rotation angles of + axes_tick_label_angle: Rotation angles of axes tick labels. Default (90,0). - axes_tick_label_color (list, tuple, other iterable or dict): Colors of the axes + axes_tick_label_color: Colors of the axes tick labels. Default ('black', 'black'). Returns: - fig (plotly graph object): The figure with correlaiton heatmap. + fig: The figure with correlaiton heatmap. 
""" corr = _process_corr_data_for_plotting( - corr, - rounding, - show_upper_triangle, - show_diagonal, - trim_heatmap, + corr=corr, + rounding=rounding, + show_upper_triangle=show_upper_triangle, + show_diagonal=show_diagonal, + trim_heatmap=trim_heatmap, ) heatmap_kwargs = _get_heatmap_kwargs( - corr, - heatmap_kwargs, - colorscale, - show_color_bar, - zmax, - zmin, - zmid, + corr=corr, + heatmap_kwargs=heatmap_kwargs, + colorscale=colorscale, + show_color_bar=show_color_bar, + zmax=zmax, + zmin=zmin, + zmid=zmid, ) layout_kwargs = _get_layout_kwargs( corr=corr, @@ -122,33 +132,39 @@ def plot_correlation_heatmap( return fig -def get_measurements_corr(data, model_dict, factors, periods): +def get_measurements_corr( + data: pd.DataFrame, + model_spec: ModelSpec, + factors: list[str] | tuple[str, ...] | str | None, + periods: float | list[int] | None, +) -> pd.DataFrame: """Get data frame with measurement correlations. Process data to retrieve measurements for each period and calculate correlations across period specific measurements. Args: - data (pd.DataFrame): DataFrame with observed measurements. - model_dict (dct): Dictionary of model attributes to be passed to process_model - and extract measurements for each period. - factors (list, str or NoneType): List of factors, to retrieve measurements for. + data: The observed measurements. + model_spec: The model specification. See: :ref:`model_specs` + factors: Factors to retrieve measurements for. If None, then calculate correlations of measurements of all factors. - periods (int, float, list or NoneType): If int, the period within which to + periods: If int, the period within which to calculate measurement correlations. If a list, calculate correlations over periods. If None, calculate correlations across all periods. Note: Periods - refer to originl periods, not the augmented periods. + refer to original periods, not the augmented periods. Returns: - corr (DataFrame): DataFrame with measurement correlations. 
+ corr: Measurement correlations. """ data = data.copy(deep=True) - model = process_model(model_dict) - periods = _process_periods(periods, model) - processed_data = pre_process_data(data, periods) - latent_factors, observed_factors = _process_factors(model, factors) - update_info_by_period = _get_update_info_for_periods(model) + processed_model = process_model(model_spec) + periods = _process_periods(periods=periods, model=processed_model) + processed_data = pre_process_data(df=data, periods=periods) + latent_factors, observed_factors = _process_factors( + model=processed_model, factors=factors + ) + update_info_by_period = _get_update_info_for_periods(processed_model) df = _get_measurement_data( data=processed_data, update_info_by_period=update_info_by_period, @@ -156,11 +172,15 @@ def get_measurements_corr(data, model_dict, factors, periods): latent_factors=latent_factors, observed_factors=observed_factors, ) - corr = df.corr() - return corr + return df.corr() -def get_quasi_scores_corr(data, model_dict, factors, periods): +def get_quasi_scores_corr( + data: pd.DataFrame, + model_spec: ModelSpec, + factors: list[str] | tuple[str, ...] | str | None, + periods: float | list[int] | None, +) -> pd.DataFrame: """Get data frame with correlations of factor scores. Process data to retrieve measurements for each period, standardize measurements @@ -171,25 +191,26 @@ def get_quasi_scores_corr(data, model_dict, factors, periods): The calculated scores coincide with factor scores for linear models. Args: - data (pd.DataFrame): DataFrame with observed measurements. - model_dict (dct): Dictionary of model attributes to be passed to process_model - and extract measurements for each period. - factors (list, str or NoneType): List of factors, to retrieve measurements for. + data: The observed measurements. + model_spec: The model specification. See: :ref:`model_specs` + factors: Factors to retrieve measurements for. 
If None, then calculate correlations of measurements of all factors. - periods (int,float, list or NoneType): If int, the period within which to + periods: If int, the period within which to calculate measurement correlations. If a list, calculate correlations over periods. If None, calculate correlations across all periods. Returns: - corr (DataFrame): DataFrame with score correlations. + corr: Score correlations. """ data = data.copy(deep=True) - model = process_model(model_dict) - periods = _process_periods(periods, model) - processed_data = pre_process_data(data, periods) - latent_factors, observed_factors = _process_factors(model, factors) - update_info = _get_update_info_for_periods(model) + processed_model = process_model(model_spec) + periods = _process_periods(periods=periods, model=processed_model) + processed_data = pre_process_data(df=data, periods=periods) + latent_factors, observed_factors = _process_factors( + model=processed_model, factors=factors + ) + update_info = _get_update_info_for_periods(processed_model) df = _get_quasi_factor_scores_data( data=processed_data, update_info_by_period=update_info, @@ -197,11 +218,16 @@ def get_quasi_scores_corr(data, model_dict, factors, periods): latent_factors=latent_factors, observed_factors=observed_factors, ) - corr = df.corr() - return corr + return df.corr() -def get_scores_corr(data, params, model_dict, factors, periods): +def get_scores_corr( + data: pd.DataFrame, + params: pd.DataFrame, + model_spec: ModelSpec, + factors: list[str] | tuple[str, ...] | str | None, + periods: float | list[int] | None, +) -> pd.DataFrame: """Get data frame with correlations of factor scores. Process data to retrieve measurements for each period, standardize measurements @@ -210,47 +236,50 @@ def get_scores_corr(data, params, model_dict, factors, periods): scores. Args: - data (pd.DataFrame): DataFrame with observed measurements. 
- params (pd.DataFrame): DataFrame with estimated model parameters - model_dict (dct): Dictionary of model attributes to be passed to process_model - and extract measurements for each period. - factors (list, str or NoneType): List of factors, to retrieve measurements for. + data: The observed measurements. + params: Estimated model parameters. + model_spec: The model specification. See: :ref:`model_specs` + factors: Factors to retrieve measurements for. If None, then calculate correlations of measurements of all factors. - periods (int,float, list or NoneType): If int, the period within which to + periods: If int, the period within which to calculate measurement correlations. If a list, calculate correlations over periods. If None, calculate correlations across all periods. Returns: - corr (DataFrame): DataFrame with score correlations. + corr: DataFrame with score correlations. """ data = data.copy(deep=True) - model = process_model(model_dict) - periods = _process_periods(periods, model) - processed_data = pre_process_data(data, periods) - latent_factors, observed_factors = _process_factors(model, factors) + processed_model = process_model(model_spec) + periods = _process_periods(periods=periods, model=processed_model) + processed_data = pre_process_data(df=data, periods=periods) + latent_factors, observed_factors = _process_factors( + model=processed_model, factors=factors + ) params = params.loc[["controls", "loadings"]] df = _get_factor_scores_data( data=processed_data, params=params, - model=model, + model=processed_model, periods=periods, latent_factors=latent_factors, observed_factors=observed_factors, ) - corr = df.corr() - return corr + return df.corr() def _process_corr_data_for_plotting( - corr, - rounding, - show_upper_triangle, - show_diagonal, - trim_heatmap, -): + corr: pd.DataFrame, + rounding: int, + *, + show_upper_triangle: bool, + show_diagonal: bool, + trim_heatmap: bool, +) -> pd.DataFrame: """Apply mask and rounding to correlation 
DataFrame.""" - mask = _get_mask(corr, show_upper_triangle, show_diagonal) + mask = _get_mask( + corr, show_upper_triangle=show_upper_triangle, show_diagonal=show_diagonal + ) corr = corr.where(mask).round(rounding) if trim_heatmap: keeprows = mask.any(axis=1) & corr.notna().any(axis="columns").to_numpy() @@ -262,7 +291,12 @@ def _process_corr_data_for_plotting( return corr -def _get_mask(corr, show_upper_triangle, show_diagonal): +def _get_mask( + corr: pd.DataFrame, + *, + show_upper_triangle: bool, + show_diagonal: bool, +) -> NDArray[np.bool_]: """Get array to mask the correlation DataFrame.""" mask = np.zeros_like(corr, dtype=bool) mask[np.tril_indices_from(mask, k=-1)] = True @@ -273,15 +307,15 @@ def _get_mask(corr, show_upper_triangle, show_diagonal): return mask -def _get_update_info_for_periods(model): +def _get_update_info_for_periods(model: ProcessedModel) -> pd.DataFrame: """Return update_info with user-provided periods instead of augmented periods.""" - update_info = model["update_info"].copy() + update_info = model.update_info.copy() # Replace period level with user-provided period using set_codes period_values = update_info.index.get_level_values("aug_period").map( - model["labels"]["aug_periods_to_periods"] + model.labels.aug_periods_to_periods ) - update_info.index = update_info.index.set_codes(period_values, level="aug_period") + update_info.index = update_info.index.set_codes(period_values, level="aug_period") # ty: ignore[unresolved-attribute] update_info.index = update_info.index.set_names(["period", "variable"]) # Group by period and variable, apply OR logic for boolean columns @@ -293,26 +327,30 @@ def _get_update_info_for_periods(model): def _get_measurement_data( - data, update_info_by_period, periods, latent_factors, observed_factors -): + data: pd.DataFrame, + update_info_by_period: pd.DataFrame, + periods: list[int], + latent_factors: list[str] | tuple[str, ...], + observed_factors: list[str] | tuple[str, ...], +) -> pd.DataFrame: 
"""Get data frame with factor measurements in each period, in wide format. For each factor, retrieve the data on measurements in each period and stack the data columns into a data frame. Args: - data (pd.DataFrame): Data with observable variables. - update_info (pd.DataFrame): DataFrame with information on measurements + data: Data with observable variables. + update_info_by_period: DataFrame with information on measurements for each factor in each model period. - periods (list): The list of periods that correlations are + periods: The list of periods that correlations are calculated for. - latent_factors (list): List of latent factors the measurements of which + latent_factors: List of latent factors the measurements of which correlations are calculated for. - observed_factors (list): List of observed factors the measurements of which + observed_factors: List of observed factors the measurements of which correlations are calculated for. Returns: - df (pd.DataFrame): Processed DataFrame to calculate correlations over. + df: Processed DataFrame to calculate correlations over. """ if len(periods) == 1: @@ -336,26 +374,26 @@ def _get_measurement_data( def _get_measurement_data_for_single_period( - data, - update_info_by_period, - period, - latent_factors, - observed_factors, -): + data: pd.DataFrame, + update_info_by_period: pd.DataFrame, + period: int, + latent_factors: list[str] | tuple[str, ...], + observed_factors: list[str] | tuple[str, ...], +) -> pd.DataFrame: """Extract measurements of factors for the given period. Args: - data (pd.DataFrame): Data with observable variables. - update_info (pd.DataFrame): DataFrame with information on measurements + data: Data with observable variables. + update_info_by_period: DataFrame with information on measurements for each factor in each model period. - periods (int or float): The period to extract measurements for. 
- latent_factors (list): List of latent factors the measurements of which + period: The period to extract measurements for. + latent_factors: List of latent factors the measurements of which correlations are calculated for. - observed_factors (list): List of observed factors the measurements of which + observed_factors: List of observed factors the measurements of which correlations are calculated for. Returns: - df (pd.DataFrame): DataFrame with measurements of factors for period 'period'. + df: DataFrame with measurements of factors for period 'period'. """ period_info = update_info_by_period.loc[period].reset_index() @@ -367,31 +405,30 @@ def _get_measurement_data_for_single_period( )["variable"].to_list() for fac in observed_factors: measurements.append(fac) - df = data.query(f"{update_info_by_period.index.names[0]}=={period}")[measurements] - return df + return data.query(f"{update_info_by_period.index.names[0]}=={period}")[measurements] def _get_measurement_data_for_multiple_periods( - data, - update_info_by_period, - periods, - latent_factors, - observed_factors, -): + data: pd.DataFrame, + update_info_by_period: pd.DataFrame, + periods: list[int], + latent_factors: list[str] | tuple[str, ...], + observed_factors: list[str] | tuple[str, ...], +) -> pd.DataFrame: """Extract measurements for factors for given periods. Args: - data (pd.DataFrame): Data with observable variables. - update_info_by_period (pd.DataFrame): DataFrame with information on measurements + data: Data with observable variables. + update_info_by_period: DataFrame with information on measurements for each factor in each user-provided period. - periods (list): The periods to extract measurements for. - latent_factors (list): List of latent factors the measurements of which + periods: The periods to extract measurements for. + latent_factors: List of latent factors the measurements of which correlations are calculated for. 
- observed_factors (list): List of observed factors the measurements of which + observed_factors: List of observed factors the measurements of which correlations are calculated for. Returns: - df (pd.DataFrame): DataFrame with measurements of factors in each period as + df: DataFrame with measurements of factors in each period as columns. """ @@ -408,17 +445,16 @@ def _get_measurement_data_for_multiple_periods( .add_suffix(f", {period}") .reset_index(drop=True), ) - df = pd.concat(to_concat, axis=1) - return df + return pd.concat(to_concat, axis=1) def _get_quasi_factor_scores_data( - data, - update_info_by_period, - periods, - latent_factors, - observed_factors, -): + data: pd.DataFrame, + update_info_by_period: pd.DataFrame, + periods: list[int], + latent_factors: list[str] | tuple[str, ...], + observed_factors: list[str] | tuple[str, ...], +) -> pd.DataFrame: """Get data frame with summary information on factor measurements in each period. In each period, standardize factor measurements to zero mean and unit standard @@ -427,63 +463,62 @@ def _get_quasi_factor_scores_data( models. Args: - data (pd.DataFrame): Data with observable variables. - update_info (pd.DataFrame): DataFrame with information on measurements + data: Data with observable variables. + update_info_by_period: DataFrame with information on measurements for each factor in each model period. - periods (list): The list of periods that correlations are + periods: The list of periods that correlations are calculated for. - latent_factors (list): List of latent factors the scores of which + latent_factors: List of latent factors the scores of which correlations are calculated for. - observed_factors (list): List of observed factors the scores of which + observed_factors: List of observed factors the scores of which correlations are calculated for. Returns: - df (pd.DataFrame): Processed DataFrame to calculate correlations over. + df: Processed DataFrame to calculate correlations over. 
""" if len(periods) == 1: period = periods[0] df = _get_quasi_factor_scores_data_for_single_period( - data, - update_info_by_period, - period, - latent_factors, - observed_factors, + data=data, + update_info_by_period=update_info_by_period, + period=period, + latent_factors=latent_factors, + observed_factors=observed_factors, ) else: df = _get_quasi_factor_scores_data_for_multiple_periods( - data, - update_info_by_period, - periods, - latent_factors, - observed_factors, + data=data, + update_info_by_period=update_info_by_period, + periods=periods, + latent_factors=latent_factors, + observed_factors=observed_factors, ) return df def _get_quasi_factor_scores_data_for_single_period( - data, - update_info_by_period, - period, - latent_factors, - observed_factors, -): + data: pd.DataFrame, + update_info_by_period: pd.DataFrame, + period: int, + latent_factors: list[str] | tuple[str, ...], + observed_factors: list[str] | tuple[str, ...], +) -> pd.DataFrame: """Get frame with summary scores on factor measurements in a given period. Args: - data (pd.DataFrame): Data with observable variables. - update_info_by_period (pd.DataFrame): DataFrame with information on measurements + data: Data with observable variables. + update_info_by_period: DataFrame with information on measurements for each factor in each user-provided period. - periods (list): The list of periods that correlations are - calculated for. - latent_factors (list): List of latent factors the scores of which + period: The period that correlations are calculated for. + latent_factors: List of latent factors the scores of which correlations are calculated for. - observed_factors (list): List of observed factors the scores of which + observed_factors: List of observed factors the scores of which correlations are calculated for. Returns: - df (pd.DataFrame): Processed DataFrame to calculate correlations over. + df: Processed DataFrame to calculate correlations over. 
""" period_info = update_info_by_period.loc[period].reset_index() @@ -502,59 +537,57 @@ def _get_quasi_factor_scores_data_for_single_period( for factor in observed_factors: df = data.query(f"{update_info_by_period.index.names[0]}=={period}")[factor] to_concat.append(df) - df = pd.concat(to_concat, axis=1) - return df + return pd.concat(to_concat, axis=1) def _get_quasi_factor_scores_data_for_multiple_periods( - data, - update_info_by_period, - periods, - latent_factors, - observed_factors, -): + data: pd.DataFrame, + update_info_by_period: pd.DataFrame, + periods: list[int], + latent_factors: list[str] | tuple[str, ...], + observed_factors: list[str] | tuple[str, ...], +) -> pd.DataFrame: """Get frame with summary scores of factor measurements in a given period. Args: - data (pd.DataFrame): Data with observable variables. - update_info_by_period (pd.DataFrame): DataFrame with information on measurements + data: Data with observable variables. + update_info_by_period: DataFrame with information on measurements for each factor in each user-provided period. - periods (list): The list of periods that correlations are + periods: The list of periods that correlations are calculated for. - latent_factors (list): List of latent factors the scores of which + latent_factors: List of latent factors the scores of which correlations are calculated for. - observed_factors (list): List of observed factors the scores of which + observed_factors: List of observed factors the scores of which correlations are calculated for. Returns: - df (pd.DataFrame): Processed DataFrame to calculate correlations over. + df: Processed DataFrame to calculate correlations over. 
""" to_concat = [] for period in periods: to_concat.append( _get_quasi_factor_scores_data_for_single_period( - data, - update_info_by_period, - period, - latent_factors, - observed_factors, + data=data, + update_info_by_period=update_info_by_period, + period=period, + latent_factors=latent_factors, + observed_factors=observed_factors, ) .add_suffix(f", {period}") .reset_index(drop=True), ) - df = pd.concat(to_concat, axis=1) - return df + return pd.concat(to_concat, axis=1) def _get_factor_scores_data( - data, - params, - model, - periods, - latent_factors, - observed_factors, -): + data: pd.DataFrame, + params: pd.DataFrame, + model: ProcessedModel, + periods: list[int], + latent_factors: list[str] | tuple[str, ...], + observed_factors: list[str] | tuple[str, ...], +) -> pd.DataFrame: """Get data frame with factor scores in each period. In each period, standardize factor measurements to with estimated intercepts and @@ -562,52 +595,52 @@ def _get_factor_scores_data( a summary statistics. Args: - data (pd.DataFrame): Data with observable variables. - params (pd.DataFrame): Data frame with estimated measurement relevant + data: Data with observable variables. + params: Data frame with estimated measurement relevant model parameters. - model (dict): Processed model dict. - periods (list): The list of periods that correlations are + model: The processed model. + periods: The list of periods that correlations are calculated for. - latent_factors (list): List of latent factors the scores of which + latent_factors: List of latent factors the scores of which correlations are calculated for. - observed_factors (list): List of observed factors the scores of which + observed_factors: List of observed factors the scores of which correlations are calculated for. Returns: - df (pd.DataFrame): Processed DataFrame to calculate correlations over. + df: Processed DataFrame to calculate correlations over. 
""" if len(periods) == 1: period = periods[0] df = _get_factor_scores_data_for_single_period( - data, - params, - model, - period, - latent_factors, - observed_factors, + data=data, + params=params, + model=model, + period=period, + latent_factors=latent_factors, + observed_factors=observed_factors, ) else: df = _get_factor_scores_data_for_multiple_periods( - data, - params, - model, - periods, - latent_factors, - observed_factors, + data=data, + params=params, + model=model, + periods=periods, + latent_factors=latent_factors, + observed_factors=observed_factors, ) return df def _get_factor_scores_data_for_single_period( - data, - params, - model, - period, - latent_factors, - observed_factors, -): + data: pd.DataFrame, + params: pd.DataFrame, + model: ProcessedModel, + period: int, + latent_factors: list[str] | tuple[str, ...], + observed_factors: list[str] | tuple[str, ...], +) -> pd.DataFrame: """Get frame with factor scores in a given period. Careful: When we have endogenous factors, *period* refers to the raw period, but the @@ -615,27 +648,26 @@ def _get_factor_scores_data_for_single_period( augmented periods. Args: - data (pd.DataFrame): Data with observable variables. - params (pd.DataFrame): Data frame with estimated measurement relevant - model parameters. - model (dict): Processed model dict. - period (int): The period that correlations are calculated for. - latent_factors (list): List of latent factors the scores of which + data: Data with observable variables. + params: Estimated measurement-relevant model parameters. + model: The processed model. + period: The period that correlations are calculated for. + latent_factors: List of latent factors the scores of which correlations are calculated for. - observed_factors (list): List of observed factors the scores of which + observed_factors: List of observed factors the scores of which correlations are calculated for. Returns: - df (pd.DataFrame): Processed DataFrame to calculate correlations over. 
+ df: Processed DataFrame to calculate correlations over. """ - aug_periods = model["endogenous_factors_info"]["aug_periods_from_period"](period) + aug_periods = model.endogenous_factors_info.aug_periods_from_period(period) df = pd.concat( [ _get_factor_scores_data_for_single_model_period( data=data, params=params, - update_info=model["update_info"], + update_info=model.update_info, aug_period=ap, period=period, latent_factors=latent_factors, @@ -654,32 +686,32 @@ def _get_factor_scores_data_for_single_period( def _get_factor_scores_data_for_single_model_period( - data, - params, - update_info, - aug_period, - period, - latent_factors, - observed_factors, -): + data: pd.DataFrame, + params: pd.DataFrame, + update_info: pd.DataFrame, + aug_period: int, + period: int, + latent_factors: list[str] | tuple[str, ...], + observed_factors: list[str] | tuple[str, ...], +) -> pd.DataFrame: """Get frame with factor scores in a given model period. In this function, all calculations are at the augmented period level. Args: - data (pd.DataFrame): Data with observable variables. - params (pd.DataFrame): Data frame with estimated measurement relevant - update_info (pd.DataFrame): DataFrame with information on measurements + data: Data with observable variables. + params: Data frame with estimated measurement relevant + update_info: DataFrame with information on measurements for each factor in each model period. - aug_period (int): The (augmented) period that correlations are calculated for. - period (int): The (raw) period that correlations are calculated for. - latent_factors (list): List of latent factors the scores of which + aug_period: The (augmented) period that correlations are calculated for. + period: The (raw) period that correlations are calculated for. + latent_factors: List of latent factors the scores of which correlations are calculated for. 
- observed_factors (list): List of observed factors the scores of which + observed_factors: List of observed factors the scores of which correlations are calculated for. Returns: - df (pd.DataFrame): Processed DataFrame to calculate correlations over. + df: Processed DataFrame to calculate correlations over. """ if aug_period not in update_info.index: return pd.DataFrame() @@ -690,7 +722,7 @@ def _get_factor_scores_data_for_single_model_period( params.loc["controls"].query("name2 == 'constant'").droplevel("name2")["value"] ) loadings_count = loadings.astype(bool).groupby("name1").sum() - leave_out_meas = loadings_count[loadings_count > 1].index.to_list() + leave_out_meas = loadings_count[loadings_count > 1].index.to_list() # ty: ignore[unsupported-operator] to_concat = [] for factor in latent_factors: period_factor_measurements = period_info.query( @@ -712,108 +744,113 @@ def _get_factor_scores_data_for_single_model_period( def _get_factor_scores_data_for_multiple_periods( - data, - params, - model, - periods, - latent_factors, - observed_factors, -): + data: pd.DataFrame, + params: pd.DataFrame, + model: ProcessedModel, + periods: list[int], + latent_factors: list[str] | tuple[str, ...], + observed_factors: list[str] | tuple[str, ...], +) -> pd.DataFrame: """Get frame with factor scores in a given period. Args: - data (pd.DataFrame): Data with observable variables. - params (pd.DataFrame): Data frame with estimated model parameters. - model (dict): Processed model dict. - periods (list): The list of periods that correlations are + data: Data with observable variables. + params: Estimated model parameters. + model: The processed model. + periods: The list of periods that correlations are calculated for. - latent_factors (list): List of latent factors the scores of which + latent_factors: List of latent factors the scores of which correlations are calculated for. 
- observed_factors (list): List of observed factors the scores of which + observed_factors: List of observed factors the scores of which correlations are calculated for. Returns: - df (pd.DataFrame): Processed DataFrame to calculate correlations over. + df: Processed DataFrame to calculate correlations over. """ to_concat = [] for period in periods: to_concat.append( _get_factor_scores_data_for_single_period( - data, - params, - model, - period, - latent_factors, - observed_factors, + data=data, + params=params, + model=model, + period=period, + latent_factors=latent_factors, + observed_factors=observed_factors, ) .add_suffix(f", {period}") .reset_index(drop=True), ) - df = pd.concat(to_concat, axis=1) - return df + return pd.concat(to_concat, axis=1) -def _process_factors(model, factors): - """Process factors to get a tuple of lists.""" +def _process_factors( + model: ProcessedModel, + factors: list[str] | tuple[str, ...] | str | None, +) -> tuple[tuple[str, ...], tuple[str, ...]]: + """Process factors to get a tuple of tuples.""" if not factors: - latent_factors = model["labels"]["latent_factors"] - observed_factors = model["labels"]["observed_factors"] + latent_factors = model.labels.latent_factors + observed_factors = model.labels.observed_factors elif isinstance(factors, str): - if factors in model["labels"]["latent_factors"]: - latent_factors = [factors] - observed_factors = [] - elif factors in model["labels"]["observed_factors"]: - observed_factors = [factors] - latent_factors = [] + if factors in model.labels.latent_factors: + latent_factors = (factors,) + observed_factors = () + elif factors in model.labels.observed_factors: + observed_factors = (factors,) + latent_factors = () else: - observed_factors = [] - latent_factors = [] - for factor in factors: - if factor in model["labels"]["latent_factors"]: - latent_factors.append(factor) - elif factor in model["labels"]["observed_factors"]: - observed_factors.append(factor) + latent_factors = tuple( + fac for 
fac in factors if fac in model.labels.latent_factors + ) + observed_factors = tuple( + fac for fac in factors if fac in model.labels.observed_factors + ) return latent_factors, observed_factors # ty: ignore[possibly-unresolved-reference] -def _process_periods(periods, model): +def _process_periods( + periods: float | list[int] | None, + model: ProcessedModel, +) -> list[int]: """Process periods to get a list.""" if periods is None: - periods = list(range(model["dimensions"]["n_periods"])) - elif isinstance(periods, int | float): - periods = [periods] + return list(range(model.dimensions.n_periods)) + if isinstance(periods, int | float): + return [int(periods)] return periods def _get_layout_kwargs( - corr, - layout_kwargs, - annotate, - annotation_fontsize, - annotation_text_color, - annotation_text_angle, - axes_tick_fontsize, - axes_tick_label_angle, - axes_tick_label_color, -): + corr: pd.DataFrame, + layout_kwargs: dict[str, Any] | None, + *, + annotate: bool, + annotation_fontsize: int, + annotation_text_color: str, + annotation_text_angle: float, + axes_tick_fontsize: tuple[int, int], + axes_tick_label_angle: tuple[float, float], + axes_tick_label_color: tuple[str, str], +) -> dict[str, Any]: """Get kwargs to update figure layout. Args: - corr (DataFrame): The processed data frame with correlation coefficients. - layout_kwargs (dct): Dictionary of keyword arguments used to update layout of + corr: The processed data frame with correlation coefficients. + layout_kwargs: Dictionary of keyword arguments used to update layout of go.Figure object. - annotate (bool): Add annotations to the figure if True. - annotation_font_size (int): Fontsize of the annotation text. - annotation_font_color (str): Color of the annotation text. - annotation_text_angle (float): The angle at which to rotate annotation text. + annotate: Add annotations to the figure if True. + annotation_fontsize: Fontsize of the annotation text. + annotation_text_color: Color of the annotation text. 
+ annotation_text_angle: The angle at which to rotate annotation text. axes_tick_fontsize(tuple,list or dict): Fontsizes of axes tick labels. axes_tick_label_angle(tuple,list or dict): The angle at which to rotate axes tick labels. - axes_tick_label_color(tuple,list or dict): Collor of axes labels. + axes_tick_label_color(tuple,list or dict): Color of axes labels. Returns: - default_layout_kwargs (dict): Dictionary to update figure layout. + default_layout_kwargs: Dictionary to update figure layout. """ default_layout_kwargs = { @@ -825,17 +862,17 @@ def _get_layout_kwargs( default_layout_kwargs.update( _get_annotations( corr, - annotate, - annotation_fontsize, - annotation_text_color, - annotation_text_angle, + annotate=annotate, + annotation_fontsize=annotation_fontsize, + annotation_text_color=annotation_text_color, + annotation_text_angle=annotation_text_angle, ), ) default_layout_kwargs.update( _get_axes_ticks_kwargs( - axes_tick_fontsize, - axes_tick_label_angle, - axes_tick_label_color, + axes_tick_fontsize=axes_tick_fontsize, + axes_tick_label_angle=axes_tick_label_angle, + axes_tick_label_color=axes_tick_label_color, ), ) if layout_kwargs: @@ -844,10 +881,10 @@ def _get_layout_kwargs( def _get_axes_ticks_kwargs( - axes_tick_fontsize, - axes_tick_label_angle, - axes_tick_label_color, -): + axes_tick_fontsize: tuple[int, int] | dict[str, int], + axes_tick_label_angle: tuple[float, float] | dict[str, float], + axes_tick_label_color: tuple[str, str] | dict[str, str], +) -> dict[str, Any]: """Get kwargs for axes ticks label formating.""" axes_tick_fontsize = _process_axes_tick_args(axes_tick_fontsize) axes_tick_label_angle = _process_axes_tick_args(axes_tick_label_angle) @@ -865,12 +902,13 @@ def _get_axes_ticks_kwargs( def _get_annotations( - df, - annotate, - annotation_fontsize, - annotation_text_color, - annotation_text_angle, -): + df: pd.DataFrame, + *, + annotate: bool, + annotation_fontsize: int, + annotation_text_color: str, + annotation_text_angle: 
float, +) -> dict[str, Any]: """Get annotations and formatting kwargs.""" annotation_kwargs = {} if annotate: @@ -897,31 +935,34 @@ def _get_annotations( def _get_heatmap_kwargs( - corr, - heatmap_kwargs, - colorscale, - show_color_bar, - zmax, - zmin, - zmid, -): + corr: pd.DataFrame, + heatmap_kwargs: dict[str, Any] | None, + colorscale: str, + *, + show_color_bar: bool, + zmax: float | None, + zmin: float | None, + zmid: float | None, +) -> dict[str, Any]: """Get kwargs to instantiate Heatmap object. Args: - heatmap_kwargs (dct): Dictionary of key word arguments to pass to go.Heatmap(). - colorscale (str): Name of the color palette to use in the heatmap. + corr: Data frame with correlation coefficients. + heatmap_kwargs: Dictionary of key word arguments to pass to go.Heatmap(). + colorscale: Name of the color palette to use in the heatmap. Default 'RdBu_r'. - show_color_bar (bool): A boolean variable for displayin heatmap colorbar. - zmax (float or None): Upper bound to set on correlation color map. - zmin (float or None): Lower bound to set on correlation color map. - zmid (float or None): Midpoint to set on correlation color map. + show_color_bar: A boolean variable for displaying heatmap colorbar. + zmax: Upper bound to set on correlation color map. + zmin: Lower bound to set on correlation color map. + zmid: Midpoint to set on correlation color map. Returns: - default_heatmap_kwargs (dict): Dictionary of kwargs to instantiate go.Heatmap. + default_heatmap_kwargs: Dictionary of kwargs to instantiate go.Heatmap. 
""" if zmax is None: - zmax = np.abs(corr.to_numpy())[np.tril_indices_from(corr, k=-1)].max() + corr_arr = corr.to_numpy() + zmax = np.abs(corr_arr)[np.tril_indices_from(corr_arr, k=-1)].max() if zmin is None: zmin = -zmax if zmid is None: @@ -938,7 +979,9 @@ def _get_heatmap_kwargs( return default_heatmap_kwargs -def _process_axes_tick_args(args): +def _process_axes_tick_args( + args: tuple[Any, Any] | list[Any] | dict[str, Any], +) -> dict[str, Any]: if isinstance(args, tuple | list): args = {"x": args[0], "y": args[1]} return args diff --git a/src/skillmodels/decorators.py b/src/skillmodels/decorators.py index 6dcf779d..53060d00 100644 --- a/src/skillmodels/decorators.py +++ b/src/skillmodels/decorators.py @@ -1,27 +1,38 @@ +"""Decorators for parameter extraction and registration in transition functions.""" + import functools +from collections.abc import Callable +from typing import Any import jax.numpy as jnp +from jax import Array -def extract_params(func=None, *, key=None, names=None): +def extract_params( + func: Callable | None = None, + *, + key: str | None = None, + names: list[str] | None = None, +) -> Callable: """Process params before passing them to func. Note: The resulting function is keyword only! Args: - key (str or None): If key is not None, we assume params is a dictionary of which + func: The function to be decorated, or None if using decorator with arguments. + key: If key is not None, we assume params is a dictionary of which only the params[key] should be passed into func. - names (list or None): If names is provided, we assume that params + names: If names is provided, we assume that params (or params[key]) should be converted to a dictionary with names as keys before passing them to func. 
""" - def decorator_extract_params(func): + def decorator_extract_params(func: Callable) -> Callable: if key is not None and names is None: @functools.wraps(func) - def wrapper_extract_params(**kwargs): + def wrapper_extract_params(**kwargs: Any) -> Any: internal_kwargs = kwargs.copy() internal_kwargs["params"] = kwargs["params"][key] return func(**internal_kwargs) @@ -29,7 +40,7 @@ def wrapper_extract_params(**kwargs): elif key is None and names is not None: @functools.wraps(func) - def wrapper_extract_params(**kwargs): + def wrapper_extract_params(**kwargs: Any) -> Any: internal_kwargs = kwargs.copy() internal_kwargs["params"] = dict( zip(names, kwargs["params"], strict=False) @@ -39,7 +50,7 @@ def wrapper_extract_params(**kwargs): elif key is not None and names is not None: @functools.wraps(func) - def wrapper_extract_params(**kwargs): + def wrapper_extract_params(**kwargs: Any) -> Any: internal_kwargs = kwargs.copy() internal_kwargs["params"] = dict( zip(names, kwargs["params"][key], strict=False) @@ -56,21 +67,26 @@ def wrapper_extract_params(**kwargs): return decorator_extract_params -def jax_array_output(func): +def jax_array_output(func: Callable) -> Callable: """Convert tuple output to list output.""" @functools.wraps(func) - def wrapper_jax_array_output(*args, **kwargs): + def wrapper_jax_array_output(*args: Any, **kwargs: Any) -> Array: raw = func(*args, **kwargs) - out = jnp.array(raw) - return out + return jnp.array(raw) return wrapper_jax_array_output -def register_params(func=None, *, params=None): - def decorator_register_params(func): - func.__registered_params__ = params +def register_params( + func: Callable | None = None, + *, + params: list[str] | None = None, +) -> Callable: + """Register parameter names for a transition function.""" + + def decorator_register_params(func: Callable) -> Callable: + func.__registered_params__ = params # ty: ignore[unresolved-attribute] return func if callable(func): diff --git 
a/src/skillmodels/filtered_states.py b/src/skillmodels/filtered_states.py index 2efb4c6f..308707f4 100644 --- a/src/skillmodels/filtered_states.py +++ b/src/skillmodels/filtered_states.py @@ -1,35 +1,46 @@ +"""Functions to compute and process filtered latent states.""" + +from typing import Any + import jax.numpy as jnp import numpy as np +import pandas as pd from skillmodels.maximization_inputs import get_maximization_inputs +from skillmodels.model_spec import ModelSpec from skillmodels.params_index import get_params_index from skillmodels.parse_params import create_parsing_info, parse_params from skillmodels.process_debug_data import create_state_ranges from skillmodels.process_model import process_model -def get_filtered_states(model_dict, data, params): - max_inputs = get_maximization_inputs(model_dict=model_dict, data=data) +def get_filtered_states( + model_spec: ModelSpec, + data: pd.DataFrame, + params: pd.DataFrame, +) -> dict[str, dict[str, Any]]: + """Compute filtered latent states given data and estimated parameters.""" + max_inputs = get_maximization_inputs(model_spec=model_spec, data=data) params = params.loc[max_inputs["params_template"].index] debug_loglike = max_inputs["debug_loglike"] debug_data = debug_loglike(params) unanchored_states_df = debug_data["filtered_states"] unanchored_ranges = debug_data["state_ranges"] - model = process_model(model_dict) + processed_model = process_model(model_spec) anchored_states_df = anchor_states_df( states_df=unanchored_states_df, - model_dict=model_dict, + model_spec=model_spec, params=params, use_aug_period=True, ) anchored_ranges = create_state_ranges( filtered_states=anchored_states_df, - factors=model["labels"]["latent_factors"], + factors=processed_model.labels.latent_factors, ) - out = { + return { "anchored_states": { "states": anchored_states_df, "state_ranges": anchored_ranges, @@ -40,10 +51,14 @@ def get_filtered_states(model_dict, data, params): }, } - return out - -def anchor_states_df(states_df, 
model_dict, params, use_aug_period): +def anchor_states_df( + states_df: pd.DataFrame, + model_spec: ModelSpec, + params: pd.DataFrame, + *, + use_aug_period: bool, +) -> pd.DataFrame: """Anchor states in a DataFrame. The DataFrame is expected to have a column called "period" as well as one column @@ -55,43 +70,41 @@ def anchor_states_df(states_df, model_dict, params, use_aug_period): as an internal function that only works with jax objects). """ - model = process_model(model_dict) + processed_model = process_model(model_spec) p_index = get_params_index( - update_info=model["update_info"], - labels=model["labels"], - dimensions=model["dimensions"], - transition_info=model["transition_info"], - endogenous_factors_info=model["endogenous_factors_info"], + update_info=processed_model.update_info, + labels=processed_model.labels, + dimensions=processed_model.dimensions, + transition_info=processed_model.transition_info, + endogenous_factors_info=processed_model.endogenous_factors_info, ) params = params.loc[p_index] parsing_info = create_parsing_info( params_index=p_index, - update_info=model["update_info"], - labels=model["labels"], - anchoring=model["anchoring"], - has_endogenous_factors=model["endogenous_factors_info"][ - "has_endogenous_factors" - ], + update_info=processed_model.update_info, + labels=processed_model.labels, + anchoring=processed_model.anchoring, + has_endogenous_factors=processed_model.endogenous_factors_info.has_endogenous_factors, ) - *_, pardict = parse_params( + *_, parsed_params = parse_params( params=jnp.array(params["value"].to_numpy()), parsing_info=parsing_info, - dimensions=model["dimensions"], - labels=model["labels"], + dimensions=processed_model.dimensions, + labels=processed_model.labels, n_obs=1, ) - n_latent = model["dimensions"]["n_latent_factors"] + n_latent = processed_model.dimensions.n_latent_factors - _scaling_factors = np.array(pardict["anchoring_scaling_factors"][:, :n_latent]) - _constants = 
np.array(pardict["anchoring_constants"][:, :n_latent]) + _scaling_factors = np.array(parsed_params.anchoring_scaling_factors[:, :n_latent]) + _constants = np.array(parsed_params.anchoring_constants[:, :n_latent]) if use_aug_period: period_arr = states_df["aug_period"].to_numpy() - ap_to_p = model["labels"]["aug_periods_to_periods"] + ap_to_p = processed_model.labels.aug_periods_to_periods scaling_factors = np.empty(shape=(len(ap_to_p), n_latent)) constants = np.empty(shape=(len(ap_to_p), n_latent)) for ap, p in ap_to_p.items(): @@ -106,9 +119,7 @@ def anchor_states_df(states_df, model_dict, params, use_aug_period): constants_arr = constants[period_arr] out = states_df.copy(deep=True) - for pos, factor in enumerate(model["labels"]["latent_factors"]): + for pos, factor in enumerate(processed_model.labels.latent_factors): out[factor] = constants_arr[:, pos] + states_df[factor] * scaling_arr[:, pos] - out = out[states_df.columns] - - return out + return out[states_df.columns] diff --git a/src/skillmodels/kalman_filters.py b/src/skillmodels/kalman_filters.py index f9cfae97..10bc034f 100644 --- a/src/skillmodels/kalman_filters.py +++ b/src/skillmodels/kalman_filters.py @@ -1,5 +1,10 @@ +"""Kalman filter operations for state estimation using the square-root form.""" + +from collections.abc import Callable + import jax import jax.numpy as jnp +from jax import Array from skillmodels.qr import qr_gpu @@ -9,44 +14,43 @@ else jax.vmap(jax.vmap(jnp.linalg.qr)) ) + # ====================================================================================== # Update Step # ====================================================================================== - - def kalman_update( - states, - upper_chols, - loadings, - control_params, - meas_sd, - measurements, - controls, - log_mixture_weights, -): + states: Array, + upper_chols: Array, + loadings: Array, + control_params: Array, + meas_sd: Array, + measurements: Array, + controls: Array, + log_mixture_weights: Array, +) -> 
tuple[Array, Array, Array, Array]: """Perform a Kalman update with likelihood evaluation. Args: - states (jax.numpy.array): Array of shape (n_obs, n_mixtures, n_states) with + states: Array of shape (n_obs, n_mixtures, n_states) with pre-update states estimates. - upper_chols (jax.numpy.array): Array of shape (n_obs, n_mixtures, n_states, + upper_chols: Array of shape (n_obs, n_mixtures, n_states, n_states) with the transpose of the lower triangular cholesky factor of the pre-update covariance matrix of the state estimates. - loadings (jax.numpy.array): 1d array of length n_states with factor loadings. - control_params (jax.numpy.array): 1d array of length n_controls. - meas_sd (float): Standard deviation of the measurement error. - measurements (jax.numpy.array): 1d array of length n_obs with measurements. + loadings: 1d array of length n_states with factor loadings. + control_params: 1d array of length n_controls. + meas_sd: Standard deviation of the measurement error. + measurements: 1d array of length n_obs with measurements. May contain NaNs if no measurement was observed. - controls (jax.numpy.array): Array of shape (n_obs, n_controls) with data on the + controls: Array of shape (n_obs, n_controls) with data on the control variables. - log_mixture_weights (jax.numpy.array): Array of shape (n_obs, n_mixtures) with + log_mixture_weights: Array of shape (n_obs, n_mixtures) with the natural logarithm of the weights of each element of the mixture of normals distribution. Returns: - states (jax.numpy.array): Same format as states. - new_states (jax.numpy.array): Same format as states. - new_upper_chols (jax.numpy.array): Same format as upper_chols + states: Same format as states. + new_states: Same format as states. 
+ new_upper_chols: Same format as upper_chols new_log_mixture_weights: (jax.numpy.array): Same format as log_mixture_weights new_loglikes: (jax.numpy.array): 1d array of length n_obs @@ -133,17 +137,18 @@ def kalman_update( # ====================================================================================== # Predict Step # ====================================================================================== - - -def calculate_sigma_scaling_factor_and_weights(n_states, kappa=2): +def calculate_sigma_scaling_factor_and_weights( + n_states: int, + kappa: float = 2, +) -> tuple[Array, Array]: """Calculate the scaling factor and weights for sigma points according to Julier. There are other sigma point algorithms, but many of them possibly have negative weights which makes the unscented predict step more complicated. Args: - n_states (int): Number of states. - kappa (float): Spreading factor of the sigma points. + n_states: Number of states. + kappa: Spreading factor of the sigma points. Returns: float: Scaling factor @@ -158,39 +163,41 @@ def calculate_sigma_scaling_factor_and_weights(n_states, kappa=2): def kalman_predict( - transition_func, - states, - upper_chols, - sigma_scaling_factor, - sigma_weights, - trans_coeffs, - shock_sds, - anchoring_scaling_factors, - anchoring_constants, - observed_factors, -): + transition_func: Callable, + states: Array, + upper_chols: Array, + sigma_scaling_factor: float, + sigma_weights: Array, + trans_coeffs: dict[str, Array], + shock_sds: Array, + anchoring_scaling_factors: Array, + anchoring_constants: Array, + observed_factors: Array, +) -> tuple[Array, Array]: """Make a unscented Kalman predict. Args: - transition_func (Callable): The transition function. - states (jax.numpy.array): Array of shape (n_obs, n_mixtures, n_states) with + transition_func: The transition function. + states: Array of shape (n_obs, n_mixtures, n_states) with pre-update states estimates. 
- upper_chols (jax.numpy.array): Array of shape (n_obs, n_mixtures, n_states, + upper_chols: Array of shape (n_obs, n_mixtures, n_states, n_states) with the transpose of the lower triangular cholesky factor of the pre-update covariance matrix of the state estimates. - sigma_scaling_factor (float): A scaling factor that controls the spread of the + sigma_scaling_factor: A scaling factor that controls the spread of the sigma points. Bigger means that sigma points are further apart. Depends on the sigma_point algorithm chosen. - sigma_weights (jax.numpy.array): 1d array of length n_sigma with non-negative + sigma_weights: 1d array of length n_sigma with non-negative sigma weights. - trans_coeffs (tuple): Tuple of 1d jax.numpy.arrays with transition parameters. - anchoring_scaling_factors (jax.numpy.array): Array of shape (2, n_fac) with + trans_coeffs: Tuple of 1d jax.numpy.arrays with transition parameters. + shock_sds: 1d array of length n_fac with shock standard + deviations. + anchoring_scaling_factors: Array of shape (2, n_fac) with the scaling factors for anchoring. The first row corresponds to the input period, the second to the output period (i.e. input period + 1). - anchoring_constants (jax.numpy.array): Array of shape (2, n_states) with the + anchoring_constants: Array of shape (2, n_states) with the constants for anchoring. The first row corresponds to the input period, the second to the output period (i.e. input period + 1). - observed_factors (jax.numpy.array): Array of shape (n_obs, n_observed_factors) + observed_factors: Array of shape (n_obs, n_observed_factors) with data on the observed factors in period t. 
Returns: @@ -199,17 +206,17 @@ def kalman_predict( """ sigma_points = _calculate_sigma_points( - states, - upper_chols, - sigma_scaling_factor, - observed_factors, + states=states, + upper_chols=upper_chols, + scaling_factor=sigma_scaling_factor, + observed_factors=observed_factors, ) transformed = transform_sigma_points( - sigma_points, - transition_func, - trans_coeffs, - anchoring_scaling_factors, - anchoring_constants, + sigma_points=sigma_points, + transition_func=transition_func, + trans_coeffs=trans_coeffs, + anchoring_scaling_factors=anchoring_scaling_factors, + anchoring_constants=anchoring_constants, ) # do not use sigma_points.shape because sigma_points contain observed factors @@ -228,19 +235,24 @@ def kalman_predict( return predicted_states, predicted_covs -def _calculate_sigma_points(states, upper_chols, scaling_factor, observed_factors): +def _calculate_sigma_points( + states: Array, + upper_chols: Array, + scaling_factor: float, + observed_factors: Array, +) -> Array: """Calculate the array of sigma_points for the unscented transform. Args: - states (jax.numpy.array): Array of shape (n_obs, n_mixtures, n_states) with + states: Array of shape (n_obs, n_mixtures, n_states) with pre-update states estimates. - upper_chols (jax.numpy.array): Array of shape (n_obs, n_mixtures, n_states, + upper_chols: Array of shape (n_obs, n_mixtures, n_states, n_states) with the transpose of the lower triangular cholesky factor of the pre-update covariance matrix of the state estimates. - scaling_factor (float): A scaling factor that controls the spread of the + scaling_factor: A scaling factor that controls the spread of the sigma points. Bigger means that sigma points are further apart. Depends on the sigma_point algorithm chosen. - observed_factors (jax.numpy.array): Array of shape (n_obs, n_observed_factors) + observed_factors: Array of shape (n_obs, n_observed_factors) with data on the observed factors in period t. 
Returns: @@ -269,27 +281,26 @@ def _calculate_sigma_points(states, upper_chols, scaling_factor, observed_factor n_observed, ) - sigma_points = jnp.concatenate([sigma_points, observed_part], axis=-1) - return sigma_points + return jnp.concatenate([sigma_points, observed_part], axis=-1) def transform_sigma_points( - sigma_points, - transition_func, - trans_coeffs, - anchoring_scaling_factors, - anchoring_constants, -): + sigma_points: Array, + transition_func: Callable, + trans_coeffs: dict[str, Array], + anchoring_scaling_factors: Array, + anchoring_constants: Array, +) -> Array: """Anchor sigma points, transform them and unanchor the transformed sigma points. Args: - sigma_points (jax.numpy.array) of shape n_obs, n_mixtures, n_sigma, n_fac. - transition_func (Callable): The transition function. - trans_coeffs (tuple): Tuple of 1d jax.numpy.arrays with transition parameters. - anchoring_scaling_factors (jax.numpy.array): Array of shape (2, n_states) with + sigma_points: Array of shape n_obs, n_mixtures, n_sigma, n_fac. + transition_func: The transition function. + trans_coeffs: Tuple of 1d jax.numpy.arrays with transition parameters. + anchoring_scaling_factors: Array of shape (2, n_states) with the scaling factors for anchoring. The first row corresponds to the input period, the second to the output period (i.e. input period + 1). - anchoring_constants (jax.numpy.array): Array of shape (2, n_states) with the + anchoring_constants: Array of shape (2, n_states) with the constants for anchoring. The first row corresponds to the input period, the second to the output period (i.e. input period + 1). 
@@ -313,6 +324,4 @@ def transform_sigma_points( ) / anchoring_scaling_factors[1][:n_observed] out_shape = (n_obs, n_mixtures, n_sigma, -1) - out = transformed_unanchored.reshape(out_shape) - - return out + return transformed_unanchored.reshape(out_shape) diff --git a/src/skillmodels/kalman_filters_debug.py b/src/skillmodels/kalman_filters_debug.py index 14b07e1b..5d1ec44f 100644 --- a/src/skillmodels/kalman_filters_debug.py +++ b/src/skillmodels/kalman_filters_debug.py @@ -1,5 +1,10 @@ +"""Debug versions of Kalman filter operations that return intermediate results.""" + +from typing import Any + import jax import jax.numpy as jnp +from jax import Array array_qr_jax = jax.vmap(jax.vmap(jnp.linalg.qr)) @@ -7,44 +12,42 @@ # ====================================================================================== # Update Step # ====================================================================================== - - def kalman_update( - states, - upper_chols, - loadings, - control_params, - meas_sd, - measurements, - controls, - log_mixture_weights, -): + states: Array, + upper_chols: Array, + loadings: Array, + control_params: Array, + meas_sd: float, + measurements: Array, + controls: Array, + log_mixture_weights: Array, +) -> tuple[Array, Array, Array, Array, dict[str, Any]]: """Perform a Kalman update with likelihood evaluation, returning debug info on top. Args: - states (jax.numpy.array): Array of shape (n_obs, n_mixtures, n_states) with + states: Array of shape (n_obs, n_mixtures, n_states) with pre-update states estimates. - upper_chols (jax.numpy.array): Array of shape (n_obs, n_mixtures, n_states, + upper_chols: Array of shape (n_obs, n_mixtures, n_states, n_states) with the transpose of the lower triangular cholesky factor of the pre-update covariance matrix of the state estimates. - loadings (jax.numpy.array): 1d array of length n_states with factor loadings. - control_params (jax.numpy.array): 1d array of length n_controls. 
- meas_sd (float): Standard deviation of the measurement error. - measurements (jax.numpy.array): 1d array of length n_obs with measurements. + loadings: 1d array of length n_states with factor loadings. + control_params: 1d array of length n_controls. + meas_sd: Standard deviation of the measurement error. + measurements: 1d array of length n_obs with measurements. May contain NaNs if no measurement was observed. - controls (jax.numpy.array): Array of shape (n_obs, n_controls) with data on the + controls: Array of shape (n_obs, n_controls) with data on the control variables. - log_mixture_weights (jax.numpy.array): Array of shape (n_obs, n_mixtures) with + log_mixture_weights: Array of shape (n_obs, n_mixtures) with the natural logarithm of the weights of each element of the mixture of normals distribution. Returns: - states (jax.numpy.array): Same format as states. - new_states (jax.numpy.array): Same format as states. - new_upper_chols (jax.numpy.array): Same format as upper_chols + states: Same format as states. + new_states: Same format as states. 
+ new_upper_chols: Same format as upper_chols new_log_mixture_weights: (jax.numpy.array): Same format as log_mixture_weights new_loglikes: (jax.numpy.array): 1d array of length n_obs - debug_info (dict): Empty or containing residuals and residual_sds + debug_info: Empty or containing residuals and residual_sds """ n_obs, n_mixtures, n_states = states.shape diff --git a/src/skillmodels/likelihood_function.py b/src/skillmodels/likelihood_function.py index 4e7eec21..5d6ec5c5 100644 --- a/src/skillmodels/likelihood_function.py +++ b/src/skillmodels/likelihood_function.py @@ -1,7 +1,12 @@ +"""Log-likelihood function for latent factor models.""" + import functools +from collections.abc import Callable +from typing import Any import jax import jax.numpy as jnp +from jax import Array from skillmodels.clipping import soft_clipping from skillmodels.kalman_filters import ( @@ -9,24 +14,63 @@ kalman_update, ) from skillmodels.parse_params import parse_params +from skillmodels.types import ( + Dimensions, + EstimationOptions, + Labels, + ParsedParams, + ParsingInfo, +) def log_likelihood( - params, - parsing_info, - measurements, - controls, - transition_func, - sigma_scaling_factor, - sigma_weights, - dimensions, - labels, - estimation_options, - is_measurement_iteration, - is_predict_iteration, - iteration_to_period, - observed_factors, -): + params: Array, + parsing_info: ParsingInfo, + measurements: Array, + controls: Array, + transition_func: Callable, + sigma_scaling_factor: float, + sigma_weights: Array, + dimensions: Dimensions, + labels: Labels, + estimation_options: EstimationOptions, + is_measurement_iteration: Array, + is_predict_iteration: Array, + iteration_to_period: Array, + observed_factors: Array, +) -> Array: + """Aggregated log likelihood of a skill formation model. + + Wrapper around log_likelihood_obs that sums contributions across observations. + + Args: + params: 1d array with model parameters. 
+ parsing_info: Contains information how to parse parameter vector. + measurements: Array of shape (n_updates, n_obs) with data on + observed measurements. NaN if the measurement was not observed. + controls: Array of shape (n_periods, n_obs, n_controls) + with observed control variables for the measurement equations. + transition_func: The transition function. + sigma_scaling_factor: A scaling factor that controls the spread of the + sigma points. + sigma_weights: 1d array of length n_sigma with non-negative sigma weights. + dimensions: Dimensional information like n_states, n_periods, n_controls, + n_mixtures. + labels: Labels for the model quantities like factors, periods, controls, + stagemap and stages. + estimation_options: Options for estimation including clipping bounds. + is_measurement_iteration: Boolean array indicating which iterations are + measurement updates. + is_predict_iteration: Boolean array indicating which iterations are predict + steps. + iteration_to_period: Array mapping iteration index to period. + observed_factors: Array of shape (n_periods, n_obs, n_observed_factors) with + data on the observed factors. + + Returns: + Scalar aggregated log likelihood. 
+ + """ return log_likelihood_obs( params=params, parsing_info=parsing_info, @@ -46,21 +90,21 @@ def log_likelihood( def log_likelihood_obs( - params, - parsing_info, - measurements, - controls, - transition_func, - sigma_scaling_factor, - sigma_weights, - dimensions, - labels, - estimation_options, - is_measurement_iteration, - is_predict_iteration, - iteration_to_period, - observed_factors, -): + params: Array, + parsing_info: ParsingInfo, + measurements: Array, + controls: Array, + transition_func: Callable, + sigma_scaling_factor: float, + sigma_weights: Array, + dimensions: Dimensions, + labels: Labels, + estimation_options: EstimationOptions, + is_measurement_iteration: Array, + is_predict_iteration: Array, + iteration_to_period: Array, + observed_factors: Array, +) -> Array: """Log likelihood of a skill formation model. This function is jax-differentiable and jax-jittable as long as all but the first @@ -73,25 +117,29 @@ def log_likelihood_obs( with Jax. Args: - params (jax.numpy.array): 1d array with model parameters. - parsing_info (dict): Contains information how to parse parameter vector. - update_info (pandas.DataFrame): Contains information about number of updates in - each period and purpose of each update. - measurements (jax.numpy.array): Array of shape (n_updates, n_obs) with data on + params: 1d array with model parameters. + parsing_info: Contains information how to parse parameter vector. + measurements: Array of shape (n_updates, n_obs) with data on observed measurements. NaN if the measurement was not observed. - controls (jax.numpy.array): Array of shape (n_periods, n_obs, n_controls) + controls: Array of shape (n_periods, n_obs, n_controls) with observed control variables for the measurement equations. - transition_func (Callable): The transition function. - sigma_scaling_factor (float): A scaling factor that controls the spread of the + transition_func: The transition function. 
+ sigma_scaling_factor: A scaling factor that controls the spread of the sigma points. Bigger means that sigma points are further apart. Depends on the sigma_point algorithm chosen. - sigma_weights (jax.numpy.array): 1d array of length n_sigma with non-negative + sigma_weights: 1d array of length n_sigma with non-negative sigma weights. - dimensions (dict): Dimensional information like n_states, n_periods, n_controls, + dimensions: Dimensional information like n_states, n_periods, n_controls, n_mixtures. See :ref:`dimensions`. - labels (dict): Dict of lists with labels for the model quantities like + labels: Dict of lists with labels for the model quantities like factors, periods, controls, stagemap and stages. See :ref:`labels` - observed_factors (jax.numpy.array): Array of shape (n_periods, n_obs, + estimation_options: Options for estimation including clipping bounds. + is_measurement_iteration: Boolean array indicating which + iterations are measurement updates. + is_predict_iteration: Boolean array indicating which + iterations are predict steps. + iteration_to_period: Array mapping iteration index to period. + observed_factors: Array of shape (n_periods, n_obs, n_observed_factors) with data on the observed factors. 
Returns: @@ -99,12 +147,12 @@ def log_likelihood_obs( """ n_obs = measurements.shape[1] - states, upper_chols, log_mixture_weights, pardict = parse_params( - params, - parsing_info, - dimensions, - labels, - n_obs, + states, upper_chols, log_mixture_weights, parsed_params = parse_params( + params=params, + parsing_info=parsing_info, + dimensions=dimensions, + labels=labels, + n_obs=n_obs, ) carry = { @@ -115,9 +163,9 @@ def log_likelihood_obs( loop_args = { "period": iteration_to_period, - "loadings": pardict["loadings"], - "control_params": pardict["controls"], - "meas_sds": pardict["meas_sds"], + "loadings": parsed_params.loadings, + "control_params": parsed_params.controls, + "meas_sds": parsed_params.meas_sds, "measurements": measurements, "is_measurement_iteration": is_measurement_iteration, "is_predict_iteration": is_predict_iteration, @@ -126,7 +174,7 @@ def log_likelihood_obs( _body = functools.partial( _scan_body, controls=controls, - pardict=pardict, + parsed_params=parsed_params, sigma_scaling_factor=sigma_scaling_factor, sigma_weights=sigma_weights, transition_func=transition_func, @@ -139,23 +187,23 @@ def log_likelihood_obs( # possible. 
return soft_clipping( arr=static_out["loglikes"], - lower=estimation_options["clipping_lower_bound"], - upper=estimation_options["clipping_upper_bound"], - lower_hardness=estimation_options["clipping_lower_hardness"], - upper_hardness=estimation_options["clipping_upper_hardness"], + lower=estimation_options.clipping_lower_bound, + upper=estimation_options.clipping_upper_bound, + lower_hardness=estimation_options.clipping_lower_hardness, + upper_hardness=estimation_options.clipping_upper_hardness, ).sum(axis=0) def _scan_body( - carry, - loop_args, - controls, - pardict, - sigma_scaling_factor, - sigma_weights, - transition_func, - observed_factors, -): + carry: dict[str, Array], + loop_args: dict[str, Array], + controls: Array, + parsed_params: ParsedParams, + sigma_scaling_factor: float, + sigma_weights: Array, + transition_func: Callable, + observed_factors: Array, +) -> tuple[dict[str, Array], dict[str, Array]]: # ================================================================================== # create arguments needed for update # ================================================================================== @@ -193,12 +241,12 @@ def _scan_body( "upper_chols": upper_chols, "sigma_scaling_factor": sigma_scaling_factor, "sigma_weights": sigma_weights, - "trans_coeffs": {k: arr[t] for k, arr in pardict["transition"].items()}, - "shock_sds": pardict["shock_sds"][t], - "anchoring_scaling_factors": pardict["anchoring_scaling_factors"][ + "trans_coeffs": {k: arr[t] for k, arr in parsed_params.transition.items()}, + "shock_sds": parsed_params.shock_sds[t], + "anchoring_scaling_factors": parsed_params.anchoring_scaling_factors[ jnp.array([t, t + 1]) ], - "anchoring_constants": pardict["anchoring_constants"][jnp.array([t, t + 1])], + "anchoring_constants": parsed_params.anchoring_constants[jnp.array([t, t + 1])], "observed_factors": observed_factors[t], } @@ -224,28 +272,36 @@ def _scan_body( return new_state, static_out -def _one_arg_measurement_update(kwargs): - 
out = kalman_update(**kwargs) - return out +def _one_arg_measurement_update( + kwargs: dict[str, Array], +) -> tuple[Array, Array, Array, Array]: + return kalman_update(**kwargs) -def _one_arg_anchoring_update(kwargs): +def _one_arg_anchoring_update( + kwargs: dict[str, Array], +) -> tuple[Array, Array, Array, Array]: _, _, new_log_mixture_weights, new_loglikes = kalman_update(**kwargs) - out = ( + return ( kwargs["states"], kwargs["upper_chols"], new_log_mixture_weights, new_loglikes, ) - return out -def _one_arg_no_predict(kwargs, transition_func): # noqa: ARG001 +def _one_arg_no_predict( + kwargs: dict[str, Any], + transition_func: Callable, # noqa: ARG001 +) -> tuple[Array, Array, Array]: """Just return the states cond chols without any changes.""" return kwargs["states"], kwargs["upper_chols"], kwargs["states"] -def _one_arg_predict(kwargs, transition_func): +def _one_arg_predict( + kwargs: dict[str, Any], + transition_func: Callable, +) -> tuple[Array, Array, Array]: """Do a predict step but also return the input states as filtered states.""" new_states, new_upper_chols = kalman_predict( transition_func, diff --git a/src/skillmodels/likelihood_function_debug.py b/src/skillmodels/likelihood_function_debug.py index 6624050a..203d90f9 100644 --- a/src/skillmodels/likelihood_function_debug.py +++ b/src/skillmodels/likelihood_function_debug.py @@ -1,77 +1,83 @@ +"""Debug version of log-likelihood function that returns intermediate results.""" + import functools +from collections.abc import Callable +from typing import Any import jax import jax.numpy as jnp +from jax import Array from skillmodels.clipping import soft_clipping from skillmodels.kalman_filters import kalman_predict from skillmodels.kalman_filters_debug import kalman_update from skillmodels.parse_params import parse_params +from skillmodels.types import ( + Dimensions, + EstimationOptions, + Labels, + ParsedParams, + ParsingInfo, +) def log_likelihood( - params, - parsing_info, - measurements, - 
controls, - transition_func, - sigma_scaling_factor, - sigma_weights, - dimensions, - labels, - estimation_options, - is_measurement_iteration, - is_predict_iteration, - iteration_to_period, - observed_factors, -): + params: Array, + parsing_info: ParsingInfo, + measurements: Array, + controls: Array, + transition_func: Callable[..., Array], + sigma_scaling_factor: float, + sigma_weights: Array, + dimensions: Dimensions, + labels: Labels, + estimation_options: EstimationOptions, + is_measurement_iteration: Array, + is_predict_iteration: Array, + iteration_to_period: Array, + observed_factors: Array, +) -> dict[str, Any]: """Log likelihood of a skill formation model, returning debug data on top. This function is jax-differentiable and jax-jittable as long as all but the first argument are marked as static. - The function returns both a tuple (float, dict). The first entry is the aggregated - log likelihood value. The second additional information like the log likelihood - contribution of each individual. Note that the dict also contains the aggregated - value. Returning that value separately is only needed to calculate a gradient with - Jax. - Args: - params (jax.numpy.array): 1d array with model parameters. parsing_info (dict): - Contains information how to parse parameter vector. update_info - (pandas.DataFrame): Contains information about number of updates in - each period and purpose of each update. - measurements (jax.numpy.array): Array of shape (n_updates, n_obs) with data on - observed measurements. NaN if the measurement was not observed. - controls (jax.numpy.array): Array of shape (n_periods, n_obs, n_controls) - with observed control variables for the measurement equations. - transition_func (dict): Dict with the entries "func" (the actual transition - function) and "columns" (a dictionary mapping factors that are needed as - individual columns to positions in the factor array). 
- sigma_scaling_factor (float): A scaling factor that controls the spread of the - sigma points. Bigger means that sigma points are further apart. Depends on - the sigma_point algorithm chosen. - sigma_weights (jax.numpy.array): 1d array of length n_sigma with non-negative - sigma weights. - dimensions (dict): Dimensional information like n_states, n_periods, n_controls, - n_mixtures. See :ref:`dimensions`. - labels (dict): Dict of lists with labels for the model quantities like - factors, periods, controls, stagemap and stages. See :ref:`labels` - observed_factors (jax.numpy.array): Array of shape (n_periods, n_obs, - n_observed_factors) with data on the observed factors. + params: 1d array with model parameters. + parsing_info: Contains information how to parse parameter vector. + measurements: Array of shape (n_updates, n_obs) with data on observed + measurements. NaN if the measurement was not observed. + controls: Array of shape (n_periods, n_obs, n_controls) with observed + control variables for the measurement equations. + transition_func: The transition function. + sigma_scaling_factor: A scaling factor that controls the spread of the + sigma points. Bigger means that sigma points are further apart. + sigma_weights: 1d array of length n_sigma with non-negative sigma weights. + dimensions: Dimensional information like n_states, n_periods, n_controls, + n_mixtures. + labels: Labels for the model quantities like factors, periods, controls, + stagemap and stages. + estimation_options: Options for estimation including clipping bounds. + is_measurement_iteration: Boolean array indicating which iterations are + measurement updates. + is_predict_iteration: Boolean array indicating which iterations are predict + steps. + iteration_to_period: Array mapping iteration index to period. + observed_factors: Array of shape (n_periods, n_obs, n_observed_factors) with + data on the observed factors. Returns: - dict: All data relevant for debugging, e.g. 
the log likelihood contribution of - each Kalman update and additional information like the filtered states. + All data relevant for debugging, e.g. the log likelihood contribution of + each Kalman update and additional information like the filtered states. """ n_obs = measurements.shape[1] - states, upper_chols, log_mixture_weights, pardict = parse_params( - params, - parsing_info, - dimensions, - labels, - n_obs, + states, upper_chols, log_mixture_weights, parsed_params = parse_params( + params=params, + parsing_info=parsing_info, + dimensions=dimensions, + labels=labels, + n_obs=n_obs, ) carry = { @@ -82,9 +88,9 @@ def log_likelihood( loop_args = { "period": iteration_to_period, - "loadings": pardict["loadings"], - "control_params": pardict["controls"], - "meas_sds": pardict["meas_sds"], + "loadings": parsed_params.loadings, + "control_params": parsed_params.controls, + "meas_sds": parsed_params.meas_sds, "measurements": measurements, "is_measurement_iteration": is_measurement_iteration, "is_predict_iteration": is_predict_iteration, @@ -93,7 +99,7 @@ def log_likelihood( _body = functools.partial( _scan_body, controls=controls, - pardict=pardict, + parsed_params=parsed_params, sigma_scaling_factor=sigma_scaling_factor, sigma_weights=sigma_weights, transition_func=transition_func, @@ -106,10 +112,10 @@ def log_likelihood( # possible. 
clipped = soft_clipping( arr=static_out["loglikes"], - lower=estimation_options["clipping_lower_bound"], - upper=estimation_options["clipping_upper_bound"], - lower_hardness=estimation_options["clipping_lower_hardness"], - upper_hardness=estimation_options["clipping_upper_hardness"], + lower=estimation_options.clipping_lower_bound, + upper=estimation_options.clipping_upper_bound, + lower_hardness=estimation_options.clipping_lower_hardness, + upper_hardness=estimation_options.clipping_upper_hardness, ) value = clipped.sum() @@ -126,11 +132,11 @@ def log_likelihood( out["residual_sds"] = static_out["residual_sds"] initial_states, _, initial_log_mixture_weights, _ = parse_params( - params, - parsing_info, - dimensions, - labels, - n_obs, + params=params, + parsing_info=parsing_info, + dimensions=dimensions, + labels=labels, + n_obs=n_obs, ) out["initial_states"] = initial_states out["initial_log_mixture_weights"] = initial_log_mixture_weights @@ -142,15 +148,15 @@ def log_likelihood( def _scan_body( - carry, - loop_args, - controls, - pardict, - sigma_scaling_factor, - sigma_weights, - transition_func, - observed_factors, -): + carry: dict[str, Array], + loop_args: dict[str, Array], + controls: Array, + parsed_params: ParsedParams, + sigma_scaling_factor: float, + sigma_weights: Array, + transition_func: Callable[..., Array], + observed_factors: Array, +) -> tuple[dict[str, Array], dict[str, Any]]: # ================================================================================== # create arguments needed for update # ================================================================================== @@ -188,12 +194,12 @@ def _scan_body( "upper_chols": upper_chols, "sigma_scaling_factor": sigma_scaling_factor, "sigma_weights": sigma_weights, - "trans_coeffs": {k: arr[t] for k, arr in pardict["transition"].items()}, - "shock_sds": pardict["shock_sds"][t], - "anchoring_scaling_factors": pardict["anchoring_scaling_factors"][ + "trans_coeffs": {k: arr[t] for k, arr in 
parsed_params.transition.items()}, + "shock_sds": parsed_params.shock_sds[t], + "anchoring_scaling_factors": parsed_params.anchoring_scaling_factors[ jnp.array([t, t + 1]) ], - "anchoring_constants": pardict["anchoring_constants"][jnp.array([t, t + 1])], + "anchoring_constants": parsed_params.anchoring_constants[jnp.array([t, t + 1])], "observed_factors": observed_factors[t], } @@ -219,29 +225,37 @@ def _scan_body( return new_state, static_out -def _one_arg_measurement_update(kwargs): - out = kalman_update(**kwargs) - return out +def _one_arg_measurement_update( + kwargs: dict[str, Any], +) -> tuple[Array, Array, Array, Array, dict[str, Any]]: + return kalman_update(**kwargs) -def _one_arg_anchoring_update(kwargs): +def _one_arg_anchoring_update( + kwargs: dict[str, Any], +) -> tuple[Array, Array, Array, Array, dict[str, Any]]: _, _, new_log_mixture_weights, new_loglikes, debug_info = kalman_update(**kwargs) - out = ( + return ( kwargs["states"], kwargs["upper_chols"], new_log_mixture_weights, new_loglikes, debug_info, ) - return out -def _one_arg_no_predict(kwargs, transition_func): # noqa: ARG001 +def _one_arg_no_predict( + kwargs: dict[str, Any], + transition_func: Callable[..., Array], # noqa: ARG001 +) -> tuple[Array, Array, Array]: """Just return the states cond chols without any changes.""" return kwargs["states"], kwargs["upper_chols"], kwargs["states"] -def _one_arg_predict(kwargs, transition_func): +def _one_arg_predict( + kwargs: dict[str, Any], + transition_func: Callable[..., Array], +) -> tuple[Array, Array, Array]: """Do a predict step but also return the input states as filtered states.""" new_states, new_upper_chols = kalman_predict( transition_func, diff --git a/src/skillmodels/maximization_inputs.py b/src/skillmodels/maximization_inputs.py index 333804bd..3d376cb2 100644 --- a/src/skillmodels/maximization_inputs.py +++ b/src/skillmodels/maximization_inputs.py @@ -1,9 +1,15 @@ +"""Functions to create inputs for optimization of the 
log-likelihood.""" + import functools +from collections.abc import Callable +from typing import Any import jax import jax.numpy as jnp import numpy as np import pandas as pd +from jax import Array +from numpy.typing import NDArray import skillmodels.likelihood_function as lf import skillmodels.likelihood_function_debug as lfd @@ -14,80 +20,80 @@ get_constraints_dicts, ) from skillmodels.kalman_filters import calculate_sigma_scaling_factor_and_weights +from skillmodels.model_spec import ModelSpec from skillmodels.params_index import get_params_index from skillmodels.parse_params import create_parsing_info from skillmodels.process_data import process_data from skillmodels.process_debug_data import process_debug_data from skillmodels.process_model import process_model +from skillmodels.types import ParsingInfo, ProcessedModel jax.config.update("jax_enable_x64", True) # noqa: FBT003 -def get_maximization_inputs(model_dict, data, split_dataset=1): +def get_maximization_inputs( + model_spec: ModelSpec, + data: pd.DataFrame, + split_dataset: int = 1, +) -> dict[str, Any]: """Create inputs for optimagic's maximize function. Args: - model_dict (dict): The model specification. See: :ref:`model_specs` - data (DataFrame): dataset in long format. - split_dataset(Int): Controls into how many sclices to split the dataset + model_spec: The model specification. See: :ref:`model_specs` + data: Dataset in long format. + split_dataset: Controls into how many slices to split the dataset during the gradient computation. 
Returns a dictionary with keys: - loglike (function): A jax jitted function that takes an optimagic-style + loglike: A jax jitted function that takes an optimagic-style params dataframe as only input and returns a dict with entries: - "value": The scalar log likelihood - "contributions": An array with the log likelihood per observation - debug_loglike (function): Similar to loglike, with the following differences: + debug_loglike: Similar to loglike, with the following differences: - It is not jitted and thus faster on the first call and debuggable - It will add intermediate results as additional entries in the returned dictionary. Those can be used for debugging and plotting. - gradient (function): The gradient of the scalar log likelihood + gradient: The gradient of the scalar log likelihood function with respect to the parameters. - loglike_and_gradient (function): Combination of loglike and + loglike_and_gradient: Combination of loglike and loglike_gradient that is faster than calling the two functions separately. - constraints (list): List of optimagic constraints that are implied by the + constraints: List of optimagic constraints that are implied by the model specification. - params_template (pd.DataFrame): Parameter DataFrame with correct index and + params_template: Parameter DataFrame with correct index and bounds. The value column is empty except for the fixed constraints, which are set including the bounds. - data_aug (pd.DataFrame): DataFrame with augmented data. If model contains + data_aug: DataFrame with augmented data. 
If model contains endogenous factors, we double up the number of periods in order to add - - """ - model = process_model(model_dict) + processed_model = process_model(model_spec) p_index = get_params_index( - update_info=model["update_info"], - labels=model["labels"], - dimensions=model["dimensions"], - transition_info=model["transition_info"], - endogenous_factors_info=model["endogenous_factors_info"], + update_info=processed_model.update_info, + labels=processed_model.labels, + dimensions=processed_model.dimensions, + transition_info=processed_model.transition_info, + endogenous_factors_info=processed_model.endogenous_factors_info, ) parsing_info = create_parsing_info( params_index=p_index, - update_info=model["update_info"], - labels=model["labels"], - anchoring=model["anchoring"], - has_endogenous_factors=model["endogenous_factors_info"][ - "has_endogenous_factors" - ], + update_info=processed_model.update_info, + labels=processed_model.labels, + anchoring=processed_model.anchoring, + has_endogenous_factors=processed_model.endogenous_factors_info.has_endogenous_factors, ) processed_data = process_data( df=data, - has_endogenous_factors=model["endogenous_factors_info"][ - "has_endogenous_factors" - ], - labels=model["labels"], - update_info=model["update_info"], - anchoring_info=model["anchoring"], + has_endogenous_factors=processed_model.endogenous_factors_info.has_endogenous_factors, + labels=processed_model.labels, + update_info=processed_model.update_info, + anchoring_info=processed_model.anchoring, purpose="estimation", ) sigma_scaling_factor, sigma_weights = calculate_sigma_scaling_factor_and_weights( - model["dimensions"]["n_latent_factors"], - model["estimation_options"]["sigma_points_scale"], + n_states=processed_model.dimensions.n_latent_factors, + kappa=processed_model.estimation_options.sigma_points_scale, ) partialed_get_jnp_params_vec = functools.partial( @@ -107,7 +113,7 @@ def get_maximization_inputs(model_dict, data, split_dataset=1): 
measurements=processed_data["measurements"], controls=processed_data["controls"], observed_factors=processed_data["observed_factors"], - model=model, + model=processed_model, sigma_weights=sigma_weights, sigma_scaling_factor=sigma_scaling_factor, ) @@ -116,15 +122,17 @@ def get_maximization_inputs(model_dict, data, split_dataset=1): _jitted_loglikeobs = jax.jit(partialed_loglikes["llo"]) _gradient = jax.jit(jax.grad(partialed_loglikes["ll"])) - def loglike(params): + def loglike(params: pd.DataFrame) -> float: params_vec = partialed_get_jnp_params_vec(params) return float(_jitted_loglike(params_vec)) - def loglikeobs(params): + def loglikeobs(params: pd.DataFrame) -> NDArray[np.floating]: params_vec = partialed_get_jnp_params_vec(params) return _to_numpy(_jitted_loglikeobs(params_vec)) - def loglike_and_gradient(params): + def loglike_and_gradient( + params: pd.DataFrame, + ) -> tuple[float, NDArray[np.floating]]: params_vec = partialed_get_jnp_params_vec(params) crit = float(_jitted_loglike(params_vec)) n_obs = processed_data["measurements"].shape[1] @@ -150,20 +158,20 @@ def loglike_and_gradient(params): grad = _to_numpy(_grad) return crit, grad - def debug_loglike(params): + def debug_loglike(params: pd.DataFrame) -> dict[str, Any]: params_vec = partialed_get_jnp_params_vec(params) jax_output = partialed_loglikes["debug_ll"](params_vec) tmp = _to_numpy(jax_output) tmp["value"] = float(tmp["value"]) - return process_debug_data(debug_data=tmp, model=model) + return process_debug_data(debug_data=tmp, model=processed_model) _constraints_dicts = get_constraints_dicts( - dimensions=model["dimensions"], - labels=model["labels"], - anchoring_info=model["anchoring"], - update_info=model["update_info"], - normalizations=model["normalizations"], - endogenous_factors_info=model["endogenous_factors_info"], + dimensions=processed_model.dimensions, + labels=processed_model.labels, + anchoring_info=processed_model.anchoring, + update_info=processed_model.update_info, + 
normalizations=processed_model.normalizations, + endogenous_factors_info=processed_model.endogenous_factors_info, ) constraints = constraints_dicts_to_om(_constraints_dicts) @@ -171,16 +179,15 @@ def debug_loglike(params): params_template = pd.DataFrame(columns=["value"], index=p_index) params_template = add_bounds( params=params_template, - bounds_distance=model["estimation_options"]["bounds_distance"], + bounds_distance=processed_model.estimation_options.bounds_distance, ) params_template = enforce_fixed_constraints( params_template=params_template, constraints_dicts=_constraints_dicts, ) - assert params_template.index.equals(p_index), ( - "params_template index is not equal to p_index" - ) - out = { + if not params_template.index.equals(p_index): + raise ValueError("params_template index is not equal to p_index") + return { "loglike": loglike, "loglikeobs": loglikeobs, "debug_loglike": debug_loglike, @@ -189,20 +196,18 @@ def debug_loglike(params): "params_template": params_template, } - return out - def _partial_some_log_likelihood( - fun, - parsing_info, - measurements, - controls, - observed_factors, - model, - sigma_weights, - sigma_scaling_factor, -): - update_info = model["update_info"] + fun: Callable, + parsing_info: ParsingInfo, + measurements: Array, + controls: Array, + observed_factors: Array, + model: ProcessedModel, + sigma_weights: Array, + sigma_scaling_factor: Array, +) -> Callable: + update_info = model.update_info is_measurement_iteration = (update_info["purpose"] == "measurement").to_numpy() _aug_periods = pd.Series( update_info.index.get_level_values("aug_period").to_numpy() @@ -215,24 +220,25 @@ def _partial_some_log_likelihood( # are endogenous factors, the last aug_period is found at index -2 (there should not # be measurements for endogenous factors in the "second half" of the last period). 
last_aug_period = ( - model["labels"]["aug_periods"][-2] - if parsing_info["has_endogenous_factors"] - else model["labels"]["aug_periods"][-1] + model.labels.aug_periods[-2] + if parsing_info.has_endogenous_factors + else model.labels.aug_periods[-1] ) iteration_to_period = _aug_periods.replace(last_aug_period, -1).to_numpy() - assert max(iteration_to_period) == last_aug_period - 1 + if max(iteration_to_period) != last_aug_period - 1: + raise ValueError("Unexpected iteration_to_period configuration") return functools.partial( fun, parsing_info=parsing_info, measurements=measurements, controls=controls, - transition_func=model["transition_info"]["func"], + transition_func=model.transition_info.func, sigma_scaling_factor=sigma_scaling_factor, sigma_weights=sigma_weights, - dimensions=model["dimensions"], - labels=model["labels"], - estimation_options=model["estimation_options"], + dimensions=model.dimensions, + labels=model.labels, + estimation_options=model.estimation_options, is_measurement_iteration=is_measurement_iteration, is_predict_iteration=is_predict_iteration, iteration_to_period=iteration_to_period, @@ -240,7 +246,7 @@ def _partial_some_log_likelihood( ) -def _to_numpy(obj): +def _to_numpy(obj: Any) -> Any: if isinstance(obj, dict): res = {} for key, value in obj.items(): @@ -257,7 +263,7 @@ def _to_numpy(obj): return res -def _get_jnp_params_vec(params, target_index): +def _get_jnp_params_vec(params: pd.DataFrame, target_index: pd.MultiIndex) -> Array: if set(params.index) != set(target_index): additional_entries = params.index.difference(target_index).tolist() missing_entries = target_index.difference(params.index).tolist() @@ -268,5 +274,4 @@ def _get_jnp_params_vec(params, target_index): msg += f"Your params have missing entries: {missing_entries}. 
" raise ValueError(msg) - vec = jnp.array(params.reindex(target_index)["value"].to_numpy()) - return vec + return jnp.array(params.reindex(target_index)["value"].to_numpy()) diff --git a/src/skillmodels/model_spec.py b/src/skillmodels/model_spec.py new file mode 100644 index 00000000..b0759cd8 --- /dev/null +++ b/src/skillmodels/model_spec.py @@ -0,0 +1,445 @@ +"""Strongly-typed model specification dataclasses. + +This module provides frozen dataclasses for defining model specifications +in a type-safe, immutable manner. All collections use immutable types +(tuples, MappingProxyType) to ensure the specification cannot be accidentally +modified. +""" + +from collections.abc import Callable, Mapping +from dataclasses import dataclass, field, replace +from types import MappingProxyType +from typing import Any, Self + +from skillmodels.types import _make_immutable, ensure_containers_are_immutable + + +@dataclass(frozen=True) +class Normalizations: + """Normalizations for factor identification. + + Attributes: + loadings: Per-period loading normalizations. Each element is a mapping + from variable name to fixed loading value. + intercepts: Per-period intercept normalizations. Each element is a mapping + from variable name to fixed intercept value. + + """ + + loadings: tuple[Mapping[str, float], ...] + intercepts: tuple[Mapping[str, float], ...] + + def __post_init__(self) -> None: # noqa: D105 + object.__setattr__( + self, + "loadings", + tuple(_make_immutable(dict(m)) for m in self.loadings), + ) + object.__setattr__( + self, + "intercepts", + tuple(_make_immutable(dict(m)) for m in self.intercepts), + ) + + def to_dict(self) -> dict: + """Convert to dictionary for backwards compatibility.""" + return { + "loadings": [dict(x) for x in self.loadings], + "intercepts": [dict(x) for x in self.intercepts], + } + + +@dataclass(frozen=True) +class FactorSpec: + """Specification for a single latent factor. + + Attributes: + measurements: Per-period measurement variables. 
Each element is a tuple + of variable names measured in that period. + normalizations: Identification normalizations for this factor. + is_endogenous: Whether this factor is endogenous. + is_correction: Whether this factor is a correction factor. + transition_function: Optional transition function for this factor. + Can be a string (referencing built-in functions) or a callable. + + """ + + measurements: tuple[tuple[str, ...], ...] + normalizations: Normalizations | None = None + is_endogenous: bool = False + is_correction: bool = False + transition_function: str | Callable | None = None + + def to_dict(self) -> dict: + """Convert to dictionary for backwards compatibility.""" + result: dict = { + "measurements": [list(m) for m in self.measurements], + "is_endogenous": self.is_endogenous, + "is_correction": self.is_correction, + } + if self.normalizations is not None: + result["normalizations"] = self.normalizations.to_dict() + if self.transition_function is not None: + result["transition_function"] = self.transition_function + return result + + def with_transition_function(self, func: str | Callable) -> Self: + """Return a new FactorSpec with the given transition function.""" + return replace(self, transition_function=func) + + def with_normalizations(self, normalizations: Normalizations) -> Self: + """Return a new FactorSpec with the given normalizations.""" + return replace(self, normalizations=normalizations) + + +@dataclass(frozen=True) +class EstimationOptionsSpec: + """Options for model estimation. + + Attributes: + robust_bounds: Whether to use robust bounds. + bounds_distance: Distance for bounds. + n_mixtures: Number of mixture components. + sigma_points_scale: Scaling factor for sigma points in unscented transform. + clipping_lower_bound: Lower bound for soft clipping. + clipping_upper_bound: Upper bound for soft clipping (None for no upper bound). + clipping_lower_hardness: Hardness of lower clipping. 
+ clipping_upper_hardness: Hardness of upper clipping. + + """ + + robust_bounds: bool = True + bounds_distance: float = 1e-3 + n_mixtures: int = 1 + sigma_points_scale: float = 2 + clipping_lower_bound: float = -1e30 + clipping_upper_bound: float | None = None + clipping_lower_hardness: float = 1 + clipping_upper_hardness: float = 1 + + def to_dict(self) -> dict: + """Convert to dictionary for backwards compatibility.""" + result = { + "robust_bounds": self.robust_bounds, + "bounds_distance": self.bounds_distance, + "n_mixtures": self.n_mixtures, + "sigma_points_scale": self.sigma_points_scale, + "clipping_lower_bound": self.clipping_lower_bound, + "clipping_lower_hardness": self.clipping_lower_hardness, + "clipping_upper_hardness": self.clipping_upper_hardness, + } + if self.clipping_upper_bound is not None: + result["clipping_upper_bound"] = self.clipping_upper_bound + return result + + +@dataclass(frozen=True) +class AnchoringSpec: + """Specification for anchoring latent factors to outcomes. + + Attributes: + outcomes: Mapping from factor names to outcome variable names. + free_controls: Whether control coefficients are free in anchoring equations. + free_constant: Whether the constant is free in anchoring equations. + free_loadings: Whether loadings are free in anchoring equations. + ignore_constant_when_anchoring: Whether to ignore constant when anchoring. 
+ + """ + + outcomes: Mapping[str, str] = field(default_factory=dict) + free_controls: bool = False + free_constant: bool = False + free_loadings: bool = False + ignore_constant_when_anchoring: bool = False + + def __post_init__(self) -> None: # noqa: D105 + object.__setattr__( + self, "outcomes", ensure_containers_are_immutable(self.outcomes) + ) + + def to_dict(self) -> dict: + """Convert to dictionary for backwards compatibility.""" + return { + "outcomes": dict(self.outcomes), + "free_controls": self.free_controls, + "free_constant": self.free_constant, + "free_loadings": self.free_loadings, + "ignore_constant_when_anchoring": self.ignore_constant_when_anchoring, + } + + +@dataclass(frozen=True, init=False) +class ModelSpec: + """Complete model specification. + + This is the main strongly-typed container for model specifications. + All fields are immutable to prevent accidental modifications. + + Attributes: + factors: Mapping from factor name to FactorSpec. + observed_factors: Tuple of observed factor variable names. + controls: Tuple of control variable names. + stagemap: Stage mapping for transition functions. + anchoring: Anchoring specification. + estimation_options: Estimation tuning parameters. + + """ + + _factors: MappingProxyType[str, FactorSpec] + observed_factors: tuple[str, ...] = () + controls: tuple[str, ...] = () + stagemap: tuple[int, ...] | None = None + anchoring: AnchoringSpec | None = None + estimation_options: EstimationOptionsSpec | None = None + + def __init__( + self, + factors: Mapping[str, FactorSpec], + observed_factors: tuple[str, ...] = (), + controls: tuple[str, ...] = (), + stagemap: tuple[int, ...] 
| None = None, + anchoring: AnchoringSpec | None = None, + estimation_options: EstimationOptionsSpec | None = None, + ) -> None: + """Create ModelSpec, wrapping factors dict in MappingProxyType.""" + object.__setattr__(self, "_factors", ensure_containers_are_immutable(factors)) + object.__setattr__(self, "observed_factors", observed_factors) + object.__setattr__(self, "controls", controls) + object.__setattr__(self, "stagemap", stagemap) + object.__setattr__(self, "anchoring", anchoring) + object.__setattr__(self, "estimation_options", estimation_options) + + @classmethod + def from_dict(cls, d: dict[str, Any]) -> Self: + """Create a ModelSpec from a dictionary (e.g. loaded from YAML). + + Args: + d: A dictionary with keys like "factors", "observed_factors", + "controls", "stagemap", "anchoring", "estimation_options". + + Returns: + A ModelSpec instance. + + """ + factors = {} + for name, spec in d["factors"].items(): + normalizations = None + if "normalizations" in spec: + nd = spec["normalizations"] + if "intercepts" not in nd: + n_periods = len(nd.get("loadings", [])) + nd["intercepts"] = [{} for _ in range(n_periods)] + normalizations = Normalizations( + loadings=tuple(nd["loadings"]), + intercepts=tuple(nd["intercepts"]), + ) + factors[name] = FactorSpec( + measurements=tuple(tuple(m) for m in spec["measurements"]), + normalizations=normalizations, + is_endogenous=spec.get("is_endogenous", False), + is_correction=spec.get("is_correction", False), + transition_function=spec.get("transition_function"), + ) + + anchoring = None + if "anchoring" in d: + ad = d["anchoring"] + anchoring = AnchoringSpec( + outcomes=ad.get("outcomes", {}), + free_controls=ad.get("free_controls", False), + free_constant=ad.get("free_constant", False), + free_loadings=ad.get("free_loadings", False), + ignore_constant_when_anchoring=ad.get( + "ignore_constant_when_anchoring", False + ), + ) + + estimation = None + if "estimation_options" in d: + ed = d["estimation_options"] + estimation 
= EstimationOptionsSpec( + robust_bounds=ed.get("robust_bounds", True), + bounds_distance=ed.get("bounds_distance", 1e-3), + n_mixtures=ed.get("n_mixtures", 1), + sigma_points_scale=ed.get("sigma_points_scale", 2), + clipping_lower_bound=ed.get("clipping_lower_bound", -1e30), + clipping_upper_bound=ed.get("clipping_upper_bound"), + clipping_lower_hardness=ed.get("clipping_lower_hardness", 1), + clipping_upper_hardness=ed.get("clipping_upper_hardness", 1), + ) + + stagemap = d.get("stagemap") + + return cls( + factors=factors, + observed_factors=tuple(d.get("observed_factors", [])), + controls=tuple(d.get("controls", [])), + stagemap=tuple(stagemap) if stagemap is not None else None, + anchoring=anchoring, + estimation_options=estimation, + ) + + @property + def factors(self) -> MappingProxyType[str, FactorSpec]: + """Immutable mapping of factor names to specifications.""" + return self._factors + + def _replace(self, **changes: Any) -> Self: + """Return a new ModelSpec with the specified fields replaced.""" + return type(self)( + factors=changes.get("factors", self.factors), + observed_factors=changes.get("observed_factors", self.observed_factors), + controls=changes.get("controls", self.controls), + stagemap=changes.get("stagemap", self.stagemap), + anchoring=changes.get("anchoring", self.anchoring), + estimation_options=changes.get( + "estimation_options", self.estimation_options + ), + ) + + def to_dict(self) -> dict: + """Convert to dictionary for backwards compatibility with skillmodels. + + Returns: + Mutable dictionary in the format expected by skillmodels. 
+ + """ + result: dict = { + "factors": {name: spec.to_dict() for name, spec in self.factors.items()}, + "observed_factors": list(self.observed_factors), + } + if self.controls: + result["controls"] = list(self.controls) + if self.stagemap is not None: + result["stagemap"] = list(self.stagemap) + if self.anchoring is not None: + result["anchoring"] = self.anchoring.to_dict() + if self.estimation_options is not None: + result["estimation_options"] = self.estimation_options.to_dict() + return result + + def with_transition_functions( + self, + transition_functions: dict[str, str | Callable], + ) -> Self: + """Return a new ModelSpec with transition functions added to factors. + + Args: + transition_functions: Mapping from factor name to transition function. + Can be strings (referencing built-in functions) or callables. + + Returns: + New ModelSpec with transition functions set on factors. + + Raises: + ValueError: If transition_functions keys don't match factor names. + + """ + if set(transition_functions.keys()) != set(self.factors.keys()): + msg = ( + f"Transition function keys {set(transition_functions.keys())} " + f"do not match factor keys {set(self.factors.keys())}" + ) + raise ValueError(msg) + + new_factors = { + name: spec.with_transition_function(transition_functions[name]) + for name, spec in self.factors.items() + } + return self._replace(factors=new_factors) + + def with_added_factor( + self, + name: str, + spec: FactorSpec, + ) -> Self: + """Return a new ModelSpec with an additional factor. + + Args: + name: Name of the new factor. + spec: Specification for the new factor. + + Returns: + New ModelSpec with the additional factor. + + """ + new_factors = dict(self.factors) + new_factors[name] = spec + return self._replace(factors=new_factors) + + def with_added_observed_factors( + self, + *names: str, + ) -> Self: + """Return a new ModelSpec with additional observed factors. + + Args: + *names: Names of additional observed factors. 
+ + Returns: + New ModelSpec with the additional observed factors. + + """ + return self._replace( + observed_factors=self.observed_factors + names, + ) + + def with_estimation_options( + self, + estimation_options: EstimationOptionsSpec, + ) -> Self: + """Return a new ModelSpec with the given estimation options. + + Args: + estimation_options: New estimation options. + + Returns: + New ModelSpec with the updated estimation options. + + """ + return self._replace(estimation_options=estimation_options) + + def with_anchoring( + self, + anchoring: AnchoringSpec, + ) -> Self: + """Return a new ModelSpec with the given anchoring specification. + + Args: + anchoring: New anchoring specification. + + Returns: + New ModelSpec with the updated anchoring. + + """ + return self._replace(anchoring=anchoring) + + def with_controls( + self, + controls: tuple[str, ...], + ) -> Self: + """Return a new ModelSpec with the given controls. + + Args: + controls: New control variable names. + + Returns: + New ModelSpec with the updated controls. + + """ + return self._replace(controls=controls) + + def with_stagemap( + self, + stagemap: tuple[int, ...], + ) -> Self: + """Return a new ModelSpec with the given stagemap. + + Args: + stagemap: New stage mapping. + + Returns: + New ModelSpec with the updated stagemap. 
+ + """ + return self._replace(stagemap=stagemap) diff --git a/src/skillmodels/params_index.py b/src/skillmodels/params_index.py index 586744f3..c3a19587 100644 --- a/src/skillmodels/params_index.py +++ b/src/skillmodels/params_index.py @@ -1,9 +1,22 @@ +"""Functions to construct the parameter index for model estimation.""" + import pandas as pd +from skillmodels.types import ( + Dimensions, + EndogenousFactorsInfo, + Labels, + TransitionInfo, +) + def get_params_index( - update_info, labels, dimensions, transition_info, endogenous_factors_info -): + update_info: pd.DataFrame, + labels: Labels, + dimensions: Dimensions, + transition_info: TransitionInfo, + endogenous_factors_info: EndogenousFactorsInfo, +) -> pd.MultiIndex: """Generate index for the params_df for optimagic. The index has four levels. The first is the parameter category. The second is the @@ -12,60 +25,59 @@ def get_params_index( it contains an empty string. Args: - update_info (pandas.DataFrame): DataFrame with one row per Kalman update needed + update_info: DataFrame with one row per Kalman update needed in the likelihood function. See :ref:`update_info`. - labels (dict): Dict of lists with labels for the model quantities like - factors, periods, controls, stagemap and stages. See :ref:`labels` - options (dict): Tuning parameters for the estimation. - See :ref:`estimation_options`. - transition_info (dict): Information about the transition equations. - endogenous_factors_info (dict): Information about endogenous factors, if any. + labels: Labels for model quantities. + dimensions: Dimensional information. + transition_info: Information about the transition equations. + endogenous_factors_info: Information about endogenous factors, if any. 
Returns: params_index (pd.MultiIndex) """ ind_tups = get_control_params_index_tuples( - controls=labels["controls"], update_info=update_info + controls=labels.controls, update_info=update_info ) ind_tups += get_loadings_index_tuples( - factors=labels["latent_factors"], update_info=update_info + factors=labels.latent_factors, update_info=update_info ) ind_tups += get_meas_sds_index_tuples(update_info=update_info) ind_tups += get_shock_sds_index_tuples( - aug_periods=labels["aug_periods"], - factors=labels["latent_factors"], - has_endogenous_factors=endogenous_factors_info["has_endogenous_factors"], + aug_periods=labels.aug_periods, + factors=labels.latent_factors, + has_endogenous_factors=endogenous_factors_info.has_endogenous_factors, ) ind_tups += initial_mean_index_tuples( - n_mixtures=dimensions["n_mixtures"], - factors=labels["latent_factors"], + n_mixtures=dimensions.n_mixtures, + factors=labels.latent_factors, ) - ind_tups += get_mixture_weights_index_tuples(n_mixtures=dimensions["n_mixtures"]) + ind_tups += get_mixture_weights_index_tuples(n_mixtures=dimensions.n_mixtures) ind_tups += get_initial_cholcovs_index_tuples( - n_mixtures=dimensions["n_mixtures"], - factors=labels["latent_factors"], + n_mixtures=dimensions.n_mixtures, + factors=labels.latent_factors, ) ind_tups += get_transition_index_tuples( transition_info=transition_info, - aug_periods=labels["aug_periods"], - has_endogenous_factors=endogenous_factors_info["has_endogenous_factors"], + aug_periods=labels.aug_periods, + has_endogenous_factors=endogenous_factors_info.has_endogenous_factors, ) - index = pd.MultiIndex.from_tuples( + return pd.MultiIndex.from_tuples( ind_tups, names=["category", "aug_period", "name1", "name2"], ) - return index -def get_control_params_index_tuples(controls, update_info): +def get_control_params_index_tuples( + controls: tuple[str, ...], + update_info: pd.DataFrame, +) -> list[tuple[str, int, str, str]]: """Index tuples for control coeffs. 
Args: - controls (list): List of lists. There is one sublist per period which contains - the names of the control variables in that period. Constant not included. - update_info (pandas.DataFrame): DataFrame with one row per Kalman update needed + controls: Names of the control variables. Constant not included. + update_info: DataFrame with one row per Kalman update needed in the likelihood function. See :ref:`update_info`. """ @@ -76,19 +88,19 @@ def get_control_params_index_tuples(controls, update_info): return ind_tups -def get_loadings_index_tuples(factors, update_info): +def get_loadings_index_tuples( + factors: tuple[str, ...], + update_info: pd.DataFrame, +) -> list[tuple[str, int, str, str]]: """Index tuples for loading. Args: - factors (list): The latent factors of the model - update_info (pandas.DataFrame): DataFrame with one row per Kalman update needed + factors: The latent factors of the model. + update_info: DataFrame with one row per Kalman update needed in the likelihood function. See :ref:`update_info`. - Returns: - ind_tups (list) - """ - mask = update_info[factors].to_numpy() + mask = update_info[list(factors)].to_numpy() ind_tups = [] for i, (aug_period, meas) in enumerate(update_info.index): for f, factor in enumerate(factors): @@ -97,16 +109,15 @@ def get_loadings_index_tuples(factors, update_info): return ind_tups -def get_meas_sds_index_tuples(update_info): +def get_meas_sds_index_tuples( + update_info: pd.DataFrame, +) -> list[tuple[str, int, str, str]]: """Index tuples for meas_sd. Args: - update_info (pandas.DataFrame): DataFrame with one row per Kalman update needed + update_info: DataFrame with one row per Kalman update needed in the likelihood function. See :ref:`update_info`. 
- Returns: - ind_tups (list) - """ ind_tups = [] for aug_period, meas in update_info.index: @@ -114,15 +125,18 @@ def get_meas_sds_index_tuples(update_info): return ind_tups -def get_shock_sds_index_tuples(aug_periods, factors, has_endogenous_factors): +def get_shock_sds_index_tuples( + aug_periods: tuple[int, ...], + factors: tuple[str, ...], + *, + has_endogenous_factors: bool, +) -> list[tuple[str, int, str, str]]: """Index tuples for shock_sd. Args: - aug_periods (list): The augmented periods of the model. - factors (list): The latent factors of the model. - - Returns: - ind_tups (list) + aug_periods: The augmented periods of the model. + factors: The latent factors of the model. + has_endogenous_factors: Whether the model has endogenous factors. """ end = -2 if has_endogenous_factors else -1 @@ -133,15 +147,15 @@ def get_shock_sds_index_tuples(aug_periods, factors, has_endogenous_factors): return ind_tups -def initial_mean_index_tuples(n_mixtures, factors): +def initial_mean_index_tuples( + n_mixtures: int, + factors: tuple[str, ...], +) -> list[tuple[str, int, str, str]]: """Index tuples for initial_mean. Args: - n_mixtures (int): Number of elements in the mixture distribution of the factors. - factors (list): The latent factors of the model - - Returns: - ind_tups (list) + n_mixtures: Number of elements in the mixture distribution of the factors. + factors: The latent factors of the model. """ ind_tups = [] @@ -151,14 +165,13 @@ def initial_mean_index_tuples(n_mixtures, factors): return ind_tups -def get_mixture_weights_index_tuples(n_mixtures): +def get_mixture_weights_index_tuples( + n_mixtures: int, +) -> list[tuple[str, int, str, str]]: """Index tuples for mixture_weight. Args: - n_mixtures (int): Number of elements in the mixture distribution of the factors. - - Returns: - ind_tups (list) + n_mixtures: Number of elements in the mixture distribution of the factors. 
""" ind_tups = [] @@ -167,15 +180,15 @@ def get_mixture_weights_index_tuples(n_mixtures): return ind_tups -def get_initial_cholcovs_index_tuples(n_mixtures, factors): +def get_initial_cholcovs_index_tuples( + n_mixtures: int, + factors: tuple[str, ...], +) -> list[tuple[str, int, str, str]]: """Index tuples for initial_cov. Args: - n_mixtures (int): Number of elements in the mixture distribution of the factors. - factors (list): The latent factors of the model - - Returns: - ind_tups (list) + n_mixtures: Number of elements in the mixture distribution of the factors. + factors: The latent factors of the model. """ ind_tups = [] @@ -194,23 +207,23 @@ def get_initial_cholcovs_index_tuples(n_mixtures, factors): return ind_tups -def get_transition_index_tuples(transition_info, aug_periods, has_endogenous_factors): +def get_transition_index_tuples( + transition_info: TransitionInfo, + aug_periods: tuple[int, ...], + *, + has_endogenous_factors: bool, +) -> list[tuple[str, int, str, str]]: """Index tuples for transition equation coefficients. Args: - latent_factors (list): The latent factors of the model - all_factors (list): The latent and observed factors of the model. - aug_periods (list): The augmented periods of the model - transition_names (list): name of the transition equation of each factor - has_endogenous_factors (bool): Whether the model has endogenous factors. - - Returns: - ind_tups (list) + transition_info: Information about transition equations. + aug_periods: The augmented periods of the model. + has_endogenous_factors: Whether the model has endogenous factors. 
""" end = -2 if has_endogenous_factors else -1 ind_tups = [] - for factor, names in transition_info["param_names"].items(): + for factor, names in transition_info.param_names.items(): for aug_period in aug_periods[:end]: for name in names: ind_tups.append(("transition", aug_period, factor, name)) diff --git a/src/skillmodels/parse_params.py b/src/skillmodels/parse_params.py index 2515be2d..06b4c3ef 100644 --- a/src/skillmodels/parse_params.py +++ b/src/skillmodels/parse_params.py @@ -1,89 +1,116 @@ +"""Functions to parse parameter vectors into structured dictionaries.""" + import warnings import jax.numpy as jnp import numpy as np import pandas as pd +from jax import Array + +from skillmodels.types import ( + Anchoring, + Dimensions, + Labels, + LoadingsParsingInfo, + ParsedParams, + ParsingInfo, +) def create_parsing_info( - params_index, update_info, labels, anchoring, has_endogenous_factors -): - """Create a dictionary with information how the parameter vector has to be parsed. + params_index: pd.MultiIndex, + update_info: pd.DataFrame, + labels: Labels, + anchoring: Anchoring, + *, + has_endogenous_factors: bool, +) -> ParsingInfo: + """Create a dataclass with information how the parameter vector has to be parsed. Args: - params_index (pandas.MultiIndex): It has the levels ["category", "aug_period", + params_index: It has the levels ["category", "aug_period", "name1", "name2"] - update_info (pandas.DataFrame): DataFrame with one row per Kalman update needed + update_info: DataFrame with one row per Kalman update needed in the likelihood function. See :ref:`update_info`. - labels (dict): Dict of lists with labels for the model quantities like + labels: Labels dataclass with labels for the model quantities like factors, periods, controls, stagemap and stages. See :ref:`labels` - anchoring (dict): Dictionary with anchoring settings. - has_endogenous_factors (bool): Whether the model includes endogenous factors. 
+ anchoring: Anchoring dataclass with anchoring settings. + has_endogenous_factors: Whether the model includes endogenous factors. Returns: - dict: dictionary that maps model quantities to positions or slices of the + ParsingInfo dataclass that maps model quantities to positions or slices of the parameter vector. """ range_sr = pd.Series(data=np.arange(len(params_index)), index=params_index) - parsing_info = {} - - simple_ones = [ - "initial_states", - "initial_cholcovs", - "mixture_weights", - "controls", - "meas_sds", - "shock_sds", - ] - - for quantity in simple_ones: - parsing_info[quantity] = _get_positional_selector_from_loc(range_sr, quantity) + # Simple quantities + initial_states = _get_positional_selector_from_loc( + range_sr=range_sr, loc="initial_states" + ) + initial_cholcovs = _get_positional_selector_from_loc( + range_sr=range_sr, loc="initial_cholcovs" + ) + mixture_weights = _get_positional_selector_from_loc( + range_sr=range_sr, loc="mixture_weights" + ) + controls = _get_positional_selector_from_loc(range_sr=range_sr, loc="controls") + meas_sds = _get_positional_selector_from_loc(range_sr=range_sr, loc="meas_sds") + shock_sds = _get_positional_selector_from_loc(range_sr=range_sr, loc="shock_sds") # loadings: - mask = update_info[labels["latent_factors"]].to_numpy() + mask = update_info[list(labels.latent_factors)].to_numpy() helper = np.arange(mask.size).reshape(mask.shape) flat_indices = helper[mask] - parsing_info["loadings"] = { - "slice": _get_positional_selector_from_loc(range_sr, "loadings"), - "flat_indices": jnp.array(flat_indices), - "shape": mask.shape, - "size": mask.size, - } - - # "trans_coeffs" - pos_dict = {} - for factor in labels["latent_factors"]: - helper = pd.DataFrame(index=params_index) - loc = helper.query(f"category == 'transition' & name1 == '{factor}'").index - pos_dict[factor] = _get_positional_selector_from_loc(range_sr, loc) + loadings = LoadingsParsingInfo( + slice=_get_positional_selector_from_loc(range_sr=range_sr, 
loc="loadings"), + flat_indices=jnp.array(flat_indices), + shape=mask.shape, + size=mask.size, + ) - parsing_info["transition"] = pos_dict + # transition coefficients + transition: dict[str, Array | slice] = {} + for factor in list(labels.latent_factors): + helper_df = pd.DataFrame(index=params_index) + loc = helper_df.query(f"category == 'transition' & name1 == '{factor}'").index + transition[factor] = _get_positional_selector_from_loc( + range_sr=range_sr, loc=loc + ) # anchoring_scaling_factors - is_free_loading = update_info[labels["latent_factors"]].to_numpy() + is_free_loading = update_info[list(labels.latent_factors)].to_numpy() is_anchoring = (update_info["purpose"] == "anchoring").to_numpy().reshape(-1, 1) is_anchoring_loading = jnp.array(is_free_loading & is_anchoring) - parsing_info["is_anchoring_loading"] = is_anchoring_loading - parsing_info["is_anchored_factor"] = jnp.array( - update_info.query("purpose == 'anchoring'")[labels["latent_factors"]].any( + is_anchored_factor = jnp.array( + update_info.query("purpose == 'anchoring'")[list(labels.latent_factors)].any( axis=0, ), ) - parsing_info["is_anchoring_update"] = is_anchoring.flatten() - parsing_info["ignore_constant_when_anchoring"] = anchoring[ - "ignore_constant_when_anchoring" - ] - - # Add has_endogenous_factors to parsing_info - parsing_info["has_endogenous_factors"] = has_endogenous_factors - - return parsing_info + is_anchoring_update = jnp.array(is_anchoring.flatten()) + + return ParsingInfo( + initial_states=initial_states, + initial_cholcovs=initial_cholcovs, + mixture_weights=mixture_weights, + controls=controls, + meas_sds=meas_sds, + shock_sds=shock_sds, + loadings=loadings, + transition=transition, + is_anchoring_loading=is_anchoring_loading, + is_anchored_factor=is_anchored_factor, + is_anchoring_update=is_anchoring_update, + ignore_constant_when_anchoring=anchoring.ignore_constant_when_anchoring, + has_endogenous_factors=has_endogenous_factors, + ) -def 
_get_positional_selector_from_loc(range_sr, loc): +def _get_positional_selector_from_loc( + range_sr: pd.Series, + loc: str | pd.MultiIndex | pd.Index, +) -> Array | slice: with warnings.catch_warnings(): warnings.filterwarnings( "ignore", @@ -98,176 +125,227 @@ def _get_positional_selector_from_loc(range_sr, loc): return ilocs -def parse_params(params, parsing_info, dimensions, labels, n_obs): +def parse_params( + params: Array, + parsing_info: ParsingInfo, + dimensions: Dimensions, + labels: Labels, + n_obs: int, +) -> tuple[Array, Array, Array, ParsedParams]: """Parse params into the quantities that depend on it. Args: - params (jax.numpy.array): 1d array with model parameters. - parsing_info (dict): Dictionary with information on how the parameters + params: 1d array with model parameters. + parsing_info: ParsingInfo dataclass with information on how the parameters have to be parsed. - dimensions (dict): Dimensional information like n_states, n_periods, n_controls, + dimensions: Dimensional information like n_states, n_periods, n_controls, n_mixtures. See :ref:`dimensions`. - n_obs (int): Number of observations. + labels: Labels dataclass with labels for the model quantities like + factors, periods, controls, stagemap and stages. See :ref:`labels` + n_obs: Number of observations. Returns: - jax.numpy.array: Array of shape (n_obs, n_mixtures, n_states) with initial - state estimates. - jax.numpy.array: Array of shape (n_obs, n_mixtures, n_states, n_states) with the - transpose of the lower triangular cholesky factors of the initial covariance - matrices. - jax.numpy.array: Array of shape (n_obs, n_mixtures) with the log of the initial - weight for each element in the finite mixture of normals. - dict: Dictionary with other parameters. 
It has the following key-value pairs: - - "control_params": - - "loadings": - - "meas_sds": - - "shock_sds": - - "trans_params": - - "anchoring_scaling_factors": - - "anchoring_constants": + Tuple of: + - Array of shape (n_obs, n_mixtures, n_states) with initial state estimates. + - Array of shape (n_obs, n_mixtures, n_states, n_states) with the transpose + of the lower triangular cholesky factors of the initial covariance + matrices. + - Array of shape (n_obs, n_mixtures) with the log of the initial weight for + each element in the finite mixture of normals. + - ParsedParams dataclass with other model parameters. """ - states = _get_initial_states(params, parsing_info, dimensions, n_obs) - upper_chols = _get_initial_upper_chols(params, parsing_info, dimensions, n_obs) - log_weights = _get_initial_log_mixture_weights(params, parsing_info, n_obs) - pardict = { - "controls": _get_control_params(params, parsing_info, dimensions), - "loadings": _get_loadings(params, parsing_info), - "meas_sds": _get_meas_sds(params, parsing_info), - "shock_sds": _get_shock_sds(params, parsing_info, dimensions), - "transition": _get_transition_params(params, parsing_info, labels), - } - - pardict["anchoring_scaling_factors"] = _get_anchoring_scaling_factors( - pardict["loadings"], - parsing_info, - dimensions, + states = _get_initial_states( + params=params, info=parsing_info, dimensions=dimensions, n_obs=n_obs + ) + upper_chols = _get_initial_upper_chols( + params=params, info=parsing_info, dimensions=dimensions, n_obs=n_obs + ) + log_weights = _get_initial_log_mixture_weights( + params=params, info=parsing_info, n_obs=n_obs ) - pardict["anchoring_constants"] = _get_anchoring_constants( - pardict["controls"], - parsing_info, - dimensions, + controls = _get_control_params( + params=params, info=parsing_info, dimensions=dimensions + ) + loadings = _get_loadings(params=params, info=parsing_info) + meas_sds = _get_meas_sds(params=params, info=parsing_info) + shock_sds = 
_get_shock_sds(params=params, info=parsing_info, dimensions=dimensions) + transition = _get_transition_params(params=params, info=parsing_info, labels=labels) + + anchoring_scaling_factors = _get_anchoring_scaling_factors( + loadings=loadings, + info=parsing_info, + dimensions=dimensions, ) - return states, upper_chols, log_weights, pardict + anchoring_constants = _get_anchoring_constants( + controls=controls, + info=parsing_info, + dimensions=dimensions, + ) + + parsed = ParsedParams( + controls=controls, + loadings=loadings, + meas_sds=meas_sds, + shock_sds=shock_sds, + transition=transition, + anchoring_scaling_factors=anchoring_scaling_factors, + anchoring_constants=anchoring_constants, + ) + + return states, upper_chols, log_weights, parsed -def _get_initial_states(params, info, dimensions, n_obs): +def _get_initial_states( + params: Array, + info: ParsingInfo, + dimensions: Dimensions, + n_obs: int, +) -> Array: """Create the array of initial states.""" - state = params[info["initial_states"]].reshape( + state = params[info.initial_states].reshape( 1, - dimensions["n_mixtures"], - dimensions["n_latent_factors"], + dimensions.n_mixtures, + dimensions.n_latent_factors, ) return jnp.repeat(state, n_obs, axis=0) -def _get_initial_upper_chols(params, info, dimensions, n_obs): +def _get_initial_upper_chols( + params: Array, + info: ParsingInfo, + dimensions: Dimensions, + n_obs: int, +) -> Array: """Create the array with cholesky factors of the initial states covariance matrix. Note: The matrices contain the transpose of the lower triangular cholesky factors. 
""" - n_states, n_mixtures = dimensions["n_latent_factors"], dimensions["n_mixtures"] - chol_params = params[info["initial_cholcovs"]].reshape(n_mixtures, -1) + n_states, n_mixtures = dimensions.n_latent_factors, dimensions.n_mixtures + chol_params = params[info.initial_cholcovs].reshape(n_mixtures, -1) upper_chols = jnp.zeros((n_obs, n_mixtures, n_states, n_states)) for i in range(n_mixtures): filler = jnp.zeros((n_states, n_states)) - filler = filler.at[jnp.tril_indices(n_states)].set(chol_params[i]) - upper_chols = upper_chols.at[:, i].set(filler.T) + filler = filler.at[jnp.tril_indices(n_states)].set(chol_params[i]) # noqa: PD008 + upper_chols = upper_chols.at[:, i].set(filler.T) # noqa: PD008 return upper_chols -def _get_initial_log_mixture_weights(params, info, n_obs): +def _get_initial_log_mixture_weights( + params: Array, + info: ParsingInfo, + n_obs: int, +) -> Array: """Create the array with the log of initial mixture weights.""" - log_weights = jnp.log(params[info["mixture_weights"]]).reshape(1, -1) + log_weights = jnp.log(params[info.mixture_weights]).reshape(1, -1) return jnp.repeat(log_weights, n_obs, axis=0) -def _get_control_params(params, info, dimensions): +def _get_control_params( + params: Array, + info: ParsingInfo, + dimensions: Dimensions, +) -> Array: """Create the parameters for control variables in measurement equations.""" - return params[info["controls"]].reshape(-1, dimensions["n_controls"]) + return params[info.controls].reshape(-1, dimensions.n_controls) -def _get_loadings(params, info): +def _get_loadings( + params: Array, + info: ParsingInfo, +) -> Array: """Create the array of factor loadings.""" - info = info["loadings"] - free = params[info["slice"]] - extended = jnp.zeros(info["size"]).at[info["flat_indices"]].set(free) - out = extended.reshape(info["shape"]) - return out + loadings_info = info.loadings + free = params[loadings_info.slice] + extended = jnp.zeros(loadings_info.size).at[loadings_info.flat_indices].set(free) # 
noqa: PD008 + return extended.reshape(loadings_info.shape) -def _get_meas_sds(params, info): +def _get_meas_sds( + params: Array, + info: ParsingInfo, +) -> Array: """Create the array of standard deviations of the measurement errors.""" - return params[info["meas_sds"]] + return params[info.meas_sds] -def _get_shock_sds(params, info, dimensions): +def _get_shock_sds( + params: Array, + info: ParsingInfo, + dimensions: Dimensions, +) -> Array: """Create the array of standard deviations of the shocks in transition functions.""" - return params[info["shock_sds"]].reshape(-1, dimensions["n_latent_factors"]) + return params[info.shock_sds].reshape(-1, dimensions.n_latent_factors) -def _get_transition_params(params, info, labels): +def _get_transition_params( + params: Array, + info: ParsingInfo, + labels: Labels, +) -> dict[str, Array]: """Create a list of arrays with transition equation parameters.""" trans_params = {} - t_info = info["transition"] - n_aug_periods = len(labels["aug_periods"]) + n_aug_periods = len(labels.aug_periods) - # Use has_endogenous_factors from parsing_info instead of undefined global - len_reduction = 2 if info["has_endogenous_factors"] else 1 + len_reduction = 2 if info.has_endogenous_factors else 1 - for factor in labels["latent_factors"]: - ilocs = t_info[factor] + for factor in list(labels.latent_factors): + ilocs = info.transition[factor] trans_params[factor] = params[ilocs].reshape(n_aug_periods - len_reduction, -1) return trans_params -def _get_anchoring_scaling_factors(loadings, info, dimensions): +def _get_anchoring_scaling_factors( + loadings: Array, + info: ParsingInfo, + dimensions: Dimensions, +) -> Array: """Create an array of anchoring scaling factors. Note: Parameters are not taken from the parameter vector but from the loadings. 
""" scaling_factors = jnp.ones( - (dimensions["n_aug_periods"], dimensions["n_latent_factors"]), + (dimensions.n_aug_periods, dimensions.n_latent_factors), ) - free_anchoring_loadings = loadings[info["is_anchoring_loading"]].reshape( - dimensions["n_aug_periods"], + free_anchoring_loadings = loadings[info.is_anchoring_loading].reshape( + dimensions.n_aug_periods, -1, ) - scaling_factors = scaling_factors.at[:, info["is_anchored_factor"]].set( + scaling_factors = scaling_factors.at[:, info.is_anchored_factor].set( # noqa: PD008 free_anchoring_loadings, ) scaling_for_observed = jnp.ones( - (dimensions["n_aug_periods"], dimensions["n_observed_factors"]), + (dimensions.n_aug_periods, dimensions.n_observed_factors), ) - scaling_factors = jnp.hstack([scaling_factors, scaling_for_observed]) + return jnp.hstack([scaling_factors, scaling_for_observed]) - return scaling_factors - -def _get_anchoring_constants(controls, info, dimensions): +def _get_anchoring_constants( + controls: Array, + info: ParsingInfo, + dimensions: Dimensions, +) -> Array: """Create an array of anchoring constants. Note: Parameters are not taken from the parameter vector but from the controls. 
""" - constants = jnp.zeros((dimensions["n_aug_periods"], dimensions["n_latent_factors"])) - if not info["ignore_constant_when_anchoring"]: - values = controls[:, 0][info["is_anchoring_update"]].reshape( - dimensions["n_aug_periods"], + constants = jnp.zeros((dimensions.n_aug_periods, dimensions.n_latent_factors)) + if not info.ignore_constant_when_anchoring: + values = controls[:, 0][info.is_anchoring_update].reshape( + dimensions.n_aug_periods, -1, ) - constants = constants.at[:, info["is_anchored_factor"]].set(values) + constants = constants.at[:, info.is_anchored_factor].set(values) # noqa: PD008 constants_for_observed = jnp.zeros( - (dimensions["n_aug_periods"], dimensions["n_observed_factors"]), + (dimensions.n_aug_periods, dimensions.n_observed_factors), ) - constants = jnp.hstack([constants, constants_for_observed]) - - return constants + return jnp.hstack([constants, constants_for_observed]) diff --git a/src/skillmodels/process_data.py b/src/skillmodels/process_data.py index 41dc0b32..72f731fe 100644 --- a/src/skillmodels/process_data.py +++ b/src/skillmodels/process_data.py @@ -1,75 +1,93 @@ +"""Functions to process and prepare data for model estimation.""" + import warnings -from typing import Any +from typing import Any, Literal import jax.numpy as jnp import numpy as np import pandas as pd +from jax import Array + +from skillmodels.types import Anchoring, Labels def process_data( - df, - has_endogenous_factors, - labels, - update_info, - anchoring_info, - purpose="estimation", -): + df: pd.DataFrame, + *, + has_endogenous_factors: bool, + labels: Labels, + update_info: pd.DataFrame, + anchoring_info: Anchoring, + purpose: Literal["estimation", "anything", "simulation"] = "estimation", +) -> dict[str, Any]: """Process the data for estimation. Args: - df (DataFrame): panel dataset in long format. It has a MultiIndex + df: panel dataset in long format. It has a MultiIndex where the first level indicates the period and the second the individual. 
- has_endogenous_factors (bool): - labels (dict): Dict of lists with labels for the model quantities like + has_endogenous_factors: Whether the model includes endogenous factors. + labels: Dict of lists with labels for the model quantities like factors, periods, controls, stagemap and stages. See :ref:`labels` - update_info (pandas.DataFrame): DataFrame with one row per Kalman update needed + update_info: DataFrame with one row per Kalman update needed in the likelihood function. See :ref:`update_info`. - anchoring_qinfo (dict): Information about anchoring. See :ref:`anchoring` - purpose (Literal["estimation", "anything"]): Whether the data is used for + anchoring_info: Information about anchoring. See :ref:`anchoring` + purpose: Whether the data is used for estimation (default, includes measurement data) or not. Returns a dictionary with keys: - measurements (jax.numpy.array): Array of shape (n_updates, n_obs) with data on + measurements: Array of shape (n_updates, n_obs) with data on observed measurements. NaN if the measurement was not observed. Only returned if estimation==True - controls (jax.numpy.array): Array of shape (n_periods, n_obs, n_controls) with + controls: Array of shape (n_periods, n_obs, n_controls) with observed control variables for the measurement equations. - observed_factors (jax.numpy.array): Array of shape + observed_factors: Array of shape (n_periods, n_obs, n_observed_factors) with data on the observed factors. 
Only returned if estimation==True """ - df = pre_process_data(df, labels["periods"]) + df = pre_process_data(df=df, periods=labels.periods) df["constant"] = 1 out = {} - df = _add_copies_of_anchoring_outcome(df, anchoring_info) + df = _add_copies_of_anchoring_outcome(df=df, anchoring_info=anchoring_info) if has_endogenous_factors: - df = _augment_data_for_endogenous_factors(df, labels, update_info) + df = _augment_data_for_endogenous_factors( + df=df, labels=labels, update_info=update_info + ) else: df.index = df.index.set_names(["id", "aug_period"]) - _check_data(df, update_info, labels, purpose=purpose) - n_obs = int(len(df) / len(labels["aug_periods"])) - df = _handle_controls_with_missings(df, labels["controls"], update_info) - out["controls"] = _generate_controls_array(df, labels, n_obs) - out["observed_factors"] = _generate_observed_factor_array(df, labels, n_obs) + _check_data(df=df, update_info=update_info, labels=labels, purpose=purpose) + n_obs = int(len(df) / len(labels.aug_periods)) + df = _handle_controls_with_missings( + df=df, controls=labels.controls, update_info=update_info + ) + out["controls"] = _generate_controls_array(df=df, labels=labels, n_obs=n_obs) + out["observed_factors"] = _generate_observed_factor_array( + df=df, labels=labels, n_obs=n_obs + ) if purpose == "estimation": - out["measurements"] = _generate_measurements_array(df, update_info, n_obs) + out["measurements"] = _generate_measurements_array( + df=df, update_info=update_info, n_obs=n_obs + ) return out -def pre_process_data(df, periods): +def pre_process_data( + df: pd.DataFrame, + periods: tuple[int, ...] | list[int], +) -> pd.DataFrame: """Balance panel data in long format, drop unnecessary periods and set index. Args: - df (DataFrame): panel dataset in long format. It has a MultiIndex + df: panel dataset in long format. It has a MultiIndex where the first level indicates the period and the second the individual. + periods: The periods to keep in the balanced panel. 
Returns: - balanced (DataFrame): balanced panel. It has a MultiIndex. The first + balanced: balanced panel. It has a MultiIndex. The first enumerates individuals. The second level counts periods, starting at 0. """ @@ -80,28 +98,27 @@ def pre_process_data(df, periods): # replace existing codes for periods and df.index.names = ["id", "period"] for level in [0, 1]: - df.index = df.index.set_levels(range(len(df.index.levels[level])), level=level) + # df.index is a MultiIndex but typed as Index + df.index = df.index.set_levels(range(len(df.index.levels[level])), level=level) # ty: ignore[unresolved-attribute] # create new index ids = sorted(df.index.get_level_values("id").unique()) new_index = pd.MultiIndex.from_product([ids, periods], names=["id", "period"]) # set new index - df = df.reindex(new_index) - - return df + return df.reindex(new_index) def _get_period_data_for_endogenous_factors( aug_period: int, period: int, df: pd.DataFrame, - labels: dict[str, Any], + labels: Labels, update_info: pd.DataFrame, ) -> pd.DataFrame: - meas = _get_period_measurements(update_info, aug_period) - controls = labels["controls"] - observed = labels["observed_factors"] + meas = _get_period_measurements(update_info=update_info, aug_period=aug_period) + controls = labels.controls + observed = labels.observed_factors out = df.query(f"period == {period}")[ [ @@ -120,9 +137,9 @@ def _get_period_data_for_endogenous_factors( def _augment_data_for_endogenous_factors( df: pd.DataFrame, - labels: dict[str, Any], + labels: Labels, update_info: pd.DataFrame, -): +) -> pd.DataFrame: """Make room for endogenous factors by doubling up the periods. Endogeneity means that current states influence the factor. 
Typically, this comes @@ -134,8 +151,10 @@ def _augment_data_for_endogenous_factors( # Make sure datset is balanced n_ids = df["id"].nunique() n_periods = df["period"].nunique() - assert n_ids * n_periods == df.shape[0] - assert set(df["period"]) == set(labels["aug_periods_to_periods"].values()) + if n_ids * n_periods != df.shape[0]: + raise ValueError("Dataset is not balanced: n_ids * n_periods != n_rows") + if set(df["period"]) != set(labels.aug_periods_to_periods.values()): + raise ValueError("Periods in data don't match expected periods") out = pd.concat( [ @@ -146,30 +165,40 @@ def _augment_data_for_endogenous_factors( update_info=update_info, labels=labels, ) - for aug_period, period in labels["aug_periods_to_periods"].items() + for aug_period, period in labels.aug_periods_to_periods.items() ] ) return out.set_index(["id", "aug_period"]).sort_index() -def _add_copies_of_anchoring_outcome(df, anchoring_info): +def _add_copies_of_anchoring_outcome( + df: pd.DataFrame, + anchoring_info: Anchoring, +) -> pd.DataFrame: df = df.copy() - for factor in anchoring_info["factors"]: - outcome = anchoring_info["outcomes"][factor] + for factor in anchoring_info.factors: + outcome = anchoring_info.outcomes[factor] df[f"{outcome}_{factor}"] = df[outcome] return df -def _check_data(df, update_info, labels, purpose): # noqa: C901 +def _check_data( # noqa: C901 + df: pd.DataFrame, + update_info: pd.DataFrame, + labels: Labels, + purpose: Literal["estimation", "anything", "simulation"], +) -> None: var_report = pd.DataFrame(index=update_info.index[:0], columns=["problem"]) - for aug_period in labels["aug_periods"]: + for aug_period in labels.aug_periods: period_data = df.query(f"aug_period == {aug_period}") - for cont in labels["controls"]: + for cont in labels.controls: if cont not in period_data.columns or period_data[cont].isna().all(): var_report.loc[(aug_period, cont), "problem"] = "Variable is missing" if purpose == "estimation": - for meas in 
_get_period_measurements(update_info, aug_period): + for meas in _get_period_measurements( + update_info=update_info, aug_period=aug_period + ): if meas not in period_data.columns: var_report.loc[(aug_period, meas), "problem"] = ( "Variable is missing" @@ -179,7 +208,7 @@ def _check_data(df, update_info, labels, purpose): # noqa: C901 "Variable has no variance" ) - for factor in labels["observed_factors"]: + for factor in labels.observed_factors: if factor not in period_data.columns: var_report.loc[(aug_period, factor), "problem"] = "Variable is missing" elif period_data[factor].isna().any(): @@ -193,13 +222,19 @@ def _check_data(df, update_info, labels, purpose): # noqa: C901 raise ValueError(var_report) -def _handle_controls_with_missings(df, controls, update_info): +def _handle_controls_with_missings( + df: pd.DataFrame, + controls: tuple[str, ...], + update_info: pd.DataFrame, +) -> pd.DataFrame: aug_periods = update_info.index.get_level_values(0).unique().tolist() problematic_index = df.index[:0] for aug_period in aug_periods: period_data = df.query(f"aug_period == {aug_period}") - control_data = period_data[controls] - meas_data = period_data[_get_period_measurements(update_info, aug_period)] + control_data = period_data[list(controls)] + meas_data = period_data[ + _get_period_measurements(update_info=update_info, aug_period=aug_period) + ] problem = control_data.isna().any(axis=1) & meas_data.notna().any(axis=1) problematic_index = problematic_index.union(period_data[problem].index) @@ -207,12 +242,15 @@ def _handle_controls_with_missings(df, controls, update_info): old_names = df.loc[problematic_index][["__old_id__", "__old_period__"]] msg = "Set measurements to NaN because there are NaNs in the controls for:\n{}" msg = msg.format(list(map(tuple, old_names.to_numpy().tolist()))) - warnings.warn(msg) + warnings.warn(msg, stacklevel=2) df.loc[problematic_index] = np.nan return df -def _get_period_measurements(update_info, aug_period): +def 
_get_period_measurements( + update_info: pd.DataFrame, + aug_period: int, +) -> list[str]: if aug_period in update_info.index: measurements = list(update_info.loc[aug_period].index) else: @@ -220,26 +258,38 @@ def _get_period_measurements(update_info, aug_period): return measurements -def _generate_measurements_array(df, update_info, n_obs): +def _generate_measurements_array( + df: pd.DataFrame, + update_info: pd.DataFrame, + n_obs: int, +) -> Array: arr = np.zeros((len(update_info), n_obs)) for k, (aug_period, var) in enumerate(update_info.index): arr[k] = df.query(f"aug_period == {aug_period}")[var].to_numpy() return jnp.array(arr, dtype="float32") -def _generate_controls_array(df, labels, n_obs): - arr = np.zeros((len(labels["aug_periods"]), n_obs, len(labels["controls"]))) - for aug_period in labels["aug_periods"]: +def _generate_controls_array( + df: pd.DataFrame, + labels: Labels, + n_obs: int, +) -> Array: + arr = np.zeros((len(labels.aug_periods), n_obs, len(labels.controls))) + for aug_period in labels.aug_periods: arr[aug_period] = df.query(f"aug_period == {aug_period}")[ - labels["controls"] + list(labels.controls) ].to_numpy() return jnp.array(arr, dtype="float32") -def _generate_observed_factor_array(df, labels, n_obs): - arr = np.zeros((len(labels["aug_periods"]), n_obs, len(labels["observed_factors"]))) - for aug_period in labels["aug_periods"]: +def _generate_observed_factor_array( + df: pd.DataFrame, + labels: Labels, + n_obs: int, +) -> Array: + arr = np.zeros((len(labels.aug_periods), n_obs, len(labels.observed_factors))) + for aug_period in labels.aug_periods: arr[aug_period] = df.query(f"aug_period == {aug_period}")[ - labels["observed_factors"] + list(labels.observed_factors) ].to_numpy() return jnp.array(arr, dtype="float32") diff --git a/src/skillmodels/process_debug_data.py b/src/skillmodels/process_debug_data.py index 1d5e7dde..f8d055a0 100644 --- a/src/skillmodels/process_debug_data.py +++ b/src/skillmodels/process_debug_data.py @@ -1,31 
+1,42 @@ +"""Functions to process debug output from likelihood function into DataFrames.""" + +from typing import Any + import numpy as np import pandas as pd +from jax import Array +from numpy.typing import NDArray +from skillmodels.types import ProcessedModel -def process_debug_data(debug_data, model): + +def process_debug_data( + debug_data: dict[str, Any], + model: ProcessedModel, +) -> dict[str, Any]: """Process the raw debug data into pandas objects that make visualization easy. Args: - debug_data (dict): Dictionary containing the following entries ( + debug_data: Dictionary containing the following entries ( and potentially others which are not modified): - - filtered_states (jax.numpy.array): Array of shape (n_updates, n_obs, + - filtered_states: Array of shape (n_updates, n_obs, n_mixtures, n_states) containing the filtered states after each Kalman update. - - initial_states (jax.numpy.array): Array of shape (n_obs, n_mixtures, n_states) + - initial_states: Array of shape (n_obs, n_mixtures, n_states) with the state estimates before the first Kalman update. - - residuals (jax.numpy.array): Array of shape (n_updates, n_obs, n_mixtures) + - residuals: Array of shape (n_updates, n_obs, n_mixtures) containing the residuals of a Kalman update. - - residual_sds (jax.numpy.ndarray): Array of shape (n_updates, n_obs, + - residual_sds: Array of shape (n_updates, n_obs, n_mixtures) containing the theoretical standard deviation of the residuals. - - all_contributions (jax.numpy.array): Array of shape (n_updates, n_obs) with + - all_contributions: Array of shape (n_updates, n_obs) with the likelihood contributions per update and individual. - - log_mixture_weights (jax.numpy.array): Array of shape (n_updates, n_obs, + - log_mixture_weights: Array of shape (n_updates, n_obs, n_mixtures) containing the log mixture weights after each update. 
- - initial_log_mixture_weights (jax.numpy.array): Array of shape (n_obs, + - initial_log_mixture_weights: Array of shape (n_obs, n_mixtures) containing the log mixture weights before the first kalman update. - model (dict): Processed model dictionary. + model: Processed model dictionary. Returns: dict: Dictionary with processed debug data. It has the following entries: @@ -36,28 +47,28 @@ def process_debug_data(debug_data, model): after the last update of each period. The columns are the factor names, "period" and "id". The filtered states are already aggregated over mixture distributions. - - state_ranges (dict): The keys are the names of the latent factors. + - state_ranges: The keys are the names of the latent factors. The values are DataFrames with the columns "period", "minimum", "maximum". Note that this aggregates over mixture distributions. - - residuals (pd.DataFrame): Tidy DataFrame with residuals of each Kalman update. + - residuals: Tidy DataFrame with residuals of each Kalman update. Columns are "residual", "mixture", "period", "measurement" and "id". "period" and "measurement" identify the Kalman update to which the residual belongs. - - residual_sds (pd.DataFrame): As residuals but containing the theoretical + - residual_sds: As residuals but containing the theoretical standard deviation of the corresponding residual. - - all_contributions (pd.DataFrame): Tidy DataFrame with log likelihood + - all_contributions: Tidy DataFrame with log likelihood contribution per individual and Kalman Update. The columns are "contribution", "period", "measurement" and "id". "period" and "measurement" identify the Kalman Update to which the likelihood contribution corresponds. 
""" - update_info = model["update_info"] - factors = model["labels"]["latent_factors"] + update_info = model.update_info + factors = model.labels.latent_factors post_update_states = _create_post_update_states( - debug_data["filtered_states"], - factors, - update_info, + filtered_states=debug_data["filtered_states"], + factors=factors, + update_info=update_info, ) filtered_states = _create_filtered_states( @@ -67,14 +78,18 @@ def process_debug_data(debug_data, model): factors=factors, ) - state_ranges = create_state_ranges(filtered_states, factors) + state_ranges = create_state_ranges(filtered_states=filtered_states, factors=factors) - residuals = _process_residuals(debug_data["residuals"], update_info) - residual_sds = _process_residual_sds(debug_data["residual_sds"], update_info) + residuals = _process_residuals( + residuals=debug_data["residuals"], update_info=update_info + ) + residual_sds = _process_residual_sds( + residual_sds=debug_data["residual_sds"], update_info=update_info + ) all_contributions = _process_all_contributions( - debug_data["all_contributions"], - update_info, + all_contributions=debug_data["all_contributions"], + update_info=update_info, ) res = { @@ -93,41 +108,51 @@ def process_debug_data(debug_data, model): return res -def _create_post_update_states(filtered_states, factors, update_info): +def _create_post_update_states( + filtered_states: Array, + factors: tuple[str, ...], + update_info: pd.DataFrame, +) -> pd.DataFrame: to_concat = [] for (aug_period, meas), data in zip( update_info.index, filtered_states, strict=False ): - df = _convert_state_array_to_df(data, factors) + df = _convert_state_array_to_df(arr=data, factor_names=factors) df["aug_period"] = aug_period df["id"] = np.arange(len(df)) df["measurement"] = meas to_concat.append(df) - post_states = pd.concat(to_concat) - - return post_states + return pd.concat(to_concat) -def _convert_state_array_to_df(arr, factor_names): +def _convert_state_array_to_df( + arr: 
NDArray[np.floating[Any]], + factor_names: tuple[str, ...], +) -> pd.DataFrame: """Convert a 3d state array into a 2d DataFrame. Args: - arr (np.ndarray): Array of shape (n_obs, n_mixtures, n_states) - factor_names (list): Names of the latent factors. + arr: Array of shape (n_obs, n_mixtures, n_states) + factor_names: Names of the latent factors. """ n_obs, n_mixtures, n_states = arr.shape - df = pd.DataFrame(data=arr.reshape(-1, n_states), columns=factor_names) + df = pd.DataFrame(data=arr.reshape(-1, n_states), columns=list(factor_names)) df["mixture"] = np.full((n_obs, n_mixtures), np.arange(n_mixtures)).flatten() return df -def _create_filtered_states(filtered_states, log_mixture_weights, update_info, factors): - filtered_states = np.array(filtered_states) - log_mixture_weights = np.array(log_mixture_weights) - weights = np.exp(log_mixture_weights) +def _create_filtered_states( + filtered_states: Array, + log_mixture_weights: Array, + update_info: pd.DataFrame, + factors: tuple[str, ...], +) -> pd.DataFrame: + filtered_states_np = np.array(filtered_states) + log_mixture_weights_np = np.array(log_mixture_weights) + weights = np.exp(log_mixture_weights_np) - agg_states = (filtered_states * weights.reshape(*weights.shape, 1)).sum(axis=-2) + agg_states = (filtered_states_np * weights.reshape(*weights.shape, 1)).sum(axis=-2) keep = [] for i, (aug_period, measurement) in enumerate(update_info.index): @@ -145,25 +170,30 @@ def _create_filtered_states(filtered_states, log_mixture_weights, update_info, f df["id"] = np.arange(len(df)) to_concat.append(df) - filtered_states = pd.concat(to_concat) - - return filtered_states + return pd.concat(to_concat) -def create_state_ranges(filtered_states, factors): - ranges = {} +def create_state_ranges( + filtered_states: pd.DataFrame, + factors: tuple[str, ...] 
| list[str], +) -> dict[str, pd.DataFrame]: + """Compute minimum and maximum state values for each factor by period.""" + ranges: dict[str, pd.DataFrame] = {} # Group by whichever period column is present period_col = "aug_period" if "aug_period" in filtered_states.columns else "period" minima = filtered_states.groupby(period_col).min() maxima = filtered_states.groupby(period_col).max() for factor in factors: df = pd.concat([minima[factor], maxima[factor]], axis=1) - df.columns = ["minimum", "maximum"] + df.columns = pd.Index(["minimum", "maximum"]) ranges[factor] = df return ranges -def _process_residuals(residuals, update_info): +def _process_residuals( + residuals: Array, + update_info: pd.DataFrame, +) -> pd.DataFrame: to_concat = [] n_obs, n_mixtures = residuals[0].shape for (aug_period, meas), data in zip(update_info.index, residuals, strict=False): @@ -176,11 +206,17 @@ def _process_residuals(residuals, update_info): return pd.concat(to_concat) -def _process_residual_sds(residual_sds, update_info): - return _process_residuals(residual_sds, update_info) +def _process_residual_sds( + residual_sds: Array, + update_info: pd.DataFrame, +) -> pd.DataFrame: + return _process_residuals(residuals=residual_sds, update_info=update_info) -def _process_all_contributions(all_contributions, update_info): +def _process_all_contributions( + all_contributions: Array, + update_info: pd.DataFrame, +) -> pd.DataFrame: to_concat = [] for (period, meas), contribs in zip( update_info.index, all_contributions, strict=False diff --git a/src/skillmodels/process_model.py b/src/skillmodels/process_model.py index dd1771ac..3745e9a7 100644 --- a/src/skillmodels/process_model.py +++ b/src/skillmodels/process_model.py @@ -1,22 +1,36 @@ -from copy import deepcopy +"""Functions to process model specifications from user-friendly to internal form.""" + +from collections.abc import KeysView, Mapping +from dataclasses import replace from functools import partial -from typing import Any, Literal 
import numpy as np import pandas as pd from dags import concatenate_functions from dags.signature import rename_arguments -from jax import vmap +from jax import Array, vmap from pandas import DataFrame import skillmodels.transition_functions as t_f_module from skillmodels.check_model import check_model, check_stagemap from skillmodels.decorators import extract_params, jax_array_output +from skillmodels.model_spec import FactorSpec, ModelSpec, Normalizations +from skillmodels.types import ( + Anchoring, + Dimensions, + EndogenousFactorsInfo, + EstimationOptions, + FactorInfo, + Labels, + MeasurementType, + ProcessedModel, + TransitionInfo, +) pd.set_option("future.no_silent_downcasting", True) # noqa: FBT003 -def process_model(model_dict): +def process_model(model_spec: ModelSpec) -> ProcessedModel: """Check, clean, extend and transform the model specs. Check the completeness, consistency and validity of the model specifications. @@ -24,78 +38,89 @@ def process_model(model_dict): Set default values and extend the model specification where necessary. Args: - model_dict (dict): The model specification. See: :ref:`model_specs` + model_spec: The model specification. See: :ref:`model_specs` Returns: - dict: nested dictionary of model specs. It has the following entries: - - dimensions (dict): Dimensional information like n_states, n_periods, + ProcessedModel with the following entries: + - dimensions: Dimensional information like n_states, n_periods, n_controls, n_mixtures. See :ref:`dimensions`. - - labels (dict): Dict of lists with labels for the model quantities like + - labels: Dict of lists with labels for the model quantities like factors, periods, controls, stagemap and stages. See :ref:`labels` - - anchoring (dict): Information about anchoring. See :ref:`anchoring` - - transition_info (dict): Everything related to transition functions. - - update_info (pandas.DataFrame): DataFrame with one row per Kalman update + - anchoring: Information about anchoring. 
See :ref:`anchoring` + - transition_info: Everything related to transition functions. + - update_info: DataFrame with one row per Kalman update needed in the likelihood function. See :ref:`update_info`. - - normalizations (dict): Nested dictionary with information on normalized factor + - normalizations: Nested dictionary with information on normalized factor loadings and intercepts for each factor. See :ref:`normalizations`. """ - has_endogenous_factors = get_has_endogenous_factors(model_dict["factors"]) + has_endogenous_factors = get_has_endogenous_factors(model_spec.factors) + est_opts = model_spec.estimation_options dims = get_dimensions( - model_dict=model_dict, has_endogenous_factors=has_endogenous_factors + model_spec=model_spec, has_endogenous_factors=has_endogenous_factors ) labels = _get_labels( - model_dict=model_dict, + model_spec=model_spec, has_endogenous_factors=has_endogenous_factors, dimensions=dims, ) - anchoring = _process_anchoring(model_dict) + anchoring = _process_anchoring(model_spec) if has_endogenous_factors: - _model_dict_aug = _augment_periods_for_endogenous_factors( - model_dict=model_dict, + _model_spec_aug = _augment_periods_for_endogenous_factors( + model_spec=model_spec, dimensions=dims, labels=labels, ) - endogenous_factors_info = _get_endogenous_factors_info( - has_endogenous_factors=has_endogenous_factors, - model_dict=_model_dict_aug, - labels=labels, - bounds_distance=model_dict["estimation_options"]["bounds_distance"], - ) else: - _model_dict_aug = model_dict - endogenous_factors_info = {"has_endogenous_factors": has_endogenous_factors} + _model_spec_aug = model_spec + bounds_distance = est_opts.bounds_distance if est_opts else 1e-3 + endogenous_factors_info = _get_endogenous_factors_info( + has_endogenous_factors=has_endogenous_factors, + model_spec=_model_spec_aug, + labels=labels, + bounds_distance=bounds_distance, + ) check_model( - model_dict=_model_dict_aug, + model_spec=_model_spec_aug, labels=labels, dimensions=dims, 
anchoring=anchoring, has_endogenous_factors=has_endogenous_factors, ) - transition_info = _get_transition_info(_model_dict_aug, labels) - labels["transition_names"] = list(transition_info["function_names"].values()) - - processed = { - "dimensions": dims, - "labels": labels, - "anchoring": anchoring, - "estimation_options": _process_estimation_options(_model_dict_aug), - "transition_info": transition_info, - "update_info": _get_update_info(_model_dict_aug, dims, labels, anchoring), - "normalizations": _process_normalizations(_model_dict_aug, dims, labels), - "endogenous_factors_info": endogenous_factors_info, - } - return processed + transition_info = _get_transition_info(model_spec=_model_spec_aug, labels=labels) + labels = replace( + labels, transition_names=tuple(transition_info.function_names.values()) + ) + + return ProcessedModel( + dimensions=dims, + labels=labels, + anchoring=anchoring, + estimation_options=_process_estimation_options(_model_spec_aug), + transition_info=transition_info, + update_info=_get_update_info( + model_spec=_model_spec_aug, + dimensions=dims, + labels=labels, + anchoring_info=anchoring, + ), + normalizations=_process_normalizations( + model_spec=_model_spec_aug, dimensions=dims, labels=labels + ), + endogenous_factors_info=endogenous_factors_info, + ) -def get_has_endogenous_factors(factors: dict[str, Any]) -> bool: +def get_has_endogenous_factors( + factors: Mapping[str, FactorSpec], +) -> bool: """Return True if any endogenous factors are present.""" endogenous_factors = pd.DataFrame( [ { "factor": f, - "is_endogenous": v.get("is_endogenous", False), - "is_correction": v.get("is_correction", False), + "is_endogenous": v.is_endogenous, + "is_correction": v.is_correction, } for f, v in factors.items() ] @@ -115,36 +140,36 @@ def get_has_endogenous_factors(factors: dict[str, Any]) -> bool: return endogenous_factors["is_endogenous"].any() # ty: ignore[invalid-return-type] -def get_dimensions(model_dict, has_endogenous_factors): +def 
get_dimensions( + model_spec: ModelSpec, *, has_endogenous_factors: bool +) -> Dimensions: """Extract the dimensions of the model. Args: - model_dict (dict): The model specification. See: :ref:`model_specs` - has_endogenous_factors (bool): Whether endogenous factors are present. + model_spec: The model specification. + has_endogenous_factors: Whether endogenous factors are present. Returns: - dict: Dimensional information like n_states, n_periods, n_controls, - n_mixtures. See :ref:`dimensions`. + Dimensions dataclass with all dimensional information. """ - all_n_periods = [len(d["measurements"]) for d in model_dict["factors"].values()] + all_n_periods = [len(fspec.measurements) for fspec in model_spec.factors.values()] n_periods = max(all_n_periods) n_aug_periods = 2 * n_periods if has_endogenous_factors else n_periods - - dims = { - "n_latent_factors": len(model_dict["factors"]), - "n_observed_factors": len(model_dict.get("observed_factors", [])), - "n_controls": len(model_dict.get("controls", [])) + 1, # plus 1: constant - "n_mixtures": model_dict["estimation_options"].get("n_mixtures", 1), - "n_aug_periods": n_aug_periods, - "n_periods": n_periods, - } - dims["n_all_factors"] = dims["n_latent_factors"] + dims["n_observed_factors"] - return dims + est_opts = model_spec.estimation_options + + return Dimensions( + n_latent_factors=len(model_spec.factors), + n_observed_factors=len(model_spec.observed_factors), + n_controls=len(model_spec.controls) + 1, # plus 1: constant + n_mixtures=est_opts.n_mixtures if est_opts else 1, + n_aug_periods=n_aug_periods, + n_periods=n_periods, + ) def _get_aug_periods_to_periods( - n_aug_periods: int, has_endogenous_factors: bool + n_aug_periods: int, *, has_endogenous_factors: bool ) -> dict[int, int]: """Return mapper of (potentially) augmented periods to user-provided periods.""" aug_periods = list(range(n_aug_periods)) @@ -162,39 +187,43 @@ def _aug_periods_from_period( return [ap for ap, p in aug_periods_to_periods.items() if p 
== period] -def _get_labels(model_dict, has_endogenous_factors, dimensions): +def _get_labels( + model_spec: ModelSpec, *, has_endogenous_factors: bool, dimensions: Dimensions +) -> Labels: """Extract labels of the model quantities. Args: - model_dict (dict): The model specification. See: :ref:`model_specs` - has_endogenous_factors (bool): Whether endogenous factors are present. - dimensions (dict): Dimensional information like n_states, n_periods, n_controls, - n_mixtures. See :ref:`dimensions`. + model_spec: The model specification. See: :ref:`model_specs` + has_endogenous_factors: Whether endogenous factors are present. + dimensions: Dimensional information. Returns: - dict: Dict of lists with labels for the model quantities like - factors, periods, controls, stagemap and stages. See :ref:`labels` + Labels dataclass with all label information. """ aug_periods_to_periods = _get_aug_periods_to_periods( - n_aug_periods=dimensions["n_aug_periods"], + n_aug_periods=dimensions.n_aug_periods, has_endogenous_factors=has_endogenous_factors, ) - stagemap = model_dict.get("stagemap", list(range(dimensions["n_periods"] - 1))) + stagemap: list[int] = ( + list(model_spec.stagemap) + if model_spec.stagemap is not None + else list(range(dimensions.n_periods - 1)) + ) stages = sorted(int(v) for v in np.unique(stagemap)) report = check_stagemap( - stagemap=stagemap, - stages=stages, - n_periods=dimensions["n_periods"], + stagemap=tuple(stagemap), + stages=tuple(stages), + n_periods=dimensions.n_periods, is_augmented=False, ) if report: raise ValueError(f"Invalid stage map: {report}") if has_endogenous_factors: - aug_stagemap = [] - aug_stages_to_stages = {} + aug_stagemap: list[int] = [] + aug_stages_to_stages: dict[int, int] = {} relevant_aug_periods = sorted(aug_periods_to_periods.keys())[:-2] for aug_p in relevant_aug_periods: p = aug_periods_to_periods[aug_p] @@ -203,149 +232,148 @@ def _get_labels(model_dict, has_endogenous_factors, dimensions): aug_stagemap.append(aug_s) 
aug_stages_to_stages[aug_s] = s else: - aug_stagemap = stagemap + aug_stagemap = list(stagemap) aug_stages_to_stages = {s: s for s in stages} - labels = { - "latent_factors": list(model_dict["factors"]), - "observed_factors": list(model_dict.get("observed_factors", [])), - "controls": ["constant", *list(model_dict.get("controls", []))], - "periods": sorted(set(aug_periods_to_periods.values())), - "stagemap": stagemap, - "stages": stages, - "aug_periods": list(aug_periods_to_periods.keys()), - "aug_periods_to_periods": aug_periods_to_periods, - "aug_stagemap": aug_stagemap, - "aug_stages": sorted(int(v) for v in np.unique(aug_stagemap)), - "aug_stages_to_stages": aug_stages_to_stages, - } - - labels["all_factors"] = labels["latent_factors"] + labels["observed_factors"] # ty: ignore[unsupported-operator] - - return labels + return Labels( + latent_factors=tuple(model_spec.factors), + observed_factors=tuple(model_spec.observed_factors), + controls=("constant", *model_spec.controls), + periods=tuple(sorted(set(aug_periods_to_periods.values()))), + stagemap=tuple(stagemap), + stages=tuple(stages), + aug_periods=tuple(aug_periods_to_periods.keys()), + aug_periods_to_periods=aug_periods_to_periods, + aug_stagemap=tuple(aug_stagemap), + aug_stages=tuple(sorted(int(v) for v in np.unique(aug_stagemap))), + aug_stages_to_stages=aug_stages_to_stages, + ) -def _process_estimation_options(model_dict): +def _process_estimation_options(model_spec: ModelSpec) -> EstimationOptions: """Process options. Args: - model_dict (dict): The model specification. See: :ref:`model_specs` + model_spec: The model specification. See: :ref:`model_specs` Returns: - dict: Tuning parameters for the estimation. See :ref:`options`. + EstimationOptions dataclass with tuning parameters for the estimation. 
""" - default_options = { - "sigma_points_scale": 2, - "robust_bounds": True, - "bounds_distance": 1e-3, - "clipping_lower_bound": -1e30, - "clipping_upper_bound": None, - "clipping_lower_hardness": 1, - "clipping_upper_hardness": 1, - } - default_options.update(model_dict.get("estimation_options", {})) - - if not default_options["robust_bounds"]: - default_options["bounds_distance"] = 0 + opts = model_spec.estimation_options + if opts is None: + return EstimationOptions( + sigma_points_scale=2, + robust_bounds=True, + bounds_distance=1e-3, + clipping_lower_bound=-1e30, + clipping_upper_bound=None, + clipping_lower_hardness=1, + clipping_upper_hardness=1, + ) - return default_options + return EstimationOptions( + sigma_points_scale=opts.sigma_points_scale, + robust_bounds=opts.robust_bounds, + bounds_distance=opts.bounds_distance if opts.robust_bounds else 0, + clipping_lower_bound=opts.clipping_lower_bound, + clipping_upper_bound=opts.clipping_upper_bound, + clipping_lower_hardness=opts.clipping_lower_hardness, + clipping_upper_hardness=opts.clipping_upper_hardness, + ) -def _process_anchoring(model_dict): +def _process_anchoring(model_spec: ModelSpec) -> Anchoring: """Process the specification that governs how latent factors are anchored. Args: - model_dict (dict): The model specification. See: :ref:`model_specs` + model_spec: The model specification. See: :ref:`model_specs` Returns: - dict: Dictionary with information about anchoring. See :ref:`anchoring` + Anchoring dataclass with information about anchoring. 
""" - anchinfo = { - "anchoring": False, - "outcomes": {}, - "factors": [], - "free_controls": False, - "free_constant": False, - "free_loadings": False, - "ignore_constant_when_anchoring": False, - } - - if "anchoring" in model_dict: - anchinfo.update(model_dict["anchoring"]) - anchinfo["anchoring"] = True - anchinfo["factors"] = list(anchinfo["outcomes"]) # ty: ignore[invalid-argument-type] - - return anchinfo - + anch = model_spec.anchoring + if anch is not None: + return Anchoring.from_config( + outcomes=dict(anch.outcomes), + free_controls=anch.free_controls, + free_constant=anch.free_constant, + free_loadings=anch.free_loadings, + ignore_constant_when_anchoring=anch.ignore_constant_when_anchoring, + ) -def _insert_empty_elements_into_list(old, insert_at_modulo, to_insert, aug_p_to_p): - return [ - to_insert if aug_p % 2 == insert_at_modulo else old[p] - for aug_p, p in aug_p_to_p.items() - ] + return Anchoring.disabled() def _augment_periods_for_endogenous_factors( - model_dict: dict[str, Any], dimensions: dict[str, Any], labels: dict[str, Any] -) -> dict[str, Any]: + model_spec: ModelSpec, dimensions: Dimensions, labels: Labels +) -> ModelSpec: """Augment periods if endogenous factors are present. Args: - model_dict: The model specification. See: :ref:`model_specs` - dimensions (dict): Dimensional information like n_states, n_periods, n_controls, - n_mixtures. See :ref:`dimensions`. - labels (dict): Dict of lists with labels for the model quantities like - factors, periods, controls, stagemap and stages. See :ref:`labels` + model_spec: The model specification. See: :ref:`model_specs` + dimensions: Dimensional information. + labels: Labels for model quantities. Returns: - Model dictionary with twice the amount of periods + ModelSpec with twice the amount of periods. 
""" - aug = deepcopy(model_dict) - for fac, v in model_dict["factors"].items(): - insert_at_modulo = 0 if v.get("is_endogenous", False) else 1 + new_factors: dict[str, FactorSpec] = {} + for fac, fspec in model_spec.factors.items(): + insert_at_modulo = 0 if fspec.is_endogenous else 1 # Insert empty elements into measurements when we do not have those. - if len(v["measurements"]) != dimensions["n_periods"]: + if len(fspec.measurements) != dimensions.n_periods: raise ValueError( "Measurements must be of length `n_periods`, " - f"got {v['measurements']} for {fac}" + f"got {fspec.measurements} for {fac}" ) - aug["factors"][fac]["measurements"] = _insert_empty_elements_into_list( - old=v["measurements"], - insert_at_modulo=insert_at_modulo, - to_insert=[], - aug_p_to_p=labels["aug_periods_to_periods"], + aug_measurements = tuple( + () if aug_p % 2 == insert_at_modulo else fspec.measurements[p] + for aug_p, p in labels.aug_periods_to_periods.items() ) # Insert empty elements into normalizations when we do not have those. 
- for norm_type, normalizations in v.get("normalizations", {}).items(): - if not len(normalizations) == dimensions["n_periods"]: - raise ValueError( - "Normalizations must be lists of length `n_periods`, " - f"got {normalizations} for {fac}['normalizations']['{norm_type}']" - ) - aug["factors"][fac]["normalizations"][norm_type] = ( - _insert_empty_elements_into_list( - old=normalizations, - insert_at_modulo=insert_at_modulo, - to_insert={}, - aug_p_to_p=labels["aug_periods_to_periods"], + aug_normalizations = None + if fspec.normalizations is not None: + aug_norm_parts: dict[str, tuple[Mapping[str, float], ...]] = {} + for norm_type in ("loadings", "intercepts"): + norms = getattr(fspec.normalizations, norm_type) + if len(norms) != dimensions.n_periods: + raise ValueError( + "Normalizations must be lists of length `n_periods`, " + f"got {norms} for {fac}['normalizations']['{norm_type}']" + ) + aug_norm_parts[norm_type] = tuple( + {} if aug_p % 2 == insert_at_modulo else norms[p] + for aug_p, p in labels.aug_periods_to_periods.items() ) + aug_normalizations = Normalizations( + loadings=aug_norm_parts["loadings"], + intercepts=aug_norm_parts["intercepts"], ) - return aug + new_factors[fac] = FactorSpec( + measurements=aug_measurements, + normalizations=aug_normalizations, + is_endogenous=fspec.is_endogenous, + is_correction=fspec.is_correction, + transition_function=fspec.transition_function, + ) + + return model_spec._replace(factors=new_factors) -def _get_transition_info(model_dict, labels): + +def _get_transition_info(model_spec: ModelSpec, labels: Labels) -> TransitionInfo: """Collect information about transition functions.""" func_list, param_names = [], [] - latent_factors = labels["latent_factors"] - all_factors = labels["all_factors"] + latent_factors = labels.latent_factors + all_factors = labels.all_factors for factor in latent_factors: - spec = model_dict["factors"][factor]["transition_function"] + spec = model_spec.factors[factor].transition_function if 
isinstance(spec, str): func = getattr(t_f_module, spec) if spec == "constant": @@ -358,7 +386,7 @@ def _get_transition_info(model_dict, labels): "Custom transition functions must have a __name__ attribute.", ) if hasattr(spec, "__registered_params__"): - names = spec.__registered_params__ + names: list[str] = spec.__registered_params__ # ty: ignore[invalid-assignment] param_names.append(names) else: raise AttributeError( @@ -376,10 +404,10 @@ def _get_transition_info(model_dict, labels): # add functions to produce the individual factors out of the 1d states vector. # The dag will automatically sort out what we don't need. - def _extract_factor(states, pos): + def _extract_factor(states: Array, pos: int) -> Array: return states[pos] - for i, factor in enumerate(labels["all_factors"]): + for i, factor in enumerate(labels.all_factors): functions[factor] = partial(_extract_factor, pos=i) transition_function = concatenate_functions( @@ -397,93 +425,100 @@ def _extract_factor(states, pos): func = vmap(func, in_axes=(None, 0)) individual_functions[factor] = func - out = { - "func": transition_function, - "param_names": dict(zip(latent_factors, param_names, strict=False)), - "individual_functions": individual_functions, - "function_names": dict(zip(latent_factors, function_names, strict=False)), - } - return out + return TransitionInfo( + func=transition_function, + param_names=dict(zip(latent_factors, param_names, strict=False)), + individual_functions=individual_functions, + function_names=dict(zip(latent_factors, function_names, strict=False)), + ) def _get_endogenous_factors_info( + *, has_endogenous_factors: bool, - model_dict: dict[str, Any], - labels: dict[str, Any], + model_spec: ModelSpec, + labels: Labels, bounds_distance: float, -) -> dict[str, Any]: +) -> EndogenousFactorsInfo: """Collect information about endogenous factors.""" - endogenous_factors_info = { - "has_endogenous_factors": has_endogenous_factors, - "aug_periods_to_aug_period_meas_types": 
_get_aug_periods_to_aug_period_meas_types( # noqa: E501 - aug_periods=labels["aug_periods_to_periods"].keys(), + factor_info = {} + for fac, fspec in model_spec.factors.items(): + factor_info[fac] = FactorInfo.from_flags( + is_endogenous=fspec.is_endogenous, + is_correction=fspec.is_correction, + ) + + return EndogenousFactorsInfo( + has_endogenous_factors=has_endogenous_factors, + aug_periods_to_aug_period_meas_types=_get_aug_periods_to_aug_period_meas_types( + aug_periods=labels.aug_periods_to_periods.keys(), has_endogenous_factors=has_endogenous_factors, ), - "bounds_distance": bounds_distance, - "aug_periods_from_period": partial( + bounds_distance=bounds_distance, + aug_periods_from_period=partial( _aug_periods_from_period, - aug_periods_to_periods=labels["aug_periods_to_periods"], + aug_periods_to_periods=labels.aug_periods_to_periods, ), - } - for fac, v in model_dict["factors"].items(): - endogenous_factors_info[fac] = { - "is_state": ( - not v.get("is_endogenous", False) and not v.get("is_correction", False) - ), - "is_endogenous": v.get("is_endogenous", False), - "is_correction": v.get("is_correction", False), - } - return endogenous_factors_info + factor_info=factor_info, + ) def _get_aug_periods_to_aug_period_meas_types( - aug_periods: list[int], has_endogenous_factors: bool -) -> dict[int, Literal["states", "endogenous_factors"]]: + aug_periods: tuple[int, ...] 
| KeysView[int], + *, + has_endogenous_factors: bool, +) -> dict[int, MeasurementType]: if has_endogenous_factors: return { - aug_p: ("states" if aug_p % 2 == 0 else "endogenous_factors") + aug_p: ( + MeasurementType.STATES + if aug_p % 2 == 0 + else MeasurementType.ENDOGENOUS_FACTORS + ) for aug_p in aug_periods } - return dict.fromkeys(aug_periods, "states") + return dict.fromkeys(aug_periods, MeasurementType.STATES) -def _get_update_info(model_dict, dimensions, labels, anchoring_info): +def _get_update_info( + model_spec: ModelSpec, + dimensions: Dimensions, + labels: Labels, + anchoring_info: Anchoring, +) -> DataFrame: """Construct a DataFrame with information on each Kalman update. Args: - model_dict (dict): The model specification. See: :ref:`model_specs` - dimensions (dict): Dimensional information like n_states, n_periods, n_controls, - n_mixtures. See :ref:`dimensions`. - labels (dict): Dict of lists with labels for the model quantities like - factors, periods, controls, stagemap and stages. See :ref:`labels` - anchoring_info (dict): Information about anchoring. See :ref:`anchoring` + model_spec: The model specification. See: :ref:`model_specs` + dimensions: Dimensional information. + labels: Labels for model quantities. + anchoring_info: Information about anchoring. See :ref:`anchoring` Returns: - pandas.DataFrame: DataFrame with one row per Kalman update needed in - the likelihood function. See :ref:`update_info`. + DataFrame with one row per Kalman update needed in the likelihood function. 
""" index = pd.MultiIndex( levels=[[], []], codes=[[], []], names=["aug_period", "variable"] ) - uinfo = DataFrame(index=index, columns=labels["latent_factors"] + ["purpose"]) + uinfo = DataFrame(index=index, columns=[*labels.latent_factors, "purpose"]) measurements = {} - for factor in labels["latent_factors"]: - measurements[factor] = model_dict["factors"][factor]["measurements"] - if len(measurements[factor]) != dimensions["n_aug_periods"]: + for factor in labels.latent_factors: + measurements[factor] = model_spec.factors[factor].measurements + if len(measurements[factor]) != dimensions.n_aug_periods: raise ValueError( "Measurements must be of length `n_aug_periods`, " f"got {measurements[factor]} for {factor}" ) - for aug_period in labels["aug_periods"]: - for factor in labels["latent_factors"]: + for aug_period in labels.aug_periods: + for factor in labels.latent_factors: for meas in measurements[factor][aug_period]: uinfo.loc[(aug_period, meas), factor] = True uinfo.loc[(aug_period, meas), "purpose"] = "measurement" - for factor in anchoring_info["factors"]: - outcome = anchoring_info["outcomes"][factor] + for factor in anchoring_info.factors: + outcome = anchoring_info.outcomes[factor] name = f"{outcome}_{factor}" uinfo.loc[(aug_period, name), factor] = True uinfo.loc[(aug_period, name), "purpose"] = "anchoring" @@ -493,33 +528,35 @@ def _get_update_info(model_dict, dimensions, labels, anchoring_info): return uinfo -def _process_normalizations(model_dict, dimensions, labels): +def _process_normalizations( + model_spec: ModelSpec, dimensions: Dimensions, labels: Labels +) -> dict[str, dict[str, list]]: """Process the normalizations of intercepts and factor loadings. Args: - model_dict (dict): The model specification. See: :ref:`model_specs` - dimensions (dict): Dimensional information like n_states, n_periods, n_controls, - n_mixtures. See :ref:`dimensions`. 
- labels (dict): Dict of lists with labels for the model quantities like - factors, periods, controls, stagemap and stages. See :ref:`labels` + model_spec: The model specification. See: :ref:`model_specs` + dimensions: Dimensional information. + labels: Labels for model quantities. Returns: - normalizations (dict): Nested dictionary with information on normalized factor - loadings and intercepts for each factor. See :ref:`normalizations`. + Nested dictionary with information on normalized factor loadings and + intercepts for each factor. """ - normalizations = {} - for factor in labels["latent_factors"]: + normalizations: dict[str, dict[str, list]] = {} + for factor in labels.latent_factors: normalizations[factor] = {} - norminfo = model_dict["factors"][factor].get("normalizations", {}) + fspec = model_spec.factors[factor] for norm_type in ["loadings", "intercepts"]: - candidate = norminfo.get( - norm_type, [{} for _ in range(dimensions["n_aug_periods"])] - ) - if not len(candidate) == dimensions["n_aug_periods"]: + if fspec.normalizations is not None: + norms = getattr(fspec.normalizations, norm_type) + candidate = [dict(m) for m in norms] + else: + candidate = [{} for _ in range(dimensions.n_aug_periods)] + if len(candidate) != dimensions.n_aug_periods: raise ValueError( "Normalizations must be of length `n_aug_periods`, " - f"got {norminfo} for {factor}['{norm_type}']" + f"got {candidate} for {factor}['{norm_type}']" ) normalizations[factor][norm_type] = candidate diff --git a/src/skillmodels/qr.py b/src/skillmodels/qr.py index c690eac7..566276b4 100644 --- a/src/skillmodels/qr.py +++ b/src/skillmodels/qr.py @@ -1,17 +1,20 @@ +"""Custom QR decomposition implementation optimized for GPU.""" + import jax import jax.numpy as jnp +from jax import Array @jax.custom_jvp -def qr_gpu(a: jax.Array): +def qr_gpu(a: Array) -> tuple[Array, Array]: """Custom implementation of the QR Decomposition.""" r, tau = jnp.linalg.qr(a, mode="raw") - q = _householder(r.mT, tau) + q = 
_householder(r=r.mT, tau=tau) return q, jnp.triu(r.mT[: tau.shape[0]]) -def _householder(r: jax.Array, tau: jax.Array): +def _householder(r: Array, tau: Array) -> Array: """Custom implementation of the Householder Product. Uses the outputs of jnp.linalg.qr with mode = "raw" to calculate Q. This is needed @@ -33,17 +36,17 @@ def _householder(r: jax.Array, tau: jax.Array): return h[:, :n] -def _t(x: jax.Array) -> jax.Array: +def _t(x: Array) -> Array: """Transpose batched Matrix.""" return jax.lax.transpose(x, (*range(x.ndim - 2), x.ndim - 1, x.ndim - 2)) -def _h(x: jax.Array) -> jax.Array: +def _h(x: Array) -> Array: """Hermitian Transpose of a Matrix.""" return _t(x).conj() -def _tril(m: jax.Array, k: int = 0) -> jax.Array: +def _tril(m: Array, k: int = 0) -> Array: """Select lower Triangle of a Matrix.""" *_, dim_n, dim_m = m.shape mask = jnp.tri(dim_n, dim_m, k, bool) @@ -51,7 +54,10 @@ def _tril(m: jax.Array, k: int = 0) -> jax.Array: @qr_gpu.defjvp -def qr_jvp_rule(primals, tangents): +def qr_jvp_rule( + primals: tuple[Array], + tangents: tuple[Array], +) -> tuple[tuple[Array, Array], tuple[Array, Array]]: """Calculates the derivative of the custom QR composition.""" # See j-towns.github.io/papers/qr-derivative.pdf for a terse derivation. 
(x,) = primals @@ -59,7 +65,7 @@ def qr_jvp_rule(primals, tangents): q, r = qr_gpu(x) dx_rinv = jax.lax.linalg.triangular_solve(r, dx) # Right side solve by default qt_dx_rinv = _h(q) @ dx_rinv - qt_dx_rinv_lower = _tril(qt_dx_rinv, -1) + qt_dx_rinv_lower = _tril(m=qt_dx_rinv, k=-1) do = qt_dx_rinv_lower - _h(qt_dx_rinv_lower) # This is skew-symmetric # The following correction is necessary for complex inputs n = x.shape[-1] diff --git a/src/skillmodels/simulate_data.py b/src/skillmodels/simulate_data.py index 3ae38799..b59554d4 100644 --- a/src/skillmodels/simulate_data.py +++ b/src/skillmodels/simulate_data.py @@ -1,63 +1,83 @@ """Functions to simulate a dataset generated by a latent factor model.""" import warnings +from collections.abc import Mapping import jax.numpy as jnp import numpy as np import pandas as pd -from numpy.random import choice, multivariate_normal +from jax import Array +from numpy.typing import NDArray from skillmodels.filtered_states import anchor_states_df from skillmodels.kalman_filters import transform_sigma_points +from skillmodels.model_spec import ModelSpec from skillmodels.params_index import get_params_index from skillmodels.parse_params import create_parsing_info, parse_params from skillmodels.process_data import process_data from skillmodels.process_debug_data import create_state_ranges from skillmodels.process_model import process_model - - -def simulate_dataset(model_dict, params, n_obs=None, data=None, policies=None): +from skillmodels.types import ( + Dimensions, + EndogenousFactorsInfo, + Labels, + MeasurementType, + ParsedParams, + TransitionInfo, +) + + +def simulate_dataset( + model_spec: ModelSpec, + params: pd.DataFrame, + n_obs: int | None = None, + data: pd.DataFrame | None = None, + policies: list[dict] | None = None, + seed: int | None = None, +) -> dict: """Simulate datasets generated by a latent factor model. Args: - model_dict (dict): The model specification. 
See: :ref:`model_specs` - params (pandas.DataFrame): DataFrame with model parameters. - n_obs (int): Number of simulated individuals - data (pd.DataFrame): Dataset in the same format as for estimation, containing + model_spec: The model specification. See: :ref:`model_specs` + params: Model parameters. + n_obs: Number of simulated individuals. + data: Dataset in the same format as for estimation, containing information about observed factors and control variables. - policies (list): list of dictionaries. Each dictionary specifies a - a stochastic shock to a latent factor AT THE END of "period" for "factor" - with mean "effect_size" and "standard deviation" + policies: Each dictionary specifies a stochastic shock to a latent factor + AT THE END of "period" for "factor" with mean "effect_size" and + "standard deviation". + seed: Random seed for reproducibility. If None, uses numpy's default random + state. Returns: - observed_data (pd.DataFrame): Dataset with measurements and control variables + observed_data: Dataset with measurements and control variables in long format - latent_data (pd.DataFrame): Dataset with latent factors in long format + latent_data: Dataset with latent factors in long format """ + rng = np.random.default_rng(seed) + if data is None and n_obs is None: raise ValueError("Either `data` or `n_obs` has to be provided.") - model = process_model(model_dict) + processed_model = process_model(model_spec) - if model["labels"]["observed_factors"] and data is None: + if processed_model.labels.observed_factors and data is None: raise ValueError( "To simulate a model with observed factors, data cannot be None.", ) - if model["labels"]["controls"] != ["constant"] and data is None: + if processed_model.labels.controls != ("constant",) and data is None: raise ValueError("To simulate a model with controls, data cannot be None.") if data is not None: processed_data = process_data( df=data, - has_endogenous_factors=model["endogenous_factors_info"][ - 
"has_endogenous_factors" - ], - labels=model["labels"], - update_info=model["update_info"], - anchoring_info=model["anchoring"], + has_endogenous_factors=processed_model.endogenous_factors_info.has_endogenous_factors, + labels=processed_model.labels, + update_info=processed_model.update_info, + anchoring_info=processed_model.anchoring, purpose="simulation", ) control_data = processed_data["controls"] @@ -68,39 +88,40 @@ def simulate_dataset(model_dict, params, n_obs=None, data=None, policies=None): warnings.warn( f"The number of observations inferred from data ({data_n_obs}) and " f"n_obs ({n_obs}) are different. n_obs is ignored.", + stacklevel=2, ) n_obs = data_n_obs else: control_data = jnp.ones((n_obs, 1)) - n_periods = model["dimensions"]["n_periods"] + n_periods = processed_model.dimensions.n_periods observed_factors = jnp.zeros((n_periods, n_obs, 0)) params_index = get_params_index( - update_info=model["update_info"], - labels=model["labels"], - dimensions=model["dimensions"], - transition_info=model["transition_info"], - endogenous_factors_info=model["endogenous_factors_info"], + update_info=processed_model.update_info, + labels=processed_model.labels, + dimensions=processed_model.dimensions, + transition_info=processed_model.transition_info, + endogenous_factors_info=processed_model.endogenous_factors_info, ) params = params.reindex(params_index) parsing_info = create_parsing_info( - params_index=params.index, - update_info=model["update_info"], - labels=model["labels"], - anchoring=model["anchoring"], - has_endogenous_factors=model["endogenous_factors_info"][ - "has_endogenous_factors" - ], + params_index=params.index, # ty: ignore[invalid-argument-type] + update_info=processed_model.update_info, + labels=processed_model.labels, + anchoring=processed_model.anchoring, + has_endogenous_factors=processed_model.endogenous_factors_info.has_endogenous_factors, ) - states, covs, log_weights, pardict = parse_params( + if n_obs is None: + raise ValueError("n_obs 
must be set by either data or argument") + states, covs, log_weights, parsed_params = parse_params( params=jnp.array(params["value"].to_numpy()), parsing_info=parsing_info, - dimensions=model["dimensions"], - labels=model["labels"], + dimensions=processed_model.dimensions, + labels=processed_model.labels, n_obs=n_obs, ) @@ -108,133 +129,155 @@ def simulate_dataset(model_dict, params, n_obs=None, data=None, policies=None): latent_states=states, covs=covs, log_weights=log_weights, - pardict=pardict, - labels=model["labels"], - dimensions=model["dimensions"], + parsed_params=parsed_params, + labels=processed_model.labels, + dimensions=processed_model.dimensions, n_obs=n_obs, - has_endogenous_factors=model["endogenous_factors_info"][ - "has_endogenous_factors" - ], - update_info=model["update_info"], + has_endogenous_factors=processed_model.endogenous_factors_info.has_endogenous_factors, + update_info=processed_model.update_info, control_data=control_data, observed_factors=observed_factors, - policies=policies, - transition_info=model["transition_info"], + policies=policies, # ty: ignore[invalid-argument-type] + transition_info=processed_model.transition_info, + rng=rng, ) # Create collapsed versions with user-facing periods latent_data = _collapse_aug_periods_to_periods( df=aug_latent_data, - factors=model["labels"]["latent_factors"], - aug_periods_to_periods=model["labels"]["aug_periods_to_periods"], - endogenous_factors_info=model["endogenous_factors_info"], + factors=processed_model.labels.latent_factors, + aug_periods_to_periods=processed_model.labels.aug_periods_to_periods, + endogenous_factors_info=processed_model.endogenous_factors_info, ) # Anchor the collapsed version (anchoring only works with period, not aug_period) anchored_latent_data = anchor_states_df( states_df=latent_data, - model_dict=model_dict, + model_spec=model_spec, params=params, use_aug_period=False, ) - out = { + return { "unanchored_states": { "states": latent_data, "state_ranges": 
create_state_ranges( - latent_data, - model["labels"]["latent_factors"], + filtered_states=latent_data, + factors=processed_model.labels.latent_factors, ), }, "anchored_states": { "states": anchored_latent_data, "state_ranges": create_state_ranges( - anchored_latent_data, - model["labels"]["latent_factors"], + filtered_states=anchored_latent_data, + factors=processed_model.labels.latent_factors, ), }, "aug_unanchored_states": { "states": aug_latent_data, "state_ranges": create_state_ranges( - aug_latent_data, - model["labels"]["latent_factors"], + filtered_states=aug_latent_data, + factors=processed_model.labels.latent_factors, ), }, "aug_measurements": aug_measurements, } - return out - def _simulate_dataset( - latent_states, - covs, - log_weights, - pardict, - labels, - dimensions, - n_obs, - has_endogenous_factors, - update_info, - control_data, - observed_factors, - policies, - transition_info, -): + latent_states: Array, + covs: Array, + log_weights: Array, + parsed_params: ParsedParams, + labels: Labels, + dimensions: Dimensions, + n_obs: int, + *, + has_endogenous_factors: bool, + update_info: pd.DataFrame, + control_data: Array, + observed_factors: Array, + policies: list[dict], + transition_info: TransitionInfo, + rng: np.random.Generator, +) -> tuple[pd.DataFrame, pd.DataFrame]: """Simulate datasets generated by a latent factor model. Args: - See simulate_data + latent_states: Array of shape (n_obs, n_mixtures, n_states) with initial + state estimates. + covs: Array of shape (n_obs, n_mixtures, n_states, n_states) with initial + covariance matrices. + log_weights: Array of shape (n_obs, n_mixtures) with log mixture weights. + parsed_params: ParsedParams dataclass with parsed parameters. + labels: Labels for the model quantities like factors, periods, controls. + dimensions: Dimensional information like n_states, n_periods, n_controls. + n_obs: Number of observations. + has_endogenous_factors: Whether the model includes endogenous factors. 
+ update_info: DataFrame with information on measurements for each period. + control_data: Array of shape (n_periods, n_obs, n_controls) with controls. + observed_factors: Array of shape (n_periods, n_obs, n_observed_factors). + policies: List of policy dictionaries specifying stochastic shocks. + transition_info: Information about transition functions. + rng: NumPy random number generator. Returns: - See simulate_data + observed_data: DataFrame with simulated measurements. + latent_data: DataFrame with simulated latent factors. """ policies = policies if policies is not None else [] - n_states = dimensions["n_latent_factors"] + n_states = dimensions.n_latent_factors if has_endogenous_factors: - n_aug_periods = dimensions["n_aug_periods"] - 1 + n_aug_periods = dimensions.n_aug_periods - 1 else: - n_aug_periods = dimensions["n_aug_periods"] + n_aug_periods = dimensions.n_aug_periods weights = np.exp(log_weights)[0] loadings_df = pd.DataFrame( - data=pardict["loadings"], + data=parsed_params.loadings, index=update_info.index, - columns=labels["latent_factors"], + columns=labels.latent_factors, ) control_params_df = pd.DataFrame( - data=pardict["controls"], + data=parsed_params.controls, index=update_info.index, - columns=labels["controls"], + columns=labels.controls, ) meas_sds = pd.DataFrame( - data=pardict["meas_sds"].reshape(-1, 1), + data=parsed_params.meas_sds.reshape(-1, 1), index=update_info.index, ) - transition_params = pardict["transition"] - shock_sds = pardict["shock_sds"] + transition_params = parsed_params.transition + shock_sds = parsed_params.shock_sds dist_args = [] - for mixture in range(dimensions["n_mixtures"]): + for mixture in range(dimensions.n_mixtures): args = { "mean": latent_states[0][mixture], "cov": covs[0][mixture].T @ covs[0][mixture], } dist_args.append(args) - latent_states = np.zeros((n_aug_periods, n_obs, n_states)) - latent_states[0] = generate_start_states(n_obs, dimensions, dist_args, weights) + latent_states = 
np.zeros((n_aug_periods, n_obs, n_states)) # ty: ignore[invalid-assignment] + latent_states[0] = generate_start_states( + rng=rng, + n_obs=n_obs, + dimensions=dimensions, + dist_args=dist_args, + weights=weights, + ) for t in range(n_aug_periods - 1): # if there is a shock in period t, add it here policies_t = [p for p in policies if p["aug_period"] == t] for policy in policies_t: - position = labels["latent_factors"].index(policy["factor"]) + position = labels.latent_factors.index(policy["factor"]) latent_states[t, :, position] += _get_shock( + rng=rng, mean=policy["effect_size"], sd=policy["standard_deviation"], size=n_obs, @@ -250,24 +293,24 @@ def _simulate_dataset( trans_coeffs = {k: arr[t] for k, arr in transition_params.items()} # get anchoring_scaling_factors for the period - anchoring_scaling_factors = pardict["anchoring_scaling_factors"][ + anchoring_scaling_factors = parsed_params.anchoring_scaling_factors[ jnp.array([t, t + 1]) ] # get anchoring constants for the period - anchoring_constants = pardict["anchoring_constants"][jnp.array([t, t + 1])] + anchoring_constants = parsed_params.anchoring_constants[jnp.array([t, t + 1])] # call transform_sigma_points and convert result to numpy next_states = np.array( transform_sigma_points( sigma_points=states, - transition_func=transition_info["func"], + transition_func=transition_info.func, trans_coeffs=trans_coeffs, anchoring_scaling_factors=anchoring_scaling_factors, anchoring_constants=anchoring_constants, ), ).reshape(n_obs, -1) - errors = multivariate_normal( + errors = rng.multivariate_normal( mean=np.zeros(n_states), cov=np.diag(shock_sds[t] ** 2), size=n_obs, @@ -281,11 +324,12 @@ def _simulate_dataset( for t in range(n_aug_periods): meas = pd.DataFrame( data=measurements_from_states( - latent_states[t], - control_data[t], - loadings_df.loc[t].to_numpy(), - control_params_df.loc[t].to_numpy(), - meas_sds.loc[t].to_numpy().flatten(), + rng=rng, + states=latent_states[t], # ty: 
ignore[invalid-argument-type] + controls=control_data[t], # ty: ignore[invalid-argument-type] + loadings=loadings_df.loc[t].to_numpy(), + control_params=control_params_df.loc[t].to_numpy(), + sds=meas_sds.loc[t].to_numpy().flatten(), ), columns=loadings_df.loc[t].index, ) @@ -298,7 +342,7 @@ def _simulate_dataset( latent_data_by_period = [] for t in range(n_aug_periods): - lat = pd.DataFrame(data=latent_states[t], columns=labels["latent_factors"]) + lat = pd.DataFrame(data=latent_states[t], columns=labels.latent_factors) lat["aug_period"] = t latent_data_by_period.append(lat) @@ -309,118 +353,143 @@ def _simulate_dataset( def _collapse_aug_periods_to_periods( - df, factors, aug_periods_to_periods, endogenous_factors_info -): + df: pd.DataFrame, + factors: tuple[str, ...], + aug_periods_to_periods: Mapping[int, int], + endogenous_factors_info: EndogenousFactorsInfo, +) -> pd.DataFrame: """Collapse dataframe with aug_period index to user-facing period index. For each factor, extracts from the appropriate aug_period based on is_endogenous. 
Args: - df (pd.DataFrame): DataFrame with columns "aug_period" and "id" - latent_factors (list): List of latent factors - aug_periods_to_periods (dict): Mapping from aug_period to period - endogenous_factors_info (dict): Information about which factors are endogenous + df: DataFrame with columns "aug_period" and "id" + factors: Tuple of latent factors + aug_periods_to_periods: Mapping from aug_period to period + endogenous_factors_info: Information about which factors are endogenous Returns: pd.DataFrame: DataFrame with "period" column instead of "aug_period" """ df = df.copy() - if not endogenous_factors_info["has_endogenous_factors"]: + if not endogenous_factors_info.has_endogenous_factors: return df.rename(columns={"aug_period": "period"}) df["period"] = df["aug_period"].map(aug_periods_to_periods) df["_aug_period_meas_type"] = df["aug_period"].map( - endogenous_factors_info["aug_periods_to_aug_period_meas_types"] + endogenous_factors_info.aug_periods_to_aug_period_meas_types ) endogenous_cols = [ - fac for fac in factors if endogenous_factors_info[fac]["is_endogenous"] + fac for fac in factors if endogenous_factors_info.factor_info[fac].is_endogenous ] state_cols = [fac for fac in factors if fac not in endogenous_cols] - out = df.query('_aug_period_meas_type == "endogenous_factors"')[ - ["id", "period", *endogenous_cols] - ] + is_endogenous = df["_aug_period_meas_type"] == MeasurementType.ENDOGENOUS_FACTORS + is_states = df["_aug_period_meas_type"] == MeasurementType.STATES + + out = df.loc[is_endogenous, ["id", "period", *endogenous_cols]] return pd.merge( out, - df.query('_aug_period_meas_type == "states"')[["id", "period", *state_cols]], + df.loc[is_states, ["id", "period", *state_cols]], on=["id", "period"], how="outer", ) -def _get_shock(mean, sd, size): - """Add stochastic effect to a factor of length n_obs. 
+def _get_shock(
+    rng: np.random.Generator,
+    mean: float,
+    sd: float,
+    size: int,
+) -> NDArray[np.floating]:
+    """Draw a stochastic shock array of length ``size``.
 
     Args:
-        mean (float): mean of the stochastic effect
-        sd (float): standard deviation of the effect
-        size (int): length of resulting array
+        rng: NumPy random number generator.
+        mean: mean of the stochastic effect
+        sd: standard deviation of the effect
+        size: length of resulting array
 
     Returns:
-        shock (np.array): 1d array of length n_obs with the stochastic shock
+        shock: 1d array of length n_obs with the stochastic shock
 
     """
     if sd == 0:
         shock = np.full(size, mean)
     elif sd > 0:
-        shock = np.random.normal(mean, sd, size)
+        shock = rng.normal(mean, sd, size)
     else:
         raise ValueError("No negative standard deviation allowed.")
     return shock
 
 
-def generate_start_states(n_obs, dimensions, dist_args, weights):
+def generate_start_states(
+    rng: np.random.Generator,
+    n_obs: int,
+    dimensions: Dimensions,
+    dist_args: list[dict],
+    weights: NDArray[np.floating],
+) -> NDArray[np.floating]:
     """Draw initial states and control variables from a (mixture of) normals.
 
     Args:
-        n_obs (int): number of observations
-        dimensions (dict): Dimensional information like n_states, n_periods, n_controls,
+        rng: NumPy random number generator.
+        n_obs: number of observations
+        dimensions: Dimensional information like n_states, n_periods, n_controls,
            n_mixtures. See :ref:`dimensions`.
-        dist_args (list): list of dicts of length nmixtures of dictionaries with the
+        dist_args: list of length n_mixtures of dictionaries with the
             entries "mean" and "cov" for each mixture distribution.
+        weights: Array of mixture weights.
Returns:
-        start_states (np.ndarray): shape (n_obs, n_states),
-        controls (np.ndarray): shape (n_obs, n_controls),
+        start_states: array of shape (n_obs, n_states) with draws from the
+            (mixture of) normal distribution(s).
 
     """
-    n_states = dimensions["n_latent_factors"]
+    n_states = dimensions.n_latent_factors
     if np.size(weights) == 1:
-        out = multivariate_normal(size=n_obs, **dist_args[0])
+        out = rng.multivariate_normal(size=n_obs, **dist_args[0])
     else:
-        helper_array = choice(np.arange(len(weights)), p=weights, size=n_obs)
+        helper_array = rng.choice(np.arange(len(weights)), p=weights, size=n_obs)
         out = np.zeros((n_obs, n_states))
         for i in range(n_obs):
-            out[i] = multivariate_normal(**dist_args[helper_array[i]])
+            out[i] = rng.multivariate_normal(**dist_args[helper_array[i]])
     return out
 
 
-def measurements_from_states(states, controls, loadings, control_params, sds):
+def measurements_from_states(
+    rng: np.random.Generator,
+    states: NDArray[np.floating],
+    controls: NDArray[np.floating],
+    loadings: NDArray[np.floating],
+    control_params: NDArray[np.floating],
+    sds: NDArray[np.floating],
+) -> NDArray[np.floating]:
     """Generate the variables that would be observed in practice.
 
     This generates the data for only one period. Let n_meas be the number of
     measurements in that period.
 
     Args:
-        states (pd.DataFrame or np.ndarray): DataFrame of shape (n_obs, n_states)
-        controls (pd.DataFrame or np.ndarray): DataFrame of shape
-            (n_obs, n_controlsrols)
-        loadings (np.ndarray): numpy array of size (n_meas, n_states)
-        control_coeffs (np.ndarray): numpy array of size (n_meas, n_states)
-        sds (np.ndarray): numpy array of size (n_meas) with the standard deviations
+        rng: NumPy random number generator.
+        states: array of shape (n_obs, n_states)
+        controls: array of shape
+            (n_obs, n_controls)
+        loadings: numpy array of size (n_meas, n_states)
+        control_params: numpy array of size (n_meas, n_controls)
+        sds: numpy array of size (n_meas) with the standard deviations
            of the measurements.
Measurement error is assumed to be independent across measurements. Returns: - measurements (np.ndarray): array of shape (n_obs, n_meas) with measurements. + measurements: array of shape (n_obs, n_meas) with measurements. """ n_meas = loadings.shape[0] n_obs = len(states) - epsilon = multivariate_normal([0] * n_meas, np.diag(sds**2), n_obs) + epsilon = rng.multivariate_normal([0] * n_meas, np.diag(sds**2), n_obs) states_part = np.dot(states, loadings.T) control_part = np.dot(controls, control_params.T) - meas = states_part + control_part + epsilon - return meas + return states_part + control_part + epsilon diff --git a/src/skillmodels/test_data/__init__.py b/src/skillmodels/test_data/__init__.py new file mode 100644 index 00000000..f4fd042b --- /dev/null +++ b/src/skillmodels/test_data/__init__.py @@ -0,0 +1 @@ +"""Test data and example model specifications for skillmodels.""" diff --git a/src/skillmodels/test_data/model2.py b/src/skillmodels/test_data/model2.py new file mode 100644 index 00000000..7ae9e3f3 --- /dev/null +++ b/src/skillmodels/test_data/model2.py @@ -0,0 +1,57 @@ +"""Model 2 from the replication files of Cunha, Heckman, and Schennach (2010). + +This model has three latent factors (fac1, fac2, fac3) observed over 8 periods, +with CES, linear, and constant transition functions respectively. It includes +anchoring of fac1 to outcome Q1 and a single control variable x1. 
+""" + +from skillmodels.model_spec import ( + AnchoringSpec, + EstimationOptionsSpec, + FactorSpec, + ModelSpec, + Normalizations, +) + +MODEL2 = ModelSpec( + factors={ + "fac1": FactorSpec( + measurements=(("y1", "y2", "y3"),) * 8, + normalizations=Normalizations( + loadings=({"y1": 1},) * 8, + intercepts=({},) * 8, + ), + transition_function="log_ces", + ), + "fac2": FactorSpec( + measurements=(("y4", "y5", "y6"),) * 8, + normalizations=Normalizations( + loadings=({"y4": 1},) * 8, + intercepts=({},) * 8, + ), + transition_function="linear", + ), + "fac3": FactorSpec( + measurements=(("y7", "y8", "y9"),) + ((),) * 7, + normalizations=Normalizations( + loadings=({"y7": 1},) + ({},) * 7, + intercepts=({},) * 8, + ), + transition_function="constant", + ), + }, + anchoring=AnchoringSpec( + outcomes={"fac1": "Q1"}, + free_controls=True, + free_constant=True, + free_loadings=True, + ignore_constant_when_anchoring=True, + ), + controls=("x1",), + stagemap=(0, 0, 0, 0, 0, 0, 0), + estimation_options=EstimationOptionsSpec( + robust_bounds=True, + bounds_distance=0.001, + n_mixtures=1, + ), +) diff --git a/tests/model2_correct_params_index.csv b/src/skillmodels/test_data/model2_correct_params_index.csv similarity index 100% rename from tests/model2_correct_params_index.csv rename to src/skillmodels/test_data/model2_correct_params_index.csv diff --git a/tests/model2_correct_update_info.csv b/src/skillmodels/test_data/model2_correct_update_info.csv similarity index 100% rename from tests/model2_correct_update_info.csv rename to src/skillmodels/test_data/model2_correct_update_info.csv diff --git a/tests/model2_simulated_data.dta b/src/skillmodels/test_data/model2_simulated_data.dta similarity index 100% rename from tests/model2_simulated_data.dta rename to src/skillmodels/test_data/model2_simulated_data.dta diff --git a/tests/model2_with_endog_correct_update_info.csv b/src/skillmodels/test_data/model2_with_endog_correct_update_info.csv similarity index 100% rename from 
tests/model2_with_endog_correct_update_info.csv rename to src/skillmodels/test_data/model2_with_endog_correct_update_info.csv diff --git a/tests/simplest_augmented_data_expected.csv b/src/skillmodels/test_data/simplest_augmented_data_expected.csv similarity index 100% rename from tests/simplest_augmented_data_expected.csv rename to src/skillmodels/test_data/simplest_augmented_data_expected.csv diff --git a/src/skillmodels/test_data/simplest_augmented_model.py b/src/skillmodels/test_data/simplest_augmented_model.py new file mode 100644 index 00000000..89eeb267 --- /dev/null +++ b/src/skillmodels/test_data/simplest_augmented_model.py @@ -0,0 +1,39 @@ +"""Simplest augmented model with endogenous factors. + +A minimal model with two latent factors (fac1, fac2) and one observed factor (of). +Factor fac2 is endogenous. Both factors use linear transition functions with two +periods. Used for testing endogenous factor augmentation. +""" + +from skillmodels.model_spec import ( + EstimationOptionsSpec, + FactorSpec, + ModelSpec, + Normalizations, +) + +SIMPLEST_AUGMENTED_MODEL = ModelSpec( + factors={ + "fac1": FactorSpec( + measurements=(("var",), ("var",)), + normalizations=Normalizations( + loadings=({"var": 1}, {"var": 1}), + intercepts=({}, {}), + ), + transition_function="linear", + ), + "fac2": FactorSpec( + measurements=(("inv",), ("inv",)), + normalizations=Normalizations( + loadings=({"inv": 1}, {"inv": 1}), + intercepts=({}, {}), + ), + is_endogenous=True, + transition_function="linear", + ), + }, + observed_factors=("of",), + estimation_options=EstimationOptionsSpec( + bounds_distance=1e-8, + ), +) diff --git a/src/skillmodels/transition_functions.py b/src/skillmodels/transition_functions.py index 16427076..9233557b 100644 --- a/src/skillmodels/transition_functions.py +++ b/src/skillmodels/transition_functions.py @@ -14,8 +14,6 @@ Returns: * float - - **names_example_func(** *factors* **)**: Generate a list of names for the params of the transition function. 
@@ -33,21 +31,26 @@ import jax import jax.numpy as jnp +from jax import Array -def linear(states, params): +def linear(states: Array, params: Array) -> Array: """Linear production function where the constant is the last parameter.""" constant = params[-1] betas = params[:-1] return jnp.dot(states, betas) + constant -def params_linear(factors): +def params_linear(factors: tuple[str, ...]) -> list[str]: """Index tuples for linear transition function.""" return [*factors, "constant"] -def identity_constraints_linear(factor, aug_period, all_factors) -> list[dict]: +def identity_constraints_linear( + factor: str, + aug_period: int, + all_factors: tuple[str, ...], +) -> list[dict]: """Identity constraints for linear transition function.""" constraints_dicts = [] for regressor in params_linear(all_factors): @@ -63,7 +66,7 @@ def identity_constraints_linear(factor, aug_period, all_factors) -> list[dict]: return constraints_dicts -def translog(states, params): +def translog(states: Array, params: Array) -> Array: """Translog transition function. 
The name is a convention in the skill formation literature even though the function @@ -85,18 +88,21 @@ def translog(states, params): return res -def params_translog(factors): +def params_translog(factors: tuple[str, ...]) -> list[str]: """Index tuples for the translog production function.""" - names = ( - factors + return ( + list(factors) + [f"{factor} ** 2" for factor in factors] + [f"{a} * {b}" for a, b in combinations(factors, 2)] + ["constant"] ) - return names -def identity_constraints_translog(factor, aug_period, all_factors) -> list[dict]: +def identity_constraints_translog( + factor: str, + aug_period: int, + all_factors: tuple[str, ...], +) -> list[dict]: """Identity constraints for translog transition function.""" constraints_dicts = [] for regressor in params_translog(all_factors): @@ -112,7 +118,7 @@ def identity_constraints_translog(factor, aug_period, all_factors) -> list[dict] return constraints_dicts -def log_ces(states, params): +def log_ces(states: Array, params: Array) -> Array: """Log CES production function (KLS version).""" phi = params[-1] gammas = params[:-1] @@ -124,38 +130,45 @@ def log_ces(states, params): # the log step for gammas underflows for gamma = 0, but this is handled correctly # by logsumexp and does not raise a warning. 
unscaled = jax.scipy.special.logsumexp(jnp.log(gammas) + states * phi) - result = unscaled * scaling_factor - return result + return unscaled * scaling_factor -def params_log_ces(factors): +def params_log_ces(factors: tuple[str, ...]) -> list[str]: """Index tuples for the log_ces production function.""" return [*factors, "phi"] -def constraints_log_ces(factor, factors, aug_period): +def constraints_log_ces( + factor: str, + factors: tuple[str, ...], + aug_period: int, +) -> dict: """Constraints for log_ces production function.""" names = params_log_ces(factors) loc = [("transition", aug_period, factor, name) for name in names[:-1]] return {"loc": loc, "type": "probability"} -def identity_constraints_log_ces(factors, aug_period, all_factors): +def identity_constraints_log_ces( + factors: tuple[str, ...], + aug_period: int, + all_factors: tuple[str, ...], +) -> list[dict]: """Identity constraints for log_ces.""" raise NotImplementedError -def constant(state, params): # noqa: ARG001 +def constant(state: Array, params: Array) -> Array: # noqa: ARG001 """Constant production function.""" return state -def params_constant(factors): # noqa: ARG001 +def params_constant(factors: tuple[str, ...]) -> list[str]: # noqa: ARG001 """Index tuples for the constant production function.""" return [] -def robust_translog(states, params): +def robust_translog(states: Array, params: Array) -> Array: """Numerically robust version of the translog transition function. 
This function does a clipping of the state vector at +- 1e12 before calling @@ -168,19 +181,26 @@ def robust_translog(states, params): """ clipped_states = jnp.clip(states, -1e12, 1e12) - return translog(clipped_states, params) + return translog(states=clipped_states, params=params) -def params_robust_translog(factors): +def params_robust_translog(factors: tuple[str, ...]) -> list[str]: + """Return parameter names for robust translog transition function.""" return params_translog(factors) -def identity_constraints_robust_translog(factor, aug_period, all_factors) -> list[dict]: +def identity_constraints_robust_translog( + factor: str, + aug_period: int, + all_factors: tuple[str, ...], +) -> list[dict]: """Identity constraints for robust_translog.""" - return identity_constraints_translog(factor, aug_period, all_factors) + return identity_constraints_translog( + factor=factor, aug_period=aug_period, all_factors=all_factors + ) -def linear_and_squares(states, params): +def linear_and_squares(states: Array, params: Array) -> Array: """linear_and_squares transition function.""" nfac = len(states) constant = params[-1] @@ -193,14 +213,15 @@ def linear_and_squares(states, params): return res -def params_linear_and_squares(factors): +def params_linear_and_squares(factors: tuple[str, ...]) -> list[str]: """Index tuples for the linear_and_squares production function.""" - names = factors + [f"{factor} ** 2" for factor in factors] + ["constant"] - return names + return list(factors) + [f"{factor} ** 2" for factor in factors] + ["constant"] def identity_constraints_linear_and_squares( - factor, aug_period, all_factors + factor: str, + aug_period: int, + all_factors: tuple[str, ...], ) -> list[dict]: """Identity constraints for linear_and_squares transition function.""" constraints_dicts = [] @@ -217,7 +238,7 @@ def identity_constraints_linear_and_squares( return constraints_dicts -def log_ces_general(states, params): +def log_ces_general(states: Array, params: Array) -> Array: 
"""Generalized log_ces production function without known location and scale.""" n = states.shape[-1] tfp = params[-1] @@ -230,15 +251,18 @@ def log_ces_general(states, params): # the log step for gammas underflows for gamma = 0, but this is handled correctly # by logsumexp and does not raise a warning. unscaled = jax.scipy.special.logsumexp(jnp.log(gammas) + states * sigmas) - result = unscaled * tfp - return result + return unscaled * tfp -def params_log_ces_general(factors): +def params_log_ces_general(factors: tuple[str, ...]) -> list[str]: """Index tuples for the generalized log_ces production function.""" - return factors + [f"sigma_{fac}" for fac in factors] + ["tfp"] + return list(factors) + [f"sigma_{fac}" for fac in factors] + ["tfp"] -def identity_constraints_log_ces_general(factors, aug_period, all_factors): +def identity_constraints_log_ces_general( + factors: tuple[str, ...], + aug_period: int, + all_factors: tuple[str, ...], +) -> list[dict]: """Identity constraints for log_ces_general.""" raise NotImplementedError diff --git a/src/skillmodels/types.py b/src/skillmodels/types.py new file mode 100644 index 00000000..b7710ca6 --- /dev/null +++ b/src/skillmodels/types.py @@ -0,0 +1,386 @@ +"""Dataclass definitions for skillmodels internal data structures.""" + +import copyreg +from collections.abc import Callable, Mapping +from dataclasses import dataclass +from enum import Enum, auto +from types import MappingProxyType +from typing import NewType + +import pandas as pd +from jax import Array + + +def _make_immutable(value: object) -> object: + """Recursively convert mutable containers to immutable equivalents. + + - dict → MappingProxyType + - list → tuple + + Other types are returned unchanged. 
+ """ + if isinstance(value, dict): + return MappingProxyType({k: _make_immutable(v) for k, v in value.items()}) + if isinstance(value, list): + return tuple(_make_immutable(v) for v in value) + return value + + +def ensure_containers_are_immutable( + value: Mapping, +) -> MappingProxyType: + """Convert a Mapping to a MappingProxyType, leaving existing proxies unchanged.""" + if isinstance(value, MappingProxyType): + return value + return MappingProxyType(dict(value)) + + +def _reduce_mapping_proxy(mp: MappingProxyType) -> tuple: + return ensure_containers_are_immutable, (dict(mp),) + + +copyreg.pickle(MappingProxyType, _reduce_mapping_proxy) + + +# NewType definitions for domain safety +# These prevent accidentally mixing up semantically different int values +Period = NewType("Period", int) +AugPeriod = NewType("AugPeriod", int) +Stage = NewType("Stage", int) +AugStage = NewType("AugStage", int) + + +class FactorType(Enum): + """Type of a latent factor in the model.""" + + STATE = auto() # Regular state factor + ENDOGENOUS = auto() # Endogenous factor (not a correction) + CORRECTION = auto() # Correction factor (is_endogenous=True, is_correction=True) + + +class MeasurementType(Enum): + """Type of measurement in an augmented period.""" + + STATES = auto() + ENDOGENOUS_FACTORS = auto() + + +@dataclass(frozen=True) +class Dimensions: + """Dimensional information for a skill formation model. + + All fields represent counts of model components. + """ + + n_latent_factors: int + n_observed_factors: int + n_controls: int + n_mixtures: int + n_aug_periods: int + n_periods: int + + @property + def n_all_factors(self) -> int: + """Total number of factors (latent + observed).""" + return self.n_latent_factors + self.n_observed_factors + + +@dataclass(frozen=True) +class Labels: + """Labels for model quantities. + + Contains string identifiers for factors, periods, controls, and stages. + """ + + latent_factors: tuple[str, ...] + observed_factors: tuple[str, ...] 
+ controls: tuple[str, ...] + periods: tuple[int, ...] + stagemap: tuple[int, ...] + stages: tuple[int, ...] + aug_periods: tuple[int, ...] + aug_periods_to_periods: Mapping[int, int] + aug_stagemap: tuple[int, ...] + aug_stages: tuple[int, ...] + aug_stages_to_stages: Mapping[int, int] + transition_names: tuple[str, ...] = () + + def __post_init__(self) -> None: # noqa: D105 + object.__setattr__( + self, + "aug_periods_to_periods", + ensure_containers_are_immutable(self.aug_periods_to_periods), + ) + object.__setattr__( + self, + "aug_stages_to_stages", + ensure_containers_are_immutable(self.aug_stages_to_stages), + ) + + @property + def all_factors(self) -> tuple[str, ...]: + """All factor names (latent + observed).""" + return self.latent_factors + self.observed_factors + + +@dataclass(frozen=True) +class Anchoring: + """Information about how latent factors are anchored to observed outcomes.""" + + anchoring: bool + outcomes: Mapping[str, str] + factors: tuple[str, ...] + free_controls: bool + free_constant: bool + free_loadings: bool + ignore_constant_when_anchoring: bool + + def __post_init__(self) -> None: # noqa: D105 + object.__setattr__( + self, "outcomes", ensure_containers_are_immutable(self.outcomes) + ) + + @classmethod + def disabled(cls) -> Anchoring: + """Create an Anchoring config with anchoring disabled.""" + return cls( + anchoring=False, + outcomes={}, + factors=(), + free_controls=False, + free_constant=False, + free_loadings=False, + ignore_constant_when_anchoring=False, + ) + + @classmethod + def from_config( + cls, + outcomes: dict[str, str], + *, + free_controls: bool = False, + free_constant: bool = False, + free_loadings: bool = False, + ignore_constant_when_anchoring: bool = False, + ) -> Anchoring: + """Create an Anchoring config from a configuration dictionary. + + Args: + outcomes: Mapping from factor names to outcome variable names. + free_controls: Whether control parameters are free in anchoring equations. 
+ free_constant: Whether constant is free in anchoring equations. + free_loadings: Whether loadings are free in anchoring equations. + ignore_constant_when_anchoring: Whether to ignore constant when anchoring. + + Returns: + Configured Anchoring instance with anchoring enabled. + + """ + return cls( + anchoring=True, + outcomes=outcomes, + factors=tuple(outcomes.keys()), + free_controls=free_controls, + free_constant=free_constant, + free_loadings=free_loadings, + ignore_constant_when_anchoring=ignore_constant_when_anchoring, + ) + + +@dataclass(frozen=True) +class EstimationOptions: + """Tuning parameters for the estimation.""" + + sigma_points_scale: float + robust_bounds: bool + bounds_distance: float + clipping_lower_bound: float | None + clipping_upper_bound: float | None + clipping_lower_hardness: float + clipping_upper_hardness: float + + +@dataclass(frozen=True) +class TransitionInfo: + """Information about transition functions.""" + + func: Callable + param_names: Mapping[str, list[str]] + individual_functions: Mapping[str, Callable] + function_names: Mapping[str, str] + + def __post_init__(self) -> None: # noqa: D105 + object.__setattr__( + self, "param_names", ensure_containers_are_immutable(self.param_names) + ) + object.__setattr__( + self, + "individual_functions", + ensure_containers_are_immutable(self.individual_functions), + ) + object.__setattr__( + self, + "function_names", + ensure_containers_are_immutable(self.function_names), + ) + + +@dataclass(frozen=True) +class FactorInfo: + """Information for a single factor.""" + + factor_type: FactorType + + @property + def is_state(self) -> bool: + """Whether the factor is a regular state factor.""" + return self.factor_type == FactorType.STATE + + @property + def is_endogenous(self) -> bool: + """Whether the factor is endogenous (ENDOGENOUS or CORRECTION).""" + return self.factor_type in (FactorType.ENDOGENOUS, FactorType.CORRECTION) + + @property + def is_correction(self) -> bool: + """Whether the 
factor is a correction factor.""" + return self.factor_type == FactorType.CORRECTION + + @classmethod + def from_flags( + cls, *, is_endogenous: bool = False, is_correction: bool = False + ) -> FactorInfo: + """Create FactorInfo from boolean flags. + + Args: + is_endogenous: Whether the factor is endogenous. + is_correction: Whether the factor is a correction (must be endogenous). + + Returns: + FactorInfo with the appropriate FactorType. + + Raises: + ValueError: If is_correction is True but is_endogenous is False. + + """ + if is_correction and not is_endogenous: + msg = "A correction factor must also be endogenous" + raise ValueError(msg) + if is_correction: + return cls(factor_type=FactorType.CORRECTION) + if is_endogenous: + return cls(factor_type=FactorType.ENDOGENOUS) + return cls(factor_type=FactorType.STATE) + + +@dataclass(frozen=True) +class EndogenousFactorsInfo: + """Information about endogenous factors in the model.""" + + has_endogenous_factors: bool + aug_periods_to_aug_period_meas_types: Mapping[int, MeasurementType] + bounds_distance: float + aug_periods_from_period: Callable[[int], list[int]] + factor_info: Mapping[str, FactorInfo] + + def __post_init__(self) -> None: # noqa: D105 + object.__setattr__( + self, + "aug_periods_to_aug_period_meas_types", + ensure_containers_are_immutable(self.aug_periods_to_aug_period_meas_types), + ) + object.__setattr__( + self, + "factor_info", + ensure_containers_are_immutable(self.factor_info), + ) + + +@dataclass(frozen=True) +class ProcessedModel: + """Complete processed model specification. + + This is the main output of process_model() containing all information + needed for estimation. 
+ """ + + dimensions: Dimensions + labels: Labels + anchoring: Anchoring + estimation_options: EstimationOptions + transition_info: TransitionInfo + update_info: pd.DataFrame + normalizations: dict[str, dict[str, list]] + endogenous_factors_info: EndogenousFactorsInfo + + +@dataclass(frozen=True) +class LoadingsParsingInfo: + """Information for parsing factor loadings from parameter vector.""" + + slice: Array | slice + flat_indices: Array + shape: tuple[int, ...] + size: int + + +@dataclass(frozen=True) +class ParsingInfo: + """Information for parsing the parameter vector. + + Maps model quantities to positions or slices of the parameter vector. + """ + + initial_states: Array | slice + initial_cholcovs: Array | slice + mixture_weights: Array | slice + controls: Array | slice + meas_sds: Array | slice + shock_sds: Array | slice + loadings: LoadingsParsingInfo + transition: dict[str, Array | slice] + is_anchoring_loading: Array + is_anchored_factor: Array + is_anchoring_update: Array + ignore_constant_when_anchoring: bool + has_endogenous_factors: bool + + +@dataclass(frozen=True) +class ParsedParams: + """Parsed parameters from the flat parameter vector. + + Contains all model parameters in structured arrays. + """ + + controls: Array + loadings: Array + meas_sds: Array + shock_sds: Array + transition: dict[str, Array] + anchoring_scaling_factors: Array + anchoring_constants: Array + + +@dataclass(frozen=True) +class ProcessedData: + """Processed data arrays for estimation. + + All arrays are JAX arrays ready for use in the likelihood function. + """ + + measurements: Array + controls: Array + observed_factors: Array + + +@dataclass(frozen=True) +class KalmanState: + """State carried through Kalman filter iterations. + + Used as the carry state in jax.lax.scan. 
+ """ + + states: Array + upper_chols: Array + log_mixture_weights: Array diff --git a/src/skillmodels/utilities.py b/src/skillmodels/utilities.py index dd4f385b..37c2908d 100644 --- a/src/skillmodels/utilities.py +++ b/src/skillmodels/utilities.py @@ -1,9 +1,16 @@ +"""Utility functions for manipulating model specifications and parameters.""" + import warnings -from copy import deepcopy +from dataclasses import replace import numpy as np import pandas as pd +from skillmodels.model_spec import ( + FactorSpec, + ModelSpec, + Normalizations, +) from skillmodels.params_index import get_params_index from skillmodels.process_model import ( get_dimensions, @@ -12,35 +19,41 @@ ) -def extract_factors(factors, model_dict, params=None): +def extract_factors( + factors: str | list[str], + model_spec: ModelSpec, + params: pd.DataFrame | None = None, +) -> ModelSpec | tuple[ModelSpec, pd.DataFrame]: """Reduce a specification to a model with fewer latent factors. If provided, a params DataFrame is also reduced correspondingly. Args: - factors (str or list): Name(s) of the factor(s) to extract. - model_dict (dict): The model specification. See: :ref:`model_specs`. - params (pandas.DataFrame or None): The params DataFrame for the full model. + factors: Name(s) of the factor(s) to extract. + model_spec: The model specification. See: :ref:`model_specs`. + params: The params DataFrame for the full model. 
Returns: - dict: The reduced model dictionary + ModelSpec: The reduced model specification pandas.DataFrame: The reduced parameter DataFrame (only if params is not None) """ if isinstance(factors, str): factors = [factors] - to_remove = set(model_dict["factors"]).difference(factors) - out = remove_factors(to_remove, model_dict, params) - return out + to_remove = list(set(model_spec.factors).difference(factors)) + return remove_factors(factors=to_remove, model_spec=model_spec, params=params) -def update_parameter_values(params, others): +def update_parameter_values( + params: pd.DataFrame, + others: pd.DataFrame | list[pd.DataFrame], +) -> pd.DataFrame: """Update the "value" column of params with values from other. Args: - params (pandas.DataFrame or None): The params DataFrame for the full model. - others (pandas.DataFrame or list): Another DataFrame with parameters or list + params: The params DataFrame for the full model. + others: Another DataFrame with parameters or list of thereof. The values from other are used to update the value column of ``params``. If other is a list, the updates will be in order, i.e. later elements overwrite earlier ones. @@ -67,7 +80,11 @@ def update_parameter_values(params, others): return out -def remove_factors(factors, model_dict, params=None): +def remove_factors( + factors: str | list[str], + model_spec: ModelSpec, + params: pd.DataFrame | None = None, +) -> ModelSpec | tuple[ModelSpec, pd.DataFrame]: """Remove factors from a model specification. If provided, a params DataFrame is also reduced correspondingly. @@ -76,147 +93,198 @@ def remove_factors(factors, model_dict, params=None): This happens if the remaining factors do not have measurements in later periods. Args: - factors (str or list): Name(s) of the factor(s) to remove. - model_dict (dict): The model specification. See: :ref:`model_specs`. - params (pandas.DataFrame or None): The params DataFrame for the full model. + factors: Name(s) of the factor(s) to remove. 
+ model_spec: The model specification. See: :ref:`model_specs`. + params: The params DataFrame for the full model. Returns: - dict: The reduced model dictionary + ModelSpec: The reduced model specification pandas.DataFrame: The reduced parameter DataFrame (only if params is not None) """ - # We need this for the full model when endogenous factors are present. - has_endogenous_factors = get_has_endogenous_factors(model_dict["factors"]) + if isinstance(factors, str): + factors = [factors] - out = deepcopy(model_dict) + # We need this for the full model when endogenous factors are present. + has_endogenous_factors = get_has_endogenous_factors(model_spec.factors) - out["factors"] = _remove_from_dict(out["factors"], factors) + new_factors = {k: v for k, v in model_spec.factors.items() if k not in factors} # adjust anchoring - if "anchoring" in model_dict: - out["anchoring"]["outcomes"] = _remove_from_dict( - out["anchoring"]["outcomes"], - factors, - ) - if out["anchoring"]["outcomes"] == {}: - out = _remove_from_dict(out, "anchoring") + new_anchoring = model_spec.anchoring + if new_anchoring is not None: + new_outcomes = { + k: v for k, v in new_anchoring.outcomes.items() if k not in factors + } + if new_outcomes: + new_anchoring = replace(new_anchoring, outcomes=new_outcomes) + else: + new_anchoring = None + + out = model_spec._replace( + factors=new_factors, + anchoring=new_anchoring, + ) # Remove periods if necessary, but only if no endogenous factors are present. 
# (else we would mess up the mapping between raw periods model periods) if not has_endogenous_factors: - new_n_periods = get_dimensions(out, has_endogenous_factors)["n_periods"] - out = reduce_n_periods(out, new_n_periods) + new_n_periods = get_dimensions( + out, has_endogenous_factors=has_endogenous_factors + ).n_periods + reduced = reduce_n_periods(model_spec=out, new_n_periods=new_n_periods) + if not isinstance(reduced, ModelSpec): + msg = "Expected ModelSpec from reduce_n_periods without params" + raise TypeError(msg) + out = reduced if params is not None: - out_params = _reduce_params(params, out, has_endogenous_factors) - out = (out, out_params) + out_params = _reduce_params( + params, + out, + has_endogenous_factors=has_endogenous_factors, + ) + return (out, out_params) return out -def remove_measurements(measurements, model_dict, params=None): +def remove_measurements( + measurements: str | list[str], + model_spec: ModelSpec, + params: pd.DataFrame | None = None, +) -> ModelSpec | tuple[ModelSpec, pd.DataFrame]: """Remove measurements from a model specification. If provided, a params DataFrame is also reduced correspondingly. Args: - measurements (str or list): Name(s) of the measurement(s) to remove. - model_dict (dict): The model specification. See: :ref:`model_specs`. - params (pandas.DataFrame or None): The params DataFrame for the full model. + measurements: Name(s) of the measurement(s) to remove. + model_spec: The model specification. See: :ref:`model_specs`. + params: The params DataFrame for the full model. 
Returns: - dict: The reduced model dictionary + ModelSpec: The reduced model specification pandas.DataFrame: The reduced parameter DataFrame (only if params is not None) """ - out = deepcopy(model_dict) - - for factor in model_dict["factors"]: - full = model_dict["factors"][factor]["measurements"] - reduced = [_remove_from_list(meas_list, measurements) for meas_list in full] - out["factors"][factor]["measurements"] = reduced - - norminfo = model_dict["factors"][factor].get("normalizations", {}) - if "loadings" in norminfo: - out["factors"][factor]["normalizations"]["loadings"] = ( - _remove_measurements_from_normalizations( - measurements, - norminfo["loadings"], - ) - ) + if isinstance(measurements, str): + measurements = [measurements] + + new_factors: dict[str, FactorSpec] = {} + for factor, fspec in model_spec.factors.items(): + new_meas = tuple( + tuple(m for m in period_meas if m not in measurements) + for period_meas in fspec.measurements + ) - if "intercepts" in norminfo: - out["factors"][factor]["normalizations"]["intercepts"] = ( - _remove_measurements_from_normalizations( - measurements, - norminfo["intercepts"], + new_normalizations = fspec.normalizations + if new_normalizations is not None: + new_loadings = tuple( + {k: v for k, v in d.items() if k not in measurements} + for d in new_normalizations.loadings + ) + new_intercepts = tuple( + {k: v for k, v in d.items() if k not in measurements} + for d in new_normalizations.intercepts + ) + if new_loadings != new_normalizations.loadings or ( + new_intercepts != new_normalizations.intercepts + ): + warnings.warn( + "Your removed a normalized measurement from a model. 
Make sure " + "there are enough normalizations left to ensure identification.", + stacklevel=2, ) + new_normalizations = Normalizations( + loadings=new_loadings, + intercepts=new_intercepts, ) + new_factors[factor] = replace( + fspec, measurements=new_meas, normalizations=new_normalizations + ) + + out = model_spec._replace(factors=new_factors) + if params is not None: # This likely won't work if we have endogenous factors. out_params = _reduce_params(params, out, has_endogenous_factors=False) - out = (out, out_params) + return (out, out_params) return out -def remove_controls(controls, model_dict, params=None): +def remove_controls( + controls: str | list[str], + model_spec: ModelSpec, + params: pd.DataFrame | None = None, +) -> ModelSpec | tuple[ModelSpec, pd.DataFrame]: """Remove control variables from a model specification. If provided, a params DataFrame is also reduced correspondingly. Args: - controls (str or list): Name(s) of the contral variable(s) to remove. - model_dict (dict): The model specification. See: :ref:`model_specs`. - params (pandas.DataFrame or None): The params DataFrame for the full model. + controls: Name(s) of the contral variable(s) to remove. + model_spec: The model specification. See: :ref:`model_specs`. + params: The params DataFrame for the full model. Returns: - dict: The reduced model dictionary + ModelSpec: The reduced model specification pandas.DataFrame: The reduced parameter DataFrame (only if params is not None) """ - out = deepcopy(model_dict) - out["controls"] = _remove_from_list(out["controls"], controls) - if out["controls"] == []: - out = _remove_from_dict(out, "controls") + if isinstance(controls, str): + controls = [controls] + + new_controls = tuple(c for c in model_spec.controls if c not in controls) + out = model_spec._replace(controls=new_controls) if params is not None: # This likely won't work if we have endogenous factors. 
out_params = _reduce_params(params, out, has_endogenous_factors=False) - out = (out, out_params) + return (out, out_params) return out -def switch_translog_to_linear(model_dict, params=None): +def switch_translog_to_linear( + model_spec: ModelSpec, + params: pd.DataFrame | None = None, +) -> ModelSpec | tuple[ModelSpec, pd.DataFrame]: """Switch all translog production functions to linear. If provided, a params DataFrame is also reduced correspondingly. Args: - model_dict (dict): The model specification. See: :ref:`model_specs`. - params (pandas.DataFrame or None): The params DataFrame for the full model. + model_spec: The model specification. See: :ref:`model_specs`. + params: The params DataFrame for the full model. Returns: - dict: The reduced model dictionary + ModelSpec: The reduced model specification pandas.DataFrame: The reduced parameter DataFrame (only if params is not None) """ - out = deepcopy(model_dict) - for factor in model_dict["factors"]: - if model_dict["factors"][factor]["transition_function"] == "translog": - out["factors"][factor]["transition_function"] = "linear" + new_factors: dict[str, FactorSpec] = {} + for name, fspec in model_spec.factors.items(): + if fspec.transition_function == "translog": + new_factors[name] = fspec.with_transition_function("linear") + else: + new_factors[name] = fspec + out = model_spec._replace(factors=new_factors) if params is not None: # This likely won't work if we have endogenous factors. out_params = _reduce_params(params, out, has_endogenous_factors=False) - out = (out, out_params) + return (out, out_params) return out -def switch_linear_to_translog(model_dict, params=None): +def switch_linear_to_translog( + model_spec: ModelSpec, + params: pd.DataFrame | None = None, +) -> ModelSpec | tuple[ModelSpec, pd.DataFrame]: """Switch all linear production functions to translog. If provided, a params DataFrame is also extended correspondingly. 
The fill value @@ -225,95 +293,96 @@ def switch_linear_to_translog(model_dict, params=None): the additional parameters are not initialized at zero. Args: - model_dict (dict): The model specification. See: :ref:`model_specs`. - params (pandas.DataFrame or None): The params DataFrame for the full model. + model_spec: The model specification. See: :ref:`model_specs`. + params: The params DataFrame for the full model. Returns: - dict: The reduced model dictionary + ModelSpec: The reduced model specification pandas.DataFrame: The reduced parameter DataFrame (only if params is not None) """ - out = deepcopy(model_dict) - for factor in model_dict["factors"]: - if model_dict["factors"][factor]["transition_function"] == "linear": - out["factors"][factor]["transition_function"] = "translog" + new_factors: dict[str, FactorSpec] = {} + for name, fspec in model_spec.factors.items(): + if fspec.transition_function == "linear": + new_factors[name] = fspec.with_transition_function("translog") + else: + new_factors[name] = fspec + out = model_spec._replace(factors=new_factors) if params is not None: - out_params = _extend_params(params, out, 0.05) - out = (out, out_params) + out_params = _extend_params(params=params, model_spec=out, fill_value=0.05) + return (out, out_params) + return out -def reduce_n_periods(model_dict, new_n_periods, params=None): +def reduce_n_periods( + model_spec: ModelSpec, + new_n_periods: int, + params: pd.DataFrame | None = None, +) -> ModelSpec | tuple[ModelSpec, pd.DataFrame]: """Remove all periods after n_periods. Args: - model_dict (dict): The model specification. See: :ref:`model_specs`. - new_n_periods (int): The new number of periods. - params (pandas.DataFrame or None): The params DataFrame for the full model. + model_spec: The model specification. See: :ref:`model_specs`. + new_n_periods: The new number of periods. + params: The params DataFrame for the full model. 
Returns: - dict: The reduced model dictionary + ModelSpec: The reduced model specification pandas.DataFrame: The reduced parameter DataFrame (only if params is not None) """ - out = deepcopy(model_dict) - for factor in model_dict["factors"]: - out["factors"][factor]["measurements"] = _shorten_if_necessary( - out["factors"][factor]["measurements"], - new_n_periods, + new_factors: dict[str, FactorSpec] = {} + for name, fspec in model_spec.factors.items(): + new_meas = fspec.measurements[:new_n_periods] + new_normalizations = fspec.normalizations + if new_normalizations is not None: + new_normalizations = Normalizations( + loadings=new_normalizations.loadings[:new_n_periods], + intercepts=new_normalizations.intercepts[:new_n_periods], + ) + new_factors[name] = replace( + fspec, measurements=new_meas, normalizations=new_normalizations ) - norminfo = model_dict["factors"][factor].get("normalizations", {}) - if "loadings" in norminfo: - out["factors"][factor]["normalizations"]["loadings"] = ( - _shorten_if_necessary(norminfo["loadings"], new_n_periods) - ) + new_stagemap = model_spec.stagemap + if new_stagemap is not None and len(new_stagemap) > new_n_periods - 1: + new_stagemap = new_stagemap[: new_n_periods - 1] - if "intercepts" in norminfo: - out["factors"][factor]["normalizations"]["intercepts"] = ( - _shorten_if_necessary(norminfo["intercepts"], new_n_periods) - ) - - if "stagemap" in out: - out["stagemap"] = _shorten_if_necessary(out["stagemap"], new_n_periods - 1) + out = model_spec._replace( + factors=new_factors, + stagemap=new_stagemap, + ) if params is not None: - out_params = _extend_params(params, out, 0.05) - out = (out, out_params) + out_params = _extend_params(params=params, model_spec=out, fill_value=0.05) + return (out, out_params) return out -def _remove_from_list(list_, to_remove): - if isinstance(to_remove, str): - to_remove = [to_remove] - return [element for element in list_ if element not in to_remove] - - -def _remove_from_dict(dict_, 
to_remove): - if isinstance(to_remove, str): - to_remove = [to_remove] - - return {key: val for key, val in dict_.items() if key not in to_remove} - - -def _reduce_params(params, model_dict, has_endogenous_factors): +def _reduce_params( + params: pd.DataFrame, + model_spec: ModelSpec, + *, + has_endogenous_factors: bool, +) -> pd.DataFrame: """Reduce a parameter DataFrame from a larger model to a reduced model. The reduced model must be nested in the original model for which the params DataFrame was constructed. Args: - params (pandas.DataFrame or None): The params DataFrame for the full model. - model_dict (dict): The model specification. See: :ref:`model_specs`. - has_endogenous_factors (bool): Whether the model has endogenous factors. + params: The params DataFrame for the full model. + model_spec: The model specification. See: :ref:`model_specs`. + has_endogenous_factors: Whether the model has endogenous factors. Returns: pandas.DataFrame: The reduced parameters DataFrame. """ - index = _get_params_index_from_model_dict(model_dict) + index = _get_params_index(model_spec) # If we have endogenous factors, we need to keep the periods from params. 
if has_endogenous_factors: df = pd.merge( @@ -328,8 +397,12 @@ def _reduce_params(params, model_dict, has_endogenous_factors): return params.loc[index] -def _extend_params(params, model_dict, fill_value): - index = _get_params_index_from_model_dict(model_dict) +def _extend_params( + params: pd.DataFrame, + model_spec: ModelSpec, + fill_value: float, +) -> pd.DataFrame: + index = _get_params_index(model_spec) out = params.reindex(index) out["value"] = out["value"].fillna(fill_value) if "lower_bound" in out: @@ -341,29 +414,14 @@ def _extend_params(params, model_dict, fill_value): return out -def _get_params_index_from_model_dict(model_dict): - mod = process_model(model_dict) - index = get_params_index( - update_info=mod["update_info"], - labels=mod["labels"], - dimensions=mod["dimensions"], - transition_info=mod["transition_info"], - endogenous_factors_info=mod["endogenous_factors_info"], +def _get_params_index( + model_spec: ModelSpec, +) -> pd.MultiIndex: + mod = process_model(model_spec) + return get_params_index( + update_info=mod.update_info, + labels=mod.labels, + dimensions=mod.dimensions, + transition_info=mod.transition_info, + endogenous_factors_info=mod.endogenous_factors_info, ) - return index - - -def _remove_measurements_from_normalizations(measurements, normalizations): - reduced = [_remove_from_dict(norm, measurements) for norm in normalizations] - if reduced != normalizations: - warnings.warn( - "Your removed a normalized measurement from a model. 
Make sure there are " - "enough normalizations left to ensure identification.", - ) - return reduced - - -def _shorten_if_necessary(list_, length): - if len(list_) > length: - list_ = list_[:length] - return list_ diff --git a/src/skillmodels/utils_plotting.py b/src/skillmodels/utils_plotting.py index 491ac9e7..b66b89b8 100644 --- a/src/skillmodels/utils_plotting.py +++ b/src/skillmodels/utils_plotting.py @@ -1,14 +1,19 @@ +"""Utility functions for configuring plot layouts and subplots.""" + +from typing import Any + import numpy as np def get_layout_kwargs( - layout_kwargs=None, - legend_kwargs=None, - title_kwargs=None, - showlegend=False, - columns=None, - rows=None, -): + layout_kwargs: dict[str, Any] | None = None, + legend_kwargs: dict[str, Any] | None = None, + title_kwargs: dict[str, Any] | None = None, + *, + showlegend: bool = False, + columns: list[str] | tuple[str, ...] | None = None, + rows: list[str] | tuple[str, ...] | None = None, +) -> dict[str, Any]: """Define and update default kwargs for update_layout. 
Defines some default keyword arguments to update figure layout, such as @@ -37,13 +42,14 @@ def get_layout_kwargs( def get_make_subplot_kwargs( - sharex, - sharey, - column_order, - row_order, - make_subplot_kwargs, - add_scenes=False, -): + *, + sharex: bool, + sharey: bool, + column_order: list[str] | tuple[str, ...], + row_order: list[str] | tuple[str, ...], + make_subplot_kwargs: dict[str, Any] | None, + add_scenes: bool = False, +) -> dict[str, Any]: """Define and update keywargs for instantiating figure with subplots.""" nrows = len(row_order) ncols = len(column_order) diff --git a/src/skillmodels/visualize_factor_distributions.py b/src/skillmodels/visualize_factor_distributions.py index 8d9dea6a..cdf7587a 100644 --- a/src/skillmodels/visualize_factor_distributions.py +++ b/src/skillmodels/visualize_factor_distributions.py @@ -1,74 +1,85 @@ +"""Functions to visualize distributions of latent factors.""" + import warnings +from collections.abc import Mapping from copy import deepcopy +from typing import Any import numpy as np import pandas as pd import plotly.express as px import plotly.figure_factory as ff import plotly.graph_objects as go +from numpy.typing import NDArray from plotly.subplots import make_subplots from scipy.stats import gaussian_kde from skillmodels.filtered_states import get_filtered_states +from skillmodels.model_spec import ModelSpec from skillmodels.process_model import process_model +from skillmodels.types import ProcessedModel from skillmodels.utils_plotting import get_layout_kwargs, get_make_subplot_kwargs def combine_distribution_plots( - kde_plots, - contour_plots, - surface_plots=None, - factor_order=None, - factor_mapping=None, - make_subplot_kwargs=None, - sharex=False, - sharey=False, - line_width=1.5, - showlegend=False, - layout_kwargs=None, - legend_kwargs=None, - title_kwargs=None, - eye_x=2.2, - eye_y=2.2, - eye_z=1, -): + kde_plots: dict[str, go.Figure], + contour_plots: dict[tuple[str, str], go.Figure], + surface_plots: 
dict[tuple[str, str], go.Figure] | None = None, + factor_order: list[str] | tuple[str, ...] | None = None, + factor_mapping: dict[str, str] | None = None, + make_subplot_kwargs: dict[str, Any] | None = None, + *, + sharex: bool = False, + sharey: bool = False, + line_width: float = 1.5, + showlegend: bool = False, + layout_kwargs: dict[str, Any] | None = None, + legend_kwargs: dict[str, Any] | None = None, + title_kwargs: dict[str, Any] | None = None, + eye_x: float = 2.2, + eye_y: float = 2.2, + eye_z: float = 1, +) -> go.Figure: """Combine individual plots into figure with subplots. Uses dictionary with plotly images as values to build plotly Figure with subplots. Args: - kde_plots (dict): Dictionary with plots of indivudal factor kde plots. - contour_plots (dict): Dictionary with plots of pairwise factor density + kde_plots: Dictionary with plots of indivudal factor kde plots. + contour_plots: Dictionary with plots of pairwise factor density contours. - surface_plots (dict): Dictionary with plots of pairwise factor density + surface_plots: Dictionary with plots of pairwise factor density 3d plots. - make_subplot_kwargs (dict or NoneType): Dictionary of keyword arguments used + factor_order: List of factor names to define the order of + subplots. If None, uses the order from kde_plots keys. + make_subplot_kwargs: Dictionary of keyword arguments used to instantiate plotly Figure with multiple subplots. Is used to define properties such as, for example, the spacing between subplots. If None, default arguments defined in the function are used. - factor_mapping (dct): Dictionary to change displayed factor names. - sharex (bool): Whether to share the properties of x-axis across subplots. + factor_mapping: Dictionary to change displayed factor names. + sharex: Whether to share the properties of x-axis across subplots. Default False. - sharey (bool): Whether to share the properties ofy-axis across subplots. 
+ sharey: Whether to share the properties ofy-axis across subplots. Default True. - line_width (float): A float used to set same line width across subplots. - showlegend (bool): Display legend if True. - layout_kwargs (dict or NoneType): Dictionary of key word arguments used to + line_width: A float used to set same line width across subplots. + showlegend: Display legend if True. + layout_kwargs: Dictionary of key word arguments used to update layout of plotly Figure object. If None, the default kwargs defined in the function will be used. - legend_kwargs (dict or NoneType): Dictionary of key word arguments used to + legend_kwargs: Dictionary of key word arguments used to update position, orientation and title of figure legend. If None, default position and orientation will be used with no title. - title_kwargs (dict or NoneType): Dictionary of key word arguments used to + title_kwargs: Dictionary of key word arguments used to update properties of the figure title. Use {'text': ''} to set figure title. If None, infers title based on the value of `quntiles_of_other_factors`. - eye_x, eye_y and eye_z (float): Control camera (view point) of the 3d plots. - Together they form the a norm, and the larger the norm, the more zoomed out - is the view. Setting eye_z to a lower value lowers the view point. + eye_x: Control camera x position for the 3d plots. Default 2.2. + eye_y: Control camera y position for the 3d plots. Default 2.2. + eye_z: Control camera z position for the 3d plots. Default 1. + Setting eye_z to a lower value lowers the view point. Returns: - fig (plotly.Figure): Plotly figure with subplots that combines pairwise + fig: Plotly figure with subplots that combines pairwise distrubtion plots. 
""" @@ -76,8 +87,8 @@ def combine_distribution_plots( contour_plots = deepcopy(contour_plots) surface_plots = deepcopy(surface_plots) factors = list(kde_plots.keys()) - factor_names = _process_factor_mapping_dist(factor_mapping, factors) - ordered_factors = _get_ordered_factors(factor_order, factors) + factor_names = _process_factor_mapping_dist(mapper=factor_mapping, factors=factors) + ordered_factors = _get_ordered_factors(factor_order=factor_order, factors=factors) make_subplot_kwargs = get_make_subplot_kwargs( sharex=sharex, sharey=sharey, @@ -149,92 +160,90 @@ def combine_distribution_plots( def univariate_densities( - data, - model_dict, - params, - period, - factors=None, - observed_factors=False, - states=None, - show_curve=True, - show_hist=False, - show_rug=False, - curve_type="kde", - colorscale="D3", - bin_size=1, - distplot_kwargs=None, - layout_kwargs=None, -): + data: pd.DataFrame, + model_spec: ModelSpec, + params: pd.DataFrame, + period: int, + factors: list[str] | tuple[str, ...] | None = None, + *, + observed_factors: bool = False, + states: pd.DataFrame | dict[str, pd.DataFrame] | list[pd.DataFrame] | None = None, + show_curve: bool = True, + show_hist: bool = False, + show_rug: bool = False, + curve_type: str = "kde", + colorscale: str = "D3", + bin_size: float = 1, + distplot_kwargs: dict[str, Any] | None = None, + layout_kwargs: dict[str, Any] | None = None, +) -> dict[str, go.Figure]: """Get dictionary with kernel density estimate plots for each factor. Plots kernel densities for latent factors and collects them in a dictionary with factor names as keys. Args: - data (DataFrame): Model estimation input data. - model_dict (dict): Dictionary with model specifications. - params (DataFrame): DataFrame with estimated parameter values. - period (int or float): Model period for which to plot the distributions for. - factors (list or NoneType): List of factors for which to plot the densities. + data: Model estimation input data. 
+ model_spec: The model specification. See: :ref:`model_specs` + params: Estimated parameter values. + period: Model period for which to plot the distributions for. + factors: Factors for which to plot the densities. If None, plot pairwise distributions for all latent factors. - observed_factors (bool): If True, plot densities of observed factors too. - states (dict, list, pd.DataFrame or NoneType): List or dictionary with tidy - DataFrames with filtered or simulated states or only one DataFrame with - filtered or simulated states. If None, retrieve data frame with filtered - states using model_dict and data. States are used to estimate the state - ranges in each period (if state_ranges are not given explicitly) and to - estimate the distribution of the latent factors. - show_hist (bool): Add histogram to the distplot. - show_curve (bool): Add density curve to the displot. - show_rug (bool): Add rug to the distplot. - curve_type (str): Curve type, 'normal' or 'kde', to add to the distplot. - colorscale (str): The color palette used when plotting multiple data. Must be + observed_factors: If True, plot densities of observed factors too. + states: Filtered or simulated states. Can be a single DataFrame, a list, + or a dictionary of DataFrames. If None, retrieve filtered states using + model and data. Used to estimate state ranges and factor distributions. + show_hist: Add histogram to the distplot. + show_curve: Add density curve to the distplot. + show_rug: Add rug to the distplot. + curve_type: Curve type, 'normal' or 'kde', to add to the distplot. + colorscale: The color palette used when plotting multiple data. Must be a valid attribute of px.colors.qualitative. - bin_size (float): Size of the histogram bins. - distplot_kwargs (NoneType or dict): Dictionary with additional keyword - arguments passed to ff.create_distplot() to initiate - the distplot. - layout_kwargs (NoneType or dict): Dictionary of keyword arguments to update - layout of the plot figures. 
Some essential layout kwargs are: - - xaxis_title (str): label label - - yaxis_title (str): label of y axis - - xaxis_showgrid (bool): display axis grid - - yaxis_showgrid (bool): display axis grid - - template (str): figure background theme - - showlegend (bool): add legend + bin_size: Size of the histogram bins. + distplot_kwargs: Additional keyword arguments passed to + ff.create_distplot(). + layout_kwargs: Keyword arguments to update layout of the plot figures. + Some essential layout kwargs are: + - xaxis_title: label of x axis + - yaxis_title: label of y axis + - xaxis_showgrid: display axis grid + - yaxis_showgrid: display axis grid + - template: figure background theme + - showlegend: add legend + Returns: - plots_dict (dict): Dictionary with density plots. + plots_dict: Density plots keyed by factor name. """ if states is None: - states = get_filtered_states(model_dict=model_dict, data=data, params=params)[ + states = get_filtered_states(model_spec=model_spec, data=data, params=params)[ "anchored_states" ]["states"] - model = process_model(model_dict) + processed_model = process_model(model_spec) factors = _get_factors( - model=model, + model=processed_model, factors=factors, observed_factors=observed_factors, ) - observed_states = _get_data_observed_factors(data, factors) + observed_states = _get_data_observed_factors(data=data, factors=factors) df = _process_data( states=states, period=period, factors=factors, - aug_periods_to_periods=model["labels"]["aug_periods_to_periods"], + aug_periods_to_periods=processed_model.labels.aug_periods_to_periods, observed_states=observed_states, ) scenarios = df["scenario"].unique() plots_dict = {} distplot_kwargs = _process_distplot_kwargs( - show_curve, - show_hist, - show_rug, - curve_type, - bin_size, - scenarios, - colorscale, - distplot_kwargs, + show_curve=show_curve, + show_hist=show_hist, + show_rug=show_rug, + curve_type=curve_type, + bin_size=bin_size, + scenarios=scenarios, + colorscale=colorscale, + 
distplot_kwargs=distplot_kwargs, ) plots_dict = {} layout_kwargs = get_layout_kwargs(layout_kwargs) @@ -246,6 +255,7 @@ def univariate_densities( warnings.warn( f"""Plotting univariate density failed for {fac} in period {period} with error:\n\n{e}""", + stacklevel=2, ) fig = go.Figure() fig.update_layout(showlegend=False) @@ -257,73 +267,72 @@ def univariate_densities( def bivariate_density_contours( - data, - model_dict, - params, - period, - factors=None, - observed_factors=False, - states=None, - n_points=50, - contour_kwargs=None, - layout_kwargs=None, - contours_showlabels=False, - contours_coloring="none", - contours_colorscale="RdBu_r", - lines_colorscale="D3", - showcolorbar=False, -): + data: pd.DataFrame, + model_spec: ModelSpec, + params: pd.DataFrame, + period: int, + factors: list[str] | tuple[str, ...] | None = None, + *, + observed_factors: bool = False, + states: pd.DataFrame | dict[str, pd.DataFrame] | list[pd.DataFrame] | None = None, + n_points: int = 50, + contour_kwargs: dict[str, Any] | None = None, + layout_kwargs: dict[str, Any] | None = None, + contours_showlabels: bool = False, + contours_coloring: str = "none", + contours_colorscale: str = "RdBu_r", + lines_colorscale: str = "D3", + showcolorbar: bool = False, +) -> dict[tuple[str, str], go.Figure]: """Get dictionary with pariwise density contour plots. Plots pairwise bivariate density contours for latent factors and collects them in a dictionary with factor combinations as keys. Args: - data (DataFrame): Model estimation input data. - model_dict (dict): Dictionary with model specifications. - params (DataFrame): DataFrame with estimated parameter values. - period (int or float): Model period for which to plot the distributions for. - factors (list or NoneType): List of factors for which to plot the densities. + data: Model estimation input data. + model_spec: The model specification. See: :ref:`model_specs` + params: Estimated parameter values. 
+ period: Model period for which to plot the distributions for. + factors: Factors for which to plot the densities. If None, plot pairwise distributions for all latent factors. - observed_factors (bool): If True, plot densities of observed factors too. - states (dict, list, pd.DataFrame or NoneType): List or dictionary with tidy - DataFrames with filtered or simulated states or only one DataFrame with - filtered or simulated states. If None, retrieve data frame with filtered - states using model_dict and data. States are used to estimate the state - ranges in each period (if state_ranges are not given explicitly) and to - estimate the distribution of the latent factors. - n_points (int): Number of grid points used to create the mesh for calculation + observed_factors: If True, plot densities of observed factors too. + states: Filtered or simulated states. Can be a single DataFrame, a list, + or a dictionary of DataFrames. If None, retrieve filtered states using + model and data. Used to estimate state ranges and factor distributions. + n_points: Number of grid points used to create the mesh for calculation of kernel densities. - contour_kwargs (dict or NoneType): Dictionary with keyword arguments to set - contour line properties (such as annotation, colorscale). - layout_kwargs (dict or NoneType): Dictionary with keyword arguments to set - figure layout properties. + contour_kwargs: Keyword arguments to set contour line properties + (such as annotation, colorscale). + layout_kwargs: Keyword arguments to set figure layout properties. The following are various essential keyword arguments defining various features of plots. All features can also be changed ex-post via 'update_layout' or 'update_traces'. Some default figure layout properties (such as background theme) are defined if layout_kwargs is None. - contours_showlabels (bool): If True, annotate density contours. - contours_coloring (str): Defines how to apply color scale to density contours. 
+ contours_showlabels: If True, annotate density contours. + contours_coloring: Defines how to apply color scale to density contours. Possible values are in ['lines', 'fill', 'heatmap', 'none']. Default is 'none' which implies no colorscale. - contours_colorscale (str): The color scale to use for line legends. Must be + contours_colorscale: The color scale to use for line legends. Must be a valid plotly.express.colors.sequential attribute. Default 'RdBu_r'. - showcolorbar (bool): A boolean variable for displaying color bar. + lines_colorscale: The color palette used for contour lines when plotting + multiple scenarios. Must be a valid px.colors.qualitative attribute. + Default 'D3'. + showcolorbar: Whether to display the color bar. Returns: - plots_dict (dict): Dictionary with factor combinations as keys and respective - pariwise plots of density contours as values. + plots_dict: Pairwise density contour plots keyed by factor combinations. """ if states is None: - states = get_filtered_states(model_dict=model_dict, data=data, params=params)[ + states = get_filtered_states(model_spec=model_spec, data=data, params=params)[ "anchored_states" ]["states"] - model = process_model(model_dict) + processed_model = process_model(model_spec) factors = _get_factors( - model=model, + model=processed_model, factors=factors, observed_factors=observed_factors, ) @@ -332,16 +341,16 @@ def bivariate_density_contours( states=states, period=period, factors=factors, - aug_periods_to_periods=model["labels"]["aug_periods_to_periods"], + aug_periods_to_periods=processed_model.labels.aug_periods_to_periods, observed_states=observed_states, ) plots_dict = {} contour_kwargs = _process_contour_kwargs( contour_kwargs, - contours_showlabels, - contours_coloring, - contours_colorscale, - showcolorbar, + contours_showlabels=contours_showlabels, + contours_coloring=contours_coloring, + contours_colorscale=contours_colorscale, + contours_showscale=showcolorbar, ) layout_kwargs = 
_process_layout_kwargs(layout_kwargs) pairs = [] @@ -355,9 +364,9 @@ def bivariate_density_contours( for i, scenario in enumerate(df["scenario"].unique()): try: x, y, z = _calculate_kde_for_3d( - df[df["scenario"] == scenario], - pair, - n_points, + data=df[df["scenario"] == scenario], + factors=pair, + n_points=n_points, ) contour = go.Contour( x=x[:, 0], @@ -373,6 +382,7 @@ def bivariate_density_contours( Contour plot failed for {pair} in period {period} with error:\n\n{e} """, + stacklevel=2, ) fig.update_xaxes(title={"text": pair[0]}) fig.update_yaxes(title={"text": pair[1]}) @@ -383,89 +393,87 @@ def bivariate_density_contours( def bivariate_density_surfaces( - data, - model_dict, - params, - period, - factors=None, - observed_factors=False, - states=None, - n_points=50, - layout_kwargs=None, - colorscale="RdBu_r", - opacity=0.9, - showcolorbar=False, - showgrids=True, - showaxlines=True, - showlabels=True, -): + data: pd.DataFrame, + model_spec: ModelSpec, + params: pd.DataFrame, + period: int, + factors: list[str] | tuple[str, ...] | None = None, + *, + observed_factors: bool = False, + states: pd.DataFrame | None = None, + n_points: int = 50, + layout_kwargs: dict[str, Any] | None = None, + colorscale: str = "RdBu_r", + opacity: float = 0.9, + showcolorbar: bool = False, + showgrids: bool = True, + showaxlines: bool = True, + showlabels: bool = True, +) -> dict[tuple[str, str], go.Figure]: """Get dictionary with pariwise 3d density surface plots. Plots pairwise 3d density surfaces for latent factors and collects them in a dictionary with factor name combinations keys. Args: - data (DataFrame): Model estimation input data. - model_dict (dict): Dictionary with model specifications. - params (DataFrame): DataFrame with estimated parameter values. - period (int or float): Model period for which to plot the distributions for. - factors (list or NoneType): List of factors for which to plot the densities. + data: Model estimation input data. 
+ model_spec: The model specification. See: :ref:`model_specs` + params: Estimated parameter values. + period: Model period for which to plot the distributions for. + factors: Factors for which to plot the densities. If None, plot pairwise distributions for all latent factors. - observed_factors (bool): If True, plot densities of observed factors too. - states (dict, list, pd.DataFrame or NoneType): List or dictionary with tidy - DataFrames with filtered or simulated states or only one DataFrame with - filtered or simulated states. If None, retrieve data frame with filtered - states using model_dict and data. States are used to estimate the state - ranges in each period (if state_ranges are not given explicitly) and to - estimate the distribution of the latent factors. - n_points (int): Number of grid points used to create the mesh for calculation + observed_factors: If True, plot densities of observed factors too. + states: Filtered or simulated states as a single DataFrame. + If None, retrieve filtered states using model and data. Used to estimate + state ranges and factor distributions. + n_points: Number of grid points used to create the mesh for calculation of kernel densities. + The following are various essential keyword arguments defining various features of plots. All features can also be changed ex-post via 'update_layout' or 'update_traces'. Some default figure layout properties (such as background theme) are defined if layout_kwargs is None. - layout_kwargs (dict or NoneType): Dictionary with keyword arguments to set - figure layout properties. - colorscale (str): The color scale to use for line legends. Must be a valid + layout_kwargs: Keyword arguments to set figure layout properties. + colorscale: The color scale to use for line legends. Must be a valid plotly.express.colors.sequential attribute. Default 'RdBu_r'. - showcolorbar (bool): A boolean variable for displaying the colorbar associated - with the surface color scale. 
- showgrids (bool): A boolean variable for showing axes grids. - showaxlines (bool): A boolean variable for showing axes lines. - showlabels (bool): A boolean variable for displaying axes labels. + opacity: Opacity of the surface. Default 0.9. + showcolorbar: Whether to display the colorbar associated with the + surface color scale. + showgrids: Whether to show axes grids. + showaxlines: Whether to show axes lines. + showlabels: Whether to display axes labels. Returns: - plots_dict (dict): Dictionary with factor combinations as keys and respective - pariwise plots of 3d density plots as values. + plots_dict: Pairwise 3d density surface plots keyed by factor combinations. """ if states is None: - states = get_filtered_states(model_dict=model_dict, data=data, params=params)[ + states = get_filtered_states(model_spec=model_spec, data=data, params=params)[ "anchored_states" ]["states"] elif not isinstance(states, pd.DataFrame): raise ValueError("3d plots are only supported if states is a DataFrame") - model = process_model(model_dict) + processed_model = process_model(model_spec) factors = _get_factors( - model=model, + model=processed_model, factors=factors, observed_factors=observed_factors, ) - observed_states = _get_data_observed_factors(data, factors) + observed_states = _get_data_observed_factors(data=data, factors=factors) df = _process_data( states=states, period=period, factors=factors, - aug_periods_to_periods=model["labels"]["aug_periods_to_periods"], + aug_periods_to_periods=processed_model.labels.aug_periods_to_periods, observed_states=observed_states, ) plots_dict = {} layout_kwargs = _process_layout_kwargs_3d( layout_kwargs, - showgrids, - showaxlines, - showlabels, + showgrids=showgrids, + showaxlines=showaxlines, + showlabels=showlabels, ) pairs = [] for fac1 in factors: @@ -475,7 +483,7 @@ def bivariate_density_surfaces( pairs = list(set(pairs)) for pair in pairs: try: - x, y, z = _calculate_kde_for_3d(df, pair, n_points) + x, y, z = 
_calculate_kde_for_3d(data=df, factors=pair, n_points=n_points) fig = go.Figure( go.Surface( x=x, @@ -490,6 +498,7 @@ def bivariate_density_surfaces( warnings.warn( f"""Plotting bivariate density surfaces for {pair} in period {period} with error:\n\n{e}""", + stacklevel=2, ) fig = go.Figure() fig.update_layout( @@ -504,18 +513,45 @@ def bivariate_density_surfaces( return plots_dict +def _get_one_state_per_period( + states: pd.DataFrame, + ap_to_p: pd.Series, +) -> pd.DataFrame: + """Get one state per (period, id). + + Handles aug_period and/or period index/columns. + """ + # Always reset index to work with columns + df = states.reset_index() + + has_aug_period = "aug_period" in df.columns + has_period = "period" in df.columns + + if has_aug_period and not has_period: + # Only aug_period: merge to get period, then collapse to one per (period, id) + df = df.merge(ap_to_p, left_on="aug_period", right_index=True, how="left") + return df.sort_values(["aug_period", "id"]).groupby(["period", "id"]).last() + if has_aug_period and has_period: + # Both exist: collapse multiple aug_periods to one per (period, id) + return df.sort_values(["aug_period", "id"]).groupby(["period", "id"]).last() + if has_period: + # Only period (no aug_period): just set index + return df.set_index(["period", "id"]) + msg = "States must have either 'aug_period' or 'period' column/index." 
+ raise ValueError(msg) + + def _process_data( - states, period, factors, aug_periods_to_periods, observed_states=None -): + states: pd.DataFrame | dict[str, pd.DataFrame] | list[pd.DataFrame], + period: int, + factors: tuple[str, ...], + aug_periods_to_periods: Mapping[int, int], + observed_states: pd.DataFrame | None = None, +) -> pd.DataFrame: ap_to_p = pd.Series(aug_periods_to_periods, name="period") ap_to_p.index.name = "aug_period" if isinstance(states, pd.DataFrame): - one_state_per_period = ( - states.merge(ap_to_p, left_on="aug_period", right_index=True, how="left") - .sort_values(["aug_period", "id"]) - .groupby(["period", "id"]) - .last() - ) + one_state_per_period = _get_one_state_per_period(states=states, ap_to_p=ap_to_p) to_concat = [] for fac in factors: if fac in one_state_per_period: @@ -527,12 +563,7 @@ def _process_data( states = dict(enumerate(states)) to_concat = [] for name, df in states.items(): - one_state_per_period = ( - df.merge(ap_to_p, left_on="aug_period", right_index=True, how="left") - .sort_values(["aug_period", "id"]) - .groupby(["period", "id"]) - .last() - ) + one_state_per_period = _get_one_state_per_period(states=df, ap_to_p=ap_to_p) to_keep = one_state_per_period.query(f"period == {period}")[factors].copy() to_keep["scenario"] = name to_concat.append(to_keep) @@ -548,15 +579,16 @@ def _process_data( def _process_distplot_kwargs( - show_curve, - show_hist, - show_rug, - curve_type, - bin_size, - scenarios, - colorscale, - distplot_kwargs, -): + *, + show_curve: bool, + show_hist: bool, + show_rug: bool, + curve_type: str, + bin_size: float, + scenarios: NDArray[Any], + colorscale: str, + distplot_kwargs: dict[str, Any] | None, +) -> dict[str, Any]: """Define and update default distplot kwargs.""" default_kwargs = { "show_hist": show_hist, @@ -572,7 +604,13 @@ def _process_distplot_kwargs( return default_kwargs -def _calculate_kde_for_3d(data, factors, n_points): +def _calculate_kde_for_3d( + data: pd.DataFrame, + factors: 
tuple[str, str], + n_points: int, +) -> tuple[ + NDArray[np.floating[Any]], NDArray[np.floating[Any]], NDArray[np.floating[Any]] +]: """Create grid mesh and calculate Gaussian kernel over the grid.""" x = data[factors[0]] y = data[factors[1]] @@ -588,12 +626,13 @@ def _calculate_kde_for_3d(data, factors, n_points): def _process_contour_kwargs( - contour_kwargs, - contours_showlabels, - contours_coloring, - contours_colorscale, - contours_showscale, -): + contour_kwargs: dict[str, Any] | None, + *, + contours_showlabels: bool, + contours_coloring: str | None, + contours_colorscale: str, + contours_showscale: bool, +) -> dict[str, Any]: """Define and update default density contour kwargs.""" if contours_coloring is None: contours_coloring = "none" @@ -609,9 +648,11 @@ def _process_contour_kwargs( return default_kwargs -def _process_layout_kwargs(layout_kwargs): +def _process_layout_kwargs( + layout_kwargs: dict[str, Any] | None, +) -> dict[str, Any]: """Define and update default figure layout kwargs.""" - default_kwargs = { + default_kwargs: dict[str, Any] = { "template": "simple_white", "xaxis_showgrid": False, "yaxis_showgrid": False, @@ -621,12 +662,18 @@ def _process_layout_kwargs(layout_kwargs): return default_kwargs -def _process_layout_kwargs_3d(layout_kwargs, showgrids, showaxlines, showlabels): +def _process_layout_kwargs_3d( + layout_kwargs: dict[str, Any] | None, + *, + showgrids: bool, + showaxlines: bool, + showlabels: bool, +) -> dict[str, Any]: """Define and update default figure layout kwargs for 3d plots.""" - default_kwargs = { + default_kwargs: dict[str, Any] = { "template": "none", } - scene = {} + scene: dict[str, Any] = {} for ax in list("xyz"): scene[f"{ax}axis"] = { "showgrid": showgrids, @@ -640,7 +687,10 @@ def _process_layout_kwargs_3d(layout_kwargs, showgrids, showaxlines, showlabels) return default_kwargs -def _process_factor_mapping_dist(mapper, factors): +def _process_factor_mapping_dist( + mapper: dict[str, str] | None, + factors: 
list[str] | tuple[str, ...], +) -> dict[str, str]: """Process mapper to return dictionary with old and new factor names.""" if mapper is None: mapper = {fac: fac for fac in factors} @@ -651,28 +701,38 @@ def _process_factor_mapping_dist(mapper, factors): return mapper -def _get_ordered_factors(factor_order, factors): - """Process factor orders to return list of strings.""" +def _get_ordered_factors( + factor_order: list[str] | tuple[str, ...] | str | None, + factors: list[str] | tuple[str, ...], +) -> tuple[str, ...]: + """Process factor orders to return tuple of strings.""" if factor_order is None: - ordered_factors = factors + ordered_factors = tuple(factors) elif isinstance(factor_order, str): - ordered_factors = [factor_order] + ordered_factors = (factor_order,) else: - ordered_factors = factor_order + ordered_factors = tuple(factor_order) return ordered_factors -def _get_factors(factors, observed_factors, model): - """Proccess factor names to return list of strings.""" +def _get_factors( + factors: list[str] | tuple[str, ...] 
| None, + *, + observed_factors: bool, + model: ProcessedModel, +) -> tuple[str, ...]: + """Proccess factor names to return tuple of strings.""" if factors is None: if observed_factors: - factors = model["labels"]["all_factors"] - else: - factors = model["labels"]["latent_factors"] - return factors + return model.labels.all_factors + return model.labels.latent_factors + return tuple(factors) -def _get_data_observed_factors(data, factors): +def _get_data_observed_factors( + data: pd.DataFrame, + factors: tuple[str, ...], +) -> pd.DataFrame | None: """Get data with observed factors if any.""" to_concat = [] for fac in factors: diff --git a/src/skillmodels/visualize_transition_equations.py b/src/skillmodels/visualize_transition_equations.py index 4dc5304b..9a8b6669 100644 --- a/src/skillmodels/visualize_transition_equations.py +++ b/src/skillmodels/visualize_transition_equations.py @@ -1,89 +1,99 @@ +"""Functions to visualize transition equations and production functions.""" + import itertools +from collections.abc import Callable from copy import deepcopy +from typing import Any import jax.numpy as jnp import numpy as np import pandas as pd +from jax import Array from plotly import express as px from plotly import graph_objects as go from plotly.subplots import make_subplots from skillmodels.filtered_states import get_filtered_states +from skillmodels.model_spec import ModelSpec from skillmodels.params_index import get_params_index from skillmodels.parse_params import create_parsing_info, parse_params from skillmodels.process_data import process_data from skillmodels.process_debug_data import create_state_ranges from skillmodels.process_model import process_model +from skillmodels.types import ParsedParams, ProcessedModel from skillmodels.utils_plotting import get_layout_kwargs, get_make_subplot_kwargs def combine_transition_plots( - plots_dict, - column_order=None, - row_order=None, - factor_mapping=None, - make_subplot_kwargs=None, - sharex=False, - sharey=True, - 
showlegend=True, - layout_kwargs=None, - legend_kwargs=None, - title_kwargs=None, -): + plots_dict: dict[tuple[str, str], go.Figure], + column_order: list[str] | tuple[str, ...] | str | None = None, + row_order: list[str] | tuple[str, ...] | str | None = None, + factor_mapping: dict[str, str] | None = None, + make_subplot_kwargs: dict[str, Any] | None = None, + *, + sharex: bool = False, + sharey: bool = True, + showlegend: bool = True, + layout_kwargs: dict[str, Any] | None = None, + legend_kwargs: dict[str, Any] | None = None, + title_kwargs: dict[str, Any] | None = None, +) -> go.Figure: """Combine individual plots into figure with subplots. Use dictionary with plotly images as values to build plotly figure with subplots. Args: - plots_dict (dict): Dictionary with plots of transition functions for each + plots_dict: Dictionary with plots of transition functions for each factor. - column_order (list, str or NoneType): List of (output) factor names according + column_order: List of (output) factor names according to which transition plots should be ordered horizontally. If None, infer from the keys of of plots_dict - row_order (list, str or NoneType): List of (input) factor names according + row_order: List of (input) factor names according to which transition plots should be ordered vertically. If None, infer from the keys of of plots_dict - factor_mapping (dict or NoneType): A dictionary with custom factor names to + factor_mapping: A dictionary with custom factor names to display as axes labels. - make_subplot_kwargs (dict or NoneType): Dictionary of keyword arguments used + make_subplot_kwargs: Dictionary of keyword arguments used to instantiate plotly Figure with multiple subplots. Is used to define properties such as, for example, the spacing between subplots. If None, default arguments defined in the function are used. - sharex (bool): Whether to share the properties of x-axis across subplots. 
+ sharex: Whether to share the properties of x-axis across subplots. Default False. - sharey (bool): Whether to share the properties ofy-axis across subplots. + sharey: Whether to share the properties ofy-axis across subplots. Default True. - showlegend (bool): Display legend if True. - layout_kwargs (dict or NoneType): Dictionary of key word arguments used to + showlegend: Display legend if True. + layout_kwargs: Dictionary of key word arguments used to update layout of plotly Figure object. If None, the default kwargs defined in the function will be used. - legend_kwargs (dict or NoneType): Dictionary of key word arguments used to + legend_kwargs: Dictionary of key word arguments used to update position, orientation and title of figure legend. If None, default position and orientation will be used with no title. - title_kwargs (dict or NoneType): Dictionary of key word arguments used to + title_kwargs: Dictionary of key word arguments used to update properties of the figure title. Use {'text': ''} to set figure title. If None, infers title based on the value of `quntiles_of_other_factors`. Returns: - fig (plotly.Figure): Plotly figure with subplots that combines individual + fig: Plotly figure with subplots that combines individual transition functions. 
""" plots_dict = deepcopy(plots_dict) - column_order, row_order = _process_orders(column_order, row_order, plots_dict) + column_order, row_order = _process_orders( + columns=column_order, rows=row_order, plots_dict=plots_dict + ) make_subplot_kwargs = get_make_subplot_kwargs( - sharex, - sharey, - column_order, - row_order, - make_subplot_kwargs, + sharex=sharex, + sharey=sharey, + column_order=column_order, + row_order=row_order, + make_subplot_kwargs=make_subplot_kwargs, ) factor_mapping = _process_factor_mapping_trans( - factor_mapping, - row_order, - column_order, + factor_mapper=factor_mapping, + output_factors=row_order, + input_factors=column_order, ) fig = make_subplots(**make_subplot_kwargs) for (output_factor, input_factor), (row, col) in zip( @@ -119,56 +129,61 @@ def combine_transition_plots( ) layout_kwargs = get_layout_kwargs( - layout_kwargs, - legend_kwargs, - title_kwargs, - showlegend, - column_order, - row_order, + layout_kwargs=layout_kwargs, + legend_kwargs=legend_kwargs, + title_kwargs=title_kwargs, + showlegend=showlegend, + columns=column_order, + rows=row_order, ) fig.update_layout(**layout_kwargs) return fig def get_transition_plots( - model_dict, - params, - data, - period, - state_ranges=None, - quantiles_of_other_factors=(0.25, 0.5, 0.75), - n_points=50, - n_draws=50, - colorscale="Magenta_r", - layout_kwargs=None, - include_correction_factors=False, -): + model_spec: ModelSpec, + params: pd.DataFrame, + data: pd.DataFrame, + period: int, + state_ranges: dict[str, pd.DataFrame] | None = None, + quantiles_of_other_factors: tuple[float, ...] | list[float] | float | None = ( + 0.25, + 0.5, + 0.75, + ), + n_points: int = 50, + n_draws: int = 50, + colorscale: str = "Magenta_r", + layout_kwargs: dict[str, Any] | None = None, + *, + include_correction_factors: bool = False, +) -> dict[tuple[str, str], go.Figure]: """Get dictionary with individual plots of transition equations for each factor. 
Args: - model_dict (dict): The model specification. See: :ref:`model_specs` - params (pandas.DataFrame): DataFrame with model parameters. - data (pd.DataFrame): Empirical dataset that is used to estimate the model. - period (int): The start period of the transition equations that are plotted. - state_ranges (dict or NoneType): The keys are the names of the latent factors. + model_spec: The model specification. See: :ref:`model_specs` + params: Model parameters. + data: Empirical dataset used to estimate the model. + period: The start period of the transition equations that are plotted. + state_ranges: The keys are the names of the latent factors. The values are DataFrames with the columns "period", "minimum", "maximum". The state_ranges are used to define the axis limits of the plots. - quantiles_of_other_factors (float, list or None): Quantiles at which the factors + quantiles_of_other_factors: Quantiles at which the factors that are not varied in a given plot are fixed. If None, those factors are not fixed but integrated out. - n_points (int): Number of grid points per input. Default 50. - n_draws (int): Number of randomly drawn values of the factors that are averaged + n_points: Number of grid points per input. Default 50. + n_draws: Number of randomly drawn values of the factors that are averaged out. Only relevant if quantiles_of_other_factors is *None*. Default 50. - colorscale (str): The color scale to use for line legends. Must be a valid + colorscale: The color scale to use for line legends. Must be a valid plotly.express.colors.sequential attribute. Default 'Magenta_r'. - layout_kwargs (dict or NoneType): Dictionary of key word arguments used to + layout_kwargs: Dictionary of key word arguments used to update layout of plotly image object. If None, the default kwargs defined in the function will be used. 
- include_correction_factors (bool): Whether to include correction factors in the + include_correction_factors: Whether to include correction factors in the plots. Default False. Returns: - plots_dict (dict): Dictionary with individual plots of transition equations + plots_dict: Dictionary with individual plots of transition equations for each combination of input and output factors. """ @@ -176,35 +191,35 @@ def get_transition_plots( quantiles_of_other_factors, ) - model = process_model(model_dict) + processed_model = process_model(model_spec) - if period >= model["labels"]["periods"][-1]: + if period >= processed_model.labels.periods[-1]: raise ValueError( "*period* must be the penultimate period of the model or earlier.", ) if ( include_correction_factors - or not model["endogenous_factors_info"]["has_endogenous_factors"] + or not processed_model.endogenous_factors_info.has_endogenous_factors ): - latent_factors = model["labels"]["latent_factors"] + latent_factors = processed_model.labels.latent_factors else: latent_factors = [ lf - for lf in model["labels"]["latent_factors"] - if not model["endogenous_factors_info"][lf]["is_correction"] + for lf in processed_model.labels.latent_factors + if not processed_model.endogenous_factors_info.factor_info[lf].is_correction ] - all_factors = model["labels"]["all_factors"] - states = get_filtered_states(model_dict=model_dict, data=data, params=params)[ + all_factors = processed_model.labels.all_factors + states = get_filtered_states(model_spec=model_spec, data=data, params=params)[ "anchored_states" ]["states"] - plots_dict = _get_dictionary_with_plots( - model=model, + return _get_dictionary_with_plots( + model=processed_model, data=data, params=params, states=states, state_ranges=state_ranges, - latent_factors=latent_factors, + latent_factors=latent_factors, # ty: ignore[invalid-argument-type] all_factors=all_factors, quantiles_of_other_factors=quantiles_of_other_factors, period=period, @@ -213,95 +228,101 @@ def 
get_transition_plots( colorscale=colorscale, layout_kwargs=layout_kwargs, ) - return plots_dict def _get_dictionary_with_plots( - model, - data, - params, - states, - state_ranges, - latent_factors, - all_factors, - quantiles_of_other_factors, - period, - n_points, - n_draws, - colorscale, - layout_kwargs, - showlegend=True, -): + model: ProcessedModel, + data: pd.DataFrame, + params: pd.DataFrame, + states: pd.DataFrame, + state_ranges: dict[str, pd.DataFrame] | None, + latent_factors: tuple[str, ...], + all_factors: tuple[str, ...], + quantiles_of_other_factors: list[float] | None, + period: int, + n_points: int, + n_draws: int, + colorscale: str, + layout_kwargs: dict[str, Any] | None, + *, + showlegend: bool = True, +) -> dict[tuple[str, str], go.Figure]: """Get plots of transition functions for each input and output combination. Return a dictionary with individual plots of transition functions for each input and output factors. Args: - model (dict): The model specification. See: :ref:`model_specs` - params (pandas.DataFrame): DataFrame with model parameters. - states (pandas.DataFrame): Tidy DataFrame with filtered or simulated states. + model: The model specification. See: :ref:`model_specs` + data: Panel dataset in long format for getting observed factors. + params: DataFrame with model parameters. + states: Tidy DataFrame with filtered or simulated states. They are used to estimate the state ranges in each period (if state_ranges are not given explicitly) and to estimate the distribution of the factors that are not visualized. - state_ranges (dict): The keys are the names of the latent factors. + state_ranges: The keys are the names of the latent factors. The values are DataFrames with the columns "period", "minimum", "maximum". The state_ranges are used to define the axis limits of the plots. - latent_factors (list): Latent factors of the model that are outputs of + latent_factors: Latent factors of the model that are outputs of transition factors. 
- all_factors (list): All factors of the model that are the inputs of transition + all_factors: All factors of the model that are the inputs of transition functions. - quantiles_of_other_factors (float, list or None): Quantiles at which the factors + quantiles_of_other_factors: Quantiles at which the factors that are not varied in a given plot are fixed. If None, those factors are not fixed but integrated out. - period (int): The start period of the transition equations that are plotted. - n_points (int): Number of grid points per input. Default 50. - n_draws (int): Number of randomly drawn values of the factors that are averaged + period: The start period of the transition equations that are plotted. + n_points: Number of grid points per input. Default 50. + n_draws: Number of randomly drawn values of the factors that are averaged out. Only relevant if quantiles_of_other_factors is *None*. Default 50. - colorscale (str): The color scale to use for line legends. Must be a valid + colorscale: The color scale to use for line legends. Must be a valid plotly.express.colors.sequential attribute. Default 'Magenta_r'. - subfig_kwargs (dict or NoneType): Dictionary of key word arguments used to + layout_kwargs: Dictionary of key word arguments used to update layout of plotly image object. If None, the default kwargs defined in the function will be used. + showlegend: Display legend if True. Default True. Returns: - plots_dict (dict): Dictionary with individual plots of transition functions + plots_dict: Dictionary with individual plots of transition functions for each input and output factors. 
""" - observed_factors = model["labels"]["observed_factors"] - states_data = _get_states_data(model, period, data, states, observed_factors) - params = _set_index_params(model, params) - pardict = _get_pardict(model, params) - state_ranges = _get_state_ranges(state_ranges, states_data, all_factors) + observed_factors = model.labels.observed_factors + states_data = _get_states_data( + model=model, + period=period, + data=data, + states=states, + observed_factors=observed_factors, + ) + params = _set_index_params(model=model, params=params) + parsed_params = _get_parsed_params(model=model, params=params) + state_ranges = _get_state_ranges( + state_ranges=state_ranges, states_data=states_data, all_factors=all_factors + ) layout_kwargs = get_layout_kwargs( layout_kwargs=layout_kwargs, legend_kwargs=None, title_kwargs=None, showlegend=showlegend, ) - has_endogenous_factors = model["endogenous_factors_info"]["has_endogenous_factors"] + has_endogenous_factors = model.endogenous_factors_info.has_endogenous_factors if has_endogenous_factors: - _aug_periods = model["endogenous_factors_info"]["aug_periods_from_period"]( - period - ) + _aug_periods = model.endogenous_factors_info.aug_periods_from_period(period) else: _aug_periods = [period] plots_dict = {} for output_factor, input_factor in itertools.product(latent_factors, all_factors): - transition_function = model["transition_info"]["individual_functions"][ - output_factor - ] + transition_function = model.transition_info.individual_functions[output_factor] if ( has_endogenous_factors - and model["endogenous_factors_info"][output_factor]["is_endogenous"] + and model.endogenous_factors_info.factor_info[output_factor].is_endogenous ): aug_period = min(_aug_periods) else: aug_period = max(_aug_periods) transition_params = { - output_factor: pardict["transition"][output_factor][aug_period] + output_factor: parsed_params.transition[output_factor][aug_period] } if quantiles_of_other_factors is not None: @@ -354,50 +375,65 @@ def 
_get_dictionary_with_plots( return plots_dict -def _get_state_ranges(state_ranges, states_data, all_factors): +def _get_state_ranges( + state_ranges: dict[str, pd.DataFrame] | None, + states_data: pd.DataFrame, + all_factors: tuple[str, ...], +) -> dict[str, pd.DataFrame]: """Create state ranges if none is given.""" if state_ranges is None: - state_ranges = create_state_ranges(states_data, all_factors) + state_ranges = create_state_ranges( + filtered_states=states_data, factors=list(all_factors) + ) return state_ranges -def _get_pardict(model, params): - """Get parsed params dictionary.""" +def _get_parsed_params( + model: ProcessedModel, + params: pd.DataFrame, +) -> ParsedParams: + """Get parsed params dataclass.""" parsing_info = create_parsing_info( - params_index=params.index, - update_info=model["update_info"], - labels=model["labels"], - anchoring=model["anchoring"], - has_endogenous_factors=model["endogenous_factors_info"][ - "has_endogenous_factors" - ], + params_index=params.index, # ty: ignore[invalid-argument-type] + update_info=model.update_info, + labels=model.labels, + anchoring=model.anchoring, + has_endogenous_factors=model.endogenous_factors_info.has_endogenous_factors, ) - _, _, _, pardict = parse_params( + _, _, _, parsed_params = parse_params( params=jnp.array(params["value"].to_numpy()), parsing_info=parsing_info, - dimensions=model["dimensions"], - labels=model["labels"], + dimensions=model.dimensions, + labels=model.labels, n_obs=1, ) - return pardict + return parsed_params -def _set_index_params(model, params): +def _set_index_params( + model: ProcessedModel, + params: pd.DataFrame, +) -> pd.DataFrame: """Reset index of params data frame to model implied values.""" params_index = get_params_index( - update_info=model["update_info"], - labels=model["labels"], - dimensions=model["dimensions"], - transition_info=model["transition_info"], - endogenous_factors_info=model["endogenous_factors_info"], + update_info=model.update_info, + 
labels=model.labels, + dimensions=model.dimensions, + transition_info=model.transition_info, + endogenous_factors_info=model.endogenous_factors_info, ) - params = params.reindex(params_index) - return params + return params.reindex(params_index) -def _get_states_data(model, period, data, states, observed_factors): +def _get_states_data( + model: ProcessedModel, + period: int, + data: pd.DataFrame, + states: pd.DataFrame, + observed_factors: tuple[str, ...], +) -> pd.DataFrame: if observed_factors and data is None: raise ValueError( "The model has observed factors. You must pass the empirical data to " @@ -407,19 +443,17 @@ def _get_states_data(model, period, data, states, observed_factors): if observed_factors: _observed_arr = process_data( df=data, - has_endogenous_factors=model["endogenous_factors_info"][ - "has_endogenous_factors" - ], - labels=model["labels"], - update_info=model["update_info"], - anchoring_info=model["anchoring"], + has_endogenous_factors=model.endogenous_factors_info.has_endogenous_factors, + labels=model.labels, + update_info=model.update_info, + anchoring_info=model.anchoring, )["observed_factors"] # convert from jax to numpy _observed_arr = np.array(_observed_arr) - if model["endogenous_factors_info"]["has_endogenous_factors"]: + if model.endogenous_factors_info.has_endogenous_factors: both_aug_periods = [ aug_p - for aug_p, p in model["labels"]["aug_periods_to_periods"].items() + for aug_p, p in model.labels.aug_periods_to_periods.items() if p == period ] to_concat = [] @@ -453,18 +487,18 @@ def _get_states_data(model, period, data, states, observed_factors): def _prepare_data_for_one_plot_fixed_quantile_2d( - states_data, - state_ranges, - aug_period, - input_factor, - output_factor, - n_points, - quantiles_of_other_factors, - transition_function, - transition_params, - all_factors, -): - period_data = states_data.query(f"aug_period == {aug_period}")[all_factors] + states_data: pd.DataFrame, + state_ranges: dict[str, pd.DataFrame], + 
aug_period: int, + input_factor: str, + output_factor: str, + n_points: int, + quantiles_of_other_factors: list[float], + transition_function: Callable[..., Array], + transition_params: dict[str, Any], + all_factors: tuple[str, ...], +) -> pd.DataFrame: + period_data = states_data.query(f"aug_period == {aug_period}")[list(all_factors)] input_min = state_ranges[input_factor].loc[aug_period]["minimum"] input_max = state_ranges[input_factor].loc[aug_period]["maximum"] to_concat = [] @@ -474,7 +508,7 @@ def _prepare_data_for_one_plot_fixed_quantile_2d( fixed_quantiles = period_data.drop(columns=input_factor).quantile(quantile) for col, val in fixed_quantiles.items(): input_data[col] = val - input_arr = jnp.array(input_data[all_factors].to_numpy()) + input_arr = jnp.array(input_data[list(all_factors)].to_numpy()) # convert from jax to numpy array output_arr = np.array(transition_function(transition_params, input_arr)) quantile_data = pd.DataFrame() @@ -483,11 +517,12 @@ def _prepare_data_for_one_plot_fixed_quantile_2d( quantile_data["quantile"] = quantile to_concat.append(quantile_data) - out = pd.concat(to_concat).reset_index() - return out + return pd.concat(to_concat).reset_index() -def _process_quantiles_of_other_factors(quantiles_of_other_factors): +def _process_quantiles_of_other_factors( + quantiles_of_other_factors: tuple[float, ...] 
| list[float] | float | None, +) -> list[float] | None: """Process quantiles of other factors to always have list as type.""" if isinstance(quantiles_of_other_factors, float | int): quantiles_of_other_factors = [quantiles_of_other_factors] @@ -497,17 +532,17 @@ def _process_quantiles_of_other_factors(quantiles_of_other_factors): def _prepare_data_for_one_plot_average_2d( - states_data, - state_ranges, - aug_period, - input_factor, - output_factor, - n_points, - n_draws, - transition_function, - transition_params, - all_factors, -): + states_data: pd.DataFrame, + state_ranges: dict[str, pd.DataFrame], + aug_period: int, + input_factor: str, + output_factor: str, + n_points: int, + n_draws: int, + transition_function: Callable[..., Array], + transition_params: dict[str, Any], + all_factors: tuple[str, ...], +) -> pd.DataFrame: period_data = states_data.query(f"aug_period == {aug_period}") sampled_factors = [factor for factor in all_factors if factor != input_factor] @@ -521,7 +556,7 @@ def _prepare_data_for_one_plot_average_2d( input_data[input_factor] = np.linspace(input_min, input_max, n_points) for col, val in draw.items(): input_data[col] = val - input_arr = jnp.array(input_data[all_factors].to_numpy()) + input_arr = jnp.array(input_data[list(all_factors)].to_numpy()) # convert from jax to numpy array output_arr = np.array(transition_function(transition_params, input_arr)) draw_data = pd.DataFrame() @@ -529,11 +564,14 @@ def _prepare_data_for_one_plot_average_2d( draw_data[f"output_{output_factor}"] = np.array(output_arr) to_concat.append(draw_data) - out = pd.concat(to_concat).groupby(f"input_{input_factor}").mean().reset_index() - return out + return pd.concat(to_concat).groupby(f"input_{input_factor}").mean().reset_index() -def _process_factor_mapping_trans(factor_mapper, output_factors, input_factors): +def _process_factor_mapping_trans( + factor_mapper: dict[str, str] | None, + output_factors: tuple[str, ...], + input_factors: tuple[str, ...], +) -> 
dict[str, str]: """Process mapper to return dictionary with old and new factor names.""" all_factors = input_factors + output_factors if factor_mapper is None: @@ -545,20 +583,32 @@ def _process_factor_mapping_trans(factor_mapper, output_factors, input_factors): return factor_mapper -def _process_orders(columns, rows, plots_dict): - """Process axes orders to return list of strings.""" +def _process_orders( + columns: list[str] | tuple[str, ...] | str | None, + rows: list[str] | tuple[str, ...] | str | None, + plots_dict: dict[tuple[str, str], go.Figure], +) -> tuple[tuple[str, ...], tuple[str, ...]]: + """Process axes orders to return tuples of strings.""" + out_columns: tuple[str, ...] + out_rows: tuple[str, ...] if columns is None: - columns = [] + seen: list[str] = [] for f in plots_dict: - if f[0] not in columns: - columns.append(f[0]) + if f[0] not in seen: + seen.append(f[0]) + out_columns = tuple(seen) elif isinstance(columns, str): - columns = [columns] + out_columns = (columns,) + else: + out_columns = tuple(columns) if rows is None: - rows = [] + seen = [] for f in plots_dict: - if f[1] not in rows: - rows.append(f[1]) + if f[1] not in seen: + seen.append(f[1]) + out_rows = tuple(seen) elif isinstance(rows, str): - rows = [rows] - return columns, rows + out_rows = (rows,) + else: + out_rows = tuple(rows) + return out_columns, out_rows diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 00000000..81b003e2 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1 @@ +"""Shared test fixtures and helpers.""" diff --git a/tests/model2.yaml b/tests/model2.yaml deleted file mode 100644 index 3b80ca3d..00000000 --- a/tests/model2.yaml +++ /dev/null @@ -1,85 +0,0 @@ ---- -factors: - fac1: - measurements: - - [y1, y2, y3] - - [y1, y2, y3] - - [y1, y2, y3] - - [y1, y2, y3] - - [y1, y2, y3] - - [y1, y2, y3] - - [y1, y2, y3] - - [y1, y2, y3] - transition_function: log_ces - normalizations: - loadings: - - {y1: 1} - - {y1: 1} - - {y1: 1} - - {y1: 1} - 
- {y1: 1} - - {y1: 1} - - {y1: 1} - - {y1: 1} - fac2: - measurements: - - [y4, y5, y6] - - [y4, y5, y6] - - [y4, y5, y6] - - [y4, y5, y6] - - [y4, y5, y6] - - [y4, y5, y6] - - [y4, y5, y6] - - [y4, y5, y6] - transition_function: linear - normalizations: - loadings: - - {y4: 1} - - {y4: 1} - - {y4: 1} - - {y4: 1} - - {y4: 1} - - {y4: 1} - - {y4: 1} - - {y4: 1} - fac3: - measurements: - - [y7, y8, y9] - - [] - - [] - - [] - - [] - - [] - - [] - - [] - transition_function: constant - normalizations: - loadings: - - {y7: 1} - - {} - - {} - - {} - - {} - - {} - - {} - - {} -anchoring: - outcomes: {fac1: Q1} - free_controls: true - free_constant: true - free_loadings: true - ignore_constant_when_anchoring: true -controls: - - x1 -stagemap: - - 0 - - 0 - - 0 - - 0 - - 0 - - 0 - - 0 -estimation_options: - robust_bounds: true - bounds_distance: 0.001 - n_mixtures: 1 diff --git a/tests/simplest_augmented_model.yaml b/tests/simplest_augmented_model.yaml deleted file mode 100644 index 04e892b4..00000000 --- a/tests/simplest_augmented_model.yaml +++ /dev/null @@ -1,28 +0,0 @@ ---- -factors: - fac1: - is_correction: false - is_endogenous: false - measurements: - - - var - - - var - normalizations: - loadings: - - {var: 1} - - {var: 1} - transition_function: linear - fac2: - is_correction: false - is_endogenous: true - measurements: - - - inv - - - inv - normalizations: - loadings: - - {inv: 1} - - {inv: 1} - transition_function: linear -observed_factors: - - of -estimation_options: - bounds_distance: 0.00000001 diff --git a/tests/test_clipping.py b/tests/test_clipping.py index b6bae71f..1afd17e3 100644 --- a/tests/test_clipping.py +++ b/tests/test_clipping.py @@ -1,10 +1,13 @@ +"""Tests for soft clipping functions.""" + import jax.numpy as jnp import numpy as np from skillmodels.clipping import soft_clipping -def test_one_sided_soft_maximum(): +def test_one_sided_soft_maximum() -> None: + """Test soft maximum clipping with lower bound.""" arr = jnp.array([-10.0, -5, -1, 1, 5, 
10]) lower_bound = -8 lower_hardness = 3 @@ -21,7 +24,8 @@ def test_one_sided_soft_maximum(): np.testing.assert_allclose(res[1:], arr[1:], rtol=1e-05) -def test_one_sided_soft_minimum(): +def test_one_sided_soft_minimum() -> None: + """Test soft minimum clipping with upper bound.""" arr = jnp.array([-10.0, -5, -1, 1, 5, 10]) upper_bound = 8 upper_hardness = 3 diff --git a/tests/test_constraints.py b/tests/test_constraints.py index ca3bb103..fcfad8c2 100644 --- a/tests/test_constraints.py +++ b/tests/test_constraints.py @@ -1,9 +1,6 @@ -from pathlib import Path - import numpy as np import pandas as pd import pytest -import yaml from pandas.testing import assert_frame_equal from skillmodels.constraints import ( @@ -18,12 +15,11 @@ add_bounds, ) from skillmodels.process_model import process_model - -# importing the TEST_DIR from config does not work for test run in conda build -TEST_DIR = Path(__file__).parent.resolve() +from skillmodels.test_data.simplest_augmented_model import SIMPLEST_AUGMENTED_MODEL +from skillmodels.types import Anchoring, Labels -def test_add_bounds(): +def test_add_bounds() -> None: ind_tups = [("shock_sds", i) for i in range(5)] + [ ("meas_sds", 4), ("bla", "blubb"), @@ -47,7 +43,7 @@ def test_add_bounds(): # ====================================================================================== -def test_normalization_constraints(): +def test_normalization_constraints() -> None: norm = { "fac1": { "loadings": [{"m1": 2, "m2": 1.5}, {"m1": 3}], @@ -84,7 +80,7 @@ def test_normalization_constraints(): }, ] - calculated = _get_normalization_constraints(norm, factors=["fac1", "fac2"]) + calculated = _get_normalization_constraints(norm, factors=("fac1", "fac2")) for c in calculated: del c["description"] @@ -96,7 +92,7 @@ def test_normalization_constraints(): # ====================================================================================== -def test_mixture_weight_constraints_mixture(): +def test_mixture_weight_constraints_mixture() -> None: 
calculated = _get_mixture_weights_constraints(n_mixtures=2) for c in calculated: del c["description"] @@ -104,7 +100,7 @@ def test_mixture_weight_constraints_mixture(): assert_list_equal_except_for_order(calculated, expected) -def test_mixture_weight_constraints_normal(): +def test_mixture_weight_constraints_normal() -> None: calculated = _get_mixture_weights_constraints(n_mixtures=1) for c in calculated: del c["description"] @@ -117,9 +113,9 @@ def test_mixture_weight_constraints_normal(): # ====================================================================================== -def test_stage_constraints(): - stages = [0] - stagemap = [0] * 3 +def test_stage_constraints() -> None: + stages = (0,) + stagemap = (0, 0, 0) expected = [ { @@ -138,9 +134,9 @@ def test_stage_constraints(): assert_list_equal_except_for_order(calculated, expected) -def test_stage_constraints_with_endogenous_factors(): - stages = [0, 1, 2, 3] - stagemap = [0, 1, 0, 1, 2, 3] +def test_stage_constraints_with_endogenous_factors() -> None: + stages = (0, 1, 2, 3) + stagemap = (0, 1, 0, 1, 2, 3) expected = [ { "loc": [("transition", 0), ("transition", 2)], @@ -171,12 +167,21 @@ def test_stage_constraints_with_endogenous_factors(): # ====================================================================================== -def test_constant_factor_constraints(): - labels = { - "latent_factors": ["fac1", "fac2"], - "aug_periods": [0, 1, 2], - "transition_names": ["bla", "constant"], - } +def test_constant_factor_constraints() -> None: + labels = Labels( + latent_factors=("fac1", "fac2"), + observed_factors=(), + controls=("constant",), + periods=(0, 1, 2), + stagemap=(0, 0, 0), + stages=(0,), + aug_periods=(0, 1, 2), + aug_periods_to_periods={0: 0, 1: 1, 2: 2}, + aug_stagemap=(0, 0, 0), + aug_stages=(0,), + aug_stages_to_stages={0: 0}, + transition_names=("bla", "constant"), + ) expected = [ {"loc": ("shock_sds", 0, "fac2", "-"), "type": "fixed", "value": 0.0}, @@ -194,9 +199,9 @@ def 
test_constant_factor_constraints(): # ====================================================================================== -def test_initial_mean_constraints(): +def test_initial_mean_constraints() -> None: nmixtures = 3 - factors = ["fac1", "fac2", "fac3"] + factors = ("fac1", "fac2", "fac3") ind_tups = [ ("initial_states", 0, "mixture_0", "fac1"), ("initial_states", 0, "mixture_1", "fac1"), @@ -216,13 +221,21 @@ def test_initial_mean_constraints(): # ====================================================================================== -def test_trans_coeff_constraints(): - labels = { - "latent_factors": ["fac1", "fac2", "fac3"], - "transition_names": ["log_ces", "bla", "blubb"], - "aug_periods": [0, 1, 2], - } - labels["all_factors"] = labels["latent_factors"] +def test_trans_coeff_constraints() -> None: + labels = Labels( + latent_factors=("fac1", "fac2", "fac3"), + observed_factors=(), + controls=("constant",), + periods=(0, 1, 2), + stagemap=(0, 0, 0), + stages=(0,), + aug_periods=(0, 1, 2), + aug_periods_to_periods={0: 0, 1: 1, 2: 2}, + aug_stagemap=(0, 0, 0), + aug_stages=(0,), + aug_stages_to_stages={0: 0}, + transition_names=("log_ces", "bla", "blubb"), + ) expected = [ { @@ -271,24 +284,35 @@ def anch_uinfo(): @pytest.fixture def base_anchoring_info(): - anch_info = { - "factors": ["f1", "f2"], - "outcomes": {"f1": "outcome", "f2": "outcome"}, - "free_controls": True, - "free_constant": True, - "free_loadings": True, - } - return anch_info + return Anchoring( + anchoring=True, + factors=("f1", "f2"), + outcomes={"f1": "outcome", "f2": "outcome"}, + free_controls=True, + free_constant=True, + free_loadings=True, + ignore_constant_when_anchoring=False, + ) -def test_anchoring_constraints_no_constraint_needed(anch_uinfo, base_anchoring_info): - calculated = _get_anchoring_constraints(anch_uinfo, [], base_anchoring_info, (0, 1)) +def test_anchoring_constraints_no_constraint_needed( + anch_uinfo, base_anchoring_info +) -> None: + calculated = 
_get_anchoring_constraints(anch_uinfo, (), base_anchoring_info, (0, 1)) assert calculated == [] -def test_anchoring_constraints_for_constants(anch_uinfo, base_anchoring_info): - base_anchoring_info["free_constant"] = False - calculated = _get_anchoring_constraints(anch_uinfo, [], base_anchoring_info, (0, 1)) +def test_anchoring_constraints_for_constants(anch_uinfo) -> None: + anchoring_info = Anchoring( + anchoring=True, + factors=("f1", "f2"), + outcomes={"f1": "outcome", "f2": "outcome"}, + free_controls=True, + free_constant=False, + free_loadings=True, + ignore_constant_when_anchoring=False, + ) + calculated = _get_anchoring_constraints(anch_uinfo, (), anchoring_info, (0, 1)) del calculated[0]["description"] expected = [ @@ -307,12 +331,20 @@ def test_anchoring_constraints_for_constants(anch_uinfo, base_anchoring_info): assert calculated == expected -def test_anchoring_constraints_for_controls(anch_uinfo, base_anchoring_info): - base_anchoring_info["free_controls"] = False +def test_anchoring_constraints_for_controls(anch_uinfo) -> None: + anchoring_info = Anchoring( + anchoring=True, + factors=("f1", "f2"), + outcomes={"f1": "outcome", "f2": "outcome"}, + free_controls=False, + free_constant=True, + free_loadings=True, + ignore_constant_when_anchoring=False, + ) calculated = _get_anchoring_constraints( anch_uinfo, - ["c1", "c2"], - base_anchoring_info, + ("c1", "c2"), + anchoring_info, (0, 1), ) @@ -339,9 +371,17 @@ def test_anchoring_constraints_for_controls(anch_uinfo, base_anchoring_info): assert calculated == expected -def test_anchoring_constraints_for_loadings(anch_uinfo, base_anchoring_info): - base_anchoring_info["free_loadings"] = False - calculated = _get_anchoring_constraints(anch_uinfo, [], base_anchoring_info, (0, 1)) +def test_anchoring_constraints_for_loadings(anch_uinfo) -> None: + anchoring_info = Anchoring( + anchoring=True, + factors=("f1", "f2"), + outcomes={"f1": "outcome", "f2": "outcome"}, + free_controls=True, + free_constant=True, + 
free_loadings=False, + ignore_constant_when_anchoring=False, + ) + calculated = _get_anchoring_constraints(anch_uinfo, (), anchoring_info, (0, 1)) expected = [ { @@ -362,7 +402,7 @@ def test_anchoring_constraints_for_loadings(anch_uinfo, base_anchoring_info): assert calculated == expected -def assert_list_equal_except_for_order(list1, list2): +def assert_list_equal_except_for_order(list1, list2) -> None: for item in list1: assert item in list2, f"{item} is in list1 but not in list2" for item in list2: @@ -371,15 +411,13 @@ def assert_list_equal_except_for_order(list1, list2): @pytest.fixture def simplest_augmented_model(): - with open(TEST_DIR / "simplest_augmented_model.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) - return process_model(model_dict) + return process_model(SIMPLEST_AUGMENTED_MODEL) -def test_get_constraints_for_augmented_periods(simplest_augmented_model): +def test_get_constraints_for_augmented_periods(simplest_augmented_model) -> None: calculated = _get_constraints_for_augmented_periods( - labels=simplest_augmented_model["labels"], - endogenous_factors_info=simplest_augmented_model["endogenous_factors_info"], + labels=simplest_augmented_model.labels, + endogenous_factors_info=simplest_augmented_model.endogenous_factors_info, ) for c in calculated: del c["description"] diff --git a/tests/test_correlation_heatmap.py b/tests/test_correlation_heatmap.py index 6f7e62ab..49ce8f63 100644 --- a/tests/test_correlation_heatmap.py +++ b/tests/test_correlation_heatmap.py @@ -1,3 +1,5 @@ +from types import SimpleNamespace + import numpy as np import pandas as pd from pandas.testing import assert_frame_equal as afe @@ -10,9 +12,10 @@ _get_quasi_factor_scores_data_for_single_period, _process_factors, ) +from skillmodels.types import Labels -def test_get_measurement_data_with_single_period(): +def test_get_measurement_data_with_single_period() -> None: period = 1 factors = ["f3", "f1"] update_info = pd.DataFrame( @@ -60,7 +63,7 @@ def 
test_get_measurement_data_with_single_period(): afe(result, expected) -def test_get_factor_scores_data_with_single_period(): +def test_get_factor_scores_data_with_single_period() -> None: period = 1 factors = ["f1", "f2"] update_info = pd.DataFrame( @@ -113,7 +116,7 @@ def test_get_factor_scores_data_with_single_period(): afe(expected, result, check_dtype=False) -def test_get_measurement_data_with_multiple_periods(): +def test_get_measurement_data_with_multiple_periods() -> None: period = [1, 2] factors = ["f3", "f1"] update_info = pd.DataFrame( @@ -172,7 +175,7 @@ def test_get_measurement_data_with_multiple_periods(): afe(result, expected) -def test_get_factor_scores_data_with_multiple_period(): +def test_get_factor_scores_data_with_multiple_period() -> None: periods = [0, 1] factors = ["f1", "f2"] update_info = pd.DataFrame( @@ -241,24 +244,36 @@ def test_get_factor_scores_data_with_multiple_period(): afe(expected, result) -def test_process_factors(): - model = { - "labels": {"latent_factors": list("abcd"), "observed_factors": list("efg")}, - } +def test_process_factors() -> None: + model = SimpleNamespace( + labels=Labels( + latent_factors=tuple("abcd"), + observed_factors=tuple("efg"), + controls=("constant",), + periods=(0,), + stagemap=(0,), + stages=(0,), + aug_periods=(0,), + aug_periods_to_periods={0: 0}, + aug_stagemap=(0,), + aug_stages=(0,), + aug_stages_to_stages={0: 0}, + ), + ) latent_factor = "c" observed_factor = "g" factors = ["b", "d", "g"] all_factors = None - assert list("abcd") == _process_factors(model, all_factors)[0] - assert list("efg") == _process_factors(model, all_factors)[1] - assert [latent_factor] == _process_factors(model, latent_factor)[0] - assert [observed_factor] == _process_factors(model, observed_factor)[1] - assert factors[:-1] == _process_factors(model, factors)[0] - assert [factors[-1] == _process_factors(model, factors)[1]] + assert tuple("abcd") == _process_factors(model, all_factors)[0] # ty: 
ignore[invalid-argument-type] + assert tuple("efg") == _process_factors(model, all_factors)[1] # ty: ignore[invalid-argument-type] + assert (latent_factor,) == _process_factors(model, latent_factor)[0] # ty: ignore[invalid-argument-type] + assert (observed_factor,) == _process_factors(model, observed_factor)[1] # ty: ignore[invalid-argument-type] + assert tuple(factors[:-1]) == _process_factors(model, factors)[0] # ty: ignore[invalid-argument-type] + assert (factors[-1],) == _process_factors(model, factors)[1] # ty: ignore[invalid-argument-type] -def test_get_mask_lower_triangle_only(): - corr = np.ones((4, 4)) +def test_get_mask_lower_triangle_only() -> None: + corr = pd.DataFrame(np.ones((4, 4))) show_upper = False show_diag = False expected = np.array( @@ -269,12 +284,12 @@ def test_get_mask_lower_triangle_only(): [True] * 3 + [False], ], ) - result = _get_mask(corr, show_upper, show_diag) + result = _get_mask(corr, show_upper_triangle=show_upper, show_diagonal=show_diag) np.testing.assert_array_equal(result, expected) -def test_get_mask_lower_triangle_and_diag(): - corr = np.ones((4, 4)) +def test_get_mask_lower_triangle_and_diag() -> None: + corr = pd.DataFrame(np.ones((4, 4))) show_upper = False show_diag = True expected = np.array( @@ -285,12 +300,12 @@ def test_get_mask_lower_triangle_and_diag(): [True] * 4, ], ) - result = _get_mask(corr, show_upper, show_diag) + result = _get_mask(corr, show_upper_triangle=show_upper, show_diagonal=show_diag) np.testing.assert_array_equal(result, expected) -def test_get_mask_lower_and_upper_triangle_no_diag(): - corr = np.ones((4, 4)) +def test_get_mask_lower_and_upper_triangle_no_diag() -> None: + corr = pd.DataFrame(np.ones((4, 4))) show_upper = True show_diag = False expected = np.array( @@ -301,14 +316,14 @@ def test_get_mask_lower_and_upper_triangle_no_diag(): [True] * 3 + [False], ], ) - result = _get_mask(corr, show_upper, show_diag) + result = _get_mask(corr, show_upper_triangle=show_upper, 
show_diagonal=show_diag) np.testing.assert_array_equal(result, expected) -def test_get_mask_full_square_matrix(): - corr = np.ones((4, 4)) +def test_get_mask_full_square_matrix() -> None: + corr = pd.DataFrame(np.ones((4, 4))) show_upper = True show_diag = True - expected = corr.astype(bool) - result = _get_mask(corr, show_upper, show_diag) + expected = corr.to_numpy().astype(bool) + result = _get_mask(corr, show_upper_triangle=show_upper, show_diagonal=show_diag) np.testing.assert_array_equal(result, expected) diff --git a/tests/test_decorators.py b/tests/test_decorators.py index 85f9edef..fcfcc762 100644 --- a/tests/test_decorators.py +++ b/tests/test_decorators.py @@ -3,7 +3,7 @@ from skillmodels.decorators import extract_params, jax_array_output, register_params -def test_extract_params_decorator_only_key(): +def test_extract_params_decorator_only_key() -> None: @extract_params(key="a") def f(x, params): return x * params @@ -11,7 +11,7 @@ def f(x, params): assert f(x=3, params={"a": 4, "b": 5}) == 12 -def test_extract_params_direct_call_only_key(): +def test_extract_params_direct_call_only_key() -> None: def f(x, params): return x * params @@ -20,7 +20,7 @@ def f(x, params): assert g(x=3, params={"a": 4, "b": 5}) == 12 -def test_extract_params_decorator_only_names(): +def test_extract_params_decorator_only_names() -> None: @extract_params(names=["c", "d"]) def f(x, params): return x * params["c"] @@ -28,7 +28,7 @@ def f(x, params): assert f(x=3, params=[4, 5]) == 12 -def test_extract_params_direct_call_only_names(): +def test_extract_params_direct_call_only_names() -> None: def f(x, params): return x * params["c"] @@ -36,7 +36,7 @@ def f(x, params): assert g(x=3, params=[4, 5]) == 12 -def test_extract_params_decorator_key_and_names(): +def test_extract_params_decorator_key_and_names() -> None: @extract_params(key="a", names=["c", "d"]) def f(x, params): return x * params["c"] @@ -44,7 +44,7 @@ def f(x, params): assert f(x=3, params={"a": [4, 5], "b": [5, 6]}) 
== 12 -def test_extract_params_direct_call_key_and_names(): +def test_extract_params_direct_call_key_and_names() -> None: def f(x, params): return x * params["c"] @@ -52,7 +52,7 @@ def f(x, params): assert g(x=3, params={"a": [4, 5], "b": [5, 6]}) == 12 -def test_jax_array_output_decorator(): +def test_jax_array_output_decorator() -> None: @jax_array_output def f(): return (1, 2, 3) @@ -60,7 +60,7 @@ def f(): assert isinstance(f(), jnp.ndarray) -def test_jax_array_output_direct_call(): +def test_jax_array_output_direct_call() -> None: def f(): return (1, 2, 3) @@ -69,19 +69,19 @@ def f(): assert isinstance(g(), jnp.ndarray) -def test_register_params_decorator(): +def test_register_params_decorator() -> None: @register_params(params=["a", "b", "c"]) - def f(): + def f() -> str: return "bla" assert f.__registered_params__ == ["a", "b", "c"] assert f() == "bla" -def test_register_params_direct_call(): - def f(): +def test_register_params_direct_call() -> None: + def f() -> str: return "bla" g = register_params(f, params=["a", "b", "c"]) - assert g.__registered_params__ == ["a", "b", "c"] + assert g.__registered_params__ == ["a", "b", "c"] # ty: ignore[unresolved-attribute] assert g() == "bla" diff --git a/tests/test_filtered_states.py b/tests/test_filtered_states.py index b98f15b2..d3268196 100644 --- a/tests/test_filtered_states.py +++ b/tests/test_filtered_states.py @@ -3,37 +3,34 @@ import numpy as np import pandas as pd import pytest -import yaml +from skillmodels.config import TEST_DATA_DIR from skillmodels.filtered_states import get_filtered_states from skillmodels.maximization_inputs import get_maximization_inputs +from skillmodels.test_data.model2 import MODEL2 -# importing the TEST_DIR from config does not work for test run in conda build -TEST_DIR = Path(__file__).parent.resolve() +REGRESSION_VAULT = Path(__file__).parent / "regression_vault" @pytest.fixture def model2(): - with open(TEST_DIR / "model2.yaml") as y: - model_dict = yaml.load(y, 
Loader=yaml.FullLoader) - return model_dict + return MODEL2 @pytest.fixture def model2_data(): - data = pd.read_stata(TEST_DIR / "model2_simulated_data.dta") - data = data.set_index(["caseid", "period"]) - return data + data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") + return data.set_index(["caseid", "period"]) -def test_get_filtered_states(model2, model2_data): - params = pd.read_csv(TEST_DIR / "regression_vault" / "one_stage_anchoring.csv") +def test_get_filtered_states(model2, model2_data) -> None: + params = pd.read_csv(REGRESSION_VAULT / "one_stage_anchoring.csv") params = params.set_index(["category", "period", "name1", "name2"]) max_inputs = get_maximization_inputs(model2, model2_data) params = params.loc[max_inputs["params_template"].index] - calculated = get_filtered_states(model_dict=model2, data=model2_data, params=params) + calculated = get_filtered_states(model_spec=model2, data=model2_data, params=params) factors = ["fac1", "fac2", "fac3"] expected_ratios = [1.187757, 1, 1] diff --git a/tests/test_kalman_filters.py b/tests/test_kalman_filters.py index 4c71386e..26b578bc 100644 --- a/tests/test_kalman_filters.py +++ b/tests/test_kalman_filters.py @@ -28,9 +28,9 @@ @pytest.mark.parametrize(("seed", "update_func"), product(SEEDS, UPDATE_FUNCS)) -def test_kalman_update(seed, update_func): - np.random.seed(seed) - dim = np.random.randint(low=1, high=10) +def test_kalman_update(seed, update_func) -> None: + rng = np.random.default_rng(seed) + dim = int(rng.integers(low=1, high=10)) n_obs = 5 n_mix = 2 @@ -38,9 +38,11 @@ def test_kalman_update(seed, update_func): covs = np.zeros((n_obs, n_mix, dim, dim)) for i in range(n_obs): for j in range(n_mix): - states[i, j], covs[i, j] = _random_state_and_covariance(dim=dim) + states[i, j], covs[i, j] = _random_state_and_covariance(rng, dim=dim) - loadings, measurements, meas_sd = _random_loadings_measurements_and_meas_sd(states) + loadings, measurements, meas_sd = 
_random_loadings_measurements_and_meas_sd( + rng, states + ) expected_states = np.zeros_like(states) expected_covs = np.zeros_like(covs) @@ -86,7 +88,7 @@ def test_kalman_update(seed, update_func): @pytest.mark.parametrize("update_func", UPDATE_FUNCS) -def test_kalman_update_with_missing(update_func): +def test_kalman_update_with_missing(update_func) -> None: """State, cov and weights should not change, log likelihood should be zero.""" n_mixtures = 2 n_obs = 3 @@ -133,10 +135,10 @@ def test_kalman_update_with_missing(update_func): @pytest.mark.parametrize("seed", SEEDS) -def test_sigma_points(seed): - np.random.seed(seed) - state, cov = _random_state_and_covariance() - observed_factors = np.arange(2).reshape(1, 2) +def test_sigma_points(seed: int) -> None: + rng = np.random.default_rng(seed) + state, cov = _random_state_and_covariance(rng) + observed_factors = jnp.arange(2).reshape(1, 2) expected = JulierSigmaPoints(n=len(state), kappa=2).sigma_points(state, cov) observed_part = np.tile(observed_factors, len(expected)).reshape(-1, 2) expected = np.hstack([expected, observed_part]) @@ -157,10 +159,10 @@ def test_sigma_points(seed): @pytest.mark.parametrize("seed", SEEDS) -def test_sigma_scaling_factor_and_weights(seed): - np.random.seed(seed) - dim = np.random.randint(low=1, high=15) - kappa = np.random.uniform(low=0.5, high=5) +def test_sigma_scaling_factor_and_weights(seed) -> None: + rng = np.random.default_rng(seed) + dim = int(rng.integers(low=1, high=15)) + kappa = float(rng.uniform(low=0.5, high=5)) # Test my assumption that weights for mean and cov are equal in the Julier algorithm expected_weights = JulierSigmaPoints(n=dim, kappa=kappa).Wm expected_weights2 = JulierSigmaPoints(n=dim, kappa=kappa).Wc @@ -176,20 +178,19 @@ def test_sigma_scaling_factor_and_weights(seed): # ====================================================================================== -def test_transformation_of_sigma_points(): +def test_transformation_of_sigma_points() -> None: sp = 
jnp.arange(10).reshape(1, 1, 5, 2) + 1 def f(params, states): - out = jnp.column_stack( + return jnp.column_stack( [(states * params["fac1"][0]).sum(axis=1), states[..., 1]], ) - return out trans_coeffs = {"fac1": jnp.array([2]), "fac2": jnp.array([])} anch_scaling = jnp.array([[1, 1], [2, 1]]) - anch_constants = np.array([[0, 0], [0, 0]]) + anch_constants = jnp.array([[0, 0], [0, 0]]) expected = jnp.array([[[[3, 2], [7, 4], [11, 6], [15, 8], [19, 10]]]]) @@ -213,11 +214,11 @@ def f(params, states): @pytest.mark.parametrize("seed", SEEDS) -def test_predict_against_linear_filterpy(seed): - np.random.seed(seed) - state, cov = _random_state_and_covariance() +def test_predict_against_linear_filterpy(seed) -> None: + rng = np.random.default_rng(seed) + state, cov = _random_state_and_covariance(rng) dim = len(state) - trans_mat = np.random.uniform(low=-1, high=1, size=(dim, dim)) + trans_mat = rng.uniform(low=-1, high=1, size=(dim, dim)) shock_sds = 0.5 * np.arange(dim) / dim @@ -235,8 +236,7 @@ def linear(params, states): return jnp.dot(states, params) def transition_function(params, states): - out = jnp.column_stack([linear(params[f"fac{i}"], states) for i in range(dim)]) - return out + return jnp.column_stack([linear(params[f"fac{i}"], states) for i in range(dim)]) sm_state, sm_chol = _convert_predict_inputs_from_filterpy_to_skillmodels(state, cov) scaling_factor, weights = calculate_sigma_scaling_factor_and_weights(dim, 2) @@ -249,13 +249,13 @@ def transition_function(params, states): transition_function, sm_state, sm_chol, - scaling_factor, + float(scaling_factor), weights, trans_coeffs, jnp.array(shock_sds), anch_scaling, anch_constants, - observed_factors, + jnp.asarray(observed_factors), ) aaae(calc_states.flatten(), expected_state.flatten()) @@ -267,20 +267,20 @@ def transition_function(params, states): # ====================================================================================== -def _random_state_and_covariance(dim=None): +def 
_random_state_and_covariance(rng, dim=None): if dim is None: - dim = np.random.randint(low=1, high=10) - factorized = np.random.uniform(low=-1, high=3, size=(dim, dim)) + dim = rng.integers(low=1, high=10) + factorized = rng.uniform(low=-1, high=3, size=(dim, dim)) cov = factorized @ factorized.T * 0.5 + np.eye(dim) - state = np.random.uniform(low=-5, high=5, size=dim) + state = rng.uniform(low=-5, high=5, size=dim) return state, cov -def _random_loadings_measurements_and_meas_sd(state): +def _random_loadings_measurements_and_meas_sd(rng, state): n_obs, _n_mix, dim = state.shape - loadings = np.random.uniform(size=dim) - meas_sd = np.random.uniform() - epsilon = np.random.normal(loc=0, scale=meas_sd, size=(n_obs)) + loadings = rng.uniform(size=dim) + meas_sd = rng.uniform() + epsilon = rng.normal(loc=0, scale=meas_sd, size=(n_obs)) measurement = (state @ loadings).sum(axis=1) + epsilon return loadings, measurement, meas_sd diff --git a/tests/test_likelihood_regression.py b/tests/test_likelihood_regression.py index cd4cbc02..a6ccb176 100644 --- a/tests/test_likelihood_regression.py +++ b/tests/test_likelihood_regression.py @@ -1,4 +1,5 @@ import json +from dataclasses import replace from itertools import product from pathlib import Path @@ -6,11 +7,13 @@ import numpy as np import pandas as pd import pytest -import yaml from numpy.testing import assert_array_almost_equal as aaae +from skillmodels.config import TEST_DATA_DIR from skillmodels.decorators import register_params from skillmodels.maximization_inputs import get_maximization_inputs +from skillmodels.model_spec import ModelSpec, Normalizations +from skillmodels.test_data.model2 import MODEL2 from skillmodels.utilities import reduce_n_periods jax.config.update("jax_enable_x64", True) @@ -23,35 +26,30 @@ "one_stage_anchoring_custom_functions", ] -# importing the TEST_DIR from config does not work for test run in conda build -TEST_DIR = Path(__file__).parent.resolve() +REGRESSION_VAULT = Path(__file__).parent / 
"regression_vault" @pytest.fixture def model2(): - with open(TEST_DIR / "model2.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) - return model_dict + return MODEL2 @pytest.fixture def model2_data(): - data = pd.read_stata(TEST_DIR / "model2_simulated_data.dta") - data = data.set_index(["caseid", "period"]) - return data + data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") + return data.set_index(["caseid", "period"]) def _convert_model(base_model, model_name): - model = base_model.copy() if model_name == "no_stages_anchoring": - model.pop("stagemap") - elif model_name == "one_stage": - model.pop("anchoring") - elif model_name == "one_stage_anchoring": - pass - elif model_name == "two_stages_anchoring": - model["stagemap"] = [0, 0, 0, 0, 1, 1, 1] - elif model_name == "one_stage_anchoring_custom_functions": + return base_model._replace(stagemap=None) + if model_name == "one_stage": + return base_model._replace(anchoring=None) + if model_name == "one_stage_anchoring": + return base_model + if model_name == "two_stages_anchoring": + return base_model.with_stagemap((0, 0, 0, 0, 1, 1, 1)) + if model_name == "one_stage_anchoring_custom_functions": @register_params(params=[]) def constant(fac3, params): @@ -60,21 +58,27 @@ def constant(fac3, params): @register_params(params=["fac1", "fac2", "fac3", "constant"]) def linear(fac1, fac2, fac3, params): p = params - out = p["constant"] + fac1 * p["fac1"] + fac2 * p["fac2"] + fac3 * p["fac3"] - return out + return ( + p["constant"] + fac1 * p["fac1"] + fac2 * p["fac2"] + fac3 * p["fac3"] + ) - model["factors"]["fac2"]["transition_function"] = linear - model["factors"]["fac3"]["transition_function"] = constant - else: - raise ValueError("Invalid model name.") - return model + return base_model.with_transition_functions( + { + "fac1": "log_ces", + "fac2": linear, + "fac3": constant, + } + ) + raise ValueError("Invalid model name.") @pytest.mark.parametrize( ("model_name", "fun_key"), 
product(MODEL_NAMES, ["loglike", "debug_loglike"]) ) -def test_likelihood_values_have_not_changed(model2, model2_data, model_name, fun_key): - regvault = TEST_DIR / "regression_vault" +def test_likelihood_values_have_not_changed( + model2, model2_data, model_name, fun_key +) -> None: + regvault = REGRESSION_VAULT model = _convert_model(model2, model_name) params = pd.read_csv(regvault / f"{model_name}.csv").set_index( ["category", "period", "name1", "name2"], @@ -87,12 +91,12 @@ def test_likelihood_values_have_not_changed(model2, model2_data, model_name, fun fun = inputs[fun_key] new_loglike = fun(params)["value"] if "debug" in fun_key else fun(params) - with open(regvault / f"{model_name}_result.json") as j: + with (regvault / f"{model_name}_result.json").open() as j: old_loglike = np.array(json.load(j)).sum() aaae(new_loglike, old_loglike) -def test_splitting_does_not_change_gradient(model2, model2_data): +def test_splitting_does_not_change_gradient(model2, model2_data) -> None: inputs = get_maximization_inputs(model2, model2_data) inputs_split = get_maximization_inputs(model2, model2_data, 13) @@ -110,8 +114,8 @@ def test_splitting_does_not_change_gradient(model2, model2_data): ) def test_likelihood_contributions_have_not_changed( model2, model2_data, model_name, fun_key -): - regvault = TEST_DIR / "regression_vault" +) -> None: + regvault = REGRESSION_VAULT model = _convert_model(model2, model_name) params = pd.read_csv(regvault / f"{model_name}.csv").set_index( ["category", "period", "name1", "name2"], @@ -124,7 +128,7 @@ def test_likelihood_contributions_have_not_changed( fun = inputs[fun_key] new_loglikes = fun(params)["contributions"] if "debug" in fun_key else fun(params) - with open(regvault / f"{model_name}_result.json") as j: + with (regvault / f"{model_name}_result.json").open() as j: old_loglikes = np.array(json.load(j)) aaae(new_loglikes, old_loglikes) @@ -133,8 +137,11 @@ def test_likelihood_contributions_have_not_changed( ("model_type", "fun_key"), 
product(["no_stages_anchoring", "with_missings"], ["loglike_and_gradient"]), ) -def test_likelihood_contributions_large_nobs(model2, model2_data, model_type, fun_key): - regvault = TEST_DIR / "regression_vault" +def test_likelihood_contributions_large_nobs( + model2, model2_data, model_type, fun_key +) -> None: + rng = np.random.default_rng(42) + regvault = REGRESSION_VAULT model = _convert_model(model2, "no_stages_anchoring") params = pd.read_csv(regvault / "no_stages_anchoring.csv").set_index( ["category", "period", "name1", "name2"], @@ -168,12 +175,12 @@ def test_likelihood_contributions_large_nobs(model2, model2_data, model_type, fu ] if model_type == "no_stages_anchoring": for col in cols: - this_round[col] += np.random.normal(0, 0.1, (len(model2_data),)) + this_round[col] += rng.normal(0, 0.1, (len(model2_data),)) elif model_type == "with_missings": fraction_to_set_missing = 0.9 n_rows = len(this_round) n_missing = int(n_rows * fraction_to_set_missing) - rows_to_set_missing = this_round.sample(n=n_missing).index + rows_to_set_missing = this_round.sample(n=n_missing, random_state=rng).index this_round.loc[rows_to_set_missing, cols] = np.nan else: raise ValueError(f"Invalid model type: {model_type}") @@ -191,13 +198,30 @@ def test_likelihood_contributions_large_nobs(model2, model2_data, model_type, fu assert np.isfinite(loglike[1]).all() -def test_likelihood_runs_with_empty_periods(model2, model2_data): - del model2["anchoring"] - for factor in ["fac1", "fac2"]: - model2["factors"][factor]["measurements"][-1] = [] - model2["factors"][factor]["normalizations"]["loadings"][-1] = {} +def test_likelihood_runs_with_empty_periods(model2, model2_data) -> None: + # Remove anchoring and clear last-period measurements for fac1 and fac2 + new_factors = {} + for name, spec in model2.factors.items(): + if name in ("fac1", "fac2"): + new_meas = (*spec.measurements[:-1], ()) + old_norms = spec.normalizations + assert old_norms is not None + new_loadings = 
(*old_norms.loadings[:-1], {}) + new_norms = Normalizations( + loadings=new_loadings, + intercepts=old_norms.intercepts, + ) + new_factors[name] = replace( + spec, measurements=new_meas, normalizations=new_norms + ) + else: + new_factors[name] = spec + model = model2._replace( + factors=new_factors, + anchoring=None, + ) - func_dict = get_maximization_inputs(model2, model2_data) + func_dict = get_maximization_inputs(model, model2_data) params = func_dict["params_template"] params["value"] = 0.1 @@ -206,9 +230,10 @@ def test_likelihood_runs_with_empty_periods(model2, model2_data): debug_loglike(params) -def test_likelihood_runs_with_too_long_data(model2, model2_data): - model = reduce_n_periods(model2, 2) - func_dict = get_maximization_inputs(model, model2_data) +def test_likelihood_runs_with_too_long_data(model2, model2_data) -> None: + reduced = reduce_n_periods(model2, 2) + assert isinstance(reduced, ModelSpec) + func_dict = get_maximization_inputs(reduced, model2_data) params = func_dict["params_template"] params["value"] = 0.1 @@ -217,11 +242,11 @@ def test_likelihood_runs_with_too_long_data(model2, model2_data): debug_loglike(params) -def test_likelihood_runs_with_observed_factors(model2, model2_data): - model2["observed_factors"] = ["ob1", "ob2"] +def test_likelihood_runs_with_observed_factors(model2, model2_data) -> None: + model = model2.with_added_observed_factors("ob1", "ob2") model2_data["ob1"] = np.arange(len(model2_data)) model2_data["ob2"] = np.ones(len(model2_data)) - func_dict = get_maximization_inputs(model2, model2_data) + func_dict = get_maximization_inputs(model, model2_data) params = func_dict["params_template"] params["value"] = 0.1 diff --git a/tests/test_maximization_inputs.py b/tests/test_maximization_inputs.py index 0f901f25..1f2dd6fa 100644 --- a/tests/test_maximization_inputs.py +++ b/tests/test_maximization_inputs.py @@ -1,21 +1,26 @@ +"""Tests for maximization input functions.""" + import jax.numpy as jnp import numpy as np from 
skillmodels.maximization_inputs import _to_numpy -def test_to_numpy_with_dict(): +def test_to_numpy_with_dict() -> None: + """Test _to_numpy with dictionary input.""" dict_ = {"a": jnp.ones(3), "b": 4.5} calculated = _to_numpy(dict_) assert isinstance(calculated["a"], np.ndarray) assert isinstance(calculated["b"], float) -def test_to_numpy_one_array(): +def test_to_numpy_one_array() -> None: + """Test _to_numpy with single array input.""" calculated = _to_numpy(jnp.ones(3)) assert isinstance(calculated, np.ndarray) -def test_to_numpy_one_float(): +def test_to_numpy_one_float() -> None: + """Test _to_numpy with single float input.""" calculated = _to_numpy(3.5) assert isinstance(calculated, float) diff --git a/tests/test_params_index.py b/tests/test_params_index.py index 9e10d61f..e7d00794 100644 --- a/tests/test_params_index.py +++ b/tests/test_params_index.py @@ -1,9 +1,7 @@ -from pathlib import Path - import pandas as pd import pytest -import yaml +from skillmodels.config import TEST_DATA_DIR from skillmodels.params_index import ( get_control_params_index_tuples, get_initial_cholcovs_index_tuples, @@ -16,40 +14,37 @@ initial_mean_index_tuples, ) from skillmodels.process_model import process_model +from skillmodels.test_data.model2 import MODEL2 +from skillmodels.types import TransitionInfo @pytest.fixture def model2_inputs(): - test_dir = Path(__file__).parent.resolve() - with open(test_dir / "model2.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) - processed = process_model(model_dict) - - out = { - "update_info": processed["update_info"], - "labels": processed["labels"], - "dimensions": processed["dimensions"], - "transition_info": processed["transition_info"], - "endogenous_factors_info": processed["endogenous_factors_info"], + processed = process_model(MODEL2) + + return { + "update_info": processed.update_info, + "labels": processed.labels, + "dimensions": processed.dimensions, + "transition_info": processed.transition_info, + 
"endogenous_factors_info": processed.endogenous_factors_info, } - return out -def test_params_index_with_model2(model2_inputs): - test_dir = Path(__file__).parent.resolve() +def test_params_index_with_model2(model2_inputs) -> None: calculated = get_params_index(**model2_inputs) expected = pd.read_csv( - test_dir / "model2_correct_params_index.csv", + TEST_DATA_DIR / "model2_correct_params_index.csv", index_col=["category", "period", "name1", "name2"], ).index assert calculated.equals(expected) -def test_control_coeffs_index_tuples(): +def test_control_coeffs_index_tuples() -> None: uinfo_tups = [(0, "m1"), (0, "m2"), (0, "bla"), (1, "m1"), (1, "m2")] uinfo = pd.DataFrame(index=pd.MultiIndex.from_tuples(uinfo_tups)) - controls = ["constant", "c1"] + controls = ("constant", "c1") expected = [ ("controls", 0, "m1", "constant"), @@ -68,14 +63,14 @@ def test_control_coeffs_index_tuples(): assert calculated == expected -def test_loading_index_tuples(): +def test_loading_index_tuples() -> None: uinfo_tups = [(0, "m1"), (0, "m2"), (0, "bla"), (1, "m1"), (1, "m2")] uinfo = pd.DataFrame( True, index=pd.MultiIndex.from_tuples(uinfo_tups), columns=["fac1", "fac2"], ) - factors = ["fac1", "fac2"] + factors = ("fac1", "fac2") expected = [ ("loadings", 0, "m1", "fac1"), ("loadings", 0, "m1", "fac2"), @@ -93,7 +88,7 @@ def test_loading_index_tuples(): assert calculated == expected -def test_meas_sd_index_tuples(): +def test_meas_sd_index_tuples() -> None: uinfo_tups = [(0, "m1"), (0, "m2"), (0, "bla"), (1, "m1"), (1, "m2")] uinfo = pd.DataFrame(index=pd.MultiIndex.from_tuples(uinfo_tups)) @@ -109,9 +104,9 @@ def test_meas_sd_index_tuples(): assert calculated == expected -def test_shock_sd_index_tuples(): - periods = [0, 1, 2] - factors = ["fac1", "fac2"] +def test_shock_sd_index_tuples() -> None: + periods = (0, 1, 2) + factors = ("fac1", "fac2") expected = [ ("shock_sds", 0, "fac1", "-"), @@ -120,13 +115,15 @@ def test_shock_sd_index_tuples(): ("shock_sds", 1, "fac2", "-"), ] - 
calculated = get_shock_sds_index_tuples(periods, factors, False) + calculated = get_shock_sds_index_tuples( + periods, factors, has_endogenous_factors=False + ) assert calculated == expected -def test_initial_mean_index_tuples(): +def test_initial_mean_index_tuples() -> None: nmixtures = 3 - factors = ["fac1", "fac2"] + factors = ("fac1", "fac2") expected = [ ("initial_states", 0, "mixture_0", "fac1"), @@ -141,7 +138,7 @@ def test_initial_mean_index_tuples(): assert calculated == expected -def test_mixture_weight_index_tuples(): +def test_mixture_weight_index_tuples() -> None: nmixtures = 3 expected = [ ("mixture_weights", 0, "mixture_0", "-"), @@ -152,9 +149,9 @@ def test_mixture_weight_index_tuples(): assert calculated == expected -def test_initial_cov_index_tuples(): +def test_initial_cov_index_tuples() -> None: nmixtures = 2 - factors = ["fac1", "fac2", "fac3"] + factors = ("fac1", "fac2", "fac3") expected = [ ("initial_cholcovs", 0, "mixture_0", "fac1-fac1"), ("initial_cholcovs", 0, "mixture_0", "fac2-fac1"), @@ -174,15 +171,20 @@ def test_initial_cov_index_tuples(): assert calculated == expected -def test_trans_coeffs_index_tuples_no_endogenous_factors(): - periods = [0, 1, 2] +def test_trans_coeffs_index_tuples_no_endogenous_factors() -> None: + periods = (0, 1, 2) param_names = { "fac1": ["fac1", "fac2", "fac3", "constant"], "fac2": [], "fac3": ["fac1", "fac2", "fac3", "phi"], } - trans_info = {"param_names": param_names} + trans_info = TransitionInfo( + func=lambda x: x, # dummy function + param_names=param_names, + individual_functions={}, + function_names={}, + ) expected = [ ("transition", 0, "fac1", "fac1"), @@ -212,15 +214,20 @@ def test_trans_coeffs_index_tuples_no_endogenous_factors(): assert calculated == expected -def test_trans_coeffs_index_tuples_has_endogenous_factors(): - periods = [0, 1, 2, 3, 4, 5] +def test_trans_coeffs_index_tuples_has_endogenous_factors() -> None: + periods = (0, 1, 2, 3, 4, 5) param_names = { "fac1": ["fac1", "fac2", 
"fac3", "constant"], "fac2": [], "fac3": ["fac1", "fac2", "fac3", "phi"], } - trans_info = {"param_names": param_names} + trans_info = TransitionInfo( + func=lambda x: x, # dummy function + param_names=param_names, + individual_functions={}, + function_names={}, + ) expected = [ ("transition", 0, "fac1", "fac1"), diff --git a/tests/test_parse_params.py b/tests/test_parse_params.py index 33ffbe88..2d1392c8 100644 --- a/tests/test_parse_params.py +++ b/tests/test_parse_params.py @@ -5,41 +5,45 @@ """ -from pathlib import Path - import jax.numpy as jnp import numpy as np import pandas as pd import pytest -import yaml from numpy.testing import assert_array_equal as aae +from skillmodels.config import TEST_DATA_DIR from skillmodels.parse_params import create_parsing_info, parse_params from skillmodels.process_model import process_model +from skillmodels.test_data.model2 import MODEL2 +from skillmodels.types import Anchoring @pytest.fixture def parsed_parameters(): - test_dir = Path(__file__).parent.resolve() p_index = pd.read_csv( - test_dir / "model2_correct_params_index.csv", + TEST_DATA_DIR / "model2_correct_params_index.csv", index_col=["category", "period", "name1", "name2"], ).index - with open(test_dir / "model2.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) - - processed = process_model(model_dict) + processed = process_model(MODEL2) - update_info = processed["update_info"] - labels = processed["labels"] - dimensions = processed["dimensions"] + update_info = processed.update_info + labels = processed.labels + dimensions = processed.dimensions # this overwrites the anchoring setting from the model specification to get a # more meaningful test - anchoring = {"ignore_constant_when_anchoring": False} + anchoring = Anchoring( + anchoring=False, + outcomes={}, + factors=(), + free_controls=True, + free_constant=True, + free_loadings=True, + ignore_constant_when_anchoring=False, + ) parsing_info = create_parsing_info( - params_index=p_index, + 
params_index=p_index, # ty: ignore[invalid-argument-type] update_info=update_info, labels=labels, anchoring=anchoring, @@ -49,41 +53,46 @@ def parsed_parameters(): params_vec = jnp.arange(len(p_index)) n_obs = 5 - parsed = parse_params(params_vec, parsing_info, dimensions, labels, n_obs) - - return dict( - zip(["states", "upper_chols", "log_weights", "pardict"], parsed, strict=False) + states, upper_chols, log_weights, parsed_params = parse_params( + params_vec, parsing_info, dimensions, labels, n_obs ) + return { + "states": states, + "upper_chols": upper_chols, + "log_weights": log_weights, + "parsed_params": parsed_params, + } + -def test_controls(parsed_parameters): +def test_controls(parsed_parameters) -> None: expected = jnp.arange(118).reshape(59, 2) - aae(parsed_parameters["pardict"]["controls"], expected) + aae(parsed_parameters["parsed_params"].controls, expected) -def test_loadings(parsed_parameters): +def test_loadings(parsed_parameters) -> None: expected_values = jnp.arange(118, 177) - calculated = parsed_parameters["pardict"]["loadings"] + calculated = parsed_parameters["parsed_params"].loadings calculated_values = calculated[calculated != 0] aae(expected_values, calculated_values) -def test_meas_sds(parsed_parameters): +def test_meas_sds(parsed_parameters) -> None: expected = jnp.arange(177, 236) - aae(parsed_parameters["pardict"]["meas_sds"], expected) + aae(parsed_parameters["parsed_params"].meas_sds, expected) -def test_shock_sds(parsed_parameters): +def test_shock_sds(parsed_parameters) -> None: expected = jnp.arange(236, 257).reshape(7, 3) - aae(parsed_parameters["pardict"]["shock_sds"], expected) + aae(parsed_parameters["parsed_params"].shock_sds, expected) -def test_initial_states(parsed_parameters): +def test_initial_states(parsed_parameters) -> None: expected = jnp.arange(257, 260).reshape(1, 3).repeat(5, axis=0).reshape(5, 1, 3) aae(parsed_parameters["states"], expected) -def test_initial_upper_chols(parsed_parameters): +def 
test_initial_upper_chols(parsed_parameters) -> None: expected = ( jnp.array([[[261, 262, 264], [0, 263, 265], [0, 0, 266]]]) .repeat(5, axis=0) @@ -92,8 +101,8 @@ def test_initial_upper_chols(parsed_parameters): aae(parsed_parameters["upper_chols"], expected) -def test_transition_parameters(parsed_parameters): - calculated = parsed_parameters["pardict"]["transition"] +def test_transition_parameters(parsed_parameters) -> None: + calculated = parsed_parameters["parsed_params"].transition aae(calculated["fac1"], jnp.arange(385, 413).reshape(7, 4) - 118) aae(calculated["fac2"], jnp.arange(413, 441).reshape(7, 4) - 118) @@ -102,15 +111,15 @@ def test_transition_parameters(parsed_parameters): assert isinstance(calculated, dict) -def test_anchoring_scaling_factors(parsed_parameters): - calculated = parsed_parameters["pardict"]["anchoring_scaling_factors"] +def test_anchoring_scaling_factors(parsed_parameters) -> None: + calculated = parsed_parameters["parsed_params"].anchoring_scaling_factors expected = np.ones((8, 3)) expected[:, 0] = jnp.array([127 + 7 * i for i in range(8)]) aae(calculated, expected) -def test_anchoring_constants(parsed_parameters): - calculated = parsed_parameters["pardict"]["anchoring_constants"] +def test_anchoring_constants(parsed_parameters) -> None: + calculated = parsed_parameters["parsed_params"].anchoring_constants expected = np.zeros((8, 3)) expected[:, 0] = jnp.array([18 + i * 14 for i in range(8)]) aae(calculated, expected) diff --git a/tests/test_process_data.py b/tests/test_process_data.py index 4ec797e6..4437911e 100644 --- a/tests/test_process_data.py +++ b/tests/test_process_data.py @@ -1,14 +1,13 @@ import io import textwrap -from pathlib import Path import jax.numpy as jnp import numpy as np import pandas as pd import pytest -import yaml from numpy.testing import assert_array_equal as aae +from skillmodels.config import TEST_DATA_DIR from skillmodels.process_data import ( _augment_data_for_endogenous_factors, 
_generate_controls_array, @@ -18,16 +17,15 @@ pre_process_data, ) from skillmodels.process_model import process_model +from skillmodels.test_data.simplest_augmented_model import SIMPLEST_AUGMENTED_MODEL +from skillmodels.types import Labels -# importing the TEST_DIR from config does not work for test run in conda build -TEST_DIR = Path(__file__).parent.resolve() - -def test_pre_process_data(): +def test_pre_process_data() -> None: df = pd.DataFrame(data=np.arange(20).reshape(2, 10).T, columns=["var", "inv"]) df["period"] = [1, 2, 3, 2, 3, 4, 2, 4, 3, 1] df["id"] = [1, 1, 1, 3, 3, 3, 4, 4, 5, 5] - df.set_index(["id", "period"], inplace=True) + df = df.set_index(["id", "period"]) exp = pd.DataFrame() period = [0, 1, 2, 3] * 4 @@ -39,7 +37,7 @@ def test_pre_process_data(): } data = np.column_stack([period, id_, data["var"], data["inv"]]) exp = pd.DataFrame(data=data, columns=["__period__", "__id__", "var", "inv"]) - exp.set_index(["__id__", "__period__"], inplace=True) + exp = exp.set_index(["__id__", "__period__"]) res = pre_process_data(df, [0, 1, 2, 3]) assert res[["var", "inv"]].equals(exp[["var", "inv"]]) @@ -48,36 +46,35 @@ def test_pre_process_data(): @pytest.fixture def simplest_augmented(): out = {} - with open(TEST_DIR / "simplest_augmented_model.yaml") as y: - out["model_dict"] = yaml.load(y, Loader=yaml.FullLoader) + out["model"] = SIMPLEST_AUGMENTED_MODEL _df = pd.DataFrame(data=np.arange(15).reshape(3, 5).T, columns=["var", "inv", "of"]) _df["period"] = [1, 1, 2, 1, 2] _df["id"] = [1, 3, 3, 5, 5] out["data_input"] = _df.set_index(["id", "period"]) out["data_exp"] = pd.read_csv( - TEST_DIR / "simplest_augmented_data_expected.csv", + TEST_DATA_DIR / "simplest_augmented_data_expected.csv", index_col=["id", "aug_period"], ) return out -def test_augment_data_for_endogenous_factors(simplest_augmented): - model = process_model(simplest_augmented["model_dict"]) +def test_augment_data_for_endogenous_factors(simplest_augmented) -> None: + processed_model = 
process_model(simplest_augmented["model"]) pre_processed_data = pre_process_data( - simplest_augmented["data_input"], model["labels"]["periods"] + simplest_augmented["data_input"], processed_model.labels.periods ) pre_processed_data["constant"] = 1 res = _augment_data_for_endogenous_factors( df=pre_processed_data, - labels=model["labels"], - update_info=model["update_info"], + labels=processed_model.labels, + update_info=processed_model.update_info, ) cols = ["var", "inv", "constant", "of"] pd.testing.assert_frame_equal(res[cols], simplest_augmented["data_exp"][cols]) -def test_handle_controls_with_missings(): - controls = ["c1"] +def test_handle_controls_with_missings() -> None: + controls = ("c1",) uinfo_ind_tups = [(0, "m1"), (0, "m2")] update_info = pd.DataFrame(index=pd.MultiIndex.from_tuples(uinfo_ind_tups)) data = [[1, 1, 1], [np.nan, 1, 1], [np.nan, 1, np.nan], [np.nan, np.nan, np.nan]] @@ -86,14 +83,14 @@ def test_handle_controls_with_missings(): df["id"] = np.arange(4) df["__old_id__"] = df["id"] df["__old_period__"] = df["aug_period"] + 1 - df.set_index(["id", "aug_period"], inplace=True) + df = df.set_index(["id", "aug_period"]) with pytest.warns(UserWarning): # noqa: PT030 calculated = _handle_controls_with_missings(df, controls, update_info) - assert calculated.loc[(2, 0)].isna().all() + assert calculated.loc[(2, 0)].isna().all() # ty: ignore[unresolved-attribute] -def test_generate_measurements_array(): +def test_generate_measurements_array() -> None: uinfo_ind_tups = [(0, "m1"), (0, "m2"), (1, "m1"), (1, "m3")] update_info = pd.DataFrame(index=pd.MultiIndex.from_tuples(uinfo_ind_tups)) @@ -112,7 +109,7 @@ def test_generate_measurements_array(): aae(calculated, expected) -def test_generate_controls_array(): +def test_generate_controls_array() -> None: csv = """ id,aug_period,c1,c2 0, 0, 1, 2 @@ -122,14 +119,26 @@ def test_generate_controls_array(): """ data = _read_csv_string(csv, ["id", "aug_period"]) - labels = {"controls": ["c1", "c2"], 
"aug_periods": [0, 1]} + labels = Labels( + latent_factors=(), + observed_factors=(), + controls=("c1", "c2"), + periods=(0, 1), + stagemap=(0, 0), + stages=(0,), + aug_periods=(0, 1), + aug_periods_to_periods={0: 0, 1: 1}, + aug_stagemap=(0, 0), + aug_stages=(0,), + aug_stages_to_stages={0: 0}, + ) calculated = _generate_controls_array(data, labels, 2) expected = jnp.array([[[1, 2], [5, 8]], [[3, 4], [7, 8]]]) aae(calculated, expected) -def test_generate_observed_factor_array(): +def test_generate_observed_factor_array() -> None: csv = """ id,aug_period,v1,v2 0, 0, 1, 2 @@ -139,7 +148,19 @@ def test_generate_observed_factor_array(): """ data = _read_csv_string(csv, ["id", "aug_period"]) - labels = {"observed_factors": ["v1", "v2"], "aug_periods": [0, 1]} + labels = Labels( + latent_factors=(), + observed_factors=("v1", "v2"), + controls=("constant",), + periods=(0, 1), + stagemap=(0, 0), + stages=(0,), + aug_periods=(0, 1), + aug_periods_to_periods={0: 0, 1: 1}, + aug_stagemap=(0, 0), + aug_stages=(0,), + aug_stages_to_stages={0: 0}, + ) calculated = _generate_observed_factor_array(data, labels, 2) expected = jnp.array([[[1, 2], [5, 8]], [[3, 4], [7, 8]]]) diff --git a/tests/test_process_model.py b/tests/test_process_model.py index ab808814..7d5227b2 100644 --- a/tests/test_process_model.py +++ b/tests/test_process_model.py @@ -1,92 +1,86 @@ import inspect -from pathlib import Path +from dataclasses import replace import pandas as pd import pytest -import yaml from pandas.testing import assert_frame_equal +from skillmodels.config import TEST_DATA_DIR +from skillmodels.model_spec import FactorSpec from skillmodels.process_model import get_has_endogenous_factors, process_model +from skillmodels.test_data.model2 import MODEL2 +from skillmodels.types import TransitionInfo # ====================================================================================== # Integration test with model2 from the replication files of CHS2010 # 
====================================================================================== -# importing the TEST_DIR from config does not work for test run in conda build -TEST_DIR = Path(__file__).parent.resolve() - @pytest.fixture def model2(): - with open(TEST_DIR / "model2.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) - return model_dict + return MODEL2 -def test_has_endogenous_factors(model2): - assert ( - process_model(model2)["endogenous_factors_info"]["has_endogenous_factors"] - == False - ) +def test_has_endogenous_factors(model2) -> None: + assert process_model(model2).endogenous_factors_info.has_endogenous_factors == False -def test_dimensions(model2): - res = process_model(model2)["dimensions"] - assert res["n_latent_factors"] == 3 - assert res["n_observed_factors"] == 0 - assert res["n_all_factors"] == 3 - assert res["n_periods"] == 8 - assert res["n_controls"] == 2 - assert res["n_mixtures"] == 1 +def test_dimensions(model2) -> None: + res = process_model(model2).dimensions + assert res.n_latent_factors == 3 + assert res.n_observed_factors == 0 + assert res.n_all_factors == 3 + assert res.n_periods == 8 + assert res.n_controls == 2 + assert res.n_mixtures == 1 -def test_labels(model2): - res = process_model(model2)["labels"] - assert res["latent_factors"] == ["fac1", "fac2", "fac3"] - assert res["observed_factors"] == [] - assert res["all_factors"] == ["fac1", "fac2", "fac3"] - assert res["controls"] == ["constant", "x1"] - assert res["periods"] == [0, 1, 2, 3, 4, 5, 6, 7] - assert res["stagemap"] == [0, 0, 0, 0, 0, 0, 0] - assert res["stages"] == [0] +def test_labels(model2) -> None: + res = process_model(model2).labels + assert res.latent_factors == ("fac1", "fac2", "fac3") + assert res.observed_factors == () + assert res.all_factors == ("fac1", "fac2", "fac3") + assert res.controls == ("constant", "x1") + assert res.periods == (0, 1, 2, 3, 4, 5, 6, 7) + assert res.stagemap == (0, 0, 0, 0, 0, 0, 0) + assert res.stages == (0,) -def 
test_estimation_options(model2): - res = process_model(model2)["estimation_options"] - assert res["sigma_points_scale"] == 2 - assert res["robust_bounds"] - assert res["bounds_distance"] == 0.001 +def test_estimation_options(model2) -> None: + res = process_model(model2).estimation_options + assert res.sigma_points_scale == 2 + assert res.robust_bounds + assert res.bounds_distance == 0.001 -def test_anchoring(model2): - res = process_model(model2)["anchoring"] - assert res["outcomes"] == {"fac1": "Q1"} - assert res["factors"] == ["fac1"] - assert res["free_controls"] - assert res["free_constant"] - assert res["free_loadings"] +def test_anchoring(model2) -> None: + res = process_model(model2).anchoring + assert res.outcomes == {"fac1": "Q1"} + assert res.factors == ("fac1",) + assert res.free_controls + assert res.free_constant + assert res.free_loadings -def test_transition_info(model2): - res = process_model(model2)["transition_info"] +def test_transition_info(model2) -> None: + res = process_model(model2).transition_info - assert isinstance(res, dict) - assert callable(res["func"]) + assert isinstance(res, TransitionInfo) + assert callable(res.func) - assert list(inspect.signature(res["func"]).parameters) == ["params", "states"] + assert list(inspect.signature(res.func).parameters) == ["params", "states"] -def test_update_info(model2): - res = process_model(model2)["update_info"] - test_dir = Path(__file__).parent.resolve() +def test_update_info(model2) -> None: + res = process_model(model2).update_info expected = pd.read_csv( - test_dir / "model2_correct_update_info.csv", + TEST_DATA_DIR / "model2_correct_update_info.csv", index_col=["aug_period", "variable"], ) assert_frame_equal(res, expected) -def test_normalizations(model2): +def test_normalizations(model2) -> None: expected = { "fac1": { "loadings": [ @@ -119,7 +113,7 @@ def test_normalizations(model2): "intercepts": [{}, {}, {}, {}, {}, {}, {}, {}], }, } - res = process_model(model2)["normalizations"] + 
res = process_model(model2).normalizations assert res == expected @@ -129,133 +123,127 @@ def test_normalizations(model2): # ====================================================================================== -def test_anchoring_and_endogenous_factors_work_together(): - with open(TEST_DIR / "model2.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) - # Set fac3 to be endogenous - model_dict["factors"]["fac3"]["is_endogenous"] = True - del model_dict["stagemap"] +def _make_fac3_endogenous(model): + """Return a new model with fac3 set as endogenous.""" + fac3 = model.factors["fac3"] + new_fac3 = replace(fac3, is_endogenous=True) + new_factors = dict(model.factors) | {"fac3": new_fac3} + return model._replace(factors=new_factors) + + +def test_anchoring_and_endogenous_factors_work_together() -> None: + model = _make_fac3_endogenous(MODEL2)._replace(stagemap=None) # Should not raise - anchoring and endogenous factors now work together - result = process_model(model_dict) + result = process_model(model) # Verify anchoring is enabled - assert result["anchoring"]["anchoring"] - assert result["anchoring"]["factors"] == ["fac1"] + assert result.anchoring.anchoring + assert result.anchoring.factors == ("fac1",) # Verify endogenous factors are enabled - assert result["endogenous_factors_info"]["has_endogenous_factors"] + assert result.endogenous_factors_info.has_endogenous_factors # Verify dimensions - assert result["dimensions"]["n_periods"] == 8 - assert result["dimensions"]["n_aug_periods"] == 16 + assert result.dimensions.n_periods == 8 + assert result.dimensions.n_aug_periods == 16 # Verify update_info has anchoring entries for all aug_periods - anchoring_updates = result["update_info"][ - result["update_info"]["purpose"] == "anchoring" - ] + anchoring_updates = result.update_info[result.update_info["purpose"] == "anchoring"] assert ( len(anchoring_updates) == 16 ) # One per aug_period for the one anchored factor -def 
test_stagemap_with_endogenous_factors_wrong_labels(): - with open(TEST_DIR / "model2.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) - # Set fac3 to be endogenous - model_dict["factors"]["fac3"]["is_endogenous"] = True - model_dict["stagemap"] = [0, 0, 1, 1, 2, 2, 4] - del model_dict["anchoring"] +def test_stagemap_with_endogenous_factors_wrong_labels() -> None: + model = _make_fac3_endogenous(MODEL2)._replace( + stagemap=(0, 0, 1, 1, 2, 2, 4), + anchoring=None, + ) with pytest.raises(ValueError, match="Invalid stage map:"): - process_model(model_dict) + process_model(model) -def test_stagemap_with_endogenous_factors(): - with open(TEST_DIR / "model2.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) - # Set fac3 to be endogenous - model_dict["factors"]["fac3"]["is_endogenous"] = True - model_dict["stagemap"] = [0, 0, 1, 1, 2, 2, 3] - del model_dict["anchoring"] - model = process_model(model_dict) - assert model["labels"]["stagemap"] == model_dict["stagemap"] - assert model["labels"]["stages"] == [0, 1, 2, 3] - assert model["labels"]["aug_stagemap"] == [0, 1, 0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7] +def test_stagemap_with_endogenous_factors() -> None: + stagemap = (0, 0, 1, 1, 2, 2, 3) + model = _make_fac3_endogenous(MODEL2)._replace( + stagemap=stagemap, + anchoring=None, + ) + processed = process_model(model) + assert processed.labels.stagemap == stagemap + assert processed.labels.stages == (0, 1, 2, 3) + assert processed.labels.aug_stagemap == (0, 1, 0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7) @pytest.fixture def model2_inv(): - with open(TEST_DIR / "model2.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) - # Set fac3 to be endogenous - model_dict["factors"]["fac3"]["is_endogenous"] = True - del model_dict["stagemap"] - del model_dict["anchoring"] - return model_dict + return _make_fac3_endogenous(MODEL2)._replace( + stagemap=None, + anchoring=None, + ) -def test_with_endog_has_endogenous_factors(model2_inv): +def 
test_with_endog_has_endogenous_factors(model2_inv) -> None: assert ( - process_model(model2_inv)["endogenous_factors_info"]["has_endogenous_factors"] - == True + process_model(model2_inv).endogenous_factors_info.has_endogenous_factors == True ) -def test_with_endog_dimensions(model2_inv): - res = process_model(model2_inv)["dimensions"] - assert res["n_latent_factors"] == 3 - assert res["n_observed_factors"] == 0 - assert res["n_all_factors"] == 3 - assert res["n_aug_periods"] == 16 - assert res["n_periods"] == 8 - assert res["n_controls"] == 2 - assert res["n_mixtures"] == 1 +def test_with_endog_dimensions(model2_inv) -> None: + res = process_model(model2_inv).dimensions + assert res.n_latent_factors == 3 + assert res.n_observed_factors == 0 + assert res.n_all_factors == 3 + assert res.n_aug_periods == 16 + assert res.n_periods == 8 + assert res.n_controls == 2 + assert res.n_mixtures == 1 -def test_with_endog_labels(model2_inv): - res = process_model(model2_inv)["labels"] +def test_with_endog_labels(model2_inv) -> None: + res = process_model(model2_inv).labels n_aug_periods = 16 - assert res["latent_factors"] == ["fac1", "fac2", "fac3"] - assert res["observed_factors"] == [] - assert res["all_factors"] == ["fac1", "fac2", "fac3"] - assert res["controls"] == ["constant", "x1"] - assert res["aug_periods"] == list(range(n_aug_periods)) - assert res["periods"] == [0, 1, 2, 3, 4, 5, 6, 7] - assert res["aug_stagemap"] == list(range(n_aug_periods - 2)) - assert res["aug_stages"] == list(range(n_aug_periods - 2)) + assert res.latent_factors == ("fac1", "fac2", "fac3") + assert res.observed_factors == () + assert res.all_factors == ("fac1", "fac2", "fac3") + assert res.controls == ("constant", "x1") + assert res.aug_periods == tuple(range(n_aug_periods)) + assert res.periods == (0, 1, 2, 3, 4, 5, 6, 7) + assert res.aug_stagemap == tuple(range(n_aug_periods - 2)) + assert res.aug_stages == tuple(range(n_aug_periods - 2)) -def test_with_endog_estimation_options(model2_inv): 
- res = process_model(model2_inv)["estimation_options"] - assert res["sigma_points_scale"] == 2 - assert res["robust_bounds"] - assert res["bounds_distance"] == 0.001 +def test_with_endog_estimation_options(model2_inv) -> None: + res = process_model(model2_inv).estimation_options + assert res.sigma_points_scale == 2 + assert res.robust_bounds + assert res.bounds_distance == 0.001 -def test_with_endog_anchoring_is_empty(model2_inv): - res = process_model(model2_inv)["anchoring"] - assert res["outcomes"] == {} - assert res["factors"] == [] - assert res["free_controls"] is False - assert res["free_constant"] is False - assert res["free_loadings"] is False +def test_with_endog_anchoring_is_empty(model2_inv) -> None: + res = process_model(model2_inv).anchoring + assert res.outcomes == {} + assert res.factors == () + assert res.free_controls is False + assert res.free_constant is False + assert res.free_loadings is False -def test_with_endog_transition_info(model2_inv): - res = process_model(model2_inv)["transition_info"] +def test_with_endog_transition_info(model2_inv) -> None: + res = process_model(model2_inv).transition_info - assert isinstance(res, dict) - assert callable(res["func"]) + assert isinstance(res, TransitionInfo) + assert callable(res.func) - assert list(inspect.signature(res["func"]).parameters) == ["params", "states"] + assert list(inspect.signature(res.func).parameters) == ["params", "states"] -def test_with_endog_update_info(model2_inv): - res = process_model(model2_inv)["update_info"] - test_dir = Path(__file__).parent.resolve() +def test_with_endog_update_info(model2_inv) -> None: + res = process_model(model2_inv).update_info expected = pd.read_csv( - test_dir / "model2_with_endog_correct_update_info.csv", + TEST_DATA_DIR / "model2_with_endog_correct_update_info.csv", index_col=["aug_period", "variable"], ) assert_frame_equal(res, expected) -def test_with_endog_normalizations(model2_inv): +def test_with_endog_normalizations(model2_inv) -> None: 
expected = { "fac1": { "loadings": [ @@ -372,7 +360,7 @@ def test_with_endog_normalizations(model2_inv): ], }, } - res = process_model(model2_inv)["normalizations"] + res = process_model(model2_inv).normalizations assert res == expected @@ -382,35 +370,34 @@ def test_with_endog_normalizations(model2_inv): # ====================================================================================== -def test_model_has_endogenous_factors_not_specified(): - factors = {"a": {}} - assert get_has_endogenous_factors(factors) == False +def _fspec(**kwargs) -> FactorSpec: + """Create a minimal FactorSpec for unit tests.""" + return FactorSpec(measurements=((),), **kwargs) -def test_get_has_endogenous_factors_wrong_type(): - factors = {"a": {"is_endogenous": 3}} - with pytest.raises(ValueError): - get_has_endogenous_factors(factors) +def test_model_has_endogenous_factors_not_specified() -> None: + factors = {"a": _fspec()} + assert get_has_endogenous_factors(factors) == False -def test_get_has_endogenous_factors_wrong_constellation(): - factors = {"a": {"is_endogenous": False, "is_correction": True}} +def test_get_has_endogenous_factors_wrong_constellation() -> None: + factors = {"a": _fspec(is_endogenous=False, is_correction=True)} with pytest.raises(ValueError): get_has_endogenous_factors(factors) -def test_get_has_endogenous_factors_indeed(): +def test_get_has_endogenous_factors_indeed() -> None: factors = { - "a": {"is_endogenous": True, "is_correction": False}, - "b": {"is_endogenous": False, "is_correction": False}, + "a": _fspec(is_endogenous=True, is_correction=False), + "b": _fspec(is_endogenous=False, is_correction=False), } assert get_has_endogenous_factors(factors) == True -def test_get_has_endogenous_factors_and_correction(): +def test_get_has_endogenous_factors_and_correction() -> None: factors = { - "a": {"is_endogenous": True, "is_correction": False}, - "b": {"is_endogenous": False, "is_correction": False}, - "c": {"is_endogenous": True, "is_correction": True}, + 
"a": _fspec(is_endogenous=True, is_correction=False), + "b": _fspec(is_endogenous=False, is_correction=False), + "c": _fspec(is_endogenous=True, is_correction=True), } assert get_has_endogenous_factors(factors) == True diff --git a/tests/test_qr.py b/tests/test_qr.py index 5a35475f..b6f74532 100644 --- a/tests/test_qr.py +++ b/tests/test_qr.py @@ -1,8 +1,11 @@ +"""Tests for custom QR decomposition.""" + import jax import jax.numpy as jnp import numpy as np import pytest from numpy.testing import assert_array_almost_equal as aaae +from numpy.typing import NDArray from skillmodels.qr import qr_gpu @@ -10,26 +13,30 @@ @pytest.fixture -def cov_matrix(): +def cov_matrix() -> NDArray[np.floating]: + """Create a covariance matrix for testing.""" fixedrng = np.random.default_rng(SEED) factorized = fixedrng.uniform(low=-1, high=3, size=(7, 7)) - cov = factorized @ factorized.T * 0.5 + np.eye(7) - return cov + return factorized @ factorized.T * 0.5 + np.eye(7) -def test_q(cov_matrix): +def test_q(cov_matrix: NDArray[np.floating]) -> None: + """Test Q matrix from QR decomposition matches JAX implementation.""" q_gpu, _ = qr_gpu(cov_matrix) q_jax, _ = jnp.linalg.qr(cov_matrix) aaae(q_gpu, q_jax) -def test_r(cov_matrix): +def test_r(cov_matrix: NDArray[np.floating]) -> None: + """Test R matrix from QR decomposition matches JAX implementation.""" _, r_gpu = qr_gpu(cov_matrix) _, r_jax = jnp.linalg.qr(cov_matrix) aaae(r_gpu, r_jax) -def test_grad_qr(cov_matrix): +def test_grad_qr(cov_matrix: NDArray[np.floating]) -> None: + """Test gradient of QR decomposition matches JAX implementation.""" + def f_jax(a): q, r = jnp.linalg.qr(a) return jnp.sum(r) + jnp.sum(q) diff --git a/tests/test_simulate_data.py b/tests/test_simulate_data.py index f04b658d..c2b742ab 100644 --- a/tests/test_simulate_data.py +++ b/tests/test_simulate_data.py @@ -1,40 +1,43 @@ """Tests for functions in simulate_data module.""" +from dataclasses import replace from pathlib import Path import numpy as np import 
pandas as pd import pytest -import yaml from numpy.testing import assert_array_almost_equal as aaae -from skillmodels.simulate_data import measurements_from_states, simulate_dataset +from skillmodels.config import TEST_DATA_DIR +from skillmodels.process_model import process_model +from skillmodels.simulate_data import ( + _collapse_aug_periods_to_periods, + measurements_from_states, + simulate_dataset, +) +from skillmodels.test_data.model2 import MODEL2 -# importing the TEST_DIR from config does not work for test run in conda build -TEST_DIR = Path(__file__).parent.resolve() +REGRESSION_VAULT = Path(__file__).parent / "regression_vault" @pytest.fixture def model2(): - with open(TEST_DIR / "model2.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) - return model_dict + return MODEL2 @pytest.fixture def model2_data(): - data = pd.read_stata(TEST_DIR / "model2_simulated_data.dta") - data = data.set_index(["caseid", "period"]) - return data + data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") + return data.set_index(["caseid", "period"]) -def test_simulate_dataset(model2, model2_data): - model_dict = model2 - params = pd.read_csv(TEST_DIR / "regression_vault" / "one_stage_anchoring.csv") +def test_simulate_dataset(model2, model2_data) -> None: + model = model2 + params = pd.read_csv(REGRESSION_VAULT / "one_stage_anchoring.csv") params = params.set_index(["category", "period", "name1", "name2"]) calculated = simulate_dataset( - model_dict=model_dict, + model_spec=model, params=params, data=model2_data, ) @@ -48,13 +51,80 @@ def test_simulate_dataset(model2, model2_data): assert np.allclose(ratio, expected_ratio) -def test_measurements_from_factors(): - inputs = { - "states": np.array([[0, 0, 0], [1, 1, 1]]), - "controls": np.array([[1, 1], [1, 1]]), - "loadings": np.array([[0.3, 0.3, 0.3], [0.3, 0.3, 0.3], [0.3, 0.3, 0.3]]), - "control_params": np.array([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]), - "sds": np.zeros(3), - } +def 
test_measurements_from_factors() -> None: + rng = np.random.default_rng(42) + states = np.array([[0, 0, 0], [1, 1, 1]], dtype=np.float64) + controls = np.array([[1, 1], [1, 1]], dtype=np.float64) + loadings = np.array([[0.3, 0.3, 0.3], [0.3, 0.3, 0.3], [0.3, 0.3, 0.3]]) + control_params = np.array([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]) + sds = np.zeros(3) expected = np.array([[1, 1, 1], [1.9, 1.9, 1.9]]) - aaae(measurements_from_states(**inputs), expected) + aaae( + measurements_from_states(rng, states, controls, loadings, control_params, sds), + expected, + ) + + +@pytest.fixture +def model2_with_endogenous(): + """Model2 with fac3 set as endogenous factor.""" + fac3 = MODEL2.factors["fac3"] + new_fac3 = replace(fac3, is_endogenous=True) + new_factors = dict(MODEL2.factors) | {"fac3": new_fac3} + return MODEL2._replace( + factors=new_factors, + stagemap=None, + anchoring=None, + ) + + +def test_collapse_aug_periods_to_periods_with_endogenous_factors( + model2_with_endogenous, +) -> None: + """Test that _collapse_aug_periods_to_periods works with endogenous factors. + + This is a regression test for a bug where MeasurementType enum values were + compared against strings in pandas queries, causing empty results. 
+ """ + rng = np.random.default_rng(42) + processed_model = process_model(model2_with_endogenous) + factors = processed_model.labels.latent_factors + + # Create a mock aug_latent_data DataFrame with aug_period column + n_obs = 5 + n_aug_periods = ( + processed_model.dimensions.n_aug_periods - 1 + ) # Exclude last half-period + records = [] + for aug_p in range(n_aug_periods): + for obs_id in range(n_obs): + record = {"id": obs_id, "aug_period": aug_p} + for fac in factors: + record[fac] = rng.standard_normal() + records.append(record) + aug_latent_data = pd.DataFrame(records) + + result = _collapse_aug_periods_to_periods( + df=aug_latent_data, + factors=factors, + aug_periods_to_periods=processed_model.labels.aug_periods_to_periods, + endogenous_factors_info=processed_model.endogenous_factors_info, + ) + + # The result should not be empty + assert len(result) > 0, "Collapsed DataFrame should not be empty" + + # Should have 'period' column, not 'aug_period' + assert "period" in result.columns + assert "aug_period" not in result.columns + + # Should have all factor columns + for fac in factors: + assert fac in result.columns + + # Should have correct number of unique periods (half of aug_periods) + expected_n_periods = processed_model.dimensions.n_periods + assert result["period"].nunique() == expected_n_periods + + # Should have all observations for each period + assert len(result) == n_obs * expected_n_periods diff --git a/tests/test_transition_functions.py b/tests/test_transition_functions.py index 1edc83c7..0a84e7e7 100644 --- a/tests/test_transition_functions.py +++ b/tests/test_transition_functions.py @@ -1,6 +1,5 @@ import jax import jax.numpy as jnp -import numpy as np from numpy.testing import assert_array_almost_equal as aaae from skillmodels.transition_functions import ( @@ -16,15 +15,15 @@ jax.config.update("jax_enable_x64", True) -def test_linear(): - states = np.arange(3) - params = np.array([0.1, 0.2, 0.3, 0.4]) +def test_linear() -> None: + states = 
jnp.arange(3) + params = jnp.array([0.1, 0.2, 0.3, 0.4]) expected = 1.2 aaae(linear(states, params), expected) -def test_translog(): - all_states = np.array( +def test_translog() -> None: + all_states = jnp.array( [ [2, 0, 0], [0, 3, 0], @@ -38,7 +37,7 @@ def test_translog(): ], ) - params = np.array( + params = jnp.array( [ # linear terms 0.2, @@ -60,19 +59,19 @@ def test_translog(): expected_translog = [0.76, 0.7, 1.32, 0.04, 0.77, 0.1, -0.07, 0.573, 76.72] for states, expected in zip(all_states, expected_translog, strict=False): - calculated = translog(states, params) + calculated = translog(jnp.asarray(states), params) aaae(calculated, expected) -def test_log_ces(): - states = np.array([3, 7.5]) +def test_log_ces() -> None: + states = jnp.array([3, 7.5]) params = jnp.array([0.4, 0.6, 2]) expected = 7.244628323025 calculated = log_ces(states, params) aaae(calculated, expected) -def test_where_all_but_one_gammas_are_zero(): +def test_where_all_but_one_gammas_are_zero() -> None: """This has to be tested, becaus it leads to an underflow in the log step.""" states = jnp.ones(3) params = jnp.array([0, 0, 1, -0.5]) @@ -81,12 +80,12 @@ def test_where_all_but_one_gammas_are_zero(): aaae(calculated, expected) -def test_constant(): - assert constant("bla", "blubb") == "bla" +def test_constant() -> None: + assert constant("bla", "blubb") == "bla" # ty: ignore[invalid-argument-type] -def test_robust_translog(): - all_states = np.array( +def test_robust_translog() -> None: + all_states = jnp.array( [ [2, 0, 0], [0, 3, 0], @@ -100,7 +99,7 @@ def test_robust_translog(): ], ) - params = np.array( + params = jnp.array( [ # linear terms 0.2, @@ -122,19 +121,19 @@ def test_robust_translog(): expected_translog = [0.76, 0.7, 1.32, 0.04, 0.77, 0.1, -0.07, 0.573, 76.72] for states, expected in zip(all_states, expected_translog, strict=False): - calculated = robust_translog(states, params) + calculated = robust_translog(jnp.asarray(states), params) aaae(calculated, expected) -def 
test_log_ces_general(): - states = np.array([3, 7.5]) +def test_log_ces_general() -> None: + states = jnp.array([3, 7.5]) params = jnp.array([0.4, 0.6, 2, 2, 0.5]) expected = 7.244628323025 calculated = log_ces_general(states, params) aaae(calculated, expected) -def test_log_ces_general_where_all_but_one_gammas_are_zero(): +def test_log_ces_general_where_all_but_one_gammas_are_zero() -> None: """This has to be tested, becaus it leads to an underflow in the log step.""" states = jnp.ones(3) params = jnp.array([0, 0, 1, -0.5, -0.5, -0.5, -2]) @@ -143,8 +142,8 @@ def test_log_ces_general_where_all_but_one_gammas_are_zero(): aaae(calculated, expected) -def test_param_names_log_ces_general(): - factors = ["a", "b"] +def test_param_names_log_ces_general() -> None: + factors = ("a", "b") expected = ["a", "b", "sigma_a", "sigma_b", "tfp"] calculated = params_log_ces_general(factors) assert calculated == expected diff --git a/tests/test_utilities.py b/tests/test_utilities.py index 8041fc98..5540f9c6 100644 --- a/tests/test_utilities.py +++ b/tests/test_utilities.py @@ -5,20 +5,16 @@ """ -from pathlib import Path - import numpy as np import pandas as pd import pytest -import yaml from pandas.testing import assert_frame_equal, assert_index_equal +from skillmodels.model_spec import ModelSpec from skillmodels.process_model import process_model +from skillmodels.test_data.model2 import MODEL2 from skillmodels.utilities import ( - _get_params_index_from_model_dict, - _remove_from_dict, - _remove_from_list, - _shorten_if_necessary, + _get_params_index, extract_factors, reduce_n_periods, remove_controls, @@ -29,28 +25,25 @@ update_parameter_values, ) -# importing the TEST_DIR from config does not work for test run in conda build -TEST_DIR = Path(__file__).parent.resolve() - @pytest.fixture def model2(): - with open(TEST_DIR / "model2.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) - return model_dict + return MODEL2 @pytest.mark.parametrize("factors", ["fac2", 
["fac2"]]) -def test_extract_factors_single(model2, factors): +def test_extract_factors_single(model2, factors) -> None: reduced = extract_factors(factors, model2) - assert list(reduced["factors"]) == ["fac2"] - assert list(model2["factors"]) == ["fac1", "fac2", "fac3"] - assert "anchoring" not in reduced - assert model2["anchoring"]["outcomes"] == {"fac1": "Q1"} + assert isinstance(reduced, ModelSpec) + assert list(reduced.factors) == ["fac2"] + assert list(model2.factors) == ["fac1", "fac2", "fac3"] + assert reduced.anchoring is None + assert model2.anchoring is not None + assert dict(model2.anchoring.outcomes) == {"fac1": "Q1"} process_model(reduced) -def test_update_parameter_values(): +def test_update_parameter_values() -> None: params = pd.DataFrame() params["value"] = np.arange(5, dtype=np.int64) @@ -67,71 +60,78 @@ def test_update_parameter_values(): @pytest.mark.parametrize("factors", ["fac2", ["fac2"]]) -def test_remove_factors(model2, factors): +def test_remove_factors(model2, factors) -> None: reduced = remove_factors(factors, model2) - assert list(reduced["factors"]) == ["fac1", "fac3"] - assert list(model2["factors"]) == ["fac1", "fac2", "fac3"] - assert "anchoring" in reduced + assert isinstance(reduced, ModelSpec) + assert list(reduced.factors) == ["fac1", "fac3"] + assert list(model2.factors) == ["fac1", "fac2", "fac3"] + assert reduced.anchoring is not None process_model(reduced) @pytest.mark.parametrize("measurements", ["y5", ["y5"]]) -def test_remove_measurements(model2, measurements): +def test_remove_measurements(model2, measurements) -> None: reduced = remove_measurements(measurements, model2) - assert reduced["factors"]["fac2"]["measurements"] == [["y4", "y6"]] * 8 - assert "y5" in model2["factors"]["fac2"]["measurements"][0] + assert isinstance(reduced, ModelSpec) + for period_meas in reduced.factors["fac2"].measurements: + assert list(period_meas) == ["y4", "y6"] + assert "y5" in model2.factors["fac2"].measurements[0] 
process_model(reduced) @pytest.mark.parametrize("controls", ["x1", ["x1"]]) -def test_remove_controls(model2, controls): +def test_remove_controls(model2, controls) -> None: reduced = remove_controls(controls, model2) - assert "controls" not in reduced - assert "controls" in model2 + assert isinstance(reduced, ModelSpec) + assert reduced.controls == () + assert model2.controls == ("x1",) process_model(reduced) -def test_reduce_n_periods(model2): +def test_reduce_n_periods(model2) -> None: reduced = reduce_n_periods(model2, 1) - assert reduced["factors"]["fac1"]["measurements"] == [["y1", "y2", "y3"]] - assert reduced["factors"]["fac2"]["normalizations"]["loadings"] == [{"y4": 1}] + assert isinstance(reduced, ModelSpec) + assert list(reduced.factors["fac1"].measurements[0]) == ["y1", "y2", "y3"] + assert len(reduced.factors["fac1"].measurements) == 1 + norms = reduced.factors["fac2"].normalizations + assert norms is not None + assert dict(norms.loadings[0]) == {"y4": 1} + assert len(norms.loadings) == 1 process_model(reduced) -def test_switch_linear_to_translog(model2): +def test_switch_linear_to_translog(model2) -> None: switched = switch_linear_to_translog(model2) - assert switched["factors"]["fac2"]["transition_function"] == "translog" + assert isinstance(switched, ModelSpec) + assert switched.factors["fac2"].transition_function == "translog" -def test_switch_linear_and_translog_back_and_forth(model2): +def test_switch_linear_and_translog_back_and_forth(model2) -> None: with_translog = switch_linear_to_translog(model2) + assert isinstance(with_translog, ModelSpec) with_linear = switch_translog_to_linear(with_translog) - assert model2 == with_linear - - -@pytest.mark.parametrize("to_remove", ["a", ["a"]]) -def test_remove_from_list(to_remove): - list_ = ["a", "b", "c"] - calculated = _remove_from_list(list_, to_remove) - assert calculated == ["b", "c"] - assert list_ == ["a", "b", "c"] - - -@pytest.mark.parametrize("to_remove", ["a", ["a"]]) -def 
test_remove_from_dict(to_remove): - dict_ = {"a": 1, "b": 2, "c": 3} - calculated = _remove_from_dict(dict_, to_remove) - assert calculated == {"b": 2, "c": 3} - assert dict_ == {"a": 1, "b": 2, "c": 3} - - -def test_reduce_params_via_extract_factors(model2): - model_dict = reduce_n_periods(model2, 2) - - full_index = _get_params_index_from_model_dict(model_dict) + assert isinstance(with_linear, ModelSpec) + # Check equivalence of factors + for name in model2.factors: + orig = model2.factors[name] + back = with_linear.factors[name] + assert orig.measurements == back.measurements + assert orig.normalizations == back.normalizations + assert orig.transition_function == back.transition_function + assert orig.is_endogenous == back.is_endogenous + assert orig.is_correction == back.is_correction + + +def test_reduce_params_via_extract_factors(model2) -> None: + model = reduce_n_periods(model2, 2) + assert isinstance(model, ModelSpec) + + full_index = _get_params_index(model) params = pd.DataFrame(columns=["value"], index=full_index) - _, reduced_params = extract_factors("fac3", model_dict, params) + result = extract_factors("fac3", model, params) + assert not isinstance(result, ModelSpec) + _, reduced_params = result expected_index = pd.MultiIndex.from_tuples( [ @@ -158,12 +158,15 @@ def test_reduce_params_via_extract_factors(model2): assert_index_equal(reduced_params.index, expected_index) -def test_extend_params_via_switch_to_translog(model2): - model_dict = reduce_n_periods(model2, 2) - normal_index = _get_params_index_from_model_dict(model_dict) +def test_extend_params_via_switch_to_translog(model2) -> None: + model = reduce_n_periods(model2, 2) + assert isinstance(model, ModelSpec) + normal_index = _get_params_index(model) params = pd.DataFrame(columns=["value"], index=normal_index) - _, extended_params = switch_linear_to_translog(model_dict, params) + result = switch_linear_to_translog(model, params) + assert not isinstance(result, ModelSpec) + _, extended_params = 
result added_index = extended_params.index.difference(normal_index) @@ -182,12 +185,3 @@ def test_extend_params_via_switch_to_translog(model2): assert_index_equal(added_index, expected_added_index) assert extended_params.loc[added_index, "value"].unique()[0] == 0.05 - - -def test_shorten_if_necessary(): - list_ = list(range(3)) - not_necessary = _shorten_if_necessary(list_, 5) - assert not_necessary == list_ - - necessary = _shorten_if_necessary(list_, 2) - assert necessary == [0, 1] diff --git a/tests/test_visualize_factor_distributions.py b/tests/test_visualize_factor_distributions.py index 1895301d..74574fab 100644 --- a/tests/test_visualize_factor_distributions.py +++ b/tests/test_visualize_factor_distributions.py @@ -1,10 +1,13 @@ from pathlib import Path import pandas as pd -import yaml +from skillmodels.config import TEST_DATA_DIR +from skillmodels.filtered_states import get_filtered_states from skillmodels.maximization_inputs import get_maximization_inputs +from skillmodels.process_model import process_model from skillmodels.simulate_data import simulate_dataset +from skillmodels.test_data.model2 import MODEL2 from skillmodels.visualize_factor_distributions import ( bivariate_density_contours, bivariate_density_surfaces, @@ -12,37 +15,35 @@ univariate_densities, ) -# importing the TEST_DIR from config does not work for test run in conda build -TEST_DIR = Path(__file__).parent.resolve() +REGRESSION_VAULT = Path(__file__).parent / "regression_vault" -def test_visualize_factor_distributions_runs_with_filtered_states(): - with open(TEST_DIR / "model2.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) +def test_visualize_factor_distributions_runs_with_filtered_states() -> None: + model = MODEL2 - params = pd.read_csv(TEST_DIR / "regression_vault" / "one_stage_anchoring.csv") + params = pd.read_csv(REGRESSION_VAULT / "one_stage_anchoring.csv") params = params.set_index(["category", "period", "name1", "name2"]) - data = pd.read_stata(TEST_DIR / 
"model2_simulated_data.dta") - data.set_index(["caseid", "period"], inplace=True) + data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") + data = data.set_index(["caseid", "period"]) - max_inputs = get_maximization_inputs(model_dict, data) + max_inputs = get_maximization_inputs(model, data) params = params.loc[max_inputs["params_template"].index] kde = univariate_densities( data=data, - model_dict=model_dict, + model_spec=model, params=params, period=1, ) contours = bivariate_density_contours( data=data, - model_dict=model_dict, + model_spec=model, params=params, period=1, ) surfaces = bivariate_density_surfaces( data=data, - model_dict=model_dict, + model_spec=model, params=params, period=1, ) @@ -53,34 +54,136 @@ def test_visualize_factor_distributions_runs_with_filtered_states(): ) -def test_visualize_factor_distributions_runs_with_simulated_states(): - with open(TEST_DIR / "model2.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) +def test_visualize_factor_distributions_runs_with_simulated_states() -> None: + model = MODEL2 - data = pd.read_stata(TEST_DIR / "model2_simulated_data.dta") - data.set_index(["caseid", "period"], inplace=True) + data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") + data = data.set_index(["caseid", "period"]) - params = pd.read_csv(TEST_DIR / "regression_vault" / "one_stage_anchoring.csv") + params = pd.read_csv(REGRESSION_VAULT / "one_stage_anchoring.csv") params = params.set_index(["category", "period", "name1", "name2"]) - max_inputs = get_maximization_inputs(model_dict, data) + max_inputs = get_maximization_inputs(model, data) params = params.loc[max_inputs["params_template"].index] - latent_data = simulate_dataset(model_dict, params, data=data, policies=None)[ + latent_data = simulate_dataset(model, params, data=data, policies=None)[ "aug_unanchored_states" ]["states"] kde = univariate_densities( data=data, states=latent_data, - model_dict=model_dict, + model_spec=model, params=params, 
period=1, ) contours = bivariate_density_contours( data=data, states=latent_data, - model_dict=model_dict, + model_spec=model, + params=params, + period=1, + ) + combine_distribution_plots( + kde_plots=kde, + contour_plots=contours, + surface_plots=None, + ) + + +def test_visualize_factor_distributions_with_period_indexed_states() -> None: + """Test visualization with states indexed by (id, period) without aug_period. + + This mimics the scenario where states come from a downstream task that has + already mapped aug_period to period and dropped the aug_period column. + """ + model = MODEL2 + + data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") + data = data.set_index(["caseid", "period"]) + + params = pd.read_csv(REGRESSION_VAULT / "one_stage_anchoring.csv") + params = params.set_index(["category", "period", "name1", "name2"]) + + max_inputs = get_maximization_inputs(model, data) + params = params.loc[max_inputs["params_template"].index] + + # Get filtered states and convert to (id, period) index without aug_period + filtered_states = get_filtered_states(model_spec=model, data=data, params=params)[ + "anchored_states" + ]["states"] + processed = process_model(model) + + # Add period column and drop aug_period + # (mimics task_filtered_states_and_measurements) + filtered_states["period"] = filtered_states["aug_period"].map( + processed.labels.aug_periods_to_periods + ) + filtered_states = filtered_states.drop(columns=["aug_period"]).set_index( + ["id", "period"] + ) + + kde = univariate_densities( + data=data, + states=filtered_states, + model_spec=model, + params=params, + period=1, + ) + contours = bivariate_density_contours( + data=data, + states=filtered_states, + model_spec=model, + params=params, + period=1, + ) + combine_distribution_plots( + kde_plots=kde, + contour_plots=contours, + surface_plots=None, + ) + + +def test_visualize_factor_distributions_with_both_aug_period_and_period() -> None: + """Test visualization with states having both 
aug_period and period. + + This mimics the scenario where states have aug_period as a column and period + in the index (or both as columns). + """ + model = MODEL2 + + data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") + data = data.set_index(["caseid", "period"]) + + params = pd.read_csv(REGRESSION_VAULT / "one_stage_anchoring.csv") + params = params.set_index(["category", "period", "name1", "name2"]) + + max_inputs = get_maximization_inputs(model, data) + params = params.loc[max_inputs["params_template"].index] + + # Get filtered states and add period while keeping aug_period + filtered_states = get_filtered_states(model_spec=model, data=data, params=params)[ + "anchored_states" + ]["states"] + processed = process_model(model) + + # Add period column but keep aug_period (both are present) + filtered_states["period"] = filtered_states["aug_period"].map( + processed.labels.aug_periods_to_periods + ) + filtered_states = filtered_states.set_index(["id", "period"]) + + kde = univariate_densities( + data=data, + states=filtered_states, + model_spec=model, + params=params, + period=1, + ) + contours = bivariate_density_contours( + data=data, + states=filtered_states, + model_spec=model, params=params, period=1, ) diff --git a/tests/test_visualize_transition_equations.py b/tests/test_visualize_transition_equations.py index cb811631..3ce950fc 100644 --- a/tests/test_visualize_transition_equations.py +++ b/tests/test_visualize_transition_equations.py @@ -1,36 +1,34 @@ from pathlib import Path import pandas as pd -import yaml +from skillmodels.config import TEST_DATA_DIR from skillmodels.maximization_inputs import get_maximization_inputs +from skillmodels.test_data.model2 import MODEL2 from skillmodels.visualize_transition_equations import ( combine_transition_plots, get_transition_plots, ) -TEST_DIR = Path(__file__).parent.resolve() +REGRESSION_VAULT = Path(__file__).parent / "regression_vault" -def test_visualize_transition_equations_runs(): - with 
open(TEST_DIR / "model2.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) +def test_visualize_transition_equations_runs() -> None: + model = MODEL2.with_added_observed_factors("ob1") - model_dict["observed_factors"] = ["ob1"] - - params = pd.read_csv(TEST_DIR / "regression_vault" / "one_stage_anchoring.csv") + params = pd.read_csv(REGRESSION_VAULT / "one_stage_anchoring.csv") params = params.set_index(["category", "period", "name1", "name2"]) - data = pd.read_stata(TEST_DIR / "model2_simulated_data.dta") - data.set_index(["caseid", "period"], inplace=True) + data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") + data = data.set_index(["caseid", "period"]) data["ob1"] = 0 - max_inputs = get_maximization_inputs(model_dict, data) + max_inputs = get_maximization_inputs(model, data) full_index = max_inputs["params_template"].index params = params.reindex(full_index) params["value"] = params["value"].fillna(0) subplots = get_transition_plots( - model_dict=model_dict, + model_spec=model, params=params, period=0, quantiles_of_other_factors=[0.1, 0.25, 0.5, 0.75, 0.9], @@ -38,7 +36,7 @@ def test_visualize_transition_equations_runs(): ) combine_transition_plots(subplots) subplots = get_transition_plots( - model_dict=model_dict, + model_spec=model, params=params, period=0, quantiles_of_other_factors=None,