From 71e9d59bea06d7f067dc4d0c488ce2dc13db371d Mon Sep 17 00:00:00 2001
From: Hans-Martin von Gaudecker
Date: Thu, 8 Jan 2026 18:53:59 +0100
Subject: [PATCH 01/27] Refactor model configuration to use frozen dataclasses
 instead of dicts
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Introduce strongly-typed dataclasses for model configuration:

- Dimensions, Labels, Anchoring, EstimationOptions, TransitionInfo
- FactorEndogenousInfo, EndogenousFactorsInfo

This improves type safety and enables IDE autocompletion while keeping
user-facing model_dict as a plain dictionary.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5
---
 src/skillmodels/check_model.py                |  68 ++--
 src/skillmodels/constraints.py                |  59 +--
 src/skillmodels/correlation_heatmap.py        |  18 +-
 src/skillmodels/filtered_states.py            |  12 +-
 src/skillmodels/likelihood_function.py        |   8 +-
 src/skillmodels/likelihood_function_debug.py  |   8 +-
 src/skillmodels/maximization_inputs.py        |  20 +-
 src/skillmodels/params_index.py               |  65 ++--
 src/skillmodels/parse_params.py               |  40 +-
 src/skillmodels/process_data.py               |  47 +--
 src/skillmodels/process_debug_data.py         |   2 +-
 src/skillmodels/process_model.py              | 345 ++++++++++--------
 src/skillmodels/simulate_data.py              |  56 ++-
 src/skillmodels/transition_functions.py       |   6 +-
 src/skillmodels/types.py                      | 202 ++++++++++
 src/skillmodels/utilities.py                  |   2 +-
 .../visualize_factor_distributions.py         |  10 +-
 .../visualize_transition_equations.py         |  46 ++-
 tests/test_constraints.py                     |  93 +++--
 tests/test_correlation_heatmap.py             |  15 +-
 tests/test_params_index.py                    |  15 +-
 tests/test_parse_params.py                    |  11 +-
 tests/test_process_data.py                    |  31 +-
 tests/test_process_model.py                   | 122 +++---
 24 files changed, 830 insertions(+), 471 deletions(-)
 create mode 100644 src/skillmodels/types.py

diff --git a/src/skillmodels/check_model.py b/src/skillmodels/check_model.py
index 41dc1e4c..5919ef02 100644
--- a/src/skillmodels/check_model.py
+++ b/src/skillmodels/check_model.py
@@ -1,7 +1,15 @@
 import numpy as np

+from skillmodels.types import Anchoring, Dimensions, Labels

-def check_model(model_dict, labels, dimensions, anchoring, has_endogenous_factors):
+
+def check_model(
+    model_dict: dict,
+    labels: Labels,
+    dimensions: Dimensions,
+    anchoring: Anchoring,
+    has_endogenous_factors: bool,
+) -> None:
     """Check consistency and validity of the model specification.

     labels, dimensions and anchoring information are done before the model checking
@@ -12,27 +20,24 @@ def check_model(
     that the assumptions we make during the processing are fulfilled.

     Args:
-        model_dict (dict): The model specification. See: :ref:`model_specs`
-        dimensions (dict): Dimensional information like n_states, n_periods, n_controls,
-            n_mixtures. See :ref:`dimensions`.
-        labels (dict): Dict of lists with labels for the model quantities like
-            factors, periods, controls, stagemap and stages. See :ref:`labels`
-        anchoring (dict): Dictionary with information about anchoring.
-            See :ref:`anchoring`
+        model_dict: The model specification. See: :ref:`model_specs`
+        dimensions: Dimensional information.
+        labels: Labels for model quantities.
+        anchoring: Information about anchoring.
+        has_endogenous_factors: Whether the model has any endogenous factors

     Raises:
         ValueError

     """
     report = check_stagemap(
-        stagemap=labels["aug_stagemap"],
-        stages=labels["aug_stages"],
-        n_periods=dimensions["n_aug_periods"],
+        stagemap=labels.aug_stagemap,
+        stages=labels.aug_stages,
+        n_periods=dimensions.n_aug_periods,
         is_augmented=has_endogenous_factors,
     )
     report += _check_anchoring(anchoring)
-    invalid_measurements = _check_measurements(model_dict, labels["latent_factors"])
+    invalid_measurements = _check_measurements(model_dict, labels.latent_factors)
     if invalid_measurements:
         report += invalid_measurements
     elif has_endogenous_factors:
@@ -40,7 +45,7 @@ def check_model(
         report += _check_no_overlap_in_measurements_of_states_and_inv(
             model_dict, labels
         )
-    report += _check_normalizations(model_dict, labels["latent_factors"])
+    report += _check_normalizations(model_dict, labels.latent_factors)

     report = "\n".join(report)
     if report != "":
@@ -55,7 +60,8 @@ def check_stagemap(stagemap, stages, n_periods, is_augmented):
             f"The stagemap needs to be of length n_periods - {step_size}. "
             f" n_periods is {n_periods}, the stagemap has length {len(stagemap)}.",
         )
-    if stages != list(range(len(stages))):
+    # Convert to list for comparison (stages may be a tuple from dataclass)
+    if list(stages) != list(range(len(stages))):
         report.append("Stages need to be integers, start at zero and increase by 1.")

     # Hijacking the stagemap for endogenous factors leads to interleaved elements.
@@ -68,24 +74,24 @@ def check_stagemap(stagemap, stages, n_periods, is_augmented):
     return report


-def _check_anchoring(anchoring):
+def _check_anchoring(anchoring: Anchoring) -> list[str]:
     report = []
-    if not isinstance(anchoring["anchoring"], bool):
-        report.append("anchoring['anchoring'] must be a bool.")
-    if not isinstance(anchoring["outcomes"], dict):
-        report.append("anchoring['outcomes'] must be a dict")
+    if not isinstance(anchoring.anchoring, bool):
+        report.append("anchoring.anchoring must be a bool.")
+    if not isinstance(anchoring.outcomes, dict):
+        report.append("anchoring.outcomes must be a dict")
     else:
-        variables = list(anchoring["outcomes"].values())
+        variables = list(anchoring.outcomes.values())
         for var in variables:
             if not isinstance(var, str | int | tuple):
                 report.append("Outcomes variables have to be valid variable names.")
-    if not isinstance(anchoring["free_controls"], bool):
-        report.append("anchoring['use_controls'] must be a bool")
-    if not isinstance(anchoring["free_constant"], bool):
-        report.append("anchoring['use_constant'] must be a bool.")
-    if not isinstance(anchoring["free_loadings"], bool):
-        report.append("anchoring['free_loadings'] must be a bool.")
+    if not isinstance(anchoring.free_controls, bool):
+        report.append("anchoring.free_controls must be a bool")
+    if not isinstance(anchoring.free_constant, bool):
+        report.append("anchoring.free_constant must be a bool.")
+    if not isinstance(anchoring.free_loadings, bool):
+        report.append("anchoring.free_loadings must be a bool.")

     return report

@@ -108,11 +114,13 @@ def _check_measurements(model_dict, factors):
     return report


-def _check_no_overlap_in_measurements_of_states_and_inv(model_dict, labels):
+def _check_no_overlap_in_measurements_of_states_and_inv(
+    model_dict: dict, labels: Labels
+) -> list[str]:
     report = []
-    for period in labels["periods"]:
+    for period in labels.periods:
         meas = {}
-        for factor in labels["latent_factors"]:
+        for factor in labels.latent_factors:
             props = model_dict["factors"][factor]
             if props.get("is_endogenous", False):
                 meas["endogenous_factors"] = set(props["measurements"][period])
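The hunks above import Anchoring, Dimensions, and Labels from the new src/skillmodels/types.py, whose 202 added lines are not part of this excerpt. A minimal sketch of what such frozen dataclasses could look like, inferred only from the attribute accesses visible elsewhere in this patch — field names and derived properties here are assumptions, not the file's actual contents:

from dataclasses import dataclass

@dataclass(frozen=True)
class Dimensions:
    # Sizes used throughout the likelihood code; names taken from
    # accesses like dimensions.n_latent_factors in the hunks below.
    n_latent_factors: int
    n_observed_factors: int
    n_controls: int
    n_mixtures: int
    n_aug_periods: int
    n_periods: int

    @property
    def n_all_factors(self) -> int:
        # Derived rather than stored, mirroring the removed
        # dims["n_all_factors"] = n_latent + n_observed assignment.
        return self.n_latent_factors + self.n_observed_factors

@dataclass(frozen=True)
class Labels:
    # Tuples rather than lists keep instances immutable; call sites in
    # this patch cast with list(...) wherever pandas needs a column list.
    latent_factors: tuple[str, ...]
    observed_factors: tuple[str, ...]
    controls: tuple[str, ...]
    periods: tuple[int, ...]
    stagemap: tuple[int, ...]
    stages: tuple[int, ...]
    aug_periods: tuple[int, ...]
    aug_periods_to_periods: dict[int, int]
    aug_stagemap: tuple[int, ...]
    aug_stages: tuple[int, ...]
    aug_stages_to_stages: dict[int, int]
    # Filled in via a second construction in process_model.py, so it
    # presumably carries a default.
    transition_names: tuple[str, ...] = ()

    @property
    def all_factors(self) -> tuple[str, ...]:
        return self.latent_factors + self.observed_factors

@dataclass(frozen=True)
class Anchoring:
    anchoring: bool
    outcomes: dict[str, str]
    factors: tuple[str, ...]
    free_controls: bool
    free_constant: bool
    free_loadings: bool
    ignore_constant_when_anchoring: bool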
diff --git a/src/skillmodels/constraints.py b/src/skillmodels/constraints.py
index 2fb1c704..78360ad2 100644
--- a/src/skillmodels/constraints.py
+++ b/src/skillmodels/constraints.py
@@ -49,26 +49,26 @@ def get_constraints_dicts(

     constraints_dicts = []
     constraints_dicts += _get_normalization_constraints(
-        normalizations, labels["latent_factors"]
+        normalizations, labels.latent_factors
     )
-    constraints_dicts += _get_mixture_weights_constraints(dimensions["n_mixtures"])
+    constraints_dicts += _get_mixture_weights_constraints(dimensions.n_mixtures)
     constraints_dicts += _get_stage_constraints(
-        stagemap=labels["aug_stagemap"],
-        stages=labels["aug_stages"],
+        stagemap=labels.aug_stagemap,
+        stages=labels.aug_stages,
     )
     constraints_dicts += _get_constant_factors_constraints(labels=labels)
     constraints_dicts += _get_initial_states_constraints(
-        n_mixtures=dimensions["n_mixtures"],
-        factors=labels["latent_factors"],
+        n_mixtures=dimensions.n_mixtures,
+        factors=labels.latent_factors,
     )
     constraints_dicts += _get_transition_constraints(labels=labels)
     constraints_dicts += _get_anchoring_constraints(
         update_info=update_info,
-        controls=labels["controls"],
+        controls=labels.controls,
         anchoring_info=anchoring_info,
-        periods=labels["aug_periods"],
+        periods=labels.aug_periods,
     )
-    if endogenous_factors_info["has_endogenous_factors"]:
+    if endogenous_factors_info.has_endogenous_factors:
         constraints_dicts += _get_constraints_for_augmented_periods(
             labels=labels,
             endogenous_factors_info=endogenous_factors_info,
@@ -244,10 +244,10 @@ def _get_constant_factors_constraints(labels) -> list[dict]:

     """
     constraints_dicts = []
-    for f, factor in enumerate(labels["latent_factors"]):
-        if labels["transition_names"][f] == "constant":
+    for f, factor in enumerate(labels.latent_factors):
+        if labels.transition_names[f] == "constant":
             msg = f"This constraint was generated because {factor} is constant."
-            for aug_period in labels["aug_periods"][:-1]:
+            for aug_period in labels.aug_periods[:-1]:
                 constraints_dicts.append(
                     {
                         "loc": ("shock_sds", aug_period, factor, "-"),
@@ -302,14 +302,14 @@ def _get_transition_constraints(labels) -> list[dict]:

     """
     constraints_dicts = []
-    for f, factor in enumerate(labels["latent_factors"]):
-        tname = labels["transition_names"][f]
+    for f, factor in enumerate(labels.latent_factors):
+        tname = labels.transition_names[f]
         msg = f"This constraint is inherent to the {tname} production function."
-        for aug_period in labels["aug_periods"][:-1]:
+        for aug_period in labels.aug_periods[:-1]:
             funcname = f"constraints_{tname}"
             if func := getattr(t_f_module, funcname, False):
                 c = func(  # ty: ignore[call-non-callable]
-                    factor=factor, factors=labels["all_factors"], aug_period=aug_period
+                    factor=factor, factors=labels.all_factors, aug_period=aug_period
                 )
                 if "description" not in c:
                     c["description"] = msg
@@ -336,7 +336,7 @@ def _get_anchoring_constraints(
     anchoring_updates = update_info[update_info["purpose"] == "anchoring"].index

     constraints_dicts = []
-    if not anchoring_info["free_constant"]:
+    if not anchoring_info.free_constant:
         msg = (
             "This constraint was generated because free_constant in the anchoring "
             "section of the model specification is set to False."
@@ -348,7 +348,7 @@ def _get_anchoring_constraints(
             {"loc": locs, "type": "fixed", "value": 0, "description": msg},
         )

-    if not anchoring_info["free_controls"]:
+    if not anchoring_info.free_controls:
         msg = (
             "This constraint was generated because free_controls in the anchoring "
             "section of the model specification is set to False."
@@ -361,15 +361,15 @@ def _get_anchoring_constraints(
             {"loc": ind_tups, "type": "fixed", "value": 0, "description": msg},
         )

-    if not anchoring_info["free_loadings"]:
+    if not anchoring_info.free_loadings:
         msg = (
             "This constraint was generated because free_loadings in the anchoring "
             "section of the model specification is set to False."
         )
         ind_tups = []
         for period in periods:
-            for factor in anchoring_info["factors"]:
-                outcome = anchoring_info["outcomes"][factor]
+            for factor in anchoring_info.factors:
+                outcome = anchoring_info.outcomes[factor]
                 meas = f"{outcome}_{factor}"
                 ind_tups.append(("loadings", period, meas, factor))

@@ -402,22 +402,23 @@ def _get_constraints_for_augmented_periods(

     """
     constraints_dicts = []
-    for f, factor in enumerate(labels["latent_factors"]):
-        tname = labels["transition_names"][f]
+    for f, factor in enumerate(labels.latent_factors):
+        tname = labels.transition_names[f]
         if tname == "constant":
             continue
         # We are restricting transitions and shocks, not measurements. So this might
         # look counterintuitive...
         aug_period_meas_type_to_constrain = (
             "states"
-            if endogenous_factors_info[factor]["is_state"]
+            if endogenous_factors_info.factor_info[factor].is_state
             else "endogenous_factors"
         )
+        aug_period_meas_types = (
+            endogenous_factors_info.aug_periods_to_aug_period_meas_types
+        )
         aug_periods_to_constrain = [
             k
-            for k, v in endogenous_factors_info[
-                "aug_periods_to_aug_period_meas_types"
-            ].items()
+            for k, v in aug_period_meas_types.items()
             if v == aug_period_meas_type_to_constrain
         ]
         for aug_period in aug_periods_to_constrain:
@@ -425,14 +426,14 @@ def _get_constraints_for_augmented_periods(
                 constraints_dicts += func(  # ty: ignore[call-non-callable]
                     factor=factor,
                     aug_period=aug_period,
-                    all_factors=labels["all_factors"],
+                    all_factors=labels.all_factors,
                 )
         for aug_period in aug_periods_to_constrain[:-1]:
             constraints_dicts.append(
                 {
                     "loc": ("shock_sds", aug_period, factor, "-"),
                     "type": "fixed",
-                    "value": endogenous_factors_info["bounds_distance"],
+                    "value": endogenous_factors_info.bounds_distance,
                     "description": "Identity constraint.",
                 }
             )

diff --git a/src/skillmodels/correlation_heatmap.py b/src/skillmodels/correlation_heatmap.py
index 9ea5d12c..b0abc958 100644
--- a/src/skillmodels/correlation_heatmap.py
+++ b/src/skillmodels/correlation_heatmap.py
@@ -279,7 +279,7 @@ def _get_update_info_for_periods(model):

     # Replace period level with user-provided period using set_codes
     period_values = update_info.index.get_level_values("aug_period").map(
-        model["labels"]["aug_periods_to_periods"]
+        model["labels"].aug_periods_to_periods
     )
     update_info.index = update_info.index.set_codes(period_values, level="aug_period")
     update_info.index = update_info.index.set_names(["period", "variable"])
@@ -629,7 +629,7 @@ def _get_factor_scores_data_for_single_period(
         df (pd.DataFrame): Processed DataFrame to calculate correlations over.

     """
-    aug_periods = model["endogenous_factors_info"]["aug_periods_from_period"](period)
+    aug_periods = model["endogenous_factors_info"].aug_periods_from_period(period)
    df = pd.concat(
        [
            _get_factor_scores_data_for_single_model_period(
@@ -757,22 +757,22 @@ def _get_factor_scores_data_for_multiple_periods(

 def _process_factors(model, factors):
     """Process factors to get a tuple of lists."""
     if not factors:
-        latent_factors = model["labels"]["latent_factors"]
-        observed_factors = model["labels"]["observed_factors"]
+        latent_factors = list(model["labels"].latent_factors)
+        observed_factors = list(model["labels"].observed_factors)
     elif isinstance(factors, str):
-        if factors in model["labels"]["latent_factors"]:
+        if factors in model["labels"].latent_factors:
             latent_factors = [factors]
             observed_factors = []
-        elif factors in model["labels"]["observed_factors"]:
+        elif factors in model["labels"].observed_factors:
             observed_factors = [factors]
             latent_factors = []
     else:
         observed_factors = []
         latent_factors = []
         for factor in factors:
-            if factor in model["labels"]["latent_factors"]:
+            if factor in model["labels"].latent_factors:
                 latent_factors.append(factor)
-            elif factor in model["labels"]["observed_factors"]:
+            elif factor in model["labels"].observed_factors:
                 observed_factors.append(factor)

     return latent_factors, observed_factors  # ty: ignore[possibly-unresolved-reference]


 def _process_periods(periods, model):
     """Process periods to get a list."""
     if periods is None:
-        periods = list(range(model["dimensions"]["n_periods"]))
+        periods = list(range(model["dimensions"].n_periods))
     elif isinstance(periods, int | float):
         periods = [periods]
     return periods
diff --git a/src/skillmodels/filtered_states.py b/src/skillmodels/filtered_states.py
index 2efb4c6f..a1c222a9 100644
--- a/src/skillmodels/filtered_states.py
+++ b/src/skillmodels/filtered_states.py
@@ -26,7 +26,7 @@ def get_filtered_states(model_dict, data, params):

     anchored_ranges = create_state_ranges(
         filtered_states=anchored_states_df,
-        factors=model["labels"]["latent_factors"],
+        factors=model["labels"].latent_factors,
     )

     out = {
@@ -72,9 +72,7 @@ def anchor_states_df(states_df, model_dict, params, use_aug_period):
         update_info=model["update_info"],
         labels=model["labels"],
         anchoring=model["anchoring"],
-        has_endogenous_factors=model["endogenous_factors_info"][
-            "has_endogenous_factors"
-        ],
+        has_endogenous_factors=model["endogenous_factors_info"].has_endogenous_factors,
     )

     *_, pardict = parse_params(
@@ -85,13 +83,13 @@ def anchor_states_df(states_df, model_dict, params, use_aug_period):
         n_obs=1,
     )

-    n_latent = model["dimensions"]["n_latent_factors"]
+    n_latent = model["dimensions"].n_latent_factors
     _scaling_factors = np.array(pardict["anchoring_scaling_factors"][:, :n_latent])
     _constants = np.array(pardict["anchoring_constants"][:, :n_latent])

     if use_aug_period:
         period_arr = states_df["aug_period"].to_numpy()
-        ap_to_p = model["labels"]["aug_periods_to_periods"]
+        ap_to_p = model["labels"].aug_periods_to_periods
         scaling_factors = np.empty(shape=(len(ap_to_p), n_latent))
         constants = np.empty(shape=(len(ap_to_p), n_latent))
         for ap, p in ap_to_p.items():
@@ -106,7 +104,7 @@ def anchor_states_df(states_df, model_dict, params, use_aug_period):
         constants_arr = constants[period_arr]

     out = states_df.copy(deep=True)
-    for pos, factor in enumerate(model["labels"]["latent_factors"]):
+    for pos, factor in enumerate(model["labels"].latent_factors):
         out[factor] = constants_arr[:, pos] + states_df[factor] * scaling_arr[:, pos]

     out = out[states_df.columns]

diff --git a/src/skillmodels/likelihood_function.py b/src/skillmodels/likelihood_function.py
index 4e7eec21..04870520 100644
--- a/src/skillmodels/likelihood_function.py
+++ b/src/skillmodels/likelihood_function.py
@@ -139,10 +139,10 @@ def log_likelihood_obs(
     # possible.
     return soft_clipping(
         arr=static_out["loglikes"],
-        lower=estimation_options["clipping_lower_bound"],
-        upper=estimation_options["clipping_upper_bound"],
-        lower_hardness=estimation_options["clipping_lower_hardness"],
-        upper_hardness=estimation_options["clipping_upper_hardness"],
+        lower=estimation_options.clipping_lower_bound,
+        upper=estimation_options.clipping_upper_bound,
+        lower_hardness=estimation_options.clipping_lower_hardness,
+        upper_hardness=estimation_options.clipping_upper_hardness,
     ).sum(axis=0)

diff --git a/src/skillmodels/likelihood_function_debug.py b/src/skillmodels/likelihood_function_debug.py
index 6624050a..ab902598 100644
--- a/src/skillmodels/likelihood_function_debug.py
+++ b/src/skillmodels/likelihood_function_debug.py
@@ -106,10 +106,10 @@ def log_likelihood(
     # possible.
     clipped = soft_clipping(
         arr=static_out["loglikes"],
-        lower=estimation_options["clipping_lower_bound"],
-        upper=estimation_options["clipping_upper_bound"],
-        lower_hardness=estimation_options["clipping_lower_hardness"],
-        upper_hardness=estimation_options["clipping_upper_hardness"],
+        lower=estimation_options.clipping_lower_bound,
+        upper=estimation_options.clipping_upper_bound,
+        lower_hardness=estimation_options.clipping_lower_hardness,
+        upper_hardness=estimation_options.clipping_upper_hardness,
     )

     value = clipped.sum()

diff --git a/src/skillmodels/maximization_inputs.py b/src/skillmodels/maximization_inputs.py
index 333804bd..1f75dafb 100644
--- a/src/skillmodels/maximization_inputs.py
+++ b/src/skillmodels/maximization_inputs.py
@@ -70,15 +70,11 @@ def get_maximization_inputs(model_dict, data, split_dataset=1):
         update_info=model["update_info"],
         labels=model["labels"],
         anchoring=model["anchoring"],
-        has_endogenous_factors=model["endogenous_factors_info"][
-            "has_endogenous_factors"
-        ],
+        has_endogenous_factors=model["endogenous_factors_info"].has_endogenous_factors,
     )
     processed_data = process_data(
         df=data,
-        has_endogenous_factors=model["endogenous_factors_info"][
-            "has_endogenous_factors"
-        ],
+        has_endogenous_factors=model["endogenous_factors_info"].has_endogenous_factors,
         labels=model["labels"],
         update_info=model["update_info"],
         anchoring_info=model["anchoring"],
@@ -86,8 +82,8 @@ def get_maximization_inputs(model_dict, data, split_dataset=1):
     )

     sigma_scaling_factor, sigma_weights = calculate_sigma_scaling_factor_and_weights(
-        model["dimensions"]["n_latent_factors"],
-        model["estimation_options"]["sigma_points_scale"],
+        model["dimensions"].n_latent_factors,
+        model["estimation_options"].sigma_points_scale,
     )

     partialed_get_jnp_params_vec = functools.partial(
@@ -171,7 +167,7 @@ def debug_loglike(params):
     params_template = pd.DataFrame(columns=["value"], index=p_index)
     params_template = add_bounds(
         params=params_template,
-        bounds_distance=model["estimation_options"]["bounds_distance"],
+        bounds_distance=model["estimation_options"].bounds_distance,
     )
     params_template = enforce_fixed_constraints(
         params_template=params_template,
@@ -215,9 +211,9 @@ def _partial_some_log_likelihood(
     # The aug_period of the last update is the last aug_period by definition. If there
     # are endogenous factors, the last aug_period is found at index -2 (there should not
     # be measurements for endogenous factors in the "second half" of the last period).
     last_aug_period = (
-        model["labels"]["aug_periods"][-2]
+        model["labels"].aug_periods[-2]
         if parsing_info["has_endogenous_factors"]
-        else model["labels"]["aug_periods"][-1]
+        else model["labels"].aug_periods[-1]
     )
     iteration_to_period = _aug_periods.replace(last_aug_period, -1).to_numpy()
     assert max(iteration_to_period) == last_aug_period - 1
@@ -227,7 +223,7 @@ def _partial_some_log_likelihood(
         parsing_info=parsing_info,
         measurements=measurements,
         controls=controls,
-        transition_func=model["transition_info"]["func"],
+        transition_func=model["transition_info"].func,
         sigma_scaling_factor=sigma_scaling_factor,
         sigma_weights=sigma_weights,
         dimensions=model["dimensions"],

diff --git a/src/skillmodels/params_index.py b/src/skillmodels/params_index.py
index 586744f3..57bc8ee2 100644
--- a/src/skillmodels/params_index.py
+++ b/src/skillmodels/params_index.py
@@ -1,8 +1,19 @@
 import pandas as pd

+from skillmodels.types import (
+    Dimensions,
+    EndogenousFactorsInfo,
+    Labels,
+    TransitionInfo,
+)
+

 def get_params_index(
-    update_info, labels, dimensions, transition_info, endogenous_factors_info
+    update_info,
+    labels: Labels,
+    dimensions: Dimensions,
+    transition_info: TransitionInfo,
+    endogenous_factors_info: EndogenousFactorsInfo,
 ):
     """Generate index for the params_df for optimagic.

@@ -12,44 +23,42 @@ def get_params_index(
     it contains an empty string.

     Args:
-        update_info (pandas.DataFrame): DataFrame with one row per Kalman update needed
+        update_info: DataFrame with one row per Kalman update needed
             in the likelihood function. See :ref:`update_info`.
-        labels (dict): Dict of lists with labels for the model quantities like
-            factors, periods, controls, stagemap and stages. See :ref:`labels`
-        options (dict): Tuning parameters for the estimation.
-            See :ref:`estimation_options`.
-        transition_info (dict): Information about the transition equations.
-        endogenous_factors_info (dict): Information about endogenous factors, if any.
+        labels: Labels for model quantities.
+        dimensions: Dimensional information.
+        transition_info: Information about the transition equations.
+        endogenous_factors_info: Information about endogenous factors, if any.
     Returns:
         params_index (pd.MultiIndex)

     """
     ind_tups = get_control_params_index_tuples(
-        controls=labels["controls"], update_info=update_info
+        controls=labels.controls, update_info=update_info
     )
     ind_tups += get_loadings_index_tuples(
-        factors=labels["latent_factors"], update_info=update_info
+        factors=labels.latent_factors, update_info=update_info
     )
     ind_tups += get_meas_sds_index_tuples(update_info=update_info)
     ind_tups += get_shock_sds_index_tuples(
-        aug_periods=labels["aug_periods"],
-        factors=labels["latent_factors"],
-        has_endogenous_factors=endogenous_factors_info["has_endogenous_factors"],
+        aug_periods=labels.aug_periods,
+        factors=labels.latent_factors,
+        has_endogenous_factors=endogenous_factors_info.has_endogenous_factors,
     )
     ind_tups += initial_mean_index_tuples(
-        n_mixtures=dimensions["n_mixtures"],
-        factors=labels["latent_factors"],
+        n_mixtures=dimensions.n_mixtures,
+        factors=labels.latent_factors,
     )
-    ind_tups += get_mixture_weights_index_tuples(n_mixtures=dimensions["n_mixtures"])
+    ind_tups += get_mixture_weights_index_tuples(n_mixtures=dimensions.n_mixtures)
     ind_tups += get_initial_cholcovs_index_tuples(
-        n_mixtures=dimensions["n_mixtures"],
-        factors=labels["latent_factors"],
+        n_mixtures=dimensions.n_mixtures,
+        factors=labels.latent_factors,
     )
     ind_tups += get_transition_index_tuples(
         transition_info=transition_info,
-        aug_periods=labels["aug_periods"],
-        has_endogenous_factors=endogenous_factors_info["has_endogenous_factors"],
+        aug_periods=labels.aug_periods,
+        has_endogenous_factors=endogenous_factors_info.has_endogenous_factors,
     )

     index = pd.MultiIndex.from_tuples(
@@ -88,7 +97,7 @@ def get_loadings_index_tuples(factors, update_info):
         ind_tups (list)

     """
-    mask = update_info[factors].to_numpy()
+    mask = update_info[list(factors)].to_numpy()
     ind_tups = []
     for i, (aug_period, meas) in enumerate(update_info.index):
         for f, factor in enumerate(factors):
@@ -194,15 +203,15 @@ def get_initial_cholcovs_index_tuples(n_mixtures, factors):
     return ind_tups


-def get_transition_index_tuples(transition_info, aug_periods, has_endogenous_factors):
+def get_transition_index_tuples(
+    transition_info: TransitionInfo, aug_periods, has_endogenous_factors: bool
+):
     """Index tuples for transition equation coefficients.

     Args:
-        latent_factors (list): The latent factors of the model
-        all_factors (list): The latent and observed factors of the model.
-        aug_periods (list): The augmented periods of the model
-        transition_names (list): name of the transition equation of each factor
-        has_endogenous_factors (bool): Whether the model has endogenous factors.
+        transition_info: Information about transition equations.
+        aug_periods: The augmented periods of the model.
+        has_endogenous_factors: Whether the model has endogenous factors.
     Returns:
         ind_tups (list)

     """
     end = -2 if has_endogenous_factors else -1
     ind_tups = []
-    for factor, names in transition_info["param_names"].items():
+    for factor, names in transition_info.param_names.items():
         for aug_period in aug_periods[:end]:
             for name in names:
                 ind_tups.append(("transition", aug_period, factor, name))

diff --git a/src/skillmodels/parse_params.py b/src/skillmodels/parse_params.py
index 2515be2d..b763a6fd 100644
--- a/src/skillmodels/parse_params.py
+++ b/src/skillmodels/parse_params.py
@@ -42,7 +42,7 @@ def create_parsing_info(
         parsing_info[quantity] = _get_positional_selector_from_loc(range_sr, quantity)

     # loadings:
-    mask = update_info[labels["latent_factors"]].to_numpy()
+    mask = update_info[list(labels.latent_factors)].to_numpy()
     helper = np.arange(mask.size).reshape(mask.shape)
     flat_indices = helper[mask]

@@ -55,7 +55,7 @@ def create_parsing_info(

     # "trans_coeffs"
     pos_dict = {}
-    for factor in labels["latent_factors"]:
+    for factor in list(labels.latent_factors):
         helper = pd.DataFrame(index=params_index)
         loc = helper.query(f"category == 'transition' & name1 == '{factor}'").index
         pos_dict[factor] = _get_positional_selector_from_loc(range_sr, loc)
@@ -63,19 +63,19 @@ def create_parsing_info(
     parsing_info["transition"] = pos_dict

     # anchoring_scaling_factors
-    is_free_loading = update_info[labels["latent_factors"]].to_numpy()
+    is_free_loading = update_info[list(labels.latent_factors)].to_numpy()
     is_anchoring = (update_info["purpose"] == "anchoring").to_numpy().reshape(-1, 1)
     is_anchoring_loading = jnp.array(is_free_loading & is_anchoring)
     parsing_info["is_anchoring_loading"] = is_anchoring_loading
     parsing_info["is_anchored_factor"] = jnp.array(
-        update_info.query("purpose == 'anchoring'")[labels["latent_factors"]].any(
+        update_info.query("purpose == 'anchoring'")[list(labels.latent_factors)].any(
             axis=0,
         ),
     )
     parsing_info["is_anchoring_update"] = is_anchoring.flatten()
-    parsing_info["ignore_constant_when_anchoring"] = anchoring[
-        "ignore_constant_when_anchoring"
-    ]
+    parsing_info["ignore_constant_when_anchoring"] = (
+        anchoring.ignore_constant_when_anchoring
+    )

     # Add has_endogenous_factors to parsing_info
     parsing_info["has_endogenous_factors"] = has_endogenous_factors
@@ -157,8 +157,8 @@ def _get_initial_states(params, info, dimensions, n_obs):
     """Create the array of initial states."""
     state = params[info["initial_states"]].reshape(
         1,
-        dimensions["n_mixtures"],
-        dimensions["n_latent_factors"],
+        dimensions.n_mixtures,
+        dimensions.n_latent_factors,
     )
     return jnp.repeat(state, n_obs, axis=0)

@@ -169,7 +169,7 @@ def _get_initial_upper_chols(params, info, dimensions, n_obs):

     Note: The matrices contain the transpose of the lower triangular cholesky factors.

     """
-    n_states, n_mixtures = dimensions["n_latent_factors"], dimensions["n_mixtures"]
+    n_states, n_mixtures = dimensions.n_latent_factors, dimensions.n_mixtures
     chol_params = params[info["initial_cholcovs"]].reshape(n_mixtures, -1)
     upper_chols = jnp.zeros((n_obs, n_mixtures, n_states, n_states))
     for i in range(n_mixtures):
@@ -187,7 +187,7 @@ def _get_initial_log_mixture_weights(params, info, n_obs):

 def _get_control_params(params, info, dimensions):
     """Create the parameters for control variables in measurement equations."""
-    return params[info["controls"]].reshape(-1, dimensions["n_controls"])
+    return params[info["controls"]].reshape(-1, dimensions.n_controls)


 def _get_loadings(params, info):
@@ -206,19 +206,19 @@ def _get_meas_sds(params, info):

 def _get_shock_sds(params, info, dimensions):
     """Create the array of standard deviations of the shocks in transition functions."""
-    return params[info["shock_sds"]].reshape(-1, dimensions["n_latent_factors"])
+    return params[info["shock_sds"]].reshape(-1, dimensions.n_latent_factors)


 def _get_transition_params(params, info, labels):
     """Create a list of arrays with transition equation parameters."""
     trans_params = {}
     t_info = info["transition"]
-    n_aug_periods = len(labels["aug_periods"])
+    n_aug_periods = len(labels.aug_periods)

     # Use has_endogenous_factors from parsing_info instead of undefined global
     len_reduction = 2 if info["has_endogenous_factors"] else 1

-    for factor in labels["latent_factors"]:
+    for factor in list(labels.latent_factors):
         ilocs = t_info[factor]
         trans_params[factor] = params[ilocs].reshape(n_aug_periods - len_reduction, -1)
     return trans_params
@@ -231,10 +231,10 @@ def _get_anchoring_scaling_factors(loadings, info, dimensions):

     """
     scaling_factors = jnp.ones(
-        (dimensions["n_aug_periods"], dimensions["n_latent_factors"]),
+        (dimensions.n_aug_periods, dimensions.n_latent_factors),
     )
     free_anchoring_loadings = loadings[info["is_anchoring_loading"]].reshape(
-        dimensions["n_aug_periods"],
+        dimensions.n_aug_periods,
         -1,
     )
     scaling_factors = scaling_factors.at[:, info["is_anchored_factor"]].set(
         free_anchoring_loadings,
     )

     scaling_for_observed = jnp.ones(
-        (dimensions["n_aug_periods"], dimensions["n_observed_factors"]),
+        (dimensions.n_aug_periods, dimensions.n_observed_factors),
     )
     scaling_factors = jnp.hstack([scaling_factors, scaling_for_observed])

@@ -256,16 +256,16 @@ def _get_anchoring_constants(controls, info, dimensions):

     Note: Parameters are not taken from the parameter vector but from the controls.

     """
-    constants = jnp.zeros((dimensions["n_aug_periods"], dimensions["n_latent_factors"]))
+    constants = jnp.zeros((dimensions.n_aug_periods, dimensions.n_latent_factors))
     if not info["ignore_constant_when_anchoring"]:
         values = controls[:, 0][info["is_anchoring_update"]].reshape(
-            dimensions["n_aug_periods"],
+            dimensions.n_aug_periods,
             -1,
         )
         constants = constants.at[:, info["is_anchored_factor"]].set(values)

     constants_for_observed = jnp.zeros(
-        (dimensions["n_aug_periods"], dimensions["n_observed_factors"]),
+        (dimensions.n_aug_periods, dimensions.n_observed_factors),
     )
     constants = jnp.hstack([constants, constants_for_observed])

diff --git a/src/skillmodels/process_data.py b/src/skillmodels/process_data.py
index 41dc0b32..688479c6 100644
--- a/src/skillmodels/process_data.py
+++ b/src/skillmodels/process_data.py
@@ -1,10 +1,13 @@
 import warnings
-from typing import Any
+from typing import TYPE_CHECKING

 import jax.numpy as jnp
 import numpy as np
 import pandas as pd

+if TYPE_CHECKING:
+    from skillmodels.types import Labels
+

 def process_data(
     df,
@@ -39,7 +42,7 @@ def process_data(
             Only returned if estimation==True

     """
-    df = pre_process_data(df, labels["periods"])
+    df = pre_process_data(df, labels.periods)
     df["constant"] = 1

     out = {}
@@ -50,8 +53,8 @@ def process_data(
         df.index = df.index.set_names(["id", "aug_period"])

     _check_data(df, update_info, labels, purpose=purpose)
-    n_obs = int(len(df) / len(labels["aug_periods"]))
-    df = _handle_controls_with_missings(df, labels["controls"], update_info)
+    n_obs = int(len(df) / len(labels.aug_periods))
+    df = _handle_controls_with_missings(df, labels.controls, update_info)
     out["controls"] = _generate_controls_array(df, labels, n_obs)
     out["observed_factors"] = _generate_observed_factor_array(df, labels, n_obs)

@@ -96,12 +99,12 @@ def _get_period_data_for_endogenous_factors(
     aug_period: int,
     period: int,
     df: pd.DataFrame,
-    labels: dict[str, Any],
+    labels: "Labels",
     update_info: pd.DataFrame,
 ) -> pd.DataFrame:
     meas = _get_period_measurements(update_info, aug_period)
-    controls = labels["controls"]
-    observed = labels["observed_factors"]
+    controls = labels.controls
+    observed = labels.observed_factors

     out = df.query(f"period == {period}")[
         [
@@ -120,7 +123,7 @@ def _get_period_data_for_endogenous_factors(

 def _augment_data_for_endogenous_factors(
     df: pd.DataFrame,
-    labels: dict[str, Any],
+    labels: "Labels",
     update_info: pd.DataFrame,
 ):
     """Make room for endogenous factors by doubling up the periods.
@@ -135,7 +138,7 @@ def _augment_data_for_endogenous_factors(
     n_ids = df["id"].nunique()
     n_periods = df["period"].nunique()
     assert n_ids * n_periods == df.shape[0]
-    assert set(df["period"]) == set(labels["aug_periods_to_periods"].values())
+    assert set(df["period"]) == set(labels.aug_periods_to_periods.values())

     out = pd.concat(
         [
@@ -146,7 +149,7 @@ def _augment_data_for_endogenous_factors(
                 update_info=update_info,
                 labels=labels,
             )
-            for aug_period, period in labels["aug_periods_to_periods"].items()
+            for aug_period, period in labels.aug_periods_to_periods.items()
         ]
     )
     return out.set_index(["id", "aug_period"]).sort_index()
@@ -154,17 +157,17 @@ def _augment_data_for_endogenous_factors(

 def _add_copies_of_anchoring_outcome(df, anchoring_info):
     df = df.copy()
-    for factor in anchoring_info["factors"]:
-        outcome = anchoring_info["outcomes"][factor]
+    for factor in anchoring_info.factors:
+        outcome = anchoring_info.outcomes[factor]
         df[f"{outcome}_{factor}"] = df[outcome]
     return df


 def _check_data(df, update_info, labels, purpose):  # noqa: C901
     var_report = pd.DataFrame(index=update_info.index[:0], columns=["problem"])
-    for aug_period in labels["aug_periods"]:
+    for aug_period in labels.aug_periods:
         period_data = df.query(f"aug_period == {aug_period}")
-        for cont in labels["controls"]:
+        for cont in labels.controls:
             if cont not in period_data.columns or period_data[cont].isna().all():
                 var_report.loc[(aug_period, cont), "problem"] = "Variable is missing"

@@ -179,7 +182,7 @@ def _check_data(df, update_info, labels, purpose):  # noqa: C901
                     "Variable has no variance"
                 )

-        for factor in labels["observed_factors"]:
+        for factor in labels.observed_factors:
             if factor not in period_data.columns:
                 var_report.loc[(aug_period, factor), "problem"] = "Variable is missing"
             elif period_data[factor].isna().any():
@@ -198,7 +201,7 @@ def _handle_controls_with_missings(df, controls, update_info):
     problematic_index = df.index[:0]
     for aug_period in aug_periods:
         period_data = df.query(f"aug_period == {aug_period}")
-        control_data = period_data[controls]
+        control_data = period_data[list(controls)]
         meas_data = period_data[_get_period_measurements(update_info, aug_period)]
         problem = control_data.isna().any(axis=1) & meas_data.notna().any(axis=1)
         problematic_index = problematic_index.union(period_data[problem].index)
@@ -228,18 +231,18 @@ def _generate_measurements_array(df, update_info, n_obs):

 def _generate_controls_array(df, labels, n_obs):
-    arr = np.zeros((len(labels["aug_periods"]), n_obs, len(labels["controls"])))
-    for aug_period in labels["aug_periods"]:
+    arr = np.zeros((len(labels.aug_periods), n_obs, len(labels.controls)))
+    for aug_period in labels.aug_periods:
         arr[aug_period] = df.query(f"aug_period == {aug_period}")[
-            labels["controls"]
+            list(labels.controls)
         ].to_numpy()
     return jnp.array(arr, dtype="float32")


 def _generate_observed_factor_array(df, labels, n_obs):
-    arr = np.zeros((len(labels["aug_periods"]), n_obs, len(labels["observed_factors"])))
-    for aug_period in labels["aug_periods"]:
+    arr = np.zeros((len(labels.aug_periods), n_obs, len(labels.observed_factors)))
+    for aug_period in labels.aug_periods:
         arr[aug_period] = df.query(f"aug_period == {aug_period}")[
-            labels["observed_factors"]
+            list(labels.observed_factors)
         ].to_numpy()
     return jnp.array(arr, dtype="float32")
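_augment_data_for_endogenous_factors doubles up the period index. A small worked illustration of the mapping it relies on, assuming three user-facing periods and the even/odd interleaving implied by the "interleaved elements" comments and the index -2 logic elsewhere in this patch (the exact layout lives in _get_aug_periods_to_periods, which this excerpt does not show in full):

# With has_endogenous_factors=True and n_periods=3, n_aug_periods=6;
# each user-facing period presumably owns two augmented periods:
aug_periods_to_periods = {0: 0, 1: 0, 2: 1, 3: 1, 4: 2, 5: 2}

# One half of each pair carries state measurements, the other carries
# measurements of endogenous factors:
aug_period_meas_types = {
    0: "states", 1: "endogenous_factors",
    2: "states", 3: "endogenous_factors",
    4: "states", 5: "endogenous_factors",
}

# Without endogenous factors both collapse to the identity layout:
# {0: 0, 1: 1, 2: 2}, with every entry mapped to "states".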
diff --git a/src/skillmodels/process_debug_data.py b/src/skillmodels/process_debug_data.py
index 1d5e7dde..1a035bc6 100644
--- a/src/skillmodels/process_debug_data.py
+++ b/src/skillmodels/process_debug_data.py
@@ -52,7 +52,7 @@ def process_debug_data(debug_data, model):
     """
     update_info = model["update_info"]
-    factors = model["labels"]["latent_factors"]
+    factors = model["labels"].latent_factors

     post_update_states = _create_post_update_states(
         debug_data["filtered_states"],

diff --git a/src/skillmodels/process_model.py b/src/skillmodels/process_model.py
index dd1771ac..1998b8fa 100644
--- a/src/skillmodels/process_model.py
+++ b/src/skillmodels/process_model.py
@@ -12,6 +12,15 @@
 import skillmodels.transition_functions as t_f_module
 from skillmodels.check_model import check_model, check_stagemap
 from skillmodels.decorators import extract_params, jax_array_output
+from skillmodels.types import (
+    Anchoring,
+    Dimensions,
+    EndogenousFactorsInfo,
+    EstimationOptions,
+    FactorEndogenousInfo,
+    Labels,
+    TransitionInfo,
+)

 pd.set_option("future.no_silent_downcasting", True)  # noqa: FBT003

@@ -64,7 +73,26 @@ def process_model(model_dict):
         )
     else:
         _model_dict_aug = model_dict
-        endogenous_factors_info = {"has_endogenous_factors": has_endogenous_factors}
+        endogenous_factors_info = EndogenousFactorsInfo(
+            has_endogenous_factors=has_endogenous_factors,
+            aug_periods_to_aug_period_meas_types=_get_aug_periods_to_aug_period_meas_types(
+                aug_periods=labels.aug_periods_to_periods.keys(),
+                has_endogenous_factors=has_endogenous_factors,
+            ),
+            bounds_distance=model_dict["estimation_options"].get(
+                "bounds_distance", 1e-3
+            ),
+            aug_periods_from_period=partial(
+                _aug_periods_from_period,
+                aug_periods_to_periods=labels.aug_periods_to_periods,
+            ),
+            factor_info={
+                fac: FactorEndogenousInfo(
+                    is_state=True, is_endogenous=False, is_correction=False
+                )
+                for fac in labels.latent_factors
+            },
+        )
     check_model(
         model_dict=_model_dict_aug,
         labels=labels,
@@ -73,7 +101,21 @@ def process_model(model_dict):
         has_endogenous_factors=has_endogenous_factors,
     )
     transition_info = _get_transition_info(_model_dict_aug, labels)
-    labels["transition_names"] = list(transition_info["function_names"].values())
+    # Create new Labels with transition_names (frozen dataclass requires replacement)
+    labels = Labels(
+        latent_factors=labels.latent_factors,
+        observed_factors=labels.observed_factors,
+        controls=labels.controls,
+        periods=labels.periods,
+        stagemap=labels.stagemap,
+        stages=labels.stages,
+        aug_periods=labels.aug_periods,
+        aug_periods_to_periods=labels.aug_periods_to_periods,
+        aug_stagemap=labels.aug_stagemap,
+        aug_stages=labels.aug_stages,
+        aug_stages_to_stages=labels.aug_stages_to_stages,
+        transition_names=tuple(transition_info.function_names.values()),
+    )

     processed = {
         "dimensions": dims,
@@ -115,32 +157,29 @@ def get_has_endogenous_factors(factors: dict[str, Any]) -> bool:
     return endogenous_factors["is_endogenous"].any()  # ty: ignore[invalid-return-type]


-def get_dimensions(model_dict, has_endogenous_factors):
+def get_dimensions(model_dict: dict, has_endogenous_factors: bool) -> Dimensions:
     """Extract the dimensions of the model.

     Args:
-        model_dict (dict): The model specification. See: :ref:`model_specs`
-        has_endogenous_factors (bool): Whether endogenous factors are present.
+        model_dict: The model specification. See: :ref:`model_specs`
+        has_endogenous_factors: Whether endogenous factors are present.

     Returns:
-        dict: Dimensional information like n_states, n_periods, n_controls,
-            n_mixtures. See :ref:`dimensions`.
+        Dimensions dataclass with all dimensional information.

     """
     all_n_periods = [len(d["measurements"]) for d in model_dict["factors"].values()]
     n_periods = max(all_n_periods)
     n_aug_periods = 2 * n_periods if has_endogenous_factors else n_periods

-    dims = {
-        "n_latent_factors": len(model_dict["factors"]),
-        "n_observed_factors": len(model_dict.get("observed_factors", [])),
-        "n_controls": len(model_dict.get("controls", [])) + 1,  # plus 1: constant
-        "n_mixtures": model_dict["estimation_options"].get("n_mixtures", 1),
-        "n_aug_periods": n_aug_periods,
-        "n_periods": n_periods,
-    }
-    dims["n_all_factors"] = dims["n_latent_factors"] + dims["n_observed_factors"]
-    return dims
+    return Dimensions(
+        n_latent_factors=len(model_dict["factors"]),
+        n_observed_factors=len(model_dict.get("observed_factors", [])),
+        n_controls=len(model_dict.get("controls", [])) + 1,  # plus 1: constant
+        n_mixtures=model_dict["estimation_options"].get("n_mixtures", 1),
+        n_aug_periods=n_aug_periods,
+        n_periods=n_periods,
+    )


 def _get_aug_periods_to_periods(
@@ -162,39 +201,39 @@ def _aug_periods_from_period(
     return [ap for ap, p in aug_periods_to_periods.items() if p == period]


-def _get_labels(model_dict, has_endogenous_factors, dimensions):
+def _get_labels(
+    model_dict: dict, has_endogenous_factors: bool, dimensions: Dimensions
+) -> Labels:
     """Extract labels of the model quantities.

     Args:
-        model_dict (dict): The model specification. See: :ref:`model_specs`
-        has_endogenous_factors (bool): Whether endogenous factors are present.
-        dimensions (dict): Dimensional information like n_states, n_periods, n_controls,
-            n_mixtures. See :ref:`dimensions`.
+        model_dict: The model specification. See: :ref:`model_specs`
+        has_endogenous_factors: Whether endogenous factors are present.
+        dimensions: Dimensional information.

     Returns:
-        dict: Dict of lists with labels for the model quantities like
-            factors, periods, controls, stagemap and stages. See :ref:`labels`
+        Labels dataclass with all label information.

     """
     aug_periods_to_periods = _get_aug_periods_to_periods(
-        n_aug_periods=dimensions["n_aug_periods"],
+        n_aug_periods=dimensions.n_aug_periods,
         has_endogenous_factors=has_endogenous_factors,
     )

-    stagemap = model_dict.get("stagemap", list(range(dimensions["n_periods"] - 1)))
+    stagemap = model_dict.get("stagemap", list(range(dimensions.n_periods - 1)))
     stages = sorted(int(v) for v in np.unique(stagemap))
     report = check_stagemap(
         stagemap=stagemap,
         stages=stages,
-        n_periods=dimensions["n_periods"],
+        n_periods=dimensions.n_periods,
         is_augmented=False,
     )
     if report:
         raise ValueError(f"Invalid stage map: {report}")

     if has_endogenous_factors:
-        aug_stagemap = []
-        aug_stages_to_stages = {}
+        aug_stagemap: list[int] = []
+        aug_stages_to_stages: dict[int, int] = {}
         relevant_aug_periods = sorted(aug_periods_to_periods.keys())[:-2]
         for aug_p in relevant_aug_periods:
             p = aug_periods_to_periods[aug_p]
@@ -203,81 +242,91 @@ def _get_labels(
             aug_stagemap.append(aug_s)
             aug_stages_to_stages[aug_s] = s
     else:
-        aug_stagemap = stagemap
+        aug_stagemap = list(stagemap)
         aug_stages_to_stages = {s: s for s in stages}

-    labels = {
-        "latent_factors": list(model_dict["factors"]),
-        "observed_factors": list(model_dict.get("observed_factors", [])),
-        "controls": ["constant", *list(model_dict.get("controls", []))],
-        "periods": sorted(set(aug_periods_to_periods.values())),
-        "stagemap": stagemap,
-        "stages": stages,
-        "aug_periods": list(aug_periods_to_periods.keys()),
-        "aug_periods_to_periods": aug_periods_to_periods,
-        "aug_stagemap": aug_stagemap,
-        "aug_stages": sorted(int(v) for v in np.unique(aug_stagemap)),
-        "aug_stages_to_stages": aug_stages_to_stages,
-    }
-
-    labels["all_factors"] = labels["latent_factors"] + labels["observed_factors"]  # ty: ignore[unsupported-operator]
-
-    return labels
+    return Labels(
+        latent_factors=tuple(model_dict["factors"]),
+        observed_factors=tuple(model_dict.get("observed_factors", [])),
+        controls=("constant", *model_dict.get("controls", [])),
+        periods=tuple(sorted(set(aug_periods_to_periods.values()))),
+        stagemap=tuple(stagemap),
+        stages=tuple(stages),
+        aug_periods=tuple(aug_periods_to_periods.keys()),
+        aug_periods_to_periods=aug_periods_to_periods,
+        aug_stagemap=tuple(aug_stagemap),
+        aug_stages=tuple(sorted(int(v) for v in np.unique(aug_stagemap))),
+        aug_stages_to_stages=aug_stages_to_stages,
+    )


-def _process_estimation_options(model_dict):
+def _process_estimation_options(model_dict: dict) -> EstimationOptions:
     """Process options.

     Args:
-        model_dict (dict): The model specification. See: :ref:`model_specs`
+        model_dict: The model specification. See: :ref:`model_specs`

     Returns:
-        dict: Tuning parameters for the estimation. See :ref:`options`.
+        EstimationOptions dataclass with tuning parameters for the estimation.

     """
-    default_options = {
-        "sigma_points_scale": 2,
-        "robust_bounds": True,
-        "bounds_distance": 1e-3,
-        "clipping_lower_bound": -1e30,
-        "clipping_upper_bound": None,
-        "clipping_lower_hardness": 1,
-        "clipping_upper_hardness": 1,
-    }
-    default_options.update(model_dict.get("estimation_options", {}))
-
-    if not default_options["robust_bounds"]:
-        default_options["bounds_distance"] = 0
-
-    return default_options
+    user_opts = model_dict.get("estimation_options", {})
+
+    sigma_points_scale = user_opts.get("sigma_points_scale", 2)
+    robust_bounds = user_opts.get("robust_bounds", True)
+    bounds_distance = user_opts.get("bounds_distance", 1e-3)
+    clipping_lower_bound = user_opts.get("clipping_lower_bound", -1e30)
+    clipping_upper_bound = user_opts.get("clipping_upper_bound", None)
+    clipping_lower_hardness = user_opts.get("clipping_lower_hardness", 1)
+    clipping_upper_hardness = user_opts.get("clipping_upper_hardness", 1)
+
+    if not robust_bounds:
+        bounds_distance = 0
+
+    return EstimationOptions(
+        sigma_points_scale=sigma_points_scale,
+        robust_bounds=robust_bounds,
+        bounds_distance=bounds_distance,
+        clipping_lower_bound=clipping_lower_bound,
+        clipping_upper_bound=clipping_upper_bound,
+        clipping_lower_hardness=clipping_lower_hardness,
+        clipping_upper_hardness=clipping_upper_hardness,
+    )


-def _process_anchoring(model_dict):
+def _process_anchoring(model_dict: dict) -> Anchoring:
     """Process the specification that governs how latent factors are anchored.

     Args:
-        model_dict (dict): The model specification. See: :ref:`model_specs`
+        model_dict: The model specification. See: :ref:`model_specs`

     Returns:
-        dict: Dictionary with information about anchoring. See :ref:`anchoring`
+        Anchoring dataclass with information about anchoring.

     """
-    anchinfo = {
-        "anchoring": False,
-        "outcomes": {},
-        "factors": [],
-        "free_controls": False,
-        "free_constant": False,
-        "free_loadings": False,
-        "ignore_constant_when_anchoring": False,
-    }
     if "anchoring" in model_dict:
-        anchinfo.update(model_dict["anchoring"])
-        anchinfo["anchoring"] = True
-        anchinfo["factors"] = list(anchinfo["outcomes"])  # ty: ignore[invalid-argument-type]
+        anch = model_dict["anchoring"]
+        return Anchoring(
+            anchoring=True,
+            outcomes=anch.get("outcomes", {}),
+            factors=tuple(anch.get("outcomes", {}).keys()),
+            free_controls=anch.get("free_controls", False),
+            free_constant=anch.get("free_constant", False),
+            free_loadings=anch.get("free_loadings", False),
+            ignore_constant_when_anchoring=anch.get(
+                "ignore_constant_when_anchoring", False
+            ),
+        )

-    return anchinfo
+    return Anchoring(
+        anchoring=False,
+        outcomes={},
+        factors=(),
+        free_controls=False,
+        free_constant=False,
+        free_loadings=False,
+        ignore_constant_when_anchoring=False,
+    )


 def _insert_empty_elements_into_list(old, insert_at_modulo, to_insert, aug_p_to_p):
@@ -288,16 +337,14 @@ def _insert_empty_elements_into_list(old, insert_at_modulo, to_insert, aug_p_to_


 def _augment_periods_for_endogenous_factors(
-    model_dict: dict[str, Any], dimensions: dict[str, Any], labels: dict[str, Any]
+    model_dict: dict[str, Any], dimensions: Dimensions, labels: Labels
 ) -> dict[str, Any]:
     """Augment periods if endogenous factors are present.

     Args:
         model_dict: The model specification. See: :ref:`model_specs`
-        dimensions (dict): Dimensional information like n_states, n_periods, n_controls,
-            n_mixtures. See :ref:`dimensions`.
-        labels (dict): Dict of lists with labels for the model quantities like
-            factors, periods, controls, stagemap and stages. See :ref:`labels`
+        dimensions: Dimensional information.
+        labels: Labels for model quantities.

     Returns:
         Model dictionary with twice the amount of periods
@@ -308,7 +355,7 @@ def _augment_periods_for_endogenous_factors(
         insert_at_modulo = 0 if v.get("is_endogenous", False) else 1

         # Insert empty elements into measurements when we do not have those.
-        if len(v["measurements"]) != dimensions["n_periods"]:
+        if len(v["measurements"]) != dimensions.n_periods:
             raise ValueError(
                 "Measurements must be of length `n_periods`, "
                 f"got {v['measurements']} for {fac}"
@@ -317,12 +364,12 @@ def _augment_periods_for_endogenous_factors(
             old=v["measurements"],
             insert_at_modulo=insert_at_modulo,
             to_insert=[],
-            aug_p_to_p=labels["aug_periods_to_periods"],
+            aug_p_to_p=labels.aug_periods_to_periods,
         )

         # Insert empty elements into normalizations when we do not have those.
         for norm_type, normalizations in v.get("normalizations", {}).items():
-            if not len(normalizations) == dimensions["n_periods"]:
+            if not len(normalizations) == dimensions.n_periods:
                 raise ValueError(
                     "Normalizations must be lists of length `n_periods`, "
                     f"got {normalizations} for {fac}['normalizations']['{norm_type}']"
@@ -332,17 +379,17 @@ def _augment_periods_for_endogenous_factors(
                     old=normalizations,
                     insert_at_modulo=insert_at_modulo,
                     to_insert={},
-                    aug_p_to_p=labels["aug_periods_to_periods"],
+                    aug_p_to_p=labels.aug_periods_to_periods,
                 )
             )
     return aug


-def _get_transition_info(model_dict, labels):
+def _get_transition_info(model_dict: dict, labels: Labels) -> TransitionInfo:
     """Collect information about transition functions."""
     func_list, param_names = [], []
-    latent_factors = labels["latent_factors"]
-    all_factors = labels["all_factors"]
+    latent_factors = labels.latent_factors
+    all_factors = labels.all_factors

     for factor in latent_factors:
         spec = model_dict["factors"][factor]["transition_function"]
@@ -379,7 +426,7 @@ def _extract_factor(states, pos):
         return states[pos]

-    for i, factor in enumerate(labels["all_factors"]):
+    for i, factor in enumerate(labels.all_factors):
         functions[factor] = partial(_extract_factor, pos=i)

     transition_function = concatenate_functions(
@@ -397,47 +444,48 @@ def _get_transition_info(
         func = vmap(func, in_axes=(None, 0))
         individual_functions[factor] = func

-    out = {
-        "func": transition_function,
-        "param_names": dict(zip(latent_factors, param_names, strict=False)),
-        "individual_functions": individual_functions,
-        "function_names": dict(zip(latent_factors, function_names, strict=False)),
-    }
-    return out
+    return TransitionInfo(
+        func=transition_function,
+        param_names=dict(zip(latent_factors, param_names, strict=False)),
+        individual_functions=individual_functions,
+        function_names=dict(zip(latent_factors, function_names, strict=False)),
+    )


 def _get_endogenous_factors_info(
     has_endogenous_factors: bool,
     model_dict: dict[str, Any],
-    labels: dict[str, Any],
+    labels: Labels,
     bounds_distance: float,
-) -> dict[str, Any]:
+) -> EndogenousFactorsInfo:
     """Collect information about endogenous factors."""
+    factor_info = {}
+    for fac, v in model_dict["factors"].items():
+        factor_info[fac] = FactorEndogenousInfo(
+            is_state=(
+                not v.get("is_endogenous", False) and not v.get("is_correction", False)
+            ),
+            is_endogenous=v.get("is_endogenous", False),
+            is_correction=v.get("is_correction", False),
+        )
+
-    endogenous_factors_info = {
-        "has_endogenous_factors": has_endogenous_factors,
-        "aug_periods_to_aug_period_meas_types": _get_aug_periods_to_aug_period_meas_types(  # noqa: E501
-            aug_periods=labels["aug_periods_to_periods"].keys(),
+    return EndogenousFactorsInfo(
+        has_endogenous_factors=has_endogenous_factors,
+        aug_periods_to_aug_period_meas_types=_get_aug_periods_to_aug_period_meas_types(
+            aug_periods=labels.aug_periods_to_periods.keys(),
             has_endogenous_factors=has_endogenous_factors,
         ),
-        "bounds_distance": bounds_distance,
-        "aug_periods_from_period": partial(
+        bounds_distance=bounds_distance,
+        aug_periods_from_period=partial(
             _aug_periods_from_period,
-            aug_periods_to_periods=labels["aug_periods_to_periods"],
+            aug_periods_to_periods=labels.aug_periods_to_periods,
         ),
-    }
-    for fac, v in model_dict["factors"].items():
-        endogenous_factors_info[fac] = {
-            "is_state": (
-                not v.get("is_endogenous", False) and not v.get("is_correction", False)
-            ),
-            "is_endogenous": v.get("is_endogenous", False),
-            "is_correction": v.get("is_correction", False),
-        }
-    return endogenous_factors_info
+        factor_info=factor_info,
+    )


 def _get_aug_periods_to_aug_period_meas_types(
-    aug_periods: list[int], has_endogenous_factors: bool
+    aug_periods, has_endogenous_factors: bool
 ) -> dict[int, Literal["states", "endogenous_factors"]]:
     if has_endogenous_factors:
         return {
@@ -447,43 +495,42 @@ def _get_aug_periods_to_aug_period_meas_types(
     return dict.fromkeys(aug_periods, "states")


-def _get_update_info(model_dict, dimensions, labels, anchoring_info):
+def _get_update_info(
+    model_dict: dict, dimensions: Dimensions, labels: Labels, anchoring_info: Anchoring
+) -> DataFrame:
     """Construct a DataFrame with information on each Kalman update.

     Args:
-        model_dict (dict): The model specification. See: :ref:`model_specs`
-        dimensions (dict): Dimensional information like n_states, n_periods, n_controls,
-            n_mixtures. See :ref:`dimensions`.
-        labels (dict): Dict of lists with labels for the model quantities like
-            factors, periods, controls, stagemap and stages. See :ref:`labels`
-        anchoring_info (dict): Information about anchoring. See :ref:`anchoring`
+        model_dict: The model specification. See: :ref:`model_specs`
+        dimensions: Dimensional information.
+        labels: Labels for model quantities.
+        anchoring_info: Information about anchoring. See :ref:`anchoring`

     Returns:
-        pandas.DataFrame: DataFrame with one row per Kalman update needed in
-            the likelihood function. See :ref:`update_info`.
+        DataFrame with one row per Kalman update needed in the likelihood function.

     """
     index = pd.MultiIndex(
         levels=[[], []], codes=[[], []], names=["aug_period", "variable"]
     )
-    uinfo = DataFrame(index=index, columns=labels["latent_factors"] + ["purpose"])
+    uinfo = DataFrame(index=index, columns=[*labels.latent_factors, "purpose"])

     measurements = {}
-    for factor in labels["latent_factors"]:
+    for factor in labels.latent_factors:
         measurements[factor] = model_dict["factors"][factor]["measurements"]
-        if len(measurements[factor]) != dimensions["n_aug_periods"]:
+        if len(measurements[factor]) != dimensions.n_aug_periods:
             raise ValueError(
                 "Measurements must be of length `n_aug_periods`, "
                 f"got {measurements[factor]} for {factor}"
             )

-    for aug_period in labels["aug_periods"]:
-        for factor in labels["latent_factors"]:
+    for aug_period in labels.aug_periods:
+        for factor in labels.latent_factors:
             for meas in measurements[factor][aug_period]:
                 uinfo.loc[(aug_period, meas), factor] = True
                 uinfo.loc[(aug_period, meas), "purpose"] = "measurement"
-        for factor in anchoring_info["factors"]:
-            outcome = anchoring_info["outcomes"][factor]
+        for factor in anchoring_info.factors:
+            outcome = anchoring_info.outcomes[factor]
             name = f"{outcome}_{factor}"
             uinfo.loc[(aug_period, name), factor] = True
             uinfo.loc[(aug_period, name), "purpose"] = "anchoring"
@@ -493,30 +540,30 @@ def _get_update_info(
     return uinfo


-def _process_normalizations(model_dict, dimensions, labels):
+def _process_normalizations(
+    model_dict: dict, dimensions: Dimensions, labels: Labels
+) -> dict[str, dict[str, list]]:
     """Process the normalizations of intercepts and factor loadings.

     Args:
-        model_dict (dict): The model specification. See: :ref:`model_specs`
-        dimensions (dict): Dimensional information like n_states, n_periods, n_controls,
-            n_mixtures. See :ref:`dimensions`.
-        labels (dict): Dict of lists with labels for the model quantities like
-            factors, periods, controls, stagemap and stages. See :ref:`labels`
+        model_dict: The model specification. See: :ref:`model_specs`
+        dimensions: Dimensional information.
+        labels: Labels for model quantities.

     Returns:
-        normalizations (dict): Nested dictionary with information on normalized factor
-            loadings and intercepts for each factor. See :ref:`normalizations`.
+        Nested dictionary with information on normalized factor loadings and
+        intercepts for each factor.

     """
     normalizations = {}
-    for factor in labels["latent_factors"]:
+    for factor in labels.latent_factors:
         normalizations[factor] = {}
         norminfo = model_dict["factors"][factor].get("normalizations", {})
         for norm_type in ["loadings", "intercepts"]:
             candidate = norminfo.get(
-                norm_type, [{} for _ in range(dimensions["n_aug_periods"])]
+                norm_type, [{} for _ in range(dimensions.n_aug_periods)]
             )
-            if not len(candidate) == dimensions["n_aug_periods"]:
+            if not len(candidate) == dimensions.n_aug_periods:
                 raise ValueError(
                     "Normalizations must be of length `n_aug_periods`, "
                     f"got {norminfo} for {factor}['{norm_type}']"

diff --git a/src/skillmodels/simulate_data.py b/src/skillmodels/simulate_data.py
index 3ae38799..4d2d808f 100644
--- a/src/skillmodels/simulate_data.py
+++ b/src/skillmodels/simulate_data.py
@@ -41,20 +41,20 @@ def simulate_dataset(model_dict, params, n_obs=None, data=None, policies=None):

     model = process_model(model_dict)

-    if model["labels"]["observed_factors"] and data is None:
+    if model["labels"].observed_factors and data is None:
         raise ValueError(
             "To simulate a model with observed factors, data cannot be None.",
         )

-    if model["labels"]["controls"] != ["constant"] and data is None:
+    if model["labels"].controls != ["constant"] and data is None:
         raise ValueError("To simulate a model with controls, data cannot be None.")

     if data is not None:
         processed_data = process_data(
             df=data,
-            has_endogenous_factors=model["endogenous_factors_info"][
-                "has_endogenous_factors"
-            ],
+            has_endogenous_factors=model[
+                "endogenous_factors_info"
+            ].has_endogenous_factors,
             labels=model["labels"],
             update_info=model["update_info"],
             anchoring_info=model["anchoring"],
@@ -73,7 +73,7 @@ def simulate_dataset(model_dict, params, n_obs=None, data=None, policies=None):
     else:
         control_data = jnp.ones((n_obs, 1))

-        n_periods = model["dimensions"]["n_periods"]
+        n_periods = model["dimensions"].n_periods
         observed_factors = jnp.zeros((n_periods, n_obs, 0))

     params_index = get_params_index(
@@ -91,9 +91,7 @@ def simulate_dataset(model_dict, params, n_obs=None, data=None, policies=None):
         update_info=model["update_info"],
         labels=model["labels"],
         anchoring=model["anchoring"],
-        has_endogenous_factors=model["endogenous_factors_info"][
-            "has_endogenous_factors"
-        ],
+        has_endogenous_factors=model["endogenous_factors_info"].has_endogenous_factors,
     )

     states, covs, log_weights, pardict = parse_params(
@@ -112,9 +110,7 @@ def simulate_dataset(model_dict, params, n_obs=None, data=None, policies=None):
         labels=model["labels"],
         dimensions=model["dimensions"],
         n_obs=n_obs,
-        has_endogenous_factors=model["endogenous_factors_info"][
-            "has_endogenous_factors"
-        ],
+        has_endogenous_factors=model["endogenous_factors_info"].has_endogenous_factors,
         update_info=model["update_info"],
         control_data=control_data,
         observed_factors=observed_factors,
@@ -125,8 +121,8 @@ def simulate_dataset(model_dict, params, n_obs=None, data=None, policies=None):
     # Create collapsed versions with user-facing periods
     latent_data = _collapse_aug_periods_to_periods(
         df=aug_latent_data,
-        factors=model["labels"]["latent_factors"],
-        aug_periods_to_periods=model["labels"]["aug_periods_to_periods"],
+        factors=model["labels"].latent_factors,
+        aug_periods_to_periods=model["labels"].aug_periods_to_periods,
         endogenous_factors_info=model["endogenous_factors_info"],
     )

@@ -143,21 +139,21 @@ def simulate_dataset(model_dict, params, n_obs=None, data=None, policies=None):
             "states": latent_data,
             "state_ranges": create_state_ranges(
                 latent_data,
model["labels"]["latent_factors"], + model["labels"].latent_factors, ), }, "anchored_states": { "states": anchored_latent_data, "state_ranges": create_state_ranges( anchored_latent_data, - model["labels"]["latent_factors"], + model["labels"].latent_factors, ), }, "aug_unanchored_states": { "states": aug_latent_data, "state_ranges": create_state_ranges( aug_latent_data, - model["labels"]["latent_factors"], + model["labels"].latent_factors, ), }, "aug_measurements": aug_measurements, @@ -192,23 +188,23 @@ def _simulate_dataset( """ policies = policies if policies is not None else [] - n_states = dimensions["n_latent_factors"] + n_states = dimensions.n_latent_factors if has_endogenous_factors: - n_aug_periods = dimensions["n_aug_periods"] - 1 + n_aug_periods = dimensions.n_aug_periods - 1 else: - n_aug_periods = dimensions["n_aug_periods"] + n_aug_periods = dimensions.n_aug_periods weights = np.exp(log_weights)[0] loadings_df = pd.DataFrame( data=pardict["loadings"], index=update_info.index, - columns=labels["latent_factors"], + columns=labels.latent_factors, ) control_params_df = pd.DataFrame( data=pardict["controls"], index=update_info.index, - columns=labels["controls"], + columns=labels.controls, ) meas_sds = pd.DataFrame( data=pardict["meas_sds"].reshape(-1, 1), @@ -219,7 +215,7 @@ def _simulate_dataset( shock_sds = pardict["shock_sds"] dist_args = [] - for mixture in range(dimensions["n_mixtures"]): + for mixture in range(dimensions.n_mixtures): args = { "mean": latent_states[0][mixture], "cov": covs[0][mixture].T @ covs[0][mixture], @@ -233,7 +229,7 @@ def _simulate_dataset( # if there is a shock in period t, add it here policies_t = [p for p in policies if p["aug_period"] == t] for policy in policies_t: - position = labels["latent_factors"].index(policy["factor"]) + position = labels.latent_factors.index(policy["factor"]) latent_states[t, :, position] += _get_shock( mean=policy["effect_size"], sd=policy["standard_deviation"], @@ -260,7 +256,7 @@ def _simulate_dataset( next_states = np.array( transform_sigma_points( sigma_points=states, - transition_func=transition_info["func"], + transition_func=transition_info.func, trans_coeffs=trans_coeffs, anchoring_scaling_factors=anchoring_scaling_factors, anchoring_constants=anchoring_constants, @@ -298,7 +294,7 @@ def _simulate_dataset( latent_data_by_period = [] for t in range(n_aug_periods): - lat = pd.DataFrame(data=latent_states[t], columns=labels["latent_factors"]) + lat = pd.DataFrame(data=latent_states[t], columns=labels.latent_factors) lat["aug_period"] = t latent_data_by_period.append(lat) @@ -325,16 +321,16 @@ def _collapse_aug_periods_to_periods( pd.DataFrame: DataFrame with "period" column instead of "aug_period" """ df = df.copy() - if not endogenous_factors_info["has_endogenous_factors"]: + if not endogenous_factors_info.has_endogenous_factors: return df.rename(columns={"aug_period": "period"}) df["period"] = df["aug_period"].map(aug_periods_to_periods) df["_aug_period_meas_type"] = df["aug_period"].map( - endogenous_factors_info["aug_periods_to_aug_period_meas_types"] + endogenous_factors_info.aug_periods_to_aug_period_meas_types ) endogenous_cols = [ - fac for fac in factors if endogenous_factors_info[fac]["is_endogenous"] + fac for fac in factors if endogenous_factors_info.factor_info[fac].is_endogenous ] state_cols = [fac for fac in factors if fac not in endogenous_cols] @@ -385,7 +381,7 @@ def generate_start_states(n_obs, dimensions, dist_args, weights): controls (np.ndarray): shape (n_obs, n_controls), """ - n_states = 
dimensions["n_latent_factors"] + n_states = dimensions.n_latent_factors if np.size(weights) == 1: out = multivariate_normal(size=n_obs, **dist_args[0]) else: diff --git a/src/skillmodels/transition_functions.py b/src/skillmodels/transition_functions.py index 16427076..af1c9c5d 100644 --- a/src/skillmodels/transition_functions.py +++ b/src/skillmodels/transition_functions.py @@ -88,7 +88,7 @@ def translog(states, params): def params_translog(factors): """Index tuples for the translog production function.""" names = ( - factors + list(factors) + [f"{factor} ** 2" for factor in factors] + [f"{a} * {b}" for a, b in combinations(factors, 2)] + ["constant"] @@ -195,7 +195,7 @@ def linear_and_squares(states, params): def params_linear_and_squares(factors): """Index tuples for the linear_and_squares production function.""" - names = factors + [f"{factor} ** 2" for factor in factors] + ["constant"] + names = list(factors) + [f"{factor} ** 2" for factor in factors] + ["constant"] return names @@ -236,7 +236,7 @@ def log_ces_general(states, params): def params_log_ces_general(factors): """Index tuples for the generalized log_ces production function.""" - return factors + [f"sigma_{fac}" for fac in factors] + ["tfp"] + return list(factors) + [f"sigma_{fac}" for fac in factors] + ["tfp"] def identity_constraints_log_ces_general(factors, aug_period, all_factors): diff --git a/src/skillmodels/types.py b/src/skillmodels/types.py new file mode 100644 index 00000000..455bc317 --- /dev/null +++ b/src/skillmodels/types.py @@ -0,0 +1,202 @@ +"""Dataclass definitions for skillmodels internal data structures.""" + +from collections.abc import Callable +from dataclasses import dataclass +from typing import Literal + +import pandas as pd +from jax import Array + + +@dataclass(frozen=True) +class Dimensions: + """Dimensional information for a skill formation model. + + All fields represent counts of model components. + """ + + n_latent_factors: int + n_observed_factors: int + n_controls: int + n_mixtures: int + n_aug_periods: int + n_periods: int + + @property + def n_all_factors(self) -> int: + """Total number of factors (latent + observed).""" + return self.n_latent_factors + self.n_observed_factors + + +@dataclass(frozen=True) +class Labels: + """Labels for model quantities. + + Contains string identifiers for factors, periods, controls, and stages. + """ + + latent_factors: tuple[str, ...] + observed_factors: tuple[str, ...] + controls: tuple[str, ...] + periods: tuple[int, ...] + stagemap: tuple[int, ...] + stages: tuple[int, ...] + aug_periods: tuple[int, ...] + aug_periods_to_periods: dict[int, int] + aug_stagemap: tuple[int, ...] + aug_stages: tuple[int, ...] + aug_stages_to_stages: dict[int, int] + transition_names: tuple[str, ...] = () + + @property + def all_factors(self) -> tuple[str, ...]: + """All factor names (latent + observed).""" + return self.latent_factors + self.observed_factors + + +@dataclass(frozen=True) +class Anchoring: + """Information about how latent factors are anchored to observed outcomes.""" + + anchoring: bool + outcomes: dict[str, str] + factors: tuple[str, ...] 
+ free_controls: bool + free_constant: bool + free_loadings: bool + ignore_constant_when_anchoring: bool + + +@dataclass(frozen=True) +class EstimationOptions: + """Tuning parameters for the estimation.""" + + sigma_points_scale: float + robust_bounds: bool + bounds_distance: float + clipping_lower_bound: float | None + clipping_upper_bound: float | None + clipping_lower_hardness: float + clipping_upper_hardness: float + + +@dataclass(frozen=True) +class TransitionInfo: + """Information about transition functions.""" + + func: Callable + param_names: dict[str, list[str]] + individual_functions: dict[str, Callable] + function_names: dict[str, str] + + +@dataclass(frozen=True) +class FactorEndogenousInfo: + """Endogeneity information for a single factor.""" + + is_state: bool + is_endogenous: bool + is_correction: bool + + +@dataclass(frozen=True) +class EndogenousFactorsInfo: + """Information about endogenous factors in the model.""" + + has_endogenous_factors: bool + aug_periods_to_aug_period_meas_types: dict[ + int, Literal["states", "endogenous_factors"] + ] + bounds_distance: float + aug_periods_from_period: Callable[[int], list[int]] + factor_info: dict[str, FactorEndogenousInfo] + + +@dataclass(frozen=True) +class ProcessedModel: + """Complete processed model specification. + + This is the main output of process_model() containing all information + needed for estimation. + """ + + dimensions: Dimensions + labels: Labels + anchoring: Anchoring + estimation_options: EstimationOptions + transition_info: TransitionInfo + update_info: pd.DataFrame + normalizations: dict[str, dict[str, list]] + endogenous_factors_info: EndogenousFactorsInfo + + +@dataclass(frozen=True) +class LoadingsParsingInfo: + """Information for parsing factor loadings from parameter vector.""" + + slice: Array | slice + flat_indices: Array + shape: tuple[int, ...] + size: int + + +@dataclass(frozen=True) +class ParsingInfo: + """Information for parsing the parameter vector. + + Maps model quantities to positions or slices of the parameter vector. + """ + + initial_states: Array | slice + initial_cholcovs: Array | slice + mixture_weights: Array | slice + controls: Array | slice + meas_sds: Array | slice + shock_sds: Array | slice + loadings: LoadingsParsingInfo + transition: dict[str, Array | slice] + is_anchoring_loading: Array + is_anchored_factor: Array + is_anchoring_update: Array + ignore_constant_when_anchoring: bool + has_endogenous_factors: bool + + +@dataclass(frozen=True) +class ParsedParams: + """Parsed parameters from the flat parameter vector. + + Contains all model parameters in structured arrays. + """ + + controls: Array + loadings: Array + meas_sds: Array + shock_sds: Array + transition: dict[str, Array] + anchoring_scaling_factors: Array + anchoring_constants: Array + + +@dataclass(frozen=True) +class ProcessedData: + """Processed data arrays for estimation. + + All arrays are JAX arrays ready for use in the likelihood function. + """ + + measurements: Array + controls: Array + observed_factors: Array + + +@dataclass(frozen=True) +class KalmanState: + """State carried through Kalman filter iterations. + + Used as the carry state in jax.lax.scan. 
+    """
+
+    states: Array
+    upper_chols: Array
+    log_mixture_weights: Array
diff --git a/src/skillmodels/utilities.py b/src/skillmodels/utilities.py
index dd4f385b..1e41a0ab 100644
--- a/src/skillmodels/utilities.py
+++ b/src/skillmodels/utilities.py
@@ -104,7 +104,7 @@ def remove_factors(factors, model_dict, params=None):
     # Remove periods if necessary, but only if no endogenous factors are present.
     # (else we would mess up the mapping between raw periods and model periods)
     if not has_endogenous_factors:
-        new_n_periods = get_dimensions(out, has_endogenous_factors)["n_periods"]
+        new_n_periods = get_dimensions(out, has_endogenous_factors).n_periods
         out = reduce_n_periods(out, new_n_periods)
 
     if params is not None:
diff --git a/src/skillmodels/visualize_factor_distributions.py b/src/skillmodels/visualize_factor_distributions.py
index 8d9dea6a..a56aa82c 100644
--- a/src/skillmodels/visualize_factor_distributions.py
+++ b/src/skillmodels/visualize_factor_distributions.py
@@ -221,7 +221,7 @@ def univariate_densities(
         states=states,
         period=period,
         factors=factors,
-        aug_periods_to_periods=model["labels"]["aug_periods_to_periods"],
+        aug_periods_to_periods=model["labels"].aug_periods_to_periods,
         observed_states=observed_states,
     )
     scenarios = df["scenario"].unique()
@@ -332,7 +332,7 @@ def bivariate_density_contours(
         states=states,
         period=period,
         factors=factors,
-        aug_periods_to_periods=model["labels"]["aug_periods_to_periods"],
+        aug_periods_to_periods=model["labels"].aug_periods_to_periods,
         observed_states=observed_states,
     )
     plots_dict = {}
@@ -457,7 +457,7 @@ def bivariate_density_surfaces(
         states=states,
         period=period,
         factors=factors,
-        aug_periods_to_periods=model["labels"]["aug_periods_to_periods"],
+        aug_periods_to_periods=model["labels"].aug_periods_to_periods,
         observed_states=observed_states,
     )
     plots_dict = {}
@@ -666,9 +666,9 @@ def _get_factors(factors, observed_factors, model):
     """Process factor names to return list of strings."""
     if factors is None:
         if observed_factors:
-            factors = model["labels"]["all_factors"]
+            factors = model["labels"].all_factors
         else:
-            factors = model["labels"]["latent_factors"]
+            factors = model["labels"].latent_factors
     return factors
diff --git a/src/skillmodels/visualize_transition_equations.py b/src/skillmodels/visualize_transition_equations.py
index 4dc5304b..0ec8c019 100644
--- a/src/skillmodels/visualize_transition_equations.py
+++ b/src/skillmodels/visualize_transition_equations.py
@@ -178,23 +178,23 @@ def get_transition_plots(
 
     model = process_model(model_dict)
 
-    if period >= model["labels"]["periods"][-1]:
+    if period >= model["labels"].periods[-1]:
         raise ValueError(
             "*period* must be the penultimate period of the model or earlier.",
         )
     if (
         include_correction_factors
-        or not model["endogenous_factors_info"]["has_endogenous_factors"]
+        or not model["endogenous_factors_info"].has_endogenous_factors
     ):
-        latent_factors = model["labels"]["latent_factors"]
+        latent_factors = model["labels"].latent_factors
     else:
         latent_factors = [
             lf
-            for lf in model["labels"]["latent_factors"]
-            if not model["endogenous_factors_info"][lf]["is_correction"]
+            for lf in model["labels"].latent_factors
+            if not model["endogenous_factors_info"].factor_info[lf].is_correction
         ]
-    all_factors = model["labels"]["all_factors"]
+    all_factors = model["labels"].all_factors
     states = get_filtered_states(model_dict=model_dict, data=data, params=params)[
         "anchored_states"
     ]["states"]
@@ -270,7 +270,7 @@ def _get_dictionary_with_plots(
         for each input and output factor.
""" - observed_factors = model["labels"]["observed_factors"] + observed_factors = model["labels"].observed_factors states_data = _get_states_data(model, period, data, states, observed_factors) params = _set_index_params(model, params) pardict = _get_pardict(model, params) @@ -281,21 +281,21 @@ def _get_dictionary_with_plots( title_kwargs=None, showlegend=showlegend, ) - has_endogenous_factors = model["endogenous_factors_info"]["has_endogenous_factors"] + has_endogenous_factors = model["endogenous_factors_info"].has_endogenous_factors if has_endogenous_factors: - _aug_periods = model["endogenous_factors_info"]["aug_periods_from_period"]( - period - ) + _aug_periods = model["endogenous_factors_info"].aug_periods_from_period(period) else: _aug_periods = [period] plots_dict = {} for output_factor, input_factor in itertools.product(latent_factors, all_factors): - transition_function = model["transition_info"]["individual_functions"][ + transition_function = model["transition_info"].individual_functions[ output_factor ] if ( has_endogenous_factors - and model["endogenous_factors_info"][output_factor]["is_endogenous"] + and model["endogenous_factors_info"] + .factor_info[output_factor] + .is_endogenous ): aug_period = min(_aug_periods) else: @@ -368,9 +368,7 @@ def _get_pardict(model, params): update_info=model["update_info"], labels=model["labels"], anchoring=model["anchoring"], - has_endogenous_factors=model["endogenous_factors_info"][ - "has_endogenous_factors" - ], + has_endogenous_factors=model["endogenous_factors_info"].has_endogenous_factors, ) _, _, _, pardict = parse_params( @@ -407,19 +405,19 @@ def _get_states_data(model, period, data, states, observed_factors): if observed_factors: _observed_arr = process_data( df=data, - has_endogenous_factors=model["endogenous_factors_info"][ - "has_endogenous_factors" - ], + has_endogenous_factors=model[ + "endogenous_factors_info" + ].has_endogenous_factors, labels=model["labels"], update_info=model["update_info"], anchoring_info=model["anchoring"], )["observed_factors"] # convert from jax to numpy _observed_arr = np.array(_observed_arr) - if model["endogenous_factors_info"]["has_endogenous_factors"]: + if model["endogenous_factors_info"].has_endogenous_factors: both_aug_periods = [ aug_p - for aug_p, p in model["labels"]["aug_periods_to_periods"].items() + for aug_p, p in model["labels"].aug_periods_to_periods.items() if p == period ] to_concat = [] @@ -464,7 +462,7 @@ def _prepare_data_for_one_plot_fixed_quantile_2d( transition_params, all_factors, ): - period_data = states_data.query(f"aug_period == {aug_period}")[all_factors] + period_data = states_data.query(f"aug_period == {aug_period}")[list(all_factors)] input_min = state_ranges[input_factor].loc[aug_period]["minimum"] input_max = state_ranges[input_factor].loc[aug_period]["maximum"] to_concat = [] @@ -474,7 +472,7 @@ def _prepare_data_for_one_plot_fixed_quantile_2d( fixed_quantiles = period_data.drop(columns=input_factor).quantile(quantile) for col, val in fixed_quantiles.items(): input_data[col] = val - input_arr = jnp.array(input_data[all_factors].to_numpy()) + input_arr = jnp.array(input_data[list(all_factors)].to_numpy()) # convert from jax to numpy array output_arr = np.array(transition_function(transition_params, input_arr)) quantile_data = pd.DataFrame() @@ -521,7 +519,7 @@ def _prepare_data_for_one_plot_average_2d( input_data[input_factor] = np.linspace(input_min, input_max, n_points) for col, val in draw.items(): input_data[col] = val - input_arr = 
jnp.array(input_data[all_factors].to_numpy()) + input_arr = jnp.array(input_data[list(all_factors)].to_numpy()) # convert from jax to numpy array output_arr = np.array(transition_function(transition_params, input_arr)) draw_data = pd.DataFrame() diff --git a/tests/test_constraints.py b/tests/test_constraints.py index ca3bb103..9a9595f9 100644 --- a/tests/test_constraints.py +++ b/tests/test_constraints.py @@ -18,6 +18,7 @@ add_bounds, ) from skillmodels.process_model import process_model +from skillmodels.types import Anchoring, Labels # importing the TEST_DIR from config does not work for test run in conda build TEST_DIR = Path(__file__).parent.resolve() @@ -172,11 +173,20 @@ def test_stage_constraints_with_endogenous_factors(): def test_constant_factor_constraints(): - labels = { - "latent_factors": ["fac1", "fac2"], - "aug_periods": [0, 1, 2], - "transition_names": ["bla", "constant"], - } + labels = Labels( + latent_factors=("fac1", "fac2"), + observed_factors=(), + controls=("constant",), + periods=(0, 1, 2), + stagemap=(0, 0, 0), + stages=(0,), + aug_periods=(0, 1, 2), + aug_periods_to_periods={0: 0, 1: 1, 2: 2}, + aug_stagemap=(0, 0, 0), + aug_stages=(0,), + aug_stages_to_stages={0: 0}, + transition_names=("bla", "constant"), + ) expected = [ {"loc": ("shock_sds", 0, "fac2", "-"), "type": "fixed", "value": 0.0}, @@ -217,12 +227,20 @@ def test_initial_mean_constraints(): def test_trans_coeff_constraints(): - labels = { - "latent_factors": ["fac1", "fac2", "fac3"], - "transition_names": ["log_ces", "bla", "blubb"], - "aug_periods": [0, 1, 2], - } - labels["all_factors"] = labels["latent_factors"] + labels = Labels( + latent_factors=("fac1", "fac2", "fac3"), + observed_factors=(), + controls=("constant",), + periods=(0, 1, 2), + stagemap=(0, 0, 0), + stages=(0,), + aug_periods=(0, 1, 2), + aug_periods_to_periods={0: 0, 1: 1, 2: 2}, + aug_stagemap=(0, 0, 0), + aug_stages=(0,), + aug_stages_to_stages={0: 0}, + transition_names=("log_ces", "bla", "blubb"), + ) expected = [ { @@ -271,14 +289,15 @@ def anch_uinfo(): @pytest.fixture def base_anchoring_info(): - anch_info = { - "factors": ["f1", "f2"], - "outcomes": {"f1": "outcome", "f2": "outcome"}, - "free_controls": True, - "free_constant": True, - "free_loadings": True, - } - return anch_info + return Anchoring( + anchoring=True, + factors=("f1", "f2"), + outcomes={"f1": "outcome", "f2": "outcome"}, + free_controls=True, + free_constant=True, + free_loadings=True, + ignore_constant_when_anchoring=False, + ) def test_anchoring_constraints_no_constraint_needed(anch_uinfo, base_anchoring_info): @@ -287,8 +306,16 @@ def test_anchoring_constraints_no_constraint_needed(anch_uinfo, base_anchoring_i def test_anchoring_constraints_for_constants(anch_uinfo, base_anchoring_info): - base_anchoring_info["free_constant"] = False - calculated = _get_anchoring_constraints(anch_uinfo, [], base_anchoring_info, (0, 1)) + anchoring_info = Anchoring( + anchoring=True, + factors=("f1", "f2"), + outcomes={"f1": "outcome", "f2": "outcome"}, + free_controls=True, + free_constant=False, + free_loadings=True, + ignore_constant_when_anchoring=False, + ) + calculated = _get_anchoring_constraints(anch_uinfo, [], anchoring_info, (0, 1)) del calculated[0]["description"] expected = [ @@ -308,11 +335,19 @@ def test_anchoring_constraints_for_constants(anch_uinfo, base_anchoring_info): def test_anchoring_constraints_for_controls(anch_uinfo, base_anchoring_info): - base_anchoring_info["free_controls"] = False + anchoring_info = Anchoring( + anchoring=True, + factors=("f1", 
"f2"), + outcomes={"f1": "outcome", "f2": "outcome"}, + free_controls=False, + free_constant=True, + free_loadings=True, + ignore_constant_when_anchoring=False, + ) calculated = _get_anchoring_constraints( anch_uinfo, ["c1", "c2"], - base_anchoring_info, + anchoring_info, (0, 1), ) @@ -340,8 +375,16 @@ def test_anchoring_constraints_for_controls(anch_uinfo, base_anchoring_info): def test_anchoring_constraints_for_loadings(anch_uinfo, base_anchoring_info): - base_anchoring_info["free_loadings"] = False - calculated = _get_anchoring_constraints(anch_uinfo, [], base_anchoring_info, (0, 1)) + anchoring_info = Anchoring( + anchoring=True, + factors=("f1", "f2"), + outcomes={"f1": "outcome", "f2": "outcome"}, + free_controls=True, + free_constant=True, + free_loadings=False, + ignore_constant_when_anchoring=False, + ) + calculated = _get_anchoring_constraints(anch_uinfo, [], anchoring_info, (0, 1)) expected = [ { diff --git a/tests/test_correlation_heatmap.py b/tests/test_correlation_heatmap.py index 6f7e62ab..9ec8f775 100644 --- a/tests/test_correlation_heatmap.py +++ b/tests/test_correlation_heatmap.py @@ -10,6 +10,7 @@ _get_quasi_factor_scores_data_for_single_period, _process_factors, ) +from skillmodels.types import Labels def test_get_measurement_data_with_single_period(): @@ -243,7 +244,19 @@ def test_get_factor_scores_data_with_multiple_period(): def test_process_factors(): model = { - "labels": {"latent_factors": list("abcd"), "observed_factors": list("efg")}, + "labels": Labels( + latent_factors=tuple("abcd"), + observed_factors=tuple("efg"), + controls=("constant",), + periods=(0,), + stagemap=(0,), + stages=(0,), + aug_periods=(0,), + aug_periods_to_periods={0: 0}, + aug_stagemap=(0,), + aug_stages=(0,), + aug_stages_to_stages={0: 0}, + ), } latent_factor = "c" observed_factor = "g" diff --git a/tests/test_params_index.py b/tests/test_params_index.py index 9e10d61f..6f84df66 100644 --- a/tests/test_params_index.py +++ b/tests/test_params_index.py @@ -16,6 +16,7 @@ initial_mean_index_tuples, ) from skillmodels.process_model import process_model +from skillmodels.types import TransitionInfo @pytest.fixture @@ -182,7 +183,12 @@ def test_trans_coeffs_index_tuples_no_endogenous_factors(): "fac2": [], "fac3": ["fac1", "fac2", "fac3", "phi"], } - trans_info = {"param_names": param_names} + trans_info = TransitionInfo( + func=lambda x: x, # dummy function + param_names=param_names, + individual_functions={}, + function_names={}, + ) expected = [ ("transition", 0, "fac1", "fac1"), @@ -220,7 +226,12 @@ def test_trans_coeffs_index_tuples_has_endogenous_factors(): "fac2": [], "fac3": ["fac1", "fac2", "fac3", "phi"], } - trans_info = {"param_names": param_names} + trans_info = TransitionInfo( + func=lambda x: x, # dummy function + param_names=param_names, + individual_functions={}, + function_names={}, + ) expected = [ ("transition", 0, "fac1", "fac1"), diff --git a/tests/test_parse_params.py b/tests/test_parse_params.py index 33ffbe88..c7f7e956 100644 --- a/tests/test_parse_params.py +++ b/tests/test_parse_params.py @@ -16,6 +16,7 @@ from skillmodels.parse_params import create_parsing_info, parse_params from skillmodels.process_model import process_model +from skillmodels.types import Anchoring @pytest.fixture @@ -36,7 +37,15 @@ def parsed_parameters(): dimensions = processed["dimensions"] # this overwrites the anchoring setting from the model specification to get a # more meaningful test - anchoring = {"ignore_constant_when_anchoring": False} + anchoring = Anchoring( + anchoring=False, + 
outcomes={}, + factors=(), + free_controls=True, + free_constant=True, + free_loadings=True, + ignore_constant_when_anchoring=False, + ) parsing_info = create_parsing_info( params_index=p_index, diff --git a/tests/test_process_data.py b/tests/test_process_data.py index 4ec797e6..274a81c8 100644 --- a/tests/test_process_data.py +++ b/tests/test_process_data.py @@ -18,6 +18,7 @@ pre_process_data, ) from skillmodels.process_model import process_model +from skillmodels.types import Labels # importing the TEST_DIR from config does not work for test run in conda build TEST_DIR = Path(__file__).parent.resolve() @@ -64,7 +65,7 @@ def simplest_augmented(): def test_augment_data_for_endogenous_factors(simplest_augmented): model = process_model(simplest_augmented["model_dict"]) pre_processed_data = pre_process_data( - simplest_augmented["data_input"], model["labels"]["periods"] + simplest_augmented["data_input"], model["labels"].periods ) pre_processed_data["constant"] = 1 res = _augment_data_for_endogenous_factors( @@ -122,7 +123,19 @@ def test_generate_controls_array(): """ data = _read_csv_string(csv, ["id", "aug_period"]) - labels = {"controls": ["c1", "c2"], "aug_periods": [0, 1]} + labels = Labels( + latent_factors=(), + observed_factors=(), + controls=("c1", "c2"), + periods=(0, 1), + stagemap=(0, 0), + stages=(0,), + aug_periods=(0, 1), + aug_periods_to_periods={0: 0, 1: 1}, + aug_stagemap=(0, 0), + aug_stages=(0,), + aug_stages_to_stages={0: 0}, + ) calculated = _generate_controls_array(data, labels, 2) expected = jnp.array([[[1, 2], [5, 8]], [[3, 4], [7, 8]]]) @@ -139,7 +152,19 @@ def test_generate_observed_factor_array(): """ data = _read_csv_string(csv, ["id", "aug_period"]) - labels = {"observed_factors": ["v1", "v2"], "aug_periods": [0, 1]} + labels = Labels( + latent_factors=(), + observed_factors=("v1", "v2"), + controls=("constant",), + periods=(0, 1), + stagemap=(0, 0), + stages=(0,), + aug_periods=(0, 1), + aug_periods_to_periods={0: 0, 1: 1}, + aug_stagemap=(0, 0), + aug_stages=(0,), + aug_stages_to_stages={0: 0}, + ) calculated = _generate_observed_factor_array(data, labels, 2) expected = jnp.array([[[1, 2], [5, 8]], [[3, 4], [7, 8]]]) diff --git a/tests/test_process_model.py b/tests/test_process_model.py index ab808814..1dcbe056 100644 --- a/tests/test_process_model.py +++ b/tests/test_process_model.py @@ -7,6 +7,7 @@ from pandas.testing import assert_frame_equal from skillmodels.process_model import get_has_endogenous_factors, process_model +from skillmodels.types import TransitionInfo # ====================================================================================== # Integration test with model2 from the replication files of CHS2010 @@ -25,55 +26,54 @@ def model2(): def test_has_endogenous_factors(model2): assert ( - process_model(model2)["endogenous_factors_info"]["has_endogenous_factors"] - == False + process_model(model2)["endogenous_factors_info"].has_endogenous_factors == False ) def test_dimensions(model2): res = process_model(model2)["dimensions"] - assert res["n_latent_factors"] == 3 - assert res["n_observed_factors"] == 0 - assert res["n_all_factors"] == 3 - assert res["n_periods"] == 8 - assert res["n_controls"] == 2 - assert res["n_mixtures"] == 1 + assert res.n_latent_factors == 3 + assert res.n_observed_factors == 0 + assert res.n_all_factors == 3 + assert res.n_periods == 8 + assert res.n_controls == 2 + assert res.n_mixtures == 1 def test_labels(model2): res = process_model(model2)["labels"] - assert res["latent_factors"] == ["fac1", "fac2", "fac3"] - 
assert res["observed_factors"] == [] - assert res["all_factors"] == ["fac1", "fac2", "fac3"] - assert res["controls"] == ["constant", "x1"] - assert res["periods"] == [0, 1, 2, 3, 4, 5, 6, 7] - assert res["stagemap"] == [0, 0, 0, 0, 0, 0, 0] - assert res["stages"] == [0] + assert res.latent_factors == ("fac1", "fac2", "fac3") + assert res.observed_factors == () + assert res.all_factors == ("fac1", "fac2", "fac3") + assert res.controls == ("constant", "x1") + assert res.periods == (0, 1, 2, 3, 4, 5, 6, 7) + assert res.stagemap == (0, 0, 0, 0, 0, 0, 0) + assert res.stages == (0,) def test_estimation_options(model2): res = process_model(model2)["estimation_options"] - assert res["sigma_points_scale"] == 2 - assert res["robust_bounds"] - assert res["bounds_distance"] == 0.001 + assert res.sigma_points_scale == 2 + assert res.robust_bounds + assert res.bounds_distance == 0.001 def test_anchoring(model2): res = process_model(model2)["anchoring"] - assert res["outcomes"] == {"fac1": "Q1"} - assert res["factors"] == ["fac1"] - assert res["free_controls"] - assert res["free_constant"] - assert res["free_loadings"] + assert res.outcomes == {"fac1": "Q1"} + assert res.factors == ("fac1",) + assert res.free_controls + assert res.free_constant + assert res.free_loadings def test_transition_info(model2): res = process_model(model2)["transition_info"] - assert isinstance(res, dict) - assert callable(res["func"]) + assert isinstance(res, TransitionInfo) + assert callable(res.func) - assert list(inspect.signature(res["func"]).parameters) == ["params", "states"] + assert list(inspect.signature(res.func).parameters) == ["params", "states"] def test_update_info(model2): @@ -138,13 +138,13 @@ def test_anchoring_and_endogenous_factors_work_together(): # Should not raise - anchoring and endogenous factors now work together result = process_model(model_dict) # Verify anchoring is enabled - assert result["anchoring"]["anchoring"] - assert result["anchoring"]["factors"] == ["fac1"] + assert result["anchoring"].anchoring + assert result["anchoring"].factors == ("fac1",) # Verify endogenous factors are enabled - assert result["endogenous_factors_info"]["has_endogenous_factors"] + assert result["endogenous_factors_info"].has_endogenous_factors # Verify dimensions - assert result["dimensions"]["n_periods"] == 8 - assert result["dimensions"]["n_aug_periods"] == 16 + assert result["dimensions"].n_periods == 8 + assert result["dimensions"].n_aug_periods == 16 # Verify update_info has anchoring entries for all aug_periods anchoring_updates = result["update_info"][ result["update_info"]["purpose"] == "anchoring" @@ -173,9 +173,9 @@ def test_stagemap_with_endogenous_factors(): model_dict["stagemap"] = [0, 0, 1, 1, 2, 2, 3] del model_dict["anchoring"] model = process_model(model_dict) - assert model["labels"]["stagemap"] == model_dict["stagemap"] - assert model["labels"]["stages"] == [0, 1, 2, 3] - assert model["labels"]["aug_stagemap"] == [0, 1, 0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7] + assert model["labels"].stagemap == tuple(model_dict["stagemap"]) + assert model["labels"].stages == (0, 1, 2, 3) + assert model["labels"].aug_stagemap == (0, 1, 0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7) @pytest.fixture @@ -191,58 +191,58 @@ def model2_inv(): def test_with_endog_has_endogenous_factors(model2_inv): assert ( - process_model(model2_inv)["endogenous_factors_info"]["has_endogenous_factors"] + process_model(model2_inv)["endogenous_factors_info"].has_endogenous_factors == True ) def test_with_endog_dimensions(model2_inv): res = 
process_model(model2_inv)["dimensions"] - assert res["n_latent_factors"] == 3 - assert res["n_observed_factors"] == 0 - assert res["n_all_factors"] == 3 - assert res["n_aug_periods"] == 16 - assert res["n_periods"] == 8 - assert res["n_controls"] == 2 - assert res["n_mixtures"] == 1 + assert res.n_latent_factors == 3 + assert res.n_observed_factors == 0 + assert res.n_all_factors == 3 + assert res.n_aug_periods == 16 + assert res.n_periods == 8 + assert res.n_controls == 2 + assert res.n_mixtures == 1 def test_with_endog_labels(model2_inv): res = process_model(model2_inv)["labels"] n_aug_periods = 16 - assert res["latent_factors"] == ["fac1", "fac2", "fac3"] - assert res["observed_factors"] == [] - assert res["all_factors"] == ["fac1", "fac2", "fac3"] - assert res["controls"] == ["constant", "x1"] - assert res["aug_periods"] == list(range(n_aug_periods)) - assert res["periods"] == [0, 1, 2, 3, 4, 5, 6, 7] - assert res["aug_stagemap"] == list(range(n_aug_periods - 2)) - assert res["aug_stages"] == list(range(n_aug_periods - 2)) + assert res.latent_factors == ("fac1", "fac2", "fac3") + assert res.observed_factors == () + assert res.all_factors == ("fac1", "fac2", "fac3") + assert res.controls == ("constant", "x1") + assert res.aug_periods == tuple(range(n_aug_periods)) + assert res.periods == (0, 1, 2, 3, 4, 5, 6, 7) + assert res.aug_stagemap == tuple(range(n_aug_periods - 2)) + assert res.aug_stages == tuple(range(n_aug_periods - 2)) def test_with_endog_estimation_options(model2_inv): res = process_model(model2_inv)["estimation_options"] - assert res["sigma_points_scale"] == 2 - assert res["robust_bounds"] - assert res["bounds_distance"] == 0.001 + assert res.sigma_points_scale == 2 + assert res.robust_bounds + assert res.bounds_distance == 0.001 def test_with_endog_anchoring_is_empty(model2_inv): res = process_model(model2_inv)["anchoring"] - assert res["outcomes"] == {} - assert res["factors"] == [] - assert res["free_controls"] is False - assert res["free_constant"] is False - assert res["free_loadings"] is False + assert res.outcomes == {} + assert res.factors == () + assert res.free_controls is False + assert res.free_constant is False + assert res.free_loadings is False def test_with_endog_transition_info(model2_inv): res = process_model(model2_inv)["transition_info"] - assert isinstance(res, dict) - assert callable(res["func"]) + assert isinstance(res, TransitionInfo) + assert callable(res.func) - assert list(inspect.signature(res["func"]).parameters) == ["params", "states"] + assert list(inspect.signature(res.func).parameters) == ["params", "states"] def test_with_endog_update_info(model2_inv): From 776667209029b4d9e2c98c87727e5cc9a283e740 Mon Sep 17 00:00:00 2001 From: Hans-Martin von Gaudecker Date: Thu, 8 Jan 2026 19:29:52 +0100 Subject: [PATCH 02/27] Use frozendict for immutable dict fields in dataclasses MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace dict fields with frozendict in frozen dataclasses to ensure true immutability: - Labels.aug_periods_to_periods - Labels.aug_stages_to_stages - Anchoring.outcomes - TransitionInfo.param_names, individual_functions, function_names - EndogenousFactorsInfo.aug_periods_to_aug_period_meas_types, factor_info 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- pixi.lock | 45 ++++++++++++++++++++--------- pyproject.toml | 1 + src/skillmodels/process_model.py | 47 ++++++++++++++++++------------- src/skillmodels/types.py | 17 +++++------ 
tests/test_constraints.py | 17 +++++------ tests/test_correlation_heatmap.py | 5 ++-- tests/test_params_index.py | 13 +++++---- tests/test_parse_params.py | 3 +- tests/test_process_data.py | 9 +++--- 9 files changed, 94 insertions(+), 63 deletions(-) diff --git a/pixi.lock b/pixi.lock index ea662f25..e3c658cd 100644 --- a/pixi.lock +++ b/pixi.lock @@ -251,6 +251,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/fd/8e/424b8c6e78bd9837d14ff7df01a9829fc883ba2ab4ea787d4f848435f23f/greenlet-3.3.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/53/f2/44ad0ce1d115f0f6be10f4af0ca05a18afb838b06e6ca6b01ba4b0137421/jax_cuda12_pjrt-0.8.2-py3-none-manylinux_2_27_x86_64.whl @@ -272,7 +273,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/46/0c/c75bbfb967457a0b7670b8ad267bfc4fffdf341c074e0a80db06c24ccfd4/nvidia_nvjitlink_cu12-12.9.86-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl - pypi: https://files.pythonhosted.org/packages/64/b9/6ab941001c23cfb43499b5b0b7417b0bb4dfba3a29ffa2b06985422dad50/nvidia_nvshmem_cu12-3.5.19-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#e02ea4743cac9f861a5813f3b4b1283fd2ade730 + - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - pypi: https://files.pythonhosted.org/packages/15/07/284f757f63f8a8d69ed4472bfd85122bd086e637bf4ed09de572d575a693/pandas-2.3.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/0e/50/80a8d080ac7d3d321e5e5d420c9a522b0aa770ec7013ea91f9a8b7d36e4a/sqlalchemy-2.0.45-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl @@ -497,13 +498,14 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - pypi: 
https://files.pythonhosted.org/packages/fd/8e/424b8c6e78bd9837d14ff7df01a9829fc883ba2ab4ea787d4f848435f23f/greenlet-3.3.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/6b/e0/91e5762a7ddb6351b07c742ca407cd28e26043d6945d6228b6c1b0881a45/jaxlib-0.8.2-cp313-cp313-manylinux_2_27_x86_64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/eb/33/40cd74219417e78b97c47802037cf2d87b91973e18bb968a7da48a96ea44/ml_dtypes-0.5.4-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#e02ea4743cac9f861a5813f3b4b1283fd2ade730 + - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - pypi: https://files.pythonhosted.org/packages/15/07/284f757f63f8a8d69ed4472bfd85122bd086e637bf4ed09de572d575a693/pandas-2.3.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/0e/50/80a8d080ac7d3d321e5e5d420c9a522b0aa770ec7013ea91f9a8b7d36e4a/sqlalchemy-2.0.45-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl @@ -716,12 +718,13 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/c5/22/c0ec75e43a13b2457d78d509f49b49a57fa302ffced4f4a2778e428cb0a6/jaxlib-0.8.2-cp313-cp313-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/d9/a1/4008f14bbc616cfb1ac5b39ea485f9c63031c4634ab3f4cf72e7541f816a/ml_dtypes-0.5.4-cp313-cp313-macosx_10_13_universal2.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#e02ea4743cac9f861a5813f3b4b1283fd2ade730 + - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - pypi: https://files.pythonhosted.org/packages/31/94/72fac03573102779920099bcac1c3b05975c2cb5f01eac609faf34bed1ca/pandas-2.3.3-cp313-cp313-macosx_11_0_arm64.whl - pypi: 
https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/bf/e1/3ccb13c643399d22289c6a9786c1a91e3dcbb68bce4beb44926ac2c557bf/sqlalchemy-2.0.45-py3-none-any.whl @@ -939,13 +942,14 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/7e/71/ba21c3fb8c5dce83b8c01f458a42e99ffdb1963aeec08fff5a18588d8fd7/greenlet-3.3.0-cp313-cp313-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/85/68/25b38673b07a808616ce7b6efb3eed491f983f3373a09cbbd03f67178563/jaxlib-0.8.2-cp313-cp313-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/e1/8b/200088c6859d8221454825959df35b5244fa9bdf263fd0249ac5fb75e281/ml_dtypes-0.5.4-cp313-cp313-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#e02ea4743cac9f861a5813f3b4b1283fd2ade730 + - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - pypi: https://files.pythonhosted.org/packages/4f/c7/e54682c96a895d0c808453269e0b5928a07a127a15704fedb643e9b0a4c8/pandas-2.3.3-cp313-cp313-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/5a/dc/491b7661614ab97483abf2056be1deee4dc2490ecbf7bff9ab5cdbac86e1/pyreadline3-3.5.4-py3-none-any.whl @@ -1204,13 +1208,14 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/fd/8e/424b8c6e78bd9837d14ff7df01a9829fc883ba2ab4ea787d4f848435f23f/greenlet-3.3.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: 
https://files.pythonhosted.org/packages/6b/e0/91e5762a7ddb6351b07c742ca407cd28e26043d6945d6228b6c1b0881a45/jaxlib-0.8.2-cp313-cp313-manylinux_2_27_x86_64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/eb/33/40cd74219417e78b97c47802037cf2d87b91973e18bb968a7da48a96ea44/ml_dtypes-0.5.4-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#e02ea4743cac9f861a5813f3b4b1283fd2ade730 + - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - pypi: https://files.pythonhosted.org/packages/15/07/284f757f63f8a8d69ed4472bfd85122bd086e637bf4ed09de572d575a693/pandas-2.3.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/0e/50/80a8d080ac7d3d321e5e5d420c9a522b0aa770ec7013ea91f9a8b7d36e4a/sqlalchemy-2.0.45-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl @@ -1436,12 +1441,13 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/c5/22/c0ec75e43a13b2457d78d509f49b49a57fa302ffced4f4a2778e428cb0a6/jaxlib-0.8.2-cp313-cp313-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/d9/a1/4008f14bbc616cfb1ac5b39ea485f9c63031c4634ab3f4cf72e7541f816a/ml_dtypes-0.5.4-cp313-cp313-macosx_10_13_universal2.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#e02ea4743cac9f861a5813f3b4b1283fd2ade730 + - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - pypi: https://files.pythonhosted.org/packages/31/94/72fac03573102779920099bcac1c3b05975c2cb5f01eac609faf34bed1ca/pandas-2.3.3-cp313-cp313-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/bf/e1/3ccb13c643399d22289c6a9786c1a91e3dcbb68bce4beb44926ac2c557bf/sqlalchemy-2.0.45-py3-none-any.whl @@ -1664,13 +1670,14 @@ environments: - pypi: 
https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/7e/71/ba21c3fb8c5dce83b8c01f458a42e99ffdb1963aeec08fff5a18588d8fd7/greenlet-3.3.0-cp313-cp313-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/85/68/25b38673b07a808616ce7b6efb3eed491f983f3373a09cbbd03f67178563/jaxlib-0.8.2-cp313-cp313-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/e1/8b/200088c6859d8221454825959df35b5244fa9bdf263fd0249ac5fb75e281/ml_dtypes-0.5.4-cp313-cp313-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#e02ea4743cac9f861a5813f3b4b1283fd2ade730 + - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - pypi: https://files.pythonhosted.org/packages/4f/c7/e54682c96a895d0c808453269e0b5928a07a127a15704fedb643e9b0a4c8/pandas-2.3.3-cp313-cp313-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/5a/dc/491b7661614ab97483abf2056be1deee4dc2490ecbf7bff9ab5cdbac86e1/pyreadline3-3.5.4-py3-none-any.whl @@ -1961,6 +1968,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/fd/8e/424b8c6e78bd9837d14ff7df01a9829fc883ba2ab4ea787d4f848435f23f/greenlet-3.3.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/53/f2/44ad0ce1d115f0f6be10f4af0ca05a18afb838b06e6ca6b01ba4b0137421/jax_cuda12_pjrt-0.8.2-py3-none-manylinux_2_27_x86_64.whl @@ -1982,7 +1990,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/46/0c/c75bbfb967457a0b7670b8ad267bfc4fffdf341c074e0a80db06c24ccfd4/nvidia_nvjitlink_cu12-12.9.86-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl - pypi: 
https://files.pythonhosted.org/packages/64/b9/6ab941001c23cfb43499b5b0b7417b0bb4dfba3a29ffa2b06985422dad50/nvidia_nvshmem_cu12-3.5.19-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#e02ea4743cac9f861a5813f3b4b1283fd2ade730 + - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - pypi: https://files.pythonhosted.org/packages/15/07/284f757f63f8a8d69ed4472bfd85122bd086e637bf4ed09de572d575a693/pandas-2.3.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/0e/50/80a8d080ac7d3d321e5e5d420c9a522b0aa770ec7013ea91f9a8b7d36e4a/sqlalchemy-2.0.45-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl @@ -2240,13 +2248,14 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/fd/8e/424b8c6e78bd9837d14ff7df01a9829fc883ba2ab4ea787d4f848435f23f/greenlet-3.3.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/6b/e0/91e5762a7ddb6351b07c742ca407cd28e26043d6945d6228b6c1b0881a45/jaxlib-0.8.2-cp313-cp313-manylinux_2_27_x86_64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/eb/33/40cd74219417e78b97c47802037cf2d87b91973e18bb968a7da48a96ea44/ml_dtypes-0.5.4-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#e02ea4743cac9f861a5813f3b4b1283fd2ade730 + - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - pypi: https://files.pythonhosted.org/packages/15/07/284f757f63f8a8d69ed4472bfd85122bd086e637bf4ed09de572d575a693/pandas-2.3.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/64/20/69f2a39792a653fd64d916cd563ed79ec6e5dcfa6408c4674021d810afcf/pandas_stubs-2.3.3.251219-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl @@ -2476,12 +2485,13 @@ environments: - pypi: 
https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/c5/22/c0ec75e43a13b2457d78d509f49b49a57fa302ffced4f4a2778e428cb0a6/jaxlib-0.8.2-cp313-cp313-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/d9/a1/4008f14bbc616cfb1ac5b39ea485f9c63031c4634ab3f4cf72e7541f816a/ml_dtypes-0.5.4-cp313-cp313-macosx_10_13_universal2.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#e02ea4743cac9f861a5813f3b4b1283fd2ade730 + - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - pypi: https://files.pythonhosted.org/packages/31/94/72fac03573102779920099bcac1c3b05975c2cb5f01eac609faf34bed1ca/pandas-2.3.3-cp313-cp313-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/64/20/69f2a39792a653fd64d916cd563ed79ec6e5dcfa6408c4674021d810afcf/pandas_stubs-2.3.3.251219-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl @@ -2708,13 +2718,14 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/7e/71/ba21c3fb8c5dce83b8c01f458a42e99ffdb1963aeec08fff5a18588d8fd7/greenlet-3.3.0-cp313-cp313-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/85/68/25b38673b07a808616ce7b6efb3eed491f983f3373a09cbbd03f67178563/jaxlib-0.8.2-cp313-cp313-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/e1/8b/200088c6859d8221454825959df35b5244fa9bdf263fd0249ac5fb75e281/ml_dtypes-0.5.4-cp313-cp313-win_amd64.whl - pypi: 
https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#e02ea4743cac9f861a5813f3b4b1283fd2ade730 + - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - pypi: https://files.pythonhosted.org/packages/4f/c7/e54682c96a895d0c808453269e0b5928a07a127a15704fedb643e9b0a4c8/pandas-2.3.3-cp313-cp313-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/64/20/69f2a39792a653fd64d916cd563ed79ec6e5dcfa6408c4674021d810afcf/pandas_stubs-2.3.3.251219-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl @@ -3987,6 +3998,11 @@ packages: purls: [] size: 184553 timestamp: 1757946164012 +- pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl + name: frozendict + version: 2.4.7 + sha256: 972af65924ea25cf5b4d9326d549e69a9a4918d8a76a9d3a7cd174d98b237550 + requires_python: '>=3.6' - conda: https://conda.anaconda.org/conda-forge/linux-64/gcc_impl_linux-64-14.3.0-he8b2097_16.conda sha256: 4acf50b7d5673250d585a256a40aabdd922e0947ca12cdbad0cef960ee1a9509 md5: d274bf1343507683e6eb2954d1871569 @@ -7042,7 +7058,7 @@ packages: version: 3.4.0 sha256: 69bb92469f86a1565195ece4ac0323943e83477171b91d24c35afe028a90d7cd requires_python: '>=3.8' -- pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#e02ea4743cac9f861a5813f3b4b1283fd2ade730 +- pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 name: optimagic version: 0.5.3.dev30+ge02ea4743 requires_dist: @@ -8570,10 +8586,11 @@ packages: timestamp: 1753199211006 - pypi: ./ name: skillmodels - version: 0.0.53.dev37+gcc710a63e.d20260108 - sha256: 167bd74677526ae18b099ed73f00727ec8b124bba6895ba9afe3862d04fbcd84 + version: 0.0.24.dev238+g71e9d59be.d20260108 + sha256: f68c7eee242cae980939d7c3cffd40f1f449e1da7b739cbf8f1918dfe55e7fff requires_dist: - dags + - frozendict - jax>=0.8 - numpy - pandas diff --git a/pyproject.toml b/pyproject.toml index 0b00efd9..b055a3d0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,6 +35,7 @@ classifiers = [ requires-python = ">=3.13,<3.14" dependencies = [ "dags", + "frozendict", "jax>=0.8", "numpy", "pandas", diff --git a/src/skillmodels/process_model.py b/src/skillmodels/process_model.py index 1998b8fa..0a0b9bf6 100644 --- a/src/skillmodels/process_model.py +++ b/src/skillmodels/process_model.py @@ -6,6 +6,7 @@ import pandas as pd from dags import concatenate_functions from dags.signature import rename_arguments +from frozendict import frozendict from jax import vmap from pandas import DataFrame @@ -75,9 +76,11 @@ def process_model(model_dict): _model_dict_aug = model_dict endogenous_factors_info = EndogenousFactorsInfo( has_endogenous_factors=has_endogenous_factors, - aug_periods_to_aug_period_meas_types=_get_aug_periods_to_aug_period_meas_types( - aug_periods=labels.aug_periods_to_periods.keys(), - has_endogenous_factors=has_endogenous_factors, + aug_periods_to_aug_period_meas_types=frozendict( + _get_aug_periods_to_aug_period_meas_types( + aug_periods=labels.aug_periods_to_periods.keys(), + has_endogenous_factors=has_endogenous_factors, + ) ), bounds_distance=model_dict["estimation_options"].get( "bounds_distance", 1e-3 @@ -86,12 +89,14 @@ def process_model(model_dict): 
_aug_periods_from_period, aug_periods_to_periods=labels.aug_periods_to_periods, ), - factor_info={ - fac: FactorEndogenousInfo( - is_state=True, is_endogenous=False, is_correction=False - ) - for fac in labels.latent_factors - }, + factor_info=frozendict( + { + fac: FactorEndogenousInfo( + is_state=True, is_endogenous=False, is_correction=False + ) + for fac in labels.latent_factors + } + ), ) check_model( model_dict=_model_dict_aug, @@ -253,10 +258,10 @@ def _get_labels( stagemap=tuple(stagemap), stages=tuple(stages), aug_periods=tuple(aug_periods_to_periods.keys()), - aug_periods_to_periods=aug_periods_to_periods, + aug_periods_to_periods=frozendict(aug_periods_to_periods), aug_stagemap=tuple(aug_stagemap), aug_stages=tuple(sorted(int(v) for v in np.unique(aug_stagemap))), - aug_stages_to_stages=aug_stages_to_stages, + aug_stages_to_stages=frozendict(aug_stages_to_stages), ) @@ -308,7 +313,7 @@ def _process_anchoring(model_dict: dict) -> Anchoring: anch = model_dict["anchoring"] return Anchoring( anchoring=True, - outcomes=anch.get("outcomes", {}), + outcomes=frozendict(anch.get("outcomes", {})), factors=tuple(anch.get("outcomes", {}).keys()), free_controls=anch.get("free_controls", False), free_constant=anch.get("free_constant", False), @@ -320,7 +325,7 @@ def _process_anchoring(model_dict: dict) -> Anchoring: return Anchoring( anchoring=False, - outcomes={}, + outcomes=frozendict({}), factors=(), free_controls=False, free_constant=False, @@ -446,9 +451,9 @@ def _extract_factor(states, pos): return TransitionInfo( func=transition_function, - param_names=dict(zip(latent_factors, param_names, strict=False)), - individual_functions=individual_functions, - function_names=dict(zip(latent_factors, function_names, strict=False)), + param_names=frozendict(zip(latent_factors, param_names, strict=False)), + individual_functions=frozendict(individual_functions), + function_names=frozendict(zip(latent_factors, function_names, strict=False)), ) @@ -471,16 +476,18 @@ def _get_endogenous_factors_info( return EndogenousFactorsInfo( has_endogenous_factors=has_endogenous_factors, - aug_periods_to_aug_period_meas_types=_get_aug_periods_to_aug_period_meas_types( - aug_periods=labels.aug_periods_to_periods.keys(), - has_endogenous_factors=has_endogenous_factors, + aug_periods_to_aug_period_meas_types=frozendict( + _get_aug_periods_to_aug_period_meas_types( + aug_periods=labels.aug_periods_to_periods.keys(), + has_endogenous_factors=has_endogenous_factors, + ) ), bounds_distance=bounds_distance, aug_periods_from_period=partial( _aug_periods_from_period, aug_periods_to_periods=labels.aug_periods_to_periods, ), - factor_info=factor_info, + factor_info=frozendict(factor_info), ) diff --git a/src/skillmodels/types.py b/src/skillmodels/types.py index 455bc317..71dcd52b 100644 --- a/src/skillmodels/types.py +++ b/src/skillmodels/types.py @@ -5,6 +5,7 @@ from typing import Literal import pandas as pd +from frozendict import frozendict from jax import Array @@ -42,10 +43,10 @@ class Labels: stagemap: tuple[int, ...] stages: tuple[int, ...] aug_periods: tuple[int, ...] - aug_periods_to_periods: dict[int, int] + aug_periods_to_periods: frozendict[int, int] aug_stagemap: tuple[int, ...] aug_stages: tuple[int, ...] - aug_stages_to_stages: dict[int, int] + aug_stages_to_stages: frozendict[int, int] transition_names: tuple[str, ...] 
= () @property @@ -59,7 +60,7 @@ class Anchoring: """Information about how latent factors are anchored to observed outcomes.""" anchoring: bool - outcomes: dict[str, str] + outcomes: frozendict[str, str] factors: tuple[str, ...] free_controls: bool free_constant: bool @@ -85,9 +86,9 @@ class TransitionInfo: """Information about transition functions.""" func: Callable - param_names: dict[str, list[str]] - individual_functions: dict[str, Callable] - function_names: dict[str, str] + param_names: frozendict[str, list[str]] + individual_functions: frozendict[str, Callable] + function_names: frozendict[str, str] @dataclass(frozen=True) @@ -104,12 +105,12 @@ class EndogenousFactorsInfo: """Information about endogenous factors in the model.""" has_endogenous_factors: bool - aug_periods_to_aug_period_meas_types: dict[ + aug_periods_to_aug_period_meas_types: frozendict[ int, Literal["states", "endogenous_factors"] ] bounds_distance: float aug_periods_from_period: Callable[[int], list[int]] - factor_info: dict[str, FactorEndogenousInfo] + factor_info: frozendict[str, FactorEndogenousInfo] @dataclass(frozen=True) diff --git a/tests/test_constraints.py b/tests/test_constraints.py index 9a9595f9..faaddfa8 100644 --- a/tests/test_constraints.py +++ b/tests/test_constraints.py @@ -4,6 +4,7 @@ import pandas as pd import pytest import yaml +from frozendict import frozendict from pandas.testing import assert_frame_equal from skillmodels.constraints import ( @@ -181,10 +182,10 @@ def test_constant_factor_constraints(): stagemap=(0, 0, 0), stages=(0,), aug_periods=(0, 1, 2), - aug_periods_to_periods={0: 0, 1: 1, 2: 2}, + aug_periods_to_periods=frozendict({0: 0, 1: 1, 2: 2}), aug_stagemap=(0, 0, 0), aug_stages=(0,), - aug_stages_to_stages={0: 0}, + aug_stages_to_stages=frozendict({0: 0}), transition_names=("bla", "constant"), ) @@ -235,10 +236,10 @@ def test_trans_coeff_constraints(): stagemap=(0, 0, 0), stages=(0,), aug_periods=(0, 1, 2), - aug_periods_to_periods={0: 0, 1: 1, 2: 2}, + aug_periods_to_periods=frozendict({0: 0, 1: 1, 2: 2}), aug_stagemap=(0, 0, 0), aug_stages=(0,), - aug_stages_to_stages={0: 0}, + aug_stages_to_stages=frozendict({0: 0}), transition_names=("log_ces", "bla", "blubb"), ) @@ -292,7 +293,7 @@ def base_anchoring_info(): return Anchoring( anchoring=True, factors=("f1", "f2"), - outcomes={"f1": "outcome", "f2": "outcome"}, + outcomes=frozendict({"f1": "outcome", "f2": "outcome"}), free_controls=True, free_constant=True, free_loadings=True, @@ -309,7 +310,7 @@ def test_anchoring_constraints_for_constants(anch_uinfo, base_anchoring_info): anchoring_info = Anchoring( anchoring=True, factors=("f1", "f2"), - outcomes={"f1": "outcome", "f2": "outcome"}, + outcomes=frozendict({"f1": "outcome", "f2": "outcome"}), free_controls=True, free_constant=False, free_loadings=True, @@ -338,7 +339,7 @@ def test_anchoring_constraints_for_controls(anch_uinfo, base_anchoring_info): anchoring_info = Anchoring( anchoring=True, factors=("f1", "f2"), - outcomes={"f1": "outcome", "f2": "outcome"}, + outcomes=frozendict({"f1": "outcome", "f2": "outcome"}), free_controls=False, free_constant=True, free_loadings=True, @@ -378,7 +379,7 @@ def test_anchoring_constraints_for_loadings(anch_uinfo, base_anchoring_info): anchoring_info = Anchoring( anchoring=True, factors=("f1", "f2"), - outcomes={"f1": "outcome", "f2": "outcome"}, + outcomes=frozendict({"f1": "outcome", "f2": "outcome"}), free_controls=True, free_constant=True, free_loadings=False, diff --git a/tests/test_correlation_heatmap.py 
b/tests/test_correlation_heatmap.py index 9ec8f775..d701ac70 100644 --- a/tests/test_correlation_heatmap.py +++ b/tests/test_correlation_heatmap.py @@ -1,5 +1,6 @@ import numpy as np import pandas as pd +from frozendict import frozendict from pandas.testing import assert_frame_equal as afe from skillmodels.correlation_heatmap import ( @@ -252,10 +253,10 @@ def test_process_factors(): stagemap=(0,), stages=(0,), aug_periods=(0,), - aug_periods_to_periods={0: 0}, + aug_periods_to_periods=frozendict({0: 0}), aug_stagemap=(0,), aug_stages=(0,), - aug_stages_to_stages={0: 0}, + aug_stages_to_stages=frozendict({0: 0}), ), } latent_factor = "c" diff --git a/tests/test_params_index.py b/tests/test_params_index.py index 6f84df66..5817e762 100644 --- a/tests/test_params_index.py +++ b/tests/test_params_index.py @@ -3,6 +3,7 @@ import pandas as pd import pytest import yaml +from frozendict import frozendict from skillmodels.params_index import ( get_control_params_index_tuples, @@ -185,9 +186,9 @@ def test_trans_coeffs_index_tuples_no_endogenous_factors(): } trans_info = TransitionInfo( func=lambda x: x, # dummy function - param_names=param_names, - individual_functions={}, - function_names={}, + param_names=frozendict(param_names), + individual_functions=frozendict({}), + function_names=frozendict({}), ) expected = [ @@ -228,9 +229,9 @@ def test_trans_coeffs_index_tuples_has_endogenous_factors(): } trans_info = TransitionInfo( func=lambda x: x, # dummy function - param_names=param_names, - individual_functions={}, - function_names={}, + param_names=frozendict(param_names), + individual_functions=frozendict({}), + function_names=frozendict({}), ) expected = [ diff --git a/tests/test_parse_params.py b/tests/test_parse_params.py index c7f7e956..461e6112 100644 --- a/tests/test_parse_params.py +++ b/tests/test_parse_params.py @@ -12,6 +12,7 @@ import pandas as pd import pytest import yaml +from frozendict import frozendict from numpy.testing import assert_array_equal as aae from skillmodels.parse_params import create_parsing_info, parse_params @@ -39,7 +40,7 @@ def parsed_parameters(): # more meaningful test anchoring = Anchoring( anchoring=False, - outcomes={}, + outcomes=frozendict({}), factors=(), free_controls=True, free_constant=True, diff --git a/tests/test_process_data.py b/tests/test_process_data.py index 274a81c8..9843715d 100644 --- a/tests/test_process_data.py +++ b/tests/test_process_data.py @@ -7,6 +7,7 @@ import pandas as pd import pytest import yaml +from frozendict import frozendict from numpy.testing import assert_array_equal as aae from skillmodels.process_data import ( @@ -131,10 +132,10 @@ def test_generate_controls_array(): stagemap=(0, 0), stages=(0,), aug_periods=(0, 1), - aug_periods_to_periods={0: 0, 1: 1}, + aug_periods_to_periods=frozendict({0: 0, 1: 1}), aug_stagemap=(0, 0), aug_stages=(0,), - aug_stages_to_stages={0: 0}, + aug_stages_to_stages=frozendict({0: 0}), ) calculated = _generate_controls_array(data, labels, 2) @@ -160,10 +161,10 @@ def test_generate_observed_factor_array(): stagemap=(0, 0), stages=(0,), aug_periods=(0, 1), - aug_periods_to_periods={0: 0, 1: 1}, + aug_periods_to_periods=frozendict({0: 0, 1: 1}), aug_stagemap=(0, 0), aug_stages=(0,), - aug_stages_to_stages={0: 0}, + aug_stages_to_stages=frozendict({0: 0}), ) calculated = _generate_observed_factor_array(data, labels, 2) From 562cfaad381eaf05fd960a4d806917aa7d44fa95 Mon Sep 17 00:00:00 2001 From: Hans-Martin von Gaudecker Date: Thu, 8 Jan 2026 19:36:21 +0100 Subject: [PATCH 03/27] Ignore ty false 
positive. --- src/skillmodels/process_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/skillmodels/process_model.py b/src/skillmodels/process_model.py index 0a0b9bf6..8ebf7fae 100644 --- a/src/skillmodels/process_model.py +++ b/src/skillmodels/process_model.py @@ -537,7 +537,7 @@ def _get_update_info( uinfo.loc[(aug_period, meas), factor] = True uinfo.loc[(aug_period, meas), "purpose"] = "measurement" for factor in anchoring_info.factors: - outcome = anchoring_info.outcomes[factor] + outcome = anchoring_info.outcomes[factor] # ty: ignore[invalid-argument-type] name = f"{outcome}_{factor}" uinfo.loc[(aug_period, name), factor] = True uinfo.loc[(aug_period, name), "purpose"] = "anchoring" From 4d2c271227ee3cbf4e82eda80498aa748f569333 Mon Sep 17 00:00:00 2001 From: Hans-Martin von Gaudecker Date: Thu, 8 Jan 2026 19:49:37 +0100 Subject: [PATCH 04/27] Return ProcessedModel dataclass from process_model() instead of dict MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Update process_model() to return a ProcessedModel frozen dataclass and update all consumers to use attribute access instead of dict access. This provides: - Better type safety with explicit typed fields - Immutability via frozen dataclass - IDE autocomplete support - Clear documentation of the model structure 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- src/skillmodels/correlation_heatmap.py | 22 +++---- src/skillmodels/filtered_states.py | 30 ++++----- src/skillmodels/maximization_inputs.py | 58 ++++++++--------- src/skillmodels/process_debug_data.py | 4 +- src/skillmodels/process_model.py | 22 +++---- src/skillmodels/simulate_data.py | 60 +++++++++--------- src/skillmodels/utilities.py | 10 +-- .../visualize_factor_distributions.py | 10 +-- .../visualize_transition_equations.py | 62 +++++++++---------- tests/test_constraints.py | 4 +- tests/test_correlation_heatmap.py | 8 ++- tests/test_params_index.py | 10 +-- tests/test_parse_params.py | 6 +- tests/test_process_data.py | 6 +- tests/test_process_model.py | 55 ++++++++-------- 15 files changed, 178 insertions(+), 189 deletions(-) diff --git a/src/skillmodels/correlation_heatmap.py b/src/skillmodels/correlation_heatmap.py index b0abc958..6a8be0eb 100644 --- a/src/skillmodels/correlation_heatmap.py +++ b/src/skillmodels/correlation_heatmap.py @@ -275,11 +275,11 @@ def _get_mask(corr, show_upper_triangle, show_diagonal): def _get_update_info_for_periods(model): """Return update_info with user-provided periods instead of augmented periods.""" - update_info = model["update_info"].copy() + update_info = model.update_info.copy() # Replace period level with user-provided period using set_codes period_values = update_info.index.get_level_values("aug_period").map( - model["labels"].aug_periods_to_periods + model.labels.aug_periods_to_periods ) update_info.index = update_info.index.set_codes(period_values, level="aug_period") update_info.index = update_info.index.set_names(["period", "variable"]) @@ -629,13 +629,13 @@ def _get_factor_scores_data_for_single_period( df (pd.DataFrame): Processed DataFrame to calculate correlations over. 
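As a minimal sketch of the access pattern this patch migrates to (a toy class with made-up fields, not the library's actual ProcessedModel definition):

    from dataclasses import dataclass

    import pandas as pd


    @dataclass(frozen=True)
    class MiniModel:
        update_info: pd.DataFrame
        n_periods: int


    model = MiniModel(update_info=pd.DataFrame({"purpose": []}), n_periods=8)
    print(model.n_periods)  # attribute access replaces model["n_periods"]
    # model.n_periods = 9   # would raise dataclasses.FrozenInstanceError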
""" - aug_periods = model["endogenous_factors_info"].aug_periods_from_period(period) + aug_periods = model.endogenous_factors_info.aug_periods_from_period(period) df = pd.concat( [ _get_factor_scores_data_for_single_model_period( data=data, params=params, - update_info=model["update_info"], + update_info=model.update_info, aug_period=ap, period=period, latent_factors=latent_factors, @@ -757,22 +757,22 @@ def _get_factor_scores_data_for_multiple_periods( def _process_factors(model, factors): """Process factors to get a tuple of lists.""" if not factors: - latent_factors = list(model["labels"].latent_factors) - observed_factors = list(model["labels"].observed_factors) + latent_factors = list(model.labels.latent_factors) + observed_factors = list(model.labels.observed_factors) elif isinstance(factors, str): - if factors in model["labels"].latent_factors: + if factors in model.labels.latent_factors: latent_factors = [factors] observed_factors = [] - elif factors in model["labels"].observed_factors: + elif factors in model.labels.observed_factors: observed_factors = [factors] latent_factors = [] else: observed_factors = [] latent_factors = [] for factor in factors: - if factor in model["labels"].latent_factors: + if factor in model.labels.latent_factors: latent_factors.append(factor) - elif factor in model["labels"].observed_factors: + elif factor in model.labels.observed_factors: observed_factors.append(factor) return latent_factors, observed_factors # ty: ignore[possibly-unresolved-reference] @@ -780,7 +780,7 @@ def _process_factors(model, factors): def _process_periods(periods, model): """Process periods to get a list.""" if periods is None: - periods = list(range(model["dimensions"].n_periods)) + periods = list(range(model.dimensions.n_periods)) elif isinstance(periods, int | float): periods = [periods] return periods diff --git a/src/skillmodels/filtered_states.py b/src/skillmodels/filtered_states.py index a1c222a9..016003d3 100644 --- a/src/skillmodels/filtered_states.py +++ b/src/skillmodels/filtered_states.py @@ -26,7 +26,7 @@ def get_filtered_states(model_dict, data, params): anchored_ranges = create_state_ranges( filtered_states=anchored_states_df, - factors=model["labels"].latent_factors, + factors=model.labels.latent_factors, ) out = { @@ -58,38 +58,38 @@ def anchor_states_df(states_df, model_dict, params, use_aug_period): model = process_model(model_dict) p_index = get_params_index( - update_info=model["update_info"], - labels=model["labels"], - dimensions=model["dimensions"], - transition_info=model["transition_info"], - endogenous_factors_info=model["endogenous_factors_info"], + update_info=model.update_info, + labels=model.labels, + dimensions=model.dimensions, + transition_info=model.transition_info, + endogenous_factors_info=model.endogenous_factors_info, ) params = params.loc[p_index] parsing_info = create_parsing_info( params_index=p_index, - update_info=model["update_info"], - labels=model["labels"], - anchoring=model["anchoring"], - has_endogenous_factors=model["endogenous_factors_info"].has_endogenous_factors, + update_info=model.update_info, + labels=model.labels, + anchoring=model.anchoring, + has_endogenous_factors=model.endogenous_factors_info.has_endogenous_factors, ) *_, pardict = parse_params( params=jnp.array(params["value"].to_numpy()), parsing_info=parsing_info, - dimensions=model["dimensions"], - labels=model["labels"], + dimensions=model.dimensions, + labels=model.labels, n_obs=1, ) - n_latent = model["dimensions"].n_latent_factors + n_latent = 
model.dimensions.n_latent_factors _scaling_factors = np.array(pardict["anchoring_scaling_factors"][:, :n_latent]) _constants = np.array(pardict["anchoring_constants"][:, :n_latent]) if use_aug_period: period_arr = states_df["aug_period"].to_numpy() - ap_to_p = model["labels"].aug_periods_to_periods + ap_to_p = model.labels.aug_periods_to_periods scaling_factors = np.empty(shape=(len(ap_to_p), n_latent)) constants = np.empty(shape=(len(ap_to_p), n_latent)) for ap, p in ap_to_p.items(): @@ -104,7 +104,7 @@ def anchor_states_df(states_df, model_dict, params, use_aug_period): constants_arr = constants[period_arr] out = states_df.copy(deep=True) - for pos, factor in enumerate(model["labels"].latent_factors): + for pos, factor in enumerate(model.labels.latent_factors): out[factor] = constants_arr[:, pos] + states_df[factor] * scaling_arr[:, pos] out = out[states_df.columns] diff --git a/src/skillmodels/maximization_inputs.py b/src/skillmodels/maximization_inputs.py index 1f75dafb..c50be7b2 100644 --- a/src/skillmodels/maximization_inputs.py +++ b/src/skillmodels/maximization_inputs.py @@ -58,32 +58,32 @@ def get_maximization_inputs(model_dict, data, split_dataset=1): """ model = process_model(model_dict) p_index = get_params_index( - update_info=model["update_info"], - labels=model["labels"], - dimensions=model["dimensions"], - transition_info=model["transition_info"], - endogenous_factors_info=model["endogenous_factors_info"], + update_info=model.update_info, + labels=model.labels, + dimensions=model.dimensions, + transition_info=model.transition_info, + endogenous_factors_info=model.endogenous_factors_info, ) parsing_info = create_parsing_info( params_index=p_index, - update_info=model["update_info"], - labels=model["labels"], - anchoring=model["anchoring"], - has_endogenous_factors=model["endogenous_factors_info"].has_endogenous_factors, + update_info=model.update_info, + labels=model.labels, + anchoring=model.anchoring, + has_endogenous_factors=model.endogenous_factors_info.has_endogenous_factors, ) processed_data = process_data( df=data, - has_endogenous_factors=model["endogenous_factors_info"].has_endogenous_factors, - labels=model["labels"], - update_info=model["update_info"], - anchoring_info=model["anchoring"], + has_endogenous_factors=model.endogenous_factors_info.has_endogenous_factors, + labels=model.labels, + update_info=model.update_info, + anchoring_info=model.anchoring, purpose="estimation", ) sigma_scaling_factor, sigma_weights = calculate_sigma_scaling_factor_and_weights( - model["dimensions"].n_latent_factors, - model["estimation_options"].sigma_points_scale, + model.dimensions.n_latent_factors, + model.estimation_options.sigma_points_scale, ) partialed_get_jnp_params_vec = functools.partial( @@ -154,12 +154,12 @@ def debug_loglike(params): return process_debug_data(debug_data=tmp, model=model) _constraints_dicts = get_constraints_dicts( - dimensions=model["dimensions"], - labels=model["labels"], - anchoring_info=model["anchoring"], - update_info=model["update_info"], - normalizations=model["normalizations"], - endogenous_factors_info=model["endogenous_factors_info"], + dimensions=model.dimensions, + labels=model.labels, + anchoring_info=model.anchoring, + update_info=model.update_info, + normalizations=model.normalizations, + endogenous_factors_info=model.endogenous_factors_info, ) constraints = constraints_dicts_to_om(_constraints_dicts) @@ -167,7 +167,7 @@ def debug_loglike(params): params_template = pd.DataFrame(columns=["value"], index=p_index) params_template = 
add_bounds( params=params_template, - bounds_distance=model["estimation_options"].bounds_distance, + bounds_distance=model.estimation_options.bounds_distance, ) params_template = enforce_fixed_constraints( params_template=params_template, @@ -198,7 +198,7 @@ def _partial_some_log_likelihood( sigma_weights, sigma_scaling_factor, ): - update_info = model["update_info"] + update_info = model.update_info is_measurement_iteration = (update_info["purpose"] == "measurement").to_numpy() _aug_periods = pd.Series( update_info.index.get_level_values("aug_period").to_numpy() @@ -211,9 +211,9 @@ def _partial_some_log_likelihood( # are endogenous factors, the last aug_period is found at index -2 (there should not # be measurements for endogenous factors in the "second half" of the last period). last_aug_period = ( - model["labels"].aug_periods[-2] + model.labels.aug_periods[-2] if parsing_info["has_endogenous_factors"] - else model["labels"].aug_periods[-1] + else model.labels.aug_periods[-1] ) iteration_to_period = _aug_periods.replace(last_aug_period, -1).to_numpy() assert max(iteration_to_period) == last_aug_period - 1 @@ -223,12 +223,12 @@ def _partial_some_log_likelihood( parsing_info=parsing_info, measurements=measurements, controls=controls, - transition_func=model["transition_info"].func, + transition_func=model.transition_info.func, sigma_scaling_factor=sigma_scaling_factor, sigma_weights=sigma_weights, - dimensions=model["dimensions"], - labels=model["labels"], - estimation_options=model["estimation_options"], + dimensions=model.dimensions, + labels=model.labels, + estimation_options=model.estimation_options, is_measurement_iteration=is_measurement_iteration, is_predict_iteration=is_predict_iteration, iteration_to_period=iteration_to_period, diff --git a/src/skillmodels/process_debug_data.py b/src/skillmodels/process_debug_data.py index 1a035bc6..00cacacb 100644 --- a/src/skillmodels/process_debug_data.py +++ b/src/skillmodels/process_debug_data.py @@ -51,8 +51,8 @@ def process_debug_data(debug_data, model): identify the Kalman Update to which the likelihood contribution corresponds. 
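A minimal sketch of the frozendict behavior the frozen-dataclass fields rely on (toy values; assumes the frozendict release pinned in pixi.lock above):

    from frozendict import frozendict

    mapping = frozendict({0: 0, 1: 0, 2: 1, 3: 1})
    print(mapping[2])     # read access works like a plain dict
    print(hash(mapping))  # hashable, so it fits inside frozen dataclasses
    try:
        mapping[2] = 5    # item assignment is rejected
    except TypeError as err:
        print(err)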
""" - update_info = model["update_info"] - factors = model["labels"].latent_factors + update_info = model.update_info + factors = model.labels.latent_factors post_update_states = _create_post_update_states( debug_data["filtered_states"], diff --git a/src/skillmodels/process_model.py b/src/skillmodels/process_model.py index 8ebf7fae..aa718818 100644 --- a/src/skillmodels/process_model.py +++ b/src/skillmodels/process_model.py @@ -20,6 +20,7 @@ EstimationOptions, FactorEndogenousInfo, Labels, + ProcessedModel, TransitionInfo, ) @@ -122,17 +123,16 @@ def process_model(model_dict): transition_names=tuple(transition_info.function_names.values()), ) - processed = { - "dimensions": dims, - "labels": labels, - "anchoring": anchoring, - "estimation_options": _process_estimation_options(_model_dict_aug), - "transition_info": transition_info, - "update_info": _get_update_info(_model_dict_aug, dims, labels, anchoring), - "normalizations": _process_normalizations(_model_dict_aug, dims, labels), - "endogenous_factors_info": endogenous_factors_info, - } - return processed + return ProcessedModel( + dimensions=dims, + labels=labels, + anchoring=anchoring, + estimation_options=_process_estimation_options(_model_dict_aug), + transition_info=transition_info, + update_info=_get_update_info(_model_dict_aug, dims, labels, anchoring), + normalizations=_process_normalizations(_model_dict_aug, dims, labels), + endogenous_factors_info=endogenous_factors_info, + ) def get_has_endogenous_factors(factors: dict[str, Any]) -> bool: diff --git a/src/skillmodels/simulate_data.py b/src/skillmodels/simulate_data.py index 4d2d808f..b3b0fc07 100644 --- a/src/skillmodels/simulate_data.py +++ b/src/skillmodels/simulate_data.py @@ -41,23 +41,21 @@ def simulate_dataset(model_dict, params, n_obs=None, data=None, policies=None): model = process_model(model_dict) - if model["labels"].observed_factors and data is None: + if model.labels.observed_factors and data is None: raise ValueError( "To simulate a model with observed factors, data cannot be None.", ) - if model["labels"].controls != ["constant"] and data is None: + if model.labels.controls != ["constant"] and data is None: raise ValueError("To simulate a model with controls, data cannot be None.") if data is not None: processed_data = process_data( df=data, - has_endogenous_factors=model[ - "endogenous_factors_info" - ].has_endogenous_factors, - labels=model["labels"], - update_info=model["update_info"], - anchoring_info=model["anchoring"], + has_endogenous_factors=model.endogenous_factors_info.has_endogenous_factors, + labels=model.labels, + update_info=model.update_info, + anchoring_info=model.anchoring, purpose="simulation", ) control_data = processed_data["controls"] @@ -73,32 +71,32 @@ def simulate_dataset(model_dict, params, n_obs=None, data=None, policies=None): else: control_data = jnp.ones((n_obs, 1)) - n_periods = model["dimensions"].n_periods + n_periods = model.dimensions.n_periods observed_factors = jnp.zeros((n_periods, n_obs, 0)) params_index = get_params_index( - update_info=model["update_info"], - labels=model["labels"], - dimensions=model["dimensions"], - transition_info=model["transition_info"], - endogenous_factors_info=model["endogenous_factors_info"], + update_info=model.update_info, + labels=model.labels, + dimensions=model.dimensions, + transition_info=model.transition_info, + endogenous_factors_info=model.endogenous_factors_info, ) params = params.reindex(params_index) parsing_info = create_parsing_info( params_index=params.index, - 
update_info=model["update_info"], - labels=model["labels"], - anchoring=model["anchoring"], - has_endogenous_factors=model["endogenous_factors_info"].has_endogenous_factors, + update_info=model.update_info, + labels=model.labels, + anchoring=model.anchoring, + has_endogenous_factors=model.endogenous_factors_info.has_endogenous_factors, ) states, covs, log_weights, pardict = parse_params( params=jnp.array(params["value"].to_numpy()), parsing_info=parsing_info, - dimensions=model["dimensions"], - labels=model["labels"], + dimensions=model.dimensions, + labels=model.labels, n_obs=n_obs, ) @@ -107,23 +105,23 @@ def simulate_dataset(model_dict, params, n_obs=None, data=None, policies=None): covs=covs, log_weights=log_weights, pardict=pardict, - labels=model["labels"], - dimensions=model["dimensions"], + labels=model.labels, + dimensions=model.dimensions, n_obs=n_obs, - has_endogenous_factors=model["endogenous_factors_info"].has_endogenous_factors, - update_info=model["update_info"], + has_endogenous_factors=model.endogenous_factors_info.has_endogenous_factors, + update_info=model.update_info, control_data=control_data, observed_factors=observed_factors, policies=policies, - transition_info=model["transition_info"], + transition_info=model.transition_info, ) # Create collapsed versions with user-facing periods latent_data = _collapse_aug_periods_to_periods( df=aug_latent_data, - factors=model["labels"].latent_factors, - aug_periods_to_periods=model["labels"].aug_periods_to_periods, - endogenous_factors_info=model["endogenous_factors_info"], + factors=model.labels.latent_factors, + aug_periods_to_periods=model.labels.aug_periods_to_periods, + endogenous_factors_info=model.endogenous_factors_info, ) # Anchor the collapsed version (anchoring only works with period, not aug_period) @@ -139,21 +137,21 @@ def simulate_dataset(model_dict, params, n_obs=None, data=None, policies=None): "states": latent_data, "state_ranges": create_state_ranges( latent_data, - model["labels"].latent_factors, + model.labels.latent_factors, ), }, "anchored_states": { "states": anchored_latent_data, "state_ranges": create_state_ranges( anchored_latent_data, - model["labels"].latent_factors, + model.labels.latent_factors, ), }, "aug_unanchored_states": { "states": aug_latent_data, "state_ranges": create_state_ranges( aug_latent_data, - model["labels"].latent_factors, + model.labels.latent_factors, ), }, "aug_measurements": aug_measurements, diff --git a/src/skillmodels/utilities.py b/src/skillmodels/utilities.py index 1e41a0ab..bbb84a4f 100644 --- a/src/skillmodels/utilities.py +++ b/src/skillmodels/utilities.py @@ -344,11 +344,11 @@ def _extend_params(params, model_dict, fill_value): def _get_params_index_from_model_dict(model_dict): mod = process_model(model_dict) index = get_params_index( - update_info=mod["update_info"], - labels=mod["labels"], - dimensions=mod["dimensions"], - transition_info=mod["transition_info"], - endogenous_factors_info=mod["endogenous_factors_info"], + update_info=mod.update_info, + labels=mod.labels, + dimensions=mod.dimensions, + transition_info=mod.transition_info, + endogenous_factors_info=mod.endogenous_factors_info, ) return index diff --git a/src/skillmodels/visualize_factor_distributions.py b/src/skillmodels/visualize_factor_distributions.py index a56aa82c..76a6660d 100644 --- a/src/skillmodels/visualize_factor_distributions.py +++ b/src/skillmodels/visualize_factor_distributions.py @@ -221,7 +221,7 @@ def univariate_densities( states=states, period=period, factors=factors, - 
aug_periods_to_periods=model["labels"].aug_periods_to_periods, + aug_periods_to_periods=model.labels.aug_periods_to_periods, observed_states=observed_states, ) scenarios = df["scenario"].unique() @@ -332,7 +332,7 @@ def bivariate_density_contours( states=states, period=period, factors=factors, - aug_periods_to_periods=model["labels"].aug_periods_to_periods, + aug_periods_to_periods=model.labels.aug_periods_to_periods, observed_states=observed_states, ) plots_dict = {} @@ -457,7 +457,7 @@ def bivariate_density_surfaces( states=states, period=period, factors=factors, - aug_periods_to_periods=model["labels"].aug_periods_to_periods, + aug_periods_to_periods=model.labels.aug_periods_to_periods, observed_states=observed_states, ) plots_dict = {} @@ -666,9 +666,9 @@ def _get_factors(factors, observed_factors, model): """Proccess factor names to return list of strings.""" if factors is None: if observed_factors: - factors = model["labels"].all_factors + factors = model.labels.all_factors else: - factors = model["labels"].latent_factors + factors = model.labels.latent_factors return factors diff --git a/src/skillmodels/visualize_transition_equations.py b/src/skillmodels/visualize_transition_equations.py index 0ec8c019..00d0e237 100644 --- a/src/skillmodels/visualize_transition_equations.py +++ b/src/skillmodels/visualize_transition_equations.py @@ -178,23 +178,23 @@ def get_transition_plots( model = process_model(model_dict) - if period >= model["labels"].periods[-1]: + if period >= model.labels.periods[-1]: raise ValueError( "*period* must be the penultimate period of the model or earlier.", ) if ( include_correction_factors - or not model["endogenous_factors_info"].has_endogenous_factors + or not model.endogenous_factors_info.has_endogenous_factors ): - latent_factors = model["labels"].latent_factors + latent_factors = model.labels.latent_factors else: latent_factors = [ lf - for lf in model["labels"].latent_factors - if not model["endogenous_factors_info"].factor_info[lf].is_correction + for lf in model.labels.latent_factors + if not model.endogenous_factors_info.factor_info[lf].is_correction ] - all_factors = model["labels"].all_factors + all_factors = model.labels.all_factors states = get_filtered_states(model_dict=model_dict, data=data, params=params)[ "anchored_states" ]["states"] @@ -270,7 +270,7 @@ def _get_dictionary_with_plots( for each input and output factors. 
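A minimal sketch, with hypothetical mapping values, of the reverse lookup used in the hunk below: when endogenous factors are present, each user-facing period corresponds to two augmented periods, which are recovered by scanning the mapping.

    from frozendict import frozendict

    aug_periods_to_periods = frozendict({0: 0, 1: 0, 2: 1, 3: 1})
    period = 1
    both_aug_periods = [
        aug_p for aug_p, p in aug_periods_to_periods.items() if p == period
    ]
    print(both_aug_periods)  # [2, 3]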
""" - observed_factors = model["labels"].observed_factors + observed_factors = model.labels.observed_factors states_data = _get_states_data(model, period, data, states, observed_factors) params = _set_index_params(model, params) pardict = _get_pardict(model, params) @@ -281,21 +281,17 @@ def _get_dictionary_with_plots( title_kwargs=None, showlegend=showlegend, ) - has_endogenous_factors = model["endogenous_factors_info"].has_endogenous_factors + has_endogenous_factors = model.endogenous_factors_info.has_endogenous_factors if has_endogenous_factors: - _aug_periods = model["endogenous_factors_info"].aug_periods_from_period(period) + _aug_periods = model.endogenous_factors_info.aug_periods_from_period(period) else: _aug_periods = [period] plots_dict = {} for output_factor, input_factor in itertools.product(latent_factors, all_factors): - transition_function = model["transition_info"].individual_functions[ - output_factor - ] + transition_function = model.transition_info.individual_functions[output_factor] if ( has_endogenous_factors - and model["endogenous_factors_info"] - .factor_info[output_factor] - .is_endogenous + and model.endogenous_factors_info.factor_info[output_factor].is_endogenous ): aug_period = min(_aug_periods) else: @@ -365,17 +361,17 @@ def _get_pardict(model, params): """Get parsed params dictionary.""" parsing_info = create_parsing_info( params_index=params.index, - update_info=model["update_info"], - labels=model["labels"], - anchoring=model["anchoring"], - has_endogenous_factors=model["endogenous_factors_info"].has_endogenous_factors, + update_info=model.update_info, + labels=model.labels, + anchoring=model.anchoring, + has_endogenous_factors=model.endogenous_factors_info.has_endogenous_factors, ) _, _, _, pardict = parse_params( params=jnp.array(params["value"].to_numpy()), parsing_info=parsing_info, - dimensions=model["dimensions"], - labels=model["labels"], + dimensions=model.dimensions, + labels=model.labels, n_obs=1, ) return pardict @@ -384,11 +380,11 @@ def _get_pardict(model, params): def _set_index_params(model, params): """Reset index of params data frame to model implied values.""" params_index = get_params_index( - update_info=model["update_info"], - labels=model["labels"], - dimensions=model["dimensions"], - transition_info=model["transition_info"], - endogenous_factors_info=model["endogenous_factors_info"], + update_info=model.update_info, + labels=model.labels, + dimensions=model.dimensions, + transition_info=model.transition_info, + endogenous_factors_info=model.endogenous_factors_info, ) params = params.reindex(params_index) @@ -405,19 +401,17 @@ def _get_states_data(model, period, data, states, observed_factors): if observed_factors: _observed_arr = process_data( df=data, - has_endogenous_factors=model[ - "endogenous_factors_info" - ].has_endogenous_factors, - labels=model["labels"], - update_info=model["update_info"], - anchoring_info=model["anchoring"], + has_endogenous_factors=model.endogenous_factors_info.has_endogenous_factors, + labels=model.labels, + update_info=model.update_info, + anchoring_info=model.anchoring, )["observed_factors"] # convert from jax to numpy _observed_arr = np.array(_observed_arr) - if model["endogenous_factors_info"].has_endogenous_factors: + if model.endogenous_factors_info.has_endogenous_factors: both_aug_periods = [ aug_p - for aug_p, p in model["labels"].aug_periods_to_periods.items() + for aug_p, p in model.labels.aug_periods_to_periods.items() if p == period ] to_concat = [] diff --git a/tests/test_constraints.py 
b/tests/test_constraints.py index faaddfa8..88e8498f 100644 --- a/tests/test_constraints.py +++ b/tests/test_constraints.py @@ -422,8 +422,8 @@ def simplest_augmented_model(): def test_get_constraints_for_augmented_periods(simplest_augmented_model): calculated = _get_constraints_for_augmented_periods( - labels=simplest_augmented_model["labels"], - endogenous_factors_info=simplest_augmented_model["endogenous_factors_info"], + labels=simplest_augmented_model.labels, + endogenous_factors_info=simplest_augmented_model.endogenous_factors_info, ) for c in calculated: del c["description"] diff --git a/tests/test_correlation_heatmap.py b/tests/test_correlation_heatmap.py index d701ac70..66e76701 100644 --- a/tests/test_correlation_heatmap.py +++ b/tests/test_correlation_heatmap.py @@ -1,3 +1,5 @@ +from types import SimpleNamespace + import numpy as np import pandas as pd from frozendict import frozendict @@ -244,8 +246,8 @@ def test_get_factor_scores_data_with_multiple_period(): def test_process_factors(): - model = { - "labels": Labels( + model = SimpleNamespace( + labels=Labels( latent_factors=tuple("abcd"), observed_factors=tuple("efg"), controls=("constant",), @@ -258,7 +260,7 @@ def test_process_factors(): aug_stages=(0,), aug_stages_to_stages=frozendict({0: 0}), ), - } + ) latent_factor = "c" observed_factor = "g" factors = ["b", "d", "g"] diff --git a/tests/test_params_index.py b/tests/test_params_index.py index 5817e762..c712f5bc 100644 --- a/tests/test_params_index.py +++ b/tests/test_params_index.py @@ -28,11 +28,11 @@ def model2_inputs(): processed = process_model(model_dict) out = { - "update_info": processed["update_info"], - "labels": processed["labels"], - "dimensions": processed["dimensions"], - "transition_info": processed["transition_info"], - "endogenous_factors_info": processed["endogenous_factors_info"], + "update_info": processed.update_info, + "labels": processed.labels, + "dimensions": processed.dimensions, + "transition_info": processed.transition_info, + "endogenous_factors_info": processed.endogenous_factors_info, } return out diff --git a/tests/test_parse_params.py b/tests/test_parse_params.py index 461e6112..5d50e591 100644 --- a/tests/test_parse_params.py +++ b/tests/test_parse_params.py @@ -33,9 +33,9 @@ def parsed_parameters(): processed = process_model(model_dict) - update_info = processed["update_info"] - labels = processed["labels"] - dimensions = processed["dimensions"] + update_info = processed.update_info + labels = processed.labels + dimensions = processed.dimensions # this overwrites the anchoring setting from the model specification to get a # more meaningful test anchoring = Anchoring( diff --git a/tests/test_process_data.py b/tests/test_process_data.py index 9843715d..5cfac889 100644 --- a/tests/test_process_data.py +++ b/tests/test_process_data.py @@ -66,13 +66,13 @@ def simplest_augmented(): def test_augment_data_for_endogenous_factors(simplest_augmented): model = process_model(simplest_augmented["model_dict"]) pre_processed_data = pre_process_data( - simplest_augmented["data_input"], model["labels"].periods + simplest_augmented["data_input"], model.labels.periods ) pre_processed_data["constant"] = 1 res = _augment_data_for_endogenous_factors( df=pre_processed_data, - labels=model["labels"], - update_info=model["update_info"], + labels=model.labels, + update_info=model.update_info, ) cols = ["var", "inv", "constant", "of"] pd.testing.assert_frame_equal(res[cols], simplest_augmented["data_exp"][cols]) diff --git a/tests/test_process_model.py 
b/tests/test_process_model.py index 1dcbe056..be67a17b 100644 --- a/tests/test_process_model.py +++ b/tests/test_process_model.py @@ -25,13 +25,11 @@ def model2(): def test_has_endogenous_factors(model2): - assert ( - process_model(model2)["endogenous_factors_info"].has_endogenous_factors == False - ) + assert process_model(model2).endogenous_factors_info.has_endogenous_factors == False def test_dimensions(model2): - res = process_model(model2)["dimensions"] + res = process_model(model2).dimensions assert res.n_latent_factors == 3 assert res.n_observed_factors == 0 assert res.n_all_factors == 3 @@ -41,7 +39,7 @@ def test_dimensions(model2): def test_labels(model2): - res = process_model(model2)["labels"] + res = process_model(model2).labels assert res.latent_factors == ("fac1", "fac2", "fac3") assert res.observed_factors == () assert res.all_factors == ("fac1", "fac2", "fac3") @@ -52,14 +50,14 @@ def test_labels(model2): def test_estimation_options(model2): - res = process_model(model2)["estimation_options"] + res = process_model(model2).estimation_options assert res.sigma_points_scale == 2 assert res.robust_bounds assert res.bounds_distance == 0.001 def test_anchoring(model2): - res = process_model(model2)["anchoring"] + res = process_model(model2).anchoring assert res.outcomes == {"fac1": "Q1"} assert res.factors == ("fac1",) assert res.free_controls @@ -68,7 +66,7 @@ def test_anchoring(model2): def test_transition_info(model2): - res = process_model(model2)["transition_info"] + res = process_model(model2).transition_info assert isinstance(res, TransitionInfo) assert callable(res.func) @@ -77,7 +75,7 @@ def test_transition_info(model2): def test_update_info(model2): - res = process_model(model2)["update_info"] + res = process_model(model2).update_info test_dir = Path(__file__).parent.resolve() expected = pd.read_csv( test_dir / "model2_correct_update_info.csv", @@ -119,7 +117,7 @@ def test_normalizations(model2): "intercepts": [{}, {}, {}, {}, {}, {}, {}, {}], }, } - res = process_model(model2)["normalizations"] + res = process_model(model2).normalizations assert res == expected @@ -138,17 +136,15 @@ def test_anchoring_and_endogenous_factors_work_together(): # Should not raise - anchoring and endogenous factors now work together result = process_model(model_dict) # Verify anchoring is enabled - assert result["anchoring"].anchoring - assert result["anchoring"].factors == ("fac1",) + assert result.anchoring.anchoring + assert result.anchoring.factors == ("fac1",) # Verify endogenous factors are enabled - assert result["endogenous_factors_info"].has_endogenous_factors + assert result.endogenous_factors_info.has_endogenous_factors # Verify dimensions - assert result["dimensions"].n_periods == 8 - assert result["dimensions"].n_aug_periods == 16 + assert result.dimensions.n_periods == 8 + assert result.dimensions.n_aug_periods == 16 # Verify update_info has anchoring entries for all aug_periods - anchoring_updates = result["update_info"][ - result["update_info"]["purpose"] == "anchoring" - ] + anchoring_updates = result.update_info[result.update_info["purpose"] == "anchoring"] assert ( len(anchoring_updates) == 16 ) # One per aug_period for the one anchored factor @@ -173,9 +169,9 @@ def test_stagemap_with_endogenous_factors(): model_dict["stagemap"] = [0, 0, 1, 1, 2, 2, 3] del model_dict["anchoring"] model = process_model(model_dict) - assert model["labels"].stagemap == tuple(model_dict["stagemap"]) - assert model["labels"].stages == (0, 1, 2, 3) - assert model["labels"].aug_stagemap == (0, 
1, 0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7) + assert model.labels.stagemap == tuple(model_dict["stagemap"]) + assert model.labels.stages == (0, 1, 2, 3) + assert model.labels.aug_stagemap == (0, 1, 0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7) @pytest.fixture @@ -191,13 +187,12 @@ def model2_inv(): def test_with_endog_has_endogenous_factors(model2_inv): assert ( - process_model(model2_inv)["endogenous_factors_info"].has_endogenous_factors - == True + process_model(model2_inv).endogenous_factors_info.has_endogenous_factors == True ) def test_with_endog_dimensions(model2_inv): - res = process_model(model2_inv)["dimensions"] + res = process_model(model2_inv).dimensions assert res.n_latent_factors == 3 assert res.n_observed_factors == 0 assert res.n_all_factors == 3 @@ -208,7 +203,7 @@ def test_with_endog_dimensions(model2_inv): def test_with_endog_labels(model2_inv): - res = process_model(model2_inv)["labels"] + res = process_model(model2_inv).labels n_aug_periods = 16 assert res.latent_factors == ("fac1", "fac2", "fac3") assert res.observed_factors == () @@ -221,14 +216,14 @@ def test_with_endog_labels(model2_inv): def test_with_endog_estimation_options(model2_inv): - res = process_model(model2_inv)["estimation_options"] + res = process_model(model2_inv).estimation_options assert res.sigma_points_scale == 2 assert res.robust_bounds assert res.bounds_distance == 0.001 def test_with_endog_anchoring_is_empty(model2_inv): - res = process_model(model2_inv)["anchoring"] + res = process_model(model2_inv).anchoring assert res.outcomes == {} assert res.factors == () assert res.free_controls is False @@ -237,7 +232,7 @@ def test_with_endog_anchoring_is_empty(model2_inv): def test_with_endog_transition_info(model2_inv): - res = process_model(model2_inv)["transition_info"] + res = process_model(model2_inv).transition_info assert isinstance(res, TransitionInfo) assert callable(res.func) @@ -246,7 +241,7 @@ def test_with_endog_transition_info(model2_inv): def test_with_endog_update_info(model2_inv): - res = process_model(model2_inv)["update_info"] + res = process_model(model2_inv).update_info test_dir = Path(__file__).parent.resolve() expected = pd.read_csv( test_dir / "model2_with_endog_correct_update_info.csv", @@ -372,7 +367,7 @@ def test_with_endog_normalizations(model2_inv): ], }, } - res = process_model(model2_inv)["normalizations"] + res = process_model(model2_inv).normalizations assert res == expected From 264016cffad1d7c8b3c91e201a9bec1f7af924a3 Mon Sep 17 00:00:00 2001 From: Hans-Martin von Gaudecker Date: Fri, 9 Jan 2026 05:59:58 +0100 Subject: [PATCH 05/27] Rename FactorEndogenousInfo to FactorInfo MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- src/skillmodels/process_model.py | 6 +++--- src/skillmodels/types.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/skillmodels/process_model.py b/src/skillmodels/process_model.py index aa718818..2f8cd9d8 100644 --- a/src/skillmodels/process_model.py +++ b/src/skillmodels/process_model.py @@ -18,7 +18,7 @@ Dimensions, EndogenousFactorsInfo, EstimationOptions, - FactorEndogenousInfo, + FactorInfo, Labels, ProcessedModel, TransitionInfo, @@ -92,7 +92,7 @@ def process_model(model_dict): ), factor_info=frozendict( { - fac: FactorEndogenousInfo( + fac: FactorInfo( is_state=True, is_endogenous=False, is_correction=False ) for fac in labels.latent_factors @@ -466,7 +466,7 @@ def _get_endogenous_factors_info( 
"""Collect information about endogenous factors.""" factor_info = {} for fac, v in model_dict["factors"].items(): - factor_info[fac] = FactorEndogenousInfo( + factor_info[fac] = FactorInfo( is_state=( not v.get("is_endogenous", False) and not v.get("is_correction", False) ), diff --git a/src/skillmodels/types.py b/src/skillmodels/types.py index 71dcd52b..6296603f 100644 --- a/src/skillmodels/types.py +++ b/src/skillmodels/types.py @@ -92,7 +92,7 @@ class TransitionInfo: @dataclass(frozen=True) -class FactorEndogenousInfo: +class FactorInfo: """Endogeneity information for a single factor.""" is_state: bool @@ -110,7 +110,7 @@ class EndogenousFactorsInfo: ] bounds_distance: float aug_periods_from_period: Callable[[int], list[int]] - factor_info: frozendict[str, FactorEndogenousInfo] + factor_info: frozendict[str, FactorInfo] @dataclass(frozen=True) From 3bb334b109e36828ba5dc59ff3e1b7fc2ca0cd5f Mon Sep 17 00:00:00 2001 From: Hans-Martin von Gaudecker Date: Fri, 9 Jan 2026 09:32:38 +0100 Subject: [PATCH 06/27] Ensure complete typing in src/skillmodels. --- pixi.lock | 4 +- pyproject.toml | 17 +- src/skillmodels/check_model.py | 40 ++- src/skillmodels/clipping.py | 9 +- src/skillmodels/constraints.py | 54 ++-- src/skillmodels/correlation_heatmap.py | 295 ++++++++++-------- src/skillmodels/decorators.py | 32 +- src/skillmodels/filtered_states.py | 16 +- src/skillmodels/kalman_filters.py | 67 ++-- src/skillmodels/kalman_filters_debug.py | 21 +- src/skillmodels/likelihood_function.py | 100 +++--- src/skillmodels/likelihood_function_debug.py | 72 +++-- src/skillmodels/maximization_inputs.py | 43 ++- src/skillmodels/params_index.py | 91 +++--- src/skillmodels/parse_params.py | 89 +++++- src/skillmodels/process_data.py | 72 +++-- src/skillmodels/process_debug_data.py | 64 +++- src/skillmodels/process_model.py | 17 +- src/skillmodels/qr.py | 16 +- src/skillmodels/simulate_data.py | 91 ++++-- src/skillmodels/transition_functions.py | 69 ++-- src/skillmodels/types.py | 2 +- src/skillmodels/utilities.py | 88 ++++-- src/skillmodels/utils_plotting.py | 30 +- .../visualize_factor_distributions.py | 224 +++++++------ .../visualize_transition_equations.py | 208 +++++++----- 26 files changed, 1155 insertions(+), 676 deletions(-) diff --git a/pixi.lock b/pixi.lock index e3c658cd..a2726f81 100644 --- a/pixi.lock +++ b/pixi.lock @@ -8586,8 +8586,8 @@ packages: timestamp: 1753199211006 - pypi: ./ name: skillmodels - version: 0.0.24.dev238+g71e9d59be.d20260108 - sha256: f68c7eee242cae980939d7c3cffd40f1f449e1da7b739cbf8f1918dfe55e7fff + version: 0.0.24.dev243+gc7ed219cb + sha256: 2a9a1ae4e4a106a7b7def4db3aa61be8cbf11a44695ee638f8252253cde97ee3 requires_dist: - dags - frozendict diff --git a/pyproject.toml b/pyproject.toml index b055a3d0..3c08e5dc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -174,20 +174,8 @@ line-length = 88 [tool.ruff.lint] select = ["ALL"] extend-ignore = [ - # missing type annotation - "ANN001", - - # missing type annotation for `*args` - "ANN002", - - # missing type annotation for `**kwargs` - "ANN003", - - # missing return type annotation for public function - "ANN201", - - # missing return type annotation for private function - "ANN202", + # Dynamically typed expressions (typing.Any) are disallowed - too strict + "ANN401", # No explicit `stacklevel` keyword argument found "B028", @@ -271,6 +259,7 @@ extend-ignore = [ "**/*.ipynb" = ["B018", "T201", "E402", "PLR2004", "INP001", "PTH100"] "docs/**/*" = ["A001", "ERA001", "INP001", "PTH100", "PTH123", "S506"] "tests/*" = [ + "ANN", # Tests 
don't need type annotations "ARG001", "E712", "FBT003", diff --git a/src/skillmodels/check_model.py b/src/skillmodels/check_model.py index 5919ef02..f858bcf4 100644 --- a/src/skillmodels/check_model.py +++ b/src/skillmodels/check_model.py @@ -52,8 +52,13 @@ def check_model( raise ValueError(f"Invalid model specification: {report}") -def check_stagemap(stagemap, stages, n_periods, is_augmented): - report = [] +def check_stagemap( + stagemap: tuple[int, ...], + stages: tuple[int, ...] | list[int], + n_periods: int, + is_augmented: bool, +) -> list[str]: + report: list[str] = [] step_size = 2 if is_augmented else 1 if len(stagemap) != n_periods - step_size: report.append( @@ -95,8 +100,11 @@ def _check_anchoring(anchoring: Anchoring) -> list[str]: return report -def _check_measurements(model_dict, factors): - report = [] +def _check_measurements( + model_dict: dict, + factors: tuple[str, ...], +) -> list[str]: + report: list[str] = [] for factor in factors: candidate = model_dict["factors"][factor]["measurements"] if not _is_list_of(candidate, list): @@ -134,8 +142,11 @@ def _check_no_overlap_in_measurements_of_states_and_inv( return report -def _check_normalizations(model_dict, factors): - report = [] +def _check_normalizations( + model_dict: dict, + factors: tuple[str, ...], +) -> list[str]: + report: list[str] = [] for factor in factors: norminfo = model_dict["factors"][factor].get("normalizations", {}) for norm_type in ["loadings", "intercepts"]: @@ -160,8 +171,12 @@ def _check_normalizations(model_dict, factors): return report -def _check_normalized_variables_are_present(list_of_normdicts, model_dict, factor): - report = [] +def _check_normalized_variables_are_present( + list_of_normdicts: list[dict], + model_dict: dict, + factor: str, +) -> list[str]: + report: list[str] = [] for period, norm_dict in enumerate(list_of_normdicts): for var in norm_dict: if var not in model_dict["factors"][factor]["measurements"][period]: @@ -174,8 +189,11 @@ def _check_normalized_variables_are_present(list_of_normdicts, model_dict, facto return report -def _check_loadings_are_not_normalized_to_zero(list_of_normdicts, factor): - report = [] +def _check_loadings_are_not_normalized_to_zero( + list_of_normdicts: list[dict], + factor: str, +) -> list[str]: + report: list[str] = [] for period, norm_dict in enumerate(list_of_normdicts): for var, val in norm_dict.items(): if val == 0: @@ -186,7 +204,7 @@ def _check_loadings_are_not_normalized_to_zero(list_of_normdicts, factor): return report -def _is_list_of(candidate, type_): +def _is_list_of(candidate: object, type_: type) -> bool: """Check if candidate is a list that only contains elements of type. Note that this is always falls if candidate is not a list and always true if diff --git a/src/skillmodels/clipping.py b/src/skillmodels/clipping.py index 6c327997..07b59661 100644 --- a/src/skillmodels/clipping.py +++ b/src/skillmodels/clipping.py @@ -1,8 +1,15 @@ import jax import jax.numpy as jnp +from jax import Array -def soft_clipping(arr, lower=None, upper=None, lower_hardness=1, upper_hardness=1): +def soft_clipping( + arr: Array, + lower: float | None = None, + upper: float | None = None, + lower_hardness: float = 1, + upper_hardness: float = 1, +) -> Array: """Clip values in an array elementwise using a soft maximum to avoid kinks. Clipping from below is taking a maximum between two values. 
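The hunk above only shows the new signature of soft_clipping, not its body. For intuition, here is a minimal sketch of one standard smooth-maximum construction based on softplus; the helper name soft_clip_below is invented for illustration and the exact formula inside skillmodels.clipping is not visible in this patch:

    # Illustrative sketch, not the skillmodels implementation: a soft lower
    # clip built from softplus. As `hardness` grows, the result approaches
    # max(arr, lower) while staying smooth where a hard clip would kink.
    import jax.numpy as jnp
    from jax.nn import softplus


    def soft_clip_below(arr, lower, hardness=1.0):
        return lower + softplus(hardness * (arr - lower)) / hardness


    x = jnp.array([-2.0, -0.5, 0.0, 1.0])
    print(soft_clip_below(x, lower=0.0, hardness=10.0))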
Clipping diff --git a/src/skillmodels/constraints.py b/src/skillmodels/constraints.py index 78360ad2..fe3adbc3 100644 --- a/src/skillmodels/constraints.py +++ b/src/skillmodels/constraints.py @@ -3,7 +3,7 @@ import functools import warnings from dataclasses import dataclass -from typing import Any +from typing import TYPE_CHECKING, Any import numpy as np import optimagic as om @@ -11,14 +11,17 @@ import skillmodels.transition_functions as t_f_module +if TYPE_CHECKING: + from skillmodels.types import Anchoring, Dimensions, EndogenousFactorsInfo, Labels + def get_constraints_dicts( - dimensions, - labels, - anchoring_info, - update_info, - normalizations, - endogenous_factors_info, + dimensions: "Dimensions", + labels: "Labels", + anchoring_info: "Anchoring", + update_info: pd.DataFrame, + normalizations: dict[str, dict[str, list]], + endogenous_factors_info: "EndogenousFactorsInfo", ) -> list[dict]: """Generate constraints implied by the model specification. @@ -116,7 +119,7 @@ def add_bounds(params: pd.DataFrame, bounds_distance: float) -> pd.DataFrame: return df -def _is_diagonal_entry(ind_tup): +def _is_diagonal_entry(ind_tup: tuple[str, ...]) -> bool: name2 = ind_tup[-1] middle_pos = int(len(name2) // 2) if ( @@ -130,7 +133,10 @@ def _is_diagonal_entry(ind_tup): return is_diag -def _get_normalization_constraints(normalizations, factors) -> list[dict]: +def _get_normalization_constraints( + normalizations: dict[str, dict[str, list]], + factors: tuple[str, ...], +) -> list[dict]: """List of constraints to enforce normalizations. Args: @@ -171,7 +177,7 @@ def _get_normalization_constraints(normalizations, factors) -> list[dict]: return constraints_dicts -def _get_mixture_weights_constraints(n_mixtures) -> list[dict]: +def _get_mixture_weights_constraints(n_mixtures: int) -> list[dict]: """Constrain mixture weights to be between 0 and 1 and sum to 1.""" if n_mixtures == 1: msg = "Set the mixture weight to 1 if there is only one mixture element." @@ -191,7 +197,10 @@ def _get_mixture_weights_constraints(n_mixtures) -> list[dict]: return constraints_dicts -def _get_stage_constraints(stagemap, stages) -> list[dict]: +def _get_stage_constraints( + stagemap: tuple[int, ...], + stages: tuple[int, ...], +) -> list[dict]: """Equality constraints for transition and shock parameters within stages. Args: @@ -232,7 +241,7 @@ def _get_stage_constraints(stagemap, stages) -> list[dict]: return constraints_dicts -def _get_constant_factors_constraints(labels) -> list[dict]: +def _get_constant_factors_constraints(labels: "Labels") -> list[dict]: """Fix shock variances of constant factors to `bounds_distance`. Args: @@ -259,7 +268,10 @@ def _get_constant_factors_constraints(labels) -> list[dict]: return constraints_dicts -def _get_initial_states_constraints(n_mixtures, factors) -> list[dict]: +def _get_initial_states_constraints( + n_mixtures: int, + factors: tuple[str, ...], +) -> list[dict]: """Enforce that the x values of the first factor are increasing. Otherwise the model would only be identified up to the order of the start factors. @@ -290,7 +302,7 @@ def _get_initial_states_constraints(n_mixtures, factors) -> list[dict]: return constraints_dicts -def _get_transition_constraints(labels) -> list[dict]: +def _get_transition_constraints(labels: "Labels") -> list[dict]: """Collect possible constraints on transition parameters. 
Args: @@ -318,7 +330,10 @@ def _get_transition_constraints(labels) -> list[dict]: def _get_anchoring_constraints( - update_info, controls, anchoring_info, periods + update_info: pd.DataFrame, + controls: tuple[str, ...], + anchoring_info: "Anchoring", + periods: tuple[int, ...], ) -> list[dict]: """Constraints on anchoring parameters. @@ -369,7 +384,7 @@ def _get_anchoring_constraints( ind_tups = [] for period in periods: for factor in anchoring_info.factors: - outcome = anchoring_info.outcomes[factor] + outcome = anchoring_info.outcomes[factor] # ty: ignore[invalid-argument-type] meas = f"{outcome}_{factor}" ind_tups.append(("loadings", period, meas, factor)) @@ -383,7 +398,8 @@ def _get_anchoring_constraints( def _get_constraints_for_augmented_periods( - labels, endogenous_factors_info + labels: "Labels", + endogenous_factors_info: "EndogenousFactorsInfo", ) -> list[dict]: """Constraints for augmented periods. @@ -410,7 +426,7 @@ def _get_constraints_for_augmented_periods( # look counterintuitive... aug_period_meas_type_to_constrain = ( "states" - if endogenous_factors_info.factor_info[factor].is_state + if endogenous_factors_info.factor_info[factor].is_state # ty: ignore[invalid-argument-type] else "endogenous_factors" ) aug_period_meas_types = ( @@ -441,7 +457,7 @@ def _get_constraints_for_augmented_periods( return constraints_dicts -def _sel(params, loc): +def _sel(params: pd.DataFrame, loc: Any) -> pd.DataFrame: return params.loc[loc] diff --git a/src/skillmodels/correlation_heatmap.py b/src/skillmodels/correlation_heatmap.py index 6a8be0eb..2d6f0bbf 100644 --- a/src/skillmodels/correlation_heatmap.py +++ b/src/skillmodels/correlation_heatmap.py @@ -1,32 +1,36 @@ +from typing import Any + import numpy as np import pandas as pd +from numpy.typing import NDArray from plotly import graph_objects as go from skillmodels.process_data import pre_process_data from skillmodels.process_model import process_model +from skillmodels.types import ProcessedModel def plot_correlation_heatmap( - corr, - heatmap_kwargs=None, - layout_kwargs=None, - rounding=2, - zmax=None, - zmin=None, - zmid=None, - colorscale="RdBu_r", - show_color_bar=True, - show_diagonal=True, - show_upper_triangle=True, - trim_heatmap=False, - annotate=True, - annotation_fontsize=13, - annotation_text_color="black", - annotation_text_angle=0, - axes_tick_fontsize=(12, 12), - axes_tick_label_angle=(90, 0), - axes_tick_label_color=("black", "black"), -): + corr: pd.DataFrame, + heatmap_kwargs: dict[str, Any] | None = None, + layout_kwargs: dict[str, Any] | None = None, + rounding: int = 2, + zmax: float | None = None, + zmin: float | None = None, + zmid: float | None = None, + colorscale: str = "RdBu_r", + show_color_bar: bool = True, + show_diagonal: bool = True, + show_upper_triangle: bool = True, + trim_heatmap: bool = False, + annotate: bool = True, + annotation_fontsize: int = 13, + annotation_text_color: str = "black", + annotation_text_angle: float = 0, + axes_tick_fontsize: tuple[int, int] = (12, 12), + axes_tick_label_angle: tuple[float, float] = (90, 0), + axes_tick_label_color: tuple[str, str] = ("black", "black"), +) -> go.Figure: """Plot correlation heatmaps for factor measurements. 
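For orientation, a hypothetical usage sketch combining the two public functions whose signatures appear in this file; `data` and `model_dict` are stand-ins for a user's panel data and model specification, not objects defined in this patch:

    from skillmodels.correlation_heatmap import (
        get_measurements_corr,
        plot_correlation_heatmap,
    )


    def measurement_heatmap(data, model_dict):
        # `data` is a long-format panel DataFrame and `model_dict` a model
        # specification dict; both are supplied by the user.
        corr = get_measurements_corr(
            data=data,
            model_dict=model_dict,
            factors=None,  # None selects all factors
            periods=None,  # None selects all periods
        )
        return plot_correlation_heatmap(corr, show_upper_triangle=False)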
Args: @@ -122,7 +126,12 @@ def plot_correlation_heatmap( return fig -def get_measurements_corr(data, model_dict, factors, periods): +def get_measurements_corr( + data: pd.DataFrame, + model_dict: dict, + factors: list[str] | str | None, + periods: float | list[int] | None, +) -> pd.DataFrame: """Get data frame with measurement correlations. Process data to retrieve measurements for each period and calculate correlations @@ -160,7 +169,12 @@ def get_measurements_corr(data, model_dict, factors, periods): return corr -def get_quasi_scores_corr(data, model_dict, factors, periods): +def get_quasi_scores_corr( + data: pd.DataFrame, + model_dict: dict, + factors: list[str] | str | None, + periods: float | list[int] | None, +) -> pd.DataFrame: """Get data frame with correlations of factor scores. Process data to retrieve measurements for each period, standardize measurements @@ -201,7 +215,13 @@ def get_quasi_scores_corr(data, model_dict, factors, periods): return corr -def get_scores_corr(data, params, model_dict, factors, periods): +def get_scores_corr( + data: pd.DataFrame, + params: pd.DataFrame, + model_dict: dict, + factors: list[str] | str | None, + periods: float | list[int] | None, +) -> pd.DataFrame: """Get data frame with correlations of factor scores. Process data to retrieve measurements for each period, standardize measurements @@ -243,12 +263,12 @@ def get_scores_corr(data, params, model_dict, factors, periods): def _process_corr_data_for_plotting( - corr, - rounding, - show_upper_triangle, - show_diagonal, - trim_heatmap, -): + corr: pd.DataFrame, + rounding: int, + show_upper_triangle: bool, + show_diagonal: bool, + trim_heatmap: bool, +) -> pd.DataFrame: """Apply mask and rounding to correlation DataFrame.""" mask = _get_mask(corr, show_upper_triangle, show_diagonal) corr = corr.where(mask).round(rounding) @@ -262,7 +282,11 @@ def _process_corr_data_for_plotting( return corr -def _get_mask(corr, show_upper_triangle, show_diagonal): +def _get_mask( + corr: pd.DataFrame, + show_upper_triangle: bool, + show_diagonal: bool, +) -> NDArray[np.bool_]: """Get array to mask the correlation DataFrame.""" mask = np.zeros_like(corr, dtype=bool) mask[np.tril_indices_from(mask, k=-1)] = True @@ -273,7 +297,7 @@ def _get_mask(corr, show_upper_triangle, show_diagonal): return mask -def _get_update_info_for_periods(model): +def _get_update_info_for_periods(model: ProcessedModel) -> pd.DataFrame: """Return update_info with user-provided periods instead of augmented periods.""" update_info = model.update_info.copy() @@ -281,7 +305,7 @@ def _get_update_info_for_periods(model): period_values = update_info.index.get_level_values("aug_period").map( model.labels.aug_periods_to_periods ) - update_info.index = update_info.index.set_codes(period_values, level="aug_period") + update_info.index = update_info.index.set_codes(period_values, level="aug_period") # ty: ignore[unresolved-attribute] update_info.index = update_info.index.set_names(["period", "variable"]) # Group by period and variable, apply OR logic for boolean columns @@ -293,8 +317,12 @@ def _get_update_info_for_periods(model): def _get_measurement_data( - data, update_info_by_period, periods, latent_factors, observed_factors -): + data: pd.DataFrame, + update_info_by_period: pd.DataFrame, + periods: list[int], + latent_factors: list[str], + observed_factors: list[str], +) -> pd.DataFrame: """Get data frame with factor measurements in each period, in wide format. 
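The _get_mask helper above starts from the strict lower triangle. A self-contained demo of the same numpy pattern follows; the handling of the optional diagonal and upper triangle is reconstructed from the parameter names, since not all branches are visible in this hunk:

    # Standalone demo of the masking pattern used in _get_mask. The
    # show_diagonal / show_upper_triangle branches are inferred from the
    # parameter names and may differ in detail from the real implementation.
    import numpy as np

    corr = np.ones((3, 3))
    mask = np.zeros_like(corr, dtype=bool)
    mask[np.tril_indices_from(mask, k=-1)] = True  # strict lower triangle
    show_diagonal, show_upper_triangle = True, False
    if show_diagonal:
        np.fill_diagonal(mask, val=True)
    if show_upper_triangle:
        mask[np.triu_indices_from(mask, k=1)] = True
    print(mask)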
For each factor, retrieve the data on measurements in each period and stack @@ -336,12 +364,12 @@ def _get_measurement_data( def _get_measurement_data_for_single_period( - data, - update_info_by_period, - period, - latent_factors, - observed_factors, -): + data: pd.DataFrame, + update_info_by_period: pd.DataFrame, + period: int, + latent_factors: list[str], + observed_factors: list[str], +) -> pd.DataFrame: """Extract measurements of factors for the given period. Args: @@ -372,12 +400,12 @@ def _get_measurement_data_for_single_period( def _get_measurement_data_for_multiple_periods( - data, - update_info_by_period, - periods, - latent_factors, - observed_factors, -): + data: pd.DataFrame, + update_info_by_period: pd.DataFrame, + periods: list[int], + latent_factors: list[str], + observed_factors: list[str], +) -> pd.DataFrame: """Extract measurements for factors for given periods. Args: @@ -413,12 +441,12 @@ def _get_measurement_data_for_multiple_periods( def _get_quasi_factor_scores_data( - data, - update_info_by_period, - periods, - latent_factors, - observed_factors, -): + data: pd.DataFrame, + update_info_by_period: pd.DataFrame, + periods: list[int], + latent_factors: list[str], + observed_factors: list[str], +) -> pd.DataFrame: """Get data frame with summary information on factor measurements in each period. In each period, standardize factor measurements to zero mean and unit standard @@ -463,12 +491,12 @@ def _get_quasi_factor_scores_data( def _get_quasi_factor_scores_data_for_single_period( - data, - update_info_by_period, - period, - latent_factors, - observed_factors, -): + data: pd.DataFrame, + update_info_by_period: pd.DataFrame, + period: int, + latent_factors: list[str], + observed_factors: list[str], +) -> pd.DataFrame: """Get frame with summary scores on factor measurements in a given period. Args: @@ -507,12 +535,12 @@ def _get_quasi_factor_scores_data_for_single_period( def _get_quasi_factor_scores_data_for_multiple_periods( - data, - update_info_by_period, - periods, - latent_factors, - observed_factors, -): + data: pd.DataFrame, + update_info_by_period: pd.DataFrame, + periods: list[int], + latent_factors: list[str], + observed_factors: list[str], +) -> pd.DataFrame: """Get frame with summary scores of factor measurements in a given period. Args: @@ -548,13 +576,13 @@ def _get_quasi_factor_scores_data_for_multiple_periods( def _get_factor_scores_data( - data, - params, - model, - periods, - latent_factors, - observed_factors, -): + data: pd.DataFrame, + params: pd.DataFrame, + model: ProcessedModel, + periods: list[int], + latent_factors: list[str], + observed_factors: list[str], +) -> pd.DataFrame: """Get data frame with factor scores in each period. In each period, standardize factor measurements to with estimated intercepts and @@ -601,13 +629,13 @@ def _get_factor_scores_data( def _get_factor_scores_data_for_single_period( - data, - params, - model, - period, - latent_factors, - observed_factors, -): + data: pd.DataFrame, + params: pd.DataFrame, + model: ProcessedModel, + period: int, + latent_factors: list[str], + observed_factors: list[str], +) -> pd.DataFrame: """Get frame with factor scores in a given period. 
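The quasi-score helpers above describe a two-step recipe: standardize each measurement to zero mean and unit standard deviation, then aggregate the standardized measurements within a factor. A hedged sketch of that idea in plain pandas; the exact aggregation used by skillmodels is not visible in this hunk:

    # Sketch of the quasi-score idea only; skillmodels' exact aggregation
    # is not shown in this diff.
    import pandas as pd

    meas = pd.DataFrame({"m1": [1.0, 2.0, 3.0], "m2": [2.0, 4.0, 6.0]})
    standardized = (meas - meas.mean()) / meas.std()
    quasi_score = standardized.mean(axis=1)  # one summary score per person
    print(quasi_score)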
Careful: When we have endogenous factors, *period* refers to the raw period, but the @@ -654,14 +682,14 @@ def _get_factor_scores_data_for_single_period( def _get_factor_scores_data_for_single_model_period( - data, - params, - update_info, - aug_period, - period, - latent_factors, - observed_factors, -): + data: pd.DataFrame, + params: pd.DataFrame, + update_info: pd.DataFrame, + aug_period: int, + period: int, + latent_factors: list[str], + observed_factors: list[str], +) -> pd.DataFrame: """Get frame with factor scores in a given model period. In this function, all calculations are at the augmented period level. @@ -690,7 +718,7 @@ def _get_factor_scores_data_for_single_model_period( params.loc["controls"].query("name2 == 'constant'").droplevel("name2")["value"] ) loadings_count = loadings.astype(bool).groupby("name1").sum() - leave_out_meas = loadings_count[loadings_count > 1].index.to_list() + leave_out_meas = loadings_count[loadings_count > 1].index.to_list() # ty: ignore[unsupported-operator] to_concat = [] for factor in latent_factors: period_factor_measurements = period_info.query( @@ -712,13 +740,13 @@ def _get_factor_scores_data_for_single_model_period( def _get_factor_scores_data_for_multiple_periods( - data, - params, - model, - periods, - latent_factors, - observed_factors, -): + data: pd.DataFrame, + params: pd.DataFrame, + model: ProcessedModel, + periods: list[int], + latent_factors: list[str], + observed_factors: list[str], +) -> pd.DataFrame: """Get frame with factor scores in a given period. Args: @@ -754,7 +782,10 @@ def _get_factor_scores_data_for_multiple_periods( return df -def _process_factors(model, factors): +def _process_factors( + model: ProcessedModel, + factors: list[str] | str | None, +) -> tuple[list[str], list[str]]: """Process factors to get a tuple of lists.""" if not factors: latent_factors = list(model.labels.latent_factors) @@ -777,26 +808,29 @@ def _process_factors(model, factors): return latent_factors, observed_factors # ty: ignore[possibly-unresolved-reference] -def _process_periods(periods, model): +def _process_periods( + periods: float | list[int] | None, + model: ProcessedModel, +) -> list[int]: """Process periods to get a list.""" if periods is None: - periods = list(range(model.dimensions.n_periods)) - elif isinstance(periods, int | float): - periods = [periods] + return list(range(model.dimensions.n_periods)) + if isinstance(periods, int | float): + return [int(periods)] return periods def _get_layout_kwargs( - corr, - layout_kwargs, - annotate, - annotation_fontsize, - annotation_text_color, - annotation_text_angle, - axes_tick_fontsize, - axes_tick_label_angle, - axes_tick_label_color, -): + corr: pd.DataFrame, + layout_kwargs: dict[str, Any] | None, + annotate: bool, + annotation_fontsize: int, + annotation_text_color: str, + annotation_text_angle: float, + axes_tick_fontsize: tuple[int, int], + axes_tick_label_angle: tuple[float, float], + axes_tick_label_color: tuple[str, str], +) -> dict[str, Any]: """Get kwargs to update figure layout. 
Args: @@ -844,10 +878,10 @@ def _get_layout_kwargs( def _get_axes_ticks_kwargs( - axes_tick_fontsize, - axes_tick_label_angle, - axes_tick_label_color, -): + axes_tick_fontsize: tuple[int, int] | dict[str, int], + axes_tick_label_angle: tuple[float, float] | dict[str, float], + axes_tick_label_color: tuple[str, str] | dict[str, str], +) -> dict[str, Any]: """Get kwargs for axes ticks label formating.""" axes_tick_fontsize = _process_axes_tick_args(axes_tick_fontsize) axes_tick_label_angle = _process_axes_tick_args(axes_tick_label_angle) @@ -865,12 +899,12 @@ def _get_axes_ticks_kwargs( def _get_annotations( - df, - annotate, - annotation_fontsize, - annotation_text_color, - annotation_text_angle, -): + df: pd.DataFrame, + annotate: bool, + annotation_fontsize: int, + annotation_text_color: str, + annotation_text_angle: float, +) -> dict[str, Any]: """Get annotations and formatting kwargs.""" annotation_kwargs = {} if annotate: @@ -897,14 +931,14 @@ def _get_annotations( def _get_heatmap_kwargs( - corr, - heatmap_kwargs, - colorscale, - show_color_bar, - zmax, - zmin, - zmid, -): + corr: pd.DataFrame, + heatmap_kwargs: dict[str, Any] | None, + colorscale: str, + show_color_bar: bool, + zmax: float | None, + zmin: float | None, + zmid: float | None, +) -> dict[str, Any]: """Get kwargs to instantiate Heatmap object. Args: @@ -921,7 +955,8 @@ def _get_heatmap_kwargs( """ if zmax is None: - zmax = np.abs(corr.to_numpy())[np.tril_indices_from(corr, k=-1)].max() + corr_arr = corr.to_numpy() + zmax = np.abs(corr_arr)[np.tril_indices_from(corr_arr, k=-1)].max() if zmin is None: zmin = -zmax if zmid is None: @@ -938,7 +973,9 @@ def _get_heatmap_kwargs( return default_heatmap_kwargs -def _process_axes_tick_args(args): +def _process_axes_tick_args( + args: tuple[Any, Any] | list[Any] | dict[str, Any], +) -> dict[str, Any]: if isinstance(args, tuple | list): args = {"x": args[0], "y": args[1]} return args diff --git a/src/skillmodels/decorators.py b/src/skillmodels/decorators.py index 6dcf779d..3fc46f8f 100644 --- a/src/skillmodels/decorators.py +++ b/src/skillmodels/decorators.py @@ -1,9 +1,17 @@ import functools +from collections.abc import Callable +from typing import Any import jax.numpy as jnp +from jax import Array -def extract_params(func=None, *, key=None, names=None): +def extract_params( + func: Callable | None = None, + *, + key: str | None = None, + names: list[str] | None = None, +) -> Callable: """Process params before passing them to func. Note: The resulting function is keyword only! 
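Reading ahead to the wrapper bodies that follow, a usage sketch of the decorator; the function `linear`, the key "transition", and the names "a"/"b" are invented for illustration:

    # Usage sketch; the decorated function must be called with keyword
    # arguments only. With both `key` and `names`, the wrapper replaces
    # kwargs["params"] by dict(zip(names, kwargs["params"][key])).
    from skillmodels.decorators import extract_params


    @extract_params(key="transition", names=["a", "b"])
    def linear(params, states):
        return params["a"] * states + params["b"]


    print(linear(params={"transition": [2.0, 1.0]}, states=3.0))  # 7.0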
@@ -17,11 +25,11 @@ def extract_params(func=None, *, key=None, names=None): """ - def decorator_extract_params(func): + def decorator_extract_params(func: Callable) -> Callable: if key is not None and names is None: @functools.wraps(func) - def wrapper_extract_params(**kwargs): + def wrapper_extract_params(**kwargs: Any) -> Any: internal_kwargs = kwargs.copy() internal_kwargs["params"] = kwargs["params"][key] return func(**internal_kwargs) @@ -29,7 +37,7 @@ def wrapper_extract_params(**kwargs): elif key is None and names is not None: @functools.wraps(func) - def wrapper_extract_params(**kwargs): + def wrapper_extract_params(**kwargs: Any) -> Any: internal_kwargs = kwargs.copy() internal_kwargs["params"] = dict( zip(names, kwargs["params"], strict=False) @@ -39,7 +47,7 @@ def wrapper_extract_params(**kwargs): elif key is not None and names is not None: @functools.wraps(func) - def wrapper_extract_params(**kwargs): + def wrapper_extract_params(**kwargs: Any) -> Any: internal_kwargs = kwargs.copy() internal_kwargs["params"] = dict( zip(names, kwargs["params"][key], strict=False) @@ -56,11 +64,11 @@ def wrapper_extract_params(**kwargs): return decorator_extract_params -def jax_array_output(func): +def jax_array_output(func: Callable) -> Callable: """Convert tuple output to list output.""" @functools.wraps(func) - def wrapper_jax_array_output(*args, **kwargs): + def wrapper_jax_array_output(*args: Any, **kwargs: Any) -> Array: raw = func(*args, **kwargs) out = jnp.array(raw) return out @@ -68,9 +76,13 @@ def wrapper_jax_array_output(*args, **kwargs): return wrapper_jax_array_output -def register_params(func=None, *, params=None): - def decorator_register_params(func): - func.__registered_params__ = params +def register_params( + func: Callable | None = None, + *, + params: list[str] | None = None, +) -> Callable: + def decorator_register_params(func: Callable) -> Callable: + func.__registered_params__ = params # ty: ignore[unresolved-attribute] return func if callable(func): diff --git a/src/skillmodels/filtered_states.py b/src/skillmodels/filtered_states.py index 016003d3..6c80bd49 100644 --- a/src/skillmodels/filtered_states.py +++ b/src/skillmodels/filtered_states.py @@ -1,5 +1,8 @@ +from typing import Any + import jax.numpy as jnp import numpy as np +import pandas as pd from skillmodels.maximization_inputs import get_maximization_inputs from skillmodels.params_index import get_params_index @@ -8,7 +11,11 @@ from skillmodels.process_model import process_model -def get_filtered_states(model_dict, data, params): +def get_filtered_states( + model_dict: dict, + data: pd.DataFrame, + params: pd.DataFrame, +) -> dict[str, dict[str, Any]]: max_inputs = get_maximization_inputs(model_dict=model_dict, data=data) params = params.loc[max_inputs["params_template"].index] debug_loglike = max_inputs["debug_loglike"] @@ -43,7 +50,12 @@ def get_filtered_states(model_dict, data, params): return out -def anchor_states_df(states_df, model_dict, params, use_aug_period): +def anchor_states_df( + states_df: pd.DataFrame, + model_dict: dict, + params: pd.DataFrame, + use_aug_period: bool, +) -> pd.DataFrame: """Anchor states in a DataFrame. 
The DataFrame is expected to have a column called "period" as well as one column diff --git a/src/skillmodels/kalman_filters.py b/src/skillmodels/kalman_filters.py index f9cfae97..d1d57b36 100644 --- a/src/skillmodels/kalman_filters.py +++ b/src/skillmodels/kalman_filters.py @@ -1,5 +1,8 @@ +from collections.abc import Callable + import jax import jax.numpy as jnp +from jax import Array from skillmodels.qr import qr_gpu @@ -15,15 +18,15 @@ def kalman_update( - states, - upper_chols, - loadings, - control_params, - meas_sd, - measurements, - controls, - log_mixture_weights, -): + states: Array, + upper_chols: Array, + loadings: Array, + control_params: Array, + meas_sd: Array, + measurements: Array, + controls: Array, + log_mixture_weights: Array, +) -> tuple[Array, Array, Array, Array]: """Perform a Kalman update with likelihood evaluation. Args: @@ -135,7 +138,10 @@ def kalman_update( # ====================================================================================== -def calculate_sigma_scaling_factor_and_weights(n_states, kappa=2): +def calculate_sigma_scaling_factor_and_weights( + n_states: int, + kappa: float = 2, +) -> tuple[Array, Array]: """Calculate the scaling factor and weights for sigma points according to Julier. There are other sigma point algorithms, but many of them possibly have negative @@ -158,17 +164,17 @@ def calculate_sigma_scaling_factor_and_weights(n_states, kappa=2): def kalman_predict( - transition_func, - states, - upper_chols, - sigma_scaling_factor, - sigma_weights, - trans_coeffs, - shock_sds, - anchoring_scaling_factors, - anchoring_constants, - observed_factors, -): + transition_func: Callable, + states: Array, + upper_chols: Array, + sigma_scaling_factor: float, + sigma_weights: Array, + trans_coeffs: dict[str, Array], + shock_sds: Array, + anchoring_scaling_factors: Array, + anchoring_constants: Array, + observed_factors: Array, +) -> tuple[Array, Array]: """Make a unscented Kalman predict. Args: @@ -228,7 +234,12 @@ def kalman_predict( return predicted_states, predicted_covs -def _calculate_sigma_points(states, upper_chols, scaling_factor, observed_factors): +def _calculate_sigma_points( + states: Array, + upper_chols: Array, + scaling_factor: float, + observed_factors: Array, +) -> Array: """Calculate the array of sigma_points for the unscented transform. Args: @@ -274,12 +285,12 @@ def _calculate_sigma_points(states, upper_chols, scaling_factor, observed_factor def transform_sigma_points( - sigma_points, - transition_func, - trans_coeffs, - anchoring_scaling_factors, - anchoring_constants, -): + sigma_points: Array, + transition_func: Callable, + trans_coeffs: dict[str, Array], + anchoring_scaling_factors: Array, + anchoring_constants: Array, +) -> Array: """Anchor sigma points, transform them and unanchor the transformed sigma points. 
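The predict step builds on Julier's sigma-point scheme referenced in the docstring above. For reference, the textbook Julier quantities are a scaling factor of sqrt(n + kappa), a center weight of kappa / (n + kappa), and 1 / (2 (n + kappa)) for the remaining 2n weights; the sketch below states those formulas and is not necessarily the exact code in kalman_filters.py:

    # Textbook Julier sigma-point weights, for reference only; the actual
    # implementation in skillmodels may differ in detail.
    import jax.numpy as jnp


    def julier_scaling_and_weights(n_states, kappa=2.0):
        scaling = jnp.sqrt(n_states + kappa)
        weights = jnp.full(2 * n_states + 1, 1 / (2 * (n_states + kappa)))
        weights = weights.at[0].set(kappa / (n_states + kappa))
        return scaling, weights


    scaling, weights = julier_scaling_and_weights(3)
    print(scaling, weights.sum())  # weights sum to 1.0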
Args: diff --git a/src/skillmodels/kalman_filters_debug.py b/src/skillmodels/kalman_filters_debug.py index 14b07e1b..c5cbab5d 100644 --- a/src/skillmodels/kalman_filters_debug.py +++ b/src/skillmodels/kalman_filters_debug.py @@ -1,5 +1,8 @@ +from typing import Any + import jax import jax.numpy as jnp +from jax import Array array_qr_jax = jax.vmap(jax.vmap(jnp.linalg.qr)) @@ -10,15 +13,15 @@ def kalman_update( - states, - upper_chols, - loadings, - control_params, - meas_sd, - measurements, - controls, - log_mixture_weights, -): + states: Array, + upper_chols: Array, + loadings: Array, + control_params: Array, + meas_sd: float, + measurements: Array, + controls: Array, + log_mixture_weights: Array, +) -> tuple[Array, Array, Array, Array, dict[str, Any]]: """Perform a Kalman update with likelihood evaluation, returning debug info on top. Args: diff --git a/src/skillmodels/likelihood_function.py b/src/skillmodels/likelihood_function.py index 04870520..ead81beb 100644 --- a/src/skillmodels/likelihood_function.py +++ b/src/skillmodels/likelihood_function.py @@ -1,7 +1,10 @@ import functools +from collections.abc import Callable +from typing import Any import jax import jax.numpy as jnp +from jax import Array from skillmodels.clipping import soft_clipping from skillmodels.kalman_filters import ( @@ -9,24 +12,25 @@ kalman_update, ) from skillmodels.parse_params import parse_params +from skillmodels.types import Dimensions, EstimationOptions, Labels def log_likelihood( - params, - parsing_info, - measurements, - controls, - transition_func, - sigma_scaling_factor, - sigma_weights, - dimensions, - labels, - estimation_options, - is_measurement_iteration, - is_predict_iteration, - iteration_to_period, - observed_factors, -): + params: Array, + parsing_info: dict[str, Any], + measurements: Array, + controls: Array, + transition_func: Callable, + sigma_scaling_factor: float, + sigma_weights: Array, + dimensions: Dimensions, + labels: Labels, + estimation_options: EstimationOptions, + is_measurement_iteration: Array, + is_predict_iteration: Array, + iteration_to_period: Array, + observed_factors: Array, +) -> Array: return log_likelihood_obs( params=params, parsing_info=parsing_info, @@ -46,21 +50,21 @@ def log_likelihood( def log_likelihood_obs( - params, - parsing_info, - measurements, - controls, - transition_func, - sigma_scaling_factor, - sigma_weights, - dimensions, - labels, - estimation_options, - is_measurement_iteration, - is_predict_iteration, - iteration_to_period, - observed_factors, -): + params: Array, + parsing_info: dict[str, Any], + measurements: Array, + controls: Array, + transition_func: Callable, + sigma_scaling_factor: float, + sigma_weights: Array, + dimensions: Dimensions, + labels: Labels, + estimation_options: EstimationOptions, + is_measurement_iteration: Array, + is_predict_iteration: Array, + iteration_to_period: Array, + observed_factors: Array, +) -> Array: """Log likelihood of a skill formation model. 
This function is jax-differentiable and jax-jittable as long as all but the first @@ -147,15 +151,15 @@ def log_likelihood_obs( def _scan_body( - carry, - loop_args, - controls, - pardict, - sigma_scaling_factor, - sigma_weights, - transition_func, - observed_factors, -): + carry: dict[str, Array], + loop_args: dict[str, Array], + controls: Array, + pardict: dict[str, Any], + sigma_scaling_factor: float, + sigma_weights: Array, + transition_func: Callable, + observed_factors: Array, +) -> tuple[dict[str, Array], dict[str, Array]]: # ================================================================================== # create arguments needed for update # ================================================================================== @@ -224,12 +228,16 @@ def _scan_body( return new_state, static_out -def _one_arg_measurement_update(kwargs): +def _one_arg_measurement_update( + kwargs: dict[str, Array], +) -> tuple[Array, Array, Array, Array]: out = kalman_update(**kwargs) return out -def _one_arg_anchoring_update(kwargs): +def _one_arg_anchoring_update( + kwargs: dict[str, Array], +) -> tuple[Array, Array, Array, Array]: _, _, new_log_mixture_weights, new_loglikes = kalman_update(**kwargs) out = ( kwargs["states"], @@ -240,12 +248,18 @@ def _one_arg_anchoring_update(kwargs): return out -def _one_arg_no_predict(kwargs, transition_func): # noqa: ARG001 +def _one_arg_no_predict( + kwargs: dict[str, Any], + transition_func: Callable, # noqa: ARG001 +) -> tuple[Array, Array, Array]: """Just return the states cond chols without any changes.""" return kwargs["states"], kwargs["upper_chols"], kwargs["states"] -def _one_arg_predict(kwargs, transition_func): +def _one_arg_predict( + kwargs: dict[str, Any], + transition_func: Callable, +) -> tuple[Array, Array, Array]: """Do a predict step but also return the input states as filtered states.""" new_states, new_upper_chols = kalman_predict( transition_func, diff --git a/src/skillmodels/likelihood_function_debug.py b/src/skillmodels/likelihood_function_debug.py index ab902598..a4c8e16c 100644 --- a/src/skillmodels/likelihood_function_debug.py +++ b/src/skillmodels/likelihood_function_debug.py @@ -1,30 +1,36 @@ import functools +from collections.abc import Callable +from typing import TYPE_CHECKING, Any import jax import jax.numpy as jnp +from jax import Array from skillmodels.clipping import soft_clipping from skillmodels.kalman_filters import kalman_predict from skillmodels.kalman_filters_debug import kalman_update from skillmodels.parse_params import parse_params +if TYPE_CHECKING: + from skillmodels.types import Dimensions, EstimationOptions, Labels + def log_likelihood( - params, - parsing_info, - measurements, - controls, - transition_func, - sigma_scaling_factor, - sigma_weights, - dimensions, - labels, - estimation_options, - is_measurement_iteration, - is_predict_iteration, - iteration_to_period, - observed_factors, -): + params: Array, + parsing_info: dict[str, Any], + measurements: Array, + controls: Array, + transition_func: Callable[..., Array], + sigma_scaling_factor: float, + sigma_weights: Array, + dimensions: "Dimensions", + labels: "Labels", + estimation_options: "EstimationOptions", + is_measurement_iteration: Array, + is_predict_iteration: Array, + iteration_to_period: Array, + observed_factors: Array, +) -> dict[str, Any]: """Log likelihood of a skill formation model, returning debug data on top. 
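_scan_body has the (carry, loop_args) -> (new_carry, output) shape of a jax.lax.scan step, with the remaining arguments closed over. A generic sketch of that pattern; the toy body below is not the likelihood recursion itself:

    # Generic illustration of the scan pattern implied by _scan_body's
    # signature: static arguments are bound with functools.partial and the
    # resulting (carry, x) -> (carry, y) step is fed to jax.lax.scan.
    import functools

    import jax
    import jax.numpy as jnp


    def _body(carry, loop_args, scale):
        new_carry = carry + scale * loop_args
        return new_carry, new_carry  # carry forward and record

    partialed = functools.partial(_body, scale=2.0)
    final, history = jax.lax.scan(partialed, init=0.0, xs=jnp.arange(4.0))
    print(final, history)  # 12.0, [ 0.  2.  6. 12.]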
This function is jax-differentiable and jax-jittable as long as all but the first @@ -142,15 +148,15 @@ def log_likelihood( def _scan_body( - carry, - loop_args, - controls, - pardict, - sigma_scaling_factor, - sigma_weights, - transition_func, - observed_factors, -): + carry: dict[str, Array], + loop_args: dict[str, Array], + controls: Array, + pardict: dict[str, Any], + sigma_scaling_factor: float, + sigma_weights: Array, + transition_func: Callable[..., Array], + observed_factors: Array, +) -> tuple[dict[str, Array], dict[str, Any]]: # ================================================================================== # create arguments needed for update # ================================================================================== @@ -219,12 +225,16 @@ def _scan_body( return new_state, static_out -def _one_arg_measurement_update(kwargs): +def _one_arg_measurement_update( + kwargs: dict[str, Any], +) -> tuple[Array, Array, Array, Array, dict[str, Any]]: out = kalman_update(**kwargs) return out -def _one_arg_anchoring_update(kwargs): +def _one_arg_anchoring_update( + kwargs: dict[str, Any], +) -> tuple[Array, Array, Array, Array, dict[str, Any]]: _, _, new_log_mixture_weights, new_loglikes, debug_info = kalman_update(**kwargs) out = ( kwargs["states"], @@ -236,12 +246,18 @@ def _one_arg_anchoring_update(kwargs): return out -def _one_arg_no_predict(kwargs, transition_func): # noqa: ARG001 +def _one_arg_no_predict( + kwargs: dict[str, Any], + transition_func: Callable[..., Array], # noqa: ARG001 +) -> tuple[Array, Array, Array]: """Just return the states cond chols without any changes.""" return kwargs["states"], kwargs["upper_chols"], kwargs["states"] -def _one_arg_predict(kwargs, transition_func): +def _one_arg_predict( + kwargs: dict[str, Any], + transition_func: Callable[..., Array], +) -> tuple[Array, Array, Array]: """Do a predict step but also return the input states as filtered states.""" new_states, new_upper_chols = kalman_predict( transition_func, diff --git a/src/skillmodels/maximization_inputs.py b/src/skillmodels/maximization_inputs.py index c50be7b2..9bfa71a7 100644 --- a/src/skillmodels/maximization_inputs.py +++ b/src/skillmodels/maximization_inputs.py @@ -1,9 +1,13 @@ import functools +from collections.abc import Callable +from typing import Any import jax import jax.numpy as jnp import numpy as np import pandas as pd +from jax import Array +from numpy.typing import NDArray import skillmodels.likelihood_function as lf import skillmodels.likelihood_function_debug as lfd @@ -19,11 +23,16 @@ from skillmodels.process_data import process_data from skillmodels.process_debug_data import process_debug_data from skillmodels.process_model import process_model +from skillmodels.types import ProcessedModel jax.config.update("jax_enable_x64", True) # noqa: FBT003 -def get_maximization_inputs(model_dict, data, split_dataset=1): +def get_maximization_inputs( + model_dict: dict, + data: pd.DataFrame, + split_dataset: int = 1, +) -> dict[str, Any]: """Create inputs for optimagic's maximize function. 
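A hypothetical end-to-end sketch of how these inputs might be used. It assumes the returned dict exposes the closures under keys such as "loglike" and "params_template" (the latter is used that way in filtered_states.py above), that the params template has a "value" column, and the optimizer choice is only an example:

    # Hypothetical sketch; `model_dict` and `data` are user inputs, the key
    # names are assumptions based on usage elsewhere in this patch, and the
    # algorithm is just an example.
    import optimagic as om

    from skillmodels.maximization_inputs import get_maximization_inputs


    def fit(model_dict, data):
        max_inputs = get_maximization_inputs(model_dict=model_dict, data=data)
        start_params = max_inputs["params_template"]
        start_params["value"] = 0.1  # naive start values, illustration only
        return om.maximize(
            fun=max_inputs["loglike"],
            params=start_params,
            algorithm="scipy_lbfgsb",
        )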
Args: @@ -112,15 +121,17 @@ def get_maximization_inputs(model_dict, data, split_dataset=1): _jitted_loglikeobs = jax.jit(partialed_loglikes["llo"]) _gradient = jax.jit(jax.grad(partialed_loglikes["ll"])) - def loglike(params): + def loglike(params: pd.DataFrame) -> float: params_vec = partialed_get_jnp_params_vec(params) return float(_jitted_loglike(params_vec)) - def loglikeobs(params): + def loglikeobs(params: pd.DataFrame) -> NDArray[np.floating]: params_vec = partialed_get_jnp_params_vec(params) return _to_numpy(_jitted_loglikeobs(params_vec)) - def loglike_and_gradient(params): + def loglike_and_gradient( + params: pd.DataFrame, + ) -> tuple[float, NDArray[np.floating]]: params_vec = partialed_get_jnp_params_vec(params) crit = float(_jitted_loglike(params_vec)) n_obs = processed_data["measurements"].shape[1] @@ -146,7 +157,7 @@ def loglike_and_gradient(params): grad = _to_numpy(_grad) return crit, grad - def debug_loglike(params): + def debug_loglike(params: pd.DataFrame) -> dict[str, Any]: params_vec = partialed_get_jnp_params_vec(params) jax_output = partialed_loglikes["debug_ll"](params_vec) tmp = _to_numpy(jax_output) @@ -189,15 +200,15 @@ def debug_loglike(params): def _partial_some_log_likelihood( - fun, - parsing_info, - measurements, - controls, - observed_factors, - model, - sigma_weights, - sigma_scaling_factor, -): + fun: Callable, + parsing_info: dict[str, Any], + measurements: Array, + controls: Array, + observed_factors: Array, + model: ProcessedModel, + sigma_weights: Array, + sigma_scaling_factor: Array, +) -> Callable: update_info = model.update_info is_measurement_iteration = (update_info["purpose"] == "measurement").to_numpy() _aug_periods = pd.Series( @@ -236,7 +247,7 @@ def _partial_some_log_likelihood( ) -def _to_numpy(obj): +def _to_numpy(obj: Any) -> Any: if isinstance(obj, dict): res = {} for key, value in obj.items(): @@ -253,7 +264,7 @@ def _to_numpy(obj): return res -def _get_jnp_params_vec(params, target_index): +def _get_jnp_params_vec(params: pd.DataFrame, target_index: pd.MultiIndex) -> Array: if set(params.index) != set(target_index): additional_entries = params.index.difference(target_index).tolist() missing_entries = target_index.difference(params.index).tolist() diff --git a/src/skillmodels/params_index.py b/src/skillmodels/params_index.py index 57bc8ee2..d997f602 100644 --- a/src/skillmodels/params_index.py +++ b/src/skillmodels/params_index.py @@ -9,12 +9,12 @@ def get_params_index( - update_info, + update_info: pd.DataFrame, labels: Labels, dimensions: Dimensions, transition_info: TransitionInfo, endogenous_factors_info: EndogenousFactorsInfo, -): +) -> pd.MultiIndex: """Generate index for the params_df for optimagic. The index has four levels. The first is the parameter category. The second is the @@ -68,13 +68,15 @@ def get_params_index( return index -def get_control_params_index_tuples(controls, update_info): +def get_control_params_index_tuples( + controls: tuple[str, ...], + update_info: pd.DataFrame, +) -> list[tuple[str, int, str, str]]: """Index tuples for control coeffs. Args: - controls (list): List of lists. There is one sublist per period which contains - the names of the control variables in that period. Constant not included. - update_info (pandas.DataFrame): DataFrame with one row per Kalman update needed + controls: Names of the control variables. Constant not included. + update_info: DataFrame with one row per Kalman update needed in the likelihood function. See :ref:`update_info`. 
""" @@ -85,17 +87,17 @@ def get_control_params_index_tuples(controls, update_info): return ind_tups -def get_loadings_index_tuples(factors, update_info): +def get_loadings_index_tuples( + factors: tuple[str, ...], + update_info: pd.DataFrame, +) -> list[tuple[str, int, str, str]]: """Index tuples for loading. Args: - factors (list): The latent factors of the model - update_info (pandas.DataFrame): DataFrame with one row per Kalman update needed + factors: The latent factors of the model. + update_info: DataFrame with one row per Kalman update needed in the likelihood function. See :ref:`update_info`. - Returns: - ind_tups (list) - """ mask = update_info[list(factors)].to_numpy() ind_tups = [] @@ -106,16 +108,15 @@ def get_loadings_index_tuples(factors, update_info): return ind_tups -def get_meas_sds_index_tuples(update_info): +def get_meas_sds_index_tuples( + update_info: pd.DataFrame, +) -> list[tuple[str, int, str, str]]: """Index tuples for meas_sd. Args: - update_info (pandas.DataFrame): DataFrame with one row per Kalman update needed + update_info: DataFrame with one row per Kalman update needed in the likelihood function. See :ref:`update_info`. - Returns: - ind_tups (list) - """ ind_tups = [] for aug_period, meas in update_info.index: @@ -123,15 +124,17 @@ def get_meas_sds_index_tuples(update_info): return ind_tups -def get_shock_sds_index_tuples(aug_periods, factors, has_endogenous_factors): +def get_shock_sds_index_tuples( + aug_periods: tuple[int, ...], + factors: tuple[str, ...], + has_endogenous_factors: bool, +) -> list[tuple[str, int, str, str]]: """Index tuples for shock_sd. Args: - aug_periods (list): The augmented periods of the model. - factors (list): The latent factors of the model. - - Returns: - ind_tups (list) + aug_periods: The augmented periods of the model. + factors: The latent factors of the model. + has_endogenous_factors: Whether the model has endogenous factors. """ end = -2 if has_endogenous_factors else -1 @@ -142,15 +145,15 @@ def get_shock_sds_index_tuples(aug_periods, factors, has_endogenous_factors): return ind_tups -def initial_mean_index_tuples(n_mixtures, factors): +def initial_mean_index_tuples( + n_mixtures: int, + factors: tuple[str, ...], +) -> list[tuple[str, int, str, str]]: """Index tuples for initial_mean. Args: - n_mixtures (int): Number of elements in the mixture distribution of the factors. - factors (list): The latent factors of the model - - Returns: - ind_tups (list) + n_mixtures: Number of elements in the mixture distribution of the factors. + factors: The latent factors of the model. """ ind_tups = [] @@ -160,14 +163,13 @@ def initial_mean_index_tuples(n_mixtures, factors): return ind_tups -def get_mixture_weights_index_tuples(n_mixtures): +def get_mixture_weights_index_tuples( + n_mixtures: int, +) -> list[tuple[str, int, str, str]]: """Index tuples for mixture_weight. Args: - n_mixtures (int): Number of elements in the mixture distribution of the factors. - - Returns: - ind_tups (list) + n_mixtures: Number of elements in the mixture distribution of the factors. """ ind_tups = [] @@ -176,15 +178,15 @@ def get_mixture_weights_index_tuples(n_mixtures): return ind_tups -def get_initial_cholcovs_index_tuples(n_mixtures, factors): +def get_initial_cholcovs_index_tuples( + n_mixtures: int, + factors: tuple[str, ...], +) -> list[tuple[str, int, str, str]]: """Index tuples for initial_cov. Args: - n_mixtures (int): Number of elements in the mixture distribution of the factors. 
- factors (list): The latent factors of the model - - Returns: - ind_tups (list) + n_mixtures: Number of elements in the mixture distribution of the factors. + factors: The latent factors of the model. """ ind_tups = [] @@ -204,8 +206,10 @@ def get_initial_cholcovs_index_tuples(n_mixtures, factors): def get_transition_index_tuples( - transition_info: TransitionInfo, aug_periods, has_endogenous_factors: bool -): + transition_info: TransitionInfo, + aug_periods: tuple[int, ...], + has_endogenous_factors: bool, +) -> list[tuple[str, int, str, str]]: """Index tuples for transition equation coefficients. Args: @@ -213,9 +217,6 @@ def get_transition_index_tuples( aug_periods: The augmented periods of the model. has_endogenous_factors: Whether the model has endogenous factors. - Returns: - ind_tups (list) - """ end = -2 if has_endogenous_factors else -1 ind_tups = [] diff --git a/src/skillmodels/parse_params.py b/src/skillmodels/parse_params.py index b763a6fd..e10b61f0 100644 --- a/src/skillmodels/parse_params.py +++ b/src/skillmodels/parse_params.py @@ -1,13 +1,21 @@ import warnings +from typing import Any import jax.numpy as jnp import numpy as np import pandas as pd +from jax import Array + +from skillmodels.types import Anchoring, Dimensions, Labels def create_parsing_info( - params_index, update_info, labels, anchoring, has_endogenous_factors -): + params_index: pd.MultiIndex, + update_info: pd.DataFrame, + labels: Labels, + anchoring: Anchoring, + has_endogenous_factors: bool, +) -> dict[str, Any]: """Create a dictionary with information how the parameter vector has to be parsed. Args: @@ -83,7 +91,10 @@ def create_parsing_info( return parsing_info -def _get_positional_selector_from_loc(range_sr, loc): +def _get_positional_selector_from_loc( + range_sr: pd.Series, + loc: str | pd.MultiIndex | pd.Index, +) -> Array | slice: with warnings.catch_warnings(): warnings.filterwarnings( "ignore", @@ -98,7 +109,13 @@ def _get_positional_selector_from_loc(range_sr, loc): return ilocs -def parse_params(params, parsing_info, dimensions, labels, n_obs): +def parse_params( + params: Array, + parsing_info: dict[str, Any], + dimensions: Dimensions, + labels: Labels, + n_obs: int, +) -> tuple[Array, Array, Array, dict[str, Any]]: """Parse params into the quantities that depend on it. Args: @@ -139,13 +156,13 @@ def parse_params(params, parsing_info, dimensions, labels, n_obs): } pardict["anchoring_scaling_factors"] = _get_anchoring_scaling_factors( - pardict["loadings"], + pardict["loadings"], # ty: ignore[invalid-argument-type] parsing_info, dimensions, ) pardict["anchoring_constants"] = _get_anchoring_constants( - pardict["controls"], + pardict["controls"], # ty: ignore[invalid-argument-type] parsing_info, dimensions, ) @@ -153,7 +170,12 @@ def parse_params(params, parsing_info, dimensions, labels, n_obs): return states, upper_chols, log_weights, pardict -def _get_initial_states(params, info, dimensions, n_obs): +def _get_initial_states( + params: Array, + info: dict[str, Any], + dimensions: Dimensions, + n_obs: int, +) -> Array: """Create the array of initial states.""" state = params[info["initial_states"]].reshape( 1, @@ -163,7 +185,12 @@ def _get_initial_states(params, info, dimensions, n_obs): return jnp.repeat(state, n_obs, axis=0) -def _get_initial_upper_chols(params, info, dimensions, n_obs): +def _get_initial_upper_chols( + params: Array, + info: dict[str, Any], + dimensions: Dimensions, + n_obs: int, +) -> Array: """Create the array with cholesky factors of the initial states covariance matrix. 
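As the note that follows explains, the arrays hold transposes of lower-triangular Cholesky factors. A tiny numeric check of that relation:

    # If U is the transpose of the lower-triangular Cholesky factor L,
    # then cov = L @ L.T = U.T @ U.
    import jax.numpy as jnp

    cov = jnp.array([[2.0, 0.5], [0.5, 1.0]])
    lower = jnp.linalg.cholesky(cov)
    upper = lower.T
    print(jnp.allclose(upper.T @ upper, cov))  # True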
Note: The matrices contain the transpose of the lower triangular cholesky factors. @@ -179,18 +206,29 @@ def _get_initial_upper_chols(params, info, dimensions, n_obs): return upper_chols -def _get_initial_log_mixture_weights(params, info, n_obs): +def _get_initial_log_mixture_weights( + params: Array, + info: dict[str, Any], + n_obs: int, +) -> Array: """Create the array with the log of initial mixture weights.""" log_weights = jnp.log(params[info["mixture_weights"]]).reshape(1, -1) return jnp.repeat(log_weights, n_obs, axis=0) -def _get_control_params(params, info, dimensions): +def _get_control_params( + params: Array, + info: dict[str, Any], + dimensions: Dimensions, +) -> Array: """Create the parameters for control variables in measurement equations.""" return params[info["controls"]].reshape(-1, dimensions.n_controls) -def _get_loadings(params, info): +def _get_loadings( + params: Array, + info: dict[str, Any], +) -> Array: """Create the array of factor loadings.""" info = info["loadings"] free = params[info["slice"]] @@ -199,17 +237,28 @@ def _get_loadings(params, info): return out -def _get_meas_sds(params, info): +def _get_meas_sds( + params: Array, + info: dict[str, Any], +) -> Array: """Create the array of standard deviations of the measurement errors.""" return params[info["meas_sds"]] -def _get_shock_sds(params, info, dimensions): +def _get_shock_sds( + params: Array, + info: dict[str, Any], + dimensions: Dimensions, +) -> Array: """Create the array of standard deviations of the shocks in transition functions.""" return params[info["shock_sds"]].reshape(-1, dimensions.n_latent_factors) -def _get_transition_params(params, info, labels): +def _get_transition_params( + params: Array, + info: dict[str, Any], + labels: Labels, +) -> dict[str, Array]: """Create a list of arrays with transition equation parameters.""" trans_params = {} t_info = info["transition"] @@ -224,7 +273,11 @@ def _get_transition_params(params, info, labels): return trans_params -def _get_anchoring_scaling_factors(loadings, info, dimensions): +def _get_anchoring_scaling_factors( + loadings: Array, + info: dict[str, Any], + dimensions: Dimensions, +) -> Array: """Create an array of anchoring scaling factors. Note: Parameters are not taken from the parameter vector but from the loadings. @@ -250,7 +303,11 @@ def _get_anchoring_scaling_factors(loadings, info, dimensions): return scaling_factors -def _get_anchoring_constants(controls, info, dimensions): +def _get_anchoring_constants( + controls: Array, + info: dict[str, Any], + dimensions: Dimensions, +) -> Array: """Create an array of anchoring constants. Note: Parameters are not taken from the parameter vector but from the controls. 
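Before moving on to the data processing, a quick orientation on the parameter layout these parsers consume: get_params_index builds a four-level MultiIndex of (category, period, name1, name2) tuples. The concrete tuples and level names below are illustrative examples of that pattern, not output of the real function:

    # Illustration of the four-level index layout; tuples and level names
    # are examples of the pattern, not output of get_params_index.
    import pandas as pd

    ind_tups = [
        ("controls", 0, "y1", "constant"),
        ("loadings", 0, "y1", "fac1"),
        ("meas_sds", 0, "y1", "-"),
    ]
    index = pd.MultiIndex.from_tuples(
        ind_tups, names=["category", "period", "name1", "name2"]
    )
    print(pd.DataFrame(index=index, columns=["value"]))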
diff --git a/src/skillmodels/process_data.py b/src/skillmodels/process_data.py index 688479c6..28f2fb36 100644 --- a/src/skillmodels/process_data.py +++ b/src/skillmodels/process_data.py @@ -1,22 +1,23 @@ import warnings -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Literal import jax.numpy as jnp import numpy as np import pandas as pd +from jax import Array if TYPE_CHECKING: - from skillmodels.types import Labels + from skillmodels.types import Anchoring, Labels def process_data( - df, - has_endogenous_factors, - labels, - update_info, - anchoring_info, - purpose="estimation", -): + df: pd.DataFrame, + has_endogenous_factors: bool, + labels: "Labels", + update_info: pd.DataFrame, + anchoring_info: "Anchoring", + purpose: Literal["estimation", "anything", "simulation"] = "estimation", +) -> dict[str, Any]: """Process the data for estimation. Args: @@ -63,7 +64,10 @@ def process_data( return out -def pre_process_data(df, periods): +def pre_process_data( + df: pd.DataFrame, + periods: tuple[int, ...] | list[int], +) -> pd.DataFrame: """Balance panel data in long format, drop unnecessary periods and set index. Args: @@ -83,7 +87,8 @@ def pre_process_data(df, periods): # replace existing codes for periods and df.index.names = ["id", "period"] for level in [0, 1]: - df.index = df.index.set_levels(range(len(df.index.levels[level])), level=level) + # df.index is a MultiIndex but typed as Index + df.index = df.index.set_levels(range(len(df.index.levels[level])), level=level) # ty: ignore[unresolved-attribute] # create new index ids = sorted(df.index.get_level_values("id").unique()) @@ -125,7 +130,7 @@ def _augment_data_for_endogenous_factors( df: pd.DataFrame, labels: "Labels", update_info: pd.DataFrame, -): +) -> pd.DataFrame: """Make room for endogenous factors by doubling up the periods. Endogeneity means that current states influence the factor. 
Typically, this comes @@ -155,15 +160,23 @@ def _augment_data_for_endogenous_factors( return out.set_index(["id", "aug_period"]).sort_index() -def _add_copies_of_anchoring_outcome(df, anchoring_info): +def _add_copies_of_anchoring_outcome( + df: pd.DataFrame, + anchoring_info: "Anchoring", +) -> pd.DataFrame: df = df.copy() for factor in anchoring_info.factors: - outcome = anchoring_info.outcomes[factor] + outcome = anchoring_info.outcomes[factor] # ty: ignore[invalid-argument-type] df[f"{outcome}_{factor}"] = df[outcome] return df -def _check_data(df, update_info, labels, purpose): # noqa: C901 +def _check_data( # noqa: C901 + df: pd.DataFrame, + update_info: pd.DataFrame, + labels: "Labels", + purpose: Literal["estimation", "anything", "simulation"], +) -> None: var_report = pd.DataFrame(index=update_info.index[:0], columns=["problem"]) for aug_period in labels.aug_periods: period_data = df.query(f"aug_period == {aug_period}") @@ -196,7 +209,11 @@ def _check_data(df, update_info, labels, purpose): # noqa: C901 raise ValueError(var_report) -def _handle_controls_with_missings(df, controls, update_info): +def _handle_controls_with_missings( + df: pd.DataFrame, + controls: tuple[str, ...], + update_info: pd.DataFrame, +) -> pd.DataFrame: aug_periods = update_info.index.get_level_values(0).unique().tolist() problematic_index = df.index[:0] for aug_period in aug_periods: @@ -215,7 +232,10 @@ def _handle_controls_with_missings(df, controls, update_info): return df -def _get_period_measurements(update_info, aug_period): +def _get_period_measurements( + update_info: pd.DataFrame, + aug_period: int, +) -> list[str]: if aug_period in update_info.index: measurements = list(update_info.loc[aug_period].index) else: @@ -223,14 +243,22 @@ def _get_period_measurements(update_info, aug_period): return measurements -def _generate_measurements_array(df, update_info, n_obs): +def _generate_measurements_array( + df: pd.DataFrame, + update_info: pd.DataFrame, + n_obs: int, +) -> Array: arr = np.zeros((len(update_info), n_obs)) for k, (aug_period, var) in enumerate(update_info.index): arr[k] = df.query(f"aug_period == {aug_period}")[var].to_numpy() return jnp.array(arr, dtype="float32") -def _generate_controls_array(df, labels, n_obs): +def _generate_controls_array( + df: pd.DataFrame, + labels: "Labels", + n_obs: int, +) -> Array: arr = np.zeros((len(labels.aug_periods), n_obs, len(labels.controls))) for aug_period in labels.aug_periods: arr[aug_period] = df.query(f"aug_period == {aug_period}")[ @@ -239,7 +267,11 @@ def _generate_controls_array(df, labels, n_obs): return jnp.array(arr, dtype="float32") -def _generate_observed_factor_array(df, labels, n_obs): +def _generate_observed_factor_array( + df: pd.DataFrame, + labels: "Labels", + n_obs: int, +) -> Array: arr = np.zeros((len(labels.aug_periods), n_obs, len(labels.observed_factors))) for aug_period in labels.aug_periods: arr[aug_period] = df.query(f"aug_period == {aug_period}")[ diff --git a/src/skillmodels/process_debug_data.py b/src/skillmodels/process_debug_data.py index 00cacacb..df3ccbeb 100644 --- a/src/skillmodels/process_debug_data.py +++ b/src/skillmodels/process_debug_data.py @@ -1,8 +1,18 @@ +from typing import TYPE_CHECKING, Any + import numpy as np import pandas as pd +from jax import Array +from numpy.typing import NDArray + +if TYPE_CHECKING: + from skillmodels.types import ProcessedModel -def process_debug_data(debug_data, model): +def process_debug_data( + debug_data: dict[str, Any], + model: "ProcessedModel", +) -> dict[str, Any]: 
"""Process the raw debug data into pandas objects that make visualization easy. Args: @@ -93,7 +103,11 @@ def process_debug_data(debug_data, model): return res -def _create_post_update_states(filtered_states, factors, update_info): +def _create_post_update_states( + filtered_states: Array, + factors: tuple[str, ...], + update_info: pd.DataFrame, +) -> pd.DataFrame: to_concat = [] for (aug_period, meas), data in zip( update_info.index, filtered_states, strict=False @@ -109,7 +123,10 @@ def _create_post_update_states(filtered_states, factors, update_info): return post_states -def _convert_state_array_to_df(arr, factor_names): +def _convert_state_array_to_df( + arr: NDArray[np.floating[Any]], + factor_names: tuple[str, ...], +) -> pd.DataFrame: """Convert a 3d state array into a 2d DataFrame. Args: @@ -117,17 +134,22 @@ def _convert_state_array_to_df(arr, factor_names): factor_names (list): Names of the latent factors. """ n_obs, n_mixtures, n_states = arr.shape - df = pd.DataFrame(data=arr.reshape(-1, n_states), columns=factor_names) + df = pd.DataFrame(data=arr.reshape(-1, n_states), columns=list(factor_names)) df["mixture"] = np.full((n_obs, n_mixtures), np.arange(n_mixtures)).flatten() return df -def _create_filtered_states(filtered_states, log_mixture_weights, update_info, factors): - filtered_states = np.array(filtered_states) - log_mixture_weights = np.array(log_mixture_weights) - weights = np.exp(log_mixture_weights) +def _create_filtered_states( + filtered_states: Array, + log_mixture_weights: Array, + update_info: pd.DataFrame, + factors: tuple[str, ...], +) -> pd.DataFrame: + filtered_states_np = np.array(filtered_states) + log_mixture_weights_np = np.array(log_mixture_weights) + weights = np.exp(log_mixture_weights_np) - agg_states = (filtered_states * weights.reshape(*weights.shape, 1)).sum(axis=-2) + agg_states = (filtered_states_np * weights.reshape(*weights.shape, 1)).sum(axis=-2) keep = [] for i, (aug_period, measurement) in enumerate(update_info.index): @@ -150,20 +172,26 @@ def _create_filtered_states(filtered_states, log_mixture_weights, update_info, f return filtered_states -def create_state_ranges(filtered_states, factors): - ranges = {} +def create_state_ranges( + filtered_states: pd.DataFrame, + factors: tuple[str, ...] 
| list[str], +) -> dict[str, pd.DataFrame]: + ranges: dict[str, pd.DataFrame] = {} # Group by whichever period column is present period_col = "aug_period" if "aug_period" in filtered_states.columns else "period" minima = filtered_states.groupby(period_col).min() maxima = filtered_states.groupby(period_col).max() for factor in factors: df = pd.concat([minima[factor], maxima[factor]], axis=1) - df.columns = ["minimum", "maximum"] + df.columns = pd.Index(["minimum", "maximum"]) ranges[factor] = df return ranges -def _process_residuals(residuals, update_info): +def _process_residuals( + residuals: Array, + update_info: pd.DataFrame, +) -> pd.DataFrame: to_concat = [] n_obs, n_mixtures = residuals[0].shape for (aug_period, meas), data in zip(update_info.index, residuals, strict=False): @@ -176,11 +204,17 @@ def _process_residuals(residuals, update_info): return pd.concat(to_concat) -def _process_residual_sds(residual_sds, update_info): +def _process_residual_sds( + residual_sds: Array, + update_info: pd.DataFrame, +) -> pd.DataFrame: return _process_residuals(residual_sds, update_info) -def _process_all_contributions(all_contributions, update_info): +def _process_all_contributions( + all_contributions: Array, + update_info: pd.DataFrame, +) -> pd.DataFrame: to_concat = [] for (period, meas), contribs in zip( update_info.index, all_contributions, strict=False diff --git a/src/skillmodels/process_model.py b/src/skillmodels/process_model.py index 2f8cd9d8..d73bde2b 100644 --- a/src/skillmodels/process_model.py +++ b/src/skillmodels/process_model.py @@ -1,3 +1,4 @@ +from collections.abc import KeysView, Mapping from copy import deepcopy from functools import partial from typing import Any, Literal @@ -7,7 +8,7 @@ from dags import concatenate_functions from dags.signature import rename_arguments from frozendict import frozendict -from jax import vmap +from jax import Array, vmap from pandas import DataFrame import skillmodels.transition_functions as t_f_module @@ -27,7 +28,7 @@ pd.set_option("future.no_silent_downcasting", True) # noqa: FBT003 -def process_model(model_dict): +def process_model(model_dict: dict) -> ProcessedModel: """Check, clean, extend and transform the model specs. Check the completeness, consistency and validity of the model specifications. @@ -334,7 +335,12 @@ def _process_anchoring(model_dict: dict) -> Anchoring: ) -def _insert_empty_elements_into_list(old, insert_at_modulo, to_insert, aug_p_to_p): +def _insert_empty_elements_into_list( + old: list, + insert_at_modulo: int, + to_insert: Any, + aug_p_to_p: Mapping[int, int], +) -> list: return [ to_insert if aug_p % 2 == insert_at_modulo else old[p] for aug_p, p in aug_p_to_p.items() @@ -428,7 +434,7 @@ def _get_transition_info(model_dict: dict, labels: Labels) -> TransitionInfo: # add functions to produce the individual factors out of the 1d states vector. # The dag will automatically sort out what we don't need. - def _extract_factor(states, pos): + def _extract_factor(states: Array, pos: int) -> Array: return states[pos] for i, factor in enumerate(labels.all_factors): @@ -492,7 +498,8 @@ def _get_endogenous_factors_info( def _get_aug_periods_to_aug_period_meas_types( - aug_periods, has_endogenous_factors: bool + aug_periods: tuple[int, ...] 
| KeysView[int], + has_endogenous_factors: bool, ) -> dict[int, Literal["states", "endogenous_factors"]]: if has_endogenous_factors: return { diff --git a/src/skillmodels/qr.py b/src/skillmodels/qr.py index c690eac7..36a602fc 100644 --- a/src/skillmodels/qr.py +++ b/src/skillmodels/qr.py @@ -1,9 +1,10 @@ import jax import jax.numpy as jnp +from jax import Array @jax.custom_jvp -def qr_gpu(a: jax.Array): +def qr_gpu(a: Array) -> tuple[Array, Array]: """Custom implementation of the QR Decomposition.""" r, tau = jnp.linalg.qr(a, mode="raw") @@ -11,7 +12,7 @@ def qr_gpu(a: jax.Array): return q, jnp.triu(r.mT[: tau.shape[0]]) -def _householder(r: jax.Array, tau: jax.Array): +def _householder(r: Array, tau: Array) -> Array: """Custom implementation of the Householder Product. Uses the outputs of jnp.linalg.qr with mode = "raw" to calculate Q. This is needed @@ -33,17 +34,17 @@ def _householder(r: jax.Array, tau: jax.Array): return h[:, :n] -def _t(x: jax.Array) -> jax.Array: +def _t(x: Array) -> Array: """Transpose batched Matrix.""" return jax.lax.transpose(x, (*range(x.ndim - 2), x.ndim - 1, x.ndim - 2)) -def _h(x: jax.Array) -> jax.Array: +def _h(x: Array) -> Array: """Hermitian Transpose of a Matrix.""" return _t(x).conj() -def _tril(m: jax.Array, k: int = 0) -> jax.Array: +def _tril(m: Array, k: int = 0) -> Array: """Select lower Triangle of a Matrix.""" *_, dim_n, dim_m = m.shape mask = jnp.tri(dim_n, dim_m, k, bool) @@ -51,7 +52,10 @@ def _tril(m: jax.Array, k: int = 0) -> jax.Array: @qr_gpu.defjvp -def qr_jvp_rule(primals, tangents): +def qr_jvp_rule( + primals: tuple[Array], + tangents: tuple[Array], +) -> tuple[tuple[Array, Array], tuple[Array, Array]]: """Calculates the derivative of the custom QR composition.""" # See j-towns.github.io/papers/qr-derivative.pdf for a terse derivation. (x,) = primals diff --git a/src/skillmodels/simulate_data.py b/src/skillmodels/simulate_data.py index b3b0fc07..5a87910e 100644 --- a/src/skillmodels/simulate_data.py +++ b/src/skillmodels/simulate_data.py @@ -1,13 +1,25 @@ """Functions to simulate a dataset generated by a latent factor model.""" import warnings +from collections.abc import Mapping +from typing import TYPE_CHECKING import jax.numpy as jnp import numpy as np import pandas as pd +from jax import Array from numpy.random import choice, multivariate_normal +from numpy.typing import NDArray from skillmodels.filtered_states import anchor_states_df + +if TYPE_CHECKING: + from skillmodels.types import ( + Dimensions, + EndogenousFactorsInfo, + Labels, + TransitionInfo, + ) from skillmodels.kalman_filters import transform_sigma_points from skillmodels.params_index import get_params_index from skillmodels.parse_params import create_parsing_info, parse_params @@ -16,7 +28,13 @@ from skillmodels.process_model import process_model -def simulate_dataset(model_dict, params, n_obs=None, data=None, policies=None): +def simulate_dataset( + model_dict: dict, + params: pd.DataFrame, + n_obs: int | None = None, + data: pd.DataFrame | None = None, + policies: list[dict] | None = None, +) -> dict: """Simulate datasets generated by a latent factor model. 
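 
     A minimal sketch of the intended call pattern (``model_dict`` and
     ``params`` are placeholders for a full model specification and its
     parameter DataFrame; the structure of the returned dict is not spelled
     out in this sketch)::
 
         from skillmodels.simulate_data import simulate_dataset
 
         simulated = simulate_dataset(model_dict, params, n_obs=500)
 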
Args: @@ -85,13 +103,14 @@ def simulate_dataset(model_dict, params, n_obs=None, data=None, policies=None): params = params.reindex(params_index) parsing_info = create_parsing_info( - params_index=params.index, + params_index=params.index, # ty: ignore[invalid-argument-type] update_info=model.update_info, labels=model.labels, anchoring=model.anchoring, has_endogenous_factors=model.endogenous_factors_info.has_endogenous_factors, ) + assert n_obs is not None # type narrowing: n_obs is set by either data or argument states, covs, log_weights, pardict = parse_params( params=jnp.array(params["value"].to_numpy()), parsing_info=parsing_info, @@ -112,7 +131,7 @@ def simulate_dataset(model_dict, params, n_obs=None, data=None, policies=None): update_info=model.update_info, control_data=control_data, observed_factors=observed_factors, - policies=policies, + policies=policies, # ty: ignore[invalid-argument-type] transition_info=model.transition_info, ) @@ -161,20 +180,20 @@ def simulate_dataset(model_dict, params, n_obs=None, data=None, policies=None): def _simulate_dataset( - latent_states, - covs, - log_weights, - pardict, - labels, - dimensions, - n_obs, - has_endogenous_factors, - update_info, - control_data, - observed_factors, - policies, - transition_info, -): + latent_states: Array, + covs: Array, + log_weights: Array, + pardict: dict, + labels: "Labels", + dimensions: "Dimensions", + n_obs: int, + has_endogenous_factors: bool, + update_info: pd.DataFrame, + control_data: Array, + observed_factors: Array, + policies: list[dict], + transition_info: "TransitionInfo", +) -> tuple[pd.DataFrame, pd.DataFrame]: """Simulate datasets generated by a latent factor model. Args: @@ -220,7 +239,7 @@ def _simulate_dataset( } dist_args.append(args) - latent_states = np.zeros((n_aug_periods, n_obs, n_states)) + latent_states = np.zeros((n_aug_periods, n_obs, n_states)) # ty: ignore[invalid-assignment] latent_states[0] = generate_start_states(n_obs, dimensions, dist_args, weights) for t in range(n_aug_periods - 1): @@ -275,8 +294,8 @@ def _simulate_dataset( for t in range(n_aug_periods): meas = pd.DataFrame( data=measurements_from_states( - latent_states[t], - control_data[t], + latent_states[t], # ty: ignore[invalid-argument-type] + control_data[t], # ty: ignore[invalid-argument-type] loadings_df.loc[t].to_numpy(), control_params_df.loc[t].to_numpy(), meas_sds.loc[t].to_numpy().flatten(), @@ -303,8 +322,11 @@ def _simulate_dataset( def _collapse_aug_periods_to_periods( - df, factors, aug_periods_to_periods, endogenous_factors_info -): + df: pd.DataFrame, + factors: tuple[str, ...], + aug_periods_to_periods: Mapping[int, int], + endogenous_factors_info: "EndogenousFactorsInfo", +) -> pd.DataFrame: """Collapse dataframe with aug_period index to user-facing period index. For each factor, extracts from the appropriate aug_period based on is_endogenous. @@ -328,7 +350,9 @@ def _collapse_aug_periods_to_periods( ) endogenous_cols = [ - fac for fac in factors if endogenous_factors_info.factor_info[fac].is_endogenous + fac + for fac in factors + if endogenous_factors_info.factor_info[fac].is_endogenous # ty: ignore[invalid-argument-type] ] state_cols = [fac for fac in factors if fac not in endogenous_cols] @@ -343,7 +367,11 @@ def _collapse_aug_periods_to_periods( ) -def _get_shock(mean, sd, size): +def _get_shock( + mean: float, + sd: float, + size: int, +) -> NDArray[np.floating]: """Add stochastic effect to a factor of length n_obs. 
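 
     A rough sketch of the draw, assuming an i.i.d. normal shock per
     observation (illustrative only, not necessarily the exact
     implementation)::
 
         import numpy as np
 
         shock = np.random.normal(loc=mean, scale=sd, size=size)
 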
Args: @@ -364,7 +392,12 @@ def _get_shock(mean, sd, size): return shock -def generate_start_states(n_obs, dimensions, dist_args, weights): +def generate_start_states( + n_obs: int, + dimensions: "Dimensions", + dist_args: list[dict], + weights: NDArray[np.floating], +) -> NDArray[np.floating]: """Draw initial states and control variables from a (mixture of) normals. Args: @@ -391,7 +424,13 @@ def generate_start_states(n_obs, dimensions, dist_args, weights): return out -def measurements_from_states(states, controls, loadings, control_params, sds): +def measurements_from_states( + states: NDArray[np.floating], + controls: NDArray[np.floating], + loadings: NDArray[np.floating], + control_params: NDArray[np.floating], + sds: NDArray[np.floating], +) -> NDArray[np.floating]: """Generate the variables that would be observed in practice. This generates the data for only one period. Let n_meas be the number diff --git a/src/skillmodels/transition_functions.py b/src/skillmodels/transition_functions.py index af1c9c5d..686749f0 100644 --- a/src/skillmodels/transition_functions.py +++ b/src/skillmodels/transition_functions.py @@ -33,21 +33,26 @@ import jax import jax.numpy as jnp +from jax import Array -def linear(states, params): +def linear(states: Array, params: Array) -> Array: """Linear production function where the constant is the last parameter.""" constant = params[-1] betas = params[:-1] return jnp.dot(states, betas) + constant -def params_linear(factors): +def params_linear(factors: tuple[str, ...]) -> list[str]: """Index tuples for linear transition function.""" return [*factors, "constant"] -def identity_constraints_linear(factor, aug_period, all_factors) -> list[dict]: +def identity_constraints_linear( + factor: str, + aug_period: int, + all_factors: tuple[str, ...], +) -> list[dict]: """Identity constraints for linear transition function.""" constraints_dicts = [] for regressor in params_linear(all_factors): @@ -63,7 +68,7 @@ def identity_constraints_linear(factor, aug_period, all_factors) -> list[dict]: return constraints_dicts -def translog(states, params): +def translog(states: Array, params: Array) -> Array: """Translog transition function. 
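 
     In reduced form it is linear in the parameters: a constant plus the
     levels, squares and pairwise interactions of the states, i.e. (the
     coefficient names are illustrative)
 
     .. math::
 
         f(x) = \beta_0 + \sum_i \beta_i x_i + \sum_i \gamma_i x_i^2
                + \sum_{i < j} \delta_{ij} x_i x_j
 
     See ``params_translog`` below for the exact parameter order.
 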
The name is a convention in the skill formation literature even though the function @@ -85,7 +90,7 @@ def translog(states, params): return res -def params_translog(factors): +def params_translog(factors: tuple[str, ...]) -> list[str]: """Index tuples for the translog production function.""" names = ( list(factors) @@ -96,7 +101,11 @@ def params_translog(factors): return names -def identity_constraints_translog(factor, aug_period, all_factors) -> list[dict]: +def identity_constraints_translog( + factor: str, + aug_period: int, + all_factors: tuple[str, ...], +) -> list[dict]: """Identity constraints for translog transition function.""" constraints_dicts = [] for regressor in params_translog(all_factors): @@ -112,7 +121,7 @@ def identity_constraints_translog(factor, aug_period, all_factors) -> list[dict] return constraints_dicts -def log_ces(states, params): +def log_ces(states: Array, params: Array) -> Array: """Log CES production function (KLS version).""" phi = params[-1] gammas = params[:-1] @@ -128,34 +137,42 @@ def log_ces(states, params): return result -def params_log_ces(factors): +def params_log_ces(factors: tuple[str, ...]) -> list[str]: """Index tuples for the log_ces production function.""" return [*factors, "phi"] -def constraints_log_ces(factor, factors, aug_period): +def constraints_log_ces( + factor: str, + factors: tuple[str, ...], + aug_period: int, +) -> dict: """Constraints for log_ces production function.""" names = params_log_ces(factors) loc = [("transition", aug_period, factor, name) for name in names[:-1]] return {"loc": loc, "type": "probability"} -def identity_constraints_log_ces(factors, aug_period, all_factors): +def identity_constraints_log_ces( + factors: tuple[str, ...], + aug_period: int, + all_factors: tuple[str, ...], +) -> list[dict]: """Identity constraints for log_ces.""" raise NotImplementedError -def constant(state, params): # noqa: ARG001 +def constant(state: Array, params: Array) -> Array: # noqa: ARG001 """Constant production function.""" return state -def params_constant(factors): # noqa: ARG001 +def params_constant(factors: tuple[str, ...]) -> list[str]: # noqa: ARG001 """Index tuples for the constant production function.""" return [] -def robust_translog(states, params): +def robust_translog(states: Array, params: Array) -> Array: """Numerically robust version of the translog transition function. 
This function does a clipping of the state vector at +- 1e12 before calling @@ -171,16 +188,20 @@ def robust_translog(states, params): return translog(clipped_states, params) -def params_robust_translog(factors): +def params_robust_translog(factors: tuple[str, ...]) -> list[str]: return params_translog(factors) -def identity_constraints_robust_translog(factor, aug_period, all_factors) -> list[dict]: +def identity_constraints_robust_translog( + factor: str, + aug_period: int, + all_factors: tuple[str, ...], +) -> list[dict]: """Identity constraints for robust_translog.""" return identity_constraints_translog(factor, aug_period, all_factors) -def linear_and_squares(states, params): +def linear_and_squares(states: Array, params: Array) -> Array: """linear_and_squares transition function.""" nfac = len(states) constant = params[-1] @@ -193,14 +214,16 @@ def linear_and_squares(states, params): return res -def params_linear_and_squares(factors): +def params_linear_and_squares(factors: tuple[str, ...]) -> list[str]: """Index tuples for the linear_and_squares production function.""" names = list(factors) + [f"{factor} ** 2" for factor in factors] + ["constant"] return names def identity_constraints_linear_and_squares( - factor, aug_period, all_factors + factor: str, + aug_period: int, + all_factors: tuple[str, ...], ) -> list[dict]: """Identity constraints for linear_and_squares transition function.""" constraints_dicts = [] @@ -217,7 +240,7 @@ def identity_constraints_linear_and_squares( return constraints_dicts -def log_ces_general(states, params): +def log_ces_general(states: Array, params: Array) -> Array: """Generalized log_ces production function without known location and scale.""" n = states.shape[-1] tfp = params[-1] @@ -234,11 +257,15 @@ def log_ces_general(states, params): return result -def params_log_ces_general(factors): +def params_log_ces_general(factors: tuple[str, ...]) -> list[str]: """Index tuples for the generalized log_ces production function.""" return list(factors) + [f"sigma_{fac}" for fac in factors] + ["tfp"] -def identity_constraints_log_ces_general(factors, aug_period, all_factors): +def identity_constraints_log_ces_general( + factors: tuple[str, ...], + aug_period: int, + all_factors: tuple[str, ...], +) -> list[dict]: """Identity constraints for log_ces_general.""" raise NotImplementedError diff --git a/src/skillmodels/types.py b/src/skillmodels/types.py index 6296603f..222bba0f 100644 --- a/src/skillmodels/types.py +++ b/src/skillmodels/types.py @@ -93,7 +93,7 @@ class TransitionInfo: @dataclass(frozen=True) class FactorInfo: - """Endogeneity information for a single factor.""" + """Information for a single factor.""" is_state: bool is_endogenous: bool diff --git a/src/skillmodels/utilities.py b/src/skillmodels/utilities.py index bbb84a4f..ad499f28 100644 --- a/src/skillmodels/utilities.py +++ b/src/skillmodels/utilities.py @@ -1,5 +1,6 @@ import warnings from copy import deepcopy +from typing import Any import numpy as np import pandas as pd @@ -12,7 +13,11 @@ ) -def extract_factors(factors, model_dict, params=None): +def extract_factors( + factors: str | list[str], + model_dict: dict[str, Any], + params: pd.DataFrame | None = None, +) -> dict[str, Any] | tuple[dict[str, Any], pd.DataFrame]: """Reduce a specification to a model with fewer latent factors. If provided, a params DataFrame is also reduced correspondingly. 
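 
     A usage sketch (``model_dict`` and ``params`` stand in for a full model
     specification and its parameter DataFrame; the factor names are
     hypothetical)::
 
         from skillmodels.utilities import extract_factors
 
         smaller_model = extract_factors("fac1", model_dict)
         smaller_model, smaller_params = extract_factors(
             ["fac1", "fac2"], model_dict, params=params
         )
 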
@@ -30,12 +35,15 @@ def extract_factors(factors, model_dict, params=None): if isinstance(factors, str): factors = [factors] - to_remove = set(model_dict["factors"]).difference(factors) + to_remove = list(set(model_dict["factors"]).difference(factors)) out = remove_factors(to_remove, model_dict, params) return out -def update_parameter_values(params, others): +def update_parameter_values( + params: pd.DataFrame, + others: pd.DataFrame | list[pd.DataFrame], +) -> pd.DataFrame: """Update the "value" column of params with values from other. Args: @@ -67,7 +75,11 @@ def update_parameter_values(params, others): return out -def remove_factors(factors, model_dict, params=None): +def remove_factors( + factors: str | list[str], + model_dict: dict[str, Any], + params: pd.DataFrame | None = None, +) -> dict[str, Any] | tuple[dict[str, Any], pd.DataFrame]: """Remove factors from a model specification. If provided, a params DataFrame is also reduced correspondingly. @@ -108,13 +120,17 @@ def remove_factors(factors, model_dict, params=None): out = reduce_n_periods(out, new_n_periods) if params is not None: - out_params = _reduce_params(params, out, has_endogenous_factors) + out_params = _reduce_params(params, out, has_endogenous_factors) # ty: ignore[invalid-argument-type] out = (out, out_params) - return out + return out # ty: ignore[invalid-return-type] -def remove_measurements(measurements, model_dict, params=None): +def remove_measurements( + measurements: str | list[str], + model_dict: dict[str, Any], + params: pd.DataFrame | None = None, +) -> dict[str, Any] | tuple[dict[str, Any], pd.DataFrame]: """Remove measurements from a model specification. If provided, a params DataFrame is also reduced correspondingly. @@ -161,7 +177,11 @@ def remove_measurements(measurements, model_dict, params=None): return out -def remove_controls(controls, model_dict, params=None): +def remove_controls( + controls: str | list[str], + model_dict: dict[str, Any], + params: pd.DataFrame | None = None, +) -> dict[str, Any] | tuple[dict[str, Any], pd.DataFrame]: """Remove control variables from a model specification. If provided, a params DataFrame is also reduced correspondingly. @@ -189,7 +209,10 @@ def remove_controls(controls, model_dict, params=None): return out -def switch_translog_to_linear(model_dict, params=None): +def switch_translog_to_linear( + model_dict: dict[str, Any], + params: pd.DataFrame | None = None, +) -> dict[str, Any] | tuple[dict[str, Any], pd.DataFrame]: """Switch all translog production functions to linear. If provided, a params DataFrame is also reduced correspondingly. @@ -216,7 +239,10 @@ def switch_translog_to_linear(model_dict, params=None): return out -def switch_linear_to_translog(model_dict, params=None): +def switch_linear_to_translog( + model_dict: dict[str, Any], + params: pd.DataFrame | None = None, +) -> dict[str, Any] | tuple[dict[str, Any], pd.DataFrame]: """Switch all linear production functions to translog. If provided, a params DataFrame is also extended correspondingly. The fill value @@ -244,7 +270,11 @@ def switch_linear_to_translog(model_dict, params=None): return out -def reduce_n_periods(model_dict, new_n_periods, params=None): +def reduce_n_periods( + model_dict: dict[str, Any], + new_n_periods: int, + params: pd.DataFrame | None = None, +) -> dict[str, Any] | tuple[dict[str, Any], pd.DataFrame]: """Remove all periods after n_periods. 
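 
     For example, truncating a placeholder ``model_dict`` to its first three
     periods (a sketch, not a worked example)::
 
         from skillmodels.utilities import reduce_n_periods
 
         shorter_model = reduce_n_periods(model_dict, new_n_periods=3)
 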
Args: @@ -285,20 +315,30 @@ def reduce_n_periods(model_dict, new_n_periods, params=None): return out -def _remove_from_list(list_, to_remove): +def _remove_from_list( + list_: list[Any], + to_remove: str | list[str], +) -> list[Any]: if isinstance(to_remove, str): to_remove = [to_remove] return [element for element in list_ if element not in to_remove] -def _remove_from_dict(dict_, to_remove): +def _remove_from_dict( + dict_: dict[str, Any], + to_remove: str | list[str], +) -> dict[str, Any]: if isinstance(to_remove, str): to_remove = [to_remove] return {key: val for key, val in dict_.items() if key not in to_remove} -def _reduce_params(params, model_dict, has_endogenous_factors): +def _reduce_params( + params: pd.DataFrame, + model_dict: dict[str, Any], + has_endogenous_factors: bool, +) -> pd.DataFrame: """Reduce a parameter DataFrame from a larger model to a reduced model. The reduced model must be nested in the original model for which the params @@ -328,7 +368,11 @@ def _reduce_params(params, model_dict, has_endogenous_factors): return params.loc[index] -def _extend_params(params, model_dict, fill_value): +def _extend_params( + params: pd.DataFrame, + model_dict: dict[str, Any], + fill_value: float, +) -> pd.DataFrame: index = _get_params_index_from_model_dict(model_dict) out = params.reindex(index) out["value"] = out["value"].fillna(fill_value) @@ -341,7 +385,9 @@ def _extend_params(params, model_dict, fill_value): return out -def _get_params_index_from_model_dict(model_dict): +def _get_params_index_from_model_dict( + model_dict: dict[str, Any], +) -> pd.MultiIndex: mod = process_model(model_dict) index = get_params_index( update_info=mod.update_info, @@ -353,7 +399,10 @@ def _get_params_index_from_model_dict(model_dict): return index -def _remove_measurements_from_normalizations(measurements, normalizations): +def _remove_measurements_from_normalizations( + measurements: str | list[str], + normalizations: list[dict[str, Any]], +) -> list[dict[str, Any]]: reduced = [_remove_from_dict(norm, measurements) for norm in normalizations] if reduced != normalizations: warnings.warn( @@ -363,7 +412,10 @@ def _remove_measurements_from_normalizations(measurements, normalizations): return reduced -def _shorten_if_necessary(list_, length): +def _shorten_if_necessary( + list_: list[Any], + length: int, +) -> list[Any]: if len(list_) > length: list_ = list_[:length] return list_ diff --git a/src/skillmodels/utils_plotting.py b/src/skillmodels/utils_plotting.py index 491ac9e7..ab7e6ea2 100644 --- a/src/skillmodels/utils_plotting.py +++ b/src/skillmodels/utils_plotting.py @@ -1,14 +1,16 @@ +from typing import Any + import numpy as np def get_layout_kwargs( - layout_kwargs=None, - legend_kwargs=None, - title_kwargs=None, - showlegend=False, - columns=None, - rows=None, -): + layout_kwargs: dict[str, Any] | None = None, + legend_kwargs: dict[str, Any] | None = None, + title_kwargs: dict[str, Any] | None = None, + showlegend: bool = False, + columns: list[str] | None = None, + rows: list[str] | None = None, +) -> dict[str, Any]: """Define and update default kwargs for update_layout. 
Defines some default keyword arguments to update figure layout, such as
@@ -37,13 +39,13 @@
 
 
 def get_make_subplot_kwargs(
-    sharex,
-    sharey,
-    column_order,
-    row_order,
-    make_subplot_kwargs,
-    add_scenes=False,
-):
+    sharex: bool,
+    sharey: bool,
+    column_order: list[str],
+    row_order: list[str],
+    make_subplot_kwargs: dict[str, Any] | None,
+    add_scenes: bool = False,
+) -> dict[str, Any]:
     """Define and update kwargs for instantiating figure with subplots."""
     nrows = len(row_order)
     ncols = len(column_order)
diff --git a/src/skillmodels/visualize_factor_distributions.py b/src/skillmodels/visualize_factor_distributions.py
index 76a6660d..728cce9f 100644
--- a/src/skillmodels/visualize_factor_distributions.py
+++ b/src/skillmodels/visualize_factor_distributions.py
@@ -1,11 +1,14 @@
 import warnings
+from collections.abc import Mapping
 from copy import deepcopy
+from typing import TYPE_CHECKING, Any
 
 import numpy as np
 import pandas as pd
 import plotly.express as px
 import plotly.figure_factory as ff
 import plotly.graph_objects as go
+from numpy.typing import NDArray
 from plotly.subplots import make_subplots
 from scipy.stats import gaussian_kde
 
@@ -13,25 +16,28 @@
 from skillmodels.process_model import process_model
 from skillmodels.utils_plotting import get_layout_kwargs, get_make_subplot_kwargs
 
+if TYPE_CHECKING:
+    from skillmodels.types import ProcessedModel
+
 
 def combine_distribution_plots(
-    kde_plots,
-    contour_plots,
-    surface_plots=None,
-    factor_order=None,
-    factor_mapping=None,
-    make_subplot_kwargs=None,
-    sharex=False,
-    sharey=False,
-    line_width=1.5,
-    showlegend=False,
-    layout_kwargs=None,
-    legend_kwargs=None,
-    title_kwargs=None,
-    eye_x=2.2,
-    eye_y=2.2,
-    eye_z=1,
-):
+    kde_plots: dict[str, go.Figure],
+    contour_plots: dict[tuple[str, str], go.Figure],
+    surface_plots: dict[tuple[str, str], go.Figure] | None = None,
+    factor_order: list[str] | None = None,
+    factor_mapping: dict[str, str] | None = None,
+    make_subplot_kwargs: dict[str, Any] | None = None,
+    sharex: bool = False,
+    sharey: bool = False,
+    line_width: float = 1.5,
+    showlegend: bool = False,
+    layout_kwargs: dict[str, Any] | None = None,
+    legend_kwargs: dict[str, Any] | None = None,
+    title_kwargs: dict[str, Any] | None = None,
+    eye_x: float = 2.2,
+    eye_y: float = 2.2,
+    eye_z: float = 1,
+) -> go.Figure:
     """Combine individual plots into figure with subplots.
 
     Uses dictionary with plotly images as values to build plotly Figure with subplots.
@@ -149,22 +155,22 @@
 
 
 def univariate_densities(
-    data,
-    model_dict,
-    params,
-    period,
-    factors=None,
-    observed_factors=False,
-    states=None,
-    show_curve=True,
-    show_hist=False,
-    show_rug=False,
-    curve_type="kde",
-    colorscale="D3",
-    bin_size=1,
-    distplot_kwargs=None,
-    layout_kwargs=None,
-):
+    data: pd.DataFrame,
+    model_dict: dict[str, Any],
+    params: pd.DataFrame,
+    period: int,
+    factors: list[str] | None = None,
+    observed_factors: bool = False,
+    states: pd.DataFrame | dict[str, pd.DataFrame] | list[pd.DataFrame] | None = None,
+    show_curve: bool = True,
+    show_hist: bool = False,
+    show_rug: bool = False,
+    curve_type: str = "kde",
+    colorscale: str = "D3",
+    bin_size: float = 1,
+    distplot_kwargs: dict[str, Any] | None = None,
+    layout_kwargs: dict[str, Any] | None = None,
+) -> dict[str, go.Figure]:
     """Get dictionary with kernel density estimate plots for each factor.
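 
     A typical call (all inputs are placeholders; the returned dict maps
     factor names to figures, so ``"fac1"`` is hypothetical)::
 
         kde_plots = univariate_densities(data, model_dict, params, period=0)
         kde_plots["fac1"].show()
 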
Plots kernel densities for latent factors and collects them in a dictionary
@@ -257,22 +263,22 @@
 
 
 def bivariate_density_contours(
-    data,
-    model_dict,
-    params,
-    period,
-    factors=None,
-    observed_factors=False,
-    states=None,
-    n_points=50,
-    contour_kwargs=None,
-    layout_kwargs=None,
-    contours_showlabels=False,
-    contours_coloring="none",
-    contours_colorscale="RdBu_r",
-    lines_colorscale="D3",
-    showcolorbar=False,
-):
+    data: pd.DataFrame,
+    model_dict: dict[str, Any],
+    params: pd.DataFrame,
+    period: int,
+    factors: list[str] | None = None,
+    observed_factors: bool = False,
+    states: pd.DataFrame | dict[str, pd.DataFrame] | list[pd.DataFrame] | None = None,
+    n_points: int = 50,
+    contour_kwargs: dict[str, Any] | None = None,
+    layout_kwargs: dict[str, Any] | None = None,
+    contours_showlabels: bool = False,
+    contours_coloring: str = "none",
+    contours_colorscale: str = "RdBu_r",
+    lines_colorscale: str = "D3",
+    showcolorbar: bool = False,
+) -> dict[tuple[str, str], go.Figure]:
     """Get dictionary with pairwise density contour plots.
 
     Plots pairwise bivariate density contours for latent factors
@@ -383,22 +389,22 @@
 
 
 def bivariate_density_surfaces(
-    data,
-    model_dict,
-    params,
-    period,
-    factors=None,
-    observed_factors=False,
-    states=None,
-    n_points=50,
-    layout_kwargs=None,
-    colorscale="RdBu_r",
-    opacity=0.9,
-    showcolorbar=False,
-    showgrids=True,
-    showaxlines=True,
-    showlabels=True,
-):
+    data: pd.DataFrame,
+    model_dict: dict[str, Any],
+    params: pd.DataFrame,
+    period: int,
+    factors: list[str] | None = None,
+    observed_factors: bool = False,
+    states: pd.DataFrame | None = None,
+    n_points: int = 50,
+    layout_kwargs: dict[str, Any] | None = None,
+    colorscale: str = "RdBu_r",
+    opacity: float = 0.9,
+    showcolorbar: bool = False,
+    showgrids: bool = True,
+    showaxlines: bool = True,
+    showlabels: bool = True,
+) -> dict[tuple[str, str], go.Figure]:
     """Get dictionary with pairwise 3d density surface plots.
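 
     The returned dict is keyed by factor pairs (the factor names below are
     hypothetical)::
 
         surface_plots = bivariate_density_surfaces(
             data, model_dict, params, period=0
         )
         surface_plots[("fac1", "fac2")].show()
 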
Plots pairwise 3d density surfaces for latent factors @@ -505,8 +511,12 @@ def bivariate_density_surfaces( def _process_data( - states, period, factors, aug_periods_to_periods, observed_states=None -): + states: pd.DataFrame | dict[str, pd.DataFrame] | list[pd.DataFrame], + period: int, + factors: list[str], + aug_periods_to_periods: Mapping[int, int], + observed_states: pd.DataFrame | None = None, +) -> pd.DataFrame: ap_to_p = pd.Series(aug_periods_to_periods, name="period") ap_to_p.index.name = "aug_period" if isinstance(states, pd.DataFrame): @@ -548,15 +558,15 @@ def _process_data( def _process_distplot_kwargs( - show_curve, - show_hist, - show_rug, - curve_type, - bin_size, - scenarios, - colorscale, - distplot_kwargs, -): + show_curve: bool, + show_hist: bool, + show_rug: bool, + curve_type: str, + bin_size: float, + scenarios: NDArray[Any], + colorscale: str, + distplot_kwargs: dict[str, Any] | None, +) -> dict[str, Any]: """Define and update default distplot kwargs.""" default_kwargs = { "show_hist": show_hist, @@ -572,7 +582,13 @@ def _process_distplot_kwargs( return default_kwargs -def _calculate_kde_for_3d(data, factors, n_points): +def _calculate_kde_for_3d( + data: pd.DataFrame, + factors: tuple[str, str], + n_points: int, +) -> tuple[ + NDArray[np.floating[Any]], NDArray[np.floating[Any]], NDArray[np.floating[Any]] +]: """Create grid mesh and calculate Gaussian kernel over the grid.""" x = data[factors[0]] y = data[factors[1]] @@ -588,12 +604,12 @@ def _calculate_kde_for_3d(data, factors, n_points): def _process_contour_kwargs( - contour_kwargs, - contours_showlabels, - contours_coloring, - contours_colorscale, - contours_showscale, -): + contour_kwargs: dict[str, Any] | None, + contours_showlabels: bool, + contours_coloring: str | None, + contours_colorscale: str, + contours_showscale: bool, +) -> dict[str, Any]: """Define and update default density contour kwargs.""" if contours_coloring is None: contours_coloring = "none" @@ -609,9 +625,11 @@ def _process_contour_kwargs( return default_kwargs -def _process_layout_kwargs(layout_kwargs): +def _process_layout_kwargs( + layout_kwargs: dict[str, Any] | None, +) -> dict[str, Any]: """Define and update default figure layout kwargs.""" - default_kwargs = { + default_kwargs: dict[str, Any] = { "template": "simple_white", "xaxis_showgrid": False, "yaxis_showgrid": False, @@ -621,12 +639,17 @@ def _process_layout_kwargs(layout_kwargs): return default_kwargs -def _process_layout_kwargs_3d(layout_kwargs, showgrids, showaxlines, showlabels): +def _process_layout_kwargs_3d( + layout_kwargs: dict[str, Any] | None, + showgrids: bool, + showaxlines: bool, + showlabels: bool, +) -> dict[str, Any]: """Define and update default figure layout kwargs for 3d plots.""" - default_kwargs = { + default_kwargs: dict[str, Any] = { "template": "none", } - scene = {} + scene: dict[str, Any] = {} for ax in list("xyz"): scene[f"{ax}axis"] = { "showgrid": showgrids, @@ -640,7 +663,10 @@ def _process_layout_kwargs_3d(layout_kwargs, showgrids, showaxlines, showlabels) return default_kwargs -def _process_factor_mapping_dist(mapper, factors): +def _process_factor_mapping_dist( + mapper: dict[str, str] | None, + factors: list[str], +) -> dict[str, str]: """Process mapper to return dictionary with old and new factor names.""" if mapper is None: mapper = {fac: fac for fac in factors} @@ -651,7 +677,10 @@ def _process_factor_mapping_dist(mapper, factors): return mapper -def _get_ordered_factors(factor_order, factors): +def _get_ordered_factors( + factor_order: 
list[str] | str | None,
+    factors: list[str],
+) -> list[str]:
     """Process factor orders to return list of strings."""
     if factor_order is None:
         ordered_factors = factors
@@ -662,17 +691,24 @@
     return ordered_factors
 
 
-def _get_factors(factors, observed_factors, model):
+def _get_factors(
+    factors: list[str] | None,
+    observed_factors: bool,
+    model: "ProcessedModel",
+) -> list[str]:
     """Process factor names to return list of strings."""
     if factors is None:
        if observed_factors:
-            factors = model.labels.all_factors
+            factors = list(model.labels.all_factors)
         else:
-            factors = model.labels.latent_factors
+            factors = list(model.labels.latent_factors)
     return factors
 
 
-def _get_data_observed_factors(data, factors):
+def _get_data_observed_factors(
+    data: pd.DataFrame,
+    factors: list[str],
+) -> pd.DataFrame | None:
     """Get data with observed factors if any."""
     to_concat = []
     for fac in factors:
diff --git a/src/skillmodels/visualize_transition_equations.py b/src/skillmodels/visualize_transition_equations.py
index 00d0e237..bebb50b8 100644
--- a/src/skillmodels/visualize_transition_equations.py
+++ b/src/skillmodels/visualize_transition_equations.py
@@ -1,9 +1,12 @@
 import itertools
+from collections.abc import Callable
 from copy import deepcopy
+from typing import TYPE_CHECKING, Any
 
 import jax.numpy as jnp
 import numpy as np
 import pandas as pd
+from jax import Array
 from plotly import express as px
 from plotly import graph_objects as go
 from plotly.subplots import make_subplots
@@ -16,20 +19,23 @@
 from skillmodels.process_model import process_model
 from skillmodels.utils_plotting import get_layout_kwargs, get_make_subplot_kwargs
 
+if TYPE_CHECKING:
+    from skillmodels.types import ProcessedModel
+
 
 def combine_transition_plots(
-    plots_dict,
-    column_order=None,
-    row_order=None,
-    factor_mapping=None,
-    make_subplot_kwargs=None,
-    sharex=False,
-    sharey=True,
-    showlegend=True,
-    layout_kwargs=None,
-    legend_kwargs=None,
-    title_kwargs=None,
-):
+    plots_dict: dict[tuple[str, str], go.Figure],
+    column_order: list[str] | str | None = None,
+    row_order: list[str] | str | None = None,
+    factor_mapping: dict[str, str] | None = None,
+    make_subplot_kwargs: dict[str, Any] | None = None,
+    sharex: bool = False,
+    sharey: bool = True,
+    showlegend: bool = True,
+    layout_kwargs: dict[str, Any] | None = None,
+    legend_kwargs: dict[str, Any] | None = None,
+    title_kwargs: dict[str, Any] | None = None,
+) -> go.Figure:
     """Combine individual plots into figure with subplots.
 
     Use dictionary with plotly images as values to build plotly figure with subplots.
@@ -131,18 +137,22 @@
 
 
 def get_transition_plots(
-    model_dict,
-    params,
-    data,
-    period,
-    state_ranges=None,
-    quantiles_of_other_factors=(0.25, 0.5, 0.75),
-    n_points=50,
-    n_draws=50,
-    colorscale="Magenta_r",
-    layout_kwargs=None,
-    include_correction_factors=False,
-):
+    model_dict: dict[str, Any],
+    params: pd.DataFrame,
+    data: pd.DataFrame,
+    period: int,
+    state_ranges: dict[str, pd.DataFrame] | None = None,
+    quantiles_of_other_factors: tuple[float, ...] | list[float] | float | None = (
+        0.25,
+        0.5,
+        0.75,
+    ),
+    n_points: int = 50,
+    n_draws: int = 50,
+    colorscale: str = "Magenta_r",
+    layout_kwargs: dict[str, Any] | None = None,
+    include_correction_factors: bool = False,
+) -> dict[tuple[str, str], go.Figure]:
     """Get dictionary with individual plots of transition equations for each factor.
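 
     A sketch of the intended workflow together with
     ``combine_transition_plots`` (all inputs are placeholders)::
 
         plots = get_transition_plots(model_dict, params, data, period=0)
         fig = combine_transition_plots(plots)
         fig.show()
 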
Args: @@ -192,7 +202,7 @@ def get_transition_plots( latent_factors = [ lf for lf in model.labels.latent_factors - if not model.endogenous_factors_info.factor_info[lf].is_correction + if not model.endogenous_factors_info.factor_info[lf].is_correction # ty: ignore[invalid-argument-type] ] all_factors = model.labels.all_factors states = get_filtered_states(model_dict=model_dict, data=data, params=params)[ @@ -204,7 +214,7 @@ def get_transition_plots( params=params, states=states, state_ranges=state_ranges, - latent_factors=latent_factors, + latent_factors=latent_factors, # ty: ignore[invalid-argument-type] all_factors=all_factors, quantiles_of_other_factors=quantiles_of_other_factors, period=period, @@ -217,21 +227,21 @@ def get_transition_plots( def _get_dictionary_with_plots( - model, - data, - params, - states, - state_ranges, - latent_factors, - all_factors, - quantiles_of_other_factors, - period, - n_points, - n_draws, - colorscale, - layout_kwargs, - showlegend=True, -): + model: "ProcessedModel", + data: pd.DataFrame, + params: pd.DataFrame, + states: pd.DataFrame, + state_ranges: dict[str, pd.DataFrame] | None, + latent_factors: list[str], + all_factors: tuple[str, ...], + quantiles_of_other_factors: list[float] | None, + period: int, + n_points: int, + n_draws: int, + colorscale: str, + layout_kwargs: dict[str, Any] | None, + showlegend: bool = True, +) -> dict[tuple[str, str], go.Figure]: """Get plots of transition functions for each input and output combination. Return a dictionary with individual plots of transition functions for each input @@ -288,10 +298,10 @@ def _get_dictionary_with_plots( _aug_periods = [period] plots_dict = {} for output_factor, input_factor in itertools.product(latent_factors, all_factors): - transition_function = model.transition_info.individual_functions[output_factor] + transition_function = model.transition_info.individual_functions[output_factor] # ty: ignore[invalid-argument-type] if ( has_endogenous_factors - and model.endogenous_factors_info.factor_info[output_factor].is_endogenous + and model.endogenous_factors_info.factor_info[output_factor].is_endogenous # ty: ignore[invalid-argument-type] ): aug_period = min(_aug_periods) else: @@ -350,17 +360,24 @@ def _get_dictionary_with_plots( return plots_dict -def _get_state_ranges(state_ranges, states_data, all_factors): +def _get_state_ranges( + state_ranges: dict[str, pd.DataFrame] | None, + states_data: pd.DataFrame, + all_factors: tuple[str, ...], +) -> dict[str, pd.DataFrame]: """Create state ranges if none is given.""" if state_ranges is None: - state_ranges = create_state_ranges(states_data, all_factors) + state_ranges = create_state_ranges(states_data, list(all_factors)) return state_ranges -def _get_pardict(model, params): +def _get_pardict( + model: "ProcessedModel", + params: pd.DataFrame, +) -> dict[str, Any]: """Get parsed params dictionary.""" parsing_info = create_parsing_info( - params_index=params.index, + params_index=params.index, # ty: ignore[invalid-argument-type] update_info=model.update_info, labels=model.labels, anchoring=model.anchoring, @@ -377,7 +394,10 @@ def _get_pardict(model, params): return pardict -def _set_index_params(model, params): +def _set_index_params( + model: "ProcessedModel", + params: pd.DataFrame, +) -> pd.DataFrame: """Reset index of params data frame to model implied values.""" params_index = get_params_index( update_info=model.update_info, @@ -391,7 +411,13 @@ def _set_index_params(model, params): return params -def _get_states_data(model, period, data, 
states, observed_factors): +def _get_states_data( + model: "ProcessedModel", + period: int, + data: pd.DataFrame, + states: pd.DataFrame, + observed_factors: tuple[str, ...], +) -> pd.DataFrame: if observed_factors and data is None: raise ValueError( "The model has observed factors. You must pass the empirical data to " @@ -445,17 +471,17 @@ def _get_states_data(model, period, data, states, observed_factors): def _prepare_data_for_one_plot_fixed_quantile_2d( - states_data, - state_ranges, - aug_period, - input_factor, - output_factor, - n_points, - quantiles_of_other_factors, - transition_function, - transition_params, - all_factors, -): + states_data: pd.DataFrame, + state_ranges: dict[str, pd.DataFrame], + aug_period: int, + input_factor: str, + output_factor: str, + n_points: int, + quantiles_of_other_factors: list[float], + transition_function: Callable[..., Array], + transition_params: dict[str, Any], + all_factors: tuple[str, ...], +) -> pd.DataFrame: period_data = states_data.query(f"aug_period == {aug_period}")[list(all_factors)] input_min = state_ranges[input_factor].loc[aug_period]["minimum"] input_max = state_ranges[input_factor].loc[aug_period]["maximum"] @@ -479,7 +505,9 @@ def _prepare_data_for_one_plot_fixed_quantile_2d( return out -def _process_quantiles_of_other_factors(quantiles_of_other_factors): +def _process_quantiles_of_other_factors( + quantiles_of_other_factors: tuple[float, ...] | list[float] | float | None, +) -> list[float] | None: """Process quantiles of other factors to always have list as type.""" if isinstance(quantiles_of_other_factors, float | int): quantiles_of_other_factors = [quantiles_of_other_factors] @@ -489,17 +517,17 @@ def _process_quantiles_of_other_factors(quantiles_of_other_factors): def _prepare_data_for_one_plot_average_2d( - states_data, - state_ranges, - aug_period, - input_factor, - output_factor, - n_points, - n_draws, - transition_function, - transition_params, - all_factors, -): + states_data: pd.DataFrame, + state_ranges: dict[str, pd.DataFrame], + aug_period: int, + input_factor: str, + output_factor: str, + n_points: int, + n_draws: int, + transition_function: Callable[..., Array], + transition_params: dict[str, Any], + all_factors: tuple[str, ...], +) -> pd.DataFrame: period_data = states_data.query(f"aug_period == {aug_period}") sampled_factors = [factor for factor in all_factors if factor != input_factor] @@ -525,7 +553,11 @@ def _prepare_data_for_one_plot_average_2d( return out -def _process_factor_mapping_trans(factor_mapper, output_factors, input_factors): +def _process_factor_mapping_trans( + factor_mapper: dict[str, str] | None, + output_factors: list[str], + input_factors: list[str], +) -> dict[str, str]: """Process mapper to return dictionary with old and new factor names.""" all_factors = input_factors + output_factors if factor_mapper is None: @@ -537,20 +569,30 @@ def _process_factor_mapping_trans(factor_mapper, output_factors, input_factors): return factor_mapper -def _process_orders(columns, rows, plots_dict): +def _process_orders( + columns: list[str] | str | None, + rows: list[str] | str | None, + plots_dict: dict[tuple[str, str], go.Figure], +) -> tuple[list[str], list[str]]: """Process axes orders to return list of strings.""" + out_columns: list[str] + out_rows: list[str] if columns is None: - columns = [] + out_columns = [] for f in plots_dict: - if f[0] not in columns: - columns.append(f[0]) + if f[0] not in out_columns: + out_columns.append(f[0]) elif isinstance(columns, str): - columns = [columns] + out_columns 
= [columns] + else: + out_columns = columns if rows is None: - rows = [] + out_rows = [] for f in plots_dict: - if f[1] not in rows: - rows.append(f[1]) + if f[1] not in out_rows: + out_rows.append(f[1]) elif isinstance(rows, str): - rows = [rows] - return columns, rows + out_rows = [rows] + else: + out_rows = rows + return out_columns, out_rows From e2d687a5a49236217c463e45ca1d470e37cefd7b Mon Sep 17 00:00:00 2001 From: Hans-Martin von Gaudecker Date: Fri, 9 Jan 2026 09:44:20 +0100 Subject: [PATCH 07/27] Require Python 3.14 before fixing type annotations. --- pixi.lock | 1953 +++++++++++++++++++++++------------------------- pyproject.toml | 4 +- 2 files changed, 941 insertions(+), 1016 deletions(-) diff --git a/pixi.lock b/pixi.lock index a2726f81..9e63b939 100644 --- a/pixi.lock +++ b/pixi.lock @@ -14,13 +14,13 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/_python_abi3_support-1.0-hd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/anyio-4.12.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/argon2-cffi-25.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-bindings-25.1.0-py313h07c4f96_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-bindings-25.1.0-py314h5bd0f2a_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.0.5-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.17.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/backports.zstd-1.3.0-py313h18e8e13_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/binutils_impl_linux-64-2.45-default_hfdba357_105.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/binutils_linux-64-2.45-default_h4852527_105.conda @@ -28,20 +28,20 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-with-css-6.3.0-h5f6438b_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-1.2.0-hed03a55_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-bin-1.2.0-hb03c661_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py313hf159716_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py314h3de4e8d_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-hda65f42_8.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.1.4-hbd8a1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py313hf46b229_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py314h4a8dc5f_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/comm-0.2.3-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.3.3-py313h7037e92_3.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.13.11-py313hd8ed1ab_100.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.3.3-py314h9891dd4_3.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.2-py314hd8ed1ab_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-cccl_linux-64-12.9.27-ha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-crt-dev_linux-64-12.9.86-ha770c72_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-crt-tools-12.9.86-ha770c72_2.conda @@ -62,7 +62,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-nvvm-tools-12.9.86-h4bc722e_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-version-12.9-h4f385c5_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cycler-0.12.1-pyhcf101f3_2.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py313h5d5ffb9_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py314h42812f9_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda @@ -70,7 +70,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/fonttools-4.61.1-py313h3dea7bd_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/freetype-2.14.1-ha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/gcc_impl_linux-64-14.3.0-he8b2097_16.conda @@ -110,7 +110,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_server-2.28.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/kernel-headers_linux-64-4.18.0-he073ed8_9.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/keyutils-1.6.3-hb9d3cd8_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.4.9-py313hc8edb43_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.4.9-py314h97ea11e_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/krb5-1.21.3-h659f571_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/lcms2-2.17-h717163a_0.conda @@ -153,8 +153,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/libxcb-1.17.0-h8a09558_0.conda - conda: 
https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.3.1-hb9d3cd8_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/logistro-2.0.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/markupsafe-3.0.3-py313h3dea7bd_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py313h683a580_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/markupsafe-3.0.3-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py314h1194b4b_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/munkres-1.1.4-pyhd8ed1ab_1.conda @@ -167,23 +167,23 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py313hf6604e3_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py314h2b28147_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/openjpeg-2.5.4-h55fea9a_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/openssl-3.6.0-h26f9b46_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/orjson-3.11.5-py313h541fbb8_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/orjson-3.11.5-py314h3b757c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/packaging-25.0-pyh29332c3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pandocfilters-1.5.0-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/parso-0.8.5-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pexpect-4.9.0-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/pillow-12.1.0-py313h80991f8_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/pillow-12.1.0-py314h8ec4b1a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py313h54dd161_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py314h0f05182_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/pthread-stubs-0.4-hb9d3cd8_1002.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ptyprocess-0.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pure_eval-0.2.3-pyhd8ed1ab_1.conda @@ -194,16 +194,16 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/pysocks-1.7.1-pyha55dd90_7.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/pytest-9.0.2-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-timeout-2.4.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.13.11-hc97d973_100_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.14.2-h32b2ec7_100_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhe01879c_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-fastjsonschema-2.21.2-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.13.11-h4df99d1_100.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.2-h4df99d1_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-kaleido-1.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2025.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.13-8_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytz-2025.2-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/pyyaml-6.0.3-py313h3dea7bd_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyyaml-6.0.3-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/pyzmq-27.1.0-py312hfb55c3c_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/qhull-2020.2-h434a139_5.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/readline-8.3-h853b02a_0.conda @@ -212,11 +212,11 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3339-validator-0.1.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py313h843e2db_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.16.3-py313h4b8bb8b_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py314h2e6c369_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.16.3-py314hf07bd8e_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.0.0-pyha191276_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-80.9.0-pyhff2d567_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/simplejson-3.20.2-py313h07c4f96_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/simplejson-3.20.2-py314h5bd0f2a_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.1-pyhd8ed1ab_0.conda @@ -226,13 +226,14 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_ha0e22de_103.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/tornado-6.5.3-py313h07c4f96_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/linux-64/tornado-6.5.3-py314h5bd0f2a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing-extensions-4.15.0-h396c80c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/ukkonen-1.0.1-py313h7037e92_6.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/ukkonen-1.0.1-py314h9891dd4_6.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/unicodedata2-17.0.0-py314h5bd0f2a_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda @@ -252,13 +253,13 @@ environments: - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/fd/8e/424b8c6e78bd9837d14ff7df01a9829fc883ba2ab4ea787d4f848435f23f/greenlet-3.3.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/b8/14/bab308fc2c1b5228c3224ec2bf928ce2e4d21d8046c161e44a2012b5203e/greenlet-3.3.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/53/f2/44ad0ce1d115f0f6be10f4af0ca05a18afb838b06e6ca6b01ba4b0137421/jax_cuda12_pjrt-0.8.2-py3-none-manylinux_2_27_x86_64.whl - - pypi: https://files.pythonhosted.org/packages/1c/38/4ba2486f95fcf2120723932feacdded438e785258148b18a703cd1177e41/jax_cuda12_plugin-0.8.2-cp313-cp313-manylinux_2_27_x86_64.whl - - pypi: https://files.pythonhosted.org/packages/6b/e0/91e5762a7ddb6351b07c742ca407cd28e26043d6945d6228b6c1b0881a45/jaxlib-0.8.2-cp313-cp313-manylinux_2_27_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/27/58/a5a27d4677d6890570f7e58cecd51891469cb620e6f64c8faed4935d93d0/jax_cuda12_plugin-0.8.2-cp314-cp314-manylinux_2_27_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/5e/27/2e6032727e41ce74914277478021140947af59127d68aa9e6f3776b428fd/jaxlib-0.8.2-cp314-cp314-manylinux_2_27_x86_64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/eb/33/40cd74219417e78b97c47802037cf2d87b91973e18bb968a7da48a96ea44/ml_dtypes-0.5.4-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/c6/bb/82c7dcf38070b46172a517e2334e665c5bf374a262f99a283ea454bece7c/ml_dtypes-0.5.4-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl - pypi: 
https://files.pythonhosted.org/packages/77/3c/aa88abe01f3be3d1f8f787d1d33dc83e76fec05945f9a28fbb41cfb99cd5/nvidia_cublas_cu12-12.9.1.4-py3-none-manylinux_2_27_x86_64.whl - pypi: https://files.pythonhosted.org/packages/18/2a/d4cd8506d2044e082f8cd921be57392e6a9b5ccd3ffdf050362430a3d5d5/nvidia_cuda_cccl_cu12-12.9.27-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl - pypi: https://files.pythonhosted.org/packages/c1/2e/b84e32197e33f39907b455b83395a017e697c07a449a2b15fd07fc1c9981/nvidia_cuda_cupti_cu12-12.9.79-py3-none-manylinux_2_25_x86_64.whl @@ -274,9 +275,9 @@ environments: - pypi: https://files.pythonhosted.org/packages/64/b9/6ab941001c23cfb43499b5b0b7417b0bb4dfba3a29ffa2b06985422dad50/nvidia_nvshmem_cu12-3.5.19-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - - pypi: https://files.pythonhosted.org/packages/15/07/284f757f63f8a8d69ed4472bfd85122bd086e637bf4ed09de572d575a693/pandas-2.3.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/15/b2/0e62f78c0c5ba7e3d2c5945a82456f4fac76c480940f805e0b97fcbc2f65/pandas-2.3.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/0e/50/80a8d080ac7d3d321e5e5d420c9a522b0aa770ec7013ea91f9a8b7d36e4a/sqlalchemy-2.0.45-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/b3/27/caf606ee924282fe4747ee4fd454b335a72a6e018f97eab5ff7f28199e16/sqlalchemy-2.0.45-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl - pypi: ./ default: @@ -293,34 +294,34 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/_python_abi3_support-1.0-hd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/anyio-4.12.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/argon2-cffi-25.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-bindings-25.1.0-py313h07c4f96_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-bindings-25.1.0-py314h5bd0f2a_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.0.5-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.17.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/backports.zstd-1.3.0-py313h18e8e13_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-6.3.0-pyhcf101f3_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/bleach-with-css-6.3.0-h5f6438b_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-1.2.0-hed03a55_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-bin-1.2.0-hb03c661_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py313hf159716_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py314h3de4e8d_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-hda65f42_8.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.1.4-hbd8a1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py313hf46b229_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py314h4a8dc5f_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/comm-0.2.3-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.3.3-py313h7037e92_3.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.13.11-py313hd8ed1ab_100.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.3.3-py314h9891dd4_3.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.2-py314hd8ed1ab_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cycler-0.12.1-pyhcf101f3_2.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py313h5d5ffb9_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py314h42812f9_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda @@ -328,7 +329,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/fonttools-4.61.1-py313h3dea7bd_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/freetype-2.14.1-ha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda @@ -363,7 +364,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_pygments-0.3.0-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_server-2.28.0-pyhcf101f3_0.conda - conda: 
https://conda.anaconda.org/conda-forge/linux-64/keyutils-1.6.3-hb9d3cd8_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.4.9-py313hc8edb43_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.4.9-py314h97ea11e_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/krb5-1.21.3-h659f571_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/lcms2-2.17-h717163a_0.conda @@ -401,8 +402,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/libxcb-1.17.0-h8a09558_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.3.1-hb9d3cd8_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/logistro-2.0.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/markupsafe-3.0.3-py313h3dea7bd_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py313h683a580_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/markupsafe-3.0.3-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py314h1194b4b_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/munkres-1.1.4-pyhd8ed1ab_1.conda @@ -415,23 +416,23 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py313hf6604e3_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py314h2b28147_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/openjpeg-2.5.4-h55fea9a_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/openssl-3.6.0-h26f9b46_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/orjson-3.11.5-py313h541fbb8_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/orjson-3.11.5-py314h3b757c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/packaging-25.0-pyh29332c3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pandocfilters-1.5.0-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/parso-0.8.5-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pexpect-4.9.0-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/pillow-12.1.0-py313h80991f8_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/pillow-12.1.0-py314h8ec4b1a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py313h54dd161_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py314h0f05182_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/pthread-stubs-0.4-hb9d3cd8_1002.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ptyprocess-0.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pure_eval-0.2.3-pyhd8ed1ab_1.conda @@ -442,16 +443,16 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/pysocks-1.7.1-pyha55dd90_7.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-9.0.2-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-timeout-2.4.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.13.11-hc97d973_100_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.14.2-h32b2ec7_100_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhe01879c_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-fastjsonschema-2.21.2-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.13.11-h4df99d1_100.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.2-h4df99d1_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-kaleido-1.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2025.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.13-8_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytz-2025.2-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/pyyaml-6.0.3-py313h3dea7bd_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyyaml-6.0.3-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/pyzmq-27.1.0-py312hfb55c3c_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/qhull-2020.2-h434a139_5.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/readline-8.3-h853b02a_0.conda @@ -460,11 +461,11 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3339-validator-0.1.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py313h843e2db_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.16.3-py313h4b8bb8b_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py314h2e6c369_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.16.3-py314hf07bd8e_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.0.0-pyha191276_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-80.9.0-pyhff2d567_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/simplejson-3.20.2-py313h07c4f96_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/simplejson-3.20.2-py314h5bd0f2a_1.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.1-pyhd8ed1ab_0.conda @@ -473,13 +474,14 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_ha0e22de_103.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/tornado-6.5.3-py313h07c4f96_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/tornado-6.5.3-py314h5bd0f2a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing-extensions-4.15.0-h396c80c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/ukkonen-1.0.1-py313h7037e92_6.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/ukkonen-1.0.1-py314h9891dd4_6.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/unicodedata2-17.0.0-py314h5bd0f2a_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda @@ -499,16 +501,16 @@ environments: - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/fd/8e/424b8c6e78bd9837d14ff7df01a9829fc883ba2ab4ea787d4f848435f23f/greenlet-3.3.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/b8/14/bab308fc2c1b5228c3224ec2bf928ce2e4d21d8046c161e44a2012b5203e/greenlet-3.3.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/6b/e0/91e5762a7ddb6351b07c742ca407cd28e26043d6945d6228b6c1b0881a45/jaxlib-0.8.2-cp313-cp313-manylinux_2_27_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/5e/27/2e6032727e41ce74914277478021140947af59127d68aa9e6f3776b428fd/jaxlib-0.8.2-cp314-cp314-manylinux_2_27_x86_64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/eb/33/40cd74219417e78b97c47802037cf2d87b91973e18bb968a7da48a96ea44/ml_dtypes-0.5.4-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl + - pypi: 
https://files.pythonhosted.org/packages/c6/bb/82c7dcf38070b46172a517e2334e665c5bf374a262f99a283ea454bece7c/ml_dtypes-0.5.4-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - - pypi: https://files.pythonhosted.org/packages/15/07/284f757f63f8a8d69ed4472bfd85122bd086e637bf4ed09de572d575a693/pandas-2.3.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/15/b2/0e62f78c0c5ba7e3d2c5945a82456f4fac76c480940f805e0b97fcbc2f65/pandas-2.3.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/0e/50/80a8d080ac7d3d321e5e5d420c9a522b0aa770ec7013ea91f9a8b7d36e4a/sqlalchemy-2.0.45-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/b3/27/caf606ee924282fe4747ee4fd454b335a72a6e018f97eab5ff7f28199e16/sqlalchemy-2.0.45-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl - pypi: ./ osx-arm64: @@ -517,34 +519,34 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/anyio-4.12.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/appnope-0.1.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/argon2-cffi-25.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/argon2-cffi-bindings-25.1.0-py313h6535dbc_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/argon2-cffi-bindings-25.1.0-py314h0612a62_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.0.5-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.17.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/backports.zstd-1.3.0-py313h48bb75e_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-6.3.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-with-css-6.3.0-h5f6438b_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-1.2.0-h7d5ae5b_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-bin-1.2.0-hc919400_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-python-1.2.0-py313hde1f3bb_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-python-1.2.0-py314h3daef5d_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/bzip2-1.0.8-hd037594_8.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.1.4-hbd8a1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/cffi-2.0.0-py313h224173a_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/cffi-2.0.0-py314h44086f9_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/comm-0.2.3-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/contourpy-1.3.3-py313ha61f8ec_3.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.13.11-py313hd8ed1ab_100.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/contourpy-1.3.3-py314h784bc60_3.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.2-py314hd8ed1ab_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cycler-0.12.1-pyhcf101f3_2.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/debugpy-1.8.19-py313hc37fe24_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/debugpy-1.8.19-py314hf820bb6_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda @@ -552,7 +554,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/fonttools-4.61.1-py313h7d74516_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/freetype-2.14.1-hce30654_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda @@ -585,7 +587,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab-4.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_pygments-0.3.0-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_server-2.28.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/kiwisolver-1.4.9-py313h7add70c_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/kiwisolver-1.4.9-py314h42813c9_2.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/krb5-1.21.3-h237132a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/lcms2-2.17-h7eeda09_0.conda @@ -619,8 +621,8 @@ environments: - conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/libzlib-1.3.1-h8359307_2.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/llvm-openmp-21.1.8-h4a912ad_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/logistro-2.0.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/markupsafe-3.0.3-py313h7d74516_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/matplotlib-base-3.10.8-py313h58042b9_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/markupsafe-3.0.3-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/matplotlib-base-3.10.8-py314hd63e3f0_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/munkres-1.1.4-pyhd8ed1ab_1.conda @@ -633,45 +635,45 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/numpy-2.3.5-py313h16eae64_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/numpy-2.3.5-py314hae46ccb_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/openjpeg-2.5.4-hbfb3c88_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/openssl-3.6.0-h5503f6c_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/orjson-3.11.5-py313hfea8034_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/orjson-3.11.5-py314hda6d10a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/packaging-25.0-pyh29332c3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pandocfilters-1.5.0-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/parso-0.8.5-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pexpect-4.9.0-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pillow-12.1.0-py313h45e5a15_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pillow-12.1.0-py314hab283cf_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/psutil-7.2.1-py313h6688731_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/psutil-7.2.1-py314ha14b1ff_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pthread-stubs-0.4-hd74edd7_1002.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ptyprocess-0.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pure_eval-0.2.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pybaum-0.1.3-pyhd8ed1ab_1.conda 
- conda: https://conda.anaconda.org/conda-forge/noarch/pycparser-2.22-pyh29332c3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pygments-2.19.2-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-core-12.1-py313h40b429f_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-framework-cocoa-12.1-py313hcc5defa_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-core-12.1-py314h3a4d195_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-framework-cocoa-12.1-py314h36abed7_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pyparsing-3.3.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pysocks-1.7.1-pyha55dd90_7.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-9.0.2-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-timeout-2.4.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.13.11-hfc2f54d_100_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.14.2-h40d2674_100_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhe01879c_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-fastjsonschema-2.21.2-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.13.11-h4df99d1_100.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.2-h4df99d1_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-kaleido-1.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2025.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.13-8_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytz-2025.2-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyyaml-6.0.3-py313h7d74516_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyyaml-6.0.3-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyzmq-27.1.0-py312hd65ceae_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/qhull-2020.2-h420ef59_5.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/readline-8.3-h46df422_0.conda @@ -680,11 +682,11 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3339-validator-0.1.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/rpds-py-0.30.0-py313h2c089d5_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/scipy-1.16.3-py313h29d7d31_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/rpds-py-0.30.0-py314haad56a0_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/scipy-1.16.3-py314h725efaa_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.0.0-pyh5552912_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-80.9.0-pyhff2d567_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/simplejson-3.20.2-py313h6535dbc_1.conda + - 
conda: https://conda.anaconda.org/conda-forge/osx-arm64/simplejson-3.20.2-py314h0612a62_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.1-pyhd8ed1ab_0.conda @@ -693,13 +695,14 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tk-8.6.13-h892fb3f_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tornado-6.5.4-py313h6535dbc_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tornado-6.5.4-py314h0612a62_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing-extensions-4.15.0-h396c80c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ukkonen-1.0.1-py313hc50a443_6.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ukkonen-1.0.1-py314h6b18a25_6.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/unicodedata2-17.0.0-py314h0612a62_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda @@ -720,12 +723,12 @@ environments: - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/c5/22/c0ec75e43a13b2457d78d509f49b49a57fa302ffced4f4a2778e428cb0a6/jaxlib-0.8.2-cp313-cp313-macosx_11_0_arm64.whl + - pypi: https://files.pythonhosted.org/packages/d8/9d/dca93d916bf8664d7a2bb73ea3d219028dabbe382c31774348963287356a/jaxlib-0.8.2-cp314-cp314-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/d9/a1/4008f14bbc616cfb1ac5b39ea485f9c63031c4634ab3f4cf72e7541f816a/ml_dtypes-0.5.4-cp313-cp313-macosx_10_13_universal2.whl + - pypi: https://files.pythonhosted.org/packages/72/4e/1339dc6e2557a344f5ba5590872e80346f76f6cb2ac3dd16e4666e88818c/ml_dtypes-0.5.4-cp314-cp314-macosx_10_13_universal2.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - - pypi: 
https://files.pythonhosted.org/packages/31/94/72fac03573102779920099bcac1c3b05975c2cb5f01eac609faf34bed1ca/pandas-2.3.3-cp313-cp313-macosx_11_0_arm64.whl + - pypi: https://files.pythonhosted.org/packages/21/00/266d6b357ad5e6d3ad55093a7e8efc7dd245f5a842b584db9f30b0f0a287/pandas-2.3.3-cp314-cp314-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/bf/e1/3ccb13c643399d22289c6a9786c1a91e3dcbb68bce4beb44926ac2c557bf/sqlalchemy-2.0.45-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl @@ -735,34 +738,34 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/_python_abi3_support-1.0-hd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/anyio-4.12.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/argon2-cffi-25.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/argon2-cffi-bindings-25.1.0-py313h5ea7bf4_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/argon2-cffi-bindings-25.1.0-py314h5a2d7ad_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.0.5-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.17.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/backports.zstd-1.3.0-py313h2a31948_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-6.3.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-with-css-6.3.0-h5f6438b_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/brotli-1.2.0-h2d644bc_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/brotli-bin-1.2.0-hfd05255_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/brotli-python-1.2.0-py313h3ebfc14_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/brotli-python-1.2.0-py314he701e3d_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/bzip2-1.0.8-h0ad9c76_8.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.1.4-h4c7d964_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/cffi-2.0.0-py313h5ea7bf4_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/cffi-2.0.0-py314h5a2d7ad_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/comm-0.2.3-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/contourpy-1.3.3-py313hf069bd2_3.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.13.11-py313hd8ed1ab_100.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/contourpy-1.3.3-py314h909e829_3.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.2-py314hd8ed1ab_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cycler-0.12.1-pyhcf101f3_2.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/debugpy-1.8.19-py313h927ade5_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/debugpy-1.8.19-py314hb98de8c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda @@ -770,7 +773,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/fonttools-4.61.1-py313hd650c13_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/freetype-2.14.1-h57928b3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda @@ -804,7 +807,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab-4.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_pygments-0.3.0-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_server-2.28.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/kiwisolver-1.4.9-py313h1a38498_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/kiwisolver-1.4.9-py314hf309875_2.conda - conda: https://conda.anaconda.org/conda-forge/win-64/krb5-1.21.3-hdf4eb48_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/lcms2-2.17-hbcf6048_0.conda @@ -839,8 +842,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/libzlib-1.3.1-h2466b09_2.conda - conda: https://conda.anaconda.org/conda-forge/win-64/llvm-openmp-21.1.8-h4fa8253_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/logistro-2.0.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/markupsafe-3.0.3-py313hd650c13_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/matplotlib-base-3.10.8-py313he1ded55_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/markupsafe-3.0.3-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/matplotlib-base-3.10.8-py314hfa45d96_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.0-pyhcf101f3_0.conda - conda: 
https://conda.anaconda.org/conda-forge/win-64/mkl-2025.3.0-hac47afa_455.conda @@ -853,22 +856,22 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/numpy-2.3.5-py313hce7ae62_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/numpy-2.3.5-py314h06c3c77_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/openjpeg-2.5.4-h24db6dd_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/openssl-3.6.0-h725018a_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/orjson-3.11.4-py313hfbe8231_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/orjson-3.11.5-py314h64f83cb_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/packaging-25.0-pyh29332c3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pandocfilters-1.5.0-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/parso-0.8.5-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/pillow-12.1.0-py313h38f99e1_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/pillow-12.1.0-py314h61b30b5_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/psutil-7.2.1-py313h5fd188c_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/psutil-7.2.1-py314hc5dbbe4_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/pthread-stubs-0.4-h0e40799_1002.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pure_eval-0.2.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pybaum-0.1.3-pyhd8ed1ab_1.conda @@ -878,18 +881,18 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/pysocks-1.7.1-pyh09c184e_7.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-9.0.2-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-timeout-2.4.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/python-3.13.11-h09917c8_100_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/python-3.14.2-h4b44e0e_100_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhe01879c_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-fastjsonschema-2.21.2-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.13.11-h4df99d1_100.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.2-h4df99d1_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/python-kaleido-1.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2025.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.13-8_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytz-2025.2-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/pywin32-311-py313h40c08fc_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/pywinpty-2.0.15-py313h5813708_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/pyyaml-6.0.3-py313hd650c13_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/pywin32-311-py314h8f8f202_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/pywinpty-2.0.15-py314h51f0985_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyyaml-6.0.3-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/pyzmq-27.1.0-py312hbb5da91_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/qhull-2020.2-hc790b64_5.conda - conda: https://conda.anaconda.org/conda-forge/noarch/referencing-0.37.0-pyhcf101f3_0.conda @@ -897,11 +900,11 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3339-validator-0.1.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/rpds-py-0.30.0-py313hfbe8231_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/scipy-1.16.3-py313he51e9a2_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/rpds-py-0.30.0-py314h9f07db2_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/scipy-1.16.3-py314h221f224_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.0.0-pyh6dadd2b_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-80.9.0-pyhff2d567_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/simplejson-3.20.2-py313h5ea7bf4_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/simplejson-3.20.2-py314h5a2d7ad_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.1-pyhd8ed1ab_0.conda @@ -911,14 +914,15 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/tk-8.6.13-h2c6b04d_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/tornado-6.5.4-py313h5ea7bf4_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/tornado-6.5.4-py314h5a2d7ad_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing-extensions-4.15.0-h396c80c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - 
conda: https://conda.anaconda.org/conda-forge/win-64/ucrt-10.0.26100.0-h57928b3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/ukkonen-1.0.1-py313hf069bd2_6.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/ukkonen-1.0.1-py314h909e829_6.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/unicodedata2-17.0.0-py314h5a2d7ad_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/vc-14.3-h41ae7f8_34.conda @@ -943,17 +947,17 @@ environments: - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/7e/71/ba21c3fb8c5dce83b8c01f458a42e99ffdb1963aeec08fff5a18588d8fd7/greenlet-3.3.0-cp313-cp313-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/7c/9a/9030e6f9aa8fd7808e9c31ba4c38f87c4f8ec324ee67431d181fe396d705/greenlet-3.3.0-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/85/68/25b38673b07a808616ce7b6efb3eed491f983f3373a09cbbd03f67178563/jaxlib-0.8.2-cp313-cp313-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/b3/8c/af5a00b07a446414edf6b84a7397eab02cf01ba44b6ae1fce7798ce4c127/jaxlib-0.8.2-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/e1/8b/200088c6859d8221454825959df35b5244fa9bdf263fd0249ac5fb75e281/ml_dtypes-0.5.4-cp313-cp313-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/e9/93/2bfed22d2498c468f6bcd0d9f56b033eaa19f33320389314c19ef6766413/ml_dtypes-0.5.4-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - - pypi: https://files.pythonhosted.org/packages/4f/c7/e54682c96a895d0c808453269e0b5928a07a127a15704fedb643e9b0a4c8/pandas-2.3.3-cp313-cp313-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/a6/3d/124ac75fcd0ecc09b8fdccb0246ef65e35b012030defb0e0eba2cbbbe948/pandas-2.3.3-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/5a/dc/491b7661614ab97483abf2056be1deee4dc2490ecbf7bff9ab5cdbac86e1/pyreadline3-3.5.4-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/c0/c5/d17113020b2d43073412aeca09b60d2009442420372123b8d49cc253f8b8/sqlalchemy-2.0.45-cp313-cp313-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/89/a2/0e1590e9adb292b1d576dbcf67ff7df8cf55e56e78d2c927686d01080f4b/sqlalchemy-2.0.45-cp314-cp314-win_amd64.whl - pypi: 
https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl - pypi: ./ test-cpu: @@ -970,36 +974,36 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/_python_abi3_support-1.0-hd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/anyio-4.12.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/argon2-cffi-25.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-bindings-25.1.0-py313h07c4f96_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-bindings-25.1.0-py314h5bd0f2a_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.0.5-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.17.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/backports.zstd-1.3.0-py313h18e8e13_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-6.3.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-with-css-6.3.0-h5f6438b_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-1.2.0-hed03a55_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-bin-1.2.0-hb03c661_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py313hf159716_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py314h3de4e8d_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-hda65f42_8.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/c-ares-1.34.6-hb03c661_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.1.4-hbd8a1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py313hf46b229_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py314h4a8dc5f_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/comm-0.2.3-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.3.3-py313h7037e92_3.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/coverage-7.13.1-py313h3dea7bd_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.13.11-py313hd8ed1ab_100.conda + - conda: 
https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.3.3-py314h9891dd4_3.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/coverage-7.13.1-py314h67df5f8_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.2-py314hd8ed1ab_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cycler-0.12.1-pyhcf101f3_2.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py313h5d5ffb9_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py314h42812f9_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda @@ -1009,7 +1013,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/fonttools-4.61.1-py313h3dea7bd_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/freetype-2.14.1-ha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/gmp-6.3.0-hac33072_2.conda @@ -1046,7 +1050,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_pygments-0.3.0-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_server-2.28.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/keyutils-1.6.3-hb9d3cd8_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.4.9-py313hc8edb43_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.4.9-py314h97ea11e_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/krb5-1.21.3-h659f571_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/lcms2-2.17-h717163a_0.conda @@ -1100,12 +1104,12 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/lz4-c-1.10.0-h5888daf_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/lzo-2.10-h280c20c_1002.conda - conda: https://conda.anaconda.org/conda-forge/noarch/markdown-it-py-4.0.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/markupsafe-3.0.3-py313h3dea7bd_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py313h683a580_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/markupsafe-3.0.3-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py314h1194b4b_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mdit-py-plugins-0.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mdurl-0.1.2-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/memray-1.19.1-py313h422961c_3.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/memray-1.19.1-py314hef15ded_3.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/munkres-1.1.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/narwhals-2.15.0-pyhcf101f3_0.conda @@ -1118,24 +1122,24 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py313hf6604e3_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py314h2b28147_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/openjpeg-2.5.4-h55fea9a_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/openssl-3.6.0-h26f9b46_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/orjson-3.11.5-py313h541fbb8_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/orjson-3.11.5-py314h3b757c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/p11-kit-0.25.10-h3435931_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/packaging-25.0-pyh29332c3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pandocfilters-1.5.0-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/parso-0.8.5-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pexpect-4.9.0-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/pillow-12.1.0-py313h80991f8_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/pillow-12.1.0-py314h8ec4b1a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py313h54dd161_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py314h0f05182_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/pthread-stubs-0.4-hb9d3cd8_1002.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ptyprocess-0.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pure_eval-0.2.3-pyhd8ed1ab_1.conda @@ -1149,16 +1153,16 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-memray-1.8.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-timeout-2.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-xdist-3.8.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.13.11-hc97d973_100_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.14.2-h32b2ec7_100_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhe01879c_2.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/python-fastjsonschema-2.21.2-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.13.11-h4df99d1_100.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.2-h4df99d1_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-kaleido-1.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2025.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.13-8_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytz-2025.2-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/pyyaml-6.0.3-py313h3dea7bd_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyyaml-6.0.3-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/pyzmq-27.1.0-py312hfb55c3c_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/qhull-2020.2-h434a139_5.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/readline-8.3-h853b02a_0.conda @@ -1168,11 +1172,11 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rich-14.2.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py313h843e2db_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.16.3-py313h4b8bb8b_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py314h2e6c369_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.16.3-py314hf07bd8e_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.0.0-pyha191276_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-80.9.0-pyhff2d567_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/simplejson-3.20.2-py313h07c4f96_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/simplejson-3.20.2-py314h5bd0f2a_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/snakeviz-2.2.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda @@ -1183,13 +1187,14 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_ha0e22de_103.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/tornado-6.5.3-py313h07c4f96_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/tornado-6.5.3-py314h5bd0f2a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing-extensions-4.15.0-h396c80c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/ukkonen-1.0.1-py313h7037e92_6.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/ukkonen-1.0.1-py314h9891dd4_6.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/unicodedata2-17.0.0-py314h5bd0f2a_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda @@ -1209,16 +1214,16 @@ environments: - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/fd/8e/424b8c6e78bd9837d14ff7df01a9829fc883ba2ab4ea787d4f848435f23f/greenlet-3.3.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/b8/14/bab308fc2c1b5228c3224ec2bf928ce2e4d21d8046c161e44a2012b5203e/greenlet-3.3.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/6b/e0/91e5762a7ddb6351b07c742ca407cd28e26043d6945d6228b6c1b0881a45/jaxlib-0.8.2-cp313-cp313-manylinux_2_27_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/5e/27/2e6032727e41ce74914277478021140947af59127d68aa9e6f3776b428fd/jaxlib-0.8.2-cp314-cp314-manylinux_2_27_x86_64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/eb/33/40cd74219417e78b97c47802037cf2d87b91973e18bb968a7da48a96ea44/ml_dtypes-0.5.4-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/c6/bb/82c7dcf38070b46172a517e2334e665c5bf374a262f99a283ea454bece7c/ml_dtypes-0.5.4-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - - pypi: https://files.pythonhosted.org/packages/15/07/284f757f63f8a8d69ed4472bfd85122bd086e637bf4ed09de572d575a693/pandas-2.3.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/15/b2/0e62f78c0c5ba7e3d2c5945a82456f4fac76c480940f805e0b97fcbc2f65/pandas-2.3.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/0e/50/80a8d080ac7d3d321e5e5d420c9a522b0aa770ec7013ea91f9a8b7d36e4a/sqlalchemy-2.0.45-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl + - pypi: 
https://files.pythonhosted.org/packages/b3/27/caf606ee924282fe4747ee4fd454b335a72a6e018f97eab5ff7f28199e16/sqlalchemy-2.0.45-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl - pypi: ./ osx-arm64: @@ -1227,35 +1232,35 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/anyio-4.12.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/appnope-0.1.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/argon2-cffi-25.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/argon2-cffi-bindings-25.1.0-py313h6535dbc_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/argon2-cffi-bindings-25.1.0-py314h0612a62_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.0.5-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.17.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/backports.zstd-1.3.0-py313h48bb75e_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-6.3.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-with-css-6.3.0-h5f6438b_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-1.2.0-h7d5ae5b_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-bin-1.2.0-hc919400_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-python-1.2.0-py313hde1f3bb_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-python-1.2.0-py314h3daef5d_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/bzip2-1.0.8-hd037594_8.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.1.4-hbd8a1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/cffi-2.0.0-py313h224173a_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/cffi-2.0.0-py314h44086f9_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/comm-0.2.3-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/contourpy-1.3.3-py313ha61f8ec_3.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/coverage-7.13.1-py313h65a2061_0.conda - - conda: 
https://conda.anaconda.org/conda-forge/noarch/cpython-3.13.11-py313hd8ed1ab_100.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/contourpy-1.3.3-py314h784bc60_3.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/coverage-7.13.1-py314h6e9b3f0_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.2-py314hd8ed1ab_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cycler-0.12.1-pyhcf101f3_2.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/debugpy-1.8.19-py313hc37fe24_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/debugpy-1.8.19-py314hf820bb6_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda @@ -1264,7 +1269,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/fonttools-4.61.1-py313h7d74516_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/freetype-2.14.1-hce30654_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda @@ -1297,7 +1302,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab-4.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_pygments-0.3.0-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_server-2.28.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/kiwisolver-1.4.9-py313h7add70c_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/kiwisolver-1.4.9-py314h42813c9_2.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/krb5-1.21.3-h237132a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/lcms2-2.17-h7eeda09_0.conda @@ -1333,12 +1338,12 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/logistro-2.0.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/lz4-c-1.10.0-h286801f_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/markdown-it-py-4.0.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/markupsafe-3.0.3-py313h7d74516_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/matplotlib-base-3.10.8-py313h58042b9_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/markupsafe-3.0.3-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/matplotlib-base-3.10.8-py314hd63e3f0_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mdit-py-plugins-0.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mdurl-0.1.2-pyhd8ed1ab_1.conda - - conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/memray-1.19.1-py313h78c9487_3.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/memray-1.19.1-py314habef2a7_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/munkres-1.1.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/narwhals-2.15.0-pyhcf101f3_0.conda @@ -1350,31 +1355,31 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/numpy-2.3.5-py313h16eae64_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/numpy-2.3.5-py314hae46ccb_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/openjpeg-2.5.4-hbfb3c88_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/openssl-3.6.0-h5503f6c_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/orjson-3.11.5-py313hfea8034_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/orjson-3.11.5-py314hda6d10a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/packaging-25.0-pyh29332c3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pandocfilters-1.5.0-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/parso-0.8.5-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pexpect-4.9.0-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pillow-12.1.0-py313h45e5a15_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pillow-12.1.0-py314hab283cf_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/psutil-7.2.1-py313h6688731_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/psutil-7.2.1-py314ha14b1ff_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pthread-stubs-0.4-hd74edd7_1002.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ptyprocess-0.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pure_eval-0.2.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pybaum-0.1.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pycparser-2.22-pyh29332c3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pygments-2.19.2-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-core-12.1-py313h40b429f_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-framework-cocoa-12.1-py313hcc5defa_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-core-12.1-py314h3a4d195_0.conda + - 
conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-framework-cocoa-12.1-py314h36abed7_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pyparsing-3.3.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pysocks-1.7.1-pyha55dd90_7.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-9.0.2-pyhcf101f3_0.conda @@ -1382,16 +1387,16 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-memray-1.8.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-timeout-2.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-xdist-3.8.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.13.11-hfc2f54d_100_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.14.2-h40d2674_100_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhe01879c_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-fastjsonschema-2.21.2-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.13.11-h4df99d1_100.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.2-h4df99d1_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-kaleido-1.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2025.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.13-8_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytz-2025.2-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyyaml-6.0.3-py313h7d74516_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyyaml-6.0.3-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyzmq-27.1.0-py312hd65ceae_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/qhull-2020.2-h420ef59_5.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/readline-8.3-h46df422_0.conda @@ -1401,11 +1406,11 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rich-14.2.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/rpds-py-0.30.0-py313h2c089d5_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/scipy-1.16.3-py313h29d7d31_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/rpds-py-0.30.0-py314haad56a0_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/scipy-1.16.3-py314h725efaa_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.0.0-pyh5552912_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-80.9.0-pyhff2d567_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/simplejson-3.20.2-py313h6535dbc_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/simplejson-3.20.2-py314h0612a62_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/snakeviz-2.2.2-pyhd8ed1ab_1.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda @@ -1416,13 +1421,14 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tk-8.6.13-h892fb3f_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tornado-6.5.4-py313h6535dbc_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tornado-6.5.4-py314h0612a62_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing-extensions-4.15.0-h396c80c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ukkonen-1.0.1-py313hc50a443_6.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ukkonen-1.0.1-py314h6b18a25_6.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/unicodedata2-17.0.0-py314h0612a62_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda @@ -1443,12 +1449,12 @@ environments: - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/c5/22/c0ec75e43a13b2457d78d509f49b49a57fa302ffced4f4a2778e428cb0a6/jaxlib-0.8.2-cp313-cp313-macosx_11_0_arm64.whl + - pypi: https://files.pythonhosted.org/packages/d8/9d/dca93d916bf8664d7a2bb73ea3d219028dabbe382c31774348963287356a/jaxlib-0.8.2-cp314-cp314-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/d9/a1/4008f14bbc616cfb1ac5b39ea485f9c63031c4634ab3f4cf72e7541f816a/ml_dtypes-0.5.4-cp313-cp313-macosx_10_13_universal2.whl + - pypi: https://files.pythonhosted.org/packages/72/4e/1339dc6e2557a344f5ba5590872e80346f76f6cb2ac3dd16e4666e88818c/ml_dtypes-0.5.4-cp314-cp314-macosx_10_13_universal2.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - - pypi: https://files.pythonhosted.org/packages/31/94/72fac03573102779920099bcac1c3b05975c2cb5f01eac609faf34bed1ca/pandas-2.3.3-cp313-cp313-macosx_11_0_arm64.whl + - pypi: https://files.pythonhosted.org/packages/21/00/266d6b357ad5e6d3ad55093a7e8efc7dd245f5a842b584db9f30b0f0a287/pandas-2.3.3-cp314-cp314-macosx_11_0_arm64.whl - pypi: 
https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/bf/e1/3ccb13c643399d22289c6a9786c1a91e3dcbb68bce4beb44926ac2c557bf/sqlalchemy-2.0.45-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl @@ -1458,35 +1464,35 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/_python_abi3_support-1.0-hd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/anyio-4.12.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/argon2-cffi-25.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/argon2-cffi-bindings-25.1.0-py313h5ea7bf4_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/argon2-cffi-bindings-25.1.0-py314h5a2d7ad_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.0.5-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.17.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/backports.zstd-1.3.0-py313h2a31948_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-6.3.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-with-css-6.3.0-h5f6438b_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/brotli-1.2.0-h2d644bc_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/brotli-bin-1.2.0-hfd05255_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/brotli-python-1.2.0-py313h3ebfc14_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/brotli-python-1.2.0-py314he701e3d_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/bzip2-1.0.8-h0ad9c76_8.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.1.4-h4c7d964_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/cffi-2.0.0-py313h5ea7bf4_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/cffi-2.0.0-py314h5a2d7ad_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/comm-0.2.3-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/contourpy-1.3.3-py313hf069bd2_3.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/coverage-7.13.1-py313hd650c13_0.conda - - conda: 
https://conda.anaconda.org/conda-forge/noarch/cpython-3.13.11-py313hd8ed1ab_100.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/contourpy-1.3.3-py314h909e829_3.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/coverage-7.13.1-py314h2359020_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.2-py314hd8ed1ab_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cycler-0.12.1-pyhcf101f3_2.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/debugpy-1.8.19-py313h927ade5_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/debugpy-1.8.19-py314hb98de8c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda @@ -1495,7 +1501,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/fonttools-4.61.1-py313hd650c13_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/freetype-2.14.1-h57928b3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda @@ -1529,7 +1535,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab-4.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_pygments-0.3.0-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_server-2.28.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/kiwisolver-1.4.9-py313h1a38498_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/kiwisolver-1.4.9-py314hf309875_2.conda - conda: https://conda.anaconda.org/conda-forge/win-64/krb5-1.21.3-hdf4eb48_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/lcms2-2.17-hbcf6048_0.conda @@ -1564,8 +1570,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/libzlib-1.3.1-h2466b09_2.conda - conda: https://conda.anaconda.org/conda-forge/win-64/llvm-openmp-21.1.8-h4fa8253_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/logistro-2.0.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/markupsafe-3.0.3-py313hd650c13_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/matplotlib-base-3.10.8-py313he1ded55_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/markupsafe-3.0.3-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/matplotlib-base-3.10.8-py314hfa45d96_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/mkl-2025.3.0-hac47afa_455.conda @@ -1578,22 +1584,22 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/numpy-2.3.5-py313hce7ae62_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/numpy-2.3.5-py314h06c3c77_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/openjpeg-2.5.4-h24db6dd_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/openssl-3.6.0-h725018a_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/orjson-3.11.4-py313hfbe8231_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/orjson-3.11.5-py314h64f83cb_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/packaging-25.0-pyh29332c3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pandocfilters-1.5.0-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/parso-0.8.5-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/pillow-12.1.0-py313h38f99e1_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/pillow-12.1.0-py314h61b30b5_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/psutil-7.2.1-py313h5fd188c_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/psutil-7.2.1-py314hc5dbbe4_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/pthread-stubs-0.4-h0e40799_1002.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pure_eval-0.2.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pybaum-0.1.3-pyhd8ed1ab_1.conda @@ -1605,18 +1611,18 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-cov-7.0.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-timeout-2.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-xdist-3.8.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/python-3.13.11-h09917c8_100_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/python-3.14.2-h4b44e0e_100_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhe01879c_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-fastjsonschema-2.21.2-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.13.11-h4df99d1_100.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.2-h4df99d1_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-kaleido-1.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2025.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.13-8_cp313.conda + - 
conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytz-2025.2-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/pywin32-311-py313h40c08fc_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/pywinpty-2.0.15-py313h5813708_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/pyyaml-6.0.3-py313hd650c13_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/pywin32-311-py314h8f8f202_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/pywinpty-2.0.15-py314h51f0985_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyyaml-6.0.3-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/pyzmq-27.1.0-py312hbb5da91_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/qhull-2020.2-hc790b64_5.conda - conda: https://conda.anaconda.org/conda-forge/noarch/referencing-0.37.0-pyhcf101f3_0.conda @@ -1624,11 +1630,11 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3339-validator-0.1.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/rpds-py-0.30.0-py313hfbe8231_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/scipy-1.16.3-py313he51e9a2_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/rpds-py-0.30.0-py314h9f07db2_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/scipy-1.16.3-py314h221f224_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.0.0-pyh6dadd2b_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-80.9.0-pyhff2d567_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/simplejson-3.20.2-py313h5ea7bf4_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/simplejson-3.20.2-py314h5a2d7ad_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/snakeviz-2.2.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda @@ -1639,14 +1645,15 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/tk-8.6.13-h2c6b04d_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/tornado-6.5.4-py313h5ea7bf4_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/tornado-6.5.4-py314h5a2d7ad_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing-extensions-4.15.0-h396c80c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/ucrt-10.0.26100.0-h57928b3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/ukkonen-1.0.1-py313hf069bd2_6.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/ukkonen-1.0.1-py314h909e829_6.conda 
+ - conda: https://conda.anaconda.org/conda-forge/win-64/unicodedata2-17.0.0-py314h5a2d7ad_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/vc-14.3-h41ae7f8_34.conda @@ -1671,17 +1678,17 @@ environments: - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/7e/71/ba21c3fb8c5dce83b8c01f458a42e99ffdb1963aeec08fff5a18588d8fd7/greenlet-3.3.0-cp313-cp313-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/7c/9a/9030e6f9aa8fd7808e9c31ba4c38f87c4f8ec324ee67431d181fe396d705/greenlet-3.3.0-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/85/68/25b38673b07a808616ce7b6efb3eed491f983f3373a09cbbd03f67178563/jaxlib-0.8.2-cp313-cp313-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/b3/8c/af5a00b07a446414edf6b84a7397eab02cf01ba44b6ae1fce7798ce4c127/jaxlib-0.8.2-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/e1/8b/200088c6859d8221454825959df35b5244fa9bdf263fd0249ac5fb75e281/ml_dtypes-0.5.4-cp313-cp313-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/e9/93/2bfed22d2498c468f6bcd0d9f56b033eaa19f33320389314c19ef6766413/ml_dtypes-0.5.4-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - - pypi: https://files.pythonhosted.org/packages/4f/c7/e54682c96a895d0c808453269e0b5928a07a127a15704fedb643e9b0a4c8/pandas-2.3.3-cp313-cp313-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/a6/3d/124ac75fcd0ecc09b8fdccb0246ef65e35b012030defb0e0eba2cbbbe948/pandas-2.3.3-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/5a/dc/491b7661614ab97483abf2056be1deee4dc2490ecbf7bff9ab5cdbac86e1/pyreadline3-3.5.4-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/c0/c5/d17113020b2d43073412aeca09b60d2009442420372123b8d49cc253f8b8/sqlalchemy-2.0.45-cp313-cp313-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/89/a2/0e1590e9adb292b1d576dbcf67ff7df8cf55e56e78d2c927686d01080f4b/sqlalchemy-2.0.45-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl - pypi: ./ test-gpu: @@ -1698,13 +1705,13 @@ environments: - conda: 
https://conda.anaconda.org/conda-forge/noarch/_python_abi3_support-1.0-hd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/anyio-4.12.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/argon2-cffi-25.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-bindings-25.1.0-py313h07c4f96_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-bindings-25.1.0-py314h5bd0f2a_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.0.5-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.17.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/backports.zstd-1.3.0-py313h18e8e13_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/binutils_impl_linux-64-2.45-default_hfdba357_105.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/binutils_linux-64-2.45-default_h4852527_105.conda @@ -1712,22 +1719,22 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-with-css-6.3.0-h5f6438b_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-1.2.0-hed03a55_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-bin-1.2.0-hb03c661_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py313hf159716_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py314h3de4e8d_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-hda65f42_8.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/c-ares-1.34.6-hb03c661_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.1.4-hbd8a1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py313hf46b229_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py314h4a8dc5f_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/comm-0.2.3-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.3.3-py313h7037e92_3.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/coverage-7.13.1-py313h3dea7bd_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.13.11-py313hd8ed1ab_100.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.3.3-py314h9891dd4_3.conda + - conda: 
https://conda.anaconda.org/conda-forge/linux-64/coverage-7.13.1-py314h67df5f8_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.2-py314hd8ed1ab_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-cccl_linux-64-12.9.27-ha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-crt-dev_linux-64-12.9.86-ha770c72_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-crt-tools-12.9.86-ha770c72_2.conda @@ -1748,7 +1755,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-nvvm-tools-12.9.86-h4bc722e_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-version-12.9-h4f385c5_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cycler-0.12.1-pyhcf101f3_2.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py313h5d5ffb9_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py314h42812f9_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda @@ -1758,7 +1765,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/fonttools-4.61.1-py313h3dea7bd_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/freetype-2.14.1-ha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/gcc_impl_linux-64-14.3.0-he8b2097_16.conda @@ -1800,7 +1807,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_server-2.28.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/kernel-headers_linux-64-4.18.0-he073ed8_9.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/keyutils-1.6.3-hb9d3cd8_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.4.9-py313hc8edb43_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.4.9-py314h97ea11e_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/krb5-1.21.3-h659f571_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/lcms2-2.17-h717163a_0.conda @@ -1859,12 +1866,12 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/lz4-c-1.10.0-h5888daf_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/lzo-2.10-h280c20c_1002.conda - conda: https://conda.anaconda.org/conda-forge/noarch/markdown-it-py-4.0.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/markupsafe-3.0.3-py313h3dea7bd_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py313h683a580_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/markupsafe-3.0.3-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py314h1194b4b_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mdit-py-plugins-0.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mdurl-0.1.2-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/memray-1.19.1-py313h422961c_3.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/memray-1.19.1-py314hef15ded_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/munkres-1.1.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/narwhals-2.15.0-pyhcf101f3_0.conda @@ -1877,24 +1884,24 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py313hf6604e3_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py314h2b28147_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/openjpeg-2.5.4-h55fea9a_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/openssl-3.6.0-h26f9b46_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/orjson-3.11.5-py313h541fbb8_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/orjson-3.11.5-py314h3b757c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/p11-kit-0.25.10-h3435931_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/packaging-25.0-pyh29332c3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pandocfilters-1.5.0-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/parso-0.8.5-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pexpect-4.9.0-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/pillow-12.1.0-py313h80991f8_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/pillow-12.1.0-py314h8ec4b1a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py313h54dd161_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py314h0f05182_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/pthread-stubs-0.4-hb9d3cd8_1002.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ptyprocess-0.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pure_eval-0.2.3-pyhd8ed1ab_1.conda @@ -1908,16 +1915,16 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-memray-1.8.0-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/pytest-timeout-2.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-xdist-3.8.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.13.11-hc97d973_100_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.14.2-h32b2ec7_100_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhe01879c_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-fastjsonschema-2.21.2-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.13.11-h4df99d1_100.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.2-h4df99d1_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-kaleido-1.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2025.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.13-8_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytz-2025.2-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/pyyaml-6.0.3-py313h3dea7bd_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyyaml-6.0.3-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/pyzmq-27.1.0-py312hfb55c3c_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/qhull-2020.2-h434a139_5.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/readline-8.3-h853b02a_0.conda @@ -1927,11 +1934,11 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rich-14.2.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py313h843e2db_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.16.3-py313h4b8bb8b_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py314h2e6c369_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.16.3-py314hf07bd8e_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.0.0-pyha191276_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-80.9.0-pyhff2d567_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/simplejson-3.20.2-py313h07c4f96_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/simplejson-3.20.2-py314h5bd0f2a_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/snakeviz-2.2.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda @@ -1943,13 +1950,14 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_ha0e22de_103.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/tornado-6.5.3-py313h07c4f96_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/linux-64/tornado-6.5.3-py314h5bd0f2a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing-extensions-4.15.0-h396c80c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/ukkonen-1.0.1-py313h7037e92_6.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/ukkonen-1.0.1-py314h9891dd4_6.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/unicodedata2-17.0.0-py314h5bd0f2a_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda @@ -1969,13 +1977,13 @@ environments: - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/fd/8e/424b8c6e78bd9837d14ff7df01a9829fc883ba2ab4ea787d4f848435f23f/greenlet-3.3.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/b8/14/bab308fc2c1b5228c3224ec2bf928ce2e4d21d8046c161e44a2012b5203e/greenlet-3.3.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/53/f2/44ad0ce1d115f0f6be10f4af0ca05a18afb838b06e6ca6b01ba4b0137421/jax_cuda12_pjrt-0.8.2-py3-none-manylinux_2_27_x86_64.whl - - pypi: https://files.pythonhosted.org/packages/1c/38/4ba2486f95fcf2120723932feacdded438e785258148b18a703cd1177e41/jax_cuda12_plugin-0.8.2-cp313-cp313-manylinux_2_27_x86_64.whl - - pypi: https://files.pythonhosted.org/packages/6b/e0/91e5762a7ddb6351b07c742ca407cd28e26043d6945d6228b6c1b0881a45/jaxlib-0.8.2-cp313-cp313-manylinux_2_27_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/27/58/a5a27d4677d6890570f7e58cecd51891469cb620e6f64c8faed4935d93d0/jax_cuda12_plugin-0.8.2-cp314-cp314-manylinux_2_27_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/5e/27/2e6032727e41ce74914277478021140947af59127d68aa9e6f3776b428fd/jaxlib-0.8.2-cp314-cp314-manylinux_2_27_x86_64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/eb/33/40cd74219417e78b97c47802037cf2d87b91973e18bb968a7da48a96ea44/ml_dtypes-0.5.4-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/c6/bb/82c7dcf38070b46172a517e2334e665c5bf374a262f99a283ea454bece7c/ml_dtypes-0.5.4-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl - pypi: 
https://files.pythonhosted.org/packages/77/3c/aa88abe01f3be3d1f8f787d1d33dc83e76fec05945f9a28fbb41cfb99cd5/nvidia_cublas_cu12-12.9.1.4-py3-none-manylinux_2_27_x86_64.whl - pypi: https://files.pythonhosted.org/packages/18/2a/d4cd8506d2044e082f8cd921be57392e6a9b5ccd3ffdf050362430a3d5d5/nvidia_cuda_cccl_cu12-12.9.27-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl - pypi: https://files.pythonhosted.org/packages/c1/2e/b84e32197e33f39907b455b83395a017e697c07a449a2b15fd07fc1c9981/nvidia_cuda_cupti_cu12-12.9.79-py3-none-manylinux_2_25_x86_64.whl @@ -1991,9 +1999,9 @@ environments: - pypi: https://files.pythonhosted.org/packages/64/b9/6ab941001c23cfb43499b5b0b7417b0bb4dfba3a29ffa2b06985422dad50/nvidia_nvshmem_cu12-3.5.19-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - - pypi: https://files.pythonhosted.org/packages/15/07/284f757f63f8a8d69ed4472bfd85122bd086e637bf4ed09de572d575a693/pandas-2.3.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/15/b2/0e62f78c0c5ba7e3d2c5945a82456f4fac76c480940f805e0b97fcbc2f65/pandas-2.3.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/0e/50/80a8d080ac7d3d321e5e5d420c9a522b0aa770ec7013ea91f9a8b7d36e4a/sqlalchemy-2.0.45-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/b3/27/caf606ee924282fe4747ee4fd454b335a72a6e018f97eab5ff7f28199e16/sqlalchemy-2.0.45-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl - pypi: ./ ty: @@ -2010,36 +2018,36 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/_python_abi3_support-1.0-hd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/anyio-4.12.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/argon2-cffi-25.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-bindings-25.1.0-py313h07c4f96_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-bindings-25.1.0-py314h5bd0f2a_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.0.5-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.17.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/backports.zstd-1.3.0-py313h18e8e13_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-6.3.0-pyhcf101f3_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/bleach-with-css-6.3.0-h5f6438b_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-1.2.0-hed03a55_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-bin-1.2.0-hb03c661_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py313hf159716_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py314h3de4e8d_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-hda65f42_8.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/c-ares-1.34.6-hb03c661_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.1.4-hbd8a1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py313hf46b229_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py314h4a8dc5f_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/comm-0.2.3-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.3.3-py313h7037e92_3.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/coverage-7.13.1-py313h3dea7bd_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.13.11-py313hd8ed1ab_100.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.3.3-py314h9891dd4_3.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/coverage-7.13.1-py314h67df5f8_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.2-py314hd8ed1ab_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cycler-0.12.1-pyhcf101f3_2.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py313h5d5ffb9_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py314h42812f9_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda @@ -2049,7 +2057,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/fonttools-4.61.1-py313h3dea7bd_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/freetype-2.14.1-ha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/gmp-6.3.0-hac33072_2.conda @@ -2086,7 
+2094,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_pygments-0.3.0-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_server-2.28.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/keyutils-1.6.3-hb9d3cd8_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.4.9-py313hc8edb43_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.4.9-py314h97ea11e_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/krb5-1.21.3-h659f571_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/lcms2-2.17-h717163a_0.conda @@ -2140,12 +2148,12 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/lz4-c-1.10.0-h5888daf_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/lzo-2.10-h280c20c_1002.conda - conda: https://conda.anaconda.org/conda-forge/noarch/markdown-it-py-4.0.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/markupsafe-3.0.3-py313h3dea7bd_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py313h683a580_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/markupsafe-3.0.3-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py314h1194b4b_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mdit-py-plugins-0.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mdurl-0.1.2-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/memray-1.19.1-py313h422961c_3.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/memray-1.19.1-py314hef15ded_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/munkres-1.1.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/narwhals-2.15.0-pyhcf101f3_0.conda @@ -2158,24 +2166,24 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py313hf6604e3_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py314h2b28147_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/openjpeg-2.5.4-h55fea9a_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/openssl-3.6.0-h26f9b46_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/orjson-3.11.5-py313h541fbb8_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/orjson-3.11.5-py314h3b757c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/p11-kit-0.25.10-h3435931_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/packaging-25.0-pyh29332c3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pandocfilters-1.5.0-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/parso-0.8.5-pyhcf101f3_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/pexpect-4.9.0-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/pillow-12.1.0-py313h80991f8_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/pillow-12.1.0-py314h8ec4b1a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py313h54dd161_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py314h0f05182_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/pthread-stubs-0.4-hb9d3cd8_1002.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ptyprocess-0.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pure_eval-0.2.3-pyhd8ed1ab_1.conda @@ -2189,16 +2197,16 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-memray-1.8.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-timeout-2.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-xdist-3.8.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.13.11-hc97d973_100_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.14.2-h32b2ec7_100_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhe01879c_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-fastjsonschema-2.21.2-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.13.11-h4df99d1_100.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.2-h4df99d1_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-kaleido-1.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2025.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.13-8_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytz-2025.2-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/pyyaml-6.0.3-py313h3dea7bd_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyyaml-6.0.3-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/pyzmq-27.1.0-py312hfb55c3c_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/qhull-2020.2-h434a139_5.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/readline-8.3-h853b02a_0.conda @@ -2208,11 +2216,11 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rich-14.2.0-pyhcf101f3_0.conda - - conda: 
https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py313h843e2db_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.16.3-py313h4b8bb8b_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py314h2e6c369_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.16.3-py314hf07bd8e_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.0.0-pyha191276_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-80.9.0-pyhff2d567_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/simplejson-3.20.2-py313h07c4f96_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/simplejson-3.20.2-py314h5bd0f2a_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/snakeviz-2.2.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda @@ -2223,13 +2231,14 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_ha0e22de_103.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/tornado-6.5.3-py313h07c4f96_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/tornado-6.5.3-py314h5bd0f2a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing-extensions-4.15.0-h396c80c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/ukkonen-1.0.1-py313h7037e92_6.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/ukkonen-1.0.1-py314h9891dd4_6.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/unicodedata2-17.0.0-py314h5bd0f2a_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda @@ -2249,17 +2258,17 @@ environments: - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/fd/8e/424b8c6e78bd9837d14ff7df01a9829fc883ba2ab4ea787d4f848435f23f/greenlet-3.3.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/b8/14/bab308fc2c1b5228c3224ec2bf928ce2e4d21d8046c161e44a2012b5203e/greenlet-3.3.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - - 
pypi: https://files.pythonhosted.org/packages/6b/e0/91e5762a7ddb6351b07c742ca407cd28e26043d6945d6228b6c1b0881a45/jaxlib-0.8.2-cp313-cp313-manylinux_2_27_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/5e/27/2e6032727e41ce74914277478021140947af59127d68aa9e6f3776b428fd/jaxlib-0.8.2-cp314-cp314-manylinux_2_27_x86_64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/eb/33/40cd74219417e78b97c47802037cf2d87b91973e18bb968a7da48a96ea44/ml_dtypes-0.5.4-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/c6/bb/82c7dcf38070b46172a517e2334e665c5bf374a262f99a283ea454bece7c/ml_dtypes-0.5.4-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - - pypi: https://files.pythonhosted.org/packages/15/07/284f757f63f8a8d69ed4472bfd85122bd086e637bf4ed09de572d575a693/pandas-2.3.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/15/b2/0e62f78c0c5ba7e3d2c5945a82456f4fac76c480940f805e0b97fcbc2f65/pandas-2.3.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/64/20/69f2a39792a653fd64d916cd563ed79ec6e5dcfa6408c4674021d810afcf/pandas_stubs-2.3.3.251219-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/0e/50/80a8d080ac7d3d321e5e5d420c9a522b0aa770ec7013ea91f9a8b7d36e4a/sqlalchemy-2.0.45-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/b3/27/caf606ee924282fe4747ee4fd454b335a72a6e018f97eab5ff7f28199e16/sqlalchemy-2.0.45-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/9e/4c/2f9ac5edbd0e67bf82f5cd04275c4e87cbbf69a78f43e5dcf90c1573d44e/ty-0.0.10-py3-none-manylinux_2_24_x86_64.whl - pypi: https://files.pythonhosted.org/packages/e7/c1/56ef16bf5dcd255155cc736d276efa6ae0a5c26fd685e28f0412a4013c01/types_pytz-2025.2.0.20251108-py3-none-any.whl @@ -2271,35 +2280,35 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/anyio-4.12.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/appnope-0.1.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/argon2-cffi-25.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/argon2-cffi-bindings-25.1.0-py313h6535dbc_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/argon2-cffi-bindings-25.1.0-py314h0612a62_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.0.5-pyh29332c3_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.17.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/backports.zstd-1.3.0-py313h48bb75e_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-6.3.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-with-css-6.3.0-h5f6438b_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-1.2.0-h7d5ae5b_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-bin-1.2.0-hc919400_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-python-1.2.0-py313hde1f3bb_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-python-1.2.0-py314h3daef5d_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/bzip2-1.0.8-hd037594_8.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.1.4-hbd8a1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/cffi-2.0.0-py313h224173a_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/cffi-2.0.0-py314h44086f9_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/comm-0.2.3-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/contourpy-1.3.3-py313ha61f8ec_3.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/coverage-7.13.1-py313h65a2061_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.13.11-py313hd8ed1ab_100.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/contourpy-1.3.3-py314h784bc60_3.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/coverage-7.13.1-py314h6e9b3f0_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.2-py314hd8ed1ab_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cycler-0.12.1-pyhcf101f3_2.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/debugpy-1.8.19-py313hc37fe24_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/debugpy-1.8.19-py314hf820bb6_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda @@ -2308,7 +2317,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/fonttools-4.61.1-py313h7d74516_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/freetype-2.14.1-hce30654_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda @@ -2341,7 +2350,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab-4.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_pygments-0.3.0-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_server-2.28.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/kiwisolver-1.4.9-py313h7add70c_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/kiwisolver-1.4.9-py314h42813c9_2.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/krb5-1.21.3-h237132a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/lcms2-2.17-h7eeda09_0.conda @@ -2377,12 +2386,12 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/logistro-2.0.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/lz4-c-1.10.0-h286801f_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/markdown-it-py-4.0.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/markupsafe-3.0.3-py313h7d74516_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/matplotlib-base-3.10.8-py313h58042b9_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/markupsafe-3.0.3-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/matplotlib-base-3.10.8-py314hd63e3f0_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mdit-py-plugins-0.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mdurl-0.1.2-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/memray-1.19.1-py313h78c9487_3.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/memray-1.19.1-py314habef2a7_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/munkres-1.1.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/narwhals-2.15.0-pyhcf101f3_0.conda @@ -2394,31 +2403,31 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/numpy-2.3.5-py313h16eae64_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/numpy-2.3.5-py314hae46ccb_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/openjpeg-2.5.4-hbfb3c88_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/openssl-3.6.0-h5503f6c_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/orjson-3.11.5-py313hfea8034_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/orjson-3.11.5-py314hda6d10a_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/packaging-25.0-pyh29332c3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pandocfilters-1.5.0-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/parso-0.8.5-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pexpect-4.9.0-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pillow-12.1.0-py313h45e5a15_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pillow-12.1.0-py314hab283cf_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/psutil-7.2.1-py313h6688731_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/psutil-7.2.1-py314ha14b1ff_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pthread-stubs-0.4-hd74edd7_1002.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ptyprocess-0.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pure_eval-0.2.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pybaum-0.1.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pycparser-2.22-pyh29332c3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pygments-2.19.2-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-core-12.1-py313h40b429f_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-framework-cocoa-12.1-py313hcc5defa_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-core-12.1-py314h3a4d195_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-framework-cocoa-12.1-py314h36abed7_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pyparsing-3.3.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pysocks-1.7.1-pyha55dd90_7.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-9.0.2-pyhcf101f3_0.conda @@ -2426,16 +2435,16 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-memray-1.8.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-timeout-2.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-xdist-3.8.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.13.11-hfc2f54d_100_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.14.2-h40d2674_100_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhe01879c_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-fastjsonschema-2.21.2-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.13.11-h4df99d1_100.conda + - conda: 
https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.2-h4df99d1_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-kaleido-1.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2025.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.13-8_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytz-2025.2-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyyaml-6.0.3-py313h7d74516_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyyaml-6.0.3-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyzmq-27.1.0-py312hd65ceae_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/qhull-2020.2-h420ef59_5.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/readline-8.3-h46df422_0.conda @@ -2445,11 +2454,11 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rich-14.2.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/rpds-py-0.30.0-py313h2c089d5_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/scipy-1.16.3-py313h29d7d31_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/rpds-py-0.30.0-py314haad56a0_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/scipy-1.16.3-py314h725efaa_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.0.0-pyh5552912_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-80.9.0-pyhff2d567_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/simplejson-3.20.2-py313h6535dbc_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/simplejson-3.20.2-py314h0612a62_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/snakeviz-2.2.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda @@ -2460,13 +2469,14 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tk-8.6.13-h892fb3f_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tornado-6.5.4-py313h6535dbc_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tornado-6.5.4-py314h0612a62_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing-extensions-4.15.0-h396c80c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ukkonen-1.0.1-py313hc50a443_6.conda + - conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/ukkonen-1.0.1-py314h6b18a25_6.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/unicodedata2-17.0.0-py314h0612a62_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda @@ -2487,12 +2497,12 @@ environments: - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/c5/22/c0ec75e43a13b2457d78d509f49b49a57fa302ffced4f4a2778e428cb0a6/jaxlib-0.8.2-cp313-cp313-macosx_11_0_arm64.whl + - pypi: https://files.pythonhosted.org/packages/d8/9d/dca93d916bf8664d7a2bb73ea3d219028dabbe382c31774348963287356a/jaxlib-0.8.2-cp314-cp314-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/d9/a1/4008f14bbc616cfb1ac5b39ea485f9c63031c4634ab3f4cf72e7541f816a/ml_dtypes-0.5.4-cp313-cp313-macosx_10_13_universal2.whl + - pypi: https://files.pythonhosted.org/packages/72/4e/1339dc6e2557a344f5ba5590872e80346f76f6cb2ac3dd16e4666e88818c/ml_dtypes-0.5.4-cp314-cp314-macosx_10_13_universal2.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - - pypi: https://files.pythonhosted.org/packages/31/94/72fac03573102779920099bcac1c3b05975c2cb5f01eac609faf34bed1ca/pandas-2.3.3-cp313-cp313-macosx_11_0_arm64.whl + - pypi: https://files.pythonhosted.org/packages/21/00/266d6b357ad5e6d3ad55093a7e8efc7dd245f5a842b584db9f30b0f0a287/pandas-2.3.3-cp314-cp314-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/64/20/69f2a39792a653fd64d916cd563ed79ec6e5dcfa6408c4674021d810afcf/pandas_stubs-2.3.3.251219-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/bf/e1/3ccb13c643399d22289c6a9786c1a91e3dcbb68bce4beb44926ac2c557bf/sqlalchemy-2.0.45-py3-none-any.whl @@ -2506,35 +2516,35 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/_python_abi3_support-1.0-hd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/anyio-4.12.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/argon2-cffi-25.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/argon2-cffi-bindings-25.1.0-py313h5ea7bf4_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/argon2-cffi-bindings-25.1.0-py314h5a2d7ad_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/async-lru-2.0.5-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.17.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/backports.zstd-1.3.0-py313h2a31948_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-6.3.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-with-css-6.3.0-h5f6438b_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/brotli-1.2.0-h2d644bc_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/brotli-bin-1.2.0-hfd05255_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/brotli-python-1.2.0-py313h3ebfc14_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/brotli-python-1.2.0-py314he701e3d_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/bzip2-1.0.8-h0ad9c76_8.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.1.4-h4c7d964_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/cffi-2.0.0-py313h5ea7bf4_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/cffi-2.0.0-py314h5a2d7ad_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/comm-0.2.3-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/contourpy-1.3.3-py313hf069bd2_3.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/coverage-7.13.1-py313hd650c13_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.13.11-py313hd8ed1ab_100.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/contourpy-1.3.3-py314h909e829_3.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/coverage-7.13.1-py314h2359020_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.2-py314hd8ed1ab_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cycler-0.12.1-pyhcf101f3_2.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/debugpy-1.8.19-py313h927ade5_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/debugpy-1.8.19-py314hb98de8c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda @@ -2543,7 +2553,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/fonttools-4.61.1-py313hd650c13_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/freetype-2.14.1-h57928b3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda @@ -2577,7 +2587,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab-4.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_pygments-0.3.0-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_server-2.28.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/kiwisolver-1.4.9-py313h1a38498_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/kiwisolver-1.4.9-py314hf309875_2.conda - conda: https://conda.anaconda.org/conda-forge/win-64/krb5-1.21.3-hdf4eb48_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/lcms2-2.17-hbcf6048_0.conda @@ -2612,8 +2622,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/libzlib-1.3.1-h2466b09_2.conda - conda: https://conda.anaconda.org/conda-forge/win-64/llvm-openmp-21.1.8-h4fa8253_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/logistro-2.0.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/markupsafe-3.0.3-py313hd650c13_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/matplotlib-base-3.10.8-py313he1ded55_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/markupsafe-3.0.3-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/matplotlib-base-3.10.8-py314hfa45d96_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/mkl-2025.3.0-hac47afa_455.conda @@ -2626,22 +2636,22 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/numpy-2.3.5-py313hce7ae62_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/numpy-2.3.5-py314h06c3c77_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/openjpeg-2.5.4-h24db6dd_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/openssl-3.6.0-h725018a_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/orjson-3.11.4-py313hfbe8231_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/orjson-3.11.5-py314h64f83cb_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/packaging-25.0-pyh29332c3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pandocfilters-1.5.0-pyhd8ed1ab_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/parso-0.8.5-pyhcf101f3_0.conda - - conda: 
https://conda.anaconda.org/conda-forge/win-64/pillow-12.1.0-py313h38f99e1_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/pillow-12.1.0-py314h61b30b5_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/psutil-7.2.1-py313h5fd188c_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/psutil-7.2.1-py314hc5dbbe4_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/pthread-stubs-0.4-h0e40799_1002.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pure_eval-0.2.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pybaum-0.1.3-pyhd8ed1ab_1.conda @@ -2653,18 +2663,18 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-cov-7.0.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-timeout-2.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-xdist-3.8.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/python-3.13.11-h09917c8_100_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/python-3.14.2-h4b44e0e_100_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhe01879c_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-fastjsonschema-2.21.2-pyhe01879c_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.13.11-h4df99d1_100.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.2-h4df99d1_100.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-kaleido-1.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2025.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.13-8_cp313.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pytz-2025.2-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/pywin32-311-py313h40c08fc_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/pywinpty-2.0.15-py313h5813708_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/pyyaml-6.0.3-py313hd650c13_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/pywin32-311-py314h8f8f202_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/pywinpty-2.0.15-py314h51f0985_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyyaml-6.0.3-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/pyzmq-27.1.0-py312hbb5da91_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/qhull-2020.2-hc790b64_5.conda - conda: https://conda.anaconda.org/conda-forge/noarch/referencing-0.37.0-pyhcf101f3_0.conda @@ -2672,11 +2682,11 @@ environments: - conda: 
https://conda.anaconda.org/conda-forge/noarch/rfc3339-validator-0.1.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/rpds-py-0.30.0-py313hfbe8231_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/scipy-1.16.3-py313he51e9a2_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/rpds-py-0.30.0-py314h9f07db2_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/scipy-1.16.3-py314h221f224_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.0.0-pyh6dadd2b_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-80.9.0-pyhff2d567_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/simplejson-3.20.2-py313h5ea7bf4_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/simplejson-3.20.2-py314h5a2d7ad_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/snakeviz-2.2.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda @@ -2687,14 +2697,15 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/tk-8.6.13-h2c6b04d_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/tornado-6.5.4-py313h5ea7bf4_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/tornado-6.5.4-py314h5a2d7ad_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing-extensions-4.15.0-h396c80c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/ucrt-10.0.26100.0-h57928b3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/ukkonen-1.0.1-py313hf069bd2_6.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/ukkonen-1.0.1-py314h909e829_6.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/unicodedata2-17.0.0-py314h5a2d7ad_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/vc-14.3-h41ae7f8_34.conda @@ -2719,18 +2730,18 @@ environments: - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/7e/71/ba21c3fb8c5dce83b8c01f458a42e99ffdb1963aeec08fff5a18588d8fd7/greenlet-3.3.0-cp313-cp313-win_amd64.whl + - pypi: 
https://files.pythonhosted.org/packages/7c/9a/9030e6f9aa8fd7808e9c31ba4c38f87c4f8ec324ee67431d181fe396d705/greenlet-3.3.0-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/85/68/25b38673b07a808616ce7b6efb3eed491f983f3373a09cbbd03f67178563/jaxlib-0.8.2-cp313-cp313-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/b3/8c/af5a00b07a446414edf6b84a7397eab02cf01ba44b6ae1fce7798ce4c127/jaxlib-0.8.2-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/e1/8b/200088c6859d8221454825959df35b5244fa9bdf263fd0249ac5fb75e281/ml_dtypes-0.5.4-cp313-cp313-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/e9/93/2bfed22d2498c468f6bcd0d9f56b033eaa19f33320389314c19ef6766413/ml_dtypes-0.5.4-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 - - pypi: https://files.pythonhosted.org/packages/4f/c7/e54682c96a895d0c808453269e0b5928a07a127a15704fedb643e9b0a4c8/pandas-2.3.3-cp313-cp313-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/a6/3d/124ac75fcd0ecc09b8fdccb0246ef65e35b012030defb0e0eba2cbbbe948/pandas-2.3.3-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/64/20/69f2a39792a653fd64d916cd563ed79ec6e5dcfa6408c4674021d810afcf/pandas_stubs-2.3.3.251219-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/5a/dc/491b7661614ab97483abf2056be1deee4dc2490ecbf7bff9ab5cdbac86e1/pyreadline3-3.5.4-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/c0/c5/d17113020b2d43073412aeca09b60d2009442420372123b8d49cc253f8b8/sqlalchemy-2.0.45-cp313-cp313-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/89/a2/0e1590e9adb292b1d576dbcf67ff7df8cf55e56e78d2c927686d01080f4b/sqlalchemy-2.0.45-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/42/36/82e66b9753a76964d26fd9bc3514ea0abce0a5ba5ad7d5f084070c6981da/ty-0.0.10-py3-none-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/e7/c1/56ef16bf5dcd255155cc736d276efa6ae0a5c26fd685e28f0412a4013c01/types_pytz-2025.2.0.20251108-py3-none-any.whl @@ -2846,43 +2857,43 @@ packages: - pkg:pypi/argon2-cffi?source=hash-mapping size: 18715 timestamp: 1749017288144 -- conda: https://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-bindings-25.1.0-py313h07c4f96_2.conda - sha256: ad188ccc06a06c633dc124b09e9e06fb9df4c32ffc38acc96ecc86e506062090 - md5: 27bbec9f2f3a15d32b60ec5734f5b41c +- conda: https://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-bindings-25.1.0-py314h5bd0f2a_2.conda + sha256: 39234a99df3d2e3065383808ed8bfda36760de5ef590c54c3692bb53571ef02b + md5: 3cca1b74b2752917b5b65b81f61f0553 depends: - __glibc >=2.17,<3.0.a0 - - cffi >=1.0.1 + - cffi >=2.0.0b1 - libgcc >=14 - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* 
*_cp313 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 license: MIT license_family: MIT purls: - pkg:pypi/argon2-cffi-bindings?source=hash-mapping - size: 35943 - timestamp: 1762509452935 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/argon2-cffi-bindings-25.1.0-py313h6535dbc_2.conda - sha256: 05ea6fa7109235cfb4fc24526bae1fe82d88bbb5e697ab3945c313f5f041af5b - md5: e23e087109b2096db4cf9a3985bab329 + size: 35598 + timestamp: 1762509505285 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/argon2-cffi-bindings-25.1.0-py314h0612a62_2.conda + sha256: aab60bbaea5cc49dff37438d1ad469d64025cda2ce58103cf68da61701ed2075 + md5: a240a79a49a95b388ef81ccda27a5e51 depends: - __osx >=11.0 - - cffi >=1.0.1 - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 - - python_abi 3.13.* *_cp313 + - cffi >=2.0.0b1 + - python >=3.14,<3.15.0a0 + - python >=3.14,<3.15.0a0 *_cp314 + - python_abi 3.14.* *_cp314 license: MIT license_family: MIT purls: - pkg:pypi/argon2-cffi-bindings?source=hash-mapping - size: 33947 - timestamp: 1762510144907 -- conda: https://conda.anaconda.org/conda-forge/win-64/argon2-cffi-bindings-25.1.0-py313h5ea7bf4_2.conda - sha256: 3f8a1affdfeb2be5289d709e365fc6e386d734773895215cf8cbc5100fa6af9a - md5: eabb4b677b54874d7d6ab775fdaa3d27 - depends: - - cffi >=1.0.1 - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + size: 34218 + timestamp: 1762509977830 +- conda: https://conda.anaconda.org/conda-forge/win-64/argon2-cffi-bindings-25.1.0-py314h5a2d7ad_2.conda + sha256: a742e7cd0d5534bfff3fd550a0c1e430411fad60a24f88930d261056ab08096f + md5: ffa247e46f47e157851dc547f4c513e4 + depends: + - cffi >=2.0.0b1 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 - ucrt >=10.0.20348.0 - vc >=14.3,<15 - vc14_runtime >=14.44.35208 @@ -2890,8 +2901,8 @@ packages: license_family: MIT purls: - pkg:pypi/argon2-cffi-bindings?source=hash-mapping - size: 38779 - timestamp: 1762509796090 + size: 38653 + timestamp: 1762509771011 - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda sha256: 792da8131b1b53ff667bd6fc617ea9087b570305ccb9913deb36b8e12b3b5141 md5: 85c4f19f377424eafc4ed7911b291642 @@ -2956,49 +2967,16 @@ packages: - pkg:pypi/babel?source=hash-mapping size: 6938256 timestamp: 1738490268466 -- conda: https://conda.anaconda.org/conda-forge/linux-64/backports.zstd-1.3.0-py313h18e8e13_0.conda - sha256: 9552afbec37c4d8d0e83a5c4c6b3c7f4b8785f935094ce3881e0a249045909ce - md5: d9e90792551a527200637e23a915dd79 - depends: - - python - - libgcc >=14 - - __glibc >=2.17,<3.0.a0 - - python_abi 3.13.* *_cp313 - - zstd >=1.5.7,<1.6.0a0 - license: BSD-3-Clause AND MIT AND EPL-2.0 - purls: - - pkg:pypi/backports-zstd?source=hash-mapping - size: 240943 - timestamp: 1767044981366 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/backports.zstd-1.3.0-py313h48bb75e_0.conda - sha256: f3047ca3b41bb444b4b5a71a6eee182623192c77019746dd4685fd260becb249 - md5: 54008c5cc8928e5cb5a0f9206b829451 - depends: - - python - - python 3.13.* *_cp313 - - __osx >=11.0 - - zstd >=1.5.7,<1.6.0a0 - - python_abi 3.13.* *_cp313 - license: BSD-3-Clause AND MIT AND EPL-2.0 - purls: - - pkg:pypi/backports-zstd?source=hash-mapping - size: 244371 - timestamp: 1767045003420 -- conda: https://conda.anaconda.org/conda-forge/win-64/backports.zstd-1.3.0-py313h2a31948_0.conda - sha256: 1e76ed9bcf07ef1df9c964d73e9cda08a0380845d09c8da1678a1687dc087c34 - md5: cdcdfe68c5bc9af9e908e35ebffc9fe1 +- conda: 
https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda + noarch: generic + sha256: c31ab719d256bc6f89926131e88ecd0f0c5d003fe8481852c6424f4ec6c7eb29 + md5: a2ac7763a9ac75055b68f325d3255265 depends: - - python - - vc >=14.3,<15 - - vc14_runtime >=14.44.35208 - - ucrt >=10.0.20348.0 - - python_abi 3.13.* *_cp313 - - zstd >=1.5.7,<1.6.0a0 + - python >=3.14 license: BSD-3-Clause AND MIT AND EPL-2.0 - purls: - - pkg:pypi/backports-zstd?source=hash-mapping - size: 240406 - timestamp: 1767045016907 + purls: [] + size: 7514 + timestamp: 1767044983590 - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda sha256: bf1e71c3c0a5b024e44ff928225a0874fc3c3356ec1a0b6fe719108e6d1288f6 md5: 5267bef8efea4127aacd1f4e1f149b6e @@ -3139,46 +3117,46 @@ packages: purls: [] size: 22714 timestamp: 1764017952449 -- conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py313hf159716_1.conda - sha256: dadec2879492adede0a9af0191203f9b023f788c18efd45ecac676d424c458ae - md5: 6c4d3597cf43f3439a51b2b13e29a4ba +- conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py314h3de4e8d_1.conda + sha256: 3ad3500bff54a781c29f16ce1b288b36606e2189d0b0ef2f67036554f47f12b0 + md5: 8910d2c46f7e7b519129f486e0fe927a depends: - __glibc >=2.17,<3.0.a0 - libgcc >=14 - libstdcxx >=14 - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 constrains: - libbrotlicommon 1.2.0 hb03c661_1 license: MIT license_family: MIT purls: - pkg:pypi/brotli?source=hash-mapping - size: 367721 - timestamp: 1764017371123 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-python-1.2.0-py313hde1f3bb_1.conda - sha256: 2e21dccccd68bedd483300f9ab87a425645f6776e6e578e10e0dd98c946e1be9 - md5: b03732afa9f4f54634d94eb920dfb308 + size: 367376 + timestamp: 1764017265553 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-python-1.2.0-py314h3daef5d_1.conda + sha256: 5c2e471fd262fcc3c5a9d5ea4dae5917b885e0e9b02763dbd0f0d9635ed4cb99 + md5: f9501812fe7c66b6548c7fcaa1c1f252 depends: - __osx >=11.0 - libcxx >=19 - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python >=3.14,<3.15.0a0 *_cp314 + - python_abi 3.14.* *_cp314 constrains: - libbrotlicommon 1.2.0 hc919400_1 license: MIT license_family: MIT purls: - pkg:pypi/brotli?source=hash-mapping - size: 359568 - timestamp: 1764018359470 -- conda: https://conda.anaconda.org/conda-forge/win-64/brotli-python-1.2.0-py313h3ebfc14_1.conda - sha256: 3558006cd6e836de8dff53cbe5f0b9959f96ea6a6776b4e14f1c524916dd956c - md5: 916a39a0261621b8c33e9db2366dd427 - depends: - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + size: 359854 + timestamp: 1764018178608 +- conda: https://conda.anaconda.org/conda-forge/win-64/brotli-python-1.2.0-py314he701e3d_1.conda + sha256: 6854ee7675135c57c73a04849c29cbebc2fb6a3a3bfee1f308e64bf23074719b + md5: 1302b74b93c44791403cbeee6a0f62a3 + depends: + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 - ucrt >=10.0.20348.0 - vc >=14.3,<15 - vc14_runtime >=14.44.35208 @@ -3188,8 +3166,8 @@ packages: license_family: MIT purls: - pkg:pypi/brotli?source=hash-mapping - size: 335605 - timestamp: 1764018132514 + size: 335782 + timestamp: 1764018443683 - conda: https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-hda65f42_8.conda sha256: c30daba32ddebbb7ded490f0e371eae90f51e72db620554089103b4a6934b0d5 md5: 
51a19bba1b8ebfb60df25cde030b7ebc @@ -3284,45 +3262,45 @@ packages: - pkg:pypi/certifi?source=compressed-mapping size: 150969 timestamp: 1767500900768 -- conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py313hf46b229_1.conda - sha256: 2162a91819945c826c6ef5efe379e88b1df0fe9a387eeba23ddcf7ebeacd5bd6 - md5: d0616e7935acab407d1543b28c446f6f +- conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py314h4a8dc5f_1.conda + sha256: c6339858a0aaf5d939e00d345c98b99e4558f285942b27232ac098ad17ac7f8e + md5: cf45f4278afd6f4e6d03eda0f435d527 depends: - __glibc >=2.17,<3.0.a0 - libffi >=3.5.2,<3.6.0a0 - libgcc >=14 - pycparser - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 license: MIT license_family: MIT purls: - pkg:pypi/cffi?source=hash-mapping - size: 298357 - timestamp: 1761202966461 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/cffi-2.0.0-py313h224173a_1.conda - sha256: 1fa69651f5e81c25d48ac42064db825ed1a3e53039629db69f86b952f5ce603c - md5: 050374657d1c7a4f2ea443c0d0cbd9a0 + size: 300271 + timestamp: 1761203085220 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/cffi-2.0.0-py314h44086f9_1.conda + sha256: 5b5ee5de01eb4e4fd2576add5ec9edfc654fbaf9293e7b7ad2f893a67780aa98 + md5: 10dd19e4c797b8f8bdb1ec1fbb6821d7 depends: - __osx >=11.0 - libffi >=3.5.2,<3.6.0a0 - pycparser - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python >=3.14,<3.15.0a0 *_cp314 + - python_abi 3.14.* *_cp314 license: MIT license_family: MIT purls: - pkg:pypi/cffi?source=hash-mapping - size: 291376 - timestamp: 1761203583358 -- conda: https://conda.anaconda.org/conda-forge/win-64/cffi-2.0.0-py313h5ea7bf4_1.conda - sha256: f867a11f42bb64a09b232e3decf10f8a8fe5194d7e3a216c6bac9f40483bd1c6 - md5: 55b44664f66a2caf584d72196aa98af9 + size: 292983 + timestamp: 1761203354051 +- conda: https://conda.anaconda.org/conda-forge/win-64/cffi-2.0.0-py314h5a2d7ad_1.conda + sha256: 924f2f01fa7a62401145ef35ab6fc95f323b7418b2644a87fea0ea68048880ed + md5: c360170be1c9183654a240aadbedad94 depends: - pycparser - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 - ucrt >=10.0.20348.0 - vc >=14.3,<15 - vc14_runtime >=14.44.35208 @@ -3330,8 +3308,8 @@ packages: license_family: MIT purls: - pkg:pypi/cffi?source=hash-mapping - size: 292681 - timestamp: 1761203203673 + size: 294731 + timestamp: 1761203441365 - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda sha256: aa589352e61bb221351a79e5946d56916e3c595783994884accdb3b97fe9d449 md5: 381bd45fb7aa032691f3063aff47e3a1 @@ -3396,45 +3374,45 @@ packages: - pkg:pypi/comm?source=hash-mapping size: 14690 timestamp: 1753453984907 -- conda: https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.3.3-py313h7037e92_3.conda - sha256: c545751fd48f119f2c28635514e6aa6ae784d9a1d4eb0e10be16c776e961f333 - md5: 6186382cb34a9953bf2a18fc763dc346 +- conda: https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.3.3-py314h9891dd4_3.conda + sha256: 54c79736927c787e535db184bb7f3bce13217cb7d755c50666cfc0da7c6c86f3 + md5: 72d57382d0f63c20a16b1d514fcde6ff depends: - __glibc >=2.17,<3.0.a0 - libgcc >=14 - libstdcxx >=14 - numpy >=1.25 - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 license: BSD-3-Clause license_family: BSD purls: - pkg:pypi/contourpy?source=hash-mapping - 
size: 297459 - timestamp: 1762525479137 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/contourpy-1.3.3-py313ha61f8ec_3.conda - sha256: a0e69aa3a039f0dab4af8c30933bcc6b718404263a002936c21c274b1f460958 - md5: 5643cff3e9ab77999fba139465156e35 + size: 299226 + timestamp: 1762525516589 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/contourpy-1.3.3-py314h784bc60_3.conda + sha256: e5ca7f079f9bd49a9fce837dfe9014d96603600a29e5575cce19895d3639182c + md5: d75fae59fe0c8863de391e95959b2c65 depends: - __osx >=11.0 - libcxx >=19 - numpy >=1.25 - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python >=3.14,<3.15.0a0 *_cp314 + - python_abi 3.14.* *_cp314 license: BSD-3-Clause license_family: BSD purls: - pkg:pypi/contourpy?source=hash-mapping - size: 259519 - timestamp: 1762526242160 -- conda: https://conda.anaconda.org/conda-forge/win-64/contourpy-1.3.3-py313hf069bd2_3.conda - sha256: f5acc168a1f5eedd159bd1a89dc1dd4d901dc0502b769b4fca2bc5bdb4293fcf - md5: a1d5292683730418cd19b6e0cefcfc76 + size: 262199 + timestamp: 1762525837746 +- conda: https://conda.anaconda.org/conda-forge/win-64/contourpy-1.3.3-py314h909e829_3.conda + sha256: f014eb687eb8dd25cec124594f4e48cf85803ff1db85a2a1f95719f9ec6434d2 + md5: 3647d90eea49efc6076729ef0ae81075 depends: - numpy >=1.25 - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 - ucrt >=10.0.20348.0 - vc >=14.3,<15 - vc14_runtime >=14.44.35208 @@ -3442,44 +3420,44 @@ packages: license_family: BSD purls: - pkg:pypi/contourpy?source=hash-mapping - size: 225553 - timestamp: 1762525633181 -- conda: https://conda.anaconda.org/conda-forge/linux-64/coverage-7.13.1-py313h3dea7bd_0.conda - sha256: 4275280f4fcef6cd0a0e5cd236120d7454a11390dd4c271378bf90bc563f6780 - md5: 82315acb438e857f809f556e2dcdb822 + size: 227536 + timestamp: 1762525688384 +- conda: https://conda.anaconda.org/conda-forge/linux-64/coverage-7.13.1-py314h67df5f8_0.conda + sha256: 63b91c7308704819bc35747ed88097c391a75502921f7f3c9422d42e1ed07909 + md5: a4525263f2fa741bffa4af1e40aec245 depends: - __glibc >=2.17,<3.0.a0 - libgcc >=14 - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 - tomli license: Apache-2.0 license_family: APACHE purls: - pkg:pypi/coverage?source=hash-mapping - size: 393234 - timestamp: 1766951417242 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/coverage-7.13.1-py313h65a2061_0.conda - sha256: 46e4af43bd60580fda7955cc6c21b3a40465ef25a98c2a256419dc74caae56b0 - md5: 3283d95f985c7f293cb13bb7e33500a5 + size: 410205 + timestamp: 1766951484026 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/coverage-7.13.1-py314h6e9b3f0_0.conda + sha256: 06311a6cb704c7c2db910ef4bda5f4d4f2c3a9e8bdffe4cc5c4481fc253a47d6 + md5: 39869c1b0010c430849a7c2585c65f47 depends: - __osx >=11.0 - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python >=3.14,<3.15.0a0 *_cp314 + - python_abi 3.14.* *_cp314 - tomli license: Apache-2.0 license_family: APACHE purls: - pkg:pypi/coverage?source=hash-mapping - size: 393649 - timestamp: 1766951606379 -- conda: https://conda.anaconda.org/conda-forge/win-64/coverage-7.13.1-py313hd650c13_0.conda - sha256: d41807f993eb1c097594f6481dc4a3ea1080ed57cfd1f0721216a3d7f7f3f949 - md5: 6799738f6603dfddd97389ee3e65e891 - depends: - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 
+ size: 409230 + timestamp: 1766951563419 +- conda: https://conda.anaconda.org/conda-forge/win-64/coverage-7.13.1-py314h2359020_0.conda + sha256: fd24db3e7d3407ae7a15cd636722c84ca26e4c274f639084cdd18afa6612fe5b + md5: c5cb6c314f63b0bd76c67775a515364d + depends: + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 - tomli - ucrt >=10.0.20348.0 - vc >=14.3,<15 @@ -3488,19 +3466,19 @@ packages: license_family: APACHE purls: - pkg:pypi/coverage?source=hash-mapping - size: 418313 - timestamp: 1766951491957 -- conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.13.11-py313hd8ed1ab_100.conda + size: 434074 + timestamp: 1766951384017 +- conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.2-py314hd8ed1ab_100.conda noarch: generic - sha256: 63f677762304e6f8dc55e11dff6aafe71129cbbd0a77d176b99ba1f6a5053b77 - md5: 5bf347916a543bcb290c780fa449bf73 + sha256: 9e345f306446500956ffb1414b773f5476f497d7a2b5335a59edd2c335209dbb + md5: 30f999d06f347b0116f0434624b6e559 depends: - - python >=3.13,<3.14.0a0 - - python_abi * *_cp313 + - python >=3.14,<3.15.0a0 + - python_abi * *_cp314 license: Python-2.0 purls: [] - size: 48369 - timestamp: 1765019689213 + size: 49298 + timestamp: 1765020324943 - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-cccl_linux-64-12.9.27-ha770c72_0.conda sha256: 2ee3b9564ca326226e5cda41d11b251482df8e7c757e333d28ec75213c75d126 md5: 87ff6381e33b76e5b9b179a2cdd005ec @@ -3743,51 +3721,51 @@ packages: - flatten-dict - networkx requires_python: '>=3.10' -- conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py313h5d5ffb9_0.conda - sha256: 29d10b4520846d3cbc511545552c11b726199013354e7517a53679272629c20d - md5: 80fd7ff9877570d12cabb5c5037dac89 +- conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py314h42812f9_0.conda + sha256: 2803e9285da433a5d704a63ac9c64c87b5df9aaa1e2d48cc333e65d5a945912e + md5: 69635aa34b45d84c2599ff8b48094978 depends: - python + - libgcc >=14 - __glibc >=2.17,<3.0.a0 - libstdcxx >=14 - - libgcc >=14 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 license: MIT license_family: MIT purls: - pkg:pypi/debugpy?source=hash-mapping - size: 2870642 - timestamp: 1765704059389 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/debugpy-1.8.19-py313hc37fe24_0.conda - sha256: 1eb7c9f5a994e273d714e945253fff40413fd63de9f6d5e01989d6d96199dad0 - md5: 95287e5abbe8a588d2a8d234f3d591a7 + size: 2888322 + timestamp: 1765704065377 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/debugpy-1.8.19-py314hf820bb6_0.conda + sha256: 5c263dafa3660660087443ad37e32e0597067cf098b351230a76adf83e462e12 + md5: 45961f5d077fca30eeff1a1973aca63d depends: - python - - python 3.13.* *_cp313 - - libcxx >=19 - __osx >=11.0 - - python_abi 3.13.* *_cp313 + - python 3.14.* *_cp314 + - libcxx >=19 + - python_abi 3.14.* *_cp314 license: MIT license_family: MIT purls: - - pkg:pypi/debugpy?source=compressed-mapping - size: 2759061 - timestamp: 1765840814720 -- conda: https://conda.anaconda.org/conda-forge/win-64/debugpy-1.8.19-py313h927ade5_0.conda - sha256: d6d62b00c9a81cf9f183b9f3929455f11e1906e37891a28b953237245df6a5f3 - md5: a7e77991e54b031328253da027e2f3e1 + - pkg:pypi/debugpy?source=hash-mapping + size: 2776268 + timestamp: 1765840821598 +- conda: https://conda.anaconda.org/conda-forge/win-64/debugpy-1.8.19-py314hb98de8c_0.conda + sha256: 0ad7f50f664ede3aafcd23458ce4f669f63e32f7efb74c0938260bdb829679df + md5: 3361deac30d356844406fbe6def54d5b depends: - python - vc >=14.3,<15 - vc14_runtime >=14.44.35208 - ucrt 
>=10.0.20348.0 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 license: MIT license_family: MIT purls: - pkg:pypi/debugpy?source=hash-mapping - size: 4002629 - timestamp: 1765840845981 + size: 4021751 + timestamp: 1765840833937 - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda sha256: c17c6b9937c08ad63cb20a26f403a3234088e57d4455600974a0ce865cb14017 md5: 9ce473d1d1be1cc3810856a48b3fab32 @@ -3907,55 +3885,22 @@ packages: - pathlib2>=2.3,<3.0 ; python_full_version < '3.4' - six>=1.12,<2.0 requires_python: '>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*' -- conda: https://conda.anaconda.org/conda-forge/linux-64/fonttools-4.61.1-py313h3dea7bd_0.conda - sha256: 97f225199e6e5dfb93f551087c0951fee92db2d29a9dcb6a0346d66bff06fea4 - md5: c0f36dfbb130da4f6ce2df31f6b25ea8 - depends: - - __glibc >=2.17,<3.0.a0 - - brotli - - libgcc >=14 - - munkres - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 - license: MIT - license_family: MIT - purls: - - pkg:pypi/fonttools?source=hash-mapping - size: 2988776 - timestamp: 1765633043435 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/fonttools-4.61.1-py313h7d74516_0.conda - sha256: 52d4aacd7c154adff1f0e86609bf1b0e63b7049c947c4df1e78eedb9f2913091 - md5: 894eb0c3e9a17643906a6da3209bf045 - depends: - - __osx >=11.0 - - brotli - - munkres - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 - - python_abi 3.13.* *_cp313 - license: MIT - license_family: MIT - purls: - - pkg:pypi/fonttools?source=hash-mapping - size: 2897709 - timestamp: 1765632961717 -- conda: https://conda.anaconda.org/conda-forge/win-64/fonttools-4.61.1-py313hd650c13_0.conda - sha256: da82b8e843103bf4aaab470e4b8025286357dc8c34cd47817350dcb14ad307fb - md5: c6fbf3a96192c26a75ed5755bd904fea +- conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda + sha256: bb74f1732065eb95c3ea4ae7f7ab29d6ddaafe6da32f009106bf9a335147cb77 + md5: d5da976e963e70364b9e3ff270842b9f depends: - brotli - munkres - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 - - ucrt >=10.0.20348.0 - - vc >=14.3,<15 - - vc14_runtime >=14.44.35208 + - python >=3.10 + - unicodedata2 >=15.1.0 + track_features: + - fonttools_no_compile license: MIT license_family: MIT purls: - pkg:pypi/fonttools?source=hash-mapping - size: 2523451 - timestamp: 1765632913315 + size: 834764 + timestamp: 1765632669874 - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda sha256: 2509992ec2fd38ab27c7cdb42cf6cadc566a1cc0d1021a2673475d9fa87c6276 md5: d3549fd50d450b6d9e7dddff25dd2110 @@ -4059,10 +4004,10 @@ packages: purls: [] size: 2009354 timestamp: 1765814947748 -- pypi: https://files.pythonhosted.org/packages/7e/71/ba21c3fb8c5dce83b8c01f458a42e99ffdb1963aeec08fff5a18588d8fd7/greenlet-3.3.0-cp313-cp313-win_amd64.whl +- pypi: https://files.pythonhosted.org/packages/7c/9a/9030e6f9aa8fd7808e9c31ba4c38f87c4f8ec324ee67431d181fe396d705/greenlet-3.3.0-cp314-cp314-win_amd64.whl name: greenlet version: 3.3.0 - sha256: 9ee1942ea19550094033c35d25d20726e4f1c40d59545815e1128ac58d416d38 + sha256: 73f51dd0e0bdb596fb0417e475fa3c5e32d4c83638296e560086b8d7da7c4170 requires_dist: - sphinx ; extra == 'docs' - furo ; extra == 'docs' @@ -4070,10 +4015,10 @@ packages: - psutil ; extra == 'test' - setuptools ; extra == 'test' requires_python: '>=3.10' -- pypi: https://files.pythonhosted.org/packages/fd/8e/424b8c6e78bd9837d14ff7df01a9829fc883ba2ab4ea787d4f848435f23f/greenlet-3.3.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl 
+- pypi: https://files.pythonhosted.org/packages/b8/14/bab308fc2c1b5228c3224ec2bf928ce2e4d21d8046c161e44a2012b5203e/greenlet-3.3.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl name: greenlet version: 3.3.0 - sha256: 087ea5e004437321508a8d6f20efc4cfec5e3c30118e1417ea96ed1d93950527 + sha256: 5773edda4dc00e173820722711d043799d3adb4f01731f40619e07ea2750b955 requires_dist: - sphinx ; extra == 'docs' - furo ; extra == 'docs' @@ -4447,10 +4392,10 @@ packages: name: jax-cuda12-pjrt version: 0.8.2 sha256: e3bab41ca7c48e4163db9e7efd271b3aa85f0fe45f5ed0708d6bbed93a59f977 -- pypi: https://files.pythonhosted.org/packages/1c/38/4ba2486f95fcf2120723932feacdded438e785258148b18a703cd1177e41/jax_cuda12_plugin-0.8.2-cp313-cp313-manylinux_2_27_x86_64.whl +- pypi: https://files.pythonhosted.org/packages/27/58/a5a27d4677d6890570f7e58cecd51891469cb620e6f64c8faed4935d93d0/jax_cuda12_plugin-0.8.2-cp314-cp314-manylinux_2_27_x86_64.whl name: jax-cuda12-plugin version: 0.8.2 - sha256: 82c6798be66bf8c773386918e4c8e5cd8119753f3bfb3ca4bbc46818283750c6 + sha256: a5898bac1d8ab6020b54546440256409f2c66bcbbb3a1099ca473c84843addad requires_dist: - jax-cuda12-pjrt==0.8.2 - nvidia-cublas-cu12>=12.1.3.1 ; sys_platform == 'linux' and extra == 'with-cuda' @@ -4466,28 +4411,28 @@ packages: - nvidia-cuda-nvrtc-cu12>=12.1.55 ; sys_platform == 'linux' and extra == 'with-cuda' - nvidia-nvshmem-cu12>=3.2.5 ; sys_platform == 'linux' and extra == 'with-cuda' requires_python: '>=3.11' -- pypi: https://files.pythonhosted.org/packages/6b/e0/91e5762a7ddb6351b07c742ca407cd28e26043d6945d6228b6c1b0881a45/jaxlib-0.8.2-cp313-cp313-manylinux_2_27_x86_64.whl +- pypi: https://files.pythonhosted.org/packages/5e/27/2e6032727e41ce74914277478021140947af59127d68aa9e6f3776b428fd/jaxlib-0.8.2-cp314-cp314-manylinux_2_27_x86_64.whl name: jaxlib version: 0.8.2 - sha256: 1bfbcf6c3de221784fa4cdb6765a09d71cb4298b15626b3d0409b3dfcd8a8667 + sha256: e6a97dfb0232eed9a2bb6e3828e4f682dbac1a7fea840bfda574cae2dbf5faf9 requires_dist: - scipy>=1.13 - numpy>=2.0 - ml-dtypes>=0.5.0 requires_python: '>=3.11' -- pypi: https://files.pythonhosted.org/packages/85/68/25b38673b07a808616ce7b6efb3eed491f983f3373a09cbbd03f67178563/jaxlib-0.8.2-cp313-cp313-win_amd64.whl +- pypi: https://files.pythonhosted.org/packages/b3/8c/af5a00b07a446414edf6b84a7397eab02cf01ba44b6ae1fce7798ce4c127/jaxlib-0.8.2-cp314-cp314-win_amd64.whl name: jaxlib version: 0.8.2 - sha256: f205e91c3a152a2a76c0bc59a6a2de03e87ec261b91e8812922777185e7b08f5 + sha256: 05b958f497e49824c432e734bb059723b7dfe69e2ad696a9f9c8ad82fff7c3f8 requires_dist: - scipy>=1.13 - numpy>=2.0 - ml-dtypes>=0.5.0 requires_python: '>=3.11' -- pypi: https://files.pythonhosted.org/packages/c5/22/c0ec75e43a13b2457d78d509f49b49a57fa302ffced4f4a2778e428cb0a6/jaxlib-0.8.2-cp313-cp313-macosx_11_0_arm64.whl +- pypi: https://files.pythonhosted.org/packages/d8/9d/dca93d916bf8664d7a2bb73ea3d219028dabbe382c31774348963287356a/jaxlib-0.8.2-cp314-cp314-macosx_11_0_arm64.whl name: jaxlib version: 0.8.2 - sha256: 4d006db96be020c8165212a1216372f8acac4ff4f8fb067743d694ef2b301ace + sha256: beffb004e7eeb5c9afb24439e2b2cf45a4ee3e3e8adf45e355edf2af62acf8b8 requires_dist: - scipy>=1.13 - numpy>=2.0 @@ -4798,39 +4743,39 @@ packages: purls: [] size: 134088 timestamp: 1754905959823 -- conda: https://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.4.9-py313hc8edb43_2.conda - sha256: 60d7d525db89401f88f5c91bdbb79d3afbf005e7d7c1326318659fa097607e51 - md5: 3e0e65595330e26515e31b7fc6d933c7 +- conda: 
https://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.4.9-py314h97ea11e_2.conda + sha256: a707d08c095d02148201f2da9fba465054fb750e33117e215892a4fefcc1b54a + md5: 57f1ce4f7ba6bcd460be8f83c8f04c69 depends: - python - - __glibc >=2.17,<3.0.a0 - libstdcxx >=14 - libgcc >=14 - - python_abi 3.13.* *_cp313 + - __glibc >=2.17,<3.0.a0 + - python_abi 3.14.* *_cp314 license: BSD-3-Clause license_family: BSD purls: - pkg:pypi/kiwisolver?source=hash-mapping - size: 77616 - timestamp: 1762488778882 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/kiwisolver-1.4.9-py313h7add70c_2.conda - sha256: adc6b89070b6858b81fbe24dd034a73295e8fa9ccb68ed871bf04f1ed498f51c - md5: 9583687276aaa393e723f3b7970be69f + size: 78071 + timestamp: 1762488742381 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/kiwisolver-1.4.9-py314h42813c9_2.conda + sha256: c4d7e6653d343e768110ec77ac1c6c89f313f77a19a1f2cd60b7c7b8b0758bdf + md5: 9aa431bf603c231e8c77a1b0842a85ed depends: - python - - libcxx >=19 - - python 3.13.* *_cp313 + - python 3.14.* *_cp314 - __osx >=11.0 - - python_abi 3.13.* *_cp313 + - libcxx >=19 + - python_abi 3.14.* *_cp314 license: BSD-3-Clause license_family: BSD purls: - pkg:pypi/kiwisolver?source=hash-mapping - size: 68438 - timestamp: 1762488945877 -- conda: https://conda.anaconda.org/conda-forge/win-64/kiwisolver-1.4.9-py313h1a38498_2.conda - sha256: 40eafae7e9cdbe97eeb56ab0882816d3f68a2af4080a822f7349f986de2adeb6 - md5: f77249adfa3f0091e016610346affd09 + size: 68534 + timestamp: 1762489024029 +- conda: https://conda.anaconda.org/conda-forge/win-64/kiwisolver-1.4.9-py314hf309875_2.conda + sha256: ded907ab1ce24abcff20bc239e770ae7ef4cff6fdcfb8cc24ca59ebe736a1d3f + md5: e9d93271b021332f5492ff5478601614 depends: - python - vc >=14.3,<15 @@ -4839,13 +4784,13 @@ packages: - vc >=14.3,<15 - vc14_runtime >=14.44.35208 - ucrt >=10.0.20348.0 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 license: BSD-3-Clause license_family: BSD purls: - pkg:pypi/kiwisolver?source=hash-mapping - size: 73825 - timestamp: 1762488792613 + size: 73670 + timestamp: 1762488752873 - conda: https://conda.anaconda.org/conda-forge/linux-64/krb5-1.21.3-h659f571_0.conda sha256: 99df692f7a8a5c27cd14b5fb1374ee55e756631b9c3d659ed3ee60830249b238 md5: 3f43953b7d3fb3aaa1d0d0723d91e368 @@ -6386,58 +6331,24 @@ packages: - pkg:pypi/markdown-it-py?source=hash-mapping size: 64736 timestamp: 1754951288511 -- conda: https://conda.anaconda.org/conda-forge/linux-64/markupsafe-3.0.3-py313h3dea7bd_0.conda - sha256: a530a411bdaaf0b1e4de8869dfaca46cb07407bc7dc0702a9e231b0e5ce7ca85 - md5: c14389156310b8ed3520d84f854be1ee +- conda: https://conda.anaconda.org/conda-forge/noarch/markupsafe-3.0.3-pyh7db6752_0.conda + sha256: e0cbfea51a19b3055ca19428bd9233a25adca956c208abb9d00b21e7259c7e03 + md5: fab1be106a50e20f10fe5228fd1d1651 depends: - - __glibc >=2.17,<3.0.a0 - - libgcc >=14 - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 - constrains: - - jinja2 >=3.0.0 - license: BSD-3-Clause - license_family: BSD - purls: - - pkg:pypi/markupsafe?source=hash-mapping - size: 25909 - timestamp: 1759055357045 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/markupsafe-3.0.3-py313h7d74516_0.conda - sha256: e06902a1bf370fdd4ada0a8c81c504868fdb7e9971b72c6bd395aa4e5a497bd2 - md5: 3df5979cc0b761dda0053ffdb0bca3ea - depends: - - __osx >=11.0 - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 - - python_abi 3.13.* *_cp313 - constrains: - - jinja2 >=3.0.0 - license: BSD-3-Clause - license_family: BSD - purls: - - 
pkg:pypi/markupsafe?source=hash-mapping - size: 25778 - timestamp: 1759055530601 -- conda: https://conda.anaconda.org/conda-forge/win-64/markupsafe-3.0.3-py313hd650c13_0.conda - sha256: 988d14095c1392e055fd75e24544da2db01ade73b0c2f99ddc8e2b8678ead4cc - md5: 47eaaa4405741beb171ea6edc6eaf874 - depends: - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 - - ucrt >=10.0.20348.0 - - vc >=14.3,<15 - - vc14_runtime >=14.44.35208 + - python >=3.10 constrains: - jinja2 >=3.0.0 + track_features: + - markupsafe_no_compile license: BSD-3-Clause license_family: BSD purls: - pkg:pypi/markupsafe?source=hash-mapping - size: 28959 - timestamp: 1759055685616 -- conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py313h683a580_0.conda - sha256: b1117aa2c1d11ca70d1704054cdc8801cbcf2dfb846c565531edd417ddd82559 - md5: ffe67570e1a9192d2f4c189b27f75f89 + size: 15499 + timestamp: 1759055275624 +- conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py314h1194b4b_0.conda + sha256: ee773261fbd6c76fc8174b0e4e1ce272b0bbaa56610f130e9d3d1f575106f04f + md5: b8683e6068099b69c10dbfcf7204203f depends: - __glibc >=2.17,<3.0.a0 - contourpy >=1.0.1 @@ -6454,20 +6365,20 @@ packages: - packaging >=20.0 - pillow >=8 - pyparsing >=2.3.1 - - python >=3.13,<3.14.0a0 + - python >=3.14,<3.15.0a0 - python-dateutil >=2.7 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 - qhull >=2020.2,<2020.3.0a0 - tk >=8.6.13,<8.7.0a0 license: PSF-2.0 license_family: PSF purls: - - pkg:pypi/matplotlib?source=compressed-mapping - size: 8405862 - timestamp: 1763055358671 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/matplotlib-base-3.10.8-py313h58042b9_0.conda - sha256: 24767ca32ea9db74a4a5965d2df8c69c83c82583e8ba32b683123d406092e205 - md5: 745c18472bc6d3dc9146c3dec18bb740 + - pkg:pypi/matplotlib?source=hash-mapping + size: 8473358 + timestamp: 1763055439346 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/matplotlib-base-3.10.8-py314hd63e3f0_0.conda + sha256: 198dcc0ed83e78bc7bf48e6ef8d4ecd220e9cf1f07db98508251b2bc0be067f9 + md5: c84152e510d41378b8758826655b6ed7 depends: - __osx >=11.0 - contourpy >=1.0.1 @@ -6483,20 +6394,20 @@ packages: - packaging >=20.0 - pillow >=8 - pyparsing >=2.3.1 - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 + - python >=3.14,<3.15.0a0 + - python >=3.14,<3.15.0a0 *_cp314 - python-dateutil >=2.7 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 - qhull >=2020.2,<2020.3.0a0 license: PSF-2.0 license_family: PSF purls: - pkg:pypi/matplotlib?source=hash-mapping - size: 8197793 - timestamp: 1763056104477 -- conda: https://conda.anaconda.org/conda-forge/win-64/matplotlib-base-3.10.8-py313he1ded55_0.conda - sha256: f63c4a5ded62cfb216c9d107a3c4527940036eef19cf481418080a0bd9bc11d8 - md5: 05f96c429201a64ea752decf4b910a7c + size: 8286510 + timestamp: 1763055937766 +- conda: https://conda.anaconda.org/conda-forge/win-64/matplotlib-base-3.10.8-py314hfa45d96_0.conda + sha256: 82a50284275e8a1818cd3323846f3032dc89bd23a3f80dcf44e34a62b016256b + md5: 9d491a60700e0e90e92607fcc4e2566c depends: - contourpy >=1.0.1 - cycler >=0.10 @@ -6510,9 +6421,9 @@ packages: - packaging >=20.0 - pillow >=8 - pyparsing >=2.3.1 - - python >=3.13,<3.14.0a0 + - python >=3.14,<3.15.0a0 - python-dateutil >=2.7 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 - qhull >=2020.2,<2020.3.0a0 - ucrt >=10.0.20348.0 - vc >=14.3,<15 @@ -6521,8 +6432,8 @@ packages: license_family: PSF purls: - pkg:pypi/matplotlib?source=hash-mapping - size: 8007333 
- timestamp: 1763055517579 + size: 8185296 + timestamp: 1763055983613 - conda: https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.1-pyhd8ed1ab_0.conda sha256: 9d690334de0cd1d22c51bc28420663f4277cfa60d34fa5cad1ce284a13f1d603 md5: 00e120ce3e40bad7bfc78861ce3c4a25 @@ -6558,44 +6469,44 @@ packages: - pkg:pypi/mdurl?source=hash-mapping size: 14465 timestamp: 1733255681319 -- conda: https://conda.anaconda.org/conda-forge/linux-64/memray-1.19.1-py313h422961c_3.conda - sha256: 1a752d45a2c5da1289afac51ea5b89bde0a80f290708505b487f38d47b4e3267 - md5: 6f9810aa09fbdab0c6b941d48a3b72bb +- conda: https://conda.anaconda.org/conda-forge/linux-64/memray-1.19.1-py314hef15ded_3.conda + sha256: 43801200d3b8dcaa1f9ab47f527c9fe94028780b2760173a240e132e25be2194 + md5: cc1bee6de727d07ce2dad51a5e8364b9 depends: - python - rich >=11.2.0 - jinja2 - textual >=0.34.0 - - libgcc >=14 - libstdcxx >=14 + - libgcc >=14 - __glibc >=2.17,<3.0.a0 - - python_abi 3.13.* *_cp313 - lz4-c >=1.10.0,<1.11.0a0 - elfutils >=0.194,<0.195.0a0 - libunwind >=1.8.3,<1.9.0a0 + - python_abi 3.14.* *_cp314 license: Apache-2.0 AND BSD-3-Clause purls: - pkg:pypi/memray?source=hash-mapping - size: 1816303 - timestamp: 1765821582847 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/memray-1.19.1-py313h78c9487_3.conda - sha256: eece155fd7c5f59226e24015ae08e5d8eb9a3e453f6c97bf16d04348e7f94c97 - md5: f1dcaa6d7f501b2b8bd6294610c3982a + size: 1824670 + timestamp: 1765821568349 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/memray-1.19.1-py314habef2a7_3.conda + sha256: 912a462c888a867a22e6ebf0607ad3a42d078260cafc7d4f25654989a17f41ac + md5: f8c08fd9eb42146b0489d42069fb270e depends: - python - rich >=11.2.0 - jinja2 - textual >=0.34.0 + - python 3.14.* *_cp314 - __osx >=11.0 - - python 3.13.* *_cp313 - libcxx >=19 - lz4-c >=1.10.0,<1.11.0a0 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 license: Apache-2.0 AND BSD-3-Clause purls: - pkg:pypi/memray?source=hash-mapping - size: 1712578 - timestamp: 1765821632543 + size: 1721669 + timestamp: 1765821674618 - conda: https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.0-pyhcf101f3_0.conda sha256: d3fb4beb5e0a52b6cc33852c558e077e1bfe44df1159eb98332d69a264b14bae md5: b11e360fc4de2b0035fc8aaa74f17fd6 @@ -6623,10 +6534,10 @@ packages: purls: [] size: 100224829 timestamp: 1767634557029 -- pypi: https://files.pythonhosted.org/packages/d9/a1/4008f14bbc616cfb1ac5b39ea485f9c63031c4634ab3f4cf72e7541f816a/ml_dtypes-0.5.4-cp313-cp313-macosx_10_13_universal2.whl +- pypi: https://files.pythonhosted.org/packages/72/4e/1339dc6e2557a344f5ba5590872e80346f76f6cb2ac3dd16e4666e88818c/ml_dtypes-0.5.4-cp314-cp314-macosx_10_13_universal2.whl name: ml-dtypes version: 0.5.4 - sha256: 8c760d85a2f82e2bed75867079188c9d18dae2ee77c25a54d60e9cc79be1bc48 + sha256: 2b857d3af6ac0d39db1de7c706e69c7f9791627209c3d6dedbfca8c7e5faec22 requires_dist: - numpy>=1.21 - numpy>=1.21.2 ; python_full_version >= '3.10' @@ -6639,10 +6550,10 @@ packages: - pylint>=2.6.0 ; extra == 'dev' - pyink ; extra == 'dev' requires_python: '>=3.9' -- pypi: https://files.pythonhosted.org/packages/e1/8b/200088c6859d8221454825959df35b5244fa9bdf263fd0249ac5fb75e281/ml_dtypes-0.5.4-cp313-cp313-win_amd64.whl +- pypi: https://files.pythonhosted.org/packages/c6/bb/82c7dcf38070b46172a517e2334e665c5bf374a262f99a283ea454bece7c/ml_dtypes-0.5.4-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl name: ml-dtypes version: 0.5.4 - sha256: f21c9219ef48ca5ee78402d5cc831bd58ea27ce89beda894428bc67a52da5328 + sha256: 
14a4fd3228af936461db66faccef6e4f41c1d82fcc30e9f8d58a08916b1d811f requires_dist: - numpy>=1.21 - numpy>=1.21.2 ; python_full_version >= '3.10' @@ -6655,10 +6566,10 @@ packages: - pylint>=2.6.0 ; extra == 'dev' - pyink ; extra == 'dev' requires_python: '>=3.9' -- pypi: https://files.pythonhosted.org/packages/eb/33/40cd74219417e78b97c47802037cf2d87b91973e18bb968a7da48a96ea44/ml_dtypes-0.5.4-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl +- pypi: https://files.pythonhosted.org/packages/e9/93/2bfed22d2498c468f6bcd0d9f56b033eaa19f33320389314c19ef6766413/ml_dtypes-0.5.4-cp314-cp314-win_amd64.whl name: ml-dtypes version: 0.5.4 - sha256: 533ce891ba774eabf607172254f2e7260ba5f57bdd64030c9a4fcfbd99815d0d + sha256: 8c6a2dcebd6f3903e05d51960a8058d6e131fe69f952a5397e5dbabc841b6d56 requires_dist: - numpy>=1.21 - numpy>=1.21.2 ; python_full_version >= '3.10' @@ -6836,49 +6747,49 @@ packages: - pkg:pypi/notebook-shim?source=hash-mapping size: 16817 timestamp: 1733408419340 -- conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py313hf6604e3_1.conda - sha256: 2f8aff2a17e4d43012e9863ef4392e6d5de3ae9da0c3e322831f8c5c3d86df71 - md5: dce261869f78ba9b81b9091b084d328d +- conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py314h2b28147_1.conda + sha256: 81425306df4f0ddba159e80c8d91323a34df335079ca93a194201e57b337231c + md5: ab17cb5f388fa17c08937cb9cc24e7b6 depends: - python + - __glibc >=2.17,<3.0.a0 - libgcc >=14 - libstdcxx >=14 - - __glibc >=2.17,<3.0.a0 - - python_abi 3.13.* *_cp313 - - libcblas >=3.9.0,<4.0a0 - liblapack >=3.9.0,<4.0a0 + - libcblas >=3.9.0,<4.0a0 - libblas >=3.9.0,<4.0a0 + - python_abi 3.14.* *_cp314 constrains: - numpy-base <0a0 license: BSD-3-Clause license_family: BSD purls: - pkg:pypi/numpy?source=hash-mapping - size: 8919234 - timestamp: 1766383469748 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/numpy-2.3.5-py313h16eae64_1.conda - sha256: d759e7fee853d8e18709a15b8fc8a6db90c96986cb9d316c4d5ccdf5a1d3f61f - md5: c72599556b49dc853839f4439c1eea32 + size: 8983076 + timestamp: 1766383421113 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/numpy-2.3.5-py314hae46ccb_1.conda + sha256: bc9dfe41ba4898365a82c485416fd4a572f86d94e606d89379766de70d34fc79 + md5: d421394cf6758a6f27ead1530cfdfa6a depends: - python - libcxx >=19 - __osx >=11.0 - - python 3.13.* *_cp313 - - liblapack >=3.9.0,<4.0a0 - - python_abi 3.13.* *_cp313 + - python 3.14.* *_cp314 - libcblas >=3.9.0,<4.0a0 + - python_abi 3.14.* *_cp314 - libblas >=3.9.0,<4.0a0 + - liblapack >=3.9.0,<4.0a0 constrains: - numpy-base <0a0 license: BSD-3-Clause license_family: BSD purls: - pkg:pypi/numpy?source=hash-mapping - size: 6792353 - timestamp: 1766383288679 -- conda: https://conda.anaconda.org/conda-forge/win-64/numpy-2.3.5-py313hce7ae62_1.conda - sha256: c02d9587864174146bf0024051c76d368b2de18c94421e2f4e611fbb18576dd1 - md5: 78749843445581c6dcc0cb80d146982d + size: 6861028 + timestamp: 1766383292611 +- conda: https://conda.anaconda.org/conda-forge/win-64/numpy-2.3.5-py314h06c3c77_1.conda + sha256: 111a7af69521dce54ce6b4d89ef767ade9f3769576353a526174792de8702b5d + md5: 71dabea9914329c08b4864955c3793fc depends: - python - vc >=14.3,<15 @@ -6886,16 +6797,16 @@ packages: - ucrt >=10.0.20348.0 - liblapack >=3.9.0,<4.0a0 - libblas >=3.9.0,<4.0a0 + - python_abi 3.14.* *_cp314 - libcblas >=3.9.0,<4.0a0 - - python_abi 3.13.* *_cp313 constrains: - numpy-base <0a0 license: BSD-3-Clause license_family: BSD purls: - - pkg:pypi/numpy?source=hash-mapping - size: 7524105 - timestamp: 1766383318405 
+ - pkg:pypi/numpy?source=compressed-mapping + size: 7584934 + timestamp: 1766383321713 - pypi: https://files.pythonhosted.org/packages/77/3c/aa88abe01f3be3d1f8f787d1d33dc83e76fec05945f9a28fbb41cfb99cd5/nvidia_cublas_cu12-12.9.1.4-py3-none-manylinux_2_27_x86_64.whl name: nvidia-cublas-cu12 version: 12.9.1.4 @@ -7073,53 +6984,53 @@ packages: - sqlalchemy>=1.3 - typing-extensions requires_python: '>=3.10' -- conda: https://conda.anaconda.org/conda-forge/linux-64/orjson-3.11.5-py313h541fbb8_0.conda - sha256: 6bb36f180ea4ba4f13f5e6ef8ec0b2fdd010d73430af53a05986ffc312091e8f - md5: 5dd1f02f38d71a29f3cfaf13c4cbf3dd +- conda: https://conda.anaconda.org/conda-forge/linux-64/orjson-3.11.5-py314h3b757c3_0.conda + sha256: f8da6a925be44a867c172dd945049d7690ba6ae3a7905b61b1d5a4ba81fe0554 + md5: 15ae5e4f52f2d9a98997e8859d35aa21 depends: - python - - __glibc >=2.17,<3.0.a0 - libgcc >=14 - - python_abi 3.13.* *_cp313 + - __glibc >=2.17,<3.0.a0 + - python_abi 3.14.* *_cp314 constrains: - __glibc >=2.17 license: Apache-2.0 license_family: APACHE purls: - pkg:pypi/orjson?source=hash-mapping - size: 317253 - timestamp: 1765811463186 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/orjson-3.11.5-py313hfea8034_0.conda - sha256: 259cf50b358d2c1915123f0bf889db27d277efab7a3388c287f0dd4797764fe5 - md5: d80421fc2b6f692925c82351f1c98407 + size: 317280 + timestamp: 1765811464445 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/orjson-3.11.5-py314hda6d10a_0.conda + sha256: 08f70edd4fc9f684083d18350c8a33c5a092e05aaaab7a97b09b381f8ca19eb7 + md5: 21db7b1b5c5c04461bf40c33953f8cf7 depends: - python + - python 3.14.* *_cp314 - __osx >=11.0 - - python 3.13.* *_cp313 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 constrains: - __osx >=11.0 license: Apache-2.0 license_family: APACHE purls: - pkg:pypi/orjson?source=hash-mapping - size: 288912 - timestamp: 1765811468774 -- conda: https://conda.anaconda.org/conda-forge/win-64/orjson-3.11.4-py313hfbe8231_1.conda - sha256: cf55c2f55f7c0e8973da287217315c4b8652ca29dbcbcecfd0b3b8e48e784422 - md5: db9e91caa5ee3f4891d340f8e323cc79 + size: 288991 + timestamp: 1765811524857 +- conda: https://conda.anaconda.org/conda-forge/win-64/orjson-3.11.5-py314h64f83cb_0.conda + sha256: 32014651690ee74eb65d4ef3f42f1ff679b216274f85c52b3be701cf16c6dff3 + md5: 84b27320349d7fbf9fb6ad06141eec5b depends: - python - vc >=14.3,<15 - vc14_runtime >=14.44.35208 - ucrt >=10.0.20348.0 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 license: Apache-2.0 license_family: APACHE purls: - pkg:pypi/orjson?source=hash-mapping - size: 197832 - timestamp: 1764441550892 + size: 197828 + timestamp: 1765811532648 - conda: https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda sha256: 1840bd90d25d4930d60f57b4f38d4e0ae3f5b8db2819638709c36098c6ba770c md5: e51f1e4089cad105b6cac64bd8166587 @@ -7157,10 +7068,10 @@ packages: - pkg:pypi/packaging?source=hash-mapping size: 62477 timestamp: 1745345660407 -- pypi: https://files.pythonhosted.org/packages/15/07/284f757f63f8a8d69ed4472bfd85122bd086e637bf4ed09de572d575a693/pandas-2.3.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl +- pypi: https://files.pythonhosted.org/packages/15/b2/0e62f78c0c5ba7e3d2c5945a82456f4fac76c480940f805e0b97fcbc2f65/pandas-2.3.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl name: pandas version: 2.3.3 - sha256: 318d77e0e42a628c04dc56bcef4b40de67918f7041c2b061af1da41dcff670ac + sha256: ee67acbbf05014ea6c763beb097e03cd629961c8a632075eeb34247120abcb4b requires_dist: - 
numpy>=1.22.4 ; python_full_version < '3.11' - numpy>=1.23.2 ; python_full_version == '3.11.*' @@ -7248,10 +7159,10 @@ packages: - xlsxwriter>=3.0.5 ; extra == 'all' - zstandard>=0.19.0 ; extra == 'all' requires_python: '>=3.9' -- pypi: https://files.pythonhosted.org/packages/31/94/72fac03573102779920099bcac1c3b05975c2cb5f01eac609faf34bed1ca/pandas-2.3.3-cp313-cp313-macosx_11_0_arm64.whl +- pypi: https://files.pythonhosted.org/packages/21/00/266d6b357ad5e6d3ad55093a7e8efc7dd245f5a842b584db9f30b0f0a287/pandas-2.3.3-cp314-cp314-macosx_11_0_arm64.whl name: pandas version: 2.3.3 - sha256: bdcd9d1167f4885211e401b3036c0c8d9e274eee67ea8d0758a256d60704cfe8 + sha256: 1611aedd912e1ff81ff41c745822980c49ce4a7907537be8692c8dbc31924593 requires_dist: - numpy>=1.22.4 ; python_full_version < '3.11' - numpy>=1.23.2 ; python_full_version == '3.11.*' @@ -7339,10 +7250,10 @@ packages: - xlsxwriter>=3.0.5 ; extra == 'all' - zstandard>=0.19.0 ; extra == 'all' requires_python: '>=3.9' -- pypi: https://files.pythonhosted.org/packages/4f/c7/e54682c96a895d0c808453269e0b5928a07a127a15704fedb643e9b0a4c8/pandas-2.3.3-cp313-cp313-win_amd64.whl +- pypi: https://files.pythonhosted.org/packages/a6/3d/124ac75fcd0ecc09b8fdccb0246ef65e35b012030defb0e0eba2cbbbe948/pandas-2.3.3-cp314-cp314-win_amd64.whl name: pandas version: 2.3.3 - sha256: f8bfc0e12dc78f777f323f55c58649591b2cd0c43534e8355c51d3fede5f4dee + sha256: 1b07204a219b3b7350abaae088f451860223a52cfb8a6c53358e7948735158e5 requires_dist: - numpy>=1.22.4 ; python_full_version < '3.11' - numpy>=1.23.2 ; python_full_version == '3.11.*' @@ -7481,76 +7392,76 @@ packages: - pkg:pypi/pexpect?source=hash-mapping size: 53561 timestamp: 1733302019362 -- conda: https://conda.anaconda.org/conda-forge/linux-64/pillow-12.1.0-py313h80991f8_0.conda - sha256: bdad1e21cadd64154c45fa554247dd672288ad51982ca7d54b3fab63e40938df - md5: 183fe6b9e99e5c2b464c1573ec78eac8 +- conda: https://conda.anaconda.org/conda-forge/linux-64/pillow-12.1.0-py314h8ec4b1a_0.conda + sha256: 6d8e32dc44165cff96ec9c00383e998fd035983d971c5f35ebed6f5f51c4022a + md5: f9b6a8fbb8dcb840a0c1c052dc5092e4 depends: - python - - libgcc >=14 - __glibc >=2.17,<3.0.a0 - - tk >=8.6.13,<8.7.0a0 - - python_abi 3.13.* *_cp313 - - libtiff >=4.7.1,<4.8.0a0 - - libjpeg-turbo >=3.1.2,<4.0a0 + - libgcc >=14 - lcms2 >=2.17,<3.0a0 - - libxcb >=1.17.0,<2.0a0 + - libfreetype >=2.14.1 + - libfreetype6 >=2.14.1 + - libjpeg-turbo >=3.1.2,<4.0a0 - zlib-ng >=2.3.2,<2.4.0a0 + - libxcb >=1.17.0,<2.0a0 - libwebp-base >=1.6.0,<2.0a0 - openjpeg >=2.5.4,<3.0a0 - - libfreetype >=2.14.1 - - libfreetype6 >=2.14.1 + - python_abi 3.14.* *_cp314 + - libtiff >=4.7.1,<4.8.0a0 + - tk >=8.6.13,<8.7.0a0 license: HPND purls: - pkg:pypi/pillow?source=hash-mapping - size: 1043309 - timestamp: 1767353193450 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/pillow-12.1.0-py313h45e5a15_0.conda - sha256: e5eaa7f00fca189848a0454303c56cc4edefd3e58a70bfd490d2cfe0d0aa525d - md5: 78a39731fd50dbd511de305934fe7e62 + size: 1072995 + timestamp: 1767353193452 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/pillow-12.1.0-py314hab283cf_0.conda + sha256: 3f88f2600862583c8bed3d37f4b95f0f96a459e9fdd36ca680472bc89a46e7bb + md5: 1f9dae6213643ac883e300c11df611eb depends: - python - __osx >=11.0 - - python 3.13.* *_cp313 - - libxcb >=1.17.0,<2.0a0 + - python 3.14.* *_cp314 + - libjpeg-turbo >=3.1.2,<4.0a0 - openjpeg >=2.5.4,<3.0a0 - - libtiff >=4.7.1,<4.8.0a0 + - python_abi 3.14.* *_cp314 - zlib-ng >=2.3.2,<2.4.0a0 - - tk >=8.6.13,<8.7.0a0 - - libjpeg-turbo >=3.1.2,<4.0a0 
- - python_abi 3.13.* *_cp313 + - libxcb >=1.17.0,<2.0a0 - lcms2 >=2.17,<3.0a0 - libfreetype >=2.14.1 - libfreetype6 >=2.14.1 + - libtiff >=4.7.1,<4.8.0a0 + - tk >=8.6.13,<8.7.0a0 - libwebp-base >=1.6.0,<2.0a0 license: HPND purls: - pkg:pypi/pillow?source=hash-mapping - size: 966296 - timestamp: 1767353279679 -- conda: https://conda.anaconda.org/conda-forge/win-64/pillow-12.1.0-py313h38f99e1_0.conda - sha256: 181b4d169e7a671c387427ceb398d931802adace8808836b44295b07c3484abd - md5: 1927a42726a4ca0e94d5e8cb94c7a06d + size: 995543 + timestamp: 1767353279681 +- conda: https://conda.anaconda.org/conda-forge/win-64/pillow-12.1.0-py314h61b30b5_0.conda + sha256: b30a83db337dab8579a46e3da7906851f53d6cf8c09695aef6d2a38b17636c1c + md5: 17dbdfedee39f31166b7e548f3ccc58a depends: - python - vc >=14.3,<15 - vc14_runtime >=14.44.35208 - ucrt >=10.0.20348.0 - - lcms2 >=2.17,<3.0a0 - - libwebp-base >=1.6.0,<2.0a0 - - python_abi 3.13.* *_cp313 - libfreetype >=2.14.1 - libfreetype6 >=2.14.1 - - openjpeg >=2.5.4,<3.0a0 + - tk >=8.6.13,<8.7.0a0 + - libwebp-base >=1.6.0,<2.0a0 + - lcms2 >=2.17,<3.0a0 + - libtiff >=4.7.1,<4.8.0a0 - zlib-ng >=2.3.2,<2.4.0a0 + - openjpeg >=2.5.4,<3.0a0 - libjpeg-turbo >=3.1.2,<4.0a0 - - libtiff >=4.7.1,<4.8.0a0 - libxcb >=1.17.0,<2.0a0 - - tk >=8.6.13,<8.7.0a0 + - python_abi 3.14.* *_cp314 license: HPND purls: - pkg:pypi/pillow?source=hash-mapping - size: 946833 - timestamp: 1767353195062 + size: 973387 + timestamp: 1767353195064 - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda sha256: 04c64fb78c520e5c396b6e07bc9082735a5cc28175dbe23138201d0a9441800b md5: 1bd2e65c8c7ef24f4639ae6e850dacc2 @@ -7630,49 +7541,49 @@ packages: - pkg:pypi/prompt-toolkit?source=hash-mapping size: 273927 timestamp: 1756321848365 -- conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py313h54dd161_0.conda - sha256: 8a5f773e22ccd08fbda57c92f1d094533474db75f70db35311912cdcdb2f18ad - md5: d362949a1ed1ad4693b3928ad1d32c93 +- conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py314h0f05182_0.conda + sha256: 324455a702ef721290de6e51d9af4f7ca057546d6398bbc6e88454db17cdaf6b + md5: 28af9719e28f0054e9aee68153899293 depends: - python - - libgcc >=14 - __glibc >=2.17,<3.0.a0 - - python_abi 3.13.* *_cp313 + - libgcc >=14 + - python_abi 3.14.* *_cp314 license: BSD-3-Clause license_family: BSD purls: - pkg:pypi/psutil?source=hash-mapping - size: 225429 - timestamp: 1767012386804 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/psutil-7.2.1-py313h6688731_0.conda - sha256: 2abd12a0371836075a72e12fde44f63ea08b3781e5b6ec997233d50b9c9832d9 - md5: c3a1b24571871fec4498a0226a3c22c1 + size: 228170 + timestamp: 1767012382363 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/psutil-7.2.1-py314ha14b1ff_0.conda + sha256: 686b643b97df8e7076b971820fb9b5d2ed0ea8a5a82922910da1600a6f462b79 + md5: 6d799fc0d0178eb63202bf99ff7bc24f depends: - python - - python 3.13.* *_cp313 + - python 3.14.* *_cp314 - __osx >=11.0 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 license: BSD-3-Clause license_family: BSD purls: - pkg:pypi/psutil?source=hash-mapping - size: 238851 - timestamp: 1767012473931 -- conda: https://conda.anaconda.org/conda-forge/win-64/psutil-7.2.1-py313h5fd188c_0.conda - sha256: 025574efd6e9d5b90d89ec1da8423132ab9c6131e21be7ec91b9fd7a14665a57 - md5: 8732097a02c66f6b260dd15b705a014e + size: 241751 + timestamp: 1767012600474 +- conda: https://conda.anaconda.org/conda-forge/win-64/psutil-7.2.1-py314hc5dbbe4_0.conda + sha256: 
d776855d47e14d8b1521a3949c1d1dc3848c690170253ecc439264e219859e22 + md5: 65df3730bedf9c24f54414c8316f8e72 depends: - python - vc >=14.3,<15 - vc14_runtime >=14.44.35208 - ucrt >=10.0.20348.0 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 license: BSD-3-Clause license_family: BSD purls: - - pkg:pypi/psutil?source=hash-mapping - size: 243141 - timestamp: 1767012395730 + - pkg:pypi/psutil?source=compressed-mapping + size: 245991 + timestamp: 1767012412984 - conda: https://conda.anaconda.org/conda-forge/linux-64/pthread-stubs-0.4-hb9d3cd8_1002.conda sha256: 9c88f8c64590e9567c6c80823f0328e58d3b1efb0e1c539c0315ceca764e0973 md5: b3c17d95b5a10c6e64a21fa17573e70e @@ -7761,38 +7672,38 @@ packages: - pkg:pypi/pygments?source=hash-mapping size: 889287 timestamp: 1750615908735 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-core-12.1-py313h40b429f_0.conda - sha256: 307ca29ebf2317bd2561639b1ee0290fd8c03c3450fa302b9f9437d8df6a5280 - md5: 31a0a72f3466682d0ea2ebcbd7d319b8 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-core-12.1-py314h3a4d195_0.conda + sha256: df5af268c5a74b7160d772c263ece6f43257faff571783443e34b5f1d5a61cf2 + md5: 75a84fc8337557347252cc4fd3ba2a93 depends: - __osx >=11.0 - libffi >=3.5.2,<3.6.0a0 - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python >=3.14,<3.15.0a0 *_cp314 + - python_abi 3.14.* *_cp314 - setuptools license: MIT license_family: MIT purls: - pkg:pypi/pyobjc-core?source=hash-mapping - size: 481508 - timestamp: 1763152124940 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-framework-cocoa-12.1-py313hcc5defa_0.conda - sha256: 194e188d8119befc952d04157079733e2041a7a502d50340ddde632658799fdc - md5: a6d28c8fc266a3d3c3dae183e25c4d31 + size: 483374 + timestamp: 1763151489724 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-framework-cocoa-12.1-py314h36abed7_0.conda + sha256: aa76ee4328d0514d7c1c455dcd2d3b547db1c59797e54ce0a3f27de5b970e508 + md5: 4219bb3408016e22316cf8b443b5ef93 depends: - __osx >=11.0 - libffi >=3.5.2,<3.6.0a0 - pyobjc-core 12.1.* - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python >=3.14,<3.15.0a0 *_cp314 + - python_abi 3.14.* *_cp314 license: MIT license_family: MIT purls: - pkg:pypi/pyobjc-framework-cocoa?source=hash-mapping - size: 376136 - timestamp: 1763160678792 + size: 374792 + timestamp: 1763160601898 - conda: https://conda.anaconda.org/conda-forge/noarch/pyparsing-3.3.1-pyhcf101f3_0.conda sha256: 0c70bc577f5efa87501bdc841b88f594f4d3f3a992dfb851e2130fa5c817835b md5: d837065e4e0de4962c3462079c23f969 @@ -7917,10 +7828,10 @@ packages: - pkg:pypi/pytest-xdist?source=hash-mapping size: 39300 timestamp: 1751452761594 -- conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.13.11-hc97d973_100_cp313.conda +- conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.14.2-h32b2ec7_100_cp314.conda build_number: 100 - sha256: 9cf014cf28e93ee242bacfbf664e8b45ae06e50b04291e640abeaeb0cba0364c - md5: 0cbb0010f1d8ecb64a428a8d4214609e + sha256: a120fb2da4e4d51dd32918c149b04a08815fd2bd52099dad1334647984bb07f1 + md5: 1cef1236a05c3a98f68c33ae9425f656 depends: - __glibc >=2.17,<3.0.a0 - bzip2 >=1.0.8,<2.0a0 @@ -7935,19 +7846,20 @@ packages: - libzlib >=1.3.1,<2.0a0 - ncurses >=6.5,<7.0a0 - openssl >=3.5.4,<4.0a0 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 - readline >=8.2,<9.0a0 - tk >=8.6.13,<8.7.0a0 - tzdata 
+ - zstd >=1.5.7,<1.6.0a0 license: Python-2.0 purls: [] - size: 37226336 - timestamp: 1765021889577 - python_site_packages_path: lib/python3.13/site-packages -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.13.11-hfc2f54d_100_cp313.conda + size: 36790521 + timestamp: 1765021515427 + python_site_packages_path: lib/python3.14/site-packages +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.14.2-h40d2674_100_cp314.conda build_number: 100 - sha256: c476f4e9b6d97c46b496b442878924868a54e5727251549ebfc82027aa52af68 - md5: 18a8c69608151098a8fb75eea64cc266 + sha256: 1a93782e90b53e04c2b1a50a0f8bf0887936649d19dba6a05b05c4b44dae96b7 + md5: 14f15ab0d31a2ee5635aa56e77132594 depends: - __osx >=11.0 - bzip2 >=1.0.8,<2.0a0 @@ -7959,19 +7871,20 @@ packages: - libzlib >=1.3.1,<2.0a0 - ncurses >=6.5,<7.0a0 - openssl >=3.5.4,<4.0a0 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 - readline >=8.2,<9.0a0 - tk >=8.6.13,<8.7.0a0 - tzdata + - zstd >=1.5.7,<1.6.0a0 license: Python-2.0 purls: [] - size: 12920650 - timestamp: 1765020887340 - python_site_packages_path: lib/python3.13/site-packages -- conda: https://conda.anaconda.org/conda-forge/win-64/python-3.13.11-h09917c8_100_cp313.conda + size: 13575758 + timestamp: 1765021280625 + python_site_packages_path: lib/python3.14/site-packages +- conda: https://conda.anaconda.org/conda-forge/win-64/python-3.14.2-h4b44e0e_100_cp314.conda build_number: 100 - sha256: 0ee0402368783e1fad10025719530499c517a3dbbdfbe18351841d9b7aef1d6a - md5: 9e4c9a7ee9c4ab5b3778ab73e583283e + sha256: 6857d7c97cc71fe9ba298dcb1d3b66cc7df425132ab801babd655faa3df48f32 + md5: c3c73414d5ae3f543c531c978d9cc8b8 depends: - bzip2 >=1.0.8,<2.0a0 - libexpat >=2.7.3,<3.0a0 @@ -7981,16 +7894,17 @@ packages: - libsqlite >=3.51.1,<4.0a0 - libzlib >=1.3.1,<2.0a0 - openssl >=3.5.4,<4.0a0 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 - tk >=8.6.13,<8.7.0a0 - tzdata - ucrt >=10.0.20348.0 - vc >=14.3,<15 - vc14_runtime >=14.44.35208 + - zstd >=1.5.7,<1.6.0a0 license: Python-2.0 purls: [] - size: 16617922 - timestamp: 1765019627175 + size: 16833248 + timestamp: 1765020224759 python_site_packages_path: Lib/site-packages - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhe01879c_2.conda sha256: d6a17ece93bbd5139e02d2bd7dbfa80bee1a4261dced63f65f679121686bf664 @@ -8017,16 +7931,16 @@ packages: - pkg:pypi/fastjsonschema?source=hash-mapping size: 244628 timestamp: 1755304154927 -- conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.13.11-h4df99d1_100.conda - sha256: 4b08d4c2c4b956d306b4868d3faf724eebb5d6e6b170fad2eb0f2d4eb227f1af - md5: d1461b2e63b1909f4f5b41c823bd90ae +- conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.2-h4df99d1_100.conda + sha256: 8203dc90a5cb6687f5bfcf332eeaf494ec95d24ed13fca3c82ef840f0bb92a5d + md5: 0064ab66736c4814864e808169dc7497 depends: - - cpython 3.13.11.* - - python_abi * *_cp313 + - cpython 3.14.2.* + - python_abi * *_cp314 license: Python-2.0 purls: [] - size: 48352 - timestamp: 1765019767640 + size: 49287 + timestamp: 1765020424843 - conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda sha256: 4790787fe1f4e8da616edca4acf6a4f8ed4e7c6967aa31b920208fc8f95efcca md5: a61bf9ec79426938ff785eb69dbb1960 @@ -8066,17 +7980,17 @@ packages: - pkg:pypi/tzdata?source=compressed-mapping size: 143542 timestamp: 1765719982349 -- conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.13-8_cp313.conda +- conda: 
https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda build_number: 8 - sha256: 210bffe7b121e651419cb196a2a63687b087497595c9be9d20ebe97dd06060a7 - md5: 94305520c52a4aa3f6c2b1ff6008d9f8 + sha256: ad6d2e9ac39751cc0529dd1566a26751a0bf2542adb0c232533d32e176e21db5 + md5: 0539938c55b6b1a59b560e843ad864a4 constrains: - - python 3.13.* *_cp313 + - python 3.14.* *_cp314 license: BSD-3-Clause license_family: BSD purls: [] - size: 7002 - timestamp: 1752805902938 + size: 6989 + timestamp: 1752805904792 - conda: https://conda.anaconda.org/conda-forge/noarch/pytz-2025.2-pyhd8ed1ab_0.conda sha256: 8d2a8bf110cc1fc3df6904091dead158ba3e614d8402a83e51ed3a8aa93cdeb0 md5: bc8e3267d44011051f2eb14d22fb0960 @@ -8088,9 +8002,9 @@ packages: - pkg:pypi/pytz?source=hash-mapping size: 189015 timestamp: 1742920947249 -- conda: https://conda.anaconda.org/conda-forge/win-64/pywin32-311-py313h40c08fc_1.conda - sha256: 87eaeb79b5961e0f216aa840bc35d5f0b9b123acffaecc4fda4de48891901f20 - md5: 1ce4f826332dca56c76a5b0cc89fb19e +- conda: https://conda.anaconda.org/conda-forge/win-64/pywin32-311-py314h8f8f202_1.conda + sha256: 6918a8067f296f3c65d43e84558170c9e6c3f4dd735cfe041af41a7fdba7b171 + md5: 2d7b7ba21e8a8ced0eca553d4d53f773 depends: - python - vc >=14.3,<15 @@ -8099,19 +8013,19 @@ packages: - vc >=14.3,<15 - vc14_runtime >=14.44.35208 - ucrt >=10.0.20348.0 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 license: PSF-2.0 license_family: PSF purls: - pkg:pypi/pywin32?source=hash-mapping - size: 6695114 - timestamp: 1756487139550 -- conda: https://conda.anaconda.org/conda-forge/win-64/pywinpty-2.0.15-py313h5813708_1.conda - sha256: d34a7cd0a4a7dc79662cb6005e01d630245d9a942e359eb4d94b2fb464ed2552 - md5: 8f01ed27e2baa455e753301218e054fd - depends: - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + size: 6713155 + timestamp: 1756487145487 +- conda: https://conda.anaconda.org/conda-forge/win-64/pywinpty-2.0.15-py314h51f0985_1.conda + sha256: 048e20641da680aedaab285640a2aca56b7b5baf7a18f8f164f2796e13628c1f + md5: dd84e8748bd3c85a5c751b0576488080 + depends: + - python >=3.14.0rc3,<3.15.0a0 + - python_abi 3.14.* *_cp314 - ucrt >=10.0.20348.0 - vc >=14.2,<15 - vc14_runtime >=14.29.30139 @@ -8120,54 +8034,22 @@ packages: license_family: MIT purls: - pkg:pypi/pywinpty?source=hash-mapping - size: 216075 - timestamp: 1759556799508 -- conda: https://conda.anaconda.org/conda-forge/linux-64/pyyaml-6.0.3-py313h3dea7bd_0.conda - sha256: 40dcd6718dce5fbee8aabdd0519f23d456d8feb2e15ac352eaa88bbfd3a881af - md5: 4794ea0adaebd9f844414e594b142cb2 - depends: - - __glibc >=2.17,<3.0.a0 - - libgcc >=14 - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 - - yaml >=0.2.5,<0.3.0a0 - license: MIT - license_family: MIT - purls: - - pkg:pypi/pyyaml?source=hash-mapping - size: 207109 - timestamp: 1758892173548 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyyaml-6.0.3-py313h7d74516_0.conda - sha256: f5be0d84f72a567b7333b9efa74a65bfa44a25658cf107ffa3fc65d3ae6660d7 - md5: 0e8e3235217b4483a7461b63dca5826b - depends: - - __osx >=11.0 - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 - - python_abi 3.13.* *_cp313 - - yaml >=0.2.5,<0.3.0a0 - license: MIT - license_family: MIT - purls: - - pkg:pypi/pyyaml?source=hash-mapping - size: 191630 - timestamp: 1758892258120 -- conda: https://conda.anaconda.org/conda-forge/win-64/pyyaml-6.0.3-py313hd650c13_0.conda - sha256: 5d9fd32d318b9da615524589a372b33a6f3d07db2708de16570d70360bf638c2 - md5: c067122d76f8dcbe0848822942ba07be - depends: - - python 
>=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 - - ucrt >=10.0.20348.0 - - vc >=14.3,<15 - - vc14_runtime >=14.44.35208 - - yaml >=0.2.5,<0.3.0a0 + size: 216325 + timestamp: 1759557436167 +- conda: https://conda.anaconda.org/conda-forge/noarch/pyyaml-6.0.3-pyh7db6752_0.conda + sha256: 828af2fd7bb66afc9ab1c564c2046be391aaf66c0215f05afaf6d7a9a270fe2a + md5: b12f41c0d7fb5ab81709fcc86579688f + depends: + - python >=3.10.* + - yaml + track_features: + - pyyaml_no_compile license: MIT license_family: MIT purls: - pkg:pypi/pyyaml?source=hash-mapping - size: 182043 - timestamp: 1758892011955 + size: 45223 + timestamp: 1758891992558 - conda: https://conda.anaconda.org/conda-forge/linux-64/pyzmq-27.1.0-py312hfb55c3c_0.conda noarch: python sha256: a00a41b66c12d9c60e66b391e9a4832b7e28743348cf4b48b410b91927cd7819 @@ -8363,56 +8245,56 @@ packages: - pkg:pypi/rich?source=hash-mapping size: 200840 timestamp: 1760026188268 -- conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py313h843e2db_0.conda - sha256: 076d26e51c62c8ecfca6eb19e3c1febdd7632df1990a7aa53da5df5e54482b1c - md5: 779e3307a0299518713765b83a36f4b1 +- conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py314h2e6c369_0.conda + sha256: e53b0cbf3b324eaa03ca1fe1a688fdf4ab42cea9c25270b0a7307d8aaaa4f446 + md5: c1c368b5437b0d1a68f372ccf01cb133 depends: - python - libgcc >=14 - __glibc >=2.17,<3.0.a0 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 constrains: - __glibc >=2.17 license: MIT license_family: MIT purls: - pkg:pypi/rpds-py?source=hash-mapping - size: 383230 - timestamp: 1764543223529 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/rpds-py-0.30.0-py313h2c089d5_0.conda - sha256: db63344f91e8bfe77703c6764aa9eeafb44d165e286053214722814eabda0264 - md5: 190c2d0d4e98ec97df48cdb74caf44d8 + size: 376121 + timestamp: 1764543122774 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/rpds-py-0.30.0-py314haad56a0_0.conda + sha256: e161dd97403b8b8a083d047369a5cf854557dba1204d29e2f0250f5ac4403925 + md5: 76a4f88d1b7748c477abf3c341edc64c depends: - python - __osx >=11.0 - - python 3.13.* *_cp313 - - python_abi 3.13.* *_cp313 + - python 3.14.* *_cp314 + - python_abi 3.14.* *_cp314 constrains: - __osx >=11.0 license: MIT license_family: MIT purls: - pkg:pypi/rpds-py?source=hash-mapping - size: 358961 - timestamp: 1764543165314 -- conda: https://conda.anaconda.org/conda-forge/win-64/rpds-py-0.30.0-py313hfbe8231_0.conda - sha256: 27bd383787c0df7a0a926b11014fd692d60d557398dcf1d50c55aa2378507114 - md5: 58ae648b12cfa6df3923b5fd219931cb + size: 350976 + timestamp: 1764543169524 +- conda: https://conda.anaconda.org/conda-forge/win-64/rpds-py-0.30.0-py314h9f07db2_0.conda + sha256: e4435368c5c25076dc0f5918ba531c5a92caee8e0e2f9912ef6810049cf00db2 + md5: e86531e278ad304438e530953cd55d14 depends: - python - vc >=14.3,<15 - vc14_runtime >=14.44.35208 - ucrt >=10.0.20348.0 - - python_abi 3.13.* *_cp313 + - python_abi 3.14.* *_cp314 license: MIT license_family: MIT purls: - pkg:pypi/rpds-py?source=hash-mapping - size: 243419 - timestamp: 1764543047271 -- conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.16.3-py313h4b8bb8b_2.conda - sha256: a5ddc728be0589e770f59e45e3c6c670c56d96a801ddf76a304cc0af7bcef5c4 - md5: 0be9bd58abfb3e8f97260bd0176d5331 + size: 235780 + timestamp: 1764543046065 +- conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.16.3-py314hf07bd8e_2.conda + sha256: 652f9a235051c1d39ccd2fe7e9326792b046a1d93de42171977fa1ba9668a0e8 + md5: ee95e8bb52e35c3267a53d3ee1347cc4 depends: 
- __glibc >=2.17,<3.0.a0 - libblas >=3.9.0,<4.0a0 @@ -8425,17 +8307,17 @@ packages: - numpy <2.6 - numpy >=1.23,<3 - numpy >=1.25.2 - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 license: BSD-3-Clause license_family: BSD purls: - pkg:pypi/scipy?source=compressed-mapping - size: 16785487 - timestamp: 1766108773270 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/scipy-1.16.3-py313h29d7d31_2.conda - sha256: ee3cbddb7d598c78b592fafbfa3eaf8c89df353bbed56a1a9f32e9f7daa49bb4 - md5: a3324bd937a39cbbf1cbe0940160e19e + size: 16982488 + timestamp: 1766108668132 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/scipy-1.16.3-py314h725efaa_2.conda + sha256: 282f8b244f31d8c2e0ce401b0473e8090de4b59326018a360419693b629e6b87 + md5: 6333b784ddfcccd3f5569f812f66c352 depends: - __osx >=11.0 - libblas >=3.9.0,<4.0a0 @@ -8447,18 +8329,18 @@ packages: - numpy <2.6 - numpy >=1.23,<3 - numpy >=1.25.2 - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python >=3.14,<3.15.0a0 *_cp314 + - python_abi 3.14.* *_cp314 license: BSD-3-Clause license_family: BSD purls: - - pkg:pypi/scipy?source=hash-mapping - size: 13929516 - timestamp: 1766109298759 -- conda: https://conda.anaconda.org/conda-forge/win-64/scipy-1.16.3-py313he51e9a2_2.conda - sha256: 997a2202126425438a16de7ef1e5e924bd66feb43bda5b71326e281c7331489d - md5: a49556572438d5477f1eca06bb6d0770 + - pkg:pypi/scipy?source=compressed-mapping + size: 13880523 + timestamp: 1766109018710 +- conda: https://conda.anaconda.org/conda-forge/win-64/scipy-1.16.3-py314h221f224_2.conda + sha256: 99d6198dc05171610073083c9d218d2a9adfa756659b391183d21cca55f888f1 + md5: b600c47282ee91e492b89f65708a5c9a depends: - libblas >=3.9.0,<4.0a0 - libcblas >=3.9.0,<4.0a0 @@ -8466,8 +8348,8 @@ packages: - numpy <2.6 - numpy >=1.23,<3 - numpy >=1.25.2 - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 - ucrt >=10.0.20348.0 - vc >=14.3,<15 - vc14_runtime >=14.44.35208 @@ -8475,8 +8357,8 @@ packages: license_family: BSD purls: - pkg:pypi/scipy?source=hash-mapping - size: 15066293 - timestamp: 1766109539389 + size: 15082636 + timestamp: 1766109482825 - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.0.0-pyh5552912_0.conda sha256: 5893e203cb099c784bf5b08d29944b5402beebcc361d55e54b676e9b355c7844 md5: dcff6f8ea9e86a0bda978b88f89f2310 @@ -8529,40 +8411,40 @@ packages: - pkg:pypi/setuptools?source=hash-mapping size: 748788 timestamp: 1748804951958 -- conda: https://conda.anaconda.org/conda-forge/linux-64/simplejson-3.20.2-py313h07c4f96_1.conda - sha256: cf44d6bd3dc3be6b683fac251d6b53d508d041506a2101fd7cdb404468cf8be3 - md5: 1cc1de04373b633177f4d367b8b75270 +- conda: https://conda.anaconda.org/conda-forge/linux-64/simplejson-3.20.2-py314h5bd0f2a_1.conda + sha256: fde24560898ecbb63edb6580fbf09fa07e10f55a89f8ae35f891f712f1d07872 + md5: b2f9edf27e434edc6072e6f7c076015f depends: - __glibc >=2.17,<3.0.a0 - libgcc >=14 - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 license: MIT license_family: MIT purls: - pkg:pypi/simplejson?source=hash-mapping - size: 133692 - timestamp: 1762506927030 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/simplejson-3.20.2-py313h6535dbc_1.conda - sha256: ef09659f0248066e8c06a0bd8bd1a360b8158cd2d73c65c969897e20344c6a2a - md5: 27a8bc65b5f0aecb87a01568e573e6ae + 
size: 135289 + timestamp: 1762507017143 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/simplejson-3.20.2-py314h0612a62_1.conda + sha256: e6a6a2aab805c4c50464aecff3f752a78ce15bb1b9de006b1d929d0673f3a386 + md5: 82c463d19f1d85e60d520d129c67b483 depends: - __osx >=11.0 - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python >=3.14,<3.15.0a0 *_cp314 + - python_abi 3.14.* *_cp314 license: MIT license_family: MIT purls: - pkg:pypi/simplejson?source=hash-mapping - size: 133717 - timestamp: 1762507593463 -- conda: https://conda.anaconda.org/conda-forge/win-64/simplejson-3.20.2-py313h5ea7bf4_1.conda - sha256: b1ea3625e7dcda6ea6121dc61461da9bc9be54a99aa20ed26a5ee5b43663b5c4 - md5: bcdc4785e018f4325845f8217333a17e - depends: - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + size: 134426 + timestamp: 1762507464057 +- conda: https://conda.anaconda.org/conda-forge/win-64/simplejson-3.20.2-py314h5a2d7ad_1.conda + sha256: 28f67233b03f8f1ebdcd5b35d1700d75101be0e9decf4975b8dc867609d4a507 + md5: f5d14f3ecb62b185cb571b79034df477 + depends: + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 - ucrt >=10.0.20348.0 - vc >=14.3,<15 - vc14_runtime >=14.44.35208 @@ -8570,8 +8452,8 @@ packages: license_family: MIT purls: - pkg:pypi/simplejson?source=hash-mapping - size: 132684 - timestamp: 1762507090611 + size: 134026 + timestamp: 1762507518751 - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda sha256: 458227f759d5e3fcec5d9b7acce54e10c9e1f4f4b7ec978f3bfd54ce4ee9853d md5: 3339e3b65d58accf4ca4fb8748ab16b3 @@ -8586,15 +8468,15 @@ packages: timestamp: 1753199211006 - pypi: ./ name: skillmodels - version: 0.0.24.dev243+gc7ed219cb - sha256: 2a9a1ae4e4a106a7b7def4db3aa61be8cbf11a44695ee638f8252253cde97ee3 + version: 0.0.24.dev243+g3bb334b10.d20260109 + sha256: ec4cbcbac9f3a9700240882f959c3cad0f86692d93a963561b182ac9d6004ef1 requires_dist: - dags - frozendict - jax>=0.8 - numpy - pandas - requires_python: '>=3.13,<3.14' + requires_python: '>=3.14,<3.15' - conda: https://conda.anaconda.org/conda-forge/noarch/snakeviz-2.2.2-pyhd8ed1ab_1.conda sha256: 833326122c18887b338262c13365cb146b6702c79d72da74a1c6b8af4c50e162 md5: 421b7a950e384949ca1b0f04f0751ce0 @@ -8629,10 +8511,10 @@ packages: - pkg:pypi/soupsieve?source=compressed-mapping size: 37951 timestamp: 1766075884412 -- pypi: https://files.pythonhosted.org/packages/0e/50/80a8d080ac7d3d321e5e5d420c9a522b0aa770ec7013ea91f9a8b7d36e4a/sqlalchemy-2.0.45-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl +- pypi: https://files.pythonhosted.org/packages/89/a2/0e1590e9adb292b1d576dbcf67ff7df8cf55e56e78d2c927686d01080f4b/sqlalchemy-2.0.45-cp314-cp314-win_amd64.whl name: sqlalchemy version: 2.0.45 - sha256: 672c45cae53ba88e0dad74b9027dddd09ef6f441e927786b05bec75d949fbb2e + sha256: 4748601c8ea959e37e03d13dcda4a44837afcd1b21338e637f7c935b8da06177 requires_dist: - importlib-metadata ; python_full_version < '3.8' - greenlet>=1 ; platform_machine == 'AMD64' or platform_machine == 'WIN32' or platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'ppc64le' or platform_machine == 'win32' or platform_machine == 'x86_64' @@ -8667,10 +8549,10 @@ packages: - typing-extensions!=3.10.0.1 ; extra == 'aiosqlite' - sqlcipher3-binary ; extra == 'sqlcipher' requires_python: '>=3.7' -- pypi: 
https://files.pythonhosted.org/packages/bf/e1/3ccb13c643399d22289c6a9786c1a91e3dcbb68bce4beb44926ac2c557bf/sqlalchemy-2.0.45-py3-none-any.whl +- pypi: https://files.pythonhosted.org/packages/b3/27/caf606ee924282fe4747ee4fd454b335a72a6e018f97eab5ff7f28199e16/sqlalchemy-2.0.45-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl name: sqlalchemy version: 2.0.45 - sha256: 5225a288e4c8cc2308dbdd874edad6e7d0fd38eac1e9e5f23503425c8eee20d0 + sha256: 883c600c345123c033c2f6caca18def08f1f7f4c3ebeb591a63b6fceffc95cce requires_dist: - importlib-metadata ; python_full_version < '3.8' - greenlet>=1 ; platform_machine == 'AMD64' or platform_machine == 'WIN32' or platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'ppc64le' or platform_machine == 'win32' or platform_machine == 'x86_64' @@ -8705,10 +8587,10 @@ packages: - typing-extensions!=3.10.0.1 ; extra == 'aiosqlite' - sqlcipher3-binary ; extra == 'sqlcipher' requires_python: '>=3.7' -- pypi: https://files.pythonhosted.org/packages/c0/c5/d17113020b2d43073412aeca09b60d2009442420372123b8d49cc253f8b8/sqlalchemy-2.0.45-cp313-cp313-win_amd64.whl +- pypi: https://files.pythonhosted.org/packages/bf/e1/3ccb13c643399d22289c6a9786c1a91e3dcbb68bce4beb44926ac2c557bf/sqlalchemy-2.0.45-py3-none-any.whl name: sqlalchemy version: 2.0.45 - sha256: afbf47dc4de31fa38fd491f3705cac5307d21d4bb828a4f020ee59af412744ee + sha256: 5225a288e4c8cc2308dbdd874edad6e7d0fd38eac1e9e5f23503425c8eee20d0 requires_dist: - importlib-metadata ; python_full_version < '3.8' - greenlet>=1 ; platform_machine == 'AMD64' or platform_machine == 'WIN32' or platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'ppc64le' or platform_machine == 'win32' or platform_machine == 'x86_64' @@ -8902,40 +8784,40 @@ packages: - pkg:pypi/tomli?source=compressed-mapping size: 20973 timestamp: 1760014679845 -- conda: https://conda.anaconda.org/conda-forge/linux-64/tornado-6.5.3-py313h07c4f96_0.conda - sha256: 6006d4e5a6ff99be052c939e43adee844a38f2dc148f44a7c11aa0011fd3d811 - md5: 82da2dcf1ea3e298f2557b50459809e0 +- conda: https://conda.anaconda.org/conda-forge/linux-64/tornado-6.5.3-py314h5bd0f2a_0.conda + sha256: b8f9f9ae508d79c9c697eb01b6a8d2ed4bc1899370f44aa6497c8abbd15988ea + md5: e35f08043f54d26a1be93fdbf90d30c3 depends: - __glibc >=2.17,<3.0.a0 - libgcc >=14 - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 license: Apache-2.0 license_family: Apache purls: - pkg:pypi/tornado?source=hash-mapping - size: 878109 - timestamp: 1765458900582 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/tornado-6.5.4-py313h6535dbc_0.conda - sha256: a8130a361b7bc21190836ba8889276cc263fcb09f52bf22efcaed1de98179948 - md5: 67a85c1b5c17124eaf9194206afd5159 + size: 905436 + timestamp: 1765458949518 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/tornado-6.5.4-py314h0612a62_0.conda + sha256: affbc6300e1baef5848f6e69569733a3e7a118aa642487c853f53d6f2bd23b89 + md5: 83e1a2d7b0c1352870bbe9d9406135cf depends: - __osx >=11.0 - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python >=3.14,<3.15.0a0 *_cp314 + - python_abi 3.14.* *_cp314 license: Apache-2.0 license_family: Apache purls: - pkg:pypi/tornado?source=hash-mapping - size: 877647 - timestamp: 1765836696426 -- conda: https://conda.anaconda.org/conda-forge/win-64/tornado-6.5.4-py313h5ea7bf4_0.conda - sha256: 
81b131db1bebed88f11a5f9891c0c0a7c6998dfd96cd96f54839f3a0cbebd5a0 - md5: 1402782887fafaa117a8d76d2cfa4761 - depends: - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + size: 909298 + timestamp: 1765836779269 +- conda: https://conda.anaconda.org/conda-forge/win-64/tornado-6.5.4-py314h5a2d7ad_0.conda + sha256: 40fde32a4992ab0f875618f97d9aadf263d39c6c92ace7572c6b0a71c655abe1 + md5: 00157f40fd3ea957a2616e9ffda6b84f + depends: + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 - ucrt >=10.0.20348.0 - vc >=14.3,<15 - vc14_runtime >=14.44.35208 @@ -8943,8 +8825,8 @@ packages: license_family: Apache purls: - pkg:pypi/tornado?source=hash-mapping - size: 880049 - timestamp: 1765836649731 + size: 908399 + timestamp: 1765836848636 - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_1.conda sha256: f39a5620c6e8e9e98357507262a7869de2ae8cc07da8b7f84e517c9fd6c2b959 md5: 019a7385be9af33791c989871317e1ed @@ -9031,45 +8913,45 @@ packages: purls: [] size: 694692 timestamp: 1756385147981 -- conda: https://conda.anaconda.org/conda-forge/linux-64/ukkonen-1.0.1-py313h7037e92_6.conda - sha256: bd1f3d159b204be5aeeb3dd165fad447d3a1c5df75fec64407a68f210a0cb722 - md5: 1fa8d662361896873a165b051322073e +- conda: https://conda.anaconda.org/conda-forge/linux-64/ukkonen-1.0.1-py314h9891dd4_6.conda + sha256: ef6753f6febaa74d35253e4e0dd09dc9497af8e370893bd97c479f59346daa57 + md5: 28303a78c48916ab07b95ffdbffdfd6c depends: - __glibc >=2.17,<3.0.a0 - cffi - libgcc >=14 - libstdcxx >=14 - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 license: MIT license_family: MIT purls: - pkg:pypi/ukkonen?source=hash-mapping - size: 14648 - timestamp: 1761594865380 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/ukkonen-1.0.1-py313hc50a443_6.conda - sha256: 66596db68cd50d61af97b01de4fd6ba5b08c4f5c779c331888196253b4daf353 - md5: 8e87b6fff522cabf8c02878c24d44312 + size: 14762 + timestamp: 1761594960135 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/ukkonen-1.0.1-py314h6b18a25_6.conda + sha256: 2ef342cc861c52ec3ac464e89b192a37fd7afd79740b2c0773d2588fd8acff26 + md5: 452b75f09bc2a4c5eea4044b769bc659 depends: - __osx >=11.0 - cffi - libcxx >=19 - - python >=3.13,<3.14.0a0 - - python >=3.13,<3.14.0a0 *_cp313 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python >=3.14,<3.15.0a0 *_cp314 + - python_abi 3.14.* *_cp314 license: MIT license_family: MIT purls: - pkg:pypi/ukkonen?source=hash-mapping - size: 14535 - timestamp: 1761595088230 -- conda: https://conda.anaconda.org/conda-forge/win-64/ukkonen-1.0.1-py313hf069bd2_6.conda - sha256: f42cd55bd21746274d7074b93b53fb420b4ae0f8f1b6161cb2cc5004c20c7ec7 - md5: 77444fe3f3004fe52c5ee70626d11d66 + size: 14635 + timestamp: 1761595172213 +- conda: https://conda.anaconda.org/conda-forge/win-64/ukkonen-1.0.1-py314h909e829_6.conda + sha256: f65b3bf31d22ae37300ed2521352107be830e7c5ba805a4c93e2ce0e0f739078 + md5: 8528e182a2d9b5d14f0072734a24a6b9 depends: - cffi - - python >=3.13,<3.14.0a0 - - python_abi 3.13.* *_cp313 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 - ucrt >=10.0.20348.0 - vc >=14.3,<15 - vc14_runtime >=14.44.35208 @@ -9077,8 +8959,51 @@ packages: license_family: MIT purls: - pkg:pypi/ukkonen?source=hash-mapping - size: 18266 - timestamp: 1761595426854 + size: 18357 + timestamp: 1761595080794 +- conda: https://conda.anaconda.org/conda-forge/linux-64/unicodedata2-17.0.0-py314h5bd0f2a_1.conda + sha256: 
d1dafc15fc5d2b1dd5b0a525e8a815028de20dd53b2c775a1b56e8e4839fb736 + md5: 58e2ee530005067c5db23f33c6ab43d2 + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 + license: Apache-2.0 + license_family: Apache + purls: + - pkg:pypi/unicodedata2?source=hash-mapping + size: 409745 + timestamp: 1763055060898 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/unicodedata2-17.0.0-py314h0612a62_1.conda + sha256: 48c51dd2ef696f7a1a3635716585a8e383a8c00e719305cfda2b480c36ee1283 + md5: c673decfe1f120b0717d0aa193b10060 + depends: + - __osx >=11.0 + - python >=3.14,<3.15.0a0 + - python >=3.14,<3.15.0a0 *_cp314 + - python_abi 3.14.* *_cp314 + license: Apache-2.0 + license_family: Apache + purls: + - pkg:pypi/unicodedata2?source=hash-mapping + size: 416770 + timestamp: 1763055099322 +- conda: https://conda.anaconda.org/conda-forge/win-64/unicodedata2-17.0.0-py314h5a2d7ad_1.conda + sha256: 47e061aec1487519c398e1c999ac3680f068f9e1d8574c8b365eac4787773250 + md5: 1f90bb13fa5ced89ca4dcc0af3bbebf3 + depends: + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 + - ucrt >=10.0.20348.0 + - vc >=14.3,<15 + - vc14_runtime >=14.44.35208 + license: Apache-2.0 + license_family: Apache + purls: + - pkg:pypi/unicodedata2?source=hash-mapping + size: 405783 + timestamp: 1763054877424 - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda sha256: e0eb6c8daf892b3056f08416a96d68b0a358b7c46b99c8a50481b22631a4dfc0 md5: e7cb0f5745e4c5035a460248334af7eb diff --git a/pyproject.toml b/pyproject.toml index 3c08e5dc..59171e42 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,7 +32,7 @@ classifiers = [ "Programming Language :: Python :: 3 :: Only", "Topic :: Scientific/Engineering", ] -requires-python = ">=3.13,<3.14" +requires-python = ">=3.14,<3.15" dependencies = [ "dags", "frozendict", @@ -166,7 +166,7 @@ ty = {features = ["test", "ty"], solve-group = "default"} # ====================================================================================== [tool.ruff] -target-version = "py313" +target-version = "py314" fix = true line-length = 88 From 921c612764d7a490e093fd6f82afc45c9cda4d90 Mon Sep 17 00:00:00 2001 From: Hans-Martin von Gaudecker Date: Fri, 9 Jan 2026 10:45:46 +0100 Subject: [PATCH 08/27] Move TESTS_DIR -> TEST_DATA_DIR, which points to a subdirectory of src so that config.TEST_DATA_DIR is valid also for skillmodels the package (as opposed to the project). 
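For orientation, a minimal sketch of what src/skillmodels/config.py could look
like after this change. Only TEST_DATA_DIR pointing at the new
src/skillmodels/test_data directory is confirmed by the rename list below; the
PACKAGE_DIR helper name and the REGRESSION_VAULT location are assumptions (the
notebook diffs only show that REGRESSION_VAULT is importable from
skillmodels.config):

    from pathlib import Path

    # Directory of the skillmodels package itself. Anchoring paths here makes
    # them valid both in a source checkout and in site-packages.
    PACKAGE_DIR = Path(__file__).parent  # hypothetical helper name

    # Test fixtures now ship inside the package (moved from tests/).
    TEST_DATA_DIR = PACKAGE_DIR / "test_data"

    # Assumption: the regression vault is exposed analogously; this patch only
    # shows notebooks importing REGRESSION_VAULT from skillmodels.config.
    REGRESSION_VAULT = TEST_DATA_DIR / "regression_vault"

With such a layout, TEST_DATA_DIR / "model2.yaml" (as used in the notebooks
below) would resolve regardless of whether skillmodels is imported from the
repository or from an installed wheel.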
--- .pre-commit-config.yaml | 2 +- docs/source/getting_started/tutorial.ipynb | 11 +- .../how_to_simulate_dataset.ipynb | 8 +- .../how_to_visualize_correlations.ipynb | 8 +- ...sualize_pairwise_factor_distribution.ipynb | 8 +- ...ow_to_visualize_transition_equations.ipynb | 8 +- pixi.lock | 123 +++++++++--------- src/skillmodels/config.py | 5 +- src/skillmodels/constraints.py | 18 +-- src/skillmodels/likelihood_function_debug.py | 6 +- src/skillmodels/process_data.py | 16 +-- src/skillmodels/process_debug_data.py | 2 +- src/skillmodels/simulate_data.py | 10 +- .../skillmodels/test_data}/model2.yaml | 0 .../model2_correct_params_index.csv | 0 .../test_data}/model2_correct_update_info.csv | 0 .../test_data}/model2_simulated_data.dta | Bin .../model2_with_endog_correct_update_info.csv | 0 .../simplest_augmented_data_expected.csv | 0 .../test_data}/simplest_augmented_model.yaml | 0 .../visualize_factor_distributions.py | 2 +- .../visualize_transition_equations.py | 8 +- tests/test_constraints.py | 28 ++-- tests/test_correlation_heatmap.py | 22 ++-- tests/test_decorators.py | 2 +- tests/test_filtered_states.py | 10 +- tests/test_kalman_filters.py | 10 +- tests/test_likelihood_regression.py | 16 +-- tests/test_params_index.py | 25 ++-- tests/test_parse_params.py | 10 +- tests/test_process_data.py | 13 +- tests/test_process_model.py | 21 ++- tests/test_simulate_data.py | 10 +- tests/test_transition_functions.py | 25 ++-- tests/test_utilities.py | 46 +++---- tests/test_visualize_factor_distributions.py | 16 +-- tests/test_visualize_transition_equations.py | 9 +- 37 files changed, 240 insertions(+), 258 deletions(-) rename {tests => src/skillmodels/test_data}/model2.yaml (100%) rename {tests => src/skillmodels/test_data}/model2_correct_params_index.csv (100%) rename {tests => src/skillmodels/test_data}/model2_correct_update_info.csv (100%) rename {tests => src/skillmodels/test_data}/model2_simulated_data.dta (100%) rename {tests => src/skillmodels/test_data}/model2_with_endog_correct_update_info.csv (100%) rename {tests => src/skillmodels/test_data}/simplest_augmented_data_expected.csv (100%) rename {tests => src/skillmodels/test_data}/simplest_augmented_model.yaml (100%) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ee3f39be..7c38d11f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -45,7 +45,7 @@ repos: hooks: - id: yamllint - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.14.10 + rev: v0.14.11 hooks: - id: ruff-check types_or: diff --git a/docs/source/getting_started/tutorial.ipynb b/docs/source/getting_started/tutorial.ipynb index 15906e69..81ce0965 100644 --- a/docs/source/getting_started/tutorial.ipynb +++ b/docs/source/getting_started/tutorial.ipynb @@ -19,7 +19,7 @@ "import pandas as pd\n", "import yaml\n", "\n", - "from skillmodels.config import TEST_DIR\n", + "from skillmodels.config import REGRESSION_VAULT, TEST_DATA_DIR\n", "from skillmodels.maximization_inputs import get_maximization_inputs" ] }, @@ -41,7 +41,7 @@ "metadata": {}, "outputs": [], "source": [ - "with open(TEST_DIR / \"model2.yaml\") as y:\n", + "with open(TEST_DATA_DIR / \"model2.yaml\") as y:\n", " model_dict = yaml.load(y, Loader=yaml.SafeLoader)" ] }, @@ -51,7 +51,7 @@ "metadata": {}, "outputs": [], "source": [ - "data = pd.read_stata(TEST_DIR / \"model2_simulated_data.dta\")\n", + "data = pd.read_stata(TEST_DATA_DIR / \"model2_simulated_data.dta\")\n", "data = data.set_index([\"caseid\", \"period\"])" ] }, @@ -110,7 +110,7 @@ "outputs": [], "source": [ 
"index_cols = [\"category\", \"period\", \"name1\", \"name2\"]\n", - "chs_path = TEST_DIR / \"regression_vault\" / \"chs_results.csv\"\n", + "chs_path = REGRESSION_VAULT / \"chs_results.csv\"\n", "chs_values = pd.read_csv(chs_path)\n", "chs_values = chs_values.set_index(index_cols)\n", "chs_values = chs_values[[\"chs_value\", \"good_start_value\", \"bad_start_value\"]]\n", @@ -289,7 +289,6 @@ "metadata": {}, "outputs": [], "source": [ - "pc, pp = om.process_constraints(constraints, params)\n", "params[\"group\"] = params.index.get_level_values(\"category\")\n", "params.loc[\"controls\", \"group\"] = params.loc[\"controls\"].index.get_level_values(\"name2\")\n", "\n", @@ -299,8 +298,6 @@ " + params.index.get_level_values(\"period\").astype(str)\n", ")\n", "params[\"group\"] = params[\"group\"].str.replace(\"_\", \"-\")\n", - "params[\"group\"] = params[\"group\"].astype(\"O\")\n", - "params.loc[~pp[\"_internal_free\"], \"group\"] = None\n", "params" ] }, diff --git a/docs/source/how_to_guides/how_to_simulate_dataset.ipynb b/docs/source/how_to_guides/how_to_simulate_dataset.ipynb index bad7647c..b6b4685e 100644 --- a/docs/source/how_to_guides/how_to_simulate_dataset.ipynb +++ b/docs/source/how_to_guides/how_to_simulate_dataset.ipynb @@ -9,7 +9,7 @@ "import pandas as pd\n", "import yaml\n", "\n", - "from skillmodels.config import TEST_DIR\n", + "from skillmodels.config import REGRESSION_VAULT, TEST_DATA_DIR\n", "from skillmodels.simulate_data import simulate_dataset" ] }, @@ -34,13 +34,13 @@ "metadata": {}, "outputs": [], "source": [ - "with open(TEST_DIR / \"model2.yaml\") as y:\n", + "with open(TEST_DATA_DIR / \"model2.yaml\") as y:\n", " model_dict = yaml.load(y, Loader=yaml.FullLoader)\n", "\n", - "data = pd.read_stata(TEST_DIR / \"model2_simulated_data.dta\")\n", + "data = pd.read_stata(TEST_DATA_DIR / \"model2_simulated_data.dta\")\n", "data = data.set_index([\"caseid\", \"period\"])\n", "\n", - "params = pd.read_csv(TEST_DIR / \"regression_vault\" / \"one_stage_anchoring.csv\")\n", + "params = pd.read_csv(REGRESSION_VAULT / \"one_stage_anchoring.csv\")\n", "params = params.set_index([\"category\", \"period\", \"name1\", \"name2\"])" ] }, diff --git a/docs/source/how_to_guides/how_to_visualize_correlations.ipynb b/docs/source/how_to_guides/how_to_visualize_correlations.ipynb index 299f3207..890cddef 100644 --- a/docs/source/how_to_guides/how_to_visualize_correlations.ipynb +++ b/docs/source/how_to_guides/how_to_visualize_correlations.ipynb @@ -16,7 +16,7 @@ "import pandas as pd\n", "import yaml\n", "\n", - "from skillmodels.config import TEST_DIR\n", + "from skillmodels.config import REGRESSION_VAULT, TEST_DATA_DIR\n", "from skillmodels.correlation_heatmap import (\n", " get_measurements_corr,\n", " get_quasi_scores_corr,\n", @@ -40,7 +40,7 @@ "metadata": {}, "outputs": [], "source": [ - "with open(TEST_DIR / \"model2.yaml\") as y:\n", + "with open(TEST_DATA_DIR / \"model2.yaml\") as y:\n", " model_dict = yaml.load(y, Loader=yaml.FullLoader)" ] }, @@ -50,10 +50,10 @@ "metadata": {}, "outputs": [], "source": [ - "params = pd.read_csv(TEST_DIR / \"regression_vault\" / \"one_stage_anchoring.csv\")\n", + "params = pd.read_csv(REGRESSION_VAULT / \"one_stage_anchoring.csv\")\n", "params = params.set_index([\"category\", \"period\", \"name1\", \"name2\"])\n", "\n", - "data = pd.read_stata(TEST_DIR / \"model2_simulated_data.dta\")\n", + "data = pd.read_stata(TEST_DATA_DIR / \"model2_simulated_data.dta\")\n", "data = data.set_index([\"caseid\", \"period\"])" ] }, diff --git 
a/docs/source/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb b/docs/source/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb index db9ec045..e0f5944e 100644 --- a/docs/source/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb +++ b/docs/source/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb @@ -21,7 +21,7 @@ "import pandas as pd\n", "import yaml\n", "\n", - "from skillmodels.config import TEST_DIR\n", + "from skillmodels.config import REGRESSION_VAULT, TEST_DATA_DIR\n", "from skillmodels.maximization_inputs import get_maximization_inputs\n", "from skillmodels.simulate_data import simulate_dataset\n", "from skillmodels.visualize_factor_distributions import (\n", @@ -57,12 +57,12 @@ "metadata": {}, "outputs": [], "source": [ - "with open(TEST_DIR / \"model2.yaml\") as y:\n", + "with open(TEST_DATA_DIR / \"model2.yaml\") as y:\n", " model_dict = yaml.load(y, Loader=yaml.FullLoader)\n", - "params = pd.read_csv(TEST_DIR / \"regression_vault\" / \"one_stage_anchoring.csv\")\n", + "params = pd.read_csv(REGRESSION_VAULT / \"one_stage_anchoring.csv\")\n", "params = params.set_index([\"category\", \"period\", \"name1\", \"name2\"])\n", "\n", - "data = pd.read_stata(TEST_DIR / \"model2_simulated_data.dta\")\n", + "data = pd.read_stata(TEST_DATA_DIR / \"model2_simulated_data.dta\")\n", "data = data.set_index([\"caseid\", \"period\"])" ] }, diff --git a/docs/source/how_to_guides/how_to_visualize_transition_equations.ipynb b/docs/source/how_to_guides/how_to_visualize_transition_equations.ipynb index f67e28b4..17c9a714 100644 --- a/docs/source/how_to_guides/how_to_visualize_transition_equations.ipynb +++ b/docs/source/how_to_guides/how_to_visualize_transition_equations.ipynb @@ -10,7 +10,7 @@ "import pandas as pd\n", "import yaml\n", "\n", - "from skillmodels.config import TEST_DIR\n", + "from skillmodels.config import REGRESSION_VAULT, TEST_DATA_DIR\n", "from skillmodels.visualize_transition_equations import (\n", " combine_transition_plots,\n", " get_transition_plots,\n", @@ -47,13 +47,13 @@ "metadata": {}, "outputs": [], "source": [ - "with open(TEST_DIR / \"model2.yaml\") as y:\n", + "with open(TEST_DATA_DIR / \"model2.yaml\") as y:\n", " model_dict = yaml.load(y, Loader=yaml.FullLoader)\n", "\n", - "params = pd.read_csv(TEST_DIR / \"regression_vault\" / \"one_stage_anchoring.csv\")\n", + "params = pd.read_csv(REGRESSION_VAULT / \"one_stage_anchoring.csv\")\n", "params = params.set_index([\"category\", \"period\", \"name1\", \"name2\"])\n", "\n", - "data = pd.read_stata(TEST_DIR / \"model2_simulated_data.dta\")\n", + "data = pd.read_stata(TEST_DATA_DIR / \"model2_simulated_data.dta\")\n", "data = data.set_index([\"caseid\", \"period\"])" ] }, diff --git a/pixi.lock b/pixi.lock index 9e63b939..b3b1ff96 100644 --- a/pixi.lock +++ b/pixi.lock @@ -236,7 +236,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/unicodedata2-17.0.0-py314h5bd0f2a_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.36.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -274,7 +274,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/46/0c/c75bbfb967457a0b7670b8ad267bfc4fffdf341c074e0a80db06c24ccfd4/nvidia_nvjitlink_cu12-12.9.86-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl - pypi: https://files.pythonhosted.org/packages/64/b9/6ab941001c23cfb43499b5b0b7417b0bb4dfba3a29ffa2b06985422dad50/nvidia_nvshmem_cu12-3.5.19-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 + - pypi: git+https://github.com/optimagic-dev/optimagic.git#522b8c9a21226569ffd25e950e44f0c5de308c9d - pypi: https://files.pythonhosted.org/packages/15/b2/0e62f78c0c5ba7e3d2c5945a82456f4fac76c480940f805e0b97fcbc2f65/pandas-2.3.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/b3/27/caf606ee924282fe4747ee4fd454b335a72a6e018f97eab5ff7f28199e16/sqlalchemy-2.0.45-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl @@ -484,7 +484,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/unicodedata2-17.0.0-py314h5bd0f2a_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.36.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -507,7 +507,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/c6/bb/82c7dcf38070b46172a517e2334e665c5bf374a262f99a283ea454bece7c/ml_dtypes-0.5.4-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 + - pypi: git+https://github.com/optimagic-dev/optimagic.git#522b8c9a21226569ffd25e950e44f0c5de308c9d - pypi: https://files.pythonhosted.org/packages/15/b2/0e62f78c0c5ba7e3d2c5945a82456f4fac76c480940f805e0b97fcbc2f65/pandas-2.3.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/b3/27/caf606ee924282fe4747ee4fd454b335a72a6e018f97eab5ff7f28199e16/sqlalchemy-2.0.45-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl @@ -705,7 +705,7 @@ environments: - conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/unicodedata2-17.0.0-py314h0612a62_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.36.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -727,7 +727,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/72/4e/1339dc6e2557a344f5ba5590872e80346f76f6cb2ac3dd16e4666e88818c/ml_dtypes-0.5.4-cp314-cp314-macosx_10_13_universal2.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 + - pypi: git+https://github.com/optimagic-dev/optimagic.git#522b8c9a21226569ffd25e950e44f0c5de308c9d - pypi: https://files.pythonhosted.org/packages/21/00/266d6b357ad5e6d3ad55093a7e8efc7dd245f5a842b584db9f30b0f0a287/pandas-2.3.3-cp314-cp314-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/bf/e1/3ccb13c643399d22289c6a9786c1a91e3dcbb68bce4beb44926ac2c557bf/sqlalchemy-2.0.45-py3-none-any.whl @@ -824,7 +824,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/libfreetype6-2.14.1-hdbac1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libgcc-15.2.0-h8ee18e1_16.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libgomp-15.2.0-h8ee18e1_16.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/libhwloc-2.12.1-default_h4379cf1_1003.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libhwloc-2.12.2-default_h4379cf1_1000.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libiconv-1.18-hc1393d2_2.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libjpeg-turbo-3.1.2-hfd05255_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/liblapack-3.11.0-5_hf9ab0e9_mkl.conda @@ -909,7 +909,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/tbb-2022.3.0-hd094cb3_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/tbb-2022.3.0-h3155e25_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyh6dadd2b_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/tk-8.6.13-h2c6b04d_3.conda @@ -928,7 +928,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/vc-14.3-h41ae7f8_34.conda - conda: 
https://conda.anaconda.org/conda-forge/win-64/vc14_runtime-14.44.35208-h818238b_34.conda - conda: https://conda.anaconda.org/conda-forge/win-64/vcomp14-14.44.35208-h818238b_34.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.36.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -953,7 +953,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/e9/93/2bfed22d2498c468f6bcd0d9f56b033eaa19f33320389314c19ef6766413/ml_dtypes-0.5.4-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 + - pypi: git+https://github.com/optimagic-dev/optimagic.git#522b8c9a21226569ffd25e950e44f0c5de308c9d - pypi: https://files.pythonhosted.org/packages/a6/3d/124ac75fcd0ecc09b8fdccb0246ef65e35b012030defb0e0eba2cbbbe948/pandas-2.3.3-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/5a/dc/491b7661614ab97483abf2056be1deee4dc2490ecbf7bff9ab5cdbac86e1/pyreadline3-3.5.4-py3-none-any.whl @@ -1183,7 +1183,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyhc90fa1f_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/textual-7.0.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/textual-7.0.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_ha0e22de_103.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda @@ -1197,7 +1197,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/unicodedata2-17.0.0-py314h5bd0f2a_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.36.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -1220,7 +1220,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: 
https://files.pythonhosted.org/packages/c6/bb/82c7dcf38070b46172a517e2334e665c5bf374a262f99a283ea454bece7c/ml_dtypes-0.5.4-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 + - pypi: git+https://github.com/optimagic-dev/optimagic.git#522b8c9a21226569ffd25e950e44f0c5de308c9d - pypi: https://files.pythonhosted.org/packages/15/b2/0e62f78c0c5ba7e3d2c5945a82456f4fac76c480940f805e0b97fcbc2f65/pandas-2.3.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/b3/27/caf606ee924282fe4747ee4fd454b335a72a6e018f97eab5ff7f28199e16/sqlalchemy-2.0.45-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl @@ -1417,7 +1417,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyhc90fa1f_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/textual-7.0.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/textual-7.0.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tk-8.6.13-h892fb3f_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda @@ -1431,7 +1431,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/unicodedata2-17.0.0-py314h0612a62_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.36.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -1453,7 +1453,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/72/4e/1339dc6e2557a344f5ba5590872e80346f76f6cb2ac3dd16e4666e88818c/ml_dtypes-0.5.4-cp314-cp314-macosx_10_13_universal2.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 + - pypi: git+https://github.com/optimagic-dev/optimagic.git#522b8c9a21226569ffd25e950e44f0c5de308c9d - pypi: https://files.pythonhosted.org/packages/21/00/266d6b357ad5e6d3ad55093a7e8efc7dd245f5a842b584db9f30b0f0a287/pandas-2.3.3-cp314-cp314-macosx_11_0_arm64.whl - pypi: 
https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/bf/e1/3ccb13c643399d22289c6a9786c1a91e3dcbb68bce4beb44926ac2c557bf/sqlalchemy-2.0.45-py3-none-any.whl @@ -1552,7 +1552,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/libfreetype6-2.14.1-hdbac1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libgcc-15.2.0-h8ee18e1_16.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libgomp-15.2.0-h8ee18e1_16.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/libhwloc-2.12.1-default_h4379cf1_1003.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libhwloc-2.12.2-default_h4379cf1_1000.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libiconv-1.18-hc1393d2_2.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libjpeg-turbo-3.1.2-hfd05255_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/liblapack-3.11.0-5_hf9ab0e9_mkl.conda @@ -1640,7 +1640,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/tbb-2022.3.0-hd094cb3_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/tbb-2022.3.0-h3155e25_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyh6dadd2b_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/tk-8.6.13-h2c6b04d_3.conda @@ -1659,7 +1659,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/vc-14.3-h41ae7f8_34.conda - conda: https://conda.anaconda.org/conda-forge/win-64/vc14_runtime-14.44.35208-h818238b_34.conda - conda: https://conda.anaconda.org/conda-forge/win-64/vcomp14-14.44.35208-h818238b_34.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.36.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -1684,7 +1684,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/e9/93/2bfed22d2498c468f6bcd0d9f56b033eaa19f33320389314c19ef6766413/ml_dtypes-0.5.4-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 + - pypi: git+https://github.com/optimagic-dev/optimagic.git#522b8c9a21226569ffd25e950e44f0c5de308c9d - pypi: https://files.pythonhosted.org/packages/a6/3d/124ac75fcd0ecc09b8fdccb0246ef65e35b012030defb0e0eba2cbbbe948/pandas-2.3.3-cp314-cp314-win_amd64.whl - pypi: 
https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/5a/dc/491b7661614ab97483abf2056be1deee4dc2490ecbf7bff9ab5cdbac86e1/pyreadline3-3.5.4-py3-none-any.whl @@ -1946,7 +1946,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/sysroot_linux-64-2.28-h4ee821c_9.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyhc90fa1f_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/textual-7.0.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/textual-7.0.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_ha0e22de_103.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda @@ -1960,7 +1960,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/unicodedata2-17.0.0-py314h5bd0f2a_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.36.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -1998,7 +1998,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/46/0c/c75bbfb967457a0b7670b8ad267bfc4fffdf341c074e0a80db06c24ccfd4/nvidia_nvjitlink_cu12-12.9.86-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl - pypi: https://files.pythonhosted.org/packages/64/b9/6ab941001c23cfb43499b5b0b7417b0bb4dfba3a29ffa2b06985422dad50/nvidia_nvshmem_cu12-3.5.19-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 + - pypi: git+https://github.com/optimagic-dev/optimagic.git#522b8c9a21226569ffd25e950e44f0c5de308c9d - pypi: https://files.pythonhosted.org/packages/15/b2/0e62f78c0c5ba7e3d2c5945a82456f4fac76c480940f805e0b97fcbc2f65/pandas-2.3.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/b3/27/caf606ee924282fe4747ee4fd454b335a72a6e018f97eab5ff7f28199e16/sqlalchemy-2.0.45-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl @@ -2227,7 +2227,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyhc90fa1f_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/textual-7.0.0-pyhcf101f3_0.conda 
+ - conda: https://conda.anaconda.org/conda-forge/noarch/textual-7.0.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_ha0e22de_103.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda @@ -2241,7 +2241,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/unicodedata2-17.0.0-py314h5bd0f2a_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.36.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -2264,7 +2264,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/c6/bb/82c7dcf38070b46172a517e2334e665c5bf374a262f99a283ea454bece7c/ml_dtypes-0.5.4-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 + - pypi: git+https://github.com/optimagic-dev/optimagic.git#522b8c9a21226569ffd25e950e44f0c5de308c9d - pypi: https://files.pythonhosted.org/packages/15/b2/0e62f78c0c5ba7e3d2c5945a82456f4fac76c480940f805e0b97fcbc2f65/pandas-2.3.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/64/20/69f2a39792a653fd64d916cd563ed79ec6e5dcfa6408c4674021d810afcf/pandas_stubs-2.3.3.251219-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl @@ -2465,7 +2465,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyhc90fa1f_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/textual-7.0.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/textual-7.0.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tk-8.6.13-h892fb3f_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.3.0-pyhcf101f3_0.conda @@ -2479,7 +2479,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/unicodedata2-17.0.0-py314h0612a62_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.36.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -2501,7 +2501,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/72/4e/1339dc6e2557a344f5ba5590872e80346f76f6cb2ac3dd16e4666e88818c/ml_dtypes-0.5.4-cp314-cp314-macosx_10_13_universal2.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 + - pypi: git+https://github.com/optimagic-dev/optimagic.git#522b8c9a21226569ffd25e950e44f0c5de308c9d - pypi: https://files.pythonhosted.org/packages/21/00/266d6b357ad5e6d3ad55093a7e8efc7dd245f5a842b584db9f30b0f0a287/pandas-2.3.3-cp314-cp314-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/64/20/69f2a39792a653fd64d916cd563ed79ec6e5dcfa6408c4674021d810afcf/pandas_stubs-2.3.3.251219-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl @@ -2604,7 +2604,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/libfreetype6-2.14.1-hdbac1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libgcc-15.2.0-h8ee18e1_16.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libgomp-15.2.0-h8ee18e1_16.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/libhwloc-2.12.1-default_h4379cf1_1003.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libhwloc-2.12.2-default_h4379cf1_1000.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libiconv-1.18-hc1393d2_2.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libjpeg-turbo-3.1.2-hfd05255_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/liblapack-3.11.0-5_hf9ab0e9_mkl.conda @@ -2692,7 +2692,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/tbb-2022.3.0-hd094cb3_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/tbb-2022.3.0-h3155e25_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyh6dadd2b_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/tk-8.6.13-h2c6b04d_3.conda @@ -2711,7 +2711,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/vc-14.3-h41ae7f8_34.conda - conda: https://conda.anaconda.org/conda-forge/win-64/vc14_runtime-14.44.35208-h818238b_34.conda - conda: https://conda.anaconda.org/conda-forge/win-64/vcomp14-14.44.35208-h818238b_34.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.36.0-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -2736,7 +2736,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/e9/93/2bfed22d2498c468f6bcd0d9f56b033eaa19f33320389314c19ef6766413/ml_dtypes-0.5.4-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 + - pypi: git+https://github.com/optimagic-dev/optimagic.git#522b8c9a21226569ffd25e950e44f0c5de308c9d - pypi: https://files.pythonhosted.org/packages/a6/3d/124ac75fcd0ecc09b8fdccb0246ef65e35b012030defb0e0eba2cbbbe948/pandas-2.3.3-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/64/20/69f2a39792a653fd64d916cd563ed79ec6e5dcfa6408c4674021d810afcf/pandas_stubs-2.3.3.251219-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl @@ -4501,6 +4501,7 @@ packages: - rpds-py >=0.25.0 - python license: MIT + license_family: MIT purls: - pkg:pypi/jsonschema?source=compressed-mapping size: 82356 @@ -4533,6 +4534,7 @@ packages: - uri-template - webcolors >=24.6.0 license: MIT + license_family: MIT purls: [] size: 4740 timestamp: 1767839954258 @@ -5522,9 +5524,9 @@ packages: purls: [] size: 663567 timestamp: 1765260367147 -- conda: https://conda.anaconda.org/conda-forge/win-64/libhwloc-2.12.1-default_h4379cf1_1003.conda - sha256: 2d534c09f92966b885acb3f4a838f7055cea043165a03079a539b06c54e20a49 - md5: d1699ce4fe195a9f61264a1c29b87035 +- conda: https://conda.anaconda.org/conda-forge/win-64/libhwloc-2.12.2-default_h4379cf1_1000.conda + sha256: 8cdf11333a81085468d9aa536ebb155abd74adc293576f6013fc0c85a7a90da3 + md5: 3b576f6860f838f950c570f4433b086e depends: - libwinpthread >=12.0.0.r4.gg4f2fc60ca - libxml2 @@ -5535,8 +5537,8 @@ packages: license: BSD-3-Clause license_family: BSD purls: [] - size: 2412642 - timestamp: 1765090345611 + size: 2411241 + timestamp: 1765104337762 - conda: https://conda.anaconda.org/conda-forge/linux-64/libiconv-1.18-h3b78370_2.conda sha256: c467851a7312765447155e071752d7bf9bf44d610a5687e32706f480aad2833f md5: 915f5995e94f60e9a4826e0b0920ee88 @@ -6517,7 +6519,7 @@ packages: license: BSD-3-Clause license_family: BSD purls: - - pkg:pypi/mistune?source=compressed-mapping + - pkg:pypi/mistune?source=hash-mapping size: 74250 timestamp: 1766504456031 - conda: https://conda.anaconda.org/conda-forge/win-64/mkl-2025.3.0-hac47afa_455.conda @@ -6969,9 +6971,9 @@ packages: version: 3.4.0 sha256: 69bb92469f86a1565195ece4ac0323943e83477171b91d24c35afe028a90d7cd requires_python: '>=3.8' -- pypi: git+https://github.com/optimagic-dev/optimagic.git#e02ea4743cac9f861a5813f3b4b1283fd2ade730 +- pypi: git+https://github.com/optimagic-dev/optimagic.git#522b8c9a21226569ffd25e950e44f0c5de308c9d name: optimagic - version: 0.5.3.dev30+ge02ea4743 + version: 0.5.3.dev31+g522b8c9a2 requires_dist: - annotated-types - cloudpickle @@ -7484,6 +7486,7 @@ packages: constrains: - ipywidgets >=7.6 license: MIT + license_family: MIT purls: - 
pkg:pypi/plotly?source=hash-mapping size: 4455861 @@ -8468,8 +8471,8 @@ packages: timestamp: 1753199211006 - pypi: ./ name: skillmodels - version: 0.0.24.dev243+g3bb334b10.d20260109 - sha256: ec4cbcbac9f3a9700240882f959c3cad0f86692d93a963561b182ac9d6004ef1 + version: 0.0.24.dev244+ge2d687a5a.d20260109 + sha256: 06b1b0d92e84a2a2f4c422c119aaa56cc04f1aadaf2ee53c393c67d9378aaf37 requires_dist: - dags - frozendict @@ -8658,19 +8661,19 @@ packages: requires_dist: - pyreadline3 ; sys_platform == 'win32' requires_python: '>=3.8' -- conda: https://conda.anaconda.org/conda-forge/win-64/tbb-2022.3.0-hd094cb3_1.conda - sha256: c31cac57913a699745d124cdc016a63e31c5749f16f60b3202414d071fc50573 - md5: 17c38aaf14c640b85c4617ccb59c1146 +- conda: https://conda.anaconda.org/conda-forge/win-64/tbb-2022.3.0-h3155e25_2.conda + sha256: abd9a489f059fba85c8ffa1abdaa4d515d6de6a3325238b8e81203b913cf65a9 + md5: 0f9817ffbe25f9e69ceba5ea70c52606 depends: - - libhwloc >=2.12.1,<2.12.2.0a0 + - libhwloc >=2.12.2,<2.12.3.0a0 - ucrt >=10.0.20348.0 - vc >=14.3,<15 - vc14_runtime >=14.44.35208 license: Apache-2.0 license_family: APACHE purls: [] - size: 155714 - timestamp: 1762510341121 + size: 155869 + timestamp: 1767886839029 - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyh6dadd2b_1.conda sha256: b375e8df0d5710717c31e7c8e93c025c37fa3504aea325c7a55509f64e5d4340 md5: e43ca10d61e55d0a8ec5d8c62474ec9e @@ -8701,9 +8704,9 @@ packages: - pkg:pypi/terminado?source=hash-mapping size: 24749 timestamp: 1766513766867 -- conda: https://conda.anaconda.org/conda-forge/noarch/textual-7.0.0-pyhcf101f3_0.conda - sha256: 50ea42e243d349b8218168c06bfd408f4dcda68d4364de1f5866507e009e3cfd - md5: ca39d364b4f1b395bb6a70312d455c28 +- conda: https://conda.anaconda.org/conda-forge/noarch/textual-7.0.1-pyhcf101f3_0.conda + sha256: b601d7f7d200465547ed76fd6b95701d94b0bbf0ab1d9dae4beb2f7012947cdd + md5: 13e92b552eb58a0c243a967a7d9e4d78 depends: - pygments >=2.19.2,<3.0.0 - typing_extensions >=4.4.0,<5.0.0 @@ -8720,8 +8723,8 @@ packages: license_family: MIT purls: - pkg:pypi/textual?source=hash-mapping - size: 526014 - timestamp: 1767448924135 + size: 525875 + timestamp: 1767859034631 - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.5.1-pyhcf101f3_0.conda sha256: 7c803480dbfb8b536b9bf6287fa2aa0a4f970f8c09075694174eb4550a4524cd md5: c0d0b883e97906f7524e2aac94be0e0d @@ -9067,12 +9070,12 @@ packages: purls: [] size: 115235 timestamp: 1767320173250 -- conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.35.4-pyhd8ed1ab_0.conda - sha256: 77193c99c6626c58446168d3700f9643d8c0dab1f6deb6b9dd039e6872781bfb - md5: cfccfd4e8d9de82ed75c8e2c91cab375 +- conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.36.0-pyhd8ed1ab_0.conda + sha256: cbb40ae88ccc72e95ce00911a73d9175eead4fb4e74925b0e9557bb60737317e + md5: c9a9b6e144b880308f5eedc905fe503d depends: - distlib >=0.3.7,<1 - - filelock >=3.12.2,<4 + - filelock >=3.20.1,<4 - platformdirs >=3.9.1,<5 - python >=3.10 - typing_extensions >=4.13.2 @@ -9080,8 +9083,8 @@ packages: license_family: MIT purls: - pkg:pypi/virtualenv?source=hash-mapping - size: 4401341 - timestamp: 1761726489722 + size: 4403353 + timestamp: 1767880093070 - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda sha256: e311b64e46c6739e2a35ab8582c20fa30eb608da130625ed379f4467219d4813 md5: 7e1e5ff31239f9cd5855714df8a3783d diff --git a/src/skillmodels/config.py b/src/skillmodels/config.py index ddc66a44..c5901cfd 100644 --- a/src/skillmodels/config.py 
+++ b/src/skillmodels/config.py @@ -1,3 +1,6 @@ from pathlib import Path -TEST_DIR = Path(__file__).resolve().parent / "tests" +TEST_DATA_DIR = Path(__file__).resolve().parent / "test_data" +REGRESSION_VAULT = ( + Path(__file__).resolve().parent.parent.parent / "tests" / "regression_vault" +) diff --git a/src/skillmodels/constraints.py b/src/skillmodels/constraints.py index fe3adbc3..e9d3765a 100644 --- a/src/skillmodels/constraints.py +++ b/src/skillmodels/constraints.py @@ -16,12 +16,12 @@ def get_constraints_dicts( - dimensions: "Dimensions", - labels: "Labels", - anchoring_info: "Anchoring", + dimensions: Dimensions, + labels: Labels, + anchoring_info: Anchoring, update_info: pd.DataFrame, normalizations: dict[str, dict[str, list]], - endogenous_factors_info: "EndogenousFactorsInfo", + endogenous_factors_info: EndogenousFactorsInfo, ) -> list[dict]: """Generate constraints implied by the model specification. @@ -241,7 +241,7 @@ def _get_stage_constraints( return constraints_dicts -def _get_constant_factors_constraints(labels: "Labels") -> list[dict]: +def _get_constant_factors_constraints(labels: Labels) -> list[dict]: """Fix shock variances of constant factors to `bounds_distance`. Args: @@ -302,7 +302,7 @@ def _get_initial_states_constraints( return constraints_dicts -def _get_transition_constraints(labels: "Labels") -> list[dict]: +def _get_transition_constraints(labels: Labels) -> list[dict]: """Collect possible constraints on transition parameters. Args: @@ -332,7 +332,7 @@ def _get_transition_constraints(labels: "Labels") -> list[dict]: def _get_anchoring_constraints( update_info: pd.DataFrame, controls: tuple[str, ...], - anchoring_info: "Anchoring", + anchoring_info: Anchoring, periods: tuple[int, ...], ) -> list[dict]: """Constraints on anchoring parameters. @@ -398,8 +398,8 @@ def _get_anchoring_constraints( def _get_constraints_for_augmented_periods( - labels: "Labels", - endogenous_factors_info: "EndogenousFactorsInfo", + labels: Labels, + endogenous_factors_info: EndogenousFactorsInfo, ) -> list[dict]: """Constraints for augmented periods. diff --git a/src/skillmodels/likelihood_function_debug.py b/src/skillmodels/likelihood_function_debug.py index a4c8e16c..c1d4d64e 100644 --- a/src/skillmodels/likelihood_function_debug.py +++ b/src/skillmodels/likelihood_function_debug.py @@ -23,9 +23,9 @@ def log_likelihood( transition_func: Callable[..., Array], sigma_scaling_factor: float, sigma_weights: Array, - dimensions: "Dimensions", - labels: "Labels", - estimation_options: "EstimationOptions", + dimensions: Dimensions, + labels: Labels, + estimation_options: EstimationOptions, is_measurement_iteration: Array, is_predict_iteration: Array, iteration_to_period: Array, diff --git a/src/skillmodels/process_data.py b/src/skillmodels/process_data.py index 28f2fb36..0337bd65 100644 --- a/src/skillmodels/process_data.py +++ b/src/skillmodels/process_data.py @@ -13,9 +13,9 @@ def process_data( df: pd.DataFrame, has_endogenous_factors: bool, - labels: "Labels", + labels: Labels, update_info: pd.DataFrame, - anchoring_info: "Anchoring", + anchoring_info: Anchoring, purpose: Literal["estimation", "anything", "simulation"] = "estimation", ) -> dict[str, Any]: """Process the data for estimation. 
@@ -104,7 +104,7 @@ def _get_period_data_for_endogenous_factors( aug_period: int, period: int, df: pd.DataFrame, - labels: "Labels", + labels: Labels, update_info: pd.DataFrame, ) -> pd.DataFrame: meas = _get_period_measurements(update_info, aug_period) @@ -128,7 +128,7 @@ def _get_period_data_for_endogenous_factors( def _augment_data_for_endogenous_factors( df: pd.DataFrame, - labels: "Labels", + labels: Labels, update_info: pd.DataFrame, ) -> pd.DataFrame: """Make room for endogenous factors by doubling up the periods. @@ -162,7 +162,7 @@ def _augment_data_for_endogenous_factors( def _add_copies_of_anchoring_outcome( df: pd.DataFrame, - anchoring_info: "Anchoring", + anchoring_info: Anchoring, ) -> pd.DataFrame: df = df.copy() for factor in anchoring_info.factors: @@ -174,7 +174,7 @@ def _add_copies_of_anchoring_outcome( def _check_data( # noqa: C901 df: pd.DataFrame, update_info: pd.DataFrame, - labels: "Labels", + labels: Labels, purpose: Literal["estimation", "anything", "simulation"], ) -> None: var_report = pd.DataFrame(index=update_info.index[:0], columns=["problem"]) @@ -256,7 +256,7 @@ def _generate_measurements_array( def _generate_controls_array( df: pd.DataFrame, - labels: "Labels", + labels: Labels, n_obs: int, ) -> Array: arr = np.zeros((len(labels.aug_periods), n_obs, len(labels.controls))) @@ -269,7 +269,7 @@ def _generate_controls_array( def _generate_observed_factor_array( df: pd.DataFrame, - labels: "Labels", + labels: Labels, n_obs: int, ) -> Array: arr = np.zeros((len(labels.aug_periods), n_obs, len(labels.observed_factors))) diff --git a/src/skillmodels/process_debug_data.py b/src/skillmodels/process_debug_data.py index df3ccbeb..48d479e1 100644 --- a/src/skillmodels/process_debug_data.py +++ b/src/skillmodels/process_debug_data.py @@ -11,7 +11,7 @@ def process_debug_data( debug_data: dict[str, Any], - model: "ProcessedModel", + model: ProcessedModel, ) -> dict[str, Any]: """Process the raw debug data into pandas objects that make visualization easy. diff --git a/src/skillmodels/simulate_data.py b/src/skillmodels/simulate_data.py index 5a87910e..13ef693b 100644 --- a/src/skillmodels/simulate_data.py +++ b/src/skillmodels/simulate_data.py @@ -184,15 +184,15 @@ def _simulate_dataset( covs: Array, log_weights: Array, pardict: dict, - labels: "Labels", - dimensions: "Dimensions", + labels: Labels, + dimensions: Dimensions, n_obs: int, has_endogenous_factors: bool, update_info: pd.DataFrame, control_data: Array, observed_factors: Array, policies: list[dict], - transition_info: "TransitionInfo", + transition_info: TransitionInfo, ) -> tuple[pd.DataFrame, pd.DataFrame]: """Simulate datasets generated by a latent factor model. @@ -325,7 +325,7 @@ def _collapse_aug_periods_to_periods( df: pd.DataFrame, factors: tuple[str, ...], aug_periods_to_periods: Mapping[int, int], - endogenous_factors_info: "EndogenousFactorsInfo", + endogenous_factors_info: EndogenousFactorsInfo, ) -> pd.DataFrame: """Collapse dataframe with aug_period index to user-facing period index. 
@@ -394,7 +394,7 @@ def _get_shock( def generate_start_states( n_obs: int, - dimensions: "Dimensions", + dimensions: Dimensions, dist_args: list[dict], weights: NDArray[np.floating], ) -> NDArray[np.floating]: diff --git a/tests/model2.yaml b/src/skillmodels/test_data/model2.yaml similarity index 100% rename from tests/model2.yaml rename to src/skillmodels/test_data/model2.yaml diff --git a/tests/model2_correct_params_index.csv b/src/skillmodels/test_data/model2_correct_params_index.csv similarity index 100% rename from tests/model2_correct_params_index.csv rename to src/skillmodels/test_data/model2_correct_params_index.csv diff --git a/tests/model2_correct_update_info.csv b/src/skillmodels/test_data/model2_correct_update_info.csv similarity index 100% rename from tests/model2_correct_update_info.csv rename to src/skillmodels/test_data/model2_correct_update_info.csv diff --git a/tests/model2_simulated_data.dta b/src/skillmodels/test_data/model2_simulated_data.dta similarity index 100% rename from tests/model2_simulated_data.dta rename to src/skillmodels/test_data/model2_simulated_data.dta diff --git a/tests/model2_with_endog_correct_update_info.csv b/src/skillmodels/test_data/model2_with_endog_correct_update_info.csv similarity index 100% rename from tests/model2_with_endog_correct_update_info.csv rename to src/skillmodels/test_data/model2_with_endog_correct_update_info.csv diff --git a/tests/simplest_augmented_data_expected.csv b/src/skillmodels/test_data/simplest_augmented_data_expected.csv similarity index 100% rename from tests/simplest_augmented_data_expected.csv rename to src/skillmodels/test_data/simplest_augmented_data_expected.csv diff --git a/tests/simplest_augmented_model.yaml b/src/skillmodels/test_data/simplest_augmented_model.yaml similarity index 100% rename from tests/simplest_augmented_model.yaml rename to src/skillmodels/test_data/simplest_augmented_model.yaml diff --git a/src/skillmodels/visualize_factor_distributions.py b/src/skillmodels/visualize_factor_distributions.py index 728cce9f..431b0d47 100644 --- a/src/skillmodels/visualize_factor_distributions.py +++ b/src/skillmodels/visualize_factor_distributions.py @@ -694,7 +694,7 @@ def _get_ordered_factors( def _get_factors( factors: list[str] | None, observed_factors: bool, - model: "ProcessedModel", + model: ProcessedModel, ) -> list[str]: """Proccess factor names to return list of strings.""" if factors is None: diff --git a/src/skillmodels/visualize_transition_equations.py b/src/skillmodels/visualize_transition_equations.py index bebb50b8..de49c5df 100644 --- a/src/skillmodels/visualize_transition_equations.py +++ b/src/skillmodels/visualize_transition_equations.py @@ -227,7 +227,7 @@ def get_transition_plots( def _get_dictionary_with_plots( - model: "ProcessedModel", + model: ProcessedModel, data: pd.DataFrame, params: pd.DataFrame, states: pd.DataFrame, @@ -372,7 +372,7 @@ def _get_state_ranges( def _get_pardict( - model: "ProcessedModel", + model: ProcessedModel, params: pd.DataFrame, ) -> dict[str, Any]: """Get parsed params dictionary.""" @@ -395,7 +395,7 @@ def _get_pardict( def _set_index_params( - model: "ProcessedModel", + model: ProcessedModel, params: pd.DataFrame, ) -> pd.DataFrame: """Reset index of params data frame to model implied values.""" @@ -412,7 +412,7 @@ def _set_index_params( def _get_states_data( - model: "ProcessedModel", + model: ProcessedModel, period: int, data: pd.DataFrame, states: pd.DataFrame, diff --git a/tests/test_constraints.py b/tests/test_constraints.py index 88e8498f..ddecd1ee 
100644 --- a/tests/test_constraints.py +++ b/tests/test_constraints.py @@ -1,5 +1,3 @@ -from pathlib import Path - import numpy as np import pandas as pd import pytest @@ -7,6 +5,7 @@ from frozendict import frozendict from pandas.testing import assert_frame_equal +from skillmodels.config import TEST_DATA_DIR from skillmodels.constraints import ( _get_anchoring_constraints, _get_constant_factors_constraints, @@ -21,9 +20,6 @@ from skillmodels.process_model import process_model from skillmodels.types import Anchoring, Labels -# importing the TEST_DIR from config does not work for test run in conda build -TEST_DIR = Path(__file__).parent.resolve() - def test_add_bounds(): ind_tups = [("shock_sds", i) for i in range(5)] + [ @@ -86,7 +82,7 @@ def test_normalization_constraints(): }, ] - calculated = _get_normalization_constraints(norm, factors=["fac1", "fac2"]) + calculated = _get_normalization_constraints(norm, factors=("fac1", "fac2")) for c in calculated: del c["description"] @@ -120,8 +116,8 @@ def test_mixture_weight_constraints_normal(): def test_stage_constraints(): - stages = [0] - stagemap = [0] * 3 + stages = (0,) + stagemap = (0, 0, 0) expected = [ { @@ -141,8 +137,8 @@ def test_stage_constraints(): def test_stage_constraints_with_endogenous_factors(): - stages = [0, 1, 2, 3] - stagemap = [0, 1, 0, 1, 2, 3] + stages = (0, 1, 2, 3) + stagemap = (0, 1, 0, 1, 2, 3) expected = [ { "loc": [("transition", 0), ("transition", 2)], @@ -207,7 +203,7 @@ def test_constant_factor_constraints(): def test_initial_mean_constraints(): nmixtures = 3 - factors = ["fac1", "fac2", "fac3"] + factors = ("fac1", "fac2", "fac3") ind_tups = [ ("initial_states", 0, "mixture_0", "fac1"), ("initial_states", 0, "mixture_1", "fac1"), @@ -302,7 +298,7 @@ def base_anchoring_info(): def test_anchoring_constraints_no_constraint_needed(anch_uinfo, base_anchoring_info): - calculated = _get_anchoring_constraints(anch_uinfo, [], base_anchoring_info, (0, 1)) + calculated = _get_anchoring_constraints(anch_uinfo, (), base_anchoring_info, (0, 1)) assert calculated == [] @@ -316,7 +312,7 @@ def test_anchoring_constraints_for_constants(anch_uinfo, base_anchoring_info): free_loadings=True, ignore_constant_when_anchoring=False, ) - calculated = _get_anchoring_constraints(anch_uinfo, [], anchoring_info, (0, 1)) + calculated = _get_anchoring_constraints(anch_uinfo, (), anchoring_info, (0, 1)) del calculated[0]["description"] expected = [ @@ -347,7 +343,7 @@ def test_anchoring_constraints_for_controls(anch_uinfo, base_anchoring_info): ) calculated = _get_anchoring_constraints( anch_uinfo, - ["c1", "c2"], + ("c1", "c2"), anchoring_info, (0, 1), ) @@ -385,7 +381,7 @@ def test_anchoring_constraints_for_loadings(anch_uinfo, base_anchoring_info): free_loadings=False, ignore_constant_when_anchoring=False, ) - calculated = _get_anchoring_constraints(anch_uinfo, [], anchoring_info, (0, 1)) + calculated = _get_anchoring_constraints(anch_uinfo, (), anchoring_info, (0, 1)) expected = [ { @@ -415,7 +411,7 @@ def assert_list_equal_except_for_order(list1, list2): @pytest.fixture def simplest_augmented_model(): - with open(TEST_DIR / "simplest_augmented_model.yaml") as y: + with open(TEST_DATA_DIR / "simplest_augmented_model.yaml") as y: model_dict = yaml.load(y, Loader=yaml.FullLoader) return process_model(model_dict) diff --git a/tests/test_correlation_heatmap.py b/tests/test_correlation_heatmap.py index 66e76701..6ecfe9f0 100644 --- a/tests/test_correlation_heatmap.py +++ b/tests/test_correlation_heatmap.py @@ -265,16 +265,16 @@ def 
test_process_factors(): observed_factor = "g" factors = ["b", "d", "g"] all_factors = None - assert list("abcd") == _process_factors(model, all_factors)[0] - assert list("efg") == _process_factors(model, all_factors)[1] - assert [latent_factor] == _process_factors(model, latent_factor)[0] - assert [observed_factor] == _process_factors(model, observed_factor)[1] - assert factors[:-1] == _process_factors(model, factors)[0] - assert [factors[-1] == _process_factors(model, factors)[1]] + assert list("abcd") == _process_factors(model, all_factors)[0] # ty: ignore[invalid-argument-type] + assert list("efg") == _process_factors(model, all_factors)[1] # ty: ignore[invalid-argument-type] + assert [latent_factor] == _process_factors(model, latent_factor)[0] # ty: ignore[invalid-argument-type] + assert [observed_factor] == _process_factors(model, observed_factor)[1] # ty: ignore[invalid-argument-type] + assert factors[:-1] == _process_factors(model, factors)[0] # ty: ignore[invalid-argument-type] + assert [factors[-1] == _process_factors(model, factors)[1]] # ty: ignore[invalid-argument-type] def test_get_mask_lower_triangle_only(): - corr = np.ones((4, 4)) + corr = pd.DataFrame(np.ones((4, 4))) show_upper = False show_diag = False expected = np.array( @@ -290,7 +290,7 @@ def test_get_mask_lower_triangle_only(): def test_get_mask_lower_triangle_and_diag(): - corr = np.ones((4, 4)) + corr = pd.DataFrame(np.ones((4, 4))) show_upper = False show_diag = True expected = np.array( @@ -306,7 +306,7 @@ def test_get_mask_lower_triangle_and_diag(): def test_get_mask_lower_and_upper_triangle_no_diag(): - corr = np.ones((4, 4)) + corr = pd.DataFrame(np.ones((4, 4))) show_upper = True show_diag = False expected = np.array( @@ -322,9 +322,9 @@ def test_get_mask_lower_and_upper_triangle_no_diag(): def test_get_mask_full_square_matrix(): - corr = np.ones((4, 4)) + corr = pd.DataFrame(np.ones((4, 4))) show_upper = True show_diag = True - expected = corr.astype(bool) + expected = corr.to_numpy().astype(bool) result = _get_mask(corr, show_upper, show_diag) np.testing.assert_array_equal(result, expected) diff --git a/tests/test_decorators.py b/tests/test_decorators.py index 85f9edef..a4f939cf 100644 --- a/tests/test_decorators.py +++ b/tests/test_decorators.py @@ -83,5 +83,5 @@ def f(): return "bla" g = register_params(f, params=["a", "b", "c"]) - assert g.__registered_params__ == ["a", "b", "c"] + assert g.__registered_params__ == ["a", "b", "c"] # ty: ignore[unresolved-attribute] assert g() == "bla" diff --git a/tests/test_filtered_states.py b/tests/test_filtered_states.py index b98f15b2..18ad272c 100644 --- a/tests/test_filtered_states.py +++ b/tests/test_filtered_states.py @@ -5,29 +5,29 @@ import pytest import yaml +from skillmodels.config import TEST_DATA_DIR from skillmodels.filtered_states import get_filtered_states from skillmodels.maximization_inputs import get_maximization_inputs -# importing the TEST_DIR from config does not work for test run in conda build -TEST_DIR = Path(__file__).parent.resolve() +REGRESSION_VAULT = Path(__file__).parent / "regression_vault" @pytest.fixture def model2(): - with open(TEST_DIR / "model2.yaml") as y: + with open(TEST_DATA_DIR / "model2.yaml") as y: model_dict = yaml.load(y, Loader=yaml.FullLoader) return model_dict @pytest.fixture def model2_data(): - data = pd.read_stata(TEST_DIR / "model2_simulated_data.dta") + data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") data = data.set_index(["caseid", "period"]) return data def test_get_filtered_states(model2, 
model2_data): - params = pd.read_csv(TEST_DIR / "regression_vault" / "one_stage_anchoring.csv") + params = pd.read_csv(REGRESSION_VAULT / "one_stage_anchoring.csv") params = params.set_index(["category", "period", "name1", "name2"]) max_inputs = get_maximization_inputs(model2, model2_data) diff --git a/tests/test_kalman_filters.py b/tests/test_kalman_filters.py index 4c71386e..39714b3e 100644 --- a/tests/test_kalman_filters.py +++ b/tests/test_kalman_filters.py @@ -133,10 +133,10 @@ def test_kalman_update_with_missing(update_func): @pytest.mark.parametrize("seed", SEEDS) -def test_sigma_points(seed): +def test_sigma_points(seed: int): np.random.seed(seed) state, cov = _random_state_and_covariance() - observed_factors = np.arange(2).reshape(1, 2) + observed_factors = jnp.arange(2).reshape(1, 2) expected = JulierSigmaPoints(n=len(state), kappa=2).sigma_points(state, cov) observed_part = np.tile(observed_factors, len(expected)).reshape(-1, 2) expected = np.hstack([expected, observed_part]) @@ -189,7 +189,7 @@ def f(params, states): anch_scaling = jnp.array([[1, 1], [2, 1]]) - anch_constants = np.array([[0, 0], [0, 0]]) + anch_constants = jnp.array([[0, 0], [0, 0]]) expected = jnp.array([[[[3, 2], [7, 4], [11, 6], [15, 8], [19, 10]]]]) @@ -249,13 +249,13 @@ def transition_function(params, states): transition_function, sm_state, sm_chol, - scaling_factor, + float(scaling_factor), weights, trans_coeffs, jnp.array(shock_sds), anch_scaling, anch_constants, - observed_factors, + jnp.asarray(observed_factors), ) aaae(calc_states.flatten(), expected_state.flatten()) diff --git a/tests/test_likelihood_regression.py b/tests/test_likelihood_regression.py index cd4cbc02..2e2d2423 100644 --- a/tests/test_likelihood_regression.py +++ b/tests/test_likelihood_regression.py @@ -9,6 +9,7 @@ import yaml from numpy.testing import assert_array_almost_equal as aaae +from skillmodels.config import TEST_DATA_DIR from skillmodels.decorators import register_params from skillmodels.maximization_inputs import get_maximization_inputs from skillmodels.utilities import reduce_n_periods @@ -23,20 +24,19 @@ "one_stage_anchoring_custom_functions", ] -# importing the TEST_DIR from config does not work for test run in conda build -TEST_DIR = Path(__file__).parent.resolve() +REGRESSION_VAULT = Path(__file__).parent / "regression_vault" @pytest.fixture def model2(): - with open(TEST_DIR / "model2.yaml") as y: + with open(TEST_DATA_DIR / "model2.yaml") as y: model_dict = yaml.load(y, Loader=yaml.FullLoader) return model_dict @pytest.fixture def model2_data(): - data = pd.read_stata(TEST_DIR / "model2_simulated_data.dta") + data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") data = data.set_index(["caseid", "period"]) return data @@ -74,7 +74,7 @@ def linear(fac1, fac2, fac3, params): ("model_name", "fun_key"), product(MODEL_NAMES, ["loglike", "debug_loglike"]) ) def test_likelihood_values_have_not_changed(model2, model2_data, model_name, fun_key): - regvault = TEST_DIR / "regression_vault" + regvault = REGRESSION_VAULT model = _convert_model(model2, model_name) params = pd.read_csv(regvault / f"{model_name}.csv").set_index( ["category", "period", "name1", "name2"], @@ -111,7 +111,7 @@ def test_splitting_does_not_change_gradient(model2, model2_data): def test_likelihood_contributions_have_not_changed( model2, model2_data, model_name, fun_key ): - regvault = TEST_DIR / "regression_vault" + regvault = REGRESSION_VAULT model = _convert_model(model2, model_name) params = pd.read_csv(regvault / 
f"{model_name}.csv").set_index( ["category", "period", "name1", "name2"], @@ -134,7 +134,7 @@ def test_likelihood_contributions_have_not_changed( product(["no_stages_anchoring", "with_missings"], ["loglike_and_gradient"]), ) def test_likelihood_contributions_large_nobs(model2, model2_data, model_type, fun_key): - regvault = TEST_DIR / "regression_vault" + regvault = REGRESSION_VAULT model = _convert_model(model2, "no_stages_anchoring") params = pd.read_csv(regvault / "no_stages_anchoring.csv").set_index( ["category", "period", "name1", "name2"], @@ -208,7 +208,7 @@ def test_likelihood_runs_with_empty_periods(model2, model2_data): def test_likelihood_runs_with_too_long_data(model2, model2_data): model = reduce_n_periods(model2, 2) - func_dict = get_maximization_inputs(model, model2_data) + func_dict = get_maximization_inputs(model, model2_data) # ty: ignore[invalid-argument-type] params = func_dict["params_template"] params["value"] = 0.1 diff --git a/tests/test_params_index.py b/tests/test_params_index.py index c712f5bc..84d4730a 100644 --- a/tests/test_params_index.py +++ b/tests/test_params_index.py @@ -1,10 +1,9 @@ -from pathlib import Path - import pandas as pd import pytest import yaml from frozendict import frozendict +from skillmodels.config import TEST_DATA_DIR from skillmodels.params_index import ( get_control_params_index_tuples, get_initial_cholcovs_index_tuples, @@ -22,8 +21,7 @@ @pytest.fixture def model2_inputs(): - test_dir = Path(__file__).parent.resolve() - with open(test_dir / "model2.yaml") as y: + with open(TEST_DATA_DIR / "model2.yaml") as y: model_dict = yaml.load(y, Loader=yaml.FullLoader) processed = process_model(model_dict) @@ -38,10 +36,9 @@ def model2_inputs(): def test_params_index_with_model2(model2_inputs): - test_dir = Path(__file__).parent.resolve() calculated = get_params_index(**model2_inputs) expected = pd.read_csv( - test_dir / "model2_correct_params_index.csv", + TEST_DATA_DIR / "model2_correct_params_index.csv", index_col=["category", "period", "name1", "name2"], ).index @@ -51,7 +48,7 @@ def test_params_index_with_model2(model2_inputs): def test_control_coeffs_index_tuples(): uinfo_tups = [(0, "m1"), (0, "m2"), (0, "bla"), (1, "m1"), (1, "m2")] uinfo = pd.DataFrame(index=pd.MultiIndex.from_tuples(uinfo_tups)) - controls = ["constant", "c1"] + controls = ("constant", "c1") expected = [ ("controls", 0, "m1", "constant"), @@ -77,7 +74,7 @@ def test_loading_index_tuples(): index=pd.MultiIndex.from_tuples(uinfo_tups), columns=["fac1", "fac2"], ) - factors = ["fac1", "fac2"] + factors = ("fac1", "fac2") expected = [ ("loadings", 0, "m1", "fac1"), ("loadings", 0, "m1", "fac2"), @@ -112,8 +109,8 @@ def test_meas_sd_index_tuples(): def test_shock_sd_index_tuples(): - periods = [0, 1, 2] - factors = ["fac1", "fac2"] + periods = (0, 1, 2) + factors = ("fac1", "fac2") expected = [ ("shock_sds", 0, "fac1", "-"), @@ -128,7 +125,7 @@ def test_shock_sd_index_tuples(): def test_initial_mean_index_tuples(): nmixtures = 3 - factors = ["fac1", "fac2"] + factors = ("fac1", "fac2") expected = [ ("initial_states", 0, "mixture_0", "fac1"), @@ -156,7 +153,7 @@ def test_mixture_weight_index_tuples(): def test_initial_cov_index_tuples(): nmixtures = 2 - factors = ["fac1", "fac2", "fac3"] + factors = ("fac1", "fac2", "fac3") expected = [ ("initial_cholcovs", 0, "mixture_0", "fac1-fac1"), ("initial_cholcovs", 0, "mixture_0", "fac2-fac1"), @@ -177,7 +174,7 @@ def test_initial_cov_index_tuples(): def test_trans_coeffs_index_tuples_no_endogenous_factors(): - periods = [0, 1, 2] + 
periods = (0, 1, 2) param_names = { "fac1": ["fac1", "fac2", "fac3", "constant"], @@ -220,7 +217,7 @@ def test_trans_coeffs_index_tuples_no_endogenous_factors(): def test_trans_coeffs_index_tuples_has_endogenous_factors(): - periods = [0, 1, 2, 3, 4, 5] + periods = (0, 1, 2, 3, 4, 5) param_names = { "fac1": ["fac1", "fac2", "fac3", "constant"], diff --git a/tests/test_parse_params.py b/tests/test_parse_params.py index 5d50e591..b8383af2 100644 --- a/tests/test_parse_params.py +++ b/tests/test_parse_params.py @@ -5,8 +5,6 @@ """ -from pathlib import Path - import jax.numpy as jnp import numpy as np import pandas as pd @@ -15,6 +13,7 @@ from frozendict import frozendict from numpy.testing import assert_array_equal as aae +from skillmodels.config import TEST_DATA_DIR from skillmodels.parse_params import create_parsing_info, parse_params from skillmodels.process_model import process_model from skillmodels.types import Anchoring @@ -22,13 +21,12 @@ @pytest.fixture def parsed_parameters(): - test_dir = Path(__file__).parent.resolve() p_index = pd.read_csv( - test_dir / "model2_correct_params_index.csv", + TEST_DATA_DIR / "model2_correct_params_index.csv", index_col=["category", "period", "name1", "name2"], ).index - with open(test_dir / "model2.yaml") as y: + with open(TEST_DATA_DIR / "model2.yaml") as y: model_dict = yaml.load(y, Loader=yaml.FullLoader) processed = process_model(model_dict) @@ -49,7 +47,7 @@ def parsed_parameters(): ) parsing_info = create_parsing_info( - params_index=p_index, + params_index=p_index, # ty: ignore[invalid-argument-type] update_info=update_info, labels=labels, anchoring=anchoring, diff --git a/tests/test_process_data.py b/tests/test_process_data.py index 5cfac889..9e924459 100644 --- a/tests/test_process_data.py +++ b/tests/test_process_data.py @@ -1,6 +1,5 @@ import io import textwrap -from pathlib import Path import jax.numpy as jnp import numpy as np @@ -10,6 +9,7 @@ from frozendict import frozendict from numpy.testing import assert_array_equal as aae +from skillmodels.config import TEST_DATA_DIR from skillmodels.process_data import ( _augment_data_for_endogenous_factors, _generate_controls_array, @@ -21,9 +21,6 @@ from skillmodels.process_model import process_model from skillmodels.types import Labels -# importing the TEST_DIR from config does not work for test run in conda build -TEST_DIR = Path(__file__).parent.resolve() - def test_pre_process_data(): df = pd.DataFrame(data=np.arange(20).reshape(2, 10).T, columns=["var", "inv"]) @@ -50,14 +47,14 @@ def test_pre_process_data(): @pytest.fixture def simplest_augmented(): out = {} - with open(TEST_DIR / "simplest_augmented_model.yaml") as y: + with open(TEST_DATA_DIR / "simplest_augmented_model.yaml") as y: out["model_dict"] = yaml.load(y, Loader=yaml.FullLoader) _df = pd.DataFrame(data=np.arange(15).reshape(3, 5).T, columns=["var", "inv", "of"]) _df["period"] = [1, 1, 2, 1, 2] _df["id"] = [1, 3, 3, 5, 5] out["data_input"] = _df.set_index(["id", "period"]) out["data_exp"] = pd.read_csv( - TEST_DIR / "simplest_augmented_data_expected.csv", + TEST_DATA_DIR / "simplest_augmented_data_expected.csv", index_col=["id", "aug_period"], ) return out @@ -79,7 +76,7 @@ def test_augment_data_for_endogenous_factors(simplest_augmented): def test_handle_controls_with_missings(): - controls = ["c1"] + controls = ("c1",) uinfo_ind_tups = [(0, "m1"), (0, "m2")] update_info = pd.DataFrame(index=pd.MultiIndex.from_tuples(uinfo_ind_tups)) data = [[1, 1, 1], [np.nan, 1, 1], [np.nan, 1, np.nan], [np.nan, np.nan, np.nan]] @@ -92,7 
+89,7 @@ def test_handle_controls_with_missings(): with pytest.warns(UserWarning): # noqa: PT030 calculated = _handle_controls_with_missings(df, controls, update_info) - assert calculated.loc[(2, 0)].isna().all() + assert calculated.loc[(2, 0)].isna().all() # ty: ignore[unresolved-attribute] def test_generate_measurements_array(): diff --git a/tests/test_process_model.py b/tests/test_process_model.py index be67a17b..4cd8c8ca 100644 --- a/tests/test_process_model.py +++ b/tests/test_process_model.py @@ -1,11 +1,11 @@ import inspect -from pathlib import Path import pandas as pd import pytest import yaml from pandas.testing import assert_frame_equal +from skillmodels.config import TEST_DATA_DIR from skillmodels.process_model import get_has_endogenous_factors, process_model from skillmodels.types import TransitionInfo @@ -13,13 +13,10 @@ # Integration test with model2 from the replication files of CHS2010 # ====================================================================================== -# importing the TEST_DIR from config does not work for test run in conda build -TEST_DIR = Path(__file__).parent.resolve() - @pytest.fixture def model2(): - with open(TEST_DIR / "model2.yaml") as y: + with open(TEST_DATA_DIR / "model2.yaml") as y: model_dict = yaml.load(y, Loader=yaml.FullLoader) return model_dict @@ -76,9 +73,8 @@ def test_transition_info(model2): def test_update_info(model2): res = process_model(model2).update_info - test_dir = Path(__file__).parent.resolve() expected = pd.read_csv( - test_dir / "model2_correct_update_info.csv", + TEST_DATA_DIR / "model2_correct_update_info.csv", index_col=["aug_period", "variable"], ) assert_frame_equal(res, expected) @@ -128,7 +124,7 @@ def test_normalizations(model2): def test_anchoring_and_endogenous_factors_work_together(): - with open(TEST_DIR / "model2.yaml") as y: + with open(TEST_DATA_DIR / "model2.yaml") as y: model_dict = yaml.load(y, Loader=yaml.FullLoader) # Set fac3 to be endogenous model_dict["factors"]["fac3"]["is_endogenous"] = True @@ -151,7 +147,7 @@ def test_anchoring_and_endogenous_factors_work_together(): def test_stagemap_with_endogenous_factors_wrong_labels(): - with open(TEST_DIR / "model2.yaml") as y: + with open(TEST_DATA_DIR / "model2.yaml") as y: model_dict = yaml.load(y, Loader=yaml.FullLoader) # Set fac3 to be endogenous model_dict["factors"]["fac3"]["is_endogenous"] = True @@ -162,7 +158,7 @@ def test_stagemap_with_endogenous_factors_wrong_labels(): def test_stagemap_with_endogenous_factors(): - with open(TEST_DIR / "model2.yaml") as y: + with open(TEST_DATA_DIR / "model2.yaml") as y: model_dict = yaml.load(y, Loader=yaml.FullLoader) # Set fac3 to be endogenous model_dict["factors"]["fac3"]["is_endogenous"] = True @@ -176,7 +172,7 @@ def test_stagemap_with_endogenous_factors(): @pytest.fixture def model2_inv(): - with open(TEST_DIR / "model2.yaml") as y: + with open(TEST_DATA_DIR / "model2.yaml") as y: model_dict = yaml.load(y, Loader=yaml.FullLoader) # Set fac3 to be endogenous model_dict["factors"]["fac3"]["is_endogenous"] = True @@ -242,9 +238,8 @@ def test_with_endog_transition_info(model2_inv): def test_with_endog_update_info(model2_inv): res = process_model(model2_inv).update_info - test_dir = Path(__file__).parent.resolve() expected = pd.read_csv( - test_dir / "model2_with_endog_correct_update_info.csv", + TEST_DATA_DIR / "model2_with_endog_correct_update_info.csv", index_col=["aug_period", "variable"], ) assert_frame_equal(res, expected) diff --git a/tests/test_simulate_data.py b/tests/test_simulate_data.py index 
f04b658d..533f5317 100644 --- a/tests/test_simulate_data.py +++ b/tests/test_simulate_data.py @@ -8,29 +8,29 @@ import yaml from numpy.testing import assert_array_almost_equal as aaae +from skillmodels.config import TEST_DATA_DIR from skillmodels.simulate_data import measurements_from_states, simulate_dataset -# importing the TEST_DIR from config does not work for test run in conda build -TEST_DIR = Path(__file__).parent.resolve() +REGRESSION_VAULT = Path(__file__).parent / "regression_vault" @pytest.fixture def model2(): - with open(TEST_DIR / "model2.yaml") as y: + with open(TEST_DATA_DIR / "model2.yaml") as y: model_dict = yaml.load(y, Loader=yaml.FullLoader) return model_dict @pytest.fixture def model2_data(): - data = pd.read_stata(TEST_DIR / "model2_simulated_data.dta") + data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") data = data.set_index(["caseid", "period"]) return data def test_simulate_dataset(model2, model2_data): model_dict = model2 - params = pd.read_csv(TEST_DIR / "regression_vault" / "one_stage_anchoring.csv") + params = pd.read_csv(REGRESSION_VAULT / "one_stage_anchoring.csv") params = params.set_index(["category", "period", "name1", "name2"]) calculated = simulate_dataset( diff --git a/tests/test_transition_functions.py b/tests/test_transition_functions.py index 1edc83c7..2f6eb877 100644 --- a/tests/test_transition_functions.py +++ b/tests/test_transition_functions.py @@ -1,6 +1,5 @@ import jax import jax.numpy as jnp -import numpy as np from numpy.testing import assert_array_almost_equal as aaae from skillmodels.transition_functions import ( @@ -17,14 +16,14 @@ def test_linear(): - states = np.arange(3) - params = np.array([0.1, 0.2, 0.3, 0.4]) + states = jnp.arange(3) + params = jnp.array([0.1, 0.2, 0.3, 0.4]) expected = 1.2 aaae(linear(states, params), expected) def test_translog(): - all_states = np.array( + all_states = jnp.array( [ [2, 0, 0], [0, 3, 0], @@ -38,7 +37,7 @@ def test_translog(): ], ) - params = np.array( + params = jnp.array( [ # linear terms 0.2, @@ -60,12 +59,12 @@ def test_translog(): expected_translog = [0.76, 0.7, 1.32, 0.04, 0.77, 0.1, -0.07, 0.573, 76.72] for states, expected in zip(all_states, expected_translog, strict=False): - calculated = translog(states, params) + calculated = translog(jnp.asarray(states), params) aaae(calculated, expected) def test_log_ces(): - states = np.array([3, 7.5]) + states = jnp.array([3, 7.5]) params = jnp.array([0.4, 0.6, 2]) expected = 7.244628323025 calculated = log_ces(states, params) @@ -82,11 +81,11 @@ def test_where_all_but_one_gammas_are_zero(): def test_constant(): - assert constant("bla", "blubb") == "bla" + assert constant("bla", "blubb") == "bla" # ty: ignore[invalid-argument-type] def test_robust_translog(): - all_states = np.array( + all_states = jnp.array( [ [2, 0, 0], [0, 3, 0], @@ -100,7 +99,7 @@ def test_robust_translog(): ], ) - params = np.array( + params = jnp.array( [ # linear terms 0.2, @@ -122,12 +121,12 @@ def test_robust_translog(): expected_translog = [0.76, 0.7, 1.32, 0.04, 0.77, 0.1, -0.07, 0.573, 76.72] for states, expected in zip(all_states, expected_translog, strict=False): - calculated = robust_translog(states, params) + calculated = robust_translog(jnp.asarray(states), params) aaae(calculated, expected) def test_log_ces_general(): - states = np.array([3, 7.5]) + states = jnp.array([3, 7.5]) params = jnp.array([0.4, 0.6, 2, 2, 0.5]) expected = 7.244628323025 calculated = log_ces_general(states, params) @@ -144,7 +143,7 @@ def 
test_log_ces_general_where_all_but_one_gammas_are_zero(): def test_param_names_log_ces_general(): - factors = ["a", "b"] + factors = ("a", "b") expected = ["a", "b", "sigma_a", "sigma_b", "tfp"] calculated = params_log_ces_general(factors) assert calculated == expected diff --git a/tests/test_utilities.py b/tests/test_utilities.py index 8041fc98..858c9513 100644 --- a/tests/test_utilities.py +++ b/tests/test_utilities.py @@ -5,14 +5,13 @@ """ -from pathlib import Path - import numpy as np import pandas as pd import pytest import yaml from pandas.testing import assert_frame_equal, assert_index_equal +from skillmodels.config import TEST_DATA_DIR from skillmodels.process_model import process_model from skillmodels.utilities import ( _get_params_index_from_model_dict, @@ -29,13 +28,10 @@ update_parameter_values, ) -# importing the TEST_DIR from config does not work for test run in conda build -TEST_DIR = Path(__file__).parent.resolve() - @pytest.fixture def model2(): - with open(TEST_DIR / "model2.yaml") as y: + with open(TEST_DATA_DIR / "model2.yaml") as y: model_dict = yaml.load(y, Loader=yaml.FullLoader) return model_dict @@ -43,11 +39,11 @@ def model2(): @pytest.mark.parametrize("factors", ["fac2", ["fac2"]]) def test_extract_factors_single(model2, factors): reduced = extract_factors(factors, model2) - assert list(reduced["factors"]) == ["fac2"] + assert list(reduced["factors"]) == ["fac2"] # ty: ignore[invalid-argument-type] assert list(model2["factors"]) == ["fac1", "fac2", "fac3"] assert "anchoring" not in reduced assert model2["anchoring"]["outcomes"] == {"fac1": "Q1"} - process_model(reduced) + process_model(reduced) # ty: ignore[invalid-argument-type] def test_update_parameter_values(): @@ -69,18 +65,18 @@ def test_update_parameter_values(): @pytest.mark.parametrize("factors", ["fac2", ["fac2"]]) def test_remove_factors(model2, factors): reduced = remove_factors(factors, model2) - assert list(reduced["factors"]) == ["fac1", "fac3"] + assert list(reduced["factors"]) == ["fac1", "fac3"] # ty: ignore[invalid-argument-type] assert list(model2["factors"]) == ["fac1", "fac2", "fac3"] assert "anchoring" in reduced - process_model(reduced) + process_model(reduced) # ty: ignore[invalid-argument-type] @pytest.mark.parametrize("measurements", ["y5", ["y5"]]) def test_remove_measurements(model2, measurements): reduced = remove_measurements(measurements, model2) - assert reduced["factors"]["fac2"]["measurements"] == [["y4", "y6"]] * 8 + assert reduced["factors"]["fac2"]["measurements"] == [["y4", "y6"]] * 8 # ty: ignore[invalid-argument-type] assert "y5" in model2["factors"]["fac2"]["measurements"][0] - process_model(reduced) + process_model(reduced) # ty: ignore[invalid-argument-type] @pytest.mark.parametrize("controls", ["x1", ["x1"]]) @@ -88,24 +84,24 @@ def test_remove_controls(model2, controls): reduced = remove_controls(controls, model2) assert "controls" not in reduced assert "controls" in model2 - process_model(reduced) + process_model(reduced) # ty: ignore[invalid-argument-type] def test_reduce_n_periods(model2): reduced = reduce_n_periods(model2, 1) - assert reduced["factors"]["fac1"]["measurements"] == [["y1", "y2", "y3"]] - assert reduced["factors"]["fac2"]["normalizations"]["loadings"] == [{"y4": 1}] - process_model(reduced) + assert reduced["factors"]["fac1"]["measurements"] == [["y1", "y2", "y3"]] # ty: ignore[invalid-argument-type] + assert reduced["factors"]["fac2"]["normalizations"]["loadings"] == [{"y4": 1}] # ty: ignore[invalid-argument-type] + process_model(reduced) # ty: 
ignore[invalid-argument-type] def test_switch_linear_to_translog(model2): switched = switch_linear_to_translog(model2) - assert switched["factors"]["fac2"]["transition_function"] == "translog" + assert switched["factors"]["fac2"]["transition_function"] == "translog" # ty: ignore[invalid-argument-type] def test_switch_linear_and_translog_back_and_forth(model2): with_translog = switch_linear_to_translog(model2) - with_linear = switch_translog_to_linear(with_translog) + with_linear = switch_translog_to_linear(with_translog) # ty: ignore[invalid-argument-type] assert model2 == with_linear @@ -128,10 +124,10 @@ def test_remove_from_dict(to_remove): def test_reduce_params_via_extract_factors(model2): model_dict = reduce_n_periods(model2, 2) - full_index = _get_params_index_from_model_dict(model_dict) + full_index = _get_params_index_from_model_dict(model_dict) # ty: ignore[invalid-argument-type] params = pd.DataFrame(columns=["value"], index=full_index) - _, reduced_params = extract_factors("fac3", model_dict, params) + _, reduced_params = extract_factors("fac3", model_dict, params) # ty: ignore[invalid-argument-type] expected_index = pd.MultiIndex.from_tuples( [ @@ -155,17 +151,17 @@ def test_reduce_params_via_extract_factors(model2): names=["category", "aug_period", "name1", "name2"], ) - assert_index_equal(reduced_params.index, expected_index) + assert_index_equal(reduced_params.index, expected_index) # ty: ignore[invalid-argument-type] def test_extend_params_via_switch_to_translog(model2): model_dict = reduce_n_periods(model2, 2) - normal_index = _get_params_index_from_model_dict(model_dict) + normal_index = _get_params_index_from_model_dict(model_dict) # ty: ignore[invalid-argument-type] params = pd.DataFrame(columns=["value"], index=normal_index) - _, extended_params = switch_linear_to_translog(model_dict, params) + _, extended_params = switch_linear_to_translog(model_dict, params) # ty: ignore[invalid-argument-type] - added_index = extended_params.index.difference(normal_index) + added_index = extended_params.index.difference(normal_index) # ty: ignore[possibly-missing-attribute] expected_added_index = pd.MultiIndex.from_tuples( [ @@ -181,7 +177,7 @@ def test_extend_params_via_switch_to_translog(model2): assert_index_equal(added_index, expected_added_index) - assert extended_params.loc[added_index, "value"].unique()[0] == 0.05 + assert extended_params.loc[added_index, "value"].unique()[0] == 0.05 # ty: ignore[possibly-missing-attribute] def test_shorten_if_necessary(): diff --git a/tests/test_visualize_factor_distributions.py b/tests/test_visualize_factor_distributions.py index 1895301d..f208295f 100644 --- a/tests/test_visualize_factor_distributions.py +++ b/tests/test_visualize_factor_distributions.py @@ -3,6 +3,7 @@ import pandas as pd import yaml +from skillmodels.config import TEST_DATA_DIR from skillmodels.maximization_inputs import get_maximization_inputs from skillmodels.simulate_data import simulate_dataset from skillmodels.visualize_factor_distributions import ( @@ -12,18 +13,17 @@ univariate_densities, ) -# importing the TEST_DIR from config does not work for test run in conda build -TEST_DIR = Path(__file__).parent.resolve() +REGRESSION_VAULT = Path(__file__).parent / "regression_vault" def test_visualize_factor_distributions_runs_with_filtered_states(): - with open(TEST_DIR / "model2.yaml") as y: + with open(TEST_DATA_DIR / "model2.yaml") as y: model_dict = yaml.load(y, Loader=yaml.FullLoader) - params = pd.read_csv(TEST_DIR / "regression_vault" / "one_stage_anchoring.csv") + 
params = pd.read_csv(REGRESSION_VAULT / "one_stage_anchoring.csv") params = params.set_index(["category", "period", "name1", "name2"]) - data = pd.read_stata(TEST_DIR / "model2_simulated_data.dta") + data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") data.set_index(["caseid", "period"], inplace=True) max_inputs = get_maximization_inputs(model_dict, data) @@ -54,13 +54,13 @@ def test_visualize_factor_distributions_runs_with_filtered_states(): def test_visualize_factor_distributions_runs_with_simulated_states(): - with open(TEST_DIR / "model2.yaml") as y: + with open(TEST_DATA_DIR / "model2.yaml") as y: model_dict = yaml.load(y, Loader=yaml.FullLoader) - data = pd.read_stata(TEST_DIR / "model2_simulated_data.dta") + data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") data.set_index(["caseid", "period"], inplace=True) - params = pd.read_csv(TEST_DIR / "regression_vault" / "one_stage_anchoring.csv") + params = pd.read_csv(REGRESSION_VAULT / "one_stage_anchoring.csv") params = params.set_index(["category", "period", "name1", "name2"]) max_inputs = get_maximization_inputs(model_dict, data) diff --git a/tests/test_visualize_transition_equations.py b/tests/test_visualize_transition_equations.py index cb811631..a3bd90eb 100644 --- a/tests/test_visualize_transition_equations.py +++ b/tests/test_visualize_transition_equations.py @@ -3,25 +3,26 @@ import pandas as pd import yaml +from skillmodels.config import TEST_DATA_DIR from skillmodels.maximization_inputs import get_maximization_inputs from skillmodels.visualize_transition_equations import ( combine_transition_plots, get_transition_plots, ) -TEST_DIR = Path(__file__).parent.resolve() +REGRESSION_VAULT = Path(__file__).parent / "regression_vault" def test_visualize_transition_equations_runs(): - with open(TEST_DIR / "model2.yaml") as y: + with open(TEST_DATA_DIR / "model2.yaml") as y: model_dict = yaml.load(y, Loader=yaml.FullLoader) model_dict["observed_factors"] = ["ob1"] - params = pd.read_csv(TEST_DIR / "regression_vault" / "one_stage_anchoring.csv") + params = pd.read_csv(REGRESSION_VAULT / "one_stage_anchoring.csv") params = params.set_index(["category", "period", "name1", "name2"]) - data = pd.read_stata(TEST_DIR / "model2_simulated_data.dta") + data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") data.set_index(["caseid", "period"], inplace=True) data["ob1"] = 0 From dce66ad7c251eab5fad70ddc48871d914cb80799 Mon Sep 17 00:00:00 2001 From: Hans-Martin von Gaudecker Date: Fri, 9 Jan 2026 12:39:30 +0100 Subject: [PATCH 09/27] Fix more linting issues. 
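Most of the diff applies three conventions across the code base:

- ruff ignore codes keep their rationale as an inline comment instead of
  a comment block above each code,
- docstring `Args:` sections drop the type annotations that already live
  in the function signatures,
- imports needed only for type annotations move behind `TYPE_CHECKING`;
  on Python 3.14 this works without `from __future__ import annotations`
  because annotations are evaluated lazily (PEP 649).

The `ty` task now checks `src`, `tests` and `docs` explicitly.

A minimal sketch of the resulting style (the function below is
hypothetical, not part of skillmodels):

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # Only type checkers need this import at analysis time.
        from skillmodels.types import Labels

    def count_latent_factors(labels: Labels) -> int:
        """Return the number of latent factors.

        Args:
            labels: Labels for model quantities.
        """
        return len(labels.latent_factors)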
--- CLAUDE.md | 4 +- pixi.lock | 4 +- pyproject.toml | 106 ++------ src/skillmodels/check_model.py | 5 +- src/skillmodels/clipping.py | 11 +- src/skillmodels/constraints.py | 50 ++-- src/skillmodels/correlation_heatmap.py | 243 +++++++++--------- src/skillmodels/decorators.py | 7 +- src/skillmodels/filtered_states.py | 6 +- src/skillmodels/kalman_filters.py | 71 +++-- src/skillmodels/kalman_filters_debug.py | 26 +- src/skillmodels/likelihood_function.py | 64 ++++- src/skillmodels/likelihood_function_debug.py | 62 ++--- src/skillmodels/maximization_inputs.py | 31 +-- src/skillmodels/params_index.py | 15 +- src/skillmodels/parse_params.py | 25 +- src/skillmodels/process_data.py | 23 +- src/skillmodels/process_debug_data.py | 35 +-- src/skillmodels/process_model.py | 20 +- src/skillmodels/simulate_data.py | 80 +++--- src/skillmodels/transition_functions.py | 2 - src/skillmodels/utilities.py | 48 ++-- .../visualize_factor_distributions.py | 150 ++++++----- .../visualize_transition_equations.py | 78 +++--- 24 files changed, 596 insertions(+), 570 deletions(-) diff --git a/CLAUDE.md b/CLAUDE.md index 25ca239e..ce3840ba 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -86,10 +86,12 @@ The main package exports three functions: ## Code Style -- Uses Ruff for linting (target: Python 3.13, line length: 88) +- Require Python 3.14 +- Uses Ruff for linting (target: Python 3.14, line length: 88) - Google-style docstrings - Pre-commit hooks enforce formatting and linting - Type checking via `ty` with strict rules +- Do not use `from __future__ import annotations` ## Testing diff --git a/pixi.lock b/pixi.lock index b3b1ff96..20d82777 100644 --- a/pixi.lock +++ b/pixi.lock @@ -8471,8 +8471,8 @@ packages: timestamp: 1753199211006 - pypi: ./ name: skillmodels - version: 0.0.24.dev244+ge2d687a5a.d20260109 - sha256: 06b1b0d92e84a2a2f4c422c119aaa56cc04f1aadaf2ee53c393c67d9378aaf37 + version: 0.0.24.dev245+g921c61276.d20260109 + sha256: 5918dae4da35a25e3716f0ea2f2cce8f1e9841ad01284ccc92962243d4a6d19e requires_dist: - dags - frozendict diff --git a/pyproject.toml b/pyproject.toml index 59171e42..38e4fcd9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -150,7 +150,7 @@ types-PyYAML = "*" types-pytz = "*" [tool.pixi.feature.ty.tasks] -ty = "ty check" +ty = "ty check src tests docs" # Environments # -------------------------------------------------------------------------------------- @@ -174,88 +174,34 @@ line-length = 88 [tool.ruff.lint] select = ["ALL"] extend-ignore = [ - # Dynamically typed expressions (typing.Any) are disallowed - too strict - "ANN401", - - # No explicit `stacklevel` keyword argument found - "B028", - - # In conflict with formatter - "COM812", - - # Missing docstring in public module - "D100", - - # missing docstring in public function - "D103", - - # missing docstring in public package - "D104", - - # exception must not use a string literal - "EM101", - - # exception must not use an f-string literal - "EM102", - - # Boolean-typed positional arguments. - "FBT001", - - # Boolean default positional argument in function definition - "FBT002", - - # line contains a todo - "FIX002", - - # In conflict with formatter - "ISC001", - - # Leave Numpy's legacy RNG - "NPY002", - - # array.at is perfectly valid Jax, but linter thinks it's Pandas... 
- "PD008", - - # pd.merge is fine - "PD015", - - # Many suggestions to use list comprehension are not helpful - "PERF401", - - # Magic values are fine - "PLR2004", - - # Too many arguments to function call - "PLR0913", - - # Assignment before return statement is fine. - "RET504", - - # use of `assert` detected - "S101", - - # `pickle` module is unsafe - "S301", - - # Private member accessed: `_stochastic_info` - "SLF001", - - # long messages outside the exception class - "TRY003", + "ANN401", # Dynamically typed expressions (typing.Any) are disallowed - too strict + "B028", # No explicit `stacklevel` keyword argument found + "COM812", # In conflict with formatter + "D100", # Missing docstring in public module + "D103", # missing docstring in public function + "D104", # missing docstring in public package + "EM101", # exception must not use a string literal + "EM102", # exception must not use an f-string literal + "FBT001", # Boolean-typed positional arguments. + "FBT002", # Boolean default positional argument in function definition + "FIX002", # line contains a todo + "ISC001", # In conflict with formatter + "NPY002", # Leave Numpy's legacy RNG + "PD008", # array.at is perfectly valid Jax, but linter thinks it's Pandas... + "PD015", # pd.merge is fine + "PERF401", # Many suggestions to use list comprehension are not helpful + "PLR2004", # Magic values are fine + "PLR0913", # Too many arguments to function call + "RET504", # Assignment before return statement is fine. + "S101", # use of `assert` detected + "S301", # `pickle` module is unsafe + "SLF001", # Private member accessed: `_stochastic_info` + "TRY003", # long messages outside the exception class ] [tool.ruff.lint.per-file-ignores] -"src/skillmodels/constraints.py" = ["D417"] -"src/skillmodels/decorators.py" = ["D417"] -"src/skillmodels/kalman_filters.py" = ["D417"] -"src/skillmodels/likelihood_function.py" = ["D417"] -"src/skillmodels/likelihood_function_debug.py" = ["D417"] -"src/skillmodels/params_index.py" = ["D417"] -"src/skillmodels/parse_params.py" = ["D417"] -"src/skillmodels/process_data.py" = ["D417"] -"src/skillmodels/simulate_data.py" = ["D417"] -"src/skillmodels/visualize_*.py" = ["BLE001", "D417"] -"src/skillmodels/*_heatmap*.py" = ["D417"] +"src/skillmodels/types.py" = ["TC"] # Dataclasses need types at runtime +"src/skillmodels/visualize_*.py" = ["BLE001"] "**/*.ipynb" = ["B018", "T201", "E402", "PLR2004", "INP001", "PTH100"] "docs/**/*" = ["A001", "ERA001", "INP001", "PTH100", "PTH123", "S506"] "tests/*" = [ diff --git a/src/skillmodels/check_model.py b/src/skillmodels/check_model.py index f858bcf4..a591001f 100644 --- a/src/skillmodels/check_model.py +++ b/src/skillmodels/check_model.py @@ -1,6 +1,9 @@ +from typing import TYPE_CHECKING + import numpy as np -from skillmodels.types import Anchoring, Dimensions, Labels +if TYPE_CHECKING: + from skillmodels.types import Anchoring, Dimensions, Labels def check_model( diff --git a/src/skillmodels/clipping.py b/src/skillmodels/clipping.py index 07b59661..04fea4d7 100644 --- a/src/skillmodels/clipping.py +++ b/src/skillmodels/clipping.py @@ -26,14 +26,13 @@ def soft_clipping( ``scipy.special.logsumexp``. ``scipy.special.softmax`` is the gradient of ``scipy.special.logsumexp``. - Args: - arr (jax.numpy.array): Array that is clipped elementwise. - lower (float): The value at which the array is clipped from below. - upper (float): The value at which the array is clipped from above. - lower_hardness (float): Scaling factor that is applied inside the soft maximum. 
+ arr: Array that is clipped elementwise. + lower: The value at which the array is clipped from below. + upper: The value at which the array is clipped from above. + lower_hardness: Scaling factor that is applied inside the soft maximum. High values imply a closer approximation of the real maximum. - upper_hardness (float): Scaling factor that is applied inside the soft maximum. + upper_hardness: Scaling factor that is applied inside the soft maximum. High values imply a closer approximation of the real maximum. """ diff --git a/src/skillmodels/constraints.py b/src/skillmodels/constraints.py index e9d3765a..d95f5c86 100644 --- a/src/skillmodels/constraints.py +++ b/src/skillmodels/constraints.py @@ -7,11 +7,12 @@ import numpy as np import optimagic as om -import pandas as pd import skillmodels.transition_functions as t_f_module if TYPE_CHECKING: + import pandas as pd + from skillmodels.types import Anchoring, Dimensions, EndogenousFactorsInfo, Labels @@ -28,16 +29,16 @@ def get_constraints_dicts( The result can easily be converted to optimagic-style constraints. Args: - model_dict (dict): The model specification. See: :ref:`model_specs` - dimensions (dict): Dimensional information like n_states, n_periods, n_controls, + dimensions: Dimensional information like n_states, n_periods, n_controls, n_mixtures. See :ref:`dimensions`. - labels (dict): Dict of lists with labels for the model quantities like + labels: Dict of lists with labels for the model quantities like factors, periods, controls, stagemap and stages. See :ref:`labels` - anchoring (dict): Information about anchoring. See :ref:`anchoring` - update_info (pandas.DataFrame): DataFrame with one row per Kalman update needed + anchoring_info: Information about anchoring. See :ref:`anchoring` + update_info: DataFrame with one row per Kalman update needed in the likelihood function. See :ref:`update_info`. - normalizations (dict): Nested dictionary with information on normalized factor + normalizations: Nested dictionary with information on normalized factor loadings and intercepts for each factor. See :ref:`normalizations`. + endogenous_factors_info: Information about endogenous factors in the model. Returns: A list of constraints dictionaries with entries: @@ -140,8 +141,9 @@ def _get_normalization_constraints( """List of constraints to enforce normalizations. Args: - normalizations (dict): Nested dictionary with information on normalized factor - loadings and intercepts for each factor. See :ref:`normalizations`. + normalizations: Nested dictionary with information on normalized factor + loadings and intercepts for each factor. See :ref:`normalizations`. + factors: Tuple of factor names to process. Returns: constraints_dicts @@ -204,8 +206,8 @@ def _get_stage_constraints( """Equality constraints for transition and shock parameters within stages. Args: - stagemap (list): map aug_periods to aug_stages - stages (list): aug_stages + stagemap: map aug_periods to aug_stages + stages: aug_stages Returns: constraints_dicts @@ -245,7 +247,7 @@ def _get_constant_factors_constraints(labels: Labels) -> list[dict]: """Fix shock variances of constant factors to `bounds_distance`. Args: - labels (dict): Dict of lists with labels for the model quantities like + labels: Dict of lists with labels for the model quantities like factors, periods, controls, stagemap and stages. See :ref:`labels` Returns: @@ -277,8 +279,8 @@ def _get_initial_states_constraints( Otherwise the model would only be identified up to the order of the start factors. 
     Args:
-        n_mixtures (int): number of elements in the mixture of normal of the factors.
-        factors (list): the latent factors of the model
+        n_mixtures: number of elements in the mixture of normals of the factors.
+        factors: the latent factors of the model
 
     Returns:
         constraints_dicts
@@ -306,7 +308,7 @@ def _get_transition_constraints(labels: Labels) -> list[dict]:
     """Collect possible constraints on transition parameters.
 
     Args:
-        labels (dict): Dict of lists with labels for the model quantities like
+        labels: Dict of lists with labels for the model quantities like
            factors, periods, controls, stagemap and stages. See :ref:`labels`
 
     Returns:
@@ -338,11 +340,11 @@
     """Constraints on anchoring parameters.
 
     Args:
-        update_info (pandas.DataFrame): DataFrame with one row per Kalman update needed
+        update_info: DataFrame with one row per Kalman update needed
            in the likelihood function. See :ref:`update_info`.
-        controls (list): List of control variables
-        anchoring_info (dict): Information about anchoring. See :ref:`anchoring`
-        periods (list): Period of the model
+        controls: List of control variables
+        anchoring_info: Information about anchoring. See :ref:`anchoring`
+        periods: Periods of the model
 
     Returns:
         constraints_dicts
@@ -410,8 +412,10 @@
     Both depend on the transition function.
 
     Args:
-        labels (dict): Dict of lists with labels for the model quantities like
+        labels: Dict of lists with labels for the model quantities like
            factors, periods, controls, stagemap and stages. See :ref:`labels`
+        endogenous_factors_info: Information about endogenous factors and their
+            relationship to augmented periods.
 
     Returns:
         constraints_dicts
@@ -533,7 +537,7 @@
     """Convert constraints provided in dictionary form to optimagic constraints.
 
     Args:
-        constraints_dicts (list): see :ref:`get_constraints_dicts`.
+        constraints_dicts: see :ref:`get_constraints_dicts`.
 
     Returns:
         List of optimagic constraints.
@@ -572,8 +576,8 @@
     This means that any robust bounds will be overridden for fixed parameters.
 
     Args:
-        params_template (pd.DataFrame): see :ref:`params_df`.
-        constraints_dicts (list): see :ref:`get_constraints_dicts`.
+        params_template: see :ref:`params_df`.
+        constraints_dicts: see :ref:`get_constraints_dicts`.
 
     Returns:
         pd.DataFrame: modified copy of params_template
diff --git a/src/skillmodels/correlation_heatmap.py b/src/skillmodels/correlation_heatmap.py
index 2d6f0bbf..cf2d782f 100644
--- a/src/skillmodels/correlation_heatmap.py
+++ b/src/skillmodels/correlation_heatmap.py
@@ -1,13 +1,16 @@
-from typing import Any
+from typing import TYPE_CHECKING, Any
 
 import numpy as np
 import pandas as pd
-from numpy.typing import NDArray
 from plotly import graph_objects as go
 
 from skillmodels.process_data import pre_process_data
 from skillmodels.process_model import process_model
-from skillmodels.types import ProcessedModel
+
+if TYPE_CHECKING:
+    from numpy.typing import NDArray
+
+    from skillmodels.types import ProcessedModel
 
 
 def plot_correlation_heatmap(
@@ -34,31 +37,33 @@
     """Plot correlation heatmaps for factor measurements.
 
     Args:
-        corr (DataFrame): Data frame of measurement or factor score correlations.
-        heatmap_kwargs (dct): Dictionary of key word arguments to pass to go.Heatmap ().
+        corr: Data frame of measurement or factor score correlations.
+        heatmap_kwargs: Dictionary of keyword arguments to pass to go.Heatmap().
             If None, the default kwargs defined in the function will be used.
-        layout_kwargs (dct): Dictionary of key word arguments used to update layout of
+        layout_kwargs: Dictionary of key word arguments used to update layout of
             go.Figure object. If None, the default kwargs defined in the
             function will be used. Through layout_kwargs, you can edit figure properties
             such as
             - template
             - title
             - figsize
-        rounding (int): Number of digits after the decimal point to round the
+        rounding: Number of digits after the decimal point to round the
             correlation values to. Default 2.
-        zmax (float ot NoneType): Upper bound to set on correlation color map. If None,
+        zmax: Upper bound to set on correlation color map. If None,
            is set to maximum absolute correlation value.
-        zmin (float or NoneType): Lower bound to set on correlation color map. If None,
+        zmin: Lower bound to set on correlation color map. If None,
            is set to -zmax.
-        zmid (float or NoneType): Midpoint to set on correlation color map. If None,
+        zmid: Midpoint to set on correlation color map. If None,
            is set to 0.
-        colorscale (str): Name of the color palette to use in the heatmap.
+        colorscale: Name of the color palette to use in the heatmap.
            Default 'RdBu_r'.
-        show_color_bar (bool): A boolean variable for displaying heatmap colorbar.
+        show_color_bar: A boolean variable for displaying heatmap colorbar.
            Default True.
-        show_diagonal (bool): A boolean for displaying the correlations on the diagonal.
+        show_diagonal: A boolean for displaying the correlations on the diagonal.
            Default False.
-        show_upper_triangle (bool): A boolean for displaying upper triangular part
+        show_upper_triangle: A boolean for displaying upper triangular part
            of the correlation heatmap. Default False.
+        trim_heatmap: If True, trim empty rows/columns from the heatmap.
+            Default False.
 
         The following arguments are processed into dictionaries or special plotly
         objects and passed to layout_kwargs. Defining them as additional arguments
@@ -71,21 +76,21 @@
         defined in layout_kwargs will overwrite values passed via the individual
         arguments.
 
-        annotate (bool): If True, annotate the heatmap figure with correlation values.
+        annotate: If True, annotate the heatmap figure with correlation values.
            Default False.
-        annotation_font_size (int): Font size of the annotation text. Default 13.
-        annotation_font_color (str): Collor of the annotation text. Default 'black'.
-        annotation_text_angle (float): The angle at which to rotate annotation text.
+        annotation_fontsize: Font size of the annotation text. Default 13.
+        annotation_text_color: Color of the annotation text. Default 'black'.
+        annotation_text_angle: The angle at which to rotate annotation text.
            Default 0.
-        axes_tick_fontsize (list, tuple, other iterable or dict): Fontsize of axes
+        axes_tick_fontsize: Fontsize of axes
            ticks. Default (12,12)
-        axes_tick_label_angle (list, tuple, other iterable or dict): Rotation angles of
+        axes_tick_label_angle: Rotation angles of
            axes tick labels. Default (90,0).
-        axes_tick_label_color (list, tuple, other iterable or dict): Colors of the axes
+        axes_tick_label_color: Colors of the axes
            tick labels. Default ('black', 'black').
 
     Returns:
-        fig (plotly graph object): The figure with correlaiton heatmap.
+        fig: The figure with correlation heatmap.
 
     """
     corr = _process_corr_data_for_plotting(
@@ -138,18 +143,18 @@
     across period specific measurements.
 
     Args:
-        data (pd.DataFrame): DataFrame with observed measurements.
-        model_dict (dct): Dictionary of model attributes to be passed to process_model
+        data: DataFrame with observed measurements.
+        model_dict: Dictionary of model attributes to be passed to process_model
            and extract measurements for each period.
-        factors (list, str or NoneType): List of factors, to retrieve measurements for.
+        factors: List of factors, to retrieve measurements for.
            If None, then calculate correlations of measurements of all factors.
-        periods (int, float, list or NoneType): If int, the period within which to
+        periods: If int, the period within which to
            calculate measurement correlations. If a list, calculate correlations
            over periods. If None, calculate correlations across all periods.
            Note: Periods refer to original periods, not the augmented periods.
 
     Returns:
-        corr (DataFrame): DataFrame with measurement correlations.
+        corr: DataFrame with measurement correlations.
 
     """
     data = data.copy(deep=True)
@@ -185,17 +190,17 @@
     The calculated scores coincide with factor scores for linear models.
 
     Args:
-        data (pd.DataFrame): DataFrame with observed measurements.
-        model_dict (dct): Dictionary of model attributes to be passed to process_model
+        data: DataFrame with observed measurements.
+        model_dict: Dictionary of model attributes to be passed to process_model
            and extract measurements for each period.
-        factors (list, str or NoneType): List of factors, to retrieve measurements for.
+        factors: List of factors, to retrieve measurements for.
            If None, then calculate correlations of measurements of all factors.
-        periods (int,float, list or NoneType): If int, the period within which to
+        periods: If int, the period within which to
            calculate measurement correlations. If a list, calculate correlations
            over periods. If None, calculate correlations across all periods.
 
     Returns:
-        corr (DataFrame): DataFrame with score correlations.
+        corr: DataFrame with score correlations.
 
     """
     data = data.copy(deep=True)
@@ -230,18 +235,18 @@
     scores.
 
     Args:
-        data (pd.DataFrame): DataFrame with observed measurements.
-        params (pd.DataFrame): DataFrame with estimated model parameters
-        model_dict (dct): Dictionary of model attributes to be passed to process_model
+        data: DataFrame with observed measurements.
+        params: DataFrame with estimated model parameters
+        model_dict: Dictionary of model attributes to be passed to process_model
            and extract measurements for each period.
-        factors (list, str or NoneType): List of factors, to retrieve measurements for.
+        factors: List of factors, to retrieve measurements for.
            If None, then calculate correlations of measurements of all factors.
-        periods (int,float, list or NoneType): If int, the period within which to
+        periods: If int, the period within which to
            calculate measurement correlations. If a list, calculate correlations
            over periods. If None, calculate correlations across all periods.
 
     Returns:
-        corr (DataFrame): DataFrame with score correlations.
+        corr: DataFrame with score correlations.
 
     """
     data = data.copy(deep=True)
@@ -329,18 +334,18 @@
     the data columns into a data frame.
 
     Args:
-        data (pd.DataFrame): Data with observable variables.
-        update_info (pd.DataFrame): DataFrame with information on measurements
+        data: Data with observable variables.
+        update_info_by_period: DataFrame with information on measurements
            for each factor in each model period.
-        periods (list): The list of periods that correlations are
+        periods: The list of periods that correlations are
            calculated for.
- latent_factors (list): List of latent factors the measurements of which + latent_factors: List of latent factors the measurements of which correlations are calculated for. - observed_factors (list): List of observed factors the measurements of which + observed_factors: List of observed factors the measurements of which correlations are calculated for. Returns: - df (pd.DataFrame): Processed DataFrame to calculate correlations over. + df: Processed DataFrame to calculate correlations over. """ if len(periods) == 1: @@ -373,17 +378,17 @@ def _get_measurement_data_for_single_period( """Extract measurements of factors for the given period. Args: - data (pd.DataFrame): Data with observable variables. - update_info (pd.DataFrame): DataFrame with information on measurements + data: Data with observable variables. + update_info_by_period: DataFrame with information on measurements for each factor in each model period. - periods (int or float): The period to extract measurements for. - latent_factors (list): List of latent factors the measurements of which + period: The period to extract measurements for. + latent_factors: List of latent factors the measurements of which correlations are calculated for. - observed_factors (list): List of observed factors the measurements of which + observed_factors: List of observed factors the measurements of which correlations are calculated for. Returns: - df (pd.DataFrame): DataFrame with measurements of factors for period 'period'. + df: DataFrame with measurements of factors for period 'period'. """ period_info = update_info_by_period.loc[period].reset_index() @@ -409,17 +414,17 @@ def _get_measurement_data_for_multiple_periods( """Extract measurements for factors for given periods. Args: - data (pd.DataFrame): Data with observable variables. - update_info_by_period (pd.DataFrame): DataFrame with information on measurements + data: Data with observable variables. + update_info_by_period: DataFrame with information on measurements for each factor in each user-provided period. - periods (list): The periods to extract measurements for. - latent_factors (list): List of latent factors the measurements of which + periods: The periods to extract measurements for. + latent_factors: List of latent factors the measurements of which correlations are calculated for. - observed_factors (list): List of observed factors the measurements of which + observed_factors: List of observed factors the measurements of which correlations are calculated for. Returns: - df (pd.DataFrame): DataFrame with measurements of factors in each period as + df: DataFrame with measurements of factors in each period as columns. """ @@ -455,18 +460,18 @@ def _get_quasi_factor_scores_data( models. Args: - data (pd.DataFrame): Data with observable variables. - update_info (pd.DataFrame): DataFrame with information on measurements + data: Data with observable variables. + update_info_by_period: DataFrame with information on measurements for each factor in each model period. - periods (list): The list of periods that correlations are + periods: The list of periods that correlations are calculated for. - latent_factors (list): List of latent factors the scores of which + latent_factors: List of latent factors the scores of which correlations are calculated for. - observed_factors (list): List of observed factors the scores of which + observed_factors: List of observed factors the scores of which correlations are calculated for. 
Returns: - df (pd.DataFrame): Processed DataFrame to calculate correlations over. + df: Processed DataFrame to calculate correlations over. """ if len(periods) == 1: @@ -500,18 +505,17 @@ def _get_quasi_factor_scores_data_for_single_period( """Get frame with summary scores on factor measurements in a given period. Args: - data (pd.DataFrame): Data with observable variables. - update_info_by_period (pd.DataFrame): DataFrame with information on measurements + data: Data with observable variables. + update_info_by_period: DataFrame with information on measurements for each factor in each user-provided period. - periods (list): The list of periods that correlations are - calculated for. - latent_factors (list): List of latent factors the scores of which + period: The period that correlations are calculated for. + latent_factors: List of latent factors the scores of which correlations are calculated for. - observed_factors (list): List of observed factors the scores of which + observed_factors: List of observed factors the scores of which correlations are calculated for. Returns: - df (pd.DataFrame): Processed DataFrame to calculate correlations over. + df: Processed DataFrame to calculate correlations over. """ period_info = update_info_by_period.loc[period].reset_index() @@ -544,18 +548,18 @@ def _get_quasi_factor_scores_data_for_multiple_periods( """Get frame with summary scores of factor measurements in a given period. Args: - data (pd.DataFrame): Data with observable variables. - update_info_by_period (pd.DataFrame): DataFrame with information on measurements + data: Data with observable variables. + update_info_by_period: DataFrame with information on measurements for each factor in each user-provided period. - periods (list): The list of periods that correlations are + periods: The list of periods that correlations are calculated for. - latent_factors (list): List of latent factors the scores of which + latent_factors: List of latent factors the scores of which correlations are calculated for. - observed_factors (list): List of observed factors the scores of which + observed_factors: List of observed factors the scores of which correlations are calculated for. Returns: - df (pd.DataFrame): Processed DataFrame to calculate correlations over. + df: Processed DataFrame to calculate correlations over. """ to_concat = [] @@ -590,19 +594,19 @@ def _get_factor_scores_data( a summary statistics. Args: - data (pd.DataFrame): Data with observable variables. - params (pd.DataFrame): Data frame with estimated measurement relevant + data: Data with observable variables. + params: Data frame with estimated measurement relevant model parameters. - model (dict): Processed model dict. - periods (list): The list of periods that correlations are + model: Processed model dict. + periods: The list of periods that correlations are calculated for. - latent_factors (list): List of latent factors the scores of which + latent_factors: List of latent factors the scores of which correlations are calculated for. - observed_factors (list): List of observed factors the scores of which + observed_factors: List of observed factors the scores of which correlations are calculated for. Returns: - df (pd.DataFrame): Processed DataFrame to calculate correlations over. + df: Processed DataFrame to calculate correlations over. """ if len(periods) == 1: @@ -643,18 +647,18 @@ def _get_factor_scores_data_for_single_period( augmented periods. Args: - data (pd.DataFrame): Data with observable variables. 
- params (pd.DataFrame): Data frame with estimated measurement relevant + data: Data with observable variables. + params: Data frame with estimated measurement relevant model parameters. - model (dict): Processed model dict. - period (int): The period that correlations are calculated for. - latent_factors (list): List of latent factors the scores of which + model: Processed model dict. + period: The period that correlations are calculated for. + latent_factors: List of latent factors the scores of which correlations are calculated for. - observed_factors (list): List of observed factors the scores of which + observed_factors: List of observed factors the scores of which correlations are calculated for. Returns: - df (pd.DataFrame): Processed DataFrame to calculate correlations over. + df: Processed DataFrame to calculate correlations over. """ aug_periods = model.endogenous_factors_info.aug_periods_from_period(period) @@ -695,19 +699,19 @@ def _get_factor_scores_data_for_single_model_period( In this function, all calculations are at the augmented period level. Args: - data (pd.DataFrame): Data with observable variables. - params (pd.DataFrame): Data frame with estimated measurement relevant - update_info (pd.DataFrame): DataFrame with information on measurements + data: Data with observable variables. + params: Data frame with estimated measurement relevant + update_info: DataFrame with information on measurements for each factor in each model period. - aug_period (int): The (augmented) period that correlations are calculated for. - period (int): The (raw) period that correlations are calculated for. - latent_factors (list): List of latent factors the scores of which + aug_period: The (augmented) period that correlations are calculated for. + period: The (raw) period that correlations are calculated for. + latent_factors: List of latent factors the scores of which correlations are calculated for. - observed_factors (list): List of observed factors the scores of which + observed_factors: List of observed factors the scores of which correlations are calculated for. Returns: - df (pd.DataFrame): Processed DataFrame to calculate correlations over. + df: Processed DataFrame to calculate correlations over. """ if aug_period not in update_info.index: return pd.DataFrame() @@ -750,18 +754,18 @@ def _get_factor_scores_data_for_multiple_periods( """Get frame with factor scores in a given period. Args: - data (pd.DataFrame): Data with observable variables. - params (pd.DataFrame): Data frame with estimated model parameters. - model (dict): Processed model dict. - periods (list): The list of periods that correlations are + data: Data with observable variables. + params: Data frame with estimated model parameters. + model: Processed model dict. + periods: The list of periods that correlations are calculated for. - latent_factors (list): List of latent factors the scores of which + latent_factors: List of latent factors the scores of which correlations are calculated for. - observed_factors (list): List of observed factors the scores of which + observed_factors: List of observed factors the scores of which correlations are calculated for. Returns: - df (pd.DataFrame): Processed DataFrame to calculate correlations over. + df: Processed DataFrame to calculate correlations over. """ to_concat = [] @@ -834,20 +838,20 @@ def _get_layout_kwargs( """Get kwargs to update figure layout. Args: - corr (DataFrame): The processed data frame with correlation coefficients. 
- layout_kwargs (dct): Dictionary of keyword arguments used to update layout of + corr: The processed data frame with correlation coefficients. + layout_kwargs: Dictionary of keyword arguments used to update layout of go.Figure object. - annotate (bool): Add annotations to the figure if True. - annotation_font_size (int): Fontsize of the annotation text. - annotation_font_color (str): Color of the annotation text. - annotation_text_angle (float): The angle at which to rotate annotation text. + annotate: Add annotations to the figure if True. + annotation_fontsize: Fontsize of the annotation text. + annotation_text_color: Color of the annotation text. + annotation_text_angle: The angle at which to rotate annotation text. axes_tick_fontsize(tuple,list or dict): Fontsizes of axes tick labels. axes_tick_label_angle(tuple,list or dict): The angle at which to rotate axes tick labels. - axes_tick_label_color(tuple,list or dict): Collor of axes labels. + axes_tick_label_color(tuple,list or dict): Color of axes labels. Returns: - default_layout_kwargs (dict): Dictionary to update figure layout. + default_layout_kwargs: Dictionary to update figure layout. """ default_layout_kwargs = { @@ -942,16 +946,17 @@ def _get_heatmap_kwargs( """Get kwargs to instantiate Heatmap object. Args: - heatmap_kwargs (dct): Dictionary of key word arguments to pass to go.Heatmap(). - colorscale (str): Name of the color palette to use in the heatmap. + corr: Data frame with correlation coefficients. + heatmap_kwargs: Dictionary of key word arguments to pass to go.Heatmap(). + colorscale: Name of the color palette to use in the heatmap. Default 'RdBu_r'. - show_color_bar (bool): A boolean variable for displayin heatmap colorbar. - zmax (float or None): Upper bound to set on correlation color map. - zmin (float or None): Lower bound to set on correlation color map. - zmid (float or None): Midpoint to set on correlation color map. + show_color_bar: A boolean variable for displaying heatmap colorbar. + zmax: Upper bound to set on correlation color map. + zmin: Lower bound to set on correlation color map. + zmid: Midpoint to set on correlation color map. Returns: - default_heatmap_kwargs (dict): Dictionary of kwargs to instantiate go.Heatmap. + default_heatmap_kwargs: Dictionary of kwargs to instantiate go.Heatmap. """ if zmax is None: diff --git a/src/skillmodels/decorators.py b/src/skillmodels/decorators.py index 3fc46f8f..6354e484 100644 --- a/src/skillmodels/decorators.py +++ b/src/skillmodels/decorators.py @@ -1,5 +1,5 @@ import functools -from collections.abc import Callable +from collections.abc import Callable # noqa: TC003 from typing import Any import jax.numpy as jnp @@ -17,9 +17,10 @@ def extract_params( Note: The resulting function is keyword only! Args: - key (str or None): If key is not None, we assume params is a dictionary of which + func: The function to be decorated, or None if using decorator with arguments. + key: If key is not None, we assume params is a dictionary of which only the params[key] should be passed into func. - names (list or None): If names is provided, we assume that params + names: If names is provided, we assume that params (or params[key]) should be converted to a dictionary with names as keys before passing them to func. 
diff --git a/src/skillmodels/filtered_states.py b/src/skillmodels/filtered_states.py index 6c80bd49..257fb680 100644 --- a/src/skillmodels/filtered_states.py +++ b/src/skillmodels/filtered_states.py @@ -1,8 +1,7 @@ -from typing import Any +from typing import TYPE_CHECKING, Any import jax.numpy as jnp import numpy as np -import pandas as pd from skillmodels.maximization_inputs import get_maximization_inputs from skillmodels.params_index import get_params_index @@ -10,6 +9,9 @@ from skillmodels.process_debug_data import create_state_ranges from skillmodels.process_model import process_model +if TYPE_CHECKING: + import pandas as pd + def get_filtered_states( model_dict: dict, diff --git a/src/skillmodels/kalman_filters.py b/src/skillmodels/kalman_filters.py index d1d57b36..725787aa 100644 --- a/src/skillmodels/kalman_filters.py +++ b/src/skillmodels/kalman_filters.py @@ -1,4 +1,4 @@ -from collections.abc import Callable +from collections.abc import Callable # noqa: TC003 import jax import jax.numpy as jnp @@ -12,11 +12,10 @@ else jax.vmap(jax.vmap(jnp.linalg.qr)) ) + # ====================================================================================== # Update Step # ====================================================================================== - - def kalman_update( states: Array, upper_chols: Array, @@ -30,26 +29,26 @@ def kalman_update( """Perform a Kalman update with likelihood evaluation. Args: - states (jax.numpy.array): Array of shape (n_obs, n_mixtures, n_states) with + states: Array of shape (n_obs, n_mixtures, n_states) with pre-update states estimates. - upper_chols (jax.numpy.array): Array of shape (n_obs, n_mixtures, n_states, + upper_chols: Array of shape (n_obs, n_mixtures, n_states, n_states) with the transpose of the lower triangular cholesky factor of the pre-update covariance matrix of the state estimates. - loadings (jax.numpy.array): 1d array of length n_states with factor loadings. - control_params (jax.numpy.array): 1d array of length n_controls. - meas_sd (float): Standard deviation of the measurement error. - measurements (jax.numpy.array): 1d array of length n_obs with measurements. + loadings: 1d array of length n_states with factor loadings. + control_params: 1d array of length n_controls. + meas_sd: Standard deviation of the measurement error. + measurements: 1d array of length n_obs with measurements. May contain NaNs if no measurement was observed. - controls (jax.numpy.array): Array of shape (n_obs, n_controls) with data on the + controls: Array of shape (n_obs, n_controls) with data on the control variables. - log_mixture_weights (jax.numpy.array): Array of shape (n_obs, n_mixtures) with + log_mixture_weights: Array of shape (n_obs, n_mixtures) with the natural logarithm of the weights of each element of the mixture of normals distribution. Returns: - states (jax.numpy.array): Same format as states. - new_states (jax.numpy.array): Same format as states. - new_upper_chols (jax.numpy.array): Same format as upper_chols + states: Same format as states. + new_states: Same format as states. 
+ new_upper_chols: Same format as upper_chols new_log_mixture_weights: (jax.numpy.array): Same format as log_mixture_weights new_loglikes: (jax.numpy.array): 1d array of length n_obs @@ -136,8 +135,6 @@ def kalman_update( # ====================================================================================== # Predict Step # ====================================================================================== - - def calculate_sigma_scaling_factor_and_weights( n_states: int, kappa: float = 2, @@ -148,8 +145,8 @@ def calculate_sigma_scaling_factor_and_weights( weights which makes the unscented predict step more complicated. Args: - n_states (int): Number of states. - kappa (float): Spreading factor of the sigma points. + n_states: Number of states. + kappa: Spreading factor of the sigma points. Returns: float: Scaling factor @@ -178,25 +175,27 @@ def kalman_predict( """Make a unscented Kalman predict. Args: - transition_func (Callable): The transition function. - states (jax.numpy.array): Array of shape (n_obs, n_mixtures, n_states) with + transition_func: The transition function. + states: Array of shape (n_obs, n_mixtures, n_states) with pre-update states estimates. - upper_chols (jax.numpy.array): Array of shape (n_obs, n_mixtures, n_states, + upper_chols: Array of shape (n_obs, n_mixtures, n_states, n_states) with the transpose of the lower triangular cholesky factor of the pre-update covariance matrix of the state estimates. - sigma_scaling_factor (float): A scaling factor that controls the spread of the + sigma_scaling_factor: A scaling factor that controls the spread of the sigma points. Bigger means that sigma points are further apart. Depends on the sigma_point algorithm chosen. - sigma_weights (jax.numpy.array): 1d array of length n_sigma with non-negative + sigma_weights: 1d array of length n_sigma with non-negative sigma weights. - trans_coeffs (tuple): Tuple of 1d jax.numpy.arrays with transition parameters. - anchoring_scaling_factors (jax.numpy.array): Array of shape (2, n_fac) with + trans_coeffs: Tuple of 1d jax.numpy.arrays with transition parameters. + shock_sds: 1d array of length n_fac with shock standard + deviations. + anchoring_scaling_factors: Array of shape (2, n_fac) with the scaling factors for anchoring. The first row corresponds to the input period, the second to the output period (i.e. input period + 1). - anchoring_constants (jax.numpy.array): Array of shape (2, n_states) with the + anchoring_constants: Array of shape (2, n_states) with the constants for anchoring. The first row corresponds to the input period, the second to the output period (i.e. input period + 1). - observed_factors (jax.numpy.array): Array of shape (n_obs, n_observed_factors) + observed_factors: Array of shape (n_obs, n_observed_factors) with data on the observed factors in period t. Returns: @@ -243,15 +242,15 @@ def _calculate_sigma_points( """Calculate the array of sigma_points for the unscented transform. Args: - states (jax.numpy.array): Array of shape (n_obs, n_mixtures, n_states) with + states: Array of shape (n_obs, n_mixtures, n_states) with pre-update states estimates. - upper_chols (jax.numpy.array): Array of shape (n_obs, n_mixtures, n_states, + upper_chols: Array of shape (n_obs, n_mixtures, n_states, n_states) with the transpose of the lower triangular cholesky factor of the pre-update covariance matrix of the state estimates. 
- scaling_factor (float): A scaling factor that controls the spread of the + scaling_factor: A scaling factor that controls the spread of the sigma points. Bigger means that sigma points are further apart. Depends on the sigma_point algorithm chosen. - observed_factors (jax.numpy.array): Array of shape (n_obs, n_observed_factors) + observed_factors: Array of shape (n_obs, n_observed_factors) with data on the observed factors in period t. Returns: @@ -294,13 +293,13 @@ def transform_sigma_points( """Anchor sigma points, transform them and unanchor the transformed sigma points. Args: - sigma_points (jax.numpy.array) of shape n_obs, n_mixtures, n_sigma, n_fac. - transition_func (Callable): The transition function. - trans_coeffs (tuple): Tuple of 1d jax.numpy.arrays with transition parameters. - anchoring_scaling_factors (jax.numpy.array): Array of shape (2, n_states) with + sigma_points: Array of shape n_obs, n_mixtures, n_sigma, n_fac. + transition_func: The transition function. + trans_coeffs: Tuple of 1d jax.numpy.arrays with transition parameters. + anchoring_scaling_factors: Array of shape (2, n_states) with the scaling factors for anchoring. The first row corresponds to the input period, the second to the output period (i.e. input period + 1). - anchoring_constants (jax.numpy.array): Array of shape (2, n_states) with the + anchoring_constants: Array of shape (2, n_states) with the constants for anchoring. The first row corresponds to the input period, the second to the output period (i.e. input period + 1). diff --git a/src/skillmodels/kalman_filters_debug.py b/src/skillmodels/kalman_filters_debug.py index c5cbab5d..c38a8ab6 100644 --- a/src/skillmodels/kalman_filters_debug.py +++ b/src/skillmodels/kalman_filters_debug.py @@ -10,8 +10,6 @@ # ====================================================================================== # Update Step # ====================================================================================== - - def kalman_update( states: Array, upper_chols: Array, @@ -25,29 +23,29 @@ def kalman_update( """Perform a Kalman update with likelihood evaluation, returning debug info on top. Args: - states (jax.numpy.array): Array of shape (n_obs, n_mixtures, n_states) with + states: Array of shape (n_obs, n_mixtures, n_states) with pre-update states estimates. - upper_chols (jax.numpy.array): Array of shape (n_obs, n_mixtures, n_states, + upper_chols: Array of shape (n_obs, n_mixtures, n_states, n_states) with the transpose of the lower triangular cholesky factor of the pre-update covariance matrix of the state estimates. - loadings (jax.numpy.array): 1d array of length n_states with factor loadings. - control_params (jax.numpy.array): 1d array of length n_controls. - meas_sd (float): Standard deviation of the measurement error. - measurements (jax.numpy.array): 1d array of length n_obs with measurements. + loadings: 1d array of length n_states with factor loadings. + control_params: 1d array of length n_controls. + meas_sd: Standard deviation of the measurement error. + measurements: 1d array of length n_obs with measurements. May contain NaNs if no measurement was observed. - controls (jax.numpy.array): Array of shape (n_obs, n_controls) with data on the + controls: Array of shape (n_obs, n_controls) with data on the control variables. - log_mixture_weights (jax.numpy.array): Array of shape (n_obs, n_mixtures) with + log_mixture_weights: Array of shape (n_obs, n_mixtures) with the natural logarithm of the weights of each element of the mixture of normals distribution. 
Returns: - states (jax.numpy.array): Same format as states. - new_states (jax.numpy.array): Same format as states. - new_upper_chols (jax.numpy.array): Same format as upper_chols + states: Same format as states. + new_states: Same format as states. + new_upper_chols: Same format as upper_chols new_log_mixture_weights: (jax.numpy.array): Same format as log_mixture_weights new_loglikes: (jax.numpy.array): 1d array of length n_obs - debug_info (dict): Empty or containing residuals and residual_sds + debug_info: Empty or containing residuals and residual_sds """ n_obs, n_mixtures, n_states = states.shape diff --git a/src/skillmodels/likelihood_function.py b/src/skillmodels/likelihood_function.py index ead81beb..4c7d7c8d 100644 --- a/src/skillmodels/likelihood_function.py +++ b/src/skillmodels/likelihood_function.py @@ -1,5 +1,5 @@ import functools -from collections.abc import Callable +from collections.abc import Callable # noqa: TC003 from typing import Any import jax @@ -12,7 +12,7 @@ kalman_update, ) from skillmodels.parse_params import parse_params -from skillmodels.types import Dimensions, EstimationOptions, Labels +from skillmodels.types import Dimensions, EstimationOptions, Labels # noqa: TC001 def log_likelihood( @@ -31,6 +31,38 @@ def log_likelihood( iteration_to_period: Array, observed_factors: Array, ) -> Array: + """Aggregated log likelihood of a skill formation model. + + Wrapper around log_likelihood_obs that sums contributions across observations. + + Args: + params: 1d array with model parameters. + parsing_info: Contains information how to parse parameter vector. + measurements: Array of shape (n_updates, n_obs) with data on + observed measurements. NaN if the measurement was not observed. + controls: Array of shape (n_periods, n_obs, n_controls) + with observed control variables for the measurement equations. + transition_func: The transition function. + sigma_scaling_factor: A scaling factor that controls the spread of the + sigma points. + sigma_weights: 1d array of length n_sigma with non-negative sigma weights. + dimensions: Dimensional information like n_states, n_periods, n_controls, + n_mixtures. + labels: Labels for the model quantities like factors, periods, controls, + stagemap and stages. + estimation_options: Options for estimation including clipping bounds. + is_measurement_iteration: Boolean array indicating which iterations are + measurement updates. + is_predict_iteration: Boolean array indicating which iterations are predict + steps. + iteration_to_period: Array mapping iteration index to period. + observed_factors: Array of shape (n_periods, n_obs, n_observed_factors) with + data on the observed factors. + + Returns: + Scalar aggregated log likelihood. + + """ return log_likelihood_obs( params=params, parsing_info=parsing_info, @@ -77,25 +109,29 @@ def log_likelihood_obs( with Jax. Args: - params (jax.numpy.array): 1d array with model parameters. - parsing_info (dict): Contains information how to parse parameter vector. - update_info (pandas.DataFrame): Contains information about number of updates in - each period and purpose of each update. - measurements (jax.numpy.array): Array of shape (n_updates, n_obs) with data on + params: 1d array with model parameters. + parsing_info: Contains information how to parse parameter vector. + measurements: Array of shape (n_updates, n_obs) with data on observed measurements. NaN if the measurement was not observed. 
- controls (jax.numpy.array): Array of shape (n_periods, n_obs, n_controls) + controls: Array of shape (n_periods, n_obs, n_controls) with observed control variables for the measurement equations. - transition_func (Callable): The transition function. - sigma_scaling_factor (float): A scaling factor that controls the spread of the + transition_func: The transition function. + sigma_scaling_factor: A scaling factor that controls the spread of the sigma points. Bigger means that sigma points are further apart. Depends on the sigma_point algorithm chosen. - sigma_weights (jax.numpy.array): 1d array of length n_sigma with non-negative + sigma_weights: 1d array of length n_sigma with non-negative sigma weights. - dimensions (dict): Dimensional information like n_states, n_periods, n_controls, + dimensions: Dimensional information like n_states, n_periods, n_controls, n_mixtures. See :ref:`dimensions`. - labels (dict): Dict of lists with labels for the model quantities like + labels: Dict of lists with labels for the model quantities like factors, periods, controls, stagemap and stages. See :ref:`labels` - observed_factors (jax.numpy.array): Array of shape (n_periods, n_obs, + estimation_options: Options for estimation including clipping bounds. + is_measurement_iteration: Boolean array indicating which + iterations are measurement updates. + is_predict_iteration: Boolean array indicating which + iterations are predict steps. + iteration_to_period: Array mapping iteration index to period. + observed_factors: Array of shape (n_periods, n_obs, n_observed_factors) with data on the observed factors. Returns: diff --git a/src/skillmodels/likelihood_function_debug.py b/src/skillmodels/likelihood_function_debug.py index c1d4d64e..1bb448cb 100644 --- a/src/skillmodels/likelihood_function_debug.py +++ b/src/skillmodels/likelihood_function_debug.py @@ -1,6 +1,6 @@ import functools -from collections.abc import Callable -from typing import TYPE_CHECKING, Any +from collections.abc import Callable # noqa: TC003 +from typing import Any import jax import jax.numpy as jnp @@ -10,9 +10,7 @@ from skillmodels.kalman_filters import kalman_predict from skillmodels.kalman_filters_debug import kalman_update from skillmodels.parse_params import parse_params - -if TYPE_CHECKING: - from skillmodels.types import Dimensions, EstimationOptions, Labels +from skillmodels.types import Dimensions, EstimationOptions, Labels # noqa: TC001 def log_likelihood( @@ -36,39 +34,33 @@ def log_likelihood( This function is jax-differentiable and jax-jittable as long as all but the first argument are marked as static. - The function returns both a tuple (float, dict). The first entry is the aggregated - log likelihood value. The second additional information like the log likelihood - contribution of each individual. Note that the dict also contains the aggregated - value. Returning that value separately is only needed to calculate a gradient with - Jax. - Args: - params (jax.numpy.array): 1d array with model parameters. parsing_info (dict): - Contains information how to parse parameter vector. update_info - (pandas.DataFrame): Contains information about number of updates in - each period and purpose of each update. - measurements (jax.numpy.array): Array of shape (n_updates, n_obs) with data on - observed measurements. NaN if the measurement was not observed. - controls (jax.numpy.array): Array of shape (n_periods, n_obs, n_controls) - with observed control variables for the measurement equations. 
- transition_func (dict): Dict with the entries "func" (the actual transition - function) and "columns" (a dictionary mapping factors that are needed as - individual columns to positions in the factor array). - sigma_scaling_factor (float): A scaling factor that controls the spread of the - sigma points. Bigger means that sigma points are further apart. Depends on - the sigma_point algorithm chosen. - sigma_weights (jax.numpy.array): 1d array of length n_sigma with non-negative - sigma weights. - dimensions (dict): Dimensional information like n_states, n_periods, n_controls, - n_mixtures. See :ref:`dimensions`. - labels (dict): Dict of lists with labels for the model quantities like - factors, periods, controls, stagemap and stages. See :ref:`labels` - observed_factors (jax.numpy.array): Array of shape (n_periods, n_obs, - n_observed_factors) with data on the observed factors. + params: 1d array with model parameters. + parsing_info: Contains information how to parse parameter vector. + measurements: Array of shape (n_updates, n_obs) with data on observed + measurements. NaN if the measurement was not observed. + controls: Array of shape (n_periods, n_obs, n_controls) with observed + control variables for the measurement equations. + transition_func: The transition function. + sigma_scaling_factor: A scaling factor that controls the spread of the + sigma points. Bigger means that sigma points are further apart. + sigma_weights: 1d array of length n_sigma with non-negative sigma weights. + dimensions: Dimensional information like n_states, n_periods, n_controls, + n_mixtures. + labels: Labels for the model quantities like factors, periods, controls, + stagemap and stages. + estimation_options: Options for estimation including clipping bounds. + is_measurement_iteration: Boolean array indicating which iterations are + measurement updates. + is_predict_iteration: Boolean array indicating which iterations are predict + steps. + iteration_to_period: Array mapping iteration index to period. + observed_factors: Array of shape (n_periods, n_obs, n_observed_factors) with + data on the observed factors. Returns: - dict: All data relevant for debugging, e.g. the log likelihood contribution of - each Kalman update and additional information like the filtered states. + All data relevant for debugging, e.g. the log likelihood contribution of + each Kalman update and additional information like the filtered states. 
""" n_obs = measurements.shape[1] diff --git a/src/skillmodels/maximization_inputs.py b/src/skillmodels/maximization_inputs.py index 9bfa71a7..5495cbf5 100644 --- a/src/skillmodels/maximization_inputs.py +++ b/src/skillmodels/maximization_inputs.py @@ -1,13 +1,12 @@ import functools -from collections.abc import Callable -from typing import Any +from collections.abc import Callable # noqa: TC003 +from typing import TYPE_CHECKING, Any import jax import jax.numpy as jnp import numpy as np import pandas as pd from jax import Array -from numpy.typing import NDArray import skillmodels.likelihood_function as lf import skillmodels.likelihood_function_debug as lfd @@ -23,7 +22,11 @@ from skillmodels.process_data import process_data from skillmodels.process_debug_data import process_debug_data from skillmodels.process_model import process_model -from skillmodels.types import ProcessedModel + +if TYPE_CHECKING: + from numpy.typing import NDArray + + from skillmodels.types import ProcessedModel jax.config.update("jax_enable_x64", True) # noqa: FBT003 @@ -36,34 +39,32 @@ def get_maximization_inputs( """Create inputs for optimagic's maximize function. Args: - model_dict (dict): The model specification. See: :ref:`model_specs` - data (DataFrame): dataset in long format. + model_dict: The model specification. See: :ref:`model_specs` + data: dataset in long format. split_dataset(Int): Controls into how many sclices to split the dataset during the gradient computation. Returns a dictionary with keys: - loglike (function): A jax jitted function that takes an optimagic-style + loglike: A jax jitted function that takes an optimagic-style params dataframe as only input and returns a dict with entries: - "value": The scalar log likelihood - "contributions": An array with the log likelihood per observation - debug_loglike (function): Similar to loglike, with the following differences: + debug_loglike: Similar to loglike, with the following differences: - It is not jitted and thus faster on the first call and debuggable - It will add intermediate results as additional entries in the returned dictionary. Those can be used for debugging and plotting. - gradient (function): The gradient of the scalar log likelihood + gradient: The gradient of the scalar log likelihood function with respect to the parameters. - loglike_and_gradient (function): Combination of loglike and + loglike_and_gradient: Combination of loglike and loglike_gradient that is faster than calling the two functions separately. - constraints (list): List of optimagic constraints that are implied by the + constraints: List of optimagic constraints that are implied by the model specification. - params_template (pd.DataFrame): Parameter DataFrame with correct index and + params_template: Parameter DataFrame with correct index and bounds. The value column is empty except for the fixed constraints, which are set including the bounds. - data_aug (pd.DataFrame): DataFrame with augmented data. If model contains + data_aug: DataFrame with augmented data. 
If model contains endogenous factors, we double up the number of periods in order to add - - """ model = process_model(model_dict) p_index = get_params_index( diff --git a/src/skillmodels/params_index.py b/src/skillmodels/params_index.py index d997f602..8328387f 100644 --- a/src/skillmodels/params_index.py +++ b/src/skillmodels/params_index.py @@ -1,11 +1,14 @@ +from typing import TYPE_CHECKING + import pandas as pd -from skillmodels.types import ( - Dimensions, - EndogenousFactorsInfo, - Labels, - TransitionInfo, -) +if TYPE_CHECKING: + from skillmodels.types import ( + Dimensions, + EndogenousFactorsInfo, + Labels, + TransitionInfo, + ) def get_params_index( diff --git a/src/skillmodels/parse_params.py b/src/skillmodels/parse_params.py index e10b61f0..ad6fcc4f 100644 --- a/src/skillmodels/parse_params.py +++ b/src/skillmodels/parse_params.py @@ -1,12 +1,13 @@ import warnings -from typing import Any +from typing import TYPE_CHECKING, Any import jax.numpy as jnp import numpy as np import pandas as pd from jax import Array -from skillmodels.types import Anchoring, Dimensions, Labels +if TYPE_CHECKING: + from skillmodels.types import Anchoring, Dimensions, Labels def create_parsing_info( @@ -19,14 +20,14 @@ def create_parsing_info( """Create a dictionary with information how the parameter vector has to be parsed. Args: - params_index (pandas.MultiIndex): It has the levels ["category", "aug_period", + params_index: It has the levels ["category", "aug_period", "name1", "name2"] - update_info (pandas.DataFrame): DataFrame with one row per Kalman update needed + update_info: DataFrame with one row per Kalman update needed in the likelihood function. See :ref:`update_info`. - labels (dict): Dict of lists with labels for the model quantities like + labels: Dict of lists with labels for the model quantities like factors, periods, controls, stagemap and stages. See :ref:`labels` - anchoring (dict): Dictionary with anchoring settings. - has_endogenous_factors (bool): Whether the model includes endogenous factors. + anchoring: Dictionary with anchoring settings. + has_endogenous_factors: Whether the model includes endogenous factors. Returns: dict: dictionary that maps model quantities to positions or slices of the @@ -119,12 +120,14 @@ def parse_params( """Parse params into the quantities that depend on it. Args: - params (jax.numpy.array): 1d array with model parameters. - parsing_info (dict): Dictionary with information on how the parameters + params: 1d array with model parameters. + parsing_info: Dictionary with information on how the parameters have to be parsed. - dimensions (dict): Dimensional information like n_states, n_periods, n_controls, + dimensions: Dimensional information like n_states, n_periods, n_controls, n_mixtures. See :ref:`dimensions`. - n_obs (int): Number of observations. + labels: Dict of lists with labels for the model quantities like + factors, periods, controls, stagemap and stages. See :ref:`labels` + n_obs: Number of observations. Returns: jax.numpy.array: Array of shape (n_obs, n_mixtures, n_states) with initial diff --git a/src/skillmodels/process_data.py b/src/skillmodels/process_data.py index 0337bd65..ec7d9c4a 100644 --- a/src/skillmodels/process_data.py +++ b/src/skillmodels/process_data.py @@ -21,24 +21,24 @@ def process_data( """Process the data for estimation. Args: - df (DataFrame): panel dataset in long format. It has a MultiIndex + df: panel dataset in long format. It has a MultiIndex where the first level indicates the period and the second the individual. 
- has_endogenous_factors (bool):
- labels (dict): Dict of lists with labels for the model quantities like
+ has_endogenous_factors: Whether the model includes endogenous factors.
+ labels: Dict of lists with labels for the model quantities like
factors, periods, controls, stagemap and stages. See :ref:`labels`
- update_info (pandas.DataFrame): DataFrame with one row per Kalman update needed
+ update_info: DataFrame with one row per Kalman update needed
in the likelihood function. See :ref:`update_info`.
- anchoring_qinfo (dict): Information about anchoring. See :ref:`anchoring`
- purpose (Literal["estimation", "anything"]): Whether the data is used for
+ anchoring_info: Information about anchoring. See :ref:`anchoring`
+ purpose: Whether the data is used for
estimation (default, includes measurement data) or not.

Returns a dictionary with keys:
- measurements (jax.numpy.array): Array of shape (n_updates, n_obs) with data on
+ measurements: Array of shape (n_updates, n_obs) with data on
observed measurements. NaN if the measurement was not observed. Only
returned if purpose=="estimation"
- controls (jax.numpy.array): Array of shape (n_periods, n_obs, n_controls) with
+ controls: Array of shape (n_periods, n_obs, n_controls) with
observed control variables for the measurement equations.
- observed_factors (jax.numpy.array): Array of shape
+ observed_factors: Array of shape
(n_periods, n_obs, n_observed_factors) with data on the observed factors.
Only returned if purpose=="estimation"

@@ -71,12 +71,13 @@ def pre_process_data(
"""Balance panel data in long format, drop unnecessary periods and set index.

Args:
- df (DataFrame): panel dataset in long format. It has a MultiIndex
+ df: panel dataset in long format. It has a MultiIndex
where the first level indicates the period and the second
the individual.
+ periods: The periods to keep in the balanced panel.

Returns:
- balanced (DataFrame): balanced panel. It has a MultiIndex. The first
+ balanced: balanced panel. It has a MultiIndex. The first level
enumerates individuals. The second level counts periods, starting at 0.

"""
diff --git a/src/skillmodels/process_debug_data.py b/src/skillmodels/process_debug_data.py
index 48d479e1..bea50c1c 100644
--- a/src/skillmodels/process_debug_data.py
+++ b/src/skillmodels/process_debug_data.py
@@ -2,10 +2,11 @@
import numpy as np
import pandas as pd
-from jax import Array
-from numpy.typing import NDArray

if TYPE_CHECKING:
+ from jax import Array
+ from numpy.typing import NDArray
+
from skillmodels.types import ProcessedModel


@@ -16,26 +17,26 @@ def process_debug_data(
"""Process the raw debug data into pandas objects that make visualization easy.

Args:
- debug_data (dict): Dictionary containing the following entries (
+ debug_data: Dictionary containing the following entries (
and potentially others which are not modified):
- - filtered_states (jax.numpy.array): Array of shape (n_updates, n_obs,
+ - filtered_states: Array of shape (n_updates, n_obs,
n_mixtures, n_states) containing the filtered states after each Kalman
update.
- - initial_states (jax.numpy.array): Array of shape (n_obs, n_mixtures, n_states)
+ - initial_states: Array of shape (n_obs, n_mixtures, n_states)
with the state estimates before the first Kalman update.
- - residuals (jax.numpy.array): Array of shape (n_updates, n_obs, n_mixtures)
+ - residuals: Array of shape (n_updates, n_obs, n_mixtures)
containing the residuals of a Kalman update.
- - residual_sds (jax.numpy.ndarray): Array of shape (n_updates, n_obs, + - residual_sds: Array of shape (n_updates, n_obs, n_mixtures) containing the theoretical standard deviation of the residuals. - - all_contributions (jax.numpy.array): Array of shape (n_updates, n_obs) with + - all_contributions: Array of shape (n_updates, n_obs) with the likelihood contributions per update and individual. - - log_mixture_weights (jax.numpy.array): Array of shape (n_updates, n_obs, + - log_mixture_weights: Array of shape (n_updates, n_obs, n_mixtures) containing the log mixture weights after each update. - - initial_log_mixture_weights (jax.numpy.array): Array of shape (n_obs, + - initial_log_mixture_weights: Array of shape (n_obs, n_mixtures) containing the log mixture weights before the first kalman update. - model (dict): Processed model dictionary. + model: Processed model dictionary. Returns: dict: Dictionary with processed debug data. It has the following entries: @@ -46,16 +47,16 @@ def process_debug_data( after the last update of each period. The columns are the factor names, "period" and "id". The filtered states are already aggregated over mixture distributions. - - state_ranges (dict): The keys are the names of the latent factors. + - state_ranges: The keys are the names of the latent factors. The values are DataFrames with the columns "period", "minimum", "maximum". Note that this aggregates over mixture distributions. - - residuals (pd.DataFrame): Tidy DataFrame with residuals of each Kalman update. + - residuals: Tidy DataFrame with residuals of each Kalman update. Columns are "residual", "mixture", "period", "measurement" and "id". "period" and "measurement" identify the Kalman update to which the residual belongs. - - residual_sds (pd.DataFrame): As residuals but containing the theoretical + - residual_sds: As residuals but containing the theoretical standard deviation of the corresponding residual. - - all_contributions (pd.DataFrame): Tidy DataFrame with log likelihood + - all_contributions: Tidy DataFrame with log likelihood contribution per individual and Kalman Update. The columns are "contribution", "period", "measurement" and "id". "period" and "measurement" identify the Kalman Update to which the likelihood contribution corresponds. @@ -130,8 +131,8 @@ def _convert_state_array_to_df( """Convert a 3d state array into a 2d DataFrame. Args: - arr (np.ndarray): Array of shape (n_obs, n_mixtures, n_states) - factor_names (list): Names of the latent factors. + arr: Array of shape (n_obs, n_mixtures, n_states) + factor_names: Names of the latent factors. """ n_obs, n_mixtures, n_states = arr.shape df = pd.DataFrame(data=arr.reshape(-1, n_states), columns=list(factor_names)) diff --git a/src/skillmodels/process_model.py b/src/skillmodels/process_model.py index d73bde2b..7f5af946 100644 --- a/src/skillmodels/process_model.py +++ b/src/skillmodels/process_model.py @@ -1,7 +1,6 @@ -from collections.abc import KeysView, Mapping from copy import deepcopy from functools import partial -from typing import Any, Literal +from typing import TYPE_CHECKING, Any, Literal import numpy as np import pandas as pd @@ -25,6 +24,9 @@ TransitionInfo, ) +if TYPE_CHECKING: + from collections.abc import KeysView, Mapping + pd.set_option("future.no_silent_downcasting", True) # noqa: FBT003 @@ -36,19 +38,19 @@ def process_model(model_dict: dict) -> ProcessedModel: Set default values and extend the model specification where necessary. Args: - model_dict (dict): The model specification. 
See: :ref:`model_specs`
+ model_dict: The model specification. See: :ref:`model_specs`

Returns:
dict: nested dictionary of model specs. It has the following entries:

- - dimensions (dict): Dimensional information like n_states, n_periods,
+ - dimensions: Dimensional information like n_states, n_periods,
n_controls, n_mixtures. See :ref:`dimensions`.
- - labels (dict): Dict of lists with labels for the model quantities like
+ - labels: Dict of lists with labels for the model quantities like
factors, periods, controls, stagemap and stages. See :ref:`labels`
- - anchoring (dict): Information about anchoring. See :ref:`anchoring`
- - transition_info (dict): Everything related to transition functions.
- - update_info (pandas.DataFrame): DataFrame with one row per Kalman update
+ - anchoring: Information about anchoring. See :ref:`anchoring`
+ - transition_info: Everything related to transition functions.
+ - update_info: DataFrame with one row per Kalman update
needed in the likelihood function. See :ref:`update_info`.
- - normalizations (dict): Nested dictionary with information on normalized factor
+ - normalizations: Nested dictionary with information on normalized factor
loadings and intercepts for each factor. See :ref:`normalizations`.

"""
diff --git a/src/skillmodels/simulate_data.py b/src/skillmodels/simulate_data.py
index 13ef693b..1ab93575 100644
--- a/src/skillmodels/simulate_data.py
+++ b/src/skillmodels/simulate_data.py
@@ -1,7 +1,6 @@
"""Functions to simulate a dataset generated by a latent factor model."""

import warnings
-from collections.abc import Mapping
from typing import TYPE_CHECKING

import jax.numpy as jnp
@@ -9,11 +8,14 @@
import pandas as pd
from jax import Array
from numpy.random import choice, multivariate_normal
-from numpy.typing import NDArray

from skillmodels.filtered_states import anchor_states_df

if TYPE_CHECKING:
+ from collections.abc import Mapping
+
+ from numpy.typing import NDArray
+
from skillmodels.types import (
Dimensions,
EndogenousFactorsInfo,
@@ -38,20 +40,20 @@ def simulate_dataset(
"""Simulate datasets generated by a latent factor model.

Args:
- model_dict (dict): The model specification. See: :ref:`model_specs`
- params (pandas.DataFrame): DataFrame with model parameters.
- n_obs (int): Number of simulated individuals
- data (pd.DataFrame): Dataset in the same format as for estimation, containing
+ model_dict: The model specification. See: :ref:`model_specs`
+ params: DataFrame with model parameters.
+ n_obs: Number of simulated individuals.
+ data: Dataset in the same format as for estimation, containing
information about observed factors and control variables.
- policies (list): list of dictionaries. Each dictionary specifies a
+ policies: list of dictionaries. Each dictionary specifies
a stochastic shock to a latent factor AT THE END of "period" for "factor"
with mean "effect_size" and "standard deviation"

Returns:
- observed_data (pd.DataFrame): Dataset with measurements and control variables
+ observed_data: Dataset with measurements and control variables
in long format
- latent_data (pd.DataFrame): Dataset with latent factors in long format
+ latent_data: Dataset with latent factors in long format

"""
if data is None and n_obs is None:
@@ -197,10 +199,25 @@ def _simulate_dataset(
"""Simulate datasets generated by a latent factor model.

Args:
- See simulate_data
+ latent_states: Array of shape (n_obs, n_mixtures, n_states) with initial
+ state estimates.
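For concreteness, a `policies` argument as described in the simulate_dataset docstring above could look like this (the factor name and numbers are made up; the keys follow the wording of the docstring, so check the implementation for the exact spelling):

    policies = [
        {
            # Shock hits "fac1" at the end of period 1 ...
            "period": 1,
            "factor": "fac1",
            # ... drawn from a normal with this mean and standard deviation.
            "effect_size": 0.2,
            "standard deviation": 0.1,
        },
    ]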
+ covs: Array of shape (n_obs, n_mixtures, n_states, n_states) with initial
+ covariance matrices.
+ log_weights: Array of shape (n_obs, n_mixtures) with log mixture weights.
+ pardict: Dictionary with parsed parameters.
+ labels: Labels for the model quantities like factors, periods, controls.
+ dimensions: Dimensional information like n_states, n_periods, n_controls.
+ n_obs: Number of observations.
+ has_endogenous_factors: Whether the model includes endogenous factors.
+ update_info: DataFrame with information on measurements for each period.
+ control_data: Array of shape (n_periods, n_obs, n_controls) with controls.
+ observed_factors: Array of shape (n_periods, n_obs, n_observed_factors).
+ policies: List of policy dictionaries specifying stochastic shocks.
+ transition_info: Information about transition functions.

Returns:
- See simulate_data
+ observed_data: DataFrame with simulated measurements.
+ latent_data: DataFrame with simulated latent factors.

"""
policies = policies if policies is not None else []
@@ -332,10 +349,10 @@ def _collapse_aug_periods_to_periods(
For each factor, extracts from the appropriate aug_period based on is_endogenous.

Args:
- df (pd.DataFrame): DataFrame with columns "aug_period" and "id"
- latent_factors (list): List of latent factors
- aug_periods_to_periods (dict): Mapping from aug_period to period
- endogenous_factors_info (dict): Information about which factors are endogenous
+ df: DataFrame with columns "aug_period" and "id"
+ factors: Tuple of latent factors
+ aug_periods_to_periods: Mapping from aug_period to period
+ endogenous_factors_info: Information about which factors are endogenous

Returns:
pd.DataFrame: DataFrame with "period" column instead of "aug_period"

@@ -375,12 +392,12 @@ def _get_shock(
"""Add stochastic effect to a factor of length n_obs.

Args:
- mean (float): mean of the stochastic effect
- sd (float): standard deviation of the effect
- size (int): length of resulting array
+ mean: mean of the stochastic effect
+ sd: standard deviation of the effect
+ size: length of resulting array

Returns:
- shock (np.array): 1d array of length n_obs with the stochastic shock
+ shock: 1d array of length size with the stochastic shock

"""
if sd == 0:
@@ -401,15 +418,16 @@ def generate_start_states(
"""Draw initial states and control variables from a (mixture of) normals.

Args:
- n_obs (int): number of observations
- dimensions (dict): Dimensional information like n_states, n_periods, n_controls,
+ n_obs: number of observations
+ dimensions: Dimensional information like n_states, n_periods, n_controls,
n_mixtures. See :ref:`dimensions`.
- dist_args (list): list of dicts of length nmixtures of dictionaries with the
+ dist_args: list of length n_mixtures of dictionaries with the
entries "mean" and "cov" for each mixture distribution.
+ weights: Array of mixture weights.

Returns:
- start_states (np.ndarray): shape (n_obs, n_states),
- controls (np.ndarray): shape (n_obs, n_controls),
+ start_states: shape (n_obs, n_states),
+ controls: shape (n_obs, n_controls),

"""
n_states = dimensions.n_latent_factors
@@ -437,17 +455,17 @@ def measurements_from_states(
of measurements in that period.
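The linear measurement system this function documents can be sketched in a few lines (illustrative only, not the package's implementation; argument names follow the Args section below, and arrays are assumed to be plain numpy arrays):

    import numpy as np

    def measurements_sketch(states, controls, loadings, control_params, sds, seed=0):
        # One measurement equation per row of `loadings`:
        #   y = states @ loadings.T + controls @ control_params.T + noise,
        # with independent N(0, sds[m]) measurement error for measurement m.
        rng = np.random.default_rng(seed)
        noise = rng.normal(0.0, sds, size=(states.shape[0], loadings.shape[0]))
        return states @ loadings.T + controls @ control_params.T + noise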
Args:
- states (pd.DataFrame or np.ndarray): DataFrame of shape (n_obs, n_states)
- controls (pd.DataFrame or np.ndarray): DataFrame of shape
- (n_obs, n_controlsrols)
- loadings (np.ndarray): numpy array of size (n_meas, n_states)
- control_coeffs (np.ndarray): numpy array of size (n_meas, n_states)
- sds (np.ndarray): numpy array of size (n_meas) with the standard deviations
+ states: DataFrame of shape (n_obs, n_states)
+ controls: DataFrame of shape
+ (n_obs, n_controls)
+ loadings: numpy array of size (n_meas, n_states)
+ control_params: numpy array of size (n_meas, n_controls)
+ sds: numpy array of size (n_meas) with the standard deviations
of the measurements. Measurement error is assumed to be independent across
measurements.

Returns:
- measurements (np.ndarray): array of shape (n_obs, n_meas) with measurements.
+ measurements: array of shape (n_obs, n_meas) with measurements.

"""
n_meas = loadings.shape[0]
diff --git a/src/skillmodels/transition_functions.py b/src/skillmodels/transition_functions.py
index 686749f0..89a5b504 100644
--- a/src/skillmodels/transition_functions.py
+++ b/src/skillmodels/transition_functions.py
@@ -14,8 +14,6 @@
Returns:
* float

-
-
**names_example_func(** *factors* **)**: Generate a list of names for the
params of the transition function.
diff --git a/src/skillmodels/utilities.py b/src/skillmodels/utilities.py
index ad499f28..4db48a90 100644
--- a/src/skillmodels/utilities.py
+++ b/src/skillmodels/utilities.py
@@ -23,9 +23,9 @@ def extract_factors(
If provided, a params DataFrame is also reduced correspondingly.

Args:
- factors (str or list): Name(s) of the factor(s) to extract.
- model_dict (dict): The model specification. See: :ref:`model_specs`.
- params (pandas.DataFrame or None): The params DataFrame for the full model.
+ factors: Name(s) of the factor(s) to extract.
+ model_dict: The model specification. See: :ref:`model_specs`.
+ params: The params DataFrame for the full model.

Returns:
dict: The reduced model dictionary

@@ -47,8 +47,8 @@ def update_parameter_values(
"""Update the "value" column of params with values from other.

Args:
- params (pandas.DataFrame or None): The params DataFrame for the full model.
- others (pandas.DataFrame or list): Another DataFrame with parameters or list
+ params: The params DataFrame for the full model.
+ others: Another DataFrame with parameters or a list
thereof. The values from other are used to update the value
column of ``params``. If other is a list, the updates will be in order, i.e.
later elements overwrite earlier ones.
@@ -88,9 +88,9 @@ def remove_factors(
This happens if the remaining factors do not have measurements in later periods.

Args:
- factors (str or list): Name(s) of the factor(s) to remove.
- model_dict (dict): The model specification. See: :ref:`model_specs`.
- params (pandas.DataFrame or None): The params DataFrame for the full model.
+ factors: Name(s) of the factor(s) to remove.
+ model_dict: The model specification. See: :ref:`model_specs`.
+ params: The params DataFrame for the full model.

Returns:
dict: The reduced model dictionary

@@ -136,9 +136,9 @@ def remove_measurements(
If provided, a params DataFrame is also reduced correspondingly.

Args:
- measurements (str or list): Name(s) of the measurement(s) to remove.
- model_dict (dict): The model specification. See: :ref:`model_specs`.
- params (pandas.DataFrame or None): The params DataFrame for the full model.
+ measurements: Name(s) of the measurement(s) to remove.
+ model_dict: The model specification. See: :ref:`model_specs`.
+ params: The params DataFrame for the full model.

Returns:
dict: The reduced model dictionary

@@ -187,9 +187,9 @@ def remove_controls(
If provided, a params DataFrame is also reduced correspondingly.

Args:
- controls (str or list): Name(s) of the contral variable(s) to remove.
- model_dict (dict): The model specification. See: :ref:`model_specs`.
- params (pandas.DataFrame or None): The params DataFrame for the full model.
+ controls: Name(s) of the control variable(s) to remove.
+ model_dict: The model specification. See: :ref:`model_specs`.
+ params: The params DataFrame for the full model.

Returns:
dict: The reduced model dictionary

@@ -218,8 +218,8 @@ def switch_translog_to_linear(
If provided, a params DataFrame is also reduced correspondingly.

Args:
- model_dict (dict): The model specification. See: :ref:`model_specs`.
- params (pandas.DataFrame or None): The params DataFrame for the full model.
+ model_dict: The model specification. See: :ref:`model_specs`.
+ params: The params DataFrame for the full model.

Returns:
dict: The reduced model dictionary

@@ -251,8 +251,8 @@ def switch_linear_to_translog(
the additional parameters are not initialized at zero.

Args:
- model_dict (dict): The model specification. See: :ref:`model_specs`.
- params (pandas.DataFrame or None): The params DataFrame for the full model.
+ model_dict: The model specification. See: :ref:`model_specs`.
+ params: The params DataFrame for the full model.

Returns:
dict: The reduced model dictionary

@@ -278,9 +278,9 @@ def reduce_n_periods(
"""Remove all periods after n_periods.

Args:
- model_dict (dict): The model specification. See: :ref:`model_specs`.
- new_n_periods (int): The new number of periods.
- params (pandas.DataFrame or None): The params DataFrame for the full model.
+ model_dict: The model specification. See: :ref:`model_specs`.
+ new_n_periods: The new number of periods.
+ params: The params DataFrame for the full model.

Returns:
dict: The reduced model dictionary

@@ -345,9 +345,9 @@ def _reduce_params(
DataFrame was constructed.

Args:
- params (pandas.DataFrame or None): The params DataFrame for the full model.
- model_dict (dict): The model specification. See: :ref:`model_specs`.
- has_endogenous_factors (bool): Whether the model has endogenous factors.
+ params: The params DataFrame for the full model.
+ model_dict: The model specification. See: :ref:`model_specs`.
+ has_endogenous_factors: Whether the model has endogenous factors.

Returns:
pandas.DataFrame: The reduced parameters DataFrame.
diff --git a/src/skillmodels/visualize_factor_distributions.py b/src/skillmodels/visualize_factor_distributions.py
index 431b0d47..6ca1fc87 100644
--- a/src/skillmodels/visualize_factor_distributions.py
+++ b/src/skillmodels/visualize_factor_distributions.py
@@ -1,5 +1,4 @@
import warnings
-from collections.abc import Mapping
from copy import deepcopy
from typing import TYPE_CHECKING, Any

@@ -8,7 +7,6 @@
import plotly.express as px
import plotly.figure_factory as ff
import plotly.graph_objects as go
-from numpy.typing import NDArray
from plotly.subplots import make_subplots
from scipy.stats import gaussian_kde

@@ -17,6 +15,10 @@
from skillmodels.utils_plotting import get_layout_kwargs, get_make_subplot_kwargs

if TYPE_CHECKING:
+ from collections.abc import Mapping
+
+ from numpy.typing import NDArray
+
from skillmodels.types import ProcessedModel


@@ -43,38 +45,41 @@ def combine_distribution_plots(
Uses dictionary with plotly images as values to build plotly Figure with subplots.
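Schematically, building one Figure with subplots from a dict of per-factor figures works like this (a minimal sketch; the real function also handles axis sharing, legends and layout kwargs):

    import plotly.graph_objects as go
    from plotly.subplots import make_subplots

    def combine_sketch(plots_dict: dict[str, go.Figure]) -> go.Figure:
        # One subplot per key; copy every trace of each stored figure
        # into its cell of the grid.
        names = list(plots_dict)
        fig = make_subplots(rows=1, cols=len(names), subplot_titles=names)
        for col, name in enumerate(names, start=1):
            for trace in plots_dict[name].data:
                fig.add_trace(trace, row=1, col=col)
        return fig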
Args:
- kde_plots (dict): Dictionary with plots of indivudal factor kde plots.
- contour_plots (dict): Dictionary with plots of pairwise factor density
+ kde_plots: Dictionary with plots of individual factor kde plots.
+ contour_plots: Dictionary with plots of pairwise factor density
contours.
- surface_plots (dict): Dictionary with plots of pairwise factor density
+ surface_plots: Dictionary with plots of pairwise factor density
3d plots.
- make_subplot_kwargs (dict or NoneType): Dictionary of keyword arguments used
+ factor_order: List of factor names to define the order of
+ subplots. If None, uses the order from kde_plots keys.
+ make_subplot_kwargs: Dictionary of keyword arguments used
to instantiate plotly Figure with multiple subplots. Is used to define
properties such as, for example, the spacing between subplots. If None,
default arguments defined in the function are used.
- factor_mapping (dct): Dictionary to change displayed factor names.
- sharex (bool): Whether to share the properties of x-axis across subplots.
+ factor_mapping: Dictionary to change displayed factor names.
+ sharex: Whether to share the properties of x-axis across subplots.
Default False.
- sharey (bool): Whether to share the properties ofy-axis across subplots.
+ sharey: Whether to share the properties of y-axis across subplots.
Default True.
- line_width (float): A float used to set same line width across subplots.
- showlegend (bool): Display legend if True.
- layout_kwargs (dict or NoneType): Dictionary of key word arguments used to
+ line_width: A float used to set same line width across subplots.
+ showlegend: Display legend if True.
+ layout_kwargs: Dictionary of key word arguments used to
update layout of plotly Figure object. If None, the default kwargs defined
in the function will be used.
- legend_kwargs (dict or NoneType): Dictionary of key word arguments used to
+ legend_kwargs: Dictionary of key word arguments used to
update position, orientation and title of figure legend. If None, default
position and orientation will be used with no title.
- title_kwargs (dict or NoneType): Dictionary of key word arguments used to
+ title_kwargs: Dictionary of key word arguments used to
update properties of the figure title. Use {'text': ''} to set figure title.
If None, infers title based on the value of `quantiles_of_other_factors`.
- eye_x, eye_y and eye_z (float): Control camera (view point) of the 3d plots.
- Together they form the a norm, and the larger the norm, the more zoomed out
- is the view. Setting eye_z to a lower value lowers the view point.
+ eye_x: Control camera x position for the 3d plots. Default 2.2.
+ eye_y: Control camera y position for the 3d plots. Default 2.2.
+ eye_z: Control camera z position for the 3d plots. Default 1.
+ Setting eye_z to a lower value lowers the view point.

Returns:
- fig (plotly.Figure): Plotly figure with subplots that combines pairwise
+ fig: Plotly figure with subplots that combines pairwise
distribution plots.

"""
@@ -177,39 +182,39 @@ def univariate_densities(
with factor names as keys.

Args:
- data (DataFrame): Model estimation input data.
- model_dict (dict): Dictionary with model specifications.
- params (DataFrame): DataFrame with estimated parameter values.
- period (int or float): Model period for which to plot the distributions for.
- factors (list or NoneType): List of factors for which to plot the densities.
+ data: Model estimation input data.
+ model_dict: Dictionary with model specifications.
+ params: DataFrame with estimated parameter values.
+ period: Model period for which to plot the distributions.
+ factors: List of factors for which to plot the densities.
If None, plot pairwise distributions for all latent factors.
- observed_factors (bool): If True, plot densities of observed factors too.
- states (dict, list, pd.DataFrame or NoneType): List or dictionary with tidy
+ observed_factors: If True, plot densities of observed factors too.
+ states: List or dictionary with tidy
DataFrames with filtered or simulated states or only one DataFrame with
filtered or simulated states. If None, retrieve data frame with filtered
states using model_dict and data. States are used to estimate the state
ranges in each period (if state_ranges are not given explicitly) and to
estimate the distribution of the latent factors.
- show_hist (bool): Add histogram to the distplot.
- show_curve (bool): Add density curve to the displot.
- show_rug (bool): Add rug to the distplot.
- curve_type (str): Curve type, 'normal' or 'kde', to add to the distplot.
- colorscale (str): The color palette used when plotting multiple data. Must be
+ show_hist: Add histogram to the distplot.
+ show_curve: Add density curve to the distplot.
+ show_rug: Add rug to the distplot.
+ curve_type: Curve type, 'normal' or 'kde', to add to the distplot.
+ colorscale: The color palette used when plotting multiple data. Must be
a valid attribute of px.colors.qualitative.
- bin_size (float): Size of the histogram bins.
- distplot_kwargs (NoneType or dict): Dictionary with additional keyword
+ bin_size: Size of the histogram bins.
+ distplot_kwargs: Dictionary with additional keyword
arguments passed to ff.create_distplot() to initiate the distplot.
- layout_kwargs (NoneType or dict): Dictionary of keyword arguments to update
+ layout_kwargs: Dictionary of keyword arguments to update
layout of the plot figures. Some essential layout kwargs are:
- - xaxis_title (str): label label
- - yaxis_title (str): label of y axis
- - xaxis_showgrid (bool): display axis grid
- - yaxis_showgrid (bool): display axis grid
- - template (str): figure background theme
- - showlegend (bool): add legend
+ - xaxis_title: label of x axis
+ - yaxis_title: label of y axis
+ - xaxis_showgrid: display axis grid
+ - yaxis_showgrid: display axis grid
+ - template: figure background theme
+ - showlegend: add legend

Returns:
- plots_dict (dict): Dictionary with density plots.
+ plots_dict: Dictionary with density plots.

"""
if states is None:
@@ -285,24 +290,24 @@ def bivariate_density_contours(
and collects them in a dictionary with factor combinations as keys.

Args:
- data (DataFrame): Model estimation input data.
- model_dict (dict): Dictionary with model specifications.
- params (DataFrame): DataFrame with estimated parameter values.
- period (int or float): Model period for which to plot the distributions for.
- factors (list or NoneType): List of factors for which to plot the densities.
+ data: Model estimation input data.
+ model_dict: Dictionary with model specifications.
+ params: DataFrame with estimated parameter values.
+ period: Model period for which to plot the distributions.
+ factors: List of factors for which to plot the densities.
If None, plot pairwise distributions for all latent factors.
- observed_factors (bool): If True, plot densities of observed factors too.
- states (dict, list, pd.DataFrame or NoneType): List or dictionary with tidy
+ observed_factors: If True, plot densities of observed factors too.
+ states: List or dictionary with tidy
DataFrames with filtered or simulated states or only one DataFrame with
filtered or simulated states. If None, retrieve data frame with filtered
states using model_dict and data. States are used to estimate the state
ranges in each period (if state_ranges are not given explicitly) and to
estimate the distribution of the latent factors.
- n_points (int): Number of grid points used to create the mesh for calculation
+ n_points: Number of grid points used to create the mesh for calculation
of kernel densities (see the sketch below).
- contour_kwargs (dict or NoneType): Dictionary with keyword arguments to set
+ contour_kwargs: Dictionary with keyword arguments to set
contour line properties (such as annotation, colorscale).
- layout_kwargs (dict or NoneType): Dictionary with keyword arguments to set
+ layout_kwargs: Dictionary with keyword arguments to set
figure layout properties.

The following are various essential keyword arguments defining various features
of plots. All features can also be changed ex-post via 'update_layout' or
'update_traces'. Some default figure layout properties (such as background
theme) are defined if layout_kwargs is None.
- contours_showlabels (bool): If True, annotate density contours.
- contours_coloring (str): Defines how to apply color scale to density contours.
+ contours_showlabels: If True, annotate density contours.
+ contours_coloring: Defines how to apply color scale to density contours.
Possible values are in ['lines', 'fill', 'heatmap', 'none']. Default is
'none' which implies no colorscale.
- contours_colorscale (str): The color scale to use for line legends. Must be
+ contours_colorscale: The color scale to use for line legends. Must be
a valid plotly.express.colors.sequential attribute. Default 'RdBu_r'.
- showcolorbar (bool): A boolean variable for displaying color bar.
+ lines_colorscale: The color palette used for contour lines when plotting
+ multiple scenarios. Must be a valid px.colors.qualitative attribute.
+ Default 'D3'.
+ showcolorbar: A boolean variable for displaying color bar.

Returns:
- plots_dict (dict): Dictionary with factor combinations as keys and respective
+ plots_dict: Dictionary with factor combinations as keys and respective
pairwise plots of density contours as values.

"""
@@ -411,38 +419,40 @@ def bivariate_density_surfaces(
and collects them in a dictionary with factor name combinations as keys.

Args:
- data (DataFrame): Model estimation input data.
- model_dict (dict): Dictionary with model specifications.
- params (DataFrame): DataFrame with estimated parameter values.
- period (int or float): Model period for which to plot the distributions for.
- factors (list or NoneType): List of factors for which to plot the densities.
+ data: Model estimation input data.
+ model_dict: Dictionary with model specifications.
+ params: DataFrame with estimated parameter values.
+ period: Model period for which to plot the distributions.
+ factors: List of factors for which to plot the densities.
If None, plot pairwise distributions for all latent factors.
- observed_factors (bool): If True, plot densities of observed factors too.
- states (dict, list, pd.DataFrame or NoneType): List or dictionary with tidy
+ observed_factors: If True, plot densities of observed factors too.
+ states: List or dictionary with tidy
DataFrames with filtered or simulated states or only one DataFrame with
filtered or simulated states. If None, retrieve data frame with
filtered states using model_dict and data.
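The n_points mesh referenced in the contour docstring above feeds a kernel density estimate, roughly like this (a sketch using gaussian_kde, which the module already imports; not the package's exact code):

    import numpy as np
    from scipy.stats import gaussian_kde

    def kde_on_grid(x, y, n_points=50):
        # Fit a 2d KDE to the two factor samples and evaluate it on an
        # n_points x n_points mesh that a contour or surface trace can use.
        kde = gaussian_kde(np.vstack([x, y]))
        xg = np.linspace(x.min(), x.max(), n_points)
        yg = np.linspace(y.min(), y.max(), n_points)
        xx, yy = np.meshgrid(xg, yg)
        zz = kde(np.vstack([xx.ravel(), yy.ravel()])).reshape(xx.shape)
        return xg, yg, zz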
States are used to estimate the state
ranges in each period (if state_ranges are not given explicitly) and to
estimate the distribution of the latent factors.
- n_points (int): Number of grid points used to create the mesh for calculation
+ n_points: Number of grid points used to create the mesh for calculation
of kernel densities.
+
The following are various essential keyword arguments defining various features
of plots. All features can also be changed ex-post via 'update_layout' or
'update_traces'. Some default figure layout properties (such as background
theme) are defined if layout_kwargs is None.
- layout_kwargs (dict or NoneType): Dictionary with keyword arguments to set
+ layout_kwargs: Dictionary with keyword arguments to set
figure layout properties.
- colorscale (str): The color scale to use for line legends. Must be a valid
+ colorscale: The color scale to use for line legends. Must be a valid
plotly.express.colors.sequential attribute. Default 'RdBu_r'.
- showcolorbar (bool): A boolean variable for displaying the colorbar associated
+ opacity: Opacity of the surface. Default 0.9.
+ showcolorbar: A boolean variable for displaying the colorbar associated
with the surface color scale.
- showgrids (bool): A boolean variable for showing axes grids.
- showaxlines (bool): A boolean variable for showing axes lines.
- showlabels (bool): A boolean variable for displaying axes labels.
+ showgrids: A boolean variable for showing axes grids.
+ showaxlines: A boolean variable for showing axes lines.
+ showlabels: A boolean variable for displaying axes labels.

Returns:
- plots_dict (dict): Dictionary with factor combinations as keys and respective
+ plots_dict: Dictionary with factor combinations as keys and respective
pairwise plots of 3d density plots as values.

"""
diff --git a/src/skillmodels/visualize_transition_equations.py b/src/skillmodels/visualize_transition_equations.py
index de49c5df..7400ccb4 100644
--- a/src/skillmodels/visualize_transition_equations.py
+++ b/src/skillmodels/visualize_transition_equations.py
@@ -1,5 +1,5 @@
import itertools
-from collections.abc import Callable
+from collections.abc import Callable  # noqa: TC003
from copy import deepcopy
from typing import TYPE_CHECKING, Any

@@ -41,38 +41,38 @@ def combine_transition_plots(
Use dictionary with plotly images as values to build plotly figure with subplots.

Args:
- plots_dict (dict): Dictionary with plots of transition functions for each
+ plots_dict: Dictionary with plots of transition functions for each
factor.
- column_order (list, str or NoneType): List of (output) factor names according
+ column_order: List of (output) factor names according
to which transition plots should be ordered horizontally. If None, infer
from the keys of plots_dict
- row_order (list, str or NoneType): List of (input) factor names according
+ row_order: List of (input) factor names according
to which transition plots should be ordered vertically. If None, infer
from the keys of plots_dict
- factor_mapping (dict or NoneType): A dictionary with custom factor names to
+ factor_mapping: A dictionary with custom factor names to
display as axes labels.
- make_subplot_kwargs (dict or NoneType): Dictionary of keyword arguments used
+ make_subplot_kwargs: Dictionary of keyword arguments used
to instantiate plotly Figure with multiple subplots. Is used to define
properties such as, for example, the spacing between subplots. If None,
default arguments defined in the function are used.
- sharex (bool): Whether to share the properties of x-axis across subplots.
+ sharex: Whether to share the properties of x-axis across subplots.
Default False.
- sharey (bool): Whether to share the properties ofy-axis across subplots.
+ sharey: Whether to share the properties of y-axis across subplots.
Default True.
- showlegend (bool): Display legend if True.
- layout_kwargs (dict or NoneType): Dictionary of key word arguments used to
+ showlegend: Display legend if True.
+ layout_kwargs: Dictionary of key word arguments used to
update layout of plotly Figure object. If None, the default kwargs defined
in the function will be used.
- legend_kwargs (dict or NoneType): Dictionary of key word arguments used to
+ legend_kwargs: Dictionary of key word arguments used to
update position, orientation and title of figure legend. If None, default
position and orientation will be used with no title.
- title_kwargs (dict or NoneType): Dictionary of key word arguments used to
+ title_kwargs: Dictionary of key word arguments used to
update properties of the figure title. Use {'text': ''} to set figure title.
If None, infers title based on the value of `quantiles_of_other_factors`.

Returns:
- fig (plotly.Figure): Plotly figure with subplots that combines individual
+ fig: Plotly figure with subplots that combines individual
transition functions.

"""
@@ -156,29 +156,29 @@ def get_transition_plots(
"""Get dictionary with individual plots of transition equations for each factor.

Args:
- model_dict (dict): The model specification. See: :ref:`model_specs`
- params (pandas.DataFrame): DataFrame with model parameters.
- data (pd.DataFrame): Empirical dataset that is used to estimate the model.
- period (int): The start period of the transition equations that are plotted.
- state_ranges (dict or NoneType): The keys are the names of the latent factors.
+ model_dict: The model specification. See: :ref:`model_specs`
+ params: DataFrame with model parameters.
+ data: Empirical dataset that is used to estimate the model.
+ period: The start period of the transition equations that are plotted.
+ state_ranges: The keys are the names of the latent factors.
The values are DataFrames with the columns "period", "minimum", "maximum".
The state_ranges are used to define the axis limits of the plots.
- quantiles_of_other_factors (float, list or None): Quantiles at which the factors
+ quantiles_of_other_factors: Quantiles at which the factors
that are not varied in a given plot are fixed. If None, those factors
are not fixed but integrated out (see the sketch below).
- n_points (int): Number of grid points per input. Default 50.
- n_draws (int): Number of randomly drawn values of the factors that are averaged
+ n_points: Number of grid points per input. Default 50.
+ n_draws: Number of randomly drawn values of the factors that are averaged
out. Only relevant if quantiles_of_other_factors is *None*. Default 50.
- colorscale (str): The color scale to use for line legends. Must be a valid
+ colorscale: The color scale to use for line legends. Must be a valid
plotly.express.colors.sequential attribute. Default 'Magenta_r'.
- layout_kwargs (dict or NoneType): Dictionary of key word arguments used to
+ layout_kwargs: Dictionary of key word arguments used to
update layout of plotly image object. If None, the default kwargs defined
in the function will be used.
- include_correction_factors (bool): Whether to include correction factors in the
+ include_correction_factors: Whether to include correction factors in the
plots. Default False.
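The quantiles_of_other_factors mechanic described in the Args above can be pictured with a hypothetical helper (assuming the filtered states live in a tidy DataFrame with one column per factor):

    import numpy as np
    import pandas as pd

    def grid_with_others_fixed(states, vary, quantile=0.5, n_points=50):
        # Vary one input factor over its observed range while pinning
        # every other factor at the given quantile of its distribution.
        grid = np.linspace(states[vary].min(), states[vary].max(), n_points)
        out = pd.DataFrame({vary: grid})
        for other in states.columns.drop(vary):
            out[other] = states[other].quantile(quantile)
        return out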
Returns:
- plots_dict (dict): Dictionary with individual plots of transition equations
+ plots_dict: Dictionary with individual plots of transition equations
for each combination of input and output factors.

"""
@@ -248,35 +248,37 @@ def _get_dictionary_with_plots(
and output factors.

Args:
- model (dict): The model specification. See: :ref:`model_specs`
- params (pandas.DataFrame): DataFrame with model parameters.
- states (pandas.DataFrame): Tidy DataFrame with filtered or simulated states.
+ model: The model specification. See: :ref:`model_specs`
+ data: Panel dataset in long format for getting observed factors.
+ params: DataFrame with model parameters.
+ states: Tidy DataFrame with filtered or simulated states.
They are used to estimate the state ranges in each period (if state_ranges
are not given explicitly) and to estimate the distribution of the factors
that are not visualized.
- state_ranges (dict): The keys are the names of the latent factors.
+ state_ranges: The keys are the names of the latent factors.
The values are DataFrames with the columns "period", "minimum", "maximum".
The state_ranges are used to define the axis limits of the plots.
- latent_factors (list): Latent factors of the model that are outputs of
+ latent_factors: Latent factors of the model that are outputs of
transition functions.
- all_factors (list): All factors of the model that are the inputs of transition
+ all_factors: All factors of the model that are the inputs of transition
functions.
- quantiles_of_other_factors (float, list or None): Quantiles at which the factors
+ quantiles_of_other_factors: Quantiles at which the factors
that are not varied in a given plot are fixed. If None, those factors
are not fixed but integrated out.
- period (int): The start period of the transition equations that are plotted.
- n_points (int): Number of grid points per input. Default 50.
- n_draws (int): Number of randomly drawn values of the factors that are averaged
+ period: The start period of the transition equations that are plotted.
+ n_points: Number of grid points per input. Default 50.
+ n_draws: Number of randomly drawn values of the factors that are averaged
out. Only relevant if quantiles_of_other_factors is *None*. Default 50.
- colorscale (str): The color scale to use for line legends. Must be a valid
+ colorscale: The color scale to use for line legends. Must be a valid
plotly.express.colors.sequential attribute. Default 'Magenta_r'.
- subfig_kwargs (dict or NoneType): Dictionary of key word arguments used to
+ layout_kwargs: Dictionary of key word arguments used to
update layout of plotly image object. If None, the default kwargs defined
in the function will be used.
+ showlegend: Display legend if True. Default True.

Returns:
- plots_dict (dict): Dictionary with individual plots of transition functions
+ plots_dict: Dictionary with individual plots of transition functions
for each combination of input and output factors.

"""

From e0f59d06ac7b5845bcf78b36afe2a8bb4cbdf68e Mon Sep 17 00:00:00 2001
From: Hans-Martin von Gaudecker
Date: Fri, 9 Jan 2026 14:43:25 +0100
Subject: [PATCH 10/27] Make ruff rules much stricter.
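Among other things, the stricter rule set stops ignoring FBT001 (boolean-typed positional arguments), so boolean flags become keyword-only throughout. The mechanical refactor, sketched on one signature touched in this series (shortened call-site names are illustrative):

    # Before: the flag could be passed positionally and silently mixed up.
    def check_model(model_dict, labels, dimensions, anchoring, has_endogenous_factors): ...

    # After: the bare `*` makes the flag keyword-only, so call sites must
    # spell it out, e.g. check_model(..., has_endogenous_factors=True).
    def check_model(model_dict, labels, dimensions, anchoring, *, has_endogenous_factors): ...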
--- .yamllint.yml | 3 + docs/source/conf.py | 10 +-- docs/source/getting_started/tutorial.ipynb | 2 +- .../how_to_simulate_dataset.ipynb | 4 +- .../how_to_visualize_correlations.ipynb | 4 +- ...sualize_pairwise_factor_distribution.ipynb | 4 +- ...ow_to_visualize_transition_equations.ipynb | 4 +- pixi.lock | 4 +- pyproject.toml | 42 +++++------ src/skillmodels/__init__.py | 2 + src/skillmodels/check_model.py | 5 ++ src/skillmodels/clipping.py | 2 + src/skillmodels/config.py | 2 + src/skillmodels/constraints.py | 4 +- src/skillmodels/correlation_heatmap.py | 58 ++++++++-------- src/skillmodels/decorators.py | 7 +- src/skillmodels/filtered_states.py | 12 ++-- src/skillmodels/kalman_filters.py | 9 ++- src/skillmodels/kalman_filters_debug.py | 2 + src/skillmodels/likelihood_function.py | 8 +-- src/skillmodels/likelihood_function_debug.py | 8 +-- src/skillmodels/maximization_inputs.py | 17 +++-- src/skillmodels/params_index.py | 7 +- src/skillmodels/parse_params.py | 24 +++---- src/skillmodels/process_data.py | 15 ++-- src/skillmodels/process_debug_data.py | 11 ++- src/skillmodels/process_model.py | 10 ++- src/skillmodels/qr.py | 2 + src/skillmodels/simulate_data.py | 12 ++-- src/skillmodels/transition_functions.py | 13 ++-- src/skillmodels/utilities.py | 20 ++++-- src/skillmodels/utils_plotting.py | 4 ++ .../visualize_factor_distributions.py | 43 ++++++++---- .../visualize_transition_equations.py | 39 ++++++----- tests/test_clipping.py | 8 ++- tests/test_constraints.py | 36 +++++----- tests/test_correlation_heatmap.py | 26 +++---- tests/test_decorators.py | 24 +++---- tests/test_filtered_states.py | 10 ++- tests/test_kalman_filters.py | 18 +++-- tests/test_likelihood_regression.py | 35 +++++----- tests/test_maximization_inputs.py | 11 ++- tests/test_params_index.py | 31 +++++---- tests/test_parse_params.py | 22 +++--- tests/test_process_data.py | 22 +++--- tests/test_process_model.py | 69 +++++++++---------- tests/test_qr.py | 23 +++++-- tests/test_simulate_data.py | 12 ++-- tests/test_transition_functions.py | 18 ++--- tests/test_utilities.py | 31 ++++----- tests/test_visualize_factor_distributions.py | 16 ++--- tests/test_visualize_transition_equations.py | 8 +-- 52 files changed, 449 insertions(+), 384 deletions(-) diff --git a/.yamllint.yml b/.yamllint.yml index 72f64be1..0bdfa076 100644 --- a/.yamllint.yml +++ b/.yamllint.yml @@ -3,6 +3,8 @@ yaml-files: - '*.yaml' - '*.yml' - .yamllint +ignore: + - src/skillmodels/test_data/simplest_augmented_model.yaml rules: braces: enable brackets: enable @@ -34,3 +36,4 @@ rules: trailing-spaces: enable truthy: level: warning + check-keys: false diff --git a/docs/source/conf.py b/docs/source/conf.py index 8c9b2d42..09c9be1f 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -1,3 +1,5 @@ +"""Sphinx configuration file for skillmodels documentation.""" + # # Documentation build configuration file, created by sphinx-quickstart # @@ -8,13 +10,13 @@ # # All configuration values have a default; values that are commented out # serve to show the default. -import os import sys +from pathlib import Path # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath("../..")) +# documentation root, use Path.resolve() to make it absolute, like shown here. 
+sys.path.insert(0, str(Path("../..").resolve())) # -- General configuration ---------------------------------------------------- @@ -61,7 +63,7 @@ # General information about the project. project = "skillmodels" -copyright = "2016-2021, Janos Gabler" +copyright = "2016-, Janos Gabler" # noqa: A001 # The version info for the project you"re documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the diff --git a/docs/source/getting_started/tutorial.ipynb b/docs/source/getting_started/tutorial.ipynb index 81ce0965..137c8115 100644 --- a/docs/source/getting_started/tutorial.ipynb +++ b/docs/source/getting_started/tutorial.ipynb @@ -41,7 +41,7 @@ "metadata": {}, "outputs": [], "source": [ - "with open(TEST_DATA_DIR / \"model2.yaml\") as y:\n", + "with (TEST_DATA_DIR / \"model2.yaml\").open() as y:\n", " model_dict = yaml.load(y, Loader=yaml.SafeLoader)" ] }, diff --git a/docs/source/how_to_guides/how_to_simulate_dataset.ipynb b/docs/source/how_to_guides/how_to_simulate_dataset.ipynb index b6b4685e..12905f6a 100644 --- a/docs/source/how_to_guides/how_to_simulate_dataset.ipynb +++ b/docs/source/how_to_guides/how_to_simulate_dataset.ipynb @@ -34,8 +34,8 @@ "metadata": {}, "outputs": [], "source": [ - "with open(TEST_DATA_DIR / \"model2.yaml\") as y:\n", - " model_dict = yaml.load(y, Loader=yaml.FullLoader)\n", + "with (TEST_DATA_DIR / \"model2.yaml\").open() as y:\n", + " model_dict = yaml.load(y, Loader=yaml.SafeLoader)\n", "\n", "data = pd.read_stata(TEST_DATA_DIR / \"model2_simulated_data.dta\")\n", "data = data.set_index([\"caseid\", \"period\"])\n", diff --git a/docs/source/how_to_guides/how_to_visualize_correlations.ipynb b/docs/source/how_to_guides/how_to_visualize_correlations.ipynb index 890cddef..73560c56 100644 --- a/docs/source/how_to_guides/how_to_visualize_correlations.ipynb +++ b/docs/source/how_to_guides/how_to_visualize_correlations.ipynb @@ -40,8 +40,8 @@ "metadata": {}, "outputs": [], "source": [ - "with open(TEST_DATA_DIR / \"model2.yaml\") as y:\n", - " model_dict = yaml.load(y, Loader=yaml.FullLoader)" + "with (TEST_DATA_DIR / \"model2.yaml\").open() as y:\n", + " model_dict = yaml.load(y, Loader=yaml.SafeLoader)" ] }, { diff --git a/docs/source/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb b/docs/source/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb index e0f5944e..05b7a524 100644 --- a/docs/source/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb +++ b/docs/source/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb @@ -57,8 +57,8 @@ "metadata": {}, "outputs": [], "source": [ - "with open(TEST_DATA_DIR / \"model2.yaml\") as y:\n", - " model_dict = yaml.load(y, Loader=yaml.FullLoader)\n", + "with (TEST_DATA_DIR / \"model2.yaml\").open() as y:\n", + " model_dict = yaml.load(y, Loader=yaml.SafeLoader)\n", "params = pd.read_csv(REGRESSION_VAULT / \"one_stage_anchoring.csv\")\n", "params = params.set_index([\"category\", \"period\", \"name1\", \"name2\"])\n", "\n", diff --git a/docs/source/how_to_guides/how_to_visualize_transition_equations.ipynb b/docs/source/how_to_guides/how_to_visualize_transition_equations.ipynb index 17c9a714..9f9f8631 100644 --- a/docs/source/how_to_guides/how_to_visualize_transition_equations.ipynb +++ b/docs/source/how_to_guides/how_to_visualize_transition_equations.ipynb @@ -47,8 +47,8 @@ "metadata": {}, "outputs": [], "source": [ - "with open(TEST_DATA_DIR / \"model2.yaml\") as y:\n", - " model_dict = yaml.load(y, 
Loader=yaml.FullLoader)\n", + "with (TEST_DATA_DIR / \"model2.yaml\").open() as y:\n", + " model_dict = yaml.load(y, Loader=yaml.SafeLoader)\n", "\n", "params = pd.read_csv(REGRESSION_VAULT / \"one_stage_anchoring.csv\")\n", "params = params.set_index([\"category\", \"period\", \"name1\", \"name2\"])\n", diff --git a/pixi.lock b/pixi.lock index 20d82777..64280cd4 100644 --- a/pixi.lock +++ b/pixi.lock @@ -8471,8 +8471,8 @@ packages: timestamp: 1753199211006 - pypi: ./ name: skillmodels - version: 0.0.24.dev245+g921c61276.d20260109 - sha256: 5918dae4da35a25e3716f0ea2f2cce8f1e9841ad01284ccc92962243d4a6d19e + version: 0.0.24.dev246+gdce66ad7c.d20260109 + sha256: c4aac3043ab0ace6fc5dfb9a40c211225a79046f52762a52dc441938d4487364 requires_dist: - dags - frozendict diff --git a/pyproject.toml b/pyproject.toml index 38e4fcd9..f8e28e92 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -175,46 +175,42 @@ line-length = 88 select = ["ALL"] extend-ignore = [ "ANN401", # Dynamically typed expressions (typing.Any) are disallowed - too strict - "B028", # No explicit `stacklevel` keyword argument found "COM812", # In conflict with formatter - "D100", # Missing docstring in public module - "D103", # missing docstring in public function - "D104", # missing docstring in public package "EM101", # exception must not use a string literal "EM102", # exception must not use an f-string literal - "FBT001", # Boolean-typed positional arguments. "FBT002", # Boolean default positional argument in function definition - "FIX002", # line contains a todo "ISC001", # In conflict with formatter "NPY002", # Leave Numpy's legacy RNG - "PD008", # array.at is perfectly valid Jax, but linter thinks it's Pandas... "PD015", # pd.merge is fine "PERF401", # Many suggestions to use list comprehension are not helpful - "PLR2004", # Magic values are fine "PLR0913", # Too many arguments to function call - "RET504", # Assignment before return statement is fine. - "S101", # use of `assert` detected + "PLR2004", # Magic values are fine "S301", # `pickle` module is unsafe - "SLF001", # Private member accessed: `_stochastic_info` "TRY003", # long messages outside the exception class ] [tool.ruff.lint.per-file-ignores] "src/skillmodels/types.py" = ["TC"] # Dataclasses need types at runtime "src/skillmodels/visualize_*.py" = ["BLE001"] -"**/*.ipynb" = ["B018", "T201", "E402", "PLR2004", "INP001", "PTH100"] -"docs/**/*" = ["A001", "ERA001", "INP001", "PTH100", "PTH123", "S506"] +"**/*.ipynb" = [ + "B018", # Seemingly useless expression for printing. + "T201", # Printing is fine here. + "INP001", # No need for a namespace. +] +"docs/source/conf.py" = [ + "ERA001", # Lots of erased code + "INP001", # No need for a namespace. +] "tests/*" = [ - "ANN", # Tests don't need type annotations - "ARG001", - "E712", - "FBT003", - "INP001", - "PD002", - "PT011", - "NPY002", - "PTH123", - "S506" + "ANN", # No type annotations needed for tests + "ARG001", # Unused arguments are common in fixture-heavy tests + "D100", # No module docstrings needed for tests + "D103", # No function docstrings needed for tests + "E712", # Comparison to True/False using == might be necessary for arrays. + "FBT003", # Boolean positional values are common in test setup + "INP001", # No need for a namespace. 
+ "PT011", # Broad pytest.raises() blocks are okay + "S101", # use of `assert` detected ] [tool.ruff.lint.pydocstyle] diff --git a/src/skillmodels/__init__.py b/src/skillmodels/__init__.py index e55229ad..901e3138 100644 --- a/src/skillmodels/__init__.py +++ b/src/skillmodels/__init__.py @@ -1,3 +1,5 @@ +"""Skillmodels: A Python package for estimating latent factor models.""" + import contextlib try: diff --git a/src/skillmodels/check_model.py b/src/skillmodels/check_model.py index a591001f..28a618bb 100644 --- a/src/skillmodels/check_model.py +++ b/src/skillmodels/check_model.py @@ -1,3 +1,5 @@ +"""Functions to validate model specifications.""" + from typing import TYPE_CHECKING import numpy as np @@ -11,6 +13,7 @@ def check_model( labels: Labels, dimensions: Dimensions, anchoring: Anchoring, + *, has_endogenous_factors: bool, ) -> None: """Check consistency and validity of the model specification. @@ -59,8 +62,10 @@ def check_stagemap( stagemap: tuple[int, ...], stages: tuple[int, ...] | list[int], n_periods: int, + *, is_augmented: bool, ) -> list[str]: + """Validate the stagemap configuration against model dimensions.""" report: list[str] = [] step_size = 2 if is_augmented else 1 if len(stagemap) != n_periods - step_size: diff --git a/src/skillmodels/clipping.py b/src/skillmodels/clipping.py index 04fea4d7..ce8dd000 100644 --- a/src/skillmodels/clipping.py +++ b/src/skillmodels/clipping.py @@ -1,3 +1,5 @@ +"""Soft clipping utilities for constraining values to bounded ranges.""" + import jax import jax.numpy as jnp from jax import Array diff --git a/src/skillmodels/config.py b/src/skillmodels/config.py index c5901cfd..cd7eb32b 100644 --- a/src/skillmodels/config.py +++ b/src/skillmodels/config.py @@ -1,3 +1,5 @@ +"""Configuration constants and paths for skillmodels.""" + from pathlib import Path TEST_DATA_DIR = Path(__file__).resolve().parent / "test_data" diff --git a/src/skillmodels/constraints.py b/src/skillmodels/constraints.py index d95f5c86..58235735 100644 --- a/src/skillmodels/constraints.py +++ b/src/skillmodels/constraints.py @@ -394,9 +394,7 @@ def _get_anchoring_constraints( {"loc": ind_tups, "type": "fixed", "value": 1, "description": msg}, ) - constraints_dicts = [c for c in constraints_dicts if c["loc"] != []] - - return constraints_dicts + return [c for c in constraints_dicts if c["loc"] != []] def _get_constraints_for_augmented_periods( diff --git a/src/skillmodels/correlation_heatmap.py b/src/skillmodels/correlation_heatmap.py index cf2d782f..795ba99d 100644 --- a/src/skillmodels/correlation_heatmap.py +++ b/src/skillmodels/correlation_heatmap.py @@ -1,3 +1,5 @@ +"""Functions for creating correlation heatmap visualizations.""" + from typing import TYPE_CHECKING, Any import numpy as np @@ -22,6 +24,7 @@ def plot_correlation_heatmap( zmin: float | None = None, zmid: float | None = None, colorscale: str = "RdBu_r", + *, show_color_bar: bool = True, show_diagonal: bool = True, show_upper_triangle: bool = True, @@ -96,18 +99,18 @@ def plot_correlation_heatmap( corr = _process_corr_data_for_plotting( corr, rounding, - show_upper_triangle, - show_diagonal, - trim_heatmap, + show_upper_triangle=show_upper_triangle, + show_diagonal=show_diagonal, + trim_heatmap=trim_heatmap, ) heatmap_kwargs = _get_heatmap_kwargs( corr, heatmap_kwargs, colorscale, - show_color_bar, - zmax, - zmin, - zmid, + show_color_bar=show_color_bar, + zmax=zmax, + zmin=zmin, + zmid=zmid, ) layout_kwargs = _get_layout_kwargs( corr=corr, @@ -170,8 +173,7 @@ def get_measurements_corr( 
latent_factors=latent_factors, observed_factors=observed_factors, ) - corr = df.corr() - return corr + return df.corr() def get_quasi_scores_corr( @@ -216,8 +218,7 @@ def get_quasi_scores_corr( latent_factors=latent_factors, observed_factors=observed_factors, ) - corr = df.corr() - return corr + return df.corr() def get_scores_corr( @@ -263,19 +264,21 @@ def get_scores_corr( latent_factors=latent_factors, observed_factors=observed_factors, ) - corr = df.corr() - return corr + return df.corr() def _process_corr_data_for_plotting( corr: pd.DataFrame, rounding: int, + *, show_upper_triangle: bool, show_diagonal: bool, trim_heatmap: bool, ) -> pd.DataFrame: """Apply mask and rounding to correlation DataFrame.""" - mask = _get_mask(corr, show_upper_triangle, show_diagonal) + mask = _get_mask( + corr, show_upper_triangle=show_upper_triangle, show_diagonal=show_diagonal + ) corr = corr.where(mask).round(rounding) if trim_heatmap: keeprows = mask.any(axis=1) & corr.notna().any(axis="columns").to_numpy() @@ -289,6 +292,7 @@ def _process_corr_data_for_plotting( def _get_mask( corr: pd.DataFrame, + *, show_upper_triangle: bool, show_diagonal: bool, ) -> NDArray[np.bool_]: @@ -400,8 +404,7 @@ def _get_measurement_data_for_single_period( )["variable"].to_list() for fac in observed_factors: measurements.append(fac) - df = data.query(f"{update_info_by_period.index.names[0]}=={period}")[measurements] - return df + return data.query(f"{update_info_by_period.index.names[0]}=={period}")[measurements] def _get_measurement_data_for_multiple_periods( @@ -441,8 +444,7 @@ def _get_measurement_data_for_multiple_periods( .add_suffix(f", {period}") .reset_index(drop=True), ) - df = pd.concat(to_concat, axis=1) - return df + return pd.concat(to_concat, axis=1) def _get_quasi_factor_scores_data( @@ -534,8 +536,7 @@ def _get_quasi_factor_scores_data_for_single_period( for factor in observed_factors: df = data.query(f"{update_info_by_period.index.names[0]}=={period}")[factor] to_concat.append(df) - df = pd.concat(to_concat, axis=1) - return df + return pd.concat(to_concat, axis=1) def _get_quasi_factor_scores_data_for_multiple_periods( @@ -575,8 +576,7 @@ def _get_quasi_factor_scores_data_for_multiple_periods( .add_suffix(f", {period}") .reset_index(drop=True), ) - df = pd.concat(to_concat, axis=1) - return df + return pd.concat(to_concat, axis=1) def _get_factor_scores_data( @@ -782,8 +782,7 @@ def _get_factor_scores_data_for_multiple_periods( .add_suffix(f", {period}") .reset_index(drop=True), ) - df = pd.concat(to_concat, axis=1) - return df + return pd.concat(to_concat, axis=1) def _process_factors( @@ -827,6 +826,7 @@ def _process_periods( def _get_layout_kwargs( corr: pd.DataFrame, layout_kwargs: dict[str, Any] | None, + *, annotate: bool, annotation_fontsize: int, annotation_text_color: str, @@ -863,10 +863,10 @@ def _get_layout_kwargs( default_layout_kwargs.update( _get_annotations( corr, - annotate, - annotation_fontsize, - annotation_text_color, - annotation_text_angle, + annotate=annotate, + annotation_fontsize=annotation_fontsize, + annotation_text_color=annotation_text_color, + annotation_text_angle=annotation_text_angle, ), ) default_layout_kwargs.update( @@ -904,6 +904,7 @@ def _get_axes_ticks_kwargs( def _get_annotations( df: pd.DataFrame, + *, annotate: bool, annotation_fontsize: int, annotation_text_color: str, @@ -938,6 +939,7 @@ def _get_heatmap_kwargs( corr: pd.DataFrame, heatmap_kwargs: dict[str, Any] | None, colorscale: str, + *, show_color_bar: bool, zmax: float | None, zmin: float | None, diff 
--git a/src/skillmodels/decorators.py b/src/skillmodels/decorators.py index 6354e484..e1c834a7 100644 --- a/src/skillmodels/decorators.py +++ b/src/skillmodels/decorators.py @@ -1,3 +1,5 @@ +"""Decorators for parameter extraction and registration in transition functions.""" + import functools from collections.abc import Callable # noqa: TC003 from typing import Any @@ -71,8 +73,7 @@ def jax_array_output(func: Callable) -> Callable: @functools.wraps(func) def wrapper_jax_array_output(*args: Any, **kwargs: Any) -> Array: raw = func(*args, **kwargs) - out = jnp.array(raw) - return out + return jnp.array(raw) return wrapper_jax_array_output @@ -82,6 +83,8 @@ def register_params( *, params: list[str] | None = None, ) -> Callable: + """Register parameter names for a transition function.""" + def decorator_register_params(func: Callable) -> Callable: func.__registered_params__ = params # ty: ignore[unresolved-attribute] return func diff --git a/src/skillmodels/filtered_states.py b/src/skillmodels/filtered_states.py index 257fb680..09e83927 100644 --- a/src/skillmodels/filtered_states.py +++ b/src/skillmodels/filtered_states.py @@ -1,3 +1,5 @@ +"""Functions to compute and process filtered latent states.""" + from typing import TYPE_CHECKING, Any import jax.numpy as jnp @@ -18,6 +20,7 @@ def get_filtered_states( data: pd.DataFrame, params: pd.DataFrame, ) -> dict[str, dict[str, Any]]: + """Compute filtered latent states given data and estimated parameters.""" max_inputs = get_maximization_inputs(model_dict=model_dict, data=data) params = params.loc[max_inputs["params_template"].index] debug_loglike = max_inputs["debug_loglike"] @@ -38,7 +41,7 @@ def get_filtered_states( factors=model.labels.latent_factors, ) - out = { + return { "anchored_states": { "states": anchored_states_df, "state_ranges": anchored_ranges, @@ -49,13 +52,12 @@ def get_filtered_states( }, } - return out - def anchor_states_df( states_df: pd.DataFrame, model_dict: dict, params: pd.DataFrame, + *, use_aug_period: bool, ) -> pd.DataFrame: """Anchor states in a DataFrame. 
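# Illustrative sketch (hypothetical, not from the diff): the bare `*` added to
# signatures such as anchor_states_df above makes the following arguments
# keyword-only, so boolean flags can never be swapped positionally at call
# sites. The toy function below is an assumption for illustration only.
def scale(x: float, *, double: bool = False) -> float:
    """Return x, doubled only when explicitly requested by keyword."""
    return 2 * x if double else x

assert scale(3.0, double=True) == 6.0
# scale(3.0, True) would raise a TypeError: the flag must be passed by name.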
@@ -121,6 +123,4 @@ def anchor_states_df( for pos, factor in enumerate(model.labels.latent_factors): out[factor] = constants_arr[:, pos] + states_df[factor] * scaling_arr[:, pos] - out = out[states_df.columns] - - return out + return out[states_df.columns] diff --git a/src/skillmodels/kalman_filters.py b/src/skillmodels/kalman_filters.py index 725787aa..d1b2b97a 100644 --- a/src/skillmodels/kalman_filters.py +++ b/src/skillmodels/kalman_filters.py @@ -1,3 +1,5 @@ +"""Kalman filter operations for state estimation using the square-root form.""" + from collections.abc import Callable # noqa: TC003 import jax @@ -279,8 +281,7 @@ def _calculate_sigma_points( n_observed, ) - sigma_points = jnp.concatenate([sigma_points, observed_part], axis=-1) - return sigma_points + return jnp.concatenate([sigma_points, observed_part], axis=-1) def transform_sigma_points( @@ -323,6 +324,4 @@ def transform_sigma_points( ) / anchoring_scaling_factors[1][:n_observed] out_shape = (n_obs, n_mixtures, n_sigma, -1) - out = transformed_unanchored.reshape(out_shape) - - return out + return transformed_unanchored.reshape(out_shape) diff --git a/src/skillmodels/kalman_filters_debug.py b/src/skillmodels/kalman_filters_debug.py index c38a8ab6..5d1ec44f 100644 --- a/src/skillmodels/kalman_filters_debug.py +++ b/src/skillmodels/kalman_filters_debug.py @@ -1,3 +1,5 @@ +"""Debug versions of Kalman filter operations that return intermediate results.""" + from typing import Any import jax diff --git a/src/skillmodels/likelihood_function.py b/src/skillmodels/likelihood_function.py index 4c7d7c8d..720b04ac 100644 --- a/src/skillmodels/likelihood_function.py +++ b/src/skillmodels/likelihood_function.py @@ -1,3 +1,5 @@ +"""Log-likelihood function for latent factor models.""" + import functools from collections.abc import Callable # noqa: TC003 from typing import Any @@ -267,21 +269,19 @@ def _scan_body( def _one_arg_measurement_update( kwargs: dict[str, Array], ) -> tuple[Array, Array, Array, Array]: - out = kalman_update(**kwargs) - return out + return kalman_update(**kwargs) def _one_arg_anchoring_update( kwargs: dict[str, Array], ) -> tuple[Array, Array, Array, Array]: _, _, new_log_mixture_weights, new_loglikes = kalman_update(**kwargs) - out = ( + return ( kwargs["states"], kwargs["upper_chols"], new_log_mixture_weights, new_loglikes, ) - return out def _one_arg_no_predict( diff --git a/src/skillmodels/likelihood_function_debug.py b/src/skillmodels/likelihood_function_debug.py index 1bb448cb..5fc9db9f 100644 --- a/src/skillmodels/likelihood_function_debug.py +++ b/src/skillmodels/likelihood_function_debug.py @@ -1,3 +1,5 @@ +"""Debug version of log-likelihood function that returns intermediate results.""" + import functools from collections.abc import Callable # noqa: TC003 from typing import Any @@ -220,22 +222,20 @@ def _scan_body( def _one_arg_measurement_update( kwargs: dict[str, Any], ) -> tuple[Array, Array, Array, Array, dict[str, Any]]: - out = kalman_update(**kwargs) - return out + return kalman_update(**kwargs) def _one_arg_anchoring_update( kwargs: dict[str, Any], ) -> tuple[Array, Array, Array, Array, dict[str, Any]]: _, _, new_log_mixture_weights, new_loglikes, debug_info = kalman_update(**kwargs) - out = ( + return ( kwargs["states"], kwargs["upper_chols"], new_log_mixture_weights, new_loglikes, debug_info, ) - return out def _one_arg_no_predict( diff --git a/src/skillmodels/maximization_inputs.py b/src/skillmodels/maximization_inputs.py index 5495cbf5..a07b422e 100644 --- a/src/skillmodels/maximization_inputs.py 
+++ b/src/skillmodels/maximization_inputs.py @@ -1,3 +1,5 @@ +"""Functions to create inputs for optimization of the log-likelihood.""" + import functools from collections.abc import Callable # noqa: TC003 from typing import TYPE_CHECKING, Any @@ -185,10 +187,9 @@ def debug_loglike(params: pd.DataFrame) -> dict[str, Any]: params_template=params_template, constraints_dicts=_constraints_dicts, ) - assert params_template.index.equals(p_index), ( - "params_template index is not equal to p_index" - ) - out = { + if not params_template.index.equals(p_index): + raise ValueError("params_template index is not equal to p_index") + return { "loglike": loglike, "loglikeobs": loglikeobs, "debug_loglike": debug_loglike, @@ -197,8 +198,6 @@ def debug_loglike(params: pd.DataFrame) -> dict[str, Any]: "params_template": params_template, } - return out - def _partial_some_log_likelihood( fun: Callable, @@ -228,7 +227,8 @@ def _partial_some_log_likelihood( else model.labels.aug_periods[-1] ) iteration_to_period = _aug_periods.replace(last_aug_period, -1).to_numpy() - assert max(iteration_to_period) == last_aug_period - 1 + if max(iteration_to_period) != last_aug_period - 1: + raise ValueError("Unexpected iteration_to_period configuration") return functools.partial( fun, @@ -276,5 +276,4 @@ def _get_jnp_params_vec(params: pd.DataFrame, target_index: pd.MultiIndex) -> Ar msg += f"Your params have missing entries: {missing_entries}. " raise ValueError(msg) - vec = jnp.array(params.reindex(target_index)["value"].to_numpy()) - return vec + return jnp.array(params.reindex(target_index)["value"].to_numpy()) diff --git a/src/skillmodels/params_index.py b/src/skillmodels/params_index.py index 8328387f..e1d12b86 100644 --- a/src/skillmodels/params_index.py +++ b/src/skillmodels/params_index.py @@ -1,3 +1,5 @@ +"""Functions to construct the parameter index for model estimation.""" + from typing import TYPE_CHECKING import pandas as pd @@ -64,11 +66,10 @@ def get_params_index( has_endogenous_factors=endogenous_factors_info.has_endogenous_factors, ) - index = pd.MultiIndex.from_tuples( + return pd.MultiIndex.from_tuples( ind_tups, names=["category", "aug_period", "name1", "name2"], ) - return index def get_control_params_index_tuples( @@ -130,6 +131,7 @@ def get_meas_sds_index_tuples( def get_shock_sds_index_tuples( aug_periods: tuple[int, ...], factors: tuple[str, ...], + *, has_endogenous_factors: bool, ) -> list[tuple[str, int, str, str]]: """Index tuples for shock_sd. @@ -211,6 +213,7 @@ def get_initial_cholcovs_index_tuples( def get_transition_index_tuples( transition_info: TransitionInfo, aug_periods: tuple[int, ...], + *, has_endogenous_factors: bool, ) -> list[tuple[str, int, str, str]]: """Index tuples for transition equation coefficients. diff --git a/src/skillmodels/parse_params.py b/src/skillmodels/parse_params.py index ad6fcc4f..932abe01 100644 --- a/src/skillmodels/parse_params.py +++ b/src/skillmodels/parse_params.py @@ -1,3 +1,5 @@ +"""Functions to parse parameter vectors into structured dictionaries.""" + import warnings from typing import TYPE_CHECKING, Any @@ -15,6 +17,7 @@ def create_parsing_info( update_info: pd.DataFrame, labels: Labels, anchoring: Anchoring, + *, has_endogenous_factors: bool, ) -> dict[str, Any]: """Create a dictionary with information how the parameter vector has to be parsed. 
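# Illustrative sketch (hypothetical, not from the diff): the `# noqa: PD008`
# markers in the next hunk silence ruff's pandas-vet rule, which flags any
# `.at[...]` access on the assumption that it is the pandas accessor. On JAX
# arrays, `.at[...].set(...)` is the functional update API instead:
import jax.numpy as jnp

arr = jnp.zeros(3)
updated = arr.at[1].set(5.0)  # returns a new array; JAX arrays are immutable
assert updated[1] == 5.0
assert arr[1] == 0.0  # the original array is unchanged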
@@ -204,8 +207,8 @@ def _get_initial_upper_chols( upper_chols = jnp.zeros((n_obs, n_mixtures, n_states, n_states)) for i in range(n_mixtures): filler = jnp.zeros((n_states, n_states)) - filler = filler.at[jnp.tril_indices(n_states)].set(chol_params[i]) - upper_chols = upper_chols.at[:, i].set(filler.T) + filler = filler.at[jnp.tril_indices(n_states)].set(chol_params[i]) # noqa: PD008 + upper_chols = upper_chols.at[:, i].set(filler.T) # noqa: PD008 return upper_chols @@ -235,9 +238,8 @@ def _get_loadings( """Create the array of factor loadings.""" info = info["loadings"] free = params[info["slice"]] - extended = jnp.zeros(info["size"]).at[info["flat_indices"]].set(free) - out = extended.reshape(info["shape"]) - return out + extended = jnp.zeros(info["size"]).at[info["flat_indices"]].set(free) # noqa: PD008 + return extended.reshape(info["shape"]) def _get_meas_sds( @@ -293,7 +295,7 @@ def _get_anchoring_scaling_factors( dimensions.n_aug_periods, -1, ) - scaling_factors = scaling_factors.at[:, info["is_anchored_factor"]].set( + scaling_factors = scaling_factors.at[:, info["is_anchored_factor"]].set( # noqa: PD008 free_anchoring_loadings, ) @@ -301,9 +303,7 @@ def _get_anchoring_scaling_factors( (dimensions.n_aug_periods, dimensions.n_observed_factors), ) - scaling_factors = jnp.hstack([scaling_factors, scaling_for_observed]) - - return scaling_factors + return jnp.hstack([scaling_factors, scaling_for_observed]) def _get_anchoring_constants( @@ -322,12 +322,10 @@ def _get_anchoring_constants( dimensions.n_aug_periods, -1, ) - constants = constants.at[:, info["is_anchored_factor"]].set(values) + constants = constants.at[:, info["is_anchored_factor"]].set(values) # noqa: PD008 constants_for_observed = jnp.zeros( (dimensions.n_aug_periods, dimensions.n_observed_factors), ) - constants = jnp.hstack([constants, constants_for_observed]) - - return constants + return jnp.hstack([constants, constants_for_observed]) diff --git a/src/skillmodels/process_data.py b/src/skillmodels/process_data.py index ec7d9c4a..289b4354 100644 --- a/src/skillmodels/process_data.py +++ b/src/skillmodels/process_data.py @@ -1,3 +1,5 @@ +"""Functions to process and prepare data for model estimation.""" + import warnings from typing import TYPE_CHECKING, Any, Literal @@ -12,6 +14,7 @@ def process_data( df: pd.DataFrame, + *, has_endogenous_factors: bool, labels: Labels, update_info: pd.DataFrame, @@ -96,9 +99,7 @@ def pre_process_data( new_index = pd.MultiIndex.from_product([ids, periods], names=["id", "period"]) # set new index - df = df.reindex(new_index) - - return df + return df.reindex(new_index) def _get_period_data_for_endogenous_factors( @@ -143,8 +144,10 @@ def _augment_data_for_endogenous_factors( # Make sure datset is balanced n_ids = df["id"].nunique() n_periods = df["period"].nunique() - assert n_ids * n_periods == df.shape[0] - assert set(df["period"]) == set(labels.aug_periods_to_periods.values()) + if n_ids * n_periods != df.shape[0]: + raise ValueError("Dataset is not balanced: n_ids * n_periods != n_rows") + if set(df["period"]) != set(labels.aug_periods_to_periods.values()): + raise ValueError("Periods in data don't match expected periods") out = pd.concat( [ @@ -228,7 +231,7 @@ def _handle_controls_with_missings( old_names = df.loc[problematic_index][["__old_id__", "__old_period__"]] msg = "Set measurements to NaN because there are NaNs in the controls for:\n{}" msg = msg.format(list(map(tuple, old_names.to_numpy().tolist()))) - warnings.warn(msg) + warnings.warn(msg, stacklevel=2) 
df.loc[problematic_index] = np.nan return df diff --git a/src/skillmodels/process_debug_data.py b/src/skillmodels/process_debug_data.py index bea50c1c..6a572155 100644 --- a/src/skillmodels/process_debug_data.py +++ b/src/skillmodels/process_debug_data.py @@ -1,3 +1,5 @@ +"""Functions to process debug output from likelihood function into DataFrames.""" + from typing import TYPE_CHECKING, Any import numpy as np @@ -119,9 +121,7 @@ def _create_post_update_states( df["measurement"] = meas to_concat.append(df) - post_states = pd.concat(to_concat) - - return post_states + return pd.concat(to_concat) def _convert_state_array_to_df( @@ -168,15 +168,14 @@ def _create_filtered_states( df["id"] = np.arange(len(df)) to_concat.append(df) - filtered_states = pd.concat(to_concat) - - return filtered_states + return pd.concat(to_concat) def create_state_ranges( filtered_states: pd.DataFrame, factors: tuple[str, ...] | list[str], ) -> dict[str, pd.DataFrame]: + """Compute minimum and maximum state values for each factor by period.""" ranges: dict[str, pd.DataFrame] = {} # Group by whichever period column is present period_col = "aug_period" if "aug_period" in filtered_states.columns else "period" diff --git a/src/skillmodels/process_model.py b/src/skillmodels/process_model.py index 7f5af946..d5527cf6 100644 --- a/src/skillmodels/process_model.py +++ b/src/skillmodels/process_model.py @@ -1,3 +1,5 @@ +"""Functions to process model specifications from user-friendly to internal form.""" + from copy import deepcopy from functools import partial from typing import TYPE_CHECKING, Any, Literal @@ -165,7 +167,7 @@ def get_has_endogenous_factors(factors: dict[str, Any]) -> bool: return endogenous_factors["is_endogenous"].any() # ty: ignore[invalid-return-type] -def get_dimensions(model_dict: dict, has_endogenous_factors: bool) -> Dimensions: +def get_dimensions(model_dict: dict, *, has_endogenous_factors: bool) -> Dimensions: """Extract the dimensions of the model. Args: @@ -191,7 +193,7 @@ def get_dimensions(model_dict: dict, has_endogenous_factors: bool) -> Dimensions def _get_aug_periods_to_periods( - n_aug_periods: int, has_endogenous_factors: bool + n_aug_periods: int, *, has_endogenous_factors: bool ) -> dict[int, int]: """Return mapper of (potentially) augmented periods to user-provided periods.""" aug_periods = list(range(n_aug_periods)) @@ -210,7 +212,7 @@ def _aug_periods_from_period( def _get_labels( - model_dict: dict, has_endogenous_factors: bool, dimensions: Dimensions + model_dict: dict, *, has_endogenous_factors: bool, dimensions: Dimensions ) -> Labels: """Extract labels of the model quantities. @@ -466,6 +468,7 @@ def _extract_factor(states: Array, pos: int) -> Array: def _get_endogenous_factors_info( + *, has_endogenous_factors: bool, model_dict: dict[str, Any], labels: Labels, @@ -501,6 +504,7 @@ def _get_endogenous_factors_info( def _get_aug_periods_to_aug_period_meas_types( aug_periods: tuple[int, ...] 
| KeysView[int], + *, has_endogenous_factors: bool, ) -> dict[int, Literal["states", "endogenous_factors"]]: if has_endogenous_factors: diff --git a/src/skillmodels/qr.py b/src/skillmodels/qr.py index 36a602fc..834ff533 100644 --- a/src/skillmodels/qr.py +++ b/src/skillmodels/qr.py @@ -1,3 +1,5 @@ +"""Custom QR decomposition implementation optimized for GPU.""" + import jax import jax.numpy as jnp from jax import Array diff --git a/src/skillmodels/simulate_data.py b/src/skillmodels/simulate_data.py index 1ab93575..4ae38332 100644 --- a/src/skillmodels/simulate_data.py +++ b/src/skillmodels/simulate_data.py @@ -86,6 +86,7 @@ def simulate_dataset( warnings.warn( f"The number of observations inferred from data ({data_n_obs}) and " f"n_obs ({n_obs}) are different. n_obs is ignored.", + stacklevel=2, ) n_obs = data_n_obs @@ -112,7 +113,8 @@ def simulate_dataset( has_endogenous_factors=model.endogenous_factors_info.has_endogenous_factors, ) - assert n_obs is not None # type narrowing: n_obs is set by either data or argument + if n_obs is None: + raise ValueError("n_obs must be set by either data or argument") states, covs, log_weights, pardict = parse_params( params=jnp.array(params["value"].to_numpy()), parsing_info=parsing_info, @@ -153,7 +155,7 @@ def simulate_dataset( use_aug_period=False, ) - out = { + return { "unanchored_states": { "states": latent_data, "state_ranges": create_state_ranges( @@ -178,8 +180,6 @@ def simulate_dataset( "aug_measurements": aug_measurements, } - return out - def _simulate_dataset( latent_states: Array, @@ -189,6 +189,7 @@ def _simulate_dataset( labels: Labels, dimensions: Dimensions, n_obs: int, + *, has_endogenous_factors: bool, update_info: pd.DataFrame, control_data: Array, @@ -473,5 +474,4 @@ def measurements_from_states( epsilon = multivariate_normal([0] * n_meas, np.diag(sds**2), n_obs) states_part = np.dot(states, loadings.T) control_part = np.dot(controls, control_params.T) - meas = states_part + control_part + epsilon - return meas + return states_part + control_part + epsilon diff --git a/src/skillmodels/transition_functions.py b/src/skillmodels/transition_functions.py index 89a5b504..e8d08fcf 100644 --- a/src/skillmodels/transition_functions.py +++ b/src/skillmodels/transition_functions.py @@ -90,13 +90,12 @@ def translog(states: Array, params: Array) -> Array: def params_translog(factors: tuple[str, ...]) -> list[str]: """Index tuples for the translog production function.""" - names = ( + return ( list(factors) + [f"{factor} ** 2" for factor in factors] + [f"{a} * {b}" for a, b in combinations(factors, 2)] + ["constant"] ) - return names def identity_constraints_translog( @@ -131,8 +130,7 @@ def log_ces(states: Array, params: Array) -> Array: # the log step for gammas underflows for gamma = 0, but this is handled correctly # by logsumexp and does not raise a warning. 
unscaled = jax.scipy.special.logsumexp(jnp.log(gammas) + states * phi) - result = unscaled * scaling_factor - return result + return unscaled * scaling_factor def params_log_ces(factors: tuple[str, ...]) -> list[str]: @@ -187,6 +185,7 @@ def robust_translog(states: Array, params: Array) -> Array: def params_robust_translog(factors: tuple[str, ...]) -> list[str]: + """Return parameter names for robust translog transition function.""" return params_translog(factors) @@ -214,8 +213,7 @@ def linear_and_squares(states: Array, params: Array) -> Array: def params_linear_and_squares(factors: tuple[str, ...]) -> list[str]: """Index tuples for the linear_and_squares production function.""" - names = list(factors) + [f"{factor} ** 2" for factor in factors] + ["constant"] - return names + return list(factors) + [f"{factor} ** 2" for factor in factors] + ["constant"] def identity_constraints_linear_and_squares( @@ -251,8 +249,7 @@ def log_ces_general(states: Array, params: Array) -> Array: # the log step for gammas underflows for gamma = 0, but this is handled correctly # by logsumexp and does not raise a warning. unscaled = jax.scipy.special.logsumexp(jnp.log(gammas) + states * sigmas) - result = unscaled * tfp - return result + return unscaled * tfp def params_log_ces_general(factors: tuple[str, ...]) -> list[str]: diff --git a/src/skillmodels/utilities.py b/src/skillmodels/utilities.py index 4db48a90..411b1c1a 100644 --- a/src/skillmodels/utilities.py +++ b/src/skillmodels/utilities.py @@ -1,3 +1,5 @@ +"""Utility functions for manipulating model specifications and parameters.""" + import warnings from copy import deepcopy from typing import Any @@ -36,8 +38,7 @@ def extract_factors( factors = [factors] to_remove = list(set(model_dict["factors"]).difference(factors)) - out = remove_factors(to_remove, model_dict, params) - return out + return remove_factors(to_remove, model_dict, params) def update_parameter_values( @@ -116,11 +117,17 @@ def remove_factors( # Remove periods if necessary, but only if no endogenous factors are present. # (else we would mess up the mapping between raw periods model periods) if not has_endogenous_factors: - new_n_periods = get_dimensions(out, has_endogenous_factors).n_periods + new_n_periods = get_dimensions( + out, has_endogenous_factors=has_endogenous_factors + ).n_periods out = reduce_n_periods(out, new_n_periods) if params is not None: - out_params = _reduce_params(params, out, has_endogenous_factors) # ty: ignore[invalid-argument-type] + out_params = _reduce_params( + params, + out, # ty: ignore[invalid-argument-type] + has_endogenous_factors=has_endogenous_factors, + ) out = (out, out_params) return out # ty: ignore[invalid-return-type] @@ -337,6 +344,7 @@ def _remove_from_dict( def _reduce_params( params: pd.DataFrame, model_dict: dict[str, Any], + *, has_endogenous_factors: bool, ) -> pd.DataFrame: """Reduce a parameter DataFrame from a larger model to a reduced model. @@ -389,14 +397,13 @@ def _get_params_index_from_model_dict( model_dict: dict[str, Any], ) -> pd.MultiIndex: mod = process_model(model_dict) - index = get_params_index( + return get_params_index( update_info=mod.update_info, labels=mod.labels, dimensions=mod.dimensions, transition_info=mod.transition_info, endogenous_factors_info=mod.endogenous_factors_info, ) - return index def _remove_measurements_from_normalizations( @@ -408,6 +415,7 @@ def _remove_measurements_from_normalizations( warnings.warn( "Your removed a normalized measurement from a model. 
Make sure there are " "enough normalizations left to ensure identification.", + stacklevel=2, ) return reduced diff --git a/src/skillmodels/utils_plotting.py b/src/skillmodels/utils_plotting.py index ab7e6ea2..bafa90fd 100644 --- a/src/skillmodels/utils_plotting.py +++ b/src/skillmodels/utils_plotting.py @@ -1,3 +1,5 @@ +"""Utility functions for configuring plot layouts and subplots.""" + from typing import Any import numpy as np @@ -7,6 +9,7 @@ def get_layout_kwargs( layout_kwargs: dict[str, Any] | None = None, legend_kwargs: dict[str, Any] | None = None, title_kwargs: dict[str, Any] | None = None, + *, showlegend: bool = False, columns: list[str] | None = None, rows: list[str] | None = None, @@ -39,6 +42,7 @@ def get_layout_kwargs( def get_make_subplot_kwargs( + *, sharex: bool, sharey: bool, column_order: list[str], diff --git a/src/skillmodels/visualize_factor_distributions.py b/src/skillmodels/visualize_factor_distributions.py index 6ca1fc87..b0f0f3a6 100644 --- a/src/skillmodels/visualize_factor_distributions.py +++ b/src/skillmodels/visualize_factor_distributions.py @@ -1,3 +1,5 @@ +"""Functions to visualize distributions of latent factors.""" + import warnings from copy import deepcopy from typing import TYPE_CHECKING, Any @@ -29,6 +31,7 @@ def combine_distribution_plots( factor_order: list[str] | None = None, factor_mapping: dict[str, str] | None = None, make_subplot_kwargs: dict[str, Any] | None = None, + *, sharex: bool = False, sharey: bool = False, line_width: float = 1.5, @@ -165,6 +168,7 @@ def univariate_densities( params: pd.DataFrame, period: int, factors: list[str] | None = None, + *, observed_factors: bool = False, states: pd.DataFrame | dict[str, pd.DataFrame] | list[pd.DataFrame] | None = None, show_curve: bool = True, @@ -238,14 +242,14 @@ def univariate_densities( scenarios = df["scenario"].unique() plots_dict = {} distplot_kwargs = _process_distplot_kwargs( - show_curve, - show_hist, - show_rug, - curve_type, - bin_size, - scenarios, - colorscale, - distplot_kwargs, + show_curve=show_curve, + show_hist=show_hist, + show_rug=show_rug, + curve_type=curve_type, + bin_size=bin_size, + scenarios=scenarios, + colorscale=colorscale, + distplot_kwargs=distplot_kwargs, ) plots_dict = {} layout_kwargs = get_layout_kwargs(layout_kwargs) @@ -257,6 +261,7 @@ def univariate_densities( warnings.warn( f"""Plotting univariate density failed for {fac} in period {period} with error:\n\n{e}""", + stacklevel=2, ) fig = go.Figure() fig.update_layout(showlegend=False) @@ -273,6 +278,7 @@ def bivariate_density_contours( params: pd.DataFrame, period: int, factors: list[str] | None = None, + *, observed_factors: bool = False, states: pd.DataFrame | dict[str, pd.DataFrame] | list[pd.DataFrame] | None = None, n_points: int = 50, @@ -352,10 +358,10 @@ def bivariate_density_contours( plots_dict = {} contour_kwargs = _process_contour_kwargs( contour_kwargs, - contours_showlabels, - contours_coloring, - contours_colorscale, - showcolorbar, + contours_showlabels=contours_showlabels, + contours_coloring=contours_coloring, + contours_colorscale=contours_colorscale, + contours_showscale=showcolorbar, ) layout_kwargs = _process_layout_kwargs(layout_kwargs) pairs = [] @@ -387,6 +393,7 @@ def bivariate_density_contours( Contour plot failed for {pair} in period {period} with error:\n\n{e} """, + stacklevel=2, ) fig.update_xaxes(title={"text": pair[0]}) fig.update_yaxes(title={"text": pair[1]}) @@ -402,6 +409,7 @@ def bivariate_density_surfaces( params: pd.DataFrame, period: int, factors: list[str] | None 
= None, + *, observed_factors: bool = False, states: pd.DataFrame | None = None, n_points: int = 50, @@ -479,9 +487,9 @@ def bivariate_density_surfaces( plots_dict = {} layout_kwargs = _process_layout_kwargs_3d( layout_kwargs, - showgrids, - showaxlines, - showlabels, + showgrids=showgrids, + showaxlines=showaxlines, + showlabels=showlabels, ) pairs = [] for fac1 in factors: @@ -506,6 +514,7 @@ def bivariate_density_surfaces( warnings.warn( f"""Plotting bivariate density surfaces for {pair} in period {period} with error:\n\n{e}""", + stacklevel=2, ) fig = go.Figure() fig.update_layout( @@ -568,6 +577,7 @@ def _process_data( def _process_distplot_kwargs( + *, show_curve: bool, show_hist: bool, show_rug: bool, @@ -615,6 +625,7 @@ def _calculate_kde_for_3d( def _process_contour_kwargs( contour_kwargs: dict[str, Any] | None, + *, contours_showlabels: bool, contours_coloring: str | None, contours_colorscale: str, @@ -651,6 +662,7 @@ def _process_layout_kwargs( def _process_layout_kwargs_3d( layout_kwargs: dict[str, Any] | None, + *, showgrids: bool, showaxlines: bool, showlabels: bool, @@ -703,6 +715,7 @@ def _get_ordered_factors( def _get_factors( factors: list[str] | None, + *, observed_factors: bool, model: ProcessedModel, ) -> list[str]: diff --git a/src/skillmodels/visualize_transition_equations.py b/src/skillmodels/visualize_transition_equations.py index 7400ccb4..060e2e90 100644 --- a/src/skillmodels/visualize_transition_equations.py +++ b/src/skillmodels/visualize_transition_equations.py @@ -1,3 +1,5 @@ +"""Functions to visualize transition equations and production functions.""" + import itertools from collections.abc import Callable # noqa: TC003 from copy import deepcopy @@ -29,6 +31,7 @@ def combine_transition_plots( row_order: list[str] | str | None = None, factor_mapping: dict[str, str] | None = None, make_subplot_kwargs: dict[str, Any] | None = None, + *, sharex: bool = False, sharey: bool = True, showlegend: bool = True, @@ -80,11 +83,11 @@ def combine_transition_plots( column_order, row_order = _process_orders(column_order, row_order, plots_dict) make_subplot_kwargs = get_make_subplot_kwargs( - sharex, - sharey, - column_order, - row_order, - make_subplot_kwargs, + sharex=sharex, + sharey=sharey, + column_order=column_order, + row_order=row_order, + make_subplot_kwargs=make_subplot_kwargs, ) factor_mapping = _process_factor_mapping_trans( factor_mapping, @@ -125,12 +128,12 @@ def combine_transition_plots( ) layout_kwargs = get_layout_kwargs( - layout_kwargs, - legend_kwargs, - title_kwargs, - showlegend, - column_order, - row_order, + layout_kwargs=layout_kwargs, + legend_kwargs=legend_kwargs, + title_kwargs=title_kwargs, + showlegend=showlegend, + columns=column_order, + rows=row_order, ) fig.update_layout(**layout_kwargs) return fig @@ -151,6 +154,7 @@ def get_transition_plots( n_draws: int = 50, colorscale: str = "Magenta_r", layout_kwargs: dict[str, Any] | None = None, + *, include_correction_factors: bool = False, ) -> dict[tuple[str, str], go.Figure]: """Get dictionary with individual plots of transition equations for each factor. 
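# Illustrative sketch (hypothetical, not from the diff): the `stacklevel=2`
# arguments added to warnings.warn in the plotting modules above make the
# warning point at the user's call site rather than at library internals.
import warnings

def plot_or_warn() -> None:
    # With stacklevel=2, the reported file and line belong to the caller of
    # plot_or_warn, which is usually where the fix is needed.
    warnings.warn("Plotting failed, returning an empty figure.", stacklevel=2)

plot_or_warn()  # the emitted warning is attributed to this line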
@@ -208,7 +212,7 @@ def get_transition_plots( states = get_filtered_states(model_dict=model_dict, data=data, params=params)[ "anchored_states" ]["states"] - plots_dict = _get_dictionary_with_plots( + return _get_dictionary_with_plots( model=model, data=data, params=params, @@ -223,7 +227,6 @@ def get_transition_plots( colorscale=colorscale, layout_kwargs=layout_kwargs, ) - return plots_dict def _get_dictionary_with_plots( @@ -240,6 +243,7 @@ def _get_dictionary_with_plots( n_draws: int, colorscale: str, layout_kwargs: dict[str, Any] | None, + *, showlegend: bool = True, ) -> dict[tuple[str, str], go.Figure]: """Get plots of transition functions for each input and output combination. @@ -409,8 +413,7 @@ def _set_index_params( endogenous_factors_info=model.endogenous_factors_info, ) - params = params.reindex(params_index) - return params + return params.reindex(params_index) def _get_states_data( @@ -503,8 +506,7 @@ def _prepare_data_for_one_plot_fixed_quantile_2d( quantile_data["quantile"] = quantile to_concat.append(quantile_data) - out = pd.concat(to_concat).reset_index() - return out + return pd.concat(to_concat).reset_index() def _process_quantiles_of_other_factors( @@ -551,8 +553,7 @@ def _prepare_data_for_one_plot_average_2d( draw_data[f"output_{output_factor}"] = np.array(output_arr) to_concat.append(draw_data) - out = pd.concat(to_concat).groupby(f"input_{input_factor}").mean().reset_index() - return out + return pd.concat(to_concat).groupby(f"input_{input_factor}").mean().reset_index() def _process_factor_mapping_trans( diff --git a/tests/test_clipping.py b/tests/test_clipping.py index b6bae71f..1afd17e3 100644 --- a/tests/test_clipping.py +++ b/tests/test_clipping.py @@ -1,10 +1,13 @@ +"""Tests for soft clipping functions.""" + import jax.numpy as jnp import numpy as np from skillmodels.clipping import soft_clipping -def test_one_sided_soft_maximum(): +def test_one_sided_soft_maximum() -> None: + """Test soft maximum clipping with lower bound.""" arr = jnp.array([-10.0, -5, -1, 1, 5, 10]) lower_bound = -8 lower_hardness = 3 @@ -21,7 +24,8 @@ def test_one_sided_soft_maximum(): np.testing.assert_allclose(res[1:], arr[1:], rtol=1e-05) -def test_one_sided_soft_minimum(): +def test_one_sided_soft_minimum() -> None: + """Test soft minimum clipping with upper bound.""" arr = jnp.array([-10.0, -5, -1, 1, 5, 10]) upper_bound = 8 upper_hardness = 3 diff --git a/tests/test_constraints.py b/tests/test_constraints.py index ddecd1ee..40ed681d 100644 --- a/tests/test_constraints.py +++ b/tests/test_constraints.py @@ -21,7 +21,7 @@ from skillmodels.types import Anchoring, Labels -def test_add_bounds(): +def test_add_bounds() -> None: ind_tups = [("shock_sds", i) for i in range(5)] + [ ("meas_sds", 4), ("bla", "blubb"), @@ -45,7 +45,7 @@ def test_add_bounds(): # ====================================================================================== -def test_normalization_constraints(): +def test_normalization_constraints() -> None: norm = { "fac1": { "loadings": [{"m1": 2, "m2": 1.5}, {"m1": 3}], @@ -94,7 +94,7 @@ def test_normalization_constraints(): # ====================================================================================== -def test_mixture_weight_constraints_mixture(): +def test_mixture_weight_constraints_mixture() -> None: calculated = _get_mixture_weights_constraints(n_mixtures=2) for c in calculated: del c["description"] @@ -102,7 +102,7 @@ def test_mixture_weight_constraints_mixture(): assert_list_equal_except_for_order(calculated, expected) -def 
test_mixture_weight_constraints_normal(): +def test_mixture_weight_constraints_normal() -> None: calculated = _get_mixture_weights_constraints(n_mixtures=1) for c in calculated: del c["description"] @@ -115,7 +115,7 @@ def test_mixture_weight_constraints_normal(): # ====================================================================================== -def test_stage_constraints(): +def test_stage_constraints() -> None: stages = (0,) stagemap = (0, 0, 0) @@ -136,7 +136,7 @@ def test_stage_constraints(): assert_list_equal_except_for_order(calculated, expected) -def test_stage_constraints_with_endogenous_factors(): +def test_stage_constraints_with_endogenous_factors() -> None: stages = (0, 1, 2, 3) stagemap = (0, 1, 0, 1, 2, 3) expected = [ @@ -169,7 +169,7 @@ def test_stage_constraints_with_endogenous_factors(): # ====================================================================================== -def test_constant_factor_constraints(): +def test_constant_factor_constraints() -> None: labels = Labels( latent_factors=("fac1", "fac2"), observed_factors=(), @@ -201,7 +201,7 @@ def test_constant_factor_constraints(): # ====================================================================================== -def test_initial_mean_constraints(): +def test_initial_mean_constraints() -> None: nmixtures = 3 factors = ("fac1", "fac2", "fac3") ind_tups = [ @@ -223,7 +223,7 @@ def test_initial_mean_constraints(): # ====================================================================================== -def test_trans_coeff_constraints(): +def test_trans_coeff_constraints() -> None: labels = Labels( latent_factors=("fac1", "fac2", "fac3"), observed_factors=(), @@ -297,12 +297,14 @@ def base_anchoring_info(): ) -def test_anchoring_constraints_no_constraint_needed(anch_uinfo, base_anchoring_info): +def test_anchoring_constraints_no_constraint_needed( + anch_uinfo, base_anchoring_info +) -> None: calculated = _get_anchoring_constraints(anch_uinfo, (), base_anchoring_info, (0, 1)) assert calculated == [] -def test_anchoring_constraints_for_constants(anch_uinfo, base_anchoring_info): +def test_anchoring_constraints_for_constants(anch_uinfo) -> None: anchoring_info = Anchoring( anchoring=True, factors=("f1", "f2"), @@ -331,7 +333,7 @@ def test_anchoring_constraints_for_constants(anch_uinfo, base_anchoring_info): assert calculated == expected -def test_anchoring_constraints_for_controls(anch_uinfo, base_anchoring_info): +def test_anchoring_constraints_for_controls(anch_uinfo) -> None: anchoring_info = Anchoring( anchoring=True, factors=("f1", "f2"), @@ -371,7 +373,7 @@ def test_anchoring_constraints_for_controls(anch_uinfo, base_anchoring_info): assert calculated == expected -def test_anchoring_constraints_for_loadings(anch_uinfo, base_anchoring_info): +def test_anchoring_constraints_for_loadings(anch_uinfo) -> None: anchoring_info = Anchoring( anchoring=True, factors=("f1", "f2"), @@ -402,7 +404,7 @@ def test_anchoring_constraints_for_loadings(anch_uinfo, base_anchoring_info): assert calculated == expected -def assert_list_equal_except_for_order(list1, list2): +def assert_list_equal_except_for_order(list1, list2) -> None: for item in list1: assert item in list2, f"{item} is in list1 but not in list2" for item in list2: @@ -411,12 +413,12 @@ def assert_list_equal_except_for_order(list1, list2): @pytest.fixture def simplest_augmented_model(): - with open(TEST_DATA_DIR / "simplest_augmented_model.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) + with (TEST_DATA_DIR / 
"simplest_augmented_model.yaml").open() as y: + model_dict = yaml.load(y, Loader=yaml.SafeLoader) return process_model(model_dict) -def test_get_constraints_for_augmented_periods(simplest_augmented_model): +def test_get_constraints_for_augmented_periods(simplest_augmented_model) -> None: calculated = _get_constraints_for_augmented_periods( labels=simplest_augmented_model.labels, endogenous_factors_info=simplest_augmented_model.endogenous_factors_info, diff --git a/tests/test_correlation_heatmap.py b/tests/test_correlation_heatmap.py index 6ecfe9f0..50830522 100644 --- a/tests/test_correlation_heatmap.py +++ b/tests/test_correlation_heatmap.py @@ -16,7 +16,7 @@ from skillmodels.types import Labels -def test_get_measurement_data_with_single_period(): +def test_get_measurement_data_with_single_period() -> None: period = 1 factors = ["f3", "f1"] update_info = pd.DataFrame( @@ -64,7 +64,7 @@ def test_get_measurement_data_with_single_period(): afe(result, expected) -def test_get_factor_scores_data_with_single_period(): +def test_get_factor_scores_data_with_single_period() -> None: period = 1 factors = ["f1", "f2"] update_info = pd.DataFrame( @@ -117,7 +117,7 @@ def test_get_factor_scores_data_with_single_period(): afe(expected, result, check_dtype=False) -def test_get_measurement_data_with_multiple_periods(): +def test_get_measurement_data_with_multiple_periods() -> None: period = [1, 2] factors = ["f3", "f1"] update_info = pd.DataFrame( @@ -176,7 +176,7 @@ def test_get_measurement_data_with_multiple_periods(): afe(result, expected) -def test_get_factor_scores_data_with_multiple_period(): +def test_get_factor_scores_data_with_multiple_period() -> None: periods = [0, 1] factors = ["f1", "f2"] update_info = pd.DataFrame( @@ -245,7 +245,7 @@ def test_get_factor_scores_data_with_multiple_period(): afe(expected, result) -def test_process_factors(): +def test_process_factors() -> None: model = SimpleNamespace( labels=Labels( latent_factors=tuple("abcd"), @@ -273,7 +273,7 @@ def test_process_factors(): assert [factors[-1] == _process_factors(model, factors)[1]] # ty: ignore[invalid-argument-type] -def test_get_mask_lower_triangle_only(): +def test_get_mask_lower_triangle_only() -> None: corr = pd.DataFrame(np.ones((4, 4))) show_upper = False show_diag = False @@ -285,11 +285,11 @@ def test_get_mask_lower_triangle_only(): [True] * 3 + [False], ], ) - result = _get_mask(corr, show_upper, show_diag) + result = _get_mask(corr, show_upper_triangle=show_upper, show_diagonal=show_diag) np.testing.assert_array_equal(result, expected) -def test_get_mask_lower_triangle_and_diag(): +def test_get_mask_lower_triangle_and_diag() -> None: corr = pd.DataFrame(np.ones((4, 4))) show_upper = False show_diag = True @@ -301,11 +301,11 @@ def test_get_mask_lower_triangle_and_diag(): [True] * 4, ], ) - result = _get_mask(corr, show_upper, show_diag) + result = _get_mask(corr, show_upper_triangle=show_upper, show_diagonal=show_diag) np.testing.assert_array_equal(result, expected) -def test_get_mask_lower_and_upper_triangle_no_diag(): +def test_get_mask_lower_and_upper_triangle_no_diag() -> None: corr = pd.DataFrame(np.ones((4, 4))) show_upper = True show_diag = False @@ -317,14 +317,14 @@ def test_get_mask_lower_and_upper_triangle_no_diag(): [True] * 3 + [False], ], ) - result = _get_mask(corr, show_upper, show_diag) + result = _get_mask(corr, show_upper_triangle=show_upper, show_diagonal=show_diag) np.testing.assert_array_equal(result, expected) -def test_get_mask_full_square_matrix(): +def test_get_mask_full_square_matrix() 
-> None: corr = pd.DataFrame(np.ones((4, 4))) show_upper = True show_diag = True expected = corr.to_numpy().astype(bool) - result = _get_mask(corr, show_upper, show_diag) + result = _get_mask(corr, show_upper_triangle=show_upper, show_diagonal=show_diag) np.testing.assert_array_equal(result, expected) diff --git a/tests/test_decorators.py b/tests/test_decorators.py index a4f939cf..fcfcc762 100644 --- a/tests/test_decorators.py +++ b/tests/test_decorators.py @@ -3,7 +3,7 @@ from skillmodels.decorators import extract_params, jax_array_output, register_params -def test_extract_params_decorator_only_key(): +def test_extract_params_decorator_only_key() -> None: @extract_params(key="a") def f(x, params): return x * params @@ -11,7 +11,7 @@ def f(x, params): assert f(x=3, params={"a": 4, "b": 5}) == 12 -def test_extract_params_direct_call_only_key(): +def test_extract_params_direct_call_only_key() -> None: def f(x, params): return x * params @@ -20,7 +20,7 @@ def f(x, params): assert g(x=3, params={"a": 4, "b": 5}) == 12 -def test_extract_params_decorator_only_names(): +def test_extract_params_decorator_only_names() -> None: @extract_params(names=["c", "d"]) def f(x, params): return x * params["c"] @@ -28,7 +28,7 @@ def f(x, params): assert f(x=3, params=[4, 5]) == 12 -def test_extract_params_direct_call_only_names(): +def test_extract_params_direct_call_only_names() -> None: def f(x, params): return x * params["c"] @@ -36,7 +36,7 @@ def f(x, params): assert g(x=3, params=[4, 5]) == 12 -def test_extract_params_decorator_key_and_names(): +def test_extract_params_decorator_key_and_names() -> None: @extract_params(key="a", names=["c", "d"]) def f(x, params): return x * params["c"] @@ -44,7 +44,7 @@ def f(x, params): assert f(x=3, params={"a": [4, 5], "b": [5, 6]}) == 12 -def test_extract_params_direct_call_key_and_names(): +def test_extract_params_direct_call_key_and_names() -> None: def f(x, params): return x * params["c"] @@ -52,7 +52,7 @@ def f(x, params): assert g(x=3, params={"a": [4, 5], "b": [5, 6]}) == 12 -def test_jax_array_output_decorator(): +def test_jax_array_output_decorator() -> None: @jax_array_output def f(): return (1, 2, 3) @@ -60,7 +60,7 @@ def f(): assert isinstance(f(), jnp.ndarray) -def test_jax_array_output_direct_call(): +def test_jax_array_output_direct_call() -> None: def f(): return (1, 2, 3) @@ -69,17 +69,17 @@ def f(): assert isinstance(g(), jnp.ndarray) -def test_register_params_decorator(): +def test_register_params_decorator() -> None: @register_params(params=["a", "b", "c"]) - def f(): + def f() -> str: return "bla" assert f.__registered_params__ == ["a", "b", "c"] assert f() == "bla" -def test_register_params_direct_call(): - def f(): +def test_register_params_direct_call() -> None: + def f() -> str: return "bla" g = register_params(f, params=["a", "b", "c"]) diff --git a/tests/test_filtered_states.py b/tests/test_filtered_states.py index 18ad272c..bcfc0fc7 100644 --- a/tests/test_filtered_states.py +++ b/tests/test_filtered_states.py @@ -14,19 +14,17 @@ @pytest.fixture def model2(): - with open(TEST_DATA_DIR / "model2.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) - return model_dict + with (TEST_DATA_DIR / "model2.yaml").open() as y: + return yaml.load(y, Loader=yaml.SafeLoader) @pytest.fixture def model2_data(): data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") - data = data.set_index(["caseid", "period"]) - return data + return data.set_index(["caseid", "period"]) -def test_get_filtered_states(model2, model2_data): +def 
test_get_filtered_states(model2, model2_data) -> None: params = pd.read_csv(REGRESSION_VAULT / "one_stage_anchoring.csv") params = params.set_index(["category", "period", "name1", "name2"]) diff --git a/tests/test_kalman_filters.py b/tests/test_kalman_filters.py index 39714b3e..b9c45d31 100644 --- a/tests/test_kalman_filters.py +++ b/tests/test_kalman_filters.py @@ -28,7 +28,7 @@ @pytest.mark.parametrize(("seed", "update_func"), product(SEEDS, UPDATE_FUNCS)) -def test_kalman_update(seed, update_func): +def test_kalman_update(seed, update_func) -> None: np.random.seed(seed) dim = np.random.randint(low=1, high=10) n_obs = 5 @@ -86,7 +86,7 @@ def test_kalman_update(seed, update_func): @pytest.mark.parametrize("update_func", UPDATE_FUNCS) -def test_kalman_update_with_missing(update_func): +def test_kalman_update_with_missing(update_func) -> None: """State, cov and weights should not change, log likelihood should be zero.""" n_mixtures = 2 n_obs = 3 @@ -133,7 +133,7 @@ def test_kalman_update_with_missing(update_func): @pytest.mark.parametrize("seed", SEEDS) -def test_sigma_points(seed: int): +def test_sigma_points(seed: int) -> None: np.random.seed(seed) state, cov = _random_state_and_covariance() observed_factors = jnp.arange(2).reshape(1, 2) @@ -157,7 +157,7 @@ def test_sigma_points(seed: int): @pytest.mark.parametrize("seed", SEEDS) -def test_sigma_scaling_factor_and_weights(seed): +def test_sigma_scaling_factor_and_weights(seed) -> None: np.random.seed(seed) dim = np.random.randint(low=1, high=15) kappa = np.random.uniform(low=0.5, high=5) @@ -176,14 +176,13 @@ def test_sigma_scaling_factor_and_weights(seed): # ====================================================================================== -def test_transformation_of_sigma_points(): +def test_transformation_of_sigma_points() -> None: sp = jnp.arange(10).reshape(1, 1, 5, 2) + 1 def f(params, states): - out = jnp.column_stack( + return jnp.column_stack( [(states * params["fac1"][0]).sum(axis=1), states[..., 1]], ) - return out trans_coeffs = {"fac1": jnp.array([2]), "fac2": jnp.array([])} @@ -213,7 +212,7 @@ def f(params, states): @pytest.mark.parametrize("seed", SEEDS) -def test_predict_against_linear_filterpy(seed): +def test_predict_against_linear_filterpy(seed) -> None: np.random.seed(seed) state, cov = _random_state_and_covariance() dim = len(state) @@ -235,8 +234,7 @@ def linear(params, states): return jnp.dot(states, params) def transition_function(params, states): - out = jnp.column_stack([linear(params[f"fac{i}"], states) for i in range(dim)]) - return out + return jnp.column_stack([linear(params[f"fac{i}"], states) for i in range(dim)]) sm_state, sm_chol = _convert_predict_inputs_from_filterpy_to_skillmodels(state, cov) scaling_factor, weights = calculate_sigma_scaling_factor_and_weights(dim, 2) diff --git a/tests/test_likelihood_regression.py b/tests/test_likelihood_regression.py index 2e2d2423..962c4a40 100644 --- a/tests/test_likelihood_regression.py +++ b/tests/test_likelihood_regression.py @@ -29,16 +29,14 @@ @pytest.fixture def model2(): - with open(TEST_DATA_DIR / "model2.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) - return model_dict + with (TEST_DATA_DIR / "model2.yaml").open() as y: + return yaml.load(y, Loader=yaml.SafeLoader) @pytest.fixture def model2_data(): data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") - data = data.set_index(["caseid", "period"]) - return data + return data.set_index(["caseid", "period"]) def _convert_model(base_model, model_name): @@ -60,8 +58,9 @@ def 
constant(fac3, params): @register_params(params=["fac1", "fac2", "fac3", "constant"]) def linear(fac1, fac2, fac3, params): p = params - out = p["constant"] + fac1 * p["fac1"] + fac2 * p["fac2"] + fac3 * p["fac3"] - return out + return ( + p["constant"] + fac1 * p["fac1"] + fac2 * p["fac2"] + fac3 * p["fac3"] + ) model["factors"]["fac2"]["transition_function"] = linear model["factors"]["fac3"]["transition_function"] = constant @@ -73,7 +72,9 @@ def linear(fac1, fac2, fac3, params): @pytest.mark.parametrize( ("model_name", "fun_key"), product(MODEL_NAMES, ["loglike", "debug_loglike"]) ) -def test_likelihood_values_have_not_changed(model2, model2_data, model_name, fun_key): +def test_likelihood_values_have_not_changed( + model2, model2_data, model_name, fun_key +) -> None: regvault = REGRESSION_VAULT model = _convert_model(model2, model_name) params = pd.read_csv(regvault / f"{model_name}.csv").set_index( @@ -87,12 +88,12 @@ def test_likelihood_values_have_not_changed(model2, model2_data, model_name, fun fun = inputs[fun_key] new_loglike = fun(params)["value"] if "debug" in fun_key else fun(params) - with open(regvault / f"{model_name}_result.json") as j: + with (regvault / f"{model_name}_result.json").open() as j: old_loglike = np.array(json.load(j)).sum() aaae(new_loglike, old_loglike) -def test_splitting_does_not_change_gradient(model2, model2_data): +def test_splitting_does_not_change_gradient(model2, model2_data) -> None: inputs = get_maximization_inputs(model2, model2_data) inputs_split = get_maximization_inputs(model2, model2_data, 13) @@ -110,7 +111,7 @@ def test_splitting_does_not_change_gradient(model2, model2_data): ) def test_likelihood_contributions_have_not_changed( model2, model2_data, model_name, fun_key -): +) -> None: regvault = REGRESSION_VAULT model = _convert_model(model2, model_name) params = pd.read_csv(regvault / f"{model_name}.csv").set_index( @@ -124,7 +125,7 @@ def test_likelihood_contributions_have_not_changed( fun = inputs[fun_key] new_loglikes = fun(params)["contributions"] if "debug" in fun_key else fun(params) - with open(regvault / f"{model_name}_result.json") as j: + with (regvault / f"{model_name}_result.json").open() as j: old_loglikes = np.array(json.load(j)) aaae(new_loglikes, old_loglikes) @@ -133,7 +134,9 @@ def test_likelihood_contributions_have_not_changed( ("model_type", "fun_key"), product(["no_stages_anchoring", "with_missings"], ["loglike_and_gradient"]), ) -def test_likelihood_contributions_large_nobs(model2, model2_data, model_type, fun_key): +def test_likelihood_contributions_large_nobs( + model2, model2_data, model_type, fun_key +) -> None: regvault = REGRESSION_VAULT model = _convert_model(model2, "no_stages_anchoring") params = pd.read_csv(regvault / "no_stages_anchoring.csv").set_index( @@ -191,7 +194,7 @@ def test_likelihood_contributions_large_nobs(model2, model2_data, model_type, fu assert np.isfinite(loglike[1]).all() -def test_likelihood_runs_with_empty_periods(model2, model2_data): +def test_likelihood_runs_with_empty_periods(model2, model2_data) -> None: del model2["anchoring"] for factor in ["fac1", "fac2"]: model2["factors"][factor]["measurements"][-1] = [] @@ -206,7 +209,7 @@ def test_likelihood_runs_with_empty_periods(model2, model2_data): debug_loglike(params) -def test_likelihood_runs_with_too_long_data(model2, model2_data): +def test_likelihood_runs_with_too_long_data(model2, model2_data) -> None: model = reduce_n_periods(model2, 2) func_dict = get_maximization_inputs(model, model2_data) # ty: ignore[invalid-argument-type] @@ 
-217,7 +220,7 @@ def test_likelihood_runs_with_too_long_data(model2, model2_data): debug_loglike(params) -def test_likelihood_runs_with_observed_factors(model2, model2_data): +def test_likelihood_runs_with_observed_factors(model2, model2_data) -> None: model2["observed_factors"] = ["ob1", "ob2"] model2_data["ob1"] = np.arange(len(model2_data)) model2_data["ob2"] = np.ones(len(model2_data)) diff --git a/tests/test_maximization_inputs.py b/tests/test_maximization_inputs.py index 0f901f25..1f2dd6fa 100644 --- a/tests/test_maximization_inputs.py +++ b/tests/test_maximization_inputs.py @@ -1,21 +1,26 @@ +"""Tests for maximization input functions.""" + import jax.numpy as jnp import numpy as np from skillmodels.maximization_inputs import _to_numpy -def test_to_numpy_with_dict(): +def test_to_numpy_with_dict() -> None: + """Test _to_numpy with dictionary input.""" dict_ = {"a": jnp.ones(3), "b": 4.5} calculated = _to_numpy(dict_) assert isinstance(calculated["a"], np.ndarray) assert isinstance(calculated["b"], float) -def test_to_numpy_one_array(): +def test_to_numpy_one_array() -> None: + """Test _to_numpy with single array input.""" calculated = _to_numpy(jnp.ones(3)) assert isinstance(calculated, np.ndarray) -def test_to_numpy_one_float(): +def test_to_numpy_one_float() -> None: + """Test _to_numpy with single float input.""" calculated = _to_numpy(3.5) assert isinstance(calculated, float) diff --git a/tests/test_params_index.py b/tests/test_params_index.py index 84d4730a..b34b2b42 100644 --- a/tests/test_params_index.py +++ b/tests/test_params_index.py @@ -21,21 +21,20 @@ @pytest.fixture def model2_inputs(): - with open(TEST_DATA_DIR / "model2.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) + with (TEST_DATA_DIR / "model2.yaml").open() as y: + model_dict = yaml.load(y, Loader=yaml.SafeLoader) processed = process_model(model_dict) - out = { + return { "update_info": processed.update_info, "labels": processed.labels, "dimensions": processed.dimensions, "transition_info": processed.transition_info, "endogenous_factors_info": processed.endogenous_factors_info, } - return out -def test_params_index_with_model2(model2_inputs): +def test_params_index_with_model2(model2_inputs) -> None: calculated = get_params_index(**model2_inputs) expected = pd.read_csv( TEST_DATA_DIR / "model2_correct_params_index.csv", @@ -45,7 +44,7 @@ def test_params_index_with_model2(model2_inputs): assert calculated.equals(expected) -def test_control_coeffs_index_tuples(): +def test_control_coeffs_index_tuples() -> None: uinfo_tups = [(0, "m1"), (0, "m2"), (0, "bla"), (1, "m1"), (1, "m2")] uinfo = pd.DataFrame(index=pd.MultiIndex.from_tuples(uinfo_tups)) controls = ("constant", "c1") @@ -67,7 +66,7 @@ def test_control_coeffs_index_tuples(): assert calculated == expected -def test_loading_index_tuples(): +def test_loading_index_tuples() -> None: uinfo_tups = [(0, "m1"), (0, "m2"), (0, "bla"), (1, "m1"), (1, "m2")] uinfo = pd.DataFrame( True, @@ -92,7 +91,7 @@ def test_loading_index_tuples(): assert calculated == expected -def test_meas_sd_index_tuples(): +def test_meas_sd_index_tuples() -> None: uinfo_tups = [(0, "m1"), (0, "m2"), (0, "bla"), (1, "m1"), (1, "m2")] uinfo = pd.DataFrame(index=pd.MultiIndex.from_tuples(uinfo_tups)) @@ -108,7 +107,7 @@ def test_meas_sd_index_tuples(): assert calculated == expected -def test_shock_sd_index_tuples(): +def test_shock_sd_index_tuples() -> None: periods = (0, 1, 2) factors = ("fac1", "fac2") @@ -119,11 +118,13 @@ def test_shock_sd_index_tuples(): ("shock_sds", 1, 
"fac2", "-"), ] - calculated = get_shock_sds_index_tuples(periods, factors, False) + calculated = get_shock_sds_index_tuples( + periods, factors, has_endogenous_factors=False + ) assert calculated == expected -def test_initial_mean_index_tuples(): +def test_initial_mean_index_tuples() -> None: nmixtures = 3 factors = ("fac1", "fac2") @@ -140,7 +141,7 @@ def test_initial_mean_index_tuples(): assert calculated == expected -def test_mixture_weight_index_tuples(): +def test_mixture_weight_index_tuples() -> None: nmixtures = 3 expected = [ ("mixture_weights", 0, "mixture_0", "-"), @@ -151,7 +152,7 @@ def test_mixture_weight_index_tuples(): assert calculated == expected -def test_initial_cov_index_tuples(): +def test_initial_cov_index_tuples() -> None: nmixtures = 2 factors = ("fac1", "fac2", "fac3") expected = [ @@ -173,7 +174,7 @@ def test_initial_cov_index_tuples(): assert calculated == expected -def test_trans_coeffs_index_tuples_no_endogenous_factors(): +def test_trans_coeffs_index_tuples_no_endogenous_factors() -> None: periods = (0, 1, 2) param_names = { @@ -216,7 +217,7 @@ def test_trans_coeffs_index_tuples_no_endogenous_factors(): assert calculated == expected -def test_trans_coeffs_index_tuples_has_endogenous_factors(): +def test_trans_coeffs_index_tuples_has_endogenous_factors() -> None: periods = (0, 1, 2, 3, 4, 5) param_names = { diff --git a/tests/test_parse_params.py b/tests/test_parse_params.py index b8383af2..036ab9a3 100644 --- a/tests/test_parse_params.py +++ b/tests/test_parse_params.py @@ -26,8 +26,8 @@ def parsed_parameters(): index_col=["category", "period", "name1", "name2"], ).index - with open(TEST_DATA_DIR / "model2.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) + with (TEST_DATA_DIR / "model2.yaml").open() as y: + model_dict = yaml.load(y, Loader=yaml.SafeLoader) processed = process_model(model_dict) @@ -64,34 +64,34 @@ def parsed_parameters(): ) -def test_controls(parsed_parameters): +def test_controls(parsed_parameters) -> None: expected = jnp.arange(118).reshape(59, 2) aae(parsed_parameters["pardict"]["controls"], expected) -def test_loadings(parsed_parameters): +def test_loadings(parsed_parameters) -> None: expected_values = jnp.arange(118, 177) calculated = parsed_parameters["pardict"]["loadings"] calculated_values = calculated[calculated != 0] aae(expected_values, calculated_values) -def test_meas_sds(parsed_parameters): +def test_meas_sds(parsed_parameters) -> None: expected = jnp.arange(177, 236) aae(parsed_parameters["pardict"]["meas_sds"], expected) -def test_shock_sds(parsed_parameters): +def test_shock_sds(parsed_parameters) -> None: expected = jnp.arange(236, 257).reshape(7, 3) aae(parsed_parameters["pardict"]["shock_sds"], expected) -def test_initial_states(parsed_parameters): +def test_initial_states(parsed_parameters) -> None: expected = jnp.arange(257, 260).reshape(1, 3).repeat(5, axis=0).reshape(5, 1, 3) aae(parsed_parameters["states"], expected) -def test_initial_upper_chols(parsed_parameters): +def test_initial_upper_chols(parsed_parameters) -> None: expected = ( jnp.array([[[261, 262, 264], [0, 263, 265], [0, 0, 266]]]) .repeat(5, axis=0) @@ -100,7 +100,7 @@ def test_initial_upper_chols(parsed_parameters): aae(parsed_parameters["upper_chols"], expected) -def test_transition_parameters(parsed_parameters): +def test_transition_parameters(parsed_parameters) -> None: calculated = parsed_parameters["pardict"]["transition"] aae(calculated["fac1"], jnp.arange(385, 413).reshape(7, 4) - 118) @@ -110,14 +110,14 @@ def 
test_transition_parameters(parsed_parameters): assert isinstance(calculated, dict) -def test_anchoring_scaling_factors(parsed_parameters): +def test_anchoring_scaling_factors(parsed_parameters) -> None: calculated = parsed_parameters["pardict"]["anchoring_scaling_factors"] expected = np.ones((8, 3)) expected[:, 0] = jnp.array([127 + 7 * i for i in range(8)]) aae(calculated, expected) -def test_anchoring_constants(parsed_parameters): +def test_anchoring_constants(parsed_parameters) -> None: calculated = parsed_parameters["pardict"]["anchoring_constants"] expected = np.zeros((8, 3)) expected[:, 0] = jnp.array([18 + i * 14 for i in range(8)]) diff --git a/tests/test_process_data.py b/tests/test_process_data.py index 9e924459..8910857d 100644 --- a/tests/test_process_data.py +++ b/tests/test_process_data.py @@ -22,11 +22,11 @@ from skillmodels.types import Labels -def test_pre_process_data(): +def test_pre_process_data() -> None: df = pd.DataFrame(data=np.arange(20).reshape(2, 10).T, columns=["var", "inv"]) df["period"] = [1, 2, 3, 2, 3, 4, 2, 4, 3, 1] df["id"] = [1, 1, 1, 3, 3, 3, 4, 4, 5, 5] - df.set_index(["id", "period"], inplace=True) + df = df.set_index(["id", "period"]) exp = pd.DataFrame() period = [0, 1, 2, 3] * 4 @@ -38,7 +38,7 @@ def test_pre_process_data(): } data = np.column_stack([period, id_, data["var"], data["inv"]]) exp = pd.DataFrame(data=data, columns=["__period__", "__id__", "var", "inv"]) - exp.set_index(["__id__", "__period__"], inplace=True) + exp = exp.set_index(["__id__", "__period__"]) res = pre_process_data(df, [0, 1, 2, 3]) assert res[["var", "inv"]].equals(exp[["var", "inv"]]) @@ -47,8 +47,8 @@ def test_pre_process_data(): @pytest.fixture def simplest_augmented(): out = {} - with open(TEST_DATA_DIR / "simplest_augmented_model.yaml") as y: - out["model_dict"] = yaml.load(y, Loader=yaml.FullLoader) + with (TEST_DATA_DIR / "simplest_augmented_model.yaml").open() as y: + out["model_dict"] = yaml.load(y, Loader=yaml.SafeLoader) _df = pd.DataFrame(data=np.arange(15).reshape(3, 5).T, columns=["var", "inv", "of"]) _df["period"] = [1, 1, 2, 1, 2] _df["id"] = [1, 3, 3, 5, 5] @@ -60,7 +60,7 @@ def simplest_augmented(): return out -def test_augment_data_for_endogenous_factors(simplest_augmented): +def test_augment_data_for_endogenous_factors(simplest_augmented) -> None: model = process_model(simplest_augmented["model_dict"]) pre_processed_data = pre_process_data( simplest_augmented["data_input"], model.labels.periods @@ -75,7 +75,7 @@ def test_augment_data_for_endogenous_factors(simplest_augmented): pd.testing.assert_frame_equal(res[cols], simplest_augmented["data_exp"][cols]) -def test_handle_controls_with_missings(): +def test_handle_controls_with_missings() -> None: controls = ("c1",) uinfo_ind_tups = [(0, "m1"), (0, "m2")] update_info = pd.DataFrame(index=pd.MultiIndex.from_tuples(uinfo_ind_tups)) @@ -85,14 +85,14 @@ def test_handle_controls_with_missings(): df["id"] = np.arange(4) df["__old_id__"] = df["id"] df["__old_period__"] = df["aug_period"] + 1 - df.set_index(["id", "aug_period"], inplace=True) + df = df.set_index(["id", "aug_period"]) with pytest.warns(UserWarning): # noqa: PT030 calculated = _handle_controls_with_missings(df, controls, update_info) assert calculated.loc[(2, 0)].isna().all() # ty: ignore[unresolved-attribute] -def test_generate_measurements_array(): +def test_generate_measurements_array() -> None: uinfo_ind_tups = [(0, "m1"), (0, "m2"), (1, "m1"), (1, "m3")] update_info = pd.DataFrame(index=pd.MultiIndex.from_tuples(uinfo_ind_tups)) @@ -111,7 
+111,7 @@ def test_generate_measurements_array(): aae(calculated, expected) -def test_generate_controls_array(): +def test_generate_controls_array() -> None: csv = """ id,aug_period,c1,c2 0, 0, 1, 2 @@ -140,7 +140,7 @@ def test_generate_controls_array(): aae(calculated, expected) -def test_generate_observed_factor_array(): +def test_generate_observed_factor_array() -> None: csv = """ id,aug_period,v1,v2 0, 0, 1, 2 diff --git a/tests/test_process_model.py b/tests/test_process_model.py index 4cd8c8ca..87aa0a50 100644 --- a/tests/test_process_model.py +++ b/tests/test_process_model.py @@ -16,16 +16,15 @@ @pytest.fixture def model2(): - with open(TEST_DATA_DIR / "model2.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) - return model_dict + with (TEST_DATA_DIR / "model2.yaml").open() as y: + return yaml.load(y, Loader=yaml.SafeLoader) -def test_has_endogenous_factors(model2): +def test_has_endogenous_factors(model2) -> None: assert process_model(model2).endogenous_factors_info.has_endogenous_factors == False -def test_dimensions(model2): +def test_dimensions(model2) -> None: res = process_model(model2).dimensions assert res.n_latent_factors == 3 assert res.n_observed_factors == 0 @@ -35,7 +34,7 @@ def test_dimensions(model2): assert res.n_mixtures == 1 -def test_labels(model2): +def test_labels(model2) -> None: res = process_model(model2).labels assert res.latent_factors == ("fac1", "fac2", "fac3") assert res.observed_factors == () @@ -46,14 +45,14 @@ def test_labels(model2): assert res.stages == (0,) -def test_estimation_options(model2): +def test_estimation_options(model2) -> None: res = process_model(model2).estimation_options assert res.sigma_points_scale == 2 assert res.robust_bounds assert res.bounds_distance == 0.001 -def test_anchoring(model2): +def test_anchoring(model2) -> None: res = process_model(model2).anchoring assert res.outcomes == {"fac1": "Q1"} assert res.factors == ("fac1",) @@ -62,7 +61,7 @@ def test_anchoring(model2): assert res.free_loadings -def test_transition_info(model2): +def test_transition_info(model2) -> None: res = process_model(model2).transition_info assert isinstance(res, TransitionInfo) @@ -71,7 +70,7 @@ def test_transition_info(model2): assert list(inspect.signature(res.func).parameters) == ["params", "states"] -def test_update_info(model2): +def test_update_info(model2) -> None: res = process_model(model2).update_info expected = pd.read_csv( TEST_DATA_DIR / "model2_correct_update_info.csv", @@ -80,7 +79,7 @@ def test_update_info(model2): assert_frame_equal(res, expected) -def test_normalizations(model2): +def test_normalizations(model2) -> None: expected = { "fac1": { "loadings": [ @@ -123,9 +122,9 @@ def test_normalizations(model2): # ====================================================================================== -def test_anchoring_and_endogenous_factors_work_together(): - with open(TEST_DATA_DIR / "model2.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) +def test_anchoring_and_endogenous_factors_work_together() -> None: + with (TEST_DATA_DIR / "model2.yaml").open() as y: + model_dict = yaml.load(y, Loader=yaml.SafeLoader) # Set fac3 to be endogenous model_dict["factors"]["fac3"]["is_endogenous"] = True del model_dict["stagemap"] @@ -146,9 +145,9 @@ def test_anchoring_and_endogenous_factors_work_together(): ) # One per aug_period for the one anchored factor -def test_stagemap_with_endogenous_factors_wrong_labels(): - with open(TEST_DATA_DIR / "model2.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) 
+def test_stagemap_with_endogenous_factors_wrong_labels() -> None: + with (TEST_DATA_DIR / "model2.yaml").open() as y: + model_dict = yaml.load(y, Loader=yaml.SafeLoader) # Set fac3 to be endogenous model_dict["factors"]["fac3"]["is_endogenous"] = True model_dict["stagemap"] = [0, 0, 1, 1, 2, 2, 4] @@ -157,9 +156,9 @@ def test_stagemap_with_endogenous_factors_wrong_labels(): process_model(model_dict) -def test_stagemap_with_endogenous_factors(): - with open(TEST_DATA_DIR / "model2.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) +def test_stagemap_with_endogenous_factors() -> None: + with (TEST_DATA_DIR / "model2.yaml").open() as y: + model_dict = yaml.load(y, Loader=yaml.SafeLoader) # Set fac3 to be endogenous model_dict["factors"]["fac3"]["is_endogenous"] = True model_dict["stagemap"] = [0, 0, 1, 1, 2, 2, 3] @@ -172,8 +171,8 @@ def test_stagemap_with_endogenous_factors(): @pytest.fixture def model2_inv(): - with open(TEST_DATA_DIR / "model2.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) + with (TEST_DATA_DIR / "model2.yaml").open() as y: + model_dict = yaml.load(y, Loader=yaml.SafeLoader) # Set fac3 to be endogenous model_dict["factors"]["fac3"]["is_endogenous"] = True del model_dict["stagemap"] @@ -181,13 +180,13 @@ def model2_inv(): return model_dict -def test_with_endog_has_endogenous_factors(model2_inv): +def test_with_endog_has_endogenous_factors(model2_inv) -> None: assert ( process_model(model2_inv).endogenous_factors_info.has_endogenous_factors == True ) -def test_with_endog_dimensions(model2_inv): +def test_with_endog_dimensions(model2_inv) -> None: res = process_model(model2_inv).dimensions assert res.n_latent_factors == 3 assert res.n_observed_factors == 0 @@ -198,7 +197,7 @@ def test_with_endog_dimensions(model2_inv): assert res.n_mixtures == 1 -def test_with_endog_labels(model2_inv): +def test_with_endog_labels(model2_inv) -> None: res = process_model(model2_inv).labels n_aug_periods = 16 assert res.latent_factors == ("fac1", "fac2", "fac3") @@ -211,14 +210,14 @@ def test_with_endog_labels(model2_inv): assert res.aug_stages == tuple(range(n_aug_periods - 2)) -def test_with_endog_estimation_options(model2_inv): +def test_with_endog_estimation_options(model2_inv) -> None: res = process_model(model2_inv).estimation_options assert res.sigma_points_scale == 2 assert res.robust_bounds assert res.bounds_distance == 0.001 -def test_with_endog_anchoring_is_empty(model2_inv): +def test_with_endog_anchoring_is_empty(model2_inv) -> None: res = process_model(model2_inv).anchoring assert res.outcomes == {} assert res.factors == () @@ -227,7 +226,7 @@ def test_with_endog_anchoring_is_empty(model2_inv): assert res.free_loadings is False -def test_with_endog_transition_info(model2_inv): +def test_with_endog_transition_info(model2_inv) -> None: res = process_model(model2_inv).transition_info assert isinstance(res, TransitionInfo) @@ -236,7 +235,7 @@ def test_with_endog_transition_info(model2_inv): assert list(inspect.signature(res.func).parameters) == ["params", "states"] -def test_with_endog_update_info(model2_inv): +def test_with_endog_update_info(model2_inv) -> None: res = process_model(model2_inv).update_info expected = pd.read_csv( TEST_DATA_DIR / "model2_with_endog_correct_update_info.csv", @@ -245,7 +244,7 @@ def test_with_endog_update_info(model2_inv): assert_frame_equal(res, expected) -def test_with_endog_normalizations(model2_inv): +def test_with_endog_normalizations(model2_inv) -> None: expected = { "fac1": { "loadings": [ @@ -372,24 +371,24 @@ def 
test_with_endog_normalizations(model2_inv): # ====================================================================================== -def test_model_has_endogenous_factors_not_specified(): +def test_model_has_endogenous_factors_not_specified() -> None: factors = {"a": {}} assert get_has_endogenous_factors(factors) == False -def test_get_has_endogenous_factors_wrong_type(): +def test_get_has_endogenous_factors_wrong_type() -> None: factors = {"a": {"is_endogenous": 3}} with pytest.raises(ValueError): get_has_endogenous_factors(factors) -def test_get_has_endogenous_factors_wrong_constellation(): +def test_get_has_endogenous_factors_wrong_constellation() -> None: factors = {"a": {"is_endogenous": False, "is_correction": True}} with pytest.raises(ValueError): get_has_endogenous_factors(factors) -def test_get_has_endogenous_factors_indeed(): +def test_get_has_endogenous_factors_indeed() -> None: factors = { "a": {"is_endogenous": True, "is_correction": False}, "b": {"is_endogenous": False, "is_correction": False}, @@ -397,7 +396,7 @@ def test_get_has_endogenous_factors_indeed(): assert get_has_endogenous_factors(factors) == True -def test_get_has_endogenous_factors_and_correction(): +def test_get_has_endogenous_factors_and_correction() -> None: factors = { "a": {"is_endogenous": True, "is_correction": False}, "b": {"is_endogenous": False, "is_correction": False}, diff --git a/tests/test_qr.py b/tests/test_qr.py index 5a35475f..ff231acf 100644 --- a/tests/test_qr.py +++ b/tests/test_qr.py @@ -1,3 +1,7 @@ +"""Tests for custom QR decomposition.""" + +from typing import TYPE_CHECKING + import jax import jax.numpy as jnp import numpy as np @@ -6,30 +10,37 @@ from skillmodels.qr import qr_gpu +if TYPE_CHECKING: + from numpy.typing import NDArray + SEED = 20 @pytest.fixture -def cov_matrix(): +def cov_matrix() -> NDArray[np.floating]: + """Create a covariance matrix for testing.""" fixedrng = np.random.default_rng(SEED) factorized = fixedrng.uniform(low=-1, high=3, size=(7, 7)) - cov = factorized @ factorized.T * 0.5 + np.eye(7) - return cov + return factorized @ factorized.T * 0.5 + np.eye(7) -def test_q(cov_matrix): +def test_q(cov_matrix: NDArray[np.floating]) -> None: + """Test Q matrix from QR decomposition matches JAX implementation.""" q_gpu, _ = qr_gpu(cov_matrix) q_jax, _ = jnp.linalg.qr(cov_matrix) aaae(q_gpu, q_jax) -def test_r(cov_matrix): +def test_r(cov_matrix: NDArray[np.floating]) -> None: + """Test R matrix from QR decomposition matches JAX implementation.""" _, r_gpu = qr_gpu(cov_matrix) _, r_jax = jnp.linalg.qr(cov_matrix) aaae(r_gpu, r_jax) -def test_grad_qr(cov_matrix): +def test_grad_qr(cov_matrix: NDArray[np.floating]) -> None: + """Test gradient of QR decomposition matches JAX implementation.""" + def f_jax(a): q, r = jnp.linalg.qr(a) return jnp.sum(r) + jnp.sum(q) diff --git a/tests/test_simulate_data.py b/tests/test_simulate_data.py index 533f5317..ff8dacfd 100644 --- a/tests/test_simulate_data.py +++ b/tests/test_simulate_data.py @@ -16,19 +16,17 @@ @pytest.fixture def model2(): - with open(TEST_DATA_DIR / "model2.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) - return model_dict + with (TEST_DATA_DIR / "model2.yaml").open() as y: + return yaml.load(y, Loader=yaml.SafeLoader) @pytest.fixture def model2_data(): data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") - data = data.set_index(["caseid", "period"]) - return data + return data.set_index(["caseid", "period"]) -def test_simulate_dataset(model2, model2_data): +def 
test_simulate_dataset(model2, model2_data) -> None: model_dict = model2 params = pd.read_csv(REGRESSION_VAULT / "one_stage_anchoring.csv") params = params.set_index(["category", "period", "name1", "name2"]) @@ -48,7 +46,7 @@ def test_simulate_dataset(model2, model2_data): assert np.allclose(ratio, expected_ratio) -def test_measurements_from_factors(): +def test_measurements_from_factors() -> None: inputs = { "states": np.array([[0, 0, 0], [1, 1, 1]]), "controls": np.array([[1, 1], [1, 1]]), diff --git a/tests/test_transition_functions.py b/tests/test_transition_functions.py index 2f6eb877..0a84e7e7 100644 --- a/tests/test_transition_functions.py +++ b/tests/test_transition_functions.py @@ -15,14 +15,14 @@ jax.config.update("jax_enable_x64", True) -def test_linear(): +def test_linear() -> None: states = jnp.arange(3) params = jnp.array([0.1, 0.2, 0.3, 0.4]) expected = 1.2 aaae(linear(states, params), expected) -def test_translog(): +def test_translog() -> None: all_states = jnp.array( [ [2, 0, 0], @@ -63,7 +63,7 @@ def test_translog(): aaae(calculated, expected) -def test_log_ces(): +def test_log_ces() -> None: states = jnp.array([3, 7.5]) params = jnp.array([0.4, 0.6, 2]) expected = 7.244628323025 @@ -71,7 +71,7 @@ def test_log_ces(): aaae(calculated, expected) -def test_where_all_but_one_gammas_are_zero(): +def test_where_all_but_one_gammas_are_zero() -> None: """This has to be tested, becaus it leads to an underflow in the log step.""" states = jnp.ones(3) params = jnp.array([0, 0, 1, -0.5]) @@ -80,11 +80,11 @@ def test_where_all_but_one_gammas_are_zero(): aaae(calculated, expected) -def test_constant(): +def test_constant() -> None: assert constant("bla", "blubb") == "bla" # ty: ignore[invalid-argument-type] -def test_robust_translog(): +def test_robust_translog() -> None: all_states = jnp.array( [ [2, 0, 0], @@ -125,7 +125,7 @@ def test_robust_translog(): aaae(calculated, expected) -def test_log_ces_general(): +def test_log_ces_general() -> None: states = jnp.array([3, 7.5]) params = jnp.array([0.4, 0.6, 2, 2, 0.5]) expected = 7.244628323025 @@ -133,7 +133,7 @@ def test_log_ces_general(): aaae(calculated, expected) -def test_log_ces_general_where_all_but_one_gammas_are_zero(): +def test_log_ces_general_where_all_but_one_gammas_are_zero() -> None: """This has to be tested, becaus it leads to an underflow in the log step.""" states = jnp.ones(3) params = jnp.array([0, 0, 1, -0.5, -0.5, -0.5, -2]) @@ -142,7 +142,7 @@ def test_log_ces_general_where_all_but_one_gammas_are_zero(): aaae(calculated, expected) -def test_param_names_log_ces_general(): +def test_param_names_log_ces_general() -> None: factors = ("a", "b") expected = ["a", "b", "sigma_a", "sigma_b", "tfp"] calculated = params_log_ces_general(factors) diff --git a/tests/test_utilities.py b/tests/test_utilities.py index 858c9513..35e3ca8a 100644 --- a/tests/test_utilities.py +++ b/tests/test_utilities.py @@ -31,13 +31,12 @@ @pytest.fixture def model2(): - with open(TEST_DATA_DIR / "model2.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) - return model_dict + with (TEST_DATA_DIR / "model2.yaml").open() as y: + return yaml.load(y, Loader=yaml.SafeLoader) @pytest.mark.parametrize("factors", ["fac2", ["fac2"]]) -def test_extract_factors_single(model2, factors): +def test_extract_factors_single(model2, factors) -> None: reduced = extract_factors(factors, model2) assert list(reduced["factors"]) == ["fac2"] # ty: ignore[invalid-argument-type] assert list(model2["factors"]) == ["fac1", "fac2", "fac3"] @@ -46,7 +45,7 @@ def 
test_extract_factors_single(model2, factors): process_model(reduced) # ty: ignore[invalid-argument-type] -def test_update_parameter_values(): +def test_update_parameter_values() -> None: params = pd.DataFrame() params["value"] = np.arange(5, dtype=np.int64) @@ -63,7 +62,7 @@ def test_update_parameter_values(): @pytest.mark.parametrize("factors", ["fac2", ["fac2"]]) -def test_remove_factors(model2, factors): +def test_remove_factors(model2, factors) -> None: reduced = remove_factors(factors, model2) assert list(reduced["factors"]) == ["fac1", "fac3"] # ty: ignore[invalid-argument-type] assert list(model2["factors"]) == ["fac1", "fac2", "fac3"] @@ -72,7 +71,7 @@ def test_remove_factors(model2, factors): @pytest.mark.parametrize("measurements", ["y5", ["y5"]]) -def test_remove_measurements(model2, measurements): +def test_remove_measurements(model2, measurements) -> None: reduced = remove_measurements(measurements, model2) assert reduced["factors"]["fac2"]["measurements"] == [["y4", "y6"]] * 8 # ty: ignore[invalid-argument-type] assert "y5" in model2["factors"]["fac2"]["measurements"][0] @@ -80,33 +79,33 @@ def test_remove_measurements(model2, measurements): @pytest.mark.parametrize("controls", ["x1", ["x1"]]) -def test_remove_controls(model2, controls): +def test_remove_controls(model2, controls) -> None: reduced = remove_controls(controls, model2) assert "controls" not in reduced assert "controls" in model2 process_model(reduced) # ty: ignore[invalid-argument-type] -def test_reduce_n_periods(model2): +def test_reduce_n_periods(model2) -> None: reduced = reduce_n_periods(model2, 1) assert reduced["factors"]["fac1"]["measurements"] == [["y1", "y2", "y3"]] # ty: ignore[invalid-argument-type] assert reduced["factors"]["fac2"]["normalizations"]["loadings"] == [{"y4": 1}] # ty: ignore[invalid-argument-type] process_model(reduced) # ty: ignore[invalid-argument-type] -def test_switch_linear_to_translog(model2): +def test_switch_linear_to_translog(model2) -> None: switched = switch_linear_to_translog(model2) assert switched["factors"]["fac2"]["transition_function"] == "translog" # ty: ignore[invalid-argument-type] -def test_switch_linear_and_translog_back_and_forth(model2): +def test_switch_linear_and_translog_back_and_forth(model2) -> None: with_translog = switch_linear_to_translog(model2) with_linear = switch_translog_to_linear(with_translog) # ty: ignore[invalid-argument-type] assert model2 == with_linear @pytest.mark.parametrize("to_remove", ["a", ["a"]]) -def test_remove_from_list(to_remove): +def test_remove_from_list(to_remove) -> None: list_ = ["a", "b", "c"] calculated = _remove_from_list(list_, to_remove) assert calculated == ["b", "c"] @@ -114,14 +113,14 @@ def test_remove_from_list(to_remove): @pytest.mark.parametrize("to_remove", ["a", ["a"]]) -def test_remove_from_dict(to_remove): +def test_remove_from_dict(to_remove) -> None: dict_ = {"a": 1, "b": 2, "c": 3} calculated = _remove_from_dict(dict_, to_remove) assert calculated == {"b": 2, "c": 3} assert dict_ == {"a": 1, "b": 2, "c": 3} -def test_reduce_params_via_extract_factors(model2): +def test_reduce_params_via_extract_factors(model2) -> None: model_dict = reduce_n_periods(model2, 2) full_index = _get_params_index_from_model_dict(model_dict) # ty: ignore[invalid-argument-type] @@ -154,7 +153,7 @@ def test_reduce_params_via_extract_factors(model2): assert_index_equal(reduced_params.index, expected_index) # ty: ignore[invalid-argument-type] -def test_extend_params_via_switch_to_translog(model2): +def 
test_extend_params_via_switch_to_translog(model2) -> None: model_dict = reduce_n_periods(model2, 2) normal_index = _get_params_index_from_model_dict(model_dict) # ty: ignore[invalid-argument-type] params = pd.DataFrame(columns=["value"], index=normal_index) @@ -180,7 +179,7 @@ def test_extend_params_via_switch_to_translog(model2): assert extended_params.loc[added_index, "value"].unique()[0] == 0.05 # ty: ignore[possibly-missing-attribute] -def test_shorten_if_necessary(): +def test_shorten_if_necessary() -> None: list_ = list(range(3)) not_necessary = _shorten_if_necessary(list_, 5) assert not_necessary == list_ diff --git a/tests/test_visualize_factor_distributions.py b/tests/test_visualize_factor_distributions.py index f208295f..6bb08efd 100644 --- a/tests/test_visualize_factor_distributions.py +++ b/tests/test_visualize_factor_distributions.py @@ -16,15 +16,15 @@ REGRESSION_VAULT = Path(__file__).parent / "regression_vault" -def test_visualize_factor_distributions_runs_with_filtered_states(): - with open(TEST_DATA_DIR / "model2.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) +def test_visualize_factor_distributions_runs_with_filtered_states() -> None: + with (TEST_DATA_DIR / "model2.yaml").open() as y: + model_dict = yaml.load(y, Loader=yaml.SafeLoader) params = pd.read_csv(REGRESSION_VAULT / "one_stage_anchoring.csv") params = params.set_index(["category", "period", "name1", "name2"]) data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") - data.set_index(["caseid", "period"], inplace=True) + data = data.set_index(["caseid", "period"]) max_inputs = get_maximization_inputs(model_dict, data) params = params.loc[max_inputs["params_template"].index] @@ -53,12 +53,12 @@ def test_visualize_factor_distributions_runs_with_filtered_states(): ) -def test_visualize_factor_distributions_runs_with_simulated_states(): - with open(TEST_DATA_DIR / "model2.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) +def test_visualize_factor_distributions_runs_with_simulated_states() -> None: + with (TEST_DATA_DIR / "model2.yaml").open() as y: + model_dict = yaml.load(y, Loader=yaml.SafeLoader) data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") - data.set_index(["caseid", "period"], inplace=True) + data = data.set_index(["caseid", "period"]) params = pd.read_csv(REGRESSION_VAULT / "one_stage_anchoring.csv") params = params.set_index(["category", "period", "name1", "name2"]) diff --git a/tests/test_visualize_transition_equations.py b/tests/test_visualize_transition_equations.py index a3bd90eb..65f3df3e 100644 --- a/tests/test_visualize_transition_equations.py +++ b/tests/test_visualize_transition_equations.py @@ -13,9 +13,9 @@ REGRESSION_VAULT = Path(__file__).parent / "regression_vault" -def test_visualize_transition_equations_runs(): - with open(TEST_DATA_DIR / "model2.yaml") as y: - model_dict = yaml.load(y, Loader=yaml.FullLoader) +def test_visualize_transition_equations_runs() -> None: + with (TEST_DATA_DIR / "model2.yaml").open() as y: + model_dict = yaml.load(y, Loader=yaml.SafeLoader) model_dict["observed_factors"] = ["ob1"] @@ -23,7 +23,7 @@ def test_visualize_transition_equations_runs(): params = params.set_index(["category", "period", "name1", "name2"]) data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") - data.set_index(["caseid", "period"], inplace=True) + data = data.set_index(["caseid", "period"]) data["ob1"] = 0 max_inputs = get_maximization_inputs(model_dict, data) From b15372a5243d1f4cc62498a859fe6c795bf7ea5a Mon Sep 17 
00:00:00 2001 From: Hans-Martin von Gaudecker Date: Sun, 11 Jan 2026 11:04:15 +0100 Subject: [PATCH 11/27] Further tighten type annotations. Fix a missing run-time annotation of NDArray. --- .../how_to_visualize_correlations.ipynb | 9 +- pixi.lock | 24 +- src/skillmodels/constraints.py | 5 +- src/skillmodels/filtered_states.py | 6 +- src/skillmodels/likelihood_function.py | 32 +-- src/skillmodels/likelihood_function_debug.py | 30 ++- src/skillmodels/maximization_inputs.py | 8 +- src/skillmodels/parse_params.py | 207 +++++++++--------- src/skillmodels/process_model.py | 56 ++--- src/skillmodels/simulate_data.py | 23 +- src/skillmodels/types.py | 120 +++++++++- .../visualize_transition_equations.py | 15 +- tests/test_parse_params.py | 27 ++- 13 files changed, 336 insertions(+), 226 deletions(-) diff --git a/docs/source/how_to_guides/how_to_visualize_correlations.ipynb b/docs/source/how_to_guides/how_to_visualize_correlations.ipynb index 73560c56..45157de6 100644 --- a/docs/source/how_to_guides/how_to_visualize_correlations.ipynb +++ b/docs/source/how_to_guides/how_to_visualize_correlations.ipynb @@ -170,7 +170,10 @@ "metadata": {}, "outputs": [], "source": [ - "from skillmodels.visualize_transition_equations import _get_pardict, _set_index_params" + "from skillmodels.visualize_transition_equations import (\n", + " _get_parsed_params,\n", + " _set_index_params,\n", + ")" ] }, { @@ -188,10 +191,10 @@ "metadata": {}, "outputs": [], "source": [ - "_get_pardict(\n", + "_get_parsed_params(\n", " params=_set_index_params(process_model(model_dict), params),\n", " model=process_model(model_dict),\n", - ")[\"loadings\"]" + ").loadings" ] }, { diff --git a/pixi.lock b/pixi.lock index 64280cd4..0afb53a6 100644 --- a/pixi.lock +++ b/pixi.lock @@ -2270,7 +2270,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/b3/27/caf606ee924282fe4747ee4fd454b335a72a6e018f97eab5ff7f28199e16/sqlalchemy-2.0.45-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/9e/4c/2f9ac5edbd0e67bf82f5cd04275c4e87cbbf69a78f43e5dcf90c1573d44e/ty-0.0.10-py3-none-manylinux_2_24_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/74/18/8dd4fe6df1fd66f3e83b4798eddb1d8482d9d9b105f25099b76703402ebb/ty-0.0.11-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl - pypi: https://files.pythonhosted.org/packages/e7/c1/56ef16bf5dcd255155cc736d276efa6ae0a5c26fd685e28f0412a4013c01/types_pytz-2025.2.0.20251108-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/bd/e0/1eed384f02555dde685fff1a1ac805c1c7dcb6dd019c916fe659b1c1f9ec/types_pyyaml-6.0.12.20250915-py3-none-any.whl - pypi: ./ @@ -2507,7 +2507,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/bf/e1/3ccb13c643399d22289c6a9786c1a91e3dcbb68bce4beb44926ac2c557bf/sqlalchemy-2.0.45-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl - - pypi: 
https://files.pythonhosted.org/packages/e8/cd/9dd49e6d40e54d4b7d563f9e2a432c4ec002c0673a81266e269c4bc194ce/ty-0.0.10-py3-none-macosx_11_0_arm64.whl + - pypi: https://files.pythonhosted.org/packages/ad/01/3a563dba8b1255e474c35e1c3810b7589e81ae8c41df401b6a37c8e2cde9/ty-0.0.11-py3-none-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/e7/c1/56ef16bf5dcd255155cc736d276efa6ae0a5c26fd685e28f0412a4013c01/types_pytz-2025.2.0.20251108-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/bd/e0/1eed384f02555dde685fff1a1ac805c1c7dcb6dd019c916fe659b1c1f9ec/types_pyyaml-6.0.12.20250915-py3-none-any.whl - pypi: ./ @@ -2743,7 +2743,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/5a/dc/491b7661614ab97483abf2056be1deee4dc2490ecbf7bff9ab5cdbac86e1/pyreadline3-3.5.4-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/89/a2/0e1590e9adb292b1d576dbcf67ff7df8cf55e56e78d2c927686d01080f4b/sqlalchemy-2.0.45-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/42/36/82e66b9753a76964d26fd9bc3514ea0abce0a5ba5ad7d5f084070c6981da/ty-0.0.10-py3-none-win_amd64.whl + - pypi: https://files.pythonhosted.org/packages/df/04/5a5dfd0aec0ea99ead1e824ee6e347fb623c464da7886aa1e3660fb0f36c/ty-0.0.11-py3-none-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/e7/c1/56ef16bf5dcd255155cc736d276efa6ae0a5c26fd685e28f0412a4013c01/types_pytz-2025.2.0.20251108-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/bd/e0/1eed384f02555dde685fff1a1ac805c1c7dcb6dd019c916fe659b1c1f9ec/types_pyyaml-6.0.12.20250915-py3-none-any.whl - pypi: ./ @@ -8841,20 +8841,20 @@ packages: - pkg:pypi/traitlets?source=hash-mapping size: 110051 timestamp: 1733367480074 -- pypi: https://files.pythonhosted.org/packages/42/36/82e66b9753a76964d26fd9bc3514ea0abce0a5ba5ad7d5f084070c6981da/ty-0.0.10-py3-none-win_amd64.whl +- pypi: https://files.pythonhosted.org/packages/74/18/8dd4fe6df1fd66f3e83b4798eddb1d8482d9d9b105f25099b76703402ebb/ty-0.0.11-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl name: ty - version: 0.0.10 - sha256: 16deb77a72cf93b89b4d29577829613eda535fbe030513dfd9fba70fe38bc9f5 + version: 0.0.11 + sha256: 25f88e8789072830348cb59b761d5ced70642ed5600673b4bf6a849af71eca8b requires_python: '>=3.8' -- pypi: https://files.pythonhosted.org/packages/9e/4c/2f9ac5edbd0e67bf82f5cd04275c4e87cbbf69a78f43e5dcf90c1573d44e/ty-0.0.10-py3-none-manylinux_2_24_x86_64.whl +- pypi: https://files.pythonhosted.org/packages/ad/01/3a563dba8b1255e474c35e1c3810b7589e81ae8c41df401b6a37c8e2cde9/ty-0.0.11-py3-none-macosx_11_0_arm64.whl name: ty - version: 0.0.10 - sha256: e206a23bd887574302138b33383ae1edfcc39d33a06a12a5a00803b3f0287a45 + version: 0.0.11 + sha256: 121987c906e02264c3b511b95cb9f8a3cdd66f3283b8bbab678ca3525652e304 requires_python: '>=3.8' -- pypi: https://files.pythonhosted.org/packages/e8/cd/9dd49e6d40e54d4b7d563f9e2a432c4ec002c0673a81266e269c4bc194ce/ty-0.0.10-py3-none-macosx_11_0_arm64.whl +- pypi: https://files.pythonhosted.org/packages/df/04/5a5dfd0aec0ea99ead1e824ee6e347fb623c464da7886aa1e3660fb0f36c/ty-0.0.11-py3-none-win_amd64.whl name: ty - version: 0.0.10 - sha256: e4832f8879cb95fc725f7e7fcab4f22be0cf2550f3a50641d5f4409ee04176d4 + version: 0.0.11 + sha256: 1bb205db92715d4a13343bfd5b0c59ce8c0ca0daa34fb220ec9120fc66ccbda7 requires_python: '>=3.8' - pypi: 
https://files.pythonhosted.org/packages/e7/c1/56ef16bf5dcd255155cc736d276efa6ae0a5c26fd685e28f0412a4013c01/types_pytz-2025.2.0.20251108-py3-none-any.whl name: types-pytz diff --git a/src/skillmodels/constraints.py b/src/skillmodels/constraints.py index 58235735..308ed4dc 100644 --- a/src/skillmodels/constraints.py +++ b/src/skillmodels/constraints.py @@ -9,6 +9,7 @@ import optimagic as om import skillmodels.transition_functions as t_f_module +from skillmodels.types import MeasurementType if TYPE_CHECKING: import pandas as pd @@ -427,9 +428,9 @@ def _get_constraints_for_augmented_periods( # We are restricting transitions and shocks, not measurements. So this might # look counterintuitive... aug_period_meas_type_to_constrain = ( - "states" + MeasurementType.STATES if endogenous_factors_info.factor_info[factor].is_state # ty: ignore[invalid-argument-type] - else "endogenous_factors" + else MeasurementType.ENDOGENOUS_FACTORS ) aug_period_meas_types = ( endogenous_factors_info.aug_periods_to_aug_period_meas_types diff --git a/src/skillmodels/filtered_states.py b/src/skillmodels/filtered_states.py index 09e83927..66e56f0e 100644 --- a/src/skillmodels/filtered_states.py +++ b/src/skillmodels/filtered_states.py @@ -91,7 +91,7 @@ def anchor_states_df( has_endogenous_factors=model.endogenous_factors_info.has_endogenous_factors, ) - *_, pardict = parse_params( + *_, parsed_params = parse_params( params=jnp.array(params["value"].to_numpy()), parsing_info=parsing_info, dimensions=model.dimensions, @@ -101,8 +101,8 @@ def anchor_states_df( n_latent = model.dimensions.n_latent_factors - _scaling_factors = np.array(pardict["anchoring_scaling_factors"][:, :n_latent]) - _constants = np.array(pardict["anchoring_constants"][:, :n_latent]) + _scaling_factors = np.array(parsed_params.anchoring_scaling_factors[:, :n_latent]) + _constants = np.array(parsed_params.anchoring_constants[:, :n_latent]) if use_aug_period: period_arr = states_df["aug_period"].to_numpy() ap_to_p = model.labels.aug_periods_to_periods diff --git a/src/skillmodels/likelihood_function.py b/src/skillmodels/likelihood_function.py index 720b04ac..d72055ac 100644 --- a/src/skillmodels/likelihood_function.py +++ b/src/skillmodels/likelihood_function.py @@ -14,12 +14,18 @@ kalman_update, ) from skillmodels.parse_params import parse_params -from skillmodels.types import Dimensions, EstimationOptions, Labels # noqa: TC001 +from skillmodels.types import ( # noqa: TC001 + Dimensions, + EstimationOptions, + Labels, + ParsedParams, + ParsingInfo, +) def log_likelihood( params: Array, - parsing_info: dict[str, Any], + parsing_info: ParsingInfo, measurements: Array, controls: Array, transition_func: Callable, @@ -85,7 +91,7 @@ def log_likelihood( def log_likelihood_obs( params: Array, - parsing_info: dict[str, Any], + parsing_info: ParsingInfo, measurements: Array, controls: Array, transition_func: Callable, @@ -141,7 +147,7 @@ def log_likelihood_obs( """ n_obs = measurements.shape[1] - states, upper_chols, log_mixture_weights, pardict = parse_params( + states, upper_chols, log_mixture_weights, parsed_params = parse_params( params, parsing_info, dimensions, @@ -157,9 +163,9 @@ def log_likelihood_obs( loop_args = { "period": iteration_to_period, - "loadings": pardict["loadings"], - "control_params": pardict["controls"], - "meas_sds": pardict["meas_sds"], + "loadings": parsed_params.loadings, + "control_params": parsed_params.controls, + "meas_sds": parsed_params.meas_sds, "measurements": measurements, "is_measurement_iteration": is_measurement_iteration, 
"is_predict_iteration": is_predict_iteration, @@ -168,7 +174,7 @@ def log_likelihood_obs( _body = functools.partial( _scan_body, controls=controls, - pardict=pardict, + parsed_params=parsed_params, sigma_scaling_factor=sigma_scaling_factor, sigma_weights=sigma_weights, transition_func=transition_func, @@ -192,7 +198,7 @@ def _scan_body( carry: dict[str, Array], loop_args: dict[str, Array], controls: Array, - pardict: dict[str, Any], + parsed_params: ParsedParams, sigma_scaling_factor: float, sigma_weights: Array, transition_func: Callable, @@ -235,12 +241,12 @@ def _scan_body( "upper_chols": upper_chols, "sigma_scaling_factor": sigma_scaling_factor, "sigma_weights": sigma_weights, - "trans_coeffs": {k: arr[t] for k, arr in pardict["transition"].items()}, - "shock_sds": pardict["shock_sds"][t], - "anchoring_scaling_factors": pardict["anchoring_scaling_factors"][ + "trans_coeffs": {k: arr[t] for k, arr in parsed_params.transition.items()}, + "shock_sds": parsed_params.shock_sds[t], + "anchoring_scaling_factors": parsed_params.anchoring_scaling_factors[ jnp.array([t, t + 1]) ], - "anchoring_constants": pardict["anchoring_constants"][jnp.array([t, t + 1])], + "anchoring_constants": parsed_params.anchoring_constants[jnp.array([t, t + 1])], "observed_factors": observed_factors[t], } diff --git a/src/skillmodels/likelihood_function_debug.py b/src/skillmodels/likelihood_function_debug.py index 5fc9db9f..800b5b6f 100644 --- a/src/skillmodels/likelihood_function_debug.py +++ b/src/skillmodels/likelihood_function_debug.py @@ -12,12 +12,18 @@ from skillmodels.kalman_filters import kalman_predict from skillmodels.kalman_filters_debug import kalman_update from skillmodels.parse_params import parse_params -from skillmodels.types import Dimensions, EstimationOptions, Labels # noqa: TC001 +from skillmodels.types import ( # noqa: TC001 + Dimensions, + EstimationOptions, + Labels, + ParsedParams, + ParsingInfo, +) def log_likelihood( params: Array, - parsing_info: dict[str, Any], + parsing_info: ParsingInfo, measurements: Array, controls: Array, transition_func: Callable[..., Array], @@ -66,7 +72,7 @@ def log_likelihood( """ n_obs = measurements.shape[1] - states, upper_chols, log_mixture_weights, pardict = parse_params( + states, upper_chols, log_mixture_weights, parsed_params = parse_params( params, parsing_info, dimensions, @@ -82,9 +88,9 @@ def log_likelihood( loop_args = { "period": iteration_to_period, - "loadings": pardict["loadings"], - "control_params": pardict["controls"], - "meas_sds": pardict["meas_sds"], + "loadings": parsed_params.loadings, + "control_params": parsed_params.controls, + "meas_sds": parsed_params.meas_sds, "measurements": measurements, "is_measurement_iteration": is_measurement_iteration, "is_predict_iteration": is_predict_iteration, @@ -93,7 +99,7 @@ def log_likelihood( _body = functools.partial( _scan_body, controls=controls, - pardict=pardict, + parsed_params=parsed_params, sigma_scaling_factor=sigma_scaling_factor, sigma_weights=sigma_weights, transition_func=transition_func, @@ -145,7 +151,7 @@ def _scan_body( carry: dict[str, Array], loop_args: dict[str, Array], controls: Array, - pardict: dict[str, Any], + parsed_params: ParsedParams, sigma_scaling_factor: float, sigma_weights: Array, transition_func: Callable[..., Array], @@ -188,12 +194,12 @@ def _scan_body( "upper_chols": upper_chols, "sigma_scaling_factor": sigma_scaling_factor, "sigma_weights": sigma_weights, - "trans_coeffs": {k: arr[t] for k, arr in pardict["transition"].items()}, - "shock_sds": 
pardict["shock_sds"][t], - "anchoring_scaling_factors": pardict["anchoring_scaling_factors"][ + "trans_coeffs": {k: arr[t] for k, arr in parsed_params.transition.items()}, + "shock_sds": parsed_params.shock_sds[t], + "anchoring_scaling_factors": parsed_params.anchoring_scaling_factors[ jnp.array([t, t + 1]) ], - "anchoring_constants": pardict["anchoring_constants"][jnp.array([t, t + 1])], + "anchoring_constants": parsed_params.anchoring_constants[jnp.array([t, t + 1])], "observed_factors": observed_factors[t], } diff --git a/src/skillmodels/maximization_inputs.py b/src/skillmodels/maximization_inputs.py index a07b422e..028f2ba4 100644 --- a/src/skillmodels/maximization_inputs.py +++ b/src/skillmodels/maximization_inputs.py @@ -9,6 +9,7 @@ import numpy as np import pandas as pd from jax import Array +from numpy.typing import NDArray # noqa: TC002 import skillmodels.likelihood_function as lf import skillmodels.likelihood_function_debug as lfd @@ -24,10 +25,9 @@ from skillmodels.process_data import process_data from skillmodels.process_debug_data import process_debug_data from skillmodels.process_model import process_model +from skillmodels.types import ParsingInfo # noqa: TC001 if TYPE_CHECKING: - from numpy.typing import NDArray - from skillmodels.types import ProcessedModel jax.config.update("jax_enable_x64", True) # noqa: FBT003 @@ -201,7 +201,7 @@ def debug_loglike(params: pd.DataFrame) -> dict[str, Any]: def _partial_some_log_likelihood( fun: Callable, - parsing_info: dict[str, Any], + parsing_info: ParsingInfo, measurements: Array, controls: Array, observed_factors: Array, @@ -223,7 +223,7 @@ def _partial_some_log_likelihood( # be measurements for endogenous factors in the "second half" of the last period). last_aug_period = ( model.labels.aug_periods[-2] - if parsing_info["has_endogenous_factors"] + if parsing_info.has_endogenous_factors else model.labels.aug_periods[-1] ) iteration_to_period = _aug_periods.replace(last_aug_period, -1).to_numpy() diff --git a/src/skillmodels/parse_params.py b/src/skillmodels/parse_params.py index 932abe01..e1762348 100644 --- a/src/skillmodels/parse_params.py +++ b/src/skillmodels/parse_params.py @@ -1,13 +1,15 @@ """Functions to parse parameter vectors into structured dictionaries.""" import warnings -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING import jax.numpy as jnp import numpy as np import pandas as pd from jax import Array +from skillmodels.types import LoadingsParsingInfo, ParsedParams, ParsingInfo + if TYPE_CHECKING: from skillmodels.types import Anchoring, Dimensions, Labels @@ -19,81 +21,80 @@ def create_parsing_info( anchoring: Anchoring, *, has_endogenous_factors: bool, -) -> dict[str, Any]: - """Create a dictionary with information how the parameter vector has to be parsed. +) -> ParsingInfo: + """Create a dataclass with information how the parameter vector has to be parsed. Args: params_index: It has the levels ["category", "aug_period", "name1", "name2"] update_info: DataFrame with one row per Kalman update needed in the likelihood function. See :ref:`update_info`. - labels: Dict of lists with labels for the model quantities like + labels: Labels dataclass with labels for the model quantities like factors, periods, controls, stagemap and stages. See :ref:`labels` - anchoring: Dictionary with anchoring settings. + anchoring: Anchoring dataclass with anchoring settings. has_endogenous_factors: Whether the model includes endogenous factors. 
Returns: - dict: dictionary that maps model quantities to positions or slices of the + ParsingInfo dataclass that maps model quantities to positions or slices of the parameter vector. """ range_sr = pd.Series(data=np.arange(len(params_index)), index=params_index) - parsing_info = {} - - simple_ones = [ - "initial_states", - "initial_cholcovs", - "mixture_weights", - "controls", - "meas_sds", - "shock_sds", - ] - - for quantity in simple_ones: - parsing_info[quantity] = _get_positional_selector_from_loc(range_sr, quantity) + # Simple quantities + initial_states = _get_positional_selector_from_loc(range_sr, "initial_states") + initial_cholcovs = _get_positional_selector_from_loc(range_sr, "initial_cholcovs") + mixture_weights = _get_positional_selector_from_loc(range_sr, "mixture_weights") + controls = _get_positional_selector_from_loc(range_sr, "controls") + meas_sds = _get_positional_selector_from_loc(range_sr, "meas_sds") + shock_sds = _get_positional_selector_from_loc(range_sr, "shock_sds") # loadings: mask = update_info[list(labels.latent_factors)].to_numpy() helper = np.arange(mask.size).reshape(mask.shape) flat_indices = helper[mask] - parsing_info["loadings"] = { - "slice": _get_positional_selector_from_loc(range_sr, "loadings"), - "flat_indices": jnp.array(flat_indices), - "shape": mask.shape, - "size": mask.size, - } + loadings = LoadingsParsingInfo( + slice=_get_positional_selector_from_loc(range_sr, "loadings"), + flat_indices=jnp.array(flat_indices), + shape=mask.shape, + size=mask.size, + ) - # "trans_coeffs" - pos_dict = {} + # transition coefficients + transition: dict[str, Array | slice] = {} for factor in list(labels.latent_factors): - helper = pd.DataFrame(index=params_index) - loc = helper.query(f"category == 'transition' & name1 == '{factor}'").index - pos_dict[factor] = _get_positional_selector_from_loc(range_sr, loc) - - parsing_info["transition"] = pos_dict + helper_df = pd.DataFrame(index=params_index) + loc = helper_df.query(f"category == 'transition' & name1 == '{factor}'").index + transition[factor] = _get_positional_selector_from_loc(range_sr, loc) # anchoring_scaling_factors is_free_loading = update_info[list(labels.latent_factors)].to_numpy() is_anchoring = (update_info["purpose"] == "anchoring").to_numpy().reshape(-1, 1) is_anchoring_loading = jnp.array(is_free_loading & is_anchoring) - parsing_info["is_anchoring_loading"] = is_anchoring_loading - parsing_info["is_anchored_factor"] = jnp.array( + is_anchored_factor = jnp.array( update_info.query("purpose == 'anchoring'")[list(labels.latent_factors)].any( axis=0, ), ) - parsing_info["is_anchoring_update"] = is_anchoring.flatten() - parsing_info["ignore_constant_when_anchoring"] = ( - anchoring.ignore_constant_when_anchoring + is_anchoring_update = jnp.array(is_anchoring.flatten()) + + return ParsingInfo( + initial_states=initial_states, + initial_cholcovs=initial_cholcovs, + mixture_weights=mixture_weights, + controls=controls, + meas_sds=meas_sds, + shock_sds=shock_sds, + loadings=loadings, + transition=transition, + is_anchoring_loading=is_anchoring_loading, + is_anchored_factor=is_anchored_factor, + is_anchoring_update=is_anchoring_update, + ignore_constant_when_anchoring=anchoring.ignore_constant_when_anchoring, + has_endogenous_factors=has_endogenous_factors, ) - # Add has_endogenous_factors to parsing_info - parsing_info["has_endogenous_factors"] = has_endogenous_factors - - return parsing_info - def _get_positional_selector_from_loc( range_sr: pd.Series, @@ -115,75 +116,77 @@ def 
_get_positional_selector_from_loc( def parse_params( params: Array, - parsing_info: dict[str, Any], + parsing_info: ParsingInfo, dimensions: Dimensions, labels: Labels, n_obs: int, -) -> tuple[Array, Array, Array, dict[str, Any]]: +) -> tuple[Array, Array, Array, ParsedParams]: """Parse params into the quantities that depend on it. Args: params: 1d array with model parameters. - parsing_info: Dictionary with information on how the parameters + parsing_info: ParsingInfo dataclass with information on how the parameters have to be parsed. dimensions: Dimensional information like n_states, n_periods, n_controls, n_mixtures. See :ref:`dimensions`. - labels: Dict of lists with labels for the model quantities like + labels: Labels dataclass with labels for the model quantities like factors, periods, controls, stagemap and stages. See :ref:`labels` n_obs: Number of observations. Returns: - jax.numpy.array: Array of shape (n_obs, n_mixtures, n_states) with initial - state estimates. - jax.numpy.array: Array of shape (n_obs, n_mixtures, n_states, n_states) with the - transpose of the lower triangular cholesky factors of the initial covariance - matrices. - jax.numpy.array: Array of shape (n_obs, n_mixtures) with the log of the initial - weight for each element in the finite mixture of normals. - dict: Dictionary with other parameters. It has the following key-value pairs: - - "control_params": - - "loadings": - - "meas_sds": - - "shock_sds": - - "trans_params": - - "anchoring_scaling_factors": - - "anchoring_constants": + Tuple of: + - Array of shape (n_obs, n_mixtures, n_states) with initial state estimates. + - Array of shape (n_obs, n_mixtures, n_states, n_states) with the transpose + of the lower triangular cholesky factors of the initial covariance + matrices. + - Array of shape (n_obs, n_mixtures) with the log of the initial weight for + each element in the finite mixture of normals. + - ParsedParams dataclass with other model parameters. 
""" states = _get_initial_states(params, parsing_info, dimensions, n_obs) upper_chols = _get_initial_upper_chols(params, parsing_info, dimensions, n_obs) log_weights = _get_initial_log_mixture_weights(params, parsing_info, n_obs) - pardict = { - "controls": _get_control_params(params, parsing_info, dimensions), - "loadings": _get_loadings(params, parsing_info), - "meas_sds": _get_meas_sds(params, parsing_info), - "shock_sds": _get_shock_sds(params, parsing_info, dimensions), - "transition": _get_transition_params(params, parsing_info, labels), - } - - pardict["anchoring_scaling_factors"] = _get_anchoring_scaling_factors( - pardict["loadings"], # ty: ignore[invalid-argument-type] + + controls = _get_control_params(params, parsing_info, dimensions) + loadings = _get_loadings(params, parsing_info) + meas_sds = _get_meas_sds(params, parsing_info) + shock_sds = _get_shock_sds(params, parsing_info, dimensions) + transition = _get_transition_params(params, parsing_info, labels) + + anchoring_scaling_factors = _get_anchoring_scaling_factors( + loadings, parsing_info, dimensions, ) - pardict["anchoring_constants"] = _get_anchoring_constants( - pardict["controls"], # ty: ignore[invalid-argument-type] + anchoring_constants = _get_anchoring_constants( + controls, parsing_info, dimensions, ) - return states, upper_chols, log_weights, pardict + parsed = ParsedParams( + controls=controls, + loadings=loadings, + meas_sds=meas_sds, + shock_sds=shock_sds, + transition=transition, + anchoring_scaling_factors=anchoring_scaling_factors, + anchoring_constants=anchoring_constants, + ) + + return states, upper_chols, log_weights, parsed def _get_initial_states( params: Array, - info: dict[str, Any], + info: ParsingInfo, dimensions: Dimensions, n_obs: int, ) -> Array: """Create the array of initial states.""" - state = params[info["initial_states"]].reshape( + state = params[info.initial_states].reshape( 1, dimensions.n_mixtures, dimensions.n_latent_factors, @@ -193,7 +196,7 @@ def _get_initial_states( def _get_initial_upper_chols( params: Array, - info: dict[str, Any], + info: ParsingInfo, dimensions: Dimensions, n_obs: int, ) -> Array: @@ -203,7 +206,7 @@ def _get_initial_upper_chols( """ n_states, n_mixtures = dimensions.n_latent_factors, dimensions.n_mixtures - chol_params = params[info["initial_cholcovs"]].reshape(n_mixtures, -1) + chol_params = params[info.initial_cholcovs].reshape(n_mixtures, -1) upper_chols = jnp.zeros((n_obs, n_mixtures, n_states, n_states)) for i in range(n_mixtures): filler = jnp.zeros((n_states, n_states)) @@ -214,73 +217,71 @@ def _get_initial_upper_chols( def _get_initial_log_mixture_weights( params: Array, - info: dict[str, Any], + info: ParsingInfo, n_obs: int, ) -> Array: """Create the array with the log of initial mixture weights.""" - log_weights = jnp.log(params[info["mixture_weights"]]).reshape(1, -1) + log_weights = jnp.log(params[info.mixture_weights]).reshape(1, -1) return jnp.repeat(log_weights, n_obs, axis=0) def _get_control_params( params: Array, - info: dict[str, Any], + info: ParsingInfo, dimensions: Dimensions, ) -> Array: """Create the parameters for control variables in measurement equations.""" - return params[info["controls"]].reshape(-1, dimensions.n_controls) + return params[info.controls].reshape(-1, dimensions.n_controls) def _get_loadings( params: Array, - info: dict[str, Any], + info: ParsingInfo, ) -> Array: """Create the array of factor loadings.""" - info = info["loadings"] - free = params[info["slice"]] - extended = 
jnp.zeros(info["size"]).at[info["flat_indices"]].set(free) # noqa: PD008 - return extended.reshape(info["shape"]) + loadings_info = info.loadings + free = params[loadings_info.slice] + extended = jnp.zeros(loadings_info.size).at[loadings_info.flat_indices].set(free) # noqa: PD008 + return extended.reshape(loadings_info.shape) def _get_meas_sds( params: Array, - info: dict[str, Any], + info: ParsingInfo, ) -> Array: """Create the array of standard deviations of the measurement errors.""" - return params[info["meas_sds"]] + return params[info.meas_sds] def _get_shock_sds( params: Array, - info: dict[str, Any], + info: ParsingInfo, dimensions: Dimensions, ) -> Array: """Create the array of standard deviations of the shocks in transition functions.""" - return params[info["shock_sds"]].reshape(-1, dimensions.n_latent_factors) + return params[info.shock_sds].reshape(-1, dimensions.n_latent_factors) def _get_transition_params( params: Array, - info: dict[str, Any], + info: ParsingInfo, labels: Labels, ) -> dict[str, Array]: """Create a list of arrays with transition equation parameters.""" trans_params = {} - t_info = info["transition"] n_aug_periods = len(labels.aug_periods) - # Use has_endogenous_factors from parsing_info instead of undefined global - len_reduction = 2 if info["has_endogenous_factors"] else 1 + len_reduction = 2 if info.has_endogenous_factors else 1 for factor in list(labels.latent_factors): - ilocs = t_info[factor] + ilocs = info.transition[factor] trans_params[factor] = params[ilocs].reshape(n_aug_periods - len_reduction, -1) return trans_params def _get_anchoring_scaling_factors( loadings: Array, - info: dict[str, Any], + info: ParsingInfo, dimensions: Dimensions, ) -> Array: """Create an array of anchoring scaling factors. @@ -291,11 +292,11 @@ def _get_anchoring_scaling_factors( scaling_factors = jnp.ones( (dimensions.n_aug_periods, dimensions.n_latent_factors), ) - free_anchoring_loadings = loadings[info["is_anchoring_loading"]].reshape( + free_anchoring_loadings = loadings[info.is_anchoring_loading].reshape( dimensions.n_aug_periods, -1, ) - scaling_factors = scaling_factors.at[:, info["is_anchored_factor"]].set( # noqa: PD008 + scaling_factors = scaling_factors.at[:, info.is_anchored_factor].set( # noqa: PD008 free_anchoring_loadings, ) @@ -308,7 +309,7 @@ def _get_anchoring_scaling_factors( def _get_anchoring_constants( controls: Array, - info: dict[str, Any], + info: ParsingInfo, dimensions: Dimensions, ) -> Array: """Create an array of anchoring constants. 
@@ -317,12 +318,12 @@ def _get_anchoring_constants( """ constants = jnp.zeros((dimensions.n_aug_periods, dimensions.n_latent_factors)) - if not info["ignore_constant_when_anchoring"]: - values = controls[:, 0][info["is_anchoring_update"]].reshape( + if not info.ignore_constant_when_anchoring: + values = controls[:, 0][info.is_anchoring_update].reshape( dimensions.n_aug_periods, -1, ) - constants = constants.at[:, info["is_anchored_factor"]].set(values) # noqa: PD008 + constants = constants.at[:, info.is_anchored_factor].set(values) # noqa: PD008 constants_for_observed = jnp.zeros( (dimensions.n_aug_periods, dimensions.n_observed_factors), diff --git a/src/skillmodels/process_model.py b/src/skillmodels/process_model.py index d5527cf6..9ceb17ed 100644 --- a/src/skillmodels/process_model.py +++ b/src/skillmodels/process_model.py @@ -1,8 +1,9 @@ """Functions to process model specifications from user-friendly to internal form.""" from copy import deepcopy +from dataclasses import replace from functools import partial -from typing import TYPE_CHECKING, Any, Literal +from typing import TYPE_CHECKING, Any import numpy as np import pandas as pd @@ -21,7 +22,9 @@ EndogenousFactorsInfo, EstimationOptions, FactorInfo, + FactorType, Labels, + MeasurementType, ProcessedModel, TransitionInfo, ) @@ -97,9 +100,7 @@ def process_model(model_dict: dict) -> ProcessedModel: ), factor_info=frozendict( { - fac: FactorInfo( - is_state=True, is_endogenous=False, is_correction=False - ) + fac: FactorInfo(factor_type=FactorType.STATE) for fac in labels.latent_factors } ), @@ -112,20 +113,8 @@ def process_model(model_dict: dict) -> ProcessedModel: has_endogenous_factors=has_endogenous_factors, ) transition_info = _get_transition_info(_model_dict_aug, labels) - # Create new Labels with transition_names (frozen dataclass requires replacement) - labels = Labels( - latent_factors=labels.latent_factors, - observed_factors=labels.observed_factors, - controls=labels.controls, - periods=labels.periods, - stagemap=labels.stagemap, - stages=labels.stages, - aug_periods=labels.aug_periods, - aug_periods_to_periods=labels.aug_periods_to_periods, - aug_stagemap=labels.aug_stagemap, - aug_stages=labels.aug_stages, - aug_stages_to_stages=labels.aug_stages_to_stages, - transition_names=tuple(transition_info.function_names.values()), + labels = replace( + labels, transition_names=tuple(transition_info.function_names.values()) ) return ProcessedModel( @@ -316,10 +305,8 @@ def _process_anchoring(model_dict: dict) -> Anchoring: """ if "anchoring" in model_dict: anch = model_dict["anchoring"] - return Anchoring( - anchoring=True, - outcomes=frozendict(anch.get("outcomes", {})), - factors=tuple(anch.get("outcomes", {}).keys()), + return Anchoring.from_config( + outcomes=anch.get("outcomes", {}), free_controls=anch.get("free_controls", False), free_constant=anch.get("free_constant", False), free_loadings=anch.get("free_loadings", False), @@ -328,15 +315,7 @@ def _process_anchoring(model_dict: dict) -> Anchoring: ), ) - return Anchoring( - anchoring=False, - outcomes=frozendict({}), - factors=(), - free_controls=False, - free_constant=False, - free_loadings=False, - ignore_constant_when_anchoring=False, - ) + return Anchoring.disabled() def _insert_empty_elements_into_list( @@ -477,10 +456,7 @@ def _get_endogenous_factors_info( """Collect information about endogenous factors.""" factor_info = {} for fac, v in model_dict["factors"].items(): - factor_info[fac] = FactorInfo( - is_state=( - not v.get("is_endogenous", False) and not 
v.get("is_correction", False) - ), + factor_info[fac] = FactorInfo.from_flags( is_endogenous=v.get("is_endogenous", False), is_correction=v.get("is_correction", False), ) @@ -506,13 +482,17 @@ def _get_aug_periods_to_aug_period_meas_types( aug_periods: tuple[int, ...] | KeysView[int], *, has_endogenous_factors: bool, -) -> dict[int, Literal["states", "endogenous_factors"]]: +) -> dict[int, MeasurementType]: if has_endogenous_factors: return { - aug_p: ("states" if aug_p % 2 == 0 else "endogenous_factors") + aug_p: ( + MeasurementType.STATES + if aug_p % 2 == 0 + else MeasurementType.ENDOGENOUS_FACTORS + ) for aug_p in aug_periods } - return dict.fromkeys(aug_periods, "states") + return dict.fromkeys(aug_periods, MeasurementType.STATES) def _get_update_info( diff --git a/src/skillmodels/simulate_data.py b/src/skillmodels/simulate_data.py index 4ae38332..7dbf553c 100644 --- a/src/skillmodels/simulate_data.py +++ b/src/skillmodels/simulate_data.py @@ -28,6 +28,7 @@ from skillmodels.process_data import process_data from skillmodels.process_debug_data import create_state_ranges from skillmodels.process_model import process_model +from skillmodels.types import ParsedParams # noqa: TC001 def simulate_dataset( @@ -115,7 +116,7 @@ def simulate_dataset( if n_obs is None: raise ValueError("n_obs must be set by either data or argument") - states, covs, log_weights, pardict = parse_params( + states, covs, log_weights, parsed_params = parse_params( params=jnp.array(params["value"].to_numpy()), parsing_info=parsing_info, dimensions=model.dimensions, @@ -127,7 +128,7 @@ def simulate_dataset( latent_states=states, covs=covs, log_weights=log_weights, - pardict=pardict, + parsed_params=parsed_params, labels=model.labels, dimensions=model.dimensions, n_obs=n_obs, @@ -185,7 +186,7 @@ def _simulate_dataset( latent_states: Array, covs: Array, log_weights: Array, - pardict: dict, + parsed_params: ParsedParams, labels: Labels, dimensions: Dimensions, n_obs: int, @@ -205,7 +206,7 @@ def _simulate_dataset( covs: Array of shape (n_obs, n_mixtures, n_states, n_states) with initial covariance matrices. log_weights: Array of shape (n_obs, n_mixtures) with log mixture weights. - pardict: Dictionary with parsed parameters. + parsed_params: ParsedParams dataclass with parsed parameters. labels: Labels for the model quantities like factors, periods, controls. dimensions: Dimensional information like n_states, n_periods, n_controls. n_obs: Number of observations. 
@@ -231,23 +232,23 @@ def _simulate_dataset( weights = np.exp(log_weights)[0] loadings_df = pd.DataFrame( - data=pardict["loadings"], + data=parsed_params.loadings, index=update_info.index, columns=labels.latent_factors, ) control_params_df = pd.DataFrame( - data=pardict["controls"], + data=parsed_params.controls, index=update_info.index, columns=labels.controls, ) meas_sds = pd.DataFrame( - data=pardict["meas_sds"].reshape(-1, 1), + data=parsed_params.meas_sds.reshape(-1, 1), index=update_info.index, ) - transition_params = pardict["transition"] - shock_sds = pardict["shock_sds"] + transition_params = parsed_params.transition + shock_sds = parsed_params.shock_sds dist_args = [] for mixture in range(dimensions.n_mixtures): @@ -281,11 +282,11 @@ def _simulate_dataset( trans_coeffs = {k: arr[t] for k, arr in transition_params.items()} # get anchoring_scaling_factors for the period - anchoring_scaling_factors = pardict["anchoring_scaling_factors"][ + anchoring_scaling_factors = parsed_params.anchoring_scaling_factors[ jnp.array([t, t + 1]) ] # get anchoring constants for the period - anchoring_constants = pardict["anchoring_constants"][jnp.array([t, t + 1])] + anchoring_constants = parsed_params.anchoring_constants[jnp.array([t, t + 1])] # call transform_sigma_points and convert result to numpy next_states = np.array( diff --git a/src/skillmodels/types.py b/src/skillmodels/types.py index 222bba0f..c1e01e38 100644 --- a/src/skillmodels/types.py +++ b/src/skillmodels/types.py @@ -2,12 +2,35 @@ from collections.abc import Callable from dataclasses import dataclass -from typing import Literal +from enum import Enum, auto +from typing import NewType import pandas as pd from frozendict import frozendict from jax import Array +# NewType definitions for domain safety +# These prevent accidentally mixing up semantically different int values +Period = NewType("Period", int) +AugPeriod = NewType("AugPeriod", int) +Stage = NewType("Stage", int) +AugStage = NewType("AugStage", int) + + +class FactorType(Enum): + """Type of a latent factor in the model.""" + + STATE = auto() # Regular state factor + ENDOGENOUS = auto() # Endogenous factor (not a correction) + CORRECTION = auto() # Correction factor (is_endogenous=True, is_correction=True) + + +class MeasurementType(Enum): + """Type of measurement in an augmented period.""" + + STATES = auto() + ENDOGENOUS_FACTORS = auto() + @dataclass(frozen=True) class Dimensions: @@ -67,6 +90,52 @@ class Anchoring: free_loadings: bool ignore_constant_when_anchoring: bool + @classmethod + def disabled(cls) -> Anchoring: + """Create an Anchoring config with anchoring disabled.""" + return cls( + anchoring=False, + outcomes=frozendict({}), + factors=(), + free_controls=False, + free_constant=False, + free_loadings=False, + ignore_constant_when_anchoring=False, + ) + + @classmethod + def from_config( + cls, + outcomes: dict[str, str], + *, + free_controls: bool = False, + free_constant: bool = False, + free_loadings: bool = False, + ignore_constant_when_anchoring: bool = False, + ) -> Anchoring: + """Create an Anchoring config from a configuration dictionary. + + Args: + outcomes: Mapping from factor names to outcome variable names. + free_controls: Whether control parameters are free in anchoring equations. + free_constant: Whether constant is free in anchoring equations. + free_loadings: Whether loadings are free in anchoring equations. + ignore_constant_when_anchoring: Whether to ignore constant when anchoring. 
+ + Returns: + Configured Anchoring instance with anchoring enabled. + + """ + return cls( + anchoring=True, + outcomes=frozendict(outcomes), + factors=tuple(outcomes.keys()), + free_controls=free_controls, + free_constant=free_constant, + free_loadings=free_loadings, + ignore_constant_when_anchoring=ignore_constant_when_anchoring, + ) + @dataclass(frozen=True) class EstimationOptions: @@ -95,9 +164,48 @@ class TransitionInfo: class FactorInfo: """Information for a single factor.""" - is_state: bool - is_endogenous: bool - is_correction: bool + factor_type: FactorType + + @property + def is_state(self) -> bool: + """Whether the factor is a regular state factor.""" + return self.factor_type == FactorType.STATE + + @property + def is_endogenous(self) -> bool: + """Whether the factor is endogenous (ENDOGENOUS or CORRECTION).""" + return self.factor_type in (FactorType.ENDOGENOUS, FactorType.CORRECTION) + + @property + def is_correction(self) -> bool: + """Whether the factor is a correction factor.""" + return self.factor_type == FactorType.CORRECTION + + @classmethod + def from_flags( + cls, *, is_endogenous: bool = False, is_correction: bool = False + ) -> FactorInfo: + """Create FactorInfo from boolean flags. + + Args: + is_endogenous: Whether the factor is endogenous. + is_correction: Whether the factor is a correction (must be endogenous). + + Returns: + FactorInfo with the appropriate FactorType. + + Raises: + ValueError: If is_correction is True but is_endogenous is False. + + """ + if is_correction and not is_endogenous: + msg = "A correction factor must also be endogenous" + raise ValueError(msg) + if is_correction: + return cls(factor_type=FactorType.CORRECTION) + if is_endogenous: + return cls(factor_type=FactorType.ENDOGENOUS) + return cls(factor_type=FactorType.STATE) @dataclass(frozen=True) @@ -105,9 +213,7 @@ class EndogenousFactorsInfo: """Information about endogenous factors in the model.""" has_endogenous_factors: bool - aug_periods_to_aug_period_meas_types: frozendict[ - int, Literal["states", "endogenous_factors"] - ] + aug_periods_to_aug_period_meas_types: frozendict[int, MeasurementType] bounds_distance: float aug_periods_from_period: Callable[[int], list[int]] factor_info: frozendict[str, FactorInfo] diff --git a/src/skillmodels/visualize_transition_equations.py b/src/skillmodels/visualize_transition_equations.py index 060e2e90..0a755915 100644 --- a/src/skillmodels/visualize_transition_equations.py +++ b/src/skillmodels/visualize_transition_equations.py @@ -19,6 +19,7 @@ from skillmodels.process_data import process_data from skillmodels.process_debug_data import create_state_ranges from skillmodels.process_model import process_model +from skillmodels.types import ParsedParams # noqa: TC001 from skillmodels.utils_plotting import get_layout_kwargs, get_make_subplot_kwargs if TYPE_CHECKING: @@ -289,7 +290,7 @@ def _get_dictionary_with_plots( observed_factors = model.labels.observed_factors states_data = _get_states_data(model, period, data, states, observed_factors) params = _set_index_params(model, params) - pardict = _get_pardict(model, params) + parsed_params = _get_parsed_params(model, params) state_ranges = _get_state_ranges(state_ranges, states_data, all_factors) layout_kwargs = get_layout_kwargs( layout_kwargs=layout_kwargs, @@ -313,7 +314,7 @@ def _get_dictionary_with_plots( else: aug_period = max(_aug_periods) transition_params = { - output_factor: pardict["transition"][output_factor][aug_period] + output_factor: parsed_params.transition[output_factor][aug_period] 
} if quantiles_of_other_factors is not None: @@ -377,11 +378,11 @@ def _get_state_ranges( return state_ranges -def _get_pardict( +def _get_parsed_params( model: ProcessedModel, params: pd.DataFrame, -) -> dict[str, Any]: - """Get parsed params dictionary.""" +) -> ParsedParams: + """Get parsed params dataclass.""" parsing_info = create_parsing_info( params_index=params.index, # ty: ignore[invalid-argument-type] update_info=model.update_info, @@ -390,14 +391,14 @@ def _get_pardict( has_endogenous_factors=model.endogenous_factors_info.has_endogenous_factors, ) - _, _, _, pardict = parse_params( + _, _, _, parsed_params = parse_params( params=jnp.array(params["value"].to_numpy()), parsing_info=parsing_info, dimensions=model.dimensions, labels=model.labels, n_obs=1, ) - return pardict + return parsed_params def _set_index_params( diff --git a/tests/test_parse_params.py b/tests/test_parse_params.py index 036ab9a3..35e8d2e9 100644 --- a/tests/test_parse_params.py +++ b/tests/test_parse_params.py @@ -57,33 +57,38 @@ def parsed_parameters(): params_vec = jnp.arange(len(p_index)) n_obs = 5 - parsed = parse_params(params_vec, parsing_info, dimensions, labels, n_obs) - - return dict( - zip(["states", "upper_chols", "log_weights", "pardict"], parsed, strict=False) + states, upper_chols, log_weights, parsed_params = parse_params( + params_vec, parsing_info, dimensions, labels, n_obs ) + return { + "states": states, + "upper_chols": upper_chols, + "log_weights": log_weights, + "parsed_params": parsed_params, + } + def test_controls(parsed_parameters) -> None: expected = jnp.arange(118).reshape(59, 2) - aae(parsed_parameters["pardict"]["controls"], expected) + aae(parsed_parameters["parsed_params"].controls, expected) def test_loadings(parsed_parameters) -> None: expected_values = jnp.arange(118, 177) - calculated = parsed_parameters["pardict"]["loadings"] + calculated = parsed_parameters["parsed_params"].loadings calculated_values = calculated[calculated != 0] aae(expected_values, calculated_values) def test_meas_sds(parsed_parameters) -> None: expected = jnp.arange(177, 236) - aae(parsed_parameters["pardict"]["meas_sds"], expected) + aae(parsed_parameters["parsed_params"].meas_sds, expected) def test_shock_sds(parsed_parameters) -> None: expected = jnp.arange(236, 257).reshape(7, 3) - aae(parsed_parameters["pardict"]["shock_sds"], expected) + aae(parsed_parameters["parsed_params"].shock_sds, expected) def test_initial_states(parsed_parameters) -> None: @@ -101,7 +106,7 @@ def test_initial_upper_chols(parsed_parameters) -> None: def test_transition_parameters(parsed_parameters) -> None: - calculated = parsed_parameters["pardict"]["transition"] + calculated = parsed_parameters["parsed_params"].transition aae(calculated["fac1"], jnp.arange(385, 413).reshape(7, 4) - 118) aae(calculated["fac2"], jnp.arange(413, 441).reshape(7, 4) - 118) @@ -111,14 +116,14 @@ def test_transition_parameters(parsed_parameters) -> None: def test_anchoring_scaling_factors(parsed_parameters) -> None: - calculated = parsed_parameters["pardict"]["anchoring_scaling_factors"] + calculated = parsed_parameters["parsed_params"].anchoring_scaling_factors expected = np.ones((8, 3)) expected[:, 0] = jnp.array([127 + 7 * i for i in range(8)]) aae(calculated, expected) def test_anchoring_constants(parsed_parameters) -> None: - calculated = parsed_parameters["pardict"]["anchoring_constants"] + calculated = parsed_parameters["parsed_params"].anchoring_constants expected = np.zeros((8, 3)) expected[:, 0] = jnp.array([18 + i * 14 for i in 
range(8)]) aae(calculated, expected) From 74b2f1813edec1c670c571c66bb2462da8aa7363 Mon Sep 17 00:00:00 2001 From: Hans-Martin von Gaudecker Date: Sun, 11 Jan 2026 14:48:16 +0100 Subject: [PATCH 12/27] Move unsafe_fixes = false from .pre-commit config to pyproject. --- .pre-commit-config.yaml | 1 - pyproject.toml | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7c38d11f..7afbac1d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -54,7 +54,6 @@ repos: - jupyter args: - --fix - # - --unsafe-fixes - id: ruff-format types_or: - python diff --git a/pyproject.toml b/pyproject.toml index f8e28e92..eac78f9d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -169,7 +169,7 @@ ty = {features = ["test", "ty"], solve-group = "default"} target-version = "py314" fix = true line-length = 88 - +unsafe-fixes = false [tool.ruff.lint] select = ["ALL"] From 04aaa10f8dc05a4c755dbe9be1c0767f2225893c Mon Sep 17 00:00:00 2001 From: Hans-Martin von Gaudecker Date: Mon, 12 Jan 2026 07:41:14 +0100 Subject: [PATCH 13/27] Fix query in data simulation. --- src/skillmodels/simulate_data.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/skillmodels/simulate_data.py b/src/skillmodels/simulate_data.py index 7dbf553c..8dcf3b55 100644 --- a/src/skillmodels/simulate_data.py +++ b/src/skillmodels/simulate_data.py @@ -375,12 +375,14 @@ def _collapse_aug_periods_to_periods( ] state_cols = [fac for fac in factors if fac not in endogenous_cols] - out = df.query('_aug_period_meas_type == "endogenous_factors"')[ + out = df.query("_aug_period_meas_type == @MeasurementType.ENDOGENOUS_FACTORS")[ ["id", "period", *endogenous_cols] ] return pd.merge( out, - df.query('_aug_period_meas_type == "states"')[["id", "period", *state_cols]], + df.query("_aug_period_meas_type == @MeasurementType.STATES")[ + ["id", "period", *state_cols] + ], on=["id", "period"], how="outer", ) From 493ead076b33a4071a520234cc1c0da0ffd2c160 Mon Sep 17 00:00:00 2001 From: Hans-Martin von Gaudecker Date: Mon, 12 Jan 2026 08:12:51 +0100 Subject: [PATCH 14/27] More fixes to imports, add test. 
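
The query-based fix in the previous commit left two problems. First, pandas
resolves @-prefixed names in df.query against the caller's namespace, so
@MeasurementType only works once the enum is actually imported in this module.
Second, the underlying bug class stays easy to reintroduce: a MeasurementType
member never compares equal to a string, so string comparisons silently match
nothing. Plain boolean masks built from the imported enum avoid both pitfalls.
A minimal sketch of the failure mode (toy frame, not part of the shipped test):

    import pandas as pd

    from skillmodels.types import MeasurementType

    df = pd.DataFrame(
        {
            "_aug_period_meas_type": [
                MeasurementType.STATES,
                MeasurementType.ENDOGENOUS_FACTORS,
            ]
        }
    )
    # The old string comparison matches nothing: enum members are not strings.
    assert df[df["_aug_period_meas_type"] == "states"].empty
    # Comparing against the member itself selects the intended rows.
    assert (df["_aug_period_meas_type"] == MeasurementType.STATES).sum() == 1
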
--- pixi.lock | 4 +- src/skillmodels/simulate_data.py | 13 +++---- tests/test_simulate_data.py | 67 +++++++++++++++++++++++++++++++- 3 files changed, 74 insertions(+), 10 deletions(-) diff --git a/pixi.lock b/pixi.lock index 0afb53a6..39c7df6c 100644 --- a/pixi.lock +++ b/pixi.lock @@ -8471,8 +8471,8 @@ packages: timestamp: 1753199211006 - pypi: ./ name: skillmodels - version: 0.0.24.dev246+gdce66ad7c.d20260109 - sha256: c4aac3043ab0ace6fc5dfb9a40c211225a79046f52762a52dc441938d4487364 + version: 0.0.24.dev248+gb15372a52.d20260111 + sha256: 351c9cc8e9a879fe282da02d64b6d79d4326650bb3466ea8db23b29628fba895 requires_dist: - dags - frozendict diff --git a/src/skillmodels/simulate_data.py b/src/skillmodels/simulate_data.py index 8dcf3b55..f294cd09 100644 --- a/src/skillmodels/simulate_data.py +++ b/src/skillmodels/simulate_data.py @@ -28,7 +28,7 @@ from skillmodels.process_data import process_data from skillmodels.process_debug_data import create_state_ranges from skillmodels.process_model import process_model -from skillmodels.types import ParsedParams # noqa: TC001 +from skillmodels.types import MeasurementType, ParsedParams def simulate_dataset( @@ -375,14 +375,13 @@ def _collapse_aug_periods_to_periods( ] state_cols = [fac for fac in factors if fac not in endogenous_cols] - out = df.query("_aug_period_meas_type == @MeasurementType.ENDOGENOUS_FACTORS")[ - ["id", "period", *endogenous_cols] - ] + is_endogenous = df["_aug_period_meas_type"] == MeasurementType.ENDOGENOUS_FACTORS + is_states = df["_aug_period_meas_type"] == MeasurementType.STATES + + out = df.loc[is_endogenous, ["id", "period", *endogenous_cols]] return pd.merge( out, - df.query("_aug_period_meas_type == @MeasurementType.STATES")[ - ["id", "period", *state_cols] - ], + df.loc[is_states, ["id", "period", *state_cols]], on=["id", "period"], how="outer", ) diff --git a/tests/test_simulate_data.py b/tests/test_simulate_data.py index ff8dacfd..be7c6b4a 100644 --- a/tests/test_simulate_data.py +++ b/tests/test_simulate_data.py @@ -9,7 +9,12 @@ from numpy.testing import assert_array_almost_equal as aaae from skillmodels.config import TEST_DATA_DIR -from skillmodels.simulate_data import measurements_from_states, simulate_dataset +from skillmodels.process_model import process_model +from skillmodels.simulate_data import ( + _collapse_aug_periods_to_periods, + measurements_from_states, + simulate_dataset, +) REGRESSION_VAULT = Path(__file__).parent / "regression_vault" @@ -56,3 +61,63 @@ def test_measurements_from_factors() -> None: } expected = np.array([[1, 1, 1], [1.9, 1.9, 1.9]]) aaae(measurements_from_states(**inputs), expected) + + +@pytest.fixture +def model2_with_endogenous(): + """Model2 with fac3 set as endogenous factor.""" + with (TEST_DATA_DIR / "model2.yaml").open() as y: + model_dict = yaml.load(y, Loader=yaml.SafeLoader) + model_dict["factors"]["fac3"]["is_endogenous"] = True + del model_dict["stagemap"] + del model_dict["anchoring"] + return model_dict + + +def test_collapse_aug_periods_to_periods_with_endogenous_factors( + model2_with_endogenous, +) -> None: + """Test that _collapse_aug_periods_to_periods works with endogenous factors. + + This is a regression test for a bug where MeasurementType enum values were + compared against strings in pandas queries, causing empty results. 
+ """ + model = process_model(model2_with_endogenous) + factors = model.labels.latent_factors + + # Create a mock aug_latent_data DataFrame with aug_period column + n_obs = 5 + n_aug_periods = model.dimensions.n_aug_periods - 1 # Exclude last half-period + records = [] + for aug_p in range(n_aug_periods): + for obs_id in range(n_obs): + record = {"id": obs_id, "aug_period": aug_p} + for fac in factors: + record[fac] = np.random.randn() + records.append(record) + aug_latent_data = pd.DataFrame(records) + + result = _collapse_aug_periods_to_periods( + df=aug_latent_data, + factors=factors, + aug_periods_to_periods=model.labels.aug_periods_to_periods, + endogenous_factors_info=model.endogenous_factors_info, + ) + + # The result should not be empty + assert len(result) > 0, "Collapsed DataFrame should not be empty" + + # Should have 'period' column, not 'aug_period' + assert "period" in result.columns + assert "aug_period" not in result.columns + + # Should have all factor columns + for fac in factors: + assert fac in result.columns + + # Should have correct number of unique periods (half of aug_periods) + expected_n_periods = model.dimensions.n_periods + assert result["period"].nunique() == expected_n_periods + + # Should have all observations for each period + assert len(result) == n_obs * expected_n_periods From 51209ad695c81ec832ee7fa865aabfd7ffafa919 Mon Sep 17 00:00:00 2001 From: Hans-Martin von Gaudecker Date: Mon, 12 Jan 2026 09:10:30 +0100 Subject: [PATCH 15/27] Use more tuples in place of lists to prevent errors. --- src/skillmodels/correlation_heatmap.py | 77 +++++++++---------- .../visualize_factor_distributions.py | 34 ++++---- .../visualize_transition_equations.py | 40 +++++----- 3 files changed, 76 insertions(+), 75 deletions(-) diff --git a/src/skillmodels/correlation_heatmap.py b/src/skillmodels/correlation_heatmap.py index 795ba99d..522acd08 100644 --- a/src/skillmodels/correlation_heatmap.py +++ b/src/skillmodels/correlation_heatmap.py @@ -137,7 +137,7 @@ def plot_correlation_heatmap( def get_measurements_corr( data: pd.DataFrame, model_dict: dict, - factors: list[str] | str | None, + factors: tuple[str, ...] | str | None, periods: float | list[int] | None, ) -> pd.DataFrame: """Get data frame with measurement correlations. @@ -179,7 +179,7 @@ def get_measurements_corr( def get_quasi_scores_corr( data: pd.DataFrame, model_dict: dict, - factors: list[str] | str | None, + factors: tuple[str, ...] | str | None, periods: float | list[int] | None, ) -> pd.DataFrame: """Get data frame with correlations of factor scores. @@ -225,7 +225,7 @@ def get_scores_corr( data: pd.DataFrame, params: pd.DataFrame, model_dict: dict, - factors: list[str] | str | None, + factors: tuple[str, ...] | str | None, periods: float | list[int] | None, ) -> pd.DataFrame: """Get data frame with correlations of factor scores. @@ -329,8 +329,8 @@ def _get_measurement_data( data: pd.DataFrame, update_info_by_period: pd.DataFrame, periods: list[int], - latent_factors: list[str], - observed_factors: list[str], + latent_factors: tuple[str, ...], + observed_factors: tuple[str, ...], ) -> pd.DataFrame: """Get data frame with factor measurements in each period, in wide format. @@ -376,8 +376,8 @@ def _get_measurement_data_for_single_period( data: pd.DataFrame, update_info_by_period: pd.DataFrame, period: int, - latent_factors: list[str], - observed_factors: list[str], + latent_factors: tuple[str, ...], + observed_factors: tuple[str, ...], ) -> pd.DataFrame: """Extract measurements of factors for the given period. 
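
The switch from list[str] to tuple[str, ...] in the signatures above is more
than a style choice: a tuple handed to a helper cannot be mutated at a
distance, and tuples stored on the frozen model dataclasses stay hashable.
A small illustration of the error class the commit title refers to, with a
hypothetical helper that is not part of this code base:

    def broken_helper(factors: list[str]) -> None:
        factors.append("anchoring_outcome")  # silently mutates the caller's labels


    labels = ["fac1", "fac2"]
    broken_helper(labels)
    assert labels == ["fac1", "fac2", "anchoring_outcome"]  # caller state changed

    frozen_labels = ("fac1", "fac2")
    # broken_helper(frozen_labels)  # AttributeError: 'tuple' has no attribute 'append'
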
@@ -411,8 +411,8 @@ def _get_measurement_data_for_multiple_periods( data: pd.DataFrame, update_info_by_period: pd.DataFrame, periods: list[int], - latent_factors: list[str], - observed_factors: list[str], + latent_factors: tuple[str, ...], + observed_factors: tuple[str, ...], ) -> pd.DataFrame: """Extract measurements for factors for given periods. @@ -451,8 +451,8 @@ def _get_quasi_factor_scores_data( data: pd.DataFrame, update_info_by_period: pd.DataFrame, periods: list[int], - latent_factors: list[str], - observed_factors: list[str], + latent_factors: tuple[str, ...], + observed_factors: tuple[str, ...], ) -> pd.DataFrame: """Get data frame with summary information on factor measurements in each period. @@ -501,8 +501,8 @@ def _get_quasi_factor_scores_data_for_single_period( data: pd.DataFrame, update_info_by_period: pd.DataFrame, period: int, - latent_factors: list[str], - observed_factors: list[str], + latent_factors: tuple[str, ...], + observed_factors: tuple[str, ...], ) -> pd.DataFrame: """Get frame with summary scores on factor measurements in a given period. @@ -543,8 +543,8 @@ def _get_quasi_factor_scores_data_for_multiple_periods( data: pd.DataFrame, update_info_by_period: pd.DataFrame, periods: list[int], - latent_factors: list[str], - observed_factors: list[str], + latent_factors: tuple[str, ...], + observed_factors: tuple[str, ...], ) -> pd.DataFrame: """Get frame with summary scores of factor measurements in a given period. @@ -584,8 +584,8 @@ def _get_factor_scores_data( params: pd.DataFrame, model: ProcessedModel, periods: list[int], - latent_factors: list[str], - observed_factors: list[str], + latent_factors: tuple[str, ...], + observed_factors: tuple[str, ...], ) -> pd.DataFrame: """Get data frame with factor scores in each period. @@ -637,8 +637,8 @@ def _get_factor_scores_data_for_single_period( params: pd.DataFrame, model: ProcessedModel, period: int, - latent_factors: list[str], - observed_factors: list[str], + latent_factors: tuple[str, ...], + observed_factors: tuple[str, ...], ) -> pd.DataFrame: """Get frame with factor scores in a given period. @@ -691,8 +691,8 @@ def _get_factor_scores_data_for_single_model_period( update_info: pd.DataFrame, aug_period: int, period: int, - latent_factors: list[str], - observed_factors: list[str], + latent_factors: tuple[str, ...], + observed_factors: tuple[str, ...], ) -> pd.DataFrame: """Get frame with factor scores in a given model period. @@ -748,8 +748,8 @@ def _get_factor_scores_data_for_multiple_periods( params: pd.DataFrame, model: ProcessedModel, periods: list[int], - latent_factors: list[str], - observed_factors: list[str], + latent_factors: tuple[str, ...], + observed_factors: tuple[str, ...], ) -> pd.DataFrame: """Get frame with factor scores in a given period. @@ -787,27 +787,26 @@ def _get_factor_scores_data_for_multiple_periods( def _process_factors( model: ProcessedModel, - factors: list[str] | str | None, -) -> tuple[list[str], list[str]]: - """Process factors to get a tuple of lists.""" + factors: tuple[str, ...] 
| str | None, +) -> tuple[tuple[str, ...], tuple[str, ...]]: + """Process factors to get a tuple of tuples.""" if not factors: - latent_factors = list(model.labels.latent_factors) - observed_factors = list(model.labels.observed_factors) + latent_factors = model.labels.latent_factors + observed_factors = model.labels.observed_factors elif isinstance(factors, str): if factors in model.labels.latent_factors: - latent_factors = [factors] - observed_factors = [] + latent_factors = (factors,) + observed_factors = () elif factors in model.labels.observed_factors: - observed_factors = [factors] - latent_factors = [] + observed_factors = (factors,) + latent_factors = () else: - observed_factors = [] - latent_factors = [] - for factor in factors: - if factor in model.labels.latent_factors: - latent_factors.append(factor) - elif factor in model.labels.observed_factors: - observed_factors.append(factor) + latent_factors = tuple( + fac for fac in factors if fac in model.labels.latent_factors + ) + observed_factors = tuple( + fac for fac in factors if fac in model.labels.observed_factors + ) return latent_factors, observed_factors # ty: ignore[possibly-unresolved-reference] diff --git a/src/skillmodels/visualize_factor_distributions.py b/src/skillmodels/visualize_factor_distributions.py index b0f0f3a6..cb5561c8 100644 --- a/src/skillmodels/visualize_factor_distributions.py +++ b/src/skillmodels/visualize_factor_distributions.py @@ -28,7 +28,7 @@ def combine_distribution_plots( kde_plots: dict[str, go.Figure], contour_plots: dict[tuple[str, str], go.Figure], surface_plots: dict[tuple[str, str], go.Figure] | None = None, - factor_order: list[str] | None = None, + factor_order: tuple[str, ...] | None = None, factor_mapping: dict[str, str] | None = None, make_subplot_kwargs: dict[str, Any] | None = None, *, @@ -167,7 +167,7 @@ def univariate_densities( model_dict: dict[str, Any], params: pd.DataFrame, period: int, - factors: list[str] | None = None, + factors: tuple[str, ...] | None = None, *, observed_factors: bool = False, states: pd.DataFrame | dict[str, pd.DataFrame] | list[pd.DataFrame] | None = None, @@ -277,7 +277,7 @@ def bivariate_density_contours( model_dict: dict[str, Any], params: pd.DataFrame, period: int, - factors: list[str] | None = None, + factors: tuple[str, ...] | None = None, *, observed_factors: bool = False, states: pd.DataFrame | dict[str, pd.DataFrame] | list[pd.DataFrame] | None = None, @@ -408,7 +408,7 @@ def bivariate_density_surfaces( model_dict: dict[str, Any], params: pd.DataFrame, period: int, - factors: list[str] | None = None, + factors: tuple[str, ...] | None = None, *, observed_factors: bool = False, states: pd.DataFrame | None = None, @@ -532,7 +532,7 @@ def bivariate_density_surfaces( def _process_data( states: pd.DataFrame | dict[str, pd.DataFrame] | list[pd.DataFrame], period: int, - factors: list[str], + factors: tuple[str, ...], aug_periods_to_periods: Mapping[int, int], observed_states: pd.DataFrame | None = None, ) -> pd.DataFrame: @@ -687,7 +687,7 @@ def _process_layout_kwargs_3d( def _process_factor_mapping_dist( mapper: dict[str, str] | None, - factors: list[str], + factors: tuple[str, ...], ) -> dict[str, str]: """Process mapper to return dictionary with old and new factor names.""" if mapper is None: @@ -700,37 +700,37 @@ def _process_factor_mapping_dist( def _get_ordered_factors( - factor_order: list[str] | str | None, - factors: list[str], -) -> list[str]: - """Process factor orders to return list of strings.""" + factor_order: tuple[str, ...] 
| str | None, + factors: tuple[str, ...], +) -> tuple[str, ...]: + """Process factor orders to return tuple of strings.""" if factor_order is None: ordered_factors = factors elif isinstance(factor_order, str): - ordered_factors = [factor_order] + ordered_factors = (factor_order,) else: ordered_factors = factor_order return ordered_factors def _get_factors( - factors: list[str] | None, + factors: tuple[str, ...] | None, *, observed_factors: bool, model: ProcessedModel, -) -> list[str]: - """Proccess factor names to return list of strings.""" +) -> tuple[str, ...]: + """Proccess factor names to return tuple of strings.""" if factors is None: if observed_factors: - factors = list(model.labels.all_factors) + factors = model.labels.all_factors else: - factors = list(model.labels.latent_factors) + factors = model.labels.latent_factors return factors def _get_data_observed_factors( data: pd.DataFrame, - factors: list[str], + factors: tuple[str, ...], ) -> pd.DataFrame | None: """Get data with observed factors if any.""" to_concat = [] diff --git a/src/skillmodels/visualize_transition_equations.py b/src/skillmodels/visualize_transition_equations.py index 0a755915..1f1d5963 100644 --- a/src/skillmodels/visualize_transition_equations.py +++ b/src/skillmodels/visualize_transition_equations.py @@ -28,8 +28,8 @@ def combine_transition_plots( plots_dict: dict[tuple[str, str], go.Figure], - column_order: list[str] | str | None = None, - row_order: list[str] | str | None = None, + column_order: tuple[str, ...] | str | None = None, + row_order: tuple[str, ...] | str | None = None, factor_mapping: dict[str, str] | None = None, make_subplot_kwargs: dict[str, Any] | None = None, *, @@ -236,7 +236,7 @@ def _get_dictionary_with_plots( params: pd.DataFrame, states: pd.DataFrame, state_ranges: dict[str, pd.DataFrame] | None, - latent_factors: list[str], + latent_factors: tuple[str, ...], all_factors: tuple[str, ...], quantiles_of_other_factors: list[float] | None, period: int, @@ -559,8 +559,8 @@ def _prepare_data_for_one_plot_average_2d( def _process_factor_mapping_trans( factor_mapper: dict[str, str] | None, - output_factors: list[str], - input_factors: list[str], + output_factors: tuple[str, ...], + input_factors: tuple[str, ...], ) -> dict[str, str]: """Process mapper to return dictionary with old and new factor names.""" all_factors = input_factors + output_factors @@ -574,29 +574,31 @@ def _process_factor_mapping_trans( def _process_orders( - columns: list[str] | str | None, - rows: list[str] | str | None, + columns: tuple[str, ...] | str | None, + rows: tuple[str, ...] | str | None, plots_dict: dict[tuple[str, str], go.Figure], -) -> tuple[list[str], list[str]]: - """Process axes orders to return list of strings.""" - out_columns: list[str] - out_rows: list[str] +) -> tuple[tuple[str, ...], tuple[str, ...]]: + """Process axes orders to return tuples of strings.""" + out_columns: tuple[str, ...] + out_rows: tuple[str, ...] 
if columns is None: - out_columns = [] + seen: list[str] = [] for f in plots_dict: - if f[0] not in out_columns: - out_columns.append(f[0]) + if f[0] not in seen: + seen.append(f[0]) + out_columns = tuple(seen) elif isinstance(columns, str): - out_columns = [columns] + out_columns = (columns,) else: out_columns = columns if rows is None: - out_rows = [] + seen = [] for f in plots_dict: - if f[1] not in out_rows: - out_rows.append(f[1]) + if f[1] not in seen: + seen.append(f[1]) + out_rows = tuple(seen) elif isinstance(rows, str): - out_rows = [rows] + out_rows = (rows,) else: out_rows = rows return out_columns, out_rows From 21da17683d0a6885dbf555800cff838a0ba1ae09 Mon Sep 17 00:00:00 2001 From: Hans-Martin von Gaudecker Date: Mon, 12 Jan 2026 09:40:58 +0100 Subject: [PATCH 16/27] Fix typing. --- src/skillmodels/correlation_heatmap.py | 48 +++++++++---------- src/skillmodels/utils_plotting.py | 8 ++-- .../visualize_factor_distributions.py | 27 +++++------ .../visualize_transition_equations.py | 12 ++--- tests/test_correlation_heatmap.py | 12 ++--- 5 files changed, 53 insertions(+), 54 deletions(-) diff --git a/src/skillmodels/correlation_heatmap.py b/src/skillmodels/correlation_heatmap.py index 522acd08..0286f1bc 100644 --- a/src/skillmodels/correlation_heatmap.py +++ b/src/skillmodels/correlation_heatmap.py @@ -137,7 +137,7 @@ def plot_correlation_heatmap( def get_measurements_corr( data: pd.DataFrame, model_dict: dict, - factors: tuple[str, ...] | str | None, + factors: list[str] | tuple[str, ...] | str | None, periods: float | list[int] | None, ) -> pd.DataFrame: """Get data frame with measurement correlations. @@ -179,7 +179,7 @@ def get_measurements_corr( def get_quasi_scores_corr( data: pd.DataFrame, model_dict: dict, - factors: tuple[str, ...] | str | None, + factors: list[str] | tuple[str, ...] | str | None, periods: float | list[int] | None, ) -> pd.DataFrame: """Get data frame with correlations of factor scores. @@ -225,7 +225,7 @@ def get_scores_corr( data: pd.DataFrame, params: pd.DataFrame, model_dict: dict, - factors: tuple[str, ...] | str | None, + factors: list[str] | tuple[str, ...] | str | None, periods: float | list[int] | None, ) -> pd.DataFrame: """Get data frame with correlations of factor scores. @@ -329,8 +329,8 @@ def _get_measurement_data( data: pd.DataFrame, update_info_by_period: pd.DataFrame, periods: list[int], - latent_factors: tuple[str, ...], - observed_factors: tuple[str, ...], + latent_factors: list[str] | tuple[str, ...], + observed_factors: list[str] | tuple[str, ...], ) -> pd.DataFrame: """Get data frame with factor measurements in each period, in wide format. @@ -376,8 +376,8 @@ def _get_measurement_data_for_single_period( data: pd.DataFrame, update_info_by_period: pd.DataFrame, period: int, - latent_factors: tuple[str, ...], - observed_factors: tuple[str, ...], + latent_factors: list[str] | tuple[str, ...], + observed_factors: list[str] | tuple[str, ...], ) -> pd.DataFrame: """Extract measurements of factors for the given period. @@ -411,8 +411,8 @@ def _get_measurement_data_for_multiple_periods( data: pd.DataFrame, update_info_by_period: pd.DataFrame, periods: list[int], - latent_factors: tuple[str, ...], - observed_factors: tuple[str, ...], + latent_factors: list[str] | tuple[str, ...], + observed_factors: list[str] | tuple[str, ...], ) -> pd.DataFrame: """Extract measurements for factors for given periods. 
@@ -451,8 +451,8 @@ def _get_quasi_factor_scores_data( data: pd.DataFrame, update_info_by_period: pd.DataFrame, periods: list[int], - latent_factors: tuple[str, ...], - observed_factors: tuple[str, ...], + latent_factors: list[str] | tuple[str, ...], + observed_factors: list[str] | tuple[str, ...], ) -> pd.DataFrame: """Get data frame with summary information on factor measurements in each period. @@ -501,8 +501,8 @@ def _get_quasi_factor_scores_data_for_single_period( data: pd.DataFrame, update_info_by_period: pd.DataFrame, period: int, - latent_factors: tuple[str, ...], - observed_factors: tuple[str, ...], + latent_factors: list[str] | tuple[str, ...], + observed_factors: list[str] | tuple[str, ...], ) -> pd.DataFrame: """Get frame with summary scores on factor measurements in a given period. @@ -543,8 +543,8 @@ def _get_quasi_factor_scores_data_for_multiple_periods( data: pd.DataFrame, update_info_by_period: pd.DataFrame, periods: list[int], - latent_factors: tuple[str, ...], - observed_factors: tuple[str, ...], + latent_factors: list[str] | tuple[str, ...], + observed_factors: list[str] | tuple[str, ...], ) -> pd.DataFrame: """Get frame with summary scores of factor measurements in a given period. @@ -584,8 +584,8 @@ def _get_factor_scores_data( params: pd.DataFrame, model: ProcessedModel, periods: list[int], - latent_factors: tuple[str, ...], - observed_factors: tuple[str, ...], + latent_factors: list[str] | tuple[str, ...], + observed_factors: list[str] | tuple[str, ...], ) -> pd.DataFrame: """Get data frame with factor scores in each period. @@ -637,8 +637,8 @@ def _get_factor_scores_data_for_single_period( params: pd.DataFrame, model: ProcessedModel, period: int, - latent_factors: tuple[str, ...], - observed_factors: tuple[str, ...], + latent_factors: list[str] | tuple[str, ...], + observed_factors: list[str] | tuple[str, ...], ) -> pd.DataFrame: """Get frame with factor scores in a given period. @@ -691,8 +691,8 @@ def _get_factor_scores_data_for_single_model_period( update_info: pd.DataFrame, aug_period: int, period: int, - latent_factors: tuple[str, ...], - observed_factors: tuple[str, ...], + latent_factors: list[str] | tuple[str, ...], + observed_factors: list[str] | tuple[str, ...], ) -> pd.DataFrame: """Get frame with factor scores in a given model period. @@ -748,8 +748,8 @@ def _get_factor_scores_data_for_multiple_periods( params: pd.DataFrame, model: ProcessedModel, periods: list[int], - latent_factors: tuple[str, ...], - observed_factors: tuple[str, ...], + latent_factors: list[str] | tuple[str, ...], + observed_factors: list[str] | tuple[str, ...], ) -> pd.DataFrame: """Get frame with factor scores in a given period. @@ -787,7 +787,7 @@ def _get_factor_scores_data_for_multiple_periods( def _process_factors( model: ProcessedModel, - factors: tuple[str, ...] | str | None, + factors: list[str] | tuple[str, ...] | str | None, ) -> tuple[tuple[str, ...], tuple[str, ...]]: """Process factors to get a tuple of tuples.""" if not factors: diff --git a/src/skillmodels/utils_plotting.py b/src/skillmodels/utils_plotting.py index bafa90fd..b66b89b8 100644 --- a/src/skillmodels/utils_plotting.py +++ b/src/skillmodels/utils_plotting.py @@ -11,8 +11,8 @@ def get_layout_kwargs( title_kwargs: dict[str, Any] | None = None, *, showlegend: bool = False, - columns: list[str] | None = None, - rows: list[str] | None = None, + columns: list[str] | tuple[str, ...] | None = None, + rows: list[str] | tuple[str, ...] 
| None = None, ) -> dict[str, Any]: """Define and update default kwargs for update_layout. @@ -45,8 +45,8 @@ def get_make_subplot_kwargs( *, sharex: bool, sharey: bool, - column_order: list[str], - row_order: list[str], + column_order: list[str] | tuple[str, ...], + row_order: list[str] | tuple[str, ...], make_subplot_kwargs: dict[str, Any] | None, add_scenes: bool = False, ) -> dict[str, Any]: diff --git a/src/skillmodels/visualize_factor_distributions.py b/src/skillmodels/visualize_factor_distributions.py index cb5561c8..a12a39f7 100644 --- a/src/skillmodels/visualize_factor_distributions.py +++ b/src/skillmodels/visualize_factor_distributions.py @@ -28,7 +28,7 @@ def combine_distribution_plots( kde_plots: dict[str, go.Figure], contour_plots: dict[tuple[str, str], go.Figure], surface_plots: dict[tuple[str, str], go.Figure] | None = None, - factor_order: tuple[str, ...] | None = None, + factor_order: list[str] | tuple[str, ...] | None = None, factor_mapping: dict[str, str] | None = None, make_subplot_kwargs: dict[str, Any] | None = None, *, @@ -167,7 +167,7 @@ def univariate_densities( model_dict: dict[str, Any], params: pd.DataFrame, period: int, - factors: tuple[str, ...] | None = None, + factors: list[str] | tuple[str, ...] | None = None, *, observed_factors: bool = False, states: pd.DataFrame | dict[str, pd.DataFrame] | list[pd.DataFrame] | None = None, @@ -277,7 +277,7 @@ def bivariate_density_contours( model_dict: dict[str, Any], params: pd.DataFrame, period: int, - factors: tuple[str, ...] | None = None, + factors: list[str] | tuple[str, ...] | None = None, *, observed_factors: bool = False, states: pd.DataFrame | dict[str, pd.DataFrame] | list[pd.DataFrame] | None = None, @@ -408,7 +408,7 @@ def bivariate_density_surfaces( model_dict: dict[str, Any], params: pd.DataFrame, period: int, - factors: tuple[str, ...] | None = None, + factors: list[str] | tuple[str, ...] | None = None, *, observed_factors: bool = False, states: pd.DataFrame | None = None, @@ -687,7 +687,7 @@ def _process_layout_kwargs_3d( def _process_factor_mapping_dist( mapper: dict[str, str] | None, - factors: tuple[str, ...], + factors: list[str] | tuple[str, ...], ) -> dict[str, str]: """Process mapper to return dictionary with old and new factor names.""" if mapper is None: @@ -700,21 +700,21 @@ def _process_factor_mapping_dist( def _get_ordered_factors( - factor_order: tuple[str, ...] | str | None, - factors: tuple[str, ...], + factor_order: list[str] | tuple[str, ...] | str | None, + factors: list[str] | tuple[str, ...], ) -> tuple[str, ...]: """Process factor orders to return tuple of strings.""" if factor_order is None: - ordered_factors = factors + ordered_factors = tuple(factors) elif isinstance(factor_order, str): ordered_factors = (factor_order,) else: - ordered_factors = factor_order + ordered_factors = tuple(factor_order) return ordered_factors def _get_factors( - factors: tuple[str, ...] | None, + factors: list[str] | tuple[str, ...] 
| None, *, observed_factors: bool, model: ProcessedModel, @@ -722,10 +722,9 @@ def _get_factors( """Proccess factor names to return tuple of strings.""" if factors is None: if observed_factors: - factors = model.labels.all_factors - else: - factors = model.labels.latent_factors - return factors + return model.labels.all_factors + return model.labels.latent_factors + return tuple(factors) def _get_data_observed_factors( diff --git a/src/skillmodels/visualize_transition_equations.py b/src/skillmodels/visualize_transition_equations.py index 1f1d5963..4203bece 100644 --- a/src/skillmodels/visualize_transition_equations.py +++ b/src/skillmodels/visualize_transition_equations.py @@ -28,8 +28,8 @@ def combine_transition_plots( plots_dict: dict[tuple[str, str], go.Figure], - column_order: tuple[str, ...] | str | None = None, - row_order: tuple[str, ...] | str | None = None, + column_order: list[str] | tuple[str, ...] | str | None = None, + row_order: list[str] | tuple[str, ...] | str | None = None, factor_mapping: dict[str, str] | None = None, make_subplot_kwargs: dict[str, Any] | None = None, *, @@ -574,8 +574,8 @@ def _process_factor_mapping_trans( def _process_orders( - columns: tuple[str, ...] | str | None, - rows: tuple[str, ...] | str | None, + columns: list[str] | tuple[str, ...] | str | None, + rows: list[str] | tuple[str, ...] | str | None, plots_dict: dict[tuple[str, str], go.Figure], ) -> tuple[tuple[str, ...], tuple[str, ...]]: """Process axes orders to return tuples of strings.""" @@ -590,7 +590,7 @@ def _process_orders( elif isinstance(columns, str): out_columns = (columns,) else: - out_columns = columns + out_columns = tuple(columns) if rows is None: seen = [] for f in plots_dict: @@ -600,5 +600,5 @@ def _process_orders( elif isinstance(rows, str): out_rows = (rows,) else: - out_rows = rows + out_rows = tuple(rows) return out_columns, out_rows diff --git a/tests/test_correlation_heatmap.py b/tests/test_correlation_heatmap.py index 50830522..1784c224 100644 --- a/tests/test_correlation_heatmap.py +++ b/tests/test_correlation_heatmap.py @@ -265,12 +265,12 @@ def test_process_factors() -> None: observed_factor = "g" factors = ["b", "d", "g"] all_factors = None - assert list("abcd") == _process_factors(model, all_factors)[0] # ty: ignore[invalid-argument-type] - assert list("efg") == _process_factors(model, all_factors)[1] # ty: ignore[invalid-argument-type] - assert [latent_factor] == _process_factors(model, latent_factor)[0] # ty: ignore[invalid-argument-type] - assert [observed_factor] == _process_factors(model, observed_factor)[1] # ty: ignore[invalid-argument-type] - assert factors[:-1] == _process_factors(model, factors)[0] # ty: ignore[invalid-argument-type] - assert [factors[-1] == _process_factors(model, factors)[1]] # ty: ignore[invalid-argument-type] + assert tuple("abcd") == _process_factors(model, all_factors)[0] # ty: ignore[invalid-argument-type] + assert tuple("efg") == _process_factors(model, all_factors)[1] # ty: ignore[invalid-argument-type] + assert (latent_factor,) == _process_factors(model, latent_factor)[0] # ty: ignore[invalid-argument-type] + assert (observed_factor,) == _process_factors(model, observed_factor)[1] # ty: ignore[invalid-argument-type] + assert tuple(factors[:-1]) == _process_factors(model, factors)[0] # ty: ignore[invalid-argument-type] + assert (factors[-1],) == _process_factors(model, factors)[1] # ty: ignore[invalid-argument-type] def test_get_mask_lower_triangle_only() -> None: From 5925303b4e621de12a02f92c6627cc070911bfd7 Mon Sep 17 00:00:00 
2001 From: Hans-Martin von Gaudecker Date: Mon, 19 Jan 2026 14:43:50 +0100 Subject: [PATCH 17/27] Dataclasses for user input. --- docs/source/getting_started/tutorial.ipynb | 4 +- .../how_to_simulate_dataset.ipynb | 6 +- .../how_to_visualize_correlations.ipynb | 12 +- ...sualize_pairwise_factor_distribution.ipynb | 28 +- ...ow_to_visualize_transition_equations.ipynb | 8 +- pixi.lock | 24 +- src/skillmodels/__init__.py | 18 +- src/skillmodels/correlation_heatmap.py | 43 +- src/skillmodels/filtered_states.py | 43 +- src/skillmodels/maximization_inputs.py | 56 +- src/skillmodels/model_spec.py | 496 ++++++++++++++++++ src/skillmodels/process_model.py | 7 +- src/skillmodels/simulate_data.py | 69 +-- .../visualize_factor_distributions.py | 46 +- .../visualize_transition_equations.py | 24 +- tests/test_constraints.py | 4 +- tests/test_filtered_states.py | 2 +- tests/test_params_index.py | 4 +- tests/test_parse_params.py | 4 +- tests/test_process_data.py | 10 +- tests/test_process_model.py | 45 +- tests/test_simulate_data.py | 28 +- tests/test_utilities.py | 12 +- tests/test_visualize_factor_distributions.py | 20 +- tests/test_visualize_transition_equations.py | 10 +- 25 files changed, 777 insertions(+), 246 deletions(-) create mode 100644 src/skillmodels/model_spec.py diff --git a/docs/source/getting_started/tutorial.ipynb b/docs/source/getting_started/tutorial.ipynb index 137c8115..345feb0f 100644 --- a/docs/source/getting_started/tutorial.ipynb +++ b/docs/source/getting_started/tutorial.ipynb @@ -42,7 +42,7 @@ "outputs": [], "source": [ "with (TEST_DATA_DIR / \"model2.yaml\").open() as y:\n", - " model_dict = yaml.load(y, Loader=yaml.SafeLoader)" + " model = yaml.load(y, Loader=yaml.SafeLoader)" ] }, { @@ -72,7 +72,7 @@ "metadata": {}, "outputs": [], "source": [ - "max_inputs = get_maximization_inputs(model_dict, data)" + "max_inputs = get_maximization_inputs(model, data)" ] }, { diff --git a/docs/source/how_to_guides/how_to_simulate_dataset.ipynb b/docs/source/how_to_guides/how_to_simulate_dataset.ipynb index 12905f6a..16b6247e 100644 --- a/docs/source/how_to_guides/how_to_simulate_dataset.ipynb +++ b/docs/source/how_to_guides/how_to_simulate_dataset.ipynb @@ -35,7 +35,7 @@ "outputs": [], "source": [ "with (TEST_DATA_DIR / \"model2.yaml\").open() as y:\n", - " model_dict = yaml.load(y, Loader=yaml.SafeLoader)\n", + " model = yaml.load(y, Loader=yaml.SafeLoader)\n", "\n", "data = pd.read_stata(TEST_DATA_DIR / \"model2_simulated_data.dta\")\n", "data = data.set_index([\"caseid\", \"period\"])\n", @@ -58,7 +58,7 @@ "outputs": [], "source": [ "initial_data = simulate_dataset(\n", - " model_dict=model_dict,\n", + " model=model,\n", " params=params,\n", " data=data,\n", ")\n", @@ -102,7 +102,7 @@ "outputs": [], "source": [ "data_after_policies = simulate_dataset(\n", - " model_dict=model_dict,\n", + " model=model,\n", " params=params,\n", " data=data,\n", ")\n", diff --git a/docs/source/how_to_guides/how_to_visualize_correlations.ipynb b/docs/source/how_to_guides/how_to_visualize_correlations.ipynb index 45157de6..692b5ec8 100644 --- a/docs/source/how_to_guides/how_to_visualize_correlations.ipynb +++ b/docs/source/how_to_guides/how_to_visualize_correlations.ipynb @@ -41,7 +41,7 @@ "outputs": [], "source": [ "with (TEST_DATA_DIR / \"model2.yaml\").open() as y:\n", - " model_dict = yaml.load(y, Loader=yaml.SafeLoader)" + " model = yaml.load(y, Loader=yaml.SafeLoader)" ] }, { @@ -71,7 +71,7 @@ "outputs": [], "source": [ "corr_meas = get_measurements_corr(\n", - " periods=0, data=data, 
model_dict=model_dict, factors=[\"fac1\", \"fac2\"]\n", + " periods=0, data=data, model=model, factors=[\"fac1\", \"fac2\"]\n", ")" ] }, @@ -109,7 +109,7 @@ "outputs": [], "source": [ "corr_score = get_scores_corr(\n", - " periods=None, params=params, data=data, model_dict=model_dict, factors=\"fac1\"\n", + " periods=None, params=params, data=data, model=model, factors=\"fac1\"\n", ")" ] }, @@ -140,7 +140,7 @@ "outputs": [], "source": [ "quasi_corr_score = get_quasi_scores_corr(\n", - " periods=None, data=data, model_dict=model_dict, factors=\"fac1\"\n", + " periods=None, data=data, model=model, factors=\"fac1\"\n", ")" ] }, @@ -192,8 +192,8 @@ "outputs": [], "source": [ "_get_parsed_params(\n", - " params=_set_index_params(process_model(model_dict), params),\n", - " model=process_model(model_dict),\n", + " params=_set_index_params(process_model(model), params),\n", + " model=process_model(model),\n", ").loadings" ] }, diff --git a/docs/source/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb b/docs/source/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb index 05b7a524..f11ee639 100644 --- a/docs/source/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb +++ b/docs/source/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb @@ -58,7 +58,7 @@ "outputs": [], "source": [ "with (TEST_DATA_DIR / \"model2.yaml\").open() as y:\n", - " model_dict = yaml.load(y, Loader=yaml.SafeLoader)\n", + " model = yaml.load(y, Loader=yaml.SafeLoader)\n", "params = pd.read_csv(REGRESSION_VAULT / \"one_stage_anchoring.csv\")\n", "params = params.set_index([\"category\", \"period\", \"name1\", \"name2\"])\n", "\n", @@ -82,13 +82,13 @@ "outputs": [], "source": [ "kde_plots = univariate_densities(\n", - " model_dict=model_dict,\n", + " model=model,\n", " data=data,\n", " params=params,\n", " period=1,\n", ")\n", "contour_plots = bivariate_density_contours(\n", - " model_dict=model_dict,\n", + " model=model,\n", " data=data,\n", " params=params,\n", " period=1,\n", @@ -103,7 +103,7 @@ "outputs": [], "source": [ "surface_plots = bivariate_density_surfaces(\n", - " model_dict=model_dict,\n", + " model=model,\n", " data=data,\n", " params=params,\n", " period=1,\n", @@ -167,9 +167,9 @@ "metadata": {}, "outputs": [], "source": [ - "sim_states = simulate_dataset(model_dict=model_dict, params=params, data=data)[\n", - " \"anchored_states\"\n", - "][\"states\"]" + "sim_states = simulate_dataset(model=model, params=params, data=data)[\"anchored_states\"][\n", + " \"states\"\n", + "]" ] }, { @@ -193,7 +193,7 @@ "outputs": [], "source": [ "sim_states_policy = simulate_dataset(\n", - " model_dict=model_dict,\n", + " model=model,\n", " params=params,\n", " data=data,\n", " policies=policies,\n", @@ -216,14 +216,14 @@ "outputs": [], "source": [ "kde_plots = univariate_densities(\n", - " model_dict=model_dict,\n", + " model=model,\n", " states={\"baseline\": sim_states, \"subsidy\": sim_states_policy},\n", " data=data,\n", " params=params,\n", " period=1,\n", ")\n", "contour_plots = bivariate_density_contours(\n", - " model_dict=model_dict,\n", + " model=model,\n", " states={\"baseline\": sim_states, \"subsidy\": sim_states_policy},\n", " data=data,\n", " params=params,\n", @@ -274,7 +274,7 @@ "metadata": {}, "outputs": [], "source": [ - "model_dict[\"observed_factors\"] = [\"obs1\"]" + "model[\"observed_factors\"] = [\"obs1\"]" ] }, { @@ -294,7 +294,7 @@ "metadata": {}, "outputs": [], "source": [ - "params = get_maximization_inputs(model_dict=model_dict, 
data=data)[\"params_template\"]\n", + "params = get_maximization_inputs(model=model, data=data)[\"params_template\"]\n", "params[\"value\"] = 0.1" ] }, @@ -306,14 +306,14 @@ "outputs": [], "source": [ "kde_plots = univariate_densities(\n", - " model_dict=model_dict,\n", + " model=model,\n", " data=data,\n", " params=params,\n", " period=1,\n", " observed_factors=True,\n", ")\n", "contour_plots = bivariate_density_contours(\n", - " model_dict=model_dict,\n", + " model=model,\n", " data=data,\n", " params=params,\n", " period=1,\n", diff --git a/docs/source/how_to_guides/how_to_visualize_transition_equations.ipynb b/docs/source/how_to_guides/how_to_visualize_transition_equations.ipynb index 9f9f8631..833b6585 100644 --- a/docs/source/how_to_guides/how_to_visualize_transition_equations.ipynb +++ b/docs/source/how_to_guides/how_to_visualize_transition_equations.ipynb @@ -48,7 +48,7 @@ "outputs": [], "source": [ "with (TEST_DATA_DIR / \"model2.yaml\").open() as y:\n", - " model_dict = yaml.load(y, Loader=yaml.SafeLoader)\n", + " model = yaml.load(y, Loader=yaml.SafeLoader)\n", "\n", "params = pd.read_csv(REGRESSION_VAULT / \"one_stage_anchoring.csv\")\n", "params = params.set_index([\"category\", \"period\", \"name1\", \"name2\"])\n", @@ -73,7 +73,7 @@ "outputs": [], "source": [ "subplots = get_transition_plots(\n", - " model_dict=model_dict,\n", + " model=model,\n", " params=params,\n", " data=data,\n", " period=0,\n", @@ -109,7 +109,7 @@ "outputs": [], "source": [ "subplots = get_transition_plots(\n", - " model_dict=model_dict,\n", + " model=model,\n", " params=params,\n", " data=data,\n", " period=0,\n", @@ -145,7 +145,7 @@ "outputs": [], "source": [ "subplots = get_transition_plots(\n", - " model_dict=model_dict,\n", + " model=model,\n", " params=params,\n", " data=data,\n", " period=1,\n", diff --git a/pixi.lock b/pixi.lock index 39c7df6c..96ae9215 100644 --- a/pixi.lock +++ b/pixi.lock @@ -274,7 +274,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/46/0c/c75bbfb967457a0b7670b8ad267bfc4fffdf341c074e0a80db06c24ccfd4/nvidia_nvjitlink_cu12-12.9.86-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl - pypi: https://files.pythonhosted.org/packages/64/b9/6ab941001c23cfb43499b5b0b7417b0bb4dfba3a29ffa2b06985422dad50/nvidia_nvshmem_cu12-3.5.19-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git#522b8c9a21226569ffd25e950e44f0c5de308c9d + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#522b8c9a21226569ffd25e950e44f0c5de308c9d - pypi: https://files.pythonhosted.org/packages/15/b2/0e62f78c0c5ba7e3d2c5945a82456f4fac76c480940f805e0b97fcbc2f65/pandas-2.3.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/b3/27/caf606ee924282fe4747ee4fd454b335a72a6e018f97eab5ff7f28199e16/sqlalchemy-2.0.45-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl @@ -507,7 +507,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: 
https://files.pythonhosted.org/packages/c6/bb/82c7dcf38070b46172a517e2334e665c5bf374a262f99a283ea454bece7c/ml_dtypes-0.5.4-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git#522b8c9a21226569ffd25e950e44f0c5de308c9d + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#522b8c9a21226569ffd25e950e44f0c5de308c9d - pypi: https://files.pythonhosted.org/packages/15/b2/0e62f78c0c5ba7e3d2c5945a82456f4fac76c480940f805e0b97fcbc2f65/pandas-2.3.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/b3/27/caf606ee924282fe4747ee4fd454b335a72a6e018f97eab5ff7f28199e16/sqlalchemy-2.0.45-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl @@ -727,7 +727,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/72/4e/1339dc6e2557a344f5ba5590872e80346f76f6cb2ac3dd16e4666e88818c/ml_dtypes-0.5.4-cp314-cp314-macosx_10_13_universal2.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git#522b8c9a21226569ffd25e950e44f0c5de308c9d + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#522b8c9a21226569ffd25e950e44f0c5de308c9d - pypi: https://files.pythonhosted.org/packages/21/00/266d6b357ad5e6d3ad55093a7e8efc7dd245f5a842b584db9f30b0f0a287/pandas-2.3.3-cp314-cp314-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/bf/e1/3ccb13c643399d22289c6a9786c1a91e3dcbb68bce4beb44926ac2c557bf/sqlalchemy-2.0.45-py3-none-any.whl @@ -953,7 +953,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/e9/93/2bfed22d2498c468f6bcd0d9f56b033eaa19f33320389314c19ef6766413/ml_dtypes-0.5.4-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git#522b8c9a21226569ffd25e950e44f0c5de308c9d + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#522b8c9a21226569ffd25e950e44f0c5de308c9d - pypi: https://files.pythonhosted.org/packages/a6/3d/124ac75fcd0ecc09b8fdccb0246ef65e35b012030defb0e0eba2cbbbe948/pandas-2.3.3-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/5a/dc/491b7661614ab97483abf2056be1deee4dc2490ecbf7bff9ab5cdbac86e1/pyreadline3-3.5.4-py3-none-any.whl @@ -1220,7 +1220,7 @@ environments: - pypi: 
https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/c6/bb/82c7dcf38070b46172a517e2334e665c5bf374a262f99a283ea454bece7c/ml_dtypes-0.5.4-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git#522b8c9a21226569ffd25e950e44f0c5de308c9d + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#522b8c9a21226569ffd25e950e44f0c5de308c9d - pypi: https://files.pythonhosted.org/packages/15/b2/0e62f78c0c5ba7e3d2c5945a82456f4fac76c480940f805e0b97fcbc2f65/pandas-2.3.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/b3/27/caf606ee924282fe4747ee4fd454b335a72a6e018f97eab5ff7f28199e16/sqlalchemy-2.0.45-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl @@ -1453,7 +1453,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/72/4e/1339dc6e2557a344f5ba5590872e80346f76f6cb2ac3dd16e4666e88818c/ml_dtypes-0.5.4-cp314-cp314-macosx_10_13_universal2.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git#522b8c9a21226569ffd25e950e44f0c5de308c9d + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#522b8c9a21226569ffd25e950e44f0c5de308c9d - pypi: https://files.pythonhosted.org/packages/21/00/266d6b357ad5e6d3ad55093a7e8efc7dd245f5a842b584db9f30b0f0a287/pandas-2.3.3-cp314-cp314-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/bf/e1/3ccb13c643399d22289c6a9786c1a91e3dcbb68bce4beb44926ac2c557bf/sqlalchemy-2.0.45-py3-none-any.whl @@ -1684,7 +1684,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/e9/93/2bfed22d2498c468f6bcd0d9f56b033eaa19f33320389314c19ef6766413/ml_dtypes-0.5.4-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git#522b8c9a21226569ffd25e950e44f0c5de308c9d + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#522b8c9a21226569ffd25e950e44f0c5de308c9d - pypi: https://files.pythonhosted.org/packages/a6/3d/124ac75fcd0ecc09b8fdccb0246ef65e35b012030defb0e0eba2cbbbe948/pandas-2.3.3-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: 
https://files.pythonhosted.org/packages/5a/dc/491b7661614ab97483abf2056be1deee4dc2490ecbf7bff9ab5cdbac86e1/pyreadline3-3.5.4-py3-none-any.whl @@ -1998,7 +1998,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/46/0c/c75bbfb967457a0b7670b8ad267bfc4fffdf341c074e0a80db06c24ccfd4/nvidia_nvjitlink_cu12-12.9.86-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl - pypi: https://files.pythonhosted.org/packages/64/b9/6ab941001c23cfb43499b5b0b7417b0bb4dfba3a29ffa2b06985422dad50/nvidia_nvshmem_cu12-3.5.19-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git#522b8c9a21226569ffd25e950e44f0c5de308c9d + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#522b8c9a21226569ffd25e950e44f0c5de308c9d - pypi: https://files.pythonhosted.org/packages/15/b2/0e62f78c0c5ba7e3d2c5945a82456f4fac76c480940f805e0b97fcbc2f65/pandas-2.3.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/b3/27/caf606ee924282fe4747ee4fd454b335a72a6e018f97eab5ff7f28199e16/sqlalchemy-2.0.45-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl @@ -2264,7 +2264,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/c6/bb/82c7dcf38070b46172a517e2334e665c5bf374a262f99a283ea454bece7c/ml_dtypes-0.5.4-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git#522b8c9a21226569ffd25e950e44f0c5de308c9d + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#522b8c9a21226569ffd25e950e44f0c5de308c9d - pypi: https://files.pythonhosted.org/packages/15/b2/0e62f78c0c5ba7e3d2c5945a82456f4fac76c480940f805e0b97fcbc2f65/pandas-2.3.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/64/20/69f2a39792a653fd64d916cd563ed79ec6e5dcfa6408c4674021d810afcf/pandas_stubs-2.3.3.251219-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl @@ -2501,7 +2501,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/72/4e/1339dc6e2557a344f5ba5590872e80346f76f6cb2ac3dd16e4666e88818c/ml_dtypes-0.5.4-cp314-cp314-macosx_10_13_universal2.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git#522b8c9a21226569ffd25e950e44f0c5de308c9d + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#522b8c9a21226569ffd25e950e44f0c5de308c9d - pypi: 
https://files.pythonhosted.org/packages/21/00/266d6b357ad5e6d3ad55093a7e8efc7dd245f5a842b584db9f30b0f0a287/pandas-2.3.3-cp314-cp314-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/64/20/69f2a39792a653fd64d916cd563ed79ec6e5dcfa6408c4674021d810afcf/pandas_stubs-2.3.3.251219-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl @@ -2736,7 +2736,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/e9/93/2bfed22d2498c468f6bcd0d9f56b033eaa19f33320389314c19ef6766413/ml_dtypes-0.5.4-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: git+https://github.com/optimagic-dev/optimagic.git#522b8c9a21226569ffd25e950e44f0c5de308c9d + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#522b8c9a21226569ffd25e950e44f0c5de308c9d - pypi: https://files.pythonhosted.org/packages/a6/3d/124ac75fcd0ecc09b8fdccb0246ef65e35b012030defb0e0eba2cbbbe948/pandas-2.3.3-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/64/20/69f2a39792a653fd64d916cd563ed79ec6e5dcfa6408c4674021d810afcf/pandas_stubs-2.3.3.251219-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/75/58/3af430d0de0b95d5adf7e576067e07d750ba76e28d142871982464fb40db/pdbp-1.8.1-py3-none-any.whl @@ -6971,7 +6971,7 @@ packages: version: 3.4.0 sha256: 69bb92469f86a1565195ece4ac0323943e83477171b91d24c35afe028a90d7cd requires_python: '>=3.8' -- pypi: git+https://github.com/optimagic-dev/optimagic.git#522b8c9a21226569ffd25e950e44f0c5de308c9d +- pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=main#522b8c9a21226569ffd25e950e44f0c5de308c9d name: optimagic version: 0.5.3.dev31+g522b8c9a2 requires_dist: diff --git a/src/skillmodels/__init__.py b/src/skillmodels/__init__.py index 901e3138..057ad719 100644 --- a/src/skillmodels/__init__.py +++ b/src/skillmodels/__init__.py @@ -9,6 +9,22 @@ from skillmodels.filtered_states import get_filtered_states from skillmodels.maximization_inputs import get_maximization_inputs +from skillmodels.model_spec import ( + AnchoringSpec, + EstimationOptionsSpec, + FactorSpec, + ModelSpec, + Normalizations, +) from skillmodels.simulate_data import simulate_dataset -__all__ = ["get_filtered_states", "get_maximization_inputs", "simulate_dataset"] +__all__ = [ + "AnchoringSpec", + "EstimationOptionsSpec", + "FactorSpec", + "ModelSpec", + "Normalizations", + "get_filtered_states", + "get_maximization_inputs", + "simulate_dataset", +] diff --git a/src/skillmodels/correlation_heatmap.py b/src/skillmodels/correlation_heatmap.py index 0286f1bc..2fc422d9 100644 --- a/src/skillmodels/correlation_heatmap.py +++ b/src/skillmodels/correlation_heatmap.py @@ -6,6 +6,7 @@ import pandas as pd from plotly import graph_objects as go +from skillmodels.model_spec import ModelSpec # noqa: TC001 from skillmodels.process_data import pre_process_data from skillmodels.process_model import process_model @@ -136,7 +137,7 @@ def plot_correlation_heatmap( def get_measurements_corr( data: pd.DataFrame, - model_dict: dict, + model: dict | ModelSpec, factors: list[str] | tuple[str, ...] 
| str | None, periods: float | list[int] | None, ) -> pd.DataFrame: @@ -147,8 +148,8 @@ def get_measurements_corr( Args: data: DataFrame with observed measurements. - model_dict: Dictionary of model attributes to be passed to process_model - and extract measurements for each period. + model: The model specification, either as a dict or ModelSpec instance. + See: :ref:`model_specs` factors: List of factors to retrieve measurements for. If None, then calculate correlations of measurements of all factors. periods: If int, the period within which to @@ -161,11 +162,11 @@ def get_measurements_corr( """ data = data.copy(deep=True) - model = process_model(model_dict) - periods = _process_periods(periods, model) + processed_model = process_model(model) + periods = _process_periods(periods, processed_model) processed_data = pre_process_data(data, periods) - latent_factors, observed_factors = _process_factors(model, factors) - update_info_by_period = _get_update_info_for_periods(model) + latent_factors, observed_factors = _process_factors(processed_model, factors) + update_info_by_period = _get_update_info_for_periods(processed_model) df = _get_measurement_data( data=processed_data, update_info_by_period=update_info_by_period, @@ -178,7 +179,7 @@ def get_measurements_corr( def get_quasi_scores_corr( data: pd.DataFrame, - model_dict: dict, + model: dict | ModelSpec, factors: list[str] | tuple[str, ...] | str | None, periods: float | list[int] | None, ) -> pd.DataFrame: @@ -193,8 +194,8 @@ def get_quasi_scores_corr( Args: data: DataFrame with observed measurements. - model_dict: Dictionary of model attributes to be passed to process_model - and extract measurements for each period. + model: The model specification, either as a dict or ModelSpec instance. + See: :ref:`model_specs` factors: List of factors to retrieve measurements for. If None, then calculate correlations of measurements of all factors. periods: If int, the period within which to @@ -206,11 +207,11 @@ def get_quasi_scores_corr( """ data = data.copy(deep=True) - model = process_model(model_dict) - periods = _process_periods(periods, model) + processed_model = process_model(model) + periods = _process_periods(periods, processed_model) processed_data = pre_process_data(data, periods) - latent_factors, observed_factors = _process_factors(model, factors) - update_info = _get_update_info_for_periods(model) + latent_factors, observed_factors = _process_factors(processed_model, factors) + update_info = _get_update_info_for_periods(processed_model) df = _get_quasi_factor_scores_data( data=processed_data, update_info_by_period=update_info, @@ -224,7 +225,7 @@ def get_quasi_scores_corr( def get_scores_corr( data: pd.DataFrame, params: pd.DataFrame, - model_dict: dict, + model: dict | ModelSpec, factors: list[str] | tuple[str, ...] | str | None, periods: float | list[int] | None, ) -> pd.DataFrame: @@ -238,8 +239,8 @@ def get_scores_corr( Args: data: DataFrame with observed measurements. params: DataFrame with estimated model parameters. - model_dict: Dictionary of model attributes to be passed to process_model - and extract measurements for each period. + model: The model specification, either as a dict or ModelSpec instance. + See: :ref:`model_specs` factors: List of factors to retrieve measurements for. If None, then calculate correlations of measurements of all factors.
periods: If int, the period within which to @@ -251,15 +252,15 @@ def get_scores_corr( """ data = data.copy(deep=True) - model = process_model(model_dict) - periods = _process_periods(periods, model) + processed_model = process_model(model) + periods = _process_periods(periods, processed_model) processed_data = pre_process_data(data, periods) - latent_factors, observed_factors = _process_factors(model, factors) + latent_factors, observed_factors = _process_factors(processed_model, factors) params = params.loc[["controls", "loadings"]] df = _get_factor_scores_data( data=processed_data, params=params, - model=model, + model=processed_model, periods=periods, latent_factors=latent_factors, observed_factors=observed_factors, diff --git a/src/skillmodels/filtered_states.py b/src/skillmodels/filtered_states.py index 66e56f0e..e6fc75c0 100644 --- a/src/skillmodels/filtered_states.py +++ b/src/skillmodels/filtered_states.py @@ -6,6 +6,7 @@ import numpy as np from skillmodels.maximization_inputs import get_maximization_inputs +from skillmodels.model_spec import ModelSpec # noqa: TC001 from skillmodels.params_index import get_params_index from skillmodels.parse_params import create_parsing_info, parse_params from skillmodels.process_debug_data import create_state_ranges @@ -16,29 +17,29 @@ def get_filtered_states( - model_dict: dict, + model: dict | ModelSpec, data: pd.DataFrame, params: pd.DataFrame, ) -> dict[str, dict[str, Any]]: """Compute filtered latent states given data and estimated parameters.""" - max_inputs = get_maximization_inputs(model_dict=model_dict, data=data) + max_inputs = get_maximization_inputs(model=model, data=data) params = params.loc[max_inputs["params_template"].index] debug_loglike = max_inputs["debug_loglike"] debug_data = debug_loglike(params) unanchored_states_df = debug_data["filtered_states"] unanchored_ranges = debug_data["state_ranges"] - model = process_model(model_dict) + processed_model = process_model(model) anchored_states_df = anchor_states_df( states_df=unanchored_states_df, - model_dict=model_dict, + model=model, params=params, use_aug_period=True, ) anchored_ranges = create_state_ranges( filtered_states=anchored_states_df, - factors=model.labels.latent_factors, + factors=processed_model.labels.latent_factors, ) return { @@ -55,7 +56,7 @@ def get_filtered_states( def anchor_states_df( states_df: pd.DataFrame, - model_dict: dict, + model: dict | ModelSpec, params: pd.DataFrame, *, use_aug_period: bool, @@ -71,41 +72,41 @@ def anchor_states_df( as an internal function that only works with jax objects). 
""" - model = process_model(model_dict) + processed_model = process_model(model) p_index = get_params_index( - update_info=model.update_info, - labels=model.labels, - dimensions=model.dimensions, - transition_info=model.transition_info, - endogenous_factors_info=model.endogenous_factors_info, + update_info=processed_model.update_info, + labels=processed_model.labels, + dimensions=processed_model.dimensions, + transition_info=processed_model.transition_info, + endogenous_factors_info=processed_model.endogenous_factors_info, ) params = params.loc[p_index] parsing_info = create_parsing_info( params_index=p_index, - update_info=model.update_info, - labels=model.labels, - anchoring=model.anchoring, - has_endogenous_factors=model.endogenous_factors_info.has_endogenous_factors, + update_info=processed_model.update_info, + labels=processed_model.labels, + anchoring=processed_model.anchoring, + has_endogenous_factors=processed_model.endogenous_factors_info.has_endogenous_factors, ) *_, parsed_params = parse_params( params=jnp.array(params["value"].to_numpy()), parsing_info=parsing_info, - dimensions=model.dimensions, - labels=model.labels, + dimensions=processed_model.dimensions, + labels=processed_model.labels, n_obs=1, ) - n_latent = model.dimensions.n_latent_factors + n_latent = processed_model.dimensions.n_latent_factors _scaling_factors = np.array(parsed_params.anchoring_scaling_factors[:, :n_latent]) _constants = np.array(parsed_params.anchoring_constants[:, :n_latent]) if use_aug_period: period_arr = states_df["aug_period"].to_numpy() - ap_to_p = model.labels.aug_periods_to_periods + ap_to_p = processed_model.labels.aug_periods_to_periods scaling_factors = np.empty(shape=(len(ap_to_p), n_latent)) constants = np.empty(shape=(len(ap_to_p), n_latent)) for ap, p in ap_to_p.items(): @@ -120,7 +121,7 @@ def anchor_states_df( constants_arr = constants[period_arr] out = states_df.copy(deep=True) - for pos, factor in enumerate(model.labels.latent_factors): + for pos, factor in enumerate(processed_model.labels.latent_factors): out[factor] = constants_arr[:, pos] + states_df[factor] * scaling_arr[:, pos] return out[states_df.columns] diff --git a/src/skillmodels/maximization_inputs.py b/src/skillmodels/maximization_inputs.py index 028f2ba4..97dc6f82 100644 --- a/src/skillmodels/maximization_inputs.py +++ b/src/skillmodels/maximization_inputs.py @@ -20,6 +20,7 @@ get_constraints_dicts, ) from skillmodels.kalman_filters import calculate_sigma_scaling_factor_and_weights +from skillmodels.model_spec import ModelSpec # noqa: TC001 from skillmodels.params_index import get_params_index from skillmodels.parse_params import create_parsing_info from skillmodels.process_data import process_data @@ -34,14 +35,15 @@ def get_maximization_inputs( - model_dict: dict, + model: dict | ModelSpec, data: pd.DataFrame, split_dataset: int = 1, ) -> dict[str, Any]: """Create inputs for optimagic's maximize function. Args: - model_dict: The model specification. See: :ref:`model_specs` + model: The model specification, either as a dict or ModelSpec instance. + See: :ref:`model_specs` data: dataset in long format. split_dataset(Int): Controls into how many sclices to split the dataset during the gradient computation. 
@@ -68,34 +70,34 @@ def get_maximization_inputs( endogenous factors, we double up the number of periods in order to add """ - model = process_model(model_dict) + processed_model = process_model(model) p_index = get_params_index( - update_info=model.update_info, - labels=model.labels, - dimensions=model.dimensions, - transition_info=model.transition_info, - endogenous_factors_info=model.endogenous_factors_info, + update_info=processed_model.update_info, + labels=processed_model.labels, + dimensions=processed_model.dimensions, + transition_info=processed_model.transition_info, + endogenous_factors_info=processed_model.endogenous_factors_info, ) parsing_info = create_parsing_info( params_index=p_index, - update_info=model.update_info, - labels=model.labels, - anchoring=model.anchoring, - has_endogenous_factors=model.endogenous_factors_info.has_endogenous_factors, + update_info=processed_model.update_info, + labels=processed_model.labels, + anchoring=processed_model.anchoring, + has_endogenous_factors=processed_model.endogenous_factors_info.has_endogenous_factors, ) processed_data = process_data( df=data, - has_endogenous_factors=model.endogenous_factors_info.has_endogenous_factors, - labels=model.labels, - update_info=model.update_info, - anchoring_info=model.anchoring, + has_endogenous_factors=processed_model.endogenous_factors_info.has_endogenous_factors, + labels=processed_model.labels, + update_info=processed_model.update_info, + anchoring_info=processed_model.anchoring, purpose="estimation", ) sigma_scaling_factor, sigma_weights = calculate_sigma_scaling_factor_and_weights( - model.dimensions.n_latent_factors, - model.estimation_options.sigma_points_scale, + processed_model.dimensions.n_latent_factors, + processed_model.estimation_options.sigma_points_scale, ) partialed_get_jnp_params_vec = functools.partial( @@ -115,7 +117,7 @@ def get_maximization_inputs( measurements=processed_data["measurements"], controls=processed_data["controls"], observed_factors=processed_data["observed_factors"], - model=model, + model=processed_model, sigma_weights=sigma_weights, sigma_scaling_factor=sigma_scaling_factor, ) @@ -165,15 +167,15 @@ def debug_loglike(params: pd.DataFrame) -> dict[str, Any]: jax_output = partialed_loglikes["debug_ll"](params_vec) tmp = _to_numpy(jax_output) tmp["value"] = float(tmp["value"]) - return process_debug_data(debug_data=tmp, model=model) + return process_debug_data(debug_data=tmp, model=processed_model) _constraints_dicts = get_constraints_dicts( - dimensions=model.dimensions, - labels=model.labels, - anchoring_info=model.anchoring, - update_info=model.update_info, - normalizations=model.normalizations, - endogenous_factors_info=model.endogenous_factors_info, + dimensions=processed_model.dimensions, + labels=processed_model.labels, + anchoring_info=processed_model.anchoring, + update_info=processed_model.update_info, + normalizations=processed_model.normalizations, + endogenous_factors_info=processed_model.endogenous_factors_info, ) constraints = constraints_dicts_to_om(_constraints_dicts) @@ -181,7 +183,7 @@ def debug_loglike(params: pd.DataFrame) -> dict[str, Any]: params_template = pd.DataFrame(columns=["value"], index=p_index) params_template = add_bounds( params=params_template, - bounds_distance=model.estimation_options.bounds_distance, + bounds_distance=processed_model.estimation_options.bounds_distance, ) params_template = enforce_fixed_constraints( params_template=params_template, diff --git a/src/skillmodels/model_spec.py b/src/skillmodels/model_spec.py new file 
mode 100644 index 00000000..8de6f1ae --- /dev/null +++ b/src/skillmodels/model_spec.py @@ -0,0 +1,496 @@ +"""Strongly-typed model specification dataclasses. + +This module provides frozen dataclasses for defining model specifications +in a type-safe, immutable manner. All collections use immutable types +(tuples, frozendict) to ensure the specification cannot be accidentally modified. +""" + +from dataclasses import dataclass, field +from types import MappingProxyType +from typing import TYPE_CHECKING, Self + +from frozendict import frozendict + +if TYPE_CHECKING: + from collections.abc import Callable + + +@dataclass(frozen=True) +class Normalizations: + """Normalizations for factor identification. + + Attributes: + loadings: Per-period loading normalizations. Each element is a mapping + from variable name to fixed loading value. + intercepts: Per-period intercept normalizations. Each element is a mapping + from variable name to fixed intercept value. + + """ + + loadings: tuple[frozendict[str, float], ...] + intercepts: tuple[frozendict[str, float], ...] + + @classmethod + def from_dict(cls, d: dict) -> Self: + """Create Normalizations from a dictionary specification.""" + return cls( + loadings=tuple(frozendict(x) for x in d["loadings"]), + intercepts=tuple(frozendict(x) for x in d["intercepts"]), + ) + + def to_dict(self) -> dict: + """Convert to dictionary for backwards compatibility.""" + return { + "loadings": [dict(x) for x in self.loadings], + "intercepts": [dict(x) for x in self.intercepts], + } + + +@dataclass(frozen=True) +class FactorSpec: + """Specification for a single latent factor. + + Attributes: + measurements: Per-period measurement variables. Each element is a tuple + of variable names measured in that period. + normalizations: Identification normalizations for this factor. + is_endogenous: Whether this factor is endogenous. + is_correction: Whether this factor is a correction factor. + transition_function: Optional transition function for this factor. + Can be a string (referencing built-in functions) or a callable. + + """ + + measurements: tuple[tuple[str, ...], ...] 
+ normalizations: Normalizations | None = None + is_endogenous: bool = False + is_correction: bool = False + transition_function: str | Callable | None = None + + @classmethod + def from_dict(cls, d: dict) -> Self: + """Create FactorSpec from a dictionary specification.""" + normalizations = None + if "normalizations" in d: + normalizations = Normalizations.from_dict(d["normalizations"]) + + return cls( + measurements=tuple(tuple(m) for m in d["measurements"]), + normalizations=normalizations, + is_endogenous=d.get("is_endogenous", False), + is_correction=d.get("is_correction", False), + transition_function=d.get("transition_function"), + ) + + def to_dict(self) -> dict: + """Convert to dictionary for backwards compatibility.""" + result: dict = { + "measurements": [list(m) for m in self.measurements], + "is_endogenous": self.is_endogenous, + "is_correction": self.is_correction, + } + if self.normalizations is not None: + result["normalizations"] = self.normalizations.to_dict() + if self.transition_function is not None: + result["transition_function"] = self.transition_function + return result + + def with_transition_function(self, func: str | Callable) -> Self: + """Return a new FactorSpec with the given transition function.""" + return type(self)( + measurements=self.measurements, + normalizations=self.normalizations, + is_endogenous=self.is_endogenous, + is_correction=self.is_correction, + transition_function=func, + ) + + def with_normalizations(self, normalizations: Normalizations) -> Self: + """Return a new FactorSpec with the given normalizations.""" + return type(self)( + measurements=self.measurements, + normalizations=normalizations, + is_endogenous=self.is_endogenous, + is_correction=self.is_correction, + transition_function=self.transition_function, + ) + + +@dataclass(frozen=True) +class EstimationOptionsSpec: + """Options for model estimation. + + Attributes: + robust_bounds: Whether to use robust bounds. + bounds_distance: Distance for bounds. + n_mixtures: Number of mixture components. + sigma_points_scale: Scaling factor for sigma points in unscented transform. + clipping_lower_bound: Lower bound for soft clipping. + clipping_upper_bound: Upper bound for soft clipping (None for no upper bound). + clipping_lower_hardness: Hardness of lower clipping. + clipping_upper_hardness: Hardness of upper clipping. 
+ + """ + + robust_bounds: bool = True + bounds_distance: float = 1e-3 + n_mixtures: int = 1 + sigma_points_scale: float = 2 + clipping_lower_bound: float = -1e30 + clipping_upper_bound: float | None = None + clipping_lower_hardness: float = 1 + clipping_upper_hardness: float = 1 + + @classmethod + def from_dict(cls, d: dict) -> Self: + """Create EstimationOptionsSpec from a dictionary specification.""" + return cls( + robust_bounds=d.get("robust_bounds", True), + bounds_distance=d.get("bounds_distance", 1e-3), + n_mixtures=d.get("n_mixtures", 1), + sigma_points_scale=d.get("sigma_points_scale", 2), + clipping_lower_bound=d.get("clipping_lower_bound", -1e30), + clipping_upper_bound=d.get("clipping_upper_bound"), + clipping_lower_hardness=d.get("clipping_lower_hardness", 1), + clipping_upper_hardness=d.get("clipping_upper_hardness", 1), + ) + + def to_dict(self) -> dict: + """Convert to dictionary for backwards compatibility.""" + result = { + "robust_bounds": self.robust_bounds, + "bounds_distance": self.bounds_distance, + "n_mixtures": self.n_mixtures, + "sigma_points_scale": self.sigma_points_scale, + "clipping_lower_bound": self.clipping_lower_bound, + "clipping_lower_hardness": self.clipping_lower_hardness, + "clipping_upper_hardness": self.clipping_upper_hardness, + } + if self.clipping_upper_bound is not None: + result["clipping_upper_bound"] = self.clipping_upper_bound + return result + + +def _default_empty_frozendict() -> frozendict[str, str]: + return frozendict({}) + + +@dataclass(frozen=True) +class AnchoringSpec: + """Specification for anchoring latent factors to outcomes. + + Attributes: + outcomes: Mapping from factor names to outcome variable names. + free_controls: Whether control coefficients are free in anchoring equations. + free_constant: Whether the constant is free in anchoring equations. + free_loadings: Whether loadings are free in anchoring equations. + ignore_constant_when_anchoring: Whether to ignore constant when anchoring. + + """ + + outcomes: frozendict[str, str] = field(default_factory=_default_empty_frozendict) + free_controls: bool = False + free_constant: bool = False + free_loadings: bool = False + ignore_constant_when_anchoring: bool = False + + @classmethod + def from_dict(cls, d: dict) -> Self: + """Create AnchoringSpec from a dictionary specification.""" + outcomes = d.get("outcomes", {}) + ignore_constant = d.get("ignore_constant_when_anchoring", False) + return cls( + outcomes=frozendict(outcomes), + free_controls=d.get("free_controls", False), + free_constant=d.get("free_constant", False), + free_loadings=d.get("free_loadings", False), + ignore_constant_when_anchoring=ignore_constant, + ) + + def to_dict(self) -> dict: + """Convert to dictionary for backwards compatibility.""" + return { + "outcomes": dict(self.outcomes), + "free_controls": self.free_controls, + "free_constant": self.free_constant, + "free_loadings": self.free_loadings, + "ignore_constant_when_anchoring": self.ignore_constant_when_anchoring, + } + + +@dataclass(frozen=True, init=False) +class ModelSpec: + """Complete model specification. + + This is the main strongly-typed container for model specifications. + All fields are immutable to prevent accidental modifications. + + Attributes: + factors: Mapping from factor name to FactorSpec. + observed_factors: Tuple of observed factor variable names. + controls: Tuple of control variable names. + stagemap: Stage mapping for transition functions. + anchoring: Anchoring specification. + estimation_options: Estimation tuning parameters. 
+ + """ + + _factors: MappingProxyType[str, FactorSpec] + observed_factors: tuple[str, ...] = () + controls: tuple[str, ...] = () + stagemap: tuple[int, ...] | None = None + anchoring: AnchoringSpec | None = None + estimation_options: EstimationOptionsSpec | None = None + + def __init__( + self, + factors: dict[str, FactorSpec] | MappingProxyType[str, FactorSpec], + observed_factors: tuple[str, ...] = (), + controls: tuple[str, ...] = (), + stagemap: tuple[int, ...] | None = None, + anchoring: AnchoringSpec | None = None, + estimation_options: EstimationOptionsSpec | None = None, + ) -> None: + """Create ModelSpec, wrapping factors dict in MappingProxyType.""" + if isinstance(factors, MappingProxyType): + object.__setattr__(self, "_factors", factors) + else: + object.__setattr__(self, "_factors", MappingProxyType(factors)) + object.__setattr__(self, "observed_factors", observed_factors) + object.__setattr__(self, "controls", controls) + object.__setattr__(self, "stagemap", stagemap) + object.__setattr__(self, "anchoring", anchoring) + object.__setattr__(self, "estimation_options", estimation_options) + + @property + def factors(self) -> MappingProxyType[str, FactorSpec]: + """Immutable mapping of factor names to specifications.""" + return self._factors + + @classmethod + def from_dict(cls, d: dict) -> Self: + """Create ModelSpec from a dictionary specification. + + Args: + d: Dictionary with keys 'factors', and optionally 'observed_factors', + 'controls', 'stagemap', 'anchoring', and 'estimation_options'. + + Returns: + Immutable ModelSpec instance. + + """ + factors = { + name: FactorSpec.from_dict(spec) for name, spec in d["factors"].items() + } + observed = d.get("observed_factors", []) + controls = d.get("controls", []) + stagemap = d.get("stagemap") + anchoring = None + if "anchoring" in d: + anchoring = AnchoringSpec.from_dict(d["anchoring"]) + estimation = None + if "estimation_options" in d: + estimation = EstimationOptionsSpec.from_dict(d["estimation_options"]) + + return cls( + factors=MappingProxyType(factors), + observed_factors=tuple(observed), + controls=tuple(controls), + stagemap=tuple(stagemap) if stagemap is not None else None, + anchoring=anchoring, + estimation_options=estimation, + ) + + def to_dict(self) -> dict: + """Convert to dictionary for backwards compatibility with skillmodels. + + Returns: + Mutable dictionary in the format expected by skillmodels. + + """ + result: dict = { + "factors": {name: spec.to_dict() for name, spec in self.factors.items()}, + "observed_factors": list(self.observed_factors), + } + if self.controls: + result["controls"] = list(self.controls) + if self.stagemap is not None: + result["stagemap"] = list(self.stagemap) + if self.anchoring is not None: + result["anchoring"] = self.anchoring.to_dict() + if self.estimation_options is not None: + result["estimation_options"] = self.estimation_options.to_dict() + return result + + def with_transition_functions( + self, + transition_functions: dict[str, str | Callable], + ) -> Self: + """Return a new ModelSpec with transition functions added to factors. + + Args: + transition_functions: Mapping from factor name to transition function. + Can be strings (referencing built-in functions) or callables. + + Returns: + New ModelSpec with transition functions set on factors. + + Raises: + ValueError: If transition_functions keys don't match factor names. 
+ + """ + if set(transition_functions.keys()) != set(self.factors.keys()): + msg = ( + f"Transition function keys {set(transition_functions.keys())} " + f"do not match factor keys {set(self.factors.keys())}" + ) + raise ValueError(msg) + + new_factors = { + name: spec.with_transition_function(transition_functions[name]) + for name, spec in self.factors.items() + } + return type(self)( + factors=MappingProxyType(new_factors), + observed_factors=self.observed_factors, + controls=self.controls, + stagemap=self.stagemap, + anchoring=self.anchoring, + estimation_options=self.estimation_options, + ) + + def with_added_factor( + self, + name: str, + spec: FactorSpec, + ) -> Self: + """Return a new ModelSpec with an additional factor. + + Args: + name: Name of the new factor. + spec: Specification for the new factor. + + Returns: + New ModelSpec with the additional factor. + + """ + new_factors = dict(self.factors) + new_factors[name] = spec + return type(self)( + factors=MappingProxyType(new_factors), + observed_factors=self.observed_factors, + controls=self.controls, + stagemap=self.stagemap, + anchoring=self.anchoring, + estimation_options=self.estimation_options, + ) + + def with_added_observed_factors( + self, + *names: str, + ) -> Self: + """Return a new ModelSpec with additional observed factors. + + Args: + *names: Names of additional observed factors. + + Returns: + New ModelSpec with the additional observed factors. + + """ + return type(self)( + factors=self.factors, + observed_factors=self.observed_factors + names, + controls=self.controls, + stagemap=self.stagemap, + anchoring=self.anchoring, + estimation_options=self.estimation_options, + ) + + def with_estimation_options( + self, + estimation_options: EstimationOptionsSpec, + ) -> Self: + """Return a new ModelSpec with the given estimation options. + + Args: + estimation_options: New estimation options. + + Returns: + New ModelSpec with the updated estimation options. + + """ + return type(self)( + factors=self.factors, + observed_factors=self.observed_factors, + controls=self.controls, + stagemap=self.stagemap, + anchoring=self.anchoring, + estimation_options=estimation_options, + ) + + def with_anchoring( + self, + anchoring: AnchoringSpec, + ) -> Self: + """Return a new ModelSpec with the given anchoring specification. + + Args: + anchoring: New anchoring specification. + + Returns: + New ModelSpec with the updated anchoring. + + """ + return type(self)( + factors=self.factors, + observed_factors=self.observed_factors, + controls=self.controls, + stagemap=self.stagemap, + anchoring=anchoring, + estimation_options=self.estimation_options, + ) + + def with_controls( + self, + controls: tuple[str, ...], + ) -> Self: + """Return a new ModelSpec with the given controls. + + Args: + controls: New control variable names. + + Returns: + New ModelSpec with the updated controls. + + """ + return type(self)( + factors=self.factors, + observed_factors=self.observed_factors, + controls=controls, + stagemap=self.stagemap, + anchoring=self.anchoring, + estimation_options=self.estimation_options, + ) + + def with_stagemap( + self, + stagemap: tuple[int, ...], + ) -> Self: + """Return a new ModelSpec with the given stagemap. + + Args: + stagemap: New stage mapping. + + Returns: + New ModelSpec with the updated stagemap. 
+ + """ + return type(self)( + factors=self.factors, + observed_factors=self.observed_factors, + controls=self.controls, + stagemap=stagemap, + anchoring=self.anchoring, + estimation_options=self.estimation_options, + ) diff --git a/src/skillmodels/process_model.py b/src/skillmodels/process_model.py index 9ceb17ed..1048631f 100644 --- a/src/skillmodels/process_model.py +++ b/src/skillmodels/process_model.py @@ -16,6 +16,7 @@ import skillmodels.transition_functions as t_f_module from skillmodels.check_model import check_model, check_stagemap from skillmodels.decorators import extract_params, jax_array_output +from skillmodels.model_spec import ModelSpec from skillmodels.types import ( Anchoring, Dimensions, @@ -35,7 +36,7 @@ pd.set_option("future.no_silent_downcasting", True) # noqa: FBT003 -def process_model(model_dict: dict) -> ProcessedModel: +def process_model(model: dict | ModelSpec) -> ProcessedModel: """Check, clean, extend and transform the model specs. Check the completeness, consistency and validity of the model specifications. @@ -43,7 +44,8 @@ def process_model(model_dict: dict) -> ProcessedModel: Set default values and extend the model specification where necessary. Args: - model_dict: The model specification. See: :ref:`model_specs` + model: The model specification, either as a dict or ModelSpec instance. + See: :ref:`model_specs` Returns: dict: nested dictionary of model specs. It has the following entries: @@ -59,6 +61,7 @@ def process_model(model_dict: dict) -> ProcessedModel: loadings and intercepts for each factor. See :ref:`normalizations`. """ + model_dict = model.to_dict() if isinstance(model, ModelSpec) else model has_endogenous_factors = get_has_endogenous_factors(model_dict["factors"]) dims = get_dimensions( model_dict=model_dict, has_endogenous_factors=has_endogenous_factors diff --git a/src/skillmodels/simulate_data.py b/src/skillmodels/simulate_data.py index f294cd09..9f884d64 100644 --- a/src/skillmodels/simulate_data.py +++ b/src/skillmodels/simulate_data.py @@ -23,6 +23,7 @@ TransitionInfo, ) from skillmodels.kalman_filters import transform_sigma_points +from skillmodels.model_spec import ModelSpec from skillmodels.params_index import get_params_index from skillmodels.parse_params import create_parsing_info, parse_params from skillmodels.process_data import process_data @@ -32,7 +33,7 @@ def simulate_dataset( - model_dict: dict, + model: dict | ModelSpec, params: pd.DataFrame, n_obs: int | None = None, data: pd.DataFrame | None = None, @@ -41,7 +42,8 @@ def simulate_dataset( """Simulate datasets generated by a latent factor model. Args: - model_dict: The model specification. See: :ref:`model_specs` + model: The model specification, either as a dict or ModelSpec instance. + See: :ref:`model_specs` params: DataFrame with model parameters. 
n_obs: Number of simulated individuals data: Dataset in the same format as for estimation, containing @@ -60,23 +62,24 @@ def simulate_dataset( if data is None and n_obs is None: raise ValueError("Either `data` or `n_obs` has to be provided.") - model = process_model(model_dict) + model_dict = model.to_dict() if isinstance(model, ModelSpec) else model + processed_model = process_model(model_dict) - if model.labels.observed_factors and data is None: + if processed_model.labels.observed_factors and data is None: raise ValueError( "To simulate a model with observed factors, data cannot be None.", ) - if model.labels.controls != ["constant"] and data is None: + if processed_model.labels.controls != ["constant"] and data is None: raise ValueError("To simulate a model with controls, data cannot be None.") if data is not None: processed_data = process_data( df=data, - has_endogenous_factors=model.endogenous_factors_info.has_endogenous_factors, - labels=model.labels, - update_info=model.update_info, - anchoring_info=model.anchoring, + has_endogenous_factors=processed_model.endogenous_factors_info.has_endogenous_factors, + labels=processed_model.labels, + update_info=processed_model.update_info, + anchoring_info=processed_model.anchoring, purpose="simulation", ) control_data = processed_data["controls"] @@ -93,25 +96,25 @@ def simulate_dataset( else: control_data = jnp.ones((n_obs, 1)) - n_periods = model.dimensions.n_periods + n_periods = processed_model.dimensions.n_periods observed_factors = jnp.zeros((n_periods, n_obs, 0)) params_index = get_params_index( - update_info=model.update_info, - labels=model.labels, - dimensions=model.dimensions, - transition_info=model.transition_info, - endogenous_factors_info=model.endogenous_factors_info, + update_info=processed_model.update_info, + labels=processed_model.labels, + dimensions=processed_model.dimensions, + transition_info=processed_model.transition_info, + endogenous_factors_info=processed_model.endogenous_factors_info, ) params = params.reindex(params_index) parsing_info = create_parsing_info( params_index=params.index, # ty: ignore[invalid-argument-type] - update_info=model.update_info, - labels=model.labels, - anchoring=model.anchoring, - has_endogenous_factors=model.endogenous_factors_info.has_endogenous_factors, + update_info=processed_model.update_info, + labels=processed_model.labels, + anchoring=processed_model.anchoring, + has_endogenous_factors=processed_model.endogenous_factors_info.has_endogenous_factors, ) if n_obs is None: @@ -119,8 +122,8 @@ def simulate_dataset( states, covs, log_weights, parsed_params = parse_params( params=jnp.array(params["value"].to_numpy()), parsing_info=parsing_info, - dimensions=model.dimensions, - labels=model.labels, + dimensions=processed_model.dimensions, + labels=processed_model.labels, n_obs=n_obs, ) @@ -129,29 +132,29 @@ def simulate_dataset( covs=covs, log_weights=log_weights, parsed_params=parsed_params, - labels=model.labels, - dimensions=model.dimensions, + labels=processed_model.labels, + dimensions=processed_model.dimensions, n_obs=n_obs, - has_endogenous_factors=model.endogenous_factors_info.has_endogenous_factors, - update_info=model.update_info, + has_endogenous_factors=processed_model.endogenous_factors_info.has_endogenous_factors, + update_info=processed_model.update_info, control_data=control_data, observed_factors=observed_factors, policies=policies, # ty: ignore[invalid-argument-type] - transition_info=model.transition_info, + transition_info=processed_model.transition_info, ) # Create 
collapsed versions with user-facing periods latent_data = _collapse_aug_periods_to_periods( df=aug_latent_data, - factors=model.labels.latent_factors, - aug_periods_to_periods=model.labels.aug_periods_to_periods, - endogenous_factors_info=model.endogenous_factors_info, + factors=processed_model.labels.latent_factors, + aug_periods_to_periods=processed_model.labels.aug_periods_to_periods, + endogenous_factors_info=processed_model.endogenous_factors_info, ) # Anchor the collapsed version (anchoring only works with period, not aug_period) anchored_latent_data = anchor_states_df( states_df=latent_data, - model_dict=model_dict, + model=model_dict, params=params, use_aug_period=False, ) @@ -161,21 +164,21 @@ def simulate_dataset( "states": latent_data, "state_ranges": create_state_ranges( latent_data, - model.labels.latent_factors, + processed_model.labels.latent_factors, ), }, "anchored_states": { "states": anchored_latent_data, "state_ranges": create_state_ranges( anchored_latent_data, - model.labels.latent_factors, + processed_model.labels.latent_factors, ), }, "aug_unanchored_states": { "states": aug_latent_data, "state_ranges": create_state_ranges( aug_latent_data, - model.labels.latent_factors, + processed_model.labels.latent_factors, ), }, "aug_measurements": aug_measurements, diff --git a/src/skillmodels/visualize_factor_distributions.py b/src/skillmodels/visualize_factor_distributions.py index a12a39f7..450ce2bc 100644 --- a/src/skillmodels/visualize_factor_distributions.py +++ b/src/skillmodels/visualize_factor_distributions.py @@ -13,6 +13,7 @@ from scipy.stats import gaussian_kde from skillmodels.filtered_states import get_filtered_states +from skillmodels.model_spec import ModelSpec # noqa: TC001 from skillmodels.process_model import process_model from skillmodels.utils_plotting import get_layout_kwargs, get_make_subplot_kwargs @@ -164,7 +165,7 @@ def combine_distribution_plots( def univariate_densities( data: pd.DataFrame, - model_dict: dict[str, Any], + model: dict[str, Any] | ModelSpec, params: pd.DataFrame, period: int, factors: list[str] | tuple[str, ...] | None = None, @@ -187,7 +188,8 @@ def univariate_densities( Args: data: Model estimation input data. - model_dict: Dictionary with model specifications. + model: The model specification, either as a dict or ModelSpec instance. + See: :ref:`model_specs` params: DataFrame with estimated parameter values. period: Model period for which to plot the distributions for. factors: List of factors for which to plot the densities. @@ -196,7 +198,7 @@ def univariate_densities( states: List or dictionary with tidy DataFrames with filtered or simulated states or only one DataFrame with filtered or simulated states. If None, retrieve data frame with filtered - states using model_dict and data. States are used to estimate the state + states using model and data. States are used to estimate the state ranges in each period (if state_ranges are not given explicitly) and to estimate the distribution of the latent factors. show_hist: Add histogram to the distplot. 
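The states argument can be omitted or supplied explicitly; a short sketch of both call patterns, assuming model, params, and data are prepared as in the sketch above (the simulate_dataset output keys are taken from the tests below):

from skillmodels import simulate_dataset
from skillmodels.visualize_factor_distributions import univariate_densities

# With states=None, filtered states are computed internally via get_filtered_states.
kde = univariate_densities(data=data, model=model, params=params, period=1)

# Alternatively, pass simulated states explicitly.
latent = simulate_dataset(model, params, data=data, policies=None)
kde = univariate_densities(
    data=data,
    states=latent["aug_unanchored_states"]["states"],
    model=model,
    params=params,
    period=1,
)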
@@ -222,12 +224,12 @@ def univariate_densities( """ if states is None: - states = get_filtered_states(model_dict=model_dict, data=data, params=params)[ + states = get_filtered_states(model=model, data=data, params=params)[ "anchored_states" ]["states"] - model = process_model(model_dict) + processed_model = process_model(model) factors = _get_factors( - model=model, + model=processed_model, factors=factors, observed_factors=observed_factors, ) @@ -236,7 +238,7 @@ def univariate_densities( states=states, period=period, factors=factors, - aug_periods_to_periods=model.labels.aug_periods_to_periods, + aug_periods_to_periods=processed_model.labels.aug_periods_to_periods, observed_states=observed_states, ) scenarios = df["scenario"].unique() @@ -274,7 +276,7 @@ def univariate_densities( def bivariate_density_contours( data: pd.DataFrame, - model_dict: dict[str, Any], + model: dict[str, Any] | ModelSpec, params: pd.DataFrame, period: int, factors: list[str] | tuple[str, ...] | None = None, @@ -297,7 +299,8 @@ def bivariate_density_contours( Args: data: Model estimation input data. - model_dict: Dictionary with model specifications. + model: The model specification, either as a dict or ModelSpec instance. + See: :ref:`model_specs` params: DataFrame with estimated parameter values. period: Model period for which to plot the distributions for. factors: List of factors for which to plot the densities. @@ -306,7 +309,7 @@ def bivariate_density_contours( states: List or dictionary with tidy DataFrames with filtered or simulated states or only one DataFrame with filtered or simulated states. If None, retrieve data frame with filtered - states using model_dict and data. States are used to estimate the state + states using model and data. States are used to estimate the state ranges in each period (if state_ranges are not given explicitly) and to estimate the distribution of the latent factors. n_points: Number of grid points used to create the mesh for calculation @@ -338,12 +341,12 @@ def bivariate_density_contours( """ if states is None: - states = get_filtered_states(model_dict=model_dict, data=data, params=params)[ + states = get_filtered_states(model=model, data=data, params=params)[ "anchored_states" ]["states"] - model = process_model(model_dict) + processed_model = process_model(model) factors = _get_factors( - model=model, + model=processed_model, factors=factors, observed_factors=observed_factors, ) @@ -352,7 +355,7 @@ def bivariate_density_contours( states=states, period=period, factors=factors, - aug_periods_to_periods=model.labels.aug_periods_to_periods, + aug_periods_to_periods=processed_model.labels.aug_periods_to_periods, observed_states=observed_states, ) plots_dict = {} @@ -405,7 +408,7 @@ def bivariate_density_contours( def bivariate_density_surfaces( data: pd.DataFrame, - model_dict: dict[str, Any], + model: dict[str, Any] | ModelSpec, params: pd.DataFrame, period: int, factors: list[str] | tuple[str, ...] | None = None, @@ -428,7 +431,8 @@ def bivariate_density_surfaces( Args: data: Model estimation input data. - model_dict: Dictionary with model specifications. + model: The model specification, either as a dict or ModelSpec instance. + See: :ref:`model_specs` params: DataFrame with estimated parameter values. period: Model period for which to plot the distributions for. factors: List of factors for which to plot the densities. 
@@ -437,7 +441,7 @@ def bivariate_density_surfaces( states: List or dictionary with tidy DataFrames with filtered or simulated states or only one DataFrame with filtered or simulated states. If None, retrieve data frame with filtered - states using model_dict and data. States are used to estimate the state + states using model and data. States are used to estimate the state ranges in each period (if state_ranges are not given explicitly) and to estimate the distribution of the latent factors. n_points: Number of grid points used to create the mesh for calculation @@ -465,14 +469,14 @@ def bivariate_density_surfaces( """ if states is None: - states = get_filtered_states(model_dict=model_dict, data=data, params=params)[ + states = get_filtered_states(model=model, data=data, params=params)[ "anchored_states" ]["states"] elif not isinstance(states, pd.DataFrame): raise ValueError("3d plots are only supported if states is a DataFrame") - model = process_model(model_dict) + processed_model = process_model(model) factors = _get_factors( - model=model, + model=processed_model, factors=factors, observed_factors=observed_factors, ) @@ -481,7 +485,7 @@ def bivariate_density_surfaces( states=states, period=period, factors=factors, - aug_periods_to_periods=model.labels.aug_periods_to_periods, + aug_periods_to_periods=processed_model.labels.aug_periods_to_periods, observed_states=observed_states, ) plots_dict = {} diff --git a/src/skillmodels/visualize_transition_equations.py b/src/skillmodels/visualize_transition_equations.py index 4203bece..eba82292 100644 --- a/src/skillmodels/visualize_transition_equations.py +++ b/src/skillmodels/visualize_transition_equations.py @@ -14,6 +14,7 @@ from plotly.subplots import make_subplots from skillmodels.filtered_states import get_filtered_states +from skillmodels.model_spec import ModelSpec # noqa: TC001 from skillmodels.params_index import get_params_index from skillmodels.parse_params import create_parsing_info, parse_params from skillmodels.process_data import process_data @@ -141,7 +142,7 @@ def combine_transition_plots( def get_transition_plots( - model_dict: dict[str, Any], + model: dict[str, Any] | ModelSpec, params: pd.DataFrame, data: pd.DataFrame, period: int, @@ -161,7 +162,8 @@ def get_transition_plots( """Get dictionary with individual plots of transition equations for each factor. Args: - model_dict: The model specification. See: :ref:`model_specs` + model: The model specification, either as a dict or ModelSpec instance. + See: :ref:`model_specs` params: DataFrame with model parameters. data: Empirical dataset that is used to estimate the model. period: The start period of the transition equations that are plotted. 
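For get_transition_plots itself, a sketch mirroring the test at the end of this patch, with model, params, and data assumed to be set up as there:

from skillmodels.visualize_transition_equations import (
    combine_transition_plots,
    get_transition_plots,
)

subplots = get_transition_plots(
    model=model,  # dict or ModelSpec
    params=params,
    data=data,
    period=0,
    quantiles_of_other_factors=[0.1, 0.25, 0.5, 0.75, 0.9],
)
fig = combine_transition_plots(subplots)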
@@ -191,30 +193,30 @@ def get_transition_plots( quantiles_of_other_factors, ) - model = process_model(model_dict) + processed_model = process_model(model) - if period >= model.labels.periods[-1]: + if period >= processed_model.labels.periods[-1]: raise ValueError( "*period* must be the penultimate period of the model or earlier.", ) if ( include_correction_factors - or not model.endogenous_factors_info.has_endogenous_factors + or not processed_model.endogenous_factors_info.has_endogenous_factors ): - latent_factors = model.labels.latent_factors + latent_factors = processed_model.labels.latent_factors else: latent_factors = [ lf - for lf in model.labels.latent_factors - if not model.endogenous_factors_info.factor_info[lf].is_correction # ty: ignore[invalid-argument-type] + for lf in processed_model.labels.latent_factors + if not processed_model.endogenous_factors_info.factor_info[lf].is_correction # ty: ignore[invalid-argument-type] ] - all_factors = model.labels.all_factors - states = get_filtered_states(model_dict=model_dict, data=data, params=params)[ + all_factors = processed_model.labels.all_factors + states = get_filtered_states(model=model, data=data, params=params)[ "anchored_states" ]["states"] return _get_dictionary_with_plots( - model=model, + model=processed_model, data=data, params=params, states=states, diff --git a/tests/test_constraints.py b/tests/test_constraints.py index 40ed681d..25cd6f4b 100644 --- a/tests/test_constraints.py +++ b/tests/test_constraints.py @@ -414,8 +414,8 @@ def assert_list_equal_except_for_order(list1, list2) -> None: @pytest.fixture def simplest_augmented_model(): with (TEST_DATA_DIR / "simplest_augmented_model.yaml").open() as y: - model_dict = yaml.load(y, Loader=yaml.SafeLoader) - return process_model(model_dict) + model = yaml.load(y, Loader=yaml.SafeLoader) + return process_model(model) def test_get_constraints_for_augmented_periods(simplest_augmented_model) -> None: diff --git a/tests/test_filtered_states.py b/tests/test_filtered_states.py index bcfc0fc7..0a0fd53e 100644 --- a/tests/test_filtered_states.py +++ b/tests/test_filtered_states.py @@ -31,7 +31,7 @@ def test_get_filtered_states(model2, model2_data) -> None: max_inputs = get_maximization_inputs(model2, model2_data) params = params.loc[max_inputs["params_template"].index] - calculated = get_filtered_states(model_dict=model2, data=model2_data, params=params) + calculated = get_filtered_states(model=model2, data=model2_data, params=params) factors = ["fac1", "fac2", "fac3"] expected_ratios = [1.187757, 1, 1] diff --git a/tests/test_params_index.py b/tests/test_params_index.py index b34b2b42..e2d2075d 100644 --- a/tests/test_params_index.py +++ b/tests/test_params_index.py @@ -22,8 +22,8 @@ @pytest.fixture def model2_inputs(): with (TEST_DATA_DIR / "model2.yaml").open() as y: - model_dict = yaml.load(y, Loader=yaml.SafeLoader) - processed = process_model(model_dict) + model = yaml.load(y, Loader=yaml.SafeLoader) + processed = process_model(model) return { "update_info": processed.update_info, diff --git a/tests/test_parse_params.py b/tests/test_parse_params.py index 35e8d2e9..e2853977 100644 --- a/tests/test_parse_params.py +++ b/tests/test_parse_params.py @@ -27,9 +27,9 @@ def parsed_parameters(): ).index with (TEST_DATA_DIR / "model2.yaml").open() as y: - model_dict = yaml.load(y, Loader=yaml.SafeLoader) + model = yaml.load(y, Loader=yaml.SafeLoader) - processed = process_model(model_dict) + processed = process_model(model) update_info = processed.update_info labels = processed.labels 
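None of the tests below exercise the with_* builders on ModelSpec directly, so here is a small sketch of the intended pattern; the factor and transition-function names are assumptions modeled on model2:

from skillmodels import ModelSpec

spec = ModelSpec.from_dict(model_dict)  # model_dict as loaded from model2.yaml

# Keys must cover every factor, otherwise with_transition_functions raises ValueError.
spec = spec.with_transition_functions(
    {"fac1": "log_ces", "fac2": "linear", "fac3": "linear"}
)
spec = spec.with_added_observed_factors("ob1")

# to_dict round-trips to the plain-dict format that process_model consumes.
assert "ob1" in spec.to_dict()["observed_factors"]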
diff --git a/tests/test_process_data.py b/tests/test_process_data.py index 8910857d..4b6d5d3b 100644 --- a/tests/test_process_data.py +++ b/tests/test_process_data.py @@ -48,7 +48,7 @@ def test_pre_process_data() -> None: def simplest_augmented(): out = {} with (TEST_DATA_DIR / "simplest_augmented_model.yaml").open() as y: - out["model_dict"] = yaml.load(y, Loader=yaml.SafeLoader) + out["model"] = yaml.load(y, Loader=yaml.SafeLoader) _df = pd.DataFrame(data=np.arange(15).reshape(3, 5).T, columns=["var", "inv", "of"]) _df["period"] = [1, 1, 2, 1, 2] _df["id"] = [1, 3, 3, 5, 5] @@ -61,15 +61,15 @@ def simplest_augmented(): def test_augment_data_for_endogenous_factors(simplest_augmented) -> None: - model = process_model(simplest_augmented["model_dict"]) + processed_model = process_model(simplest_augmented["model"]) pre_processed_data = pre_process_data( - simplest_augmented["data_input"], model.labels.periods + simplest_augmented["data_input"], processed_model.labels.periods ) pre_processed_data["constant"] = 1 res = _augment_data_for_endogenous_factors( df=pre_processed_data, - labels=model.labels, - update_info=model.update_info, + labels=processed_model.labels, + update_info=processed_model.update_info, ) cols = ["var", "inv", "constant", "of"] pd.testing.assert_frame_equal(res[cols], simplest_augmented["data_exp"][cols]) diff --git a/tests/test_process_model.py b/tests/test_process_model.py index 87aa0a50..d41c4f1b 100644 --- a/tests/test_process_model.py +++ b/tests/test_process_model.py @@ -124,12 +124,12 @@ def test_normalizations(model2) -> None: def test_anchoring_and_endogenous_factors_work_together() -> None: with (TEST_DATA_DIR / "model2.yaml").open() as y: - model_dict = yaml.load(y, Loader=yaml.SafeLoader) + model = yaml.load(y, Loader=yaml.SafeLoader) # Set fac3 to be endogenous - model_dict["factors"]["fac3"]["is_endogenous"] = True - del model_dict["stagemap"] + model["factors"]["fac3"]["is_endogenous"] = True + del model["stagemap"] # Should not raise - anchoring and endogenous factors now work together - result = process_model(model_dict) + result = process_model(model) # Verify anchoring is enabled assert result.anchoring.anchoring assert result.anchoring.factors == ("fac1",) @@ -147,37 +147,38 @@ def test_anchoring_and_endogenous_factors_work_together() -> None: def test_stagemap_with_endogenous_factors_wrong_labels() -> None: with (TEST_DATA_DIR / "model2.yaml").open() as y: - model_dict = yaml.load(y, Loader=yaml.SafeLoader) + model = yaml.load(y, Loader=yaml.SafeLoader) # Set fac3 to be endogenous - model_dict["factors"]["fac3"]["is_endogenous"] = True - model_dict["stagemap"] = [0, 0, 1, 1, 2, 2, 4] - del model_dict["anchoring"] + model["factors"]["fac3"]["is_endogenous"] = True + model["stagemap"] = [0, 0, 1, 1, 2, 2, 4] + del model["anchoring"] with pytest.raises(ValueError, match="Invalid stage map:"): - process_model(model_dict) + process_model(model) def test_stagemap_with_endogenous_factors() -> None: with (TEST_DATA_DIR / "model2.yaml").open() as y: - model_dict = yaml.load(y, Loader=yaml.SafeLoader) + model = yaml.load(y, Loader=yaml.SafeLoader) # Set fac3 to be endogenous - model_dict["factors"]["fac3"]["is_endogenous"] = True - model_dict["stagemap"] = [0, 0, 1, 1, 2, 2, 3] - del model_dict["anchoring"] - model = process_model(model_dict) - assert model.labels.stagemap == tuple(model_dict["stagemap"]) - assert model.labels.stages == (0, 1, 2, 3) - assert model.labels.aug_stagemap == (0, 1, 0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7) + 
model["factors"]["fac3"]["is_endogenous"] = True + stagemap = [0, 0, 1, 1, 2, 2, 3] + model["stagemap"] = stagemap + del model["anchoring"] + processed = process_model(model) + assert processed.labels.stagemap == tuple(stagemap) + assert processed.labels.stages == (0, 1, 2, 3) + assert processed.labels.aug_stagemap == (0, 1, 0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7) @pytest.fixture def model2_inv(): with (TEST_DATA_DIR / "model2.yaml").open() as y: - model_dict = yaml.load(y, Loader=yaml.SafeLoader) + model = yaml.load(y, Loader=yaml.SafeLoader) # Set fac3 to be endogenous - model_dict["factors"]["fac3"]["is_endogenous"] = True - del model_dict["stagemap"] - del model_dict["anchoring"] - return model_dict + model["factors"]["fac3"]["is_endogenous"] = True + del model["stagemap"] + del model["anchoring"] + return model def test_with_endog_has_endogenous_factors(model2_inv) -> None: diff --git a/tests/test_simulate_data.py b/tests/test_simulate_data.py index be7c6b4a..2a7506c0 100644 --- a/tests/test_simulate_data.py +++ b/tests/test_simulate_data.py @@ -32,12 +32,12 @@ def model2_data(): def test_simulate_dataset(model2, model2_data) -> None: - model_dict = model2 + model = model2 params = pd.read_csv(REGRESSION_VAULT / "one_stage_anchoring.csv") params = params.set_index(["category", "period", "name1", "name2"]) calculated = simulate_dataset( - model_dict=model_dict, + model=model, params=params, data=model2_data, ) @@ -67,11 +67,11 @@ def test_measurements_from_factors() -> None: def model2_with_endogenous(): """Model2 with fac3 set as endogenous factor.""" with (TEST_DATA_DIR / "model2.yaml").open() as y: - model_dict = yaml.load(y, Loader=yaml.SafeLoader) - model_dict["factors"]["fac3"]["is_endogenous"] = True - del model_dict["stagemap"] - del model_dict["anchoring"] - return model_dict + model = yaml.load(y, Loader=yaml.SafeLoader) + model["factors"]["fac3"]["is_endogenous"] = True + del model["stagemap"] + del model["anchoring"] + return model def test_collapse_aug_periods_to_periods_with_endogenous_factors( @@ -82,12 +82,14 @@ def test_collapse_aug_periods_to_periods_with_endogenous_factors( This is a regression test for a bug where MeasurementType enum values were compared against strings in pandas queries, causing empty results. 
""" - model = process_model(model2_with_endogenous) - factors = model.labels.latent_factors + processed_model = process_model(model2_with_endogenous) + factors = processed_model.labels.latent_factors # Create a mock aug_latent_data DataFrame with aug_period column n_obs = 5 - n_aug_periods = model.dimensions.n_aug_periods - 1 # Exclude last half-period + n_aug_periods = ( + processed_model.dimensions.n_aug_periods - 1 + ) # Exclude last half-period records = [] for aug_p in range(n_aug_periods): for obs_id in range(n_obs): @@ -100,8 +102,8 @@ def test_collapse_aug_periods_to_periods_with_endogenous_factors( result = _collapse_aug_periods_to_periods( df=aug_latent_data, factors=factors, - aug_periods_to_periods=model.labels.aug_periods_to_periods, - endogenous_factors_info=model.endogenous_factors_info, + aug_periods_to_periods=processed_model.labels.aug_periods_to_periods, + endogenous_factors_info=processed_model.endogenous_factors_info, ) # The result should not be empty @@ -116,7 +118,7 @@ def test_collapse_aug_periods_to_periods_with_endogenous_factors( assert fac in result.columns # Should have correct number of unique periods (half of aug_periods) - expected_n_periods = model.dimensions.n_periods + expected_n_periods = processed_model.dimensions.n_periods assert result["period"].nunique() == expected_n_periods # Should have all observations for each period diff --git a/tests/test_utilities.py b/tests/test_utilities.py index 35e3ca8a..a6613881 100644 --- a/tests/test_utilities.py +++ b/tests/test_utilities.py @@ -121,12 +121,12 @@ def test_remove_from_dict(to_remove) -> None: def test_reduce_params_via_extract_factors(model2) -> None: - model_dict = reduce_n_periods(model2, 2) + model = reduce_n_periods(model2, 2) - full_index = _get_params_index_from_model_dict(model_dict) # ty: ignore[invalid-argument-type] + full_index = _get_params_index_from_model_dict(model) # ty: ignore[invalid-argument-type] params = pd.DataFrame(columns=["value"], index=full_index) - _, reduced_params = extract_factors("fac3", model_dict, params) # ty: ignore[invalid-argument-type] + _, reduced_params = extract_factors("fac3", model, params) # ty: ignore[invalid-argument-type] expected_index = pd.MultiIndex.from_tuples( [ @@ -154,11 +154,11 @@ def test_reduce_params_via_extract_factors(model2) -> None: def test_extend_params_via_switch_to_translog(model2) -> None: - model_dict = reduce_n_periods(model2, 2) - normal_index = _get_params_index_from_model_dict(model_dict) # ty: ignore[invalid-argument-type] + model = reduce_n_periods(model2, 2) + normal_index = _get_params_index_from_model_dict(model) # ty: ignore[invalid-argument-type] params = pd.DataFrame(columns=["value"], index=normal_index) - _, extended_params = switch_linear_to_translog(model_dict, params) # ty: ignore[invalid-argument-type] + _, extended_params = switch_linear_to_translog(model, params) # ty: ignore[invalid-argument-type] added_index = extended_params.index.difference(normal_index) # ty: ignore[possibly-missing-attribute] diff --git a/tests/test_visualize_factor_distributions.py b/tests/test_visualize_factor_distributions.py index 6bb08efd..c288e30e 100644 --- a/tests/test_visualize_factor_distributions.py +++ b/tests/test_visualize_factor_distributions.py @@ -18,7 +18,7 @@ def test_visualize_factor_distributions_runs_with_filtered_states() -> None: with (TEST_DATA_DIR / "model2.yaml").open() as y: - model_dict = yaml.load(y, Loader=yaml.SafeLoader) + model = yaml.load(y, Loader=yaml.SafeLoader) params = pd.read_csv(REGRESSION_VAULT / 
"one_stage_anchoring.csv") params = params.set_index(["category", "period", "name1", "name2"]) @@ -26,23 +26,23 @@ def test_visualize_factor_distributions_runs_with_filtered_states() -> None: data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") data = data.set_index(["caseid", "period"]) - max_inputs = get_maximization_inputs(model_dict, data) + max_inputs = get_maximization_inputs(model, data) params = params.loc[max_inputs["params_template"].index] kde = univariate_densities( data=data, - model_dict=model_dict, + model=model, params=params, period=1, ) contours = bivariate_density_contours( data=data, - model_dict=model_dict, + model=model, params=params, period=1, ) surfaces = bivariate_density_surfaces( data=data, - model_dict=model_dict, + model=model, params=params, period=1, ) @@ -55,7 +55,7 @@ def test_visualize_factor_distributions_runs_with_filtered_states() -> None: def test_visualize_factor_distributions_runs_with_simulated_states() -> None: with (TEST_DATA_DIR / "model2.yaml").open() as y: - model_dict = yaml.load(y, Loader=yaml.SafeLoader) + model = yaml.load(y, Loader=yaml.SafeLoader) data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") data = data.set_index(["caseid", "period"]) @@ -63,24 +63,24 @@ def test_visualize_factor_distributions_runs_with_simulated_states() -> None: params = pd.read_csv(REGRESSION_VAULT / "one_stage_anchoring.csv") params = params.set_index(["category", "period", "name1", "name2"]) - max_inputs = get_maximization_inputs(model_dict, data) + max_inputs = get_maximization_inputs(model, data) params = params.loc[max_inputs["params_template"].index] - latent_data = simulate_dataset(model_dict, params, data=data, policies=None)[ + latent_data = simulate_dataset(model, params, data=data, policies=None)[ "aug_unanchored_states" ]["states"] kde = univariate_densities( data=data, states=latent_data, - model_dict=model_dict, + model=model, params=params, period=1, ) contours = bivariate_density_contours( data=data, states=latent_data, - model_dict=model_dict, + model=model, params=params, period=1, ) diff --git a/tests/test_visualize_transition_equations.py b/tests/test_visualize_transition_equations.py index 65f3df3e..2b5c1da9 100644 --- a/tests/test_visualize_transition_equations.py +++ b/tests/test_visualize_transition_equations.py @@ -15,9 +15,9 @@ def test_visualize_transition_equations_runs() -> None: with (TEST_DATA_DIR / "model2.yaml").open() as y: - model_dict = yaml.load(y, Loader=yaml.SafeLoader) + model = yaml.load(y, Loader=yaml.SafeLoader) - model_dict["observed_factors"] = ["ob1"] + model["observed_factors"] = ["ob1"] params = pd.read_csv(REGRESSION_VAULT / "one_stage_anchoring.csv") params = params.set_index(["category", "period", "name1", "name2"]) @@ -26,12 +26,12 @@ def test_visualize_transition_equations_runs() -> None: data = data.set_index(["caseid", "period"]) data["ob1"] = 0 - max_inputs = get_maximization_inputs(model_dict, data) + max_inputs = get_maximization_inputs(model, data) full_index = max_inputs["params_template"].index params = params.reindex(full_index) params["value"] = params["value"].fillna(0) subplots = get_transition_plots( - model_dict=model_dict, + model=model, params=params, period=0, quantiles_of_other_factors=[0.1, 0.25, 0.5, 0.75, 0.9], @@ -39,7 +39,7 @@ def test_visualize_transition_equations_runs() -> None: ) combine_transition_plots(subplots) subplots = get_transition_plots( - model_dict=model_dict, + model=model, params=params, period=0, quantiles_of_other_factors=None, From 
9590c464e904fa95254605479902813216000059 Mon Sep 17 00:00:00 2001 From: Hans-Martin von Gaudecker Date: Wed, 21 Jan 2026 11:11:20 +0100 Subject: [PATCH 18/27] Simplify. --- src/skillmodels/__init__.py | 4 +- .../visualize_factor_distributions.py | 42 +++++-- tests/test_visualize_factor_distributions.py | 107 ++++++++++++++++++ 3 files changed, 138 insertions(+), 15 deletions(-) diff --git a/src/skillmodels/__init__.py b/src/skillmodels/__init__.py index 057ad719..0b15d218 100644 --- a/src/skillmodels/__init__.py +++ b/src/skillmodels/__init__.py @@ -2,10 +2,8 @@ import contextlib -try: +with contextlib.suppress(ImportError): import pdbp # noqa: F401 -except ImportError: - contextlib.suppress(Exception) from skillmodels.filtered_states import get_filtered_states from skillmodels.maximization_inputs import get_maximization_inputs diff --git a/src/skillmodels/visualize_factor_distributions.py b/src/skillmodels/visualize_factor_distributions.py index 450ce2bc..f4dfed7c 100644 --- a/src/skillmodels/visualize_factor_distributions.py +++ b/src/skillmodels/visualize_factor_distributions.py @@ -533,6 +533,34 @@ def bivariate_density_surfaces( return plots_dict +def _get_one_state_per_period( + states: pd.DataFrame, + ap_to_p: pd.Series, +) -> pd.DataFrame: + """Get one state per (period, id). + + Handles aug_period and/or period index/columns. + """ + # Always reset index to work with columns + df = states.reset_index() + + has_aug_period = "aug_period" in df.columns + has_period = "period" in df.columns + + if has_aug_period and not has_period: + # Only aug_period: merge to get period, then collapse to one per (period, id) + df = df.merge(ap_to_p, left_on="aug_period", right_index=True, how="left") + return df.sort_values(["aug_period", "id"]).groupby(["period", "id"]).last() + if has_aug_period and has_period: + # Both exist: collapse multiple aug_periods to one per (period, id) + return df.sort_values(["aug_period", "id"]).groupby(["period", "id"]).last() + if has_period: + # Only period (no aug_period): just set index + return df.set_index(["period", "id"]) + msg = "States must have either 'aug_period' or 'period' column/index." 
+ raise ValueError(msg) + + def _process_data( states: pd.DataFrame | dict[str, pd.DataFrame] | list[pd.DataFrame], period: int, @@ -543,12 +571,7 @@ def _process_data( ap_to_p = pd.Series(aug_periods_to_periods, name="period") ap_to_p.index.name = "aug_period" if isinstance(states, pd.DataFrame): - one_state_per_period = ( - states.merge(ap_to_p, left_on="aug_period", right_index=True, how="left") - .sort_values(["aug_period", "id"]) - .groupby(["period", "id"]) - .last() - ) + one_state_per_period = _get_one_state_per_period(states, ap_to_p) to_concat = [] for fac in factors: if fac in one_state_per_period: @@ -560,12 +583,7 @@ def _process_data( states = dict(enumerate(states)) to_concat = [] for name, df in states.items(): - one_state_per_period = ( - df.merge(ap_to_p, left_on="aug_period", right_index=True, how="left") - .sort_values(["aug_period", "id"]) - .groupby(["period", "id"]) - .last() - ) + one_state_per_period = _get_one_state_per_period(df, ap_to_p) to_keep = one_state_per_period.query(f"period == {period}")[factors].copy() to_keep["scenario"] = name to_concat.append(to_keep) diff --git a/tests/test_visualize_factor_distributions.py b/tests/test_visualize_factor_distributions.py index c288e30e..dc666dc9 100644 --- a/tests/test_visualize_factor_distributions.py +++ b/tests/test_visualize_factor_distributions.py @@ -4,7 +4,9 @@ import yaml from skillmodels.config import TEST_DATA_DIR +from skillmodels.filtered_states import get_filtered_states from skillmodels.maximization_inputs import get_maximization_inputs +from skillmodels.process_model import process_model from skillmodels.simulate_data import simulate_dataset from skillmodels.visualize_factor_distributions import ( bivariate_density_contours, @@ -89,3 +91,108 @@ def test_visualize_factor_distributions_runs_with_simulated_states() -> None: contour_plots=contours, surface_plots=None, ) + + +def test_visualize_factor_distributions_with_period_indexed_states() -> None: + """Test visualization with states indexed by (id, period) without aug_period. + + This mimics the scenario where states come from a downstream task that has + already mapped aug_period to period and dropped the aug_period column. 
+ """ + with (TEST_DATA_DIR / "model2.yaml").open() as y: + model = yaml.load(y, Loader=yaml.SafeLoader) + + data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") + data = data.set_index(["caseid", "period"]) + + params = pd.read_csv(REGRESSION_VAULT / "one_stage_anchoring.csv") + params = params.set_index(["category", "period", "name1", "name2"]) + + max_inputs = get_maximization_inputs(model, data) + params = params.loc[max_inputs["params_template"].index] + + # Get filtered states and convert to (id, period) index without aug_period + filtered_states = get_filtered_states(model=model, data=data, params=params)[ + "anchored_states" + ]["states"] + processed = process_model(model) + + # Add period column and drop aug_period + # (mimics task_filtered_states_and_measurements) + filtered_states["period"] = filtered_states["aug_period"].map( + processed.labels.aug_periods_to_periods + ) + filtered_states = filtered_states.drop(columns=["aug_period"]).set_index( + ["id", "period"] + ) + + kde = univariate_densities( + data=data, + states=filtered_states, + model=model, + params=params, + period=1, + ) + contours = bivariate_density_contours( + data=data, + states=filtered_states, + model=model, + params=params, + period=1, + ) + combine_distribution_plots( + kde_plots=kde, + contour_plots=contours, + surface_plots=None, + ) + + +def test_visualize_factor_distributions_with_both_aug_period_and_period() -> None: + """Test visualization with states having both aug_period and period. + + This mimics the scenario where states have aug_period as a column and period + in the index (or both as columns). + """ + with (TEST_DATA_DIR / "model2.yaml").open() as y: + model = yaml.load(y, Loader=yaml.SafeLoader) + + data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") + data = data.set_index(["caseid", "period"]) + + params = pd.read_csv(REGRESSION_VAULT / "one_stage_anchoring.csv") + params = params.set_index(["category", "period", "name1", "name2"]) + + max_inputs = get_maximization_inputs(model, data) + params = params.loc[max_inputs["params_template"].index] + + # Get filtered states and add period while keeping aug_period + filtered_states = get_filtered_states(model=model, data=data, params=params)[ + "anchored_states" + ]["states"] + processed = process_model(model) + + # Add period column but keep aug_period (both are present) + filtered_states["period"] = filtered_states["aug_period"].map( + processed.labels.aug_periods_to_periods + ) + filtered_states = filtered_states.set_index(["id", "period"]) + + kde = univariate_densities( + data=data, + states=filtered_states, + model=model, + params=params, + period=1, + ) + contours = bivariate_density_contours( + data=data, + states=filtered_states, + model=model, + params=params, + period=1, + ) + combine_distribution_plots( + kde_plots=kde, + contour_plots=contours, + surface_plots=None, + ) From 69f6eab6de9a60a01c26d5892efa5c3bf7b8a9c9 Mon Sep 17 00:00:00 2001 From: Hans-Martin von Gaudecker Date: Wed, 28 Jan 2026 08:27:08 +0100 Subject: [PATCH 19/27] Use modern rng everywhere. 
--- ...sualize_pairwise_factor_distribution.ipynb | 3 +- src/skillmodels/simulate_data.py | 31 ++++++++++---- tests/test_kalman_filters.py | 42 ++++++++++--------- tests/test_likelihood_regression.py | 5 ++- tests/test_simulate_data.py | 21 ++++++---- 5 files changed, 62 insertions(+), 40 deletions(-) diff --git a/docs/source/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb b/docs/source/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb index f11ee639..db7380fa 100644 --- a/docs/source/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb +++ b/docs/source/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb @@ -284,7 +284,8 @@ "metadata": {}, "outputs": [], "source": [ - "data[\"obs1\"] = np.random.rand(data.shape[0])" + "rng = np.random.default_rng(42)\n", + "data[\"obs1\"] = rng.random(data.shape[0])" ] }, { diff --git a/src/skillmodels/simulate_data.py b/src/skillmodels/simulate_data.py index 9f884d64..de4f0955 100644 --- a/src/skillmodels/simulate_data.py +++ b/src/skillmodels/simulate_data.py @@ -7,7 +7,6 @@ import numpy as np import pandas as pd from jax import Array -from numpy.random import choice, multivariate_normal from skillmodels.filtered_states import anchor_states_df @@ -38,6 +37,7 @@ def simulate_dataset( n_obs: int | None = None, data: pd.DataFrame | None = None, policies: list[dict] | None = None, + seed: int | None = None, ) -> dict: """Simulate datasets generated by a latent factor model. @@ -51,6 +51,8 @@ def simulate_dataset( policies: list of dictionaries. Each dictionary specifies a stochastic shock to a latent factor AT THE END of "period" for "factor", with mean "effect_size" and standard deviation "standard_deviation" + seed: Random seed for reproducibility. If None, a fresh, unseeded + generator is used. Returns: observed_data: Dataset with measurements and control variables @@ -59,6 +61,8 @@ def simulate_dataset( latent_data: Dataset with latent factors in long format """ + rng = np.random.default_rng(seed) + if data is None and n_obs is None: raise ValueError("Either `data` or `n_obs` has to be provided.") @@ -141,6 +145,7 @@ def simulate_dataset( observed_factors=observed_factors, policies=policies, # ty: ignore[invalid-argument-type] transition_info=processed_model.transition_info, + rng=rng, ) # Create collapsed versions with user-facing periods @@ -200,6 +205,7 @@ def _simulate_dataset( observed_factors: Array, policies: list[dict], transition_info: TransitionInfo, + rng: np.random.Generator, ) -> tuple[pd.DataFrame, pd.DataFrame]: """Simulate datasets generated by a latent factor model. @@ -219,6 +225,7 @@ def _simulate_dataset( observed_factors: Array of shape (n_periods, n_obs, n_observed_factors). policies: List of policy dictionaries specifying stochastic shocks. transition_info: Information about transition functions. + rng: NumPy random number generator. Returns: observed_data: DataFrame with simulated measurements.
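A minimal usage sketch of the new `seed` parameter, assuming `model`, `params`, and `data` are prepared as in tests/test_simulate_data.py; the `observed_data` key is taken from the Returns section above:

```python
from skillmodels import simulate_dataset

# Same seed, same generator state, hence identical simulated draws.
run_a = simulate_dataset(model=model, params=params, data=data, seed=42)
run_b = simulate_dataset(model=model, params=params, data=data, seed=42)
assert run_a["observed_data"].equals(run_b["observed_data"])
```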
@@ -262,7 +269,7 @@ def _simulate_dataset( dist_args.append(args) latent_states = np.zeros((n_aug_periods, n_obs, n_states)) # ty: ignore[invalid-assignment] - latent_states[0] = generate_start_states(n_obs, dimensions, dist_args, weights) + latent_states[0] = generate_start_states(rng, n_obs, dimensions, dist_args, weights) for t in range(n_aug_periods - 1): # if there is a shock in period t, add it here @@ -270,6 +277,7 @@ def _simulate_dataset( for policy in policies_t: position = labels.latent_factors.index(policy["factor"]) latent_states[t, :, position] += _get_shock( + rng=rng, mean=policy["effect_size"], sd=policy["standard_deviation"], size=n_obs, @@ -302,7 +310,7 @@ def _simulate_dataset( ), ).reshape(n_obs, -1) - errors = multivariate_normal( + errors = rng.multivariate_normal( mean=np.zeros(n_states), cov=np.diag(shock_sds[t] ** 2), size=n_obs, @@ -316,6 +324,7 @@ def _simulate_dataset( for t in range(n_aug_periods): meas = pd.DataFrame( data=measurements_from_states( + rng, latent_states[t], # ty: ignore[invalid-argument-type] control_data[t], # ty: ignore[invalid-argument-type] loadings_df.loc[t].to_numpy(), @@ -391,6 +400,7 @@ def _collapse_aug_periods_to_periods( def _get_shock( + rng: np.random.Generator, mean: float, sd: float, size: int, @@ -398,6 +408,7 @@ def _get_shock( """Add stochastic effect to a factor of length n_obs. Args: + rng: NumPy random number generator. mean: mean of the stochastic effect sd: standard deviation of the effect size: length of resulting array @@ -409,13 +420,14 @@ def _get_shock( if sd == 0: shock = np.full(size, mean) elif sd > 0: - shock = np.random.normal(mean, sd, size) + shock = rng.normal(mean, sd, size) else: raise ValueError("No negative standard deviation allowed.") return shock def generate_start_states( + rng: np.random.Generator, n_obs: int, dimensions: Dimensions, dist_args: list[dict], @@ -424,6 +436,7 @@ def generate_start_states( """Draw initial states and control variables from a (mixture of) normals. Args: + rng: NumPy random number generator. n_obs: number of observations dimensions: Dimensional information like n_states, n_periods, n_controls, n_mixtures. See :ref:`dimensions`. @@ -438,17 +451,18 @@ def generate_start_states( """ n_states = dimensions.n_latent_factors if np.size(weights) == 1: - out = multivariate_normal(size=n_obs, **dist_args[0]) + out = rng.multivariate_normal(size=n_obs, **dist_args[0]) else: - helper_array = choice(np.arange(len(weights)), p=weights, size=n_obs) + helper_array = rng.choice(np.arange(len(weights)), p=weights, size=n_obs) out = np.zeros((n_obs, n_states)) for i in range(n_obs): - out[i] = multivariate_normal(**dist_args[helper_array[i]]) + out[i] = rng.multivariate_normal(**dist_args[helper_array[i]]) return out def measurements_from_states( + rng: np.random.Generator, states: NDArray[np.floating], controls: NDArray[np.floating], loadings: NDArray[np.floating], @@ -461,6 +475,7 @@ def measurements_from_states( of measurements in that period. Args: + rng: NumPy random number generator. 
states: DataFrame of shape (n_obs, n_states) controls: DataFrame of shape (n_obs, n_controls) @@ -476,7 +491,7 @@ def measurements_from_states( """ n_meas = loadings.shape[0] n_obs = len(states) - epsilon = multivariate_normal([0] * n_meas, np.diag(sds**2), n_obs) + epsilon = rng.multivariate_normal([0] * n_meas, np.diag(sds**2), n_obs) states_part = np.dot(states, loadings.T) control_part = np.dot(controls, control_params.T) return states_part + control_part + epsilon diff --git a/tests/test_kalman_filters.py b/tests/test_kalman_filters.py index b9c45d31..26b578bc 100644 --- a/tests/test_kalman_filters.py +++ b/tests/test_kalman_filters.py @@ -29,8 +29,8 @@ @pytest.mark.parametrize(("seed", "update_func"), product(SEEDS, UPDATE_FUNCS)) def test_kalman_update(seed, update_func) -> None: - np.random.seed(seed) - dim = np.random.randint(low=1, high=10) + rng = np.random.default_rng(seed) + dim = int(rng.integers(low=1, high=10)) n_obs = 5 n_mix = 2 @@ -38,9 +38,11 @@ def test_kalman_update(seed, update_func) -> None: covs = np.zeros((n_obs, n_mix, dim, dim)) for i in range(n_obs): for j in range(n_mix): - states[i, j], covs[i, j] = _random_state_and_covariance(dim=dim) + states[i, j], covs[i, j] = _random_state_and_covariance(rng, dim=dim) - loadings, measurements, meas_sd = _random_loadings_measurements_and_meas_sd(states) + loadings, measurements, meas_sd = _random_loadings_measurements_and_meas_sd( + rng, states + ) expected_states = np.zeros_like(states) expected_covs = np.zeros_like(covs) @@ -134,8 +136,8 @@ def test_kalman_update_with_missing(update_func) -> None: @pytest.mark.parametrize("seed", SEEDS) def test_sigma_points(seed: int) -> None: - np.random.seed(seed) - state, cov = _random_state_and_covariance() + rng = np.random.default_rng(seed) + state, cov = _random_state_and_covariance(rng) observed_factors = jnp.arange(2).reshape(1, 2) expected = JulierSigmaPoints(n=len(state), kappa=2).sigma_points(state, cov) observed_part = np.tile(observed_factors, len(expected)).reshape(-1, 2) @@ -158,9 +160,9 @@ def test_sigma_points(seed: int) -> None: @pytest.mark.parametrize("seed", SEEDS) def test_sigma_scaling_factor_and_weights(seed) -> None: - np.random.seed(seed) - dim = np.random.randint(low=1, high=15) - kappa = np.random.uniform(low=0.5, high=5) + rng = np.random.default_rng(seed) + dim = int(rng.integers(low=1, high=15)) + kappa = float(rng.uniform(low=0.5, high=5)) # Test my assumption that weights for mean and cov are equal in the Julier algorithm expected_weights = JulierSigmaPoints(n=dim, kappa=kappa).Wm expected_weights2 = JulierSigmaPoints(n=dim, kappa=kappa).Wc @@ -213,10 +215,10 @@ def f(params, states): @pytest.mark.parametrize("seed", SEEDS) def test_predict_against_linear_filterpy(seed) -> None: - np.random.seed(seed) - state, cov = _random_state_and_covariance() + rng = np.random.default_rng(seed) + state, cov = _random_state_and_covariance(rng) dim = len(state) - trans_mat = np.random.uniform(low=-1, high=1, size=(dim, dim)) + trans_mat = rng.uniform(low=-1, high=1, size=(dim, dim)) shock_sds = 0.5 * np.arange(dim) / dim @@ -265,20 +267,20 @@ def transition_function(params, states): # ====================================================================================== -def _random_state_and_covariance(dim=None): +def _random_state_and_covariance(rng, dim=None): if dim is None: - dim = np.random.randint(low=1, high=10) - factorized = np.random.uniform(low=-1, high=3, size=(dim, dim)) + dim = rng.integers(low=1, high=10) + factorized = rng.uniform(low=-1, high=3, 
size=(dim, dim)) cov = factorized @ factorized.T * 0.5 + np.eye(dim) - state = np.random.uniform(low=-5, high=5, size=dim) + state = rng.uniform(low=-5, high=5, size=dim) return state, cov -def _random_loadings_measurements_and_meas_sd(state): +def _random_loadings_measurements_and_meas_sd(rng, state): n_obs, _n_mix, dim = state.shape - loadings = np.random.uniform(size=dim) - meas_sd = np.random.uniform() - epsilon = np.random.normal(loc=0, scale=meas_sd, size=(n_obs)) + loadings = rng.uniform(size=dim) + meas_sd = rng.uniform() + epsilon = rng.normal(loc=0, scale=meas_sd, size=(n_obs)) measurement = (state @ loadings).sum(axis=1) + epsilon return loadings, measurement, meas_sd diff --git a/tests/test_likelihood_regression.py b/tests/test_likelihood_regression.py index 962c4a40..c55dbf16 100644 --- a/tests/test_likelihood_regression.py +++ b/tests/test_likelihood_regression.py @@ -137,6 +137,7 @@ def test_likelihood_contributions_have_not_changed( def test_likelihood_contributions_large_nobs( model2, model2_data, model_type, fun_key ) -> None: + rng = np.random.default_rng(42) regvault = REGRESSION_VAULT model = _convert_model(model2, "no_stages_anchoring") params = pd.read_csv(regvault / "no_stages_anchoring.csv").set_index( @@ -171,12 +172,12 @@ def test_likelihood_contributions_large_nobs( ] if model_type == "no_stages_anchoring": for col in cols: - this_round[col] += np.random.normal(0, 0.1, (len(model2_data),)) + this_round[col] += rng.normal(0, 0.1, (len(model2_data),)) elif model_type == "with_missings": fraction_to_set_missing = 0.9 n_rows = len(this_round) n_missing = int(n_rows * fraction_to_set_missing) - rows_to_set_missing = this_round.sample(n=n_missing).index + rows_to_set_missing = this_round.sample(n=n_missing, random_state=rng).index this_round.loc[rows_to_set_missing, cols] = np.nan else: raise ValueError(f"Invalid model type: {model_type}") diff --git a/tests/test_simulate_data.py b/tests/test_simulate_data.py index 2a7506c0..c799175f 100644 --- a/tests/test_simulate_data.py +++ b/tests/test_simulate_data.py @@ -52,15 +52,17 @@ def test_simulate_dataset(model2, model2_data) -> None: def test_measurements_from_factors() -> None: - inputs = { - "states": np.array([[0, 0, 0], [1, 1, 1]]), - "controls": np.array([[1, 1], [1, 1]]), - "loadings": np.array([[0.3, 0.3, 0.3], [0.3, 0.3, 0.3], [0.3, 0.3, 0.3]]), - "control_params": np.array([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]), - "sds": np.zeros(3), - } + rng = np.random.default_rng(42) + states = np.array([[0, 0, 0], [1, 1, 1]], dtype=np.float64) + controls = np.array([[1, 1], [1, 1]], dtype=np.float64) + loadings = np.array([[0.3, 0.3, 0.3], [0.3, 0.3, 0.3], [0.3, 0.3, 0.3]]) + control_params = np.array([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]) + sds = np.zeros(3) expected = np.array([[1, 1, 1], [1.9, 1.9, 1.9]]) - aaae(measurements_from_states(**inputs), expected) + aaae( + measurements_from_states(rng, states, controls, loadings, control_params, sds), + expected, + ) @pytest.fixture @@ -82,6 +84,7 @@ def test_collapse_aug_periods_to_periods_with_endogenous_factors( This is a regression test for a bug where MeasurementType enum values were compared against strings in pandas queries, causing empty results. 
""" + rng = np.random.default_rng(42) processed_model = process_model(model2_with_endogenous) factors = processed_model.labels.latent_factors @@ -95,7 +98,7 @@ def test_collapse_aug_periods_to_periods_with_endogenous_factors( for obs_id in range(n_obs): record = {"id": obs_id, "aug_period": aug_p} for fac in factors: - record[fac] = np.random.randn() + record[fac] = rng.standard_normal() records.append(record) aug_latent_data = pd.DataFrame(records) From f8dac75d861db807ab9cfe1796e57dbf5ad44f12 Mon Sep 17 00:00:00 2001 From: Hans-Martin von Gaudecker Date: Wed, 28 Jan 2026 09:34:05 +0100 Subject: [PATCH 20/27] Update CLAUDE.md --- CLAUDE.md | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/CLAUDE.md b/CLAUDE.md index ce3840ba..c6a1547c 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -27,13 +27,29 @@ pixi run -e test-cpu pytest tests/test_kalman_filters.py::test_function_name # Type checking pixi run ty -# Install pre-commit hooks (required before committing) -pre-commit install +# Quality checks (linting, formatting) +prek run --all-files # Build documentation (from docs/ directory) make html ``` +## Command Rules + +Always use these command mappings: + +- **Python**: Use `pixi run python` instead of `python` or `python3` +- **Type checker**: Use `pixi run ty` instead of running ty/mypy/pyright directly +- **Tests**: Use `pixi run tests` instead of `pytest` directly +- **Linting/formatting**: Use `prek run --all-files` instead of `ruff` directly +- **All quality checks**: Use `prek run --all-files` + +Before finishing any task that modifies code, always run: + +1. `pixi run ty` (type checker) +1. `pixi run tests` (tests) +1. `prek run --all-files` (quality checks) + ## Architecture ### Core Pipeline Flow @@ -82,7 +98,8 @@ The main package exports three functions: - `get_maximization_inputs()`: Prepare optimization problem for parameter estimation - `get_filtered_states()`: Extract filtered latent factor estimates -- `simulate_dataset()`: Generate synthetic data from model specification +- `simulate_dataset()`: Generate synthetic data from model specification (accepts + optional `seed` parameter for reproducibility) ## Code Style @@ -92,6 +109,8 @@ The main package exports three functions: - Pre-commit hooks enforce formatting and linting - Type checking via `ty` with strict rules - Do not use `from __future__ import annotations` +- Use modern numpy random API: `rng = np.random.default_rng(seed)` instead of + `np.random.seed()` or legacy functions like `np.random.randn()` ## Testing From 846534bd376103251366d6207a4a8829d6ef053d Mon Sep 17 00:00:00 2001 From: Hans-Martin von Gaudecker Date: Wed, 28 Jan 2026 11:04:51 +0100 Subject: [PATCH 21/27] Get rid of if TYPE_CHECKING blocks --- .gitignore | 143 +++------- .pre-commit-config.yaml | 7 +- .yamllint.yml | 4 +- pixi.lock | 269 +++--------------- pyproject.toml | 38 ++- src/skillmodels/check_model.py | 5 +- src/skillmodels/constraints.py | 16 +- src/skillmodels/correlation_heatmap.py | 11 +- src/skillmodels/filtered_states.py | 8 +- src/skillmodels/maximization_inputs.py | 13 +- src/skillmodels/model_spec.py | 6 +- src/skillmodels/params_index.py | 15 +- src/skillmodels/parse_params.py | 13 +- src/skillmodels/process_data.py | 5 +- src/skillmodels/process_debug_data.py | 10 +- src/skillmodels/process_model.py | 6 +- src/skillmodels/simulate_data.py | 24 +- .../visualize_factor_distributions.py | 14 +- .../visualize_transition_equations.py | 11 +- tests/test_qr.py | 6 +- 20 files changed, 178 insertions(+), 
446 deletions(-) diff --git a/.gitignore b/.gitignore index 5ff6fb08..39a277d2 100644 --- a/.gitignore +++ b/.gitignore @@ -1,124 +1,51 @@ -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so - # Distribution / packaging -.Python +*.egg +*.egg-info/ +*.manifest +*.spec +.eggs/ +.installed.cfg build/ -develop-eggs/ dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ +MANIFEST sdist/ -var/ wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST -*build/ - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. -*.manifest -*.spec -*.sublime-workspace -*.sublime-project -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -.hypothesis/ -.pytest_cache/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation +# Documentation docs/_build/ -# PyBuilder -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# pyenv -.python-version - -# celery beat schedule file -celerybeat-schedule - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site +# IDE +.idea/ +.vscode/ -# mypy -.mypy_cache/ +# Jupyter +.ipynb_checkpoints/ -*notes/ +# macOS +.DS_Store -.idea/ +# pixi +.pixi/ -*.bak +# Python +__pycache__/ +*.py[cod] +*.so +*$py.class +# Ruff +.ruff_cache/ -*.db +# Testing +.cache/ +.coverage +.coverage.* +.hypothesis/ +.pytest_cache/ +coverage.xml +htmlcov/ +# Version file (generated by hatch-vcs) +src/*/_version.py -mixed_documents/ -src/skillmodels/_version.py -.pixi -.vscode -*.py.*.bin -*.py.*.html +.claude diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7afbac1d..9af75621 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,7 +4,6 @@ repos: hooks: - id: check-hooks-apply - id: check-useless-excludes - # - id: identity # Prints all files passed to pre-commits. Debugging. 
- repo: https://github.com/lyz-code/yamlfix rev: 1.19.1 hooks: @@ -41,7 +40,7 @@ repos: - id: check-ast - id: check-docstring-first - repo: https://github.com/adrienverge/yamllint.git - rev: v1.37.1 + rev: v1.38.0 hooks: - id: yamllint - repo: https://github.com/astral-sh/ruff-pre-commit @@ -71,10 +70,12 @@ repos: hooks: - id: mdformat additional_dependencies: - - mdformat-myst + - mdformat-gfm + - mdformat-gfm-alerts - mdformat-ruff args: - --wrap - '88' + files: (CLAUDE\.md|README\.md) ci: autoupdate_schedule: monthly diff --git a/.yamllint.yml b/.yamllint.yml index 0bdfa076..707dcd81 100644 --- a/.yamllint.yml +++ b/.yamllint.yml @@ -25,9 +25,9 @@ rules: key-duplicates: enable key-ordering: disable line-length: - max: 88 + allow-non-breakable-inline-mappings: true allow-non-breakable-words: true - allow-non-breakable-inline-mappings: false + max: 88 new-line-at-end-of-file: enable new-lines: type: unix diff --git a/pixi.lock b/pixi.lock index 96ae9215..ff4b4046 100644 --- a/pixi.lock +++ b/pixi.lock @@ -35,7 +35,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py314h4a8dc5f_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda @@ -65,10 +64,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py314h42812f9_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/exceptiongroup-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda @@ -84,7 +81,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/icu-78.1-h33c6efd_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/identify-2.6.15-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda @@ -165,7 +161,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/ncurses-6.5-h2d0b736_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/nest-asyncio-1.6.0-pyhd8ed1ab_1.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py314h2b28147_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/openjpeg-2.5.4-h55fea9a_0.conda @@ -180,7 +175,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/prek-0.3.0-hb17b654_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py314h0f05182_0.conda @@ -232,11 +227,9 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/ukkonen-1.0.1-py314h9891dd4_6.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/unicodedata2-17.0.0-py314h5bd0f2a_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.36.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -313,7 +306,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py314h4a8dc5f_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda @@ -324,10 +316,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py314h42812f9_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/exceptiongroup-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - - conda: 
https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda @@ -339,7 +329,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/icu-78.1-h33c6efd_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/identify-2.6.15-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda @@ -414,7 +403,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/ncurses-6.5-h2d0b736_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/nest-asyncio-1.6.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py314h2b28147_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/openjpeg-2.5.4-h55fea9a_0.conda @@ -429,7 +417,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/prek-0.3.0-hb17b654_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py314h0f05182_0.conda @@ -480,11 +468,9 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/ukkonen-1.0.1-py314h9891dd4_6.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/unicodedata2-17.0.0-py314h5bd0f2a_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.36.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -538,7 +524,6 @@ environments: - conda: 
https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/cffi-2.0.0-py314h44086f9_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda @@ -549,10 +534,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/debugpy-1.8.19-py314hf820bb6_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/exceptiongroup-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda @@ -563,7 +546,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/identify-2.6.15-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda @@ -633,7 +615,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ncurses-6.5-h5e97a16_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/nest-asyncio-1.6.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/numpy-2.3.5-py314hae46ccb_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/openjpeg-2.5.4-hbfb3c88_0.conda @@ -648,7 +629,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/prek-0.3.0-h6fdd925_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/psutil-7.2.1-py314ha14b1ff_0.conda @@ -701,11 +682,9 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ukkonen-1.0.1-py314h6b18a25_6.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/unicodedata2-17.0.0-py314h0612a62_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.36.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -757,7 +736,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/cffi-2.0.0-py314h5a2d7ad_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda @@ -768,10 +746,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/debugpy-1.8.19-py314hb98de8c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/exceptiongroup-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda @@ -783,7 +759,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/icu-78.1-h637d24d_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/identify-2.6.15-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda @@ -854,7 +829,6 @@ environments: - 
conda: https://conda.anaconda.org/conda-forge/noarch/nbformat-5.10.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/nest-asyncio-1.6.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/numpy-2.3.5-py314h06c3c77_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/openjpeg-2.5.4-h24db6dd_0.conda @@ -868,7 +842,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/prek-0.3.0-h18a1a76_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/psutil-7.2.1-py314hc5dbbe4_0.conda @@ -921,14 +895,12 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/ucrt-10.0.26100.0-h57928b3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/ukkonen-1.0.1-py314h909e829_6.conda - conda: https://conda.anaconda.org/conda-forge/win-64/unicodedata2-17.0.0-py314h5a2d7ad_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/vc-14.3-h41ae7f8_34.conda - conda: https://conda.anaconda.org/conda-forge/win-64/vc14_runtime-14.44.35208-h818238b_34.conda - conda: https://conda.anaconda.org/conda-forge/win-64/vcomp14-14.44.35208-h818238b_34.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.36.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -994,7 +966,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py314h4a8dc5f_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda @@ -1006,12 +977,10 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py314h42812f9_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/elfutils-0.194-h849f50c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/exceptiongroup-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/execnet-2.1.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda @@ -1025,7 +994,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/icu-78.1-h33c6efd_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/identify-2.6.15-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda @@ -1120,7 +1088,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/nest-asyncio-1.6.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/nettle-3.10.1-h4a9d5aa_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py314h2b28147_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/openjpeg-2.5.4-h55fea9a_0.conda @@ -1136,7 +1103,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/prek-0.3.0-hb17b654_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py314h0f05182_0.conda @@ -1193,11 +1160,9 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/ukkonen-1.0.1-py314h9891dd4_6.conda - conda: 
https://conda.anaconda.org/conda-forge/linux-64/unicodedata2-17.0.0-py314h5bd0f2a_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.36.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -1251,7 +1216,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/cffi-2.0.0-py314h44086f9_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda @@ -1263,11 +1227,9 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/debugpy-1.8.19-py314hf820bb6_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/exceptiongroup-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/execnet-2.1.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda @@ -1278,7 +1240,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/identify-2.6.15-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda @@ -1353,7 +1314,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ncurses-6.5-h5e97a16_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/nest-asyncio-1.6.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/numpy-2.3.5-py314hae46ccb_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/openjpeg-2.5.4-hbfb3c88_0.conda @@ -1368,7 +1328,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/prek-0.3.0-h6fdd925_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/psutil-7.2.1-py314ha14b1ff_0.conda @@ -1427,11 +1387,9 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ukkonen-1.0.1-py314h6b18a25_6.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/unicodedata2-17.0.0-py314h0612a62_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.36.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -1483,7 +1441,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/cffi-2.0.0-py314h5a2d7ad_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda @@ -1495,11 +1452,9 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/debugpy-1.8.19-py314hb98de8c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/exceptiongroup-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/execnet-2.1.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda @@ -1511,7 +1466,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/icu-78.1-h637d24d_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/identify-2.6.15-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda @@ -1582,7 +1536,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/nbformat-5.10.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/nest-asyncio-1.6.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/numpy-2.3.5-py314h06c3c77_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/openjpeg-2.5.4-h24db6dd_0.conda @@ -1596,7 +1549,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/prek-0.3.0-h18a1a76_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/psutil-7.2.1-py314hc5dbbe4_0.conda @@ -1652,14 +1605,12 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/ucrt-10.0.26100.0-h57928b3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/ukkonen-1.0.1-py314h909e829_6.conda - conda: https://conda.anaconda.org/conda-forge/win-64/unicodedata2-17.0.0-py314h5a2d7ad_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/vc-14.3-h41ae7f8_34.conda - conda: https://conda.anaconda.org/conda-forge/win-64/vc14_runtime-14.44.35208-h818238b_34.conda - conda: https://conda.anaconda.org/conda-forge/win-64/vcomp14-14.44.35208-h818238b_34.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.36.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -1727,7 +1678,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py314h4a8dc5f_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda @@ -1758,12 +1708,10 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py314h42812f9_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/elfutils-0.194-h849f50c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/exceptiongroup-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/execnet-2.1.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda @@ -1781,7 +1729,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/icu-78.1-h33c6efd_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/identify-2.6.15-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda @@ -1882,7 +1829,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/nest-asyncio-1.6.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/nettle-3.10.1-h4a9d5aa_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py314h2b28147_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/openjpeg-2.5.4-h55fea9a_0.conda @@ -1898,7 +1844,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - - conda: 
https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/prek-0.3.0-hb17b654_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py314h0f05182_0.conda @@ -1956,11 +1902,9 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/ukkonen-1.0.1-py314h9891dd4_6.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/unicodedata2-17.0.0-py314h5bd0f2a_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.36.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -2038,7 +1982,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py314h4a8dc5f_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda @@ -2050,12 +1993,10 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py314h42812f9_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/elfutils-0.194-h849f50c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/exceptiongroup-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/execnet-2.1.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda @@ -2069,7 +2010,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/linux-64/icu-78.1-h33c6efd_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/identify-2.6.15-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda @@ -2164,7 +2104,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/nest-asyncio-1.6.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/nettle-3.10.1-h4a9d5aa_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.3.5-py314h2b28147_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/openjpeg-2.5.4-h55fea9a_0.conda @@ -2180,7 +2119,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/prek-0.3.0-hb17b654_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.1-py314h0f05182_0.conda @@ -2237,11 +2176,9 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/ukkonen-1.0.1-py314h9891dd4_6.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/unicodedata2-17.0.0-py314h5bd0f2a_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.36.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -2299,7 +2236,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/cffi-2.0.0-py314h44086f9_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda @@ -2311,11 +2247,9 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/debugpy-1.8.19-py314hf820bb6_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/exceptiongroup-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/execnet-2.1.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda @@ -2326,7 +2260,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/identify-2.6.15-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda @@ -2401,7 +2334,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ncurses-6.5-h5e97a16_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/nest-asyncio-1.6.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/numpy-2.3.5-py314hae46ccb_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/openjpeg-2.5.4-hbfb3c88_0.conda @@ -2416,7 +2348,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/prek-0.3.0-h6fdd925_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/psutil-7.2.1-py314ha14b1ff_0.conda @@ -2475,11 +2407,9 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - - conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/ukkonen-1.0.1-py314h6b18a25_6.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/unicodedata2-17.0.0-py314h0612a62_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.36.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -2535,7 +2465,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/cffi-2.0.0-py314h5a2d7ad_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda @@ -2547,11 +2476,9 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/debugpy-1.8.19-py314hb98de8c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/exceptiongroup-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/execnet-2.1.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda @@ -2563,7 +2490,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/icu-78.1-h637d24d_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/identify-2.6.15-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda @@ -2634,7 +2560,6 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/nbformat-5.10.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/nest-asyncio-1.6.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/numpy-2.3.5-py314h06c3c77_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/openjpeg-2.5.4-h24db6dd_0.conda @@ -2648,7 +2573,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.5.1-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/plotly-6.5.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/prek-0.3.0-h18a1a76_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/psutil-7.2.1-py314hc5dbbe4_0.conda @@ -2704,14 +2629,12 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/ucrt-10.0.26100.0-h57928b3_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/ukkonen-1.0.1-py314h909e829_6.conda - conda: https://conda.anaconda.org/conda-forge/win-64/unicodedata2-17.0.0-py314h5a2d7ad_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.6.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/vc-14.3-h41ae7f8_34.conda - conda: https://conda.anaconda.org/conda-forge/win-64/vc14_runtime-14.44.35208-h818238b_34.conda - conda: https://conda.anaconda.org/conda-forge/win-64/vcomp14-14.44.35208-h818238b_34.conda - - conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.36.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda @@ -3310,17 +3233,6 @@ packages: - pkg:pypi/cffi?source=hash-mapping size: 294731 timestamp: 1761203441365 -- conda: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.5.0-pyhd8ed1ab_0.conda - sha256: aa589352e61bb221351a79e5946d56916e3c595783994884accdb3b97fe9d449 - md5: 381bd45fb7aa032691f3063aff47e3a1 - depends: - - python >=3.10 - license: MIT - license_family: MIT - purls: - - pkg:pypi/cfgv?source=hash-mapping - size: 13589 - timestamp: 1763607964133 - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda sha256: b32f8362e885f1b8417bac2b3da4db7323faa12d5db62b7fd6691c02d60d6f59 md5: a22d1fd9bf98827e280a02875d9a007a @@ -3788,17 +3700,6 @@ packages: - pkg:pypi/defusedxml?source=hash-mapping size: 24062 timestamp: 1615232388757 -- conda: https://conda.anaconda.org/conda-forge/noarch/distlib-0.4.0-pyhd8ed1ab_0.conda - sha256: 6d977f0b2fc24fee21a9554389ab83070db341af6d6f09285360b2e09ef8b26e - md5: 003b8ba0a94e2f1e117d0bd46aebc901 - depends: - - python >=3.9 - license: Apache-2.0 - license_family: APACHE - purls: - - pkg:pypi/distlib?source=hash-mapping - size: 275642 - timestamp: 1752823081585 - conda: 
https://conda.anaconda.org/conda-forge/linux-64/elfutils-0.194-h849f50c_0.conda sha256: f71eae7dc8ff9392d225d2d529691b2db16289b7d8009646eeb1adf0caf3937b md5: 6da1f998c8ea85ba7692afbb5db72fb9 @@ -3852,16 +3753,6 @@ packages: - pkg:pypi/executing?source=hash-mapping size: 30753 timestamp: 1756729456476 -- conda: https://conda.anaconda.org/conda-forge/noarch/filelock-3.20.2-pyhd8ed1ab_0.conda - sha256: 8c4210ed4dc439e87528635e226042ddab9bf458d4d0a12e7ba48d6c5babd0f8 - md5: 7e7cf4d6c2be6991e6ae2b3f4331701c - depends: - - python >=3.10 - license: Unlicense - purls: - - pkg:pypi/filelock?source=compressed-mapping - size: 18646 - timestamp: 1767377337824 - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda sha256: dc81e6283bd2cdc5e8a3e5c88527870b2992a8f71f25ddec9dd995223c08aed8 md5: 261bd75b03d09c5eeea5aedf7365e811 @@ -4157,18 +4048,6 @@ packages: purls: [] size: 13849749 timestamp: 1766299627069 -- conda: https://conda.anaconda.org/conda-forge/noarch/identify-2.6.15-pyhd8ed1ab_0.conda - sha256: 32d5007d12e5731867908cbf5345f5cd44a6c8755a2e8e63e15a184826a51f82 - md5: 25f954b7dae6dd7b0dc004dab74f1ce9 - depends: - - python >=3.10 - - ukkonen - license: MIT - license_family: MIT - purls: - - pkg:pypi/identify?source=hash-mapping - size: 79151 - timestamp: 1759437561529 - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda sha256: ae89d0299ada2a3162c2614a9d26557a92aa6a77120ce142f8e0109bbf0342b0 md5: 53abe63df7e10a6ba605dc5f9f961d36 @@ -6725,18 +6604,6 @@ packages: - pkg:pypi/networkx?source=compressed-mapping size: 1587439 timestamp: 1765215107045 -- conda: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.10.0-pyhd8ed1ab_0.conda - sha256: 4fa40e3e13fc6ea0a93f67dfc76c96190afd7ea4ffc1bac2612d954b42cdc3ee - md5: eb52d14a901e23c39e9e7b4a1a5c015f - depends: - - python >=3.10 - - setuptools - license: BSD-3-Clause - license_family: BSD - purls: - - pkg:pypi/nodeenv?source=hash-mapping - size: 40866 - timestamp: 1766261270149 - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda sha256: 7b920e46b9f7a2d2aa6434222e5c8d739021dbc5cc75f32d124a8191d86f9056 md5: e7f89ea5f7ea9401642758ff50a2d9c1 @@ -7503,22 +7370,43 @@ packages: - pkg:pypi/pluggy?source=compressed-mapping size: 25877 timestamp: 1764896838868 -- conda: https://conda.anaconda.org/conda-forge/noarch/pre-commit-4.5.1-pyha770c72_0.conda - sha256: 5b81b7516d4baf43d0c185896b245fa7384b25dc5615e7baa504b7fa4e07b706 - md5: 7f3ac694319c7eaf81a0325d6405e974 +- conda: https://conda.anaconda.org/conda-forge/linux-64/prek-0.3.0-hb17b654_0.conda + sha256: 5a97802244394fa59b9868cbeeeb7b88102608f4e9b70a386672e3634f04c578 + md5: 22a0109b98aa8c0b324f4a8b68dce7b5 depends: - - cfgv >=2.0.0 - - identify >=1.0.0 - - nodeenv >=0.11.1 - - python >=3.10 - - pyyaml >=5.1 - - virtualenv >=20.10.0 + - libgcc >=14 + - __glibc >=2.17,<3.0.a0 + constrains: + - __glibc >=2.17 license: MIT license_family: MIT - purls: - - pkg:pypi/pre-commit?source=hash-mapping - size: 200827 - timestamp: 1765937577534 + purls: [] + size: 4691139 + timestamp: 1769236430772 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/prek-0.3.0-h6fdd925_0.conda + sha256: 375344751361cf4c110fa9f712f86b953fb327a5d91016e60c24d115cc76ee8a + md5: ff0b1270caf4965286d1a12b7e2c1f94 + depends: + - __osx >=11.0 + constrains: + - __osx >=11.0 + license: MIT + license_family: MIT + purls: [] + size: 4302853 + timestamp: 1769236582783 +- conda: 
https://conda.anaconda.org/conda-forge/win-64/prek-0.3.0-h18a1a76_0.conda + sha256: 5ad8041ded09bbc8ac863a440a5cd855706508c99af8615dfc63237c466fe73e + md5: b28e209b4c161efb64af7260c05097e3 + depends: + - vc >=14.3,<15 + - vc14_runtime >=14.44.35208 + - ucrt >=10.0.20348.0 + license: MIT + license_family: MIT + purls: [] + size: 4974031 + timestamp: 1769236447810 - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.23.1-pyhd8ed1ab_0.conda sha256: 13dc67de68db151ff909f2c1d2486fa7e2d51355b25cee08d26ede1b62d48d40 md5: a1e91db2d17fd258c64921cb38e6745a @@ -8471,8 +8359,8 @@ packages: timestamp: 1753199211006 - pypi: ./ name: skillmodels - version: 0.0.24.dev248+gb15372a52.d20260111 - sha256: 351c9cc8e9a879fe282da02d64b6d79d4326650bb3466ea8db23b29628fba895 + version: 0.0.24.dev257+gf8dac75d8.d20260128 + sha256: 38a2501c7691f383a3800cca35841c15b739e5ab614472eb630093397311fd4b requires_dist: - dags - frozendict @@ -8916,54 +8804,6 @@ packages: purls: [] size: 694692 timestamp: 1756385147981 -- conda: https://conda.anaconda.org/conda-forge/linux-64/ukkonen-1.0.1-py314h9891dd4_6.conda - sha256: ef6753f6febaa74d35253e4e0dd09dc9497af8e370893bd97c479f59346daa57 - md5: 28303a78c48916ab07b95ffdbffdfd6c - depends: - - __glibc >=2.17,<3.0.a0 - - cffi - - libgcc >=14 - - libstdcxx >=14 - - python >=3.14,<3.15.0a0 - - python_abi 3.14.* *_cp314 - license: MIT - license_family: MIT - purls: - - pkg:pypi/ukkonen?source=hash-mapping - size: 14762 - timestamp: 1761594960135 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/ukkonen-1.0.1-py314h6b18a25_6.conda - sha256: 2ef342cc861c52ec3ac464e89b192a37fd7afd79740b2c0773d2588fd8acff26 - md5: 452b75f09bc2a4c5eea4044b769bc659 - depends: - - __osx >=11.0 - - cffi - - libcxx >=19 - - python >=3.14,<3.15.0a0 - - python >=3.14,<3.15.0a0 *_cp314 - - python_abi 3.14.* *_cp314 - license: MIT - license_family: MIT - purls: - - pkg:pypi/ukkonen?source=hash-mapping - size: 14635 - timestamp: 1761595172213 -- conda: https://conda.anaconda.org/conda-forge/win-64/ukkonen-1.0.1-py314h909e829_6.conda - sha256: f65b3bf31d22ae37300ed2521352107be830e7c5ba805a4c93e2ce0e0f739078 - md5: 8528e182a2d9b5d14f0072734a24a6b9 - depends: - - cffi - - python >=3.14,<3.15.0a0 - - python_abi 3.14.* *_cp314 - - ucrt >=10.0.20348.0 - - vc >=14.3,<15 - - vc14_runtime >=14.44.35208 - license: MIT - license_family: MIT - purls: - - pkg:pypi/ukkonen?source=hash-mapping - size: 18357 - timestamp: 1761595080794 - conda: https://conda.anaconda.org/conda-forge/linux-64/unicodedata2-17.0.0-py314h5bd0f2a_1.conda sha256: d1dafc15fc5d2b1dd5b0a525e8a815028de20dd53b2c775a1b56e8e4839fb736 md5: 58e2ee530005067c5db23f33c6ab43d2 @@ -9070,21 +8910,6 @@ packages: purls: [] size: 115235 timestamp: 1767320173250 -- conda: https://conda.anaconda.org/conda-forge/noarch/virtualenv-20.36.0-pyhd8ed1ab_0.conda - sha256: cbb40ae88ccc72e95ce00911a73d9175eead4fb4e74925b0e9557bb60737317e - md5: c9a9b6e144b880308f5eedc905fe503d - depends: - - distlib >=0.3.7,<1 - - filelock >=3.20.1,<4 - - platformdirs >=3.9.1,<5 - - python >=3.10 - - typing_extensions >=4.13.2 - license: MIT - license_family: MIT - purls: - - pkg:pypi/virtualenv?source=hash-mapping - size: 4403353 - timestamp: 1767880093070 - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.14-pyhd8ed1ab_0.conda sha256: e311b64e46c6739e2a35ab8582c20fa30eb608da130625ed379f4467219d4813 md5: 7e1e5ff31239f9cd5855714df8a3783d diff --git a/pyproject.toml b/pyproject.toml index eac78f9d..4b9a27ed 100644 --- a/pyproject.toml +++ b/pyproject.toml 
@@ -100,8 +100,9 @@ networkx = "*" numpy = ">=2.3,<2.4" # optimagic = "*" plotly = ">=6.2" -pre-commit = "*" +prek = "*" pybaum = "*" +python = "~=3.14.0" python-kaleido = ">=1.0" scipy = "*"  @@ -174,19 +175,22 @@ unsafe-fixes = false  [tool.ruff.lint] select = ["ALL"] extend-ignore = [ -    "ANN401", # Dynamically typed expressions (typing.Any) are disallowed - too strict -    "COM812", # In conflict with formatter -    "EM101", # exception must not use a string literal -    "EM102", # exception must not use an f-string literal -    "FBT002", # Boolean default positional argument in function definition -    "ISC001", # In conflict with formatter -    "NPY002", # Leave Numpy's legacy RNG -    "PD015", # pd.merge is fine +    "ANN401", # Dynamically typed expressions (typing.Any) are disallowed - too strict +    "COM812", # Conflicts with ruff-format +    "EM101", # Exception must not use a string literal +    "EM102", # Exception must not use an f-string literal +    "FBT002", # Boolean default positional argument in function definition +    "FIX002", # Line contains TODO +    "ISC001", # Conflicts with ruff-format +    "PD015", # pd.merge is fine     "PERF401", # Many suggestions to use list comprehension are not helpful     "PLR0913", # Too many arguments to function call     "PLR2004", # Magic values are fine -    "S301", # `pickle` module is unsafe -    "TRY003", # long messages outside the exception class +    "S301", # `pickle` module is unsafe +    "TC001", # Move application import into a type-checking block +    "TC002", # Move third-party import into a type-checking block +    "TC003", # Move standard library import into a type-checking block +    "TRY003", # Long messages outside exception class ]  [tool.ruff.lint.per-file-ignores] @@ -241,12 +245,18 @@ unused-ignore-comment = "error" useless-overload-body = "error"  +# ====================================================================================== +# pytest configuration +# ======================================================================================+  [tool.pytest.ini_options] +addopts = ["--pdbcls=pdbp:Pdb"] +filterwarnings = [] markers = [ -    "wip: Tests that are work-in-progress.", -    "unit: Flag for unit tests which target mainly a single function.", -    "integration: Flag for integration tests which may comprise of multiple unit tests.", "end_to_end: Flag for tests that cover the whole program.", +    "integration: Flag for integration tests which may comprise multiple unit tests.", +    "unit: Flag for unit tests which target mainly a single function.", +    "wip: Tests that are work-in-progress.", ] norecursedirs = ["docs", ".envs"] diff --git a/src/skillmodels/check_model.py b/src/skillmodels/check_model.py index 28a618bb..863fab4f 100644 --- a/src/skillmodels/check_model.py +++ b/src/skillmodels/check_model.py @@ -1,11 +1,8 @@ """Functions to validate model specifications.""" -from typing import TYPE_CHECKING - import numpy as np -if TYPE_CHECKING: - from skillmodels.types import Anchoring, Dimensions, Labels +from skillmodels.types import Anchoring, Dimensions, Labels  def check_model( diff --git a/src/skillmodels/constraints.py b/src/skillmodels/constraints.py index 308ed4dc..69327340 100644 --- a/src/skillmodels/constraints.py +++ b/src/skillmodels/constraints.py @@ -3,18 +3,20 @@ import functools import warnings from dataclasses import dataclass -from typing import TYPE_CHECKING, Any +from typing import Any  import numpy as np import optimagic as om +import pandas as pd  import skillmodels.transition_functions as t_f_module -from skillmodels.types import MeasurementType - -if TYPE_CHECKING: - 
import pandas as pd - - from skillmodels.types import Anchoring, Dimensions, EndogenousFactorsInfo, Labels +from skillmodels.types import ( + Anchoring, + Dimensions, + EndogenousFactorsInfo, + Labels, + MeasurementType, +) def get_constraints_dicts( diff --git a/src/skillmodels/correlation_heatmap.py b/src/skillmodels/correlation_heatmap.py index 2fc422d9..e89d1b5c 100644 --- a/src/skillmodels/correlation_heatmap.py +++ b/src/skillmodels/correlation_heatmap.py @@ -1,19 +1,16 @@ """Functions for creating correlation heatmap visualizations.""" -from typing import TYPE_CHECKING, Any +from typing import Any import numpy as np import pandas as pd +from numpy.typing import NDArray from plotly import graph_objects as go -from skillmodels.model_spec import ModelSpec # noqa: TC001 +from skillmodels.model_spec import ModelSpec from skillmodels.process_data import pre_process_data from skillmodels.process_model import process_model - -if TYPE_CHECKING: - from numpy.typing import NDArray - - from skillmodels.types import ProcessedModel +from skillmodels.types import ProcessedModel def plot_correlation_heatmap( diff --git a/src/skillmodels/filtered_states.py b/src/skillmodels/filtered_states.py index e6fc75c0..a443a82d 100644 --- a/src/skillmodels/filtered_states.py +++ b/src/skillmodels/filtered_states.py @@ -1,20 +1,18 @@ """Functions to compute and process filtered latent states.""" -from typing import TYPE_CHECKING, Any +from typing import Any import jax.numpy as jnp import numpy as np +import pandas as pd from skillmodels.maximization_inputs import get_maximization_inputs -from skillmodels.model_spec import ModelSpec # noqa: TC001 +from skillmodels.model_spec import ModelSpec from skillmodels.params_index import get_params_index from skillmodels.parse_params import create_parsing_info, parse_params from skillmodels.process_debug_data import create_state_ranges from skillmodels.process_model import process_model -if TYPE_CHECKING: - import pandas as pd - def get_filtered_states( model: dict | ModelSpec, diff --git a/src/skillmodels/maximization_inputs.py b/src/skillmodels/maximization_inputs.py index 97dc6f82..01d94dba 100644 --- a/src/skillmodels/maximization_inputs.py +++ b/src/skillmodels/maximization_inputs.py @@ -1,15 +1,15 @@ """Functions to create inputs for optimization of the log-likelihood.""" import functools -from collections.abc import Callable # noqa: TC003 -from typing import TYPE_CHECKING, Any +from collections.abc import Callable +from typing import Any import jax import jax.numpy as jnp import numpy as np import pandas as pd from jax import Array -from numpy.typing import NDArray # noqa: TC002 +from numpy.typing import NDArray import skillmodels.likelihood_function as lf import skillmodels.likelihood_function_debug as lfd @@ -20,16 +20,13 @@ get_constraints_dicts, ) from skillmodels.kalman_filters import calculate_sigma_scaling_factor_and_weights -from skillmodels.model_spec import ModelSpec # noqa: TC001 +from skillmodels.model_spec import ModelSpec from skillmodels.params_index import get_params_index from skillmodels.parse_params import create_parsing_info from skillmodels.process_data import process_data from skillmodels.process_debug_data import process_debug_data from skillmodels.process_model import process_model -from skillmodels.types import ParsingInfo # noqa: TC001 - -if TYPE_CHECKING: - from skillmodels.types import ProcessedModel +from skillmodels.types import ParsingInfo, ProcessedModel jax.config.update("jax_enable_x64", True) # noqa: FBT003 diff --git 
a/src/skillmodels/model_spec.py b/src/skillmodels/model_spec.py index 8de6f1ae..c98d321c 100644 --- a/src/skillmodels/model_spec.py +++ b/src/skillmodels/model_spec.py @@ -5,15 +5,13 @@ (tuples, frozendict) to ensure the specification cannot be accidentally modified. """ +from collections.abc import Callable from dataclasses import dataclass, field from types import MappingProxyType -from typing import TYPE_CHECKING, Self +from typing import Self from frozendict import frozendict -if TYPE_CHECKING: - from collections.abc import Callable - @dataclass(frozen=True) class Normalizations: diff --git a/src/skillmodels/params_index.py b/src/skillmodels/params_index.py index e1d12b86..c3a19587 100644 --- a/src/skillmodels/params_index.py +++ b/src/skillmodels/params_index.py @@ -1,16 +1,13 @@ """Functions to construct the parameter index for model estimation.""" -from typing import TYPE_CHECKING - import pandas as pd -if TYPE_CHECKING: - from skillmodels.types import ( - Dimensions, - EndogenousFactorsInfo, - Labels, - TransitionInfo, - ) +from skillmodels.types import ( + Dimensions, + EndogenousFactorsInfo, + Labels, + TransitionInfo, +) def get_params_index( diff --git a/src/skillmodels/parse_params.py b/src/skillmodels/parse_params.py index e1762348..3a52d6c9 100644 --- a/src/skillmodels/parse_params.py +++ b/src/skillmodels/parse_params.py @@ -1,17 +1,20 @@ """Functions to parse parameter vectors into structured dictionaries.""" import warnings -from typing import TYPE_CHECKING import jax.numpy as jnp import numpy as np import pandas as pd from jax import Array -from skillmodels.types import LoadingsParsingInfo, ParsedParams, ParsingInfo - -if TYPE_CHECKING: - from skillmodels.types import Anchoring, Dimensions, Labels +from skillmodels.types import ( + Anchoring, + Dimensions, + Labels, + LoadingsParsingInfo, + ParsedParams, + ParsingInfo, +) def create_parsing_info( diff --git a/src/skillmodels/process_data.py b/src/skillmodels/process_data.py index 289b4354..36cfee56 100644 --- a/src/skillmodels/process_data.py +++ b/src/skillmodels/process_data.py @@ -1,15 +1,14 @@ """Functions to process and prepare data for model estimation.""" import warnings -from typing import TYPE_CHECKING, Any, Literal +from typing import Any, Literal import jax.numpy as jnp import numpy as np import pandas as pd from jax import Array -if TYPE_CHECKING: - from skillmodels.types import Anchoring, Labels +from skillmodels.types import Anchoring, Labels def process_data( diff --git a/src/skillmodels/process_debug_data.py b/src/skillmodels/process_debug_data.py index 6a572155..13f2928b 100644 --- a/src/skillmodels/process_debug_data.py +++ b/src/skillmodels/process_debug_data.py @@ -1,15 +1,13 @@ """Functions to process debug output from likelihood function into DataFrames.""" -from typing import TYPE_CHECKING, Any +from typing import Any import numpy as np import pandas as pd +from jax import Array +from numpy.typing import NDArray -if TYPE_CHECKING: - from jax import Array - from numpy.typing import NDArray - - from skillmodels.types import ProcessedModel +from skillmodels.types import ProcessedModel def process_debug_data( diff --git a/src/skillmodels/process_model.py b/src/skillmodels/process_model.py index 1048631f..42b0ba52 100644 --- a/src/skillmodels/process_model.py +++ b/src/skillmodels/process_model.py @@ -1,9 +1,10 @@ """Functions to process model specifications from user-friendly to internal form.""" +from collections.abc import KeysView, Mapping from copy import deepcopy from dataclasses import replace 
from functools import partial -from typing import TYPE_CHECKING, Any +from typing import Any import numpy as np import pandas as pd @@ -30,9 +31,6 @@ TransitionInfo, ) -if TYPE_CHECKING: - from collections.abc import KeysView, Mapping - pd.set_option("future.no_silent_downcasting", True) # noqa: FBT003 diff --git a/src/skillmodels/simulate_data.py b/src/skillmodels/simulate_data.py index de4f0955..0bd690d7 100644 --- a/src/skillmodels/simulate_data.py +++ b/src/skillmodels/simulate_data.py @@ -1,26 +1,15 @@ """Functions to simulate a dataset generated by a latent factor model.""" import warnings -from typing import TYPE_CHECKING +from collections.abc import Mapping import jax.numpy as jnp import numpy as np import pandas as pd from jax import Array +from numpy.typing import NDArray from skillmodels.filtered_states import anchor_states_df - -if TYPE_CHECKING: - from collections.abc import Mapping - - from numpy.typing import NDArray - - from skillmodels.types import ( - Dimensions, - EndogenousFactorsInfo, - Labels, - TransitionInfo, - ) from skillmodels.kalman_filters import transform_sigma_points from skillmodels.model_spec import ModelSpec from skillmodels.params_index import get_params_index @@ -28,7 +17,14 @@ from skillmodels.process_data import process_data from skillmodels.process_debug_data import create_state_ranges from skillmodels.process_model import process_model -from skillmodels.types import MeasurementType, ParsedParams +from skillmodels.types import ( + Dimensions, + EndogenousFactorsInfo, + Labels, + MeasurementType, + ParsedParams, + TransitionInfo, +) def simulate_dataset( diff --git a/src/skillmodels/visualize_factor_distributions.py b/src/skillmodels/visualize_factor_distributions.py index f4dfed7c..cb5b37a6 100644 --- a/src/skillmodels/visualize_factor_distributions.py +++ b/src/skillmodels/visualize_factor_distributions.py @@ -1,29 +1,25 @@ """Functions to visualize distributions of latent factors.""" import warnings +from collections.abc import Mapping from copy import deepcopy -from typing import TYPE_CHECKING, Any +from typing import Any import numpy as np import pandas as pd import plotly.express as px import plotly.figure_factory as ff import plotly.graph_objects as go +from numpy.typing import NDArray from plotly.subplots import make_subplots from scipy.stats import gaussian_kde from skillmodels.filtered_states import get_filtered_states -from skillmodels.model_spec import ModelSpec # noqa: TC001 +from skillmodels.model_spec import ModelSpec from skillmodels.process_model import process_model +from skillmodels.types import ProcessedModel from skillmodels.utils_plotting import get_layout_kwargs, get_make_subplot_kwargs -if TYPE_CHECKING: - from collections.abc import Mapping - - from numpy.typing import NDArray - - from skillmodels.types import ProcessedModel - def combine_distribution_plots( kde_plots: dict[str, go.Figure], diff --git a/src/skillmodels/visualize_transition_equations.py b/src/skillmodels/visualize_transition_equations.py index eba82292..37ef1997 100644 --- a/src/skillmodels/visualize_transition_equations.py +++ b/src/skillmodels/visualize_transition_equations.py @@ -1,9 +1,9 @@ """Functions to visualize transition equations and production functions.""" import itertools -from collections.abc import Callable # noqa: TC003 +from collections.abc import Callable from copy import deepcopy -from typing import TYPE_CHECKING, Any +from typing import Any import jax.numpy as jnp import numpy as np @@ -14,18 +14,15 @@ from plotly.subplots import 
make_subplots from skillmodels.filtered_states import get_filtered_states -from skillmodels.model_spec import ModelSpec # noqa: TC001 +from skillmodels.model_spec import ModelSpec from skillmodels.params_index import get_params_index from skillmodels.parse_params import create_parsing_info, parse_params from skillmodels.process_data import process_data from skillmodels.process_debug_data import create_state_ranges from skillmodels.process_model import process_model -from skillmodels.types import ParsedParams # noqa: TC001 +from skillmodels.types import ParsedParams, ProcessedModel from skillmodels.utils_plotting import get_layout_kwargs, get_make_subplot_kwargs -if TYPE_CHECKING: - from skillmodels.types import ProcessedModel - def combine_transition_plots( plots_dict: dict[tuple[str, str], go.Figure], diff --git a/tests/test_qr.py b/tests/test_qr.py index ff231acf..b6f74532 100644 --- a/tests/test_qr.py +++ b/tests/test_qr.py @@ -1,18 +1,14 @@ """Tests for custom QR decomposition.""" -from typing import TYPE_CHECKING - import jax import jax.numpy as jnp import numpy as np import pytest from numpy.testing import assert_array_almost_equal as aaae +from numpy.typing import NDArray from skillmodels.qr import qr_gpu -if TYPE_CHECKING: - from numpy.typing import NDArray - SEED = 20 From 97d84b83d33002ed2cb7c772066dbe39c2771fc4 Mon Sep 17 00:00:00 2001 From: Hans-Martin von Gaudecker Date: Wed, 28 Jan 2026 11:09:28 +0100 Subject: [PATCH 22/27] Update hooks and clean up --- .pre-commit-config.yaml | 4 ++-- src/skillmodels/decorators.py | 2 +- src/skillmodels/kalman_filters.py | 2 +- src/skillmodels/likelihood_function.py | 4 ++-- src/skillmodels/likelihood_function_debug.py | 4 ++-- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9af75621..68111924 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -44,7 +44,7 @@ repos: hooks: - id: yamllint - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.14.11 + rev: v0.14.14 hooks: - id: ruff-check types_or: @@ -59,7 +59,7 @@ repos: - pyi - jupyter - repo: https://github.com/kynan/nbstripout - rev: 0.8.2 + rev: 0.9.0 hooks: - id: nbstripout args: diff --git a/src/skillmodels/decorators.py b/src/skillmodels/decorators.py index e1c834a7..53060d00 100644 --- a/src/skillmodels/decorators.py +++ b/src/skillmodels/decorators.py @@ -1,7 +1,7 @@ """Decorators for parameter extraction and registration in transition functions.""" import functools -from collections.abc import Callable # noqa: TC003 +from collections.abc import Callable from typing import Any import jax.numpy as jnp diff --git a/src/skillmodels/kalman_filters.py b/src/skillmodels/kalman_filters.py index d1b2b97a..113fbdcb 100644 --- a/src/skillmodels/kalman_filters.py +++ b/src/skillmodels/kalman_filters.py @@ -1,6 +1,6 @@ """Kalman filter operations for state estimation using the square-root form.""" -from collections.abc import Callable # noqa: TC003 +from collections.abc import Callable import jax import jax.numpy as jnp diff --git a/src/skillmodels/likelihood_function.py b/src/skillmodels/likelihood_function.py index d72055ac..501aba22 100644 --- a/src/skillmodels/likelihood_function.py +++ b/src/skillmodels/likelihood_function.py @@ -1,7 +1,7 @@ """Log-likelihood function for latent factor models.""" import functools -from collections.abc import Callable # noqa: TC003 +from collections.abc import Callable from typing import Any import jax @@ -14,7 +14,7 @@ kalman_update, ) from 
skillmodels.parse_params import parse_params -from skillmodels.types import ( # noqa: TC001 +from skillmodels.types import ( Dimensions, EstimationOptions, Labels, diff --git a/src/skillmodels/likelihood_function_debug.py b/src/skillmodels/likelihood_function_debug.py index 800b5b6f..90431e75 100644 --- a/src/skillmodels/likelihood_function_debug.py +++ b/src/skillmodels/likelihood_function_debug.py @@ -1,7 +1,7 @@ """Debug version of log-likelihood function that returns intermediate results.""" import functools -from collections.abc import Callable # noqa: TC003 +from collections.abc import Callable from typing import Any import jax @@ -12,7 +12,7 @@ from skillmodels.kalman_filters import kalman_predict from skillmodels.kalman_filters_debug import kalman_update from skillmodels.parse_params import parse_params -from skillmodels.types import ( # noqa: TC001 +from skillmodels.types import ( Dimensions, EstimationOptions, Labels, From b8a9fbcd1c3837b4f301945f1ea941885bec196f Mon Sep 17 00:00:00 2001 From: Hans-Martin von Gaudecker Date: Thu, 29 Jan 2026 05:31:58 +0100 Subject: [PATCH 23/27] Call by name throughout. --- src/skillmodels/check_model.py | 24 ++-- src/skillmodels/constraints.py | 2 +- src/skillmodels/correlation_heatmap.py | 106 +++++++++--------- src/skillmodels/kalman_filters.py | 18 +-- src/skillmodels/likelihood_function.py | 10 +- src/skillmodels/likelihood_function_debug.py | 20 ++-- src/skillmodels/maximization_inputs.py | 4 +- src/skillmodels/parse_params.py | 60 ++++++---- src/skillmodels/process_data.py | 34 ++++-- src/skillmodels/process_debug_data.py | 24 ++-- src/skillmodels/process_model.py | 13 ++- src/skillmodels/qr.py | 4 +- src/skillmodels/simulate_data.py | 32 +++--- src/skillmodels/transition_functions.py | 6 +- src/skillmodels/utilities.py | 51 +++++---- .../visualize_factor_distributions.py | 20 ++-- .../visualize_transition_equations.py | 30 +++-- 17 files changed, 268 insertions(+), 190 deletions(-) diff --git a/src/skillmodels/check_model.py b/src/skillmodels/check_model.py index 863fab4f..2c4731e2 100644 --- a/src/skillmodels/check_model.py +++ b/src/skillmodels/check_model.py @@ -40,15 +40,19 @@ def check_model( is_augmented=has_endogenous_factors, ) report += _check_anchoring(anchoring) - invalid_measurements = _check_measurements(model_dict, labels.latent_factors) + invalid_measurements = _check_measurements( + model_dict=model_dict, factors=labels.latent_factors + ) if invalid_measurements: report += invalid_measurements elif has_endogenous_factors: # Make this conditional because the check only works for valid meas. report += _check_no_overlap_in_measurements_of_states_and_inv( - model_dict, labels + model_dict=model_dict, labels=labels ) - report += _check_normalizations(model_dict, labels.latent_factors) + report += _check_normalizations( + model_dict=model_dict, factors=labels.latent_factors + ) report = "\n".join(report) if report != "": @@ -112,7 +116,7 @@ def _check_measurements( report: list[str] = [] for factor in factors: candidate = model_dict["factors"][factor]["measurements"] - if not _is_list_of(candidate, list): + if not _is_list_of(candidate=candidate, type_=list): report.append( f"measurements must be lists of lists. 
Check measurements of {factor}.", ) @@ -156,22 +160,22 @@ def _check_normalizations( norminfo = model_dict["factors"][factor].get("normalizations", {}) for norm_type in ["loadings", "intercepts"]: candidate = norminfo.get(norm_type, []) - if not _is_list_of(candidate, dict): + if not _is_list_of(candidate=candidate, type_=dict): report.append( f"normalizations must be lists of dicts. Check {norm_type} " f"normalizations for {factor}.", ) else: report += _check_normalized_variables_are_present( - candidate, - model_dict, - factor, + list_of_normdicts=candidate, + model_dict=model_dict, + factor=factor, ) if norm_type == "loadings": report += _check_loadings_are_not_normalized_to_zero( - candidate, - factor, + list_of_normdicts=candidate, + factor=factor, ) return report diff --git a/src/skillmodels/constraints.py b/src/skillmodels/constraints.py index 69327340..e7da1aa5 100644 --- a/src/skillmodels/constraints.py +++ b/src/skillmodels/constraints.py @@ -56,7 +56,7 @@ def get_constraints_dicts( constraints_dicts = [] constraints_dicts += _get_normalization_constraints( - normalizations, labels.latent_factors + normalizations=normalizations, factors=labels.latent_factors ) constraints_dicts += _get_mixture_weights_constraints(dimensions.n_mixtures) constraints_dicts += _get_stage_constraints( diff --git a/src/skillmodels/correlation_heatmap.py b/src/skillmodels/correlation_heatmap.py index e89d1b5c..0b60018e 100644 --- a/src/skillmodels/correlation_heatmap.py +++ b/src/skillmodels/correlation_heatmap.py @@ -95,16 +95,16 @@ def plot_correlation_heatmap( """ corr = _process_corr_data_for_plotting( - corr, - rounding, + corr=corr, + rounding=rounding, show_upper_triangle=show_upper_triangle, show_diagonal=show_diagonal, trim_heatmap=trim_heatmap, ) heatmap_kwargs = _get_heatmap_kwargs( - corr, - heatmap_kwargs, - colorscale, + corr=corr, + heatmap_kwargs=heatmap_kwargs, + colorscale=colorscale, show_color_bar=show_color_bar, zmax=zmax, zmin=zmin, @@ -160,9 +160,11 @@ def get_measurements_corr( """ data = data.copy(deep=True) processed_model = process_model(model) - periods = _process_periods(periods, processed_model) - processed_data = pre_process_data(data, periods) - latent_factors, observed_factors = _process_factors(processed_model, factors) + periods = _process_periods(periods=periods, model=processed_model) + processed_data = pre_process_data(df=data, periods=periods) + latent_factors, observed_factors = _process_factors( + model=processed_model, factors=factors + ) update_info_by_period = _get_update_info_for_periods(processed_model) df = _get_measurement_data( data=processed_data, @@ -205,9 +207,11 @@ def get_quasi_scores_corr( """ data = data.copy(deep=True) processed_model = process_model(model) - periods = _process_periods(periods, processed_model) - processed_data = pre_process_data(data, periods) - latent_factors, observed_factors = _process_factors(processed_model, factors) + periods = _process_periods(periods=periods, model=processed_model) + processed_data = pre_process_data(df=data, periods=periods) + latent_factors, observed_factors = _process_factors( + model=processed_model, factors=factors + ) update_info = _get_update_info_for_periods(processed_model) df = _get_quasi_factor_scores_data( data=processed_data, @@ -250,9 +254,11 @@ def get_scores_corr( """ data = data.copy(deep=True) processed_model = process_model(model) - periods = _process_periods(periods, processed_model) - processed_data = pre_process_data(data, periods) - latent_factors, observed_factors = 
_process_factors(processed_model, factors) + periods = _process_periods(periods=periods, model=processed_model) + processed_data = pre_process_data(df=data, periods=periods) + latent_factors, observed_factors = _process_factors( + model=processed_model, factors=factors + ) params = params.loc[["controls", "loadings"]] df = _get_factor_scores_data( data=processed_data, @@ -477,19 +483,19 @@ def _get_quasi_factor_scores_data( if len(periods) == 1: period = periods[0] df = _get_quasi_factor_scores_data_for_single_period( - data, - update_info_by_period, - period, - latent_factors, - observed_factors, + data=data, + update_info_by_period=update_info_by_period, + period=period, + latent_factors=latent_factors, + observed_factors=observed_factors, ) else: df = _get_quasi_factor_scores_data_for_multiple_periods( - data, - update_info_by_period, - periods, - latent_factors, - observed_factors, + data=data, + update_info_by_period=update_info_by_period, + periods=periods, + latent_factors=latent_factors, + observed_factors=observed_factors, ) return df @@ -565,11 +571,11 @@ def _get_quasi_factor_scores_data_for_multiple_periods( for period in periods: to_concat.append( _get_quasi_factor_scores_data_for_single_period( - data, - update_info_by_period, - period, - latent_factors, - observed_factors, + data=data, + update_info_by_period=update_info_by_period, + period=period, + latent_factors=latent_factors, + observed_factors=observed_factors, ) .add_suffix(f", {period}") .reset_index(drop=True), @@ -610,21 +616,21 @@ def _get_factor_scores_data( if len(periods) == 1: period = periods[0] df = _get_factor_scores_data_for_single_period( - data, - params, - model, - period, - latent_factors, - observed_factors, + data=data, + params=params, + model=model, + period=period, + latent_factors=latent_factors, + observed_factors=observed_factors, ) else: df = _get_factor_scores_data_for_multiple_periods( - data, - params, - model, - periods, - latent_factors, - observed_factors, + data=data, + params=params, + model=model, + periods=periods, + latent_factors=latent_factors, + observed_factors=observed_factors, ) return df @@ -770,12 +776,12 @@ def _get_factor_scores_data_for_multiple_periods( for period in periods: to_concat.append( _get_factor_scores_data_for_single_period( - data, - params, - model, - period, - latent_factors, - observed_factors, + data=data, + params=params, + model=model, + period=period, + latent_factors=latent_factors, + observed_factors=observed_factors, ) .add_suffix(f", {period}") .reset_index(drop=True), @@ -868,9 +874,9 @@ def _get_layout_kwargs( ) default_layout_kwargs.update( _get_axes_ticks_kwargs( - axes_tick_fontsize, - axes_tick_label_angle, - axes_tick_label_color, + axes_tick_fontsize=axes_tick_fontsize, + axes_tick_label_angle=axes_tick_label_angle, + axes_tick_label_color=axes_tick_label_color, ), ) if layout_kwargs: diff --git a/src/skillmodels/kalman_filters.py b/src/skillmodels/kalman_filters.py index 113fbdcb..10bc034f 100644 --- a/src/skillmodels/kalman_filters.py +++ b/src/skillmodels/kalman_filters.py @@ -206,17 +206,17 @@ def kalman_predict( """ sigma_points = _calculate_sigma_points( - states, - upper_chols, - sigma_scaling_factor, - observed_factors, + states=states, + upper_chols=upper_chols, + scaling_factor=sigma_scaling_factor, + observed_factors=observed_factors, ) transformed = transform_sigma_points( - sigma_points, - transition_func, - trans_coeffs, - anchoring_scaling_factors, - anchoring_constants, + sigma_points=sigma_points, + 
transition_func=transition_func, + trans_coeffs=trans_coeffs, + anchoring_scaling_factors=anchoring_scaling_factors, + anchoring_constants=anchoring_constants, ) # do not use sigma_points.shape because sigma_points contain observed factors diff --git a/src/skillmodels/likelihood_function.py b/src/skillmodels/likelihood_function.py index 501aba22..5d6ec5c5 100644 --- a/src/skillmodels/likelihood_function.py +++ b/src/skillmodels/likelihood_function.py @@ -148,11 +148,11 @@ def log_likelihood_obs( """ n_obs = measurements.shape[1] states, upper_chols, log_mixture_weights, parsed_params = parse_params( - params, - parsing_info, - dimensions, - labels, - n_obs, + params=params, + parsing_info=parsing_info, + dimensions=dimensions, + labels=labels, + n_obs=n_obs, ) carry = { diff --git a/src/skillmodels/likelihood_function_debug.py b/src/skillmodels/likelihood_function_debug.py index 90431e75..203d90f9 100644 --- a/src/skillmodels/likelihood_function_debug.py +++ b/src/skillmodels/likelihood_function_debug.py @@ -73,11 +73,11 @@ def log_likelihood( """ n_obs = measurements.shape[1] states, upper_chols, log_mixture_weights, parsed_params = parse_params( - params, - parsing_info, - dimensions, - labels, - n_obs, + params=params, + parsing_info=parsing_info, + dimensions=dimensions, + labels=labels, + n_obs=n_obs, ) carry = { @@ -132,11 +132,11 @@ def log_likelihood( out["residual_sds"] = static_out["residual_sds"] initial_states, _, initial_log_mixture_weights, _ = parse_params( - params, - parsing_info, - dimensions, - labels, - n_obs, + params=params, + parsing_info=parsing_info, + dimensions=dimensions, + labels=labels, + n_obs=n_obs, ) out["initial_states"] = initial_states out["initial_log_mixture_weights"] = initial_log_mixture_weights diff --git a/src/skillmodels/maximization_inputs.py b/src/skillmodels/maximization_inputs.py index 01d94dba..6e2dd2ed 100644 --- a/src/skillmodels/maximization_inputs.py +++ b/src/skillmodels/maximization_inputs.py @@ -93,8 +93,8 @@ def get_maximization_inputs( ) sigma_scaling_factor, sigma_weights = calculate_sigma_scaling_factor_and_weights( - processed_model.dimensions.n_latent_factors, - processed_model.estimation_options.sigma_points_scale, + n_states=processed_model.dimensions.n_latent_factors, + kappa=processed_model.estimation_options.sigma_points_scale, ) partialed_get_jnp_params_vec = functools.partial( diff --git a/src/skillmodels/parse_params.py b/src/skillmodels/parse_params.py index 3a52d6c9..06b4c3ef 100644 --- a/src/skillmodels/parse_params.py +++ b/src/skillmodels/parse_params.py @@ -45,12 +45,18 @@ def create_parsing_info( range_sr = pd.Series(data=np.arange(len(params_index)), index=params_index) # Simple quantities - initial_states = _get_positional_selector_from_loc(range_sr, "initial_states") - initial_cholcovs = _get_positional_selector_from_loc(range_sr, "initial_cholcovs") - mixture_weights = _get_positional_selector_from_loc(range_sr, "mixture_weights") - controls = _get_positional_selector_from_loc(range_sr, "controls") - meas_sds = _get_positional_selector_from_loc(range_sr, "meas_sds") - shock_sds = _get_positional_selector_from_loc(range_sr, "shock_sds") + initial_states = _get_positional_selector_from_loc( + range_sr=range_sr, loc="initial_states" + ) + initial_cholcovs = _get_positional_selector_from_loc( + range_sr=range_sr, loc="initial_cholcovs" + ) + mixture_weights = _get_positional_selector_from_loc( + range_sr=range_sr, loc="mixture_weights" + ) + controls = _get_positional_selector_from_loc(range_sr=range_sr, 
loc="controls") + meas_sds = _get_positional_selector_from_loc(range_sr=range_sr, loc="meas_sds") + shock_sds = _get_positional_selector_from_loc(range_sr=range_sr, loc="shock_sds") # loadings: mask = update_info[list(labels.latent_factors)].to_numpy() @@ -58,7 +64,7 @@ def create_parsing_info( flat_indices = helper[mask] loadings = LoadingsParsingInfo( - slice=_get_positional_selector_from_loc(range_sr, "loadings"), + slice=_get_positional_selector_from_loc(range_sr=range_sr, loc="loadings"), flat_indices=jnp.array(flat_indices), shape=mask.shape, size=mask.size, @@ -69,7 +75,9 @@ def create_parsing_info( for factor in list(labels.latent_factors): helper_df = pd.DataFrame(index=params_index) loc = helper_df.query(f"category == 'transition' & name1 == '{factor}'").index - transition[factor] = _get_positional_selector_from_loc(range_sr, loc) + transition[factor] = _get_positional_selector_from_loc( + range_sr=range_sr, loc=loc + ) # anchoring_scaling_factors is_free_loading = update_info[list(labels.latent_factors)].to_numpy() @@ -147,26 +155,34 @@ def parse_params( - ParsedParams dataclass with other model parameters. """ - states = _get_initial_states(params, parsing_info, dimensions, n_obs) - upper_chols = _get_initial_upper_chols(params, parsing_info, dimensions, n_obs) - log_weights = _get_initial_log_mixture_weights(params, parsing_info, n_obs) + states = _get_initial_states( + params=params, info=parsing_info, dimensions=dimensions, n_obs=n_obs + ) + upper_chols = _get_initial_upper_chols( + params=params, info=parsing_info, dimensions=dimensions, n_obs=n_obs + ) + log_weights = _get_initial_log_mixture_weights( + params=params, info=parsing_info, n_obs=n_obs + ) - controls = _get_control_params(params, parsing_info, dimensions) - loadings = _get_loadings(params, parsing_info) - meas_sds = _get_meas_sds(params, parsing_info) - shock_sds = _get_shock_sds(params, parsing_info, dimensions) - transition = _get_transition_params(params, parsing_info, labels) + controls = _get_control_params( + params=params, info=parsing_info, dimensions=dimensions + ) + loadings = _get_loadings(params=params, info=parsing_info) + meas_sds = _get_meas_sds(params=params, info=parsing_info) + shock_sds = _get_shock_sds(params=params, info=parsing_info, dimensions=dimensions) + transition = _get_transition_params(params=params, info=parsing_info, labels=labels) anchoring_scaling_factors = _get_anchoring_scaling_factors( - loadings, - parsing_info, - dimensions, + loadings=loadings, + info=parsing_info, + dimensions=dimensions, ) anchoring_constants = _get_anchoring_constants( - controls, - parsing_info, - dimensions, + controls=controls, + info=parsing_info, + dimensions=dimensions, ) parsed = ParsedParams( diff --git a/src/skillmodels/process_data.py b/src/skillmodels/process_data.py index 36cfee56..f48257a2 100644 --- a/src/skillmodels/process_data.py +++ b/src/skillmodels/process_data.py @@ -45,24 +45,32 @@ def process_data( Only returned if estimation==True """ - df = pre_process_data(df, labels.periods) + df = pre_process_data(df=df, periods=labels.periods) df["constant"] = 1 out = {} - df = _add_copies_of_anchoring_outcome(df, anchoring_info) + df = _add_copies_of_anchoring_outcome(df=df, anchoring_info=anchoring_info) if has_endogenous_factors: - df = _augment_data_for_endogenous_factors(df, labels, update_info) + df = _augment_data_for_endogenous_factors( + df=df, labels=labels, update_info=update_info + ) else: df.index = df.index.set_names(["id", "aug_period"]) - _check_data(df, update_info, 
labels, purpose=purpose) + _check_data(df=df, update_info=update_info, labels=labels, purpose=purpose) n_obs = int(len(df) / len(labels.aug_periods)) - df = _handle_controls_with_missings(df, labels.controls, update_info) - out["controls"] = _generate_controls_array(df, labels, n_obs) - out["observed_factors"] = _generate_observed_factor_array(df, labels, n_obs) + df = _handle_controls_with_missings( + df=df, controls=labels.controls, update_info=update_info + ) + out["controls"] = _generate_controls_array(df=df, labels=labels, n_obs=n_obs) + out["observed_factors"] = _generate_observed_factor_array( + df=df, labels=labels, n_obs=n_obs + ) if purpose == "estimation": - out["measurements"] = _generate_measurements_array(df, update_info, n_obs) + out["measurements"] = _generate_measurements_array( + df=df, update_info=update_info, n_obs=n_obs + ) return out @@ -108,7 +116,7 @@ def _get_period_data_for_endogenous_factors( labels: Labels, update_info: pd.DataFrame, ) -> pd.DataFrame: - meas = _get_period_measurements(update_info, aug_period) + meas = _get_period_measurements(update_info=update_info, aug_period=aug_period) controls = labels.controls observed = labels.observed_factors @@ -188,7 +196,9 @@ def _check_data( # noqa: C901 var_report.loc[(aug_period, cont), "problem"] = "Variable is missing" if purpose == "estimation": - for meas in _get_period_measurements(update_info, aug_period): + for meas in _get_period_measurements( + update_info=update_info, aug_period=aug_period + ): if meas not in period_data.columns: var_report.loc[(aug_period, meas), "problem"] = ( "Variable is missing" @@ -222,7 +232,9 @@ def _handle_controls_with_missings( for aug_period in aug_periods: period_data = df.query(f"aug_period == {aug_period}") control_data = period_data[list(controls)] - meas_data = period_data[_get_period_measurements(update_info, aug_period)] + meas_data = period_data[ + _get_period_measurements(update_info=update_info, aug_period=aug_period) + ] problem = control_data.isna().any(axis=1) & meas_data.notna().any(axis=1) problematic_index = problematic_index.union(period_data[problem].index) diff --git a/src/skillmodels/process_debug_data.py b/src/skillmodels/process_debug_data.py index 13f2928b..f8d055a0 100644 --- a/src/skillmodels/process_debug_data.py +++ b/src/skillmodels/process_debug_data.py @@ -66,9 +66,9 @@ def process_debug_data( factors = model.labels.latent_factors post_update_states = _create_post_update_states( - debug_data["filtered_states"], - factors, - update_info, + filtered_states=debug_data["filtered_states"], + factors=factors, + update_info=update_info, ) filtered_states = _create_filtered_states( @@ -78,14 +78,18 @@ def process_debug_data( factors=factors, ) - state_ranges = create_state_ranges(filtered_states, factors) + state_ranges = create_state_ranges(filtered_states=filtered_states, factors=factors) - residuals = _process_residuals(debug_data["residuals"], update_info) - residual_sds = _process_residual_sds(debug_data["residual_sds"], update_info) + residuals = _process_residuals( + residuals=debug_data["residuals"], update_info=update_info + ) + residual_sds = _process_residual_sds( + residual_sds=debug_data["residual_sds"], update_info=update_info + ) all_contributions = _process_all_contributions( - debug_data["all_contributions"], - update_info, + all_contributions=debug_data["all_contributions"], + update_info=update_info, ) res = { @@ -113,7 +117,7 @@ def _create_post_update_states( for (aug_period, meas), data in zip( update_info.index, filtered_states, 
strict=False ): - df = _convert_state_array_to_df(data, factors) + df = _convert_state_array_to_df(arr=data, factor_names=factors) df["aug_period"] = aug_period df["id"] = np.arange(len(df)) df["measurement"] = meas @@ -206,7 +210,7 @@ def _process_residual_sds( residual_sds: Array, update_info: pd.DataFrame, ) -> pd.DataFrame: - return _process_residuals(residual_sds, update_info) + return _process_residuals(residuals=residual_sds, update_info=update_info) def _process_all_contributions( diff --git a/src/skillmodels/process_model.py b/src/skillmodels/process_model.py index 42b0ba52..f58889e2 100644 --- a/src/skillmodels/process_model.py +++ b/src/skillmodels/process_model.py @@ -113,7 +113,7 @@ def process_model(model: dict | ModelSpec) -> ProcessedModel: anchoring=anchoring, has_endogenous_factors=has_endogenous_factors, ) - transition_info = _get_transition_info(_model_dict_aug, labels) + transition_info = _get_transition_info(model_dict=_model_dict_aug, labels=labels) labels = replace( labels, transition_names=tuple(transition_info.function_names.values()) ) @@ -124,8 +124,15 @@ def process_model(model: dict | ModelSpec) -> ProcessedModel: anchoring=anchoring, estimation_options=_process_estimation_options(_model_dict_aug), transition_info=transition_info, - update_info=_get_update_info(_model_dict_aug, dims, labels, anchoring), - normalizations=_process_normalizations(_model_dict_aug, dims, labels), + update_info=_get_update_info( + model_dict=_model_dict_aug, + dimensions=dims, + labels=labels, + anchoring_info=anchoring, + ), + normalizations=_process_normalizations( + model_dict=_model_dict_aug, dimensions=dims, labels=labels + ), endogenous_factors_info=endogenous_factors_info, ) diff --git a/src/skillmodels/qr.py b/src/skillmodels/qr.py index 834ff533..566276b4 100644 --- a/src/skillmodels/qr.py +++ b/src/skillmodels/qr.py @@ -10,7 +10,7 @@ def qr_gpu(a: Array) -> tuple[Array, Array]: """Custom implementation of the QR Decomposition.""" r, tau = jnp.linalg.qr(a, mode="raw") - q = _householder(r.mT, tau) + q = _householder(r=r.mT, tau=tau) return q, jnp.triu(r.mT[: tau.shape[0]]) @@ -65,7 +65,7 @@ def qr_jvp_rule( q, r = qr_gpu(x) dx_rinv = jax.lax.linalg.triangular_solve(r, dx) # Right side solve by default qt_dx_rinv = _h(q) @ dx_rinv - qt_dx_rinv_lower = _tril(qt_dx_rinv, -1) + qt_dx_rinv_lower = _tril(m=qt_dx_rinv, k=-1) do = qt_dx_rinv_lower - _h(qt_dx_rinv_lower) # This is skew-symmetric # The following correction is necessary for complex inputs n = x.shape[-1] diff --git a/src/skillmodels/simulate_data.py b/src/skillmodels/simulate_data.py index 0bd690d7..dfede0b8 100644 --- a/src/skillmodels/simulate_data.py +++ b/src/skillmodels/simulate_data.py @@ -164,22 +164,22 @@ def simulate_dataset( "unanchored_states": { "states": latent_data, "state_ranges": create_state_ranges( - latent_data, - processed_model.labels.latent_factors, + filtered_states=latent_data, + factors=processed_model.labels.latent_factors, ), }, "anchored_states": { "states": anchored_latent_data, "state_ranges": create_state_ranges( - anchored_latent_data, - processed_model.labels.latent_factors, + filtered_states=anchored_latent_data, + factors=processed_model.labels.latent_factors, ), }, "aug_unanchored_states": { "states": aug_latent_data, "state_ranges": create_state_ranges( - aug_latent_data, - processed_model.labels.latent_factors, + filtered_states=aug_latent_data, + factors=processed_model.labels.latent_factors, ), }, "aug_measurements": aug_measurements, @@ -265,7 +265,13 @@ def _simulate_dataset( 
dist_args.append(args) latent_states = np.zeros((n_aug_periods, n_obs, n_states)) # ty: ignore[invalid-assignment] - latent_states[0] = generate_start_states(rng, n_obs, dimensions, dist_args, weights) + latent_states[0] = generate_start_states( + rng=rng, + n_obs=n_obs, + dimensions=dimensions, + dist_args=dist_args, + weights=weights, + ) for t in range(n_aug_periods - 1): # if there is a shock in period t, add it here @@ -320,12 +326,12 @@ def _simulate_dataset( for t in range(n_aug_periods): meas = pd.DataFrame( data=measurements_from_states( - rng, - latent_states[t], # ty: ignore[invalid-argument-type] - control_data[t], # ty: ignore[invalid-argument-type] - loadings_df.loc[t].to_numpy(), - control_params_df.loc[t].to_numpy(), - meas_sds.loc[t].to_numpy().flatten(), + rng=rng, + states=latent_states[t], # ty: ignore[invalid-argument-type] + controls=control_data[t], # ty: ignore[invalid-argument-type] + loadings=loadings_df.loc[t].to_numpy(), + control_params=control_params_df.loc[t].to_numpy(), + sds=meas_sds.loc[t].to_numpy().flatten(), ), columns=loadings_df.loc[t].index, ) diff --git a/src/skillmodels/transition_functions.py b/src/skillmodels/transition_functions.py index e8d08fcf..9233557b 100644 --- a/src/skillmodels/transition_functions.py +++ b/src/skillmodels/transition_functions.py @@ -181,7 +181,7 @@ def robust_translog(states: Array, params: Array) -> Array: """ clipped_states = jnp.clip(states, -1e12, 1e12) - return translog(clipped_states, params) + return translog(states=clipped_states, params=params) def params_robust_translog(factors: tuple[str, ...]) -> list[str]: @@ -195,7 +195,9 @@ def identity_constraints_robust_translog( all_factors: tuple[str, ...], ) -> list[dict]: """Identity constraints for robust_translog.""" - return identity_constraints_translog(factor, aug_period, all_factors) + return identity_constraints_translog( + factor=factor, aug_period=aug_period, all_factors=all_factors + ) def linear_and_squares(states: Array, params: Array) -> Array: diff --git a/src/skillmodels/utilities.py b/src/skillmodels/utilities.py index 411b1c1a..6b161c6b 100644 --- a/src/skillmodels/utilities.py +++ b/src/skillmodels/utilities.py @@ -38,7 +38,7 @@ def extract_factors( factors = [factors] to_remove = list(set(model_dict["factors"]).difference(factors)) - return remove_factors(to_remove, model_dict, params) + return remove_factors(factors=to_remove, model_dict=model_dict, params=params) def update_parameter_values( @@ -103,16 +103,16 @@ def remove_factors( out = deepcopy(model_dict) - out["factors"] = _remove_from_dict(out["factors"], factors) + out["factors"] = _remove_from_dict(dict_=out["factors"], to_remove=factors) # adjust anchoring if "anchoring" in model_dict: out["anchoring"]["outcomes"] = _remove_from_dict( - out["anchoring"]["outcomes"], - factors, + dict_=out["anchoring"]["outcomes"], + to_remove=factors, ) if out["anchoring"]["outcomes"] == {}: - out = _remove_from_dict(out, "anchoring") + out = _remove_from_dict(dict_=out, to_remove="anchoring") # Remove periods if necessary, but only if no endogenous factors are present. 
# (else we would mess up the mapping between raw periods model periods) @@ -120,7 +120,7 @@ def remove_factors( new_n_periods = get_dimensions( out, has_endogenous_factors=has_endogenous_factors ).n_periods - out = reduce_n_periods(out, new_n_periods) + out = reduce_n_periods(model_dict=out, new_n_periods=new_n_periods) if params is not None: out_params = _reduce_params( @@ -156,23 +156,26 @@ def remove_measurements( for factor in model_dict["factors"]: full = model_dict["factors"][factor]["measurements"] - reduced = [_remove_from_list(meas_list, measurements) for meas_list in full] + reduced = [ + _remove_from_list(list_=meas_list, to_remove=measurements) + for meas_list in full + ] out["factors"][factor]["measurements"] = reduced norminfo = model_dict["factors"][factor].get("normalizations", {}) if "loadings" in norminfo: out["factors"][factor]["normalizations"]["loadings"] = ( _remove_measurements_from_normalizations( - measurements, - norminfo["loadings"], + measurements=measurements, + normalizations=norminfo["loadings"], ) ) if "intercepts" in norminfo: out["factors"][factor]["normalizations"]["intercepts"] = ( _remove_measurements_from_normalizations( - measurements, - norminfo["intercepts"], + measurements=measurements, + normalizations=norminfo["intercepts"], ) ) @@ -204,9 +207,9 @@ def remove_controls( """ out = deepcopy(model_dict) - out["controls"] = _remove_from_list(out["controls"], controls) + out["controls"] = _remove_from_list(list_=out["controls"], to_remove=controls) if out["controls"] == []: - out = _remove_from_dict(out, "controls") + out = _remove_from_dict(dict_=out, to_remove="controls") if params is not None: # This likely won't work if we have endogenous factors. @@ -272,7 +275,7 @@ def switch_linear_to_translog( out["factors"][factor]["transition_function"] = "translog" if params is not None: - out_params = _extend_params(params, out, 0.05) + out_params = _extend_params(params=params, model_dict=out, fill_value=0.05) out = (out, out_params) return out @@ -297,26 +300,30 @@ def reduce_n_periods( out = deepcopy(model_dict) for factor in model_dict["factors"]: out["factors"][factor]["measurements"] = _shorten_if_necessary( - out["factors"][factor]["measurements"], - new_n_periods, + list_=out["factors"][factor]["measurements"], + length=new_n_periods, ) norminfo = model_dict["factors"][factor].get("normalizations", {}) if "loadings" in norminfo: out["factors"][factor]["normalizations"]["loadings"] = ( - _shorten_if_necessary(norminfo["loadings"], new_n_periods) + _shorten_if_necessary(list_=norminfo["loadings"], length=new_n_periods) ) if "intercepts" in norminfo: out["factors"][factor]["normalizations"]["intercepts"] = ( - _shorten_if_necessary(norminfo["intercepts"], new_n_periods) + _shorten_if_necessary( + list_=norminfo["intercepts"], length=new_n_periods + ) ) if "stagemap" in out: - out["stagemap"] = _shorten_if_necessary(out["stagemap"], new_n_periods - 1) + out["stagemap"] = _shorten_if_necessary( + list_=out["stagemap"], length=new_n_periods - 1 + ) if params is not None: - out_params = _extend_params(params, out, 0.05) + out_params = _extend_params(params=params, model_dict=out, fill_value=0.05) out = (out, out_params) return out @@ -410,7 +417,9 @@ def _remove_measurements_from_normalizations( measurements: str | list[str], normalizations: list[dict[str, Any]], ) -> list[dict[str, Any]]: - reduced = [_remove_from_dict(norm, measurements) for norm in normalizations] + reduced = [ + _remove_from_dict(dict_=norm, to_remove=measurements) for norm in 
normalizations + ] if reduced != normalizations: warnings.warn( "Your removed a normalized measurement from a model. Make sure there are " diff --git a/src/skillmodels/visualize_factor_distributions.py b/src/skillmodels/visualize_factor_distributions.py index cb5b37a6..e11ec405 100644 --- a/src/skillmodels/visualize_factor_distributions.py +++ b/src/skillmodels/visualize_factor_distributions.py @@ -87,8 +87,8 @@ def combine_distribution_plots( contour_plots = deepcopy(contour_plots) surface_plots = deepcopy(surface_plots) factors = list(kde_plots.keys()) - factor_names = _process_factor_mapping_dist(factor_mapping, factors) - ordered_factors = _get_ordered_factors(factor_order, factors) + factor_names = _process_factor_mapping_dist(mapper=factor_mapping, factors=factors) + ordered_factors = _get_ordered_factors(factor_order=factor_order, factors=factors) make_subplot_kwargs = get_make_subplot_kwargs( sharex=sharex, sharey=sharey, @@ -229,7 +229,7 @@ def univariate_densities( factors=factors, observed_factors=observed_factors, ) - observed_states = _get_data_observed_factors(data, factors) + observed_states = _get_data_observed_factors(data=data, factors=factors) df = _process_data( states=states, period=period, @@ -374,9 +374,9 @@ def bivariate_density_contours( for i, scenario in enumerate(df["scenario"].unique()): try: x, y, z = _calculate_kde_for_3d( - df[df["scenario"] == scenario], - pair, - n_points, + data=df[df["scenario"] == scenario], + factors=pair, + n_points=n_points, ) contour = go.Contour( x=x[:, 0], @@ -476,7 +476,7 @@ def bivariate_density_surfaces( factors=factors, observed_factors=observed_factors, ) - observed_states = _get_data_observed_factors(data, factors) + observed_states = _get_data_observed_factors(data=data, factors=factors) df = _process_data( states=states, period=period, @@ -499,7 +499,7 @@ def bivariate_density_surfaces( pairs = list(set(pairs)) for pair in pairs: try: - x, y, z = _calculate_kde_for_3d(df, pair, n_points) + x, y, z = _calculate_kde_for_3d(data=df, factors=pair, n_points=n_points) fig = go.Figure( go.Surface( x=x, @@ -567,7 +567,7 @@ def _process_data( ap_to_p = pd.Series(aug_periods_to_periods, name="period") ap_to_p.index.name = "aug_period" if isinstance(states, pd.DataFrame): - one_state_per_period = _get_one_state_per_period(states, ap_to_p) + one_state_per_period = _get_one_state_per_period(states=states, ap_to_p=ap_to_p) to_concat = [] for fac in factors: if fac in one_state_per_period: @@ -579,7 +579,7 @@ def _process_data( states = dict(enumerate(states)) to_concat = [] for name, df in states.items(): - one_state_per_period = _get_one_state_per_period(df, ap_to_p) + one_state_per_period = _get_one_state_per_period(states=df, ap_to_p=ap_to_p) to_keep = one_state_per_period.query(f"period == {period}")[factors].copy() to_keep["scenario"] = name to_concat.append(to_keep) diff --git a/src/skillmodels/visualize_transition_equations.py b/src/skillmodels/visualize_transition_equations.py index 37ef1997..35dfe5af 100644 --- a/src/skillmodels/visualize_transition_equations.py +++ b/src/skillmodels/visualize_transition_equations.py @@ -80,7 +80,9 @@ def combine_transition_plots( """ plots_dict = deepcopy(plots_dict) - column_order, row_order = _process_orders(column_order, row_order, plots_dict) + column_order, row_order = _process_orders( + columns=column_order, rows=row_order, plots_dict=plots_dict + ) make_subplot_kwargs = get_make_subplot_kwargs( sharex=sharex, sharey=sharey, @@ -89,9 +91,9 @@ def combine_transition_plots( 
make_subplot_kwargs=make_subplot_kwargs, ) factor_mapping = _process_factor_mapping_trans( - factor_mapping, - row_order, - column_order, + factor_mapper=factor_mapping, + output_factors=row_order, + input_factors=column_order, ) fig = make_subplots(**make_subplot_kwargs) for (output_factor, input_factor), (row, col) in zip( @@ -287,10 +289,18 @@ def _get_dictionary_with_plots( """ observed_factors = model.labels.observed_factors - states_data = _get_states_data(model, period, data, states, observed_factors) - params = _set_index_params(model, params) - parsed_params = _get_parsed_params(model, params) - state_ranges = _get_state_ranges(state_ranges, states_data, all_factors) + states_data = _get_states_data( + model=model, + period=period, + data=data, + states=states, + observed_factors=observed_factors, + ) + params = _set_index_params(model=model, params=params) + parsed_params = _get_parsed_params(model=model, params=params) + state_ranges = _get_state_ranges( + state_ranges=state_ranges, states_data=states_data, all_factors=all_factors + ) layout_kwargs = get_layout_kwargs( layout_kwargs=layout_kwargs, legend_kwargs=None, @@ -373,7 +383,9 @@ def _get_state_ranges( ) -> dict[str, pd.DataFrame]: """Create state ranges if none is given.""" if state_ranges is None: - state_ranges = create_state_ranges(states_data, list(all_factors)) + state_ranges = create_state_ranges( + filtered_states=states_data, factors=list(all_factors) + ) return state_ranges From 5012a8bbcdc20165df2120691f4ae5a7609e4f86 Mon Sep 17 00:00:00 2001 From: Hans-Martin von Gaudecker Date: Thu, 29 Jan 2026 06:07:35 +0100 Subject: [PATCH 24/27] Autogenerated docs, harmonised hooks / project configuration. --- .gitignore | 1 + .pre-commit-config.yaml | 29 +- .readthedocs.yaml | 20 + .yamllint.yml | 14 +- docs/Makefile | 216 ----- docs/{source => }/_static/css/custom.css | 0 docs/{source => }/_static/images/book.svg | 0 docs/{source => }/_static/images/books.svg | 0 docs/{source => }/_static/images/coding.svg | 0 .../_static/images/light-bulb.svg | 0 docs/{source => }/_static/images/logo.svg | 0 docs/explanations/names_and_concepts.md | 83 ++ docs/explanations/notes_on_factor_scales.md | 84 ++ docs/getting_started/tutorial.ipynb | 274 ++++++ .../how_to_simulate_dataset.ipynb | 0 .../how_to_visualize_correlations.ipynb | 0 ...sualize_pairwise_factor_distribution.ipynb | 0 ...ow_to_visualize_transition_equations.ipynb | 0 docs/how_to_guides/model_specs.md | 172 ++++ docs/index.md | 52 ++ docs/make.bat | 263 ------ docs/myst.yml | 51 ++ .../endogeneity_corrections.md | 50 ++ docs/reference_guides/transition_functions.md | 128 +++ docs/source/chs_test_params.csv | 209 ----- docs/source/conf.py | 155 ---- docs/source/explanations/index.rst | 9 - .../explanations/names_and_concepts.rst | 95 -- .../explanations/notes_on_factor_scales.rst | 158 ---- docs/source/getting_started/index.rst | 7 - docs/source/getting_started/tutorial.ipynb | 381 -------- docs/source/how_to_guides/index.rst | 13 - docs/source/how_to_guides/model_specs.rst | 236 ----- docs/source/how_to_guides/utilities.rst | 14 - docs/source/index.rst | 142 --- .../endogeneity_corrections.rst | 47 - docs/source/reference_guides/estimation.rst | 42 - docs/source/reference_guides/index.rst | 12 - .../reference_guides/pre_processing.rst | 37 - docs/source/reference_guides/simulation.rst | 12 - .../reference_guides/transition_functions.rst | 8 - docs/source/rtd_environment.yml | 11 - docs/source/start_params.csv | 237 ----- docs/source/start_params_template.csv | 237 ----- 
pixi.lock | 836 +++++++++++++++++- pyproject.toml | 207 +++-- 46 files changed, 1870 insertions(+), 2672 deletions(-) create mode 100644 .readthedocs.yaml delete mode 100644 docs/Makefile rename docs/{source => }/_static/css/custom.css (100%) rename docs/{source => }/_static/images/book.svg (100%) rename docs/{source => }/_static/images/books.svg (100%) rename docs/{source => }/_static/images/coding.svg (100%) rename docs/{source => }/_static/images/light-bulb.svg (100%) rename docs/{source => }/_static/images/logo.svg (100%) create mode 100644 docs/explanations/names_and_concepts.md create mode 100644 docs/explanations/notes_on_factor_scales.md create mode 100644 docs/getting_started/tutorial.ipynb rename docs/{source => }/how_to_guides/how_to_simulate_dataset.ipynb (100%) rename docs/{source => }/how_to_guides/how_to_visualize_correlations.ipynb (100%) rename docs/{source => }/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb (100%) rename docs/{source => }/how_to_guides/how_to_visualize_transition_equations.ipynb (100%) create mode 100644 docs/how_to_guides/model_specs.md create mode 100644 docs/index.md delete mode 100644 docs/make.bat create mode 100644 docs/myst.yml create mode 100644 docs/reference_guides/endogeneity_corrections.md create mode 100644 docs/reference_guides/transition_functions.md delete mode 100644 docs/source/chs_test_params.csv delete mode 100644 docs/source/conf.py delete mode 100644 docs/source/explanations/index.rst delete mode 100644 docs/source/explanations/names_and_concepts.rst delete mode 100644 docs/source/explanations/notes_on_factor_scales.rst delete mode 100644 docs/source/getting_started/index.rst delete mode 100644 docs/source/getting_started/tutorial.ipynb delete mode 100644 docs/source/how_to_guides/index.rst delete mode 100644 docs/source/how_to_guides/model_specs.rst delete mode 100644 docs/source/how_to_guides/utilities.rst delete mode 100644 docs/source/index.rst delete mode 100644 docs/source/reference_guides/endogeneity_corrections.rst delete mode 100644 docs/source/reference_guides/estimation.rst delete mode 100644 docs/source/reference_guides/index.rst delete mode 100644 docs/source/reference_guides/pre_processing.rst delete mode 100644 docs/source/reference_guides/simulation.rst delete mode 100644 docs/source/reference_guides/transition_functions.rst delete mode 100644 docs/source/rtd_environment.yml delete mode 100644 docs/source/start_params.csv delete mode 100644 docs/source/start_params_template.csv diff --git a/.gitignore b/.gitignore index 39a277d2..edcf29f8 100644 --- a/.gitignore +++ b/.gitignore @@ -13,6 +13,7 @@ wheels/ # Documentation docs/_build/ +_build/ # IDE .idea/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 68111924..300eaf14 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,6 +4,10 @@ repos: hooks: - id: check-hooks-apply - id: check-useless-excludes + - repo: https://github.com/tox-dev/tox-toml-fmt + rev: v1.2.2 + hooks: + - id: tox-toml-fmt - repo: https://github.com/lyz-code/yamlfix rev: 1.19.1 hooks: @@ -14,31 +18,30 @@ repos: - id: check-added-large-files args: - --maxkb=50000 + - id: check-ast - id: check-case-conflict + - id: check-docstring-first - id: check-merge-conflict + - id: check-toml - id: check-vcs-permalinks - id: check-yaml - - id: check-toml - id: debug-statements - id: end-of-file-fixer - id: fix-byte-order-marker types: - text - - id: forbid-submodules - id: mixed-line-ending args: - --fix=lf description: Forces to replace line ending by 
the UNIX 'lf' character. + - id: name-tests-test + args: + - --pytest-test-first - id: no-commit-to-branch args: - --branch - main - - id: name-tests-test - args: - - --pytest-test-first - id: trailing-whitespace - - id: check-ast - - id: check-docstring-first - repo: https://github.com/adrienverge/yamllint.git rev: v1.38.0 hooks: @@ -47,17 +50,17 @@ repos: rev: v0.14.14 hooks: - id: ruff-check - types_or: - - python - - pyi - - jupyter args: - --fix - - id: ruff-format types_or: - - python + - jupyter - pyi + - python + - id: ruff-format + types_or: - jupyter + - pyi + - python - repo: https://github.com/kynan/nbstripout rev: 0.9.0 hooks: diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 00000000..f0038d2b --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,20 @@ +--- +version: 2 +build: + os: ubuntu-24.04 + tools: + python: '3.14' + nodejs: '22' + jobs: + create_environment: + - asdf plugin add pixi + - asdf install pixi latest + - asdf global pixi latest + install: + - pixi install -e docs + post_build: + # Jupyter Book 2.0 builds site content to _build/html. + # For ReadTheDocs, we build and then copy to the expected output location. + - mkdir --parents $READTHEDOCS_OUTPUT/html/ + - BASE_URL="/$READTHEDOCS_LANGUAGE/$READTHEDOCS_VERSION" pixi run -e docs docs + - cp -a docs/_build/html/. "$READTHEDOCS_OUTPUT/html" && rm -r docs/_build diff --git a/.yamllint.yml b/.yamllint.yml index 707dcd81..631965cd 100644 --- a/.yamllint.yml +++ b/.yamllint.yml @@ -1,10 +1,4 @@ --- -yaml-files: - - '*.yaml' - - '*.yml' - - .yamllint -ignore: - - src/skillmodels/test_data/simplest_augmented_model.yaml rules: braces: enable brackets: enable @@ -35,5 +29,11 @@ rules: quoted-strings: disable trailing-spaces: enable truthy: - level: warning check-keys: false + level: warning +yaml-files: + - '*.yaml' + - '*.yml' + - .yamllint +ignore: + - src/skillmodels/test_data/simplest_augmented_model.yaml diff --git a/docs/Makefile b/docs/Makefile deleted file mode 100644 index be6e0795..00000000 --- a/docs/Makefile +++ /dev/null @@ -1,216 +0,0 @@ -# Makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -PAPER = -BUILDDIR = build - -# User-friendly check for sphinx-build -ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) -$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) -endif - -# Internal variables. 
-PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source -# the i18n builder cannot share the environment and doctrees with the others -I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source - -.PHONY: help -help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " singlehtml to make a single large HTML file" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " applehelp to make an Apple Help Book" - @echo " devhelp to make HTML files and a Devhelp project" - @echo " epub to make an epub" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " latexpdf to make LaTeX files and run them through pdflatex" - @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" - @echo " text to make text files" - @echo " man to make manual pages" - @echo " texinfo to make Texinfo files" - @echo " info to make Texinfo files and run them through makeinfo" - @echo " gettext to make PO message catalogs" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " xml to make Docutils-native XML files" - @echo " pseudoxml to make pseudoxml-XML files for display purposes" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - @echo " coverage to run coverage check of the documentation (if enabled)" - -.PHONY: clean -clean: - rm -rf $(BUILDDIR)/* - -.PHONY: html -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -.PHONY: dirhtml -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -.PHONY: singlehtml -singlehtml: - $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml - @echo - @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." - -.PHONY: pickle -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -.PHONY: json -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -.PHONY: htmlhelp -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -.PHONY: qthelp -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/skillmodels.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/skillmodels.qhc" - -.PHONY: applehelp -applehelp: - $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp - @echo - @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." - @echo "N.B. 
You won't be able to view it unless you put it in" \ - "~/Library/Documentation/Help or install it in your application" \ - "bundle." - -.PHONY: devhelp -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." - @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/skillmodels" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/skillmodels" - @echo "# devhelp" - -.PHONY: epub -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." - -.PHONY: latex -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make' in that directory to run these through (pdf)latex" \ - "(use \`make latexpdf' here to do that automatically)." - -.PHONY: latexpdf -latexpdf: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -.PHONY: latexpdfja -latexpdfja: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through platex and dvipdfmx..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -.PHONY: text -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -.PHONY: man -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -.PHONY: texinfo -texinfo: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo - @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." - @echo "Run \`make' in that directory to run these through makeinfo" \ - "(use \`make info' here to do that automatically)." - -.PHONY: info -info: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo "Running Texinfo files through makeinfo..." - make -C $(BUILDDIR)/texinfo info - @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." - -.PHONY: gettext -gettext: - $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale - @echo - @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." - -.PHONY: changes -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." - -.PHONY: linkcheck -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." - -.PHONY: doctest -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." - -.PHONY: coverage -coverage: - $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage - @echo "Testing of coverage in the sources finished, look at the " \ - "results in $(BUILDDIR)/coverage/python.txt." - -.PHONY: xml -xml: - $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml - @echo - @echo "Build finished. The XML files are in $(BUILDDIR)/xml." - -.PHONY: pseudoxml -pseudoxml: - $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml - @echo - @echo "Build finished. 
The pseudo-XML files are in $(BUILDDIR)/pseudoxml." diff --git a/docs/source/_static/css/custom.css b/docs/_static/css/custom.css similarity index 100% rename from docs/source/_static/css/custom.css rename to docs/_static/css/custom.css diff --git a/docs/source/_static/images/book.svg b/docs/_static/images/book.svg similarity index 100% rename from docs/source/_static/images/book.svg rename to docs/_static/images/book.svg diff --git a/docs/source/_static/images/books.svg b/docs/_static/images/books.svg similarity index 100% rename from docs/source/_static/images/books.svg rename to docs/_static/images/books.svg diff --git a/docs/source/_static/images/coding.svg b/docs/_static/images/coding.svg similarity index 100% rename from docs/source/_static/images/coding.svg rename to docs/_static/images/coding.svg diff --git a/docs/source/_static/images/light-bulb.svg b/docs/_static/images/light-bulb.svg similarity index 100% rename from docs/source/_static/images/light-bulb.svg rename to docs/_static/images/light-bulb.svg diff --git a/docs/source/_static/images/logo.svg b/docs/_static/images/logo.svg similarity index 100% rename from docs/source/_static/images/logo.svg rename to docs/_static/images/logo.svg diff --git a/docs/explanations/names_and_concepts.md b/docs/explanations/names_and_concepts.md new file mode 100644 index 00000000..75a23060 --- /dev/null +++ b/docs/explanations/names_and_concepts.md @@ -0,0 +1,83 @@ +# Names and Concepts + +This section explains key concepts and variable names used throughout skillmodels. +Understanding these is helpful if you want to understand the implementation or extend +the package. + +## Dimensions + +The `Dimensions` dataclass contains integer values for model dimensions: + +- **n_latent_factors**: Number of latent factors (states) in the model +- **n_observed_factors**: Number of observed factors +- **n_periods**: Number of periods (one more than transition equations) +- **n_aug_periods**: Number of augmented periods (includes sub-periods for endogenous + factors) +- **n_mixtures**: Number of elements in the finite mixture of normals distribution +- **n_controls**: Number of control variables (always >= 1 due to constant) + +## Labels + +The `Labels` dataclass contains names and indices: + +- **latent_factors**: Tuple of latent factor names +- **observed_factors**: Tuple of observed factor names +- **controls**: Tuple of control variable names (first is always "constant") +- **periods**: Tuple of period indices (0, 1, 2, ...) +- **aug_periods**: Tuple of augmented period indices +- **stagemap**: Tuple mapping periods to stages +- **stages**: Tuple of stage indices + +## Development Stages vs Periods + +A **development stage** is a group of consecutive periods where the skill formation +technology (transition function parameters) remains constant. Stages are just equality +constraints on parameters. + +Example: With 5 periods, you can estimate at most 4 different transition functions. +The stagemap `[0, 0, 1, 1]` means: +- Periods 0→1 and 1→2 share the same parameters (stage 0) +- Periods 2→3 and 3→4 share the same parameters (stage 1) + +## Augmented Periods + +When models include endogenous factors (factors that depend on other factors in the +same period), skillmodels internally expands periods into "augmented periods" to handle +the sequential updating. Each regular period may contain multiple augmented periods. + +## Anchoring + +Anchoring links latent factors to observable outcomes, allowing identification and +interpretation of the factor scale. 
The `Anchoring` dataclass contains: + +- **outcomes**: Which factors are anchored to which outcome variables +- **free_controls**: Whether anchoring equations have their own control coefficients +- **free_constant**: Whether anchoring equations have a free constant +- **free_loadings**: Whether anchoring loadings are estimated (vs fixed to 1) +- **ignore_constant_when_anchoring**: Skip constant in anchoring transformation + +## Update Info + +A DataFrame specifying each Kalman update step: + +- One row per measurement equation evaluation +- Columns indicate which factors have free loadings for each measurement +- Used internally to structure the Kalman filter passes + +## Normalizations + +Settings for identifying the model scale and location: + +- **loadings**: Fixed factor loading values (cannot be zero) +- **intercepts**: Fixed intercept values for measurement equations + +Without normalizations, latent factor models are not identified (the scale and location +of factors are arbitrary). + +## Estimation Options + +The `EstimationOptions` dataclass controls numerical aspects: + +- **sigma_points_scale**: Controls spread of sigma points in unscented Kalman filter +- **robust_bounds**: Tightens parameter bounds to avoid numerical issues +- **clipping_***: Parameters for soft-clipping the log-likelihood to prevent infinities diff --git a/docs/explanations/notes_on_factor_scales.md b/docs/explanations/notes_on_factor_scales.md new file mode 100644 index 00000000..e938faf4 --- /dev/null +++ b/docs/explanations/notes_on_factor_scales.md @@ -0,0 +1,84 @@ +# Notes on Scales and Normalizations + +This section discusses factor scales and normalization, building on the +[critique by Wiswall and Agostinelli](https://tinyurl.com/y3wl43kz) of the original +CHS estimator. + +Wiswall and Agostinelli define a class of transition functions with Known Location and +Scale (KLS) that require fewer normalizations. Their critique potentially invalidates +certain empirical estimates from CHS, but not the general estimation approach. + +To get estimates that avoid renormalization issues, you can either: +1. Use fewer normalizations with KLS transition functions, or +2. Use non-KLS transition functions with one normalization per period and factor + +As there is no natural scale for skills, neither approach is inherently better. +However, we prefer using flexible non-KLS transition functions with explicit +normalizations because: +1. They are more compatible with development stages spanning multiple periods +2. Suitable normalizations can give latent factors a more meaningful interpretation + +## Why KLS Functions Don't Keep Scales Constant + +After reading the Wiswall-Agostinelli critique, one might think that using KLS +transition functions identifies some sort of "natural" scale. This is not the case. + +Consider a simple model of financial investments with two latent factors: +- **w**: wealth (stock variable) +- **i**: investment (flow variable) + +Suppose periods are one year and the annual interest rate is 10%. The most intuitive +representation measures everything in dollars: + +$$ +w_{t+1} = 1.1 w_t + i_t +$$ + +However, we could measure w in period t in dollars, i in 1000 dollars, and w in period +t+1 in cents. The transition equation becomes: + +$$ +w_{t+1} = 110 w_t + 100000 i_t +$$ + +This describes the exact same system in different scales. Any linear function could +describe this system—just with different scale combinations. 
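+To make this concrete, here is a minimal sketch (illustrative numbers only;
+the variables `w_t` and `i_t` are ours, not part of skillmodels). It checks
+that the two parameterizations above describe the same system, merely
+expressed in different units:
+
+```python
+# Hypothetical values: wealth and investment in period t, in dollars.
+w_t = 500.0
+i_t = 200.0
+
+# Everything measured in dollars:
+w_next_dollars = 1.1 * w_t + 1.0 * i_t
+
+# w_t in dollars, i_t in 1000 dollars, w_{t+1} in cents:
+w_next_cents = 110 * w_t + 100_000 * (i_t / 1_000)
+
+# Identical system: the "cents" result is 100 times the "dollars" result.
+assert abs(w_next_cents - 100 * w_next_dollars) < 1e-9
+```
+
+Any rescaling of the inputs and the output can be absorbed into the linear
+coefficients in this way, which is why the linear family itself pins down no
+particular scale.
+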
+ +The CES function is KLS and contains all linear functions (without intercept) whose +parameters sum to 1. If we set both factor scales to dollars initially, the CES +function would choose: + +$$ +w_{t+1} = \frac{1}{2.1}(1.1 w_t + i_t) \approx 0.524 w_t + 0.476 i_t +$$ + +This means wealth in period t+1 is measured in approximately 0.476 dollars—an +arbitrary choice made by the functional form, not something "natural." + +## Why CES and log_CES Functions are Problematic + +The KLS definition refers only to the scale of the output. But CES and log_CES +functions may also impose restrictions on input scales. + +Simulations suggest that with log_CES: +- You need initial location normalizations for all factors +- You only need to normalize the scale of one factor initially + +However, we don't have formal identification results for this. **We advise caution** +when using CES or log_CES functions—think carefully about your normalizations rather +than relying on automatic generation. + +## Normalizations and Development Stages + +When using development stages (periods with identical transition parameters), the +normalization requirements change. + +The key insight: you can identify scale from the first period of a stage, so no later +normalizations are needed until the next stage begins. + +**Recommendations:** +- Normalize only in the first period of each stage +- For the initial stage, normalize the first two periods +- Use automatic normalizations when working with stages to avoid confusion + +This reveals another type of over-normalization in the original CHS paper. diff --git a/docs/getting_started/tutorial.ipynb b/docs/getting_started/tutorial.ipynb new file mode 100644 index 00000000..dda7917b --- /dev/null +++ b/docs/getting_started/tutorial.ipynb @@ -0,0 +1,274 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "0", + "metadata": {}, + "source": [ + "# Skillmodels Quickstart\n", + "\n", + "This tutorial demonstrates the basic workflow for estimating a latent factor model\n", + "using skillmodels. We'll use Example 2 from the Cunha, Heckman, and Schennach (2010)\n", + "replication files." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "import yaml\n", + "\n", + "from skillmodels import get_maximization_inputs\n", + "from skillmodels.config import TEST_DATA_DIR" + ] + }, + { + "cell_type": "markdown", + "id": "2", + "metadata": {}, + "source": [ + "## Loading Model Specification and Data\n", + "\n", + "Models are specified as Python dictionaries (or using dataclasses). These can be\n", + "stored in YAML or JSON files for reuse.\n", + "\n", + "For this tutorial, we use a model specification that ships with skillmodels." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "with (TEST_DATA_DIR / \"model2.yaml\").open() as f:\n", + " model = yaml.safe_load(f)\n", + "\n", + "# Show the structure\n", + "print(\"Model keys:\", list(model.keys()))\n", + "print(\"Factors:\", list(model[\"factors\"].keys()))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "data = pd.read_stata(TEST_DATA_DIR / \"model2_simulated_data.dta\")\n", + "data = data.set_index([\"caseid\", \"period\"])\n", + "data.head()" + ] + }, + { + "cell_type": "markdown", + "id": "5", + "metadata": {}, + "source": [ + "## Getting Maximization Inputs\n", + "\n", + "The main entry point is `get_maximization_inputs()`. It takes a model specification\n", + "and dataset, and returns everything needed to maximize the likelihood using optimagic:\n", + "\n", + "- `loglike`: The compiled log-likelihood function\n", + "- `gradient`: The gradient of the log-likelihood\n", + "- `loglike_and_gradient`: Combined function (more efficient)\n", + "- `debug_loglike`: Uncompiled version for debugging\n", + "- `params_template`: Parameter DataFrame with bounds and starting values\n", + "- `constraints`: Parameter constraints for optimization" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "max_inputs = get_maximization_inputs(model, data)\n", + "print(\"Available keys:\", list(max_inputs.keys()))" + ] + }, + { + "cell_type": "markdown", + "id": "7", + "metadata": {}, + "source": [ + "## Parameter Template\n", + "\n", + "The `params_template` is a pandas DataFrame with:\n", + "- A MultiIndex identifying each parameter (category, period, name1, name2)\n", + "- Columns for `value` (to be filled with starting values), `lower_bound`, `upper_bound`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "params_template = max_inputs[\"params_template\"]\n", + "params_template.head(10)" + ] + }, + { + "cell_type": "markdown", + "id": "9", + "metadata": {}, + "source": [ + "## Choosing Starting Values\n", + "\n", + "Good starting values are important for optimization. 
As a rule of thumb:\n",
+    "\n",
+    "- If measurements are standardized, use 1.0 for free loadings and 0.0 for free intercepts\n",
+    "- Start measurement and shock standard deviations slightly larger than expected\n",
+    "- Initial state means can often start at 0\n",
+    "\n",
+    "Here we set reasonable defaults:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "10",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "params = params_template.copy()\n",
+    "\n",
+    "# Set starting values by category\n",
+    "for category in params.index.get_level_values(\"category\").unique():\n",
+    "    if category == \"loadings\":\n",
+    "        params.loc[category, \"value\"] = 1.0\n",
+    "    elif category == \"controls\":\n",
+    "        params.loc[category, \"value\"] = 0.0\n",
+    "    elif category in (\"meas_sds\", \"shock_sds\", \"initial_cholcovs\"):\n",
+    "        params.loc[category, \"value\"] = 0.5\n",
+    "    elif category == \"initial_states\":\n",
+    "        params.loc[category, \"value\"] = 0.0\n",
+    "    elif category == \"mixture_weights\":\n",
+    "        params.loc[category, \"value\"] = 1.0\n",
+    "    elif category == \"transition\":\n",
+    "        # Set transition parameters to reasonable defaults\n",
+    "        params.loc[category, \"value\"] = 0.5\n",
+    "\n",
+    "params.head(10)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "11",
+   "metadata": {},
+   "source": [
+    "## JAX Compilation\n",
+    "\n",
+    "Skillmodels uses JAX for just-in-time compilation and automatic differentiation.\n",
+    "The first call to `loglike` or `gradient` triggers compilation, which takes a few\n",
+    "seconds. Subsequent calls are very fast."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "12",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "loglike = max_inputs[\"loglike\"]\n",
+    "gradient = max_inputs[\"gradient\"]\n",
+    "loglike_and_gradient = max_inputs[\"loglike_and_gradient\"]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "13",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# First call includes compilation time\n",
+    "loglike_value = loglike(params)\n",
+    "print(f\"Log-likelihood at starting values: {loglike_value:.2f}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "14",
+   "metadata": {},
+   "source": [
+    "## Constraints\n",
+    "\n",
+    "Skillmodels automatically generates constraints from the model specification:\n",
+    "- Fixed parameters (normalized loadings and intercepts)\n",
+    "- Stagemap equality constraints\n",
+    "- Bound constraints\n",
+    "\n",
+    "You can add additional constraints for your specific model."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "15",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "constraints = max_inputs[\"constraints\"]\n",
+    "print(f\"Number of auto-generated constraints: {len(constraints)}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "16",
+   "metadata": {},
+   "source": [
+    "## Estimation with optimagic\n",
+    "\n",
+    "To estimate the model, use optimagic's `maximize` function:\n",
+    "\n",
+    "```python\n",
+    "import optimagic as om\n",
+    "\n",
+    "result = om.maximize(\n",
+    "    fun=loglike,\n",
+    "    params=params,\n",
+    "    algorithm=\"scipy_lbfgsb\",\n",
+    "    fun_and_jac=loglike_and_gradient,\n",
+    "    constraints=constraints,\n",
+    ")\n",
+    "```\n",
+    "\n",
+    "The `fun_and_jac` argument is important: it uses the combined function that\n",
+    "computes both the likelihood and gradient efficiently."
+ ] + }, + { + "cell_type": "markdown", + "id": "17", + "metadata": {}, + "source": [ + "## Next Steps\n", + "\n", + "- See the [Model Specifications](../how_to_guides/model_specs.md) guide for details\n", + " on writing model specifications\n", + "- See the [Simulation](../how_to_guides/how_to_simulate_dataset.ipynb) guide for\n", + " generating synthetic data\n", + "- After estimation, use `get_filtered_states()` to extract latent factor estimates" + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/source/how_to_guides/how_to_simulate_dataset.ipynb b/docs/how_to_guides/how_to_simulate_dataset.ipynb similarity index 100% rename from docs/source/how_to_guides/how_to_simulate_dataset.ipynb rename to docs/how_to_guides/how_to_simulate_dataset.ipynb diff --git a/docs/source/how_to_guides/how_to_visualize_correlations.ipynb b/docs/how_to_guides/how_to_visualize_correlations.ipynb similarity index 100% rename from docs/source/how_to_guides/how_to_visualize_correlations.ipynb rename to docs/how_to_guides/how_to_visualize_correlations.ipynb diff --git a/docs/source/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb b/docs/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb similarity index 100% rename from docs/source/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb rename to docs/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb diff --git a/docs/source/how_to_guides/how_to_visualize_transition_equations.ipynb b/docs/how_to_guides/how_to_visualize_transition_equations.ipynb similarity index 100% rename from docs/source/how_to_guides/how_to_visualize_transition_equations.ipynb rename to docs/how_to_guides/how_to_visualize_transition_equations.ipynb diff --git a/docs/how_to_guides/model_specs.md b/docs/how_to_guides/model_specs.md new file mode 100644 index 00000000..85dadc14 --- /dev/null +++ b/docs/how_to_guides/model_specs.md @@ -0,0 +1,172 @@ +# Model Specifications + +Models can be specified using Python dataclasses or dictionaries. The dataclass approach +is recommended for type safety and IDE support. + +## Using Dataclasses (Recommended) + +```python +from skillmodels import ( + AnchoringSpec, + EstimationOptionsSpec, + FactorSpec, + ModelSpec, + Normalizations, +) + +# Define factors +fac1 = FactorSpec( + measurements=[ + ["y1", "y2", "y3"], # period 0 + ["y1", "y2", "y3"], # period 1 + # ... 
+    ],
+    normalizations=Normalizations(
+        loadings=[{"y1": 1.0}, {}, {}],  # one dict per period; fix y1's loading to 1 in period 0
+        intercepts=[{}, {}, {}],
+    ),
+    transition_equation="log_ces",
+)
+
+# Create model (fac2 and fac3 are FactorSpec instances defined analogously to fac1)
+model = ModelSpec(
+    factors={"fac1": fac1, "fac2": fac2, "fac3": fac3},
+    anchoring=AnchoringSpec(
+        outcomes={"fac1": "Q1"},
+        free_loadings=True,
+    ),
+    controls=["x1", "x2"],
+    stagemap=[0, 0, 1, 1, 2, 2, 3],
+    estimation_options=EstimationOptionsSpec(),
+)
+```
+
+## Using Dictionaries
+
+For backwards compatibility and interoperability with YAML/JSON files, models can also
+be specified as dictionaries:
+
+```python
+import yaml
+
+with open("model.yaml") as f:
+    model = yaml.safe_load(f)
+```
+
+The dictionary structure mirrors the dataclass structure:
+
+```yaml
+factors:
+  fac1:
+    measurements:
+      - [y1, y2, y3]
+      - [y1, y2, y3]
+    normalizations:
+      loadings:
+        - {y1: 1.0}
+        - {}
+      intercepts:
+        - {}
+        - {}
+    transition_equation: log_ces
+  fac2:
+    measurements:
+      - [y4, y5, y6]
+      - [y4, y5, y6]
+    transition_equation: linear
+  fac3:
+    measurements:
+      - [y7, y8, y9]
+      - []
+    transition_equation: constant
+
+anchoring:
+  outcomes:
+    fac1: Q1
+  free_loadings: true
+
+controls:
+  - x1
+  - x2
+
+stagemap: [0, 0, 1, 1, 2, 2, 3]
+```
+
+## Factor Specification
+
+Each factor requires:
+
+- **measurements**: A nested list with measurement variable names for each period. Empty
+  lists indicate no measurements in that period.
+- **transition_equation**: Name of a transition function (`linear`, `log_ces`,
+  `constant`, `translog`) or a custom function.
+- **normalizations** (optional): Fixed values for loadings and intercepts to identify
+  the model.
+
+## Anchoring
+
+Anchoring links latent factors to observable outcomes. Options:
+
+- **outcomes**: Dictionary mapping factor names to anchoring outcome variables
+- **free_controls**: Whether to estimate control coefficients in anchoring equations
+  (default: false)
+- **free_constant**: Whether to estimate a constant in anchoring equations
+  (default: false)
+- **free_loadings**: Whether to estimate loadings in anchoring equations
+  (default: false)
+- **ignore_constant_when_anchoring**: Skip the constant when anchoring (default: false)
+
+## Controls
+
+A list of variable names used as control variables in measurement equations. A constant
+is always included automatically.
+
+## Stagemap
+
+Maps periods to development stages. It has one entry fewer than the number of periods
+because each entry refers to the transition between two consecutive periods.
+Parameters are constrained to be equal within a stage.
+
+Example: in a five-period model, the stagemap `[0, 0, 1, 1]` means that the transitions
+starting in periods 0 and 1 share the stage 0 parameters, and the transitions starting
+in periods 2 and 3 share the stage 1 parameters.
+
+## Observed Factors
+
+Variables in the dataset that represent observed (not latent) factors. These don't need
+transition equations or multiple measurements.
+ +```python +model = ModelSpec( + factors={...}, + observed_factors=["income", "treatment"], +) +``` + +## Estimation Options + +Fine-tune the estimation: + +- **sigma_points_scale**: Scaling for Julier sigma points (default: 2) +- **robust_bounds**: Make bounds stricter to avoid numerical issues (default: true) +- **bounds_distance**: How much stricter to make bounds (default: 0.001) +- **clipping_lower_bound**: Clip log-likelihood from below (default: -1e250) +- **clipping_upper_bound**: Clip log-likelihood from above (default: null) +- **clipping_lower_hardness**: Hardness of lower clipping (default: 1) +- **clipping_upper_hardness**: Hardness of upper clipping (default: 1) + +## Custom Transition Functions + +Define custom transition equations using the `@register_params` decorator: + +```python +from skillmodels.decorators import register_params + +@register_params(params=["lincoeff"]) +def my_linear(fac, params): + return params["lincoeff"] * fac +``` + +Custom functions must: +- Accept `params` as a required argument (dictionary with registered parameters) +- Accept factor values as floats or use `states` for a JAX array of all states +- Return a float +- Be JAX jit and vmap compatible diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 00000000..7ad49e93 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,52 @@ +# skillmodels + +Welcome to skillmodels, a Python implementation of estimators for nonlinear dynamic +latent factor models. The package implements the Kalman filter-based maximum likelihood +estimator proposed by Cunha, Heckman and Schennach +([Econometrica 2010](http://onlinelibrary.wiley.com/doi/10.3982/ECTA6551/abstract)). + +## Overview + +Skillmodels was developed for skill formation models but can be applied to any dynamic +nonlinear latent factor model. Key features: + +- **Kalman filter estimation**: Uses square-root implementations for numerical stability +- **Flexible model specification**: Define models using Python dataclasses or dictionaries +- **JAX-powered**: Automatic differentiation and JIT compilation for fast optimization +- **GPU support**: Optional CUDA acceleration + +## Public API + +The main package exports three functions: + +- `get_maximization_inputs()`: Prepare optimization problem for parameter estimation +- `get_filtered_states()`: Extract filtered latent factor estimates +- `simulate_dataset()`: Generate synthetic data from model specification + +And dataclasses for model specification: + +- `ModelSpec`: Main model specification container +- `FactorSpec`: Specification for individual factors +- `AnchoringSpec`: Anchoring settings +- `EstimationOptionsSpec`: Options for estimation +- `Normalizations`: Normalization settings for loadings and intercepts + +## Implementation Notes + +The CHS estimator implemented here differs from the original +[replication files](https://tinyurl.com/yyuq2sa4) in two ways: + +1. Uses different normalizations that account for the + [critique](https://tinyurl.com/y3wl43kz) of Wiswall and Agostinelli +2. Uses robust square-root implementations of the Kalman filters + +## Citation + +If you find skillmodels helpful for research, please cite it. See the +[GitHub repository](https://github.com/OpenSourceEconomics/skillmodels) for citation +information. + +## Feedback + +If you encounter any problems or have suggestions, please open an issue on +[GitHub](https://github.com/OpenSourceEconomics/skillmodels/issues). 
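+
+## Minimal Example
+
+For orientation, here is a compressed sketch of the workflow from the tutorial. It
+uses the example model and simulated dataset that ship with skillmodels as test data,
+and it fills the parameter template with a flat placeholder value; see the tutorial
+for choosing sensible starting values.
+
+```python
+import optimagic as om
+import pandas as pd
+import yaml
+
+from skillmodels import get_maximization_inputs
+from skillmodels.config import TEST_DATA_DIR
+
+# Load an example model specification and the matching simulated dataset.
+with (TEST_DATA_DIR / "model2.yaml").open() as f:
+    model = yaml.safe_load(f)
+
+data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta")
+data = data.set_index(["caseid", "period"])
+
+# Build the likelihood, parameter template, and auto-generated constraints.
+max_inputs = get_maximization_inputs(model, data)
+
+params = max_inputs["params_template"].copy()
+params["value"] = 0.5  # placeholder start values only; see the tutorial
+
+result = om.maximize(
+    fun=max_inputs["loglike"],
+    params=params,
+    algorithm="scipy_lbfgsb",
+    fun_and_jac=max_inputs["loglike_and_gradient"],
+    constraints=max_inputs["constraints"],
+)
+```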
diff --git a/docs/make.bat b/docs/make.bat deleted file mode 100644 index efa432b9..00000000 --- a/docs/make.bat +++ /dev/null @@ -1,263 +0,0 @@ -@ECHO OFF - -REM Command file for Sphinx documentation - -if "%SPHINXBUILD%" == "" ( - set SPHINXBUILD=sphinx-build -) -set BUILDDIR=build -set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source -set I18NSPHINXOPTS=%SPHINXOPTS% source -if NOT "%PAPER%" == "" ( - set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% - set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% -) - -if "%1" == "" goto help - -if "%1" == "help" ( - :help - echo.Please use `make ^` where ^ is one of - echo. html to make standalone HTML files - echo. dirhtml to make HTML files named index.html in directories - echo. singlehtml to make a single large HTML file - echo. pickle to make pickle files - echo. json to make JSON files - echo. htmlhelp to make HTML files and a HTML help project - echo. qthelp to make HTML files and a qthelp project - echo. devhelp to make HTML files and a Devhelp project - echo. epub to make an epub - echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter - echo. text to make text files - echo. man to make manual pages - echo. texinfo to make Texinfo files - echo. gettext to make PO message catalogs - echo. changes to make an overview over all changed/added/deprecated items - echo. xml to make Docutils-native XML files - echo. pseudoxml to make pseudoxml-XML files for display purposes - echo. linkcheck to check all external links for integrity - echo. doctest to run all doctests embedded in the documentation if enabled - echo. coverage to run coverage check of the documentation if enabled - goto end -) - -if "%1" == "clean" ( - for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i - del /q /s %BUILDDIR%\* - goto end -) - - -REM Check if sphinx-build is available and fallback to Python version if any -%SPHINXBUILD% 1>NUL 2>NUL -if errorlevel 9009 goto sphinx_python -goto sphinx_ok - -:sphinx_python - -set SPHINXBUILD=python -m sphinx.__init__ -%SPHINXBUILD% 2> nul -if errorlevel 9009 ( - echo. - echo.The 'sphinx-build' command was not found. Make sure you have Sphinx - echo.installed, then set the SPHINXBUILD environment variable to point - echo.to the full path of the 'sphinx-build' executable. Alternatively you - echo.may add the Sphinx directory to PATH. - echo. - echo.If you don't have Sphinx installed, grab it from - echo.http://sphinx-doc.org/ - exit /b 1 -) - -:sphinx_ok - - -if "%1" == "html" ( - %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/html. - goto end -) - -if "%1" == "dirhtml" ( - %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. - goto end -) - -if "%1" == "singlehtml" ( - %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. - goto end -) - -if "%1" == "pickle" ( - %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can process the pickle files. - goto end -) - -if "%1" == "json" ( - %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can process the JSON files. 
- goto end -) - -if "%1" == "htmlhelp" ( - %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can run HTML Help Workshop with the ^ -.hhp project file in %BUILDDIR%/htmlhelp. - goto end -) - -if "%1" == "qthelp" ( - %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can run "qcollectiongenerator" with the ^ -.qhcp project file in %BUILDDIR%/qthelp, like this: - echo.^> qcollectiongenerator %BUILDDIR%\qthelp\skillmodels.qhcp - echo.To view the help file: - echo.^> assistant -collectionFile %BUILDDIR%\qthelp\skillmodels.ghc - goto end -) - -if "%1" == "devhelp" ( - %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. - goto end -) - -if "%1" == "epub" ( - %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The epub file is in %BUILDDIR%/epub. - goto end -) - -if "%1" == "latex" ( - %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. - goto end -) - -if "%1" == "latexpdf" ( - %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex - cd %BUILDDIR%/latex - make all-pdf - cd %~dp0 - echo. - echo.Build finished; the PDF files are in %BUILDDIR%/latex. - goto end -) - -if "%1" == "latexpdfja" ( - %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex - cd %BUILDDIR%/latex - make all-pdf-ja - cd %~dp0 - echo. - echo.Build finished; the PDF files are in %BUILDDIR%/latex. - goto end -) - -if "%1" == "text" ( - %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The text files are in %BUILDDIR%/text. - goto end -) - -if "%1" == "man" ( - %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The manual pages are in %BUILDDIR%/man. - goto end -) - -if "%1" == "texinfo" ( - %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. - goto end -) - -if "%1" == "gettext" ( - %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The message catalogs are in %BUILDDIR%/locale. - goto end -) - -if "%1" == "changes" ( - %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes - if errorlevel 1 exit /b 1 - echo. - echo.The overview file is in %BUILDDIR%/changes. - goto end -) - -if "%1" == "linkcheck" ( - %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck - if errorlevel 1 exit /b 1 - echo. - echo.Link check complete; look for any errors in the above output ^ -or in %BUILDDIR%/linkcheck/output.txt. - goto end -) - -if "%1" == "doctest" ( - %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest - if errorlevel 1 exit /b 1 - echo. - echo.Testing of doctests in the sources finished, look at the ^ -results in %BUILDDIR%/doctest/output.txt. - goto end -) - -if "%1" == "coverage" ( - %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage - if errorlevel 1 exit /b 1 - echo. - echo.Testing of coverage in the sources finished, look at the ^ -results in %BUILDDIR%/coverage/python.txt. - goto end -) - -if "%1" == "xml" ( - %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. 
The XML files are in %BUILDDIR%/xml. - goto end -) - -if "%1" == "pseudoxml" ( - %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. - goto end -) - -:end diff --git a/docs/myst.yml b/docs/myst.yml new file mode 100644 index 00000000..ba1be062 --- /dev/null +++ b/docs/myst.yml @@ -0,0 +1,51 @@ +--- +# Jupyter Book 2.0 configuration +# See: https://jupyterbook.org/ +version: 1 +project: + id: skillmodels + title: skillmodels + description: >- + Python implementation of estimators for nonlinear dynamic latent factor models, + primarily used for skill formation research in economics. + authors: + - name: Janos Gabler + email: janos.gabler@gmail.com + - name: Hans-Martin von Gaudecker + email: hmgaudecker@uni-bonn.de + keywords: + - skill formation + - latent factor models + - kalman filter + - economics + - econometrics + - python + github: https://github.com/OpenSourceEconomics/skillmodels + jupyter: true + toc: + - file: index.md + - title: Getting Started + children: + - file: getting_started/tutorial.ipynb + - title: How-to Guides + children: + - file: how_to_guides/model_specs.md + - file: how_to_guides/how_to_simulate_dataset.ipynb + - file: how_to_guides/how_to_visualize_transition_equations.ipynb + - file: how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb + - file: how_to_guides/how_to_visualize_correlations.ipynb + - title: Explanations + children: + - file: explanations/names_and_concepts.md + - file: explanations/notes_on_factor_scales.md + - title: Reference Guides + children: + - file: reference_guides/transition_functions.md + - file: reference_guides/endogeneity_corrections.md + error_rules: + - rule: link-resolves + severity: ignore +site: + template: book-theme + options: + logo_text: skillmodels diff --git a/docs/reference_guides/endogeneity_corrections.md b/docs/reference_guides/endogeneity_corrections.md new file mode 100644 index 00000000..11ad21c2 --- /dev/null +++ b/docs/reference_guides/endogeneity_corrections.md @@ -0,0 +1,50 @@ +# Endogeneity Corrections + +This page discusses endogeneity correction methods from the CHS paper and their +limitations. Note that skillmodels does not currently implement these methods—this is +background for users considering extensions. + +## CHS Methods + +CHS use two endogeneity correction methods, both requiring strong assumptions about +factor scales. + +### Time-Invariant Heterogeneity (Section 4.2.4) + +This method adds a time-invariant individual fixed effect. The assumption of time +invariance is only valid if factor scales remain constant throughout the model. + +**Requirements:** +- Age-invariant measurements for normalization in all periods for all factors +- Three adult outcomes +- Constant factor scales (highly unlikely with KLS transition functions) + +If your dataset meets these requirements, consider using the original CHS Fortran code. + +### Time-Varying Heterogeneity (Section 4.2.5) + +This method uses heterogeneity that follows an AR(1) process. It also relies on: +- Constant factor scales +- A time-invariant investment equation +- Exclusion restrictions (e.g., income affects investment but not skill transitions) + +To adapt this for models with changing factor scales, you would need: +- A linear transition function with period-specific parameters (instead of AR(1)) +- Period-specific investment functions + +Identification of such a model is an open question. 
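+
+Purely as an illustration of the building block such an extension would require (this
+sketch does not implement any endogeneity correction, and the function and parameter
+names are hypothetical), a linear transition equation could be written as a custom
+transition function. Period-specific parameters would then come from leaving the
+transition parameters unconstrained across periods rather than from the function
+itself:
+
+```python
+from skillmodels.decorators import register_params
+
+
+# Hypothetical linear transition of a skill that depends on investment.
+# The function is the same in every period; period-specific parameters
+# arise from not imposing stage (equality) constraints across periods.
+@register_params(params=["beta_skill", "beta_investment", "constant"])
+def linear_skill_transition(fac1, fac2, params):
+    return (
+        params["beta_skill"] * fac1
+        + params["beta_investment"] * fac2
+        + params["constant"]
+    )
+```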
+
+## Wiswall-Agostinelli Approach
+
+Wiswall and Agostinelli propose a simpler endogeneity model (Section 6.1.2 of their
+[paper](https://tinyurl.com/y5ezloh2)) that could work with both the CHS and WA
+estimators.
+
+## Implementation Status
+
+None of these correction methods are currently implemented in skillmodels. Users
+interested in endogeneity corrections should consider:
+
+1. The Wiswall-Agostinelli approach as a starting point
+2. The original CHS Fortran code for their specific methods
+3. Contributing an implementation to skillmodels
diff --git a/docs/reference_guides/transition_functions.md b/docs/reference_guides/transition_functions.md
new file mode 100644
index 00000000..63d3d775
--- /dev/null
+++ b/docs/reference_guides/transition_functions.md
@@ -0,0 +1,128 @@
+# Transition Functions
+
+Transition functions describe how latent factors evolve over time. skillmodels provides
+several pre-built functions and supports custom functions.
+
+## Pre-built Transition Functions
+
+### linear
+
+Linear transition function with a constant term:
+
+$$
+f_{t+1} = \sum_j \beta_j \cdot s_j + c
+$$
+
+where $s_j$ are the state values and $c$ is a constant.
+
+**Parameters**: One coefficient per factor plus a constant.
+
+### translog
+
+Linear-in-parameters function with squares and interaction terms:
+
+$$
+f_{t+1} = \sum_j \beta_j s_j + \sum_j \gamma_j s_j^2 + \sum_{j < k} \delta_{jk} s_j s_k + c
+$$
+
+Despite the name (a naming convention in the skill formation literature), this is not
+a true translog function.
+
+**Parameters**: Linear terms, squared terms, interaction terms, and constant.
+
+### robust_translog
+
+Same as `translog` but clips state values at $\pm 10^{12}$ before computation. Use this
+when states might grow very large and cause numerical overflow.
+
+### linear_and_squares
+
+Like `translog` but without interaction terms:
+
+$$
+f_{t+1} = \sum_j \beta_j s_j + \sum_j \gamma_j s_j^2 + c
+$$
+
+### log_ces
+
+The log CES (constant elasticity of substitution) function in its Known Location and
+Scale (KLS) form:
+
+$$
+f_{t+1} = \frac{1}{\phi} \ln\left(\sum_j \gamma_j e^{\phi \cdot s_j}\right)
+$$
+
+This is a KLS function; see [Notes on Factor Scales](../explanations/notes_on_factor_scales.md)
+for the implications.
+
+**Parameters**: One weight $\gamma_j$ per factor (constrained to sum to 1) plus $\phi$.
+
+### log_ces_general
+
+Generalized log CES without known location and scale:
+
+$$
+f_{t+1} = \text{tfp} \cdot \ln\left(\sum_j \gamma_j e^{\sigma_j \cdot s_j}\right)
+$$
+
+**Parameters**: Weights $\gamma_j$, factor-specific elasticities $\sigma_j$, and total
+factor productivity.
+
+### constant
+
+The factor value does not change:
+
+$$
+f_{t+1} = f_t
+$$
+
+**Parameters**: None.
+
+## Custom Transition Functions
+
+Define custom functions using the `@register_params` decorator:
+
+```python
+from skillmodels.decorators import register_params
+
+@register_params(params=["alpha", "beta"])
+def my_transition(fac1, fac2, params):
+    return params["alpha"] * fac1 + params["beta"] * fac2**2
+```
+
+### Requirements
+
+Custom transition functions must:
+
+1. Accept `params` as a mandatory argument (dictionary with registered parameter names)
+2. Accept factor values as floats or use `states` for a JAX array of all factors
+3. Return a float (or scalar JAX array)
+4. 
Be JAX jit and vmap compatible (no Python control flow on state values)
+
+### Using Custom Functions
+
+```python
+from skillmodels import FactorSpec
+
+factor = FactorSpec(
+    measurements=[...],
+    transition_equation=my_transition,  # Pass the function object
+)
+```
+
+Or with a dictionary-based model:
+
+```python
+model["factors"]["fac1"]["transition_equation"] = my_transition
+```
+
+### Advanced: Accessing All States
+
+If your transition function needs access to all states at once:
+
+```python
+import jax.numpy as jnp
+
+@register_params(params=["weights"])
+def weighted_sum(states, params):
+    return jnp.dot(states, params["weights"])
+```
+
+The `states` argument is a 1D JAX array with all factor values in order.
diff --git a/docs/source/chs_test_params.csv b/docs/source/chs_test_params.csv
deleted file mode 100644
index ea204de7..00000000
--- a/docs/source/chs_test_params.csv
+++ /dev/null
@@ -1,209 +0,0 @@
-category,period,name1,name2,lower,upper,chs_value,good_start_value,bad_start_value
-control_coeffs,0,y1,constant,-inf,inf,1.00161847,1.0,0.0
-control_coeffs,0,y1,x1,-inf,inf,1.00545482,1.0,0.0
-control_coeffs,0,y2,constant,-inf,inf,1.03143922,1.0,0.0
-control_coeffs,0,y2,x1,-inf,inf,0.97599155,1.0,0.0
-control_coeffs,0,y3,constant,-inf,inf,0.99409082,1.0,0.0
-control_coeffs,0,y3,x1,-inf,inf,0.99413941,1.0,0.0
-control_coeffs,0,y4,constant,-inf,inf,1.01833579,1.0,0.0
-control_coeffs,0,y4,x1,-inf,inf,1.00601018,1.0,0.0
-control_coeffs,0,y5,constant,-inf,inf,1.02354619,1.0,0.0
-control_coeffs,0,y5,x1,-inf,inf,1.00099227,1.0,0.0
-control_coeffs,0,y6,constant,-inf,inf,1.04884144,1.0,0.0
-control_coeffs,0,y6,x1,-inf,inf,0.96857115,1.0,0.0
-control_coeffs,0,y7,constant,-inf,inf,1.04176152,1.0,0.0
-control_coeffs,0,y7,x1,-inf,inf,0.97482723,1.0,0.0
-control_coeffs,0,y8,constant,-inf,inf,1.00566791,1.0,0.0
-control_coeffs,0,y8,x1,-inf,inf,0.98188173,1.0,0.0
-control_coeffs,0,y9,constant,-inf,inf,1.04848607,1.0,0.0
-control_coeffs,0,y9,x1,-inf,inf,0.92514421,0.9,0.0
-control_coeffs,1,y1,constant,-inf,inf,0.9565773999999999,1.0,0.0
-control_coeffs,1,y1,x1,-inf,inf,1.0528786,1.1,0.0
-control_coeffs,1,y2,constant,-inf,inf,0.94514768,0.9,0.0
-control_coeffs,1,y2,x1,-inf,inf,1.10002752,1.1,0.0
-control_coeffs,1,y3,constant,-inf,inf,0.94921197,0.9,0.0
-control_coeffs,1,y3,x1,-inf,inf,1.11220924,1.1,0.0
-control_coeffs,1,y4,constant,-inf,inf,0.97927901,1.0,0.0
-control_coeffs,1,y4,x1,-inf,inf,1.05321768,1.1,0.0
-control_coeffs,1,y5,constant,-inf,inf,0.95632487,1.0,0.0
-control_coeffs,1,y5,x1,-inf,inf,1.07984824,1.1,0.0
-control_coeffs,1,y6,constant,-inf,inf,0.98720524,1.0,0.0
-control_coeffs,1,y6,x1,-inf,inf,1.04155956,1.0,0.0
-control_coeffs,2,y1,constant,-inf,inf,0.98278806,1.0,0.0
-control_coeffs,2,y1,x1,-inf,inf,1.01759386,1.0,0.0
-control_coeffs,2,y2,constant,-inf,inf,1.00015093,1.0,0.0
-control_coeffs,2,y2,x1,-inf,inf,0.99449381,1.0,0.0
-control_coeffs,2,y3,constant,-inf,inf,0.93918533,0.9,0.0
-control_coeffs,2,y3,x1,-inf,inf,1.04145063,1.0,0.0
-control_coeffs,2,y4,constant,-inf,inf,1.00539897,1.0,0.0
-control_coeffs,2,y4,x1,-inf,inf,0.98808056,1.0,0.0
-control_coeffs,2,y5,constant,-inf,inf,1.03995916,1.0,0.0
-control_coeffs,2,y5,x1,-inf,inf,0.93727871,0.9,0.0
-control_coeffs,2,y6,constant,-inf,inf,1.02370063,1.0,0.0
-control_coeffs,2,y6,x1,-inf,inf,0.97863335,1.0,0.0
-control_coeffs,3,y1,constant,-inf,inf,0.95263385,1.0,0.0
-control_coeffs,3,y1,x1,-inf,inf,1.07747808,1.1,0.0
-control_coeffs,3,y2,constant,-inf,inf,0.97511705,1.0,0.0
-control_coeffs,3,y2,x1,-inf,inf,1.01595775,1.0,0.0
-control_coeffs,3,y3,constant,-inf,inf,0.99671239,1.0,0.0 -control_coeffs,3,y3,x1,-inf,inf,1.00409134,1.0,0.0 -control_coeffs,3,y4,constant,-inf,inf,0.97463783,1.0,0.0 -control_coeffs,3,y4,x1,-inf,inf,1.00265983,1.0,0.0 -control_coeffs,3,y5,constant,-inf,inf,1.00354587,1.0,0.0 -control_coeffs,3,y5,x1,-inf,inf,0.98936892,1.0,0.0 -control_coeffs,3,y6,constant,-inf,inf,1.00220065,1.0,0.0 -control_coeffs,3,y6,x1,-inf,inf,1.01816115,1.0,0.0 -control_coeffs,4,y1,constant,-inf,inf,1.01871361,1.0,0.0 -control_coeffs,4,y1,x1,-inf,inf,0.97390947,1.0,0.0 -control_coeffs,4,y2,constant,-inf,inf,0.96884594,1.0,0.0 -control_coeffs,4,y2,x1,-inf,inf,1.01276643,1.0,0.0 -control_coeffs,4,y3,constant,-inf,inf,0.96348822,1.0,0.0 -control_coeffs,4,y3,x1,-inf,inf,1.0639416000000002,1.1,0.0 -control_coeffs,4,y4,constant,-inf,inf,0.97249741,1.0,0.0 -control_coeffs,4,y4,x1,-inf,inf,1.05030944,1.1,0.0 -control_coeffs,4,y5,constant,-inf,inf,1.01263275,1.0,0.0 -control_coeffs,4,y5,x1,-inf,inf,0.95867367,1.0,0.0 -control_coeffs,4,y6,constant,-inf,inf,0.97561054,1.0,0.0 -control_coeffs,4,y6,x1,-inf,inf,1.02067436,1.0,0.0 -control_coeffs,5,y1,constant,-inf,inf,1.02885338,1.0,0.0 -control_coeffs,5,y1,x1,-inf,inf,0.92274679,0.9,0.0 -control_coeffs,5,y2,constant,-inf,inf,0.99430379,1.0,0.0 -control_coeffs,5,y2,x1,-inf,inf,0.97278707,1.0,0.0 -control_coeffs,5,y3,constant,-inf,inf,0.97922354,1.0,0.0 -control_coeffs,5,y3,x1,-inf,inf,1.02364392,1.0,0.0 -control_coeffs,5,y4,constant,-inf,inf,1.01557986,1.0,0.0 -control_coeffs,5,y4,x1,-inf,inf,0.9576603000000001,1.0,0.0 -control_coeffs,5,y5,constant,-inf,inf,0.96898918,1.0,0.0 -control_coeffs,5,y5,x1,-inf,inf,1.04015694,1.0,0.0 -control_coeffs,5,y6,constant,-inf,inf,0.97658414,1.0,0.0 -control_coeffs,5,y6,x1,-inf,inf,1.00635915,1.0,0.0 -control_coeffs,6,y1,constant,-inf,inf,0.98368467,1.0,0.0 -control_coeffs,6,y1,x1,-inf,inf,0.99929141,1.0,0.0 -control_coeffs,6,y2,constant,-inf,inf,0.93183755,0.9,0.0 -control_coeffs,6,y2,x1,-inf,inf,1.04782772,1.0,0.0 -control_coeffs,6,y3,constant,-inf,inf,0.95146637,1.0,0.0 -control_coeffs,6,y3,x1,-inf,inf,1.00920751,1.0,0.0 -control_coeffs,6,y4,constant,-inf,inf,0.99101302,1.0,0.0 -control_coeffs,6,y4,x1,-inf,inf,1.05400193,1.1,0.0 -control_coeffs,6,y5,constant,-inf,inf,0.9781293,1.0,0.0 -control_coeffs,6,y5,x1,-inf,inf,1.06296891,1.1,0.0 -control_coeffs,6,y6,constant,-inf,inf,0.9949447,1.0,0.0 -control_coeffs,6,y6,x1,-inf,inf,1.01391456,1.0,0.0 -control_coeffs,7,y1,constant,-inf,inf,0.94997421,0.9,0.0 -control_coeffs,7,y1,x1,-inf,inf,1.03694603,1.0,0.0 -control_coeffs,7,y2,constant,-inf,inf,0.94683688,0.9,0.0 -control_coeffs,7,y2,x1,-inf,inf,1.05734328,1.1,0.0 -control_coeffs,7,y3,constant,-inf,inf,0.97187261,1.0,0.0 -control_coeffs,7,y3,x1,-inf,inf,1.04305552,1.0,0.0 -control_coeffs,7,y4,constant,-inf,inf,0.94043886,0.9,0.0 -control_coeffs,7,y4,x1,-inf,inf,1.06978168,1.1,0.0 -control_coeffs,7,y5,constant,-inf,inf,0.9762738999999999,1.0,0.0 -control_coeffs,7,y5,x1,-inf,inf,1.01505491,1.0,0.0 -control_coeffs,7,y6,constant,-inf,inf,0.9811178,1.0,0.0 -control_coeffs,7,y6,x1,-inf,inf,1.02936846,1.0,0.0 -control_coeffs,7,Q1,constant,-inf,inf,0.94351157,0.9,0.0 -control_coeffs,7,Q1,x1,-inf,inf,1.03648928,1.0,0.0 -loading,0,y2,fac1,-inf,inf,1.24759799,1.2,1.0 -loading,0,y3,fac1,-inf,inf,1.55939677,1.6,1.0 -loading,0,y5,fac2,-inf,inf,0.9908065,1.0,1.0 -loading,0,y6,fac2,-inf,inf,0.68554932,0.7,1.0 -loading,0,y8,fac3,-inf,inf,1.1132403,1.1,1.0 -loading,0,y9,fac3,-inf,inf,0.70906931,0.7,1.0 -loading,1,y2,fac1,-inf,inf,1.21745308,1.2,1.0 
-loading,1,y3,fac1,-inf,inf,1.34547457,1.3,1.0 -loading,1,y5,fac2,-inf,inf,0.82645675,0.8,1.0 -loading,1,y6,fac2,-inf,inf,0.60128174,0.6,1.0 -loading,2,y2,fac1,-inf,inf,1.18091396,1.2,1.0 -loading,2,y3,fac1,-inf,inf,1.41494654,1.4,1.0 -loading,2,y5,fac2,-inf,inf,0.78412623,0.8,1.0 -loading,2,y6,fac2,-inf,inf,0.6095229,0.6,1.0 -loading,3,y2,fac1,-inf,inf,1.28957256,1.3,1.0 -loading,3,y3,fac1,-inf,inf,1.42483912,1.4,1.0 -loading,3,y5,fac2,-inf,inf,0.75831051,0.8,1.0 -loading,3,y6,fac2,-inf,inf,0.60278636,0.6,1.0 -loading,4,y2,fac1,-inf,inf,1.24216652,1.2,1.0 -loading,4,y3,fac1,-inf,inf,1.4194579999999999,1.4,1.0 -loading,4,y5,fac2,-inf,inf,0.83681772,0.8,1.0 -loading,4,y6,fac2,-inf,inf,0.5249411999999999,0.5,1.0 -loading,5,y2,fac1,-inf,inf,1.20739978,1.2,1.0 -loading,5,y3,fac1,-inf,inf,1.43586124,1.4,1.0 -loading,5,y5,fac2,-inf,inf,0.76476822,0.8,1.0 -loading,5,y6,fac2,-inf,inf,0.60105183,0.6,1.0 -loading,6,y2,fac1,-inf,inf,1.22402329,1.2,1.0 -loading,6,y3,fac1,-inf,inf,1.42265974,1.4,1.0 -loading,6,y5,fac2,-inf,inf,0.68885845,0.7,1.0 -loading,6,y6,fac2,-inf,inf,0.61882325,0.6,1.0 -loading,7,y2,fac1,-inf,inf,1.23608389,1.2,1.0 -loading,7,y3,fac1,-inf,inf,1.47859872,1.5,1.0 -loading,7,y5,fac2,-inf,inf,0.81524559,0.8,1.0 -loading,7,y6,fac2,-inf,inf,0.57084593,0.6,1.0 -loading,7,Q1,fac1,-inf,inf,0.93520167,0.9,1.0 -meas_sd,0,y1,-,-inf,inf,0.50497719,0.5,1.0 -meas_sd,0,y2,-,-inf,inf,0.50088168,0.5,1.0 -meas_sd,0,y3,-,-inf,inf,0.48136282,0.5,1.0 -meas_sd,0,y4,-,-inf,inf,0.53215346,0.5,1.0 -meas_sd,0,y5,-,-inf,inf,0.47039143,0.5,1.0 -meas_sd,0,y6,-,-inf,inf,0.48344469,0.5,1.0 -meas_sd,0,y7,-,-inf,inf,0.47943359,0.5,1.0 -meas_sd,0,y8,-,-inf,inf,0.53421227,0.5,1.0 -meas_sd,0,y9,-,-inf,inf,0.50146093,0.5,1.0 -meas_sd,1,y1,-,-inf,inf,0.49105567,0.5,1.0 -meas_sd,1,y2,-,-inf,inf,0.49870431,0.5,1.0 -meas_sd,1,y3,-,-inf,inf,0.50514084,0.5,1.0 -meas_sd,1,y4,-,-inf,inf,0.49743526,0.5,1.0 -meas_sd,1,y5,-,-inf,inf,0.49941779999999997,0.5,1.0 -meas_sd,1,y6,-,-inf,inf,0.50424182,0.5,1.0 -meas_sd,2,y1,-,-inf,inf,0.50427244,0.5,1.0 -meas_sd,2,y2,-,-inf,inf,0.51856939,0.5,1.0 -meas_sd,2,y3,-,-inf,inf,0.50392617,0.5,1.0 -meas_sd,2,y4,-,-inf,inf,0.49161026,0.5,1.0 -meas_sd,2,y5,-,-inf,inf,0.50441808,0.5,1.0 -meas_sd,2,y6,-,-inf,inf,0.48482939,0.5,1.0 -meas_sd,3,y1,-,-inf,inf,0.49476345,0.5,1.0 -meas_sd,3,y2,-,-inf,inf,0.49363682,0.5,1.0 -meas_sd,3,y3,-,-inf,inf,0.49918763,0.5,1.0 -meas_sd,3,y4,-,-inf,inf,0.49728617,0.5,1.0 -meas_sd,3,y5,-,-inf,inf,0.5116465,0.5,1.0 -meas_sd,3,y6,-,-inf,inf,0.48035036,0.5,1.0 -meas_sd,4,y1,-,-inf,inf,0.50529312,0.5,1.0 -meas_sd,4,y2,-,-inf,inf,0.50706948,0.5,1.0 -meas_sd,4,y3,-,-inf,inf,0.47849704,0.5,1.0 -meas_sd,4,y4,-,-inf,inf,0.49962829,0.5,1.0 -meas_sd,4,y5,-,-inf,inf,0.49001347,0.5,1.0 -meas_sd,4,y6,-,-inf,inf,0.48723789,0.5,1.0 -meas_sd,5,y1,-,-inf,inf,0.51551926,0.5,1.0 -meas_sd,5,y2,-,-inf,inf,0.52331776,0.5,1.0 -meas_sd,5,y3,-,-inf,inf,0.48326815,0.5,1.0 -meas_sd,5,y4,-,-inf,inf,0.47910757,0.5,1.0 -meas_sd,5,y5,-,-inf,inf,0.50327233,0.5,1.0 -meas_sd,5,y6,-,-inf,inf,0.49705186,0.5,1.0 -meas_sd,6,y1,-,-inf,inf,0.52223919,0.5,1.0 -meas_sd,6,y2,-,-inf,inf,0.48641122,0.5,1.0 -meas_sd,6,y3,-,-inf,inf,0.47597189,0.5,1.0 -meas_sd,6,y4,-,-inf,inf,0.52595048,0.5,1.0 -meas_sd,6,y5,-,-inf,inf,0.51187305,0.5,1.0 -meas_sd,6,y6,-,-inf,inf,0.52425668,0.5,1.0 -meas_sd,7,y1,-,-inf,inf,0.52163477,0.5,1.0 -meas_sd,7,y2,-,-inf,inf,0.52112353,0.5,1.0 -meas_sd,7,y3,-,-inf,inf,0.47545353,0.5,1.0 -meas_sd,7,y4,-,-inf,inf,0.5150197,0.5,1.0 -meas_sd,7,y5,-,-inf,inf,0.48993218,0.5,1.0 
-meas_sd,7,y6,-,-inf,inf,0.52777721,0.5,1.0 -meas_sd,7,Q1,-,-inf,inf,1.03957418,1.0,1.0 -shock_sd,0,fac1,-,-inf,inf,0.321936173798472,0.31622776601683794,1.0 -shock_sd,0,fac2,-,-inf,inf,0.3131064355774247,0.31622776601683794,1.0 -initial_mean,0,mixture_0,fac1,-inf,inf,0.0,0.0,0.0 -initial_mean,0,mixture_0,fac2,-inf,inf,0.0,0.0,0.0 -initial_mean,0,mixture_0,fac3,-inf,inf,0.0,0.0,0.0 -initial_cov,0,mixture_0,fac1-fac1,-inf,inf,0.17647290000000002,0.2,1.0 -initial_cov,0,mixture_0,fac2-fac1,-inf,inf,0.00524114,0.0,0.0 -initial_cov,0,mixture_0,fac2-fac2,-inf,inf,0.18362641,0.2,1.0 -initial_cov,0,mixture_0,fac3-fac1,-inf,inf,0.005665399999999999,0.0,0.0 -initial_cov,0,mixture_0,fac3-fac2,-inf,inf,-0.00067522,0.0,0.0 -initial_cov,0,mixture_0,fac3-fac3,-inf,inf,0.23194739,0.2,1.0 -trans,0,fac1,fac1,-inf,inf,0.65978837,0.7,0.4 -trans,0,fac1,fac2,-inf,inf,0.174038,0.2,0.3 -trans,0,fac1,fac3,-inf,inf,0.16617363,0.1,0.3 -trans,0,fac1,phi,-inf,inf,-0.40701787,-0.4,-0.5 -trans,0,fac2,fac2,-inf,inf,0.60887112,0.6,0.5 -trans,0,fac2,constant,-inf,inf,0.0,0.0,0.0 diff --git a/docs/source/conf.py b/docs/source/conf.py deleted file mode 100644 index 09c9be1f..00000000 --- a/docs/source/conf.py +++ /dev/null @@ -1,155 +0,0 @@ -"""Sphinx configuration file for skillmodels documentation.""" - -# -# Documentation build configuration file, created by sphinx-quickstart -# -# This file is execfile()d with the current directory set to its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. -import sys -from pathlib import Path - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use Path.resolve() to make it absolute, like shown here. -sys.path.insert(0, str(Path("../..").resolve())) - - -# -- General configuration ---------------------------------------------------- - -# If your documentation needs a minimal Sphinx, state it here. -needs_sphinx = "1.6" - -# Add any Sphinx extension module names here, as strings. -# They can be extensions coming with Sphinx (named "sphinx.ext.*") -# or your custom ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.viewcode", - "sphinx.ext.mathjax", - "sphinx.ext.napoleon", - "sphinx.ext.todo", - "nbsphinx", -] - -# Mock imports. -autodoc_mock_imports = [ - "optimagic", - "matplotlib", - "jax", - "numpy", - "pandas", - "scipy", - "filterpy", - "dags", - "plotly", -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# The suffix of source filenames. -source_suffix = ".rst" - -# The encoding of source files. -source_encoding = "utf-8" - -# The master toctree document. -master_doc = "index" - -# General information about the project. -project = "skillmodels" -copyright = "2016-, Janos Gabler" # noqa: A001 - -# The version info for the project you"re documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = "0.2" -# The full version, including alpha/beta/rc tags. -release = "0.2.2" - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. 
-# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = "" -# Else, today_fmt is used as the format for a strftime call. -today_fmt = "%d %B %Y" - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all documents. -# default_role = None - -# If true, "()" will be appended to :func: etc. cross-reference text. -add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = False - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -modindex_common_prefix = ["src."] - - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = "pydata_sphinx_theme" - -html_logo = "_static/images/logo.svg" - -html_theme_options = { - "github_url": "https://github.com/OpenSourceEconomics/skillmodels" -} - -html_css_files = ["css/custom.css"] - -html_sidebars = { - "**": [ - "relations.html", # needs 'show_related': True theme option to display - "searchbox.html", - ], -} - -templates_path = ["_templates"] -html_static_path = ["_static"] - - -html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = "" - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -html_file_suffix = ".html" - -# Output file base name for HTML help builder. -htmlhelp_basename = "somedoc" - -# Other settings - -autodoc_member_order = "bysource" -napoleon_use_rtype = False -napoleon_include_private_with_doc = False -todo_include_todos = True diff --git a/docs/source/explanations/index.rst b/docs/source/explanations/index.rst deleted file mode 100644 index 4102eda2..00000000 --- a/docs/source/explanations/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -Explanations -============ - - -.. toctree:: - :maxdepth: 1 - - names_and_concepts - notes_on_factor_scales diff --git a/docs/source/explanations/names_and_concepts.rst b/docs/source/explanations/names_and_concepts.rst deleted file mode 100644 index 0e55e298..00000000 --- a/docs/source/explanations/names_and_concepts.rst +++ /dev/null @@ -1,95 +0,0 @@ -.. _names_and_concepts: - - -================== -Names and concepts -================== - -This section contains an overview of frequently used variable names and -concepts. It's not necessary to read this section if you are only interested in -using the code, but you might want to skim it if you are interested in what the -code actually does or plan to adapt it to your use case. - -Most of those quantities are generated once during the :ref:`model_processing` -and appear as arguments of many other functions. - -.. _dimensions: - -``dimensions`` -============== - -Dimensions of the model quantities. All of them are integers. - -- n_states: Number of latent factors or states in the model. 
Note that the terms - state and factor are used interchangeably throughout the documentation. -- n_periods: Number of periods of the model. There is one more period than - transition equations of the model. -- n_mixtures: Number of elements in the finite mixture of normals distribution. -- n_controls: Number of control variables in the measurement equations. This - includes the intercept of the measurement equation. Thus n_controls is always - 1 or larger. - - -.. _labels: - -``labels`` -========== - -Labels for the model quantities. All of them are lists. - - -- factors: Names of the latent factors. -- controls: Names of the control variables. The first entry is always "constant". -- periods: List of integers, starting at zero. The indices of the periods. -- stagemap: Maps periods to stages. Has one entry less than the number of periods. -- stages: The indices of the stages of the model. - - -.. _stages_vs_periods: - - -Development-Stages vs Periods -============================= - -A development is a group of consecutive periods for which the technology of skill -formation remains the same. Thus the number of stages is always <= the number of -periods of a model. - -Thus development stages are just equality constraints on the estimated parameter -vector. Because they are very frequently used, skillmodels can generate the -constraints automatically if you specify a stagemap in your model dictionary. - - -Example: If you have a model with 5 periods you can estimate at most 4 different -production functions (one for each transition between periods). If you want to -keep the parameters of the technology of skill formation constant between two -consecutive periods, you would specify the following stagemap: ``[0, 0, 1, 1]`` - - -.. _anchoring: - -``anchoring`` -============= - - - - -.. _update_info: - - -``update_info`` -=============== - - - -.. _normalizations: - -``normalizations`` -================== - - -.. _estimation_options: - - -``estimation_options`` -====================== diff --git a/docs/source/explanations/notes_on_factor_scales.rst b/docs/source/explanations/notes_on_factor_scales.rst deleted file mode 100644 index 3dd39ead..00000000 --- a/docs/source/explanations/notes_on_factor_scales.rst +++ /dev/null @@ -1,158 +0,0 @@ -********************************** -Notes on Scales and Normalizations -********************************** - -Here I collect Notes on different aspects of the discussion about factor -scales and re-normalization. This discussion originates in the `critique`_ by -Wiswall and Agostinelli but I argue below, that this critique is not yet -complete. - -Wiswall and Agostinelli define a class of transition functions with Known -Location and Scale (KLS) that require less normalizations. You should read -this definition in their paper. - -The critique by Wiswall and Agostinelli potentially invalidates the empirical -estimates of CHS, but not their general estimation routine. To get estimates -that don't suffer from renormalization you can either use less normalizations -or non-KLS transition functions. As there is no natural scale of skills, none -of the approaches is better or worse. Nevertheless, I prefer using flexible -Non-KLS transition functions with one normalization per period and factor. -Firstly, because they are more compatible with using development stages that -span more than one period. Secondly, because picking suitable normalizations -might help to give the latent factors a more meaningful scale. - - -.. 
_KLS_not_constant: - -Why KLS functions don't keep the scales constant -************************************************ - -Skills have no natural scale, but after reading the critique paper by Wiswall -and Agostinelli one could easily get the impression that using KLS transition -functions and less normalizations is better, because it identifies some sort -of natural scale. Moreover in their `estimation`_ paper (p. 7), they write: -"We argue that our limited normalization is appropriate for the dynamic -setting of child development we analyze. With our normalization for the -initial period only, latent skills in all periods share a common location -and scale with respect to the one chosen normalizing measure." - -The following example intuitively shows firstly that the scale identified with -KLS functions is as arbitrary as a scale identified through normalizations and -secondly that this scale is not constant over time in general. - -The example completely abstracts from measurement and estimation problems and -thereby allows to focus essential on the aspects of the problem. - -Consider a simple model of financial investments with two latent factors: a -stock variable wealth (w) and a flow variable investment (i). Suppose periods -last one year and annual interest rate on wealth is 10 percent. New -investments are deposited at the end of the year (get interests only in the -next year). - -The most intuitive scales to describe the system would be to measure all -latent factors in all periods in the same currency, say Dollars. In this case -the transition equation of wealth is given by: - -.. math:: - - w_{t + 1} = 1.1 w_t + i_t - -However, it would also be possible to measure w in period t in Dollars, i in -period t in 1000 Dollars and w in period t + 1 in Dollar cents. The transition -equation -- that still describes the exactly same system -- is then: - -.. math:: - - w_{t + 1} = 110 w_t + 100000 i_t - -The parameters now reflect the actual technology and scale changes between -periods. They are much harder to interpret than before. In fact any linear -function - -.. math:: - - f: \mathbb{R}^2 \rightarrow \mathbb{R} - -could describe the example system -- just in different combinations of scales. - -When latent factor models are estimated, the scales of each factor are usually -set through normalizations in each period. The main point of the first paper -by Wiswall and Agostinelli is that a KLS transition function prohibits to make -such normalizations except for the initial period. One could say that after -that, the transition function chooses the scale. - -The CES function has KLS and contains the subset of all linear functions -without intercept whose parameters sum to 1 as special cases. It can therefore -be used to describe the example system. After setting the scale of both -factors to Dollars in the initial period, the CES function would then choose -the scales for all other periods. - -The linear function that is a CES function and describes the system is: - -.. math:: - w_{t + 1} = \frac{1}{2.1} (1.1 w_t + i_t) \approx 0.524 w_t + 0.476 i_t - -The scale of w in period t + 1 chosen by this function is thus 1 / 2.1 or -approximately 0.476 Dollars which means that wealth in period t + 1 is -approximately measured in 100 Philippine Pesos. - - -.. 
_log_ces_problem: - -Why the CES and log_CES functions are problematic -************************************************* - -The definition of Known Location and Scale refers only to the scale of the -(always one-dimensional) output of a transition function. After reading the -Wiswall and Agostinelli critique, I wondered if the CES and log_CES functions -also pose restrictions on the scales of their inputs, i.e. can describe a system -only at a certain location or scale of inputs. - -According to Wiswall and Agostinelli, when using a log_CES function (which -belongs to the KLS class), one needs initial normalizations of location and -scale for all factors in the model. I made some pen-and-paper-calculations and -estimated models with simulated data and the results suggest that less -normalizations are needed with the log_CES function. - -While one does need to make initial normalizations for the location of all -factors, it is sufficient to normalize the scale of only one factor in the -initial period and the model is still identified. However, these are only -simulations and I do not have a formal result that shows that the restrictions -the log_CES function poses on the scale of its inputs are always enough for -identification. - -I would therefore currently advise not to use the CES or log_CES function -without thinking deeply about the normalizations you need. The automatic -generation of normalizations treats the log_ces function simply as a KLS -function. - - -.. _normalization_and_stages: - -Normalizations and Development stages -************************************* - -CHS use development stages, i.e. several periods of childhood in which the -parameters of the technology of skill formation remain the same. Wiswall and -Agostinelli do not use or analyze this case, but development stages do change -the normalization requirements. - -I always had the intuition that with development stages it is possible to -identify a scale from the first period of the stage, such that no later -normalizations are necessary until the next stage. When extending the WA -estimator to be compatible with development stages, I could confirm this -intuition as one nice feature of this estimator is that its identification -strategy has to be very explicit. - -If development stages are used, one only has to make normalizations in the first -period of each stage, except for the initial stage where the first two periods -have to be normalized. My recommendation is to use automatic normalizations if -you use development stages because it is very easy to get confused. - -This shows another type of over-normalization in the original CHS paper. - -.. _critique: - https://tinyurl.com/y3wl43kz - -.. _estimation: - https://tinyurl.com/y5ezloh2 diff --git a/docs/source/getting_started/index.rst b/docs/source/getting_started/index.rst deleted file mode 100644 index a70e7102..00000000 --- a/docs/source/getting_started/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -Getting Started -=============== - -.. 
toctree:: - :maxdepth: 1 - - tutorial.ipynb diff --git a/docs/source/getting_started/tutorial.ipynb b/docs/source/getting_started/tutorial.ipynb deleted file mode 100644 index 345feb0f..00000000 --- a/docs/source/getting_started/tutorial.ipynb +++ /dev/null @@ -1,381 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Skillmodels Quickstart" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from time import time\n", - "\n", - "import optimagic as om\n", - "import pandas as pd\n", - "import yaml\n", - "\n", - "from skillmodels.config import REGRESSION_VAULT, TEST_DATA_DIR\n", - "from skillmodels.maximization_inputs import get_maximization_inputs" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Loading Model Specification and Data\n", - "\n", - "Model specifications are python dictionaries that can be safed in yaml or json files. For a moment, just assume you know how to write a model specification and have a skillmodels compatible dataset. Both are \n", - "explained in different tutorials.\n", - "\n", - "Next we load the model specification and the dataset. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "with (TEST_DATA_DIR / \"model2.yaml\").open() as y:\n", - " model = yaml.load(y, Loader=yaml.SafeLoader)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "data = pd.read_stata(TEST_DATA_DIR / \"model2_simulated_data.dta\")\n", - "data = data.set_index([\"caseid\", \"period\"])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Getting the inputs for ``optimagic.maximize``\n", - "\n", - "Skillmodels basically just has one public function called ``get_maximization_inputs``. When called with a model specification and a dataset it contains a dictionary with everything you need to maximize the likelihood function using optimagic. \n", - "\n", - "By everything you need I mean everything model-specific. You should still use the optional arguments of ``maximize`` to tune the optimization." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "max_inputs = get_maximization_inputs(model, data)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Filling the Params Template\n", - "\n", - "Often you can greatly reduce estimation time by choosing good start parameters. What are good start parameters depends strongly on the model specifications, the scaling of your variables and the normalizations you make. \n", - "\n", - "If you have strong difficulties to pick good start values, you probably want to think again about the interpretability of your model parameters and possibly change the normalizations and scaling of your \n", - "measurements. \n", - "\n", - "As a rule of thumb: If all measurements are standardized and, all fixed loadings are 1 and all fixed intercepts are 0 then one is a good start value for all free loadings and 0 is a good start value for all free intercepts. \n", - "\n", - "Measurement and shock standard deviations are better started slightly larger than you would expect them. \n", - "\n", - "Below I just load start parameters for the CHS example model that I filled out manually. 
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params_template = max_inputs[\"params_template\"]\n", - "params_template.head()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "index_cols = [\"category\", \"period\", \"name1\", \"name2\"]\n", - "chs_path = REGRESSION_VAULT / \"chs_results.csv\"\n", - "chs_values = pd.read_csv(chs_path)\n", - "chs_values = chs_values.set_index(index_cols)\n", - "chs_values = chs_values[[\"chs_value\", \"good_start_value\", \"bad_start_value\"]]\n", - "chs_values.head()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params = params_template.copy()\n", - "params[\"value\"] = chs_values[\"chs_value\"]\n", - "params.head()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Time compilation speed\n", - "\n", - "Skillmodels uses jax to just-in-time compile the numerical code and get a gradient of the likelihood function by automatic differentiation. \n", - "\n", - "There are several versions of the log likelihood function and its gradient:\n", - "\n", - "- **debug_loglike**: Is not compiled, can be debugged with a debugger, returns a lot of intermediate outputs and is slow. \n", - "- **loglike**: Is compiled and fast but does not return intermediate outputs\n", - "- **gradient**: Is compiled and fast, returns the gradient of loglike\n", - "- **loglike_and_gradient**: Is compiled and fast and exploits synergies between loglike and gradient calculation. This is the most important one for estimation. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "debug_loglike = max_inputs[\"debug_loglike\"]\n", - "loglike = max_inputs[\"loglike\"]\n", - "gradient = max_inputs[\"gradient\"]\n", - "loglike_and_gradient = max_inputs[\"loglike_and_gradient\"]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "start = time()\n", - "debug_loglike_value = debug_loglike(params)\n", - "print(time() - start)\n", - "debug_loglike_value" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "start = time()\n", - "loglike_value = loglike(params)\n", - "print(time() - start)\n", - "loglike_value" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%timeit loglike(params)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "start = time()\n", - "gradient_value = gradient(params)\n", - "print(time() - start)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%timeit gradient(params)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "start = time()\n", - "loglike_and_gradient_value = loglike_and_gradient(params)\n", - "print(time() - start)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%timeit loglike_and_gradient(params)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "## A few additional constraints\n", - "\n", - "To get the same values as CHS we will have to do a little more work. 
The reason is that on top of the many constraints skillmodels generates automatically from the model specification, CHS impose three more constraints:\n", - "\n", - "1. All but the self-productivity parameter in the linear transition equation are fixed to zero.\n", - "2. The initial mean of the states is not estimated but assumed to be zero.\n", - "3. The anchoring parameters (intercepts, control variables, loadings and SDs of measurement error) are pairwise equal across periods.\n", - "\n", - "Fortunately, optimagic makes it easy to express such constraints:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "constraints = max_inputs[\"constraints\"]\n", - "\n", - "additional_constraints = [\n", - " {\n", - " \"query\": \"category == 'transition' & name1 == 'fac2' & name2 != 'fac2'\",\n", - " \"type\": \"fixed\",\n", - " \"value\": 0,\n", - " },\n", - " {\"loc\": \"initial_states\", \"type\": \"fixed\", \"value\": 0},\n", - " {\n", - " \"queries\": [f\"period == {i} & name1 == 'Q1_fac1'\" for i in range(8)],\n", - " \"type\": \"pairwise_equality\",\n", - " },\n", - "]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "constraints = constraints + additional_constraints" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Generating a group column for better dashboard output" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params[\"group\"] = params.index.get_level_values(\"category\")\n", - "params.loc[\"controls\", \"group\"] = params.loc[\"controls\"].index.get_level_values(\"name2\")\n", - "\n", - "params[\"group\"] = (\n", - " params[\"group\"].astype(str)\n", - " + \"_\"\n", - " + params.index.get_level_values(\"period\").astype(str)\n", - ")\n", - "params[\"group\"] = params[\"group\"].str.replace(\"_\", \"-\")\n", - "params" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Estimating the model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params[\"value\"] = chs_values[\"good_start_value\"]\n", - "loc = params.query(\"category == 'shock_sds' & name1 == 'fac3'\").index\n", - "params.loc[loc, \"lower_bound\"] = 0.00\n", - "loglike(params)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "res = om.maximize(\n", - " criterion=loglike,\n", - " params=params,\n", - " algorithm=\"scipy_lbfgsb\",\n", - " fun_and_jac=loglike_and_gradient,\n", - " constraints=constraints,\n", - " logging=False,\n", - " algo_options={\"convergence.relative_criterion_tolerance\": 1e-9},\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "res[\"message\"]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "res[\"success\"]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -}
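The ``query`` and ``queries`` entries in the constraint dictionaries of the tutorial are ordinary pandas query strings evaluated against the index levels of ``params`` (``category``, ``period``, ``name1``, ``name2``). If you are unsure which parameters a constraint pins down, you can run the same query directly on the params DataFrame. A minimal sketch, assuming the ``params`` DataFrame from the tutorial above:

.. code-block:: python

    # Sketch: list the rows of params that the "fixed" constraint from the
    # tutorial selects. DataFrame.query can reference index level names.
    selected = params.query(
        "category == 'transition' & name1 == 'fac2' & name2 != 'fac2'"
    )
    print(selected.index.tolist())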
diff --git a/docs/source/how_to_guides/index.rst b/docs/source/how_to_guides/index.rst deleted file mode 100644 index 94ecff7b..00000000 --- a/docs/source/how_to_guides/index.rst +++ /dev/null @@ -1,13 +0,0 @@ -How-To Guides -============= - - -.. toctree:: - :maxdepth: 1 - - model_specs - utilities - how_to_visualize_transition_equations.ipynb - how_to_simulate_dataset.ipynb - how_to_visualize_pairwise_factor_distribution.ipynb - how_to_visualize_correlations.ipynb diff --git a/docs/source/how_to_guides/model_specs.rst b/docs/source/how_to_guides/model_specs.rst deleted file mode 100644 index ecd7ff05..00000000 --- a/docs/source/how_to_guides/model_specs.rst +++ /dev/null @@ -1,236 +0,0 @@ - -.. _model_specs: - -******************** -Model specifications -******************** - -Models are specified as -`Python dictionaries `_. -To improve reuse of the model specifications these dictionaries can be stored in json -or yaml files. - -Example 2 from the CHS replication files -**************************************** - -Below, the model specification is illustrated using Example 2 from the CHS -`replication files`_. If you want, you can read it in section 4.1 of their -readme file, but I briefly reproduce it here for convenience. - -There are three latent factors fac1, fac2 and fac3 and 8 periods that all belong to the -same development stage. fac1 evolves according to a log_ces production function and -depends on its past values as well as the past values of all other factors. -Moreover, it is linearly anchored with anchoring outcome Q1. This results in the -following transition equation: - -.. math:: - - fac1_{t + 1} = \frac{1}{\phi \lambda_1} ln\big(\gamma_{1,t}e^{\phi - \lambda_1 fac1_t} + \gamma_{2,t}e^{\phi \lambda_2 fac2_t} + - \gamma_{3,t}e^{\phi \lambda_3 fac3_t}\big) + \eta_{1, t} - -where the lambdas are anchoring parameters from a linear anchoring equation. -fac1 is measured by measurements y1, y2 and y3 in all periods. To sum up: -fac1 has the same properties as cognitive and non-cognitive skills in the CHS -paper. - -The evolution of fac2 is described by a linear function and fac2 only depends -on its own past values, not on other factors, i.e. it has the following -transition equation: - -.. math:: - - fac2_{t + 1} = lincoeff \cdot fac2_t + \eta_{2, t} - -It is measured by y4, y5 and y6 in all periods. Thus fac2 has the same -properties as parental investments in the CHS paper. - -fac3 is constant over time. It is measured by y7, y8 and y9 in the first -period and has no measurements in other periods. This makes it similar to -parental skills in the CHS paper. - -In all periods and for all measurement equations the control variables x1 and -x2 are used, where x2 is a constant. - -What has to be specified? -************************* - -Before thinking about how to translate the above example into a model -specification, it is helpful to recall what information is needed to define a -general latent factor model: - - #. What are the latent factors of the model and how are they related over time? - (transition equations) - #. What are the measurement variables of each factor in each period and how are - measurements and factors related? (measurement equations) - #. What are the normalizations of scale (normalized factor loadings) - and location (normalized intercepts or means)? - #. What are the control variables in each period? - #. If development stages are used: Which periods belong to which stage? - #. If anchoring is used: Which factors are anchored and what is the anchoring - outcome? - #. Are there any observed factors? - -The sketch below shows how these ingredients map to the sections of a model -dictionary. 
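To make this checklist concrete before diving into the details, here is a minimal sketch of a model dictionary for a hypothetical one-factor, two-period model; the names ``fac1``, ``y1``, ``y2``, ``x1`` and ``Q1`` are illustrative placeholders, not the example model from above:

.. code-block:: python

    # Sketch: a minimal model dictionary. The sections are explained in
    # detail in the remainder of this guide.
    model = {
        "factors": {
            "fac1": {
                # one sublist of measurement names per period
                "measurements": [["y1", "y2"], ["y1", "y2"]],
                # normalization of scale: fix the loading of y1 to 1
                "normalizations": {"loadings": [{"y1": 1}, {"y1": 1}]},
                "transition_equation": "linear",
            },
        },
        "controls": ["x1"],  # a constant is always included automatically
        "stagemap": [0],  # one entry fewer than the number of periods
        "anchoring": {"outcomes": {"fac1": "Q1"}, "free_loadings": True},
    }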
- -Translating the model to a dictionary -************************************* - -Before explaining how the model dictionary is written, here is a full specification of the -example model as a yaml file: - - -.. literalinclude:: ../../../skillmodels/tests/model2.yaml - :language: yaml - :linenos: - -The model specification is a nested dictionary. The outer keys (which I call sections) -are ``"factors"``, ``"anchoring"``, ``"controls"``, ``"stagemap"`` and -``"estimation_options"``. All but the first are optional, but typically you will use at -least some of them. - - -``factors`` ----------- - -The factors are described as a dictionary. The keys are the names of the factors. -Any Python string is possible as a factor name. The values are dictionaries with three -entries: - -- measurements: A nested list that is as long as the number of periods of the model. - Each sublist contains the names of the measurements in that period. If a factor has - no measurements in a period, it has to be an empty list. - -- normalizations: This entry is optional. It is a dictionary that can have the keys - ``"loadings"`` and ``"intercepts"``. The values are lists of dictionaries. The list - needs to contain one dictionary per period of the model. The keys of the dictionaries - are names of measurements. The values are the values they are normalized to. Note that - loadings cannot be normalized to zero. - -- transition_equation: A string with the name of a pre-implemented transition equation - or a custom transition equation. Pre-implemented transition equations are - linear, log_ces (in the known location and scale version), constant and translog. - The example model dictionary only uses pre-implemented transition functions. - - To see how to use custom transition functions, assume that the yaml file shown above - has been loaded into a Python dictionary called ``model`` and look at the following - code: - - .. code-block:: python - - from skillmodels.decorators import register_params - - @register_params(params=[]) - def constant(fac3, params): - return fac3 - - @register_params(params=["fac1", "fac2", "fac3", "constant"]) - def linear(fac1, fac2, fac3, params): - p = params - out = p["constant"] + fac1 * p["fac1"] + fac2 * p["fac2"] + fac3 * p["fac3"] - return out - - model["factors"]["fac2"]["transition_function"] = linear - model["factors"]["fac3"]["transition_function"] = constant - - The modified model_dict describes the exact same model, but this time it is expressed - in terms of custom transition functions. - - The ``@register_params`` decorator is necessary to tell skillmodels which parameters - are required for the transition function. Custom transition functions can take the - following arguments: - - - **params** (mandatory): A dictionary with the parameters described in the decorator. - - The observed and unobserved factors as floats. - - **states**: A 1d jax array with states in the factor order specified in the - model dictionary. - - The order of arguments is irrelevant. All functions need to return a float. - The functions need to be jax jit and vmap compatible. We vmap over all arguments - except for params. A sketch using the ``states`` interface follows below. 
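Because custom transition functions may alternatively accept the full ``states`` array instead of the individual factors, the ``linear`` example from above can also be written against that interface. A sketch, assuming the three factors of the example model (``linear_via_states`` is an illustrative name):

.. code-block:: python

    import jax.numpy as jnp

    from skillmodels.decorators import register_params

    @register_params(params=["fac1", "fac2", "fac3", "constant"])
    def linear_via_states(states, params):
        # states is a 1d jax array in the factor order of the model dict
        coeffs = jnp.array([params["fac1"], params["fac2"], params["fac3"]])
        return params["constant"] + states @ coeffs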
-- ``"free_controls"``: Whether the control variables used in the measurement equations - should also be used in the anchoring equations. Default False. This is mainly there - to support the CHS example model and will probably not be set to True in any real - application. -- ``"free_constant"``: Whether the anchoring equation should have a constant. Default - False. This should be set to True if there are normalizations of location (i.e. - normalized intercepts) in the measurement equations. -- ``"free_loadings"``: If true, the loadings are estimated, otherwise they are fixed to - one. Default False. This should be set to True if there are normalizations of scale - (i.e. normalized loadings) in the measurement equations. -- ``"ignore_constant_when_anchoring"``: If true, no constant is used when anchoring the - latent factors, even if one was estimated. Default False. This is mainly there - to support the CHS example model and will probably not be set to True in any real - application. - - - -``"controls"`` --------------- - -A list of variables that are used as controls in the measurement equations. You do not -have to specify as constant as control variable, because it is always included. If you -want to get rid of controls in some periods, you have to normalize their coefficients -to zero. - -``"stagemap"`` --------------- - - -A list that has one entry less than the number of periods of the model. It maps periods -to development stages. See :ref:`stages_vs_periods` for the meaning of development -stages. - - -``"observed_factors"`` ----------------------- - -A list with variable names. Those variable names must be present in the dataset and -contain information about observed factors. An example of an observed factor could -be income, a treatment assignment or age. - - -Observed factors do not have transition equations, do not require multiple measurements -per period and are not part of the covariance matrix of the latent factors. As such, -adding an observed factor is computationally much less demanding than adding an -unobserved factor. - - -``"estimation_options"`` ------------------------- - -Another dictionary. It has the following entries. - -- ``"sigma_points_scale"``: The scaling factor of Julier sigma points. Default 2 which - was shown to work well for the example models by Cunha, Heckman and Schennach. -- ``"robust_bounds"``: Bool. If true, bound constraints are made stricter. This avoids - exploding likelihoods when the standard deviation of the measurement error is zero. - Default True. -- ``"bounds_distance"``: By how much the bounds are made stricter. Only relevant when - robust bounds are used. Default ``0.001``. -- ``"clipping_lower_bound": Strongly negative value at which the log likelihood is - clipped a log likelihood of -infinity. The clipping is done using a soft maximum - to avoid non-differentiable points in the likelihood. Default ``-1e-250``. Set to - ``None`` to disable this completely. -- ``"clipping_upper_bound". Same as ``"clipping_lower_bound"`` but from above. Default - None because typically the better way of avoiding upwards exploding likelihoods is to - set bounds strictly above zero for the measurement error standard deviations. -- ``"clipping_lower_hardness"`` and ``"clipping_upper_hardness"``. How closely the soft - maximum or minimum we use for clipping approximates its hard counterpart. Default 1 - which is an extremely close approximation of the hard maximum or minimum. 
If you want - to make the likelihood function smoother you should set it to a much lower value. - - - - -.. _replication files: - https://tinyurl.com/yyuq2sa4 diff --git a/docs/source/how_to_guides/utilities.rst b/docs/source/how_to_guides/utilities.rst deleted file mode 100644 index 7b372f30..00000000 --- a/docs/source/how_to_guides/utilities.rst +++ /dev/null @@ -1,14 +0,0 @@ -How to modify model specifications -================================== - - -``skillmodels.utilities`` contains functions to construct a model dictionary by varying -an existing one and to update the parameters of a larger model from estimated parameters -from smaller models. - -All functions that modify model dictionaries can can also modify a params DataFrame -that was constructed for the original model accordingly. - - -.. automodule:: skillmodels.utilities - :members: diff --git a/docs/source/index.rst b/docs/source/index.rst deleted file mode 100644 index 37219c78..00000000 --- a/docs/source/index.rst +++ /dev/null @@ -1,142 +0,0 @@ -Welcome to the documentation of skillmodels! -============================================ - - - -Structure of the Documentation -============================== - - -.. raw:: html - - - - -Welcome to skillmodels, a Python implementation of estimators for skill -formation models. The econometrics of skill formation models is a very active -field and several estimators were proposed. None of them is implemented in -standard econometrics packages. - - -Skillmodels implements the Kalman filter based maximum likelihood estimator -proposed by Cunha, Heckman and Schennach (CHS), (`Econometrica 2010`_) - - -Skillmodels was developed for skill formation models but is by no means -limited to this particular application. It can be applied to any dynamic -nonlinear latent factor model. - -The CHS estimator implemented here differs in two points from the one -implemented in their `replication files`_: 1) It uses different normalizations -that take into account the `critique`_ of Wiswall and Agostinelli. 2) It can -optionally use more robust square-root implementations of the Kalman filters. - - -Most of the code is unit tested. Furthermore, the results have been compared -to the Fortran code by CHS for two basic models with hypothetical data from -their `replication files`_. - - -**Citation** - -It took countless hours to write skillmodels. I make it available under a very -permissive license in the hope that it helps other people to do great research -that advances our knowledge about the formation of cognitive and noncognitive -siklls. If you find skillmodels helpful, please don't forget to cite it. You -can find a suggested citation in the README file on `GitHub`_. - - -**Feedback** - -If you find skillmodels helpful for research or teaching, please let me know. -If you encounter any problems with the installation or while using -skillmodels, please complain or open an issue at `GitHub`_. - - - -.. _critique: - https://tinyurl.com/y3wl43kz - -.. _replication files: - https://tinyurl.com/yyuq2sa4 - -.. _GitHub: - https://github.com/OpenSourceEconomics/skillmodels - - -.. _Econometrica 2010: - http://onlinelibrary.wiley.com/doi/10.3982/ECTA6551/abstract - - -.. 
toctree:: - :maxdepth: 1 - - getting_started/index - how_to_guides/index - explanations/index - reference_guides/index diff --git a/docs/source/reference_guides/endogeneity_corrections.rst b/docs/source/reference_guides/endogeneity_corrections.rst deleted file mode 100644 index 15eda667..00000000 --- a/docs/source/reference_guides/endogeneity_corrections.rst +++ /dev/null @@ -1,47 +0,0 @@ -A note on endogeneity correction methods -***************************************** - -In the empirical part of their paper, CHS use two methods for endogeneity -correction. Both require very strong assumptions on the scale of factors. -Below I give an overview of the proposed endogeneity correction methods that -can serve as a starting point for someone who wants to extend skillmodels in -that direction: - -In section 4.2.4, CHS extend their basic model with a time-invariant, individual-specific -heterogeneity component, i.e. a fixed effect. The time invariance -assumption can only be valid if the scale of all factors remains the same -throughout the model. This is highly unlikely, unless age-invariant -measurements (as defined by Wiswall and Agostinelli) are available and used -for normalization in all periods for all factors. With KLS transition -functions the assumption of the factor scales remaining constant in all -periods is highly unlikely (see: :ref:`KLS_not_constant`). Moreover, this -approach requires three adult outcomes. If you have a dataset with enough time-invariant -measurements and enough adult outcomes, this method is suitable for -you and you could use the Fortran code by CHS as a starting point. - -In section 4.2.5, they make an endogeneity correction with time-varying heterogeneity. -However, this heterogeneity follows the same AR1 process in each period and -relies on an estimated time-invariant investment equation, so it also requires -the factor scales to be constant. This might not be a good assumption in many -applications. Moreover, this correction method relies on an exclusion -restriction (income is an argument of the investment function but not of the -transition functions of other latent factors) or suitable functional form -assumptions for identification. - -To use this correction method in models where not enough age-invariant -measurements are available to ensure constant factor scales, one would have to -replace the AR1 process by a linear transition function with different -estimated parameters in each period and also estimate a different investment -function in each period. I don't know if this model is identified. - -I don't know if these methods could be used in the WA estimator. - -Wiswall and Agostinelli use a simpler model of endogeneity of investments that -could be used with both estimators. See section 6.1.2 of their `paper`_. - -.. _paper: - https://tinyurl.com/y5ezloh2 - - -.. _replication files: - https://tinyurl.com/yyuq2sa4 diff --git a/docs/source/reference_guides/estimation.rst b/docs/source/reference_guides/estimation.rst deleted file mode 100644 index c1b4201f..00000000 --- a/docs/source/reference_guides/estimation.rst +++ /dev/null @@ -1,42 +0,0 @@ -============================= -Modules Related to Estimation -============================= - -.. _likelihood_function: - -The Likelihood Function -======================= - -.. automodule:: skillmodels.likelihood_function - :members: - -.. _kalman_filters: - -The Kalman Filters -================== - - -.. 
automodule:: skillmodels.kalman_filters - :members: - - -The Index of the Parameter DataFrame -==================================== - - -.. _params_index: - - -.. automodule:: skillmodels.params_index - :members: - - - -.. _parse_params: - -Parsing the Parameter Vector -============================ - - -.. automodule:: skillmodels.parse_params - :members: diff --git a/docs/source/reference_guides/index.rst b/docs/source/reference_guides/index.rst deleted file mode 100644 index 97224906..00000000 --- a/docs/source/reference_guides/index.rst +++ /dev/null @@ -1,12 +0,0 @@ -Reference Guides -================ - - -.. toctree:: - :maxdepth: 1 - - pre_processing - estimation - simulation - transition_functions - endogeneity_corrections diff --git a/docs/source/reference_guides/pre_processing.rst b/docs/source/reference_guides/pre_processing.rst deleted file mode 100644 index 2b1b1e21..00000000 --- a/docs/source/reference_guides/pre_processing.rst +++ /dev/null @@ -1,37 +0,0 @@ -================================= -How the User Inputs are Processed -================================= - - - - -.. _model_processing: - -Model Processing -================ - - -.. automodule:: skillmodels.process_model - :members: - - - -.. _data_processing: - -Data Processing -=============== - - -.. automodule:: skillmodels.process_data - :members: - - - -.. _model_checking: - -Model Checking -============== - - -.. automodule:: skillmodels.check_model - :members: diff --git a/docs/source/reference_guides/simulation.rst b/docs/source/reference_guides/simulation.rst deleted file mode 100644 index f44cea30..00000000 --- a/docs/source/reference_guides/simulation.rst +++ /dev/null @@ -1,12 +0,0 @@ -============================= -Modules Related to Simulation -============================= - -.. _simulate_data: - - -Simulating a Dataset -==================== - -.. automodule:: skillmodels.simulate_data - :members: diff --git a/docs/source/reference_guides/transition_functions.rst b/docs/source/reference_guides/transition_functions.rst deleted file mode 100644 index 72c46fe9..00000000 --- a/docs/source/reference_guides/transition_functions.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. _transition_functions: - -Transition Equations -==================== - - -.. 
automodule:: skillmodels.transition_functions - :members: diff --git a/docs/source/rtd_environment.yml b/docs/source/rtd_environment.yml deleted file mode 100644 index 66fd0464..00000000 --- a/docs/source/rtd_environment.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -name: skillmodels_rtd -channels: - - conda-forge -dependencies: - - python=3.12 - - skillmodels - - sphinxcontrib-bibtex - - pydata-sphinx-theme>=0.3 - - sphinx - - nbsphinx diff --git a/docs/source/start_params.csv b/docs/source/start_params.csv deleted file mode 100644 index bce6c807..00000000 --- a/docs/source/start_params.csv +++ /dev/null @@ -1,237 +0,0 @@ -category,period,name1,name2,value,lower,upper,chs_value,good_start_value,bad_start_value -control_coeffs,0,y1,constant,1.00545482,-inf,inf,1.00545482,1.0,0.0 -control_coeffs,0,y1,x1,1.00161847,-inf,inf,1.00161847,1.0,0.0 -control_coeffs,0,y2,constant,0.97599155,-inf,inf,0.97599155,1.0,0.0 -control_coeffs,0,y2,x1,1.03143922,-inf,inf,1.03143922,1.0,0.0 -control_coeffs,0,y3,constant,0.99413941,-inf,inf,0.99413941,1.0,0.0 -control_coeffs,0,y3,x1,0.99409082,-inf,inf,0.99409082,1.0,0.0 -control_coeffs,0,y4,constant,1.00601018,-inf,inf,1.00601018,1.0,0.0 -control_coeffs,0,y4,x1,1.01833579,-inf,inf,1.01833579,1.0,0.0 -control_coeffs,0,y5,constant,1.00099227,-inf,inf,1.00099227,1.0,0.0 -control_coeffs,0,y5,x1,1.02354619,-inf,inf,1.02354619,1.0,0.0 -control_coeffs,0,y6,constant,0.96857115,-inf,inf,0.96857115,1.0,0.0 -control_coeffs,0,y6,x1,1.04884144,-inf,inf,1.04884144,1.0,0.0 -control_coeffs,0,y7,constant,0.97482723,-inf,inf,0.97482723,1.0,0.0 -control_coeffs,0,y7,x1,1.04176152,-inf,inf,1.04176152,1.0,0.0 -control_coeffs,0,y8,constant,0.98188173,-inf,inf,0.98188173,1.0,0.0 -control_coeffs,0,y8,x1,1.00566791,-inf,inf,1.00566791,1.0,0.0 -control_coeffs,0,y9,constant,0.92514421,-inf,inf,0.92514421,0.9,0.0 -control_coeffs,0,y9,x1,1.04848607,-inf,inf,1.04848607,1.0,0.0 -control_coeffs,0,Q1_fac1,constant,1.03648928,-inf,inf,1.03648928,1.0,0.0 -control_coeffs,0,Q1_fac1,x1,0.94351157,-inf,inf,0.94351157,0.9,0.0 -control_coeffs,1,y1,constant,1.0528786,-inf,inf,1.0528786,1.1,0.0 -control_coeffs,1,y1,x1,0.9565774,-inf,inf,0.9565774,1.0,0.0 -control_coeffs,1,y2,constant,1.10002752,-inf,inf,1.10002752,1.1,0.0 -control_coeffs,1,y2,x1,0.94514768,-inf,inf,0.94514768,0.9,0.0 -control_coeffs,1,y3,constant,1.11220924,-inf,inf,1.11220924,1.1,0.0 -control_coeffs,1,y3,x1,0.94921197,-inf,inf,0.94921197,0.9,0.0 -control_coeffs,1,y4,constant,1.05321768,-inf,inf,1.05321768,1.1,0.0 -control_coeffs,1,y4,x1,0.97927901,-inf,inf,0.97927901,1.0,0.0 -control_coeffs,1,y5,constant,1.07984824,-inf,inf,1.07984824,1.1,0.0 -control_coeffs,1,y5,x1,0.95632487,-inf,inf,0.95632487,1.0,0.0 -control_coeffs,1,y6,constant,1.04155956,-inf,inf,1.04155956,1.0,0.0 -control_coeffs,1,y6,x1,0.98720524,-inf,inf,0.98720524,1.0,0.0 -control_coeffs,1,Q1_fac1,constant,1.03648928,-inf,inf,1.03648928,1.0,0.0 -control_coeffs,1,Q1_fac1,x1,0.94351157,-inf,inf,0.94351157,0.9,0.0 -control_coeffs,2,y1,constant,1.01759386,-inf,inf,1.01759386,1.0,0.0 -control_coeffs,2,y1,x1,0.98278806,-inf,inf,0.98278806,1.0,0.0 -control_coeffs,2,y2,constant,0.99449381,-inf,inf,0.99449381,1.0,0.0 -control_coeffs,2,y2,x1,1.00015093,-inf,inf,1.00015093,1.0,0.0 -control_coeffs,2,y3,constant,1.04145063,-inf,inf,1.04145063,1.0,0.0 -control_coeffs,2,y3,x1,0.93918533,-inf,inf,0.93918533,0.9,0.0 -control_coeffs,2,y4,constant,0.98808056,-inf,inf,0.98808056,1.0,0.0 -control_coeffs,2,y4,x1,1.00539897,-inf,inf,1.00539897,1.0,0.0 
-control_coeffs,2,y5,constant,0.93727871,-inf,inf,0.93727871,0.9,0.0 -control_coeffs,2,y5,x1,1.03995916,-inf,inf,1.03995916,1.0,0.0 -control_coeffs,2,y6,constant,0.97863335,-inf,inf,0.97863335,1.0,0.0 -control_coeffs,2,y6,x1,1.02370063,-inf,inf,1.02370063,1.0,0.0 -control_coeffs,2,Q1_fac1,constant,1.03648928,-inf,inf,1.03648928,1.0,0.0 -control_coeffs,2,Q1_fac1,x1,0.94351157,-inf,inf,0.94351157,0.9,0.0 -control_coeffs,3,y1,constant,1.07747808,-inf,inf,1.07747808,1.1,0.0 -control_coeffs,3,y1,x1,0.95263385,-inf,inf,0.95263385,1.0,0.0 -control_coeffs,3,y2,constant,1.01595775,-inf,inf,1.01595775,1.0,0.0 -control_coeffs,3,y2,x1,0.97511705,-inf,inf,0.97511705,1.0,0.0 -control_coeffs,3,y3,constant,1.00409134,-inf,inf,1.00409134,1.0,0.0 -control_coeffs,3,y3,x1,0.99671239,-inf,inf,0.99671239,1.0,0.0 -control_coeffs,3,y4,constant,1.00265983,-inf,inf,1.00265983,1.0,0.0 -control_coeffs,3,y4,x1,0.97463783,-inf,inf,0.97463783,1.0,0.0 -control_coeffs,3,y5,constant,0.98936892,-inf,inf,0.98936892,1.0,0.0 -control_coeffs,3,y5,x1,1.00354587,-inf,inf,1.00354587,1.0,0.0 -control_coeffs,3,y6,constant,1.01816115,-inf,inf,1.01816115,1.0,0.0 -control_coeffs,3,y6,x1,1.00220065,-inf,inf,1.00220065,1.0,0.0 -control_coeffs,3,Q1_fac1,constant,1.03648928,-inf,inf,1.03648928,1.0,0.0 -control_coeffs,3,Q1_fac1,x1,0.94351157,-inf,inf,0.94351157,0.9,0.0 -control_coeffs,4,y1,constant,0.97390947,-inf,inf,0.97390947,1.0,0.0 -control_coeffs,4,y1,x1,1.01871361,-inf,inf,1.01871361,1.0,0.0 -control_coeffs,4,y2,constant,1.01276643,-inf,inf,1.01276643,1.0,0.0 -control_coeffs,4,y2,x1,0.96884594,-inf,inf,0.96884594,1.0,0.0 -control_coeffs,4,y3,constant,1.0639416000000002,-inf,inf,1.0639416000000002,1.1,0.0 -control_coeffs,4,y3,x1,0.96348822,-inf,inf,0.96348822,1.0,0.0 -control_coeffs,4,y4,constant,1.05030944,-inf,inf,1.05030944,1.1,0.0 -control_coeffs,4,y4,x1,0.97249741,-inf,inf,0.97249741,1.0,0.0 -control_coeffs,4,y5,constant,0.95867367,-inf,inf,0.95867367,1.0,0.0 -control_coeffs,4,y5,x1,1.01263275,-inf,inf,1.01263275,1.0,0.0 -control_coeffs,4,y6,constant,1.02067436,-inf,inf,1.02067436,1.0,0.0 -control_coeffs,4,y6,x1,0.97561054,-inf,inf,0.97561054,1.0,0.0 -control_coeffs,4,Q1_fac1,constant,1.03648928,-inf,inf,1.03648928,1.0,0.0 -control_coeffs,4,Q1_fac1,x1,0.94351157,-inf,inf,0.94351157,0.9,0.0 -control_coeffs,5,y1,constant,0.92274679,-inf,inf,0.92274679,0.9,0.0 -control_coeffs,5,y1,x1,1.02885338,-inf,inf,1.02885338,1.0,0.0 -control_coeffs,5,y2,constant,0.97278707,-inf,inf,0.97278707,1.0,0.0 -control_coeffs,5,y2,x1,0.99430379,-inf,inf,0.99430379,1.0,0.0 -control_coeffs,5,y3,constant,1.02364392,-inf,inf,1.02364392,1.0,0.0 -control_coeffs,5,y3,x1,0.97922354,-inf,inf,0.97922354,1.0,0.0 -control_coeffs,5,y4,constant,0.9576603,-inf,inf,0.9576603,1.0,0.0 -control_coeffs,5,y4,x1,1.01557986,-inf,inf,1.01557986,1.0,0.0 -control_coeffs,5,y5,constant,1.04015694,-inf,inf,1.04015694,1.0,0.0 -control_coeffs,5,y5,x1,0.96898918,-inf,inf,0.96898918,1.0,0.0 -control_coeffs,5,y6,constant,1.00635915,-inf,inf,1.00635915,1.0,0.0 -control_coeffs,5,y6,x1,0.97658414,-inf,inf,0.97658414,1.0,0.0 -control_coeffs,5,Q1_fac1,constant,1.03648928,-inf,inf,1.03648928,1.0,0.0 -control_coeffs,5,Q1_fac1,x1,0.94351157,-inf,inf,0.94351157,0.9,0.0 -control_coeffs,6,y1,constant,0.99929141,-inf,inf,0.99929141,1.0,0.0 -control_coeffs,6,y1,x1,0.98368467,-inf,inf,0.98368467,1.0,0.0 -control_coeffs,6,y2,constant,1.04782772,-inf,inf,1.04782772,1.0,0.0 -control_coeffs,6,y2,x1,0.93183755,-inf,inf,0.93183755,0.9,0.0 -control_coeffs,6,y3,constant,1.00920751,-inf,inf,1.00920751,1.0,0.0 
-control_coeffs,6,y3,x1,0.95146637,-inf,inf,0.95146637,1.0,0.0 -control_coeffs,6,y4,constant,1.05400193,-inf,inf,1.05400193,1.1,0.0 -control_coeffs,6,y4,x1,0.99101302,-inf,inf,0.99101302,1.0,0.0 -control_coeffs,6,y5,constant,1.06296891,-inf,inf,1.06296891,1.1,0.0 -control_coeffs,6,y5,x1,0.9781293,-inf,inf,0.9781293,1.0,0.0 -control_coeffs,6,y6,constant,1.01391456,-inf,inf,1.01391456,1.0,0.0 -control_coeffs,6,y6,x1,0.9949447,-inf,inf,0.9949447,1.0,0.0 -control_coeffs,6,Q1_fac1,constant,1.03648928,-inf,inf,1.03648928,1.0,0.0 -control_coeffs,6,Q1_fac1,x1,0.94351157,-inf,inf,0.94351157,0.9,0.0 -control_coeffs,7,y1,constant,1.03694603,-inf,inf,1.03694603,1.0,0.0 -control_coeffs,7,y1,x1,0.94997421,-inf,inf,0.94997421,0.9,0.0 -control_coeffs,7,y2,constant,1.05734328,-inf,inf,1.05734328,1.1,0.0 -control_coeffs,7,y2,x1,0.94683688,-inf,inf,0.94683688,0.9,0.0 -control_coeffs,7,y3,constant,1.04305552,-inf,inf,1.04305552,1.0,0.0 -control_coeffs,7,y3,x1,0.97187261,-inf,inf,0.97187261,1.0,0.0 -control_coeffs,7,y4,constant,1.06978168,-inf,inf,1.06978168,1.1,0.0 -control_coeffs,7,y4,x1,0.94043886,-inf,inf,0.94043886,0.9,0.0 -control_coeffs,7,y5,constant,1.01505491,-inf,inf,1.01505491,1.0,0.0 -control_coeffs,7,y5,x1,0.9762739,-inf,inf,0.9762739,1.0,0.0 -control_coeffs,7,y6,constant,1.02936846,-inf,inf,1.02936846,1.0,0.0 -control_coeffs,7,y6,x1,0.9811178,-inf,inf,0.9811178,1.0,0.0 -control_coeffs,7,Q1_fac1,constant,1.03648928,-inf,inf,1.03648928,1.0,0.0 -control_coeffs,7,Q1_fac1,x1,0.94351157,-inf,inf,0.94351157,0.9,0.0 -loading,0,y2,fac1,1.24759799,-inf,inf,1.24759799,1.2,1.0 -loading,0,y3,fac1,1.55939677,-inf,inf,1.55939677,1.6,1.0 -loading,0,y5,fac2,0.9908065,-inf,inf,0.9908065,1.0,1.0 -loading,0,y6,fac2,0.68554932,-inf,inf,0.68554932,0.7,1.0 -loading,0,y8,fac3,1.1132403,-inf,inf,1.1132403,1.1,1.0 -loading,0,y9,fac3,0.70906931,-inf,inf,0.70906931,0.7,1.0 -loading,0,Q1_fac1,fac1,0.93520167,-inf,inf,0.93520167,0.9,1.0 -loading,1,y2,fac1,1.21745308,-inf,inf,1.21745308,1.2,1.0 -loading,1,y3,fac1,1.34547457,-inf,inf,1.34547457,1.3,1.0 -loading,1,y5,fac2,0.82645675,-inf,inf,0.82645675,0.8,1.0 -loading,1,y6,fac2,0.60128174,-inf,inf,0.60128174,0.6,1.0 -loading,1,Q1_fac1,fac1,0.93520167,-inf,inf,0.93520167,0.9,1.0 -loading,2,y2,fac1,1.18091396,-inf,inf,1.18091396,1.2,1.0 -loading,2,y3,fac1,1.41494654,-inf,inf,1.41494654,1.4,1.0 -loading,2,y5,fac2,0.78412623,-inf,inf,0.78412623,0.8,1.0 -loading,2,y6,fac2,0.6095229,-inf,inf,0.6095229,0.6,1.0 -loading,2,Q1_fac1,fac1,0.93520167,-inf,inf,0.93520167,0.9,1.0 -loading,3,y2,fac1,1.28957256,-inf,inf,1.28957256,1.3,1.0 -loading,3,y3,fac1,1.42483912,-inf,inf,1.42483912,1.4,1.0 -loading,3,y5,fac2,0.75831051,-inf,inf,0.75831051,0.8,1.0 -loading,3,y6,fac2,0.60278636,-inf,inf,0.60278636,0.6,1.0 -loading,3,Q1_fac1,fac1,0.93520167,-inf,inf,0.93520167,0.9,1.0 -loading,4,y2,fac1,1.24216652,-inf,inf,1.24216652,1.2,1.0 -loading,4,y3,fac1,1.419458,-inf,inf,1.419458,1.4,1.0 -loading,4,y5,fac2,0.83681772,-inf,inf,0.83681772,0.8,1.0 -loading,4,y6,fac2,0.5249411999999999,-inf,inf,0.5249411999999999,0.5,1.0 -loading,4,Q1_fac1,fac1,0.93520167,-inf,inf,0.93520167,0.9,1.0 -loading,5,y2,fac1,1.20739978,-inf,inf,1.20739978,1.2,1.0 -loading,5,y3,fac1,1.43586124,-inf,inf,1.43586124,1.4,1.0 -loading,5,y5,fac2,0.76476822,-inf,inf,0.76476822,0.8,1.0 -loading,5,y6,fac2,0.60105183,-inf,inf,0.60105183,0.6,1.0 -loading,5,Q1_fac1,fac1,0.93520167,-inf,inf,0.93520167,0.9,1.0 -loading,6,y2,fac1,1.22402329,-inf,inf,1.22402329,1.2,1.0 -loading,6,y3,fac1,1.42265974,-inf,inf,1.42265974,1.4,1.0 
-loading,6,y5,fac2,0.68885845,-inf,inf,0.68885845,0.7,1.0 -loading,6,y6,fac2,0.61882325,-inf,inf,0.61882325,0.6,1.0 -loading,6,Q1_fac1,fac1,0.93520167,-inf,inf,0.93520167,0.9,1.0 -loading,7,y2,fac1,1.23608389,-inf,inf,1.23608389,1.2,1.0 -loading,7,y3,fac1,1.47859872,-inf,inf,1.47859872,1.5,1.0 -loading,7,y5,fac2,0.81524559,-inf,inf,0.81524559,0.8,1.0 -loading,7,y6,fac2,0.57084593,-inf,inf,0.57084593,0.6,1.0 -loading,7,Q1_fac1,fac1,0.93520167,-inf,inf,0.93520167,0.9,1.0 -meas_sd,0,y1,-,0.50497719,-inf,inf,0.50497719,0.5,0.8 -meas_sd,0,y2,-,0.50088168,-inf,inf,0.50088168,0.5,0.8 -meas_sd,0,y3,-,0.48136282,-inf,inf,0.48136282,0.5,0.8 -meas_sd,0,y4,-,0.53215346,-inf,inf,0.53215346,0.5,0.8 -meas_sd,0,y5,-,0.47039143,-inf,inf,0.47039143,0.5,0.8 -meas_sd,0,y6,-,0.48344469,-inf,inf,0.48344469,0.5,0.8 -meas_sd,0,y7,-,0.47943359,-inf,inf,0.47943359,0.5,0.8 -meas_sd,0,y8,-,0.53421227,-inf,inf,0.53421227,0.5,0.8 -meas_sd,0,y9,-,0.50146093,-inf,inf,0.50146093,0.5,0.8 -meas_sd,0,Q1_fac1,-,1.03957418,-inf,inf,1.03957418,1.0,0.8 -meas_sd,1,y1,-,0.49105567,-inf,inf,0.49105567,0.5,0.8 -meas_sd,1,y2,-,0.49870431,-inf,inf,0.49870431,0.5,0.8 -meas_sd,1,y3,-,0.50514084,-inf,inf,0.50514084,0.5,0.8 -meas_sd,1,y4,-,0.49743526,-inf,inf,0.49743526,0.5,0.8 -meas_sd,1,y5,-,0.4994178,-inf,inf,0.4994178,0.5,0.8 -meas_sd,1,y6,-,0.50424182,-inf,inf,0.50424182,0.5,0.8 -meas_sd,1,Q1_fac1,-,1.03957418,-inf,inf,1.03957418,1.0,0.8 -meas_sd,2,y1,-,0.50427244,-inf,inf,0.50427244,0.5,0.8 -meas_sd,2,y2,-,0.51856939,-inf,inf,0.51856939,0.5,0.8 -meas_sd,2,y3,-,0.50392617,-inf,inf,0.50392617,0.5,0.8 -meas_sd,2,y4,-,0.49161026,-inf,inf,0.49161026,0.5,0.8 -meas_sd,2,y5,-,0.50441808,-inf,inf,0.50441808,0.5,0.8 -meas_sd,2,y6,-,0.48482939,-inf,inf,0.48482939,0.5,0.8 -meas_sd,2,Q1_fac1,-,1.03957418,-inf,inf,1.03957418,1.0,0.8 -meas_sd,3,y1,-,0.49476345,-inf,inf,0.49476345,0.5,0.8 -meas_sd,3,y2,-,0.49363682,-inf,inf,0.49363682,0.5,0.8 -meas_sd,3,y3,-,0.49918763,-inf,inf,0.49918763,0.5,0.8 -meas_sd,3,y4,-,0.49728617,-inf,inf,0.49728617,0.5,0.8 -meas_sd,3,y5,-,0.5116465,-inf,inf,0.5116465,0.5,0.8 -meas_sd,3,y6,-,0.48035036,-inf,inf,0.48035036,0.5,0.8 -meas_sd,3,Q1_fac1,-,1.03957418,-inf,inf,1.03957418,1.0,0.8 -meas_sd,4,y1,-,0.50529312,-inf,inf,0.50529312,0.5,0.8 -meas_sd,4,y2,-,0.50706948,-inf,inf,0.50706948,0.5,0.8 -meas_sd,4,y3,-,0.47849704,-inf,inf,0.47849704,0.5,0.8 -meas_sd,4,y4,-,0.49962829,-inf,inf,0.49962829,0.5,0.8 -meas_sd,4,y5,-,0.49001347,-inf,inf,0.49001347,0.5,0.8 -meas_sd,4,y6,-,0.48723789,-inf,inf,0.48723789,0.5,0.8 -meas_sd,4,Q1_fac1,-,1.03957418,-inf,inf,1.03957418,1.0,0.8 -meas_sd,5,y1,-,0.51551926,-inf,inf,0.51551926,0.5,0.8 -meas_sd,5,y2,-,0.52331776,-inf,inf,0.52331776,0.5,0.8 -meas_sd,5,y3,-,0.48326815,-inf,inf,0.48326815,0.5,0.8 -meas_sd,5,y4,-,0.47910757,-inf,inf,0.47910757,0.5,0.8 -meas_sd,5,y5,-,0.50327233,-inf,inf,0.50327233,0.5,0.8 -meas_sd,5,y6,-,0.49705186,-inf,inf,0.49705186,0.5,0.8 -meas_sd,5,Q1_fac1,-,1.03957418,-inf,inf,1.03957418,1.0,0.8 -meas_sd,6,y1,-,0.52223919,-inf,inf,0.52223919,0.5,0.8 -meas_sd,6,y2,-,0.48641122,-inf,inf,0.48641122,0.5,0.8 -meas_sd,6,y3,-,0.47597189,-inf,inf,0.47597189,0.5,0.8 -meas_sd,6,y4,-,0.52595048,-inf,inf,0.52595048,0.5,0.8 -meas_sd,6,y5,-,0.51187305,-inf,inf,0.51187305,0.5,0.8 -meas_sd,6,y6,-,0.52425668,-inf,inf,0.52425668,0.5,0.8 -meas_sd,6,Q1_fac1,-,1.03957418,-inf,inf,1.03957418,1.0,0.8 -meas_sd,7,y1,-,0.52163477,-inf,inf,0.52163477,0.5,0.8 -meas_sd,7,y2,-,0.52112353,-inf,inf,0.52112353,0.5,0.8 -meas_sd,7,y3,-,0.47545353,-inf,inf,0.47545353,0.5,0.8 
-meas_sd,7,y4,-,0.5150197,-inf,inf,0.5150197,0.5,0.8 -meas_sd,7,y5,-,0.48993218,-inf,inf,0.48993218,0.5,0.8 -meas_sd,7,y6,-,0.52777721,-inf,inf,0.52777721,0.5,0.8 -meas_sd,7,Q1_fac1,-,1.03957418,-inf,inf,1.03957418,1.0,0.8 -shock_sd,0,fac1,-,0.321936173798472,-inf,inf,0.5673941961268832,0.31622776601683794,0.7071067811865476 -shock_sd,0,fac2,-,0.3131064355774247,-inf,inf,0.5595591439494352,0.31622776601683794,0.7071067811865476 -initial_mean,0,mixture_0,fac1,0.0,-inf,inf,0.0,0.0,0.0 -initial_mean,0,mixture_0,fac2,0.0,-inf,inf,0.0,0.0,0.0 -initial_mean,0,mixture_0,fac3,0.0,-inf,inf,0.0,0.0,0.0 -initial_cov,0,mixture_0,fac1-fac1,0.17647290000000002,-inf,inf,0.17647290000000002,0.2,0.5 -initial_cov,0,mixture_0,fac2-fac1,0.00524114,-inf,inf,0.00524114,0.0,0.0 -initial_cov,0,mixture_0,fac2-fac2,0.18362641,-inf,inf,0.18362641,0.2,0.5 -initial_cov,0,mixture_0,fac3-fac1,0.005665399999999999,-inf,inf,0.005665399999999999,0.0,0.0 -initial_cov,0,mixture_0,fac3-fac2,-0.00067522,-inf,inf,-0.00067522,0.0,0.0 -initial_cov,0,mixture_0,fac3-fac3,0.23194739,-inf,inf,0.23194739,0.2,0.5 -trans,0,fac1,fac1,0.65978837,-inf,inf,0.65978837,0.7,0.5 -trans,0,fac1,fac2,0.174038,-inf,inf,0.174038,0.2,0.25 -trans,0,fac1,fac3,0.16617363,-inf,inf,0.16617363,0.1,0.25 -trans,0,fac1,phi,-0.40701787,-inf,inf,-0.40701787,-0.4,-0.2 -trans,0,fac2,fac2,0.60887112,-inf,inf,0.60887112,0.6,0.5 -trans,0,fac2,constant,0.0,-inf,inf,0.0,0.0,0.0 diff --git a/docs/source/start_params_template.csv b/docs/source/start_params_template.csv deleted file mode 100644 index 7b219d81..00000000 --- a/docs/source/start_params_template.csv +++ /dev/null @@ -1,237 +0,0 @@ -category,period,name1,name2,value,lower_bound,upper_bound -delta,0,y1,constant,,-inf,inf -delta,0,y1,x1,,-inf,inf -delta,0,y2,constant,,-inf,inf -delta,0,y2,x1,,-inf,inf -delta,0,y3,constant,,-inf,inf -delta,0,y3,x1,,-inf,inf -delta,0,y4,constant,,-inf,inf -delta,0,y4,x1,,-inf,inf -delta,0,y5,constant,,-inf,inf -delta,0,y5,x1,,-inf,inf -delta,0,y6,constant,,-inf,inf -delta,0,y6,x1,,-inf,inf -delta,0,y7,constant,,-inf,inf -delta,0,y7,x1,,-inf,inf -delta,0,y8,constant,,-inf,inf -delta,0,y8,x1,,-inf,inf -delta,0,y9,constant,,-inf,inf -delta,0,y9,x1,,-inf,inf -delta,0,Q1_fac1,constant,,-inf,inf -delta,0,Q1_fac1,x1,,-inf,inf -delta,1,y1,constant,,-inf,inf -delta,1,y1,x1,,-inf,inf -delta,1,y2,constant,,-inf,inf -delta,1,y2,x1,,-inf,inf -delta,1,y3,constant,,-inf,inf -delta,1,y3,x1,,-inf,inf -delta,1,y4,constant,,-inf,inf -delta,1,y4,x1,,-inf,inf -delta,1,y5,constant,,-inf,inf -delta,1,y5,x1,,-inf,inf -delta,1,y6,constant,,-inf,inf -delta,1,y6,x1,,-inf,inf -delta,1,Q1_fac1,constant,,-inf,inf -delta,1,Q1_fac1,x1,,-inf,inf -delta,2,y1,constant,,-inf,inf -delta,2,y1,x1,,-inf,inf -delta,2,y2,constant,,-inf,inf -delta,2,y2,x1,,-inf,inf -delta,2,y3,constant,,-inf,inf -delta,2,y3,x1,,-inf,inf -delta,2,y4,constant,,-inf,inf -delta,2,y4,x1,,-inf,inf -delta,2,y5,constant,,-inf,inf -delta,2,y5,x1,,-inf,inf -delta,2,y6,constant,,-inf,inf -delta,2,y6,x1,,-inf,inf -delta,2,Q1_fac1,constant,,-inf,inf -delta,2,Q1_fac1,x1,,-inf,inf -delta,3,y1,constant,,-inf,inf -delta,3,y1,x1,,-inf,inf -delta,3,y2,constant,,-inf,inf -delta,3,y2,x1,,-inf,inf -delta,3,y3,constant,,-inf,inf -delta,3,y3,x1,,-inf,inf -delta,3,y4,constant,,-inf,inf -delta,3,y4,x1,,-inf,inf -delta,3,y5,constant,,-inf,inf -delta,3,y5,x1,,-inf,inf -delta,3,y6,constant,,-inf,inf -delta,3,y6,x1,,-inf,inf -delta,3,Q1_fac1,constant,,-inf,inf -delta,3,Q1_fac1,x1,,-inf,inf -delta,4,y1,constant,,-inf,inf -delta,4,y1,x1,,-inf,inf 
-delta,4,y2,constant,,-inf,inf -delta,4,y2,x1,,-inf,inf -delta,4,y3,constant,,-inf,inf -delta,4,y3,x1,,-inf,inf -delta,4,y4,constant,,-inf,inf -delta,4,y4,x1,,-inf,inf -delta,4,y5,constant,,-inf,inf -delta,4,y5,x1,,-inf,inf -delta,4,y6,constant,,-inf,inf -delta,4,y6,x1,,-inf,inf -delta,4,Q1_fac1,constant,,-inf,inf -delta,4,Q1_fac1,x1,,-inf,inf -delta,5,y1,constant,,-inf,inf -delta,5,y1,x1,,-inf,inf -delta,5,y2,constant,,-inf,inf -delta,5,y2,x1,,-inf,inf -delta,5,y3,constant,,-inf,inf -delta,5,y3,x1,,-inf,inf -delta,5,y4,constant,,-inf,inf -delta,5,y4,x1,,-inf,inf -delta,5,y5,constant,,-inf,inf -delta,5,y5,x1,,-inf,inf -delta,5,y6,constant,,-inf,inf -delta,5,y6,x1,,-inf,inf -delta,5,Q1_fac1,constant,,-inf,inf -delta,5,Q1_fac1,x1,,-inf,inf -delta,6,y1,constant,,-inf,inf -delta,6,y1,x1,,-inf,inf -delta,6,y2,constant,,-inf,inf -delta,6,y2,x1,,-inf,inf -delta,6,y3,constant,,-inf,inf -delta,6,y3,x1,,-inf,inf -delta,6,y4,constant,,-inf,inf -delta,6,y4,x1,,-inf,inf -delta,6,y5,constant,,-inf,inf -delta,6,y5,x1,,-inf,inf -delta,6,y6,constant,,-inf,inf -delta,6,y6,x1,,-inf,inf -delta,6,Q1_fac1,constant,,-inf,inf -delta,6,Q1_fac1,x1,,-inf,inf -delta,7,y1,constant,,-inf,inf -delta,7,y1,x1,,-inf,inf -delta,7,y2,constant,,-inf,inf -delta,7,y2,x1,,-inf,inf -delta,7,y3,constant,,-inf,inf -delta,7,y3,x1,,-inf,inf -delta,7,y4,constant,,-inf,inf -delta,7,y4,x1,,-inf,inf -delta,7,y5,constant,,-inf,inf -delta,7,y5,x1,,-inf,inf -delta,7,y6,constant,,-inf,inf -delta,7,y6,x1,,-inf,inf -delta,7,Q1_fac1,constant,,-inf,inf -delta,7,Q1_fac1,x1,,-inf,inf -loading,0,y2,fac1,,-inf,inf -loading,0,y3,fac1,,-inf,inf -loading,0,y5,fac2,,-inf,inf -loading,0,y6,fac2,,-inf,inf -loading,0,y8,fac3,,-inf,inf -loading,0,y9,fac3,,-inf,inf -loading,0,Q1_fac1,fac1,,-inf,inf -loading,1,y2,fac1,,-inf,inf -loading,1,y3,fac1,,-inf,inf -loading,1,y5,fac2,,-inf,inf -loading,1,y6,fac2,,-inf,inf -loading,1,Q1_fac1,fac1,,-inf,inf -loading,2,y2,fac1,,-inf,inf -loading,2,y3,fac1,,-inf,inf -loading,2,y5,fac2,,-inf,inf -loading,2,y6,fac2,,-inf,inf -loading,2,Q1_fac1,fac1,,-inf,inf -loading,3,y2,fac1,,-inf,inf -loading,3,y3,fac1,,-inf,inf -loading,3,y5,fac2,,-inf,inf -loading,3,y6,fac2,,-inf,inf -loading,3,Q1_fac1,fac1,,-inf,inf -loading,4,y2,fac1,,-inf,inf -loading,4,y3,fac1,,-inf,inf -loading,4,y5,fac2,,-inf,inf -loading,4,y6,fac2,,-inf,inf -loading,4,Q1_fac1,fac1,,-inf,inf -loading,5,y2,fac1,,-inf,inf -loading,5,y3,fac1,,-inf,inf -loading,5,y5,fac2,,-inf,inf -loading,5,y6,fac2,,-inf,inf -loading,5,Q1_fac1,fac1,,-inf,inf -loading,6,y2,fac1,,-inf,inf -loading,6,y3,fac1,,-inf,inf -loading,6,y5,fac2,,-inf,inf -loading,6,y6,fac2,,-inf,inf -loading,6,Q1_fac1,fac1,,-inf,inf -loading,7,y2,fac1,,-inf,inf -loading,7,y3,fac1,,-inf,inf -loading,7,y5,fac2,,-inf,inf -loading,7,y6,fac2,,-inf,inf -loading,7,Q1_fac1,fac1,,-inf,inf -meas_sd,0,y1,-,,-inf,inf -meas_sd,0,y2,-,,-inf,inf -meas_sd,0,y3,-,,-inf,inf -meas_sd,0,y4,-,,-inf,inf -meas_sd,0,y5,-,,-inf,inf -meas_sd,0,y6,-,,-inf,inf -meas_sd,0,y7,-,,-inf,inf -meas_sd,0,y8,-,,-inf,inf -meas_sd,0,y9,-,,-inf,inf -meas_sd,0,Q1_fac1,-,,-inf,inf -meas_sd,1,y1,-,,-inf,inf -meas_sd,1,y2,-,,-inf,inf -meas_sd,1,y3,-,,-inf,inf -meas_sd,1,y4,-,,-inf,inf -meas_sd,1,y5,-,,-inf,inf -meas_sd,1,y6,-,,-inf,inf -meas_sd,1,Q1_fac1,-,,-inf,inf -meas_sd,2,y1,-,,-inf,inf -meas_sd,2,y2,-,,-inf,inf -meas_sd,2,y3,-,,-inf,inf -meas_sd,2,y4,-,,-inf,inf -meas_sd,2,y5,-,,-inf,inf -meas_sd,2,y6,-,,-inf,inf -meas_sd,2,Q1_fac1,-,,-inf,inf -meas_sd,3,y1,-,,-inf,inf -meas_sd,3,y2,-,,-inf,inf -meas_sd,3,y3,-,,-inf,inf -meas_sd,3,y4,-,,-inf,inf 
-meas_sd,3,y5,-,,-inf,inf -meas_sd,3,y6,-,,-inf,inf -meas_sd,3,Q1_fac1,-,,-inf,inf -meas_sd,4,y1,-,,-inf,inf -meas_sd,4,y2,-,,-inf,inf -meas_sd,4,y3,-,,-inf,inf -meas_sd,4,y4,-,,-inf,inf -meas_sd,4,y5,-,,-inf,inf -meas_sd,4,y6,-,,-inf,inf -meas_sd,4,Q1_fac1,-,,-inf,inf -meas_sd,5,y1,-,,-inf,inf -meas_sd,5,y2,-,,-inf,inf -meas_sd,5,y3,-,,-inf,inf -meas_sd,5,y4,-,,-inf,inf -meas_sd,5,y5,-,,-inf,inf -meas_sd,5,y6,-,,-inf,inf -meas_sd,5,Q1_fac1,-,,-inf,inf -meas_sd,6,y1,-,,-inf,inf -meas_sd,6,y2,-,,-inf,inf -meas_sd,6,y3,-,,-inf,inf -meas_sd,6,y4,-,,-inf,inf -meas_sd,6,y5,-,,-inf,inf -meas_sd,6,y6,-,,-inf,inf -meas_sd,6,Q1_fac1,-,,-inf,inf -meas_sd,7,y1,-,,-inf,inf -meas_sd,7,y2,-,,-inf,inf -meas_sd,7,y3,-,,-inf,inf -meas_sd,7,y4,-,,-inf,inf -meas_sd,7,y5,-,,-inf,inf -meas_sd,7,y6,-,,-inf,inf -meas_sd,7,Q1_fac1,-,,-inf,inf -shock_variance,0,fac1,-,,-inf,inf -shock_variance,0,fac2,-,,-inf,inf -initial_mean,0,mixture_0,fac1,,-inf,inf -initial_mean,0,mixture_0,fac2,,-inf,inf -initial_mean,0,mixture_0,fac3,,-inf,inf -initial_cov,0,mixture_0,fac1-fac1,,-inf,inf -initial_cov,0,mixture_0,fac2-fac1,,-inf,inf -initial_cov,0,mixture_0,fac2-fac2,,-inf,inf -initial_cov,0,mixture_0,fac3-fac1,,-inf,inf -initial_cov,0,mixture_0,fac3-fac2,,-inf,inf -initial_cov,0,mixture_0,fac3-fac3,,-inf,inf -trans,0,fac1,fac1,,-inf,inf -trans,0,fac1,fac2,,-inf,inf -trans,0,fac1,fac3,,-inf,inf -trans,0,fac1,phi,,-inf,inf -trans,0,fac2,fac2,,-inf,inf -trans,0,fac2,constant,,-inf,inf diff --git a/pixi.lock b/pixi.lock index ff4b4046..94bc45f4 100644 --- a/pixi.lock +++ b/pixi.lock @@ -932,6 +932,678 @@ environments: - pypi: https://files.pythonhosted.org/packages/89/a2/0e1590e9adb292b1d576dbcf67ff7df8cf55e56e78d2c927686d01080f4b/sqlalchemy-2.0.45-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl - pypi: ./ + docs: + channels: + - url: https://conda.anaconda.org/conda-forge/ + indexes: + - https://pypi.org/simple + options: + pypi-prerelease-mode: if-necessary-or-explicit + packages: + linux-64: + - conda: https://conda.anaconda.org/conda-forge/linux-64/_libgcc_mutex-0.1-conda_forge.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/linux-64/_openmp_mutex-4.5-2_gnu.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/noarch/_python_abi3_support-1.0-hd8ed1ab_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/anyio-4.12.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/argon2-cffi-25.1.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-bindings-25.1.0-py314h5bd0f2a_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.0.5-pyh29332c3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.17.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-6.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-with-css-6.3.0-h5f6438b_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/linux-64/brotli-1.2.0-hed03a55_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-bin-1.2.0-hb03c661_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py314h3de4e8d_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-hda65f42_8.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.1.4-hbd8a1cb_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.1.4-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py314h4a8dc5f_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.4-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/choreographer-1.2.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/comm-0.2.3-pyhe01879c_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.3.3-py314h9891dd4_3.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.2-py314hd8ed1ab_100.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cycler-0.12.1-pyhcf101f3_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.18-py314h42812f9_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/noarch/exceptiongroup-1.3.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.61.1-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/freetype-2.14.1-ha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/h2-4.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/hpack-4.1.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/icu-78.2-h33c6efd_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/ipykernel-7.1.0-pyha191276_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/ipython-9.9.0-pyh53cf698_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/ipython_pygments_lexers-1.1.1-pyhd8ed1ab_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/noarch/isoduration-20.11.0-pyhd8ed1ab_1.conda
  […re-pinned `+ - conda:` / `+ - pypi:` URL entries for the default environment elided: the remainder of the linux-64 list and the full osx-arm64 and win-64 lists, several hundred pins per platform, each ending with the optimagic git dependency and the local package…]
+      - pypi: ./
   test-cpu:
     channels:
     - url: https://conda.anaconda.org/conda-forge/
@@ -993,7 +1665,7 @@ environments:
       - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda
       - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda
       - conda: https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda
-      - conda: https://conda.anaconda.org/conda-forge/linux-64/icu-78.1-h33c6efd_0.conda
+      - conda: https://conda.anaconda.org/conda-forge/linux-64/icu-78.2-h33c6efd_0.conda
       - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda
       - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda
       - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda
@@ -2009,7 +2681,7 @@ environments:
       - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda
       - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda
       - conda: https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda
-      - conda: https://conda.anaconda.org/conda-forge/linux-64/icu-78.1-h33c6efd_0.conda
+      - conda: https://conda.anaconda.org/conda-forge/linux-64/icu-78.2-h33c6efd_0.conda
       - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.11-pyhd8ed1ab_0.conda
       - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda
       - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda
@@ -3135,6 +3807,16 @@ packages:
   purls: []
   size: 207882
   timestamp: 1765214722852
+- conda: https://conda.anaconda.org/conda-forge/osx-arm64/c-ares-1.34.6-hc919400_0.conda
+  sha256: 2995f2aed4e53725e5efbc28199b46bf311c3cab2648fc4f10c2227d6d5fa196
+  md5: bcb3cba70cf1eec964a03b4ba7775f01
+  depends:
+  - __osx >=11.0
+  license: MIT
+  license_family: MIT
+  purls: []
+  size: 180327
+  timestamp: 1765215064054
 - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.1.4-h4c7d964_0.conda
   sha256: 4ddcb01be03f85d3db9d881407fb13a673372f1b9fac9c836ea441893390e049
   md5: 84d389c9eee640dda3d26fc5335c67d8
@@ -4036,6 +4718,28 @@ packages:
   purls: []
   size: 12722920
   timestamp: 1766299101259
+- conda: https://conda.anaconda.org/conda-forge/linux-64/icu-78.2-h33c6efd_0.conda
+  sha256: 142a722072fa96cf16ff98eaaf641f54ab84744af81754c292cb81e0881c0329
+  md5: 186a18e3ba246eccfc7cff00cd19a870
+  depends:
+  - __glibc >=2.17,<3.0.a0
+  - libgcc >=14
+  - libstdcxx >=14
+  license: MIT
+  license_family: MIT
+  purls: []
+  size: 12728445
+  timestamp: 1767969922681
+- conda: https://conda.anaconda.org/conda-forge/osx-arm64/icu-75.1-hfee45f7_0.conda
+  sha256: 9ba12c93406f3df5ab0a43db8a4b4ef67a5871dfd401010fbe29b218b2cbe620
+  md5: 5eb22c1d7b3fc4abb50d92d621583137
+  depends:
+  - __osx >=11.0
+  license: MIT
+  license_family: MIT
+  purls: []
+  size: 11857802
+  timestamp: 1720853997952
 - conda: https://conda.anaconda.org/conda-forge/win-64/icu-78.1-h637d24d_0.conda
   sha256: bee083d5a0f05c380fcec1f30a71ef5518b23563aeb0a21f6b60b792645f9689
   md5: cb8048bed35ef01431184d6a88e46b3e
@@ -4417,6 +5121,23 @@ packages:
   purls: []
   size: 4740
   timestamp: 1767839954258
+- conda: https://conda.anaconda.org/conda-forge/noarch/jupyter-book-2.1.1-pyhcf101f3_0.conda
+  sha256: efea291760fba57a8abaf5b3a05c57f99d60cf11c8950fe8499f4d2eaa4473bb
+  md5: 29cc201b7334408707a8866d6baa35cc
+  depends:
+  - ipykernel
+  - jupyter_core
+  - jupyter_server
+  - nodejs >=20
+  - platformdirs >=4.2.2
+  - python >=3.10
+  - python
+  license: BSD-3-Clause
+  license_family: BSD
+  purls:
+  - pkg:pypi/jupyter-book?source=hash-mapping
+  size: 2175135
+  timestamp: 1769203439705
 - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter-lsp-2.3.0-pyhcf101f3_0.conda
   sha256: 897ad2e2c2335ef3c2826d7805e16002a1fd0d509b4ae0bc66617f0e0ff07bc2
   md5: 62b7c96c6cd77f8173cc5cada6a9acaa
@@ -4812,6 +5533,20 @@ packages:
   purls: []
   size: 164701
   timestamp: 1745264384716
+- conda: https://conda.anaconda.org/conda-forge/osx-arm64/libabseil-20250512.1-cxx17_hd41c47c_0.conda
+  sha256: 7f0ee9ae7fa2cf7ac92b0acf8047c8bac965389e48be61bf1d463e057af2ea6a
+  md5: 360dbb413ee2c170a0a684a33c4fc6b8
+  depends:
+  - __osx >=11.0
+  - libcxx >=18
+  constrains:
+  - libabseil-static =20250512.1=cxx17*
+  - abseil-cpp =20250512.1
+  license: Apache-2.0
+  license_family: Apache
+  purls: []
+  size: 1174081
+  timestamp: 1750194620012
 - conda: https://conda.anaconda.org/conda-forge/linux-64/libarchive-3.8.5-gpl_hc2c16d8_100.conda
   sha256: ee2cf1499a5a5fd5f03c6203597fe14bf28c6ca2a8fffb761e41f3cf371e768e
   md5: 5fdaa8b856683a5598459dead3976578
@@ -5129,6 +5864,14 @@ packages:
   purls: []
   size: 112766
   timestamp: 1702146165126
+- conda: https://conda.anaconda.org/conda-forge/osx-arm64/libev-4.33-h93a5062_2.conda
+  sha256: 95cecb3902fbe0399c3a7e67a5bed1db813e5ab0e22f4023a5e0f722f2cc214f
+  md5: 36d33e440c31857372a72137f78bacf5
+  license: BSD-2-Clause
+  license_family: BSD
+  purls: []
+  size: 107458
+  timestamp: 1702146414478
 - conda: https://conda.anaconda.org/conda-forge/linux-64/libexpat-2.7.3-hecca717_0.conda
   sha256: 1e1b08f6211629cbc2efe7a5bca5953f8f6b3cae0eeb04ca4dacee1bd4e2db2f
   md5: 8b09ae86839581147ef2e5c5e229d164
@@ -5630,6 +6373,22 @@ packages:
   purls: []
   size: 666600
   timestamp: 1756834976695
+- conda: https://conda.anaconda.org/conda-forge/osx-arm64/libnghttp2-1.67.0-hc438710_0.conda
+  sha256: a07cb53b5ffa2d5a18afc6fd5a526a5a53dd9523fbc022148bd2f9395697c46d
+  md5: a4b4dd73c67df470d091312ab87bf6ae
+  depends:
+  - __osx >=11.0
+  - c-ares >=1.34.5,<2.0a0
+  - libcxx >=19
+  - libev >=4.33,<4.34.0a0
+  - libev >=4.33,<5.0a0
+  - libzlib >=1.3.1,<2.0a0
+  - openssl >=3.5.2,<4.0a0
+  license: MIT
+  license_family: MIT
+  purls: []
+  size: 575454
+  timestamp: 1756835746393
 - conda: https://conda.anaconda.org/conda-forge/linux-64/libnvptxcompiler-dev-12.9.86-ha770c72_2.conda
   sha256: 1e7a7b34f8639a5feb75ba864127059e4d83edfe1a516547f0dbb9941e7b8f8b
   md5: 3fd926c321c6dbf386aa14bd8b125bfb
@@ -5926,6 +6685,27 @@ packages:
   purls: []
   size: 40311
   timestamp: 1766271528534
+- conda: https://conda.anaconda.org/conda-forge/linux-64/libuv-1.51.0-hb03c661_1.conda
+  sha256: c180f4124a889ac343fc59d15558e93667d894a966ec6fdb61da1604481be26b
+  md5: 0f03292cc56bf91a077a134ea8747118
+  depends:
+  - __glibc >=2.17,<3.0.a0
+  - libgcc >=14
+  license: MIT
+  license_family: MIT
+  purls: []
+  size: 895108
+  timestamp: 1753948278280
+- conda: https://conda.anaconda.org/conda-forge/osx-arm64/libuv-1.51.0-h6caf38d_1.conda
+  sha256: 042c7488ad97a5629ec0a991a8b2a3345599401ecc75ad6a5af73b60e6db9689
+  md5: c0d87c3c8e075daf1daf6c31b53e8083
+  depends:
+  - __osx >=11.0
+  license: MIT
+  license_family: MIT
+  purls: []
+  size: 421195
+  timestamp: 1753948426421
 - conda: https://conda.anaconda.org/conda-forge/linux-64/libwebp-base-1.6.0-hd42ef1d_0.conda
   sha256: 3aed21ab28eddffdaf7f804f49be7a7d701e8f0e46c856d801270b470820a37b
   md5: aea31d2e5b1091feca96fcfe945c3cf9
@@ -6604,6 +7384,54 @@ packages:
   - pkg:pypi/networkx?source=compressed-mapping
   size: 1587439
   timestamp: 1765215107045
+- conda: https://conda.anaconda.org/conda-forge/linux-64/nodejs-22.21.1-h273caaf_1.conda
+  sha256: cff5f9e02bdb2be15e25b3fd1ea0f5d933cb68ff2da5983ec6962cfe86f50b89
+  md5: 2306549f0179b16be2e9e40e5396456e
+  depends:
+  - libstdcxx >=14
+  - libgcc >=14
+  - __glibc >=2.28,<3.0.a0
+  - libzlib >=1.3.1,<2.0a0
+  - openssl >=3.5.4,<4.0a0
+  - icu >=78.2,<79.0a0
+  - libuv >=1.51.0,<2.0a0
+  license: MIT
+  license_family: MIT
+  purls: []
+  size: 24191530
+  timestamp: 1769159735495
>=1.3.1,<2.0a0 + - openssl >=3.5.4,<4.0a0 + - icu >=78.2,<79.0a0 + - libuv >=1.51.0,<2.0a0 + license: MIT + license_family: MIT + purls: [] + size: 24191530 + timestamp: 1769159735495 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/nodejs-25.2.1-h5230ea7_1.conda + sha256: acb4a33a096fa89d0ec0eea5d5f19988594d4e5c8d482ac60d2b0365d16dd984 + md5: 0b6dfe96bcfb469afe82885b3fecbd56 + depends: + - __osx >=11.0 + - libcxx >=19 + - libsqlite >=3.51.1,<4.0a0 + - libbrotlicommon >=1.2.0,<1.3.0a0 + - libbrotlienc >=1.2.0,<1.3.0a0 + - libbrotlidec >=1.2.0,<1.3.0a0 + - openssl >=3.5.4,<4.0a0 + - c-ares >=1.34.6,<2.0a0 + - icu >=75.1,<76.0a0 + - zstd >=1.5.7,<1.6.0a0 + - libabseil >=20250512.1,<20250513.0a0 + - libabseil * cxx17* + - libnghttp2 >=1.67.0,<2.0a0 + - libuv >=1.51.0,<2.0a0 + - libzlib >=1.3.1,<2.0a0 + license: MIT + license_family: MIT + purls: [] + size: 16202237 + timestamp: 1765482731453 +- conda: https://conda.anaconda.org/conda-forge/win-64/nodejs-25.2.1-he453025_2.conda + sha256: abe64c5dce6d7024919807f9d5ac72729862848238e6ad6bf9ed4e721c8cc232 + md5: b965c8d527c0a5b4781e39339abc808a + license: MIT + license_family: MIT + purls: [] + size: 30449041 + timestamp: 1769159661802 - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda sha256: 7b920e46b9f7a2d2aa6434222e5c8d739021dbc5cc75f32d124a8191d86f9056 md5: e7f89ea5f7ea9401642758ff50a2d9c1 @@ -8359,8 +9187,8 @@ packages: timestamp: 1753199211006 - pypi: ./ name: skillmodels - version: 0.0.24.dev257+gf8dac75d8.d20260128 - sha256: 38a2501c7691f383a3800cca35841c15b739e5ab614472eb630093397311fd4b + version: 0.0.24.dev260+gb8a9fbcd1.d20260129 + sha256: 4d0959c4cd33463e60884329d70524903b887d9b0c17398a34170ede6517c273 requires_dist: - dags - frozendict diff --git a/pyproject.toml b/pyproject.toml index 4b9a27ed..db3b5bee 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,90 +3,83 @@ # ====================================================================================== [project] -name = "skillmodels" authors = [ - { name = "Janoś Gabler", email = "janos.gabler@gmail.com" }, + { name = "Janoś Gabler", email = "janos.gabler@gmail.com" }, ] -maintainers = [ - { name = "Janoś Gabler", email = "janos.gabler@gmail.com" }, - { name = "Hans-Martin von Gaudecker", email = "hmgaudecker@uni-bonn.de" }, +classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: MIT License", + "Operating System :: MacOS :: MacOS X", + "Operating System :: Microsoft :: Windows", + "Operating System :: POSIX", + "Operating System :: Unix", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3 :: Only", + "Topic :: Scientific/Engineering", +] +dependencies = [ + "dags", + "frozendict", + "jax>=0.8", + "numpy", + "pandas", ] description = "Estimators for skill formation models" -dynamic = ["version"] +dynamic = [ "version" ] keywords = [ - "Skill formation", - "Econometrics", - "Economics", - "Estimation", - "Statistics", + "Skill formation", + "Econometrics", + "Economics", + "Estimation", + "Statistics", ] -classifiers = [ - "Development Status :: 4 - Beta", - "Intended Audience :: Science/Research", - "License :: OSI Approved :: MIT License", - "Operating System :: MacOS :: MacOS X", - "Operating System :: Microsoft :: Windows", - "Operating System :: POSIX", - "Operating System :: Unix", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3 :: Only", - "Topic :: Scientific/Engineering", +license = { text 
= "MIT" } +maintainers = [ + { name = "Janoś Gabler", email = "janos.gabler@gmail.com" }, + { name = "Hans-Martin von Gaudecker", email = "hmgaudecker@uni-bonn.de" }, ] +name = "skillmodels" +readme = { file = "README.md", content-type = "text/markdown" } requires-python = ">=3.14,<3.15" -dependencies = [ - "dags", - "frozendict", - "jax>=0.8", - "numpy", - "pandas", -] - -[project.readme] -file = "README.md" -content-type = "text/markdown" - -[project.license] -text = "MIT" [project.urls] -Repository = "https://github.com/OpenSourceEconomics/skillmodels" Github = "https://github.com/OpenSourceEconomics/skillmodels" +Repository = "https://github.com/OpenSourceEconomics/skillmodels" Tracker = "https://github.com/OpenSourceEconomics/skillmodels/issues" - # ====================================================================================== # Build system configuration # ====================================================================================== [build-system] -requires = ["hatchling", "hatch-vcs"] build-backend = "hatchling.build" +requires = [ "hatchling", "hatch-vcs" ] [tool.hatch.build.hooks.vcs] version-file = "src/skillmodels/_version.py" [tool.hatch.build.targets.sdist] -exclude = ["tests"] +exclude = [ "tests" ] only-packages = true [tool.hatch.build.targets.wheel] -only-include = ["src"] -sources = ["src"] - -[tool.hatch.version] -source = "vcs" +only-include = [ "src" ] +sources = [ "src" ] [tool.hatch.metadata] allow-direct-references = true +[tool.hatch.version] +source = "vcs" # ====================================================================================== -# Pixi +# Pixi configuration # ====================================================================================== [tool.pixi.workspace] -channels = ["conda-forge"] -platforms = ["linux-64", "osx-arm64", "win-64"] +channels = [ "conda-forge" ] +platforms = [ "linux-64", "osx-arm64", "win-64" ] # Development Dependencies (conda) # -------------------------------------------------------------------------------------- @@ -107,22 +100,22 @@ python-kaleido = ">=1.0" scipy = "*" [tool.pixi.pypi-dependencies] -skillmodels = {path = ".", editable = true} -optimagic = { git = "https://github.com/optimagic-dev/optimagic.git", branch = "main"} +optimagic = { git = "https://github.com/optimagic-dev/optimagic.git", branch = "main" } pdbp = "*" +skillmodels = { path = ".", editable = true } # Features and Tasks # -------------------------------------------------------------------------------------- [tool.pixi.feature.cuda] -platforms = ["linux-64"] -system-requirements = {cuda = "12"} +platforms = [ "linux-64" ] +system-requirements = { cuda = "12" } [tool.pixi.feature.cuda.dependencies] cuda-nvcc = ">=12" [tool.pixi.feature.cuda.pypi-dependencies] -jax = {version = ">=0.7", extras = ["cuda12"]} +jax = { version = ">=0.7", extras = [ "cuda12" ] } [tool.pixi.feature.test.dependencies] pytest = "*" @@ -133,7 +126,6 @@ snakeviz = "*" [tool.pixi.feature.test.target.unix.dependencies] pytest-memray = "*" - [tool.pixi.feature.test.tasks] tests = "pytest tests" tests-with-cov = "pytest tests --cov-report=xml --cov=./" @@ -144,7 +136,7 @@ mem-on-clean-repo = "git status --porcelain && git diff-index --quiet HEAD -- && mem-cuda = "pytest -x -s --pdb --memray --fail-on-increase tests/test_likelihood_regression.py::test_likelihood_contributions_large_nobs" [tool.pixi.feature.ty.pypi-dependencies] -matplotlib = "*" # required because of pandas +matplotlib = "*" # required because of pandas pandas-stubs = "*" ty = "*" 
types-PyYAML = "*" @@ -153,86 +145,89 @@ types-pytz = "*" [tool.pixi.feature.ty.tasks] ty = "ty check src tests docs" +[tool.pixi.feature.docs.dependencies] +jupyter-book = ">=2.0" + +[tool.pixi.feature.docs.tasks] +docs = { cmd = "jupyter book build --html", cwd = "docs" } +view-docs = { cmd = "jupyter book start", cwd = "docs" } + # Environments # -------------------------------------------------------------------------------------- [tool.pixi.environments] -cuda = {features = ["cuda"], solve-group = "cuda"} -test-cpu = {features = ["test"], solve-group = "default"} -test-gpu = {features = ["test", "cuda"], solve-group = "cuda"} -ty = {features = ["test", "ty"], solve-group = "default"} +cuda = { features = [ "cuda" ], solve-group = "cuda" } +docs = { features = [ "docs" ], solve-group = "default" } +test-cpu = { features = [ "test" ], solve-group = "default" } +test-gpu = { features = [ "test", "cuda" ], solve-group = "cuda" } +ty = { features = [ "test", "ty" ], solve-group = "default" } # ====================================================================================== # Ruff configuration # ====================================================================================== [tool.ruff] -target-version = "py314" fix = true line-length = 88 +target-version = "py314" unsafe-fixes = false [tool.ruff.lint] -select = ["ALL"] extend-ignore = [ - "ANN401", # Dynamically typed expressions (typing.Any) are disallowed - too strict - "COM812", # Conflicts with ruff-format - "EM101", # Exception must not use a string literal - "EM102", # Exception must not use an f-string literal - "FBT002", # Boolean default positional argument in function definition - "FIX002", # Line contains TODO - "ISC001", # Conflicts with ruff-format - "PD015", # pd.merge is fine - "PERF401", # Many suggestions to use list comprehension are not helpful - "PLR0913", # Too many arguments to function call - "PLR2004", # Magic values are fine - "S301", # `pickle` module is unsafe - "TC001", # Move application import into a type-checking block - "TC002", # Move third-party import into a type-checking block - "TC003", # Move standard library import into a type-checking block - "TRY003", # Long messages outside exception class + "ANN401", # Dynamically typed expressions (typing.Any) are disallowed - too strict + "COM812", # Conflicts with ruff-format + "EM101", # Exception must not use a string literal + "EM102", # Exception must not use an f-string literal + "FBT002", # Boolean default positional argument in function definition + "FIX002", # Line contains TODO + "ISC001", # Conflicts with ruff-format + "PD015", # pd.merge is fine + "PERF401", # Many suggestions to use list comprehension are not helpful + "PLR0913", # Too many arguments to function call + "PLR2004", # Magic values are fine + "S301", # `pickle` module is unsafe + "TC001", # Move application import into a type-checking block + "TC002", # Move third-party import into a type-checking block + "TC003", # Move standard library import into a type-checking block + "TRY003", # Long messages outside exception class ] +select = [ "ALL" ] [tool.ruff.lint.per-file-ignores] -"src/skillmodels/types.py" = ["TC"] # Dataclasses need types at runtime -"src/skillmodels/visualize_*.py" = ["BLE001"] +"src/skillmodels/types.py" = [ "TC" ] # Dataclasses need types at runtime +"src/skillmodels/visualize_*.py" = [ "BLE001" ] "**/*.ipynb" = [ - "B018", # Seemingly useless expression for printing. - "T201", # Printing is fine here. - "INP001", # No need for a namespace. 
-]
-"docs/source/conf.py" = [
-    "ERA001", # Lots of erased code
-    "INP001", # No need for a namespace.
+  "B018", # Seemingly useless expression for printing.
+  "INP001", # No need for a namespace.
+  "T201", # Printing is fine here.
 ]
 "tests/*" = [
-    "ANN", # No type annotations needed for tests
-    "ARG001", # Unused arguments are common in fixture-heavy tests
-    "D100", # No module docstrings needed for tests
-    "D103", # No function docstrings needed for tests
-    "E712", # Comparison to True/False using == might be necessary for arrays.
-    "FBT003", # Boolean positional values are common in test setup
-    "INP001", # No need for a namespace.
-    "PT011", # Broad pytest.raises() blocks are okay
-    "S101", # use of `assert` detected
+  "ANN", # No type annotations needed for tests
+  "ARG001", # Unused arguments are common in fixture-heavy tests
+  "D100", # No module docstrings needed for tests
+  "D103", # No function docstrings needed for tests
+  "E712", # Comparison to True/False using == might be necessary for arrays.
+  "FBT003", # Boolean positional values are common in test setup
+  "INP001", # No need for a namespace.
+  "PT011", # Broad pytest.raises() blocks are okay
+  "S101", # Use of `assert` detected
 ]

 [tool.ruff.lint.pydocstyle]
 convention = "google"

-
 # ======================================================================================
 # ty configuration
 # ======================================================================================

 [tool.ty.rules]
-invalid-return-type = "error"
 ambiguous-protocol-member = "error"
 deprecated = "error"
 division-by-zero = "error"
 ignore-comment-unknown-rule = "error"
 invalid-argument-type = "error"
 invalid-ignore-comment = "error"
+invalid-return-type = "error"
 possibly-missing-attribute = "error"
 possibly-missing-implicit-call = "error"
 possibly-missing-import = "error"
@@ -244,22 +239,20 @@ unsupported-base = "error"
 unused-ignore-comment = "error"
 useless-overload-body = "error"

-
 # ======================================================================================
 # pytest configuration
 # ======================================================================================

 [tool.pytest.ini_options]
-addopts = ["--pdbcls=pdbp:Pdb"]
-filterwarnings = []
+addopts = [ "--pdbcls=pdbp:Pdb" ]
+filterwarnings = [ ]
 markers = [
-    "end_to_end: Flag for tests that cover the whole program.",
-    "integration: Flag for integration tests which may comprise of multiple unit tests.",
-    "unit: Flag for unit tests which target mainly a single function.",
-    "wip: Tests that are work-in-progress.",
+  "end_to_end: Flag for tests that cover the whole program.",
+  "integration: Flag for integration tests which may comprise multiple unit tests.",
+  "unit: Flag for unit tests which target mainly a single function.",
+  "wip: Tests that are work-in-progress.",
 ]
-norecursedirs = ["docs", ".envs"]
-
+norecursedirs = [ "docs", ".envs" ]

 # ======================================================================================
 # yamlfix configuration
@@ -267,5 +260,5 @@ norecursedirs = ["docs", ".envs"]

 [tool.yamlfix]
 line_length = 88
-sequence_style = "block_style"
 none_representation = "null"
+sequence_style = "block_style"

From 7e7784e59686bc2c961a51648ab0bcbab29583ca Mon Sep 17 00:00:00 2001
From: Hans-Martin von Gaudecker
Date: Thu, 29 Jan 2026 08:54:46 +0100
Subject: [PATCH 25/27] Get rid of model_dict.
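
The public API now takes a `ModelSpec` instance (passed as `model_spec=`)
everywhere a plain `model` dict was accepted before. A minimal sketch of the
migration for downstream code, assuming a YAML model file and a long-format
dataset as in the docs (the file names are placeholders):

```python
import pandas as pd
import yaml

from skillmodels import ModelSpec, get_maximization_inputs

# Previously: model_dict = yaml.safe_load(f) was passed as `model=model_dict`.
# Now: build an immutable ModelSpec once and pass it as `model_spec=`.
with open("model.yaml") as f:  # placeholder path
    model = ModelSpec.from_dict(yaml.safe_load(f))

# `data` is a long-format DataFrame indexed by individual and period.
data = pd.read_csv("data.csv").set_index(["caseid", "period"])  # placeholder
inputs = get_maximization_inputs(model_spec=model, data=data)
```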
--- docs/getting_started/tutorial.ipynb | 9 +- .../how_to_simulate_dataset.ipynb | 7 +- .../how_to_visualize_correlations.ipynb | 9 +- ...sualize_pairwise_factor_distribution.ipynb | 31 +- ...ow_to_visualize_transition_equations.ipynb | 9 +- docs/how_to_guides/model_specs.md | 98 ++---- pixi.lock | 24 +- pyproject.toml | 1 - src/skillmodels/check_model.py | 84 +++-- src/skillmodels/constraints.py | 4 +- src/skillmodels/correlation_heatmap.py | 52 ++- src/skillmodels/filtered_states.py | 12 +- src/skillmodels/maximization_inputs.py | 11 +- src/skillmodels/model_spec.py | 239 +++++-------- src/skillmodels/process_data.py | 2 +- src/skillmodels/process_model.py | 321 +++++++++--------- src/skillmodels/simulate_data.py | 28 +- src/skillmodels/types.py | 22 +- src/skillmodels/utilities.py | 316 +++++++++-------- .../visualize_factor_distributions.py | 110 +++--- .../visualize_transition_equations.py | 19 +- tests/conftest.py | 16 + tests/test_constraints.py | 22 +- tests/test_correlation_heatmap.py | 7 +- tests/test_filtered_states.py | 5 +- tests/test_likelihood_regression.py | 20 +- tests/test_params_index.py | 18 +- tests/test_parse_params.py | 8 +- tests/test_process_data.py | 13 +- tests/test_process_model.py | 67 ++-- tests/test_simulate_data.py | 15 +- tests/test_utilities.py | 118 +++---- tests/test_visualize_factor_distributions.py | 31 +- tests/test_visualize_transition_equations.py | 10 +- 34 files changed, 831 insertions(+), 927 deletions(-) create mode 100644 tests/conftest.py diff --git a/docs/getting_started/tutorial.ipynb b/docs/getting_started/tutorial.ipynb index dda7917b..622d1b6f 100644 --- a/docs/getting_started/tutorial.ipynb +++ b/docs/getting_started/tutorial.ipynb @@ -22,7 +22,7 @@ "import pandas as pd\n", "import yaml\n", "\n", - "from skillmodels import get_maximization_inputs\n", + "from skillmodels import ModelSpec, get_maximization_inputs\n", "from skillmodels.config import TEST_DATA_DIR" ] }, @@ -47,11 +47,12 @@ "outputs": [], "source": [ "with (TEST_DATA_DIR / \"model2.yaml\").open() as f:\n", - " model = yaml.safe_load(f)\n", + " model_dict = yaml.safe_load(f)\n", + "\n", + "model = ModelSpec.from_dict(model_dict)\n", "\n", "# Show the structure\n", - "print(\"Model keys:\", list(model.keys()))\n", - "print(\"Factors:\", list(model[\"factors\"].keys()))" + "print(\"Factors:\", list(model.factors.keys()))" ] }, { diff --git a/docs/how_to_guides/how_to_simulate_dataset.ipynb b/docs/how_to_guides/how_to_simulate_dataset.ipynb index 16b6247e..cd0f7831 100644 --- a/docs/how_to_guides/how_to_simulate_dataset.ipynb +++ b/docs/how_to_guides/how_to_simulate_dataset.ipynb @@ -10,6 +10,7 @@ "import yaml\n", "\n", "from skillmodels.config import REGRESSION_VAULT, TEST_DATA_DIR\n", + "from skillmodels.model_spec import ModelSpec\n", "from skillmodels.simulate_data import simulate_dataset" ] }, @@ -35,7 +36,7 @@ "outputs": [], "source": [ "with (TEST_DATA_DIR / \"model2.yaml\").open() as y:\n", - " model = yaml.load(y, Loader=yaml.SafeLoader)\n", + " model = ModelSpec.from_dict(yaml.load(y, Loader=yaml.SafeLoader))\n", "\n", "data = pd.read_stata(TEST_DATA_DIR / \"model2_simulated_data.dta\")\n", "data = data.set_index([\"caseid\", \"period\"])\n", @@ -58,7 +59,7 @@ "outputs": [], "source": [ "initial_data = simulate_dataset(\n", - " model=model,\n", + " model_spec=model,\n", " params=params,\n", " data=data,\n", ")\n", @@ -102,7 +103,7 @@ "outputs": [], "source": [ "data_after_policies = simulate_dataset(\n", - " model=model,\n", + " model_spec=model,\n", " 
params=params,\n", " data=data,\n", ")\n", diff --git a/docs/how_to_guides/how_to_visualize_correlations.ipynb b/docs/how_to_guides/how_to_visualize_correlations.ipynb index 692b5ec8..e290bf21 100644 --- a/docs/how_to_guides/how_to_visualize_correlations.ipynb +++ b/docs/how_to_guides/how_to_visualize_correlations.ipynb @@ -23,6 +23,7 @@ " get_scores_corr,\n", " plot_correlation_heatmap,\n", ")\n", + "from skillmodels.model_spec import ModelSpec\n", "\n", "%load_ext nb_black" ] @@ -41,7 +42,7 @@ "outputs": [], "source": [ "with (TEST_DATA_DIR / \"model2.yaml\").open() as y:\n", - " model = yaml.load(y, Loader=yaml.SafeLoader)" + " model = ModelSpec.from_dict(yaml.load(y, Loader=yaml.SafeLoader))" ] }, { @@ -71,7 +72,7 @@ "outputs": [], "source": [ "corr_meas = get_measurements_corr(\n", - " periods=0, data=data, model=model, factors=[\"fac1\", \"fac2\"]\n", + " periods=0, data=data, model_spec=model, factors=[\"fac1\", \"fac2\"]\n", ")" ] }, @@ -109,7 +110,7 @@ "outputs": [], "source": [ "corr_score = get_scores_corr(\n", - " periods=None, params=params, data=data, model=model, factors=\"fac1\"\n", + " periods=None, params=params, data=data, model_spec=model, factors=\"fac1\"\n", ")" ] }, @@ -140,7 +141,7 @@ "outputs": [], "source": [ "quasi_corr_score = get_quasi_scores_corr(\n", - " periods=None, data=data, model=model, factors=\"fac1\"\n", + " periods=None, data=data, model_spec=model, factors=\"fac1\"\n", ")" ] }, diff --git a/docs/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb b/docs/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb index db7380fa..32e99566 100644 --- a/docs/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb +++ b/docs/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb @@ -23,6 +23,7 @@ "\n", "from skillmodels.config import REGRESSION_VAULT, TEST_DATA_DIR\n", "from skillmodels.maximization_inputs import get_maximization_inputs\n", + "from skillmodels.model_spec import ModelSpec\n", "from skillmodels.simulate_data import simulate_dataset\n", "from skillmodels.visualize_factor_distributions import (\n", " bivariate_density_contours,\n", @@ -58,7 +59,8 @@ "outputs": [], "source": [ "with (TEST_DATA_DIR / \"model2.yaml\").open() as y:\n", - " model = yaml.load(y, Loader=yaml.SafeLoader)\n", + " model_dict = yaml.load(y, Loader=yaml.SafeLoader)\n", + " model = ModelSpec.from_dict(model_dict)\n", "params = pd.read_csv(REGRESSION_VAULT / \"one_stage_anchoring.csv\")\n", "params = params.set_index([\"category\", \"period\", \"name1\", \"name2\"])\n", "\n", @@ -82,13 +84,13 @@ "outputs": [], "source": [ "kde_plots = univariate_densities(\n", - " model=model,\n", + " model_spec=model,\n", " data=data,\n", " params=params,\n", " period=1,\n", ")\n", "contour_plots = bivariate_density_contours(\n", - " model=model,\n", + " model_spec=model,\n", " data=data,\n", " params=params,\n", " period=1,\n", @@ -103,7 +105,7 @@ "outputs": [], "source": [ "surface_plots = bivariate_density_surfaces(\n", - " model=model,\n", + " model_spec=model,\n", " data=data,\n", " params=params,\n", " period=1,\n", @@ -167,9 +169,9 @@ "metadata": {}, "outputs": [], "source": [ - "sim_states = simulate_dataset(model=model, params=params, data=data)[\"anchored_states\"][\n", - " \"states\"\n", - "]" + "sim_states = simulate_dataset(model_spec=model, params=params, data=data)[\n", + " \"anchored_states\"\n", + "][\"states\"]" ] }, { @@ -193,7 +195,7 @@ "outputs": [], "source": [ "sim_states_policy = simulate_dataset(\n", - " model=model,\n", + 
" model_spec=model,\n", " params=params,\n", " data=data,\n", " policies=policies,\n", @@ -216,14 +218,14 @@ "outputs": [], "source": [ "kde_plots = univariate_densities(\n", - " model=model,\n", + " model_spec=model,\n", " states={\"baseline\": sim_states, \"subsidy\": sim_states_policy},\n", " data=data,\n", " params=params,\n", " period=1,\n", ")\n", "contour_plots = bivariate_density_contours(\n", - " model=model,\n", + " model_spec=model,\n", " states={\"baseline\": sim_states, \"subsidy\": sim_states_policy},\n", " data=data,\n", " params=params,\n", @@ -274,7 +276,8 @@ "metadata": {}, "outputs": [], "source": [ - "model[\"observed_factors\"] = [\"obs1\"]" + "model_dict[\"observed_factors\"] = [\"obs1\"]\n", + "model = ModelSpec.from_dict(model_dict)" ] }, { @@ -295,7 +298,7 @@ "metadata": {}, "outputs": [], "source": [ - "params = get_maximization_inputs(model=model, data=data)[\"params_template\"]\n", + "params = get_maximization_inputs(model_spec=model, data=data)[\"params_template\"]\n", "params[\"value\"] = 0.1" ] }, @@ -307,14 +310,14 @@ "outputs": [], "source": [ "kde_plots = univariate_densities(\n", - " model=model,\n", + " model_spec=model,\n", " data=data,\n", " params=params,\n", " period=1,\n", " observed_factors=True,\n", ")\n", "contour_plots = bivariate_density_contours(\n", - " model=model,\n", + " model_spec=model,\n", " data=data,\n", " params=params,\n", " period=1,\n", diff --git a/docs/how_to_guides/how_to_visualize_transition_equations.ipynb b/docs/how_to_guides/how_to_visualize_transition_equations.ipynb index 833b6585..efece1f8 100644 --- a/docs/how_to_guides/how_to_visualize_transition_equations.ipynb +++ b/docs/how_to_guides/how_to_visualize_transition_equations.ipynb @@ -11,6 +11,7 @@ "import yaml\n", "\n", "from skillmodels.config import REGRESSION_VAULT, TEST_DATA_DIR\n", + "from skillmodels.model_spec import ModelSpec\n", "from skillmodels.visualize_transition_equations import (\n", " combine_transition_plots,\n", " get_transition_plots,\n", @@ -48,7 +49,7 @@ "outputs": [], "source": [ "with (TEST_DATA_DIR / \"model2.yaml\").open() as y:\n", - " model = yaml.load(y, Loader=yaml.SafeLoader)\n", + " model = ModelSpec.from_dict(yaml.load(y, Loader=yaml.SafeLoader))\n", "\n", "params = pd.read_csv(REGRESSION_VAULT / \"one_stage_anchoring.csv\")\n", "params = params.set_index([\"category\", \"period\", \"name1\", \"name2\"])\n", @@ -73,7 +74,7 @@ "outputs": [], "source": [ "subplots = get_transition_plots(\n", - " model=model,\n", + " model_spec=model,\n", " params=params,\n", " data=data,\n", " period=0,\n", @@ -109,7 +110,7 @@ "outputs": [], "source": [ "subplots = get_transition_plots(\n", - " model=model,\n", + " model_spec=model,\n", " params=params,\n", " data=data,\n", " period=0,\n", @@ -145,7 +146,7 @@ "outputs": [], "source": [ "subplots = get_transition_plots(\n", - " model=model,\n", + " model_spec=model,\n", " params=params,\n", " data=data,\n", " period=1,\n", diff --git a/docs/how_to_guides/model_specs.md b/docs/how_to_guides/model_specs.md index 85dadc14..8ec71f95 100644 --- a/docs/how_to_guides/model_specs.md +++ b/docs/how_to_guides/model_specs.md @@ -1,11 +1,12 @@ # Model Specifications -Models can be specified using Python dataclasses or dictionaries. The dataclass approach -is recommended for type safety and IDE support. +Models are specified using Python dataclasses. 
-## Using Dataclasses (Recommended) +## Defining a Model ```python +from types import MappingProxyType + from skillmodels import ( AnchoringSpec, EstimationOptionsSpec, @@ -16,80 +17,55 @@ from skillmodels import ( # Define factors fac1 = FactorSpec( - measurements=[ - ["y1", "y2", "y3"], # period 0 - ["y1", "y2", "y3"], # period 1 + measurements=( + ("y1", "y2", "y3"), # period 0 + ("y1", "y2", "y3"), # period 1 # ... - ], + ), normalizations=Normalizations( - loadings=[{"y1": 1.0}, {}, {}], # fix loading of y1 to 1 in period 0 - intercepts=[{}, {}, {}], + loadings=( + MappingProxyType({"y1": 1.0}), # fix loading of y1 to 1 in period 0 + MappingProxyType({}), + ), + intercepts=(MappingProxyType({}), MappingProxyType({})), ), - transition_equation="log_ces", + transition_function="log_ces", ) # Create model model = ModelSpec( factors={"fac1": fac1, "fac2": fac2, "fac3": fac3}, anchoring=AnchoringSpec( - outcomes={"fac1": "Q1"}, + outcomes=MappingProxyType({"fac1": "Q1"}), free_loadings=True, ), - controls=["x1", "x2"], - stagemap=[0, 0, 1, 1, 2, 2, 3], + controls=("x1", "x2"), + stagemap=(0, 0, 1, 1, 2, 2, 3), estimation_options=EstimationOptionsSpec(), ) ``` -## Using Dictionaries - -For backwards compatibility and interoperability with YAML/JSON files, models can also -be specified as dictionaries: +For a more ergonomic approach, use `ModelSpec.from_dict()` which accepts plain Python +lists and dicts: ```python -import yaml - -with open("model.yaml") as f: - model = yaml.safe_load(f) -``` - -The dictionary structure mirrors the dataclass structure: - -```yaml -factors: - fac1: - measurements: - - [y1, y2, y3] - - [y1, y2, y3] - normalizations: - loadings: - - {y1: 1.0} - - {} - intercepts: - - {} - - {} - transition_equation: log_ces - fac2: - measurements: - - [y4, y5, y6] - - [y4, y5, y6] - transition_equation: linear - fac3: - measurements: - - [y7, y8, y9] - - [] - transition_equation: constant - -anchoring: - outcomes: - fac1: Q1 - free_loadings: true - -controls: - - x1 - - x2 - -stagemap: [0, 0, 1, 1, 2, 2, 3] +from skillmodels import ModelSpec + +model = ModelSpec.from_dict({ + "factors": { + "fac1": { + "measurements": [["y1", "y2", "y3"], ["y1", "y2", "y3"]], + "normalizations": { + "loadings": [{"y1": 1.0}, {}], + "intercepts": [{}, {}], + }, + "transition_function": "log_ces", + }, + }, + "anchoring": {"outcomes": {"fac1": "Q1"}, "free_loadings": True}, + "controls": ["x1", "x2"], + "stagemap": [0, 0, 1, 1, 2, 2, 3], +}) ``` ## Factor Specification @@ -98,7 +74,7 @@ Each factor requires: - **measurements**: A nested list with measurement variable names for each period. Empty lists indicate no measurements in that period. -- **transition_equation**: Name of a transition function (`linear`, `log_ces`, +- **transition_function**: Name of a transition function (`linear`, `log_ces`, `constant`, `translog`) or a custom function. - **normalizations** (optional): Fixed values for loadings and intercepts to identify the model. 
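
All spec classes are frozen dataclasses, so fields cannot be reassigned after
construction; the `with_*` helper methods on `FactorSpec` return updated copies
instead. A small sketch, reusing `fac1` from the example above (the assertions
only illustrate the copy semantics):

```python
# FactorSpec is frozen: the helper returns a modified copy and leaves
# the original untouched.
fac1_linear = fac1.with_transition_function("linear")

assert fac1.transition_function == "log_ces"
assert fac1_linear.transition_function == "linear"

# Convert back to a plain dict, e.g. for backwards-compatible tooling.
fac1_as_dict = fac1.to_dict()
```

The same pattern applies to `with_normalizations`, which returns a copy with a
new `Normalizations` object.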
diff --git a/pixi.lock b/pixi.lock index 94bc45f4..b511af04 100644 --- a/pixi.lock +++ b/pixi.lock @@ -245,7 +245,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/b8/14/bab308fc2c1b5228c3224ec2bf928ce2e4d21d8046c161e44a2012b5203e/greenlet-3.3.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/53/f2/44ad0ce1d115f0f6be10f4af0ca05a18afb838b06e6ca6b01ba4b0137421/jax_cuda12_pjrt-0.8.2-py3-none-manylinux_2_27_x86_64.whl @@ -486,7 +485,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/b8/14/bab308fc2c1b5228c3224ec2bf928ce2e4d21d8046c161e44a2012b5203e/greenlet-3.3.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/5e/27/2e6032727e41ce74914277478021140947af59127d68aa9e6f3776b428fd/jaxlib-0.8.2-cp314-cp314-manylinux_2_27_x86_64.whl @@ -700,7 +698,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/d8/9d/dca93d916bf8664d7a2bb73ea3d219028dabbe382c31774348963287356a/jaxlib-0.8.2-cp314-cp314-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl @@ -918,7 +915,6 @@ environments: - pypi: 
https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/7c/9a/9030e6f9aa8fd7808e9c31ba4c38f87c4f8ec324ee67431d181fe396d705/greenlet-3.3.0-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/b3/8c/af5a00b07a446414edf6b84a7397eab02cf01ba44b6ae1fce7798ce4c127/jaxlib-0.8.2-cp314-cp314-win_amd64.whl @@ -1148,7 +1144,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/b8/14/bab308fc2c1b5228c3224ec2bf928ce2e4d21d8046c161e44a2012b5203e/greenlet-3.3.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/5e/27/2e6032727e41ce74914277478021140947af59127d68aa9e6f3776b428fd/jaxlib-0.8.2-cp314-cp314-manylinux_2_27_x86_64.whl @@ -1370,7 +1365,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/d8/9d/dca93d916bf8664d7a2bb73ea3d219028dabbe382c31774348963287356a/jaxlib-0.8.2-cp314-cp314-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl @@ -1590,7 +1584,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: 
https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/7c/9a/9030e6f9aa8fd7808e9c31ba4c38f87c4f8ec324ee67431d181fe396d705/greenlet-3.3.0-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/b3/8c/af5a00b07a446414edf6b84a7397eab02cf01ba44b6ae1fce7798ce4c127/jaxlib-0.8.2-cp314-cp314-win_amd64.whl @@ -1850,7 +1843,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/b8/14/bab308fc2c1b5228c3224ec2bf928ce2e4d21d8046c161e44a2012b5203e/greenlet-3.3.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/5e/27/2e6032727e41ce74914277478021140947af59127d68aa9e6f3776b428fd/jaxlib-0.8.2-cp314-cp314-manylinux_2_27_x86_64.whl @@ -2077,7 +2069,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/d8/9d/dca93d916bf8664d7a2bb73ea3d219028dabbe382c31774348963287356a/jaxlib-0.8.2-cp314-cp314-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl @@ -2300,7 +2291,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: 
https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/7c/9a/9030e6f9aa8fd7808e9c31ba4c38f87c4f8ec324ee67431d181fe396d705/greenlet-3.3.0-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/b3/8c/af5a00b07a446414edf6b84a7397eab02cf01ba44b6ae1fce7798ce4c127/jaxlib-0.8.2-cp314-cp314-win_amd64.whl @@ -2592,7 +2582,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/b8/14/bab308fc2c1b5228c3224ec2bf928ce2e4d21d8046c161e44a2012b5203e/greenlet-3.3.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/53/f2/44ad0ce1d115f0f6be10f4af0ca05a18afb838b06e6ca6b01ba4b0137421/jax_cuda12_pjrt-0.8.2-py3-none-manylinux_2_27_x86_64.whl @@ -2866,7 +2855,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/b8/14/bab308fc2c1b5228c3224ec2bf928ce2e4d21d8046c161e44a2012b5203e/greenlet-3.3.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/5e/27/2e6032727e41ce74914277478021140947af59127d68aa9e6f3776b428fd/jaxlib-0.8.2-cp314-cp314-manylinux_2_27_x86_64.whl @@ -3097,7 +3085,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - - pypi: 
https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/d8/9d/dca93d916bf8664d7a2bb73ea3d219028dabbe382c31774348963287356a/jaxlib-0.8.2-cp314-cp314-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl @@ -3324,7 +3311,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/01/c1/e32fb13d9cb5afc5f25f0c84ca21d3ea2d5380f4c06417b814ecc9bf0f38/dags-0.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/f5/ee39c6e92acc742c052f137b47c210cd0a1b72dcd3f98495528bb4d27761/flatten_dict-0.4.2-py2.py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/7c/9a/9030e6f9aa8fd7808e9c31ba4c38f87c4f8ec324ee67431d181fe396d705/greenlet-3.3.0-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/a8/f7/ae4ecf183d9693cd5fcce7ee063c5e54f173b66dc80a8a79951861e1b557/jax-0.8.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/b3/8c/af5a00b07a446414edf6b84a7397eab02cf01ba44b6ae1fce7798ce4c127/jaxlib-0.8.2-cp314-cp314-win_amd64.whl @@ -4516,11 +4502,6 @@ packages: purls: [] size: 184553 timestamp: 1757946164012 -- pypi: https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl - name: frozendict - version: 2.4.7 - sha256: 972af65924ea25cf5b4d9326d549e69a9a4918d8a76a9d3a7cd174d98b237550 - requires_python: '>=3.6' - conda: https://conda.anaconda.org/conda-forge/linux-64/gcc_impl_linux-64-14.3.0-he8b2097_16.conda sha256: 4acf50b7d5673250d585a256a40aabdd922e0947ca12cdbad0cef960ee1a9509 md5: d274bf1343507683e6eb2954d1871569 @@ -9187,11 +9168,10 @@ packages: timestamp: 1753199211006 - pypi: ./ name: skillmodels - version: 0.0.24.dev260+gb8a9fbcd1.d20260129 - sha256: 4d0959c4cd33463e60884329d70524903b887d9b0c17398a34170ede6517c273 + version: 0.0.24.dev261+g5012a8bbc.d20260129 + sha256: 9293ca106ac1c10ef614af441b7e40cb7282ea24b26d4b5775398f556e5e2dc4 requires_dist: - dags - - frozendict - jax>=0.8 - numpy - pandas diff --git a/pyproject.toml b/pyproject.toml index db3b5bee..a2d5cbbc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -20,7 +20,6 @@ classifiers = [ ] dependencies = [ "dags", - "frozendict", "jax>=0.8", "numpy", "pandas", diff --git a/src/skillmodels/check_model.py b/src/skillmodels/check_model.py index 2c4731e2..18f85af7 100644 --- a/src/skillmodels/check_model.py +++ b/src/skillmodels/check_model.py @@ -1,12 +1,15 @@ """Functions to validate model specifications.""" +from collections.abc import Mapping + import numpy as np +from skillmodels.model_spec import ModelSpec from skillmodels.types import Anchoring, Dimensions, Labels def check_model( - model_dict: dict, + model_spec: ModelSpec, labels: Labels, dimensions: Dimensions, anchoring: Anchoring, @@ -19,11 +22,11 @@ def check_model( because processing them will not raise any errors except for easy to understand KeyErrors. 
- Other specifications are checked in the model dict before processing to make sure + Other specifications are checked in the model spec before processing to make sure that the assumptions we make during the processing are fulfilled. Args: - model_dict: The model specification. See: :ref:`model_specs` + model_spec: The model specification. See: :ref:`model_specs` dimensions: Dimensional information. labels: Labels for model quantities. anchoring: Information about anchoring. @@ -41,17 +44,17 @@ def check_model( ) report += _check_anchoring(anchoring) invalid_measurements = _check_measurements( - model_dict=model_dict, factors=labels.latent_factors + model_spec=model_spec, factors=labels.latent_factors ) if invalid_measurements: report += invalid_measurements elif has_endogenous_factors: # Make this conditional because the check only works for valid meas. report += _check_no_overlap_in_measurements_of_states_and_inv( - model_dict=model_dict, labels=labels + model_spec=model_spec, labels=labels ) report += _check_normalizations( - model_dict=model_dict, factors=labels.latent_factors + model_spec=model_spec, factors=labels.latent_factors ) report = "\n".join(report) @@ -61,7 +64,7 @@ def check_model( def check_stagemap( stagemap: tuple[int, ...], - stages: tuple[int, ...] | list[int], + stages: tuple[int, ...], n_periods: int, *, is_augmented: bool, @@ -74,8 +77,7 @@ def check_stagemap( f"The stagemap needs to be of length n_periods - {step_size}. " f" n_periods is {n_periods}, the stagemap has length {len(stagemap)}.", ) - # Convert to list for comparison (stages may be a tuple from dataclass) - if list(stages) != list(range(len(stages))): + if stages != tuple(range(len(stages))): report.append("Stages need to be integers, start at zero and increase by 1.") # Hijacking the stagemap for endogenous factors leads to interleaved elements. @@ -92,8 +94,9 @@ def _check_anchoring(anchoring: Anchoring) -> list[str]: report = [] if not isinstance(anchoring.anchoring, bool): report.append("anchoring.anchoring must be a bool.") - if not isinstance(anchoring.outcomes, dict): - report.append("anchoring.outcomes must be a dict") + + if not isinstance(anchoring.outcomes, Mapping): + report.append("anchoring.outcomes must be a Mapping") else: variables = list(anchoring.outcomes.values()) for var in variables: @@ -110,15 +113,16 @@ def _check_anchoring(anchoring: Anchoring) -> list[str]: def _check_measurements( - model_dict: dict, + model_spec: ModelSpec, factors: tuple[str, ...], ) -> list[str]: report: list[str] = [] for factor in factors: - candidate = model_dict["factors"][factor]["measurements"] - if not _is_list_of(candidate=candidate, type_=list): + candidate = model_spec.factors[factor].measurements + if not _is_sequence_of(candidate=candidate, type_=tuple): report.append( - f"measurements must be lists of lists. Check measurements of {factor}.", + "measurements must be tuples of tuples. 
" + f"Check measurements of {factor}.", ) else: for period, meas_list in enumerate(candidate): @@ -132,17 +136,17 @@ def _check_measurements( def _check_no_overlap_in_measurements_of_states_and_inv( - model_dict: dict, labels: Labels + model_spec: ModelSpec, labels: Labels ) -> list[str]: report = [] for period in labels.periods: - meas = {} + meas: dict[str, set] = {} for factor in labels.latent_factors: - props = model_dict["factors"][factor] - if props.get("is_endogenous", False): - meas["endogenous_factors"] = set(props["measurements"][period]) + fspec = model_spec.factors[factor] + if fspec.is_endogenous: + meas["endogenous_factors"] = set(fspec.measurements[period]) else: - meas["states"] = set(props["measurements"][period]) + meas["states"] = set(fspec.measurements[period]) if overlap := meas["states"].intersection(meas["endogenous_factors"]): report.append( "Measurements for exogenous and endogenous latent factors must not " @@ -152,23 +156,26 @@ def _check_no_overlap_in_measurements_of_states_and_inv( def _check_normalizations( - model_dict: dict, + model_spec: ModelSpec, factors: tuple[str, ...], ) -> list[str]: report: list[str] = [] for factor in factors: - norminfo = model_dict["factors"][factor].get("normalizations", {}) + fspec = model_spec.factors[factor] + if fspec.normalizations is None: + continue for norm_type in ["loadings", "intercepts"]: - candidate = norminfo.get(norm_type, []) - if not _is_list_of(candidate=candidate, type_=dict): + norms = getattr(fspec.normalizations, norm_type) + candidate = [dict(m) for m in norms] + if not _is_sequence_of(candidate=candidate, type_=dict): report.append( - f"normalizations must be lists of dicts. Check {norm_type} " + f"normalizations must be sequences of dicts. Check {norm_type} " f"normalizations for {factor}.", ) else: report += _check_normalized_variables_are_present( list_of_normdicts=candidate, - model_dict=model_dict, + model_spec=model_spec, factor=factor, ) @@ -182,13 +189,13 @@ def _check_normalizations( def _check_normalized_variables_are_present( list_of_normdicts: list[dict], - model_dict: dict, + model_spec: ModelSpec, factor: str, ) -> list[str]: report: list[str] = [] for period, norm_dict in enumerate(list_of_normdicts): for var in norm_dict: - if var not in model_dict["factors"][factor]["measurements"][period]: + if var not in model_spec.factors[factor].measurements[period]: report.append( "You can only normalize variables that are specified as " f"measurements. Check {var} for {factor} in period " @@ -213,19 +220,22 @@ def _check_loadings_are_not_normalized_to_zero( return report -def _is_list_of(candidate: object, type_: type) -> bool: - """Check if candidate is a list that only contains elements of type. +def _is_sequence_of(candidate: object, type_: type) -> bool: + """Check if candidate is a sequence that only contains elements of type. - Note that this is always falls if candidate is not a list and always true if - it is an empty list. + Works with both lists and tuples. 
Examples: - >>> _is_list_of([["a"], ["b"]], list) + >>> _is_sequence_of([["a"], ["b"]], list) True - >>> _is_list_of([{}], list) + >>> _is_sequence_of((("a",), ("b",)), tuple) + True + >>> _is_sequence_of([{}], list) False - >>> _is_list_of([], dict) + >>> _is_sequence_of([], dict) True """ - return isinstance(candidate, list) and all(isinstance(i, type_) for i in candidate) + return isinstance(candidate, list | tuple) and all( + isinstance(i, type_) for i in candidate + ) diff --git a/src/skillmodels/constraints.py b/src/skillmodels/constraints.py index e7da1aa5..bbb60283 100644 --- a/src/skillmodels/constraints.py +++ b/src/skillmodels/constraints.py @@ -389,7 +389,7 @@ def _get_anchoring_constraints( ind_tups = [] for period in periods: for factor in anchoring_info.factors: - outcome = anchoring_info.outcomes[factor] # ty: ignore[invalid-argument-type] + outcome = anchoring_info.outcomes[factor] meas = f"{outcome}_{factor}" ind_tups.append(("loadings", period, meas, factor)) @@ -431,7 +431,7 @@ def _get_constraints_for_augmented_periods( # look counterintuitive... aug_period_meas_type_to_constrain = ( MeasurementType.STATES - if endogenous_factors_info.factor_info[factor].is_state # ty: ignore[invalid-argument-type] + if endogenous_factors_info.factor_info[factor].is_state else MeasurementType.ENDOGENOUS_FACTORS ) aug_period_meas_types = ( diff --git a/src/skillmodels/correlation_heatmap.py b/src/skillmodels/correlation_heatmap.py index 0b60018e..b1f98d89 100644 --- a/src/skillmodels/correlation_heatmap.py +++ b/src/skillmodels/correlation_heatmap.py @@ -134,7 +134,7 @@ def plot_correlation_heatmap( def get_measurements_corr( data: pd.DataFrame, - model: dict | ModelSpec, + model_spec: ModelSpec, factors: list[str] | tuple[str, ...] | str | None, periods: float | list[int] | None, ) -> pd.DataFrame: @@ -144,22 +144,21 @@ def get_measurements_corr( across period specific measurements. Args: - data: DataFrame with observed measurements. - model: The model specification, either as a dict or ModelSpec instance. - See: :ref:`model_specs` - factors: List of factors, to retrieve measurements for. + data: The observed measurements. + model_spec: The model specification. See: :ref:`model_specs` + factors: Factors to retrieve measurements for. If None, then calculate correlations of measurements of all factors. periods: If int, the period within which to calculate measurement correlations. If a list, calculate correlations over periods. If None, calculate correlations across all periods. Note: Periods - refer to originl periods, not the augmented periods. + refer to original periods, not the augmented periods. Returns: - corr: DataFrame with measurement correlations. + corr: Measurement correlations. """ data = data.copy(deep=True) - processed_model = process_model(model) + processed_model = process_model(model_spec) periods = _process_periods(periods=periods, model=processed_model) processed_data = pre_process_data(df=data, periods=periods) latent_factors, observed_factors = _process_factors( @@ -178,7 +177,7 @@ def get_measurements_corr( def get_quasi_scores_corr( data: pd.DataFrame, - model: dict | ModelSpec, + model_spec: ModelSpec, factors: list[str] | tuple[str, ...] | str | None, periods: float | list[int] | None, ) -> pd.DataFrame: @@ -192,21 +191,20 @@ def get_quasi_scores_corr( The calculated scores coincide with factor scores for linear models. Args: - data: DataFrame with observed measurements. - model: The model specification, either as a dict or ModelSpec instance. 
- See: :ref:`model_specs` - factors: List of factors, to retrieve measurements for. + data: The observed measurements. + model_spec: The model specification. See: :ref:`model_specs` + factors: Factors to retrieve measurements for. If None, then calculate correlations of measurements of all factors. periods: If int, the period within which to calculate measurement correlations. If a list, calculate correlations over periods. If None, calculate correlations across all periods. Returns: - corr: DataFrame with score correlations. + corr: Score correlations. """ data = data.copy(deep=True) - processed_model = process_model(model) + processed_model = process_model(model_spec) periods = _process_periods(periods=periods, model=processed_model) processed_data = pre_process_data(df=data, periods=periods) latent_factors, observed_factors = _process_factors( @@ -226,7 +224,7 @@ def get_quasi_scores_corr( def get_scores_corr( data: pd.DataFrame, params: pd.DataFrame, - model: dict | ModelSpec, + model_spec: ModelSpec, factors: list[str] | tuple[str, ...] | str | None, periods: float | list[int] | None, ) -> pd.DataFrame: @@ -238,11 +236,10 @@ def get_scores_corr( scores. Args: - data: DataFrame with observed measurements. - params: DataFrame with estimated model parameters - model: The model specification, either as a dict or ModelSpec instance. - See: :ref:`model_specs` - factors: List of factors, to retrieve measurements for. + data: The observed measurements. + params: Estimated model parameters. + model_spec: The model specification. See: :ref:`model_specs` + factors: Factors to retrieve measurements for. If None, then calculate correlations of measurements of all factors. periods: If int, the period within which to calculate measurement correlations. If a list, calculate correlations over @@ -253,7 +250,7 @@ def get_scores_corr( """ data = data.copy(deep=True) - processed_model = process_model(model) + processed_model = process_model(model_spec) periods = _process_periods(periods=periods, model=processed_model) processed_data = pre_process_data(df=data, periods=periods) latent_factors, observed_factors = _process_factors( @@ -601,7 +598,7 @@ def _get_factor_scores_data( data: Data with observable variables. params: Data frame with estimated measurement relevant model parameters. - model: Processed model dict. + model: The processed model. periods: The list of periods that correlations are calculated for. latent_factors: List of latent factors the scores of which @@ -652,9 +649,8 @@ def _get_factor_scores_data_for_single_period( Args: data: Data with observable variables. - params: Data frame with estimated measurement relevant - model parameters. - model: Processed model dict. + params: Estimated measurement-relevant model parameters. + model: The processed model. period: The period that correlations are calculated for. latent_factors: List of latent factors the scores of which correlations are calculated for. @@ -759,8 +755,8 @@ def _get_factor_scores_data_for_multiple_periods( Args: data: Data with observable variables. - params: Data frame with estimated model parameters. - model: Processed model dict. + params: Estimated model parameters. + model: The processed model. periods: The list of periods that correlations are calculated for. 
latent_factors: List of latent factors the scores of which diff --git a/src/skillmodels/filtered_states.py b/src/skillmodels/filtered_states.py index a443a82d..308707f4 100644 --- a/src/skillmodels/filtered_states.py +++ b/src/skillmodels/filtered_states.py @@ -15,22 +15,22 @@ def get_filtered_states( - model: dict | ModelSpec, + model_spec: ModelSpec, data: pd.DataFrame, params: pd.DataFrame, ) -> dict[str, dict[str, Any]]: """Compute filtered latent states given data and estimated parameters.""" - max_inputs = get_maximization_inputs(model=model, data=data) + max_inputs = get_maximization_inputs(model_spec=model_spec, data=data) params = params.loc[max_inputs["params_template"].index] debug_loglike = max_inputs["debug_loglike"] debug_data = debug_loglike(params) unanchored_states_df = debug_data["filtered_states"] unanchored_ranges = debug_data["state_ranges"] - processed_model = process_model(model) + processed_model = process_model(model_spec) anchored_states_df = anchor_states_df( states_df=unanchored_states_df, - model=model, + model_spec=model_spec, params=params, use_aug_period=True, ) @@ -54,7 +54,7 @@ def get_filtered_states( def anchor_states_df( states_df: pd.DataFrame, - model: dict | ModelSpec, + model_spec: ModelSpec, params: pd.DataFrame, *, use_aug_period: bool, @@ -70,7 +70,7 @@ def anchor_states_df( as an internal function that only works with jax objects). """ - processed_model = process_model(model) + processed_model = process_model(model_spec) p_index = get_params_index( update_info=processed_model.update_info, diff --git a/src/skillmodels/maximization_inputs.py b/src/skillmodels/maximization_inputs.py index 6e2dd2ed..3d376cb2 100644 --- a/src/skillmodels/maximization_inputs.py +++ b/src/skillmodels/maximization_inputs.py @@ -32,17 +32,16 @@ def get_maximization_inputs( - model: dict | ModelSpec, + model_spec: ModelSpec, data: pd.DataFrame, split_dataset: int = 1, ) -> dict[str, Any]: """Create inputs for optimagic's maximize function. Args: - model: The model specification, either as a dict or ModelSpec instance. - See: :ref:`model_specs` - data: dataset in long format. - split_dataset(Int): Controls into how many sclices to split the dataset + model_spec: The model specification. See: :ref:`model_specs` + data: Dataset in long format. + split_dataset: Controls into how many slices to split the dataset during the gradient computation. Returns a dictionary with keys: @@ -67,7 +66,7 @@ def get_maximization_inputs( endogenous factors, we double up the number of periods in order to add """ - processed_model = process_model(model) + processed_model = process_model(model_spec) p_index = get_params_index( update_info=processed_model.update_info, labels=processed_model.labels, diff --git a/src/skillmodels/model_spec.py b/src/skillmodels/model_spec.py index c98d321c..235310c0 100644 --- a/src/skillmodels/model_spec.py +++ b/src/skillmodels/model_spec.py @@ -2,15 +2,14 @@ This module provides frozen dataclasses for defining model specifications in a type-safe, immutable manner. All collections use immutable types -(tuples, frozendict) to ensure the specification cannot be accidentally modified. +(tuples, MappingProxyType) to ensure the specification cannot be accidentally +modified. 
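To illustrate the swap described in this docstring (a standalone sketch, not skillmodels code): MappingProxyType is a read-only view with plain-mapping semantics, which is all the frozen specs need.

    from types import MappingProxyType

    outcomes = MappingProxyType({"cog": "earnings"})
    assert outcomes["cog"] == "earnings"
    try:
        outcomes["noncog"] = "wages"  # writes raise TypeError
    except TypeError:
        pass
    # Unlike frozendict, the proxy does not copy: it is a view of the wrapped
    # dict, so the spec code always wraps freshly built dicts that no other
    # reference can mutate.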
""" from collections.abc import Callable -from dataclasses import dataclass, field +from dataclasses import dataclass, field, replace from types import MappingProxyType -from typing import Self - -from frozendict import frozendict +from typing import Any, Self @dataclass(frozen=True) @@ -25,16 +24,8 @@ class Normalizations: """ - loadings: tuple[frozendict[str, float], ...] - intercepts: tuple[frozendict[str, float], ...] - - @classmethod - def from_dict(cls, d: dict) -> Self: - """Create Normalizations from a dictionary specification.""" - return cls( - loadings=tuple(frozendict(x) for x in d["loadings"]), - intercepts=tuple(frozendict(x) for x in d["intercepts"]), - ) + loadings: tuple[MappingProxyType[str, float], ...] + intercepts: tuple[MappingProxyType[str, float], ...] def to_dict(self) -> dict: """Convert to dictionary for backwards compatibility.""" @@ -65,21 +56,6 @@ class FactorSpec: is_correction: bool = False transition_function: str | Callable | None = None - @classmethod - def from_dict(cls, d: dict) -> Self: - """Create FactorSpec from a dictionary specification.""" - normalizations = None - if "normalizations" in d: - normalizations = Normalizations.from_dict(d["normalizations"]) - - return cls( - measurements=tuple(tuple(m) for m in d["measurements"]), - normalizations=normalizations, - is_endogenous=d.get("is_endogenous", False), - is_correction=d.get("is_correction", False), - transition_function=d.get("transition_function"), - ) - def to_dict(self) -> dict: """Convert to dictionary for backwards compatibility.""" result: dict = { @@ -95,23 +71,11 @@ def to_dict(self) -> dict: def with_transition_function(self, func: str | Callable) -> Self: """Return a new FactorSpec with the given transition function.""" - return type(self)( - measurements=self.measurements, - normalizations=self.normalizations, - is_endogenous=self.is_endogenous, - is_correction=self.is_correction, - transition_function=func, - ) + return replace(self, transition_function=func) def with_normalizations(self, normalizations: Normalizations) -> Self: """Return a new FactorSpec with the given normalizations.""" - return type(self)( - measurements=self.measurements, - normalizations=normalizations, - is_endogenous=self.is_endogenous, - is_correction=self.is_correction, - transition_function=self.transition_function, - ) + return replace(self, normalizations=normalizations) @dataclass(frozen=True) @@ -139,20 +103,6 @@ class EstimationOptionsSpec: clipping_lower_hardness: float = 1 clipping_upper_hardness: float = 1 - @classmethod - def from_dict(cls, d: dict) -> Self: - """Create EstimationOptionsSpec from a dictionary specification.""" - return cls( - robust_bounds=d.get("robust_bounds", True), - bounds_distance=d.get("bounds_distance", 1e-3), - n_mixtures=d.get("n_mixtures", 1), - sigma_points_scale=d.get("sigma_points_scale", 2), - clipping_lower_bound=d.get("clipping_lower_bound", -1e30), - clipping_upper_bound=d.get("clipping_upper_bound"), - clipping_lower_hardness=d.get("clipping_lower_hardness", 1), - clipping_upper_hardness=d.get("clipping_upper_hardness", 1), - ) - def to_dict(self) -> dict: """Convert to dictionary for backwards compatibility.""" result = { @@ -169,8 +119,8 @@ def to_dict(self) -> dict: return result -def _default_empty_frozendict() -> frozendict[str, str]: - return frozendict({}) +def _default_empty_mapping_proxy() -> MappingProxyType[str, str]: + return MappingProxyType({}) @dataclass(frozen=True) @@ -186,25 +136,14 @@ class AnchoringSpec: """ - outcomes: frozendict[str, str] 
= field(default_factory=_default_empty_frozendict) + outcomes: MappingProxyType[str, str] = field( + default_factory=_default_empty_mapping_proxy, + ) free_controls: bool = False free_constant: bool = False free_loadings: bool = False ignore_constant_when_anchoring: bool = False - @classmethod - def from_dict(cls, d: dict) -> Self: - """Create AnchoringSpec from a dictionary specification.""" - outcomes = d.get("outcomes", {}) - ignore_constant = d.get("ignore_constant_when_anchoring", False) - return cls( - outcomes=frozendict(outcomes), - free_controls=d.get("free_controls", False), - free_constant=d.get("free_constant", False), - free_loadings=d.get("free_loadings", False), - ignore_constant_when_anchoring=ignore_constant, - ) - def to_dict(self) -> dict: """Convert to dictionary for backwards compatibility.""" return { @@ -260,45 +199,94 @@ def __init__( object.__setattr__(self, "anchoring", anchoring) object.__setattr__(self, "estimation_options", estimation_options) - @property - def factors(self) -> MappingProxyType[str, FactorSpec]: - """Immutable mapping of factor names to specifications.""" - return self._factors - @classmethod - def from_dict(cls, d: dict) -> Self: - """Create ModelSpec from a dictionary specification. + def from_dict(cls, d: dict[str, Any]) -> Self: + """Create a ModelSpec from a dictionary (e.g. loaded from YAML). Args: - d: Dictionary with keys 'factors', and optionally 'observed_factors', - 'controls', 'stagemap', 'anchoring', and 'estimation_options'. + d: A dictionary with keys like "factors", "observed_factors", + "controls", "stagemap", "anchoring", "estimation_options". Returns: - Immutable ModelSpec instance. + A ModelSpec instance. """ - factors = { - name: FactorSpec.from_dict(spec) for name, spec in d["factors"].items() - } - observed = d.get("observed_factors", []) - controls = d.get("controls", []) - stagemap = d.get("stagemap") + factors = {} + for name, spec in d["factors"].items(): + normalizations = None + if "normalizations" in spec: + nd = spec["normalizations"] + if "intercepts" not in nd: + n_periods = len(nd.get("loadings", [])) + nd["intercepts"] = [{} for _ in range(n_periods)] + normalizations = Normalizations( + loadings=tuple(MappingProxyType(x) for x in nd["loadings"]), + intercepts=tuple(MappingProxyType(x) for x in nd["intercepts"]), + ) + factors[name] = FactorSpec( + measurements=tuple(tuple(m) for m in spec["measurements"]), + normalizations=normalizations, + is_endogenous=spec.get("is_endogenous", False), + is_correction=spec.get("is_correction", False), + transition_function=spec.get("transition_function"), + ) + anchoring = None if "anchoring" in d: - anchoring = AnchoringSpec.from_dict(d["anchoring"]) + ad = d["anchoring"] + anchoring = AnchoringSpec( + outcomes=MappingProxyType(ad.get("outcomes", {})), + free_controls=ad.get("free_controls", False), + free_constant=ad.get("free_constant", False), + free_loadings=ad.get("free_loadings", False), + ignore_constant_when_anchoring=ad.get( + "ignore_constant_when_anchoring", False + ), + ) + estimation = None if "estimation_options" in d: - estimation = EstimationOptionsSpec.from_dict(d["estimation_options"]) + ed = d["estimation_options"] + estimation = EstimationOptionsSpec( + robust_bounds=ed.get("robust_bounds", True), + bounds_distance=ed.get("bounds_distance", 1e-3), + n_mixtures=ed.get("n_mixtures", 1), + sigma_points_scale=ed.get("sigma_points_scale", 2), + clipping_lower_bound=ed.get("clipping_lower_bound", -1e30), + clipping_upper_bound=ed.get("clipping_upper_bound"), 
+ clipping_lower_hardness=ed.get("clipping_lower_hardness", 1), + clipping_upper_hardness=ed.get("clipping_upper_hardness", 1), + ) + + stagemap = d.get("stagemap") return cls( - factors=MappingProxyType(factors), - observed_factors=tuple(observed), - controls=tuple(controls), + factors=factors, + observed_factors=tuple(d.get("observed_factors", [])), + controls=tuple(d.get("controls", [])), stagemap=tuple(stagemap) if stagemap is not None else None, anchoring=anchoring, estimation_options=estimation, ) + @property + def factors(self) -> MappingProxyType[str, FactorSpec]: + """Immutable mapping of factor names to specifications.""" + return self._factors + + def _replace(self, **changes: Any) -> Self: + """Return a new ModelSpec with the specified fields replaced.""" + return type(self)( + factors=changes.get("factors", self.factors), + observed_factors=changes.get("observed_factors", self.observed_factors), + controls=changes.get("controls", self.controls), + stagemap=changes.get("stagemap", self.stagemap), + anchoring=changes.get("anchoring", self.anchoring), + estimation_options=changes.get( + "estimation_options", self.estimation_options + ), + ) + def to_dict(self) -> dict: """Convert to dictionary for backwards compatibility with skillmodels. @@ -348,14 +336,7 @@ def with_transition_functions( name: spec.with_transition_function(transition_functions[name]) for name, spec in self.factors.items() } - return type(self)( - factors=MappingProxyType(new_factors), - observed_factors=self.observed_factors, - controls=self.controls, - stagemap=self.stagemap, - anchoring=self.anchoring, - estimation_options=self.estimation_options, - ) + return self._replace(factors=MappingProxyType(new_factors)) def with_added_factor( self, @@ -374,14 +355,7 @@ def with_added_factor( """ new_factors = dict(self.factors) new_factors[name] = spec - return type(self)( - factors=MappingProxyType(new_factors), - observed_factors=self.observed_factors, - controls=self.controls, - stagemap=self.stagemap, - anchoring=self.anchoring, - estimation_options=self.estimation_options, - ) + return self._replace(factors=MappingProxyType(new_factors)) def with_added_observed_factors( self, @@ -396,13 +370,8 @@ def with_added_observed_factors( New ModelSpec with the additional observed factors. """ - return type(self)( - factors=self.factors, + return self._replace( observed_factors=self.observed_factors + names, - controls=self.controls, - stagemap=self.stagemap, - anchoring=self.anchoring, - estimation_options=self.estimation_options, ) def with_estimation_options( @@ -418,14 +387,7 @@ def with_estimation_options( New ModelSpec with the updated estimation options. """ - return type(self)( - factors=self.factors, - observed_factors=self.observed_factors, - controls=self.controls, - stagemap=self.stagemap, - anchoring=self.anchoring, - estimation_options=estimation_options, - ) + return self._replace(estimation_options=estimation_options) def with_anchoring( self, @@ -440,14 +402,7 @@ def with_anchoring( New ModelSpec with the updated anchoring. """ - return type(self)( - factors=self.factors, - observed_factors=self.observed_factors, - controls=self.controls, - stagemap=self.stagemap, - anchoring=anchoring, - estimation_options=self.estimation_options, - ) + return self._replace(anchoring=anchoring) def with_controls( self, @@ -462,14 +417,7 @@ def with_controls( New ModelSpec with the updated controls. 
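The intercepts defaulting inside from_dict above can be exercised on its own; a minimal sketch with an invented two-period factor spec. Note that, as written, the rule fills the missing key into the caller's nested dict in place, so callers who want to reuse the raw dict may prefer to pass a copy.

    from types import MappingProxyType

    nd = {"loadings": [{"m1": 1.0}, {"m2": 1.0}]}  # hypothetical spec fragment
    if "intercepts" not in nd:
        n_periods = len(nd.get("loadings", []))
        nd["intercepts"] = [{} for _ in range(n_periods)]
    intercepts = tuple(MappingProxyType(x) for x in nd["intercepts"])
    assert len(intercepts) == 2 and all(dict(x) == {} for x in intercepts)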
""" - return type(self)( - factors=self.factors, - observed_factors=self.observed_factors, - controls=controls, - stagemap=self.stagemap, - anchoring=self.anchoring, - estimation_options=self.estimation_options, - ) + return self._replace(controls=controls) def with_stagemap( self, @@ -484,11 +432,4 @@ def with_stagemap( New ModelSpec with the updated stagemap. """ - return type(self)( - factors=self.factors, - observed_factors=self.observed_factors, - controls=self.controls, - stagemap=stagemap, - anchoring=self.anchoring, - estimation_options=self.estimation_options, - ) + return self._replace(stagemap=stagemap) diff --git a/src/skillmodels/process_data.py b/src/skillmodels/process_data.py index f48257a2..72f731fe 100644 --- a/src/skillmodels/process_data.py +++ b/src/skillmodels/process_data.py @@ -177,7 +177,7 @@ def _add_copies_of_anchoring_outcome( ) -> pd.DataFrame: df = df.copy() for factor in anchoring_info.factors: - outcome = anchoring_info.outcomes[factor] # ty: ignore[invalid-argument-type] + outcome = anchoring_info.outcomes[factor] df[f"{outcome}_{factor}"] = df[outcome] return df diff --git a/src/skillmodels/process_model.py b/src/skillmodels/process_model.py index f58889e2..8b52d7eb 100644 --- a/src/skillmodels/process_model.py +++ b/src/skillmodels/process_model.py @@ -1,30 +1,27 @@ """Functions to process model specifications from user-friendly to internal form.""" from collections.abc import KeysView, Mapping -from copy import deepcopy from dataclasses import replace from functools import partial -from typing import Any +from types import MappingProxyType import numpy as np import pandas as pd from dags import concatenate_functions from dags.signature import rename_arguments -from frozendict import frozendict from jax import Array, vmap from pandas import DataFrame import skillmodels.transition_functions as t_f_module from skillmodels.check_model import check_model, check_stagemap from skillmodels.decorators import extract_params, jax_array_output -from skillmodels.model_spec import ModelSpec +from skillmodels.model_spec import FactorSpec, ModelSpec, Normalizations from skillmodels.types import ( Anchoring, Dimensions, EndogenousFactorsInfo, EstimationOptions, FactorInfo, - FactorType, Labels, MeasurementType, ProcessedModel, @@ -34,7 +31,7 @@ pd.set_option("future.no_silent_downcasting", True) # noqa: FBT003 -def process_model(model: dict | ModelSpec) -> ProcessedModel: +def process_model(model_spec: ModelSpec) -> ProcessedModel: """Check, clean, extend and transform the model specs. Check the completeness, consistency and validity of the model specifications. @@ -42,11 +39,10 @@ def process_model(model: dict | ModelSpec) -> ProcessedModel: Set default values and extend the model specification where necessary. Args: - model: The model specification, either as a dict or ModelSpec instance. - See: :ref:`model_specs` + model_spec: The model specification. See: :ref:`model_specs` Returns: - dict: nested dictionary of model specs. It has the following entries: + ProcessedModel with the following entries: - dimensions: Dimensional information like n_states, n_periods, n_controls, n_mixtures. See :ref:`dimensions`. - labels: Dict of lists with labels for the model quantities like @@ -59,61 +55,40 @@ def process_model(model: dict | ModelSpec) -> ProcessedModel: loadings and intercepts for each factor. See :ref:`normalizations`. 
""" - model_dict = model.to_dict() if isinstance(model, ModelSpec) else model - has_endogenous_factors = get_has_endogenous_factors(model_dict["factors"]) + has_endogenous_factors = get_has_endogenous_factors(model_spec.factors) + est_opts = model_spec.estimation_options dims = get_dimensions( - model_dict=model_dict, has_endogenous_factors=has_endogenous_factors + model_spec=model_spec, has_endogenous_factors=has_endogenous_factors ) labels = _get_labels( - model_dict=model_dict, + model_spec=model_spec, has_endogenous_factors=has_endogenous_factors, dimensions=dims, ) - anchoring = _process_anchoring(model_dict) + anchoring = _process_anchoring(model_spec) if has_endogenous_factors: - _model_dict_aug = _augment_periods_for_endogenous_factors( - model_dict=model_dict, + _model_spec_aug = _augment_periods_for_endogenous_factors( + model_spec=model_spec, dimensions=dims, labels=labels, ) - endogenous_factors_info = _get_endogenous_factors_info( - has_endogenous_factors=has_endogenous_factors, - model_dict=_model_dict_aug, - labels=labels, - bounds_distance=model_dict["estimation_options"]["bounds_distance"], - ) else: - _model_dict_aug = model_dict - endogenous_factors_info = EndogenousFactorsInfo( - has_endogenous_factors=has_endogenous_factors, - aug_periods_to_aug_period_meas_types=frozendict( - _get_aug_periods_to_aug_period_meas_types( - aug_periods=labels.aug_periods_to_periods.keys(), - has_endogenous_factors=has_endogenous_factors, - ) - ), - bounds_distance=model_dict["estimation_options"].get( - "bounds_distance", 1e-3 - ), - aug_periods_from_period=partial( - _aug_periods_from_period, - aug_periods_to_periods=labels.aug_periods_to_periods, - ), - factor_info=frozendict( - { - fac: FactorInfo(factor_type=FactorType.STATE) - for fac in labels.latent_factors - } - ), - ) + _model_spec_aug = model_spec + bounds_distance = est_opts.bounds_distance if est_opts else 1e-3 + endogenous_factors_info = _get_endogenous_factors_info( + has_endogenous_factors=has_endogenous_factors, + model_spec=_model_spec_aug, + labels=labels, + bounds_distance=bounds_distance, + ) check_model( - model_dict=_model_dict_aug, + model_spec=_model_spec_aug, labels=labels, dimensions=dims, anchoring=anchoring, has_endogenous_factors=has_endogenous_factors, ) - transition_info = _get_transition_info(model_dict=_model_dict_aug, labels=labels) + transition_info = _get_transition_info(model_spec=_model_spec_aug, labels=labels) labels = replace( labels, transition_names=tuple(transition_info.function_names.values()) ) @@ -122,29 +97,31 @@ def process_model(model: dict | ModelSpec) -> ProcessedModel: dimensions=dims, labels=labels, anchoring=anchoring, - estimation_options=_process_estimation_options(_model_dict_aug), + estimation_options=_process_estimation_options(_model_spec_aug), transition_info=transition_info, update_info=_get_update_info( - model_dict=_model_dict_aug, + model_spec=_model_spec_aug, dimensions=dims, labels=labels, anchoring_info=anchoring, ), normalizations=_process_normalizations( - model_dict=_model_dict_aug, dimensions=dims, labels=labels + model_spec=_model_spec_aug, dimensions=dims, labels=labels ), endogenous_factors_info=endogenous_factors_info, ) -def get_has_endogenous_factors(factors: dict[str, Any]) -> bool: +def get_has_endogenous_factors( + factors: Mapping[str, FactorSpec], +) -> bool: """Return True if any endogenous factors are present.""" endogenous_factors = pd.DataFrame( [ { "factor": f, - "is_endogenous": v.get("is_endogenous", False), - "is_correction": v.get("is_correction", 
False), + "is_endogenous": v.is_endogenous, + "is_correction": v.is_correction, } for f, v in factors.items() ] @@ -164,26 +141,29 @@ def get_has_endogenous_factors(factors: dict[str, Any]) -> bool: return endogenous_factors["is_endogenous"].any() # ty: ignore[invalid-return-type] -def get_dimensions(model_dict: dict, *, has_endogenous_factors: bool) -> Dimensions: +def get_dimensions( + model_spec: ModelSpec, *, has_endogenous_factors: bool +) -> Dimensions: """Extract the dimensions of the model. Args: - model_dict: The model specification. See: :ref:`model_specs` + model_spec: The model specification. has_endogenous_factors: Whether endogenous factors are present. Returns: Dimensions dataclass with all dimensional information. """ - all_n_periods = [len(d["measurements"]) for d in model_dict["factors"].values()] + all_n_periods = [len(fspec.measurements) for fspec in model_spec.factors.values()] n_periods = max(all_n_periods) n_aug_periods = 2 * n_periods if has_endogenous_factors else n_periods + est_opts = model_spec.estimation_options return Dimensions( - n_latent_factors=len(model_dict["factors"]), - n_observed_factors=len(model_dict.get("observed_factors", [])), - n_controls=len(model_dict.get("controls", [])) + 1, # plus 1: constant - n_mixtures=model_dict["estimation_options"].get("n_mixtures", 1), + n_latent_factors=len(model_spec.factors), + n_observed_factors=len(model_spec.observed_factors), + n_controls=len(model_spec.controls) + 1, # plus 1: constant + n_mixtures=est_opts.n_mixtures if est_opts else 1, n_aug_periods=n_aug_periods, n_periods=n_periods, ) @@ -209,12 +189,12 @@ def _aug_periods_from_period( def _get_labels( - model_dict: dict, *, has_endogenous_factors: bool, dimensions: Dimensions + model_spec: ModelSpec, *, has_endogenous_factors: bool, dimensions: Dimensions ) -> Labels: """Extract labels of the model quantities. Args: - model_dict: The model specification. See: :ref:`model_specs` + model_spec: The model specification. See: :ref:`model_specs` has_endogenous_factors: Whether endogenous factors are present. dimensions: Dimensional information. 
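The period bookkeeping in get_dimensions above is plain arithmetic over the measurements tuples; a self-contained sketch with made-up factors:

    measurements = {"cog": (("m1",), ("m2",)), "inv": ((), ("i1",))}
    has_endogenous_factors = True

    n_periods = max(len(m) for m in measurements.values())
    n_aug_periods = 2 * n_periods if has_endogenous_factors else n_periods
    assert (n_periods, n_aug_periods) == (2, 4)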
@@ -227,12 +207,16 @@ def _get_labels( has_endogenous_factors=has_endogenous_factors, ) - stagemap = model_dict.get("stagemap", list(range(dimensions.n_periods - 1))) + stagemap: list[int] = ( + list(model_spec.stagemap) + if model_spec.stagemap is not None + else list(range(dimensions.n_periods - 1)) + ) stages = sorted(int(v) for v in np.unique(stagemap)) report = check_stagemap( - stagemap=stagemap, - stages=stages, + stagemap=tuple(stagemap), + stages=tuple(stages), n_periods=dimensions.n_periods, is_augmented=False, ) @@ -253,148 +237,144 @@ def _get_labels( aug_stages_to_stages = {s: s for s in stages} return Labels( - latent_factors=tuple(model_dict["factors"]), - observed_factors=tuple(model_dict.get("observed_factors", [])), - controls=("constant", *model_dict.get("controls", [])), + latent_factors=tuple(model_spec.factors), + observed_factors=tuple(model_spec.observed_factors), + controls=("constant", *model_spec.controls), periods=tuple(sorted(set(aug_periods_to_periods.values()))), stagemap=tuple(stagemap), stages=tuple(stages), aug_periods=tuple(aug_periods_to_periods.keys()), - aug_periods_to_periods=frozendict(aug_periods_to_periods), + aug_periods_to_periods=MappingProxyType(aug_periods_to_periods), aug_stagemap=tuple(aug_stagemap), aug_stages=tuple(sorted(int(v) for v in np.unique(aug_stagemap))), - aug_stages_to_stages=frozendict(aug_stages_to_stages), + aug_stages_to_stages=MappingProxyType(aug_stages_to_stages), ) -def _process_estimation_options(model_dict: dict) -> EstimationOptions: +def _process_estimation_options(model_spec: ModelSpec) -> EstimationOptions: """Process options. Args: - model_dict: The model specification. See: :ref:`model_specs` + model_spec: The model specification. See: :ref:`model_specs` Returns: EstimationOptions dataclass with tuning parameters for the estimation. 
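The stagemap defaulting earlier in this hunk (_get_labels), isolated as plain Python: with no user stagemap, each of the n_periods - 1 transitions gets its own stage, and stages are the sorted unique stagemap entries.

    import numpy as np

    n_periods, user_stagemap = 4, None
    stagemap = (
        list(user_stagemap)
        if user_stagemap is not None
        else list(range(n_periods - 1))
    )
    stages = sorted(int(v) for v in np.unique(stagemap))
    assert stagemap == [0, 1, 2]
    assert stages == [0, 1, 2]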
""" - user_opts = model_dict.get("estimation_options", {}) - - sigma_points_scale = user_opts.get("sigma_points_scale", 2) - robust_bounds = user_opts.get("robust_bounds", True) - bounds_distance = user_opts.get("bounds_distance", 1e-3) - clipping_lower_bound = user_opts.get("clipping_lower_bound", -1e30) - clipping_upper_bound = user_opts.get("clipping_upper_bound", None) - clipping_lower_hardness = user_opts.get("clipping_lower_hardness", 1) - clipping_upper_hardness = user_opts.get("clipping_upper_hardness", 1) - - if not robust_bounds: - bounds_distance = 0 + opts = model_spec.estimation_options + if opts is None: + return EstimationOptions( + sigma_points_scale=2, + robust_bounds=True, + bounds_distance=1e-3, + clipping_lower_bound=-1e30, + clipping_upper_bound=None, + clipping_lower_hardness=1, + clipping_upper_hardness=1, + ) return EstimationOptions( - sigma_points_scale=sigma_points_scale, - robust_bounds=robust_bounds, - bounds_distance=bounds_distance, - clipping_lower_bound=clipping_lower_bound, - clipping_upper_bound=clipping_upper_bound, - clipping_lower_hardness=clipping_lower_hardness, - clipping_upper_hardness=clipping_upper_hardness, + sigma_points_scale=opts.sigma_points_scale, + robust_bounds=opts.robust_bounds, + bounds_distance=opts.bounds_distance if opts.robust_bounds else 0, + clipping_lower_bound=opts.clipping_lower_bound, + clipping_upper_bound=opts.clipping_upper_bound, + clipping_lower_hardness=opts.clipping_lower_hardness, + clipping_upper_hardness=opts.clipping_upper_hardness, ) -def _process_anchoring(model_dict: dict) -> Anchoring: +def _process_anchoring(model_spec: ModelSpec) -> Anchoring: """Process the specification that governs how latent factors are anchored. Args: - model_dict: The model specification. See: :ref:`model_specs` + model_spec: The model specification. See: :ref:`model_specs` Returns: Anchoring dataclass with information about anchoring. """ - if "anchoring" in model_dict: - anch = model_dict["anchoring"] + anch = model_spec.anchoring + if anch is not None: return Anchoring.from_config( - outcomes=anch.get("outcomes", {}), - free_controls=anch.get("free_controls", False), - free_constant=anch.get("free_constant", False), - free_loadings=anch.get("free_loadings", False), - ignore_constant_when_anchoring=anch.get( - "ignore_constant_when_anchoring", False - ), + outcomes=dict(anch.outcomes), + free_controls=anch.free_controls, + free_constant=anch.free_constant, + free_loadings=anch.free_loadings, + ignore_constant_when_anchoring=anch.ignore_constant_when_anchoring, ) return Anchoring.disabled() -def _insert_empty_elements_into_list( - old: list, - insert_at_modulo: int, - to_insert: Any, - aug_p_to_p: Mapping[int, int], -) -> list: - return [ - to_insert if aug_p % 2 == insert_at_modulo else old[p] - for aug_p, p in aug_p_to_p.items() - ] - - def _augment_periods_for_endogenous_factors( - model_dict: dict[str, Any], dimensions: Dimensions, labels: Labels -) -> dict[str, Any]: + model_spec: ModelSpec, dimensions: Dimensions, labels: Labels +) -> ModelSpec: """Augment periods if endogenous factors are present. Args: - model_dict: The model specification. See: :ref:`model_specs` + model_spec: The model specification. See: :ref:`model_specs` dimensions: Dimensional information. labels: Labels for model quantities. Returns: - Model dictionary with twice the amount of periods + ModelSpec with twice the amount of periods. 
""" - aug = deepcopy(model_dict) - for fac, v in model_dict["factors"].items(): - insert_at_modulo = 0 if v.get("is_endogenous", False) else 1 + new_factors: dict[str, FactorSpec] = {} + for fac, fspec in model_spec.factors.items(): + insert_at_modulo = 0 if fspec.is_endogenous else 1 # Insert empty elements into measurements when we do not have those. - if len(v["measurements"]) != dimensions.n_periods: + if len(fspec.measurements) != dimensions.n_periods: raise ValueError( "Measurements must be of length `n_periods`, " - f"got {v['measurements']} for {fac}" + f"got {fspec.measurements} for {fac}" ) - aug["factors"][fac]["measurements"] = _insert_empty_elements_into_list( - old=v["measurements"], - insert_at_modulo=insert_at_modulo, - to_insert=[], - aug_p_to_p=labels.aug_periods_to_periods, + aug_measurements = tuple( + () if aug_p % 2 == insert_at_modulo else fspec.measurements[p] + for aug_p, p in labels.aug_periods_to_periods.items() ) # Insert empty elements into normalizations when we do not have those. - for norm_type, normalizations in v.get("normalizations", {}).items(): - if not len(normalizations) == dimensions.n_periods: - raise ValueError( - "Normalizations must be lists of length `n_periods`, " - f"got {normalizations} for {fac}['normalizations']['{norm_type}']" - ) - aug["factors"][fac]["normalizations"][norm_type] = ( - _insert_empty_elements_into_list( - old=normalizations, - insert_at_modulo=insert_at_modulo, - to_insert={}, - aug_p_to_p=labels.aug_periods_to_periods, + aug_normalizations = None + if fspec.normalizations is not None: + aug_norm_parts: dict[str, tuple[MappingProxyType[str, float], ...]] = {} + for norm_type in ("loadings", "intercepts"): + norms = getattr(fspec.normalizations, norm_type) + if len(norms) != dimensions.n_periods: + raise ValueError( + "Normalizations must be lists of length `n_periods`, " + f"got {norms} for {fac}['normalizations']['{norm_type}']" + ) + aug_norm_parts[norm_type] = tuple( + MappingProxyType({}) if aug_p % 2 == insert_at_modulo else norms[p] + for aug_p, p in labels.aug_periods_to_periods.items() ) + aug_normalizations = Normalizations( + loadings=aug_norm_parts["loadings"], + intercepts=aug_norm_parts["intercepts"], ) - return aug + new_factors[fac] = FactorSpec( + measurements=aug_measurements, + normalizations=aug_normalizations, + is_endogenous=fspec.is_endogenous, + is_correction=fspec.is_correction, + transition_function=fspec.transition_function, + ) + + return model_spec._replace(factors=MappingProxyType(new_factors)) -def _get_transition_info(model_dict: dict, labels: Labels) -> TransitionInfo: + +def _get_transition_info(model_spec: ModelSpec, labels: Labels) -> TransitionInfo: """Collect information about transition functions.""" func_list, param_names = [], [] latent_factors = labels.latent_factors all_factors = labels.all_factors for factor in latent_factors: - spec = model_dict["factors"][factor]["transition_function"] + spec = model_spec.factors[factor].transition_function if isinstance(spec, str): func = getattr(t_f_module, spec) if spec == "constant": @@ -407,7 +387,7 @@ def _get_transition_info(model_dict: dict, labels: Labels) -> TransitionInfo: "Custom transition functions must have a __name__ attribute.", ) if hasattr(spec, "__registered_params__"): - names = spec.__registered_params__ + names: list[str] = spec.__registered_params__ # ty: ignore[invalid-assignment] param_names.append(names) else: raise AttributeError( @@ -448,30 +428,34 @@ def _extract_factor(states: Array, pos: int) -> Array: return 
TransitionInfo( func=transition_function, - param_names=frozendict(zip(latent_factors, param_names, strict=False)), - individual_functions=frozendict(individual_functions), - function_names=frozendict(zip(latent_factors, function_names, strict=False)), + param_names=MappingProxyType( + dict(zip(latent_factors, param_names, strict=False)) + ), + individual_functions=MappingProxyType(individual_functions), + function_names=MappingProxyType( + dict(zip(latent_factors, function_names, strict=False)) + ), ) def _get_endogenous_factors_info( *, has_endogenous_factors: bool, - model_dict: dict[str, Any], + model_spec: ModelSpec, labels: Labels, bounds_distance: float, ) -> EndogenousFactorsInfo: """Collect information about endogenous factors.""" factor_info = {} - for fac, v in model_dict["factors"].items(): + for fac, fspec in model_spec.factors.items(): factor_info[fac] = FactorInfo.from_flags( - is_endogenous=v.get("is_endogenous", False), - is_correction=v.get("is_correction", False), + is_endogenous=fspec.is_endogenous, + is_correction=fspec.is_correction, ) return EndogenousFactorsInfo( has_endogenous_factors=has_endogenous_factors, - aug_periods_to_aug_period_meas_types=frozendict( + aug_periods_to_aug_period_meas_types=MappingProxyType( _get_aug_periods_to_aug_period_meas_types( aug_periods=labels.aug_periods_to_periods.keys(), has_endogenous_factors=has_endogenous_factors, @@ -482,7 +466,7 @@ def _get_endogenous_factors_info( _aug_periods_from_period, aug_periods_to_periods=labels.aug_periods_to_periods, ), - factor_info=frozendict(factor_info), + factor_info=MappingProxyType(factor_info), ) @@ -504,12 +488,15 @@ def _get_aug_periods_to_aug_period_meas_types( def _get_update_info( - model_dict: dict, dimensions: Dimensions, labels: Labels, anchoring_info: Anchoring + model_spec: ModelSpec, + dimensions: Dimensions, + labels: Labels, + anchoring_info: Anchoring, ) -> DataFrame: """Construct a DataFrame with information on each Kalman update. Args: - model_dict: The model specification. See: :ref:`model_specs` + model_spec: The model specification. See: :ref:`model_specs` dimensions: Dimensional information. labels: Labels for model quantities. anchoring_info: Information about anchoring. See :ref:`anchoring` @@ -525,7 +512,7 @@ def _get_update_info( measurements = {} for factor in labels.latent_factors: - measurements[factor] = model_dict["factors"][factor]["measurements"] + measurements[factor] = model_spec.factors[factor].measurements if len(measurements[factor]) != dimensions.n_aug_periods: raise ValueError( "Measurements must be of length `n_aug_periods`, " @@ -538,7 +525,7 @@ def _get_update_info( uinfo.loc[(aug_period, meas), factor] = True uinfo.loc[(aug_period, meas), "purpose"] = "measurement" for factor in anchoring_info.factors: - outcome = anchoring_info.outcomes[factor] # ty: ignore[invalid-argument-type] + outcome = anchoring_info.outcomes[factor] name = f"{outcome}_{factor}" uinfo.loc[(aug_period, name), factor] = True uinfo.loc[(aug_period, name), "purpose"] = "anchoring" @@ -549,12 +536,12 @@ def _get_update_info( def _process_normalizations( - model_dict: dict, dimensions: Dimensions, labels: Labels + model_spec: ModelSpec, dimensions: Dimensions, labels: Labels ) -> dict[str, dict[str, list]]: """Process the normalizations of intercepts and factor loadings. Args: - model_dict: The model specification. See: :ref:`model_specs` + model_spec: The model specification. See: :ref:`model_specs` dimensions: Dimensional information. labels: Labels for model quantities. 
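Isolating the interleaving rule from _augment_periods_for_endogenous_factors above: state factors keep their measurements at one parity of augmented periods and get empty tuples at the other, endogenous factors the reverse. A runnable sketch with toy tuples:

    aug_periods_to_periods = {0: 0, 1: 0, 2: 1, 3: 1}
    measurements = (("m1",), ("m2",))

    def augment(measurements, *, is_endogenous):
        insert_at_modulo = 0 if is_endogenous else 1
        return tuple(
            () if aug_p % 2 == insert_at_modulo else measurements[p]
            for aug_p, p in aug_periods_to_periods.items()
        )

    assert augment(measurements, is_endogenous=False) == (("m1",), (), ("m2",), ())
    assert augment(measurements, is_endogenous=True) == ((), ("m1",), (), ("m2",))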
@@ -563,18 +550,20 @@ def _process_normalizations( intercepts for each factor. """ - normalizations = {} + normalizations: dict[str, dict[str, list]] = {} for factor in labels.latent_factors: normalizations[factor] = {} - norminfo = model_dict["factors"][factor].get("normalizations", {}) + fspec = model_spec.factors[factor] for norm_type in ["loadings", "intercepts"]: - candidate = norminfo.get( - norm_type, [{} for _ in range(dimensions.n_aug_periods)] - ) - if not len(candidate) == dimensions.n_aug_periods: + if fspec.normalizations is not None: + norms = getattr(fspec.normalizations, norm_type) + candidate = [dict(m) for m in norms] + else: + candidate = [{} for _ in range(dimensions.n_aug_periods)] + if len(candidate) != dimensions.n_aug_periods: raise ValueError( "Normalizations must be of length `n_aug_periods`, " - f"got {norminfo} for {factor}['{norm_type}']" + f"got {candidate} for {factor}['{norm_type}']" ) normalizations[factor][norm_type] = candidate diff --git a/src/skillmodels/simulate_data.py b/src/skillmodels/simulate_data.py index dfede0b8..b59554d4 100644 --- a/src/skillmodels/simulate_data.py +++ b/src/skillmodels/simulate_data.py @@ -28,7 +28,7 @@ def simulate_dataset( - model: dict | ModelSpec, + model_spec: ModelSpec, params: pd.DataFrame, n_obs: int | None = None, data: pd.DataFrame | None = None, @@ -38,15 +38,14 @@ def simulate_dataset( """Simulate datasets generated by a latent factor model. Args: - model: The model specification, either as a dict or ModelSpec instance. - See: :ref:`model_specs` - params: DataFrame with model parameters. - n_obs: Number of simulated individuals + model_spec: The model specification. See: :ref:`model_specs` + params: Model parameters. + n_obs: Number of simulated individuals. data: Dataset in the same format as for estimation, containing information about observed factors and control variables. - policies: list of dictionaries. Each dictionary specifies a - a stochastic shock to a latent factor AT THE END of "period" for "factor" - with mean "effect_size" and "standard deviation" + policies: Each dictionary specifies a stochastic shock to a latent factor + AT THE END of "period" for "factor" with mean "effect_size" and + "standard deviation". seed: Random seed for reproducibility. If None, uses numpy's default random state. 
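A hypothetical `policies` argument matching the simulate_dataset docstring above; the key names follow its wording (including "standard deviation" with a space), the values are invented:

    policies = [
        {
            "period": 1,  # shock applies at the end of this period
            "factor": "cog",
            "effect_size": 0.5,
            "standard deviation": 0.1,
        },
    ]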
@@ -62,15 +61,14 @@ def simulate_dataset( if data is None and n_obs is None: raise ValueError("Either `data` or `n_obs` has to be provided.") - model_dict = model.to_dict() if isinstance(model, ModelSpec) else model - processed_model = process_model(model_dict) + processed_model = process_model(model_spec) if processed_model.labels.observed_factors and data is None: raise ValueError( "To simulate a model with observed factors, data cannot be None.", ) - if processed_model.labels.controls != ["constant"] and data is None: + if processed_model.labels.controls != ("constant",) and data is None: raise ValueError("To simulate a model with controls, data cannot be None.") if data is not None: @@ -155,7 +153,7 @@ def simulate_dataset( # Anchor the collapsed version (anchoring only works with period, not aug_period) anchored_latent_data = anchor_states_df( states_df=latent_data, - model=model_dict, + model_spec=model_spec, params=params, use_aug_period=False, ) @@ -383,9 +381,7 @@ def _collapse_aug_periods_to_periods( ) endogenous_cols = [ - fac - for fac in factors - if endogenous_factors_info.factor_info[fac].is_endogenous # ty: ignore[invalid-argument-type] + fac for fac in factors if endogenous_factors_info.factor_info[fac].is_endogenous ] state_cols = [fac for fac in factors if fac not in endogenous_cols] @@ -407,7 +403,7 @@ def _get_shock( sd: float, size: int, ) -> NDArray[np.floating]: - """Add stochastic effect to a factor of length n_obs. + """Add stochastic effect to a factor of length n_obs. Args: rng: NumPy random number generator. diff --git a/src/skillmodels/types.py b/src/skillmodels/types.py index c1e01e38..fd066786 100644 --- a/src/skillmodels/types.py +++ b/src/skillmodels/types.py @@ -3,10 +3,10 @@ from collections.abc import Callable from dataclasses import dataclass from enum import Enum, auto +from types import MappingProxyType from typing import NewType import pandas as pd -from frozendict import frozendict from jax import Array # NewType definitions for domain safety @@ -66,10 +66,10 @@ class Labels: stagemap: tuple[int, ...] stages: tuple[int, ...] aug_periods: tuple[int, ...] - aug_periods_to_periods: frozendict[int, int] + aug_periods_to_periods: MappingProxyType[int, int] aug_stagemap: tuple[int, ...] aug_stages: tuple[int, ...] - aug_stages_to_stages: frozendict[int, int] + aug_stages_to_stages: MappingProxyType[int, int] transition_names: tuple[str, ...] = () @property @@ -83,7 +83,7 @@ class Anchoring: """Information about how latent factors are anchored to observed outcomes.""" anchoring: bool - outcomes: frozendict[str, str] + outcomes: MappingProxyType[str, str] factors: tuple[str, ...] 
free_controls: bool free_constant: bool @@ -95,7 +95,7 @@ def disabled(cls) -> Anchoring: """Create an Anchoring config with anchoring disabled.""" return cls( anchoring=False, - outcomes=frozendict({}), + outcomes=MappingProxyType({}), factors=(), free_controls=False, free_constant=False, @@ -128,7 +128,7 @@ def from_config( """ return cls( anchoring=True, - outcomes=frozendict(outcomes), + outcomes=MappingProxyType(outcomes), factors=tuple(outcomes.keys()), free_controls=free_controls, free_constant=free_constant, @@ -155,9 +155,9 @@ class TransitionInfo: """Information about transition functions.""" func: Callable - param_names: frozendict[str, list[str]] - individual_functions: frozendict[str, Callable] - function_names: frozendict[str, str] + param_names: MappingProxyType[str, list[str]] + individual_functions: MappingProxyType[str, Callable] + function_names: MappingProxyType[str, str] @dataclass(frozen=True) @@ -213,10 +213,10 @@ class EndogenousFactorsInfo: """Information about endogenous factors in the model.""" has_endogenous_factors: bool - aug_periods_to_aug_period_meas_types: frozendict[int, MeasurementType] + aug_periods_to_aug_period_meas_types: MappingProxyType[int, MeasurementType] bounds_distance: float aug_periods_from_period: Callable[[int], list[int]] - factor_info: frozendict[str, FactorInfo] + factor_info: MappingProxyType[str, FactorInfo] @dataclass(frozen=True) diff --git a/src/skillmodels/utilities.py b/src/skillmodels/utilities.py index 6b161c6b..b7f8226b 100644 --- a/src/skillmodels/utilities.py +++ b/src/skillmodels/utilities.py @@ -1,12 +1,17 @@ """Utility functions for manipulating model specifications and parameters.""" import warnings -from copy import deepcopy -from typing import Any +from dataclasses import replace +from types import MappingProxyType import numpy as np import pandas as pd +from skillmodels.model_spec import ( + FactorSpec, + ModelSpec, + Normalizations, +) from skillmodels.params_index import get_params_index from skillmodels.process_model import ( get_dimensions, @@ -17,28 +22,28 @@ def extract_factors( factors: str | list[str], - model_dict: dict[str, Any], + model_spec: ModelSpec, params: pd.DataFrame | None = None, -) -> dict[str, Any] | tuple[dict[str, Any], pd.DataFrame]: +) -> ModelSpec | tuple[ModelSpec, pd.DataFrame]: """Reduce a specification to a model with fewer latent factors. If provided, a params DataFrame is also reduced correspondingly. Args: factors: Name(s) of the factor(s) to extract. - model_dict: The model specification. See: :ref:`model_specs`. + model_spec: The model specification. See: :ref:`model_specs`. params: The params DataFrame for the full model. 
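extract_factors below now just removes the complement of the kept factors; the set logic in isolation, with toy factor names:

    all_factors = ("cog", "noncog", "inv")
    keep = ["cog"]
    to_remove = sorted(set(all_factors).difference(keep))
    assert to_remove == ["inv", "noncog"]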
Returns: - dict: The reduced model dictionary + ModelSpec: The reduced model specification pandas.DataFrame: The reduced parameter DataFrame (only if params is not None) """ if isinstance(factors, str): factors = [factors] - to_remove = list(set(model_dict["factors"]).difference(factors)) - return remove_factors(factors=to_remove, model_dict=model_dict, params=params) + to_remove = list(set(model_spec.factors).difference(factors)) + return remove_factors(factors=to_remove, model_spec=model_spec, params=params) def update_parameter_values( @@ -78,9 +83,9 @@ def update_parameter_values( def remove_factors( factors: str | list[str], - model_dict: dict[str, Any], + model_spec: ModelSpec, params: pd.DataFrame | None = None, -) -> dict[str, Any] | tuple[dict[str, Any], pd.DataFrame]: +) -> ModelSpec | tuple[ModelSpec, pd.DataFrame]: """Remove factors from a model specification. If provided, a params DataFrame is also reduced correspondingly. @@ -90,29 +95,39 @@ def remove_factors( Args: factors: Name(s) of the factor(s) to remove. - model_dict: The model specification. See: :ref:`model_specs`. + model_spec: The model specification. See: :ref:`model_specs`. params: The params DataFrame for the full model. Returns: - dict: The reduced model dictionary + ModelSpec: The reduced model specification pandas.DataFrame: The reduced parameter DataFrame (only if params is not None) """ - # We need this for the full model when endogenous factors are present. - has_endogenous_factors = get_has_endogenous_factors(model_dict["factors"]) + if isinstance(factors, str): + factors = [factors] - out = deepcopy(model_dict) + # We need this for the full model when endogenous factors are present. + has_endogenous_factors = get_has_endogenous_factors(model_spec.factors) - out["factors"] = _remove_from_dict(dict_=out["factors"], to_remove=factors) + new_factors = {k: v for k, v in model_spec.factors.items() if k not in factors} # adjust anchoring - if "anchoring" in model_dict: - out["anchoring"]["outcomes"] = _remove_from_dict( - dict_=out["anchoring"]["outcomes"], - to_remove=factors, - ) - if out["anchoring"]["outcomes"] == {}: - out = _remove_from_dict(dict_=out, to_remove="anchoring") + new_anchoring = model_spec.anchoring + if new_anchoring is not None: + new_outcomes = { + k: v for k, v in new_anchoring.outcomes.items() if k not in factors + } + if new_outcomes: + new_anchoring = replace( + new_anchoring, outcomes=MappingProxyType(new_outcomes) + ) + else: + new_anchoring = None + + out = model_spec._replace( + factors=MappingProxyType(new_factors), + anchoring=new_anchoring, + ) # Remove periods if necessary, but only if no endogenous factors are present. 
 # (else we would mess up the mapping between raw periods model periods)
     if has_endogenous_factors is False:
         new_n_periods = get_dimensions(
             out, has_endogenous_factors=has_endogenous_factors
         ).n_periods
-        out = reduce_n_periods(model_dict=out, new_n_periods=new_n_periods)
+        reduced = reduce_n_periods(model_spec=out, new_n_periods=new_n_periods)
+        if not isinstance(reduced, ModelSpec):
+            msg = "Expected ModelSpec from reduce_n_periods without params"
+            raise TypeError(msg)
+        out = reduced
 
     if params is not None:
         out_params = _reduce_params(
             params,
-            out,  # ty: ignore[invalid-argument-type]
+            out,
             has_endogenous_factors=has_endogenous_factors,
         )
-        out = (out, out_params)
+        return (out, out_params)
 
-    return out  # ty: ignore[invalid-return-type]
+    return out
 
 
 def remove_measurements(
     measurements: str | list[str],
-    model_dict: dict[str, Any],
+    model_spec: ModelSpec,
     params: pd.DataFrame | None = None,
-) -> dict[str, Any] | tuple[dict[str, Any], pd.DataFrame]:
+) -> ModelSpec | tuple[ModelSpec, pd.DataFrame]:
     """Remove measurements from a model specification.
 
     If provided, a params DataFrame is also reduced correspondingly.
 
     Args:
         measurements: Name(s) of the measurement(s) to remove.
-        model_dict: The model specification. See: :ref:`model_specs`.
+        model_spec: The model specification. See: :ref:`model_specs`.
         params: The params DataFrame for the full model.
 
     Returns:
-        dict: The reduced model dictionary
+        ModelSpec: The reduced model specification
         pandas.DataFrame: The reduced parameter DataFrame (only if params is
             not None)
 
     """
-    out = deepcopy(model_dict)
-
-    for factor in model_dict["factors"]:
-        full = model_dict["factors"][factor]["measurements"]
-        reduced = [
-            _remove_from_list(list_=meas_list, to_remove=measurements)
-            for meas_list in full
-        ]
-        out["factors"][factor]["measurements"] = reduced
-
-        norminfo = model_dict["factors"][factor].get("normalizations", {})
-        if "loadings" in norminfo:
-            out["factors"][factor]["normalizations"]["loadings"] = (
-                _remove_measurements_from_normalizations(
-                    measurements=measurements,
-                    normalizations=norminfo["loadings"],
-                )
-            )
+    if isinstance(measurements, str):
+        measurements = [measurements]
+
+    new_factors: dict[str, FactorSpec] = {}
+    for factor, fspec in model_spec.factors.items():
+        new_meas = tuple(
+            tuple(m for m in period_meas if m not in measurements)
+            for period_meas in fspec.measurements
+        )
 
-        if "intercepts" in norminfo:
-            out["factors"][factor]["normalizations"]["intercepts"] = (
-                _remove_measurements_from_normalizations(
-                    measurements=measurements,
-                    normalizations=norminfo["intercepts"],
+        new_normalizations = fspec.normalizations
+        if new_normalizations is not None:
+            new_loadings = tuple(
+                MappingProxyType({k: v for k, v in d.items() if k not in measurements})
+                for d in new_normalizations.loadings
+            )
+            new_intercepts = tuple(
+                MappingProxyType({k: v for k, v in d.items() if k not in measurements})
+                for d in new_normalizations.intercepts
+            )
+            if new_loadings != new_normalizations.loadings or (
+                new_intercepts != new_normalizations.intercepts
+            ):
+                warnings.warn(
+                    "You removed a normalized measurement from a model. 
Make sure " + "there are enough normalizations left to ensure identification.", + stacklevel=2, ) + new_normalizations = Normalizations( + loadings=new_loadings, + intercepts=new_intercepts, ) + new_factors[factor] = replace( + fspec, measurements=new_meas, normalizations=new_normalizations + ) + + out = model_spec._replace(factors=MappingProxyType(new_factors)) + if params is not None: # This likely won't work if we have endogenous factors. out_params = _reduce_params(params, out, has_endogenous_factors=False) - out = (out, out_params) + return (out, out_params) return out def remove_controls( controls: str | list[str], - model_dict: dict[str, Any], + model_spec: ModelSpec, params: pd.DataFrame | None = None, -) -> dict[str, Any] | tuple[dict[str, Any], pd.DataFrame]: +) -> ModelSpec | tuple[ModelSpec, pd.DataFrame]: """Remove control variables from a model specification. If provided, a params DataFrame is also reduced correspondingly. Args: controls: Name(s) of the contral variable(s) to remove. - model_dict: The model specification. See: :ref:`model_specs`. + model_spec: The model specification. See: :ref:`model_specs`. params: The params DataFrame for the full model. Returns: - dict: The reduced model dictionary + ModelSpec: The reduced model specification pandas.DataFrame: The reduced parameter DataFrame (only if params is not None) """ - out = deepcopy(model_dict) - out["controls"] = _remove_from_list(list_=out["controls"], to_remove=controls) - if out["controls"] == []: - out = _remove_from_dict(dict_=out, to_remove="controls") + if isinstance(controls, str): + controls = [controls] + + new_controls = tuple(c for c in model_spec.controls if c not in controls) + out = model_spec._replace(controls=new_controls) if params is not None: # This likely won't work if we have endogenous factors. out_params = _reduce_params(params, out, has_endogenous_factors=False) - out = (out, out_params) + return (out, out_params) return out def switch_translog_to_linear( - model_dict: dict[str, Any], + model_spec: ModelSpec, params: pd.DataFrame | None = None, -) -> dict[str, Any] | tuple[dict[str, Any], pd.DataFrame]: +) -> ModelSpec | tuple[ModelSpec, pd.DataFrame]: """Switch all translog production functions to linear. If provided, a params DataFrame is also reduced correspondingly. Args: - model_dict: The model specification. See: :ref:`model_specs`. + model_spec: The model specification. See: :ref:`model_specs`. params: The params DataFrame for the full model. Returns: - dict: The reduced model dictionary + ModelSpec: The reduced model specification pandas.DataFrame: The reduced parameter DataFrame (only if params is not None) """ - out = deepcopy(model_dict) - for factor in model_dict["factors"]: - if model_dict["factors"][factor]["transition_function"] == "translog": - out["factors"][factor]["transition_function"] = "linear" + new_factors: dict[str, FactorSpec] = {} + for name, fspec in model_spec.factors.items(): + if fspec.transition_function == "translog": + new_factors[name] = fspec.with_transition_function("linear") + else: + new_factors[name] = fspec + out = model_spec._replace(factors=MappingProxyType(new_factors)) if params is not None: # This likely won't work if we have endogenous factors. 
out_params = _reduce_params(params, out, has_endogenous_factors=False) - out = (out, out_params) + return (out, out_params) return out def switch_linear_to_translog( - model_dict: dict[str, Any], + model_spec: ModelSpec, params: pd.DataFrame | None = None, -) -> dict[str, Any] | tuple[dict[str, Any], pd.DataFrame]: +) -> ModelSpec | tuple[ModelSpec, pd.DataFrame]: """Switch all linear production functions to translog. If provided, a params DataFrame is also extended correspondingly. The fill value @@ -261,96 +296,78 @@ def switch_linear_to_translog( the additional parameters are not initialized at zero. Args: - model_dict: The model specification. See: :ref:`model_specs`. + model_spec: The model specification. See: :ref:`model_specs`. params: The params DataFrame for the full model. Returns: - dict: The reduced model dictionary + ModelSpec: The reduced model specification pandas.DataFrame: The reduced parameter DataFrame (only if params is not None) """ - out = deepcopy(model_dict) - for factor in model_dict["factors"]: - if model_dict["factors"][factor]["transition_function"] == "linear": - out["factors"][factor]["transition_function"] = "translog" + new_factors: dict[str, FactorSpec] = {} + for name, fspec in model_spec.factors.items(): + if fspec.transition_function == "linear": + new_factors[name] = fspec.with_transition_function("translog") + else: + new_factors[name] = fspec + out = model_spec._replace(factors=MappingProxyType(new_factors)) if params is not None: - out_params = _extend_params(params=params, model_dict=out, fill_value=0.05) - out = (out, out_params) + out_params = _extend_params(params=params, model_spec=out, fill_value=0.05) + return (out, out_params) + return out def reduce_n_periods( - model_dict: dict[str, Any], + model_spec: ModelSpec, new_n_periods: int, params: pd.DataFrame | None = None, -) -> dict[str, Any] | tuple[dict[str, Any], pd.DataFrame]: +) -> ModelSpec | tuple[ModelSpec, pd.DataFrame]: """Remove all periods after n_periods. Args: - model_dict: The model specification. See: :ref:`model_specs`. + model_spec: The model specification. See: :ref:`model_specs`. new_n_periods: The new number of periods. params: The params DataFrame for the full model. 
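The truncation rule implemented in the body below is tuple slicing: measurements and normalizations are cut to the new horizon, and the stagemap, with one entry per transition, to new_n_periods - 1. Isolated with toy values:

    new_n_periods = 2
    measurements = (("m1",), ("m2",), ("m3",))
    stagemap = (0, 0)

    new_meas = measurements[:new_n_periods]
    if len(stagemap) > new_n_periods - 1:
        stagemap = stagemap[: new_n_periods - 1]
    assert new_meas == (("m1",), ("m2",))
    assert stagemap == (0,)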
Returns: - dict: The reduced model dictionary + ModelSpec: The reduced model specification pandas.DataFrame: The reduced parameter DataFrame (only if params is not None) """ - out = deepcopy(model_dict) - for factor in model_dict["factors"]: - out["factors"][factor]["measurements"] = _shorten_if_necessary( - list_=out["factors"][factor]["measurements"], - length=new_n_periods, - ) - - norminfo = model_dict["factors"][factor].get("normalizations", {}) - if "loadings" in norminfo: - out["factors"][factor]["normalizations"]["loadings"] = ( - _shorten_if_necessary(list_=norminfo["loadings"], length=new_n_periods) + new_factors: dict[str, FactorSpec] = {} + for name, fspec in model_spec.factors.items(): + new_meas = fspec.measurements[:new_n_periods] + new_normalizations = fspec.normalizations + if new_normalizations is not None: + new_normalizations = Normalizations( + loadings=new_normalizations.loadings[:new_n_periods], + intercepts=new_normalizations.intercepts[:new_n_periods], ) + new_factors[name] = replace( + fspec, measurements=new_meas, normalizations=new_normalizations + ) - if "intercepts" in norminfo: - out["factors"][factor]["normalizations"]["intercepts"] = ( - _shorten_if_necessary( - list_=norminfo["intercepts"], length=new_n_periods - ) - ) + new_stagemap = model_spec.stagemap + if new_stagemap is not None and len(new_stagemap) > new_n_periods - 1: + new_stagemap = new_stagemap[: new_n_periods - 1] - if "stagemap" in out: - out["stagemap"] = _shorten_if_necessary( - list_=out["stagemap"], length=new_n_periods - 1 - ) + out = model_spec._replace( + factors=MappingProxyType(new_factors), + stagemap=new_stagemap, + ) if params is not None: - out_params = _extend_params(params=params, model_dict=out, fill_value=0.05) - out = (out, out_params) + out_params = _extend_params(params=params, model_spec=out, fill_value=0.05) + return (out, out_params) return out -def _remove_from_list( - list_: list[Any], - to_remove: str | list[str], -) -> list[Any]: - if isinstance(to_remove, str): - to_remove = [to_remove] - return [element for element in list_ if element not in to_remove] - - -def _remove_from_dict( - dict_: dict[str, Any], - to_remove: str | list[str], -) -> dict[str, Any]: - if isinstance(to_remove, str): - to_remove = [to_remove] - - return {key: val for key, val in dict_.items() if key not in to_remove} - - def _reduce_params( params: pd.DataFrame, - model_dict: dict[str, Any], + model_spec: ModelSpec, *, has_endogenous_factors: bool, ) -> pd.DataFrame: @@ -361,14 +378,14 @@ def _reduce_params( Args: params: The params DataFrame for the full model. - model_dict: The model specification. See: :ref:`model_specs`. + model_spec: The model specification. See: :ref:`model_specs`. has_endogenous_factors: Whether the model has endogenous factors. Returns: pandas.DataFrame: The reduced parameters DataFrame. """ - index = _get_params_index_from_model_dict(model_dict) + index = _get_params_index(model_spec) # If we have endogenous factors, we need to keep the periods from params. 
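Both _reduce_params (continuing below) and _extend_params rebuild the params index from the modified spec and align the old frame to it; the reindex-and-fill branch of _extend_params, sketched on a toy frame:

    import pandas as pd

    old = pd.DataFrame(
        {"value": [1.0]}, index=pd.Index(["loadings"], name="category")
    )
    new_index = pd.Index(["loadings", "trans_coeffs"], name="category")
    out = old.reindex(new_index)
    out["value"] = out["value"].fillna(0.05)  # fill_value used in the diff
    assert out.loc["trans_coeffs", "value"] == 0.05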
if has_endogenous_factors: df = pd.merge( @@ -385,10 +402,10 @@ def _reduce_params( def _extend_params( params: pd.DataFrame, - model_dict: dict[str, Any], + model_spec: ModelSpec, fill_value: float, ) -> pd.DataFrame: - index = _get_params_index_from_model_dict(model_dict) + index = _get_params_index(model_spec) out = params.reindex(index) out["value"] = out["value"].fillna(fill_value) if "lower_bound" in out: @@ -400,10 +417,10 @@ def _extend_params( return out -def _get_params_index_from_model_dict( - model_dict: dict[str, Any], +def _get_params_index( + model_spec: ModelSpec, ) -> pd.MultiIndex: - mod = process_model(model_dict) + mod = process_model(model_spec) return get_params_index( update_info=mod.update_info, labels=mod.labels, @@ -411,28 +428,3 @@ def _get_params_index_from_model_dict( transition_info=mod.transition_info, endogenous_factors_info=mod.endogenous_factors_info, ) - - -def _remove_measurements_from_normalizations( - measurements: str | list[str], - normalizations: list[dict[str, Any]], -) -> list[dict[str, Any]]: - reduced = [ - _remove_from_dict(dict_=norm, to_remove=measurements) for norm in normalizations - ] - if reduced != normalizations: - warnings.warn( - "Your removed a normalized measurement from a model. Make sure there are " - "enough normalizations left to ensure identification.", - stacklevel=2, - ) - return reduced - - -def _shorten_if_necessary( - list_: list[Any], - length: int, -) -> list[Any]: - if len(list_) > length: - list_ = list_[:length] - return list_ diff --git a/src/skillmodels/visualize_factor_distributions.py b/src/skillmodels/visualize_factor_distributions.py index e11ec405..cdf7587a 100644 --- a/src/skillmodels/visualize_factor_distributions.py +++ b/src/skillmodels/visualize_factor_distributions.py @@ -161,7 +161,7 @@ def combine_distribution_plots( def univariate_densities( data: pd.DataFrame, - model: dict[str, Any] | ModelSpec, + model_spec: ModelSpec, params: pd.DataFrame, period: int, factors: list[str] | tuple[str, ...] | None = None, @@ -184,46 +184,42 @@ def univariate_densities( Args: data: Model estimation input data. - model: The model specification, either as a dict or ModelSpec instance. - See: :ref:`model_specs` - params: DataFrame with estimated parameter values. + model_spec: The model specification. See: :ref:`model_specs` + params: Estimated parameter values. period: Model period for which to plot the distributions for. - factors: List of factors for which to plot the densities. + factors: Factors for which to plot the densities. If None, plot pairwise distributions for all latent factors. observed_factors: If True, plot densities of observed factors too. - states: List or dictionary with tidy - DataFrames with filtered or simulated states or only one DataFrame with - filtered or simulated states. If None, retrieve data frame with filtered - states using model and data. States are used to estimate the state - ranges in each period (if state_ranges are not given explicitly) and to - estimate the distribution of the latent factors. + states: Filtered or simulated states. Can be a single DataFrame, a list, + or a dictionary of DataFrames. If None, retrieve filtered states using + model and data. Used to estimate state ranges and factor distributions. show_hist: Add histogram to the distplot. - show_curve: Add density curve to the displot. + show_curve: Add density curve to the distplot. show_rug: Add rug to the distplot. curve_type: Curve type, 'normal' or 'kde', to add to the distplot. 
colorscale: The color palette used when plotting multiple data. Must be a valid attribute of px.colors.qualitative. bin_size: Size of the histogram bins. - distplot_kwargs: Dictionary with additional keyword - arguments passed to ff.create_distplot() to initiate - the distplot. - layout_kwargs: Dictionary of keyword arguments to update - layout of the plot figures. Some essential layout kwargs are: - - xaxis_title: label label + distplot_kwargs: Additional keyword arguments passed to + ff.create_distplot(). + layout_kwargs: Keyword arguments to update layout of the plot figures. + Some essential layout kwargs are: + - xaxis_title: label of x axis - yaxis_title: label of y axis - xaxis_showgrid: display axis grid - yaxis_showgrid: display axis grid - template: figure background theme - showlegend: add legend + Returns: - plots_dict: Dictionary with density plots. + plots_dict: Density plots keyed by factor name. """ if states is None: - states = get_filtered_states(model=model, data=data, params=params)[ + states = get_filtered_states(model_spec=model_spec, data=data, params=params)[ "anchored_states" ]["states"] - processed_model = process_model(model) + processed_model = process_model(model_spec) factors = _get_factors( model=processed_model, factors=factors, @@ -272,7 +268,7 @@ def univariate_densities( def bivariate_density_contours( data: pd.DataFrame, - model: dict[str, Any] | ModelSpec, + model_spec: ModelSpec, params: pd.DataFrame, period: int, factors: list[str] | tuple[str, ...] | None = None, @@ -295,25 +291,20 @@ def bivariate_density_contours( Args: data: Model estimation input data. - model: The model specification, either as a dict or ModelSpec instance. - See: :ref:`model_specs` - params: DataFrame with estimated parameter values. + model_spec: The model specification. See: :ref:`model_specs` + params: Estimated parameter values. period: Model period for which to plot the distributions for. - factors: List of factors for which to plot the densities. + factors: Factors for which to plot the densities. If None, plot pairwise distributions for all latent factors. observed_factors: If True, plot densities of observed factors too. - states: List or dictionary with tidy - DataFrames with filtered or simulated states or only one DataFrame with - filtered or simulated states. If None, retrieve data frame with filtered - states using model and data. States are used to estimate the state - ranges in each period (if state_ranges are not given explicitly) and to - estimate the distribution of the latent factors. + states: Filtered or simulated states. Can be a single DataFrame, a list, + or a dictionary of DataFrames. If None, retrieve filtered states using + model and data. Used to estimate state ranges and factor distributions. n_points: Number of grid points used to create the mesh for calculation of kernel densities. - contour_kwargs: Dictionary with keyword arguments to set - contour line properties (such as annotation, colorscale). - layout_kwargs: Dictionary with keyword arguments to set - figure layout properties. + contour_kwargs: Keyword arguments to set contour line properties + (such as annotation, colorscale). + layout_kwargs: Keyword arguments to set figure layout properties. The following are various essential keyword arguments defining various features of plots. All features can also be changed ex-post via 'update_layout' or @@ -329,18 +320,17 @@ def bivariate_density_contours( lines_colorscale: The color palette used for contour lines when plotting multiple scenarios. 
Must be a valid px.colors.qualitative attribute. Default 'D3'. - showcolorbar: A boolean variable for displaying color bar. + showcolorbar: Whether to display the color bar. Returns: - plots_dict: Dictionary with factor combinations as keys and respective - pariwise plots of density contours as values. + plots_dict: Pairwise density contour plots keyed by factor combinations. """ if states is None: - states = get_filtered_states(model=model, data=data, params=params)[ + states = get_filtered_states(model_spec=model_spec, data=data, params=params)[ "anchored_states" ]["states"] - processed_model = process_model(model) + processed_model = process_model(model_spec) factors = _get_factors( model=processed_model, factors=factors, @@ -404,7 +394,7 @@ def bivariate_density_contours( def bivariate_density_surfaces( data: pd.DataFrame, - model: dict[str, Any] | ModelSpec, + model_spec: ModelSpec, params: pd.DataFrame, period: int, factors: list[str] | tuple[str, ...] | None = None, @@ -427,19 +417,15 @@ def bivariate_density_surfaces( Args: data: Model estimation input data. - model: The model specification, either as a dict or ModelSpec instance. - See: :ref:`model_specs` - params: DataFrame with estimated parameter values. + model_spec: The model specification. See: :ref:`model_specs` + params: Estimated parameter values. period: Model period for which to plot the distributions for. - factors: List of factors for which to plot the densities. + factors: Factors for which to plot the densities. If None, plot pairwise distributions for all latent factors. observed_factors: If True, plot densities of observed factors too. - states: List or dictionary with tidy - DataFrames with filtered or simulated states or only one DataFrame with - filtered or simulated states. If None, retrieve data frame with filtered - states using model and data. States are used to estimate the state - ranges in each period (if state_ranges are not given explicitly) and to - estimate the distribution of the latent factors. + states: Filtered or simulated states as a single DataFrame. + If None, retrieve filtered states using model and data. Used to estimate + state ranges and factor distributions. n_points: Number of grid points used to create the mesh for calculation of kernel densities. @@ -448,29 +434,27 @@ def bivariate_density_surfaces( 'update_traces'. Some default figure layout properties (such as background theme) are defined if layout_kwargs is None. - layout_kwargs: Dictionary with keyword arguments to set - figure layout properties. + layout_kwargs: Keyword arguments to set figure layout properties. colorscale: The color scale to use for line legends. Must be a valid plotly.express.colors.sequential attribute. Default 'RdBu_r'. opacity: Opacity of the surface. Default 0.9. - showcolorbar: A boolean variable for displaying the colorbar associated - with the surface color scale. - showgrids: A boolean variable for showing axes grids. - showaxlines: A boolean variable for showing axes lines. - showlabels: A boolean variable for displaying axes labels. + showcolorbar: Whether to display the colorbar associated with the + surface color scale. + showgrids: Whether to show axes grids. + showaxlines: Whether to show axes lines. + showlabels: Whether to display axes labels. Returns: - plots_dict: Dictionary with factor combinations as keys and respective - pariwise plots of 3d density plots as values. + plots_dict: Pairwise 3d density surface plots keyed by factor combinations. 
""" if states is None: - states = get_filtered_states(model=model, data=data, params=params)[ + states = get_filtered_states(model_spec=model_spec, data=data, params=params)[ "anchored_states" ]["states"] elif not isinstance(states, pd.DataFrame): raise ValueError("3d plots are only supported if states is a DataFrame") - processed_model = process_model(model) + processed_model = process_model(model_spec) factors = _get_factors( model=processed_model, factors=factors, diff --git a/src/skillmodels/visualize_transition_equations.py b/src/skillmodels/visualize_transition_equations.py index 35dfe5af..9a8b6669 100644 --- a/src/skillmodels/visualize_transition_equations.py +++ b/src/skillmodels/visualize_transition_equations.py @@ -141,7 +141,7 @@ def combine_transition_plots( def get_transition_plots( - model: dict[str, Any] | ModelSpec, + model_spec: ModelSpec, params: pd.DataFrame, data: pd.DataFrame, period: int, @@ -161,10 +161,9 @@ def get_transition_plots( """Get dictionary with individual plots of transition equations for each factor. Args: - model: The model specification, either as a dict or ModelSpec instance. - See: :ref:`model_specs` - params: DataFrame with model parameters. - data: Empirical dataset that is used to estimate the model. + model_spec: The model specification. See: :ref:`model_specs` + params: Model parameters. + data: Empirical dataset used to estimate the model. period: The start period of the transition equations that are plotted. state_ranges: The keys are the names of the latent factors. The values are DataFrames with the columns "period", "minimum", "maximum". @@ -192,7 +191,7 @@ def get_transition_plots( quantiles_of_other_factors, ) - processed_model = process_model(model) + processed_model = process_model(model_spec) if period >= processed_model.labels.periods[-1]: raise ValueError( @@ -208,10 +207,10 @@ def get_transition_plots( latent_factors = [ lf for lf in processed_model.labels.latent_factors - if not processed_model.endogenous_factors_info.factor_info[lf].is_correction # ty: ignore[invalid-argument-type] + if not processed_model.endogenous_factors_info.factor_info[lf].is_correction ] all_factors = processed_model.labels.all_factors - states = get_filtered_states(model=model, data=data, params=params)[ + states = get_filtered_states(model_spec=model_spec, data=data, params=params)[ "anchored_states" ]["states"] return _get_dictionary_with_plots( @@ -314,10 +313,10 @@ def _get_dictionary_with_plots( _aug_periods = [period] plots_dict = {} for output_factor, input_factor in itertools.product(latent_factors, all_factors): - transition_function = model.transition_info.individual_functions[output_factor] # ty: ignore[invalid-argument-type] + transition_function = model.transition_info.individual_functions[output_factor] if ( has_endogenous_factors - and model.endogenous_factors_info.factor_info[output_factor].is_endogenous # ty: ignore[invalid-argument-type] + and model.endogenous_factors_info.factor_info[output_factor].is_endogenous ): aug_period = min(_aug_periods) else: diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 00000000..8e273e10 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,16 @@ +"""Shared test fixtures and helpers.""" + +from skillmodels.model_spec import ModelSpec + + +def model_spec_from_yaml_dict(d: dict) -> ModelSpec: + """Create a ModelSpec from a YAML-loaded dictionary. + + Args: + d: A dictionary loaded from a YAML model specification file. + + Returns: + A ModelSpec instance. 
+ + """ + return ModelSpec.from_dict(d) diff --git a/tests/test_constraints.py b/tests/test_constraints.py index 25cd6f4b..cdb72fa9 100644 --- a/tests/test_constraints.py +++ b/tests/test_constraints.py @@ -1,8 +1,10 @@ +from types import MappingProxyType + import numpy as np import pandas as pd import pytest import yaml -from frozendict import frozendict +from conftest import model_spec_from_yaml_dict from pandas.testing import assert_frame_equal from skillmodels.config import TEST_DATA_DIR @@ -178,10 +180,10 @@ def test_constant_factor_constraints() -> None: stagemap=(0, 0, 0), stages=(0,), aug_periods=(0, 1, 2), - aug_periods_to_periods=frozendict({0: 0, 1: 1, 2: 2}), + aug_periods_to_periods=MappingProxyType({0: 0, 1: 1, 2: 2}), aug_stagemap=(0, 0, 0), aug_stages=(0,), - aug_stages_to_stages=frozendict({0: 0}), + aug_stages_to_stages=MappingProxyType({0: 0}), transition_names=("bla", "constant"), ) @@ -232,10 +234,10 @@ def test_trans_coeff_constraints() -> None: stagemap=(0, 0, 0), stages=(0,), aug_periods=(0, 1, 2), - aug_periods_to_periods=frozendict({0: 0, 1: 1, 2: 2}), + aug_periods_to_periods=MappingProxyType({0: 0, 1: 1, 2: 2}), aug_stagemap=(0, 0, 0), aug_stages=(0,), - aug_stages_to_stages=frozendict({0: 0}), + aug_stages_to_stages=MappingProxyType({0: 0}), transition_names=("log_ces", "bla", "blubb"), ) @@ -289,7 +291,7 @@ def base_anchoring_info(): return Anchoring( anchoring=True, factors=("f1", "f2"), - outcomes=frozendict({"f1": "outcome", "f2": "outcome"}), + outcomes=MappingProxyType({"f1": "outcome", "f2": "outcome"}), free_controls=True, free_constant=True, free_loadings=True, @@ -308,7 +310,7 @@ def test_anchoring_constraints_for_constants(anch_uinfo) -> None: anchoring_info = Anchoring( anchoring=True, factors=("f1", "f2"), - outcomes=frozendict({"f1": "outcome", "f2": "outcome"}), + outcomes=MappingProxyType({"f1": "outcome", "f2": "outcome"}), free_controls=True, free_constant=False, free_loadings=True, @@ -337,7 +339,7 @@ def test_anchoring_constraints_for_controls(anch_uinfo) -> None: anchoring_info = Anchoring( anchoring=True, factors=("f1", "f2"), - outcomes=frozendict({"f1": "outcome", "f2": "outcome"}), + outcomes=MappingProxyType({"f1": "outcome", "f2": "outcome"}), free_controls=False, free_constant=True, free_loadings=True, @@ -377,7 +379,7 @@ def test_anchoring_constraints_for_loadings(anch_uinfo) -> None: anchoring_info = Anchoring( anchoring=True, factors=("f1", "f2"), - outcomes=frozendict({"f1": "outcome", "f2": "outcome"}), + outcomes=MappingProxyType({"f1": "outcome", "f2": "outcome"}), free_controls=True, free_constant=True, free_loadings=False, @@ -414,7 +416,7 @@ def assert_list_equal_except_for_order(list1, list2) -> None: @pytest.fixture def simplest_augmented_model(): with (TEST_DATA_DIR / "simplest_augmented_model.yaml").open() as y: - model = yaml.load(y, Loader=yaml.SafeLoader) + model = model_spec_from_yaml_dict(yaml.load(y, Loader=yaml.SafeLoader)) return process_model(model) diff --git a/tests/test_correlation_heatmap.py b/tests/test_correlation_heatmap.py index 1784c224..02811287 100644 --- a/tests/test_correlation_heatmap.py +++ b/tests/test_correlation_heatmap.py @@ -1,8 +1,7 @@ -from types import SimpleNamespace +from types import MappingProxyType, SimpleNamespace import numpy as np import pandas as pd -from frozendict import frozendict from pandas.testing import assert_frame_equal as afe from skillmodels.correlation_heatmap import ( @@ -255,10 +254,10 @@ def test_process_factors() -> None: stagemap=(0,), stages=(0,), 
aug_periods=(0,), - aug_periods_to_periods=frozendict({0: 0}), + aug_periods_to_periods=MappingProxyType({0: 0}), aug_stagemap=(0,), aug_stages=(0,), - aug_stages_to_stages=frozendict({0: 0}), + aug_stages_to_stages=MappingProxyType({0: 0}), ), ) latent_factor = "c" diff --git a/tests/test_filtered_states.py b/tests/test_filtered_states.py index 0a0fd53e..0048deb2 100644 --- a/tests/test_filtered_states.py +++ b/tests/test_filtered_states.py @@ -4,6 +4,7 @@ import pandas as pd import pytest import yaml +from conftest import model_spec_from_yaml_dict from skillmodels.config import TEST_DATA_DIR from skillmodels.filtered_states import get_filtered_states @@ -15,7 +16,7 @@ @pytest.fixture def model2(): with (TEST_DATA_DIR / "model2.yaml").open() as y: - return yaml.load(y, Loader=yaml.SafeLoader) + return model_spec_from_yaml_dict(yaml.load(y, Loader=yaml.SafeLoader)) @pytest.fixture @@ -31,7 +32,7 @@ def test_get_filtered_states(model2, model2_data) -> None: max_inputs = get_maximization_inputs(model2, model2_data) params = params.loc[max_inputs["params_template"].index] - calculated = get_filtered_states(model=model2, data=model2_data, params=params) + calculated = get_filtered_states(model_spec=model2, data=model2_data, params=params) factors = ["fac1", "fac2", "fac3"] expected_ratios = [1.187757, 1, 1] diff --git a/tests/test_likelihood_regression.py b/tests/test_likelihood_regression.py index c55dbf16..41b877c5 100644 --- a/tests/test_likelihood_regression.py +++ b/tests/test_likelihood_regression.py @@ -7,11 +7,13 @@ import pandas as pd import pytest import yaml +from conftest import model_spec_from_yaml_dict from numpy.testing import assert_array_almost_equal as aaae from skillmodels.config import TEST_DATA_DIR from skillmodels.decorators import register_params from skillmodels.maximization_inputs import get_maximization_inputs +from skillmodels.model_spec import ModelSpec from skillmodels.utilities import reduce_n_periods jax.config.update("jax_enable_x64", True) @@ -66,7 +68,7 @@ def linear(fac1, fac2, fac3, params): model["factors"]["fac3"]["transition_function"] = constant else: raise ValueError("Invalid model name.") - return model + return model_spec_from_yaml_dict(model) @pytest.mark.parametrize( @@ -94,8 +96,9 @@ def test_likelihood_values_have_not_changed( def test_splitting_does_not_change_gradient(model2, model2_data) -> None: - inputs = get_maximization_inputs(model2, model2_data) - inputs_split = get_maximization_inputs(model2, model2_data, 13) + model = model_spec_from_yaml_dict(model2) + inputs = get_maximization_inputs(model, model2_data) + inputs_split = get_maximization_inputs(model, model2_data, 13) params = inputs["params_template"] params["value"] = 0.1 @@ -201,7 +204,7 @@ def test_likelihood_runs_with_empty_periods(model2, model2_data) -> None: model2["factors"][factor]["measurements"][-1] = [] model2["factors"][factor]["normalizations"]["loadings"][-1] = {} - func_dict = get_maximization_inputs(model2, model2_data) + func_dict = get_maximization_inputs(model_spec_from_yaml_dict(model2), model2_data) params = func_dict["params_template"] params["value"] = 0.1 @@ -211,8 +214,11 @@ def test_likelihood_runs_with_empty_periods(model2, model2_data) -> None: def test_likelihood_runs_with_too_long_data(model2, model2_data) -> None: - model = reduce_n_periods(model2, 2) - func_dict = get_maximization_inputs(model, model2_data) # ty: ignore[invalid-argument-type] + full_model = model_spec_from_yaml_dict(model2) + reduced = reduce_n_periods(full_model, 2) + assert 
isinstance(reduced, ModelSpec) + model = reduced + func_dict = get_maximization_inputs(model, model2_data) params = func_dict["params_template"] params["value"] = 0.1 @@ -225,7 +231,7 @@ def test_likelihood_runs_with_observed_factors(model2, model2_data) -> None: model2["observed_factors"] = ["ob1", "ob2"] model2_data["ob1"] = np.arange(len(model2_data)) model2_data["ob2"] = np.ones(len(model2_data)) - func_dict = get_maximization_inputs(model2, model2_data) + func_dict = get_maximization_inputs(model_spec_from_yaml_dict(model2), model2_data) params = func_dict["params_template"] params["value"] = 0.1 diff --git a/tests/test_params_index.py b/tests/test_params_index.py index e2d2075d..88837058 100644 --- a/tests/test_params_index.py +++ b/tests/test_params_index.py @@ -1,7 +1,9 @@ +from types import MappingProxyType + import pandas as pd import pytest import yaml -from frozendict import frozendict +from conftest import model_spec_from_yaml_dict from skillmodels.config import TEST_DATA_DIR from skillmodels.params_index import ( @@ -22,7 +24,7 @@ @pytest.fixture def model2_inputs(): with (TEST_DATA_DIR / "model2.yaml").open() as y: - model = yaml.load(y, Loader=yaml.SafeLoader) + model = model_spec_from_yaml_dict(yaml.load(y, Loader=yaml.SafeLoader)) processed = process_model(model) return { @@ -184,9 +186,9 @@ def test_trans_coeffs_index_tuples_no_endogenous_factors() -> None: } trans_info = TransitionInfo( func=lambda x: x, # dummy function - param_names=frozendict(param_names), - individual_functions=frozendict({}), - function_names=frozendict({}), + param_names=MappingProxyType(param_names), + individual_functions=MappingProxyType({}), + function_names=MappingProxyType({}), ) expected = [ @@ -227,9 +229,9 @@ def test_trans_coeffs_index_tuples_has_endogenous_factors() -> None: } trans_info = TransitionInfo( func=lambda x: x, # dummy function - param_names=frozendict(param_names), - individual_functions=frozendict({}), - function_names=frozendict({}), + param_names=MappingProxyType(param_names), + individual_functions=MappingProxyType({}), + function_names=MappingProxyType({}), ) expected = [ diff --git a/tests/test_parse_params.py b/tests/test_parse_params.py index e2853977..fa0f8aac 100644 --- a/tests/test_parse_params.py +++ b/tests/test_parse_params.py @@ -5,12 +5,14 @@ """ +from types import MappingProxyType + import jax.numpy as jnp import numpy as np import pandas as pd import pytest import yaml -from frozendict import frozendict +from conftest import model_spec_from_yaml_dict from numpy.testing import assert_array_equal as aae from skillmodels.config import TEST_DATA_DIR @@ -27,7 +29,7 @@ def parsed_parameters(): ).index with (TEST_DATA_DIR / "model2.yaml").open() as y: - model = yaml.load(y, Loader=yaml.SafeLoader) + model = model_spec_from_yaml_dict(yaml.load(y, Loader=yaml.SafeLoader)) processed = process_model(model) @@ -38,7 +40,7 @@ def parsed_parameters(): # more meaningful test anchoring = Anchoring( anchoring=False, - outcomes=frozendict({}), + outcomes=MappingProxyType({}), factors=(), free_controls=True, free_constant=True, diff --git a/tests/test_process_data.py b/tests/test_process_data.py index 4b6d5d3b..0d6c6774 100644 --- a/tests/test_process_data.py +++ b/tests/test_process_data.py @@ -1,12 +1,13 @@ import io import textwrap +from types import MappingProxyType import jax.numpy as jnp import numpy as np import pandas as pd import pytest import yaml -from frozendict import frozendict +from conftest import model_spec_from_yaml_dict from numpy.testing import 
assert_array_equal as aae from skillmodels.config import TEST_DATA_DIR @@ -48,7 +49,7 @@ def test_pre_process_data() -> None: def simplest_augmented(): out = {} with (TEST_DATA_DIR / "simplest_augmented_model.yaml").open() as y: - out["model"] = yaml.load(y, Loader=yaml.SafeLoader) + out["model"] = model_spec_from_yaml_dict(yaml.load(y, Loader=yaml.SafeLoader)) _df = pd.DataFrame(data=np.arange(15).reshape(3, 5).T, columns=["var", "inv", "of"]) _df["period"] = [1, 1, 2, 1, 2] _df["id"] = [1, 3, 3, 5, 5] @@ -129,10 +130,10 @@ def test_generate_controls_array() -> None: stagemap=(0, 0), stages=(0,), aug_periods=(0, 1), - aug_periods_to_periods=frozendict({0: 0, 1: 1}), + aug_periods_to_periods=MappingProxyType({0: 0, 1: 1}), aug_stagemap=(0, 0), aug_stages=(0,), - aug_stages_to_stages=frozendict({0: 0}), + aug_stages_to_stages=MappingProxyType({0: 0}), ) calculated = _generate_controls_array(data, labels, 2) @@ -158,10 +159,10 @@ def test_generate_observed_factor_array() -> None: stagemap=(0, 0), stages=(0,), aug_periods=(0, 1), - aug_periods_to_periods=frozendict({0: 0, 1: 1}), + aug_periods_to_periods=MappingProxyType({0: 0, 1: 1}), aug_stagemap=(0, 0), aug_stages=(0,), - aug_stages_to_stages=frozendict({0: 0}), + aug_stages_to_stages=MappingProxyType({0: 0}), ) calculated = _generate_observed_factor_array(data, labels, 2) diff --git a/tests/test_process_model.py b/tests/test_process_model.py index d41c4f1b..6256e1d9 100644 --- a/tests/test_process_model.py +++ b/tests/test_process_model.py @@ -3,9 +3,11 @@ import pandas as pd import pytest import yaml +from conftest import model_spec_from_yaml_dict from pandas.testing import assert_frame_equal from skillmodels.config import TEST_DATA_DIR +from skillmodels.model_spec import FactorSpec from skillmodels.process_model import get_has_endogenous_factors, process_model from skillmodels.types import TransitionInfo @@ -17,7 +19,7 @@ @pytest.fixture def model2(): with (TEST_DATA_DIR / "model2.yaml").open() as y: - return yaml.load(y, Loader=yaml.SafeLoader) + return model_spec_from_yaml_dict(yaml.load(y, Loader=yaml.SafeLoader)) def test_has_endogenous_factors(model2) -> None: @@ -124,12 +126,12 @@ def test_normalizations(model2) -> None: def test_anchoring_and_endogenous_factors_work_together() -> None: with (TEST_DATA_DIR / "model2.yaml").open() as y: - model = yaml.load(y, Loader=yaml.SafeLoader) + model_dict = yaml.load(y, Loader=yaml.SafeLoader) # Set fac3 to be endogenous - model["factors"]["fac3"]["is_endogenous"] = True - del model["stagemap"] + model_dict["factors"]["fac3"]["is_endogenous"] = True + del model_dict["stagemap"] # Should not raise - anchoring and endogenous factors now work together - result = process_model(model) + result = process_model(model_spec_from_yaml_dict(model_dict)) # Verify anchoring is enabled assert result.anchoring.anchoring assert result.anchoring.factors == ("fac1",) @@ -147,24 +149,24 @@ def test_anchoring_and_endogenous_factors_work_together() -> None: def test_stagemap_with_endogenous_factors_wrong_labels() -> None: with (TEST_DATA_DIR / "model2.yaml").open() as y: - model = yaml.load(y, Loader=yaml.SafeLoader) + model_dict = yaml.load(y, Loader=yaml.SafeLoader) # Set fac3 to be endogenous - model["factors"]["fac3"]["is_endogenous"] = True - model["stagemap"] = [0, 0, 1, 1, 2, 2, 4] - del model["anchoring"] + model_dict["factors"]["fac3"]["is_endogenous"] = True + model_dict["stagemap"] = [0, 0, 1, 1, 2, 2, 4] + del model_dict["anchoring"] with pytest.raises(ValueError, match="Invalid stage map:"): - 
process_model(model) + process_model(model_spec_from_yaml_dict(model_dict)) def test_stagemap_with_endogenous_factors() -> None: with (TEST_DATA_DIR / "model2.yaml").open() as y: - model = yaml.load(y, Loader=yaml.SafeLoader) + model_dict = yaml.load(y, Loader=yaml.SafeLoader) # Set fac3 to be endogenous - model["factors"]["fac3"]["is_endogenous"] = True + model_dict["factors"]["fac3"]["is_endogenous"] = True stagemap = [0, 0, 1, 1, 2, 2, 3] - model["stagemap"] = stagemap - del model["anchoring"] - processed = process_model(model) + model_dict["stagemap"] = stagemap + del model_dict["anchoring"] + processed = process_model(model_spec_from_yaml_dict(model_dict)) assert processed.labels.stagemap == tuple(stagemap) assert processed.labels.stages == (0, 1, 2, 3) assert processed.labels.aug_stagemap == (0, 1, 0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7) @@ -173,12 +175,12 @@ def test_stagemap_with_endogenous_factors() -> None: @pytest.fixture def model2_inv(): with (TEST_DATA_DIR / "model2.yaml").open() as y: - model = yaml.load(y, Loader=yaml.SafeLoader) + model_dict = yaml.load(y, Loader=yaml.SafeLoader) # Set fac3 to be endogenous - model["factors"]["fac3"]["is_endogenous"] = True - del model["stagemap"] - del model["anchoring"] - return model + model_dict["factors"]["fac3"]["is_endogenous"] = True + del model_dict["stagemap"] + del model_dict["anchoring"] + return model_spec_from_yaml_dict(model_dict) def test_with_endog_has_endogenous_factors(model2_inv) -> None: @@ -372,35 +374,34 @@ def test_with_endog_normalizations(model2_inv) -> None: # ====================================================================================== -def test_model_has_endogenous_factors_not_specified() -> None: - factors = {"a": {}} - assert get_has_endogenous_factors(factors) == False +def _fspec(**kwargs) -> FactorSpec: + """Create a minimal FactorSpec for unit tests.""" + return FactorSpec(measurements=((),), **kwargs) -def test_get_has_endogenous_factors_wrong_type() -> None: - factors = {"a": {"is_endogenous": 3}} - with pytest.raises(ValueError): - get_has_endogenous_factors(factors) +def test_model_has_endogenous_factors_not_specified() -> None: + factors = {"a": _fspec()} + assert get_has_endogenous_factors(factors) == False def test_get_has_endogenous_factors_wrong_constellation() -> None: - factors = {"a": {"is_endogenous": False, "is_correction": True}} + factors = {"a": _fspec(is_endogenous=False, is_correction=True)} with pytest.raises(ValueError): get_has_endogenous_factors(factors) def test_get_has_endogenous_factors_indeed() -> None: factors = { - "a": {"is_endogenous": True, "is_correction": False}, - "b": {"is_endogenous": False, "is_correction": False}, + "a": _fspec(is_endogenous=True, is_correction=False), + "b": _fspec(is_endogenous=False, is_correction=False), } assert get_has_endogenous_factors(factors) == True def test_get_has_endogenous_factors_and_correction() -> None: factors = { - "a": {"is_endogenous": True, "is_correction": False}, - "b": {"is_endogenous": False, "is_correction": False}, - "c": {"is_endogenous": True, "is_correction": True}, + "a": _fspec(is_endogenous=True, is_correction=False), + "b": _fspec(is_endogenous=False, is_correction=False), + "c": _fspec(is_endogenous=True, is_correction=True), } assert get_has_endogenous_factors(factors) == True diff --git a/tests/test_simulate_data.py b/tests/test_simulate_data.py index c799175f..d667cf61 100644 --- a/tests/test_simulate_data.py +++ b/tests/test_simulate_data.py @@ -6,6 +6,7 @@ import pandas as pd import pytest import yaml 
+from conftest import model_spec_from_yaml_dict from numpy.testing import assert_array_almost_equal as aaae from skillmodels.config import TEST_DATA_DIR @@ -22,7 +23,7 @@ @pytest.fixture def model2(): with (TEST_DATA_DIR / "model2.yaml").open() as y: - return yaml.load(y, Loader=yaml.SafeLoader) + return model_spec_from_yaml_dict(yaml.load(y, Loader=yaml.SafeLoader)) @pytest.fixture @@ -37,7 +38,7 @@ def test_simulate_dataset(model2, model2_data) -> None: params = params.set_index(["category", "period", "name1", "name2"]) calculated = simulate_dataset( - model=model, + model_spec=model, params=params, data=model2_data, ) @@ -69,11 +70,11 @@ def test_measurements_from_factors() -> None: def model2_with_endogenous(): """Model2 with fac3 set as endogenous factor.""" with (TEST_DATA_DIR / "model2.yaml").open() as y: - model = yaml.load(y, Loader=yaml.SafeLoader) - model["factors"]["fac3"]["is_endogenous"] = True - del model["stagemap"] - del model["anchoring"] - return model + model_dict = yaml.load(y, Loader=yaml.SafeLoader) + model_dict["factors"]["fac3"]["is_endogenous"] = True + del model_dict["stagemap"] + del model_dict["anchoring"] + return model_spec_from_yaml_dict(model_dict) def test_collapse_aug_periods_to_periods_with_endogenous_factors( diff --git a/tests/test_utilities.py b/tests/test_utilities.py index a6613881..c5fecf5b 100644 --- a/tests/test_utilities.py +++ b/tests/test_utilities.py @@ -9,15 +9,14 @@ import pandas as pd import pytest import yaml +from conftest import model_spec_from_yaml_dict from pandas.testing import assert_frame_equal, assert_index_equal from skillmodels.config import TEST_DATA_DIR +from skillmodels.model_spec import ModelSpec from skillmodels.process_model import process_model from skillmodels.utilities import ( - _get_params_index_from_model_dict, - _remove_from_dict, - _remove_from_list, - _shorten_if_necessary, + _get_params_index, extract_factors, reduce_n_periods, remove_controls, @@ -32,17 +31,19 @@ @pytest.fixture def model2(): with (TEST_DATA_DIR / "model2.yaml").open() as y: - return yaml.load(y, Loader=yaml.SafeLoader) + return model_spec_from_yaml_dict(yaml.load(y, Loader=yaml.SafeLoader)) @pytest.mark.parametrize("factors", ["fac2", ["fac2"]]) def test_extract_factors_single(model2, factors) -> None: reduced = extract_factors(factors, model2) - assert list(reduced["factors"]) == ["fac2"] # ty: ignore[invalid-argument-type] - assert list(model2["factors"]) == ["fac1", "fac2", "fac3"] - assert "anchoring" not in reduced - assert model2["anchoring"]["outcomes"] == {"fac1": "Q1"} - process_model(reduced) # ty: ignore[invalid-argument-type] + assert isinstance(reduced, ModelSpec) + assert list(reduced.factors) == ["fac2"] + assert list(model2.factors) == ["fac1", "fac2", "fac3"] + assert reduced.anchoring is None + assert model2.anchoring is not None + assert dict(model2.anchoring.outcomes) == {"fac1": "Q1"} + process_model(reduced) def test_update_parameter_values() -> None: @@ -64,69 +65,76 @@ def test_update_parameter_values() -> None: @pytest.mark.parametrize("factors", ["fac2", ["fac2"]]) def test_remove_factors(model2, factors) -> None: reduced = remove_factors(factors, model2) - assert list(reduced["factors"]) == ["fac1", "fac3"] # ty: ignore[invalid-argument-type] - assert list(model2["factors"]) == ["fac1", "fac2", "fac3"] - assert "anchoring" in reduced - process_model(reduced) # ty: ignore[invalid-argument-type] + assert isinstance(reduced, ModelSpec) + assert list(reduced.factors) == ["fac1", "fac3"] + assert list(model2.factors) == 
["fac1", "fac2", "fac3"] + assert reduced.anchoring is not None + process_model(reduced) @pytest.mark.parametrize("measurements", ["y5", ["y5"]]) def test_remove_measurements(model2, measurements) -> None: reduced = remove_measurements(measurements, model2) - assert reduced["factors"]["fac2"]["measurements"] == [["y4", "y6"]] * 8 # ty: ignore[invalid-argument-type] - assert "y5" in model2["factors"]["fac2"]["measurements"][0] - process_model(reduced) # ty: ignore[invalid-argument-type] + assert isinstance(reduced, ModelSpec) + for period_meas in reduced.factors["fac2"].measurements: + assert list(period_meas) == ["y4", "y6"] + assert "y5" in model2.factors["fac2"].measurements[0] + process_model(reduced) @pytest.mark.parametrize("controls", ["x1", ["x1"]]) def test_remove_controls(model2, controls) -> None: reduced = remove_controls(controls, model2) - assert "controls" not in reduced - assert "controls" in model2 - process_model(reduced) # ty: ignore[invalid-argument-type] + assert isinstance(reduced, ModelSpec) + assert reduced.controls == () + assert model2.controls == ("x1",) + process_model(reduced) def test_reduce_n_periods(model2) -> None: reduced = reduce_n_periods(model2, 1) - assert reduced["factors"]["fac1"]["measurements"] == [["y1", "y2", "y3"]] # ty: ignore[invalid-argument-type] - assert reduced["factors"]["fac2"]["normalizations"]["loadings"] == [{"y4": 1}] # ty: ignore[invalid-argument-type] - process_model(reduced) # ty: ignore[invalid-argument-type] + assert isinstance(reduced, ModelSpec) + assert list(reduced.factors["fac1"].measurements[0]) == ["y1", "y2", "y3"] + assert len(reduced.factors["fac1"].measurements) == 1 + norms = reduced.factors["fac2"].normalizations + assert norms is not None + assert dict(norms.loadings[0]) == {"y4": 1} + assert len(norms.loadings) == 1 + process_model(reduced) def test_switch_linear_to_translog(model2) -> None: switched = switch_linear_to_translog(model2) - assert switched["factors"]["fac2"]["transition_function"] == "translog" # ty: ignore[invalid-argument-type] + assert isinstance(switched, ModelSpec) + assert switched.factors["fac2"].transition_function == "translog" def test_switch_linear_and_translog_back_and_forth(model2) -> None: with_translog = switch_linear_to_translog(model2) - with_linear = switch_translog_to_linear(with_translog) # ty: ignore[invalid-argument-type] - assert model2 == with_linear - - -@pytest.mark.parametrize("to_remove", ["a", ["a"]]) -def test_remove_from_list(to_remove) -> None: - list_ = ["a", "b", "c"] - calculated = _remove_from_list(list_, to_remove) - assert calculated == ["b", "c"] - assert list_ == ["a", "b", "c"] - - -@pytest.mark.parametrize("to_remove", ["a", ["a"]]) -def test_remove_from_dict(to_remove) -> None: - dict_ = {"a": 1, "b": 2, "c": 3} - calculated = _remove_from_dict(dict_, to_remove) - assert calculated == {"b": 2, "c": 3} - assert dict_ == {"a": 1, "b": 2, "c": 3} + assert isinstance(with_translog, ModelSpec) + with_linear = switch_translog_to_linear(with_translog) + assert isinstance(with_linear, ModelSpec) + # Check equivalence of factors + for name in model2.factors: + orig = model2.factors[name] + back = with_linear.factors[name] + assert orig.measurements == back.measurements + assert orig.normalizations == back.normalizations + assert orig.transition_function == back.transition_function + assert orig.is_endogenous == back.is_endogenous + assert orig.is_correction == back.is_correction def test_reduce_params_via_extract_factors(model2) -> None: model = 
reduce_n_periods(model2, 2) + assert isinstance(model, ModelSpec) - full_index = _get_params_index_from_model_dict(model) # ty: ignore[invalid-argument-type] + full_index = _get_params_index(model) params = pd.DataFrame(columns=["value"], index=full_index) - _, reduced_params = extract_factors("fac3", model, params) # ty: ignore[invalid-argument-type] + result = extract_factors("fac3", model, params) + assert not isinstance(result, ModelSpec) + _, reduced_params = result expected_index = pd.MultiIndex.from_tuples( [ @@ -150,17 +158,20 @@ def test_reduce_params_via_extract_factors(model2) -> None: names=["category", "aug_period", "name1", "name2"], ) - assert_index_equal(reduced_params.index, expected_index) # ty: ignore[invalid-argument-type] + assert_index_equal(reduced_params.index, expected_index) def test_extend_params_via_switch_to_translog(model2) -> None: model = reduce_n_periods(model2, 2) - normal_index = _get_params_index_from_model_dict(model) # ty: ignore[invalid-argument-type] + assert isinstance(model, ModelSpec) + normal_index = _get_params_index(model) params = pd.DataFrame(columns=["value"], index=normal_index) - _, extended_params = switch_linear_to_translog(model, params) # ty: ignore[invalid-argument-type] + result = switch_linear_to_translog(model, params) + assert not isinstance(result, ModelSpec) + _, extended_params = result - added_index = extended_params.index.difference(normal_index) # ty: ignore[possibly-missing-attribute] + added_index = extended_params.index.difference(normal_index) expected_added_index = pd.MultiIndex.from_tuples( [ @@ -176,13 +187,4 @@ def test_extend_params_via_switch_to_translog(model2) -> None: assert_index_equal(added_index, expected_added_index) - assert extended_params.loc[added_index, "value"].unique()[0] == 0.05 # ty: ignore[possibly-missing-attribute] - - -def test_shorten_if_necessary() -> None: - list_ = list(range(3)) - not_necessary = _shorten_if_necessary(list_, 5) - assert not_necessary == list_ - - necessary = _shorten_if_necessary(list_, 2) - assert necessary == [0, 1] + assert extended_params.loc[added_index, "value"].unique()[0] == 0.05 diff --git a/tests/test_visualize_factor_distributions.py b/tests/test_visualize_factor_distributions.py index dc666dc9..1ec41c73 100644 --- a/tests/test_visualize_factor_distributions.py +++ b/tests/test_visualize_factor_distributions.py @@ -2,6 +2,7 @@ import pandas as pd import yaml +from conftest import model_spec_from_yaml_dict from skillmodels.config import TEST_DATA_DIR from skillmodels.filtered_states import get_filtered_states @@ -20,7 +21,7 @@ def test_visualize_factor_distributions_runs_with_filtered_states() -> None: with (TEST_DATA_DIR / "model2.yaml").open() as y: - model = yaml.load(y, Loader=yaml.SafeLoader) + model = model_spec_from_yaml_dict(yaml.load(y, Loader=yaml.SafeLoader)) params = pd.read_csv(REGRESSION_VAULT / "one_stage_anchoring.csv") params = params.set_index(["category", "period", "name1", "name2"]) @@ -32,19 +33,19 @@ def test_visualize_factor_distributions_runs_with_filtered_states() -> None: params = params.loc[max_inputs["params_template"].index] kde = univariate_densities( data=data, - model=model, + model_spec=model, params=params, period=1, ) contours = bivariate_density_contours( data=data, - model=model, + model_spec=model, params=params, period=1, ) surfaces = bivariate_density_surfaces( data=data, - model=model, + model_spec=model, params=params, period=1, ) @@ -57,7 +58,7 @@ def test_visualize_factor_distributions_runs_with_filtered_states() -> 
None: def test_visualize_factor_distributions_runs_with_simulated_states() -> None: with (TEST_DATA_DIR / "model2.yaml").open() as y: - model = yaml.load(y, Loader=yaml.SafeLoader) + model = model_spec_from_yaml_dict(yaml.load(y, Loader=yaml.SafeLoader)) data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") data = data.set_index(["caseid", "period"]) @@ -75,14 +76,14 @@ def test_visualize_factor_distributions_runs_with_simulated_states() -> None: kde = univariate_densities( data=data, states=latent_data, - model=model, + model_spec=model, params=params, period=1, ) contours = bivariate_density_contours( data=data, states=latent_data, - model=model, + model_spec=model, params=params, period=1, ) @@ -100,7 +101,7 @@ def test_visualize_factor_distributions_with_period_indexed_states() -> None: already mapped aug_period to period and dropped the aug_period column. """ with (TEST_DATA_DIR / "model2.yaml").open() as y: - model = yaml.load(y, Loader=yaml.SafeLoader) + model = model_spec_from_yaml_dict(yaml.load(y, Loader=yaml.SafeLoader)) data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") data = data.set_index(["caseid", "period"]) @@ -112,7 +113,7 @@ def test_visualize_factor_distributions_with_period_indexed_states() -> None: params = params.loc[max_inputs["params_template"].index] # Get filtered states and convert to (id, period) index without aug_period - filtered_states = get_filtered_states(model=model, data=data, params=params)[ + filtered_states = get_filtered_states(model_spec=model, data=data, params=params)[ "anchored_states" ]["states"] processed = process_model(model) @@ -129,14 +130,14 @@ def test_visualize_factor_distributions_with_period_indexed_states() -> None: kde = univariate_densities( data=data, states=filtered_states, - model=model, + model_spec=model, params=params, period=1, ) contours = bivariate_density_contours( data=data, states=filtered_states, - model=model, + model_spec=model, params=params, period=1, ) @@ -154,7 +155,7 @@ def test_visualize_factor_distributions_with_both_aug_period_and_period() -> Non in the index (or both as columns). 
""" with (TEST_DATA_DIR / "model2.yaml").open() as y: - model = yaml.load(y, Loader=yaml.SafeLoader) + model = model_spec_from_yaml_dict(yaml.load(y, Loader=yaml.SafeLoader)) data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") data = data.set_index(["caseid", "period"]) @@ -166,7 +167,7 @@ def test_visualize_factor_distributions_with_both_aug_period_and_period() -> Non params = params.loc[max_inputs["params_template"].index] # Get filtered states and add period while keeping aug_period - filtered_states = get_filtered_states(model=model, data=data, params=params)[ + filtered_states = get_filtered_states(model_spec=model, data=data, params=params)[ "anchored_states" ]["states"] processed = process_model(model) @@ -180,14 +181,14 @@ def test_visualize_factor_distributions_with_both_aug_period_and_period() -> Non kde = univariate_densities( data=data, states=filtered_states, - model=model, + model_spec=model, params=params, period=1, ) contours = bivariate_density_contours( data=data, states=filtered_states, - model=model, + model_spec=model, params=params, period=1, ) diff --git a/tests/test_visualize_transition_equations.py b/tests/test_visualize_transition_equations.py index 2b5c1da9..924ba62c 100644 --- a/tests/test_visualize_transition_equations.py +++ b/tests/test_visualize_transition_equations.py @@ -2,6 +2,7 @@ import pandas as pd import yaml +from conftest import model_spec_from_yaml_dict from skillmodels.config import TEST_DATA_DIR from skillmodels.maximization_inputs import get_maximization_inputs @@ -15,9 +16,10 @@ def test_visualize_transition_equations_runs() -> None: with (TEST_DATA_DIR / "model2.yaml").open() as y: - model = yaml.load(y, Loader=yaml.SafeLoader) + model_dict = yaml.load(y, Loader=yaml.SafeLoader) - model["observed_factors"] = ["ob1"] + model_dict["observed_factors"] = ["ob1"] + model = model_spec_from_yaml_dict(model_dict) params = pd.read_csv(REGRESSION_VAULT / "one_stage_anchoring.csv") params = params.set_index(["category", "period", "name1", "name2"]) @@ -31,7 +33,7 @@ def test_visualize_transition_equations_runs() -> None: params = params.reindex(full_index) params["value"] = params["value"].fillna(0) subplots = get_transition_plots( - model=model, + model_spec=model, params=params, period=0, quantiles_of_other_factors=[0.1, 0.25, 0.5, 0.75, 0.9], @@ -39,7 +41,7 @@ def test_visualize_transition_equations_runs() -> None: ) combine_transition_plots(subplots) subplots = get_transition_plots( - model=model, + model_spec=model, params=params, period=0, quantiles_of_other_factors=None, From 6934f793870bafa4e3df4c7487fb41159bfe331e Mon Sep 17 00:00:00 2001 From: Hans-Martin von Gaudecker Date: Fri, 30 Jan 2026 13:28:03 +0100 Subject: [PATCH 26/27] Replace yaml model specifications by ModelSpec-s. 
--- docs/getting_started/tutorial.ipynb | 19 ++-- .../how_to_simulate_dataset.ipynb | 8 +- .../how_to_visualize_correlations.ipynb | 8 +- ...sualize_pairwise_factor_distribution.ipynb | 66 ++++++-------- ...ow_to_visualize_transition_equations.ipynb | 6 +- docs/how_to_guides/model_specs.md | 43 ++------- docs/myst.yml | 2 + pixi.lock | 12 +-- pyproject.toml | 1 - src/skillmodels/model_spec.py | 50 ++++++---- src/skillmodels/process_model.py | 31 +++---- src/skillmodels/test_data/__init__.py | 1 + src/skillmodels/test_data/model2.py | 57 ++++++++++++ src/skillmodels/test_data/model2.yaml | 85 ----------------- .../test_data/simplest_augmented_model.py | 39 ++++++++ .../test_data/simplest_augmented_model.yaml | 28 ------ src/skillmodels/types.py | 91 ++++++++++++++++--- src/skillmodels/utilities.py | 19 ++-- tests/conftest.py | 15 --- tests/test_constraints.py | 26 ++---- tests/test_correlation_heatmap.py | 6 +- tests/test_filtered_states.py | 6 +- tests/test_likelihood_regression.py | 81 ++++++++++------- tests/test_params_index.py | 21 ++--- tests/test_parse_params.py | 12 +-- tests/test_process_data.py | 15 ++- tests/test_process_model.py | 62 ++++++------- tests/test_simulate_data.py | 21 +++-- tests/test_utilities.py | 7 +- tests/test_visualize_factor_distributions.py | 15 +-- tests/test_visualize_transition_equations.py | 9 +- 31 files changed, 410 insertions(+), 452 deletions(-) create mode 100644 src/skillmodels/test_data/__init__.py create mode 100644 src/skillmodels/test_data/model2.py delete mode 100644 src/skillmodels/test_data/model2.yaml create mode 100644 src/skillmodels/test_data/simplest_augmented_model.py delete mode 100644 src/skillmodels/test_data/simplest_augmented_model.yaml diff --git a/docs/getting_started/tutorial.ipynb b/docs/getting_started/tutorial.ipynb index 622d1b6f..b123b437 100644 --- a/docs/getting_started/tutorial.ipynb +++ b/docs/getting_started/tutorial.ipynb @@ -20,10 +20,10 @@ "outputs": [], "source": [ "import pandas as pd\n", - "import yaml\n", "\n", - "from skillmodels import ModelSpec, get_maximization_inputs\n", - "from skillmodels.config import TEST_DATA_DIR" + "from skillmodels import get_maximization_inputs\n", + "from skillmodels.config import TEST_DATA_DIR\n", + "from skillmodels.test_data.model2 import MODEL2" ] }, { @@ -31,12 +31,12 @@ "id": "2", "metadata": {}, "source": [ - "## Loading Model Specification and Data\n", + "" ] }, { @@ -46,10 +46,7 @@ "metadata": {}, "outputs": [], "source": [ - "with (TEST_DATA_DIR / \"model2.yaml\").open() as f:\n", - " model_dict = yaml.safe_load(f)\n", - "\n", - "model = ModelSpec.from_dict(model_dict)\n", + "model = MODEL2\n", "\n", "# Show the structure\n", "print(\"Factors:\", list(model.factors.keys()))" diff --git a/docs/how_to_guides/how_to_simulate_dataset.ipynb b/docs/how_to_guides/how_to_simulate_dataset.ipynb index cd0f7831..e15340fa 100644 --- a/docs/how_to_guides/how_to_simulate_dataset.ipynb +++ b/docs/how_to_guides/how_to_simulate_dataset.ipynb @@ -7,11 +7,10 @@ "outputs": [], "source": [ "import pandas as pd\n", - "import yaml\n", "\n", "from skillmodels.config import REGRESSION_VAULT, TEST_DATA_DIR\n", - "from skillmodels.model_spec import ModelSpec\n", - "from skillmodels.simulate_data import simulate_dataset" + "from skillmodels.simulate_data import simulate_dataset\n", + "from skillmodels.test_data.model2 import MODEL2" ] }, { @@ -35,8 +34,7 @@ "metadata": {}, "outputs": [], "source": [ - "with (TEST_DATA_DIR / \"model2.yaml\").open() as y:\n", - " model = 
ModelSpec.from_dict(yaml.load(y, Loader=yaml.SafeLoader))\n", + "model = MODEL2\n", "\n", "data = pd.read_stata(TEST_DATA_DIR / \"model2_simulated_data.dta\")\n", "data = data.set_index([\"caseid\", \"period\"])\n", diff --git a/docs/how_to_guides/how_to_visualize_correlations.ipynb b/docs/how_to_guides/how_to_visualize_correlations.ipynb index e290bf21..57e2c484 100644 --- a/docs/how_to_guides/how_to_visualize_correlations.ipynb +++ b/docs/how_to_guides/how_to_visualize_correlations.ipynb @@ -14,7 +14,6 @@ "outputs": [], "source": [ "import pandas as pd\n", - "import yaml\n", "\n", "from skillmodels.config import REGRESSION_VAULT, TEST_DATA_DIR\n", "from skillmodels.correlation_heatmap import (\n", @@ -23,9 +22,7 @@ " get_scores_corr,\n", " plot_correlation_heatmap,\n", ")\n", - "from skillmodels.model_spec import ModelSpec\n", - "\n", - "%load_ext nb_black" + "from skillmodels.test_data.model2 import MODEL2" ] }, { @@ -41,8 +38,7 @@ "metadata": {}, "outputs": [], "source": [ - "with (TEST_DATA_DIR / \"model2.yaml\").open() as y:\n", - " model = ModelSpec.from_dict(yaml.load(y, Loader=yaml.SafeLoader))" + "model = MODEL2" ] }, { diff --git a/docs/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb b/docs/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb index 32e99566..73831c95 100644 --- a/docs/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb +++ b/docs/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb @@ -6,25 +6,14 @@ "id": "0", "metadata": {}, "outputs": [], - "source": [ - "%load_ext nb_black" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], "source": [ "import numpy as np\n", "import pandas as pd\n", - "import yaml\n", "\n", "from skillmodels.config import REGRESSION_VAULT, TEST_DATA_DIR\n", "from skillmodels.maximization_inputs import get_maximization_inputs\n", - "from skillmodels.model_spec import ModelSpec\n", "from skillmodels.simulate_data import simulate_dataset\n", + "from skillmodels.test_data.model2 import MODEL2\n", "from skillmodels.visualize_factor_distributions import (\n", " bivariate_density_contours,\n", " bivariate_density_surfaces,\n", @@ -35,7 +24,7 @@ }, { "cell_type": "markdown", - "id": "2", + "id": "1", "metadata": {}, "source": [ "# How to visualize the distribution of latent factors\n", @@ -54,13 +43,11 @@ { "cell_type": "code", "execution_count": null, - "id": "3", + "id": "2", "metadata": {}, "outputs": [], "source": [ - "with (TEST_DATA_DIR / \"model2.yaml\").open() as y:\n", - " model_dict = yaml.load(y, Loader=yaml.SafeLoader)\n", - " model = ModelSpec.from_dict(model_dict)\n", + "model = MODEL2\n", "params = pd.read_csv(REGRESSION_VAULT / \"one_stage_anchoring.csv\")\n", "params = params.set_index([\"category\", \"period\", \"name1\", \"name2\"])\n", "\n", @@ -70,7 +57,7 @@ }, { "cell_type": "markdown", - "id": "4", + "id": "3", "metadata": {}, "source": [ "## Plotting one dataset of states" @@ -79,7 +66,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5", + "id": "4", "metadata": {}, "outputs": [], "source": [ @@ -100,7 +87,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6", + "id": "5", "metadata": {}, "outputs": [], "source": [ @@ -115,7 +102,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "6", "metadata": {}, "outputs": [], "source": [ @@ -129,7 +116,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ 
-138,7 +125,7 @@ }, { "cell_type": "markdown", - "id": "9", + "id": "8", "metadata": {}, "source": [ "## (Outdated) Optional arguments of the plotting function\n", @@ -154,7 +141,7 @@ }, { "cell_type": "markdown", - "id": "10", + "id": "9", "metadata": {}, "source": [ "## Getting simulated datasets (with and without policy)\n", @@ -165,7 +152,7 @@ { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -177,7 +164,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "11", "metadata": {}, "outputs": [], "source": [ @@ -190,7 +177,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "12", "metadata": {}, "outputs": [], "source": [ @@ -204,7 +191,7 @@ }, { "cell_type": "markdown", - "id": "14", + "id": "13", "metadata": {}, "source": [ "## Plotting differences in distributions" @@ -213,7 +200,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -236,7 +223,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -246,7 +233,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -255,7 +242,7 @@ }, { "cell_type": "markdown", - "id": "18", + "id": "17", "metadata": {}, "source": [ "All the optional arguments stay the same. The only difference ist that 3d plots do not work for several datasets." @@ -263,7 +250,7 @@ }, { "cell_type": "markdown", - "id": "19", + "id": "18", "metadata": {}, "source": [ "# Plotting with observed factors" @@ -272,18 +259,17 @@ { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "19", "metadata": {}, "outputs": [], "source": [ - "model_dict[\"observed_factors\"] = [\"obs1\"]\n", - "model = ModelSpec.from_dict(model_dict)" + "model = MODEL2.with_added_observed_factors(\"obs1\")" ] }, { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "20", "metadata": {}, "outputs": [], "source": [ @@ -294,7 +280,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "21", "metadata": {}, "outputs": [], "source": [ @@ -305,7 +291,7 @@ { "cell_type": "code", "execution_count": null, - "id": "23", + "id": "22", "metadata": {}, "outputs": [], "source": [ @@ -328,7 +314,7 @@ { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "23", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/how_to_guides/how_to_visualize_transition_equations.ipynb b/docs/how_to_guides/how_to_visualize_transition_equations.ipynb index efece1f8..d4581df2 100644 --- a/docs/how_to_guides/how_to_visualize_transition_equations.ipynb +++ b/docs/how_to_guides/how_to_visualize_transition_equations.ipynb @@ -8,10 +8,9 @@ "outputs": [], "source": [ "import pandas as pd\n", - "import yaml\n", "\n", "from skillmodels.config import REGRESSION_VAULT, TEST_DATA_DIR\n", - "from skillmodels.model_spec import ModelSpec\n", + "from skillmodels.test_data.model2 import MODEL2\n", "from skillmodels.visualize_transition_equations import (\n", " combine_transition_plots,\n", " get_transition_plots,\n", @@ -48,8 +47,7 @@ "metadata": {}, "outputs": [], "source": [ - "with (TEST_DATA_DIR / \"model2.yaml\").open() as y:\n", - " model = ModelSpec.from_dict(yaml.load(y, Loader=yaml.SafeLoader))\n", + "model = MODEL2\n", "\n", "params = pd.read_csv(REGRESSION_VAULT / \"one_stage_anchoring.csv\")\n", "params = params.set_index([\"category\", 
\"period\", \"name1\", \"name2\"])\n", diff --git a/docs/how_to_guides/model_specs.md b/docs/how_to_guides/model_specs.md index 8ec71f95..39680e14 100644 --- a/docs/how_to_guides/model_specs.md +++ b/docs/how_to_guides/model_specs.md @@ -5,8 +5,6 @@ Models are specified using Python dataclasses. ## Defining a Model ```python -from types import MappingProxyType - from skillmodels import ( AnchoringSpec, EstimationOptionsSpec, @@ -24,10 +22,10 @@ fac1 = FactorSpec( ), normalizations=Normalizations( loadings=( - MappingProxyType({"y1": 1.0}), # fix loading of y1 to 1 in period 0 - MappingProxyType({}), + {"y1": 1.0}, # fix loading of y1 to 1 in period 0 + {}, ), - intercepts=(MappingProxyType({}), MappingProxyType({})), + intercepts=({}, {}), ), transition_function="log_ces", ) @@ -36,7 +34,7 @@ fac1 = FactorSpec( model = ModelSpec( factors={"fac1": fac1, "fac2": fac2, "fac3": fac3}, anchoring=AnchoringSpec( - outcomes=MappingProxyType({"fac1": "Q1"}), + outcomes={"fac1": "Q1"}, free_loadings=True, ), controls=("x1", "x2"), @@ -45,35 +43,12 @@ model = ModelSpec( ) ``` -For a more ergonomic approach, use `ModelSpec.from_dict()` which accepts plain Python -lists and dicts: - -```python -from skillmodels import ModelSpec - -model = ModelSpec.from_dict({ - "factors": { - "fac1": { - "measurements": [["y1", "y2", "y3"], ["y1", "y2", "y3"]], - "normalizations": { - "loadings": [{"y1": 1.0}, {}], - "intercepts": [{}, {}], - }, - "transition_function": "log_ces", - }, - }, - "anchoring": {"outcomes": {"fac1": "Q1"}, "free_loadings": True}, - "controls": ["x1", "x2"], - "stagemap": [0, 0, 1, 1, 2, 2, 3], -}) -``` - ## Factor Specification Each factor requires: -- **measurements**: A nested list with measurement variable names for each period. Empty - lists indicate no measurements in that period. +- **measurements**: A nested tuple with measurement variable names for each period. + Empty tuples indicate no measurements in that period. - **transition_function**: Name of a transition function (`linear`, `log_ces`, `constant`, `translog`) or a custom function. - **normalizations** (optional): Fixed values for loadings and intercepts to identify @@ -94,7 +69,7 @@ Anchoring links latent factors to observable outcomes. Options: ## Controls -A list of variable names used as control variables in measurement equations. A constant +A tuple of variable names used as control variables in measurement equations. A constant is always included automatically. ## Stagemap @@ -102,7 +77,7 @@ is always included automatically. Maps periods to development stages. Has one entry less than the number of periods. Parameters are constrained to be equal within a stage. -Example: `[0, 0, 1, 1]` means periods 0-1 share stage 0 parameters, and periods 2-3 +Example: `(0, 0, 1, 1)` means periods 0-1 share stage 0 parameters, and periods 2-3 share stage 1 parameters. ## Observed Factors @@ -113,7 +88,7 @@ transition equations or multiple measurements. 
```python model = ModelSpec( factors={...}, - observed_factors=["income", "treatment"], + observed_factors=("income", "treatment"), ) ``` diff --git a/docs/myst.yml b/docs/myst.yml index ba1be062..90508429 100644 --- a/docs/myst.yml +++ b/docs/myst.yml @@ -22,6 +22,8 @@ project: - python github: https://github.com/OpenSourceEconomics/skillmodels jupyter: true + execute: + enabled: true toc: - file: index.md - title: Getting Started diff --git a/pixi.lock b/pixi.lock index b511af04..761c5859 100644 --- a/pixi.lock +++ b/pixi.lock @@ -2869,7 +2869,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/74/18/8dd4fe6df1fd66f3e83b4798eddb1d8482d9d9b105f25099b76703402ebb/ty-0.0.11-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl - pypi: https://files.pythonhosted.org/packages/e7/c1/56ef16bf5dcd255155cc736d276efa6ae0a5c26fd685e28f0412a4013c01/types_pytz-2025.2.0.20251108-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/bd/e0/1eed384f02555dde685fff1a1ac805c1c7dcb6dd019c916fe659b1c1f9ec/types_pyyaml-6.0.12.20250915-py3-none-any.whl - pypi: ./ osx-arm64: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/_openmp_mutex-4.5-7_kmp_llvm.conda @@ -3098,7 +3097,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/ad/01/3a563dba8b1255e474c35e1c3810b7589e81ae8c41df401b6a37c8e2cde9/ty-0.0.11-py3-none-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/e7/c1/56ef16bf5dcd255155cc736d276efa6ae0a5c26fd685e28f0412a4013c01/types_pytz-2025.2.0.20251108-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/bd/e0/1eed384f02555dde685fff1a1ac805c1c7dcb6dd019c916fe659b1c1f9ec/types_pyyaml-6.0.12.20250915-py3-none-any.whl - pypi: ./ win-64: - conda: https://conda.anaconda.org/conda-forge/win-64/_openmp_mutex-4.5-2_gnu.conda @@ -3326,7 +3324,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/df/04/5a5dfd0aec0ea99ead1e824ee6e347fb623c464da7886aa1e3660fb0f36c/ty-0.0.11-py3-none-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/e7/c1/56ef16bf5dcd255155cc736d276efa6ae0a5c26fd685e28f0412a4013c01/types_pytz-2025.2.0.20251108-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/bd/e0/1eed384f02555dde685fff1a1ac805c1c7dcb6dd019c916fe659b1c1f9ec/types_pyyaml-6.0.12.20250915-py3-none-any.whl - pypi: ./ packages: - conda: https://conda.anaconda.org/conda-forge/linux-64/_libgcc_mutex-0.1-conda_forge.tar.bz2 @@ -9168,8 +9165,8 @@ packages: timestamp: 1753199211006 - pypi: ./ name: skillmodels - version: 0.0.24.dev261+g5012a8bbc.d20260129 - sha256: 9293ca106ac1c10ef614af441b7e40cb7282ea24b26d4b5775398f556e5e2dc4 + version: 0.0.24.dev262+g7e7784e59.d20260130 + sha256: d7b8e677f24dc24e6a4c7b146f578efbc96c4494d1242caaf376ae8a7453f5f4 requires_dist: - dags - jax>=0.8 @@ -9557,11 +9554,6 @@ packages: version: 2025.2.0.20251108 sha256: 0f1c9792cab4eb0e46c52f8845c8f77cf1e313cb3d68bf826aa867fe4717d91c requires_python: '>=3.9' -- pypi: 
https://files.pythonhosted.org/packages/bd/e0/1eed384f02555dde685fff1a1ac805c1c7dcb6dd019c916fe659b1c1f9ec/types_pyyaml-6.0.12.20250915-py3-none-any.whl - name: types-pyyaml - version: 6.0.12.20250915 - sha256: e7d4d9e064e89a3b3cae120b4990cd370874d2bf12fa5f46c97018dd5d3c9ab6 - requires_python: '>=3.9' - conda: https://conda.anaconda.org/conda-forge/noarch/typing-extensions-4.15.0-h396c80c_0.conda sha256: 7c2df5721c742c2a47b2c8f960e718c930031663ac1174da67c1ed5999f7938c md5: edd329d7d3a4ab45dcf905899a7a6115 diff --git a/pyproject.toml b/pyproject.toml index a2d5cbbc..6144dd45 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -138,7 +138,6 @@ mem-cuda = "pytest -x -s --pdb --memray --fail-on-increase tests/test_likelihood matplotlib = "*" # required because of pandas pandas-stubs = "*" ty = "*" -types-PyYAML = "*" types-pytz = "*" [tool.pixi.feature.ty.tasks] diff --git a/src/skillmodels/model_spec.py b/src/skillmodels/model_spec.py index 235310c0..b0759cd8 100644 --- a/src/skillmodels/model_spec.py +++ b/src/skillmodels/model_spec.py @@ -6,11 +6,13 @@ modified. """ -from collections.abc import Callable +from collections.abc import Callable, Mapping from dataclasses import dataclass, field, replace from types import MappingProxyType from typing import Any, Self +from skillmodels.types import _make_immutable, ensure_containers_are_immutable + @dataclass(frozen=True) class Normalizations: @@ -24,8 +26,20 @@ class Normalizations: """ - loadings: tuple[MappingProxyType[str, float], ...] - intercepts: tuple[MappingProxyType[str, float], ...] + loadings: tuple[Mapping[str, float], ...] + intercepts: tuple[Mapping[str, float], ...] + + def __post_init__(self) -> None: # noqa: D105 + object.__setattr__( + self, + "loadings", + tuple(_make_immutable(dict(m)) for m in self.loadings), + ) + object.__setattr__( + self, + "intercepts", + tuple(_make_immutable(dict(m)) for m in self.intercepts), + ) def to_dict(self) -> dict: """Convert to dictionary for backwards compatibility.""" @@ -119,10 +133,6 @@ def to_dict(self) -> dict: return result -def _default_empty_mapping_proxy() -> MappingProxyType[str, str]: - return MappingProxyType({}) - - @dataclass(frozen=True) class AnchoringSpec: """Specification for anchoring latent factors to outcomes. @@ -136,14 +146,17 @@ class AnchoringSpec: """ - outcomes: MappingProxyType[str, str] = field( - default_factory=_default_empty_mapping_proxy, - ) + outcomes: Mapping[str, str] = field(default_factory=dict) free_controls: bool = False free_constant: bool = False free_loadings: bool = False ignore_constant_when_anchoring: bool = False + def __post_init__(self) -> None: # noqa: D105 + object.__setattr__( + self, "outcomes", ensure_containers_are_immutable(self.outcomes) + ) + def to_dict(self) -> dict: """Convert to dictionary for backwards compatibility.""" return { @@ -181,7 +194,7 @@ class ModelSpec: def __init__( self, - factors: dict[str, FactorSpec] | MappingProxyType[str, FactorSpec], + factors: Mapping[str, FactorSpec], observed_factors: tuple[str, ...] = (), controls: tuple[str, ...] = (), stagemap: tuple[int, ...] 
| None = None, @@ -189,10 +202,7 @@ def __init__( estimation_options: EstimationOptionsSpec | None = None, ) -> None: """Create ModelSpec, wrapping factors dict in MappingProxyType.""" - if isinstance(factors, MappingProxyType): - object.__setattr__(self, "_factors", factors) - else: - object.__setattr__(self, "_factors", MappingProxyType(factors)) + object.__setattr__(self, "_factors", ensure_containers_are_immutable(factors)) object.__setattr__(self, "observed_factors", observed_factors) object.__setattr__(self, "controls", controls) object.__setattr__(self, "stagemap", stagemap) @@ -220,8 +230,8 @@ def from_dict(cls, d: dict[str, Any]) -> Self: n_periods = len(nd.get("loadings", [])) nd["intercepts"] = [{} for _ in range(n_periods)] normalizations = Normalizations( - loadings=tuple(MappingProxyType(x) for x in nd["loadings"]), - intercepts=tuple(MappingProxyType(x) for x in nd["intercepts"]), + loadings=tuple(nd["loadings"]), + intercepts=tuple(nd["intercepts"]), ) factors[name] = FactorSpec( measurements=tuple(tuple(m) for m in spec["measurements"]), @@ -235,7 +245,7 @@ def from_dict(cls, d: dict[str, Any]) -> Self: if "anchoring" in d: ad = d["anchoring"] anchoring = AnchoringSpec( - outcomes=MappingProxyType(ad.get("outcomes", {})), + outcomes=ad.get("outcomes", {}), free_controls=ad.get("free_controls", False), free_constant=ad.get("free_constant", False), free_loadings=ad.get("free_loadings", False), @@ -336,7 +346,7 @@ def with_transition_functions( name: spec.with_transition_function(transition_functions[name]) for name, spec in self.factors.items() } - return self._replace(factors=MappingProxyType(new_factors)) + return self._replace(factors=new_factors) def with_added_factor( self, @@ -355,7 +365,7 @@ def with_added_factor( """ new_factors = dict(self.factors) new_factors[name] = spec - return self._replace(factors=MappingProxyType(new_factors)) + return self._replace(factors=new_factors) def with_added_observed_factors( self, diff --git a/src/skillmodels/process_model.py b/src/skillmodels/process_model.py index 8b52d7eb..3745e9a7 100644 --- a/src/skillmodels/process_model.py +++ b/src/skillmodels/process_model.py @@ -3,7 +3,6 @@ from collections.abc import KeysView, Mapping from dataclasses import replace from functools import partial -from types import MappingProxyType import numpy as np import pandas as pd @@ -244,10 +243,10 @@ def _get_labels( stagemap=tuple(stagemap), stages=tuple(stages), aug_periods=tuple(aug_periods_to_periods.keys()), - aug_periods_to_periods=MappingProxyType(aug_periods_to_periods), + aug_periods_to_periods=aug_periods_to_periods, aug_stagemap=tuple(aug_stagemap), aug_stages=tuple(sorted(int(v) for v in np.unique(aug_stagemap))), - aug_stages_to_stages=MappingProxyType(aug_stages_to_stages), + aug_stages_to_stages=aug_stages_to_stages, ) @@ -339,7 +338,7 @@ def _augment_periods_for_endogenous_factors( # Insert empty elements into normalizations when we do not have those. 
aug_normalizations = None if fspec.normalizations is not None: - aug_norm_parts: dict[str, tuple[MappingProxyType[str, float], ...]] = {} + aug_norm_parts: dict[str, tuple[Mapping[str, float], ...]] = {} for norm_type in ("loadings", "intercepts"): norms = getattr(fspec.normalizations, norm_type) if len(norms) != dimensions.n_periods: @@ -348,7 +347,7 @@ def _augment_periods_for_endogenous_factors( f"got {norms} for {fac}['normalizations']['{norm_type}']" ) aug_norm_parts[norm_type] = tuple( - MappingProxyType({}) if aug_p % 2 == insert_at_modulo else norms[p] + {} if aug_p % 2 == insert_at_modulo else norms[p] for aug_p, p in labels.aug_periods_to_periods.items() ) aug_normalizations = Normalizations( @@ -364,7 +363,7 @@ def _augment_periods_for_endogenous_factors( transition_function=fspec.transition_function, ) - return model_spec._replace(factors=MappingProxyType(new_factors)) + return model_spec._replace(factors=new_factors) def _get_transition_info(model_spec: ModelSpec, labels: Labels) -> TransitionInfo: @@ -428,13 +427,9 @@ def _extract_factor(states: Array, pos: int) -> Array: return TransitionInfo( func=transition_function, - param_names=MappingProxyType( - dict(zip(latent_factors, param_names, strict=False)) - ), - individual_functions=MappingProxyType(individual_functions), - function_names=MappingProxyType( - dict(zip(latent_factors, function_names, strict=False)) - ), + param_names=dict(zip(latent_factors, param_names, strict=False)), + individual_functions=individual_functions, + function_names=dict(zip(latent_factors, function_names, strict=False)), ) @@ -455,18 +450,16 @@ def _get_endogenous_factors_info( return EndogenousFactorsInfo( has_endogenous_factors=has_endogenous_factors, - aug_periods_to_aug_period_meas_types=MappingProxyType( - _get_aug_periods_to_aug_period_meas_types( - aug_periods=labels.aug_periods_to_periods.keys(), - has_endogenous_factors=has_endogenous_factors, - ) + aug_periods_to_aug_period_meas_types=_get_aug_periods_to_aug_period_meas_types( + aug_periods=labels.aug_periods_to_periods.keys(), + has_endogenous_factors=has_endogenous_factors, ), bounds_distance=bounds_distance, aug_periods_from_period=partial( _aug_periods_from_period, aug_periods_to_periods=labels.aug_periods_to_periods, ), - factor_info=MappingProxyType(factor_info), + factor_info=factor_info, ) diff --git a/src/skillmodels/test_data/__init__.py b/src/skillmodels/test_data/__init__.py new file mode 100644 index 00000000..f4fd042b --- /dev/null +++ b/src/skillmodels/test_data/__init__.py @@ -0,0 +1 @@ +"""Test data and example model specifications for skillmodels.""" diff --git a/src/skillmodels/test_data/model2.py b/src/skillmodels/test_data/model2.py new file mode 100644 index 00000000..7ae9e3f3 --- /dev/null +++ b/src/skillmodels/test_data/model2.py @@ -0,0 +1,57 @@ +"""Model 2 from the replication files of Cunha, Heckman, and Schennach (2010). + +This model has three latent factors (fac1, fac2, fac3) observed over 8 periods, +with CES, linear, and constant transition functions respectively. It includes +anchoring of fac1 to outcome Q1 and a single control variable x1. 
+""" + +from skillmodels.model_spec import ( + AnchoringSpec, + EstimationOptionsSpec, + FactorSpec, + ModelSpec, + Normalizations, +) + +MODEL2 = ModelSpec( + factors={ + "fac1": FactorSpec( + measurements=(("y1", "y2", "y3"),) * 8, + normalizations=Normalizations( + loadings=({"y1": 1},) * 8, + intercepts=({},) * 8, + ), + transition_function="log_ces", + ), + "fac2": FactorSpec( + measurements=(("y4", "y5", "y6"),) * 8, + normalizations=Normalizations( + loadings=({"y4": 1},) * 8, + intercepts=({},) * 8, + ), + transition_function="linear", + ), + "fac3": FactorSpec( + measurements=(("y7", "y8", "y9"),) + ((),) * 7, + normalizations=Normalizations( + loadings=({"y7": 1},) + ({},) * 7, + intercepts=({},) * 8, + ), + transition_function="constant", + ), + }, + anchoring=AnchoringSpec( + outcomes={"fac1": "Q1"}, + free_controls=True, + free_constant=True, + free_loadings=True, + ignore_constant_when_anchoring=True, + ), + controls=("x1",), + stagemap=(0, 0, 0, 0, 0, 0, 0), + estimation_options=EstimationOptionsSpec( + robust_bounds=True, + bounds_distance=0.001, + n_mixtures=1, + ), +) diff --git a/src/skillmodels/test_data/model2.yaml b/src/skillmodels/test_data/model2.yaml deleted file mode 100644 index 3b80ca3d..00000000 --- a/src/skillmodels/test_data/model2.yaml +++ /dev/null @@ -1,85 +0,0 @@ ---- -factors: - fac1: - measurements: - - [y1, y2, y3] - - [y1, y2, y3] - - [y1, y2, y3] - - [y1, y2, y3] - - [y1, y2, y3] - - [y1, y2, y3] - - [y1, y2, y3] - - [y1, y2, y3] - transition_function: log_ces - normalizations: - loadings: - - {y1: 1} - - {y1: 1} - - {y1: 1} - - {y1: 1} - - {y1: 1} - - {y1: 1} - - {y1: 1} - - {y1: 1} - fac2: - measurements: - - [y4, y5, y6] - - [y4, y5, y6] - - [y4, y5, y6] - - [y4, y5, y6] - - [y4, y5, y6] - - [y4, y5, y6] - - [y4, y5, y6] - - [y4, y5, y6] - transition_function: linear - normalizations: - loadings: - - {y4: 1} - - {y4: 1} - - {y4: 1} - - {y4: 1} - - {y4: 1} - - {y4: 1} - - {y4: 1} - - {y4: 1} - fac3: - measurements: - - [y7, y8, y9] - - [] - - [] - - [] - - [] - - [] - - [] - - [] - transition_function: constant - normalizations: - loadings: - - {y7: 1} - - {} - - {} - - {} - - {} - - {} - - {} - - {} -anchoring: - outcomes: {fac1: Q1} - free_controls: true - free_constant: true - free_loadings: true - ignore_constant_when_anchoring: true -controls: - - x1 -stagemap: - - 0 - - 0 - - 0 - - 0 - - 0 - - 0 - - 0 -estimation_options: - robust_bounds: true - bounds_distance: 0.001 - n_mixtures: 1 diff --git a/src/skillmodels/test_data/simplest_augmented_model.py b/src/skillmodels/test_data/simplest_augmented_model.py new file mode 100644 index 00000000..89eeb267 --- /dev/null +++ b/src/skillmodels/test_data/simplest_augmented_model.py @@ -0,0 +1,39 @@ +"""Simplest augmented model with endogenous factors. + +A minimal model with two latent factors (fac1, fac2) and one observed factor (of). +Factor fac2 is endogenous. Both factors use linear transition functions with two +periods. Used for testing endogenous factor augmentation. 
+""" + +from skillmodels.model_spec import ( + EstimationOptionsSpec, + FactorSpec, + ModelSpec, + Normalizations, +) + +SIMPLEST_AUGMENTED_MODEL = ModelSpec( + factors={ + "fac1": FactorSpec( + measurements=(("var",), ("var",)), + normalizations=Normalizations( + loadings=({"var": 1}, {"var": 1}), + intercepts=({}, {}), + ), + transition_function="linear", + ), + "fac2": FactorSpec( + measurements=(("inv",), ("inv",)), + normalizations=Normalizations( + loadings=({"inv": 1}, {"inv": 1}), + intercepts=({}, {}), + ), + is_endogenous=True, + transition_function="linear", + ), + }, + observed_factors=("of",), + estimation_options=EstimationOptionsSpec( + bounds_distance=1e-8, + ), +) diff --git a/src/skillmodels/test_data/simplest_augmented_model.yaml b/src/skillmodels/test_data/simplest_augmented_model.yaml deleted file mode 100644 index 04e892b4..00000000 --- a/src/skillmodels/test_data/simplest_augmented_model.yaml +++ /dev/null @@ -1,28 +0,0 @@ ---- -factors: - fac1: - is_correction: false - is_endogenous: false - measurements: - - - var - - - var - normalizations: - loadings: - - {var: 1} - - {var: 1} - transition_function: linear - fac2: - is_correction: false - is_endogenous: true - measurements: - - - inv - - - inv - normalizations: - loadings: - - {inv: 1} - - {inv: 1} - transition_function: linear -observed_factors: - - of -estimation_options: - bounds_distance: 0.00000001 diff --git a/src/skillmodels/types.py b/src/skillmodels/types.py index fd066786..5ef7d53b 100644 --- a/src/skillmodels/types.py +++ b/src/skillmodels/types.py @@ -1,6 +1,6 @@ """Dataclass definitions for skillmodels internal data structures.""" -from collections.abc import Callable +from collections.abc import Callable, Mapping from dataclasses import dataclass from enum import Enum, auto from types import MappingProxyType @@ -9,6 +9,31 @@ import pandas as pd from jax import Array + +def _make_immutable(value: object) -> object: + """Recursively convert mutable containers to immutable equivalents. + + - dict → MappingProxyType + - list → tuple + + Other types are returned unchanged. + """ + if isinstance(value, dict): + return MappingProxyType({k: _make_immutable(v) for k, v in value.items()}) + if isinstance(value, list): + return tuple(_make_immutable(v) for v in value) + return value + + +def ensure_containers_are_immutable( + value: Mapping, +) -> MappingProxyType: + """Convert a Mapping to a MappingProxyType, leaving existing proxies unchanged.""" + if isinstance(value, MappingProxyType): + return value + return MappingProxyType(dict(value)) + + # NewType definitions for domain safety # These prevent accidentally mixing up semantically different int values Period = NewType("Period", int) @@ -66,12 +91,24 @@ class Labels: stagemap: tuple[int, ...] stages: tuple[int, ...] aug_periods: tuple[int, ...] - aug_periods_to_periods: MappingProxyType[int, int] + aug_periods_to_periods: Mapping[int, int] aug_stagemap: tuple[int, ...] aug_stages: tuple[int, ...] - aug_stages_to_stages: MappingProxyType[int, int] + aug_stages_to_stages: Mapping[int, int] transition_names: tuple[str, ...] 
= () + def __post_init__(self) -> None: # noqa: D105 + object.__setattr__( + self, + "aug_periods_to_periods", + ensure_containers_are_immutable(self.aug_periods_to_periods), + ) + object.__setattr__( + self, + "aug_stages_to_stages", + ensure_containers_are_immutable(self.aug_stages_to_stages), + ) + @property def all_factors(self) -> tuple[str, ...]: """All factor names (latent + observed).""" @@ -83,19 +120,24 @@ class Anchoring: """Information about how latent factors are anchored to observed outcomes.""" anchoring: bool - outcomes: MappingProxyType[str, str] + outcomes: Mapping[str, str] factors: tuple[str, ...] free_controls: bool free_constant: bool free_loadings: bool ignore_constant_when_anchoring: bool + def __post_init__(self) -> None: # noqa: D105 + object.__setattr__( + self, "outcomes", ensure_containers_are_immutable(self.outcomes) + ) + @classmethod def disabled(cls) -> Anchoring: """Create an Anchoring config with anchoring disabled.""" return cls( anchoring=False, - outcomes=MappingProxyType({}), + outcomes={}, factors=(), free_controls=False, free_constant=False, @@ -128,7 +170,7 @@ def from_config( """ return cls( anchoring=True, - outcomes=MappingProxyType(outcomes), + outcomes=outcomes, factors=tuple(outcomes.keys()), free_controls=free_controls, free_constant=free_constant, @@ -155,9 +197,24 @@ class TransitionInfo: """Information about transition functions.""" func: Callable - param_names: MappingProxyType[str, list[str]] - individual_functions: MappingProxyType[str, Callable] - function_names: MappingProxyType[str, str] + param_names: Mapping[str, list[str]] + individual_functions: Mapping[str, Callable] + function_names: Mapping[str, str] + + def __post_init__(self) -> None: # noqa: D105 + object.__setattr__( + self, "param_names", ensure_containers_are_immutable(self.param_names) + ) + object.__setattr__( + self, + "individual_functions", + ensure_containers_are_immutable(self.individual_functions), + ) + object.__setattr__( + self, + "function_names", + ensure_containers_are_immutable(self.function_names), + ) @dataclass(frozen=True) @@ -213,10 +270,22 @@ class EndogenousFactorsInfo: """Information about endogenous factors in the model.""" has_endogenous_factors: bool - aug_periods_to_aug_period_meas_types: MappingProxyType[int, MeasurementType] + aug_periods_to_aug_period_meas_types: Mapping[int, MeasurementType] bounds_distance: float aug_periods_from_period: Callable[[int], list[int]] - factor_info: MappingProxyType[str, FactorInfo] + factor_info: Mapping[str, FactorInfo] + + def __post_init__(self) -> None: # noqa: D105 + object.__setattr__( + self, + "aug_periods_to_aug_period_meas_types", + ensure_containers_are_immutable(self.aug_periods_to_aug_period_meas_types), + ) + object.__setattr__( + self, + "factor_info", + ensure_containers_are_immutable(self.factor_info), + ) @dataclass(frozen=True) diff --git a/src/skillmodels/utilities.py b/src/skillmodels/utilities.py index b7f8226b..37c2908d 100644 --- a/src/skillmodels/utilities.py +++ b/src/skillmodels/utilities.py @@ -2,7 +2,6 @@ import warnings from dataclasses import replace -from types import MappingProxyType import numpy as np import pandas as pd @@ -118,14 +117,12 @@ def remove_factors( k: v for k, v in new_anchoring.outcomes.items() if k not in factors } if new_outcomes: - new_anchoring = replace( - new_anchoring, outcomes=MappingProxyType(new_outcomes) - ) + new_anchoring = replace(new_anchoring, outcomes=new_outcomes) else: new_anchoring = None out = model_spec._replace( - 
factors=MappingProxyType(new_factors), + factors=new_factors, anchoring=new_anchoring, ) @@ -184,11 +181,11 @@ def remove_measurements( new_normalizations = fspec.normalizations if new_normalizations is not None: new_loadings = tuple( - MappingProxyType({k: v for k, v in d.items() if k not in measurements}) + {k: v for k, v in d.items() if k not in measurements} for d in new_normalizations.loadings ) new_intercepts = tuple( - MappingProxyType({k: v for k, v in d.items() if k not in measurements}) + {k: v for k, v in d.items() if k not in measurements} for d in new_normalizations.intercepts ) if new_loadings != new_normalizations.loadings or ( @@ -208,7 +205,7 @@ def remove_measurements( fspec, measurements=new_meas, normalizations=new_normalizations ) - out = model_spec._replace(factors=MappingProxyType(new_factors)) + out = model_spec._replace(factors=new_factors) if params is not None: # This likely won't work if we have endogenous factors. @@ -274,7 +271,7 @@ def switch_translog_to_linear( new_factors[name] = fspec.with_transition_function("linear") else: new_factors[name] = fspec - out = model_spec._replace(factors=MappingProxyType(new_factors)) + out = model_spec._replace(factors=new_factors) if params is not None: # This likely won't work if we have endogenous factors. @@ -310,7 +307,7 @@ def switch_linear_to_translog( new_factors[name] = fspec.with_transition_function("translog") else: new_factors[name] = fspec - out = model_spec._replace(factors=MappingProxyType(new_factors)) + out = model_spec._replace(factors=new_factors) if params is not None: out_params = _extend_params(params=params, model_spec=out, fill_value=0.05) @@ -354,7 +351,7 @@ def reduce_n_periods( new_stagemap = new_stagemap[: new_n_periods - 1] out = model_spec._replace( - factors=MappingProxyType(new_factors), + factors=new_factors, stagemap=new_stagemap, ) diff --git a/tests/conftest.py b/tests/conftest.py index 8e273e10..81b003e2 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,16 +1 @@ """Shared test fixtures and helpers.""" - -from skillmodels.model_spec import ModelSpec - - -def model_spec_from_yaml_dict(d: dict) -> ModelSpec: - """Create a ModelSpec from a YAML-loaded dictionary. - - Args: - d: A dictionary loaded from a YAML model specification file. - - Returns: - A ModelSpec instance. 
- - """ - return ModelSpec.from_dict(d) diff --git a/tests/test_constraints.py b/tests/test_constraints.py index cdb72fa9..fcfad8c2 100644 --- a/tests/test_constraints.py +++ b/tests/test_constraints.py @@ -1,13 +1,8 @@ -from types import MappingProxyType - import numpy as np import pandas as pd import pytest -import yaml -from conftest import model_spec_from_yaml_dict from pandas.testing import assert_frame_equal -from skillmodels.config import TEST_DATA_DIR from skillmodels.constraints import ( _get_anchoring_constraints, _get_constant_factors_constraints, @@ -20,6 +15,7 @@ add_bounds, ) from skillmodels.process_model import process_model +from skillmodels.test_data.simplest_augmented_model import SIMPLEST_AUGMENTED_MODEL from skillmodels.types import Anchoring, Labels @@ -180,10 +176,10 @@ def test_constant_factor_constraints() -> None: stagemap=(0, 0, 0), stages=(0,), aug_periods=(0, 1, 2), - aug_periods_to_periods=MappingProxyType({0: 0, 1: 1, 2: 2}), + aug_periods_to_periods={0: 0, 1: 1, 2: 2}, aug_stagemap=(0, 0, 0), aug_stages=(0,), - aug_stages_to_stages=MappingProxyType({0: 0}), + aug_stages_to_stages={0: 0}, transition_names=("bla", "constant"), ) @@ -234,10 +230,10 @@ def test_trans_coeff_constraints() -> None: stagemap=(0, 0, 0), stages=(0,), aug_periods=(0, 1, 2), - aug_periods_to_periods=MappingProxyType({0: 0, 1: 1, 2: 2}), + aug_periods_to_periods={0: 0, 1: 1, 2: 2}, aug_stagemap=(0, 0, 0), aug_stages=(0,), - aug_stages_to_stages=MappingProxyType({0: 0}), + aug_stages_to_stages={0: 0}, transition_names=("log_ces", "bla", "blubb"), ) @@ -291,7 +287,7 @@ def base_anchoring_info(): return Anchoring( anchoring=True, factors=("f1", "f2"), - outcomes=MappingProxyType({"f1": "outcome", "f2": "outcome"}), + outcomes={"f1": "outcome", "f2": "outcome"}, free_controls=True, free_constant=True, free_loadings=True, @@ -310,7 +306,7 @@ def test_anchoring_constraints_for_constants(anch_uinfo) -> None: anchoring_info = Anchoring( anchoring=True, factors=("f1", "f2"), - outcomes=MappingProxyType({"f1": "outcome", "f2": "outcome"}), + outcomes={"f1": "outcome", "f2": "outcome"}, free_controls=True, free_constant=False, free_loadings=True, @@ -339,7 +335,7 @@ def test_anchoring_constraints_for_controls(anch_uinfo) -> None: anchoring_info = Anchoring( anchoring=True, factors=("f1", "f2"), - outcomes=MappingProxyType({"f1": "outcome", "f2": "outcome"}), + outcomes={"f1": "outcome", "f2": "outcome"}, free_controls=False, free_constant=True, free_loadings=True, @@ -379,7 +375,7 @@ def test_anchoring_constraints_for_loadings(anch_uinfo) -> None: anchoring_info = Anchoring( anchoring=True, factors=("f1", "f2"), - outcomes=MappingProxyType({"f1": "outcome", "f2": "outcome"}), + outcomes={"f1": "outcome", "f2": "outcome"}, free_controls=True, free_constant=True, free_loadings=False, @@ -415,9 +411,7 @@ def assert_list_equal_except_for_order(list1, list2) -> None: @pytest.fixture def simplest_augmented_model(): - with (TEST_DATA_DIR / "simplest_augmented_model.yaml").open() as y: - model = model_spec_from_yaml_dict(yaml.load(y, Loader=yaml.SafeLoader)) - return process_model(model) + return process_model(SIMPLEST_AUGMENTED_MODEL) def test_get_constraints_for_augmented_periods(simplest_augmented_model) -> None: diff --git a/tests/test_correlation_heatmap.py b/tests/test_correlation_heatmap.py index 02811287..49ce8f63 100644 --- a/tests/test_correlation_heatmap.py +++ b/tests/test_correlation_heatmap.py @@ -1,4 +1,4 @@ -from types import MappingProxyType, SimpleNamespace +from types import 
SimpleNamespace import numpy as np import pandas as pd @@ -254,10 +254,10 @@ def test_process_factors() -> None: stagemap=(0,), stages=(0,), aug_periods=(0,), - aug_periods_to_periods=MappingProxyType({0: 0}), + aug_periods_to_periods={0: 0}, aug_stagemap=(0,), aug_stages=(0,), - aug_stages_to_stages=MappingProxyType({0: 0}), + aug_stages_to_stages={0: 0}, ), ) latent_factor = "c" diff --git a/tests/test_filtered_states.py b/tests/test_filtered_states.py index 0048deb2..d3268196 100644 --- a/tests/test_filtered_states.py +++ b/tests/test_filtered_states.py @@ -3,20 +3,18 @@ import numpy as np import pandas as pd import pytest -import yaml -from conftest import model_spec_from_yaml_dict from skillmodels.config import TEST_DATA_DIR from skillmodels.filtered_states import get_filtered_states from skillmodels.maximization_inputs import get_maximization_inputs +from skillmodels.test_data.model2 import MODEL2 REGRESSION_VAULT = Path(__file__).parent / "regression_vault" @pytest.fixture def model2(): - with (TEST_DATA_DIR / "model2.yaml").open() as y: - return model_spec_from_yaml_dict(yaml.load(y, Loader=yaml.SafeLoader)) + return MODEL2 @pytest.fixture diff --git a/tests/test_likelihood_regression.py b/tests/test_likelihood_regression.py index 41b877c5..a6ccb176 100644 --- a/tests/test_likelihood_regression.py +++ b/tests/test_likelihood_regression.py @@ -1,4 +1,5 @@ import json +from dataclasses import replace from itertools import product from pathlib import Path @@ -6,14 +7,13 @@ import numpy as np import pandas as pd import pytest -import yaml -from conftest import model_spec_from_yaml_dict from numpy.testing import assert_array_almost_equal as aaae from skillmodels.config import TEST_DATA_DIR from skillmodels.decorators import register_params from skillmodels.maximization_inputs import get_maximization_inputs -from skillmodels.model_spec import ModelSpec +from skillmodels.model_spec import ModelSpec, Normalizations +from skillmodels.test_data.model2 import MODEL2 from skillmodels.utilities import reduce_n_periods jax.config.update("jax_enable_x64", True) @@ -31,8 +31,7 @@ @pytest.fixture def model2(): - with (TEST_DATA_DIR / "model2.yaml").open() as y: - return yaml.load(y, Loader=yaml.SafeLoader) + return MODEL2 @pytest.fixture @@ -42,16 +41,15 @@ def model2_data(): def _convert_model(base_model, model_name): - model = base_model.copy() if model_name == "no_stages_anchoring": - model.pop("stagemap") - elif model_name == "one_stage": - model.pop("anchoring") - elif model_name == "one_stage_anchoring": - pass - elif model_name == "two_stages_anchoring": - model["stagemap"] = [0, 0, 0, 0, 1, 1, 1] - elif model_name == "one_stage_anchoring_custom_functions": + return base_model._replace(stagemap=None) + if model_name == "one_stage": + return base_model._replace(anchoring=None) + if model_name == "one_stage_anchoring": + return base_model + if model_name == "two_stages_anchoring": + return base_model.with_stagemap((0, 0, 0, 0, 1, 1, 1)) + if model_name == "one_stage_anchoring_custom_functions": @register_params(params=[]) def constant(fac3, params): @@ -64,11 +62,14 @@ def linear(fac1, fac2, fac3, params): p["constant"] + fac1 * p["fac1"] + fac2 * p["fac2"] + fac3 * p["fac3"] ) - model["factors"]["fac2"]["transition_function"] = linear - model["factors"]["fac3"]["transition_function"] = constant - else: - raise ValueError("Invalid model name.") - return model_spec_from_yaml_dict(model) + return base_model.with_transition_functions( + { + "fac1": "log_ces", + "fac2": linear, + "fac3": constant, 
+ } + ) + raise ValueError("Invalid model name.") @pytest.mark.parametrize( @@ -96,9 +97,8 @@ def test_likelihood_values_have_not_changed( def test_splitting_does_not_change_gradient(model2, model2_data) -> None: - model = model_spec_from_yaml_dict(model2) - inputs = get_maximization_inputs(model, model2_data) - inputs_split = get_maximization_inputs(model, model2_data, 13) + inputs = get_maximization_inputs(model2, model2_data) + inputs_split = get_maximization_inputs(model2, model2_data, 13) params = inputs["params_template"] params["value"] = 0.1 @@ -199,12 +199,29 @@ def test_likelihood_contributions_large_nobs( def test_likelihood_runs_with_empty_periods(model2, model2_data) -> None: - del model2["anchoring"] - for factor in ["fac1", "fac2"]: - model2["factors"][factor]["measurements"][-1] = [] - model2["factors"][factor]["normalizations"]["loadings"][-1] = {} + # Remove anchoring and clear last-period measurements for fac1 and fac2 + new_factors = {} + for name, spec in model2.factors.items(): + if name in ("fac1", "fac2"): + new_meas = (*spec.measurements[:-1], ()) + old_norms = spec.normalizations + assert old_norms is not None + new_loadings = (*old_norms.loadings[:-1], {}) + new_norms = Normalizations( + loadings=new_loadings, + intercepts=old_norms.intercepts, + ) + new_factors[name] = replace( + spec, measurements=new_meas, normalizations=new_norms + ) + else: + new_factors[name] = spec + model = model2._replace( + factors=new_factors, + anchoring=None, + ) - func_dict = get_maximization_inputs(model_spec_from_yaml_dict(model2), model2_data) + func_dict = get_maximization_inputs(model, model2_data) params = func_dict["params_template"] params["value"] = 0.1 @@ -214,11 +231,9 @@ def test_likelihood_runs_with_empty_periods(model2, model2_data) -> None: def test_likelihood_runs_with_too_long_data(model2, model2_data) -> None: - full_model = model_spec_from_yaml_dict(model2) - reduced = reduce_n_periods(full_model, 2) + reduced = reduce_n_periods(model2, 2) assert isinstance(reduced, ModelSpec) - model = reduced - func_dict = get_maximization_inputs(model, model2_data) + func_dict = get_maximization_inputs(reduced, model2_data) params = func_dict["params_template"] params["value"] = 0.1 @@ -228,10 +243,10 @@ def test_likelihood_runs_with_too_long_data(model2, model2_data) -> None: def test_likelihood_runs_with_observed_factors(model2, model2_data) -> None: - model2["observed_factors"] = ["ob1", "ob2"] + model = model2.with_added_observed_factors("ob1", "ob2") model2_data["ob1"] = np.arange(len(model2_data)) model2_data["ob2"] = np.ones(len(model2_data)) - func_dict = get_maximization_inputs(model_spec_from_yaml_dict(model2), model2_data) + func_dict = get_maximization_inputs(model, model2_data) params = func_dict["params_template"] params["value"] = 0.1 diff --git a/tests/test_params_index.py b/tests/test_params_index.py index 88837058..e7d00794 100644 --- a/tests/test_params_index.py +++ b/tests/test_params_index.py @@ -1,9 +1,5 @@ -from types import MappingProxyType - import pandas as pd import pytest -import yaml -from conftest import model_spec_from_yaml_dict from skillmodels.config import TEST_DATA_DIR from skillmodels.params_index import ( @@ -18,14 +14,13 @@ initial_mean_index_tuples, ) from skillmodels.process_model import process_model +from skillmodels.test_data.model2 import MODEL2 from skillmodels.types import TransitionInfo @pytest.fixture def model2_inputs(): - with (TEST_DATA_DIR / "model2.yaml").open() as y: - model = model_spec_from_yaml_dict(yaml.load(y, 
Loader=yaml.SafeLoader)) - processed = process_model(model) + processed = process_model(MODEL2) return { "update_info": processed.update_info, @@ -186,9 +181,9 @@ def test_trans_coeffs_index_tuples_no_endogenous_factors() -> None: } trans_info = TransitionInfo( func=lambda x: x, # dummy function - param_names=MappingProxyType(param_names), - individual_functions=MappingProxyType({}), - function_names=MappingProxyType({}), + param_names=param_names, + individual_functions={}, + function_names={}, ) expected = [ @@ -229,9 +224,9 @@ def test_trans_coeffs_index_tuples_has_endogenous_factors() -> None: } trans_info = TransitionInfo( func=lambda x: x, # dummy function - param_names=MappingProxyType(param_names), - individual_functions=MappingProxyType({}), - function_names=MappingProxyType({}), + param_names=param_names, + individual_functions={}, + function_names={}, ) expected = [ diff --git a/tests/test_parse_params.py b/tests/test_parse_params.py index fa0f8aac..2d1392c8 100644 --- a/tests/test_parse_params.py +++ b/tests/test_parse_params.py @@ -5,19 +5,16 @@ """ -from types import MappingProxyType - import jax.numpy as jnp import numpy as np import pandas as pd import pytest -import yaml -from conftest import model_spec_from_yaml_dict from numpy.testing import assert_array_equal as aae from skillmodels.config import TEST_DATA_DIR from skillmodels.parse_params import create_parsing_info, parse_params from skillmodels.process_model import process_model +from skillmodels.test_data.model2 import MODEL2 from skillmodels.types import Anchoring @@ -28,10 +25,7 @@ def parsed_parameters(): index_col=["category", "period", "name1", "name2"], ).index - with (TEST_DATA_DIR / "model2.yaml").open() as y: - model = model_spec_from_yaml_dict(yaml.load(y, Loader=yaml.SafeLoader)) - - processed = process_model(model) + processed = process_model(MODEL2) update_info = processed.update_info labels = processed.labels @@ -40,7 +34,7 @@ def parsed_parameters(): # more meaningful test anchoring = Anchoring( anchoring=False, - outcomes=MappingProxyType({}), + outcomes={}, factors=(), free_controls=True, free_constant=True, diff --git a/tests/test_process_data.py b/tests/test_process_data.py index 0d6c6774..4437911e 100644 --- a/tests/test_process_data.py +++ b/tests/test_process_data.py @@ -1,13 +1,10 @@ import io import textwrap -from types import MappingProxyType import jax.numpy as jnp import numpy as np import pandas as pd import pytest -import yaml -from conftest import model_spec_from_yaml_dict from numpy.testing import assert_array_equal as aae from skillmodels.config import TEST_DATA_DIR @@ -20,6 +17,7 @@ pre_process_data, ) from skillmodels.process_model import process_model +from skillmodels.test_data.simplest_augmented_model import SIMPLEST_AUGMENTED_MODEL from skillmodels.types import Labels @@ -48,8 +46,7 @@ def test_pre_process_data() -> None: @pytest.fixture def simplest_augmented(): out = {} - with (TEST_DATA_DIR / "simplest_augmented_model.yaml").open() as y: - out["model"] = model_spec_from_yaml_dict(yaml.load(y, Loader=yaml.SafeLoader)) + out["model"] = SIMPLEST_AUGMENTED_MODEL _df = pd.DataFrame(data=np.arange(15).reshape(3, 5).T, columns=["var", "inv", "of"]) _df["period"] = [1, 1, 2, 1, 2] _df["id"] = [1, 3, 3, 5, 5] @@ -130,10 +127,10 @@ def test_generate_controls_array() -> None: stagemap=(0, 0), stages=(0,), aug_periods=(0, 1), - aug_periods_to_periods=MappingProxyType({0: 0, 1: 1}), + aug_periods_to_periods={0: 0, 1: 1}, aug_stagemap=(0, 0), aug_stages=(0,), - 
aug_stages_to_stages=MappingProxyType({0: 0}), + aug_stages_to_stages={0: 0}, ) calculated = _generate_controls_array(data, labels, 2) @@ -159,10 +156,10 @@ def test_generate_observed_factor_array() -> None: stagemap=(0, 0), stages=(0,), aug_periods=(0, 1), - aug_periods_to_periods=MappingProxyType({0: 0, 1: 1}), + aug_periods_to_periods={0: 0, 1: 1}, aug_stagemap=(0, 0), aug_stages=(0,), - aug_stages_to_stages=MappingProxyType({0: 0}), + aug_stages_to_stages={0: 0}, ) calculated = _generate_observed_factor_array(data, labels, 2) diff --git a/tests/test_process_model.py b/tests/test_process_model.py index 6256e1d9..7d5227b2 100644 --- a/tests/test_process_model.py +++ b/tests/test_process_model.py @@ -1,14 +1,14 @@ import inspect +from dataclasses import replace import pandas as pd import pytest -import yaml -from conftest import model_spec_from_yaml_dict from pandas.testing import assert_frame_equal from skillmodels.config import TEST_DATA_DIR from skillmodels.model_spec import FactorSpec from skillmodels.process_model import get_has_endogenous_factors, process_model +from skillmodels.test_data.model2 import MODEL2 from skillmodels.types import TransitionInfo # ====================================================================================== @@ -18,8 +18,7 @@ @pytest.fixture def model2(): - with (TEST_DATA_DIR / "model2.yaml").open() as y: - return model_spec_from_yaml_dict(yaml.load(y, Loader=yaml.SafeLoader)) + return MODEL2 def test_has_endogenous_factors(model2) -> None: @@ -124,14 +123,18 @@ def test_normalizations(model2) -> None: # ====================================================================================== +def _make_fac3_endogenous(model): + """Return a new model with fac3 set as endogenous.""" + fac3 = model.factors["fac3"] + new_fac3 = replace(fac3, is_endogenous=True) + new_factors = dict(model.factors) | {"fac3": new_fac3} + return model._replace(factors=new_factors) + + def test_anchoring_and_endogenous_factors_work_together() -> None: - with (TEST_DATA_DIR / "model2.yaml").open() as y: - model_dict = yaml.load(y, Loader=yaml.SafeLoader) - # Set fac3 to be endogenous - model_dict["factors"]["fac3"]["is_endogenous"] = True - del model_dict["stagemap"] + model = _make_fac3_endogenous(MODEL2)._replace(stagemap=None) # Should not raise - anchoring and endogenous factors now work together - result = process_model(model_spec_from_yaml_dict(model_dict)) + result = process_model(model) # Verify anchoring is enabled assert result.anchoring.anchoring assert result.anchoring.factors == ("fac1",) @@ -148,39 +151,32 @@ def test_anchoring_and_endogenous_factors_work_together() -> None: def test_stagemap_with_endogenous_factors_wrong_labels() -> None: - with (TEST_DATA_DIR / "model2.yaml").open() as y: - model_dict = yaml.load(y, Loader=yaml.SafeLoader) - # Set fac3 to be endogenous - model_dict["factors"]["fac3"]["is_endogenous"] = True - model_dict["stagemap"] = [0, 0, 1, 1, 2, 2, 4] - del model_dict["anchoring"] + model = _make_fac3_endogenous(MODEL2)._replace( + stagemap=(0, 0, 1, 1, 2, 2, 4), + anchoring=None, + ) with pytest.raises(ValueError, match="Invalid stage map:"): - process_model(model_spec_from_yaml_dict(model_dict)) + process_model(model) def test_stagemap_with_endogenous_factors() -> None: - with (TEST_DATA_DIR / "model2.yaml").open() as y: - model_dict = yaml.load(y, Loader=yaml.SafeLoader) - # Set fac3 to be endogenous - model_dict["factors"]["fac3"]["is_endogenous"] = True - stagemap = [0, 0, 1, 1, 2, 2, 3] - model_dict["stagemap"] = stagemap - del 
model_dict["anchoring"] - processed = process_model(model_spec_from_yaml_dict(model_dict)) - assert processed.labels.stagemap == tuple(stagemap) + stagemap = (0, 0, 1, 1, 2, 2, 3) + model = _make_fac3_endogenous(MODEL2)._replace( + stagemap=stagemap, + anchoring=None, + ) + processed = process_model(model) + assert processed.labels.stagemap == stagemap assert processed.labels.stages == (0, 1, 2, 3) assert processed.labels.aug_stagemap == (0, 1, 0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7) @pytest.fixture def model2_inv(): - with (TEST_DATA_DIR / "model2.yaml").open() as y: - model_dict = yaml.load(y, Loader=yaml.SafeLoader) - # Set fac3 to be endogenous - model_dict["factors"]["fac3"]["is_endogenous"] = True - del model_dict["stagemap"] - del model_dict["anchoring"] - return model_spec_from_yaml_dict(model_dict) + return _make_fac3_endogenous(MODEL2)._replace( + stagemap=None, + anchoring=None, + ) def test_with_endog_has_endogenous_factors(model2_inv) -> None: diff --git a/tests/test_simulate_data.py b/tests/test_simulate_data.py index d667cf61..c2b742ab 100644 --- a/tests/test_simulate_data.py +++ b/tests/test_simulate_data.py @@ -1,12 +1,11 @@ """Tests for functions in simulate_data module.""" +from dataclasses import replace from pathlib import Path import numpy as np import pandas as pd import pytest -import yaml -from conftest import model_spec_from_yaml_dict from numpy.testing import assert_array_almost_equal as aaae from skillmodels.config import TEST_DATA_DIR @@ -16,14 +15,14 @@ measurements_from_states, simulate_dataset, ) +from skillmodels.test_data.model2 import MODEL2 REGRESSION_VAULT = Path(__file__).parent / "regression_vault" @pytest.fixture def model2(): - with (TEST_DATA_DIR / "model2.yaml").open() as y: - return model_spec_from_yaml_dict(yaml.load(y, Loader=yaml.SafeLoader)) + return MODEL2 @pytest.fixture @@ -69,12 +68,14 @@ def test_measurements_from_factors() -> None: @pytest.fixture def model2_with_endogenous(): """Model2 with fac3 set as endogenous factor.""" - with (TEST_DATA_DIR / "model2.yaml").open() as y: - model_dict = yaml.load(y, Loader=yaml.SafeLoader) - model_dict["factors"]["fac3"]["is_endogenous"] = True - del model_dict["stagemap"] - del model_dict["anchoring"] - return model_spec_from_yaml_dict(model_dict) + fac3 = MODEL2.factors["fac3"] + new_fac3 = replace(fac3, is_endogenous=True) + new_factors = dict(MODEL2.factors) | {"fac3": new_fac3} + return MODEL2._replace( + factors=new_factors, + stagemap=None, + anchoring=None, + ) def test_collapse_aug_periods_to_periods_with_endogenous_factors( diff --git a/tests/test_utilities.py b/tests/test_utilities.py index c5fecf5b..5540f9c6 100644 --- a/tests/test_utilities.py +++ b/tests/test_utilities.py @@ -8,13 +8,11 @@ import numpy as np import pandas as pd import pytest -import yaml -from conftest import model_spec_from_yaml_dict from pandas.testing import assert_frame_equal, assert_index_equal -from skillmodels.config import TEST_DATA_DIR from skillmodels.model_spec import ModelSpec from skillmodels.process_model import process_model +from skillmodels.test_data.model2 import MODEL2 from skillmodels.utilities import ( _get_params_index, extract_factors, @@ -30,8 +28,7 @@ @pytest.fixture def model2(): - with (TEST_DATA_DIR / "model2.yaml").open() as y: - return model_spec_from_yaml_dict(yaml.load(y, Loader=yaml.SafeLoader)) + return MODEL2 @pytest.mark.parametrize("factors", ["fac2", ["fac2"]]) diff --git a/tests/test_visualize_factor_distributions.py b/tests/test_visualize_factor_distributions.py index 
1ec41c73..74574fab 100644 --- a/tests/test_visualize_factor_distributions.py +++ b/tests/test_visualize_factor_distributions.py @@ -1,14 +1,13 @@ from pathlib import Path import pandas as pd -import yaml -from conftest import model_spec_from_yaml_dict from skillmodels.config import TEST_DATA_DIR from skillmodels.filtered_states import get_filtered_states from skillmodels.maximization_inputs import get_maximization_inputs from skillmodels.process_model import process_model from skillmodels.simulate_data import simulate_dataset +from skillmodels.test_data.model2 import MODEL2 from skillmodels.visualize_factor_distributions import ( bivariate_density_contours, bivariate_density_surfaces, @@ -20,8 +19,7 @@ def test_visualize_factor_distributions_runs_with_filtered_states() -> None: - with (TEST_DATA_DIR / "model2.yaml").open() as y: - model = model_spec_from_yaml_dict(yaml.load(y, Loader=yaml.SafeLoader)) + model = MODEL2 params = pd.read_csv(REGRESSION_VAULT / "one_stage_anchoring.csv") params = params.set_index(["category", "period", "name1", "name2"]) @@ -57,8 +55,7 @@ def test_visualize_factor_distributions_runs_with_filtered_states() -> None: def test_visualize_factor_distributions_runs_with_simulated_states() -> None: - with (TEST_DATA_DIR / "model2.yaml").open() as y: - model = model_spec_from_yaml_dict(yaml.load(y, Loader=yaml.SafeLoader)) + model = MODEL2 data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") data = data.set_index(["caseid", "period"]) @@ -100,8 +97,7 @@ def test_visualize_factor_distributions_with_period_indexed_states() -> None: This mimics the scenario where states come from a downstream task that has already mapped aug_period to period and dropped the aug_period column. """ - with (TEST_DATA_DIR / "model2.yaml").open() as y: - model = model_spec_from_yaml_dict(yaml.load(y, Loader=yaml.SafeLoader)) + model = MODEL2 data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") data = data.set_index(["caseid", "period"]) @@ -154,8 +150,7 @@ def test_visualize_factor_distributions_with_both_aug_period_and_period() -> Non This mimics the scenario where states have aug_period as a column and period in the index (or both as columns). 
""" - with (TEST_DATA_DIR / "model2.yaml").open() as y: - model = model_spec_from_yaml_dict(yaml.load(y, Loader=yaml.SafeLoader)) + model = MODEL2 data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") data = data.set_index(["caseid", "period"]) diff --git a/tests/test_visualize_transition_equations.py b/tests/test_visualize_transition_equations.py index 924ba62c..3ce950fc 100644 --- a/tests/test_visualize_transition_equations.py +++ b/tests/test_visualize_transition_equations.py @@ -1,11 +1,10 @@ from pathlib import Path import pandas as pd -import yaml -from conftest import model_spec_from_yaml_dict from skillmodels.config import TEST_DATA_DIR from skillmodels.maximization_inputs import get_maximization_inputs +from skillmodels.test_data.model2 import MODEL2 from skillmodels.visualize_transition_equations import ( combine_transition_plots, get_transition_plots, @@ -15,11 +14,7 @@ def test_visualize_transition_equations_runs() -> None: - with (TEST_DATA_DIR / "model2.yaml").open() as y: - model_dict = yaml.load(y, Loader=yaml.SafeLoader) - - model_dict["observed_factors"] = ["ob1"] - model = model_spec_from_yaml_dict(model_dict) + model = MODEL2.with_added_observed_factors("ob1") params = pd.read_csv(REGRESSION_VAULT / "one_stage_anchoring.csv") params = params.set_index(["category", "period", "name1", "name2"]) From 7cf471a8d735298b103de0cc6805e45f11c32603 Mon Sep 17 00:00:00 2001 From: Hans-Martin von Gaudecker Date: Sun, 1 Feb 2026 16:48:24 +0100 Subject: [PATCH 27/27] Next shot at fixing pickling. --- src/skillmodels/types.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/skillmodels/types.py b/src/skillmodels/types.py index 5ef7d53b..b7710ca6 100644 --- a/src/skillmodels/types.py +++ b/src/skillmodels/types.py @@ -1,5 +1,6 @@ """Dataclass definitions for skillmodels internal data structures.""" +import copyreg from collections.abc import Callable, Mapping from dataclasses import dataclass from enum import Enum, auto @@ -34,6 +35,13 @@ def ensure_containers_are_immutable( return MappingProxyType(dict(value)) +def _reduce_mapping_proxy(mp: MappingProxyType) -> tuple: + return ensure_containers_are_immutable, (dict(mp),) + + +copyreg.pickle(MappingProxyType, _reduce_mapping_proxy) + + # NewType definitions for domain safety # These prevent accidentally mixing up semantically different int values Period = NewType("Period", int)