API Reference

psychoanalyze

Top-level package for psychoanalyze.

Modules:

  • plot: Global plot settings and generic plot utilities.
  • sigmoids: Implementations of psychometric sigmoid functions.

Subpackages:

  • data: Submodules in psychoanalyze.data contain data manipulation and transformation functions, each corresponding to a level in the data hierarchy (trials, points, blocks, sessions, subjects).
  • analysis: Submodules in psychoanalyze.analysis contain analysis routines (strength-duration, Bayesian fits, eCDFs, Weber's Law), often corresponding to a level in the data hierarchy.

main

PsychoAnalyze command line interface.

main(command)

Main commands.

Source code in psychoanalyze/main.py
@app.command()
def main(command: str) -> None:
    """Main commands."""
    if command == "version":
        Console().print(importlib.metadata.version("psychoanalyze"))
    if command == "dash":
        dash_app.app.run(debug=True)

params

Psychometric function parameter conversions.

intercept_to_location(intercept, scale)

Convert intercept to location.

Source code in psychoanalyze/params.py
def intercept_to_location(intercept: float, scale: float) -> float:
    """Convert intercept to location."""
    return -intercept * scale

location_to_intercept(location, scale)

Convert location to intercept.

Source code in psychoanalyze/params.py
def location_to_intercept(location: float, scale: float) -> float:
    """Convert location to intercept."""
    return -location / scale

slope_to_scale(slope)

Convert slope to scale.

Source code in psychoanalyze/params.py
def slope_to_scale(slope: float) -> float:
    """Convert slope to scale."""
    return 1 / slope

scale_to_slope(scale)

Convert scale to slope.

Source code in psychoanalyze/params.py
def scale_to_slope(scale: float) -> float:
    """Convert scale to slope."""
    return 1 / scale
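
These four conversions are mutual inverses. A minimal round-trip sketch, assuming the package is installed and importable:

from psychoanalyze import params

location, scale = 2.0, 0.5
intercept = params.location_to_intercept(location, scale)  # -4.0
assert params.intercept_to_location(intercept, scale) == location
slope = params.scale_to_slope(scale)  # 2.0
assert params.slope_to_scale(slope) == scale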

plot

Global plot settings and generic plot utilities.

sigmoids

Sigmoid functions used in the psychometric function.

weibull(x, alpha, beta)

Calculate psi using the Weibull function.

Source code in psychoanalyze/sigmoids.py
def weibull(
    x: np.ndarray[Any, np.dtype[np.floating[Any]]],
    alpha: float,
    beta: float,
) -> np.ndarray[Any, np.dtype[np.floating[Any]]]:
    """Calculate psi using the Weibull function."""
    return 1 - np.exp(-((x / alpha) ** beta))

gumbel(x, alpha, beta)

Calculate psi using the Gumbel function.

Source code in psychoanalyze/sigmoids.py
def gumbel(
    x: np.ndarray[Any, np.dtype[Any]],
    alpha: float,
    beta: float,
) -> np.ndarray[Any, np.dtype[np.floating[Any]]]:
    """Calculate psi using the Gumbel function."""
    return 1 - np.exp(-(10 ** (beta * (x - alpha))))

quick(x, alpha, beta)

Calculate psi using quick function.

Source code in psychoanalyze/sigmoids.py
def quick(x: float, alpha: float, beta: float) -> float:
    """Calculate psi using quick function."""
    return 1 - 2 ** (-((x / alpha) ** beta))

log_quick(x, alpha, beta)

Calculate psi using log_quick function.

Source code in psychoanalyze/sigmoids.py
def log_quick(x: float, alpha: float, beta: float) -> float:
    """Calculate psi using log_quick function."""
    return 1 - 2 ** (-(10 ** (beta * (x - alpha))))
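
All four sigmoids share the (x, alpha, beta) signature, so they can be compared on a common stimulus grid. A short usage sketch (quick and log_quick are annotated for scalars, but their NumPy expressions broadcast over arrays too):

import numpy as np

from psychoanalyze import sigmoids

x = np.linspace(0.1, 2.0, 5)
print(sigmoids.weibull(x, alpha=1.0, beta=2.0))
print(sigmoids.quick(x, alpha=1.0, beta=2.0))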

data

Data modules and general-purpose data transformation utilities.

Submodules:

points

Utilities for points-level data.

Points correspond to the aggregate measures of method-of-constant-stimuli experiments at each stimulus level measured. For example, a block that samples 8 stimulus intensity levels would have 8 corresponding points.

from_trials(trials)

Aggregate point-level measures from trial data.

Source code in psychoanalyze/data/points.py
@check_io(trials=types.trials, out=types.points)
def from_trials(trials: pd.DataFrame) -> pd.DataFrame:
    """Aggregate point-level measures from trial data."""
    points = trials.groupby(["Block", "Intensity"])["Result"].agg(["count", "sum"])
    points = points.rename(columns={"count": "n trials", "sum": "Hits"})
    points["Hit Rate"] = points["Hits"] / points["n trials"]
    points["logit(Hit Rate)"] = logit(points["Hit Rate"])
    return points.reset_index()
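
A minimal sketch of the aggregation, assuming a long-format trials frame with the Block, Intensity, and Result columns used in the function body (the @check_io decorator additionally validates the input and output schemas):

import pandas as pd

from psychoanalyze.data import points

trials = pd.DataFrame(
    {
        "Block": [0, 0, 0, 0],
        "Intensity": [0.0, 0.0, 1.0, 1.0],
        "Result": [0, 1, 1, 1],
    },
)
pts = points.from_trials(trials)  # n trials, Hits, Hit Rate, logit(Hit Rate)
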
load(data_path)

Load points data from csv.

Source code in psychoanalyze/data/points.py
@check_output(types.points)
def load(data_path: Path) -> pd.DataFrame:
    """Load points data from csv."""
    trials = pa_trials.load(data_path)
    return from_trials(trials)
prep_fit(points, dimension='Amp1')

Transform points data for numpy-related fitting procedures.

Source code in psychoanalyze/data/points.py
def prep_fit(points: pd.DataFrame, dimension: str = "Amp1") -> dict:
    """Transform points data for numpy-related fitting procedures."""
    points = points.reset_index()
    return {
        "X": len(points),
        "x": points[f"{dimension}"].to_numpy(),
        "N": points["n"].to_numpy(),
        "hits": points["Hits"].to_numpy(),
    }
hits(n, params)

Sample list of n hits from a list of intensity values.

Source code in psychoanalyze/data/points.py
def hits(
    n: pd.Series,
    params: dict[str, float],
) -> pd.Series:
    """Sample list of n hits from a list of intensity values."""
    p = logistic.cdf(n.index.to_numpy(), params["Threshold"], params["Slope"])
    psi = params["Guess Rate"] + (1.0 - params["Guess Rate"] - params["Lapse Rate"]) * p
    return pd.Series(
        np.random.default_rng().binomial(
            n,
            psi,
            len(n),
        ),
        index=n.index,
        name="Hits",
    )
generate(n_trials, options, params)

Generate points-level data.

Source code in psychoanalyze/data/points.py
def generate(
    n_trials: int,
    options: pd.Index,
    params: dict[str, float],
) -> pd.DataFrame:
    """Generate points-level data."""
    n = generate_n(n_trials, options)
    _hits = hits(
        n,
        params,
    )
    points = pd.concat([n, _hits], axis=1)
    _hit_rate = hit_rate(points)
    logit_hit_rate = pd.Series(
        logit(_hit_rate),
        name="logit(Hit Rate)",
        index=n.index,
    )
    return pd.concat([points, _hit_rate, logit_hit_rate], axis=1)
generate_point(n, p)

Sample the number of hits from n trials with hit probability p (binomial draw).

Source code in psychoanalyze/data/points.py
def generate_point(n: int, p: float) -> int:
    """Sample the number of hits from n trials with hit probability p."""
    return np.random.default_rng().binomial(n, p)
datatable(data)

Convert dataframe to Dash DataTable-friendly format.

Source code in psychoanalyze/data/points.py
def datatable(data: pd.DataFrame) -> dash_table.DataTable:
    """Convert dataframe to Dash DataTable-friendly format."""
    return dash_table.DataTable(
        data.reset_index()[["Amp1", "Hit Rate", "n"]].to_dict("records"),
        columns=[
            {
                "id": "Amp1",
                "name": "Amp1",
                "type": "numeric",
                "format": dash_table.Format.Format(
                    precision=2,
                    scheme=dash_table.Format.Scheme.fixed,
                ),
            },
            {
                "id": "Hit Rate",
                "name": "Hit Rate",
                "type": "numeric",
                "format": dash_table.Format.Format(
                    precision=2,
                    scheme=dash_table.Format.Scheme.fixed,
                ),
            },
            {
                "id": "n",
                "name": "n",
                "type": "numeric",
            },
        ],
        id="experiment-psych-table",
    )
from_store(store_data)

Get points-level measures from trials-level data store.

Source code in psychoanalyze/data/points.py
def from_store(store_data: str) -> pd.DataFrame:
    """Get points-level measures from trials-level data store."""
    trials = pa_trials.from_store(store_data)
    return from_trials(trials)
combine_plots(fig1, fig2)

Combine two points-level plots. Possible duplicate.

Source code in psychoanalyze/data/points.py
def combine_plots(fig1: go.Figure, fig2: go.Figure) -> go.Figure:
    """Combine two points-level plots. Possible duplicate."""
    return go.Figure(data=fig1.data + fig2.data)
n(trials)

Count trials at each point.

Source code in psychoanalyze/data/points.py
def n(trials: pd.Index) -> pd.Series:
    """Count trials at each point."""
    return pd.Series(trials.value_counts(), name="n")
generate_n(n_trials, options)

Simulate how many trials were performed per intensity level.

Source code in psychoanalyze/data/points.py
def generate_n(n_trials: int, options: pd.Index) -> pd.Series:
    """Simulate how many trials were performed per intensity level."""
    return pd.Series(n(pa_trials.generate_trial_index(n_trials, options)))
to_block(points)

Aggregate to block-level measures from points-level data.

Source code in psychoanalyze/data/points.py
def to_block(points: pd.DataFrame) -> pd.DataFrame:
    """Aggregate to block-level measures from points-level data."""
    return points.groupby(level="Block").sum()
psi(x, params)

Calculate psi for an array of intensity levels x.

Source code in psychoanalyze/data/points.py
def psi(
    x: pd.Index,
    params: dict[str, float],
) -> pd.Series:
    """Calculate psi for an array of intensity levels x."""
    return pd.Series(
        params["gamma"]
        + (1 - params["gamma"] - params["lambda"])
        * expit(params["x_0"] + params["k"] * x),
        index=x,
        name="p(x)",
    )
plot(points, y)

Plot the psychometric function.

Source code in psychoanalyze/data/points.py
def plot(points: pd.DataFrame, y: str) -> go.Figure:
    """Plot the psychometric function."""
    return px.scatter(
        points.reset_index(),
        x="Intensity",
        y=y,
        size="n",
        color="Block",
        template="plotly_white",
    )
hit_rate(df)

Calculate hit rate from hits and number of trials.

Source code in psychoanalyze/data/points.py
def hit_rate(df: pd.DataFrame) -> pd.Series:
    """Calculate hit rate from hits and number of trials."""
    return pd.Series(df["Hits"] / df["n"], name="Hit Rate")
transform(hit_rate, y)

Logit transform hit rate.

Source code in psychoanalyze/data/points.py
def transform(hit_rate: float, y: str) -> float:
    """Logit transform hit rate."""
    return logit(hit_rate) if y == "alpha" else hit_rate
generate_index(n_levels, x_range)

Generate evenly-spaced values along the modulated stimulus dimension.

Source code in psychoanalyze/data/points.py
def generate_index(n_levels: int, x_range: list[float]) -> pd.Index:
    """Generate evenly-spaced values along the modulated stimulus dimension."""
    min_x = x_range[0]
    max_x = x_range[1]
    return pd.Index(np.linspace(min_x, max_x, n_levels), name="Intensity")
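
The simulation helpers compose: build an intensity grid, then generate hits under a logistic model. A hedged sketch, with the parameter keys taken from hits() above:

from psychoanalyze.data import points

options = points.generate_index(n_levels=8, x_range=[-4.0, 4.0])
params = {"Threshold": 0.0, "Slope": 1.0, "Guess Rate": 0.0, "Lapse Rate": 0.0}
simulated = points.generate(n_trials=100, options=options, params=params)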

logistic

Utilities for working with logistic distributions.

to_intercept(location, scale)

Calculate the intercept of a logistic distribution given location and scale.

Parameters:

  • location (float, required): The location parameter of a logistic distribution.
  • scale (float, required): The scale parameter of a logistic distribution.

Returns:

  • float: The intercept of the logistic distribution.

Source code in psychoanalyze/data/logistic.py
def to_intercept(location: float, scale: float) -> float:
    """Calculate the intercept of a logistic distribution given location and scale.

    Params:
        location: The location parameter of a logistic distribution.
        scale: The scale parameter of a logistic distribution.

    Returns:
        The intercept of the logistic distribution.

    """
    return -location / scale
to_slope(scale)

Calculate the slope of a logistic distribution given scale.

Parameters:

  • scale (float, required): The scale parameter of a logistic distribution.

Returns:

  • float: The slope of the logistic distribution at the inflection point.

Source code in psychoanalyze/data/logistic.py
def to_slope(scale: float) -> float:
    """Calculate the slope of a logistic distribution given scale.

    Params:
        scale: The scale parameter of a logistic distribution.

    Returns:
        The slope of the logistic distribution at the inflection point.

    """
    return 1 / scale
min_x(intercept, slope)

Calculate the minimum x value to be sampled.

Parameters:

  • intercept (float, required): The intercept of the logistic distribution.
  • slope (float, required): The slope of the logistic distribution at the inflection point.

Returns:

  • float: The minimum x value of the logistic distribution.

Source code in psychoanalyze/data/logistic.py
def min_x(intercept: float, slope: float) -> float:
    """Calculate the minimum x value to be sampled.

    Params:
        intercept: The intercept of the logistic distribution.
        slope: The slope of the logistic distribution at the inflection point.

    Returns:
        The minimum x value of the logistic distribution.

    """
    return (logit(0.01) - intercept) / slope
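
These helpers chain naturally: convert a location/scale pair to intercept/slope form, then find the lower end of the sampling range. A minimal sketch:

from psychoanalyze.data import logistic

intercept = logistic.to_intercept(location=2.0, scale=0.5)  # -4.0
slope = logistic.to_slope(scale=0.5)  # 2.0
x_lo = logistic.min_x(intercept, slope)  # x at which the CDF reaches 1%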

stimulus

Constants for dimension labels.

subjects

Data transformation functions for subject-level data.

load(data_path)

Load subject data from csv.

Source code in psychoanalyze/data/subjects.py
def load(data_path: Path) -> pd.DataFrame:
    """Load subject data from csv."""
    return pd.read_csv(
        data_path / "subjects.csv",
        index_col="Monkey",
        parse_dates=["Surgery Date"],
    )
generate_letter_names(n_subjects)

Generate dummy subject names using capital letters in alphabetical order.

Source code in psychoanalyze/data/subjects.py
def generate_letter_names(n_subjects: int) -> list[str]:
    """Generate dummy subject names using capital letters in alphabetical order."""
    return list("ABCDEFG"[:n_subjects])
generate_trials(n_trials, model_params, n_days, n_subjects)

Generate trial-level data, including subject-level info.

Source code in psychoanalyze/data/subjects.py
def generate_trials(
    n_trials: int,
    model_params: dict[str, float],
    n_days: int,
    n_subjects: int,
) -> pd.DataFrame:
    """Generate trial-level data, including subject-level info."""
    return pd.concat(
        {
            subj: sessions.generate_trials(n_trials, model_params, n_days)
            for subj in string.ascii_uppercase[:n_subjects]
        },
        names=["Subject"],
    )
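
The generator composes the hierarchy top-down (subjects → sessions → blocks → trials). A hedged sketch, with model parameter names as used by trials.psi:

from psychoanalyze.data import subjects

trials = subjects.generate_trials(
    n_trials=50,
    model_params={"x_0": 0.0, "k": 1.0, "gamma": 0.0, "lambda": 0.0},
    n_days=3,
    n_subjects=2,
)  # indexed by Subject, Block, Intensity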

blocks

Block-level data utilities.

Blocks are the most analytically significant objects in the PsychoAnalyze data hierarchy. They represent a specific set of experimental conditions and generally correspond to a single fit of the psychometric function.

generate(n_trials_per_level, x_min, x_max, n_levels)

Generate block-level data.

Source code in psychoanalyze/data/blocks.py
def generate(
    n_trials_per_level: int,
    x_min: float,
    x_max: float,
    n_levels: int,
) -> pd.DataFrame:
    """Generate block-level data."""
    index = pd.Index(np.linspace(x_min, x_max, n_levels), name="x")
    n = [n_trials_per_level] * len(index)
    p = scipy_logistic.cdf(index)
    return pd.DataFrame(
        {"n": n, "Hits": np.random.default_rng().binomial(n, p)},
        index=index,
    )
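
generate simulates a standard logistic observer, one row per intensity level. For example:

from psychoanalyze.data import blocks

block = blocks.generate(n_trials_per_level=50, x_min=-3.0, x_max=3.0, n_levels=7)
block["Hit Rate"] = block["Hits"] / block["n"]
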
plot_fits(blocks)

Plot fits.

Source code in psychoanalyze/data/blocks.py
def plot_fits(blocks: pd.DataFrame) -> go.Figure:
    """Plot fits."""
    x = np.linspace(-3, 3, 100)
    y = expit(x)
    return px.line(x=x, y=y)
load(data_path)

Load block data from csv.

Source code in psychoanalyze/data/blocks.py
def load(data_path: Path) -> pd.DataFrame:
    """Load block data from csv."""
    full_path = data_path / "blocks.csv"
    channel_config = ["Active Channels", "Return Channels"]
    blocks = pd.read_csv(full_path, parse_dates=["Date"]).set_index(
        sessions.dims + stimulus.ref_dims + channel_config,
    )
    blocks["Block"] = days(blocks, subjects.load(data_path))
    return blocks
days(blocks, intervention_dates)

Calculate days for block-level data. Possible duplicate.

Source code in psychoanalyze/data/blocks.py
def days(blocks: pd.DataFrame, intervention_dates: pd.DataFrame) -> pd.Series:
    """Calculate days for block-level data. Possible duplicate."""
    blocks = blocks.join(intervention_dates, on="Subject")
    days = pd.Series(
        blocks.index.get_level_values("Date") - blocks["Surgery Date"],
    ).dt.days
    days.name = "Days"
    return days
n_trials(trials)

Calculate n trials for each block.

Source code in psychoanalyze/data/blocks.py
def n_trials(trials: pd.DataFrame) -> pd.Series:
    """Calculate n trials for each block."""
    session_cols = ["Subject", "Date"]
    ref_stim_cols = ["Amp2", "Width2", "Freq2", "Dur2"]
    channel_config = ["Active Channels", "Return Channels"]
    return trials.groupby(session_cols + ref_stim_cols + channel_config)[
        "Result"
    ].count()
is_valid(block)

Determine if curve data is valid.

Source code in psychoanalyze/data/blocks.py
def is_valid(block: pd.DataFrame) -> bool:
    """Determine if curve data is valid."""
    return any(block["Hit Rate"] > 0.5) & any(block["Hit Rate"] < 0.5)  # noqa: PLR2004
subject_counts(data)

Determine how many subjects are in the data.

Source code in psychoanalyze/data/blocks.py
def subject_counts(data: pd.DataFrame) -> pd.DataFrame:
    """Determine how many subjects are in the data."""
    summary = (
        data.index.get_level_values("Subject").value_counts().rename("Total Blocks")
    )
    summary.index.name = "Subject"
    return summary
fit(trials)

Fit logistic regression to trial data.

Source code in psychoanalyze/data/blocks.py
def fit(trials: pd.DataFrame) -> pd.Series:
    """Fit logistic regression to trial data."""
    fit = LogisticRegression().fit(
        trials[["Intensity"]],
        trials["Result"],
    )
    intercept = fit.intercept_[0]
    slope = fit.coef_[0][0]
    return pd.Series({"intercept": intercept, "slope": slope})
generate_trials(n_trials, model_params)

Generate trials for block-level context.

Source code in psychoanalyze/data/blocks.py
def generate_trials(n_trials: int, model_params: dict[str, float]) -> pd.DataFrame:
    """Generate trials for block-level context."""
    return trials.moc_sample(n_trials, model_params)
from_points(points)

Aggregate block measures from points data.

Source code in psychoanalyze/data/blocks.py
def from_points(points: pd.DataFrame) -> pd.DataFrame:
    """Aggregate block measures from points data."""
    return points.groupby("BlockID")[["n"]].sum()
plot_thresholds(blocks)

Plot longitudinal threshold data.

Parameters:

  • blocks (pd.DataFrame, required): Block-level DataFrame.

Returns:

  • go.Figure: A plotly Graph Object.

Source code in psychoanalyze/data/blocks.py
def plot_thresholds(blocks: pd.DataFrame) -> go.Figure:
    """Plot longitudinal threshold data.

    Args:
        blocks: Block-level DataFrame.

    Returns:
        A plotly Graph Object.
    """
    return px.scatter(
        transform_errors(blocks),
        x="Block",
        y="50%",
        error_y="err+",
        error_y_minus="err-",
        color="Subject",
        color_discrete_map={"U": "#e41a1c", "Y": "#377eb8", "Z": "#4daf4a"},
        template=template,
    )
transform_errors(fit)

Transform errors from absolute to relative.

Source code in psychoanalyze/data/blocks.py
def transform_errors(fit: pd.DataFrame) -> pd.DataFrame:
    """Transform errors from absolute to relative."""
    fit["err+"] = fit["95%"] - fit["50%"]
    fit["err-"] = fit["50%"] - fit["5%"]
    return fit.drop(columns=["95%", "5%"])
reshape_fit_results(fits, x, y)

Reshape fit params for plotting.

Source code in psychoanalyze/data/blocks.py
def reshape_fit_results(fits: pd.DataFrame, x: pd.Index, y: str) -> pd.DataFrame:
    """Reshape fit params for plotting."""
    rows = [f"{y}[{i}]" for i in range(1, len(x) + 1)]
    param_fits = fits.loc[
        rows,  # row eg 'p[1]:p[8]'
        ["5%", "50%", "95%"],  # col
    ]
    param_fits = transform_errors(param_fits)
    param_fits = param_fits.rename(columns={"50%": y})
    param_fits.index = x
    return param_fits
standard_logistic()

Generate points for a line trace of a standard logistic function.

Source code in psychoanalyze/data/blocks.py
def standard_logistic() -> pd.Series:
    """Generate points for a line trace of a standard logistic function."""
    x = pd.Index(np.linspace(-3, 3, 100), name="x")
    y = expit(x)
    return pd.Series(y, index=x, name="f(x)")
logistic(location, scale)

Generate points for a line trace of a logistic function.

Source code in psychoanalyze/data/blocks.py
def logistic(location: float, scale: float) -> pd.Series:
    """Generate points for a line trace of a logistic function."""
    x_min = location - 4 * scale  # span ±4 scale units around the location
    x_max = location + 4 * scale
    x = pd.Index(np.linspace(x_min, x_max, 100), name="Intensity")
    y = expit((x - location) / scale)
    return pd.Series(y, index=x, name="Ψ(x)")
plot_logistic(location, scale)

Plot a logistic function.

Parameters:

  • location (float, required): x₀ in the location-scale parameterization; corresponds to the threshold.
  • scale (float, required): σ; corresponds to the width of the curve (inverse of slope).

Returns:

  • go.Scatter: A Plotly figure of the psychometric function with a logistic link function.

Source code in psychoanalyze/data/blocks.py
def plot_logistic(location: float, scale: float) -> go.Scatter:
    """Plot a logistic function.

    Parameters:
        location: x₀ in the location-scale parameterization, corresponds to threshold.
        scale: σ corresponds to the width of the curve (inverse of slope)

    Returns:
        A Plotly figure of the psychometric function with a logistic link function.
    """
    return px.line(
        logistic(location, scale),
        y="Ψ(x)",
        template="plotly_white",
    )
plot_standard_logistic()

Plot a standard logistic function.

Source code in psychoanalyze/data/blocks.py
def plot_standard_logistic() -> go.Scatter:
    """Plot a standard logistic function."""
    return px.line(
        standard_logistic(),
        y="f(x)",
        template="plotly_white",
        title="$f(x) = \\frac{1}{1 + e^{-x}}$",
    )

trials

Functions for data manipulations at the trial level.

generate_trial_index(n_trials, options)

Generate n trials (no outcomes).

Source code in psychoanalyze/data/trials.py
def generate_trial_index(n_trials: int, options: pd.Index) -> pd.Index:
    """Generate n trials (no outcomes)."""
    return pd.Index(
        [random.choice(options) for _ in range(n_trials)],
        name="Intensity",
    )
sample_trials(trials_ix, params)

Sample trials from a given index.

Source code in psychoanalyze/data/trials.py
def sample_trials(trials_ix: pd.Index, params: dict[str, float]) -> pd.Series:
    """Sample trials from a given index."""
    return pd.Series(
        [int(random.random() <= psi(x, params)) for x in trials_ix],
        index=trials_ix,
        name="Result",
    )
generate(n_trials, options, params, n_blocks)

Generate n trials with outcomes.

Source code in psychoanalyze/data/trials.py
def generate(
    n_trials: int,
    options: pd.Index,
    params: dict[str, float],
    n_blocks: int,
) -> pd.DataFrame:
    """Generate n trials with outcomes."""
    return pd.concat(
        {
            i: sample_trials(
                trials_ix=generate_trial_index(n_trials, options),
                params=params,
            )
            for i in range(n_blocks)
        },
        names=["Block"],
    ).reset_index()
load(data_path)

Load trials data from csv.

Source code in psychoanalyze/data/trials.py
def load(data_path: Path) -> pd.DataFrame:
    """Load trials data from csv."""
    return types.trials.validate(
        pd.read_csv(
            data_path,
            dtype={
                "Result": int,
                "Intensity": float,
                "Block": int,
            },
        ),
    )
from_store(store_data)

Convert JSON-formatted string to DataFrame.

Source code in psychoanalyze/data/trials.py
def from_store(store_data: str) -> pd.DataFrame:
    """Convert JSON-formatted string to DataFrame."""
    df_dict = json.loads(store_data)
    index_names = df_dict.pop("index_names")
    index = pd.MultiIndex.from_tuples(df_dict["index"])
    trials = pd.DataFrame({"Result": df_dict["data"][0]}, index=index)
    trials.index.names = index_names
    return types.trials.validate(trials)
to_store(trials)

Convert data to a JSON-formatted string for dcc.Store.

Source code in psychoanalyze/data/trials.py
def to_store(trials: pd.DataFrame) -> str:
    """Convert data to a JSON-formatted string for dcc.Store."""
    data_dict = trials.to_dict(orient="split")
    data_dict["index_names"] = types.points_index_levels
    return json.dumps(data_dict)
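
to_store and from_store are intended as a round trip through the JSON string held in a dcc.Store. A hedged sketch; trials_df stands in for a hypothetical trials frame indexed by the levels named in types.points_index_levels:

from psychoanalyze.data import trials as pa_trials

stored = pa_trials.to_store(trials_df)   # trials_df: hypothetical trials frame
restored = pa_trials.from_store(stored)  # validated trials DataFrame
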
normalize(trials)

Normalize denormalized trial data.

Source code in psychoanalyze/data/trials.py
def normalize(trials: pd.DataFrame) -> dict[str, pd.DataFrame]:
    """Normalize denormalized trial data."""
    return {
        "Session": trials[["Monkey", "Block"]].drop_duplicates(),
        "Reference Stimulus": trials[["Amp2", "Width2", "Freq2", "Dur2"]],
        "Channel Config": trials[["Active Channels", "Return Channels"]],
        "Test Stimulus": trials[["Amp1", "Width1", "Freq1", "Dur1"]],
    }
result(p)

Return a trial result given a probability p.

Source code in psychoanalyze/data/trials.py
def result(p: float) -> bool:
    """Return a trial result given a probability p."""
    return random.random() < p
results(n, p_x)

Return a list of trial results in dict format.

Source code in psychoanalyze/data/trials.py
def results(n: int, p_x: pd.Series) -> list[Trial]:
    """Return a list of trial results in dict format."""
    results = []
    for _ in range(n):
        stimulus_magnitude = random.choice(p_x.index.to_list())
        _result = result(p_x[stimulus_magnitude])
        results.append(
            Trial(
                {
                    "Stimulus Magnitude": stimulus_magnitude,
                    "Result": _result,
                },
            ),
        )
    return results
labels(results)

Convert a list of outcome codes to their labels.

Source code in psychoanalyze/data/trials.py
def labels(results: list[int]) -> list[str]:
    """Convert a list of outcome codes to their labels."""
    return [codes[result] for result in results]
psi(intensity, params)

Calculate the value of the psychometric function for a given intensity.

Source code in psychoanalyze/data/trials.py
def psi(intensity: float, params: dict[str, float]) -> float:
    """Calculate the value of the psychometric function for a given intensity."""
    gamma = params["gamma"]
    lambda_ = params["lambda"]
    k = params["k"]
    x_0 = params["x_0"]
    return gamma + (1 - gamma - lambda_) * (1 / (1 + np.exp(-k * (intensity - x_0))))
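
For instance, with gamma = lambda = 0, k = 1, and x_0 = 0 this reduces to the standard logistic, so psi(0) evaluates to 0.5:

from psychoanalyze.data.trials import psi

p = psi(0.0, {"gamma": 0.0, "lambda": 0.0, "k": 1.0, "x_0": 0.0})  # 0.5
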
moc_sample(n_trials, model_params)

Sample results from a method-of-constant-stimuli experiment.

Source code in psychoanalyze/data/trials.py
def moc_sample(n_trials: int, model_params: dict[str, float]) -> pd.DataFrame:
    """Sample results from a method-of-constant-stimuli experiment."""
    x_0 = model_params["x_0"]
    k = model_params["k"]
    intensity_choices = np.linspace(x_0 - 4 / k, x_0 + 4 / k, 7)
    intensities = [float(random.choice(intensity_choices)) for _ in range(n_trials)]
    intensity_index = pd.Index(intensities, name="Intensity")
    results = [
        int(random.random() <= psi(intensity, model_params))
        for intensity in intensities
    ]
    return pd.DataFrame(
        {"Result": pd.Series(results, dtype=int)},
        index=intensity_index,
    )
fit(trials)

Fit trial data using logistic regression.

Source code in psychoanalyze/data/trials.py
def fit(trials: pd.DataFrame) -> dict[str, float]:
    """Fit trial data using logistic regression."""
    fits = LogisticRegression().fit(trials[["Intensity"]], trials["Result"])
    return {"Threshold": -fits.intercept_[0], "Slope": fits.coef_[0][0]}

sessions

Utilities for session-level data.

Sessions represent a single day of experiments performed by a subject; a session may contain several blocks.

generate(n)

Generate session-level data.

Source code in psychoanalyze/data/sessions.py
def generate(n: int) -> list[int]:
    """Generate session-level data."""
    return list(range(n))
cache_results(sessions)

Save session data to csv.

Source code in psychoanalyze/data/sessions.py
def cache_results(sessions: pd.DataFrame) -> None:
    """Save session data to csv."""
    sessions.to_csv("data/normalized/sessions.csv", index=False)
from_trials_csv(path)

Aggregate to session level from trial-level data.

Source code in psychoanalyze/data/sessions.py
def from_trials_csv(path: Path) -> pd.DataFrame:
    """Aggregate to session level from trial-level data."""
    return pd.read_csv(path)[["Monkey", "Date"]].drop_duplicates()
day_marks(subjects, sessions, monkey)

Calculate days since surgery date for a given subject.

Source code in psychoanalyze/data/sessions.py
def day_marks(subjects: pd.DataFrame, sessions: pd.DataFrame, monkey: str) -> dict:
    """Calculate days since surgery date for a given subject."""
    surgery_date = pd.to_datetime(
        subjects.loc[subjects["Monkey"] == monkey, "Surgery Date"],
    ).iloc[0]
    sessions = sessions[sessions["Monkey"] == monkey].copy()
    sessions["Days"] = (pd.to_datetime(sessions["Date"]) - surgery_date).dt.days
    return {sessions.loc[i, "Days"]: sessions.loc[i, "Date"] for i in sessions.index}
days(sessions, subjects)

Calculate days since surgery date.

Source code in psychoanalyze/data/sessions.py
def days(sessions: pd.DataFrame, subjects: pd.DataFrame) -> pd.Series:
    """Calculate days since surgery date."""
    sessions_subjects = sessions.join(subjects, on="Monkey")
    return (
        pd.to_datetime(sessions_subjects.index.get_level_values("Date"))
        - sessions_subjects["Surgery Date"]
    ).dt.days
n_trials(trials)

Count trials per session.

Source code in psychoanalyze/data/sessions.py
def n_trials(trials: pd.DataFrame) -> pd.DataFrame:
    """Count trials per session."""
    return trials.groupby(["Monkey", "Date"])[["Result"]].count()
load(data_dir)

Load session-level data from csv.

Source code in psychoanalyze/data/sessions.py
def load(data_dir: Path) -> pd.DataFrame:
    """Load session-level data from csv."""
    return pd.read_csv(data_dir / "sessions.csv", index_col=["Monkey", "Date"])
generate_trials(n_trials, model_params, n_days)

Generate trial-level data for session-level context.

Source code in psychoanalyze/data/sessions.py
def generate_trials(
    n_trials: int,
    model_params: dict[str, float],
    n_days: int,
) -> pd.DataFrame:
    """Generate trial-level data for session-level context."""
    return pd.concat(
        {day: blocks.generate_trials(n_trials, model_params) for day in range(n_days)},
        names=["Block"],
    )

types

Pandera schemas for psychoanalyze dataframes.

Contains data table schemas of the hierarchical entities described above.

PsiAnimation

Bases: DataFrameModel

Pandera type for psychometric function animation dataset.

Source code in psychoanalyze/data/types.py
class PsiAnimation(DataFrameModel):
    """Pandera type for psychometric function animation dataset."""

    trial_id: typing.Series[int]
    intensity: typing.Series[float]
    hit_rate: typing.Series[float]
PsiAnimationFrame

Bases: DataFrameModel

Pandera type for a single psychometric function animation frame.

Source code in psychoanalyze/data/types.py
class PsiAnimationFrame(DataFrameModel):
    """Pandera type for a single psychometric function animation frame."""

    intensity: typing.Series[float]
    hit_rate: typing.Series[float]
Blocks

Bases: DataFrameModel

Blocks type for Pandera.

Source code in psychoanalyze/data/types.py
class Blocks(DataFrameModel):
    """Blocks type for Pandera."""

    slope: float
    threshold: float
Points

Bases: DataFrameModel

Pandera data type.

Source code in psychoanalyze/data/types.py
class Points(DataFrameModel):
    """Pandera data type."""

    n: int
    Hits: int
    block_id: int
Trials

Bases: DataFrameModel

Trials data type for pandera + mypy type checking.

Source code in psychoanalyze/data/types.py
class Trials(DataFrameModel):
    """Trials data type for pandera + mypy type checking."""

    result: int
    intensity: typing.Index[float]
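
The models validate dataframes directly. A minimal sketch with the Trials model (column and index names lower-case, as declared above):

import pandas as pd

from psychoanalyze.data.types import Trials

df = pd.DataFrame(
    {"result": [0, 1, 1]},
    index=pd.Index([0.5, 1.0, 1.5], name="intensity"),
)
Trials.validate(df)  # raises a pandera SchemaError if the frame does not conform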

dashboard

The PsychoAnalyze dashboard is powered by Plotly/Dash.

  • app.py is the main file; it contains the infrastructure for the Dash app and the callback functions in their entirety.

  • layout.py specifies the layout of the HTML and Dash components that comprise the app.

  • components.py contains more complex and/or reusable components used in the app.

app

Main Dash app file.

Contains callbacks.

update_trials(contents, filename, n_param, param)

Update the trials store and the x-range bounds.

Source code in psychoanalyze/dashboard/app.py
@callback(
    Output("trials-store", "data"),
    Output({"type": "x-param", "name": "min"}, "value"),
    Output({"type": "x-param", "name": "max"}, "value"),
    Input("upload", "contents"),
    State("upload", "filename"),
    Input({"type": "n-param", "name": ALL}, "value"),
    Input({"type": "param", "name": ALL}, "value"),
)
def update_trials(
    contents: str,
    filename: str,
    n_param: list[int],
    param: list[float],
) -> tuple[Records, float, float]:
    """Update points table."""
    n_params = pd.Series(n_param, index=["n_levels", "n_trials", "n_blocks"])
    params = pd.Series([*param, 0.0, 0.0], index=["x_0", "k", "gamma", "lambda"])
    params["intercept"] = to_intercept(params["x_0"], params["k"])
    params["slope"] = to_slope(params["k"])
    min_x = (logit(0.01) - params["intercept"]) / params["slope"]
    max_x = (logit(0.99) - params["intercept"]) / params["slope"]
    if callback_context.triggered_id == "upload":
        trials = process_upload(contents, filename)
    else:
        trials = generate(
            n_trials=n_params["n_trials"],
            options=generate_index(n_params["n_levels"], [min_x, max_x]),
            params=params.to_dict(),
            n_blocks=n_params["n_blocks"],
        )
    return trials.to_dict("records"), min_x, max_x
update_points_table(trials)

Update points table.

Source code in psychoanalyze/dashboard/app.py
@callback(
    Output("points-store", "data"),
    Input("trials-store", "data"),
)
def update_points_table(trials: Records) -> Records:
    """Update points table."""
    trials_df = pd.DataFrame.from_records(trials)
    trials_df["Intensity"] = trials_df["Intensity"].astype(float)
    points = pa_points.from_trials(trials_df)
    return points.to_dict("records")
update_blocks_table(trials)

Update blocks table.

Source code in psychoanalyze/dashboard/app.py
@callback(
    Output("blocks-table", "data"),
    Input("trials-store", "data"),
)
def update_blocks_table(trials: Records) -> Records:
    """Update blocks table."""
    trials_df = pd.DataFrame.from_records(trials)

    blocks = trials_df.groupby("Block").apply(pa_blocks.fit).reset_index()
    blocks["gamma"] = 0.0
    blocks["lambda"] = 0.0
    return blocks.to_dict("records")
filter_points(selected_rows, points)

Filter points table.

Source code in psychoanalyze/dashboard/app.py
@callback(
    Output("points-table", "data"),
    Input("blocks-table", "derived_virtual_selected_rows"),
    Input("points-store", "data"),
)
def filter_points(
    selected_rows: list[int],
    points: Records,
) -> Records:
    """Filter points table."""
    points_df = pd.DataFrame.from_records(points)
    return (
        points_df[points_df["Block"].isin(selected_rows)].to_dict("records")
        if selected_rows
        else points_df.to_dict("records")
    )
update_fig(param, points, blocks, selected_rows, min_x, max_x)

Update plot and tables based on data store and selected view.

Source code in psychoanalyze/dashboard/app.py
@callback(
    Output("plot", "figure"),
    Input({"type": "param", "name": ALL}, "value"),
    Input("points-table", "data"),
    Input("blocks-table", "data"),
    Input("blocks-table", "derived_virtual_selected_rows"),
    State({"type": "x-param", "name": "min"}, "value"),
    State({"type": "x-param", "name": "max"}, "value"),
)
def update_fig(  # noqa: PLR0913
    param: list[float],
    points: Records,
    blocks: Records,
    selected_rows: list[int],
    min_x: float,
    max_x: float,
) -> go.Figure:
    """Update plot and tables based on data store and selected view."""
    param = [*param, 0.0, 0.0]
    params = pd.Series(param, index=["x_0", "k", "gamma", "lambda"])
    params["intercept"] = -params["x_0"] / params["k"]
    params["slope"] = 1 / params["k"]
    blocks = [blocks[i] for i in selected_rows] + [
        {"Block": "Model"} | params.to_dict(),
    ]
    x = pd.Index(
        np.linspace(min_x, max_x, 100),
        name="Intensity",
    )
    fits = pd.concat(
        {
            block["Block"]: pd.Series(
                expit(x.to_numpy() * block["slope"] + block["intercept"]),
                name="Hit Rate",
                index=x,
            )
            for block in blocks
        },
        names=["Block"],
    ).reset_index()
    fits["Block"] = fits["Block"].astype(str)
    points_df = pd.DataFrame.from_records(points)
    points_df["Block"] = points_df["Block"].astype(str)
    fits_fig = px.line(
        fits,
        x="Intensity",
        y="Hit Rate",
        color="Block",
    )
    results_fig = px.scatter(
        points_df,
        x="Intensity",
        y="Hit Rate",
        size="n trials",
        color="Block",
        template="plotly_white",
    )
    return results_fig.add_traces(fits_fig.data)
export_image(export_clicked, fig)

Export image.

Source code in psychoanalyze/dashboard/app.py
@callback(
    Output("img-download", "data"),
    Input({"type": "img-export", "name": ALL}, "n_clicks"),
    State("plot", "figure"),
    prevent_initial_call=True,
)
def export_image(
    export_clicked: int,  # noqa: ARG001
    fig: go.Figure,
) -> dict[str, str | bool | bytes]:
    """Export image."""
    format_suffix = callback_context.triggered_id["name"]
    return {
        "base64": True,
        "content": base64.b64encode(
            go.Figure(fig)
            .update_layout(showlegend=False)
            .to_image(format=format_suffix, width=500, height=500),
        ).decode("utf-8"),
        "filename": f"fig.{format_suffix}",
    }
export_data(export_clicked, points, blocks, trials)

Export data.

Source code in psychoanalyze/dashboard/app.py
@callback(
    Output("data-download", "data"),
    Input({"type": "data-export", "name": ALL}, "n_clicks"),
    State("points-table", "data"),
    State("blocks-table", "data"),
    State("trials-store", "data"),
    prevent_initial_call=True,
)
def export_data(
    export_clicked: int,  # noqa: ARG001
    points: Records,
    blocks: Records,
    trials: Records,
) -> dict[str, Any | None]:
    """Export image."""
    format_suffix = callback_context.triggered_id["name"]
    points_df = pd.DataFrame.from_records(points)
    blocks_df = pd.DataFrame.from_records(blocks)
    trials_df = pd.DataFrame.from_records(trials)
    if format_suffix == "csv":
        zip_buffer = io.BytesIO()

        with zipfile.ZipFile(
            zip_buffer,
            mode="a",
            compression=zipfile.ZIP_DEFLATED,
            allowZip64=False,
        ) as zip_file:

            def write_file(df: pd.DataFrame, name: str) -> None:
                buffer = io.StringIO()
                df.to_csv(buffer, index=False)
                zip_file.writestr(name, buffer.getvalue())

            data = {
                "points": points_df,
                "blocks": blocks_df,
                "trials": trials_df,
            }
            for level, level_df in data.items():
                write_file(level_df, f"{level}.csv")

        zip_buffer.seek(0)
        zip_bytes = zip_buffer.read()

        base64_bytes = base64.b64encode(zip_bytes)
        base64_string = base64_bytes.decode("utf-8")
        timestamp = datetime.now(tz=pytz.timezone("America/Chicago")).strftime(
            "%Y-%m-%d_%H%M",
        )
        download = {
            "base64": True,
            "content": base64_string,
            "filename": f"{timestamp}_psychoanalyze.zip",
        }

    elif format_suffix == "json":
        download = dcc.send_data_frame(points_df.to_json, "data.json")
    elif format_suffix == "parquet":
        download = dcc.send_data_frame(points_df.to_parquet, "data.parquet")
    elif format_suffix == "duckdb":
        connection = duckdb.connect("psychoanalyze.duckdb")
        # duckdb resolves the local DataFrame points_df via its replacement scan
        connection.sql("CREATE TABLE points AS SELECT * FROM points_df")
        connection.close()
        with Path("psychoanalyze.duckdb").open("rb") as f:
            download = {
                "base64": True,
                "content": base64.b64encode(f.read()).decode("utf-8"),
                "filename": f"psychoanalyze.{format_suffix}",
            }

    return download
toggle_eqn(n_clicks)

Toggle equation.

Source code in psychoanalyze/dashboard/app.py
@callback(
    Output("F-eqn", "is_open"),
    Output("show-eqn", "children"),
    Output("plot-equation", "children"),
    Input("show-eqn", "n_clicks"),
)
def toggle_eqn(n_clicks: int) -> tuple[bool, str, str]:
    """Toggle equation."""
    equation_abstracted = """
    $$
    \\psi(x) = \\gamma + (1 - \\gamma - \\lambda)F(x)
    $$
    """
    if n_clicks:
        n_clicks_is_even = n_clicks % 2 == 0
        label = "Show F(x) ▾ " if n_clicks_is_even else "Hide F(x) ▴ "
        equation_expanded = """
        $$
        \\psi(x) = \\gamma + \\frac{1 - \\gamma - \\lambda}{1 + e^{-k(x - x_0)}}
        $$
        """
        equation = equation_abstracted if n_clicks_is_even else equation_expanded
        return not n_clicks_is_even, label, equation
    return False, "Show F(x) ▾ ", equation_abstracted
toggle_param(free)

Toggle parameter.

Source code in psychoanalyze/dashboard/app.py
@callback(
    Output({"type": "param", "name": MATCH}, "disabled"),
    Input({"type": "free", "name": MATCH}, "value"),
    prevent_initial_call=True,
)
def toggle_param(free: bool) -> bool:  # noqa: FBT001
    """Toggle parameter."""
    return not free
set_params_to_preset(preset, param)

Set parameters to preset values.

Source code in psychoanalyze/dashboard/app.py
@callback(
    Output({"type": "param", "name": ALL}, "value"),
    Input("preset", "value"),
    State({"type": "param", "name": ALL}, "value"),
    prevent_initial_call=True,
)
def set_params_to_preset(
    preset: str,
    param: list[float],
) -> list[float]:
    """Set parameters to preset values."""
    presets = {
        "standard": [0.0, 1.0, 0.0, 0.0],
        "non-standard": [10, 2, 0.2, 0.1],
        "2AFC": [0, 1, 0.5, 0.0],
    }
    return presets.get(preset, param)
set_fixed_params_to_preset(preset, param)

Set parameters to preset values.

Source code in psychoanalyze/dashboard/app.py
@callback(
    Output({"type": "free", "name": ALL}, "value"),
    Input("preset", "value"),
    State({"type": "free", "name": ALL}, "value"),
    prevent_initial_call=True,
)
def set_fixed_params_to_preset(
    preset: str,
    param: list[bool],
) -> list[bool]:
    """Set parameters to preset values."""
    presets = {
        "standard": [True] * 4,
        "non-standard": [True] * 4,
        "2AFC": [True, True, False, True],
    }
    return presets.get(preset, param)
update_page_size(n_levels)

Update page size.

Source code in psychoanalyze/dashboard/app.py
@callback(
    Output("points-table", "page_size"),
    Input({"type": "n-param", "name": "n-levels"}, "value"),
)
def update_page_size(n_levels: int) -> int:
    """Update page size."""
    return n_levels

utils

Helper functions for the dashboard.

process_upload(contents, filename)

Process a file upload.

Parameters:

  • contents (str, required): The contents of the uploaded file.
  • filename (str, required): The name of the uploaded file.

Returns:

  • pd.DataFrame: A dataframe of the uploaded file.

Source code in psychoanalyze/dashboard/utils.py
def process_upload(contents: str, filename: str) -> pd.DataFrame:
    """Process a file upload.

    Params:
        contents: The contents of the uploaded file.
        filename: The name of the uploaded file.

    Returns:
        A dataframe of the uploaded file.
    """
    _, content_string = contents.split(",")
    decoded = base64.b64decode(content_string)
    if "zip" in filename:
        with zipfile.ZipFile(io.BytesIO(decoded)) as z:
            trials = pd.read_csv(z.open("trials.csv"))
    else:
        trials = pd.read_csv(io.StringIO(decoded.decode("utf-8")))
    return trials
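
Dash delivers uploads as a base64 data-URL string, which is what this helper expects. A hedged sketch of a CSV upload round trip:

import base64

from psychoanalyze.dashboard.utils import process_upload

csv_bytes = b"Block,Intensity,Result\n0,1.0,1\n0,1.5,0\n"
contents = "data:text/csv;base64," + base64.b64encode(csv_bytes).decode("utf-8")
trials = process_upload(contents, "trials.csv")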

components

Components for Dash dashboard.

layout

Layout for Dash dashboard.

analysis

Data structures and manipulation methods.

strength_duration

Strength-duration analysis.

Contains functions assessing the relationship between the amplitude and the time course of the stimulus.

from_blocks(blocks, dim)

Calculate strength-duration measures from block data.

Source code in psychoanalyze/analysis/strength_duration.py
def from_blocks(blocks: pd.DataFrame, dim: str) -> pd.DataFrame:
    """Calculate strength-duration measures from block data."""
    if dim == "Amp":
        ylabel = "Threshold Amplitude (μA)"
        xlabel = "Fixed Pulse Width (μs)"
    elif dim == "Width":
        ylabel = "Fixed Amplitude (μA)"
        xlabel = "Threshold Pulse Width (μs)"

    blocks[ylabel] = blocks["Threshold"]
    blocks[xlabel] = blocks["Fixed Magnitude"]
    return blocks.drop(columns=["Threshold", "Fixed Magnitude"])
plot(blocks, dim, x_data, y_data)

Plot strength-duration curve given detection data.

Source code in psychoanalyze/analysis/strength_duration.py
def plot(
    blocks: pd.DataFrame,
    dim: str,
    x_data: list[float],
    y_data: list[float],
) -> go.Figure:
    """Plot strength-duration curve given detection data."""

    def _get_labels_given_dim(
        labels: dict[str, dict[str, str]],
        dim: str,
    ) -> dict[str, str]:
        """Get appropriate axis labels for different choices of modulated dimension."""
        return {"x": labels[dim]["x"], "y": labels[dim]["y"]}

    labels_given_dim = _get_labels_given_dim(labels=labels, dim=dim)
    x = labels_given_dim["x"]
    y = labels_given_dim["y"]
    if blocks is not None:
        sd_df = blocks[blocks["Dimension"] == dim]
    else:
        sd_df = pd.DataFrame({x: x_data, y: y_data})
    return px.scatter(
        sd_df,
        x=x,
        y=y,
        template=template,
    )

bayes

Bayesian analysis of psychophysical data.

plot(simulated, estimated)

Plot Psychometric curve to emphasize Bayesian posteriors.

Source code in psychoanalyze/analysis/bayes.py
def plot(simulated: pd.DataFrame, estimated: pd.Series) -> go.Figure:
    """Plot Psychometric curve to emphasize Bayesian posteriors."""
    combined = pd.concat(
        [simulated.reset_index(), estimated.reset_index()],
        keys=["Simulated", "Estimated"],
        names=["Type"],
    )
    return px.scatter(
        combined.reset_index(),
        x="x",
        y="Hit Rate",
        color="Type",
        template="plotly_white",
    )

ecdf

Empirical Distribution Functions (eCDF).

plot(blocks, param)

Plot the empirical cumulative distribution function (eCDF) of fitted params.

Source code in psychoanalyze/analysis/ecdf.py
def plot(blocks: pd.DataFrame, param: str) -> go.Figure:
    """Plot empirical cumulative distrubtion function (eCDF) of fitted params."""
    return px.ecdf(
        blocks.reset_index(),
        x=param,
        color=blocks.get("Monkey"),
    ).update_layout(xaxis_title=param)

weber

Functions related to Weber's Law analysis.

Contains functions assessing how discriminability of two stimuli relates to the baseline intensities of the stimuli according to Weber's Law.

plot(data, trendline='ols', error_y=None)

Plot data according to Weber's Law.

Source code in psychoanalyze/analysis/weber.py
def plot(
    data: pd.DataFrame,
    trendline: str = "ols",
    error_y: str | None = None,
) -> go.Figure:
    """Plot data according to Weber's Law."""
    _trendline = "ols" if trendline else None
    return px.scatter(
        data.reset_index(),
        x="Reference Charge (nC)",
        y="Difference Threshold (nC)",
        error_y="err+" if error_y == "error bars" else None,
        error_y_minus="err-" if error_y == "error bars" else None,
        color="Monkey",
        symbol="Dimension",
        trendline=_trendline,
        template="plotly_white",
        hover_data=["Date"],
    )
aggregate(data)

Calculate agg stats for Weber data.

Source code in psychoanalyze/analysis/weber.py
def aggregate(data: pd.DataFrame) -> pd.DataFrame:
    """Calculate agg stats for Weber data."""
    return (
        data.groupby(["Monkey", "Dimension", "Reference Charge (nC)"])[
            "Difference Threshold (nC)"
        ]
        .agg(["mean", "count", "std"])
        .rename(columns={"mean": "Difference Threshold (nC)"})
    )
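
A hedged sketch of the aggregation, assuming a tidy frame with the columns named above:

import pandas as pd

from psychoanalyze.analysis import weber

data = pd.DataFrame(
    {
        "Monkey": ["U", "U", "Y"],
        "Dimension": ["Amp", "Amp", "Width"],
        "Reference Charge (nC)": [10.0, 20.0, 10.0],
        "Difference Threshold (nC)": [1.0, 2.2, 0.9],
    },
)
summary = weber.aggregate(data)  # mean, count, std per condition
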
load(path)

Load weber file from a csv.

Source code in psychoanalyze/analysis/weber.py
def load(path: Path) -> pd.DataFrame:
    """Load weber file from a csv."""
    weber = pd.read_csv(path, parse_dates=["Date"])
    # err+ / err- follow blocks.transform_errors: distance from the median
    # up to the 95% bound and down to the 5% bound, respectively.
    weber["err+"] = (
        weber["location_CI_95"] * weber["Fixed_Param_Value"] / 1000
    ) - weber["Threshold_Charge_nC"]
    weber["err-"] = weber["Threshold_Charge_nC"] - (
        weber["location_CI_5"] * weber["Fixed_Param_Value"] / 1000
    )
    return weber
    return weber