diff --git a/notebooks/Untitled.ipynb b/notebooks/Untitled.ipynb new file mode 100644 index 00000000..e5cb1aac --- /dev/null +++ b/notebooks/Untitled.ipynb @@ -0,0 +1,1131 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "e0e588be-6a7a-486b-814e-0ccc5666ee8a", + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2\n", + "%matplotlib widget" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "a913cd4d-2231-4b75-b264-f3ca9f7c224a", + "metadata": {}, + "outputs": [], + "source": [ + "import ompy as om\n", + "import numpy as np\n", + "import pymc3 as pm\n", + "import arviz as az\n", + "import theano\n", + "import theano.tensor as tt\n", + "import matplotlib.pyplot as plt\n", + "import logging\n", + "from scipy.interpolate import interp1d\n", + "from model_op import calculate_FG, LogLike, LogLike2, FG_loglike, loglike" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "a786c35d-dfcd-4818-9f3b-a431906fede9", + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "c6a686c416db4128a690bd72cd007172", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "(,\n", + " ,\n", + "
)" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "raw = om.example_raw('Dy164')\n", + "\n", + "raw.cut_diagonal(E1=(800, 0), E2=(7500, 7300))\n", + "raw.cut('Ex', 0, 8400)\n", + "\n", + "raw.values = np.around(raw.values)\n", + "raw.remove_negative()\n", + "raw.plot()" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "1108ecc3-15e2-4b38-8fbb-d0cf0c73a144", + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "01a60d6b193f49989fef9f8b7a91be0f", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "(,\n", + " ,\n", + "
)" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "folderpath = \"../OCL_response_functions/nai2012_for_opt13\"\n", + "Eg = raw.Eg\n", + "\n", + "# Experimental relative FWHM at 1.33 MeV of resulting array\n", + "fwhm_abs = 90.44 # (90/1330 = 6.8%)\n", + "\n", + "# Magne recommends 1/10 of the actual resolution for unfolding purposes\n", + "response = om.Response(folderpath)\n", + "R_ompy_unf, R_tab_unf = response.interpolate(Eg, fwhm_abs=fwhm_abs/10, return_table=True)\n", + "R_ompy_view, _ = response.interpolate(Eg, fwhm_abs=fwhm_abs, return_table=True)\n", + "fthreshold = interp1d([30., 80., 122., 183., 244., 294., 344., 562., 779., 1000.],\n", + " [0.0, 0.0, 0.0, 0.06, 0.44, 0.60, 0.87, 0.99, 1.00, 1.00],\n", + " fill_value=\"extrapolate\")\n", + "\n", + "def apply_detector_threshold(response, table, fthreshold):\n", + " thres = fthreshold(response.Eg)\n", + " response.values = response.values * thres\n", + " # renormalize\n", + " response.values = om.div0(response.values, response.values.sum(axis=1)[:, np.newaxis])\n", + " table[\"eff_tot\"] *= thres\n", + "apply_detector_threshold(R_ompy_unf, R_tab_unf, fthreshold)\n", + "R_ompy_view.plot(scale='log', vmin=1e-4)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "cbf55e7d-1864-40ea-bdd5-5137eeeefe0b", + "metadata": {}, + "outputs": [], + "source": [ + "# With compton subtraction and all tweaks\n", + "unfolder= om.Unfolder(response=R_ompy_unf)\n", + "unfolder.response_tab = R_tab_unf\n", + "# Magne suggests some \"tweaks\" for a better unfolding performance. Default is 1 for all.\n", + "unfolder.FWHM_tweak_multiplier = {\"fe\": 1., \"se\": 1.1,\n", + " \"de\": 1.3, \"511\": 0.9}\n", + "firstgen = om.FirstGeneration()" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "86ee14f3-1329-4958-9b43-a897d62440f3", + "metadata": { + "collapsed": true, + "jupyter": { + "outputs_hidden": true + }, + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-11-12 14:48:18,745 - ompy.ensemble - INFO - Start normalization with 7 cpus\n", + "2021-11-12 14:48:18,819 - ompy.ensemble - INFO - Generating/loading 0\n", + "2021-11-12 14:48:18,851 - ompy.ensemble - INFO - Generating/loading 1\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "8ca2291bcbdb437d85066cfce69b0c91", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + " 0%| | 0/25 [00:00,\n", + " ,\n", + "
)" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "firstgen.plot()\n", + "firstgen_std.plot()" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "86d9b1ab-0efa-4989-bab8-e9c058728f94", + "metadata": {}, + "outputs": [], + "source": [ + "# We run the extractor on the ensemble just to have a reference" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "5978d341-d93c-4c63-bf4a-300fcf684415", + "metadata": { + "collapsed": true, + "jupyter": { + "outputs_hidden": true + }, + "tags": [] + }, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "05719e8ec60e48a3b2a2f45a2d5efcc4", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + " 0%| | 0/25 [00:00= 0]\n", + "# Keep only unique\n", + "Ef = np.unique(Ef)\n", + "\n", + "# Next we can get the index for NLDs\n", + "index_NLD = []\n", + "for i, ex in enumerate(firstgen.Ex):\n", + " idx = []\n", + " for j, eg in enumerate(firstgen.Eg):\n", + " if ex - eg < 0:\n", + " idx.append(len(Ef))\n", + " continue\n", + " idx.append(i-j+k0)\n", + " index_NLD.append(idx)\n", + "index_NLD = np.array(index_NLD)\n", + "\n", + "# Lastly we can make the index for gSFs\n", + "index_GSF = []\n", + "for i, ex in enumerate(firstgen.Ex):\n", + " idx = []\n", + " for j, eg in enumerate(firstgen.Eg):\n", + " if eg > ex:\n", + " idx.append(len(firstgen.Eg)-1)\n", + " continue\n", + " idx.append(j)\n", + " index_GSF.append(idx)\n", + "index_GSF = np.array(index_GSF)\n", + " \n", + "index_keep = []\n", + "for i, ex in enumerate(firstgen.Ex):\n", + " for j, eg in enumerate(firstgen.Eg):\n", + " if eg > ex:\n", + " continue\n", + " index_keep.append(i*len(firstgen.Eg) + j)\n", + "index_keep = np.array(index_keep)\n", + "\n", + "\n", + "print(index_NLD.shape)\n", + "print(index_GSF.shape)\n", + "print(index_keep)" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "ebd312ab-6962-4327-b289-fec3f7d967df", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[0. 0.2 0.4 0.6 0.8 1. 1.2 1.4 1.6 1.8 2. 2.2 2.4 2.6 2.8 3. 3.2 3.4\n", + " 3.6 3.8 4. 4.2 4.4 4.6 4.8 5. 5.2 5.4 5.6 5.8 6. 6.2 6.4]\n", + "33\n", + "[0 1 2]\n", + "[3 4 5 6 7]\n", + "[ 8 9 10 11 12 13 14]\n", + "[15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31]\n", + "[32]\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "aa26347171e7456fbbe301e6eb5efd23", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "(
, )" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "nld0 = extractor.nld[0].copy()\n", + "gsf0 = extractor.gsf[0].copy()\n", + "\n", + "nld0_no_nan = nld0.copy()\n", + "nld0_no_nan.cut_nan()\n", + "nld0_no_nan.to_MeV()\n", + "\n", + "\n", + "gsf0_no_nan = gsf0.copy()\n", + "gsf0_no_nan.cut_nan()\n", + "gsf0_no_nan.to_MeV()\n", + "\n", + "E_nld = nld0_no_nan.E.copy()\n", + "print(E_nld)\n", + "\n", + "Sn = 7.658 # MeV\n", + "rhoSn = [1.916E+06, 3.503E+05]\n", + "Γ_γ_obs = [113., 13.]\n", + "nld_range_low = [0.6, 1.7]\n", + "nld_range_high = [3., 6.5]\n", + "\n", + "N_ρ = len(nld0_no_nan)\n", + "N_T = len(firstgen.Eg)-1\n", + "print(len(gsf0_no_nan))\n", + "\n", + "# bins that we compare with the discrete\n", + "idx_discrete = np.array(range(nld0_no_nan.index(nld_range_low[0]), nld0_no_nan.index(nld_range_low[1])), dtype=int)\n", + "idx_model = np.array(range(nld0_no_nan.index(nld_range_high[0]), nld0_no_nan.index(nld_range_high[1])), dtype=int)\n", + "idx_pre = np.array(range(idx_discrete[0]), dtype=int)\n", + "idx_mid = np.array(range(idx_discrete[-1]+1, idx_model[0]), dtype=int)\n", + "idx_end = np.array(range(idx_model[-1]+1, N_ρ), dtype=int)\n", + "\n", + "print(idx_pre)\n", + "print(idx_discrete)\n", + "print(idx_mid)\n", + "print(idx_model)\n", + "print(idx_end)\n", + "\n", + "nld_discrete = om.normalizer_nld.load_levels_discrete(\"../example_data/discrete_levels_Dy164.txt\", nld0_no_nan.E)\n", + "nld_discrete.plot(kind='step')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8496f135-83c3-4476-b37f-84533ad2d4c8", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Auto-assigning NUTS sampler...\n", + "Initializing NUTS using jitter+adapt_diag...\n", + "Initializing NUTS failed. Falling back to elementwise auto-assignment.\n", + "Multiprocess sampling (4 chains in 4 jobs)\n", + "Slice: [x]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " 0.00% [0/8000 00:00<00:00 Sampling 4 chains, 0 divergences]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "fg_norm, fg_std_norm = om.extractor.normalize(firstgen, firstgen_std)\n", + "logl = FG_loglike(firstgen, firstgen_std, nld0.E)\n", + "fg_op = LogLike2(logl)\n", + "\n", + "pre_points = np.ones(len(nld0.E) - N_ρ, dtype=float)\n", + "\n", + "with pm.Model() as model:\n", + " \n", + " # Define ρ\n", + " #σ_ρ = om.FermiDirac(\"σ_ρ\", mu=1.2, lam=10., shape=N_ρ)\n", + " \n", + " #ρ0_pre = pm.HalfFlat(\"ρ0_pre\", shape=len(idx_pre))\n", + " #ρ0_mid = pm.HalfFlat(\"ρ0_mid\", shape=len(idx_mid))\n", + " #ρ0_end = pm.HalfFlat(\"ρ0_end\", shape=len(idx_end))\n", + " \n", + " #ρ_pre = pm.Normal(\"ρ_pre\", mu=ρ0_pre, sigma=σ_ρ[idx_pre]*ρ0_pre, shape=len(idx_pre))\n", + " #ρ_mid = pm.Normal(\"ρ_mid\", mu=ρ0_mid, sigma=σ_ρ[idx_mid]*ρ0_mid, shape=len(idx_mid))\n", + " #ρ_end = pm.Normal(\"ρ_end\", mu=ρ0_end, sigma=σ_ρ[idx_end]*ρ0_end, shape=len(idx_end))\n", + " \n", + " #Temp = om.FermiDirac(\"Temp\", mu=2., lam=10.)\n", + " #E0 = pm.Normal(\"E0\", mu=0, sigma=10.)\n", + " \n", + " #ρ_CT = pm.math.exp((E_nld[idx_model] - E0)/Temp)/Temp\n", + " \n", + " \n", + " #ρ_discrete = pm.Normal(\"ρ_discrete\", mu=nld_discrete.values[idx_discrete], \n", + " # sigma=nld_discrete.values[idx_discrete]*σ_ρ[idx_discrete], shape=len(idx_discrete))\n", + " \n", + " #ρ_model = pm.Normal(\"ρ_model\", mu=ρ_CT, sigma=ρ_CT*σ_ρ[idx_model], shape=len(idx_model))\n", + " \n", + " # Next we define ρ proper\n", + " #ρ = pm.math.concatenate((ρ_pre, ρ_discrete, ρ_mid, ρ_model, ρ_end))\n", + " #ρ = pm.HalfFlat(\"ρ\", shape=N_ρ)\n", + " \n", + " # Define transmission coefficients\n", + " #σ_T = om.FermiDirac(\"σ_ρ\", mu=1.2, lam=10., shape=N_T)\n", + " #T = pm.HalfFlat(\"T\", shape=N_T)\n", + " \n", + " # Models\n", + " \n", + " #Γ_γ_mu = pm.math.sum(T) # At the moment we will use a super hacky solution. We do not implement the integral just yet\n", + " \n", + " #ρ_mat = pm.math.concatenate((theano.tensor.as_tensor_variable(pre_points), ρ))\n", + " #T_mat = pm.math.concatenate((T, theano.tensor.as_tensor_variable(np.array([0], dtype=float))))\n", + " x = pm.HalfFlat(\"x\", shape=len(firstgen.Eg)+len(nld0))\n", + " #x = pm.math.concatenate((T_mat, ρ_mat))\n", + " \n", + " #Pth = fg_op(x)\n", + " #Pth = ρ_mat * T_mat\n", + " #Pth /= pm.math.sum(Pth, axis=1, keepdims=True)\n", + " \n", + " # We flatten and remove all that are not\n", + " #mu_FG = pm.math.flatten(Pth)[index_keep]\n", + " \n", + " # Observables\n", + " \n", + " #Γ_γ = pm.Normal(\"Γ_γ\", mu=Γ_γ_mu, sigma=Γ_γ_obs[1], observed=Γ_γ_obs[0])\n", + " #ρ_Sn = pm.Normal(\"ρ_Sn\", mu=pm.math.exp((Sn - E0)/Temp)/Temp, sigma=rhoSn[1], observed=rhoSn[0])\n", + " #P = pm.Normal(\"P\", mu=mu_FG, sigma=fg_std_norm.flatten()[index_keep], observed=fg_norm.flatten()[index_keep])\n", + " P = pm.DensityDist(\"P\", lambda x: fg_op(x), observed={'x': x})\n", + " \n", + " # Sample\n", + " trace = pm.sample()" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "7eb51c06-d0d2-4158-b054-408d5e00d64c", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Auto-assigning NUTS sampler...\n", + "Initializing NUTS using jitter+adapt_diag...\n", + "Initializing NUTS failed. Falling back to elementwise auto-assignment.\n", + "Multiprocess sampling (4 chains in 4 jobs)\n", + "CompoundStep\n", + ">Slice: [c]\n", + ">Slice: [m]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " 100.00% [16000/16000 00:06<00:00 Sampling 4 chains, 0 divergences]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Sampling 4 chains for 1_000 tune and 3_000 draw iterations (4_000 + 12_000 draws total) took 16 seconds.\n" + ] + }, + { + "ename": "MissingInputError", + "evalue": "Input 0 of the graph (indices start from 0), used to compute sigmoid(c_interval__), was not provided and not given a value. Use the Theano flag exception_verbosity='high', for more information on this error.", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mMissingInputError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m/var/folders/cb/ly7f8x1n48q9lkmqt6c6lyv00000gn/T/ipykernel_27971/1192511093.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 35\u001b[0m \u001b[0mpm\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDensityDist\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"likelihood\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mlambda\u001b[0m \u001b[0mv\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mlogl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mv\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mobserved\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0;34m\"v\"\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mtheta\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 36\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 37\u001b[0;31m \u001b[0mtrace\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpm\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msample\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mndraws\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtune\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnburn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdiscard_tuned_samples\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;32m~/.pyenv/versions/3.9.6/lib/python3.9/site-packages/pymc3/sampling.py\u001b[0m in \u001b[0;36msample\u001b[0;34m(draws, step, init, n_init, start, trace, chain_idx, chains, cores, tune, progressbar, model, random_seed, discard_tuned_samples, compute_convergence_checks, callback, jitter_max_retries, return_inferencedata, idata_kwargs, mp_ctx, pickle_backend, **kwargs)\u001b[0m\n\u001b[1;32m 637\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0midata_kwargs\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 638\u001b[0m \u001b[0mikwargs\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mupdate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0midata_kwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 639\u001b[0;31m \u001b[0midata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0marviz\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfrom_pymc3\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtrace\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mikwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 640\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 641\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mcompute_convergence_checks\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.pyenv/versions/3.9.6/lib/python3.9/site-packages/arviz/data/io_pymc3.py\u001b[0m in \u001b[0;36mfrom_pymc3\u001b[0;34m(trace, prior, 
posterior_predictive, log_likelihood, coords, dims, model, save_warmup, density_dist_obs)\u001b[0m\n\u001b[1;32m 561\u001b[0m \u001b[0mInferenceData\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 562\u001b[0m \"\"\"\n\u001b[0;32m--> 563\u001b[0;31m return PyMC3Converter(\n\u001b[0m\u001b[1;32m 564\u001b[0m \u001b[0mtrace\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtrace\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 565\u001b[0m \u001b[0mprior\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mprior\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.pyenv/versions/3.9.6/lib/python3.9/site-packages/arviz/data/io_pymc3.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, trace, prior, posterior_predictive, log_likelihood, predictions, coords, dims, model, save_warmup, density_dist_obs)\u001b[0m\n\u001b[1;32m 169\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 170\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdensity_dist_obs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdensity_dist_obs\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 171\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mobservations\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmulti_observations\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfind_observations\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 172\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 173\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mfind_observations\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0mTuple\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mOptional\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mDict\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mVar\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mOptional\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mDict\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mVar\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.pyenv/versions/3.9.6/lib/python3.9/site-packages/arviz/data/io_pymc3.py\u001b[0m in \u001b[0;36mfind_observations\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 182\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mhasattr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mobs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"data\"\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdensity_dist_obs\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 183\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mkey\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mval\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mobs\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mitems\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 184\u001b[0;31m \u001b[0mmulti_observations\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mkey\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mval\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0meval\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhasattr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mval\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"eval\"\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0mval\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 185\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mobservations\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmulti_observations\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 186\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.pyenv/versions/3.9.6/lib/python3.9/site-packages/theano/graph/basic.py\u001b[0m in \u001b[0;36meval\u001b[0;34m(self, inputs_to_values)\u001b[0m\n\u001b[1;32m 552\u001b[0m \u001b[0minputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtuple\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msorted\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minputs_to_values\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mkeys\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkey\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mid\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 553\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0minputs\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_fn_cache\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 554\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_fn_cache\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtheano\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfunction\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 555\u001b[0m \u001b[0margs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0minputs_to_values\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mparam\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mparam\u001b[0m \u001b[0;32min\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 556\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.pyenv/versions/3.9.6/lib/python3.9/site-packages/theano/compile/function/__init__.py\u001b[0m in \u001b[0;36mfunction\u001b[0;34m(inputs, outputs, mode, updates, givens, no_default_updates, accept_inplace, name, rebuild_strict, allow_input_downcast, profile, on_unused_input)\u001b[0m\n\u001b[1;32m 335\u001b[0m \u001b[0;31m# note: pfunc will also call orig_function -- orig_function is\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 336\u001b[0m \u001b[0;31m# a choke point that all compilation must pass through\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 337\u001b[0;31m fn = pfunc(\n\u001b[0m\u001b[1;32m 338\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 339\u001b[0m \u001b[0moutputs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0moutputs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + 
"\u001b[0;32m~/.pyenv/versions/3.9.6/lib/python3.9/site-packages/theano/compile/function/pfunc.py\u001b[0m in \u001b[0;36mpfunc\u001b[0;34m(params, outputs, mode, updates, givens, no_default_updates, accept_inplace, name, rebuild_strict, allow_input_downcast, profile, on_unused_input, output_keys)\u001b[0m\n\u001b[1;32m 522\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msi\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 523\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 524\u001b[0;31m return orig_function(\n\u001b[0m\u001b[1;32m 525\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 526\u001b[0m \u001b[0mcloned_outputs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.pyenv/versions/3.9.6/lib/python3.9/site-packages/theano/compile/function/types.py\u001b[0m in \u001b[0;36morig_function\u001b[0;34m(inputs, outputs, mode, accept_inplace, name, profile, on_unused_input, output_keys)\u001b[0m\n\u001b[1;32m 1968\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1969\u001b[0m \u001b[0mMaker\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgetattr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmode\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"function_maker\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mFunctionMaker\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1970\u001b[0;31m m = Maker(\n\u001b[0m\u001b[1;32m 1971\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1972\u001b[0m \u001b[0moutputs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.pyenv/versions/3.9.6/lib/python3.9/site-packages/theano/compile/function/types.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, inputs, outputs, mode, accept_inplace, function_builder, profile, on_unused_input, fgraph, output_keys, name)\u001b[0m\n\u001b[1;32m 1582\u001b[0m \u001b[0;31m# make the fgraph (copies the graph, creates NEW INPUT AND\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1583\u001b[0m \u001b[0;31m# OUTPUT VARIABLES)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1584\u001b[0;31m \u001b[0mfgraph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0madditional_outputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mstd_fgraph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moutputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0maccept_inplace\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1585\u001b[0m \u001b[0mfgraph\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mprofile\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mprofile\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1586\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.pyenv/versions/3.9.6/lib/python3.9/site-packages/theano/compile/function/types.py\u001b[0m in \u001b[0;36mstd_fgraph\u001b[0;34m(input_specs, output_specs, accept_inplace)\u001b[0m\n\u001b[1;32m 186\u001b[0m \u001b[0morig_outputs\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0;34m[\u001b[0m\u001b[0mspec\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvariable\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mspec\u001b[0m \u001b[0;32min\u001b[0m \u001b[0moutput_specs\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mupdates\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 187\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 188\u001b[0;31m \u001b[0mfgraph\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mFunctionGraph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0morig_inputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0morig_outputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mupdate_mapping\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mupdate_mapping\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 189\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 190\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mnode\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mfgraph\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply_nodes\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.pyenv/versions/3.9.6/lib/python3.9/site-packages/theano/graph/fg.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, inputs, outputs, features, clone, update_mapping)\u001b[0m\n\u001b[1;32m 160\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 161\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0moutput\u001b[0m \u001b[0;32min\u001b[0m \u001b[0moutputs\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 162\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mimport_var\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mreason\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m\"init\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 163\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moutput\u001b[0m \u001b[0;32min\u001b[0m \u001b[0menumerate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutputs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 164\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mclients\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0moutput\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"output\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mi\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.pyenv/versions/3.9.6/lib/python3.9/site-packages/theano/graph/fg.py\u001b[0m in \u001b[0;36mimport_var\u001b[0;34m(self, var, reason)\u001b[0m\n\u001b[1;32m 328\u001b[0m \u001b[0;31m# Imports the owners of the variables\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 329\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mvar\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mowner\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mvar\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mowner\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply_nodes\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 330\u001b[0;31m 
\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mimport_node\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvar\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mowner\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mreason\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mreason\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 331\u001b[0m elif (\n\u001b[1;32m 332\u001b[0m \u001b[0mvar\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mowner\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.pyenv/versions/3.9.6/lib/python3.9/site-packages/theano/graph/fg.py\u001b[0m in \u001b[0;36mimport_node\u001b[0;34m(self, apply_node, check, reason)\u001b[0m\n\u001b[1;32m 381\u001b[0m \u001b[0;34m\"for more information on this error.\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 382\u001b[0m )\n\u001b[0;32m--> 383\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mMissingInputError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0merror_msg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvariable\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mvar\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 384\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 385\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mnode\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mnew_nodes\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mMissingInputError\u001b[0m: Input 0 of the graph (indices start from 0), used to compute sigmoid(c_interval__), was not provided and not given a value. Use the Theano flag exception_verbosity='high', for more information on this error." + ] + } + ], + "source": [ + "\n", + "\n", + "\n", + "def my_model(theta, x):\n", + " m, c = theta\n", + " return m*x + c\n", + "\n", + "# set up our data\n", + "N = 10 # number of data points\n", + "sigma = 1.0 # standard deviation of noise\n", + "x = np.linspace(0.0, 9.0, N)\n", + "\n", + "mtrue = 0.4 # true gradient\n", + "ctrue = 3.0 # true y-intercept\n", + "\n", + "truemodel = my_model([mtrue, ctrue], x)\n", + "\n", + "# make data\n", + "np.random.seed(716742) # set random seed, so the data is reproducible each time\n", + "data = sigma * np.random.randn(N) + truemodel\n", + "\n", + "ndraws = 3000 # number of draws from the distribution\n", + "nburn = 1000 # number of \"burn-in points\" (which we'll discard)\n", + "\n", + "# create our Op\n", + "logl = LogLike(loglike, data, x, sigma)\n", + "\n", + "# use PyMC3 to sampler from log-likelihood\n", + "with pm.Model():\n", + " # uniform priors on m and c\n", + " m = pm.Uniform(\"m\", lower=-10.0, upper=10.0)\n", + " c = pm.Uniform(\"c\", lower=-10.0, upper=10.0)\n", + "\n", + " # convert m and c to a tensor vector\n", + " theta = tt.as_tensor_variable([m, c])\n", + "\n", + " # use a DensityDist (use a lamdba function to \"call\" the Op)\n", + " pm.DensityDist(\"likelihood\", lambda v: logl(v), observed={\"v\": theta})\n", + "\n", + " trace = pm.sample(ndraws, tune=nburn, discard_tuned_samples=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bb86dd80-036f-403f-8b17-16b139af7257", + "metadata": {}, + "outputs": [], + "source": [ + "FG_loglike" + ] + }, + { + "cell_type": "code", + "execution_count": 79, + "id": "e7183e98-821d-4df0-86ab-10dac4f10197", + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "51c102b5ef0f41aeb6421c60c0b63b04", + 
"version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "[]" + ] + }, + "execution_count": 79, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Test a toy model to make sure it works...\n", + "nld_values = np.concatenate((np.mean(trace['ρ_pre'], axis=0), np.mean(trace['ρ_discrete'], axis=0),\n", + " np.mean(trace['ρ_mid'], axis=0), np.mean(trace['ρ_model'], axis=0),\n", + " np.mean(trace['ρ_end'], axis=0)))\n", + "nld_std = np.concatenate((np.std(trace['ρ_pre'], axis=0), np.std(trace['ρ_discrete'], axis=0),\n", + " np.std(trace['ρ_mid'], axis=0), np.std(trace['ρ_model'], axis=0),\n", + " np.std(trace['ρ_end'], axis=0)))\n", + "\n", + "nld_normed = om.Vector(E=E_nld, values=nld_values, std=nld_std)\n", + "fig, ax = nld_normed.plot()\n", + "nld_discrete.plot(ax=ax, kind='step')\n", + "ax.errorbar(Sn, rhoSn[0], yerr=rhoSn[1], fmt=\"o\")\n", + "ax.plot(np.linspace(3, Sn, 1001), np.exp((np.linspace(3, Sn, 1001) - np.mean(trace['E0']))/np.mean(trace['Temp']))/np.mean(trace['']))\n", + "ax.semilogy()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3789a2ee-2efa-42d6-a986-af2e708e1d9f", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/model_op.py b/notebooks/model_op.py new file mode 100644 index 00000000..9ef169fe --- /dev/null +++ b/notebooks/model_op.py @@ -0,0 +1,194 @@ +import numpy as np +import ompy as om +import theano.tensor as tt +import pymc3 as pm + +def diagonal_resolution(matrix, resolution_Ex): + """Detector resolution at the Ex=Eg diagonal + + Uses gaussian error propagations which assumes independence of + resolutions along Ex and Eg axis. + + Args: + matrix (Matrix): Matrix for which the sesoluton shall be calculated + + Returns: + resolution at Ex = Eg. + """ + def resolution_Eg(matrix): + """Resolution along Eg axis for each Ex. Defaults in this class are for OSCAR. + + Args: + matrix (Matrix): Matrix for which the sesoluton shall be calculated + + Returns: + resolution + """ + def fFWHM(E, p): + return np.sqrt(p[0] + p[1] * E + p[2] * E**2) + fwhm_pars = np.array([73.2087, 0.50824, 9.62481e-05]) + return fFWHM(matrix.Ex, fwhm_pars) + + dEx = matrix.Ex[1] - matrix.Ex[0] + dEg = matrix.Eg[1] - matrix.Eg[0] + assert dEx == dEg + + dE_resolution = np.sqrt(resolution_Ex**2 + + resolution_Eg(matrix)**2) + return dE_resolution + + + + +# Ops to calculate the FG matrix from the NLDs and Ts +class calculate_FG(tt.Op): + + itypes = [tt.dvector] # expects a vector of parameter values when called + otypes = [tt.dscalar] # outputs a single scalar value (the log likelihood) + + def __init__(self, matrix, std, E_nld): + + self.matrix = matrix.copy() + self.std = std.copy() + self.resolution = diagonal_resolution(matrix, 150.) 
+        self.E_nld = E_nld.copy(order='C')
+
+        self.matrix.values, self.std.values = om.extractor.normalize(self.matrix, self.std)
+
+        self.std.values = std.values.copy(order='C')
+        self.matrix.Ex = self.matrix.Ex.copy(order='C')
+        self.matrix.Eg = self.matrix.Eg.copy(order='C')
+        self.matrix.values = self.matrix.values.copy(order='C')
+
+    def perform(self, node, inputs, outputs):
+
+        (x,) = inputs
+        T = x[:self.matrix.Eg.size]
+        nld = x[self.matrix.Eg.size:]
+
+        fg_th = om.decomposition.nld_T_product(nld, T, self.resolution, self.E_nld,
+                                               self.matrix.Eg, self.matrix.Ex)
+
+        z = -0.5*np.array(om.decomposition.chisquare_diagonal(self.matrix.values, fg_th,
+                                                              self.std.values, self.resolution,
+                                                              self.matrix.Eg, self.matrix.Ex))
+        outputs[0][0] = z
+        print(z)  # debug output: log-likelihood of each evaluation
+        #return outputs[0][0]
+
+class FG_loglike:
+    """Callable log-likelihood for the first-generation matrix decomposition."""
+
+    def __init__(self, matrix, std, E_nld):
+
+        self.matrix = matrix.copy()
+        self.std = std.copy()
+        self.resolution = diagonal_resolution(matrix, 150.)
+        self.E_nld = E_nld.copy(order='C')
+
+        self.matrix.values, self.std.values = om.extractor.normalize(self.matrix, self.std)
+
+        self.std.values = std.values.copy(order='C')
+        self.matrix.Ex = self.matrix.Ex.copy(order='C')
+        self.matrix.Eg = self.matrix.Eg.copy(order='C')
+        self.matrix.values = self.matrix.values.copy(order='C')
+
+    def __call__(self, x):
+        T = x[:self.matrix.Eg.size]
+        nld = x[self.matrix.Eg.size:]
+
+        fg_th = om.decomposition.nld_T_product(nld, T, self.resolution, self.E_nld,
+                                               self.matrix.Eg, self.matrix.Ex)
+
+        return om.decomposition.chisquare_diagonal(self.matrix.values, fg_th,
+                                                   self.std.values, self.resolution,
+                                                   self.matrix.Eg, self.matrix.Ex)
+
+class LogLike2(tt.Op):
+    """
+    Specify what type of object will be passed and returned to the Op when it is
+    called. In our case we will be passing it a vector of values (the parameters
+    that define our model) and returning a single "scalar" value (the
+    log-likelihood).
+    """
+
+    itypes = [tt.dvector]  # expects a vector of parameter values when called
+    otypes = [tt.dscalar]  # outputs a single scalar value (the log likelihood)
+
+    def __init__(self, loglike):
+        """
+        Initialise the Op with the log-likelihood function it wraps.
+
+        Parameters
+        ----------
+        loglike:
+            The log-likelihood (or whatever) function we've defined
+        """
+
+        # add the input as a class attribute
+        self.likelihood = loglike
+
+    def perform(self, node, inputs, outputs):
+        # the method that is used when calling the Op
+        (theta,) = inputs  # this will contain my variables
+
+        # call the log-likelihood function
+        logl = self.likelihood(theta)
+
+        outputs[0][0] = np.array(logl)  # output the log-likelihood
+
+def loglike(theta, x, data, sigma):
+    # Placeholder likelihood used to test the Op plumbing; it ignores
+    # x, data and sigma entirely.
+    return -0.5*np.sum(theta)
+
+# define a theano Op for our likelihood function
+class LogLike(tt.Op):
+
+    """
+    Specify what type of object will be passed and returned to the Op when it is
+    called.
In our case we will be passing it a vector of values (the parameters + that define our model) and returning a single "scalar" value (the + log-likelihood) + """ + + itypes = [tt.dvector] # expects a vector of parameter values when called + otypes = [tt.dscalar] # outputs a single scalar value (the log likelihood) + + def __init__(self, loglike, data, x, sigma): + """ + Initialise the Op with various things that our log-likelihood function + requires. Below are the things that are needed in this particular + example. + + Parameters + ---------- + loglike: + The log-likelihood (or whatever) function we've defined + data: + The "observed" data that our log-likelihood function takes in + x: + The dependent variable (aka 'x') that our model requires + sigma: + The noise standard deviation that our function requires. + """ + + # add inputs as class attributes + self.likelihood = loglike + self.data = data + self.x = x + self.sigma = sigma + + def perform(self, node, inputs, outputs): + # the method that is used when calling the Op + (theta,) = inputs # this will contain my variables + + # call the log-likelihood function + logl = self.likelihood(theta, self.x, self.data, self.sigma) + + outputs[0][0] = np.array(logl) # output the log-likelihood \ No newline at end of file diff --git a/notebooks/uncertanty_by_counts.ipynb b/notebooks/uncertanty_by_counts.ipynb new file mode 100644 index 00000000..b7c368c0 --- /dev/null +++ b/notebooks/uncertanty_by_counts.ipynb @@ -0,0 +1,7874 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "0e6a22c9-5631-443b-98dd-e25c0cc05cf9", + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2\n", + "%matplotlib widget" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "79d7254a-6b71-460d-96f0-400bf769a436", + "metadata": {}, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "import ompy as om\n", + "import logging\n", + "import pymc3 as pm\n", + "import arviz as az" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "cd12b7cd-7248-4ba8-bf64-6a98b86167f7", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'1.1.0.dev0+02f42f6'" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "om.__full_version__" + ] + }, + { + "cell_type": "markdown", + "id": "c085a2be-0201-4ecb-a972-94ec65cb5b44", + "metadata": {}, + "source": [ + "# Global settings of notebook\n", + "In the cell below there are a nuber of global settings that will affect the notebook from here. This is to make it easier to do a short PoC without having it to run for hours and hours.\n", + "\n", + "* `ensemble_size` controls the number of ensemble members that gets generated. Depending on the operation done the runtime may be linear or N^(N-1).\n", + "* `multinest_livepoints` controls the number of live points used by PyMultinest. It is recomended to use at least 400 when accuracy is important. For debugging purposes this can be lowered." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "dd94bf0d-0ad4-4c46-a90e-4dd30911f037", + "metadata": {}, + "outputs": [], + "source": [ + "ensemble_size = 10\n", + "multinest_livepoints = 300" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "7c5ff2cd-da69-4d52-bca1-7e723fff8c15", + "metadata": {}, + "outputs": [], + "source": [ + "def draw_random(matrix: om.Matrix, count: int, **kwargs) -> om.Matrix:\n", + " \"\"\" Simple function to generate a matrix with N counts\n", + " where the probability of drawing a particular bin is given\n", + " by the number of counts in the input matrix.\n", + " Args:\n", + " matrix: (Matrix) Matrix giving the probability\n", + " count: (int) Number of counts to draw\n", + " **kwargs: Keyword arguments to the random number generator.\n", + " Returns:\n", + " A matrix-type with bin contents randomly drawn with the\n", + " same distribution as the input matrix.\n", + " \"\"\"\n", + " \n", + " # Step 1, make a giant array with number of entries equal to the number of counts in each bin in the matrix.\n", + " coords = []\n", + " probability = []\n", + " for i in range(len(matrix.Ex)):\n", + " for j in range(len(matrix.Eg)):\n", + " coords.append([i,j])\n", + " probability.append(int(matrix.values[i,j]))\n", + " \n", + " coords = np.array(coords)\n", + " probability = np.array(probability, dtype=float)\n", + " probability /= np.sum(probability)\n", + " \n", + " # Step 2, draw 'draws' number of elements from the list\n", + " rng = np.random.default_rng(**kwargs)\n", + " drawed = rng.choice(coords, size=int(count), p=probability)\n", + " mat = matrix.copy()\n", + " mat.values *= 0\n", + " for i,j in drawed:\n", + " mat.values[i,j] += 1\n", + " return mat" + ] + }, + { + "cell_type": "markdown", + "id": "8cbbf6ed-aeae-49f8-b87b-561ce1cdb208", + "metadata": {}, + "source": [ + "# Loading data\n", + "In this notebook we will be looking at the same data-set as in the [Getting Started](https://ompy.readthedocs.io/en/latest/getting_started.html) tutorial, but will do the analysis with three different statistics levels. To simulate low statistics we will use the `draw_random` function declared above. We will be looking at the full statistics case (5,279,152 counts), a low count case (100,000 counts) and a medium count case (250,000 counts).\n", + "\n", + "The $^{164}\\mathrm{Dy}$ data used below has been gathered from following experiment: Nyhus, H. T. *et al.* (2010). DOI: [10.1103/physrevc.81.024325](https://doi.org/10.1103/PhysRevC.81.024325)\n", + "and is reanalyzed in Renstrøm, T. *et al.* (2018). 
DOI: [10.1103/physrevc.98.054310](https://doi.org/10.1103/PhysRevC.98.054310)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "ed0784d7-9111-4519-a940-4f5e33e0592c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Total number of counts in raw matrix: 5279151.998904223\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "fb0a934117f249349701a8dd2b95a723", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Import raw matrix into instance of om.Matrix() and plot it\n", + "raw = om.example_raw('Dy164')\n", + "# To use you own data, uncomment/adapt the line below instead\n", + "# raw = om.Matrix(path=\"/path/to/matrix.ending\")\n", + "\n", + "print(f\"Total number of counts in raw matrix: {np.sum(raw.values)}\")\n", + "\n", + "# Cut the diagonal\n", + "raw.cut_diagonal(E1=(800, 0), E2=(7500, 7300))\n", + "raw.cut('Ex', 0, 8400)\n", + "\n", + "# Plot the entire matrix\n", + "raw_org = raw.copy() # workaround due to execution order in jupyter notebook\n", + " # (calculations are performed before plotting, but we make a cut to raw further down)\n", + "raw_org.plot();" + ] + }, + { + "cell_type": "markdown", + "id": "6c653219-7b91-4e22-95e0-75c51efd0b49", + "metadata": {}, + "source": [ + "## Declare and define used classes\n", + "We are declaring all our analysis classes in the cells bellow as we only need to do this once." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "c4fd9521-72c7-403b-ae06-b1fb390802fa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-08-12 09:56:07,851 - ompy.response - INFO - Note: Spectra outside of 200.0 and 20000.0 are extrapolation only.\n" + ] + } + ], + "source": [ + "logger = om.introspection.get_logger('response', 'INFO')\n", + "# Then do the same using OMpy functionality:\n", + "# You may need to adpot this to whereever you response matrices are stored\n", + "folderpath = \"../OCL_response_functions/oscar2017_scale1.15\"\n", + "\n", + "# Energy calibration of resulting response matrix:\n", + "Eg = raw.Eg\n", + "\n", + "# Experimental relative FWHM at 1.33 MeV of resulting array\n", + "fwhm_abs = 30 # (30/1330 = 2.25% )\n", + "\n", + "# Magne recommends 1/10 of the actual resolution for unfolding purposes\n", + "response = om.Response(folderpath)\n", + "R_ompy_unf, R_tab_unf = response.interpolate(Eg, fwhm_abs=fwhm_abs/10, return_table=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "62cb066b-0b79-4932-a69b-41dfef2e69c7", + "metadata": {}, + "outputs": [], + "source": [ + "# With compton subtraction and all tweaks\n", + "unfolder= om.Unfolder(response=R_ompy_unf)\n", + "firstgen = om.FirstGeneration()\n", + "unfolder.use_compton_subtraction = True # default\n", + "unfolder.response_tab = R_tab_unf\n", + "# Magne suggests some \"tweaks\" for a better unfolding performance. 
Default is 1 for all.\n", + "unfolder.FWHM_tweak_multiplier = {\"fe\": 1., \"se\": 1.1,\n", + " \"de\": 1.3, \"511\": 0.9}\n", + "\n", + "trapezoid_cut = om.Action('matrix')\n", + "trapezoid_cut.trapezoid(Ex_min=4000, Ex_max=7000, Eg_min=1000, Eg_max=7000+200, inplace=True)\n", + "E_rebinned = np.arange(100., 8500, 200)" + ] + }, + { + "cell_type": "markdown", + "id": "da8405e4-6667-4da2-b30e-5717be7f72c4", + "metadata": {}, + "source": [ + "### Normalization parameters\n", + "See [getting started]() for more details." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "99350fc0-4246-411b-af81-c7451ffff09a", + "metadata": {}, + "outputs": [], + "source": [ + "norm_pars = om.NormalizationParameters(name=\"164Dy\")\n", + "norm_pars.D0 = [6.8, 0.6] # eV\n", + "norm_pars.Sn = [7.658, 0.001] # MeV\n", + "norm_pars.Gg = [113., 13.] #meV\n", + "norm_pars.spincutModel = 'Disc_and_EB05' # see eg. Guttormsen et al., 2017, PRC 96, 024313\n", + "norm_pars.spincutPars = {\"mass\":164, \"NLDa\":18.12, \"Eshift\":0.31,\n", + " \"Sn\": norm_pars.Sn[0], \"sigma2_disc\":[1.5, 3.6]}\n", + "norm_pars.Jtarget = 5/2 # A-1 nucleus" + ] + }, + { + "cell_type": "markdown", + "id": "1933ac94-54f0-430e-a4d2-a539c7797352", + "metadata": {}, + "source": [ + "### Setup useful loggers" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "f9f34016-82b5-4eb3-b736-68d9660cb3ee", + "metadata": {}, + "outputs": [], + "source": [ + "nld_log = om.introspection.get_logger('normalizer_nld', 'INFO')\n", + "gsf_log = om.introspection.get_logger('normalizer_gsf', 'INFO')\n", + "sim_log = om.introspection.get_logger('normalizer_simultan', 'INFO')\n", + "ens_log = om.introspection.get_logger('ensembleNormalizer', 'INFO')\n", + "logger = om.introspection.get_logger('error_finder', 'DEBUG')" + ] + }, + { + "cell_type": "markdown", + "id": "ed0a22ee-97fa-4e0b-a15e-8db8b90a0ea3", + "metadata": {}, + "source": [ + "## Declare ErrorFinder\n", + "This declares the error estimator using the logarithmic model. This is the more stable model." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "83dbc654-9179-44fa-8caa-6661db38b7d5", + "metadata": {}, + "outputs": [], + "source": [ + "error_estimator = om.error_finder.ErrorFinder(algorithm='log')" + ] + }, + { + "cell_type": "markdown", + "id": "dc7d94d1-93ff-4840-9cf6-d8f27941758f", + "metadata": {}, + "source": [ + "## Test dependence on statistics\n", + "In the next few cells we will generate input matrices with various number of counts. This is to simulate the effect of statistics has on the results." 
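+    "\n",
+    "The matrices below are generated with `draw_random` from earlier in this notebook, which histograms one categorical draw per count. As a minimal sketch (an editorial addition, not part of the original analysis; the helper name `draw_random_multinomial` is hypothetical), the same down-sampling can be done with a single multinomial draw:\n",
+    "```python\n",
+    "def draw_random_multinomial(matrix: om.Matrix, count: int, **kwargs) -> om.Matrix:\n",
+    "    # One multinomial draw over all bins is distributed identically to\n",
+    "    # `count` independent categorical draws (up to the integer flooring\n",
+    "    # of bin contents that draw_random applies).\n",
+    "    rng = np.random.default_rng(**kwargs)\n",
+    "    p = matrix.values.ravel().astype(float)\n",
+    "    p /= p.sum()\n",
+    "    out = matrix.copy()\n",
+    "    out.values = rng.multinomial(int(count), p).reshape(matrix.values.shape).astype(float)\n",
+    "    return out\n",
+    "```"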
+ ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "96b883fc-66f6-4c95-8c76-6ece9f000c69", + "metadata": {}, + "outputs": [], + "source": [ + "raw_orig = raw.copy()\n", + "raw_orig.fill_and_remove_negative(window_size=2)\n", + "\n", + "counts = [25000, 50000, 75000, 100000, 150000, 200000, 250000, 500000, 750000, 1000000, 1500000, 2000000]\n", + "raw_matrices = [draw_random(raw_orig, count) for count in counts]\n", + "raw_matrices.append(raw_orig.copy())\n", + "counts.append(int(raw_orig.counts))" + ] + }, + { + "cell_type": "markdown", + "id": "16f9f6d2-347d-48e1-b921-b6860ddce67a", + "metadata": {}, + "source": [ + "### Setup the ensembles" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "f7f32cc8-8074-4959-88ce-c008954ad525", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "a891a1cf71644fae9304b8606cadc53d", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + " 0%| | 0/10 [00:00\n", + " \n", + " \n", + " 100.00% [8000/8000 01:05<00:00 Sampling 4 chains, 0 divergences]\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Sampling 4 chains for 1_000 tune and 1_000 draw iterations (4_000 + 4_000 draws total) took 76 seconds.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-08-12 10:17:25,444 - ompy.error_finder - INFO - Inference results:\n", + "┌─────────────┬─────────────┬────────────────┬───────────────┬───────────────┬─────────────────┐\n", + "│ D │ F │ α │ σ_D │ σ_F │ σ_α │\n", + "╞═════════════╪═════════════╪════════════════╪═══════════════╪═══════════════╪═════════════════╡\n", + "│ 0.00 ± 0.27 │ 0.00 ± 0.17 │ -0.000 ± 0.049 │ 0.276 ± 0.022 │ 0.168 ± 0.015 │ 0.0492 ± 0.0043 │\n", + "└─────────────┴─────────────┴────────────────┴───────────────┴───────────────┴─────────────────┘\n", + "┌────┬──────────────┬──────────────┐\n", + "│ │ σ_ρ [%] │ σ_f [%] │\n", + "╞════╪══════════════╪══════════════╡\n", + "│ 0 │ 37.0 ± 2.9 │ 13.2 ± 1.1 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 1 │ 32.5 ± 2.5 │ 11.39 ± 0.98 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 2 │ 46.5 ± 3.7 │ 10.11 ± 0.87 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 3 │ 20.6 ± 1.7 │ 15.1 ± 1.2 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 4 │ 18.0 ± 1.5 │ 9.25 ± 0.78 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 5 │ 22.7 ± 1.8 │ 9.57 ± 0.79 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 6 │ 18.7 ± 1.5 │ 12.20 ± 0.98 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 7 │ 12.2 ± 1.0 │ 14.6 ± 1.2 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 8 │ 15.3 ± 1.1 │ 18.7 ± 1.4 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 9 │ 9.61 ± 0.82 │ 5.11 ± 0.57 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 10 │ 13.8 ± 1.1 │ 11.37 ± 0.92 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 11 │ 10.21 ± 0.82 │ 12.2 ± 1.0 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 12 │ 5.53 ± 0.56 │ 14.0 ± 1.1 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 13 │ 11.42 ± 0.88 │ 9.08 ± 0.80 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 14 │ 10.62 ± 0.84 │ 17.0 ± 1.4 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 15 │ 12.33 ± 1.00 │ 13.6 ± 1.2 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 16 │ 10.08 ± 0.82 │ 26.6 ± 2.1 │\n", + 
"├────┼──────────────┼──────────────┤\n", + "│ 17 │ 10.51 ± 0.86 │ 20.6 ± 1.6 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 18 │ 16.0 ± 1.3 │ 42.3 ± 3.2 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 19 │ 17.9 ± 1.4 │ 34.4 ± 2.6 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 20 │ 13.3 ± 1.1 │ 54.1 ± 4.3 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 21 │ 13.7 ± 1.2 │ 39.6 ± 3.1 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 22 │ 14.6 ± 1.2 │ 88.1 ± 6.2 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 23 │ 13.9 ± 1.1 │ 56.9 ± 4.3 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 24 │ 17.8 ± 1.4 │ 61.2 ± 4.7 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 25 │ 29.5 ± 2.2 │ 82.3 ± 6.1 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 26 │ 15.1 ± 1.3 │ 68.6 ± 5.0 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 27 │ 27.0 ± 2.1 │ │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 28 │ 52.3 ± 4.2 │ │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 29 │ 44.4 ± 3.5 │ │\n", + "└────┴──────────────┴──────────────┘\n", + "2021-08-12 10:17:25,462 - ompy.error_finder - DEBUG - Processing an ensemble with 10 members\n", + "2021-08-12 10:17:25,463 - ompy.error_finder - DEBUG - Before removing nan: 45 NLD values and 31 GSF values\n", + "2021-08-12 10:17:25,467 - ompy.error_finder - WARNING - NLDs and/or γSFs contains nan's. They will be removed\n", + "2021-08-12 10:17:25,468 - ompy.error_finder - WARNING - Some members of the ensemble have different lengths. Consider re-binning or changing limits.\n", + "2021-08-12 10:17:25,471 - ompy.error_finder - DEBUG - After removing nan: 30 NLD values and 28 GSF values\n", + "2021-08-12 10:17:25,473 - ompy.error_finder - INFO - Starting pyMC3 inference - logarithmic model\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Auto-assigning NUTS sampler...\n", + "Initializing NUTS using jitter+adapt_diag...\n", + "Multiprocess sampling (4 chains in 4 jobs)\n", + "NUTS: [σ_f, σ_ρ, α, F, D, σ_α, σ_F, σ_D]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " 100.00% [8000/8000 01:07<00:00 Sampling 4 chains, 0 divergences]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Sampling 4 chains for 1_000 tune and 1_000 draw iterations (4_000 + 4_000 draws total) took 78 seconds.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-08-12 10:18:55,744 - ompy.error_finder - INFO - Inference results:\n", + "┌──────────────┬──────────────┬───────────────┬───────────────┬───────────────┬─────────────────┐\n", + "│ D │ F │ α │ σ_D │ σ_F │ σ_α │\n", + "╞══════════════╪══════════════╪═══════════════╪═══════════════╪═══════════════╪═════════════════╡\n", + "│ -0.00 ± 0.33 │ -0.00 ± 0.31 │ 0.000 ± 0.067 │ 0.332 ± 0.026 │ 0.309 ± 0.023 │ 0.0679 ± 0.0054 │\n", + "└──────────────┴──────────────┴───────────────┴───────────────┴───────────────┴─────────────────┘\n", + "┌────┬──────────────┬──────────────┐\n", + "│ │ σ_ρ [%] │ σ_f [%] │\n", + "╞════╪══════════════╪══════════════╡\n", + "│ 0 │ 165 ± 11 │ 13.4 ± 1.1 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 1 │ 12.3 ± 1.0 │ 5.29 ± 0.58 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 2 │ 14.7 ± 1.2 │ 8.79 ± 0.73 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 3 │ 11.7 ± 1.0 │ 12.10 ± 0.95 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 4 │ 13.2 ± 1.1 │ 11.97 ± 0.95 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 5 │ 11.68 ± 0.98 │ 10.04 ± 0.85 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 6 │ 13.8 ± 1.1 │ 11.85 ± 0.94 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 7 │ 17.1 ± 1.4 │ 6.85 ± 0.63 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 8 │ 18.0 ± 1.4 │ 7.35 ± 0.64 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 9 │ 11.96 ± 0.93 │ 8.41 ± 0.71 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 10 │ 7.29 ± 0.63 │ 11.66 ± 0.91 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 11 │ 11.37 ± 0.91 │ 10.07 ± 0.81 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 12 │ 6.73 ± 0.59 │ 9.23 ± 0.77 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 13 │ 7.71 ± 0.63 │ 13.4 ± 1.1 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 14 │ 9.27 ± 0.74 │ 12.8 ± 1.0 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 15 │ 10.91 ± 0.84 │ 12.7 ± 1.0 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 16 │ 7.62 ± 0.65 │ 11.93 ± 0.97 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 17 │ 13.9 ± 1.1 │ 12.7 ± 1.0 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 18 │ 6.07 ± 0.56 │ 16.1 ± 1.3 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 19 │ 7.68 ± 0.66 │ 14.0 ± 1.2 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 20 │ 9.43 ± 0.77 │ 27.7 ± 2.1 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 21 │ 14.6 ± 1.1 │ 44.9 ± 3.4 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 22 │ 18.4 ± 1.5 │ 31.8 ± 2.4 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 23 │ 14.7 ± 1.2 │ 25.9 ± 2.1 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 24 │ 20.6 ± 1.6 │ 49.1 ± 3.8 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 25 │ 16.4 ± 1.3 │ 39.6 ± 3.1 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 26 │ 27.1 ± 2.0 │ 251 ± 16 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 27 │ 17.5 ± 1.5 │ 108.8 ± 7.5 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 28 │ 33.2 ± 2.5 │ │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 29 │ 38.6 ± 3.0 │ │\n", + "└────┴──────────────┴──────────────┘\n", + "2021-08-12 10:18:55,759 - 
ompy.error_finder - DEBUG - Processing an ensemble with 10 members\n", + "2021-08-12 10:18:55,760 - ompy.error_finder - DEBUG - Before removing nan: 45 NLD values and 31 GSF values\n", + "2021-08-12 10:18:55,761 - ompy.error_finder - WARNING - NLDs and/or γSFs contains nan's. They will be removed\n", + "2021-08-12 10:18:55,762 - ompy.error_finder - WARNING - Some members of the ensemble have different lengths. Consider re-binning or changing limits.\n", + "2021-08-12 10:18:55,765 - ompy.error_finder - DEBUG - After removing nan: 30 NLD values and 28 GSF values\n", + "2021-08-12 10:18:55,767 - ompy.error_finder - INFO - Starting pyMC3 inference - logarithmic model\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Auto-assigning NUTS sampler...\n", + "Initializing NUTS using jitter+adapt_diag...\n", + "Multiprocess sampling (4 chains in 4 jobs)\n", + "NUTS: [σ_f, σ_ρ, α, F, D, σ_α, σ_F, σ_D]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " 100.00% [8000/8000 01:11<00:00 Sampling 4 chains, 0 divergences]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Sampling 4 chains for 1_000 tune and 1_000 draw iterations (4_000 + 4_000 draws total) took 82 seconds.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-08-12 10:20:28,338 - ompy.error_finder - INFO - Inference results:\n", + "┌──────────────┬─────────────┬────────────────┬───────────────┬───────────────┬─────────────────┐\n", + "│ D │ F │ α │ σ_D │ σ_F │ σ_α │\n", + "╞══════════════╪═════════════╪════════════════╪═══════════════╪═══════════════╪═════════════════╡\n", + "│ -0.00 ± 0.28 │ 0.00 ± 0.18 │ -0.000 ± 0.068 │ 0.289 ± 0.022 │ 0.185 ± 0.015 │ 0.0690 ± 0.0057 │\n", + "└──────────────┴─────────────┴────────────────┴───────────────┴───────────────┴─────────────────┘\n", + "┌────┬──────────────┬──────────────┐\n", + "│ │ σ_ρ [%] │ σ_f [%] │\n", + "╞════╪══════════════╪══════════════╡\n", + "│ 0 │ 8.35 ± 0.72 │ 8.80 ± 0.73 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 1 │ 10.91 ± 0.88 │ 7.14 ± 0.61 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 2 │ 11.41 ± 0.93 │ 10.56 ± 0.85 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 3 │ 11.15 ± 0.89 │ 9.76 ± 0.80 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 4 │ 16.1 ± 1.2 │ 4.90 ± 0.49 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 5 │ 8.49 ± 0.69 │ 4.58 ± 0.46 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 6 │ 13.2 ± 1.1 │ 10.88 ± 0.83 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 7 │ 14.7 ± 1.1 │ 10.60 ± 0.86 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 8 │ 8.45 ± 0.68 │ 7.18 ± 0.61 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 9 │ 12.42 ± 0.97 │ 9.61 ± 0.76 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 10 │ 3.96 ± 0.37 │ 11.02 ± 0.87 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 11 │ 6.51 ± 0.53 │ 10.08 ± 0.79 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 12 │ 4.68 ± 0.41 │ 6.97 ± 0.59 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 13 │ 6.32 ± 0.52 │ 9.84 ± 0.78 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 14 │ 8.88 ± 0.70 │ 7.20 ± 0.60 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 15 │ 7.13 ± 0.58 │ 12.09 ± 0.97 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 16 │ 9.00 ± 0.69 │ 13.1 ± 1.1 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 17 │ 8.25 ± 0.66 │ 11.26 ± 0.89 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 18 │ 15.9 ± 1.2 │ 12.5 ± 1.0 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 19 │ 10.93 ± 0.87 │ 15.2 ± 1.2 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 20 │ 8.41 ± 0.70 │ 21.0 ± 1.7 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 21 │ 9.89 ± 0.80 │ 25.4 ± 2.0 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 22 │ 15.6 ± 1.2 │ 19.5 ± 1.5 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 23 │ 8.90 ± 0.70 │ 41.2 ± 3.0 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 24 │ 16.3 ± 1.3 │ 11.40 ± 0.98 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 25 │ 6.92 ± 0.68 │ 22.7 ± 1.8 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 26 │ 27.5 ± 2.2 │ 50.6 ± 3.9 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 27 │ 27.1 ± 2.0 │ 55.5 ± 4.3 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 28 │ 43.8 ± 3.3 │ │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 29 │ 45.0 ± 3.5 │ │\n", + "└────┴──────────────┴──────────────┘\n", + 
"2021-08-12 10:20:28,354 - ompy.error_finder - DEBUG - Processing an ensemble with 10 members\n", + "2021-08-12 10:20:28,355 - ompy.error_finder - DEBUG - Before removing nan: 45 NLD values and 31 GSF values\n", + "2021-08-12 10:20:28,356 - ompy.error_finder - WARNING - NLDs and/or γSFs contains nan's. They will be removed\n", + "2021-08-12 10:20:28,357 - ompy.error_finder - DEBUG - After removing nan: 30 NLD values and 30 GSF values\n", + "2021-08-12 10:20:28,359 - ompy.error_finder - INFO - Starting pyMC3 inference - logarithmic model\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Auto-assigning NUTS sampler...\n", + "Initializing NUTS using jitter+adapt_diag...\n", + "Multiprocess sampling (4 chains in 4 jobs)\n", + "NUTS: [σ_f, σ_ρ, α, F, D, σ_α, σ_F, σ_D]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " 100.00% [8000/8000 01:11<00:00 Sampling 4 chains, 0 divergences]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Sampling 4 chains for 1_000 tune and 1_000 draw iterations (4_000 + 4_000 draws total) took 81 seconds.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-08-12 10:22:02,477 - ompy.error_finder - INFO - Inference results:\n", + "┌──────────────┬──────────────┬───────────────┬───────────────┬───────────────┬─────────────────┐\n", + "│ D │ F │ α │ σ_D │ σ_F │ σ_α │\n", + "╞══════════════╪══════════════╪═══════════════╪═══════════════╪═══════════════╪═════════════════╡\n", + "│ -0.00 ± 0.28 │ -0.00 ± 0.21 │ 0.000 ± 0.066 │ 0.288 ± 0.023 │ 0.216 ± 0.017 │ 0.0671 ± 0.0052 │\n", + "└──────────────┴──────────────┴───────────────┴───────────────┴───────────────┴─────────────────┘\n", + "┌────┬──────────────┬──────────────┐\n", + "│ │ σ_ρ [%] │ σ_f [%] │\n", + "╞════╪══════════════╪══════════════╡\n", + "│ 0 │ 10.89 ± 0.93 │ 6.94 ± 0.61 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 1 │ 10.74 ± 0.88 │ 5.37 ± 0.50 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 2 │ 10.79 ± 0.87 │ 8.28 ± 0.68 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 3 │ 11.38 ± 0.94 │ 11.25 ± 0.89 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 4 │ 11.86 ± 0.97 │ 10.91 ± 0.86 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 5 │ 10.77 ± 0.85 │ 5.14 ± 0.46 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 6 │ 12.8 ± 1.0 │ 10.58 ± 0.84 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 7 │ 7.45 ± 0.64 │ 6.96 ± 0.56 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 8 │ 11.98 ± 0.94 │ 6.10 ± 0.51 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 9 │ 10.83 ± 0.86 │ 6.64 ± 0.55 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 10 │ 8.95 ± 0.72 │ 8.04 ± 0.66 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 11 │ 8.12 ± 0.65 │ 6.54 ± 0.54 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 12 │ 8.44 ± 0.68 │ 9.41 ± 0.76 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 13 │ 6.12 ± 0.51 │ 8.61 ± 0.69 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 14 │ 8.22 ± 0.67 │ 5.35 ± 0.48 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 15 │ 10.93 ± 0.85 │ 11.18 ± 0.87 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 16 │ 11.09 ± 0.89 │ 14.2 ± 1.1 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 17 │ 5.20 ± 0.47 │ 14.1 ± 1.1 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 18 │ 9.91 ± 0.77 │ 15.3 ± 1.2 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 19 │ 8.42 ± 0.68 │ 13.2 ± 1.0 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 20 │ 9.19 ± 0.78 │ 14.0 ± 1.1 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 21 │ 12.29 ± 0.96 │ 16.7 ± 1.3 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 22 │ 9.64 ± 0.79 │ 15.1 ± 1.2 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 23 │ 10.71 ± 0.85 │ 28.8 ± 2.3 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 24 │ 10.58 ± 0.88 │ 17.8 ± 1.5 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 25 │ 12.46 ± 0.99 │ 21.6 ± 1.6 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 26 │ 20.4 ± 1.6 │ 49.9 ± 3.8 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 27 │ 18.4 ± 1.4 │ 62.5 ± 4.9 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 28 │ 16.3 ± 1.3 │ 54.9 ± 4.2 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 29 │ 58.4 ± 4.3 │ 66.1 ± 5.0 │\n", + 
"└────┴──────────────┴──────────────┘\n", + "2021-08-12 10:22:02,494 - ompy.error_finder - DEBUG - Processing an ensemble with 10 members\n", + "2021-08-12 10:22:02,495 - ompy.error_finder - DEBUG - Before removing nan: 45 NLD values and 31 GSF values\n", + "2021-08-12 10:22:02,497 - ompy.error_finder - WARNING - NLDs and/or γSFs contains nan's. They will be removed\n", + "2021-08-12 10:22:02,497 - ompy.error_finder - WARNING - Some members of the ensemble have different lengths. Consider re-binning or changing limits.\n", + "2021-08-12 10:22:02,500 - ompy.error_finder - DEBUG - After removing nan: 30 NLD values and 29 GSF values\n", + "2021-08-12 10:22:02,502 - ompy.error_finder - INFO - Starting pyMC3 inference - logarithmic model\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Auto-assigning NUTS sampler...\n", + "Initializing NUTS using jitter+adapt_diag...\n", + "Multiprocess sampling (4 chains in 4 jobs)\n", + "NUTS: [σ_f, σ_ρ, α, F, D, σ_α, σ_F, σ_D]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " 100.00% [8000/8000 01:14<00:00 Sampling 4 chains, 0 divergences]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Sampling 4 chains for 1_000 tune and 1_000 draw iterations (4_000 + 4_000 draws total) took 84 seconds.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-08-12 10:23:39,646 - ompy.error_finder - INFO - Inference results:\n", + "┌──────────────┬──────────────┬───────────────┬───────────────┬───────────────┬─────────────────┐\n", + "│ D │ F │ α │ σ_D │ σ_F │ σ_α │\n", + "╞══════════════╪══════════════╪═══════════════╪═══════════════╪═══════════════╪═════════════════╡\n", + "│ -0.00 ± 0.29 │ -0.00 ± 0.21 │ 0.000 ± 0.057 │ 0.297 ± 0.023 │ 0.216 ± 0.017 │ 0.0578 ± 0.0046 │\n", + "└──────────────┴──────────────┴───────────────┴───────────────┴───────────────┴─────────────────┘\n", + "┌────┬──────────────┬──────────────┐\n", + "│ │ σ_ρ [%] │ σ_f [%] │\n", + "╞════╪══════════════╪══════════════╡\n", + "│ 0 │ 13.7 ± 1.1 │ 15.0 ± 1.1 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 1 │ 5.71 ± 0.58 │ 10.84 ± 0.85 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 2 │ 12.8 ± 1.0 │ 10.36 ± 0.81 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 3 │ 16.9 ± 1.4 │ 9.92 ± 0.78 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 4 │ 9.14 ± 0.77 │ 6.45 ± 0.54 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 5 │ 7.29 ± 0.61 │ 4.78 ± 0.42 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 6 │ 6.12 ± 0.54 │ 7.51 ± 0.61 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 7 │ 9.69 ± 0.79 │ 7.16 ± 0.57 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 8 │ 7.11 ± 0.59 │ 8.24 ± 0.66 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 9 │ 6.48 ± 0.53 │ 2.47 ± 0.30 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 10 │ 5.77 ± 0.45 │ 4.42 ± 0.40 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 11 │ 8.74 ± 0.70 │ 7.79 ± 0.61 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 12 │ 4.02 ± 0.37 │ 10.98 ± 0.85 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 13 │ 3.98 ± 0.38 │ 8.38 ± 0.65 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 14 │ 5.55 ± 0.47 │ 9.24 ± 0.73 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 15 │ 8.45 ± 0.67 │ 7.56 ± 0.62 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 16 │ 10.47 ± 0.81 │ 6.20 ± 0.51 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 17 │ 13.6 ± 1.1 │ 5.99 ± 0.53 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 18 │ 6.83 ± 0.56 │ 11.54 ± 0.91 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 19 │ 17.1 ± 1.3 │ 15.6 ± 1.2 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 20 │ 12.28 ± 0.94 │ 17.3 ± 1.4 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 21 │ 8.87 ± 0.73 │ 17.2 ± 1.4 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 22 │ 8.64 ± 0.72 │ 18.7 ± 1.5 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 23 │ 18.2 ± 1.4 │ 29.0 ± 2.2 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 24 │ 12.03 ± 0.99 │ 15.6 ± 1.3 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 25 │ 9.97 ± 0.85 │ 17.8 ± 1.4 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 26 │ 17.5 ± 1.4 │ 44.9 ± 3.4 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 27 │ 25.6 ± 1.9 │ 71.0 ± 5.4 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 28 │ 22.5 ± 1.7 │ 52.1 ± 4.0 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 29 │ 46.4 ± 3.5 │ │\n", + "└────┴──────────────┴──────────────┘\n", + 
"2021-08-12 10:23:39,664 - ompy.error_finder - DEBUG - Processing an ensemble with 10 members\n", + "2021-08-12 10:23:39,665 - ompy.error_finder - DEBUG - Before removing nan: 45 NLD values and 31 GSF values\n", + "2021-08-12 10:23:39,666 - ompy.error_finder - WARNING - NLDs and/or γSFs contains nan's. They will be removed\n", + "2021-08-12 10:23:39,667 - ompy.error_finder - DEBUG - After removing nan: 30 NLD values and 30 GSF values\n", + "2021-08-12 10:23:39,668 - ompy.error_finder - INFO - Starting pyMC3 inference - logarithmic model\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Auto-assigning NUTS sampler...\n", + "Initializing NUTS using jitter+adapt_diag...\n", + "Multiprocess sampling (4 chains in 4 jobs)\n", + "NUTS: [σ_f, σ_ρ, α, F, D, σ_α, σ_F, σ_D]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " 100.00% [8000/8000 01:22<00:00 Sampling 4 chains, 0 divergences]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Sampling 4 chains for 1_000 tune and 1_000 draw iterations (4_000 + 4_000 draws total) took 93 seconds.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-08-12 10:25:25,352 - ompy.error_finder - INFO - Inference results:\n", + "┌─────────────┬──────────────┬───────────────┬───────────────┬───────────────┬─────────────────┐\n", + "│ D │ F │ α │ σ_D │ σ_F │ σ_α │\n", + "╞═════════════╪══════════════╪═══════════════╪═══════════════╪═══════════════╪═════════════════╡\n", + "│ 0.00 ± 0.25 │ -0.00 ± 0.23 │ 0.000 ± 0.061 │ 0.258 ± 0.020 │ 0.231 ± 0.018 │ 0.0617 ± 0.0049 │\n", + "└─────────────┴──────────────┴───────────────┴───────────────┴───────────────┴─────────────────┘\n", + "┌────┬──────────────┬──────────────┐\n", + "│ │ σ_ρ [%] │ σ_f [%] │\n", + "╞════╪══════════════╪══════════════╡\n", + "│ 0 │ 8.81 ± 0.73 │ 5.59 ± 0.50 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 1 │ 9.00 ± 0.73 │ 8.70 ± 0.71 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 2 │ 6.67 ± 0.58 │ 7.57 ± 0.60 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 3 │ 12.36 ± 0.97 │ 5.81 ± 0.49 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 4 │ 8.48 ± 0.68 │ 4.94 ± 0.42 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 5 │ 7.28 ± 0.60 │ 8.18 ± 0.69 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 6 │ 9.92 ± 0.79 │ 5.57 ± 0.44 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 7 │ 11.70 ± 0.92 │ 4.97 ± 0.44 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 8 │ 9.33 ± 0.72 │ 5.85 ± 0.46 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 9 │ 6.06 ± 0.51 │ 4.03 ± 0.35 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 10 │ 3.13 ± 0.31 │ 8.89 ± 0.71 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 11 │ 7.59 ± 0.60 │ 11.54 ± 0.91 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 12 │ 5.97 ± 0.49 │ 6.97 ± 0.55 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 13 │ 8.45 ± 0.67 │ 5.94 ± 0.48 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 14 │ 6.03 ± 0.49 │ 9.60 ± 0.76 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 15 │ 3.59 ± 0.32 │ 9.45 ± 0.73 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 16 │ 6.43 ± 0.52 │ 11.11 ± 0.88 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 17 │ 5.78 ± 0.47 │ 5.69 ± 0.48 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 18 │ 7.46 ± 0.60 │ 4.11 ± 0.41 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 19 │ 10.10 ± 0.81 │ 9.68 ± 0.76 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 20 │ 6.14 ± 0.52 │ 14.6 ± 1.1 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 21 │ 9.45 ± 0.75 │ 21.5 ± 1.6 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 22 │ 9.41 ± 0.75 │ 12.74 ± 0.98 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 23 │ 11.51 ± 0.91 │ 14.0 ± 1.1 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 24 │ 10.63 ± 0.87 │ 20.8 ± 1.6 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 25 │ 10.27 ± 0.86 │ 24.2 ± 1.9 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 26 │ 15.4 ± 1.2 │ 16.5 ± 1.3 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 27 │ 15.1 ± 1.2 │ 26.2 ± 2.0 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 28 │ 15.9 ± 1.2 │ 37.8 ± 3.0 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 29 │ 37.9 ± 2.9 │ 107.4 ± 7.2 │\n", + 
"└────┴──────────────┴──────────────┘\n", + "2021-08-12 10:25:25,371 - ompy.error_finder - DEBUG - Processing an ensemble with 10 members\n", + "2021-08-12 10:25:25,372 - ompy.error_finder - DEBUG - Before removing nan: 45 NLD values and 31 GSF values\n", + "2021-08-12 10:25:25,374 - ompy.error_finder - WARNING - NLDs and/or γSFs contains nan's. They will be removed\n", + "2021-08-12 10:25:25,374 - ompy.error_finder - WARNING - Some members of the ensemble have different lengths. Consider re-binning or changing limits.\n", + "2021-08-12 10:25:25,377 - ompy.error_finder - DEBUG - After removing nan: 30 NLD values and 29 GSF values\n", + "2021-08-12 10:25:25,378 - ompy.error_finder - INFO - Starting pyMC3 inference - logarithmic model\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Auto-assigning NUTS sampler...\n", + "Initializing NUTS using jitter+adapt_diag...\n", + "Multiprocess sampling (4 chains in 4 jobs)\n", + "NUTS: [σ_f, σ_ρ, α, F, D, σ_α, σ_F, σ_D]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " 100.00% [8000/8000 01:13<00:00 Sampling 4 chains, 0 divergences]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Sampling 4 chains for 1_000 tune and 1_000 draw iterations (4_000 + 4_000 draws total) took 84 seconds.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-08-12 10:27:03,190 - ompy.error_finder - INFO - Inference results:\n", + "┌──────────────┬──────────────┬───────────────┬───────────────┬───────────────┬─────────────────┐\n", + "│ D │ F │ α │ σ_D │ σ_F │ σ_α │\n", + "╞══════════════╪══════════════╪═══════════════╪═══════════════╪═══════════════╪═════════════════╡\n", + "│ -0.00 ± 0.25 │ -0.00 ± 0.23 │ 0.000 ± 0.050 │ 0.251 ± 0.019 │ 0.233 ± 0.018 │ 0.0502 ± 0.0039 │\n", + "└──────────────┴──────────────┴───────────────┴───────────────┴───────────────┴─────────────────┘\n", + "┌────┬──────────────┬──────────────┐\n", + "│ │ σ_ρ [%] │ σ_f [%] │\n", + "╞════╪══════════════╪══════════════╡\n", + "│ 0 │ 7.74 ± 0.65 │ 5.80 ± 0.52 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 1 │ 7.71 ± 0.64 │ 5.16 ± 0.48 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 2 │ 6.03 ± 0.51 │ 7.43 ± 0.60 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 3 │ 7.55 ± 0.63 │ 8.40 ± 0.68 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 4 │ 8.26 ± 0.68 │ 7.28 ± 0.59 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 5 │ 9.02 ± 0.73 │ 5.78 ± 0.51 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 6 │ 5.43 ± 0.46 │ 5.69 ± 0.48 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 7 │ 12.64 ± 0.98 │ 5.43 ± 0.46 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 8 │ 7.77 ± 0.62 │ 6.89 ± 0.56 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 9 │ 5.22 ± 0.42 │ 8.54 ± 0.70 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 10 │ 5.20 ± 0.43 │ 10.04 ± 0.77 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 11 │ 5.88 ± 0.48 │ 5.97 ± 0.50 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 12 │ 5.42 ± 0.44 │ 7.41 ± 0.58 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 13 │ 8.37 ± 0.63 │ 8.78 ± 0.72 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 14 │ 6.30 ± 0.51 │ 11.10 ± 0.86 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 15 │ 5.65 ± 0.45 │ 8.09 ± 0.66 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 16 │ 6.69 ± 0.54 │ 6.47 ± 0.55 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 17 │ 4.46 ± 0.40 │ 7.41 ± 0.61 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 18 │ 7.88 ± 0.62 │ 9.96 ± 0.82 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 19 │ 11.97 ± 0.91 │ 14.8 ± 1.1 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 20 │ 5.03 ± 0.46 │ 11.17 ± 0.89 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 21 │ 4.08 ± 0.42 │ 12.8 ± 1.1 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 22 │ 9.43 ± 0.76 │ 12.09 ± 0.97 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 23 │ 13.7 ± 1.1 │ 19.3 ± 1.5 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 24 │ 13.0 ± 1.0 │ 20.2 ± 1.6 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 25 │ 14.3 ± 1.2 │ 20.5 ± 1.6 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 26 │ 14.7 ± 1.1 │ 20.1 ± 1.6 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 27 │ 11.37 ± 0.92 │ 30.7 ± 2.3 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 28 │ 29.5 ± 2.3 │ 61.1 ± 4.8 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 29 │ 24.4 ± 1.9 │ │\n", + 
"└────┴──────────────┴──────────────┘\n", + "2021-08-12 10:27:03,209 - ompy.error_finder - DEBUG - Processing an ensemble with 10 members\n", + "2021-08-12 10:27:03,211 - ompy.error_finder - DEBUG - Before removing nan: 45 NLD values and 31 GSF values\n", + "2021-08-12 10:27:03,213 - ompy.error_finder - WARNING - NLDs and/or γSFs contains nan's. They will be removed\n", + "2021-08-12 10:27:03,214 - ompy.error_finder - DEBUG - After removing nan: 30 NLD values and 30 GSF values\n", + "2021-08-12 10:27:03,217 - ompy.error_finder - INFO - Starting pyMC3 inference - logarithmic model\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Auto-assigning NUTS sampler...\n", + "Initializing NUTS using jitter+adapt_diag...\n", + "Multiprocess sampling (4 chains in 4 jobs)\n", + "NUTS: [σ_f, σ_ρ, α, F, D, σ_α, σ_F, σ_D]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " 100.00% [8000/8000 01:16<00:00 Sampling 4 chains, 0 divergences]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Sampling 4 chains for 1_000 tune and 1_000 draw iterations (4_000 + 4_000 draws total) took 88 seconds.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-08-12 10:28:44,238 - ompy.error_finder - INFO - Inference results:\n", + "┌──────────────┬─────────────┬────────────────┬───────────────┬───────────────┬─────────────────┐\n", + "│ D │ F │ α │ σ_D │ σ_F │ σ_α │\n", + "╞══════════════╪═════════════╪════════════════╪═══════════════╪═══════════════╪═════════════════╡\n", + "│ -0.00 ± 0.27 │ 0.00 ± 0.26 │ -0.000 ± 0.060 │ 0.273 ± 0.021 │ 0.266 ± 0.020 │ 0.0611 ± 0.0047 │\n", + "└──────────────┴─────────────┴────────────────┴───────────────┴───────────────┴─────────────────┘\n", + "┌────┬──────────────┬──────────────┐\n", + "│ │ σ_ρ [%] │ σ_f [%] │\n", + "╞════╪══════════════╪══════════════╡\n", + "│ 0 │ 7.03 ± 0.57 │ 5.15 ± 0.44 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 1 │ 6.36 ± 0.53 │ 5.85 ± 0.49 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 2 │ 4.85 ± 0.43 │ 8.68 ± 0.68 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 3 │ 4.81 ± 0.42 │ 5.02 ± 0.42 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 4 │ 6.86 ± 0.55 │ 5.09 ± 0.42 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 5 │ 5.03 ± 0.41 │ 4.17 ± 0.37 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 6 │ 5.72 ± 0.47 │ 4.46 ± 0.37 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 7 │ 6.18 ± 0.49 │ 4.66 ± 0.39 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 8 │ 5.90 ± 0.49 │ 5.40 ± 0.44 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 9 │ 5.94 ± 0.47 │ 5.95 ± 0.48 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 10 │ 3.08 ± 0.28 │ 4.41 ± 0.37 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 11 │ 3.76 ± 0.32 │ 7.15 ± 0.56 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 12 │ 6.78 ± 0.54 │ 5.51 ± 0.45 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 13 │ 4.80 ± 0.39 │ 7.55 ± 0.61 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 14 │ 6.66 ± 0.52 │ 5.37 ± 0.44 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 15 │ 5.38 ± 0.43 │ 8.77 ± 0.69 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 16 │ 6.71 ± 0.52 │ 8.42 ± 0.66 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 17 │ 5.38 ± 0.45 │ 6.16 ± 0.51 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 18 │ 6.20 ± 0.50 │ 7.66 ± 0.63 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 19 │ 7.71 ± 0.61 │ 11.61 ± 0.93 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 20 │ 5.06 ± 0.43 │ 6.47 ± 0.53 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 21 │ 4.66 ± 0.42 │ 10.73 ± 0.84 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 22 │ 7.49 ± 0.59 │ 12.72 ± 1.00 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 23 │ 10.23 ± 0.84 │ 21.5 ± 1.6 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 24 │ 12.8 ± 1.0 │ 15.1 ± 1.1 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 25 │ 9.91 ± 0.78 │ 21.5 ± 1.7 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 26 │ 10.05 ± 0.79 │ 11.83 ± 0.95 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 27 │ 9.32 ± 0.74 │ 27.7 ± 2.1 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 28 │ 15.9 ± 1.2 │ 23.2 ± 1.8 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 29 │ 24.6 ± 1.9 │ 39.1 ± 2.9 │\n", + 
"└────┴──────────────┴──────────────┘\n", + "2021-08-12 10:28:44,255 - ompy.error_finder - DEBUG - Processing an ensemble with 10 members\n", + "2021-08-12 10:28:44,256 - ompy.error_finder - DEBUG - Before removing nan: 45 NLD values and 31 GSF values\n", + "2021-08-12 10:28:44,257 - ompy.error_finder - WARNING - NLDs and/or γSFs contains nan's. They will be removed\n", + "2021-08-12 10:28:44,258 - ompy.error_finder - DEBUG - After removing nan: 30 NLD values and 30 GSF values\n", + "2021-08-12 10:28:44,259 - ompy.error_finder - INFO - Starting pyMC3 inference - logarithmic model\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Auto-assigning NUTS sampler...\n", + "Initializing NUTS using jitter+adapt_diag...\n", + "Multiprocess sampling (4 chains in 4 jobs)\n", + "NUTS: [σ_f, σ_ρ, α, F, D, σ_α, σ_F, σ_D]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " 100.00% [8000/8000 01:12<00:00 Sampling 4 chains, 0 divergences]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Sampling 4 chains for 1_000 tune and 1_000 draw iterations (4_000 + 4_000 draws total) took 83 seconds.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-08-12 10:30:19,612 - ompy.error_finder - INFO - Inference results:\n", + "┌─────────────┬─────────────┬────────────────┬───────────────┬───────────────┬─────────────────┐\n", + "│ D │ F │ α │ σ_D │ σ_F │ σ_α │\n", + "╞═════════════╪═════════════╪════════════════╪═══════════════╪═══════════════╪═════════════════╡\n", + "│ 0.00 ± 0.28 │ 0.00 ± 0.25 │ -0.000 ± 0.070 │ 0.287 ± 0.022 │ 0.256 ± 0.020 │ 0.0709 ± 0.0054 │\n", + "└─────────────┴─────────────┴────────────────┴───────────────┴───────────────┴─────────────────┘\n", + "┌────┬──────────────┬──────────────┐\n", + "│ │ σ_ρ [%] │ σ_f [%] │\n", + "╞════╪══════════════╪══════════════╡\n", + "│ 0 │ 3.92 ± 0.38 │ 6.39 ± 0.50 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 1 │ 5.01 ± 0.43 │ 9.85 ± 0.75 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 2 │ 6.02 ± 0.49 │ 4.82 ± 0.41 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 3 │ 14.4 ± 1.1 │ 5.66 ± 0.46 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 4 │ 7.86 ± 0.63 │ 3.61 ± 0.31 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 5 │ 5.53 ± 0.43 │ 2.93 ± 0.25 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 6 │ 6.85 ± 0.55 │ 5.49 ± 0.44 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 7 │ 7.72 ± 0.61 │ 2.18 ± 0.21 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 8 │ 10.03 ± 0.76 │ 1.99 ± 0.19 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 9 │ 5.47 ± 0.44 │ 3.02 ± 0.25 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 10 │ 3.54 ± 0.30 │ 6.09 ± 0.48 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 11 │ 4.13 ± 0.34 │ 3.43 ± 0.28 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 12 │ 4.08 ± 0.34 │ 4.61 ± 0.38 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 13 │ 4.23 ± 0.34 │ 4.93 ± 0.39 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 14 │ 3.22 ± 0.27 │ 5.77 ± 0.46 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 15 │ 3.68 ± 0.30 │ 6.53 ± 0.52 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 16 │ 4.40 ± 0.36 │ 6.57 ± 0.52 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 17 │ 4.99 ± 0.39 │ 6.79 ± 0.53 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 18 │ 3.43 ± 0.30 │ 5.68 ± 0.46 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 19 │ 6.10 ± 0.47 │ 7.53 ± 0.60 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 20 │ 4.91 ± 0.40 │ 8.97 ± 0.69 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 21 │ 5.45 ± 0.45 │ 11.10 ± 0.87 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 22 │ 6.74 ± 0.56 │ 9.76 ± 0.77 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 23 │ 12.57 ± 0.97 │ 6.78 ± 0.56 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 24 │ 5.65 ± 0.48 │ 12.15 ± 0.93 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 25 │ 8.14 ± 0.64 │ 14.1 ± 1.1 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 26 │ 9.20 ± 0.74 │ 13.0 ± 1.0 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 27 │ 13.2 ± 1.0 │ 13.6 ± 1.1 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 28 │ 14.6 ± 1.1 │ 26.9 ± 2.0 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 29 │ 19.4 ± 1.5 │ 59.4 ± 4.5 │\n", + 
"└────┴──────────────┴──────────────┘\n", + "2021-08-12 10:30:19,635 - ompy.error_finder - DEBUG - Processing an ensemble with 10 members\n", + "2021-08-12 10:30:19,636 - ompy.error_finder - DEBUG - Before removing nan: 45 NLD values and 31 GSF values\n", + "2021-08-12 10:30:19,639 - ompy.error_finder - WARNING - NLDs and/or γSFs contains nan's. They will be removed\n", + "2021-08-12 10:30:19,640 - ompy.error_finder - DEBUG - After removing nan: 30 NLD values and 30 GSF values\n", + "2021-08-12 10:30:19,643 - ompy.error_finder - INFO - Starting pyMC3 inference - logarithmic model\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Auto-assigning NUTS sampler...\n", + "Initializing NUTS using jitter+adapt_diag...\n", + "Multiprocess sampling (4 chains in 4 jobs)\n", + "NUTS: [σ_f, σ_ρ, α, F, D, σ_α, σ_F, σ_D]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " 100.00% [8000/8000 01:11<00:00 Sampling 4 chains, 0 divergences]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Sampling 4 chains for 1_000 tune and 1_000 draw iterations (4_000 + 4_000 draws total) took 80 seconds.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-08-12 10:31:52,349 - ompy.error_finder - INFO - Inference results:\n", + "┌─────────────┬─────────────┬────────────────┬───────────────┬───────────────┬─────────────────┐\n", + "│ D │ F │ α │ σ_D │ σ_F │ σ_α │\n", + "╞═════════════╪═════════════╪════════════════╪═══════════════╪═══════════════╪═════════════════╡\n", + "│ 0.00 ± 0.27 │ 0.00 ± 0.27 │ -0.000 ± 0.068 │ 0.270 ± 0.021 │ 0.272 ± 0.021 │ 0.0687 ± 0.0054 │\n", + "└─────────────┴─────────────┴────────────────┴───────────────┴───────────────┴─────────────────┘\n", + "┌────┬──────────────┬──────────────┐\n", + "│ │ σ_ρ [%] │ σ_f [%] │\n", + "╞════╪══════════════╪══════════════╡\n", + "│ 0 │ 3.33 ± 0.33 │ 4.17 ± 0.36 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 1 │ 4.48 ± 0.37 │ 4.63 ± 0.37 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 2 │ 4.78 ± 0.39 │ 7.46 ± 0.60 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 3 │ 6.06 ± 0.49 │ 6.49 ± 0.52 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 4 │ 6.26 ± 0.48 │ 3.75 ± 0.31 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 5 │ 4.15 ± 0.34 │ 2.19 ± 0.22 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 6 │ 5.05 ± 0.40 │ 2.88 ± 0.25 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 7 │ 3.45 ± 0.29 │ 2.94 ± 0.25 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 8 │ 5.09 ± 0.40 │ 4.48 ± 0.35 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 9 │ 6.23 ± 0.50 │ 4.35 ± 0.36 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 10 │ 2.53 ± 0.23 │ 5.59 ± 0.43 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 11 │ 2.43 ± 0.22 │ 6.86 ± 0.55 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 12 │ 3.69 ± 0.30 │ 5.46 ± 0.43 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 13 │ 3.68 ± 0.31 │ 2.70 ± 0.24 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 14 │ 5.96 ± 0.47 │ 3.00 ± 0.27 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 15 │ 5.46 ± 0.43 │ 5.17 ± 0.41 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 16 │ 6.62 ± 0.52 │ 4.24 ± 0.34 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 17 │ 5.32 ± 0.43 │ 5.54 ± 0.46 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 18 │ 4.28 ± 0.36 │ 8.53 ± 0.66 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 19 │ 5.97 ± 0.48 │ 7.53 ± 0.58 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 20 │ 7.17 ± 0.56 │ 8.87 ± 0.69 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 21 │ 5.17 ± 0.42 │ 12.02 ± 0.92 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 22 │ 6.53 ± 0.53 │ 9.95 ± 0.77 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 23 │ 6.01 ± 0.50 │ 10.43 ± 0.81 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 24 │ 7.28 ± 0.59 │ 11.55 ± 0.91 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 25 │ 11.97 ± 0.95 │ 9.76 ± 0.78 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 26 │ 11.42 ± 0.90 │ 9.85 ± 0.78 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 27 │ 13.0 ± 1.0 │ 16.1 ± 1.2 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 28 │ 15.5 ± 1.2 │ 16.2 ± 1.3 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 29 │ 28.1 ± 2.1 │ 45.6 ± 3.5 │\n", + 
"└────┴──────────────┴──────────────┘\n", + "2021-08-12 10:31:52,363 - ompy.error_finder - DEBUG - Processing an ensemble with 10 members\n", + "2021-08-12 10:31:52,364 - ompy.error_finder - DEBUG - Before removing nan: 45 NLD values and 31 GSF values\n", + "2021-08-12 10:31:52,366 - ompy.error_finder - WARNING - NLDs and/or γSFs contains nan's. They will be removed\n", + "2021-08-12 10:31:52,366 - ompy.error_finder - DEBUG - After removing nan: 30 NLD values and 30 GSF values\n", + "2021-08-12 10:31:52,368 - ompy.error_finder - INFO - Starting pyMC3 inference - logarithmic model\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Auto-assigning NUTS sampler...\n", + "Initializing NUTS using jitter+adapt_diag...\n", + "Multiprocess sampling (4 chains in 4 jobs)\n", + "NUTS: [σ_f, σ_ρ, α, F, D, σ_α, σ_F, σ_D]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " 100.00% [8000/8000 01:20<00:00 Sampling 4 chains, 0 divergences]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Sampling 4 chains for 1_000 tune and 1_000 draw iterations (4_000 + 4_000 draws total) took 91 seconds.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-08-12 10:33:36,028 - ompy.error_finder - INFO - Inference results:\n", + "┌──────────────┬─────────────┬────────────────┬───────────────┬───────────────┬─────────────────┐\n", + "│ D │ F │ α │ σ_D │ σ_F │ σ_α │\n", + "╞══════════════╪═════════════╪════════════════╪═══════════════╪═══════════════╪═════════════════╡\n", + "│ -0.00 ± 0.26 │ 0.00 ± 0.27 │ -0.000 ± 0.065 │ 0.267 ± 0.020 │ 0.277 ± 0.021 │ 0.0655 ± 0.0048 │\n", + "└──────────────┴─────────────┴────────────────┴───────────────┴───────────────┴─────────────────┘\n", + "┌────┬──────────────┬──────────────┐\n", + "│ │ σ_ρ [%] │ σ_f [%] │\n", + "╞════╪══════════════╪══════════════╡\n", + "│ 0 │ 2.91 ± 0.29 │ 4.62 ± 0.38 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 1 │ 2.96 ± 0.28 │ 5.65 ± 0.46 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 2 │ 6.34 ± 0.49 │ 5.15 ± 0.43 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 3 │ 6.48 ± 0.51 │ 7.13 ± 0.56 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 4 │ 5.02 ± 0.41 │ 2.92 ± 0.26 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 5 │ 6.16 ± 0.48 │ 3.43 ± 0.28 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 6 │ 2.50 ± 0.24 │ 5.28 ± 0.42 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 7 │ 5.92 ± 0.46 │ 3.40 ± 0.28 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 8 │ 3.90 ± 0.32 │ 2.65 ± 0.23 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 9 │ 3.83 ± 0.31 │ 3.11 ± 0.26 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 10 │ 3.20 ± 0.27 │ 2.94 ± 0.26 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 11 │ 3.57 ± 0.29 │ 4.68 ± 0.36 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 12 │ 3.79 ± 0.31 │ 3.85 ± 0.31 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 13 │ 2.20 ± 0.21 │ 4.81 ± 0.38 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 14 │ 4.83 ± 0.38 │ 3.43 ± 0.29 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 15 │ 5.23 ± 0.43 │ 4.71 ± 0.38 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 16 │ 6.10 ± 0.49 │ 6.24 ± 0.48 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 17 │ 5.14 ± 0.41 │ 3.44 ± 0.30 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 18 │ 4.56 ± 0.36 │ 6.61 ± 0.51 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 19 │ 4.49 ± 0.37 │ 8.78 ± 0.70 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 20 │ 6.63 ± 0.54 │ 5.14 ± 0.43 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 21 │ 7.12 ± 0.57 │ 9.77 ± 0.76 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 22 │ 4.64 ± 0.39 │ 9.62 ± 0.74 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 23 │ 9.07 ± 0.71 │ 8.81 ± 0.69 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 24 │ 7.14 ± 0.57 │ 12.52 ± 0.95 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 25 │ 5.19 ± 0.42 │ 11.66 ± 0.90 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 26 │ 6.18 ± 0.50 │ 9.75 ± 0.77 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 27 │ 12.23 ± 0.95 │ 5.69 ± 0.48 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 28 │ 11.26 ± 0.86 │ 17.5 ± 1.4 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 29 │ 27.1 ± 2.1 │ 21.1 ± 1.6 │\n", + 
"└────┴──────────────┴──────────────┘\n", + "2021-08-12 10:33:36,043 - ompy.error_finder - DEBUG - Processing an ensemble with 10 members\n", + "2021-08-12 10:33:36,044 - ompy.error_finder - DEBUG - Before removing nan: 45 NLD values and 31 GSF values\n", + "2021-08-12 10:33:36,045 - ompy.error_finder - WARNING - NLDs and/or γSFs contains nan's. They will be removed\n", + "2021-08-12 10:33:36,046 - ompy.error_finder - DEBUG - After removing nan: 30 NLD values and 30 GSF values\n", + "2021-08-12 10:33:36,048 - ompy.error_finder - INFO - Starting pyMC3 inference - logarithmic model\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Auto-assigning NUTS sampler...\n", + "Initializing NUTS using jitter+adapt_diag...\n", + "Multiprocess sampling (4 chains in 4 jobs)\n", + "NUTS: [σ_f, σ_ρ, α, F, D, σ_α, σ_F, σ_D]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " 100.00% [8000/8000 01:24<00:00 Sampling 4 chains, 0 divergences]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Sampling 4 chains for 1_000 tune and 1_000 draw iterations (4_000 + 4_000 draws total) took 94 seconds.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-08-12 10:35:22,537 - ompy.error_finder - INFO - Inference results:\n", + "┌──────────────┬──────────────┬───────────────┬───────────────┬───────────────┬─────────────────┐\n", + "│ D │ F │ α │ σ_D │ σ_F │ σ_α │\n", + "╞══════════════╪══════════════╪═══════════════╪═══════════════╪═══════════════╪═════════════════╡\n", + "│ -0.00 ± 0.26 │ -0.00 ± 0.26 │ 0.000 ± 0.064 │ 0.264 ± 0.020 │ 0.264 ± 0.020 │ 0.0646 ± 0.0049 │\n", + "└──────────────┴──────────────┴───────────────┴───────────────┴───────────────┴─────────────────┘\n", + "┌────┬──────────────┬──────────────┐\n", + "│ │ σ_ρ [%] │ σ_f [%] │\n", + "╞════╪══════════════╪══════════════╡\n", + "│ 0 │ 2.34 ± 0.26 │ 3.96 ± 0.33 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 1 │ 3.93 ± 0.33 │ 5.10 ± 0.40 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 2 │ 5.71 ± 0.44 │ 3.59 ± 0.29 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 3 │ 5.92 ± 0.48 │ 3.35 ± 0.28 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 4 │ 5.09 ± 0.42 │ 2.78 ± 0.23 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 5 │ 4.38 ± 0.34 │ 1.98 ± 0.18 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 6 │ 4.76 ± 0.38 │ 2.15 ± 0.18 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 7 │ 7.11 ± 0.56 │ 3.37 ± 0.27 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 8 │ 4.94 ± 0.39 │ 1.93 ± 0.16 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 9 │ 5.32 ± 0.40 │ 3.07 ± 0.25 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 10 │ 4.31 ± 0.34 │ 1.40 ± 0.14 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 11 │ 1.58 ± 0.16 │ 2.65 ± 0.21 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 12 │ 2.50 ± 0.22 │ 4.22 ± 0.33 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 13 │ 5.13 ± 0.38 │ 3.86 ± 0.29 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 14 │ 3.64 ± 0.29 │ 4.10 ± 0.33 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 15 │ 2.50 ± 0.21 │ 4.15 ± 0.32 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 16 │ 4.44 ± 0.35 │ 2.61 ± 0.22 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 17 │ 4.27 ± 0.34 │ 4.97 ± 0.39 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 18 │ 5.65 ± 0.44 │ 4.70 ± 0.37 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 19 │ 6.37 ± 0.50 │ 5.21 ± 0.41 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 20 │ 4.85 ± 0.39 │ 4.99 ± 0.40 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 21 │ 6.40 ± 0.49 │ 6.83 ± 0.55 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 22 │ 4.09 ± 0.34 │ 6.84 ± 0.55 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 23 │ 3.97 ± 0.34 │ 9.09 ± 0.74 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 24 │ 5.22 ± 0.43 │ 11.74 ± 0.91 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 25 │ 9.62 ± 0.75 │ 10.42 ± 0.80 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 26 │ 7.19 ± 0.57 │ 10.29 ± 0.80 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 27 │ 11.35 ± 0.88 │ 11.72 ± 0.90 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 28 │ 11.26 ± 0.88 │ 12.30 ± 0.94 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 29 │ 16.5 ± 1.3 │ 35.0 ± 2.7 │\n", + 
"└────┴──────────────┴──────────────┘\n", + "2021-08-12 10:35:22,554 - ompy.error_finder - DEBUG - Processing an ensemble with 10 members\n", + "2021-08-12 10:35:22,556 - ompy.error_finder - DEBUG - Before removing nan: 45 NLD values and 31 GSF values\n", + "2021-08-12 10:35:22,557 - ompy.error_finder - WARNING - NLDs and/or γSFs contains nan's. They will be removed\n", + "2021-08-12 10:35:22,558 - ompy.error_finder - DEBUG - After removing nan: 30 NLD values and 30 GSF values\n", + "2021-08-12 10:35:22,560 - ompy.error_finder - INFO - Starting pyMC3 inference - logarithmic model\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Auto-assigning NUTS sampler...\n", + "Initializing NUTS using jitter+adapt_diag...\n", + "Multiprocess sampling (4 chains in 4 jobs)\n", + "NUTS: [σ_f, σ_ρ, α, F, D, σ_α, σ_F, σ_D]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " 100.00% [8000/8000 01:26<00:00 Sampling 4 chains, 0 divergences]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Sampling 4 chains for 1_000 tune and 1_000 draw iterations (4_000 + 4_000 draws total) took 96 seconds.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-08-12 10:37:10,134 - ompy.error_finder - INFO - Inference results:\n", + "┌──────────────┬──────────────┬───────────────┬───────────────┬───────────────┬─────────────────┐\n", + "│ D │ F │ α │ σ_D │ σ_F │ σ_α │\n", + "╞══════════════╪══════════════╪═══════════════╪═══════════════╪═══════════════╪═════════════════╡\n", + "│ -0.00 ± 0.26 │ -0.00 ± 0.26 │ 0.000 ± 0.067 │ 0.267 ± 0.020 │ 0.263 ± 0.020 │ 0.0679 ± 0.0050 │\n", + "└──────────────┴──────────────┴───────────────┴───────────────┴───────────────┴─────────────────┘\n", + "┌────┬──────────────┬─────────────┐\n", + "│ │ σ_ρ [%] │ σ_f [%] │\n", + "╞════╪══════════════╪═════════════╡\n", + "│ 0 │ 1.93 ± 0.22 │ 4.11 ± 0.33 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 1 │ 1.93 ± 0.21 │ 4.09 ± 0.34 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 2 │ 3.32 ± 0.26 │ 3.58 ± 0.27 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 3 │ 4.63 ± 0.36 │ 4.97 ± 0.39 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 4 │ 3.59 ± 0.29 │ 1.15 ± 0.14 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 5 │ 3.38 ± 0.27 │ 2.52 ± 0.21 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 6 │ 3.17 ± 0.27 │ 2.17 ± 0.18 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 7 │ 6.15 ± 0.48 │ 1.53 ± 0.14 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 8 │ 3.22 ± 0.26 │ 2.31 ± 0.19 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 9 │ 3.39 ± 0.27 │ 2.20 ± 0.18 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 10 │ 1.71 ± 0.16 │ 1.60 ± 0.14 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 11 │ 2.30 ± 0.19 │ 1.47 ± 0.13 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 12 │ 3.06 ± 0.24 │ 2.94 ± 0.23 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 13 │ 3.54 ± 0.28 │ 2.93 ± 0.23 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 14 │ 2.92 ± 0.23 │ 3.47 ± 0.27 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 15 │ 4.48 ± 0.35 │ 2.69 ± 0.22 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 16 │ 2.54 ± 0.21 │ 5.13 ± 0.40 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 17 │ 4.21 ± 0.35 │ 4.63 ± 0.36 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 18 │ 4.35 ± 0.33 │ 5.47 ± 0.41 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 19 │ 3.53 ± 0.30 │ 2.54 ± 0.22 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 20 │ 4.15 ± 0.33 │ 7.84 ± 0.58 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 21 │ 5.25 ± 0.41 │ 6.89 ± 0.54 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 22 │ 6.55 ± 0.50 │ 5.28 ± 0.40 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 23 │ 5.71 ± 0.46 │ 6.38 ± 0.50 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 24 │ 4.41 ± 0.38 │ 5.69 ± 0.45 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 25 │ 5.09 ± 0.41 │ 7.27 ± 0.56 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 26 │ 8.14 ± 0.63 │ 9.80 ± 0.75 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 27 │ 7.64 ± 0.60 │ 6.46 ± 0.52 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 28 │ 12.73 ± 1.00 │ 17.3 ± 1.3 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 29 │ 19.5 ± 1.5 │ 19.1 ± 1.4 │\n", + "└────┴──────────────┴─────────────┘\n" + ] + } + ], + 
"source": [ + "nld_errors = []\n", + "gsf_errors = []\n", + "for extractor in extractors:\n", + " nld_err, gsf_err = error_estimator.evaluate(extractor.nld, extractor.gsf)\n", + " nld_errors.append(nld_err.copy())\n", + " gsf_errors.append(gsf_err.copy())" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "id": "490ba702-a5a8-4717-a49b-2b4cf1bff5df", + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "8ff5556759514faa9bb3dc58ac2e0c92", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "955289786d5d41409c72be764dbca56b", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "84b7609888cf45308cb7d6f841292873", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "def make_vec(nlds, counts, point):\n", + " return om.Vector(E=np.array(counts)/1.0e6, values=np.array([nld.values[nld.index(point)] for nld in nlds]),\n", + " std=np.array([nld.std[nld.index(point)] for nld in nlds]))\n", + "\n", + "extractors[-1].plot()\n", + "\n", + "err = nld_errors[0].copy()\n", + "err.values *= 100.\n", + "fig, ax = err.plot(label=\"N=25000\")\n", + "for i in range(1, len(counts), 4):\n", + " err = nld_errors[i].copy()\n", + " err.values *= 100.\n", + " err.plot(ax=ax, label=f\"N={counts[i]}\")\n", + "\n", + "err = nld_errors[-1].copy()\n", + "err.values *= 100.\n", + "err.plot(ax=ax, label=f\"N={counts[-1]}\")\n", + " \n", + "ax.set_ylim(0, 100)\n", + "ax.set_ylabel(\"Relative error [%]\")\n", + "ax.legend(loc='best')\n", + "\n", + "nld_0MeV = make_vec(nld_errors, counts, 0)\n", + "nld_15MeV = make_vec(nld_errors, counts, 1.5)\n", + "nld_3MeV = make_vec(nld_errors, counts, 3)\n", + "nld_5MeV = make_vec(nld_errors, counts, 5)\n", + "\n", + "nld_0MeV = make_vec(nld_errors, counts, 0)\n", + "nld_0MeV.values *= 100.\n", + "nld_0MeV.std *= 100.\n", + "# nld_0MeV.save(\"nld_0MeV.csv\", sep=\"\\t\")\n", + "nld_2MeV = make_vec(nld_errors, counts, 2)\n", + "nld_2MeV.values *= 100.\n", + "nld_2MeV.std *= 100.\n", + "# nld_2MeV.save(\"nld_2MeV.csv\", sep=\"\\t\")\n", + "nld_4MeV = make_vec(nld_errors, counts, 4)\n", + "nld_4MeV.values *= 100.\n", + "nld_4MeV.std *= 100.\n", + "# nld_4MeV.save(\"nld_4MeV.csv\", sep=\"\\t\")\n", + "\n", + "nld_1MeV = make_vec(nld_errors, counts, 0)\n", + "_, ax = nld_0MeV.plot(label=r\"$E_x = 0$ MeV\")\n", + "nld_2MeV.plot(ax=ax, label=r\"$E_x = 2$ MeV\")\n", + "nld_4MeV.plot(ax=ax, label=r\"$E_x = 4$ MeV\")\n", + "ax.set_xlabel(\"Counts [$10^6$]\")\n", + "c = np.linspace(min(counts), max(counts), 1001)\n", + "ax.plot(c/1e6, (np.sqrt(counts[0])*20.)*1./np.sqrt(c))\n", + "ax.legend(loc='best')\n", + "ax.set_ylabel(\"Relative uncertanty [%]\");" + ] + }, + { + "cell_type": "markdown", + "id": "71efe2f7-63a5-4c3e-a0c5-723e9b0859c2", + "metadata": {}, + "source": [ + "### 
Investigate dependence on ensemble members" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "a48c4fb1-2d00-4073-badf-eb0dc295dae8", + "metadata": { + "collapsed": true, + "jupyter": { + "outputs_hidden": true + }, + "tags": [] + }, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "bbc78f1db92f418fb5b0904c0e5e3de6", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + " 0%| | 0/3 [00:00\n", + " \n", + " \n", + " 100.00% [8000/8000 01:10<00:00 Sampling 4 chains, 500 divergences]\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Sampling 4 chains for 1_000 tune and 1_000 draw iterations (4_000 + 4_000 draws total) took 82 seconds.\n", + "There were 310 divergences after tuning. Increase `target_accept` or reparameterize.\n", + "The acceptance probability does not match the target. It is 0.5323431089331814, but should be close to 0.8. Try to increase the number of tuning steps.\n", + "There were 29 divergences after tuning. Increase `target_accept` or reparameterize.\n", + "There were 117 divergences after tuning. Increase `target_accept` or reparameterize.\n", + "The acceptance probability does not match the target. It is 0.6415360461552452, but should be close to 0.8. Try to increase the number of tuning steps.\n", + "There were 44 divergences after tuning. Increase `target_accept` or reparameterize.\n", + "The rhat statistic is larger than 1.05 for some parameters. This indicates slight problems during sampling.\n", + "The estimated number of effective samples is smaller than 200 for some parameters.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-08-12 10:44:36,679 - ompy.error_finder - INFO - Inference results:\n", + "┌─────────────┬─────────────┬────────────────┬─────────────┬─────────────┬───────────────┐\n", + "│ D │ F │ α │ σ_D │ σ_F │ σ_α │\n", + "╞═════════════╪═════════════╪════════════════╪═════════════╪═════════════╪═══════════════╡\n", + "│ 0.00 ± 0.31 │ 0.00 ± 0.32 │ -0.000 ± 0.069 │ 0.39 ± 0.15 │ 0.41 ± 0.16 │ 0.088 ± 0.037 │\n", + "└─────────────┴─────────────┴────────────────┴─────────────┴─────────────┴───────────────┘\n", + "┌────┬───────────────┬─────────────┐\n", + "│ │ σ_ρ [%] │ σ_f [%] │\n", + "╞════╪═══════════════╪═════════════╡\n", + "│ 0 │ 2.32 ± 0.94 │ 3.2 ± 1.3 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 1 │ 0.046 ± 0.039 │ 1.38 ± 0.58 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 2 │ 0.049 ± 0.044 │ 2.5 ± 1.1 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 3 │ 3.3 ± 1.2 │ 3.3 ± 1.3 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 4 │ 2.8 ± 1.0 │ 0.85 ± 0.49 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 5 │ 3.9 ± 1.6 │ 2.9 ± 1.5 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 6 │ 4.4 ± 1.7 │ 0.25 ± 0.32 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 7 │ 3.5 ± 1.4 │ 1.31 ± 0.55 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 8 │ 4.5 ± 1.8 │ 2.03 ± 0.91 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 9 │ 3.8 ± 1.5 │ 1.16 ± 0.46 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 10 │ 1.24 ± 0.53 │ 2.21 ± 0.95 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 11 │ 2.05 ± 0.82 │ 2.5 ± 1.2 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 12 │ 2.39 ± 1.00 │ 6.9 ± 2.7 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 13 │ 3.9 ± 1.5 │ 0.10 ± 
0.11 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 14 │ 1.54 ± 0.65 │ 2.20 ± 0.84 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 15 │ 4.2 ± 1.6 │ 1.81 ± 0.74 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 16 │ 6.4 ± 3.2 │ 4.3 ± 2.5 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 17 │ 6.6 ± 2.7 │ 1.24 ± 0.80 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 18 │ 3.2 ± 1.3 │ 8.3 ± 3.6 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 19 │ 1.29 ± 0.73 │ 7.5 ± 3.1 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 20 │ 1.30 ± 0.72 │ 0.27 ± 0.19 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 21 │ 1.47 ± 0.82 │ 7.8 ± 2.9 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 22 │ 11.4 ± 4.5 │ 0.35 ± 0.31 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 23 │ 4.7 ± 2.1 │ 5.6 ± 2.2 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 24 │ 3.5 ± 1.5 │ 2.5 ± 1.0 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 25 │ 3.5 ± 1.6 │ 2.8 ± 1.3 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 26 │ 10.3 ± 4.1 │ 7.3 ± 3.2 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 27 │ 4.3 ± 1.8 │ 5.6 ± 2.6 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 28 │ 12.2 ± 4.7 │ 34 ± 16 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 29 │ 16.5 ± 6.8 │ 31 ± 11 │\n", + "└────┴───────────────┴─────────────┘\n", + "2021-08-12 10:44:36,687 - ompy.error_finder - DEBUG - Processing an ensemble with 4 members\n", + "2021-08-12 10:44:36,688 - ompy.error_finder - DEBUG - Before removing nan: 45 NLD values and 31 GSF values\n", + "2021-08-12 10:44:36,689 - ompy.error_finder - WARNING - NLDs and/or γSFs contains nan's. They will be removed\n", + "2021-08-12 10:44:36,690 - ompy.error_finder - DEBUG - After removing nan: 30 NLD values and 30 GSF values\n", + "2021-08-12 10:44:36,690 - ompy.error_finder - INFO - Starting pyMC3 inference - logarithmic model\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Auto-assigning NUTS sampler...\n", + "Initializing NUTS using jitter+adapt_diag...\n", + "Multiprocess sampling (4 chains in 4 jobs)\n", + "NUTS: [σ_f, σ_ρ, α, F, D, σ_α, σ_F, σ_D]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " 100.00% [8000/8000 01:01<00:00 Sampling 4 chains, 19 divergences]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Sampling 4 chains for 1_000 tune and 1_000 draw iterations (4_000 + 4_000 draws total) took 71 seconds.\n", + "There were 3 divergences after tuning. Increase `target_accept` or reparameterize.\n", + "There were 3 divergences after tuning. Increase `target_accept` or reparameterize.\n", + "There were 13 divergences after tuning. Increase `target_accept` or reparameterize.\n", + "The rhat statistic is larger than 1.05 for some parameters. This indicates slight problems during sampling.\n", + "The estimated number of effective samples is smaller than 200 for some parameters.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-08-12 10:45:57,231 - ompy.error_finder - INFO - Inference results:\n", + "┌──────────────┬─────────────┬────────────────┬───────────────┬─────────────┬───────────────┐\n", + "│ D │ F │ α │ σ_D │ σ_F │ σ_α │\n", + "╞══════════════╪═════════════╪════════════════╪═══════════════╪═════════════╪═══════════════╡\n", + "│ -0.00 ± 0.31 │ 0.00 ± 0.43 │ -0.000 ± 0.071 │ 0.346 ± 0.082 │ 0.48 ± 0.12 │ 0.080 ± 0.019 │\n", + "└──────────────┴─────────────┴────────────────┴───────────────┴─────────────┴───────────────┘\n", + "┌────┬─────────────┬─────────────┐\n", + "│ │ σ_ρ [%] │ σ_f [%] │\n", + "╞════╪═════════════╪═════════════╡\n", + "│ 0 │ 0.51 ± 0.32 │ 1.96 ± 0.49 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 1 │ 0.62 ± 0.31 │ 0.22 ± 0.15 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 2 │ 1.21 ± 0.31 │ 3.18 ± 0.76 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 3 │ 3.35 ± 0.85 │ 2.98 ± 0.73 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 4 │ 2.11 ± 0.51 │ 0.28 ± 0.15 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 5 │ 2.79 ± 0.66 │ 2.94 ± 0.69 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 6 │ 2.23 ± 0.57 │ 1.65 ± 0.41 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 7 │ 3.87 ± 0.92 │ 1.49 ± 0.37 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 8 │ 4.07 ± 0.98 │ 2.19 ± 0.54 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 9 │ 2.14 ± 0.51 │ 1.48 ± 0.38 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 10 │ 0.78 ± 0.23 │ 1.48 ± 0.38 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 11 │ 2.45 ± 0.58 │ 0.52 ± 0.25 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 12 │ 3.08 ± 0.73 │ 5.7 ± 1.3 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 13 │ 1.33 ± 0.34 │ 1.77 ± 0.46 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 14 │ 0.26 ± 0.23 │ 5.0 ± 1.2 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 15 │ 4.3 ± 1.1 │ 3.24 ± 0.83 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 16 │ 4.9 ± 1.2 │ 4.6 ± 1.1 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 17 │ 4.5 ± 1.1 │ 3.19 ± 0.77 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 18 │ 6.8 ± 1.7 │ 8.4 ± 2.0 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 19 │ 3.62 ± 0.88 │ 2.85 ± 0.69 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 20 │ 3.19 ± 0.81 │ 6.1 ± 1.5 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 21 │ 3.69 ± 0.89 │ 6.9 ± 1.7 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 22 │ 7.8 ± 1.8 │ 1.65 ± 0.54 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 23 │ 4.5 ± 1.1 │ 9.4 ± 2.3 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 24 │ 2.90 ± 0.75 │ 9.2 ± 2.3 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 25 │ 6.6 ± 1.6 │ 5.2 ± 1.3 
│\n", + "├────┼─────────────┼─────────────┤\n", + "│ 26 │ 7.8 ± 1.8 │ 6.0 ± 1.5 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 27 │ 5.2 ± 1.3 │ 7.3 ± 1.8 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 28 │ 10.7 ± 2.7 │ 26.9 ± 6.3 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 29 │ 25.5 ± 6.1 │ 24.6 ± 5.9 │\n", + "└────┴─────────────┴─────────────┘\n", + "2021-08-12 10:45:57,237 - ompy.error_finder - DEBUG - Processing an ensemble with 5 members\n", + "2021-08-12 10:45:57,238 - ompy.error_finder - DEBUG - Before removing nan: 45 NLD values and 31 GSF values\n", + "2021-08-12 10:45:57,239 - ompy.error_finder - WARNING - NLDs and/or γSFs contains nan's. They will be removed\n", + "2021-08-12 10:45:57,240 - ompy.error_finder - DEBUG - After removing nan: 30 NLD values and 30 GSF values\n", + "2021-08-12 10:45:57,241 - ompy.error_finder - INFO - Starting pyMC3 inference - logarithmic model\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Auto-assigning NUTS sampler...\n", + "Initializing NUTS using jitter+adapt_diag...\n", + "Multiprocess sampling (4 chains in 4 jobs)\n", + "NUTS: [σ_f, σ_ρ, α, F, D, σ_α, σ_F, σ_D]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " 100.00% [8000/8000 01:29<00:00 Sampling 4 chains, 57 divergences]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Sampling 4 chains for 1_000 tune and 1_000 draw iterations (4_000 + 4_000 draws total) took 98 seconds.\n", + "There was 1 divergence after tuning. Increase `target_accept` or reparameterize.\n", + "There were 8 divergences after tuning. Increase `target_accept` or reparameterize.\n", + "The acceptance probability does not match the target. It is 0.6820218655545547, but should be close to 0.8. Try to increase the number of tuning steps.\n", + "There were 48 divergences after tuning. Increase `target_accept` or reparameterize.\n", + "The acceptance probability does not match the target. It is 0.5621465385353126, but should be close to 0.8. Try to increase the number of tuning steps.\n", + "The acceptance probability does not match the target. It is 0.9501354874730317, but should be close to 0.8. Try to increase the number of tuning steps.\n", + "The rhat statistic is larger than 1.05 for some parameters. This indicates slight problems during sampling.\n", + "The estimated number of effective samples is smaller than 200 for some parameters.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-08-12 10:47:45,945 - ompy.error_finder - INFO - Inference results:\n", + "┌─────────────┬─────────────┬────────────────┬───────────────┬───────────────┬───────────────┐\n", + "│ D │ F │ α │ σ_D │ σ_F │ σ_α │\n", + "╞═════════════╪═════════════╪════════════════╪═══════════════╪═══════════════╪═══════════════╡\n", + "│ 0.00 ± 0.28 │ 0.00 ± 0.34 │ -0.000 ± 0.064 │ 0.309 ± 0.058 │ 0.368 ± 0.063 │ 0.068 ± 0.012 │\n", + "└─────────────┴─────────────┴────────────────┴───────────────┴───────────────┴───────────────┘\n", + "┌────┬───────────────┬─────────────┐\n", + "│ │ σ_ρ [%] │ σ_f [%] │\n", + "╞════╪═══════════════╪═════════════╡\n", + "│ 0 │ 2.22 ± 0.57 │ 1.41 ± 0.55 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 1 │ 0.41 ± 0.51 │ 3.44 ± 0.71 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 2 │ 2.12 ± 0.49 │ 4.29 ± 0.82 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 3 │ 2.80 ± 0.58 │ 3.72 ± 0.66 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 4 │ 2.74 ± 0.55 │ 1.67 ± 0.46 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 5 │ 2.85 ± 0.55 │ 1.91 ± 0.35 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 6 │ 3.10 ± 0.54 │ 2.01 ± 0.40 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 7 │ 4.17 ± 0.72 │ 0.96 ± 0.32 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 8 │ 3.54 ± 0.62 │ 1.21 ± 0.24 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 9 │ 2.09 ± 0.37 │ 3.27 ± 0.65 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 10 │ 2.74 ± 0.47 │ 1.02 ± 0.23 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 11 │ 2.08 ± 0.35 │ 0.89 ± 0.24 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 12 │ 2.15 ± 0.37 │ 5.19 ± 0.90 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 13 │ 2.73 ± 0.47 │ 2.41 ± 0.42 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 14 │ 0.128 ± 0.079 │ 3.60 ± 0.64 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 15 │ 3.33 ± 0.56 │ 2.12 ± 0.43 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 16 │ 3.74 ± 0.66 │ 4.41 ± 0.81 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 17 │ 4.43 ± 0.77 │ 3.90 ± 0.72 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 18 │ 5.13 ± 0.94 │ 6.1 ± 1.1 │\n", + 
"├────┼───────────────┼─────────────┤\n", + "│ 19 │ 2.85 ± 0.49 │ 2.83 ± 0.57 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 20 │ 2.94 ± 0.50 │ 6.0 ± 1.0 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 21 │ 5.77 ± 0.98 │ 6.0 ± 1.1 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 22 │ 7.5 ± 1.3 │ 1.78 ± 0.47 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 23 │ 3.26 ± 0.56 │ 8.1 ± 1.4 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 24 │ 2.88 ± 0.53 │ 7.6 ± 1.3 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 25 │ 6.2 ± 1.1 │ 5.22 ± 0.88 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 26 │ 9.2 ± 1.6 │ 7.0 ± 1.3 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 27 │ 2.37 ± 0.43 │ 6.8 ± 1.2 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 28 │ 9.5 ± 1.7 │ 24.6 ± 4.2 │\n", + "├────┼───────────────┼─────────────┤\n", + "│ 29 │ 27.3 ± 4.6 │ 27.3 ± 4.9 │\n", + "└────┴───────────────┴─────────────┘\n", + "2021-08-12 10:47:45,954 - ompy.error_finder - DEBUG - Processing an ensemble with 6 members\n", + "2021-08-12 10:47:45,956 - ompy.error_finder - DEBUG - Before removing nan: 45 NLD values and 31 GSF values\n", + "2021-08-12 10:47:45,957 - ompy.error_finder - WARNING - NLDs and/or γSFs contains nan's. They will be removed\n", + "2021-08-12 10:47:45,957 - ompy.error_finder - DEBUG - After removing nan: 30 NLD values and 30 GSF values\n", + "2021-08-12 10:47:45,959 - ompy.error_finder - INFO - Starting pyMC3 inference - logarithmic model\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Auto-assigning NUTS sampler...\n", + "Initializing NUTS using jitter+adapt_diag...\n", + "Multiprocess sampling (4 chains in 4 jobs)\n", + "NUTS: [σ_f, σ_ρ, α, F, D, σ_α, σ_F, σ_D]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " 100.00% [8000/8000 01:08<00:00 Sampling 4 chains, 949 divergences]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Sampling 4 chains for 1_000 tune and 1_000 draw iterations (4_000 + 4_000 draws total) took 78 seconds.\n", + "There was 1 divergence after tuning. Increase `target_accept` or reparameterize.\n", + "There were 944 divergences after tuning. Increase `target_accept` or reparameterize.\n", + "The acceptance probability does not match the target. It is 2.8572558743334045e-23, but should be close to 0.8. Try to increase the number of tuning steps.\n", + "There were 3 divergences after tuning. Increase `target_accept` or reparameterize.\n", + "There was 1 divergence after tuning. Increase `target_accept` or reparameterize.\n", + "The rhat statistic is larger than 1.4 for some parameters. The sampler did not converge.\n", + "The estimated number of effective samples is smaller than 200 for some parameters.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-08-12 10:49:15,688 - ompy.error_finder - INFO - Inference results:\n", + "┌─────────────┬─────────────┬────────────────┬───────────────┬───────────────┬───────────────┐\n", + "│ D │ F │ α │ σ_D │ σ_F │ σ_α │\n", + "╞═════════════╪═════════════╪════════════════╪═══════════════╪═══════════════╪═══════════════╡\n", + "│ 0.00 ± 0.27 │ 0.00 ± 0.31 │ -0.000 ± 0.081 │ 0.273 ± 0.037 │ 0.316 ± 0.045 │ 0.087 ± 0.011 │\n", + "└─────────────┴─────────────┴────────────────┴───────────────┴───────────────┴───────────────┘\n", + "┌────┬─────────────┬─────────────┐\n", + "│ │ σ_ρ [%] │ σ_f [%] │\n", + "╞════╪═════════════╪═════════════╡\n", + "│ 0 │ 2.86 ± 0.50 │ 1.47 ± 0.20 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 1 │ 0.26 ± 0.17 │ 3.49 ± 0.50 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 2 │ 1.55 ± 0.19 │ 3.99 ± 0.51 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 3 │ 3.58 ± 0.43 │ 3.63 ± 0.43 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 4 │ 2.85 ± 0.38 │ 1.86 ± 0.38 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 5 │ 2.97 ± 0.38 │ 1.92 ± 0.26 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 6 │ 3.97 ± 0.55 │ 2.70 ± 0.35 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 7 │ 7.31 ± 0.90 │ 1.02 ± 0.28 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 8 │ 3.50 ± 0.43 │ 2.14 ± 0.28 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 9 │ 3.21 ± 0.39 │ 2.73 ± 0.39 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 10 │ 2.68 ± 0.35 │ 1.81 ± 0.24 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 11 │ 2.38 ± 0.28 │ 0.83 ± 0.22 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 12 │ 1.83 ± 0.23 │ 3.85 ± 0.46 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 13 │ 3.64 ± 0.44 │ 2.70 ± 0.34 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 14 │ 0.15 ± 0.13 │ 3.51 ± 0.60 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 15 │ 4.97 ± 0.55 │ 3.19 ± 0.53 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 16 │ 3.25 ± 0.52 │ 4.47 ± 0.63 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 17 │ 4.39 ± 0.67 │ 5.29 ± 0.74 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 18 │ 5.15 ± 0.68 │ 6.11 ± 0.73 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 19 │ 4.01 ± 0.50 │ 3.28 ± 0.49 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 20 │ 3.50 ± 0.41 │ 8.11 ± 1.00 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 21 │ 5.02 ± 0.62 │ 5.32 ± 0.62 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 22 │ 8.3 ± 1.1 
│ 3.60 ± 0.51 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 23 │ 5.03 ± 0.57 │ 6.85 ± 0.91 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 24 │ 4.46 ± 0.54 │ 5.80 ± 0.73 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 25 │ 5.80 ± 0.70 │ 5.00 ± 0.61 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 26 │ 10.2 ± 1.3 │ 6.1 ± 1.0 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 27 │ 7.81 ± 0.96 │ 6.4 ± 1.0 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 28 │ 9.7 ± 1.2 │ 24.0 ± 2.9 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 29 │ 25.3 ± 3.4 │ 22.9 ± 2.9 │\n", + "└────┴─────────────┴─────────────┘\n", + "2021-08-12 10:49:15,696 - ompy.error_finder - DEBUG - Processing an ensemble with 7 members\n", + "2021-08-12 10:49:15,697 - ompy.error_finder - DEBUG - Before removing nan: 45 NLD values and 31 GSF values\n", + "2021-08-12 10:49:15,698 - ompy.error_finder - WARNING - NLDs and/or γSFs contains nan's. They will be removed\n", + "2021-08-12 10:49:15,699 - ompy.error_finder - DEBUG - After removing nan: 30 NLD values and 30 GSF values\n", + "2021-08-12 10:49:15,700 - ompy.error_finder - INFO - Starting pyMC3 inference - logarithmic model\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Auto-assigning NUTS sampler...\n", + "Initializing NUTS using jitter+adapt_diag...\n", + "Multiprocess sampling (4 chains in 4 jobs)\n", + "NUTS: [σ_f, σ_ρ, α, F, D, σ_α, σ_F, σ_D]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " 100.00% [8000/8000 00:43<00:00 Sampling 4 chains, 0 divergences]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Sampling 4 chains for 1_000 tune and 1_000 draw iterations (4_000 + 4_000 draws total) took 54 seconds.\n", + "The number of effective samples is smaller than 25% for some parameters.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-08-12 10:50:20,251 - ompy.error_finder - INFO - Inference results:\n", + "┌──────────────┬──────────────┬───────────────┬───────────────┬───────────────┬─────────────────┐\n", + "│ D │ F │ α │ σ_D │ σ_F │ σ_α │\n", + "╞══════════════╪══════════════╪═══════════════╪═══════════════╪═══════════════╪═════════════════╡\n", + "│ -0.00 ± 0.25 │ -0.00 ± 0.29 │ 0.000 ± 0.077 │ 0.260 ± 0.031 │ 0.297 ± 0.033 │ 0.0791 ± 0.0088 │\n", + "└──────────────┴──────────────┴───────────────┴───────────────┴───────────────┴─────────────────┘\n", + "┌────┬─────────────┬─────────────┐\n", + "│ │ σ_ρ [%] │ σ_f [%] │\n", + "╞════╪═════════════╪═════════════╡\n", + "│ 0 │ 1.31 ± 0.30 │ 3.14 ± 0.39 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 1 │ 2.23 ± 0.32 │ 3.56 ± 0.42 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 2 │ 3.26 ± 0.39 │ 2.96 ± 0.36 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 3 │ 5.00 ± 0.59 │ 5.07 ± 0.58 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 4 │ 2.79 ± 0.34 │ 0.70 ± 0.20 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 5 │ 3.25 ± 0.41 │ 1.99 ± 0.23 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 6 │ 3.31 ± 0.40 │ 1.83 ± 0.22 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 7 │ 6.36 ± 0.75 │ 1.68 ± 0.24 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 8 │ 4.11 ± 0.48 │ 2.46 ± 0.30 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 9 │ 3.18 ± 0.38 │ 1.71 ± 0.23 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 10 │ 2.07 ± 0.27 │ 1.82 ± 0.23 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 11 │ 2.40 ± 0.29 │ 1.53 ± 0.22 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 12 │ 2.60 ± 0.31 │ 3.57 ± 0.42 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 13 │ 3.72 ± 0.43 │ 3.44 ± 0.40 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 14 │ 1.51 ± 0.21 │ 3.60 ± 0.44 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 15 │ 4.27 ± 0.51 │ 3.19 ± 0.40 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 16 │ 2.78 ± 0.33 │ 5.57 ± 0.65 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 17 │ 4.15 ± 0.49 │ 5.26 ± 0.62 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 18 │ 3.74 ± 0.44 │ 5.50 ± 0.62 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 19 │ 4.04 ± 0.49 │ 3.14 ± 0.40 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 20 │ 3.20 ± 0.41 │ 6.71 ± 0.76 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 21 │ 5.92 ± 0.70 │ 6.44 ± 0.75 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 22 │ 6.83 ± 0.80 │ 4.82 ± 0.57 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 23 │ 3.13 ± 0.41 │ 7.13 ± 0.84 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 24 │ 3.49 ± 0.47 │ 6.02 ± 0.71 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 25 │ 6.08 ± 0.72 │ 6.51 ± 0.78 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 26 │ 10.4 ± 1.2 │ 10.6 ± 1.2 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 27 │ 4.78 ± 0.60 │ 7.69 ± 0.92 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 28 │ 8.12 ± 0.98 │ 20.8 ± 2.4 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 29 │ 23.2 ± 2.6 │ 24.1 ± 2.7 │\n", + 
"└────┴─────────────┴─────────────┘\n", + "2021-08-12 10:50:20,264 - ompy.error_finder - DEBUG - Processing an ensemble with 8 members\n", + "2021-08-12 10:50:20,265 - ompy.error_finder - DEBUG - Before removing nan: 45 NLD values and 31 GSF values\n", + "2021-08-12 10:50:20,266 - ompy.error_finder - WARNING - NLDs and/or γSFs contains nan's. They will be removed\n", + "2021-08-12 10:50:20,266 - ompy.error_finder - DEBUG - After removing nan: 30 NLD values and 30 GSF values\n", + "2021-08-12 10:50:20,267 - ompy.error_finder - INFO - Starting pyMC3 inference - logarithmic model\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Auto-assigning NUTS sampler...\n", + "Initializing NUTS using jitter+adapt_diag...\n", + "Multiprocess sampling (4 chains in 4 jobs)\n", + "NUTS: [σ_f, σ_ρ, α, F, D, σ_α, σ_F, σ_D]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " 100.00% [8000/8000 00:54<00:00 Sampling 4 chains, 0 divergences]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Sampling 4 chains for 1_000 tune and 1_000 draw iterations (4_000 + 4_000 draws total) took 64 seconds.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-08-12 10:51:35,371 - ompy.error_finder - INFO - Inference results:\n", + "┌─────────────┬─────────────┬────────────────┬───────────────┬───────────────┬─────────────────┐\n", + "│ D │ F │ α │ σ_D │ σ_F │ σ_α │\n", + "╞═════════════╪═════════════╪════════════════╪═══════════════╪═══════════════╪═════════════════╡\n", + "│ 0.00 ± 0.23 │ 0.00 ± 0.28 │ -0.000 ± 0.073 │ 0.238 ± 0.024 │ 0.287 ± 0.028 │ 0.0747 ± 0.0072 │\n", + "└─────────────┴─────────────┴────────────────┴───────────────┴───────────────┴─────────────────┘\n", + "┌────┬─────────────┬─────────────┐\n", + "│ │ σ_ρ [%] │ σ_f [%] │\n", + "╞════╪═════════════╪═════════════╡\n", + "│ 0 │ 2.19 ± 0.29 │ 2.51 ± 0.27 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 1 │ 2.23 ± 0.28 │ 4.92 ± 0.48 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 2 │ 3.14 ± 0.33 │ 3.64 ± 0.37 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 3 │ 5.16 ± 0.53 │ 4.67 ± 0.46 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 4 │ 3.37 ± 0.34 │ 1.17 ± 0.17 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 5 │ 3.00 ± 0.31 │ 1.90 ± 0.21 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 6 │ 3.37 ± 0.34 │ 2.22 ± 0.24 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 7 │ 6.16 ± 0.62 │ 1.76 ± 0.20 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 8 │ 3.70 ± 0.37 │ 2.21 ± 0.24 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 9 │ 2.97 ± 0.31 │ 2.01 ± 0.22 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 10 │ 2.14 ± 0.23 │ 1.85 ± 0.20 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 11 │ 2.34 ± 0.24 │ 1.38 ± 0.17 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 12 │ 3.12 ± 0.32 │ 3.30 ± 0.34 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 13 │ 3.47 ± 0.36 │ 3.38 ± 0.34 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 14 │ 1.79 ± 0.20 │ 3.94 ± 0.39 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 15 │ 4.88 ± 0.50 │ 3.26 ± 0.34 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 16 │ 2.60 ± 0.27 │ 5.05 ± 0.50 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 17 │ 4.16 ± 0.42 │ 4.72 ± 0.47 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 18 │ 4.13 ± 0.40 │ 5.84 ± 0.59 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 19 │ 3.89 ± 0.38 │ 2.70 ± 0.30 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 20 │ 3.14 ± 0.33 │ 6.45 ± 0.64 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 21 │ 5.42 ± 0.54 │ 6.28 ± 0.62 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 22 │ 6.46 ± 0.64 │ 5.07 ± 0.53 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 23 │ 3.85 ± 0.41 │ 6.82 ± 0.66 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 24 │ 3.98 ± 0.43 │ 6.91 ± 0.71 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 25 │ 6.04 ± 0.60 │ 7.67 ± 0.77 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 26 │ 8.89 ± 0.90 │ 9.52 ± 0.95 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 27 │ 8.94 ± 0.85 │ 7.29 ± 0.72 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 28 │ 8.15 ± 0.85 │ 19.6 ± 2.0 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 29 │ 21.9 ± 2.1 │ 22.0 ± 2.1 │\n", + "└────┴─────────────┴─────────────┘\n", + "2021-08-12 10:51:35,386 - ompy.error_finder - 
DEBUG - Processing an ensemble with 9 members\n", + "2021-08-12 10:51:35,387 - ompy.error_finder - DEBUG - Before removing nan: 45 NLD values and 31 GSF values\n", + "2021-08-12 10:51:35,388 - ompy.error_finder - WARNING - NLDs and/or γSFs contains nan's. They will be removed\n", + "2021-08-12 10:51:35,389 - ompy.error_finder - DEBUG - After removing nan: 30 NLD values and 30 GSF values\n", + "2021-08-12 10:51:35,390 - ompy.error_finder - INFO - Starting pyMC3 inference - logarithmic model\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Auto-assigning NUTS sampler...\n", + "Initializing NUTS using jitter+adapt_diag...\n", + "Multiprocess sampling (4 chains in 4 jobs)\n", + "NUTS: [σ_f, σ_ρ, α, F, D, σ_α, σ_F, σ_D]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " 100.00% [8000/8000 01:08<00:00 Sampling 4 chains, 0 divergences]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Sampling 4 chains for 1_000 tune and 1_000 draw iterations (4_000 + 4_000 draws total) took 78 seconds.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-08-12 10:53:05,209 - ompy.error_finder - INFO - Inference results:\n", + "┌──────────────┬──────────────┬───────────────┬───────────────┬───────────────┬─────────────────┐\n", + "│ D │ F │ α │ σ_D │ σ_F │ σ_α │\n", + "╞══════════════╪══════════════╪═══════════════╪═══════════════╪═══════════════╪═════════════════╡\n", + "│ -0.00 ± 0.22 │ -0.00 ± 0.27 │ 0.000 ± 0.069 │ 0.227 ± 0.019 │ 0.278 ± 0.024 │ 0.0698 ± 0.0063 │\n", + "└──────────────┴──────────────┴───────────────┴───────────────┴───────────────┴─────────────────┘\n", + "┌────┬─────────────┬─────────────┐\n", + "│ │ σ_ρ [%] │ σ_f [%] │\n", + "╞════╪═════════════╪═════════════╡\n", + "│ 0 │ 2.15 ± 0.25 │ 3.32 ± 0.31 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 1 │ 1.91 ± 0.23 │ 4.46 ± 0.39 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 2 │ 3.08 ± 0.29 │ 3.70 ± 0.33 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 3 │ 4.82 ± 0.43 │ 4.33 ± 0.38 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 4 │ 3.33 ± 0.31 │ 1.39 ± 0.17 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 5 │ 3.14 ± 0.29 │ 2.61 ± 0.24 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 6 │ 3.05 ± 0.28 │ 2.41 ± 0.22 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 7 │ 6.01 ± 0.53 │ 1.42 ± 0.15 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 8 │ 3.22 ± 0.29 │ 2.39 ± 0.23 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 9 │ 3.48 ± 0.31 │ 2.32 ± 0.21 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 10 │ 1.80 ± 0.18 │ 1.59 ± 0.16 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 11 │ 2.49 ± 0.23 │ 1.35 ± 0.14 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 12 │ 3.27 ± 0.30 │ 3.13 ± 0.28 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 13 │ 3.60 ± 0.32 │ 2.84 ± 0.26 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 14 │ 2.56 ± 0.24 │ 3.93 ± 0.35 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 15 │ 4.73 ± 0.41 │ 3.05 ± 0.28 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 16 │ 2.66 ± 0.25 │ 5.07 ± 0.45 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 17 │ 4.63 ± 0.40 │ 4.57 ± 0.41 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 18 │ 4.66 ± 0.41 │ 5.73 ± 0.50 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 19 │ 3.82 ± 0.34 │ 2.59 ± 0.25 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 20 │ 4.09 ± 0.38 │ 8.48 ± 0.73 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 21 │ 4.87 ± 0.44 │ 7.38 ± 0.66 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 22 │ 6.41 ± 0.56 │ 5.42 ± 0.48 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 23 │ 5.39 ± 0.47 │ 6.26 ± 0.53 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 24 │ 4.77 ± 0.46 │ 6.78 ± 0.61 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 25 │ 5.50 ± 0.50 │ 7.09 ± 0.63 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 26 │ 8.50 ± 0.74 │ 9.23 ± 0.80 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 27 │ 8.36 ± 0.74 │ 6.47 ± 0.58 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 28 │ 12.9 ± 1.1 │ 18.4 ± 1.6 │\n", + "├────┼─────────────┼─────────────┤\n", + "│ 29 │ 20.6 ± 1.8 │ 20.3 ± 1.8 │\n", + "└────┴─────────────┴─────────────┘\n", + "2021-08-12 10:53:05,230 - ompy.error_finder 
- DEBUG - Processing an ensemble with 10 members\n", + "2021-08-12 10:53:05,231 - ompy.error_finder - DEBUG - Before removing nan: 45 NLD values and 31 GSF values\n", + "2021-08-12 10:53:05,232 - ompy.error_finder - WARNING - NLDs and/or γSFs contains nan's. They will be removed\n", + "2021-08-12 10:53:05,233 - ompy.error_finder - DEBUG - After removing nan: 30 NLD values and 30 GSF values\n", + "2021-08-12 10:53:05,236 - ompy.error_finder - INFO - Starting pyMC3 inference - logarithmic model\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Auto-assigning NUTS sampler...\n", + "Initializing NUTS using jitter+adapt_diag...\n", + "Multiprocess sampling (4 chains in 4 jobs)\n", + "NUTS: [σ_f, σ_ρ, α, F, D, σ_α, σ_F, σ_D]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " 100.00% [8000/8000 01:09<00:00 Sampling 4 chains, 0 divergences]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Sampling 4 chains for 1_000 tune and 1_000 draw iterations (4_000 + 4_000 draws total) took 80 seconds.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-08-12 10:54:36,888 - ompy.error_finder - INFO - Inference results:\n", + "┌──────────────┬──────────────┬───────────────┬───────────────┬───────────────┬─────────────────┐\n", + "│ D │ F │ α │ σ_D │ σ_F │ σ_α │\n", + "╞══════════════╪══════════════╪═══════════════╪═══════════════╪═══════════════╪═════════════════╡\n", + "│ -0.00 ± 0.26 │ -0.00 ± 0.26 │ 0.000 ± 0.067 │ 0.267 ± 0.020 │ 0.263 ± 0.020 │ 0.0679 ± 0.0050 │\n", + "└──────────────┴──────────────┴───────────────┴───────────────┴───────────────┴─────────────────┘\n", + "┌────┬──────────────┬─────────────┐\n", + "│ │ σ_ρ [%] │ σ_f [%] │\n", + "╞════╪══════════════╪═════════════╡\n", + "│ 0 │ 1.93 ± 0.22 │ 4.11 ± 0.33 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 1 │ 1.93 ± 0.21 │ 4.09 ± 0.34 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 2 │ 3.32 ± 0.26 │ 3.58 ± 0.27 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 3 │ 4.63 ± 0.36 │ 4.97 ± 0.39 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 4 │ 3.59 ± 0.29 │ 1.15 ± 0.14 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 5 │ 3.38 ± 0.27 │ 2.52 ± 0.21 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 6 │ 3.17 ± 0.27 │ 2.17 ± 0.18 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 7 │ 6.15 ± 0.48 │ 1.53 ± 0.14 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 8 │ 3.22 ± 0.26 │ 2.31 ± 0.19 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 9 │ 3.39 ± 0.27 │ 2.20 ± 0.18 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 10 │ 1.71 ± 0.16 │ 1.60 ± 0.14 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 11 │ 2.30 ± 0.19 │ 1.47 ± 0.13 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 12 │ 3.06 ± 0.24 │ 2.94 ± 0.23 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 13 │ 3.54 ± 0.28 │ 2.93 ± 0.23 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 14 │ 2.92 ± 0.23 │ 3.47 ± 0.27 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 15 │ 4.48 ± 0.35 │ 2.69 ± 0.22 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 16 │ 2.54 ± 0.21 │ 5.13 ± 0.40 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 17 │ 4.21 ± 0.35 │ 4.63 ± 0.36 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 18 │ 4.35 ± 0.33 │ 5.47 ± 0.41 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 19 │ 3.53 ± 0.30 │ 2.54 ± 0.22 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 20 │ 4.15 ± 0.33 │ 7.84 ± 0.58 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 21 │ 5.25 ± 0.41 │ 6.89 ± 0.54 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 22 │ 6.55 ± 0.50 │ 5.28 ± 0.40 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 23 │ 5.71 ± 0.46 │ 6.38 ± 0.50 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 24 │ 4.41 ± 0.38 │ 5.69 ± 0.45 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 25 │ 5.09 ± 0.41 │ 7.27 ± 0.56 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 26 │ 8.14 ± 0.63 │ 9.80 ± 0.75 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 27 │ 7.64 ± 0.60 │ 6.46 ± 0.52 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 28 │ 12.73 ± 1.00 │ 17.3 ± 1.3 │\n", + "├────┼──────────────┼─────────────┤\n", + "│ 29 │ 19.5 ± 1.5 │ 19.1 ± 1.4 │\n", + "└────┴──────────────┴─────────────┘\n", + "2021-08-12 
10:54:36,906 - ompy.error_finder - DEBUG - Processing an ensemble with 11 members\n", + "2021-08-12 10:54:36,907 - ompy.error_finder - DEBUG - Before removing nan: 45 NLD values and 31 GSF values\n", + "2021-08-12 10:54:36,908 - ompy.error_finder - WARNING - NLDs and/or γSFs contains nan's. They will be removed\n", + "2021-08-12 10:54:36,909 - ompy.error_finder - DEBUG - After removing nan: 30 NLD values and 30 GSF values\n", + "2021-08-12 10:54:36,911 - ompy.error_finder - INFO - Starting pyMC3 inference - logarithmic model\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Auto-assigning NUTS sampler...\n", + "Initializing NUTS using jitter+adapt_diag...\n", + "Multiprocess sampling (4 chains in 4 jobs)\n", + "NUTS: [σ_f, σ_ρ, α, F, D, σ_α, σ_F, σ_D]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " 100.00% [8000/8000 01:16<00:00 Sampling 4 chains, 0 divergences]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Sampling 4 chains for 1_000 tune and 1_000 draw iterations (4_000 + 4_000 draws total) took 87 seconds.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-08-12 10:56:14,609 - ompy.error_finder - INFO - Inference results:\n", + "┌─────────────┬─────────────┬────────────────┬───────────────┬───────────────┬─────────────────┐\n", + "│ D │ F │ α │ σ_D │ σ_F │ σ_α │\n", + "╞═════════════╪═════════════╪════════════════╪═══════════════╪═══════════════╪═════════════════╡\n", + "│ 0.00 ± 0.24 │ 0.00 ± 0.27 │ -0.000 ± 0.066 │ 0.245 ± 0.017 │ 0.270 ± 0.018 │ 0.0665 ± 0.0047 │\n", + "└─────────────┴─────────────┴────────────────┴───────────────┴───────────────┴─────────────────┘\n", + "┌────┬──────────────┬──────────────┐\n", + "│ │ σ_ρ [%] │ σ_f [%] │\n", + "╞════╪══════════════╪══════════════╡\n", + "│ 0 │ 2.01 ± 0.20 │ 4.70 ± 0.33 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 1 │ 1.64 ± 0.19 │ 4.03 ± 0.29 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 2 │ 3.42 ± 0.25 │ 3.26 ± 0.24 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 3 │ 4.48 ± 0.31 │ 5.77 ± 0.40 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 4 │ 3.33 ± 0.24 │ 1.33 ± 0.12 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 5 │ 3.08 ± 0.22 │ 2.44 ± 0.18 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 6 │ 3.03 ± 0.22 │ 2.92 ± 0.21 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 7 │ 6.22 ± 0.43 │ 1.30 ± 0.11 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 8 │ 3.22 ± 0.23 │ 2.67 ± 0.19 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 9 │ 3.41 ± 0.25 │ 2.15 ± 0.15 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 10 │ 1.72 ± 0.15 │ 1.43 ± 0.12 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 11 │ 3.37 ± 0.24 │ 2.18 ± 0.16 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 12 │ 3.01 ± 0.22 │ 3.06 ± 0.22 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 13 │ 3.75 ± 0.27 │ 2.65 ± 0.19 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 14 │ 2.82 ± 0.21 │ 3.67 ± 0.26 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 15 │ 4.66 ± 0.33 │ 2.53 ± 0.18 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 16 │ 2.45 ± 0.19 │ 4.70 ± 0.33 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 17 │ 4.45 ± 0.31 │ 4.26 ± 0.30 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 18 │ 4.44 ± 0.32 │ 5.23 ± 0.37 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 19 │ 3.76 ± 0.28 │ 2.48 ± 0.19 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 20 │ 3.91 ± 0.29 │ 7.96 ± 0.57 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 21 │ 5.03 ± 0.36 │ 6.64 ± 0.47 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 22 │ 6.22 ± 0.44 │ 5.38 ± 0.39 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 23 │ 5.38 ± 0.39 │ 6.27 ± 0.45 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 24 │ 4.73 ± 0.36 │ 5.66 ± 0.40 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 25 │ 4.78 ± 0.35 │ 6.91 ± 0.49 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 26 │ 7.80 ± 0.56 │ 10.34 ± 0.71 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 27 │ 7.34 ± 0.53 │ 6.59 ± 0.47 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 28 │ 12.21 ± 0.83 │ 16.8 ± 1.2 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 29 │ 21.5 ± 1.5 │ 19.0 ± 1.3 │\n", + 
"└────┴──────────────┴──────────────┘\n", + "2021-08-12 10:56:14,625 - ompy.error_finder - DEBUG - Processing an ensemble with 12 members\n", + "2021-08-12 10:56:14,626 - ompy.error_finder - DEBUG - Before removing nan: 45 NLD values and 31 GSF values\n", + "2021-08-12 10:56:14,627 - ompy.error_finder - WARNING - NLDs and/or γSFs contains nan's. They will be removed\n", + "2021-08-12 10:56:14,628 - ompy.error_finder - DEBUG - After removing nan: 30 NLD values and 30 GSF values\n", + "2021-08-12 10:56:14,630 - ompy.error_finder - INFO - Starting pyMC3 inference - logarithmic model\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Auto-assigning NUTS sampler...\n", + "Initializing NUTS using jitter+adapt_diag...\n", + "Multiprocess sampling (4 chains in 4 jobs)\n", + "NUTS: [σ_f, σ_ρ, α, F, D, σ_α, σ_F, σ_D]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " 100.00% [8000/8000 01:29<00:00 Sampling 4 chains, 0 divergences]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Sampling 4 chains for 1_000 tune and 1_000 draw iterations (4_000 + 4_000 draws total) took 99 seconds.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-08-12 10:58:05,160 - ompy.error_finder - INFO - Inference results:\n", + "┌──────────────┬──────────────┬───────────────┬───────────────┬───────────────┬─────────────────┐\n", + "│ D │ F │ α │ σ_D │ σ_F │ σ_α │\n", + "╞══════════════╪══════════════╪═══════════════╪═══════════════╪═══════════════╪═════════════════╡\n", + "│ -0.00 ± 0.24 │ -0.00 ± 0.26 │ 0.000 ± 0.062 │ 0.244 ± 0.016 │ 0.263 ± 0.016 │ 0.0627 ± 0.0038 │\n", + "└──────────────┴──────────────┴───────────────┴───────────────┴───────────────┴─────────────────┘\n", + "┌────┬──────────────┬──────────────┐\n", + "│ │ σ_ρ [%] │ σ_f [%] │\n", + "╞════╪══════════════╪══════════════╡\n", + "│ 0 │ 1.65 ± 0.17 │ 4.53 ± 0.30 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 1 │ 2.11 ± 0.18 │ 3.82 ± 0.25 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 2 │ 3.94 ± 0.26 │ 4.11 ± 0.26 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 3 │ 4.39 ± 0.29 │ 5.47 ± 0.35 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 4 │ 3.22 ± 0.22 │ 1.87 ± 0.14 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 5 │ 3.59 ± 0.24 │ 2.60 ± 0.17 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 6 │ 3.34 ± 0.22 │ 2.84 ± 0.18 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 7 │ 5.92 ± 0.37 │ 1.60 ± 0.12 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 8 │ 3.19 ± 0.21 │ 2.45 ± 0.16 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 9 │ 3.48 ± 0.22 │ 2.15 ± 0.15 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 10 │ 1.55 ± 0.13 │ 1.53 ± 0.11 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 11 │ 3.39 ± 0.22 │ 2.35 ± 0.16 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 12 │ 3.00 ± 0.20 │ 3.18 ± 0.21 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 13 │ 3.75 ± 0.24 │ 2.56 ± 0.16 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 14 │ 2.57 ± 0.18 │ 3.54 ± 0.23 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 15 │ 4.47 ± 0.29 │ 2.76 ± 0.18 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 16 │ 2.76 ± 0.19 │ 4.23 ± 0.27 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 17 │ 4.33 ± 0.27 │ 4.05 ± 0.26 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 18 │ 4.69 ± 0.29 │ 4.99 ± 0.32 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 19 │ 3.83 ± 0.25 │ 2.64 ± 0.18 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 20 │ 4.04 ± 0.27 │ 7.92 ± 0.51 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 21 │ 4.78 ± 0.31 │ 6.34 ± 0.41 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 22 │ 6.11 ± 0.40 │ 5.30 ± 0.34 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 23 │ 5.33 ± 0.35 │ 5.92 ± 0.38 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 24 │ 5.37 ± 0.36 │ 6.16 ± 0.41 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 25 │ 5.02 ± 0.34 │ 7.48 ± 0.46 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 26 │ 7.38 ± 0.46 │ 10.57 ± 0.69 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 27 │ 7.28 ± 0.46 │ 6.96 ± 0.45 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 28 │ 11.52 ± 0.74 │ 16.1 ± 1.0 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 29 │ 21.5 ± 1.4 │ 20.5 ± 1.3 │\n", + 
"└────┴──────────────┴──────────────┘\n", + "2021-08-12 10:58:05,180 - ompy.error_finder - DEBUG - Processing an ensemble with 13 members\n", + "2021-08-12 10:58:05,181 - ompy.error_finder - DEBUG - Before removing nan: 45 NLD values and 31 GSF values\n", + "2021-08-12 10:58:05,183 - ompy.error_finder - WARNING - NLDs and/or γSFs contains nan's. They will be removed\n", + "2021-08-12 10:58:05,183 - ompy.error_finder - DEBUG - After removing nan: 30 NLD values and 30 GSF values\n", + "2021-08-12 10:58:05,186 - ompy.error_finder - INFO - Starting pyMC3 inference - logarithmic model\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Auto-assigning NUTS sampler...\n", + "Initializing NUTS using jitter+adapt_diag...\n", + "Multiprocess sampling (4 chains in 4 jobs)\n", + "NUTS: [σ_f, σ_ρ, α, F, D, σ_α, σ_F, σ_D]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " 100.00% [8000/8000 02:04<00:00 Sampling 4 chains, 0 divergences]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Sampling 4 chains for 1_000 tune and 1_000 draw iterations (4_000 + 4_000 draws total) took 134 seconds.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-08-12 11:00:31,261 - ompy.error_finder - INFO - Inference results:\n", + "┌─────────────┬──────────────┬────────────────┬───────────────┬───────────────┬─────────────────┐\n", + "│ D │ F │ α │ σ_D │ σ_F │ σ_α │\n", + "╞═════════════╪══════════════╪════════════════╪═══════════════╪═══════════════╪═════════════════╡\n", + "│ 0.00 ± 0.25 │ -0.00 ± 0.26 │ -0.000 ± 0.062 │ 0.250 ± 0.014 │ 0.258 ± 0.015 │ 0.0623 ± 0.0035 │\n", + "└─────────────┴──────────────┴────────────────┴───────────────┴───────────────┴─────────────────┘\n", + "┌────┬──────────────┬──────────────┐\n", + "│ │ σ_ρ [%] │ σ_f [%] │\n", + "╞════╪══════════════╪══════════════╡\n", + "│ 0 │ 1.56 ± 0.15 │ 4.46 ± 0.26 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 1 │ 2.14 ± 0.16 │ 3.91 ± 0.24 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 2 │ 3.82 ± 0.24 │ 3.97 ± 0.24 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 3 │ 4.52 ± 0.28 │ 5.29 ± 0.31 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 4 │ 5.44 ± 0.33 │ 1.88 ± 0.13 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 5 │ 3.49 ± 0.21 │ 2.48 ± 0.15 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 6 │ 3.51 ± 0.22 │ 2.65 ± 0.16 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 7 │ 6.12 ± 0.35 │ 1.49 ± 0.10 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 8 │ 3.40 ± 0.20 │ 2.44 ± 0.15 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 9 │ 3.38 ± 0.21 │ 2.12 ± 0.13 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 10 │ 1.79 ± 0.12 │ 1.58 ± 0.11 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 11 │ 3.32 ± 0.20 │ 2.20 ± 0.14 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 12 │ 3.19 ± 0.19 │ 3.23 ± 0.19 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 13 │ 3.49 ± 0.21 │ 2.48 ± 0.15 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 14 │ 2.46 ± 0.16 │ 3.34 ± 0.21 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 15 │ 4.87 ± 0.28 │ 2.71 ± 0.17 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 16 │ 2.65 ± 0.17 │ 4.38 ± 0.25 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 17 │ 4.20 ± 0.25 │ 4.02 ± 0.24 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 18 │ 4.65 ± 0.28 │ 4.98 ± 0.30 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 19 │ 3.68 ± 0.23 │ 2.55 ± 0.16 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 20 │ 3.89 ± 0.24 │ 7.59 ± 0.44 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 21 │ 4.73 ± 0.29 │ 6.12 ± 0.36 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 22 │ 6.04 ± 0.37 │ 5.16 ± 0.31 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 23 │ 6.70 ± 0.40 │ 6.31 ± 0.37 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 24 │ 5.36 ± 0.34 │ 5.87 ± 0.35 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 25 │ 4.91 ± 0.30 │ 7.11 ± 0.42 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 26 │ 7.15 ± 0.43 │ 10.28 ± 0.58 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 27 │ 7.13 ± 0.42 │ 7.75 ± 0.45 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 28 │ 11.68 ± 0.69 │ 15.85 ± 0.90 │\n", + "├────┼──────────────┼──────────────┤\n", + "│ 29 │ 21.1 ± 1.2 │ 19.9 ± 1.1 │\n", + 
"└────┴──────────────┴──────────────┘\n", + "2021-08-12 11:00:31,269 - ompy.error_finder - DEBUG - Processing an ensemble with 14 members\n", + "2021-08-12 11:00:31,271 - ompy.error_finder - DEBUG - Before removing nan: 45 NLD values and 31 GSF values\n", + "2021-08-12 11:00:31,273 - ompy.error_finder - WARNING - NLDs and/or γSFs contains nan's. They will be removed\n", + "2021-08-12 11:00:31,274 - ompy.error_finder - DEBUG - After removing nan: 30 NLD values and 30 GSF values\n", + "2021-08-12 11:00:31,278 - ompy.error_finder - INFO - Starting pyMC3 inference - logarithmic model\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Auto-assigning NUTS sampler...\n", + "Initializing NUTS using jitter+adapt_diag...\n", + "Multiprocess sampling (4 chains in 4 jobs)\n", + "NUTS: [σ_f, σ_ρ, α, F, D, σ_α, σ_F, σ_D]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " 100.00% [8000/8000 02:43<00:00 Sampling 4 chains, 0 divergences]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Sampling 4 chains for 1_000 tune and 1_000 draw iterations (4_000 + 4_000 draws total) took 172 seconds.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-08-12 11:03:35,101 - ompy.error_finder - INFO - Inference results:\n", + "┌─────────────┬─────────────┬────────────────┬───────────────┬───────────────┬─────────────────┐\n", + "│ D │ F │ α │ σ_D │ σ_F │ σ_α │\n", + "╞═════════════╪═════════════╪════════════════╪═══════════════╪═══════════════╪═════════════════╡\n", + "│ 0.00 ± 0.28 │ 0.00 ± 0.27 │ -0.000 ± 0.067 │ 0.277 ± 0.015 │ 0.277 ± 0.014 │ 0.0674 ± 0.0035 │\n", + "└─────────────┴─────────────┴────────────────┴───────────────┴───────────────┴─────────────────┘\n", + "┌────┬──────────────┬───────────────┐\n", + "│ │ σ_ρ [%] │ σ_f [%] │\n", + "╞════╪══════════════╪═══════════════╡\n", + "│ 0 │ 1.57 ± 0.14 │ 4.22 ± 0.23 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 1 │ 2.08 ± 0.15 │ 3.81 ± 0.21 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 2 │ 4.02 ± 0.23 │ 3.80 ± 0.21 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 3 │ 4.37 ± 0.24 │ 5.04 ± 0.27 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 4 │ 5.32 ± 0.29 │ 1.83 ± 0.12 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 5 │ 3.60 ± 0.20 │ 2.43 ± 0.14 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 6 │ 4.06 ± 0.22 │ 2.56 ± 0.15 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 7 │ 5.69 ± 0.31 │ 1.469 ± 0.096 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 8 │ 3.92 ± 0.21 │ 2.43 ± 0.14 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 9 │ 3.67 ± 0.20 │ 2.37 ± 0.14 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 10 │ 1.82 ± 0.11 │ 2.19 ± 0.12 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 11 │ 3.17 ± 0.17 │ 2.16 ± 0.13 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 12 │ 3.24 ± 0.18 │ 3.28 ± 0.18 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 13 │ 3.40 ± 0.19 │ 2.78 ± 0.15 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 14 │ 2.30 ± 0.14 │ 3.21 ± 0.18 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 15 │ 4.86 ± 0.26 │ 2.55 ± 0.14 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 16 │ 3.77 ± 0.21 │ 4.17 ± 0.22 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 17 │ 4.54 ± 0.25 │ 3.87 ± 0.21 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 18 │ 4.51 ± 0.26 │ 4.93 ± 0.27 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 19 │ 4.26 ± 0.24 │ 2.83 ± 0.16 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 20 │ 3.71 ± 0.22 │ 8.13 ± 0.45 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 21 │ 4.55 ± 0.26 │ 5.84 ± 0.31 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 22 │ 5.87 ± 0.32 │ 5.00 ± 0.27 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 23 │ 6.56 ± 0.36 │ 6.63 ± 0.36 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 24 │ 5.31 ± 0.30 │ 5.61 ± 0.32 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 25 │ 4.73 ± 0.27 │ 6.70 ± 0.37 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 26 │ 7.32 ± 0.40 │ 10.69 ± 0.59 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 27 │ 6.94 ± 0.38 │ 7.61 ± 0.41 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 28 │ 11.93 ± 0.65 │ 15.57 ± 0.85 │\n", + "├────┼──────────────┼───────────────┤\n", + "│ 29 │ 20.0 ± 1.1 │ 22.8 ± 
1.2 │\n", + "└────┴──────────────┴───────────────┘\n" + ] + } + ], + "source": [ + "nld_ens_errors = []\n", + "gsf_ens_errors = []\n", + "for extractor in extractors_ens:\n", + " nld_err, gsf_err = error_estimator.evaluate(extractor.nld, extractor.gsf)\n", + " nld_ens_errors.append(nld_err.copy())\n", + " gsf_ens_errors.append(gsf_err.copy())" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "995621af-f7b1-4d40-a0a0-b1ec9c0b989a", + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "15b6a08502454707898db3bc6a1dd72f", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "def make_vec(nlds, point, counts=np.array(range(3,15))):\n", + " # Relative error at Ex = point [MeV] as a function of ensemble size\n", + " return om.Vector(E=np.array(counts),\n", + " values=np.array([nld.std[nld.index(point)] for nld in nlds])/np.array([nld.values[nld.index(point)] for nld in nlds]))\n", + "\n", + "nld_1MeV = make_vec(nld_ens_errors, 1)\n", + "nld_1MeV.values *= 100.\n", + "#nld_1MeV.save(\"nld_ens1MeV.csv\", sep=\"\\t\")\n", + "nld_3MeV = make_vec(nld_ens_errors, 3)\n", + "nld_3MeV.values *= 100.\n", + "#nld_3MeV.save(\"nld_ens3MeV.csv\", sep=\"\\t\")\n", + "nld_5MeV = make_vec(nld_ens_errors, 5)\n", + "nld_5MeV.values *= 100.\n", + "#nld_5MeV.save(\"nld_ens5MeV.csv\", sep=\"\\t\")\n", + "\n", + "_, ax = nld_1MeV.plot(label=r\"$E_x = 1$ MeV\", linestyle='')\n", + "nld_3MeV.plot(ax=ax, label=r\"$E_x = 3$ MeV\", linestyle='')\n", + "nld_5MeV.plot(ax=ax, label=r\"$E_x = 5$ MeV\", linestyle='')\n", + "# 1/sqrt(N(N-1)) reference curve, normalized to 10 % at N = 8\n", + "N = np.linspace(3, 14, 1001)\n", + "ax.plot(N, 10.*np.sqrt(8.*7.)/np.sqrt(N*(N-1)))\n", + "ax.set_xlabel(\"Ensemble members\")\n", + "ax.set_ylabel(\"Relative error [%]\")\n", + "ax.legend(loc='best');" + ] + }, + { + "cell_type": "markdown", + "id": "754fd57a-b53d-48c7-aea6-8601fb0e6e9f", + "metadata": {}, + "source": [ + "### Checking whether the uncertainties make sense\n", + "The estimated uncertainties are for NLDs and $\gamma$SFs extracted from stochastic input. The relative error on the total number of counts ($\sqrt{N}/N$) in the first-generation matrix should therefore set a lower limit on the estimated uncertainties.\n", + "\n", + "For the NLD we are interested in the total number of counts along the diagonals $P(E_f+E_\gamma, E_\gamma)$, $E_\gamma \in [E_\gamma^{min}, E_\gamma^{max}]$. For the $\gamma$SF we are interested in the horizontal lines, $P(E_x, E_\gamma)$, $E_x \in [E_x^{min}, E_x^{max}]$."
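, + "\n", + "As a rough sketch of the NLD check (hypothetical helper; `P`, `Ex`, `Eg` stand for the cut first-generation matrix and its axes, while the cells below do the same with the actual `om` objects):\n", + "\n", + "```python\n", + "import numpy as np\n", + "\n", + "def nld_lower_bound(P, Ex, Eg, Ef, Eg_min, Eg_max):\n", + " # Sum counts along the diagonal Ex = Ef + Eg for Eg in [Eg_min, Eg_max]\n", + " N = 0.0\n", + " for j, eg in enumerate(Eg):\n", + " if Eg_min <= eg <= Eg_max:\n", + " i = np.argmin(np.abs(Ex - (Ef + eg))) # closest Ex bin\n", + " N += P[i, j]\n", + " # the gSF analogue sums a fixed-Eg column over the allowed Ex range instead\n", + " return 100.0/np.sqrt(N) # sqrt(N)/N in percent\n", + "```"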
+ ] + }, + { + "cell_type": "code", + "execution_count": 51, + "id": "9b5af5ed-2211-4c19-8c93-e67da5b20c42", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "<13 interactive matplotlib figure canvases, one per ensemble: relative error of the γSF vs. the counting-statistics floor>" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "for ensemble, gsf_err in zip(ensembles, gsf_errors):\n", + " \n", + " matrix = ensemble.get_raw(0).copy()\n", + " for n in range(1, len(ensemble.raw_ensemble)):\n", + " matrix.values += ensemble.get_raw(n).values\n", + " matrix.values *= 1./len(ensemble.raw_ensemble)\n", + " mat = matrix.copy()\n", + " mat.rebin('both', E_rebinned, inplace=True)\n", + " trapezoid_cut.act_on(mat)\n", + " \n", + " vals = np.zeros(len(fg_matrix.Eg))\n", + " for i in mat.range_Eg:\n", + " for j in mat.range_Ex:\n", + " vals[i] += mat.values[j,i]\n", + " gsf_counts = om.Vector(E=mat.Eg.copy(), values=vals, units='keV')\n", + " gsf_counts.to_MeV()\n", + " gsf_counts.values = 100./np.sqrt(gsf_counts.values)\n", + " \n", + " fig, ax = gsf_counts.plot()\n", + " er = gsf_err.copy()\n", + " er.values *= 100.\n", + " er.plot(ax=ax)\n", + " ax.set_ylabel(\"Relative error [%]\")" + ] + }, + { + "cell_type": "code", + "execution_count": 76, + "id": "f2204f56-aae6-4766-b5d4-4206881e1683", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2.3833305269815916\n" + ] + }, + { + "data": { + "text/plain": [ + "<interactive matplotlib figure canvas: relative error of the NLD vs. the counting-statistics floor>" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Now similar but for NLD\n", + "# First we will add together all the first generation matrices in the ensemble and take the average for each bin.\n", + "fg_matrix = ensemble_full.get_firstgen(0)\n", + "for i in range(1, len(ensemble_full.raw_ensemble)):\n", + " fg_matrix.values += ensemble_full.get_firstgen(i).values\n", + "\n", + "fg_matrix.values *= 1./len(ensemble_full.raw_ensemble) # There are 15 ensemble members\n", + "\n", + "# Absolute worst would be the statistics from the raw matrix?\n", + "#fg_matrix = raw_orig.copy()\n", + "fg_matrix.rebin('both', E_rebinned, inplace=True)\n", + "\n", + "\n", + "# Next we cut\n", + "trapezoid_cut.act_on(fg_matrix)\n", + "\n", + "Ef = nld_ens_errors[-1].E*1000.\n", + "Ef0 = Ef[0]\n", + "values = np.zeros(len(Ef))\n", + 
"for i, ef in enumerate(Ef):\n", + " for j in fg_matrix.range_Eg:\n", + " if ef + fg_matrix.Eg[j] < min(fg_matrix.Ex) or ef + fg_matrix.Eg[j] > max(fg_matrix.Ex):\n", + " continue\n", + " index_Ex = fg_matrix.index_Ex(ef + fg_matrix.Eg[j])\n", + " values[i] += fg_matrix.values[index_Ex, j]\n", + " #print(i, \" \", index_Ex, \" \", j, \" \", fg_matrix.Ex[index_Ex], \" \", fg_matrix.Eg[j], \" \", fg_matrix.Ex[index_Ex]-fg_matrix.Eg[j])\n", + "print(100./np.sqrt(values[-1]))\n", + "nld_counts = om.Vector(E=Ef, values=values, units='keV')\n", + "nld_counts.to_MeV()\n", + "nld_counts.values = 100./np.sqrt(nld_counts.values)\n", + "fig, ax = nld_counts.plot()\n", + "er = nld_ens_errors[-1].copy()\n", + "er.values *= 100.\n", + "er.plot(ax=ax)\n", + "ax.set_ylabel(\"Relative error [%]\");" + ] + }, + { + "cell_type": "code", + "execution_count": 96, + "id": "db6d7c7b-b2e2-463f-a343-cc7e71096237", + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "1d2887183f554fa099a03119213770c7", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "8ce674475fac41a2a8279c0f4665ea8d", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "851289e099104f368c86d72bdb75a077", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "b9cc1bd6edc849fbab2b2a849b2f4cc3", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "5ed0afdac22a44499bc3f9d6a7a45904", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "64a653461dbd47768b4a85c3b5248773", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "774b19a2b4214b7cb4e485a7fb30d51e", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": 
"8630e587d7d140c8a705a0a662be7a3b", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "44755721f0d7423382b8941a9f4be408", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "1de80c1c83114cb59f7f6035e6eaba8b", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "8a7bdfbe21524f2981e139051ba52aef", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "254f55b8a37e4acaaba52b3215553435", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "e77eeccc2f6449cea4e0f3a053afc4e5", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "for ensemble, nld_err in zip(ensembles, nld_errors):\n", + " \n", + " matrix = ensemble.get_raw(0).copy()\n", + " for n in range(1, len(ensemble.raw_ensemble)):\n", + " matrix.values += ensemble.get_raw(n).values\n", + " matrix.values *= 1./len(ensemble.raw_ensemble)\n", + " mat = matrix.copy()\n", + " mat.rebin('both', E_rebinned, inplace=True)\n", + " trapezoid_cut.act_on(mat)\n", + " \n", + " Ef = nld_err.E*1e3\n", + " vals = np.zeros(len(Ef))\n", + " for i, ef in enumerate(Ef):\n", + " for j in fg_matrix.range_Eg:\n", + " if ef + fg_matrix.Eg[j] < min(fg_matrix.Ex) or ef + fg_matrix.Eg[j] > max(fg_matrix.Ex):\n", + " continue\n", + " index_Ex = fg_matrix.index_Ex(ef + fg_matrix.Eg[j])\n", + " vals[i] += fg_matrix.values[index_Ex, j]\n", + " \n", + " nld_counts = om.Vector(E=Ef, values=vals, units='keV')\n", + " nld_counts.to_MeV()\n", + " nld_counts.values = 100./np.sqrt(nld_counts.values)\n", + " \n", + " #fig, ax = nld_counts.plot()\n", + " er = nld_err.copy()\n", + " er.values *= 100.\n", + " nld_counts.values *= er.values[15]/nld_counts.values[15]\n", + " fig, ax = nld_counts.plot()\n", + " er.plot(ax=ax)\n", + " ax.set_ylabel(\"Relative error [%]\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c10dd58b-0ebd-4613-b42e-850de5baff16", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + 
"name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/ompy/__init__.py b/ompy/__init__.py index 1b288017..c0b06e00 100644 --- a/ompy/__init__.py +++ b/ompy/__init__.py @@ -44,8 +44,15 @@ from .decomposition import nld_T_product, index from .normalizer_nld import (NormalizerNLD, load_levels_discrete, load_levels_smooth) + from .normalizer_nld_v2 import NormalizerNLD as NormalizerNLDv2 + from .normalizer_ct import NormalizerNLD as NormalizerNLDct + from .normalizer_simultan_ct import NormalizerSimultan as NormalizerSimultanCT + from .normalizer_simultan_v2 import NormalizerSimultan as NormalizerSimultanV2 + from .normalizer_simultan_spincut import NormalizerSimultan as NormalizerSimultanSC from .normalizer_gsf import NormalizerGSF from .normalizer_simultan import NormalizerSimultan - from .ensembleNormalizer import EnsembleNormalizer + from .ensemble_normalizer import EnsembleNormalizer from .models import NormalizationParameters, ResultsNormalized from .introspection import logging, hooks + from .dist import FermiDirac + from .error_finder import ErrorFinder diff --git a/ompy/dist/__init__.py b/ompy/dist/__init__.py new file mode 100644 index 00000000..a3b93d28 --- /dev/null +++ b/ompy/dist/__init__.py @@ -0,0 +1,7 @@ +import pymc as pm +from packaging import version + +if version.parse(pm.__version__) < version.parse("4.0.0"): + from .fermi_dirac import FermiDirac +else: + from .fermi_dirac_pymc4 import FermiDirac diff --git a/ompy/dist/fermi_dirac.py b/ompy/dist/fermi_dirac.py new file mode 100644 index 00000000..70cb3e4e --- /dev/null +++ b/ompy/dist/fermi_dirac.py @@ -0,0 +1,164 @@ +import numpy as np + +import theano.tensor as tt +from pymc3.distributions.continuous import ( + PositiveContinuous, + assert_negative_support, + draw_values, + generate_samples) +from pymc3.distributions.dist_math import bound +from pymc3.theanof import floatX + + +class FermiDirac(PositiveContinuous): + """ + Fermi-Dirac distribution. + + The pdf of this distribution is + + .. math:: + + f(x \mid \lambda \mu) = + \frac{\lambda}{\lambda\mu + \ln(1 + e^{-\lambda\mu})} + \frac{1}{e^{\lambda(x - \mu)} + 1} + + .. plot:: + + import matplotlib.pyplot as plt + import numpy as np + + x = np.linspace(0, 5., 1000) + lam = [1.0, 10., 25.] + mu = [0.5, 1.0, 2.0] + + def pdf(x, lam, mu): + return (lam/np.log(1 + np.exp(lam*mu))*(1/(np.exp(lam*(x-mu)) + 1)) + + for l in lam: + for m in mu: + pdf = pdf(x, l, m) + plt.plot(x, pdf, label=f"$\lambda = {l}$, $\mu = {m}$") + plt.xlabel("x", fontsize=12) + plt.ylabel("f(x)", fontsize=12) + plt.legend(loc=1) + plt.show() + + ======== ============================ + Support :math:`x \in [0, \infty)` + ======== ============================ + + + Parameters + ---------- + lam: float + Rate of decay at mu (lam > 0) + mu: float + Decay position + + Examples + -------- + .. 
code-block:: python + + with pm.Model(): + x = ompy.FermiDirac('x', lam=10.0, mu=1.2) + + """ + + def __init__(self, lam, mu, *args, **kwargs): + super().__init__(*args, **kwargs) + + self.lam = tt.as_tensor_variable(floatX(lam)) + self.mu = tt.as_tensor_variable(floatX(mu)) + assert_negative_support(lam, "lam", "FermiDirac") + + self.median = self._ppf(0.5, self.lam, self.mu) + + def _ppf(self, q, lam, mu): + """ + Calculate the percent point function (inverse CDF) for the + Fermi-Dirac distribution. + """ + return mu - np.log((1 + np.exp(lam*mu))**(1-q) - 1)/lam + + def _random(self, lam, mu, size=None): + """ + Draw a random number from the Fermi-Dirac distribution. + """ + v = np.random.uniform(size=size) + return self._ppf(v, lam, mu) + + def random(self, point=None, size=None): + """ + Draw random values from the Fermi-Dirac distribution. + + Parameters + ---------- + point: dict, optional + Dict of variable values on which random values are to be + conditioned (uses default point if not specified). + size: int, optional + Desired size of random sample (returns one sample if not + specified). + + Returns + ------- + array + """ + lam, mu = draw_values([self.lam, self.mu], point=point, size=size) + return generate_samples(self._random, lam, mu, dist_shape=self.shape, + size=size) + + def logp(self, value): + """ + Calculate log-probability of Fermi-Dirac distribution at specified + value. + + Parameters + ---------- + value: numeric + Value(s) for which log-probability is calculated. If the log + probabilities for multiple values are desired the values must be + provided in a numpy array or theano tensor + + Returns + ------- + TensorVariable + """ + + # The formula is + # p(x) = lam/ln(1 + exp(lam*mu)) * 1/(exp(lam*(x-mu)) + 1) + # ln(p(x)) = ln(lam/ln(1 + exp(lam*mu))) - ln(exp(lam*(x-mu)) + 1) + + lam = self.lam + mu = self.mu + + N = lam/tt.log(1 + tt.exp(lam*mu)) + logp = tt.log(N) - tt.log(tt.exp(lam*(value - mu)) + 1) + return bound(logp, value >= 0, lam > 0) + + def logcdf(self, value): + """ + Compute the log of the cumulative distribution function for the + Fermi-Dirac distribution at the specified value. + + Parameters + ---------- + value: numeric or np.ndarray or theano.tensor + Value(s) for which log CDF is calculated. If the log CDF for + multiple values are desired the values must be provided in a numpy + array or theano tensor. + + Returns + ------- + TensorVariable + """ + + # The CDF is + # P(x) = 1 - ln(1 + exp(-lam*(x-mu)))/ln(1 + exp(lam*mu)) + # ln(P(x)) = ln(1 - ln(1 + exp(-lam*(x-mu)))/ln(1 + exp(lam*mu))) + + lam = self.lam + mu = self.mu + + logcdf = tt.log(1 - tt.log(1 + tt.exp(-lam*(value - mu))) / + tt.log(1 + tt.exp(lam*mu))) + return bound(logcdf, value >= 0, lam > 0) diff --git a/ompy/dist/fermi_dirac_pymc4.py b/ompy/dist/fermi_dirac_pymc4.py new file mode 100644 index 00000000..a13b8d73 --- /dev/null +++ b/ompy/dist/fermi_dirac_pymc4.py @@ -0,0 +1,128 @@ +import pymc as pm +import numpy as np +from typing import List, Tuple + +import pytensor.tensor as at +from pytensor.tensor.random.op import RandomVariable + +from pymc.distributions.continuous import (PositiveContinuous, + assert_negative_support) +# `bound` was removed in pymc >= 4; `check_parameters` is its closest +# replacement (assumed here, following the pymc developer guide). +from pymc.distributions.dist_math import check_parameters +from pymc.pytensorf import floatX + +""" +TODO: + - Add tests according to pymc3 standard, + see https://github.com/pymc-devs/pymc3/blob/main/docs/source/developer_guide_implementing_distribution.md # noqa +""" + + +class FermiDiracRV(RandomVariable): + name: str = "fermidirac" + + # Minimum dim. is a scalar (0 = scalar, 1 = vector, etc.)
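+ # A quick numeric sanity check of the closed-form PPF shared by this class
+ # and ``rng_fn`` below (illustration only, not part of the library):
+ #
+ #     q = np.random.uniform(size=100_000)
+ #     x = mu - np.log((1 + np.exp(lam*mu))**(1 - q) - 1)/lam
+ #
+ # For, e.g., lam = 10 and mu = 1 the sample mean of ``x`` agrees closely
+ # with the quadrature mean of the pdf lam/ln(1+e^(lam*mu))/(e^(lam*(x-mu))+1).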
+ ndim_supp: int = 0 + + # Number of parameters for the RV + ndims_params: List[int] = [0, 0] + + # Datatype, floatX is continuous + dtype: str = "floatX" + + # Print name + _print_name: Tuple[str, str] = ("FermiDirac", "\\operatorname{FermiDirac}") + + + @classmethod + def rng_fn(cls, + rng: np.random.RandomState, + lam: np.ndarray, + mu: np.ndarray, size: Tuple[int, ...] + ) -> np.ndarray: + q = rng.uniform(size=size) + return mu - np.log((1 + np.exp(lam*mu))**(1-q) - 1)/lam + + +fermidirac = FermiDiracRV() + + +class FermiDirac(PositiveContinuous): + """ + Fermi-Dirac distribution. + + The pdf of this distribution is + + .. math:: + + f(x \mid \lambda, \mu) = + \frac{\lambda}{\lambda\mu + \ln(1 + e^{-\lambda\mu})} + \frac{1}{e^{\lambda(x - \mu)} + 1} + + ======== ============================ + Support :math:`x \in [0, \infty)` + ======== ============================ + + + Parameters + ---------- + lam: float + Rate of decay at mu (lam > 0) + mu: float + Decay position + """ + rv_op = fermidirac + + @classmethod + def dist(cls, lam, mu, *args, **kwargs): + lam = at.as_tensor_variable(floatX(lam)) + mu = at.as_tensor_variable(floatX(mu)) + + assert_negative_support(lam, "lam", "FermiDirac") + + return super().dist([lam, mu], *args, **kwargs) + + def logp(value, lam, mu): + """ + Calculate log-probability of Fermi-Dirac distribution at specified + value. + + Parameters + ---------- + value: numeric + Value(s) for which log-probability is calculated. If the log + probabilities for multiple values are desired the values must be + provided in a numpy array or Aesara tensor + + Returns + ------- + TensorVariable + """ + + N = lam/at.log(1 + at.exp(lam*mu)) + logp = at.log(N) - at.log(at.exp(lam*(value - mu)) + 1) + logp = at.switch(at.lt(value, 0), -np.inf, logp) + return check_parameters(logp, lam > 0, msg="lam > 0") + + def logcdf(value, lam, mu): + """ + Compute the log of the cumulative distribution function for the + Fermi-Dirac distribution at the specified value. + + Parameters + ---------- + value: numeric or np.ndarray or aesara.tensor + Value(s) for which log CDF is calculated. If the log CDF for + multiple values are desired the values must be provided in a numpy + array or Aesara tensor. + + Returns + ------- + TensorVariable + """ + + logcdf = at.log(1 - at.log(1 + at.exp(-lam*(value - mu))) / + at.log(1 + at.exp(lam*mu))) + logcdf = at.switch(at.lt(value, 0), -np.inf, logcdf) + return check_parameters(logcdf, lam > 0, msg="lam > 0") diff --git a/ompy/ensemble.py b/ompy/ensemble.py index d3a3776c..9c17e390 100644 --- a/ompy/ensemble.py +++ b/ompy/ensemble.py @@ -126,6 +126,11 @@ def __init__(self, raw: Optional[Matrix] = None, self.raw.state = "raw" + def __len__(self) -> int: + """ Returns the number of ensemble members + """ + return self.size + def load(self, path: Optional[Union[str, Path]] = None): """ Loads a saved ensemble. Alternative to `regenerate`. @@ -417,7 +422,7 @@ def rebin(self, out_array: np.ndarray, member: str) -> None: self.std_firstgen = firstgen_std def generate_gaussian(self, state: str, - rstate: Optional[np.random.Generator] = np.random.default_rng) -> np.ndarray: # noqa + rstate: Optional[np.random.Generator] = np.random.default_rng) -> np.ndarray: # noqa """Generates an array with Gaussian perturbations of a matrix. Note that entries are truncated at 0 (only positive).
diff --git a/ompy/ensembleNormalizer.py b/ompy/ensemble_normalizer.py similarity index 98% rename from ompy/ensembleNormalizer.py rename to ompy/ensemble_normalizer.py index 2b0937cc..f0b5a616 100644 --- a/ompy/ensembleNormalizer.py +++ b/ompy/ensemble_normalizer.py @@ -268,17 +268,22 @@ def plot(self, ax: Tuple[Any, Any] = None, else: fig, ax_stats = plt.subplots(2, 1) # dummy Emin = samples["nld"].iloc[0].E[-1] - x = np.linspace(Emin, normalizer_nld.norm_pars.Sn[0], num=20) + if self.normalizer_simultan is None: + Emin = self.normalizer_nld.limit_high[0] + elif self.normalizer_simultan is not None: + Emin = self.normalizer_simultan.normalizer_nld.limit_high[0] + x = np.linspace(Emin, normalizer_nld.norm_pars.Sn[0], num=101) stats_nld_model = \ self.plot_nld_ext_stats(ax_stats[0], x=x, samples=samples, normalizer_nld=normalizer_nld, percentiles=percentiles, color=colors[2], label="model") + stats_nld_model['x'] = x E = samples["gsf"].iloc[0].E - xlow = np.linspace(0.001, E[0], num=20) - xhigh = np.linspace(E[-1], normalizer_gsf.norm_pars.Sn[0], num=20) + xlow = np.linspace(0.001, E[0], num=101) + xhigh = np.linspace(E[-1], normalizer_gsf.norm_pars.Sn[0], num=101) stats_gsf_model = \ self.plot_gsf_ext_stats(ax_stats[1], xlow=xlow, xhigh=xhigh, samples=samples, @@ -525,6 +530,8 @@ def stats_from_df(df: pd.DataFrame, np.fromiter(map(fmap, indexed, repeat(out)), dtype=float) stats = pd.DataFrame(out[:, :]) stats = pd.DataFrame({'median': stats.median(), + 'mean': stats.mean(), + 'std': stats.std(), 'low': stats.quantile(percentiles[0], axis=0), 'high': stats.quantile(percentiles[1], axis=0)}) return stats diff --git a/ompy/error_finder.py b/ompy/error_finder.py new file mode 100644 index 00000000..8162d9a4 --- /dev/null +++ b/ompy/error_finder.py @@ -0,0 +1,338 @@ +import logging +import numpy as np +import pymc as pm +import termtables as tt + +from numpy import ndarray +from typing import Optional, Union, Any, Tuple, List, Dict + + +from .vector import Vector +from .matrix import Matrix +from .dist import FermiDirac + + +class ErrorFinder: + """ Find the relative errors from an ensemble of NLDs and γSFs. + + This class uses pyMC3 to calculate the relative errors of the data points + in the extracted NLDs and γSFs. The class is designed for two different + models, one logarithmic and one linear; currently only the logarithmic + model is implemented. It is usually the more stable of the two. + + Attributes: + algorithm (string): Indicate what algorithm to use. Currently only + 'log' is implemented. Default is 'log'. + seed (int): Seed used in the pyMC3 sampling. + options (dict): The pymc3.sample options to use. + testvals (dict): A dictionary with testvalues to feed to pymc3 when + declaring the prior distributions. This can probably be left to + the default, but could be changed if needed. + trace: The trace of the inference data. None if the class hasn't + run. + TODO: + - Trace should always be saved. Calculations can take hours! + - Refactor the linear model (maybe remove?) + - Refactor how data are conditioned + """ + + LOG = logging.getLogger(__name__) # overwrite parent variable + logging.captureWarnings(True) + + def __init__(self, algorithm: Optional[str] = 'log', + options: Optional[Dict[str, Any]] = None, + seed: Optional[int] = 7392): + """ Initialize ErrorFinder. See attributes above for arguments. + + Raises: + NotImplementedError if algorithm isn't recognized.
+ + """ + + if not (algorithm.lower() == 'log'): + raise NotImplementedError( + f"Algorithm '{algorithm}' not implemented") + + self.algorithm = algorithm.lower() + self.options = {} if options is None else options + self.options.setdefault("return_inferencedata", False) + self.seed = seed + self.testvals = {'σ_D': 0.25, 'σ_F': 0.25, 'σ_α': 0.05, + 'σ_A': 0.25, 'σ_B': 0.25, 'A': 1.0, 'B': 1.0} + self.prior_parameters = {'σ_ρ': {'lam': 10., 'mu': 1.}, + 'σ_f': {'lam': 10., 'mu': 1.}} + + def __call__(self, *args, **kwargs) -> Union[Tuple[Vector, Vector], Any]: + """ Wrapper for evaluate """ + return self.evaluate(*args, **kwargs) + + def evaluate(self, nlds: List[Vector], + gsfs: List[Vector], + median: Optional[bool] = False, + full: Optional[bool] = False + ) -> Union[Tuple[Vector, Vector], Any]: + """ Evaluate the model and find the relative errors + of the NLDs and γSFs passed to the function. + + Args: + nlds (List[Vector]): List of the NLDs in an ensemble + gsfs (List[Vector]): List of the γSFs in an ensemble + median (bool, optional): If the mean of the relative error should + be used rather than the mean. Default is False. + full (bool, optional): If the sample trace should + be returned or not. + Returns: + The relative error of the nuclear level density and + gamma strength function. + Optionally returns the full sample trace if `trace` is true. + + Raises: + ValueError: If length of `nlds` doesn't match the length of `gsfs`. + pyMC3 may raise errors. Please report if such errors occurs. + """ + algo = None + if self.algorithm == 'log': + algo = self.logarithm + else: + raise NotImplementedError( + f"Algorithm '{algorithm}' not implemented") + + return algo(nlds, gsfs, median=median, full=full) + + def logarithm(self, nlds: List[Vector], gsfs: List[Vector], + median: Optional[bool] = False, full: Optional[bool] = False + ) -> Union[Tuple[Vector, Vector], Any]: + """Use Bayesian inference to estimate the relative errors of the NLD + and GSF from an ensemble. See Ingeberg et al., NIM (TBA) + + Args: + nlds (List[Vector]): Ensemble of NLDs + gsfs (List[Vector]): Ensemble of GSFs + median (bool, optional): If the resulting relative errors should be + the mean or the median. + full (bool, optional): If the trace from the pyMC3 sampling should + be returned. Useful for debugging. + """ + + assert len(nlds) == len(gsfs), \ + "Number of ensemble members of the NLD and GSF doesn't match." 
+ N = len(nlds) + + E_nld, q_nld = format_data(nlds) + E_gsf, q_gsf = format_data(gsfs) + + M_nld, M_gsf = len(E_nld), len(E_gsf) + coef_mask_nld, values_mask_nld = format_mask(N, M_nld) + coef_mask_gsf, values_mask_gsf = format_mask(N, M_gsf) + + self.LOG.info("Starting pyMC3 inference - logarithmic model") + self.LOG.debug(f"Inference with an ensemble with N={N} members with " + f"{M_nld} NLD bins and {M_gsf} GSF bins.") + + with pm.Model() as model: + + σ_D = pm.HalfFlat("σ_D", testval=self.testvals['σ_D']) + σ_F = pm.HalfFlat("σ_F", testval=self.testvals['σ_F']) + σ_α = pm.HalfFlat("σ_α", testval=self.testvals['σ_α']) + + D = pm.Normal("D", mu=0, sigma=σ_D, shape=[N, N-1])[:, coef_mask_nld] # noqa + F = pm.Normal("F", mu=0, sigma=σ_F, shape=[N, N-1])[:, coef_mask_gsf] # noqa + α = pm.Normal("α", mu=0, sigma=σ_α, shape=[N, N-1]) + + σ_ρ = FermiDirac("σ_ρ", lam=self.prior_parameters['σ_ρ']['lam'], + mu=self.prior_parameters['σ_ρ']['mu'], + shape=M_nld)[values_mask_nld] + + σ_f = FermiDirac("σ_f", lam=self.prior_parameters['σ_f']['lam'], + mu=self.prior_parameters['σ_f']['mu'], + shape=M_gsf)[values_mask_gsf] + + μ_ρ = D + α[:, coef_mask_nld] * E_nld[values_mask_nld] + μ_f = F + α[:, coef_mask_gsf] * E_gsf[values_mask_gsf] + + q_ρ = pm.Normal("q_ρ", mu=μ_ρ, sigma=np.sqrt(2)*σ_ρ, + observed=np.log(q_nld)) + q_f = pm.Normal("q_f", mu=μ_f, sigma=np.sqrt(2)*σ_f, + observed=np.log(q_gsf)) + + # Perform the sampling + trace = pm.sample(random_seed=self.seed, **self.options) + self.trace = trace + self.display_results(trace) + + mid = np.median if median else np.mean + nld_rel_err = Vector(E=E_nld, values=mid(trace['σ_ρ'], axis=0), + std=np.std(trace['σ_ρ'], axis=0), units='MeV') + gsf_rel_err = Vector(E=E_gsf, values=mid(trace['σ_f'], axis=0), + std=np.std(trace['σ_f'], axis=0), units='MeV') + if full: + return nld_rel_err, gsf_rel_err, trace + else: + return nld_rel_err, gsf_rel_err + + def display_results(self, trace: Any) -> None: + """ Print the results from the pyMC3 inference to the log. + """ + + def line(idx) -> list: + mean = trace[idx].mean() + sigma = trace[idx].std() + i = max(0, int(-np.floor(np.log10(sigma))) + 1) + fmt = '%%.%df' % i + fmts = '\t'.join([fmt + " ± " + fmt]) + return fmts % (mean, sigma) + header = [] + values = [] + try: + for idx in ['A', 'B', 'α', 'σ_A', 'σ_B', 'σ_α']: + values.append(line(idx)) + header = ['A', 'B', 'α', 'σ_A', 'σ_B', 'σ_α'] + except KeyError: + for idx in ['D', 'F', 'α', 'σ_D', 'σ_F', 'σ_α']: + values.append(line(idx)) + header = ['D', 'F', 'α', 'σ_D', 'σ_F', 'σ_α'] + + errs = [] + _, M_nld = trace['σ_ρ'].shape + _, M_gsf = trace['σ_f'].shape + errs = [[f'{m}', '', ''] for m in range(max([M_nld, M_gsf]))] + for m in range(M_nld): + mean = trace['σ_ρ'][:, m].mean() * 100. + sigma = trace['σ_ρ'][:, m].std() * 100. + i = max(0, int(-np.floor(np.log10(sigma))) + 1) + fmt = '%%.%df' % i + fmts = '\t'.join([fmt + " ± " + fmt]) + errs[m][1] = fmts % (mean, sigma) + for m in range(M_gsf): + mean = trace['σ_f'][:, m].mean() * 100. + sigma = trace['σ_f'][:, m].std() * 100. + i = max(0, int(-np.floor(np.log10(sigma))) + 1) + fmt = '%%.%df' % i + fmts = '\t'.join([fmt + " ± " + fmt]) + errs[m][2] = fmts % (mean, sigma) + + self.LOG.info("Inference results:\n%s\n%s", + tt.to_string([values], header=header), + tt.to_string(errs, header=['', 'σ_ρ [%]', 'σ_f [%]'])) + + +def all_equal(iterator): + """ Check if all elements in an iterator are equal. 
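+ For example, ``all_equal([1, 1, 1])`` is True while ``all_equal([1, 2])``
+ is False; an empty iterator counts as all equal.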
+ """ + + iterator = iter(iterator) + try: + first = next(iterator) + except StopIteration: + return True + return all(first == x for x in iterator) + + +def remove_nans(vecs: List[Vector]) -> List[Vector]: + """ Remove all points that are nan's for each member of the ensemble""" + + vecs_no_nan = [vec.copy() for vec in vecs] + for vec in vecs_no_nan: + vec.cut_nan(inplace=True) + + return vecs_no_nan + + +def keep_only(vecs: List[Vector]) -> List[Vector]: + """ Deletes all the points that are not shared by all vectors in the list + and returns the list with only the points shared among all vectors. + + Args: + vecs (List): List of similar vectors + Returns: List of vectors with only the points that share x-value among all + of the input vectors. + """ + vecs = remove_nans(vecs) + E = [vec.E.copy() for vec in vecs] + energies = {} + for vec in vecs: + for E in vec.E: + if E not in energies: + energies[E] = [False] * len(vecs) + + # Next we will add if the point is present or not + for n, vec in enumerate(vecs): + for E in vec.E: + energies[E][n] = True + + keep_energy = [] + for key in energies: + if np.all(energies[key]): + keep_energy.append(key) + + vec_all_common = [vec.copy() for vec in vecs] + for vec in vec_all_common: + E = [] + values = [] + for e, value in zip(vec.E, vec.values): + if e in keep_energy: + E.append(e) + values.append(value) + vec.E = np.array(E) + vec.values = np.array(values) + + return vec_all_common + + +def format_data(vecs: List[Vector]) -> Tuple[ndarray, ndarray]: + """Find and build required variables for pymc3 model. + This function takes an ensemble (NLDs or GSFs) and finds the energy + bins that are not nan and are shared among all members. It then + builds a tensor: + + q_{r,i,j} = \ln(\frac{v_{r,j}}{v_{i,j}}) + + where i and r ≠ i are the ensemble member index and j is the + bin index. + + Args: + vecs (List[Vector]): List of vectors. Typically an ensemble + of NLDs or GSFs. + Returns: + E (array): energy bins shared among all of the input vector + q (array): Observation tensor + """ + # Remove all bins that contains nan's + N = len(vecs) + vecs = keep_only(vecs) + + E = vecs[0].E.copy() + if vecs[0].units == 'keV': + E /= 1.0e3 + M = len(E) + + # Masking to get proper broadcasting of shapes + mask = np.tile(np.arange(M, dtype=int), (N-1, 1)) + + # Next we will create the observation tensor + q = [] + for r in range(N): + not_r = [] + for i in range(N): + if i == r: + continue + not_r.append(vecs[i].values.copy()) + q.append(vecs[r].values[mask]/not_r) + q = np.array(q) + return E, q + + +def format_mask(N: int, M: int) -> Tuple[ndarray, ndarray]: + """Setup masking arrays for correct broadcasting. + + Args: + N (int): Number of ensemble members + M (int): Number of bins + Returns: + coef_mask (array): Masking broadcasting the coefficients. + values_mask (array): Masking broadcasting the values. 
+ """ + + coef_mask = np.repeat(np.arange(N-1), M).reshape(N-1, M) + values_mask = np.array([np.tile(np.arange(M, dtype=int), (N-1, 1))] * N) + return coef_mask, values_mask diff --git a/ompy/extractor.py b/ompy/extractor.py index c421b762..fb31313a 100644 --- a/ompy/extractor.py +++ b/ompy/extractor.py @@ -14,6 +14,7 @@ from .vector import Vector from .decomposition import chisquare_diagonal, nld_T_product from .action import Action +from .error_finder import ErrorFinder if 'JPY_PARENT_PID' in os.environ: from tqdm import tqdm_notebook as tqdm @@ -47,6 +48,8 @@ class Extractor: trapezoid (Action[Matrix]): The Action cutting the matrices of the Ensemble into the desired shape where from the nld and gsf will be extracted from. + error_estimator (ErrorFinder): The algorithm used to estimate the + relative errors of the extracted NLD and γSF. path (path): The path to save and/or load nld and gsf to/from. extend_diagonal_by_resolution (bool, optional): If `True`, the fit will be extended beyond Ex=Eg by the (FWHM) of the @@ -58,6 +61,11 @@ class Extractor: seed (int): Random seed for reproducibility of results. resolution_Ex (float or np.ndarray, optional): Resolution (FWHM) along Ex axis (particle detector resolution). Defaults to 150 keV + rel_err_missing (float): Relative error used for points that cannot be + estimated by error_estimator object. + suppress_warning (bool): Suppress warnings. The warnings are usually + expected. Set this attribute to `True` in order to avoid enourmous + amounts of warnings in your notebooks. TODO: @@ -67,11 +75,13 @@ class Extractor: """ def __init__(self, ensemble: Optional[Ensemble] = None, trapezoid: Optional[Action] = None, + error_estimator: Optional[ErrorFinder] = None, path: Optional[Union[str, Path]] = 'saved_run/extractor'): """ ensemble (Ensemble, optional): see above trapezoid (Action[Matrix], optional): see above + error_estimator (ErrorFinder, optional): see above path (Path or str, optional): see above """ self.ensemble = ensemble @@ -81,6 +91,7 @@ def __init__(self, ensemble: Optional[Ensemble] = None, self.nld: List[Vector] = [] self.gsf: List[Vector] = [] self.trapezoid = trapezoid + self.error_estimator = error_estimator if path is None: self.path = None @@ -96,28 +107,47 @@ def __init__(self, ensemble: Optional[Ensemble] = None, self.extend_fit_by_resolution: bool = False self.resolution_Ex = 150 # keV + self.rel_err_missing = 0.3 + self.suppress_warning = False + def __call__(self, ensemble: Optional[Ensemble] = None, trapezoid: Optional[Action] = None): return self.extract_from(ensemble, trapezoid) def extract_from(self, ensemble: Optional[Ensemble] = None, trapezoid: Optional[Action] = None, - regenerate: Optional[bool] = None): + error_estimator: Optional[ErrorFinder] = None, + regenerate: Optional[bool] = None, + return_trace: Optional[bool] = False) -> Union[None, Any]: """Decompose each first generation matrix in an Ensemble If `regenerate` is `True` it saves the extracted nld and gsf to file, or loads them if already generated. Exposes the vectors in the attributes self.nld and self.gsf. + If the error_estimator attribute or argument is set then relative + errors will be estimated using the provided algorithm. Points in the + nld and gsf that the algorithm are unable to estimate will be set to + 30%. If error_estimator isn't set (is None), then the std attribute + of nld and gsf will not be set. + Args: ensemble (Ensemble, optional): The ensemble to extract nld and gsf from. Can be provided in when initializing instead. 
trapezoid (Action, optional): An Action describing the cut to apply + to the matrices to obtain the desired region for + extracting nld and gsf. + error_estimator (ErrorFinder, optional): Object responsible for + estimating the relative errors of the extracted nld and gsf. + regenerate (bool, optional): Whether to regenerate all nld and gsf + even if they are found on disk. + return_trace (bool, optional): If the trace from the ErrorFinder + algorithm should be returned or not. If the `error_estimator` + argument and `error_estimator` attribute are both None this + will have no effect. + Returns: Trace from inference sampling of the relative errors if the + `return_trace` argument is true and either the `error_estimator` + argument or attribute is set. Raises: ValueError: If no Ensemble instance is provided here or earlier. """ @@ -129,6 +159,8 @@ def extract_from(self, ensemble: Optional[Ensemble] = None, self.trapezoid = trapezoid elif self.trapezoid is None: raise ValueError("A 'trapezoid' cut must be given") + if error_estimator is not None: + self.error_estimator = error_estimator if regenerate is None: regenerate = self.regenerate self.path = Path(self.path) @@ -153,7 +185,30 @@ def extract_from(self, ensemble: Optional[Ensemble] = None, self.nld = nlds self.gsf = gsfs + trace = None + + if self.error_estimator is not None and regenerate: + LOG.debug("Estimating relative errors") + + # We always get the trace. If the user doesn't want it, we simply + # don't return it. + nld_rel_err, gsf_rel_err, trace = self.error_estimator(nlds, gsfs, + full=True) + nld_rel_err.to_keV() + gsf_rel_err.to_keV() + for i, (nld, gsf) in enumerate(zip(self.nld, self.gsf)): + nld.std = self.rel_err_missing * nld.values + gsf.std = self.rel_err_missing * gsf.values + idx_nld = nld.indices(nld_rel_err.E) + idx_gsf = gsf.indices(gsf_rel_err.E) + nld.std[idx_nld] = nld_rel_err.values * nld.values[idx_nld] + gsf.std[idx_gsf] = gsf_rel_err.values * gsf.values[idx_gsf] + nld.save(self.path / f'nld_{i}.npy') # Overwrite with errors! + gsf.save(self.path / f'gsf_{i}.npy') # Overwrite with errors! + self.check_unconstrained_results() + if return_trace: + return trace # If error_estimator is not set, then always None. def step(self, num: int) -> Tuple[Vector, Vector]: """ Wrapper around _extract in order to be consistent with other classes @@ -298,7 +353,8 @@ def errfun(x: np.ndarray) -> float: # Convert transmission coefficient to the more useful # gamma strength function - gsf = T/(2*np.pi*matrix.Eg**3) + Eg = matrix.Eg / 1e3 # Ensure we divide by Eg in MeV and not keV + gsf = T/(2*np.pi*Eg**3) if product: nld_0 = np.where(np.isnan(nld), np.zeros_like(nld), nld) @@ -536,18 +592,20 @@ def check_unconstrained_results(self) -> bool: for i, vec in enumerate(self.nld): if np.isnan(vec.values).any(): contains_nan = True - LOG.warning(f"nld #{i} contains nan's.\n" - "Consider removing them e.g. with:\n" - "# for nld in extractor.nld:\n" - "# nld.cut_nan()\n") + if not self.suppress_warning: + LOG.warning(f"nld #{i} contains nan's.\n" + "Consider removing them e.g. with:\n" + "# for nld in extractor.nld:\n" + "# nld.cut_nan()\n") for i, vec in enumerate(self.nld): if np.isnan(vec.values).any(): contains_nan = True - LOG.warning(f"gsf #{i} contains nan's.\n" - "Consider removing them e.g. with:\n" - "# for gsf in extractor.gsf:\n" - "# gsf.cut_nan()\n") + if not self.suppress_warning: + LOG.warning(f"gsf #{i} contains nan's.\n" + "Consider removing them e.g. 
with:\n" + "# for gsf in extractor.gsf:\n" + "# gsf.cut_nan()\n") return contains_nan diff --git a/ompy/library.py b/ompy/library.py index c9751654..9278563d 100644 --- a/ompy/library.py +++ b/ompy/library.py @@ -294,9 +294,9 @@ def interpolate_matrix_2D(matrix_in, E0_array_in, E1_array_in, def log_interp1d(xx, yy, **kwargs): """ Interpolate a 1-D function.logarithmically """ logy = np.log(yy) - lin_interp = interp1d(xx, logy, kind='linear', **kwargs) - log_interp = lambda zz: np.exp(lin_interp(zz)) # noqa - return log_interp + kwargs.setdefault('kind', 'linear') + lin_interp = interp1d(xx, logy, **kwargs) + return lambda zz: np.exp(lin_interp(zz)) def call_model(fun,pars,pars_req): diff --git a/ompy/matrix.py b/ompy/matrix.py index 57aa1faf..b0fa4816 100644 --- a/ompy/matrix.py +++ b/ompy/matrix.py @@ -85,7 +85,8 @@ def __init__(self, std: Optional[np.ndarray] = None, path: Optional[Union[str, Path]] = None, shape: Optional[Tuple[int, int]] = None, - state: Union[str, MatrixState] = None): + state: Union[str, MatrixState] = None, + **kwargs): """ There is the option to initialize it in an empty state. In that case, all class variables will be None. @@ -132,7 +133,7 @@ def __init__(self, self.std = std if path is not None: - self.load(path) + self.load(path, **kwargs) self.verify_integrity() self.state = state @@ -680,11 +681,18 @@ def trapezoid(self, Ex_min: float, Ex_max: float, mask[Eg >= Ex + dEg] = True matrix[mask] = 0 + matrix.std = self.std + if self.std is not None: + mat = Matrix(Ex=self.Ex, Eg=self.Eg, values=self.std) + mat.trapezoid(Ex_min, Ex_max, Eg_min, Eg_max, inplace=True) + matrix.std = mat.values + if inplace: self.values = matrix.values self.Ex = matrix.Ex self.Eg = matrix.Eg self.state = matrix.state + self.std = matrix.std else: return matrix diff --git a/ompy/models.py b/ompy/models.py index 2bcd9dd0..16159fca 100644 --- a/ompy/models.py +++ b/ompy/models.py @@ -443,6 +443,9 @@ class NormalizationParameters(Model): #: Parameters necessary for the spin cut model _spincutPars: Dict[str, Any] = field(default=None, metadata='parameters necessary for the spin cut model') # noqa + #: Optional, user given NLD at Sn + rhoSn: Optional[Tuple[float, float]] = field(default=None, + metadata='Level density at the neutron separation energy') # noqa #: Optional Parameters (do not check in `is_changed`). 
#: Defaults to ["A", "Z", "exclude_check_change"] diff --git a/ompy/normalizer_ct.py b/ompy/normalizer_ct.py new file mode 100644 index 00000000..5085c197 --- /dev/null +++ b/ompy/normalizer_ct.py @@ -0,0 +1,729 @@ +import numpy as np +import copy +import logging +import termtables as tt +import json +import pymultinest +import matplotlib.pyplot as plt +import warnings +from contextlib import redirect_stdout +from numpy import ndarray +from scipy.optimize import differential_evolution, curve_fit +from typing import Optional, Tuple, Any, Union, Callable, Dict +from pathlib import Path + +from .stats import truncnorm_ppf +from .vector import Vector +from .library import self_if_none +from .spinfunctions import SpinFunctions +from .filehandling import load_discrete +from .models import ResultsNormalized, NormalizationParameters +from .abstract_normalizer import AbstractNormalizer + +TupleDict = Dict[str, Tuple[float, float]] + + +class NormalizerNLD(AbstractNormalizer): + """ Normalizes NLD to empirical data + + Normalizes nld/gsf according to:: + + nld' = nld * A * np.exp(alpha * Ex), and + + This is the transformation eq (3), Schiller2000 + + Takes empirical data in form of an array of discrete levels, + neutron separation energy Sn, a model to estimate what the + NLD is at Sn, and several parameters for the model as well + as bounds on normalization parameters. + + As a consequence of a complex problem, this class has a complex + interface. Much of the book-keeping associated with normalization + has been automated, but there is still a lot of settings and + parameters for the user to take care of. Some default values + has been seen, but the user must be _EXTREMELY_ careful when + evaluating the output. + + Attributes: + discrete (Vector): The discrete NLD at lower energies. [MeV] + nld (Vector): The NLD to normalize. Gets converted to [MeV] from [keV]. + norm_pars (NormalizationParameters): Normalization parameters like + experimental D₀, and spin(-cut) model + bounds (Dict[str, Tuple[float, float]): The bounds on each of + the parameters. Its keys are 'A', 'alpha', 'T', and 'D0'. The + values are on the form (min, max). + model (Callable[..., ndarray]): The model to use at high energies + to estimate the NLD. Defaults to constant temperature model. + de_kwargs(dict): Additional keywords to differential evolution. + Defaults to `{"seed": 65424}`. Note that `bounds` has been + taken out as a separate attribute, but is a keyword of de. + multinest_path (Path): Where to save the multinest output. + defaults to 'multinest'. + multinest_kwargs (dict): Additional keywords to multinest. Defaults to + `{"seed": 65498, "resume": False}` + res (ResultsNormalized): Results of the normalization + smooth_levels_fwhm (float): FWHM with which the discrete levels shall + be smoothed when loading from file. Defaults to 0.1 MeV. + path (Path): The path save the results. + + """ + LOG = logging.getLogger(__name__) # overwrite parent variable + logging.captureWarnings(True) + + def __init__(self, *, + nld: Optional[Vector] = None, + discrete: Optional[Union[str, Vector]] = None, + path: Optional[Union[str, Path]] = 'saved_run/normalizers', + regenerate: bool = False, + norm_pars: Optional[NormalizationParameters] = None) -> None: + """ Normalizes nld ang gSF. 
+ + Note: + The preferred syntax is `Normalizer(nld=...)` + If neither is given, the nld (and other parameters) can be + explicitly set later by:: + + `normalizer.normalize(..., nld=...)` + + or:: + + `normalizer.nld = ...` + + In the latter case you *might* have to send in a copy if it's + mutable, to ensure it is not changed. + + Args: + nld: see above + discrete: see above + path: see above + norm_pars: see above + TODO: + - parameter to limit the number of multinest samples to store. Note + that the samples should be shuffled to retain some "random" + samples from the pdf (not the importance weighted) + + """ + super().__init__(regenerate) + + # Create the private variables + self._discrete = None + self._discrete_path = None + self._D0 = None + self._smooth_levels_fwhm = None + self.norm_pars = norm_pars + self.bounds = {'A': [0.1, 1e3], 'alpha': [1e-1, 20]} # D0 bounds set later + self.model: Optional[Callable[..., ndarray]] = self.const_temperature + # self.curried_model = lambda *arg: None + self.de_kwargs = {"seed": 65424} + self.multinest_path = Path('multinest') + self.multinest_kwargs: dict = {"seed": 65498, "resume": False} + + # Handle the method parameters + self.smooth_levels_fwhm = 0.1 + self.nld = None if nld is None else nld.copy() + self.discrete = discrete + + self.res = ResultsNormalized(name="Results NLD") + + self.limit_low = None + self.limit_high = None + self.std_fake = None # See `normalize` + + if path is None: + self.path = None + else: + self.path = Path(path) + self.path.mkdir(exist_ok=True, parents=True) + + def __call__(self, *args, **kwargs) -> None: + """ Wrapper around normalize """ + self.normalize(*args, **kwargs) + + def normalize(self, *, limit_low: Optional[Tuple[float, float]] = None, + limit_high: Optional[Tuple[float, float]] = None, + nld: Optional[Vector] = None, + discrete: Optional[Vector] = None, + bounds: Optional[TupleDict] = None, + norm_pars: Optional[NormalizationParameters] = None, + num: int = 0) -> None: + """ Normalize NLD to a low and high energy region + + Args: + limit_low: The limits (start, stop) where to normalize + to discrete levels. + limit_high: The limits (start, stop) where to normalize to + a theoretical model and neutron separation energy at high + energies. + nld: The nuclear level density vector to normalize. + discrete: The discrete level density at low energies to + normalize to. + bounds: The bounds of the parameters + norm_pars (NormalizationParameters): Normalization parameters like + experimental D₀, and spin(-cut) model + num (optional): Loop number, defaults to 0 + regenerate: Whether to use already generated files (False) or + generate them all anew (True).
+ + """ + if not self.regenerate: + try: + self.load() + return + except FileNotFoundError: + pass + + # Update internal state + self.limit_low = self.self_if_none(limit_low) + self.limit_high = self.self_if_none(limit_high) + limit_low = self.limit_low + limit_high = self.limit_high + + discrete = self.self_if_none(discrete) + discrete.to_MeV() + nld = self.self_if_none(nld) + + self.norm_pars = self.self_if_none(norm_pars) + self.norm_pars.is_changed(include=["D0", "Sn", "spincutModel", + "spincutPars"]) # check that set + + self.bounds = self.self_if_none(bounds) + + # ensure that it's updated if running again + self.res = ResultsNormalized(name="Results NLD") + + self.LOG.info(f"\n\n---------\nNormalizing nld #{num}") + nld = nld.copy() + self.LOG.debug("Setting NLD, convert to MeV") + nld.to_MeV() + self.LOG.debug("Setting NLD, removing nan") + nld.cut_nan() + + # Need to give some sort of standard deviation for sensible results + # Otherwise deviations at higher level density will have an + # uncreasonably high weight. + if self.std_fake is None: + self.std_fake = False + if self.std_fake or nld.std is None: + self.std_fake = True + nld.std = nld.values * 0.3 # x% is an arb. choice + self.nld = nld + + # Use DE to get an inital guess before optimizing + args, guess = self.initial_guess(limit_low, limit_high) + # Optimize using multinest + popt, samples = self.optimize(num, args, guess) + + transformed = nld.transform(popt['A'][0], popt['alpha'][0], + inplace=False) + if self.std_fake: + nld.std = None + transformed.std = None + + self.res.nld = transformed + self.res.pars = popt + self.res.samples = samples + ext_model = lambda E: self.const_temperature(E, T=popt['T'][0], + Eshift=popt['Eshift'][0]) + self.res.nld_model = ext_model + + self.save() # save instance + + def initial_guess(self, limit_low: Optional[Tuple[float, float]] = None, + limit_high: Optional[Tuple[float, float]] = None + ) -> Tuple[Tuple[float, float, float, float], + Dict[str, float]]: + """ Find an inital guess for the constant, α, T and D₀ + + Uses differential evolution to perform the guessing. + + Args: + limit_low: The limits (start, stop) where to normalize + to discrete levels. + limit_high: The limits (start, stop) where to normalize to + a theoretical model and neutron separation energy at high + energies. + + Returns: + The arguments used for chi^2 minimization and the + minimizer. + """ + limit_low = self.self_if_none(limit_low) + limit_high = self.self_if_none(limit_high) + + bounds = list(self.bounds.values()) + spinParsstring = json.dumps(self.norm_pars.spincutPars, indent=4, + sort_keys=True) + + self.LOG.debug("Using bounds %s", bounds) + self.LOG.debug("Using spincutModel %s", self.norm_pars.spincutModel) + self.LOG.debug("Using spincutPars %s", spinParsstring) + + nld_low = self.nld.cut(*limit_low, inplace=False) + discrete = self.discrete.cut(*limit_low, inplace=False) + nld_high = self.nld.cut(*limit_high, inplace=False) + + # Artificially increase the nld rel error at low + #nld_low.std = nld_low.values * 0.3 + #nld_low.std *= np.sqrt(2) # Maybe I'm wrong, it should be twice as high? + # Instead we will be looking at the cumulative NLD + + nldSn = self.nldSn_from_D0(**self.norm_pars.asdict())[1] + rel_uncertainty = self.norm_pars.D0[1]/self.norm_pars.D0[0] + nldSn = np.array([nldSn, nldSn * rel_uncertainty]) + + # Then... 
+ self.coefs = np.polyfit(nld_high.E, np.log(nld_high.values), 1) + self.LOG.debug(f"Found best polynomial: a={self.coefs[1]}, b={self.coefs[0]}") + + # We fit the model using curve_fit + self.ct_model = lambda E, A, alpha: A*np.exp(self.coefs[1]+(self.coefs[0]+alpha)*E) + + def neglnlike(*args, **kwargs): + return - self.lnlike(*args, **kwargs) + args = (nld_low, nld_high, discrete, self.ct_model, self.norm_pars.Sn[0], + nldSn) + res = differential_evolution(neglnlike, bounds=bounds, args=args, + **self.de_kwargs) + + self.LOG.info("DE results:\n%s", tt.to_string([res.x.tolist()], + header=['A', 'α [MeV⁻¹]']))#, 'T [MeV]', 'Eshift [MeV]'])) + + #p0 = dict(zip(["A", "alpha", "T", "Eshift"], (res.x).T)) + p0 = dict(zip(["A", "alpha"], (res.x).T)) + for key, res in p0.items(): + if res in self.bounds[key]: + self.LOG.warning(f"DE result for {key} is at edge its bound:" + f"{self.bounds[key]}. This will probably lead" + f"to wrong estimations in multinest, too.") + + return args, p0 + + def optimize(self, num: int, args, + guess: Dict[str, float]) -> Tuple[Dict[str, float], Dict[str, float]]: + """Find parameters given model constraints and an initial guess + + Employs Multinest + + Args: + num (int): Loop number + args_nld (Iterable): Additional arguments for the nld lnlike + guess (Dict[str, float]): The initial guess of the parameters + + Returns: + Tuple: + - popt (Dict[str, Tuple[float, float]]): Median and 1sigma of the + parameters + - samples (Dict[str, List[float]]): Multinest samples. + Note: They are still importance weighted, not random draws + from the posterior. + + Raises: + ValueError: Invalid parameters for automatic prior + + Note: + You might want to adjust the priors for your specific case! Here + we just propose a general solution that might often work out of + the box. + """ + if guess['alpha'] < 0: + raise NotImplementedError("Prior selection not implemented for " + "α < 0") + alpha_exponent = np.log10(guess['alpha']) + + #if guess['T'] < 0: + # raise ValueError("Prior selection not implemented for T < 0; " + # "negative temperature is unphysical") + #T_exponent = np.log10(guess['T']) + + A = guess['A'] + + # truncations from absolute values + lower_A, upper_A = 0., np.inf + mu_A, sigma_A = A, 10*A + a_A = (lower_A - mu_A) / sigma_A + b_A = (upper_A - mu_A) / sigma_A + + lower_Eshift, upper_Eshift = -5., 5 + mu_Eshift, sigma_Eshift = 0, 5 + a_Eshift = (lower_Eshift - mu_Eshift) / sigma_Eshift + b_Eshift = (upper_Eshift - mu_Eshift) / sigma_Eshift + + def prior(cube, ndim, nparams): + # NOTE: You may want to adjust this for your case! 
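+ # Multinest supplies each cube[i] uniform on [0, 1]; the lines below
+ # map them in place through the inverse CDF (ppf) of the chosen prior.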
+ # truncated normal prior + cube[0] = truncnorm_ppf(cube[0], a_A, b_A)*sigma_A+mu_A + # log-uniform prior + # if alpha = 1e2, it's between 1e1 and 1e3 + cube[1] = 10**(cube[1]*2 + (alpha_exponent-1)) + # log-uniform prior + # if T = 1e2, it's between 1e1 and 1e3 + # cube[2] = 10**(cube[2]*2 + (T_exponent-1)) + # truncated normal prior + # cube[3] = truncnorm_ppf(cube[3], a_Eshift, + # b_Eshift)*sigma_Eshift + mu_Eshift + + # if np.isinf(cube[3]): + # self.LOG.debug("Encountered inf in cube[3]:\n%s", cube[3]) + + def loglike(cube, ndim, nparams): + return self.lnlike(cube, *args) + + self.multinest_path.mkdir(exist_ok=True) + path = self.multinest_path / f"nld_norm_{num}_" + assert len(str(path)) < 60, "Total path length too long for multinest" + + self.LOG.info("Starting multinest") + self.LOG.debug("with following keywords %s:", self.multinest_kwargs) + # Hack where stdout from Multinest is redirected as info messages + self.LOG.write = lambda msg: (self.LOG.info(msg) if msg != '\n' + else None) + with redirect_stdout(self.LOG): + pymultinest.run(loglike, prior, len(guess), + outputfiles_basename=str(path), + **self.multinest_kwargs) + + # Save parameters for analyzer + names = list(guess.keys()) + json.dump(names, open(str(path) + 'params.json', 'w')) + analyzer = pymultinest.Analyzer(len(guess), + outputfiles_basename=str(path)) + + stats = analyzer.get_stats() + + samples = analyzer.get_equal_weighted_posterior()[:, :-1] + samples = dict(zip(names, samples.T)) + + T = self.coefs[0] + samples['alpha'] + Eshift = -T*(np.log(T*samples['A']) + self.coefs[1]) + samples['T'] = T + samples['Eshift'] = Eshift + + names += ['T', 'Eshift'] + stats['marginals'].append({'1sigma': (np.quantile(T, 0.16), + np.quantile(T, 0.84)), + 'median': np.quantile(T, 0.5)}) + stats['marginals'].append({'1sigma': (np.quantile(Eshift, 0.16), + np.quantile(Eshift, 0.84)), + 'median': np.quantile(Eshift, 0.5)}) + + # Format the output + popt = dict() + vals = [] + for name, m in zip(names, stats['marginals']): + lo, hi = m['1sigma'] + med = m['median'] + sigma = (hi - lo) / 2 + popt[name] = (med, sigma) + i = max(0, int(-np.floor(np.log10(sigma))) + 1) + fmt = '%%.%df' % i + fmts = '\t'.join([fmt + " ± " + fmt]) + vals.append(fmts % (med, sigma)) + + self.LOG.info("Multinest results:\n%s", tt.to_string([vals], + header=['A', 'α [MeV⁻¹]', 'T [MeV]', 'Eshift [MeV]'])) + + return popt, samples + + def plot(self, *, ax: Any = None, + add_label: bool = True, + results: Optional[ResultsNormalized] = None, + add_figlegend: bool = True, + plot_fitregion: bool = True, + reset_color_cycle: bool = True, + **kwargs) -> Tuple[Any, Any]: + """Plot the NLD, discrete levels and result of normalization + + Args: + ax (optional): The matplotlib axis to plot onto. Creates axis + is not provided + add_label (bool, optional): Defaults to `True`. + add_figlegend (bool, optional):Defaults to `True`. + results (ResultsNormalized, optional): If provided, nld and model + are taken from here instead. + plot_fitregion (Optional[bool], optional): Defaults to `True`. + reset_color_cycle (Optional[bool], optional): Defaults to `True` + **kwargs: Description + + Returns: + fig, ax + """ + if ax is None: + fig, ax = plt.subplots() + else: + fig = ax.figure + + if reset_color_cycle: + ax.set_prop_cycle(None) + + res = self.res if results is None else results + pars = res.pars + nld = res.nld + + labelNld = '_exp.' + labelNldSn = None + labelModel = "_model" + labelDiscrete = "_known levels" + if add_label: + labelNld = 'exp.' 
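A small sketch of the median ± 1σ summary produced from the equal-weighted samples above; the derived `T` and `Eshift` marginals are summarized the same way. The sample array here is synthetic:

```python
import numpy as np

# Half-width of the central 68% interval is reported as "sigma".
rng = np.random.default_rng(0)
samples = rng.normal(loc=0.85, scale=0.07, size=4000)   # e.g. T [MeV]

lo, med, hi = np.quantile(samples, [0.16, 0.5, 0.84])
sigma = (hi - lo) / 2
ndec = max(0, int(-np.floor(np.log10(sigma))) + 1)      # decimals from sigma
print(f"{med:.{ndec}f} ± {sigma:.{ndec}f}")             # e.g. 0.850 ± 0.070
```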
+            labelNldSn = r'$\rho(S_n)$'
+            labelModel = 'model'
+            labelDiscrete = "known levels"
+        nld.plot(ax=ax, label=labelNld, **kwargs)
+
+        self.discrete.plot(ax=ax, kind='step', c='k', label=labelDiscrete)
+
+        nldSn = self.nldSn_from_D0(**self.norm_pars.asdict())[1]
+        rel_uncertainty = self.norm_pars.D0[1]/self.norm_pars.D0[0]
+        nldSn = np.array([nldSn, nldSn * rel_uncertainty])
+
+        x = np.linspace(self.limit_high[0], self.norm_pars.Sn[0])
+        model = Vector(E=x, values=self.model(E=x,
+                                              T=pars['T'][0],
+                                              Eshift=pars['Eshift'][0]))
+
+        ax.errorbar(self.norm_pars.Sn[0], nldSn[0], yerr=nldSn[1],
+                    label=labelNldSn, fmt="ks", markerfacecolor='none')
+        # workaround for ensemble Normalizer; always keep these labels
+        for i in range(3):
+            ax.lines[-(i+1)]._label = "_nld(Sn)"
+
+        ax.plot(model.E, model.values, "--", label=labelModel, markersize=0,
+                c='g', **kwargs)
+
+        if plot_fitregion:
+            ax.axvspan(self.limit_low[0], self.limit_low[1], color='grey',
+                       alpha=0.1, label="fit limits")
+            ax.axvspan(self.limit_high[0], self.limit_high[1], color='grey',
+                       alpha=0.1)
+
+        ax.set_yscale('log')
+        ax.set_ylabel(r"Level density $\rho(E_x)~[\mathrm{MeV}^{-1}]$")
+        ax.set_xlabel(r"Excitation energy $E_x~[\mathrm{MeV}]$")
+        ax.set_ylim(bottom=0.5/(nld.E[1]-nld.E[0]))
+
+        if fig is not None and add_figlegend:
+            fig.legend(loc=9, ncol=3, frameon=False)
+
+        return fig, ax
+
+    @staticmethod
+    def lnlike(x: Tuple[float, float, float, float], nld_low: Vector,
+               nld_high: Vector, discrete: Vector,
+               model: Callable[..., ndarray],
+               Sn, nldSn) -> float:
+        """ Compute log likelihood of the normalization fitting
+
+        This is the result up to a constant, which is irrelevant for the
+        maximization
+
+        Args:
+            x: The arguments ordered as A and alpha
+            nld_low: The lower region where discrete levels will be
+                fitted.
+            nld_high: The upper region to fit to model.
+            discrete: The discrete levels to be used in fitting the
+                lower region.
+            model: The model to use when fitting the upper region.
+                Must support the keyword arguments
+                ``model(E=..., A=..., alpha=...) -> ndarray``
+
+        Returns:
+            lnlike: log likelihood
+        """
+        #A, alpha, T, Eshift = x[:4]  # slicing needed for multinest?
+        A, alpha = x[:2]  # slicing needed for multinest?
+        transformed_low = nld_low.transform(A, alpha, inplace=False)
+        transformed_high = nld_high.transform(A, alpha, inplace=False)
+
+        err_low = transformed_low.error(discrete)
+        expected = Vector(E=transformed_high.E,
+                          values=model(E=transformed_high.E,
+                                       A=A, alpha=alpha))
+        err_high = transformed_high.error(expected)
+
+        nldSn_model = model(E=Sn, A=A, alpha=alpha)
+        err_nldSn = ((nldSn[0] - nldSn_model)/nldSn[1])**2
+
+        ln_stds = (np.log(transformed_low.std).sum() +
+                   np.log(transformed_high.std).sum())
+
+        return -0.5*(err_low + err_high + err_nldSn + ln_stds)
+
+    @staticmethod
+    def const_temperature(E: ndarray, T: float, Eshift: float) -> ndarray:
+        """ Constant Temperature NLD"""
+        ct = np.exp((E - Eshift) / T) / T
+        return ct
+
+    @staticmethod
+    def nldSn_from_D0(D0: Union[float, Tuple[float, float]],
+                      Sn: Union[float, Tuple[float, float]], Jtarget: float,
+                      spincutModel: str, spincutPars: Dict[str, Any],
+                      **kwargs) -> Tuple[float, float]:
+        """Calculate nld(Sn) from D0
+
+        1/D0 = nld(Sn) * ( g(Jtarget+1/2, pi_target)
+                           + g(Jtarget-1/2, pi_target) )
+        Here we assume equal parity, g(J, pi) = g(J)/2, so that
+        nld(Sn) = 1/D0 * 2/(g(Jtarget+1/2) + g(Jtarget-1/2)).
+        For the case Jtarget = 0, g(Jtarget-1/2) = 0.
+
+        Parameters:
+            D0 (float or [float, float]):
+                Average resonance spacing from s waves [eV].
If a tuple,
+                it is assumed that it is of the form `[value, uncertainty]`.
+            Sn (float or [float, float]):
+                Separation energy [MeV]. If a tuple, it is assumed that it
+                is of the form `[value, uncertainty]`.
+            Jtarget (float):
+                Target spin
+            spincutModel (str):
+                Model to use for the spin cut
+            spincutPars (Dict[str, Any]):
+                Additional parameters necessary for the spin cut model
+            **kwargs: Unused; absorbs extra entries from
+                `norm_pars.asdict()`
+
+        Returns:
+            nld: [Sn, nld(Sn)] with units [MeV, 1/MeV]
+        """
+
+        D0 = np.atleast_1d(D0)[0]
+        Sn = np.atleast_1d(Sn)[0]
+
+        def g(J):
+            return SpinFunctions(Ex=Sn, J=J,
+                                 model=spincutModel,
+                                 pars=spincutPars).distribution()
+
+        if Jtarget == 0:
+            summe = 1 / 2 * g(Jtarget + 1 / 2)
+        else:
+            summe = 1 / 2 * (g(Jtarget - 1 / 2) + g(Jtarget + 1 / 2))
+
+        nld = 1 / (summe * D0 * 1e-6)
+        return [Sn, nld]
+
+    @staticmethod
+    def D0_from_nldSn(nld_model: Callable[..., Any],
+                      Sn: Union[float, Tuple[float, float]], Jtarget: float,
+                      spincutModel: str, spincutPars: Dict[str, Any],
+                      **kwargs) -> Tuple[float, float]:
+        """Calculate D0 from nld(Sn), assuming equiparity.
+
+        This is the inverse of `nldSn_from_D0`
+
+        Parameters:
+            nld_model (Callable[..., Any]): Model for the nld above the
+                data, of the form `y = nld_model(E)`, in 1/MeV.
+            Sn (float or [float, float]):
+                Separation energy [MeV]. If a tuple, it is assumed that it
+                is of the form `[value, uncertainty]`.
+            Jtarget (float):
+                Target spin
+            spincutModel (str):
+                Model to use for the spin cut
+            spincutPars (Dict[str, Any]):
+                Additional parameters necessary for the spin cut model
+            **kwargs: Unused; absorbs extra entries from
+                `norm_pars.asdict()`
+
+        Returns:
+            D0: D0 in eV
+        """
+
+        Sn = np.atleast_1d(Sn)[0]
+        nld = nld_model(Sn)
+
+        def g(J):
+            return SpinFunctions(Ex=Sn, J=J,
+                                 model=spincutModel,
+                                 pars=spincutPars).distribution()
+
+        if Jtarget == 0:
+            summe = 1 / 2 * g(Jtarget + 1 / 2)
+        else:
+            summe = 1 / 2 * (g(Jtarget - 1 / 2) + g(Jtarget + 1 / 2))
+
+        D0 = 1 / (summe * nld * 1e-6)
+        return D0
+
+    @property
+    def discrete(self) -> Optional[Vector]:
+        return self._discrete
+
+    @discrete.setter
+    def discrete(self, value: Optional[Union[Path, str, Vector]]) -> None:
+        if value is None:
+            self._discrete = None
+            self.LOG.debug("Set `discrete` to None")
+        elif isinstance(value, (str, Path)):
+            if self.nld is None:
+                raise ValueError("`nld` must be set before loading levels")
+            nld = self.nld.copy()
+            nld.to_MeV()
+            self.LOG.debug("Set `discrete` levels from file with FWHM %s",
+                           self.smooth_levels_fwhm)
+            self._discrete = load_levels_smooth(value, nld.E,
+                                                self.smooth_levels_fwhm)
+            self._discrete.units = "MeV"
+            self._discrete_path = value
+
+        elif isinstance(value, Vector):
+            if self.nld is not None and np.any(self.nld.E != value.E):
+                raise ValueError("`nld` and `discrete` must"
+                                 " have same energy binning")
+            self._discrete = value
+            self.LOG.debug("Set `discrete` by Vector")
+        else:
+            raise ValueError(f"Value {value} is not supported"
+                             " for discrete levels")
+
+    @property
+    def smooth_levels_fwhm(self) -> Optional[float]:
+        return self._smooth_levels_fwhm
+
+    @smooth_levels_fwhm.setter
+    def smooth_levels_fwhm(self, value: float) -> None:
+        self._smooth_levels_fwhm = value
+        if self._discrete_path is not None:
+            self.discrete = self._discrete_path
+
+    def self_if_none(self, *args, **kwargs):
+        """ wrapper for lib.self_if_none """
+        return self_if_none(self, *args, **kwargs)
+
+
+def load_levels_discrete(path: Union[str, Path], energy: ndarray) -> Vector:
+    """ Load discrete levels without smoothing
+
+    Assumes linear equidistant binning
+
+    Args:
+        path: The file to load
+        energy: The
binning to use + Returns: + A vector describing the levels + """ + histogram, _ = load_discrete(path, energy, 0.1) + return Vector(values=histogram, E=energy, units='MeV') + + +def load_levels_smooth(path: Union[str, Path], energy: ndarray, + resolution: float = 0.1) -> Vector: + """ Load discrete levels with smoothing + + Assumes linear equdistant binning + + Args: + path: The file to load + energy: The binning to use in MeV + resolution: The resolution (FWHM) of the smoothing to use in MeV + Returns: + A vector describing the smoothed levels + """ + histogram, smoothed = load_discrete(path, energy, resolution) + return Vector(values=smoothed if resolution > 0 else histogram, E=energy, + units='MeV') diff --git a/ompy/normalizer_gsf.py b/ompy/normalizer_gsf.py index 9d177d3d..9ac895cb 100644 --- a/ompy/normalizer_gsf.py +++ b/ompy/normalizer_gsf.py @@ -167,6 +167,10 @@ def normalize(self, *, gsf: Optional[Vector] = None, else: self.nld = self.self_if_none(nld) + self.LOG.debug("Setting NLD, convert to MeV and removing nan") + self.nld.to_MeV() + self.nld.cut_nan() + alpha = self.self_if_none(alpha, nonable=True) if alpha is None: self.LOG.debug("Setting alpha from from normalizer_nld") @@ -190,20 +194,29 @@ def normalize(self, *, gsf: Optional[Vector] = None, else: self.res = ResultsNormalized(name="Results NLD and GSF, stepwise") + self.LOG.debug("Setting GSF, convert to MeV and removing nan") + self._gsf = gsf.copy() + self._gsf.to_MeV() + self._gsf.cut_nan() + self._gsf.transform(alpha=alpha, inplace=True) + self.LOG.info(f"Normalizing #{num}") - self._gsf = gsf.copy() # make a copy as it will be transformed - gsf.to_MeV() - gsf = gsf.transform(alpha=alpha, inplace=False) - self._gsf = gsf - self.model_low.autorange(gsf) - self.model_high.autorange(gsf) - self._gsf_low, self._gsf_high = self.extrapolate(gsf) + self.model_low.autorange(self._gsf) + self.model_high.autorange(self._gsf) + self._gsf_low, self._gsf_high = self.extrapolate(self._gsf) # experimental Gg and calc. both in meV B_norm = self.norm_pars.Gg[0] / self.Gg_before_norm() - # propagate uncertainty of D0 - B_norm_unc = B_norm * self.norm_pars.D0[1] / self.norm_pars.D0[0] + # propagate uncertainty of D0 and Gg0 + B_norm_unc = B_norm * np.sqrt( + (self.norm_pars.D0[1] / self.norm_pars.D0[0])**2 + + (self.norm_pars.Gg[1] / self.norm_pars.Gg[0])**2) + + num_units = max(0, int(-np.floor(np.log10(B_norm_unc))) + 1) + num_units = f"%.{num_units}f" + self.LOG.info(f"Normalizing coeficient B = {num_units} ± {num_units}", + B_norm, B_norm_unc) # apply transformation and export results self._gsf.transform(B_norm) diff --git a/ompy/normalizer_nld.py b/ompy/normalizer_nld.py index e2317b67..d0232636 100644 --- a/ompy/normalizer_nld.py +++ b/ompy/normalizer_nld.py @@ -198,6 +198,8 @@ def normalize(self, *, limit_low: Optional[Tuple[float, float]] = None, nld = nld.copy() self.LOG.debug("Setting NLD, convert to MeV") nld.to_MeV() + self.LOG.debug("Setting NLD, removing nan") + nld.cut_nan() # Need to give some sort of standard deviation for sensible results # Otherwise deviations at higher level density will have an @@ -263,6 +265,11 @@ def initial_guess(self, limit_low: Optional[Tuple[float, float]] = None, discrete = self.discrete.cut(*limit_low, inplace=False) nld_high = self.nld.cut(*limit_high, inplace=False) + # Artificially increase the nld rel error at low + #nld_low.std = nld_low.values * 0.3 + #nld_low.std *= np.sqrt(2) # Maybe I'm wrong, it should be twice as high? 
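The `normalizer_gsf.py` hunk above widens the uncertainty on the normalization coefficient B from D0-only to D0 and Gg combined in quadrature. A sketch of that propagation with hypothetical numbers:

```python
import numpy as np

# For B = Gg_exp / Gg_calc with independent relative uncertainties on
# Gg and D0, the relative errors add in quadrature.
Gg = (85.0, 10.0)    # hypothetical <Gamma_gamma> [meV]: value, unc.
D0 = (6.8, 0.6)      # hypothetical D0 [eV]: value, unc.
Gg_calc = 95.0       # hypothetical Gg from the current normalization

B = Gg[0] / Gg_calc
B_unc = B * np.sqrt((D0[1] / D0[0])**2 + (Gg[1] / Gg[0])**2)
print(B, B_unc)
```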
+ # Instead we will be looking at the cumulative NLD + nldSn = self.nldSn_from_D0(**self.norm_pars.asdict())[1] rel_uncertainty = self.norm_pars.D0[1]/self.norm_pars.D0[0] nldSn = np.array([nldSn, nldSn * rel_uncertainty]) @@ -481,6 +488,32 @@ def plot(self, *, ax: Any = None, return fig, ax + def lnlike_v2(self, x: Tuple[float, float, float, float], nld_low: Vector, + nld_high: Vector, discrete: Vector, + model: Callable[..., ndarray], + Sn, nldSn) -> float: + """ Compute log likelihood of the ... + """ + A, alpha, T, Eshift = x[:4] # slicing needed for multinest? + transformed_low = nld_low.transform(A, alpha, inplace=False) + transformed_high = nld_high.transform(A, alpha, inplace=False) + + err_low = transformed_low.error(discrete) + expected = Vector(E=transformed_high.E, + values=model(E=transformed_high.E, + T=T, Eshift=Eshift)) + err_high = transformed_high.error(expected) + + # calculate the D0-equivalent of T and Eshift used + D0 = self.D0_from_nldSn(lambda E: model(E, T=T, Eshift=Eshift), **self.norm_pars.asdict()) + + err_nldSn = ((D0 - self.norm_pars.D0[0])/self.norm_pars.D0[1])**2 + + ln_stds = (np.log(transformed_low.std).sum() + + np.log(transformed_high.std).sum()) + + return -0.5*(err_low + err_high + err_nldSn + ln_stds) + @staticmethod def lnlike(x: Tuple[float, float, float, float], nld_low: Vector, nld_high: Vector, discrete: Vector, diff --git a/ompy/normalizer_nld_v2.py b/ompy/normalizer_nld_v2.py new file mode 100644 index 00000000..e126c532 --- /dev/null +++ b/ompy/normalizer_nld_v2.py @@ -0,0 +1,731 @@ +import numpy as np +import copy +import logging +import termtables as tt +import json +import pymultinest +import matplotlib.pyplot as plt +import warnings +from contextlib import redirect_stdout +from numpy import ndarray +from scipy.optimize import differential_evolution +from typing import Optional, Tuple, Any, Union, Callable, Dict +from pathlib import Path + +from .stats import truncnorm_ppf, normal_ppf +from .vector import Vector +from .library import self_if_none +from .spinfunctions import SpinFunctions +from .filehandling import load_discrete +from .models import ResultsNormalized, NormalizationParameters +from .abstract_normalizer import AbstractNormalizer + +TupleDict = Dict[str, Tuple[float, float]] + + +class NormalizerNLD(AbstractNormalizer): + """ Normalizes NLD to empirical data + + Normalizes nld/gsf according to:: + + nld' = nld * A * np.exp(alpha * Ex), and + + This is the transformation eq (3), Schiller2000 + + Takes empirical data in form of an array of discrete levels, + neutron separation energy Sn, a model to estimate what the + NLD is at Sn, and several parameters for the model as well + as bounds on normalization parameters. + + As a consequence of a complex problem, this class has a complex + interface. Much of the book-keeping associated with normalization + has been automated, but there is still a lot of settings and + parameters for the user to take care of. Some default values + has been seen, but the user must be _EXTREMELY_ careful when + evaluating the output. + + Attributes: + discrete (Vector): The discrete NLD at lower energies. [MeV] + nld (Vector): The NLD to normalize. Gets converted to [MeV] from [keV]. + norm_pars (NormalizationParameters): Normalization parameters like + experimental D₀, and spin(-cut) model + bounds (Dict[str, Tuple[float, float]): The bounds on each of + the parameters. Its keys are 'A', 'alpha', 'T', and 'D0'. The + values are on the form (min, max). 
+ model (Callable[..., ndarray]): The model to use at high energies + to estimate the NLD. Defaults to constant temperature model. + de_kwargs(dict): Additional keywords to differential evolution. + Defaults to `{"seed": 65424}`. Note that `bounds` has been + taken out as a separate attribute, but is a keyword of de. + multinest_path (Path): Where to save the multinest output. + defaults to 'multinest'. + multinest_kwargs (dict): Additional keywords to multinest. Defaults to + `{"seed": 65498, "resume": False}` + res (ResultsNormalized): Results of the normalization + smooth_levels_fwhm (float): FWHM with which the discrete levels shall + be smoothed when loading from file. Defaults to 0.1 MeV. + path (Path): The path save the results. + + """ + LOG = logging.getLogger(__name__) # overwrite parent variable + logging.captureWarnings(True) + + def __init__(self, *, + nld: Optional[Vector] = None, + discrete: Optional[Union[str, Vector]] = None, + path: Optional[Union[str, Path]] = 'saved_run/normalizers', + regenerate: bool = False, + norm_pars: Optional[NormalizationParameters] = None) -> None: + """ Normalizes nld ang gSF. + + Note: + The prefered syntax is `Normalizer(nld=...)` + If neither is given, the nld (and other parameters) can be + explicity + be set later by:: + + `normalizer.normalize(..., nld=...)` + + or:: + + `normalizer.nld = ...` + + In the later case you *might* have to send in a copy if it's a + mutable to ensure it is not changed. + + Args: + extractor: see above + nld: see above + discrete: see above + path: see above + norm_pars: see above + TODO: + - parameter to limit the number of multinest samples to store. Note + that the samples should be shuffled to retain some "random" + samples from the pdf (not the importance weighted) + + """ + super().__init__(regenerate) + + # Create the private variables + self._discrete = None + self._discrete_path = None + self._D0 = None + self._smooth_levels_fwhm = None + self.norm_pars = norm_pars + self.bounds = {'A': [0.1, 1e3], 'alpha': [1e-1, 20], 'T': [0.1, 1], + 'Eshift': [-5, 5]} # D0 bounds set later + self.model: Optional[Callable[..., ndarray]] = self.const_temperature + # self.curried_model = lambda *arg: None + self.de_kwargs = {"seed": 65424} + self.multinest_path = Path('multinest') + self.multinest_kwargs: dict = {"seed": 65498, "resume": False} + + # Handle the method parameters + self.smooth_levels_fwhm = 0.3 + self.nld = None if nld is None else nld.copy() + self.discrete = discrete + + self.res = ResultsNormalized(name="Results NLD") + + self.limit_low = None + self.limit_high = None + self.std_fake = None # See `normalize` + + if path is None: + self.path = None + else: + self.path = Path(path) + self.path.mkdir(exist_ok=True, parents=True) + + def __call__(self, *args, **kwargs) -> None: + """ Wrapper around normalize """ + self.normalize(*args, **kwargs) + + def normalize(self, *, limit_low: Optional[Tuple[float, float]] = None, + limit_high: Optional[Tuple[float, float]] = None, + nld: Optional[Vector] = None, + discrete: Optional[Vector] = None, + bounds: Optional[TupleDict] = None, + norm_pars: Optional[NormalizationParameters] = None, + num: int = 0) -> None: + """ Normalize NLD to a low and high energy region + + Args: + limit_low: The limits (start, stop) where to normalize + to discrete levels. + limit_high: The limits (start, stop) where to normalize to + a theoretical model and neutron separation energy at high + energies. + nld: The nuclear level density vector to normalize. 
+ discrete: The discrete level density at low energies to + normalize to. + bounds: The bounds of the parameters + norm_pars (NormalizationParameters): Normalization parameters like + experimental D₀, and spin(-cut) model + num (optional): Loop number, defauts to 0 + regenerate: Whether to use already generated files (False) or + generate them all anew (True). + + """ + if not self.regenerate: + try: + self.load() + return + except FileNotFoundError: + pass + + # Update internal state + self.limit_low = self.self_if_none(limit_low) + self.limit_high = self.self_if_none(limit_high) + limit_low = self.limit_low + limit_high = self.limit_high + + discrete = self.self_if_none(discrete) + discrete.to_MeV() + nld = self.self_if_none(nld) + + self.norm_pars = self.self_if_none(norm_pars) + self.norm_pars.is_changed(include=["D0", "Sn", "spincutModel", + "spincutPars"]) # check that set + + self.bounds = self.self_if_none(bounds) + + # ensure that it's updated if running again + self.res = ResultsNormalized(name="Results NLD") + + self.LOG.info(f"\n\n---------\nNormalizing nld #{num}") + nld = nld.copy() + self.LOG.debug("Setting NLD, convert to MeV") + nld.to_MeV() + self.LOG.debug("Setting NLD, removing nan") + nld.cut_nan() + + # Need to give some sort of standard deviation for sensible results + # Otherwise deviations at higher level density will have an + # uncreasonably high weight. + if self.std_fake is None: + self.std_fake = False + if self.std_fake or nld.std is None: + self.std_fake = True + nld.std = nld.values * 0.3 # x% is an arb. choice + self.nld = nld + + # Use DE to get an inital guess before optimizing + args, guess = self.initial_guess(limit_low, limit_high) + # Optimize using multinest + popt, samples = self.optimize(num, args, guess) + + transformed = nld.transform(popt['A'][0], popt['alpha'][0], + inplace=False) + if self.std_fake: + nld.std = None + transformed.std = None + + self.res.nld = transformed + self.res.pars = popt + self.res.samples = samples + ext_model = lambda E: self.model(E, T=popt['T'][0], + Eshift=popt['Eshift'][0]) + self.res.nld_model = ext_model + + self.save() # save instance + + def initial_guess(self, limit_low: Optional[Tuple[float, float]] = None, + limit_high: Optional[Tuple[float, float]] = None + ) -> Tuple[Tuple[float, float, float, float], + Dict[str, float]]: + """ Find an inital guess for the constant, α, T and D₀ + + Uses differential evolution to perform the guessing. + + Args: + limit_low: The limits (start, stop) where to normalize + to discrete levels. + limit_high: The limits (start, stop) where to normalize to + a theoretical model and neutron separation energy at high + energies. + + Returns: + The arguments used for chi^2 minimization and the + minimizer. 
+ """ + limit_low = self.self_if_none(limit_low) + limit_high = self.self_if_none(limit_high) + + bounds = list(self.bounds.values()) + spinParsstring = json.dumps(self.norm_pars.spincutPars, indent=4, + sort_keys=True) + + self.LOG.debug("Using bounds %s", bounds) + self.LOG.debug("Using spincutModel %s", self.norm_pars.spincutModel) + self.LOG.debug("Using spincutPars %s", spinParsstring) + + nld_low = self.nld.cut(*limit_low, inplace=False) + discrete = self.discrete.cut(*limit_low, inplace=False) + nld_high = self.nld.cut(*limit_high, inplace=False) + + # Artificially increase the nld rel error at low + nld_low.std = nld_low.values * 0.3 + # Instead we will be looking at the cumulative NLD + + nldSn = self.nldSn_from_D0(**self.norm_pars.asdict())[1] + rel_uncertainty = self.norm_pars.D0[1]/self.norm_pars.D0[0] + nldSn = np.array([nldSn, nldSn * rel_uncertainty]) + + if self.norm_pars.rhoSn is not None: + nldSn = self.norm_pars.rhoSn + + def neglnlike(x, *args, **kwargs): + # We have a requirement that T, Eshift should fit NLDSn, + # therefore we will add a really lot to ensure that this + # is the case in the DE + Eshift = self.norm_pars.Sn[0] - x[2] * np.log(x[2]*nldSn[0]) + EshiftSigma = nldSn[1]*x[2]/nldSn[0] + EshiftError = ((Eshift - x[3])/EshiftSigma)**2 + return EshiftError - self.lnlike(x, *args, **kwargs) + args = (nld_low, nld_high, discrete, self.model, self.norm_pars.Sn[0], + nldSn) + res = differential_evolution(neglnlike, bounds=bounds, args=args, + **self.de_kwargs) + + self.LOG.info("DE results:\n%s", tt.to_string([res.x.tolist()], + header=['A', 'α [MeV⁻¹]', 'T [MeV]', 'Eshift [MeV]'])) + + p0 = dict(zip(["A", "alpha", "T", "Eshift"], (res.x).T)) + for key, res in p0.items(): + if res in self.bounds[key]: + self.LOG.warning(f"DE result for {key} is at edge its bound:" + f"{self.bounds[key]}. This will probably lead" + f"to wrong estimations in multinest, too.") + + return args, p0 + + def optimize(self, num: int, args, + guess: Dict[str, float]) -> Tuple[Dict[str, float], Dict[str, float]]: + """Find parameters given model constraints and an initial guess + + Employs Multinest + + Args: + num (int): Loop number + args_nld (Iterable): Additional arguments for the nld lnlike + guess (Dict[str, float]): The initial guess of the parameters + + Returns: + Tuple: + - popt (Dict[str, Tuple[float, float]]): Median and 1sigma of the + parameters + - samples (Dict[str, List[float]]): Multinest samples. + Note: They are still importance weighted, not random draws + from the posterior. + + Raises: + ValueError: Invalid parameters for automatic prior + + Note: + You might want to adjust the priors for your specific case! Here + we just propose a general solution that might often work out of + the box. 
+ """ + if guess['alpha'] < 0: + raise NotImplementedError("Prior selection not implemented for " + "α < 0") + alpha_exponent = np.log10(guess['alpha']) + + if guess['T'] < 0: + raise ValueError("Prior selection not implemented for T < 0; " + "negative temperature is unphysical") + T_exponent = np.log10(guess['T']) + + A = guess['A'] + + # truncations from absolute values + lower_A, upper_A = 0., np.inf + mu_A, sigma_A = A, 10*A + a_A = (lower_A - mu_A) / sigma_A + b_A = (upper_A - mu_A) / sigma_A + + lower_Eshift, upper_Eshift = -5., 5 + mu_Eshift, sigma_Eshift = 0, 5 + a_Eshift = (lower_Eshift - mu_Eshift) / sigma_Eshift + b_Eshift = (upper_Eshift - mu_Eshift) / sigma_Eshift + + mu_rhoSn, sigma_rhoSn = self.norm_pars.rhoSn + Sn = self.norm_pars.Sn[0] + + def prior(cube, ndim, nparams): + # NOTE: You may want to adjust this for your case! + # truncated normal prior + cube[0] = truncnorm_ppf(cube[0], a_A, b_A)*sigma_A+mu_A + # log-uniform prior + # if alpha = 1e2, it's between 1e1 and 1e3 + cube[1] = 10**(cube[1]*2 + (alpha_exponent-1)) + # log-uniform prior + # if T = 1e2, it's between 1e1 and 1e3 + cube[2] = 10**(cube[2]*2 + (T_exponent-1)) + # truncated normal prior + rhoSn = normal_ppf(cube[3])*sigma_rhoSn + mu_rhoSn + cube[3] = Sn - cube[2]*np.log(cube[2] * rhoSn) + + #cube[3] = truncnorm_ppf(cube[3], a_Eshift, + #b_Eshift)*sigma_Eshift + mu_Eshift + + if np.isinf(cube[3]): + self.LOG.debug("Encountered inf in cube[3]:\n%s", cube[3]) + + def loglike(cube, ndim, nparams): + return self.lnlike(cube, *args) + + self.multinest_path.mkdir(exist_ok=True) + path = self.multinest_path / f"nld_norm_{num}_" + assert len(str(path)) < 60, "Total path length too long for multinest" + + self.LOG.info("Starting multinest") + self.LOG.debug("with following keywords %s:", self.multinest_kwargs) + # Hack where stdout from Multinest is redirected as info messages + self.LOG.write = lambda msg: (self.LOG.info(msg) if msg != '\n' + else None) + with redirect_stdout(self.LOG): + pymultinest.run(loglike, prior, len(guess), + outputfiles_basename=str(path), + **self.multinest_kwargs) + + # Save parameters for analyzer + names = list(guess.keys()) + json.dump(names, open(str(path) + 'params.json', 'w')) + analyzer = pymultinest.Analyzer(len(guess), + outputfiles_basename=str(path)) + + stats = analyzer.get_stats() + + samples = analyzer.get_equal_weighted_posterior()[:, :-1] + samples = dict(zip(names, samples.T)) + + # Format the output + popt = dict() + vals = [] + for name, m in zip(names, stats['marginals']): + lo, hi = m['1sigma'] + med = m['median'] + sigma = (hi - lo) / 2 + popt[name] = (med, sigma) + i = max(0, int(-np.floor(np.log10(sigma))) + 1) + fmt = '%%.%df' % i + fmts = '\t'.join([fmt + " ± " + fmt]) + vals.append(fmts % (med, sigma)) + + self.LOG.info("Multinest results:\n%s", tt.to_string([vals], + header=['A', 'α [MeV⁻¹]', 'T [MeV]', 'Eshift [MeV]'])) + + return popt, samples + + def plot(self, *, ax: Any = None, + add_label: bool = True, + results: Optional[ResultsNormalized] = None, + add_figlegend: bool = True, + plot_fitregion: bool = True, + reset_color_cycle: bool = True, + **kwargs) -> Tuple[Any, Any]: + """Plot the NLD, discrete levels and result of normalization + + Args: + ax (optional): The matplotlib axis to plot onto. Creates axis + is not provided + add_label (bool, optional): Defaults to `True`. + add_figlegend (bool, optional):Defaults to `True`. + results (ResultsNormalized, optional): If provided, nld and model + are taken from here instead. 
+ plot_fitregion (Optional[bool], optional): Defaults to `True`. + reset_color_cycle (Optional[bool], optional): Defaults to `True` + **kwargs: Description + + Returns: + fig, ax + """ + if ax is None: + fig, ax = plt.subplots() + else: + fig = ax.figure + + if reset_color_cycle: + ax.set_prop_cycle(None) + + res = self.res if results is None else results + pars = res.pars + nld = res.nld + + labelNld = '_exp.' + labelNldSn = None + labelModel = "_model" + labelDiscrete = "_known levels" + if add_label: + labelNld = 'exp.' + labelNldSn = r'$\rho(S_n)$' + labelModel = 'model' + labelDiscrete = "known levels" + nld.plot(ax=ax, label=labelNld, **kwargs) + + self.discrete.plot(ax=ax, kind='step', c='k', label=labelDiscrete) + + nldSn = self.nldSn_from_D0(**self.norm_pars.asdict())[1] + rel_uncertainty = self.norm_pars.D0[1]/self.norm_pars.D0[0] + nldSn = np.array([nldSn, nldSn * rel_uncertainty]) + + x = np.linspace(self.limit_high[0], self.norm_pars.Sn[0]) + model = Vector(E=x, values=self.model(E=x, + T=pars['T'][0], + Eshift=pars['Eshift'][0])) + + ax.errorbar(self.norm_pars.Sn[0], nldSn[0], yerr=nldSn[1], + label=labelNldSn, fmt="ks", markerfacecolor='none') + # workaround for enseble Normalizer; always keep these label + for i in range(3): + ax.lines[-(i+1)]._label = "_nld(Sn)" + + ax.plot(model.E, model.values, "--", label=labelModel, markersize=0, + c='g', **kwargs) + + if plot_fitregion: + ax.axvspan(self.limit_low[0], self.limit_low[1], color='grey', + alpha=0.1, label="fit limits") + ax.axvspan(self.limit_high[0], self.limit_high[1], color='grey', + alpha=0.1) + + ax.set_yscale('log') + ax.set_ylabel(r"Level density $\rho(E_x)~[\mathrm{MeV}^{-1}]$") + ax.set_xlabel(r"Excitation energy $E_x~[\mathrm{MeV}]$") + ax.set_ylim(bottom=0.5/(nld.E[1]-nld.E[0])) + + if fig is not None and add_figlegend: + fig.legend(loc=9, ncol=3, frameon=False) + + return fig, ax + + @staticmethod + def lnlike(x: Tuple[float, float, float, float], nld_low: Vector, + nld_high: Vector, discrete: Vector, + model: Callable[..., ndarray], + Sn, nldSn) -> float: + """ Compute log likelihood of the normalization fitting + + This is the result up a, which is irrelevant for the maximization + + Args: + x: The arguments ordered as A, alpha, T and Eshift + nld_low: The lower region where discrete levels will be + fitted. + nld_high: The upper region to fit to model. + discrete: The discrete levels to be used in fitting the + lower region. + model: The model to use when fitting the upper region. + Must support the keyword arguments + ``model(E=..., T=..., Eshift=...) -> ndarray`` + Returns: + lnlike: log likelihood + """ + A, alpha, T, Eshift = x[:4] # slicing needed for multinest? + transformed_low = nld_low.transform(A, alpha, inplace=False) + transformed_high = nld_high.transform(A, alpha, inplace=False) + + err_low = transformed_low.error(discrete) + expected = Vector(E=transformed_high.E, + values=model(E=transformed_high.E, + T=T, Eshift=Eshift)) + err_high = transformed_high.error(expected) + + # To ensure that the nldSn is weighted about as + # much as the exp data we will multiply with + # the number of points used to fit the `high` + # part of the NLD. This is reasonable because + # we trust this measurement equally as much + # as the measured NLD. This is meant to remove + # some bias introduced by having many points and only a single + # point at Sn. 
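A sketch of the weighting idea described in the comment above. The factor is currently disabled (`nldSn_factor = 0` on the next line), but with `n` points in the high-energy fit region the single rho(Sn) term could be given equal total weight like this (illustrative numbers):

```python
import numpy as np

# Scale the single rho(Sn) chi^2 term by the number of points fitted
# in the high region, so it carries comparable total weight.
n_high = 12                          # points in the high-E fit region
nldSn = (2.1e6, 0.4e6)               # hypothetical rho(Sn): value, unc.
nldSn_model = 1.8e6                  # hypothetical model prediction

chi2_single = ((nldSn[0] - nldSn_model) / nldSn[1])**2
chi2_weighted = n_high * chi2_single
print(chi2_single, chi2_weighted)
```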
+ nldSn_factor = 0 # len(transformed_high.E) + + nldSn_model = model(E=Sn, T=T, Eshift=Eshift) + err_nldSn = nldSn_factor*((nldSn[0] - nldSn_model)/nldSn[1])**2 + + ln_stds = (np.log(transformed_low.std).sum() + + np.log(transformed_high.std).sum()) + + return -0.5*(err_low + err_high + err_nldSn + ln_stds) + + @staticmethod + def const_temperature(E: ndarray, T: float, Eshift: float) -> ndarray: + """ Constant Temperature NLD""" + ct = np.exp((E - Eshift) / T) / T + return ct + + @staticmethod + def nldSn_from_D0(D0: Union[float, Tuple[float, float]], + Sn: Union[float, Tuple[float, float]], Jtarget: float, + spincutModel: str, spincutPars: Dict[str, Any], + **kwargs) -> Tuple[float, float]: + """Calculate nld(Sn) from D0 + + + 1/D0 = nld(Sn) * ( g(Jtarget+1/2, pi_target) + + g(Jtarget1/2, pi_target) ) + Here we assume equal parity, g(J,pi) = g(J)/2 and + nld(Sn) = 1/D0 * 2/(g(Jtarget+1/2) + g(Jtarget-1/2)) + For the case Jtarget = 0, the g(Jtarget-1/2) = 0 + + Parameters: + D0 (float or [float, float]): + Average resonance spacing from s waves [eV]. If a tuple, + it is assumed that it is of the form `[value, uncertainty]`. + Sn (float or [float, float]): + Separation energy [MeV]. If a tuple, it is assumed that it is of + the form `[value, uncertainty]`. + Jtarget (float): + Target spin + spincutModel (str): + Model to for the spincut + spincutPars Dict[str, Any]: + Additional parameters necessary for the spin cut model + **kwargs: Description + + + Returns: + nld: Ex=Sn and nld at Sn [MeV, 1/MeV] + """ + + D0 = np.atleast_1d(D0)[0] + Sn = np.atleast_1d(Sn)[0] + + def g(J): + return SpinFunctions(Ex=Sn, J=J, + model=spincutModel, + pars=spincutPars).distribution() + + if Jtarget == 0: + summe = 1 / 2 * g(Jtarget + 1 / 2) + else: + summe = 1 / 2 * (g(Jtarget - 1 / 2) + g(Jtarget + 1 / 2)) + + nld = 1 / (summe * D0 * 1e-6) + return [Sn, nld] + + @staticmethod + def D0_from_nldSn(nld_model: Callable[..., Any], + Sn: Union[float, Tuple[float, float]], Jtarget: float, + spincutModel: str, spincutPars: Dict[str, Any], + **kwargs) -> Tuple[float, float]: + """Calculate D0 from nld(Sn), assuming equiparity. + + This is the inverse of `nldSn_from_D0` + + Parameters: + nld_model (Callable[..., Any]): Model for nld above data of the + from `y = nld_model(E)` in 1/MeV. + Sn (float or [float, float]): + Separation energy [MeV]. If a tuple, it is assumed that it is of + the form `[value, uncertainty]`. 
+ Jtarget (float): + Target spin + spincutModel (str): + Model to for the spincut + spincutPars Dict[str, Any]: + Additional parameters necessary for the spin cut model + **kwargs: Description + + + Returns: + D0: D0 in eV + """ + + Sn = np.atleast_1d(Sn)[0] + nld = nld_model(Sn) + + def g(J): + return SpinFunctions(Ex=Sn, J=J, + model=spincutModel, + pars=spincutPars).distribution() + + if Jtarget == 0: + summe = 1 / 2 * g(Jtarget + 1 / 2) + else: + summe = 1 / 2 * (g(Jtarget - 1 / 2) + g(Jtarget + 1 / 2)) + + D0 = 1 / (summe * nld * 1e-6) + return D0 + + @property + def discrete(self) -> Optional[Vector]: + return self._discrete + + @discrete.setter + def discrete(self, value: Optional[Union[Path, str, Vector]]) -> None: + if value is None: + self._discretes = None + self.LOG.debug("Set `discrete` to None") + elif isinstance(value, (str, Path)): + if self.nld is None: + raise ValueError(f"`nld` must be set before loading levels") + nld = self.nld.copy() + nld.to_MeV() + self.LOG.debug("Set `discrete` levels from file with FWHM %s", + self.smooth_levels_fwhm) + self._discrete = load_levels_smooth(value, nld.E, + self.smooth_levels_fwhm) + self._discrete.units = "MeV" + self._discrete_path = value + + elif isinstance(value, Vector): + if self.nld is not None and np.any(self.nld.E != value.E): + raise ValueError("`nld` and `discrete` must" + " have same energy binning") + self._discrete = value + self.LOG.debug("Set `discrete` by Vector") + else: + raise ValueError(f"Value {value} is not supported" + " for discrete levels") + + @property + def smooth_levels_fwhm(self) -> Optional[float]: + return self._smooth_levels_fwhm + + @smooth_levels_fwhm.setter + def smooth_levels_fwhm(self, value: float) -> None: + self._smooth_levels_fwhm = value + if self._discrete_path is not None: + self.discrete = self._discrete_path + + def self_if_none(self, *args, **kwargs): + """ wrapper for lib.self_if_none """ + return self_if_none(self, *args, **kwargs) + + +def load_levels_discrete(path: Union[str, Path], energy: ndarray) -> Vector: + """ Load discrete levels without smoothing + + Assumes linear equdistant binning + + Args: + path: The file to load + energy: The binning to use + Returns: + A vector describing the levels + """ + histogram, _ = load_discrete(path, energy, 0.1) + return Vector(values=histogram, E=energy, units='MeV') + + +def load_levels_smooth(path: Union[str, Path], energy: ndarray, + resolution: float = 0.1) -> Vector: + """ Load discrete levels with smoothing + + Assumes linear equdistant binning + + Args: + path: The file to load + energy: The binning to use in MeV + resolution: The resolution (FWHM) of the smoothing to use in MeV + Returns: + A vector describing the smoothed levels + """ + histogram, smoothed = load_discrete(path, energy, resolution) + return Vector(values=smoothed if resolution > 0 else histogram, E=energy, + units='MeV') diff --git a/ompy/normalizer_simultan.py b/ompy/normalizer_simultan.py index d27ba5fa..a8e559ae 100644 --- a/ompy/normalizer_simultan.py +++ b/ompy/normalizer_simultan.py @@ -6,6 +6,7 @@ from numpy import ndarray from pathlib import Path from typing import Optional, Union, Tuple, Any, Callable, Dict, Iterable, List +from scipy.optimize import differential_evolution import pymultinest import matplotlib.pyplot as plt from contextlib import redirect_stdout @@ -122,19 +123,25 @@ def normalize(self, *, num: int = 0, # reset internal state self.res = ResultsNormalized(name="Results NLD") - + norm_pars_org = copy.deepcopy(self.normalizer_nld.norm_pars) 
self.normalizer_nld = copy.deepcopy(self.self_if_none(normalizer_nld)) self.normalizer_gsf = copy.deepcopy(self.self_if_none(normalizer_gsf)) for norm in [self.normalizer_nld, self.normalizer_gsf]: norm._save_instance = False norm.regenerate = True + self.LOG.debug("Setting NLD and GSF, convert to MeV and removing nan") gsf = self.self_if_none(gsf) + gsf = gsf.copy() + gsf.to_MeV() + gsf.cut_nan() + nld = self.self_if_none(nld) nld = nld.copy() - gsf = gsf.copy() nld.to_MeV() - gsf.to_MeV() + nld.cut_nan() + + self.LOG.info(f"Normalizing #{num}") # Need to give some sort of standard deviation for sensible results # Otherwise deviations at higher level density will have an @@ -153,7 +160,7 @@ def normalize(self, *, num: int = 0, self.normalizer_gsf.gsf_in = gsf # update before initial guess # Use DE to get an inital guess before optimizing - args_nld, guess = self.initial_guess() + args_nld, guess = self.initial_guess(num) # Optimize using multinest popt, samples = self.optimize(num, args_nld, guess) @@ -181,9 +188,11 @@ def normalize(self, *, num: int = 0, for model in [self.res.gsf_model_low, self.res.gsf_model_high]: model.shift_after = model.shift + self.normalizer_nld.norm_pars = copy.deepcopy(norm_pars_org) + self.normalizer_gsf.norm_pars = copy.deepcopy(norm_pars_org) self.save() # save instance - def initial_guess(self) -> None: + def initial_guess(self, num: int) -> None: """ Find an inital guess for normalization parameters Uses guess of normalizer_nld and corresponding normalization of gsf @@ -202,15 +211,25 @@ def initial_guess(self) -> None: nld = normalizer_nld.nld.transform(A, alpha, inplace=False) nld_model = lambda E: normalizer_nld.model(E, T=T, Eshift=Eshift) # noqa - normalizer_gsf.normalize(nld=nld, nld_model=nld_model, alpha=alpha) + normalizer_gsf.normalize(nld=nld, nld_model=nld_model, alpha=alpha, num=num) guess["B"] = normalizer_gsf.res.pars["B"][0] + # If D0 is not consistent with model prediction we should do a + # MLE from the simultan likelihood. 
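A sketch of the joint differential-evolution step added below: the nld bounds are extended with a bound on B around the gsf-based guess, and a joint negative log-likelihood is minimized. A quadratic toy stands in for the real `-lnlike`; only the bookkeeping is the point here:

```python
import numpy as np
from scipy.optimize import differential_evolution

# Bounds for A, alpha, T, Eshift (as in NormalizerNLD), plus B.
bounds = [(0.1, 1e3), (1e-1, 20), (0.1, 1), (-5, 5)]
B_guess = 0.8                        # hypothetical gsf-based guess
bounds.append((0, 4 * B_guess))

target = np.array([50.0, 1.5, 0.6, -1.0, B_guess])

def neglnlike(x):
    return np.sum((x - target)**2)   # toy stand-in for -lnlike

res = differential_evolution(neglnlike, bounds=bounds, seed=65424)
print(dict(zip(["A", "alpha", "T", "Eshift", "B"], res.x)))
```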
+ bounds = list(normalizer_nld.bounds.values()) + bounds.append([0, 4*guess["B"]]) + + def neglnlike(x, *args): + return -self.lnlike(x, args) + res = differential_evolution(neglnlike, bounds=bounds, args=args_nld, + **normalizer_nld.de_kwargs) + guess_print = copy.deepcopy(guess) - self.LOG.info("DE results/initial guess:\n%s", - tt.to_string([list(guess_print.values())], + self.LOG.info("DE results/initial guess (# %d):\n%s", num, + tt.to_string([res.x.tolist()], header=['A', 'α [MeV⁻¹]', 'T [MeV]', 'Eshift [MeV]', 'B'])) - + guess = dict(zip(["A", "alpha", "T", "Eshift", "B"], (res.x).T)) return args_nld, guess def optimize(self, num: int, @@ -300,10 +319,10 @@ def loglike(cube, ndim, nparams): path = self.multinest_path / f"sim_norm_{num}_" assert len(str(path)) < 60, "Total path length too long for multinest" - self.LOG.info("Starting multinest: ") + self.LOG.info("Starting multinest (# %d): ", num) self.LOG.debug("with following keywords %s:", self.multinest_kwargs) # Hack where stdout from Multinest is redirected as info messages - self.LOG.write = lambda msg: (self.LOG.info(msg) if msg != '\n' + self.LOG.write = lambda msg: (self.LOG.info("# %d: %s", num, msg) if msg != '\n' # noqa else None) with redirect_stdout(self.LOG): @@ -335,9 +354,9 @@ def loglike(cube, ndim, nparams): fmts = '\t'.join([fmt + " ± " + fmt]) vals.append(fmts % (med, sigma)) - self.LOG.info("Multinest results:\n%s", tt.to_string([vals], - header=['A', 'α [MeV⁻¹]', 'T [MeV]', - 'Eshift [MeV]', 'B'])) + self.LOG.info("Multinest results (# %d):\n%s", num, + tt.to_string([vals], header=['A', 'α [MeV⁻¹]', 'T [MeV]', + 'Eshift [MeV]', 'B'])) # reset state self.normalizer_gsf.norm_pars = norm_pars_org diff --git a/ompy/normalizer_simultan_ct.py b/ompy/normalizer_simultan_ct.py new file mode 100644 index 00000000..9a755693 --- /dev/null +++ b/ompy/normalizer_simultan_ct.py @@ -0,0 +1,443 @@ +import logging +import numpy as np +import copy +import json +import termtables as tt +from numpy import ndarray +from pathlib import Path +from typing import Optional, Union, Tuple, Any, Callable, Dict, Iterable, List +import pymultinest +import matplotlib.pyplot as plt +from contextlib import redirect_stdout + +from .abstract_normalizer import AbstractNormalizer +from .extractor import Extractor +from .library import log_interp1d, self_if_none +from .models import Model, ResultsNormalized, ExtrapolationModelLow,\ + ExtrapolationModelHigh, NormalizationParameters +from .normalizer_nld import NormalizerNLD +from .normalizer_gsf import NormalizerGSF +from .spinfunctions import SpinFunctions +from .vector import Vector +from .stats import truncnorm_ppf + + +class NormalizerSimultan(AbstractNormalizer): + + """ Simultaneous normalization of nld and gsf. Composed of Normalizer and NormalizerGSF as input, so read more on the normalization there + + Attributes: + extractor (Extractor): Extractor instance + gsf (Optional[Vector], optional): gsf to normalize + multinest_path (Path, optional): Default path where multinest + saves files + multinest_kwargs (dict): Additional keywords to multinest. Defaults to + `{"seed": 65498, "resume": False}` + nld (Optional[Vector], optional): nld to normalize + normalizer_nld (NormalizerNLD): `NormalizerNLD` instance to get the normalization paramters + normalizer_gsf (NormalizerGSF): `NormalizerGSF` instance to get the normalization paramters + res (ResultsNormalized): Results + std_fake_gsf (bool): Whether the std. deviation is faked + (see `normalize`) + std_fake_nld (bool): Whether the std. 
deviation is faked + (see `normalize`) + path (Path): The path save the results. + + + TODO: + Work with more general models, too, not just CT for nld + """ + LOG = logging.getLogger(__name__) + logging.captureWarnings(True) + + def __init__(self, *, + gsf: Optional[Vector] = None, + nld: Optional[Vector] = None, + normalizer_nld: Optional[NormalizerNLD] = None, + normalizer_gsf: Optional[NormalizerGSF] = None, + path: Optional[Union[str, Path]] = 'saved_run/normalizers', + regenerate: bool = False): + """ + TODO: + - currently have to set arguments here, an cannot set them in + "normalize" + + Args: + gsf (optional): see above + nld (optional): see above + normalizer_nld (optional): see above + normalizer_gsf (optional): see above + + """ + super().__init__(regenerate) + if normalizer_nld is None: + self.normalizer_nld = None + else: + self.normalizer_nld = copy.deepcopy(normalizer_nld) + + if normalizer_gsf is None: + self.normalizer_gsf = None + else: + self.normalizer_gsf = copy.deepcopy(normalizer_gsf) + + self.gsf = None if gsf is None else gsf.copy() + self.nld = None if nld is None else nld.copy() + + self.std_fake_nld: Optional[bool] = None # See `normalize` + self.std_fake_gsf: Optional[bool] = None # See `normalize` + + self.res: Optional[ResultsNormalized] = None + + self.multinest_path: Optional[Path] = Path('multinest') + self.multinest_kwargs: dict = {"seed": 65498, "resume": False} + + if path is None: + self.path = None + else: + self.path = Path(path) + self.path.mkdir(exist_ok=True, parents=True) + + def normalize(self, *, num: int = 0, + gsf: Optional[Vector] = None, + nld: Optional[Vector] = None, + normalizer_nld: Optional[NormalizerNLD] = None, + normalizer_gsf: Optional[NormalizerGSF] = None) -> None: + """Perform normalization and saves results to `self.res` + + Args: + num (int, optional): Loop number + gsf (Optional[Vector], optional): gsf before normalization + nld (Optional[Vector], optional): nld before normalization + normalizer_nld (Optional[NormalizerNLD], optional): NormalizerNLD + instance + normalizer_gsf (Optional[NormalizerGSF], optional): NormalizerGSF + instance + """ + if not self.regenerate: + try: + self.load() + return + except FileNotFoundError: + pass + + # reset internal state + self.res = ResultsNormalized(name="Results NLD") + + self.normalizer_nld = copy.deepcopy(self.self_if_none(normalizer_nld)) + self.normalizer_gsf = copy.deepcopy(self.self_if_none(normalizer_gsf)) + for norm in [self.normalizer_nld, self.normalizer_gsf]: + norm._save_instance = False + norm.regenerate = True + + self.LOG.debug("Setting NLD and GSF, convert to MeV and removing nan") + gsf = self.self_if_none(gsf) + gsf = gsf.copy() + gsf.to_MeV() + gsf.cut_nan() + + nld = self.self_if_none(nld) + nld = nld.copy() + nld.to_MeV() + nld.cut_nan() + + # Need to give some sort of standard deviation for sensible results + # Otherwise deviations at higher level density will have an + # uncreasonably high weight. + if self.std_fake_nld is None: + self.std_fake_nld = False + if self.std_fake_nld or nld.std is None: + self.std_fake_nld = True + nld.std = nld.values * 0.3 # x% is an arb. choice + if self.std_fake_gsf or gsf.std is None: + self.std_fake_gsf = True + gsf.std = gsf.values * 0.3 # x% is an arb. 
choice + + # update + self.normalizer_nld.nld = nld # update before initial guess + self.normalizer_gsf.gsf_in = gsf # update before initial guess + + # Use DE to get an inital guess before optimizing + args_nld, guess = self.initial_guess() + # Optimize using multinest + popt, samples = self.optimize(num, args_nld, guess) + + self.res.pars = popt + self.res.samples = samples + + # reset + if self.std_fake_nld is True: + self.std_fake_nld = None + nld.std = None + if self.std_fake_gsf is True: + self.std_fake_gsf = None + gsf.std = None + + self.res.nld = nld.transform(self.res.pars["A"][0], + self.res.pars["alpha"][0], inplace=False) + self.res.gsf = gsf.transform(self.res.pars["B"][0], + self.res.pars["alpha"][0], inplace=False) + + self.normalizer_gsf.model_low.autorange(self.res.gsf) + self.normalizer_gsf.model_high.autorange(self.res.gsf) + self.normalizer_gsf.extrapolate(self.res.gsf) + self.res.gsf_model_low = self.normalizer_gsf.model_low + self.res.gsf_model_high = self.normalizer_gsf.model_high + for model in [self.res.gsf_model_low, self.res.gsf_model_high]: + model.shift_after = model.shift + + self.save() # save instance + + def initial_guess(self) -> None: + """ Find an inital guess for normalization parameters + + Uses guess of normalizer_nld and corresponding normalization of gsf + + Returns: + The arguments used for chi^2 minimization and the + minimizer. + """ + normalizer_nld = self.normalizer_nld + normalizer_gsf = self.normalizer_gsf + + args_nld, guess = normalizer_nld.initial_guess() + #[A, alpha, T, Eshift] = [guess["A"], guess["alpha"], + # guess["T"], guess["Eshift"]] + [A, alpha] = [guess["A"], guess["alpha"]] + + nld = normalizer_nld.nld.transform(A, alpha, inplace=False) + nld_model = lambda E: normalizer_nld.ct_model(E, A=A, alpha=alpha) # noqa + + normalizer_gsf.normalize(nld=nld, nld_model=nld_model, alpha=alpha) + guess["B"] = normalizer_gsf.res.pars["B"][0] + + guess_print = copy.deepcopy(guess) + self.LOG.info("DE results/initial guess:\n%s", + tt.to_string([list(guess_print.values())], + header=['A', 'α [MeV⁻¹]', 'B'])) + + return args_nld, guess + + def optimize(self, num: int, + args_nld: Iterable, + guess: Dict[str, float]) -> Tuple[Dict[str, Tuple[float, float]], Dict[str, List[float]]]: # noqa + """Find parameters given model constraints and an initial guess + + Employs Multinest. + + Args: + num (int): Loop number + args_nld (Iterable): Additional arguments for the nld lnlike + guess (Dict[str, float]): The initial guess of the parameters + + Returns: + Tuple: + - popt (Dict[str, Tuple[float, float]]): Median and 1sigma of the + parameters + - samples (Dict[str, List[float]]): Multinest samplesø. + Note: They are still importance weighted, not random draws + from the posterior. + + Raises: + ValueError: Invalid parameters for automatix prior + + Note: + You might want to adjust the priors for your specific case! Here + we just propose a general solution that might often work out of + the box. 
+ """ + if guess['alpha'] < 0: + raise NotImplementedError("Prior selection not implemented for " + "α < 0") + alpha_exponent = np.log10(guess['alpha']) + + #if guess['T'] < 0: + # raise ValueError("Prior selection not implemented for T < 0; " + # "negative temperature is unphysical") + #T_exponent = np.log10(guess['T']) + + A = guess['A'] + B = guess["B"] + + # truncations from absolute values + lower_A, upper_A = 0., np.inf + mu_A, sigma_A = A, 10*A + a_A = (lower_A - mu_A) / sigma_A + b_A = (upper_A - mu_A) / sigma_A + + lower_Eshift, upper_Eshift = -5., 5 + mu_Eshift, sigma_Eshift = 0, 5 + a_Eshift = (lower_Eshift - mu_Eshift) / sigma_Eshift + b_Eshift = (upper_Eshift - mu_Eshift) / sigma_Eshift + + lower_B, upper_B = 0., np.inf + mu_B, sigma_B = B, 10*B + a_B = (lower_B - mu_B) / sigma_B + b_B = (upper_B - mu_B) / sigma_B + + def prior(cube, ndim, nparams): + # NOTE: You may want to adjust this for your case! + # truncated normal prior + cube[0] = truncnorm_ppf(cube[0], a_A, b_A)*sigma_A + mu_A + + # log-uniform prior + # if alpha = 1e2, it's between 1e1 and 1e3 + cube[1] = 10**(cube[1]*2 + (alpha_exponent-1)) + # log-uniform prior + # if T = 1e2, it's between 1e1 and 1e3 + #cube[2] = 10**(cube[2]*2 + (T_exponent-1)) + # truncated normal prior + #cube[3] = truncnorm_ppf(cube[3], a_Eshift, + # b_Eshift)*sigma_Eshift + mu_Eshift + # truncated normal prior + cube[2] = truncnorm_ppf(cube[2], a_B, b_B)*sigma_B + mu_B + + #if np.isinf(cube[3]): + # self.LOG.debug("Encountered inf in cube[3]:\n%s", cube[3]) + + def loglike(cube, ndim, nparams): + return self.lnlike(cube, args_nld=args_nld) + + # parameters are changed in the lnlike + norm_pars_org = copy.deepcopy(self.normalizer_gsf.norm_pars) + + self.multinest_path.mkdir(exist_ok=True) + path = self.multinest_path / f"sim_norm_{num}_" + assert len(str(path)) < 60, "Total path length too long for multinest" + + self.LOG.info("Starting multinest: ") + self.LOG.debug("with following keywords %s:", self.multinest_kwargs) + # Hack where stdout from Multinest is redirected as info messages + self.LOG.write = lambda msg: (self.LOG.info(msg) if msg != '\n' + else None) + + with redirect_stdout(self.LOG): + pymultinest.run(loglike, prior, len(guess), + outputfiles_basename=str(path), + **self.multinest_kwargs) + + # Save parameters for analyzer + names = list(guess.keys()) + json.dump(names, open(str(path) + 'params.json', 'w')) + analyzer = pymultinest.Analyzer(len(guess), + outputfiles_basename=str(path)) + + stats = analyzer.get_stats() + + samples = analyzer.get_equal_weighted_posterior()[:, :-1] + samples = dict(zip(names, samples.T)) + + T = self.normalizer_nld.coefs[0] + samples['alpha'] + Eshift = -T*(np.log(T*samples['A']) + self.normalizer_nld.coefs[1]) + samples['T'] = T + samples['Eshift'] = Eshift + + names = ['A', 'alpha', 'T', 'Eshift', 'B'] + Bstats = copy.deepcopy(stats['marginals'][-1]) + del stats['marginals'][-1] + stats['marginals'].append({'1sigma': (np.quantile(T, 0.16), + np.quantile(T, 0.84)), + 'median': np.quantile(T, 0.5)}) + stats['marginals'].append({'1sigma': (np.quantile(Eshift, 0.16), + np.quantile(Eshift, 0.84)), + 'median': np.quantile(Eshift, 0.5)}) + stats['marginals'].append(Bstats) + + # Format the output + popt = dict() + vals = [] + for name, m in zip(names, stats['marginals']): + lo, hi = m['1sigma'] + med = m['median'] + sigma = (hi - lo) / 2 + popt[name] = (med, sigma) + i = max(0, int(-np.floor(np.log10(sigma))) + 1) + fmt = '%%.%df' % i + fmts = '\t'.join([fmt + " ± " + fmt]) + vals.append(fmts % (med, 
sigma)) + + self.LOG.info("Multinest results:\n%s", tt.to_string([vals], + header=['A', 'α [MeV⁻¹]', 'T [MeV]', + 'Eshift [MeV]', 'B'])) + + # reset state + self.normalizer_gsf.norm_pars = norm_pars_org + + return popt, samples + + def lnlike(self, x: Tuple[float, float, float, float, float], + args_nld: Iterable) -> float: + """Compute log likelihood of the normalization fitting + + This is the result up to the constant, which is irrelevant for the + maximization + + Args: + x (Tuple[float, float, float, float, float]): The arguments + ordered as A, alpha, T and Eshift, B + args_nld (TYPE): Additional arguments for the nld lnlike + + Returns: + lnlike: log likelihood + """ + A, alpha, B = x[:3] # slicing needed for multinest? + + normalizer_gsf = self.normalizer_gsf + normalizer_nld = self.normalizer_nld + + err_nld = normalizer_nld.lnlike(x[:2], *args_nld) + + nld = normalizer_nld.nld.transform(A, alpha, inplace=False) + nld_model = lambda E: normalizer_nld.ct_model(E, A=A, alpha=alpha) # noqa + + normalizer_gsf.nld_model = nld_model + normalizer_gsf.nld = nld + # calculate the D0-equivalent of T and Eshift used + D0 = normalizer_nld.D0_from_nldSn(nld_model, + **normalizer_nld.norm_pars.asdict()) + normalizer_gsf.norm_pars.D0 = [D0, np.nan] # dummy uncertainty + normalizer_gsf._gsf = normalizer_gsf.gsf_in.transform(B, alpha, + inplace=False) + normalizer_gsf._gsf_low, normalizer_gsf._gsf_high = \ + normalizer_gsf.extrapolate() + err_gsf = normalizer_gsf.lnlike() + return err_nld + err_gsf + + def plot(self, ax: Optional[Any] = None, add_label: bool = True, + add_figlegend: bool = True, + **kwargs) -> Tuple[Any, Any]: + """Plots nld and gsf + + Args: + ax (optional): The matplotlib axis to plot onto. Creates axis + is not provided + add_label (bool, optional):Defaults to `True`. + add_figlegend (bool, optional): Defaults to `True`. + results Optional[ResultsNormalized]: If provided, gsf and model + are taken from here instead. 
diff --git a/ompy/normalizer_simultan_spincut.py b/ompy/normalizer_simultan_spincut.py
new file mode 100644
index 00000000..2f8e0ce8
--- /dev/null
+++ b/ompy/normalizer_simultan_spincut.py
@@ -0,0 +1,442 @@
+import logging
+import numpy as np
+import copy
+import json
+import termtables as tt
+from numpy import ndarray
+from pathlib import Path
+from typing import Optional, Union, Tuple, Any, Callable, Dict, Iterable, List
+import pymultinest
+import matplotlib.pyplot as plt
+from contextlib import redirect_stdout
+
+from .abstract_normalizer import AbstractNormalizer
+from .extractor import Extractor
+from .library import log_interp1d, self_if_none
+from .models import Model, ResultsNormalized, ExtrapolationModelLow,\
+    ExtrapolationModelHigh, NormalizationParameters
+from .normalizer_nld import NormalizerNLD
+from .normalizer_gsf import NormalizerGSF
+from .spinfunctions import SpinFunctions
+from .vector import Vector
+from .stats import truncnorm_ppf, normal_ppf
+
+
+class NormalizerSimultan(AbstractNormalizer):
+
+    """Simultaneous normalization of nld and gsf
+
+    Composed of a `NormalizerNLD` and a `NormalizerGSF` instance; see those
+    classes for details on the individual normalizations.
+
+    Attributes:
+        extractor (Extractor): Extractor instance
+        gsf (Optional[Vector], optional): gsf to normalize
+        multinest_path (Path, optional): Default path where multinest
+            saves files
+        multinest_kwargs (dict): Additional keywords to multinest. Defaults to
+            `{"seed": 65498, "resume": False}`
+        nld (Optional[Vector], optional): nld to normalize
+        normalizer_nld (NormalizerNLD): `NormalizerNLD` instance to get the
+            normalization parameters
+        normalizer_gsf (NormalizerGSF): `NormalizerGSF` instance to get the
+            normalization parameters
+        res (ResultsNormalized): Results
+        std_fake_gsf (bool): Whether the std. deviation is faked
+            (see `normalize`)
+        std_fake_nld (bool): Whether the std. deviation is faked
+            (see `normalize`)
+        path (Path): The path where the results are saved.
+
+
+    TODO:
+        Work with more general models, too, not just CT for nld
+    """
+    LOG = logging.getLogger(__name__)
+    logging.captureWarnings(True)
+
+    def __init__(self, *,
+                 gsf: Optional[Vector] = None,
+                 nld: Optional[Vector] = None,
+                 normalizer_nld: Optional[NormalizerNLD] = None,
+                 normalizer_gsf: Optional[NormalizerGSF] = None,
+                 path: Optional[Union[str, Path]] = 'saved_run/normalizers',
+                 regenerate: bool = False):
+        """
+        TODO:
+            - currently have to set arguments here, and cannot set them in
+              "normalize"
+
+        Args:
+            gsf (optional): see above
+            nld (optional): see above
+            normalizer_nld (optional): see above
+            normalizer_gsf (optional): see above
+
+        """
+        super().__init__(regenerate)
+        if normalizer_nld is None:
+            self.normalizer_nld = None
+        else:
+            self.normalizer_nld = copy.deepcopy(normalizer_nld)
+
+        if normalizer_gsf is None:
+            self.normalizer_gsf = None
+        else:
+            self.normalizer_gsf = copy.deepcopy(normalizer_gsf)
+
+        self.gsf = None if gsf is None else gsf.copy()
+        self.nld = None if nld is None else nld.copy()
+
+        self.std_fake_nld: Optional[bool] = None  # See `normalize`
+        self.std_fake_gsf: Optional[bool] = None  # See `normalize`
+
+        self.res: Optional[ResultsNormalized] = None
+
+        self.multinest_path: Optional[Path] = Path('multinest')
+        self.multinest_kwargs: dict = {"seed": 65498, "resume": False}
+
+        if path is None:
+            self.path = None
+        else:
+            self.path = Path(path)
+            self.path.mkdir(exist_ok=True, parents=True)
+
+    def normalize(self, *, num: int = 0,
+                  gsf: Optional[Vector] = None,
+                  nld: Optional[Vector] = None,
+                  normalizer_nld: Optional[NormalizerNLD] = None,
+                  normalizer_gsf: Optional[NormalizerGSF] = None) -> None:
+        """Perform normalization and saves results to `self.res`
+
+        Args:
+            num (int, optional): Loop number
+            gsf (Optional[Vector], optional): gsf before normalization
+            nld (Optional[Vector], optional): nld before normalization
+            normalizer_nld (Optional[NormalizerNLD], optional): NormalizerNLD
+                instance
+            normalizer_gsf (Optional[NormalizerGSF], optional): NormalizerGSF
+                instance
+        """
+        if not self.regenerate:
+            try:
+                self.load()
+                return
+            except FileNotFoundError:
+                pass
+
+        # reset internal state
+        self.res = ResultsNormalized(name="Results NLD")
+
+        self.normalizer_nld = copy.deepcopy(self.self_if_none(normalizer_nld))
+        self.normalizer_gsf = copy.deepcopy(self.self_if_none(normalizer_gsf))
+        for norm in [self.normalizer_nld, self.normalizer_gsf]:
+            norm._save_instance = False
+            norm.regenerate = True
+
+        self.LOG.debug("Setting NLD and GSF, convert to MeV and removing nan")
+        gsf = self.self_if_none(gsf)
+        gsf = gsf.copy()
+        gsf.to_MeV()
+        gsf.cut_nan()
+
+        nld = self.self_if_none(nld)
+        nld = nld.copy()
+        nld.to_MeV()
+        nld.cut_nan()
+
+        # Need to give some sort of standard deviation for sensible results
+        # Otherwise deviations at higher level density will have an
+        # unreasonably high weight.
+        if self.std_fake_nld is None:
+            self.std_fake_nld = False
+        if self.std_fake_nld or nld.std is None:
+            self.std_fake_nld = True
+            nld.std = nld.values * 0.3  # x% is an arb. choice
+        if self.std_fake_gsf or gsf.std is None:
+            self.std_fake_gsf = True
+            gsf.std = gsf.values * 0.3  # x% is an arb. choice
+
+        # update
+        self.normalizer_nld.nld = nld  # update before initial guess
+        self.normalizer_gsf.gsf_in = gsf  # update before initial guess
+
+        # Use DE to get an initial guess before optimizing
+        args_nld, guess = self.initial_guess()
+        # Optimize using multinest
+        popt, samples = self.optimize(num, args_nld, guess)
+
+        self.res.pars = popt
+        self.res.samples = samples
+
+        # reset
+        if self.std_fake_nld is True:
+            self.std_fake_nld = None
+            nld.std = None
+        if self.std_fake_gsf is True:
+            self.std_fake_gsf = None
+            gsf.std = None
+
+        self.res.nld = nld.transform(self.res.pars["A"][0],
+                                     self.res.pars["alpha"][0], inplace=False)
+        self.res.gsf = gsf.transform(self.res.pars["B"][0],
+                                     self.res.pars["alpha"][0], inplace=False)
+
+        self.normalizer_gsf.model_low.autorange(self.res.gsf)
+        self.normalizer_gsf.model_high.autorange(self.res.gsf)
+        self.normalizer_gsf.extrapolate(self.res.gsf)
+        self.res.gsf_model_low = self.normalizer_gsf.model_low
+        self.res.gsf_model_high = self.normalizer_gsf.model_high
+        for model in [self.res.gsf_model_low, self.res.gsf_model_high]:
+            model.shift_after = model.shift
+
+        self.save()  # save instance
+
+    def initial_guess(self) -> None:
+        """ Find an initial guess for normalization parameters
+
+        Uses guess of normalizer_nld and corresponding normalization of gsf
+
+        Returns:
+            The arguments used for chi^2 minimization and the
+            minimizer.
+        """
+        normalizer_nld = self.normalizer_nld
+        normalizer_gsf = self.normalizer_gsf
+
+        args_nld, guess = normalizer_nld.initial_guess()
+        [A, alpha, T, Eshift] = [guess["A"], guess["alpha"],
+                                 guess["T"], guess["Eshift"]]
+
+        nld = normalizer_nld.nld.transform(A, alpha, inplace=False)
+        nld_model = lambda E: normalizer_nld.model(E, T=T, Eshift=Eshift)  # noqa
+
+        normalizer_gsf.normalize(nld=nld, nld_model=nld_model, alpha=alpha)
+        guess["B"] = normalizer_gsf.res.pars["B"][0]
+
+        guess_print = copy.deepcopy(guess)
+        self.LOG.info("DE results/initial guess:\n%s",
+                      tt.to_string([list(guess_print.values())],
+                                   header=['A', 'α [MeV⁻¹]', 'T [MeV]',
+                                           'Eshift [MeV]', 'B']))
+
+        return args_nld, guess
+
+    def optimize(self, num: int,
+                 args_nld: Iterable,
+                 guess: Dict[str, float]) -> Tuple[Dict[str, Tuple[float, float]], Dict[str, List[float]]]:  # noqa
+        """Find parameters given model constraints and an initial guess
+
+        Employs Multinest.
+
+        Args:
+            num (int): Loop number
+            args_nld (Iterable): Additional arguments for the nld lnlike
+            guess (Dict[str, float]): The initial guess of the parameters
+
+        Returns:
+            Tuple:
+            - popt (Dict[str, Tuple[float, float]]): Median and 1sigma of the
+              parameters
+            - samples (Dict[str, List[float]]): Multinest samples.
+              Note: They are still importance weighted, not random draws
+              from the posterior.
+
+        Raises:
+            ValueError: Invalid parameters for automatic prior
+
+        Note:
+            You might want to adjust the priors for your specific case! Here
+            we just propose a general solution that might often work out of
+            the box.
+ """ + if guess['alpha'] < 0: + raise NotImplementedError("Prior selection not implemented for " + "α < 0") + alpha_exponent = np.log10(guess['alpha']) + + if guess['T'] < 0: + raise ValueError("Prior selection not implemented for T < 0; " + "negative temperature is unphysical") + T_exponent = np.log10(guess['T']) + + A = guess['A'] + B = guess["B"] + + # truncations from absolute values + lower_A, upper_A = 0., np.inf + mu_A, sigma_A = A, 10*A + a_A = (lower_A - mu_A) / sigma_A + b_A = (upper_A - mu_A) / sigma_A + + lower_Eshift, upper_Eshift = -5., 5 + mu_Eshift, sigma_Eshift = 0, 5 + a_Eshift = (lower_Eshift - mu_Eshift) / sigma_Eshift + b_Eshift = (upper_Eshift - mu_Eshift) / sigma_Eshift + + lower_B, upper_B = 0., np.inf + mu_B, sigma_B = B, 10*B + a_B = (lower_B - mu_B) / sigma_B + b_B = (upper_B - mu_B) / sigma_B + + mu_sigmaSn, sigma_sigmaSn = self.normalizer_nld.norm_pars.spincutPars['sigmaSn'] + mu_sigmaD, sigma_sigmaD = self.normalizer_nld.norm_pars.spincutPars['sigmaD'] + + def prior(cube, ndim, nparams): + # NOTE: You may want to adjust this for your case! + # truncated normal prior + cube[0] = truncnorm_ppf(cube[0], a_A, b_A)*sigma_A + mu_A + + # log-uniform prior + # if alpha = 1e2, it's between 1e1 and 1e3 + cube[1] = 10**(cube[1]*2 + (alpha_exponent-1)) + # log-uniform prior + # if T = 1e2, it's between 1e1 and 1e3 + cube[2] = 10**(cube[2]*2 + (T_exponent-1)) + # truncated normal prior + cube[3] = truncnorm_ppf(cube[3], a_Eshift, + b_Eshift)*sigma_Eshift + mu_Eshift + # truncated normal prior + cube[4] = truncnorm_ppf(cube[4], a_B, b_B)*sigma_B + mu_B + + # sigmas + cube[5] = normal_ppf(cube[5])*sigma_sigmaD + mu_sigmaD + cube[6] = normal_ppf(cube[6])*sigma_sigmaSn + mu_sigmaSn + + + if np.isinf(cube[3]): + self.LOG.debug("Encountered inf in cube[3]:\n%s", cube[3]) + + def loglike(cube, ndim, nparams): + return self.lnlike(cube, args_nld=args_nld) + + # parameters are changed in the lnlike + norm_pars_org = copy.deepcopy(self.normalizer_gsf.norm_pars) + + self.multinest_path.mkdir(exist_ok=True) + path = self.multinest_path / f"sim_norm_{num}_" + assert len(str(path)) < 60, "Total path length too long for multinest" + + self.LOG.info("Starting multinest: ") + self.LOG.debug("with following keywords %s:", self.multinest_kwargs) + # Hack where stdout from Multinest is redirected as info messages + self.LOG.write = lambda msg: (self.LOG.info(msg) if msg != '\n' + else None) + + with redirect_stdout(self.LOG): + pymultinest.run(loglike, prior, len(guess)+2, + outputfiles_basename=str(path), + **self.multinest_kwargs) + + # Save parameters for analyzer + names = list(guess.keys()) + ['sigmaD', 'sigmaSn'] + json.dump(names, open(str(path) + 'params.json', 'w')) + analyzer = pymultinest.Analyzer(len(guess), + outputfiles_basename=str(path)) + + stats = analyzer.get_stats() + + samples = analyzer.get_equal_weighted_posterior()[:, :-1] + samples = dict(zip(names, samples.T)) + + # Format the output + popt = dict() + vals = [] + for name, m in zip(names, stats['marginals']): + lo, hi = m['1sigma'] + med = m['median'] + sigma = (hi - lo) / 2 + popt[name] = (med, sigma) + i = max(0, int(-np.floor(np.log10(sigma))) + 1) + fmt = '%%.%df' % i + fmts = '\t'.join([fmt + " ± " + fmt]) + vals.append(fmts % (med, sigma)) + + self.LOG.info("Multinest results:\n%s", tt.to_string([vals], + header=['A', 'α [MeV⁻¹]', 'T [MeV]', + 'Eshift [MeV]', 'B', 'σD', 'σSn'])) + + # reset state + self.normalizer_gsf.norm_pars = norm_pars_org + + return popt, samples + + def lnlike(self, x: Tuple[float, 
+
+    def lnlike(self, x: Tuple[float, float, float, float, float,
+                              float, float],
+               args_nld: Iterable) -> float:
+        """Compute log likelihood of the normalization fitting
+
+        This is the result up to a constant, which is irrelevant for the
+        maximization
+
+        Args:
+            x (Tuple[float, ...]): The arguments ordered as
+                A, alpha, T, Eshift, B, sigmaD, sigmaSn
+            args_nld (TYPE): Additional arguments for the nld lnlike
+
+        Returns:
+            lnlike: log likelihood
+        """
+        A, alpha, T, Eshift, B, sigmaD, sigmaSn = x[:7]  # slicing needed for multinest?
+
+        normalizer_gsf = self.normalizer_gsf
+        normalizer_nld = self.normalizer_nld
+
+        err_nld = normalizer_nld.lnlike_v2(x[:4], *args_nld)
+
+        nld = normalizer_nld.nld.transform(A, alpha, inplace=False)
+        nld_model = lambda E: normalizer_nld.model(E, T=T, Eshift=Eshift)  # noqa
+
+        normalizer_gsf.nld_model = nld_model
+        normalizer_gsf.nld = nld
+
+        # Change to the new sigmaD and sigmaSn
+        normalizer_nld.norm_pars.spincutPars['sigma2_disc'][1] = sigmaD**2
+        normalizer_nld.norm_pars.spincutPars['sigma2_Sn'][1] = sigmaSn**2
+        normalizer_gsf.norm_pars.spincutPars['sigma2_disc'][1] = sigmaD**2
+        normalizer_gsf.norm_pars.spincutPars['sigma2_Sn'][1] = sigmaSn**2
+
+        # calculate the D0-equivalent of T and Eshift used
+        D0 = normalizer_nld.D0_from_nldSn(nld_model,
+                                          **normalizer_gsf.norm_pars.asdict())
+        normalizer_gsf.norm_pars.D0 = [D0, np.nan]  # dummy uncertainty
+        normalizer_gsf._gsf = normalizer_gsf.gsf_in.transform(B, alpha,
+                                                              inplace=False)
+        normalizer_gsf._gsf_low, normalizer_gsf._gsf_high = \
+            normalizer_gsf.extrapolate()
+        err_gsf = normalizer_gsf.lnlike()
+        return err_nld + err_gsf
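The `sigma2_disc`/`sigma2_Sn` entries mutated above appear to be `[Ex, σ²]` anchor points for a spin-cutoff model that interpolates between the discrete-level region and Sn. That model is not part of this diff, so the following is only a sketch of the assumed interpolation, with invented anchor values:

```python
# Sketch of the sigma²(Ex) behaviour implied by the [Ex, sigma²] anchors
# mutated above. The real ompy model may differ; numbers are invented.
import numpy as np

def sigma2_interp(Ex, sigma2_disc=(1.5, 9.0), sigma2_Sn=(8.0, 36.0)):
    """Linear interpolation of the spin-cutoff squared between two anchors."""
    (E1, s1), (E2, s2) = sigma2_disc, sigma2_Sn
    return s1 + (Ex - E1) * (s2 - s1) / (E2 - E1)

print(sigma2_interp(np.array([2.0, 5.0, 8.0])))
```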
+
+    def plot(self, ax: Optional[Any] = None, add_label: bool = True,
+             add_figlegend: bool = True,
+             **kwargs) -> Tuple[Any, Any]:
+        """Plots nld and gsf
+
+        Args:
+            ax (optional): The matplotlib axis to plot onto. Creates axis
+                if not provided
+            add_label (bool, optional): Defaults to `True`.
+            add_figlegend (bool, optional): Defaults to `True`.
+            results Optional[ResultsNormalized]: If provided, gsf and model
+                are taken from here instead.
+            **kwargs: kwargs for plot
+
+        Returns:
+            fig, ax
+        """
+        if ax is None:
+            fig, ax = plt.subplots(1, 2, constrained_layout=True)
+        else:
+            fig = ax[0].figure
+
+        self.normalizer_nld.plot(ax=ax[0], add_label=True, results=self.res,
+                                 add_figlegend=False, **kwargs)
+        self.normalizer_gsf.plot(ax=ax[1], add_label=False, results=self.res,
+                                 add_figlegend=False, **kwargs)
+
+        ax[0].set_title("Level density")
+        ax[1].set_title(r"$\gamma$SF")
+
+        if add_figlegend:
+            fig.legend(loc=9, ncol=4, frameon=True)
+            fig.subplots_adjust(left=0.1, right=0.9, top=0.8, bottom=0.1)
+
+        return fig, ax
+
+    def self_if_none(self, *args, **kwargs):
+        """ wrapper for lib.self_if_none """
+        return self_if_none(self, *args, **kwargs)
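All of these normalizer variants assume the constant-temperature (CT) level-density model mentioned in the class docstring's TODO. For reference, a sketch of its standard form (von Egidy and Bucurescu); this is not ompy's implementation:

```python
# Constant-temperature level density: rho_CT(E) = exp((E - Eshift)/T) / T,
# with E, T and Eshift in MeV. A reference sketch only.
import numpy as np

def nld_ct(E, T, Eshift):
    return np.exp((E - Eshift) / T) / T

print(nld_ct(np.array([2.0, 4.0, 6.0]), T=0.6, Eshift=-0.5))
```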
diff --git a/ompy/normalizer_simultan_v2.py b/ompy/normalizer_simultan_v2.py
new file mode 100644
index 00000000..dcf4750e
--- /dev/null
+++ b/ompy/normalizer_simultan_v2.py
@@ -0,0 +1,438 @@
+import logging
+import numpy as np
+import copy
+import json
+import termtables as tt
+from numpy import ndarray
+from pathlib import Path
+from typing import Optional, Union, Tuple, Any, Callable, Dict, Iterable, List
+import pymultinest
+import matplotlib.pyplot as plt
+from contextlib import redirect_stdout
+
+from .abstract_normalizer import AbstractNormalizer
+from .extractor import Extractor
+from .library import log_interp1d, self_if_none
+from .models import Model, ResultsNormalized, ExtrapolationModelLow,\
+    ExtrapolationModelHigh, NormalizationParameters
+from .normalizer_nld import NormalizerNLD
+from .normalizer_gsf import NormalizerGSF
+from .spinfunctions import SpinFunctions
+from .vector import Vector
+from .stats import truncnorm_ppf, normal_ppf
+
+
+class NormalizerSimultan(AbstractNormalizer):
+
+    """Simultaneous normalization of nld and gsf
+
+    Composed of a `NormalizerNLD` and a `NormalizerGSF` instance; see those
+    classes for details on the individual normalizations.
+
+    Attributes:
+        extractor (Extractor): Extractor instance
+        gsf (Optional[Vector], optional): gsf to normalize
+        multinest_path (Path, optional): Default path where multinest
+            saves files
+        multinest_kwargs (dict): Additional keywords to multinest. Defaults to
+            `{"seed": 65498, "resume": False}`
+        nld (Optional[Vector], optional): nld to normalize
+        normalizer_nld (NormalizerNLD): `NormalizerNLD` instance to get the
+            normalization parameters
+        normalizer_gsf (NormalizerGSF): `NormalizerGSF` instance to get the
+            normalization parameters
+        res (ResultsNormalized): Results
+        std_fake_gsf (bool): Whether the std. deviation is faked
+            (see `normalize`)
+        std_fake_nld (bool): Whether the std. deviation is faked
+            (see `normalize`)
+        path (Path): The path where the results are saved.
+
+
+    TODO:
+        Work with more general models, too, not just CT for nld
+    """
+    LOG = logging.getLogger(__name__)
+    logging.captureWarnings(True)
+
+    def __init__(self, *,
+                 gsf: Optional[Vector] = None,
+                 nld: Optional[Vector] = None,
+                 normalizer_nld: Optional[NormalizerNLD] = None,
+                 normalizer_gsf: Optional[NormalizerGSF] = None,
+                 path: Optional[Union[str, Path]] = 'saved_run/normalizers',
+                 regenerate: bool = False):
+        """
+        TODO:
+            - currently have to set arguments here, and cannot set them in
+              "normalize"
+
+        Args:
+            gsf (optional): see above
+            nld (optional): see above
+            normalizer_nld (optional): see above
+            normalizer_gsf (optional): see above
+
+        """
+        super().__init__(regenerate)
+        if normalizer_nld is None:
+            self.normalizer_nld = None
+        else:
+            self.normalizer_nld = copy.deepcopy(normalizer_nld)
+
+        if normalizer_gsf is None:
+            self.normalizer_gsf = None
+        else:
+            self.normalizer_gsf = copy.deepcopy(normalizer_gsf)
+
+        self.gsf = None if gsf is None else gsf.copy()
+        self.nld = None if nld is None else nld.copy()
+
+        self.std_fake_nld: Optional[bool] = None  # See `normalize`
+        self.std_fake_gsf: Optional[bool] = None  # See `normalize`
+
+        self.res: Optional[ResultsNormalized] = None
+
+        self.multinest_path: Optional[Path] = Path('multinest')
+        self.multinest_kwargs: dict = {"seed": 65498, "resume": False}
+
+        if path is None:
+            self.path = None
+        else:
+            self.path = Path(path)
+            self.path.mkdir(exist_ok=True, parents=True)
+
+    def normalize(self, *, num: int = 0,
+                  gsf: Optional[Vector] = None,
+                  nld: Optional[Vector] = None,
+                  normalizer_nld: Optional[NormalizerNLD] = None,
+                  normalizer_gsf: Optional[NormalizerGSF] = None) -> None:
+        """Perform normalization and saves results to `self.res`
+
+        Args:
+            num (int, optional): Loop number
+            gsf (Optional[Vector], optional): gsf before normalization
+            nld (Optional[Vector], optional): nld before normalization
+            normalizer_nld (Optional[NormalizerNLD], optional): NormalizerNLD
+                instance
+            normalizer_gsf (Optional[NormalizerGSF], optional): NormalizerGSF
+                instance
+        """
+        if not self.regenerate:
+            try:
+                self.load()
+                return
+            except FileNotFoundError:
+                pass
+
+        # reset internal state
+        self.res = ResultsNormalized(name="Results NLD")
+
+        self.normalizer_nld = copy.deepcopy(self.self_if_none(normalizer_nld))
+        self.normalizer_gsf = copy.deepcopy(self.self_if_none(normalizer_gsf))
+        for norm in [self.normalizer_nld, self.normalizer_gsf]:
+            norm._save_instance = False
+            norm.regenerate = True
+
+        self.LOG.debug("Setting NLD and GSF, convert to MeV and removing nan")
+        gsf = self.self_if_none(gsf)
+        gsf = gsf.copy()
+        gsf.to_MeV()
+        gsf.cut_nan()
+
+        nld = self.self_if_none(nld)
+        nld = nld.copy()
+        nld.to_MeV()
+        nld.cut_nan()
+
+        # Need to give some sort of standard deviation for sensible results
+        # Otherwise deviations at higher level density will have an
+        # unreasonably high weight.
+        if self.std_fake_nld is None:
+            self.std_fake_nld = False
+        if self.std_fake_nld or nld.std is None:
+            self.std_fake_nld = True
+            nld.std = nld.values * 0.3  # x% is an arb. choice
+        if self.std_fake_gsf or gsf.std is None:
+            self.std_fake_gsf = True
+            gsf.std = gsf.values * 0.3  # x% is an arb. choice
+
+        # update
+        self.normalizer_nld.nld = nld  # update before initial guess
+        self.normalizer_gsf.gsf_in = gsf  # update before initial guess
+
+        # Use DE to get an initial guess before optimizing
+        args_nld, guess = self.initial_guess()
+        # Optimize using multinest
+        popt, samples = self.optimize(num, args_nld, guess)
+
+        self.res.pars = popt
+        self.res.samples = samples
+
+        # reset
+        if self.std_fake_nld is True:
+            self.std_fake_nld = None
+            nld.std = None
+        if self.std_fake_gsf is True:
+            self.std_fake_gsf = None
+            gsf.std = None
+
+        self.res.nld = nld.transform(self.res.pars["A"][0],
+                                     self.res.pars["alpha"][0], inplace=False)
+        self.res.gsf = gsf.transform(self.res.pars["B"][0],
+                                     self.res.pars["alpha"][0], inplace=False)
+
+        self.normalizer_gsf.model_low.autorange(self.res.gsf)
+        self.normalizer_gsf.model_high.autorange(self.res.gsf)
+        self.normalizer_gsf.extrapolate(self.res.gsf)
+        self.res.gsf_model_low = self.normalizer_gsf.model_low
+        self.res.gsf_model_high = self.normalizer_gsf.model_high
+        for model in [self.res.gsf_model_low, self.res.gsf_model_high]:
+            model.shift_after = model.shift
+
+        self.save()  # save instance
+
+    def initial_guess(self) -> None:
+        """ Find an initial guess for normalization parameters
+
+        Uses guess of normalizer_nld and corresponding normalization of gsf
+
+        Returns:
+            The arguments used for chi^2 minimization and the
+            minimizer.
+        """
+        normalizer_nld = self.normalizer_nld
+        normalizer_gsf = self.normalizer_gsf
+
+        args_nld, guess = normalizer_nld.initial_guess()
+        [A, alpha, T, Eshift] = [guess["A"], guess["alpha"],
+                                 guess["T"], guess["Eshift"]]
+
+        nld = normalizer_nld.nld.transform(A, alpha, inplace=False)
+        nld_model = lambda E: normalizer_nld.model(E, T=T, Eshift=Eshift)  # noqa
+
+        normalizer_gsf.normalize(nld=nld, nld_model=nld_model, alpha=alpha)
+        guess["B"] = normalizer_gsf.res.pars["B"][0]
+
+        guess_print = copy.deepcopy(guess)
+        self.LOG.info("DE results/initial guess:\n%s",
+                      tt.to_string([list(guess_print.values())],
+                                   header=['A', 'α [MeV⁻¹]', 'T [MeV]',
+                                           'Eshift [MeV]', 'B']))
+
+        return args_nld, guess
+
+    def optimize(self, num: int,
+                 args_nld: Iterable,
+                 guess: Dict[str, float]) -> Tuple[Dict[str, Tuple[float, float]], Dict[str, List[float]]]:  # noqa
+        """Find parameters given model constraints and an initial guess
+
+        Employs Multinest.
+
+        Args:
+            num (int): Loop number
+            args_nld (Iterable): Additional arguments for the nld lnlike
+            guess (Dict[str, float]): The initial guess of the parameters
+
+        Returns:
+            Tuple:
+            - popt (Dict[str, Tuple[float, float]]): Median and 1sigma of the
+              parameters
+            - samples (Dict[str, List[float]]): Multinest samples.
+              Note: They are still importance weighted, not random draws
+              from the posterior.
+
+        Raises:
+            ValueError: Invalid parameters for automatic prior
+
+        Note:
+            You might want to adjust the priors for your specific case! Here
+            we just propose a general solution that might often work out of
+            the box.
+ """ + if guess['alpha'] < 0: + raise NotImplementedError("Prior selection not implemented for " + "α < 0") + alpha_exponent = np.log10(guess['alpha']) + + if guess['T'] < 0: + raise ValueError("Prior selection not implemented for T < 0; " + "negative temperature is unphysical") + T_exponent = np.log10(guess['T']) + + A = guess['A'] + B = guess["B"] + + # truncations from absolute values + lower_A, upper_A = 0., np.inf + mu_A, sigma_A = A, 10*A + a_A = (lower_A - mu_A) / sigma_A + b_A = (upper_A - mu_A) / sigma_A + + lower_Eshift, upper_Eshift = -5., 5 + mu_Eshift, sigma_Eshift = 0, 5 + a_Eshift = (lower_Eshift - mu_Eshift) / sigma_Eshift + b_Eshift = (upper_Eshift - mu_Eshift) / sigma_Eshift + + lower_B, upper_B = 0., np.inf + mu_B, sigma_B = B, 10*B + a_B = (lower_B - mu_B) / sigma_B + b_B = (upper_B - mu_B) / sigma_B + + mu_rhoSn, sigma_rhoSn = self.normalizer_nld.norm_pars.rhoSn + Sn = self.normalizer_nld.norm_pars.Sn[0] + + def prior(cube, ndim, nparams): + # NOTE: You may want to adjust this for your case! + # truncated normal prior + cube[0] = truncnorm_ppf(cube[0], a_A, b_A)*sigma_A + mu_A + + # log-uniform prior + # if alpha = 1e2, it's between 1e1 and 1e3 + cube[1] = 10**(cube[1]*2 + (alpha_exponent-1)) + # log-uniform prior + # if T = 1e2, it's between 1e1 and 1e3 + cube[2] = 10**(cube[2]*2 + (T_exponent-1)) + # truncated normal prior + cube[3] = truncnorm_ppf(cube[3], a_Eshift, + b_Eshift)*sigma_Eshift + mu_Eshift + #rhoSn = normal_ppf(cube[3])*sigma_rhoSn + mu_rhoSn + #cube[3] = Sn - cube[2]*np.log(cube[2]*rhoSn) + + # truncated normal prior + cube[4] = truncnorm_ppf(cube[4], a_B, b_B)*sigma_B + mu_B + + if np.isinf(cube[3]): + self.LOG.debug("Encountered inf in cube[3]:\n%s", cube[3]) + + def loglike(cube, ndim, nparams): + return self.lnlike(cube, args_nld=args_nld) + + # parameters are changed in the lnlike + norm_pars_org = copy.deepcopy(self.normalizer_gsf.norm_pars) + + self.multinest_path.mkdir(exist_ok=True) + path = self.multinest_path / f"sim_norm_{num}_" + assert len(str(path)) < 60, "Total path length too long for multinest" + + self.LOG.info("Starting multinest: ") + self.LOG.debug("with following keywords %s:", self.multinest_kwargs) + # Hack where stdout from Multinest is redirected as info messages + self.LOG.write = lambda msg: (self.LOG.info(msg) if msg != '\n' + else None) + + with redirect_stdout(self.LOG): + pymultinest.run(loglike, prior, len(guess), + outputfiles_basename=str(path), + **self.multinest_kwargs) + + # Save parameters for analyzer + names = list(guess.keys()) + json.dump(names, open(str(path) + 'params.json', 'w')) + analyzer = pymultinest.Analyzer(len(guess), + outputfiles_basename=str(path)) + + stats = analyzer.get_stats() + + samples = analyzer.get_equal_weighted_posterior()[:, :-1] + samples = dict(zip(names, samples.T)) + + # Format the output + popt = dict() + vals = [] + for name, m in zip(names, stats['marginals']): + lo, hi = m['1sigma'] + med = m['median'] + sigma = (hi - lo) / 2 + popt[name] = (med, sigma) + i = max(0, int(-np.floor(np.log10(sigma))) + 1) + fmt = '%%.%df' % i + fmts = '\t'.join([fmt + " ± " + fmt]) + vals.append(fmts % (med, sigma)) + + self.LOG.info("Multinest results:\n%s", tt.to_string([vals], + header=['A', 'α [MeV⁻¹]', 'T [MeV]', + 'Eshift [MeV]', 'B'])) + + # reset state + self.normalizer_gsf.norm_pars = norm_pars_org + + return popt, samples + + def lnlike(self, x: Tuple[float, float, float, float, float], + args_nld: Iterable) -> float: + """Compute log likelihood of the normalization fitting + + This is 
+
+    def lnlike(self, x: Tuple[float, float, float, float, float],
+               args_nld: Iterable) -> float:
+        """Compute log likelihood of the normalization fitting
+
+        This is the result up to a constant, which is irrelevant for the
+        maximization
+
+        Args:
+            x (Tuple[float, float, float, float, float]): The arguments
+                ordered as A, alpha, T, Eshift, B
+            args_nld (TYPE): Additional arguments for the nld lnlike
+
+        Returns:
+            lnlike: log likelihood
+        """
+        A, alpha, T, Eshift, B = x[:5]  # slicing needed for multinest?
+
+        normalizer_gsf = self.normalizer_gsf
+        normalizer_nld = self.normalizer_nld
+
+        err_nld = normalizer_nld.lnlike(x[:4], *args_nld)
+
+        nld = normalizer_nld.nld.transform(A, alpha, inplace=False)
+        nld_model = lambda E: normalizer_nld.model(E, T=T, Eshift=Eshift)*np.exp(-alpha)/A  # noqa
+
+        normalizer_gsf.nld_model = nld_model
+        normalizer_gsf.nld = normalizer_nld.nld.copy()  # We do not need to transform.
+        # calculate the D0-equivalent of T and Eshift used
+        D0 = normalizer_nld.D0_from_nldSn(nld_model,
+                                          **normalizer_nld.norm_pars.asdict())
+        normalizer_gsf.norm_pars.D0 = [D0, np.nan]  # dummy uncertainty
+        #normalizer_gsf._gsf = normalizer_gsf.gsf_in.transform(B, alpha,
+        #                                                      inplace=False)
+        #normalizer_gsf._gsf_low, normalizer_gsf._gsf_high = \
+        #    normalizer_gsf.extrapolate()
+        gam_theo = A*B*np.exp(alpha*self.normalizer_nld.norm_pars.Sn[0])*normalizer_gsf.Gg_standard()
+
+        gsf = normalizer_gsf.gsf_in.transform(B, alpha, inplace=False)
+        ln_gsf = -0.5*np.log(gsf.std).sum()
+        # Gaussian log likelihood for <Γγ>; note the squared residual
+        # (the square was missing in the original draft)
+        err_gsf = -0.5*((gam_theo - normalizer_gsf.norm_pars.Gg[0])
+                        / normalizer_gsf.norm_pars.Gg[1])**2
+
+        return err_nld + err_gsf + ln_gsf
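The ⟨Γγ⟩ term above is a plain Gaussian log-likelihood. Isolated with placeholder numbers (113 ± 13 meV is in the ballpark of the Dy164 example used elsewhere in the repository, but treat it as a placeholder):

```python
# The <Γγ> chi-square term in isolation; all values are placeholders.
import numpy as np

def lnlike_Gg(Gg_theo, Gg_exp=113.0, Gg_err=13.0):
    """-(1/2) * ((Γγ_theo - Γγ_exp)/σ)², up to a constant."""
    return -0.5 * ((Gg_theo - Gg_exp) / Gg_err) ** 2

print(lnlike_Gg(120.0))
```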
+
+    def plot(self, ax: Optional[Any] = None, add_label: bool = True,
+             add_figlegend: bool = True,
+             **kwargs) -> Tuple[Any, Any]:
+        """Plots nld and gsf
+
+        Args:
+            ax (optional): The matplotlib axis to plot onto. Creates axis
+                if not provided
+            add_label (bool, optional): Defaults to `True`.
+            add_figlegend (bool, optional): Defaults to `True`.
+            results Optional[ResultsNormalized]: If provided, gsf and model
+                are taken from here instead.
+            **kwargs: kwargs for plot
+
+        Returns:
+            fig, ax
+        """
+        if ax is None:
+            fig, ax = plt.subplots(1, 2, constrained_layout=True)
+        else:
+            fig = ax[0].figure
+
+        self.normalizer_nld.plot(ax=ax[0], add_label=True, results=self.res,
+                                 add_figlegend=False, **kwargs)
+        self.normalizer_gsf.plot(ax=ax[1], add_label=False, results=self.res,
+                                 add_figlegend=False, **kwargs)
+
+        ax[0].set_title("Level density")
+        ax[1].set_title(r"$\gamma$SF")
+
+        if add_figlegend:
+            fig.legend(loc=9, ncol=4, frameon=True)
+            fig.subplots_adjust(left=0.1, right=0.9, top=0.8, bottom=0.1)
+
+        return fig, ax
+
+    def self_if_none(self, *args, **kwargs):
+        """ wrapper for lib.self_if_none """
+        return self_if_none(self, *args, **kwargs)
diff --git a/ompy/response.py b/ompy/response.py
index 10c554bf..c1a75832 100644
--- a/ompy/response.py
+++ b/ompy/response.py
@@ -21,6 +21,11 @@
 from .matrix import Matrix
 from .vector import Vector
 
+# tqdm.auto falls back to a plain text bar outside Jupyter, which avoids
+# the manual JPY_PARENT_PID check (and an implicit dependency on `os` here)
+from tqdm.auto import tqdm
+
 LOG = logging.getLogger(__name__)
 logging.captureWarnings(True)
 
@@ -325,7 +330,7 @@ def interpolate(self,
         R = np.zeros((N_out, N_out))
         # Loop over rows of the response matrix
         # TODO for speedup: Change this to a cython
-        for j, E in enumerate(Eout):
+        for j, E in enumerate(tqdm(Eout)):
             oneSigma = fwhm_abs_array[j] / 2.35
             Egmax = E + 6 * oneSigma
             i_Egmax = min(index(Eout, Egmax), N_out-1)
diff --git a/ompy/spinfunctions.py b/ompy/spinfunctions.py
index ad2c0b0d..89168196 100644
--- a/ompy/spinfunctions.py
+++ b/ompy/spinfunctions.py
@@ -22,11 +22,16 @@ def __init__(self, Ex: Union[float, Sequence], J: Union[float, Sequence],
         self.model = model
         self.pars = pars
 
-    def get_sigma2(self):
+    def get_sigma2(self, Ex: Optional[Union[float, Sequence]] = None,
+                   **kwargs):
         """ Get the square of the spin cut for a specified model """
         model = self.model
         pars = self.pars
 
+        pars = dict(pars)  # copy, so per-call overrides don't leak into self.pars
+        pars['Ex'] = Ex
+        for key, value in kwargs.items():
+            pars[key] = value
+
         if model == "const":
             pars_req = {"sigma"}
             return call_model(self.gconst, pars, pars_req)
@@ -49,7 +54,10 @@ def get_sigma2(self):
             raise TypeError(
                 "\nError: Spincut model not supported; check spelling\n")
 
-    def distribution(self) -> Tuple[float, np.ndarray]:
+    def distribution(self, Ex: Optional[Union[float, Sequence]] = None,
+                     J: Optional[Union[float, Sequence]] = None,
+                     **kwargs
+                     ) -> Tuple[float, np.ndarray]:
         """Get spin distribution
 
         Note: Assuming equal parity
@@ -59,17 +67,19 @@ def distribution(self) -> Tuple[float, np.ndarray]:
             depends on input Ex and J and is squeezed if only one of them is
             an array. If both are arrays: `spinDist[Ex,J]`
         """
-        sigma2 = self.get_sigma2()
+        sigma2 = self.get_sigma2(Ex, **kwargs)
         sigma2 = sigma2[np.newaxis]  # ability to transpose "1D" array
 
-        spinDist = ((2. * self.J + 1.) / (2. * sigma2.T)
-                    * np.exp(-np.power(self.J + 0.5, 2.) / (2. * sigma2.T)))
+        J = self.J if J is None else J
+
+        spinDist = ((2. * J + 1.) / (2. * sigma2.T)
+                    * np.exp(-np.power(J + 0.5, 2.) / (2. * sigma2.T)))
         return np.squeeze(spinDist)  # return 1D if Ex or J is single entry
 
 
     # different spin cut models
     def gconst(self, sigma: float,
-               Ex: Optional[Union[float, Sequence]] = None) -> Union[float, Sequence] :  # noqa
+               Ex: Optional[Union[float, Sequence]] = None) -> Union[float, Sequence]:  # noqa
         """
         Constant spin-cutoff parameter
 
@@ -83,7 +93,7 @@ def gconst(self, sigma: float,
         return np.full_like(Ex, sigma**2)
 
     def gEB05(self, mass: int, NLDa: float, Eshift: float,
-              Ex: Optional[Union[float, Sequence]] = None) -> Union[float, Sequence] :  # noqa
+              Ex: Optional[Union[float, Sequence]] = None) -> Union[float, Sequence]:  # noqa
         """
         Von Egidy & B PRC72,044311(2005), Eq. (4)
         The rigid moment of inertia formula (RMI)
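As a quick sanity check of the formula evaluated in `distribution` above, the spin distribution g(J) = (2J+1)/(2σ²) · exp(−(J+½)²/(2σ²)) sums to approximately one over J when σ² lies well inside the J range:

```python
# Numeric check of the spin distribution used in SpinFunctions.distribution.
import numpy as np

sigma2 = 25.0
J = np.arange(0, 30)
g = (2*J + 1) / (2*sigma2) * np.exp(-(J + 0.5)**2 / (2*sigma2))
print(g.sum())   # ~ 0.995, i.e. close to unit normalization
```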
diff --git a/ompy/vector.py b/ompy/vector.py
index 20975a64..53814e49 100644
--- a/ompy/vector.py
+++ b/ompy/vector.py
@@ -562,6 +562,11 @@ def index(self, E: float) -> int:
         """ Returns the closest index corresponding to the E value """
         return index(self.E, E)
 
+    def indices(self, E: Iterable[float]) -> ndarray:
+        """ Returns the closest indices corresponding to the E values """
+        indices = [self.index(e) for e in E]
+        return np.array(indices)
+
     def __matmul__(self, other: Vector) -> Vector:
         result = self.copy()
         if isinstance(other, Vector):
diff --git a/release_note.md b/release_note.md
index f39db715..b1e45b75 100644
--- a/release_note.md
+++ b/release_note.md
@@ -6,6 +6,11 @@ Added:
 - `fill` attribute for `Matrix`, to easily fill counts in a given bin containing (Ex, Eg).
 - When saving and loading `Vector` from `csv` files one can now pass keyword arguments to the pandas `read_csv()` and `to_csv()` functions.
 - Added a keyword (`units`) to select the energy units when saving a `Vector` to file.
+- Added class `ErrorFinder`. This class uses pyMC3 to estimate the relative uncertainties of the NLD and gSF in an ensemble of NLDs and gSFs.
+- Added attribute `error_finder` to the `Extractor` class. If set, the `Extractor` class will use the `error_finder` object to estimate the relative uncertainties of the extracted NLDs and gSFs. Points that the `ErrorFinder` is unable to estimate will be set to the largest value of those that were successfully estimated.
+- Added dependency `pyMC3>=3.11.2,<4.0`.
+- Added `indices()` to the `Vector` class to get the indices of several x-values. Similar to `Matrix.indices_Eg()` and `Matrix.indices_Ex()`.
+- Added a notebook `uncertanty_by_counts` exploring the new algorithm used to estimate uncertainties before normalization.
 
 Changed:
 - Fixed a bug where the `std` attribute of `Vector` was not saved to file.
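Usage sketch for the new `Vector.indices()` helper added above (the `om.Vector` constructor keywords are assumed to match upstream OMpy):

```python
# Closest-index lookup for several energies at once.
import numpy as np
import ompy as om

vec = om.Vector(values=np.zeros(5), E=np.array([0.0, 0.5, 1.0, 1.5, 2.0]))
print(vec.indices([0.4, 1.6]))   # -> [1, 3], the closest grid points
```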
diff --git a/setup.py b/setup.py
index e99ba10a..db92d882 100755
--- a/setup.py
+++ b/setup.py
@@ -130,6 +130,7 @@ def write_version_py(filename='ompy/version_setup.py'):
 fname = "ompy/decomposition.c"  # otherwise it may not recompile
 if os.path.exists(fname):
     os.remove(fname)
+openmp = False
 
 extra_compile_args = ["-O3", "-ffast-math", "-march=native"]
 extra_link_args = []
@@ -171,7 +172,8 @@ def write_version_py(filename='ompy/version_setup.py'):
     "uncertainties>=3.0.3",
     "tqdm",
     "pathos",
-    "pybind11>=2.6.0"
+    "pybind11>=2.6.0",
+    "pymc"
 ]
 
 setup(name='OMpy',
diff --git a/tests/test_dist.py b/tests/test_dist.py
new file mode 100644
index 00000000..a4f9a76c
--- /dev/null
+++ b/tests/test_dist.py
@@ -0,0 +1,67 @@
+import pytest
+import ompy as om
+import numpy as np
+import pymc3 as pm
+from numpy.testing import assert_equal, assert_allclose
+
+import matplotlib.pyplot as plt
+
+
+@pytest.mark.parametrize(
+    "lam,mu",
+    [(10., 1.),
+     (5., 1.),
+     (100., 2.),
+     (50., 0.5)])
+def test_fermi_dirac(lam, mu):
+
+    prob = om.FermiDirac.dist(lam=lam, mu=mu)
+
+    samples = prob.random(size=1000000)
+
+    hist, bins = np.histogram(samples, bins=100, density=True)
+
+    x = []
+    for n in range(len(bins) - 1):
+        x.append(0.5*(bins[n+1] + bins[n]))
+    x = np.array(x)
+    y = (lam/np.log(1 + np.exp(lam*mu)))/(np.exp(lam*(x - mu)) + 1)
+
+    # We use a fairly large tolerance since the error is stochastic
+    assert_allclose(hist, y, atol=0.1)
+
+
+@pytest.mark.parametrize(
+    "lam,mu",
+    [(10., 1.),
+     (5., 1.),
+     (100., 2.),
+     (50., 0.5)])
+def test_fermi_dirac_logp(lam, mu):
+
+    prob = om.FermiDirac.dist(lam=lam, mu=mu)
+
+    x = np.linspace(0, 3, 1001)
+    y = lam/np.log(1 + np.exp(lam*mu)) * 1/(np.exp(lam*(x - mu)) + 1)
+
+    y_r = np.exp(prob.logp(x).eval())
+    assert_allclose(y, y_r)
+
+
+@pytest.mark.parametrize(
+    "lam,mu",
+    [(10., 1.),
+     (5., 1.),
+     (100., 2.),
+     (50., 0.5)])
+def test_fermi_dirac_logcdf(lam, mu):
+
+    prob = om.FermiDirac.dist(lam=lam, mu=mu)
+    x = np.linspace(0, 3, 1001)
+
+    y = 1 - np.log(1 + np.exp(-lam*(x - mu)))/np.log(1 + np.exp(lam*mu))
+    y_r = np.exp(prob.logcdf(x).eval())
+    assert_allclose(y, y_r)
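The closed forms these tests check against can be verified independently of the pymc3 implementation: the pdf integrates to one on x ≥ 0, and the cdf runs from 0 to 1:

```python
# Numpy-only check of the Fermi-Dirac pdf and cdf used in tests/test_dist.py.
import numpy as np

lam, mu = 10.0, 1.0
x = np.linspace(0, 10, 100001)
pdf = (lam / np.log(1 + np.exp(lam * mu))) / (np.exp(lam * (x - mu)) + 1)
cdf = 1 - np.log(1 + np.exp(-lam * (x - mu))) / np.log(1 + np.exp(lam * mu))

print(np.trapz(pdf, x))    # ~ 1.0: normalized on x >= 0
print(cdf[0], cdf[-1])     # ~ 0.0 and ~ 1.0
```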
diff --git a/tests/test_error_finder.py b/tests/test_error_finder.py
new file mode 100644
index 00000000..056c7446
--- /dev/null
+++ b/tests/test_error_finder.py
@@ -0,0 +1,246 @@
+import pytest
+import ompy as om
+import numpy as np
+import pymc3 as pm
+from scipy.interpolate import interp1d
+from numpy.testing import assert_equal, assert_allclose
+
+
+def generate_data(resp_path):
+
+    Ex_min = 4400  # keV
+    Ex_max = 7700  # keV
+    Eg_min = 1300  # keV
+    Eg_max = Ex_max + 200  # keV
+    ensemble_size = 10
+    regenerate = False
+
+    raw = om.example_raw('Dy164')
+    raw.cut_diagonal(E1=(800, 0), E2=(7500, 7300))
+    raw.cut('Ex', 0, 8400)
+
+    try:
+        trapezoid_cut = om.Action('matrix')
+        trapezoid_cut.trapezoid(Ex_min=Ex_min, Ex_max=Ex_max,
+                                Eg_min=Eg_min, Eg_max=Eg_max, inplace=True)
+        ensemble = om.Ensemble(raw=raw)
+        ensemble.unfolder = om.Unfolder(response=raw)
+        ensemble.first_generation_method = om.FirstGeneration()
+        ensemble.generate(ensemble_size, regenerate=regenerate)
+        extractor = om.Extractor(ensemble=ensemble)
+        extractor.trapezoid = trapezoid_cut
+        extractor.extract_from(regenerate=regenerate)
+        return extractor.nld, extractor.gsf
+    except Exception:
+        pass
+
+    Eg = raw.Eg
+    fwhm_abs = 90.44  # (90/1330 = 6.8%)
+    response = om.Response(resp_path)
+    R_ompy_unf, R_tab_unf = response.interpolate(Eg, fwhm_abs=fwhm_abs/10,
+                                                 return_table=True)
+
+    fthreshold = interp1d([30., 80., 122., 183., 244., 294., 344.,
+                           562., 779., 1000.],
+                          [0.0, 0.0, 0.0, 0.06, 0.44, 0.60, 0.87,
+                           0.99, 1.00, 1.00],
+                          fill_value="extrapolate")
+
+    def apply_detector_threshold(response, table, fthreshold):
+        thres = fthreshold(response.Eg)
+        response.values = response.values * thres
+        # renormalize
+        response.values = om.div0(response.values,
+                                  response.values.sum(axis=1)[:, np.newaxis])
+        table["eff_tot"] *= thres
+    apply_detector_threshold(R_ompy_unf, R_tab_unf, fthreshold)
+
+    unfolder = om.Unfolder(response=R_ompy_unf)
+    firstgen = om.FirstGeneration()
+    unfolder.use_compton_subtraction = True  # default
+    unfolder.response_tab = R_tab_unf
+
+    unfolder.FWHM_tweak_multiplier = {"fe": 1., "se": 1.1,
+                                      "de": 1.3, "511": 0.9}
+
+    trapezoid_cut = om.Action('matrix')
+    trapezoid_cut.trapezoid(Ex_min=Ex_min, Ex_max=Ex_max,
+                            Eg_min=Eg_min, Eg_max=Eg_max, inplace=True)
+    E_rebinned = np.arange(100., 8500, 200)
+    ensemble = om.Ensemble(raw=raw)
+
+    ensemble.unfolder = unfolder
+    ensemble.first_generation_method = firstgen
+    ensemble.generate(ensemble_size, regenerate=regenerate)
+    ensemble.rebin(E_rebinned, member="firstgen")
+
+    extractor = om.Extractor(ensemble=ensemble)
+    extractor.trapezoid = trapezoid_cut
+    extractor.suppress_warning = True
+    extractor.extract_from(regenerate=regenerate)
+
+    return extractor.nld, extractor.gsf
+
+
+def keep_only(vecs):
+    """ Takes a list of vectors and returns a list of vectors
+    where only the points shared between all vectors are returned.
+    """
+
+    energies = {}
+    for vec in vecs:
+        for E in vec.E:
+            if E not in energies:
+                energies[E] = [False] * len(vecs)
+
+    # Next we will add if the point is present or not
+    for n, vec in enumerate(vecs):
+        for E in vec.E:
+            energies[E][n] = True
+
+    keep_energy = []
+    for key in energies:
+        if np.all(energies[key]):
+            keep_energy.append(key)
+
+    vec_all_common = [vec.copy() for vec in vecs]
+    for vec in vec_all_common:
+        E = []
+        values = []
+        for e, value in zip(vec.E, vec.values):
+            if e in keep_energy:
+                E.append(e)
+                values.append(value)
+        vec.E = np.array(E)
+        vec.values = np.array(values)
+
+    return vec_all_common
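A toy check of `keep_only` (the `om.Vector` constructor keywords are assumed; note that energies must match exactly, since they are used as dict keys):

```python
# Only energies present in every member survive keep_only().
import numpy as np
import ompy as om

v1 = om.Vector(values=np.array([1.0, 2.0, 3.0]), E=np.array([0.0, 1.0, 2.0]))
v2 = om.Vector(values=np.array([4.0, 5.0]), E=np.array([1.0, 2.0]))
common = keep_only([v1, v2])
print(common[0].E)   # -> [1.0, 2.0]
```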
+
+
+def condition_data(_nlds, _gsfs):
+    """ Ensures that data are copied and that all values are in correct
+    units. It also checks that all lengths are correct.
+    Args:
+        nlds (List[Vector]): List of the NLDs in an ensemble
+        gsfs (List[Vector]): List of the γSFs in an ensemble
+    Returns:
+        Tuple of nld energy points, gsf energy points and the observed
+        matrix for the NLD and γSF.
+    Raises:
+        IndexError: If members of the ensemble do not have an equal number
+            of points in the NLD and γSF.
+    Warnings:
+        Will raise a warning if there are members in the ensemble that
+        contain one or more NaNs. This is mostly to inform the user and
+        shouldn't be an issue later on.
+    TODO:
+        - Mitigation when cut_nan() results in different length vectors
+          of different members of the ensemble. (e.g. len(nld[0]) = 10,
+          len(nld[1]) = 9).
+    """
+
+    # Ensure that the same number of NLDs and GSFs are provided
+    assert len(_nlds) == len(_gsfs), \
+        "Number of nlds and gsfs is different"
+
+    N = len(_nlds)
+
+    print(f"Processing an ensemble with {N} members")
+
+    # Make copy to ensure that we don't overwrite anything
+    nlds = [nld.copy() for nld in _nlds]
+    gsfs = [gsf.copy() for gsf in _gsfs]
+
+    print(f"Before removing nan: {len(nlds[0])} NLD values and "
+          f"{len(gsfs[0])} GSF values")
+
+    # Ensure that we are in MeV and that there aren't any NaNs
+    # (accumulate the flag so earlier members are not forgotten)
+    is_nan = False
+    for nld, gsf in zip(nlds, gsfs):
+        nld.to_MeV()
+        gsf.to_MeV()
+        is_nan = is_nan or np.isnan(nld.values).any() \
+            or np.isnan(gsf.values).any()
+        nld.cut_nan()
+        gsf.cut_nan()
+    if is_nan:
+        print("NLDs and/or γSFs contain NaNs. They will be removed")
+
+    # Next we will ensure that all members have all energies
+    if (not om.error_finder.all_equal([len(nld) for nld in nlds]) or
+            not om.error_finder.all_equal([len(gsf) for gsf in gsfs])):
+        print("Some members of the ensemble have different lengths. "
+              "Consider re-binning or changing limits.")
+        nlds = keep_only(nlds)
+        gsfs = keep_only(gsfs)
+
+    print(f"After removing nan: {len(nlds[0])} NLD values and "
+          f"{len(gsfs[0])} GSF values")
+
+    # Next we can extract the important parts
+    E_nld = nlds[0].E.copy()
+    E_gsf = gsfs[0].E.copy()
+
+    M_nld = len(E_nld)
+    M_gsf = len(E_gsf)
+
+    nld_obs = []
+    gsf_obs = []
+
+    idx_nld = np.tile(np.arange(M_nld, dtype=int), (N-1, 1))
+    idx_gsf = np.tile(np.arange(M_gsf, dtype=int), (N-1, 1))
+
+    for n, (nld, gsf) in enumerate(zip(nlds, gsfs)):
+
+        # Make a copy of the values arrays
+        nld_array = [nld.values.copy() for nld in nlds]
+        gsf_array = [gsf.values.copy() for gsf in gsfs]
+
+        del nld_array[n]
+        del gsf_array[n]
+
+        nld_array = np.array(nld_array)
+        gsf_array = np.array(gsf_array)
+
+        # Set the observed data
+        nld_obs.append(nld.values[idx_nld]/nld_array)
+        gsf_obs.append(gsf.values[idx_gsf]/gsf_array)
+
+    nld_obs = np.array(nld_obs)
+    gsf_obs = np.array(gsf_obs)
+
+    idx_coef_nld = np.repeat(np.arange(N-1), M_nld).reshape(N-1, M_nld)
+    idx_coef_gsf = np.repeat(np.arange(N-1), M_gsf).reshape(N-1, M_gsf)
+
+    idx_vals_nld = np.array([idx_nld] * N)
+    idx_vals_gsf = np.array([idx_gsf] * N)
+
+    return E_nld, E_gsf, nld_obs, gsf_obs, \
+        idx_coef_nld, idx_coef_gsf, idx_vals_nld, idx_vals_gsf
+
+
+def test_error_finder():
+    nlds, gsfs = None, None
+    try:
+        nlds, gsfs = generate_data("../OCL_response_functions/nai2012_for_opt13")
+    except Exception:
+        nlds, gsfs = generate_data("OCL_response_functions/nai2012_for_opt13")
+
+    E_nld, E_gsf, nld_obs, gsf_obs, \
+        idx_coef_nld, idx_coef_gsf, idx_vals_nld, idx_vals_gsf = \
+        condition_data(nlds, gsfs)
+
+    N = len(nlds)
+    e_nld, q_nld = om.error_finder.format_data(nlds)
+    e_gsf, q_gsf = om.error_finder.format_data(gsfs)
+
+    M_nld = len(e_nld)
+    M_gsf = len(e_gsf)
+
+    c_mask_nld, v_mask_nld = om.error_finder.format_mask(N, M_nld)
+    c_mask_gsf, v_mask_gsf = om.error_finder.format_mask(N, M_gsf)
+
+    assert_equal(e_nld, E_nld)
+    assert_equal(e_gsf, E_gsf)
+
+    assert_equal(q_nld, nld_obs)
+    assert_equal(q_gsf, gsf_obs)
+
+    assert_equal(c_mask_nld, idx_coef_nld)
+    assert_equal(v_mask_nld, idx_vals_nld)
+
+    assert_equal(c_mask_gsf, idx_coef_gsf)
+    assert_equal(v_mask_gsf, idx_vals_gsf)
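Finally, the leave-one-out ratio observable that `condition_data` builds, reduced to a 3-member, 2-bin toy ensemble so its shape is easy to inspect:

```python
# Each member is divided point-wise by every other member, giving an
# (N, N-1, M) array of ratios, as in condition_data().
import numpy as np

values = np.array([[1.0, 2.0], [1.1, 1.9], [0.9, 2.2]])   # 3 members, 2 bins
N, M = values.shape
idx = np.tile(np.arange(M), (N - 1, 1))

obs = []
for n in range(N):
    others = np.delete(values, n, axis=0)   # drop member n
    obs.append(values[n][idx] / others)     # ratios against the rest
obs = np.array(obs)

print(obs.shape)   # (3, 2, 2)
print(obs[0])      # member 0 divided by members 1 and 2
```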