updated things + removed limit distr + new monteC.
victorballester7 committed Oct 5, 2023
1 parent b8a177c commit bbbd049
Showing 8 changed files with 215 additions and 18 deletions.
19 changes: 9 additions & 10 deletions .github/workflows/buildpdf.yml
@@ -177,21 +177,21 @@ jobs:
with:
root_file: Advanced_topics_in_functional_analysis_and_PDEs.tex
working_directory: Mathematics/5th/Advanced_topics_in_functional_analysis_and_PDEs/
- name: Compile - IEPDE
uses: xu-cheng/latex-action@v2
with:
root_file: Introduction_to_evolution_PDEs.tex
working_directory: Mathematics/5th/Introduction_to_evolution_PDEs/
# - name: Compile - IEPDE
# uses: xu-cheng/latex-action@v2
# with:
# root_file: Introduction_to_evolution_PDEs.tex
# working_directory: Mathematics/5th/Introduction_to_evolution_PDEs/
- name: Compile - INEPDE
uses: xu-cheng/latex-action@v2
with:
root_file: Introduction_to_nonlinear_elliptic_PDEs.tex
working_directory: Mathematics/5th/Introduction_to_nonlinear_elliptic_PDEs/
- name: Compile - LTLD
- name: Compile - MM
uses: xu-cheng/latex-action@v2
with:
root_file: Limit_theorems_and_large_deviations.tex
working_directory: Mathematics/5th/Limit_theorems_and_large_deviations/
root_file: Montecarlo_methods.tex
working_directory: Mathematics/5th/Montecarlo_methods/
- name: Compile - SC
uses: xu-cheng/latex-action@v2
with:
@@ -278,9 +278,8 @@ jobs:
Mathematics/5th/Advanced_dynamical_systems/Advanced_dynamical_systems.pdf
Mathematics/5th/Advanced_probability/Advanced_probability.pdf
Mathematics/5th/Advanced_topics_in_functional_analysis_and_PDEs/Advanced_topics_in_functional_analysis_and_PDEs.pdf
Mathematics/5th/Introduction_to_evolution_PDEs/Introduction_to_evolution_PDEs.pdf
Mathematics/5th/Introduction_to_nonlinear_elliptic_PDEs/Introduction_to_nonlinear_elliptic_PDEs.pdf
Mathematics/5th/Limit_theorems_and_large_deviations/Limit_theorems_and_large_deviations.pdf
Mathematics/5th/Montecarlo_methods/Montecarlo_methods.pdf
Mathematics/5th/Stochastic_calculus/Stochastic_calculus.pdf
main_physics.pdf
Physics/Basic/Electricity_and_magnetism/Electricity_and_magnetism.pdf
4 changes: 2 additions & 2 deletions Mathematics/3rd/Probability/Probability.tex
@@ -1149,7 +1149,7 @@
\item $aX_n\overset{\text{d}}{\longrightarrow}aX$
\end{enumerate}
\end{corollary}
\begin{theorem}[Slutsky's theorem]
\begin{theorem}[Slutsky's theorem]\label{P:slutsky}
Let $(\Omega,\mathcal{A},\Prob)$ be a probability space, $(X_n)$, $(Y_n)$ be sequences of random variables and $X$ be a random variable and $a\in\RR$ such that $X_n\overset{\text{d}}{\longrightarrow} X$ and $Y_n\overset{\text{d}}{\longrightarrow} a$. Then:
\begin{enumerate}
\item $X_n+Y_n\overset{\text{d}}{\longrightarrow} X+ a$
@@ -1242,7 +1242,7 @@
Let $(\Omega,\mathcal{A},\Prob)$ be a probability space and $(X_n)$ be a sequence of \iid random variables with finite 2nd moments. Let $\mu:=\Exp(X_1)$ and $\sigma^2:=\Var(X_1)$. Then: $$\frac{S_n-n\mu}{\sigma\sqrt{n}}\overset{\text{d}}{\longrightarrow} Z$$
where $Z\sim N(0,1)$.
\end{theorem}
\begin{theorem}[Lyapunov central limit theorem]
\begin{theorem}[Lyapunov central limit theorem]\label{P:central_limit_thm}
Let $(\Omega,\mathcal{A},\Prob)$ be a probability space and $(X_n)$ be a sequence of independent random variables each with finite expectation $\mu_i:=\Exp(X_i)$ and variance ${\sigma_i}^2:=\Var(X_i)$, and let ${s_n}^2:=\sum_{i=1}^n{\sigma_i}^2$. Assume that Lyapunov's condition holds: $\exists\delta>0$ such that $\lim_{n\to\infty}\frac{1}{{s_n}^{2+\delta}}\sum_{i=1}^n\Exp\left(\abs{X_i-\mu_i}^{2+\delta}\right)=0$. Then: $$\frac{\sum_{i=1}^n(X_i-\mu_i)}{s_n}\overset{\text{d}}{\longrightarrow} Z$$
where $Z\sim N(0,1)$.
\end{theorem}
2 changes: 1 addition & 1 deletion Mathematics/3rd/Statistics/Statistics.tex
@@ -312,7 +312,7 @@
\end{proposition}
\subsubsection{Confidence intervals for the relative frequency}
\begin{proposition}
Let $(\mathcal{X},\mathcal{F},\{X_1,\ldots,X_n\sim\text{Bern}(p)\ \text{i.i.d.}:p\in(0,1)\})$ be a parametric statistical model, $\vf{x}_n\in\mathcal{X}$ be a realization of $(X_1,\ldots,X_n)$ and $\alpha\in[0,1]$. Let $\hat{p}=\overline{x}_n$. Then, an asymptotic confidence interval for $p$ of confidence level $1-\alpha$ is:
Let $(\mathcal{X},\mathcal{F},\{X_1,\ldots,X_n\sim\text{Ber}(p)\ \text{i.i.d.}:p\in(0,1)\})$ be a parametric statistical model, $\vf{x}_n\in\mathcal{X}$ be a realization of $(X_1,\ldots,X_n)$ and $\alpha\in[0,1]$. Let $\hat{p}=\overline{x}_n$. Then, an asymptotic confidence interval for $p$ of confidence level $1-\alpha$ is:
$$p\in\left(\hat{p}-z_{1-\frac{\alpha}{2}}\sqrt{\frac{\hat{p}(1-\hat{p})}{n}},\hat{p}+z_{1-\frac{\alpha}{2}}\sqrt{\frac{\hat{p}(1-\hat{p})}{n}}\right)$$
\end{proposition}
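As an illustration of the interval above, a minimal Python sketch (not part of the diff; it uses only the standard library, and the helper name bernoulli_ci is made up for the example):

import math
from statistics import NormalDist

def bernoulli_ci(x, alpha=0.05):
    """Asymptotic (1 - alpha) confidence interval for p from a 0/1 sample x."""
    n = len(x)
    p_hat = sum(x) / n
    z = NormalDist().inv_cdf(1 - alpha / 2)            # z_{1 - alpha/2}
    half = z * math.sqrt(p_hat * (1 - p_hat) / n)
    return p_hat - half, p_hat + half

# e.g. 420 successes out of 1000 Bernoulli trials
print(bernoulli_ci([1] * 420 + [0] * 580))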
\subsubsection{Confidence intervals for \texorpdfstring{$N(\mu,\sigma^2)$}{N(mu,sigma2)}}
@@ -75,5 +75,77 @@
x=(0,1,\ldots,m-1,10,\ldots,1(m-1),20,\ldots,2(m-1),\ldots)
$$
\end{proof}
\subsubsection{A hyperbolic automorphism of \texorpdfstring{$T^2$}{T2}}
\begin{proposition}
Consider $\vf{A}=\begin{pmatrix}
2 & 1 \\
1 & 1
\end{pmatrix}\in \GL_2(\RR)$. Then, $\vf{A}(\ZZ^2)=\ZZ^2$ and this induces an automorphism $\vf{\tilde{A}}$ of $T^2=\quot{\RR^2}{\ZZ^2}$.
\end{proposition}
\begin{definition}
We denote by $\Per\vf{\tilde{A}}$ the set of periodic points of $\vf{\tilde{A}}$, that is, the points $\vf{x}\in T^2$ such that $\vf{\tilde{A}}^k(\vf{x})=\vf{x}$ for some $k\in\NN$.
\end{definition}
\begin{lemma}
$\Per\vf{\tilde{A}}=\quot{\QQ^2}{\ZZ^2}$. Thus, $\Per\vf{\tilde{A}}$ is dense in $T^2$.
\end{lemma}
\begin{proof}
Let $\vf{x}\in \Per\vf{\tilde{A}}$. Then, $\exists k\in\NN$ and $\vf{n}\in\ZZ^2$ such that $\vf{A}^k\vf{x}=\vf{x}+\vf{n}$. One can easily check that $\sigma(\vf{\tilde{A}})=\left\{\frac{3}{2}\pm \frac{\sqrt{5}}{2}\right\}=:\{\lambda_{\pm}\}$ with $\lambda_-<1<\lambda_+$. Thus,
$$
\det(\vf{A}^k-\vf{I})=({\lambda_+}^k-1)({\lambda_-}^k-1)\ne 0
$$
and so the equation $\vf{A}^k\vf{x}=\vf{x}+\vf{n}$ has a unique solution $\vf{x}={(\vf{A}^k-\vf{I})}^{-1}\vf{n}$, which is rational. Conversely, any $\vf{x}\in\quot{\QQ^2}{\ZZ^2}$ with common denominator $q$ lies in the finite set $\quot{\frac{1}{q}\ZZ^2}{\ZZ^2}$, which $\vf{\tilde{A}}$ maps bijectively onto itself; hence $\vf{x}$ is periodic.
\end{proof}
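A small numerical check of the lemma (a sketch outside the committed file; it uses exact rationals from Python's fractions module, and the helper names cat_map and period are illustrative):

from fractions import Fraction

# Hyperbolic automorphism induced by A = [[2, 1], [1, 1]] on the torus R^2/Z^2.
def cat_map(x, y):
    return ((2 * x + y) % 1, (x + y) % 1)

def period(x, y, max_iter=10_000):
    """Smallest k >= 1 with A^k (x, y) = (x, y) mod Z^2, or None if not found."""
    px, py = cat_map(x, y)
    for k in range(1, max_iter + 1):
        if (px, py) == (x, y):
            return k
        px, py = cat_map(px, py)
    return None

# Rational points are periodic: A permutes the finitely many points with denominator q.
for q in (2, 3, 5, 7):
    print(f"period of (1/{q}, 0):", period(Fraction(1, q), Fraction(0)))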
\begin{remark}
The \emph{hyperbolicity} comes from the fact that there is one eigenvector with eigenvalue greater than $1$ and another with eigenvalue less than $1$.
\end{remark}
\begin{theorem}
The iterates of $\vf{\tilde{A}}$ smear every domain $F\subseteq T^2$ uniformly over $T^2$, that is, for every domain $G\subseteq T^2$ we have the following convergence (where $\abs{\cdot}$ denotes the Lebesgue measure on $T^2$):
$$
\abs{(\vf{\tilde{A}}^{-n} F)\cap G}\overset{n\to\infty}{\longrightarrow} \abs{F}\abs{G}
$$
This property of $\vf{\tilde{A}}$ is called \emph{mixing}.
\end{theorem}
\begin{proof}
We can prove a more general property in terms of functions in the torus (and then apply it to $f=\indi{F}$ and $g=\indi{G}$):
$$
\lim_{n\to\infty}\int_{T^2} f(\vf{\tilde{A}}^n \vf{x}) g(\vf{x})\dd{\vf{x}}=\int_{T^2} f(\vf{x})\dd{\vf{x}}\int_{T^2} g(\vf{x})\dd{\vf{x}}
$$
We will prove this for the orthonormal basis of Fourier series $\{\exp{2\pi i \vf{p}\cdot \vf{x}}\}_{\vf{p}\in\ZZ^2}$. Note that:
$$
\int_{T^2} \exp{2\pi i (\transpose{(\vf{\tilde{A}}^n)}\vf{p})\cdot \vf{x}}\dd{\vf{x}}=\begin{cases}
1 & \text{if }\vf{p}=\vf{0} \\
0 & \text{if }\vf{p}\ne \vf{0}
\end{cases}
$$
Therefore, since for $\vf{p}\ne \vf{0}$ the vectors $\transpose{(\vf{\tilde{A}}^n)}\vf{p}$ are pairwise distinct, we have $\transpose{(\vf{\tilde{A}}^n)}\vf{p}+\vf{q}\ne\vf{0}$ for all $n$ large enough, and so if $f=\exp{2\pi i \vf{p}\cdot \vf{x}}$ and $g=\exp{2\pi i \vf{q} \cdot \vf{x}}$ then:
$$
\lim_{n\to\infty}\int_{T^2} \exp{2\pi i(\transpose{(\vf{\tilde{A}}^n)}\vf{p}+\vf{q})\cdot \vf{x}}\dd{\vf{x}}=0
$$
The case $\vf{p}=\vf{0}$ is immediate, so the equality holds for any $\vf{p},\vf{q}\in\ZZ^2$. Then, we use that any sufficiently nice function can be approximated by its Fourier series.
\end{proof}
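A Monte Carlo illustration of the mixing property (a sketch outside the committed file; it assumes NumPy, and the rectangles F and G are arbitrary choices). Sampling x uniformly on the torus, the average of the product of the indicators of {A^n x in F} and {x in G} estimates |(A^{-n}F) ∩ G|, which should approach |F||G| = 0.018 as n grows:

import numpy as np

rng = np.random.default_rng(0)
A = np.array([[2, 1], [1, 1]])

def in_rect(pts, rect):
    (x0, x1), (y0, y1) = rect
    return (pts[:, 0] >= x0) & (pts[:, 0] < x1) & (pts[:, 1] >= y0) & (pts[:, 1] < y1)

F = ((0.0, 0.3), (0.0, 0.3))   # |F| = 0.09
G = ((0.5, 0.9), (0.2, 0.7))   # |G| = 0.20, so |F||G| = 0.018

x = rng.random((200_000, 2))   # uniform points of the torus
for n in range(9):
    An_x = (x @ np.linalg.matrix_power(A, n).T) % 1.0   # A^n x mod Z^2
    print(n, np.mean(in_rect(An_x, F) & in_rect(x, G)))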
\begin{theorem}
On the torus $T^2$ there exist two direction fields invariant with respect to the automorphism $\vf{\tilde{A}}$. The integral curves of each of these direction fields are everywhere dense on the torus. The automorphism $\vf{\tilde{A}}$ converts the integral curves of each field into integral curves of the same field, expanding them by $\lambda_+$ for the first field and contracting them by $\lambda_-$ for the second.
\end{theorem}
\begin{proof}
Let $\vf{e}_+$ and $\vf{e}_-$ be the eigenvectors of $\vf{A}$ with eigenvalues $\lambda_+$ and $\lambda_-$ respectively. Let $\vf{x}\in T^2$ and
$$
\function{\vf\gamma_+}{\RR}{T^2}{t}{\vf{x}+t \vf{e}_+}\quad
\function{\vf\gamma_-}{\RR}{T^2}{t}{\vf{x}+t \vf{e}_-}
$$
be the expanding and contracting curves through $\vf{x}$, and let $\vf{\xi}_{\vf{x}}=\im(\vf\gamma_+)$, $\vf{\eta}_{\vf{x}}=\im(\vf\gamma_-)$. Since the slopes of $\vf{e}_\pm$ are irrational, each of these curves winds densely around the torus. Moreover, $\vf{\tilde{A}}$ maps $\vf{\xi}_{\vf{x}}$ onto $\vf{\xi}_{\vf{\tilde{A}}(\vf{x})}$ stretching lengths by $\lambda_+$, and $\vf{\eta}_{\vf{x}}$ onto $\vf{\eta}_{\vf{\tilde{A}}(\vf{x})}$ contracting them by $\lambda_-$.
\end{proof}
\begin{definition}
Let $\vf{A},\vf{B}:T^2\rightarrow T^2$ be $\mathcal{C}^1$ maps and let $\varepsilon>0$. We say that $\vf{B}$ is \emph{$\varepsilon$-$\mathcal{C}^0$-close} to $\vf{A}$ if:
$$
\sup_{\vf{x}\in T^2}\norm{\vf{A}(\vf{x})-\vf{B}(\vf{x})}<\varepsilon
$$
We say that $\vf{B}$ is \emph{$\varepsilon$-$\mathcal{C}^1$-close} to $\vf{A}$ if $\vf{B}$ is $\varepsilon$-$\mathcal{C}^0$-close to $\vf{A}$ and:
$$
\sup_{\vf{x}\in T^2}\norm{\vf{D}\vf{A}(\vf{x})-\vf{D}\vf{B}(\vf{x})}<\varepsilon
$$
\end{definition}
\begin{theorem}[Structural stability]
There exists $\varepsilon>0$ such that every diffeomorphism $\vf{B}$ on $T^2$ that is $\varepsilon$-$\mathcal{C}^1$-close to $\vf{\tilde{A}}$ is conjugate to $\vf{\tilde{A}}$.
\end{theorem}
\end{multicols}
\end{document}
125 changes: 125 additions & 0 deletions Mathematics/5th/Montecarlo_methods/Montecarlo_methods.tex
@@ -0,0 +1,125 @@
\documentclass[../../../main_math.tex]{subfiles}

\begin{document}
\changecolor{MM}
\begin{multicols}{2}[\section{Monte Carlo methods}]
The goal of Monte Carlo methods is to compute $\Exp(X)$, where $X$ is a random variable. In dimension 1, deterministic quadrature methods are more efficient, but in higher dimensions ($d\geq 4$) Monte Carlo methods become more competitive, since their rate of convergence does not depend on the dimension.
\subsection{Foundations}
As always, we consider a probability space $(\Omega,\mathcal{F},\mathbb{P})$ and a random variable $Y\in L^1$.
\subsubsection{Principle}
\begin{definition}
The main idea is to approximate $\Exp(Y)$ by $\overline{Y}_n:=\frac{1}{n}\sum_{i=1}^n Y_i$, where the $Y_i$ are \iid random variables with the same law as $Y$. The variable $\overline{Y}_n$ is called the \emph{Monte Carlo estimator} of $\Exp(Y)$.
\end{definition}
\begin{lemma}
The Monte Carlo estimator is consistent, i.e.\ $\overline{Y}_n\overset{\text{a.s.}}{\longrightarrow}\Exp(Y)$, and unbiased, i.e.\ $\Exp(\overline{Y}_n)=\Exp(Y)$.
\end{lemma}
\begin{proof}
Use the \mnameref{P:stronglawKolmo}.
\end{proof}
\begin{lemma}
Assume $Y\in L^2$ and let $\overline{Y}_n$ be the Monte Carlo estimator of $\Exp(Y)$. Then:
$$
\norm{\overline{Y}_n-\Exp(Y)}_{2}=\sqrt{\frac{\Var(Y)}{n}}
$$
\end{lemma}
\begin{proof}
\begin{multline*}
\norm{\overline{Y}_n-\Exp(Y)}_{2}=\sqrt{\Exp\left(\left(\overline{Y}_n-\Exp(Y)\right)^2\right)}=\\=\sqrt{\Var(\overline{Y}_n)}=\sqrt{\frac{\Var(Y)}{n}}
\end{multline*}
\end{proof}
\begin{lemma}
Let $Y\in L^2$ and $\overline{Y}_n$ be the Monte Carlo estimator of $\Exp(Y)$. Then:
$$
\sqrt{n}(\overline{Y}_n - \Exp(Y))\overset{\text{d}}{\longrightarrow}N(0,\Var(Y))
$$
\end{lemma}
\begin{proof}
Use \mnameref{P:central_limit_thm}.
\end{proof}
\begin{remark}
In practice, we do not know $\Var(Y)$, so we use an estimator of it, such as ${\overline{\sigma}_n}^2=\frac{1}{n-1}\sum_{i=1}^n {(Y_i-\overline{Y}_n)}^2$, which is a consistent unbiased estimator of $\Var(Y)$. Thus:
$$
\frac{\sqrt{n}}{\overline{\sigma}_n}(\overline{Y}_n - \Exp(Y))\overset{\text{d}}{\longrightarrow}N(0,1)
$$
by \mnameref{P:slutsky}.
\end{remark}
\begin{lemma}
Let $Y\in L^2$ and $\overline{Y}_n$ be the Monte Carlo estimator of $\Exp(Y)$. Then, a confidence interval for $\Exp(Y)$ of level $1-\alpha$ is:
$$
\text{CI}_\alpha:=\left(\overline{Y}_n-z_{1-\alpha/2}\frac{\overline{\sigma}_n}{\sqrt{n}},\overline{Y}_n+z_{1-\alpha/2}\frac{\overline{\sigma}_n}{\sqrt{n}}\right)
$$
where $z_{1-\alpha/2}$ is the quantile of order $1-\frac{\alpha}{2}$ of the standard normal distribution.
\end{lemma}
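A minimal sketch of the Monte Carlo estimator with its asymptotic confidence interval (not part of the committed file; it assumes NumPy and the standard library, and the helper name monte_carlo and the test integrand are made up):

import numpy as np
from statistics import NormalDist

rng = np.random.default_rng(0)

def monte_carlo(sample, n, alpha=0.05):
    """Monte Carlo estimate of E(Y) with an asymptotic (1 - alpha) confidence interval."""
    y = sample(n)
    y_bar = y.mean()
    sigma = y.std(ddof=1)                      # square root of the unbiased variance estimator
    z = NormalDist().inv_cdf(1 - alpha / 2)    # z_{1 - alpha/2}
    half = z * sigma / np.sqrt(n)
    return y_bar, (y_bar - half, y_bar + half)

# Example: estimate E(e^U) with U ~ U([0, 1]); the exact value is e - 1 = 1.71828...
est, ci = monte_carlo(lambda n: np.exp(rng.random(n)), n=100_000)
print(est, ci)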
\subsubsection{Random number generator}
In this chapter we will assume that we already know how to simulate sequences of \iid random variables with uniform distribution on $[0,1]$.
\begin{remark}
In summary, the computer generates a sequence ${(x_i)}_{0\leq i\leq m}$, with $m$ as large as possible, in the following way: $x_{i+1}=f(x_i)$, and then sets $u_i=\frac{x_i}{m}$. The value $x_0$ is called the \emph{seed} of the sequence and $f$ is chosen so that the period of the sequence is as long as possible. In the early days of computing, linear congruential generators $f(x)=ax+b\mod{m}$ were used, with period at most $m\sim 2^{31}-1$. Nowadays, the \emph{Mersenne Twister} algorithm is used, whose period is $2^{19937}-1$.
\end{remark}
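A toy linear congruential generator of the kind described in the remark (a sketch, not any library's actual implementation; the constants are the classic Park-Miller "minimal standard" choice a = 16807, b = 0, m = 2^31 - 1):

M = 2**31 - 1
A = 16807          # multiplier a
B = 0              # increment b

def lcg(seed, n):
    """Return n pseudo-random numbers u_i = x_i / M in [0, 1)."""
    x = seed
    out = []
    for _ in range(n):
        x = (A * x + B) % M
        out.append(x / M)
    return out

print(lcg(seed=12345, n=5))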
\subsubsection{Simulation of random variables}
\begin{lemma}
Let $U,U_1,\ldots,U_d\sim U([0,1])$ be independent. Then:
\begin{itemize}
\item If $a,b\in\RR$ with $a<b$, then $a+(b-a)U\sim U([a,b])$.
\item If $p\in (0,1)$, then $\indi{U\leq p}\sim\text{Ber}(p)$.
\item If $p\in (0,1)$, then $\sum_{i=1}^d \indi{U_i\leq p}\sim\text{B}(d,p)$.
\item If $(x_n)$ is a sequence of real numbers and $(p_n)$ are nonnegative weights with $\sum_{n\geq 0} p_n=1$, then $\sum_{n\geq 0} x_n\indi{\sum_{k=0}^{n-1} p_k\leq U<\sum_{k=0}^n p_k}$ follows the discrete law that assigns probability $p_n$ to $x_n$.
\item If $a_i,b_i\in\RR$ with $a_i<b_i$ for $i=1,\ldots,d$, then ${(a_i+(b_i-a_i)U_i)}_{1\leq i\leq d}\sim U\left(\prod_{i=1}^d (a_i,b_i)\right)$.
\end{itemize}
\end{lemma}
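A sketch of the simulations in the lemma (outside the committed file; it assumes NumPy, and all parameter values are arbitrary):

import numpy as np

rng = np.random.default_rng(0)
n = 100_000
U = rng.random(n)

# U([a, b]) from U([0, 1])
a, b = -1.0, 3.0
uniform_ab = a + (b - a) * U

# Ber(p) and B(d, p)
p, d = 0.3, 10
bernoulli = (U <= p).astype(int)
binomial = (rng.random((n, d)) <= p).sum(axis=1)

# Discrete law P(X = x_k) = p_k, via the cumulative weights
x_vals = np.array([-1.0, 0.0, 2.0])
p_vals = np.array([0.2, 0.5, 0.3])
idx = np.minimum(np.searchsorted(np.cumsum(p_vals), U), len(x_vals) - 1)
discrete = x_vals[idx]

print(uniform_ab.mean(), bernoulli.mean(), binomial.mean(), discrete.mean())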
\begin{proposition}
Let $X$ be a random variable with cdf $F$ and $U\sim U([0,1])$. Then,
$$
F^{-1}(u)=\inf\{ x\in\RR : F(x)\geq u\}
$$
satisfies $F^{-1}(U)\sim X$.
\end{proposition}
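A sketch of inverse-transform sampling for an exponential variable, whose quantile function is explicit (assumes NumPy; the choice Exp(lam) with lam = 2 is just an example):

import numpy as np

rng = np.random.default_rng(0)

# For Exp(lam), F(x) = 1 - exp(-lam x), so F^{-1}(u) = -log(1 - u) / lam.
lam = 2.0
U = rng.random(100_000)
X = -np.log(1.0 - U) / lam

print(X.mean())   # close to 1 / lam = 0.5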
\begin{proposition}
Let $U\sim U([0,1])$, $X$ be a random variable with cdf $F$ and $a,b\in\RR$ with $a<b$ be such that $\Prob(a< X\leq b)>0$. Then:
$$
F^{-1}\left(F(a)+(F(b)-F(a))U\right)\sim \mathcal{L}(X\mid a< X\leq b)
$$
\end{proposition}
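The same idea gives conditional sampling; a sketch for an exponential variable conditioned on (a, b] (assumes NumPy; the parameters are arbitrary):

import numpy as np

rng = np.random.default_rng(0)

# L(X | a < X <= b) for X ~ Exp(lam), using F^{-1}(F(a) + (F(b) - F(a)) U).
lam, a, b = 1.0, 1.0, 3.0
F = lambda x: 1.0 - np.exp(-lam * x)
F_inv = lambda u: -np.log(1.0 - u) / lam

U = rng.random(100_000)
X = F_inv(F(a) + (F(b) - F(a)) * U)
print(X.min(), X.max())   # the samples lie between a and b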
\begin{proposition}[Acceptance-rejection method]
Let ${(X_i)}_{i\geq 1}$ be \iid $\RR^d$-valued random variables, $D\in \mathcal{B}(\RR^d)$ be such that $\Prob(X_1\in D)>0$ and set:
$$
\nu := \inf\{ i\geq 1 : X_i\in D\}
$$
Then, $X_\nu\sim \mathcal{L}(X_1\mid X_1\in D)$.
\end{proposition}
\begin{remark}
The principle of the acceptance-rejection method is to simulate conditional distributions by rejecting samples that do not satisfy a prescribed condition.
\end{remark}
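A sketch of the acceptance-rejection principle on a region D: simulating the uniform law on the unit disk by rejecting points of the square (assumes NumPy; sample_in_disk is an illustrative name):

import numpy as np

rng = np.random.default_rng(0)

def sample_in_disk():
    """Simulate L(X | X in D) for X ~ U([-1, 1]^2) and D the open unit disk."""
    while True:
        x = rng.uniform(-1.0, 1.0, size=2)
        if x[0]**2 + x[1]**2 < 1.0:     # keep the first X_i that falls in D
            return x

pts = np.array([sample_in_disk() for _ in range(10_000)])
print(pts.mean(axis=0))                 # close to (0, 0), the centre of the disk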
\begin{proposition}
Let $f$ be a pdf of some random variable, ${(X_i)}_{i\geq 1}$ be \iid with pdf $g$ and ${(U_i)}_{i\geq 1}$ be \iid $U([0,1])$ independent of ${(X_i)}_{i\geq 1}$. Assume that $\exists c\geq 1$ such that $f(x)\almoste{\leq} cg(x)$ and set:
$$
\nu := \inf\{ i\geq 1 : cg(X_i)U_i\leq f(X_i)\}
$$
Then, $X_\nu$ admits $f$ as pdf.
\end{proposition}
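A sketch of acceptance-rejection with densities, for the target f(x) = 6x(1 - x) on [0, 1] (the Beta(2, 2) pdf) and the proposal g = U([0, 1]), for which c = 3/2 works (assumes NumPy; the target is just an example):

import numpy as np

rng = np.random.default_rng(0)

f = lambda x: 6.0 * x * (1.0 - x)   # target pdf, bounded by c * g with g = 1 on [0, 1]
c = 1.5

def sample_f():
    while True:
        x = rng.random()                # X_i ~ g
        u = rng.random()                # U_i ~ U([0, 1]), independent of X_i
        if c * 1.0 * u <= f(x):         # accept when c g(X_i) U_i <= f(X_i)
            return x

samples = np.array([sample_f() for _ in range(50_000)])
print(samples.mean())   # close to 1/2, the mean of Beta(2, 2)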
\begin{proposition}
Let $f$ be a pdf of some random variable and $a_1,a_2\in\RR$ with $a_2>0$ be such that
$$
D:=\{ (u,v)\in\RR_{>0}\times \RR:0<u^2<f\left(a_1+a_2\frac{v}{u}\right)\}
$$
is bounded. If $(U,V)\sim U(D)$, then $a_1+a_2\frac{V}{U}$ admits $f$ as pdf.
\end{proposition}
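A sketch of the ratio-of-uniforms recipe for the standard normal pdf, with a_1 = 0 and a_2 = 1; the set D is contained in the box (0, sup sqrt(f)] x [-sup |x| sqrt(f(x)), sup |x| sqrt(f(x))], from which points are drawn and kept only if they fall in D (assumes NumPy):

import numpy as np

rng = np.random.default_rng(0)

f = lambda x: np.exp(-x**2 / 2.0) / np.sqrt(2.0 * np.pi)
u_max = (2.0 * np.pi) ** (-0.25)                        # sup sqrt(f)
v_max = np.sqrt(2.0 / np.e) * (2.0 * np.pi) ** (-0.25)  # sup |x| sqrt(f(x))

def sample_normal():
    while True:
        u = rng.uniform(0.0, u_max)
        v = rng.uniform(-v_max, v_max)
        if u > 0.0 and u**2 < f(v / u):   # (u, v) uniform on the box, kept only if in D
            return v / u

x = np.array([sample_normal() for _ in range(50_000)])
print(x.mean(), x.var())   # close to 0 and 1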
\subsubsection{Gaussian distribution}
\begin{proposition}[Box-Muller method]
Let $U$, $V$ be \iid $U([0,1])$ and set:
$$
X:=\sqrt{-2\log(U)}\cos(2\pi V)\quad Y:=\sqrt{-2\log(U)}\sin(2\pi V)
$$
Then, $X$, $Y$ are \iid $N(0,1)$.
\end{proposition}
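A sketch of the Box-Muller transform (assumes NumPy; U is taken in (0, 1] only to avoid log 0):

import numpy as np

rng = np.random.default_rng(0)
n = 100_000

U = 1.0 - rng.random(n)     # uniform on (0, 1]
V = rng.random(n)
X = np.sqrt(-2.0 * np.log(U)) * np.cos(2.0 * np.pi * V)
Y = np.sqrt(-2.0 * np.log(U)) * np.sin(2.0 * np.pi * V)

print(X.mean(), X.var(), Y.mean(), Y.var())   # close to 0, 1, 0, 1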
\begin{proposition}[Polar method]
Let $(U,V)\sim U(\DD)$, where $\DD\subset \RR^2$ is the open unit disk. Let $R^2:=U^2+V^2$ and set:
$$
X:=U\sqrt{\frac{-2\log(R^2)}{R^2}}\quad Y:=V\sqrt{\frac{-2\log(R^2)}{R^2}}
$$
Then, $X$, $Y$ are \iid $N(0,1)$.
\end{proposition}
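A sketch of the polar (Marsaglia) method, generating the uniform point of the disk by rejection as above (assumes NumPy; polar_pair is an illustrative name):

import numpy as np

rng = np.random.default_rng(0)

def polar_pair():
    """One pair of independent N(0, 1) variables by the polar method."""
    while True:
        u = rng.uniform(-1.0, 1.0)
        v = rng.uniform(-1.0, 1.0)
        r2 = u * u + v * v
        if 0.0 < r2 < 1.0:              # (u, v) is uniform on the open unit disk
            factor = np.sqrt(-2.0 * np.log(r2) / r2)
            return u * factor, v * factor

samples = np.array([polar_pair() for _ in range(50_000)]).ravel()
print(samples.mean(), samples.var())    # close to 0 and 1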
\begin{proposition}
Let $\vf{X}\sim N_d(\vf{0},\vf{I}_d)$, $\vf\mu\in\RR^d$ and $\vf{A}\in\mathcal{M}_d(\RR)$. Then, $\vf\mu+\vf{AX}\sim N_d(\vf\mu,\vf{A}\transpose{\vf{A}})$.
\end{proposition}
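A sketch of sampling N_d(mu, Sigma) by taking any A with A A^T = Sigma, e.g. a Cholesky factor (assumes NumPy; mu and Sigma are arbitrary):

import numpy as np

rng = np.random.default_rng(0)

mu = np.array([1.0, -2.0])
Sigma = np.array([[2.0, 0.6],
                  [0.6, 1.0]])
A = np.linalg.cholesky(Sigma)            # lower triangular, A A^T = Sigma

X = rng.standard_normal((100_000, 2))    # rows ~ N_2(0, I_2)
samples = mu + X @ A.T                   # rows ~ N_2(mu, A A^T) = N_2(mu, Sigma)

print(samples.mean(axis=0))              # close to mu
print(np.cov(samples, rowvar=False))     # close to Sigma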
\end{multicols}
\end{document}
2 changes: 1 addition & 1 deletion index.html
@@ -75,7 +75,7 @@ <h2 class="special-color">Mathematics</h2>
<li><button class="button" onclick="window.location.href='https://github.com/victorballester7/Complete-summaries/releases/latest/download/Advanced_topics_in_functional_analysis_and_PDEs.pdf';" target="_top">Advanced topics in functional analysis and PDEs</button></li>
<li><button class="button" onclick="window.location.href='https://github.com/victorballester7/Complete-summaries/releases/latest/download/Introduction_to_evolution_PDEs.pdf';" target="_top">Introduction to evolution PDEs</button></li>
<li><button class="button" onclick="window.location.href='https://github.com/victorballester7/Complete-summaries/releases/latest/download/Introduction_to_nonlinear_PDEs.pdf';" target="_top">Introduction to non linear PDEs</button></li>
<!-- <li><button class="button" onclick="window.location.href='https://github.com/victorballester7/Complete-summaries/releases/latest/download/Limit_theorems_and_large_deviations.pdf';" target="_top">Limit theorems and large deviations</button></li> -->
<li><button class="button" onclick="window.location.href='https://github.com/victorballester7/Complete-summaries/releases/latest/download/Montecarlo_methods.pdf';" target="_top">Montecarlo methods</button></li>
<li><button class="button" onclick="window.location.href='https://github.com/victorballester7/Complete-summaries/releases/latest/download/Stochastic_calculus.pdf';" target="_top">Stochastic calculus</button></li>
</ul>
</ul>
8 changes: 4 additions & 4 deletions main_math.tex
@@ -110,14 +110,14 @@ \chapter{Fifth year}
\subfile{Mathematics/5th/Advanced_topics_in_functional_analysis_and_PDEs/Advanced_topics_in_functional_analysis_and_PDEs.tex}
\cleardoublepage

\subfile{Mathematics/5th/Introduction_to_evolution_PDEs/Introduction_to_evolution_PDEs.tex}
\cleardoublepage
% \subfile{Mathematics/5th/Introduction_to_evolution_PDEs/Introduction_to_evolution_PDEs.tex}
% \cleardoublepage

\subfile{Mathematics/5th/Introduction_to_nonlinear_elliptic_PDEs/Introduction_to_nonlinear_elliptic_PDEs.tex}
\cleardoublepage

% \subfile{Mathematics/5th/Limit_theorems_and_large_deviations/Limit_theorems_and_large_deviations.tex}
% \cleardoublepage
\subfile{Mathematics/5th/Montecarlo_methods/Montecarlo_methods.tex}
\cleardoublepage

\subfile{Mathematics/5th/Stochastic_calculus/Stochastic_calculus.tex}
\cleardoublepage
1 change: 1 addition & 0 deletions preamble_formulas.sty
@@ -97,6 +97,7 @@
{LTLD}{\sta} % limit theorems and large deviations
{SC}{\sta} % stochastic calculus
{INLP}{\phy} % Instabilities and nonlinear phenomena
{MM}{\sta} % Montecarlo methods
}{\col}%
}
\ExplSyntaxOff
