From bbbd0493774b61fce7bdd1ec16de0263b5bf2386 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=ADctor?= Date: Thu, 5 Oct 2023 21:56:40 +0200 Subject: [PATCH] updated things + removed limit distr + new monteC. --- .github/workflows/buildpdf.yml | 19 ++- Mathematics/3rd/Probability/Probability.tex | 4 +- Mathematics/3rd/Statistics/Statistics.tex | 2 +- .../Advanced_dynamical_systems.tex | 72 ++++++++++ .../Montecarlo_methods/Montecarlo_methods.tex | 125 ++++++++++++++++++ index.html | 2 +- main_math.tex | 8 +- preamble_formulas.sty | 1 + 8 files changed, 215 insertions(+), 18 deletions(-) create mode 100644 Mathematics/5th/Montecarlo_methods/Montecarlo_methods.tex diff --git a/.github/workflows/buildpdf.yml b/.github/workflows/buildpdf.yml index 6cd3807..d9eacc2 100644 --- a/.github/workflows/buildpdf.yml +++ b/.github/workflows/buildpdf.yml @@ -177,21 +177,21 @@ jobs: with: root_file: Advanced_topics_in_functional_analysis_and_PDEs.tex working_directory: Mathematics/5th/Advanced_topics_in_functional_analysis_and_PDEs/ - - name: Compile - IEPDE - uses: xu-cheng/latex-action@v2 - with: - root_file: Introduction_to_evolution_PDEs.tex - working_directory: Mathematics/5th/Introduction_to_evolution_PDEs/ + # - name: Compile - IEPDE + # uses: xu-cheng/latex-action@v2 + # with: + # root_file: Introduction_to_evolution_PDEs.tex + # working_directory: Mathematics/5th/Introduction_to_evolution_PDEs/ - name: Compile - INEPDE uses: xu-cheng/latex-action@v2 with: root_file: Introduction_to_nonlinear_elliptic_PDEs.tex working_directory: Mathematics/5th/Introduction_to_nonlinear_elliptic_PDEs/ - - name: Compile - LTLD + - name: Compile - MM uses: xu-cheng/latex-action@v2 with: - root_file: Limit_theorems_and_large_deviations.tex - working_directory: Mathematics/5th/Limit_theorems_and_large_deviations/ + root_file: Montecarlo_methods.tex + working_directory: Mathematics/5th/Montecarlo_methods/ - name: Compile - SC uses: xu-cheng/latex-action@v2 with: @@ -278,9 +278,8 @@ jobs: 
Mathematics/5th/Advanced_dynamical_systems/Advanced_dynamical_systems.pdf Mathematics/5th/Advanced_probability/Advanced_probability.pdf Mathematics/5th/Advanced_topics_in_functional_analysis_and_PDEs/Advanced_topics_in_functional_analysis_and_PDEs.pdf - Mathematics/5th/Introduction_to_evolution_PDEs/Introduction_to_evolution_PDEs.pdf Mathematics/5th/Introduction_to_nonlinear_elliptic_PDEs/Introduction_to_nonlinear_elliptic_PDEs.pdf - Mathematics/5th/Limit_theorems_and_large_deviations/Limit_theorems_and_large_deviations.pdf + Mathematics/5th/Montecarlo_methods/Montecarlo_methods.pdf Mathematics/5th/Stochastic_calculus/Stochastic_calculus.pdf main_physics.pdf Physics/Basic/Electricity_and_magnetism/Electricity_and_magnetism.pdf diff --git a/Mathematics/3rd/Probability/Probability.tex b/Mathematics/3rd/Probability/Probability.tex index 3206144..db90516 100644 --- a/Mathematics/3rd/Probability/Probability.tex +++ b/Mathematics/3rd/Probability/Probability.tex @@ -1149,7 +1149,7 @@ \item $aX_n\overset{\text{d}}{\longrightarrow}aX$ \end{enumerate} \end{corollary} - \begin{theorem}[Slutsky's theorem] + \begin{theorem}[Slutsky's theorem]\label{P:slutsky} Let $(\Omega,\mathcal{A},\Prob)$ be a probability space, $(X_n)$, $(Y_n)$ be sequences of random variables and $X$ be a random variable and $a\in\RR$ such that $X_n\overset{\text{d}}{\longrightarrow} X$ and $Y_n\overset{\text{d}}{\longrightarrow} a$. Then: \begin{enumerate} \item $X_n+Y_n\overset{\text{d}}{\longrightarrow} X+ a$ @@ -1242,7 +1242,7 @@ Let $(\Omega,\mathcal{A},\Prob)$ be a probability space and $(X_n)$ be a sequence of \iid random variables with finite 2nd moments. Let $\mu:=\Exp(X_1)$ and $\sigma^2:=\Var(X_1)$. Then: $$\frac{S_n-n\mu}{\sigma\sqrt{n}}\overset{\text{d}}{\longrightarrow} Z$$ where $Z\sim N(0,1)$. 
\end{theorem} - \begin{theorem}[Lyapunov central limit theorem] + \begin{theorem}[Lyapunov central limit theorem]\label{P:central_limit_thm} Let $(\Omega,\mathcal{A},\Prob)$ be a probability space and $(X_n)$ be a sequence of independent random variables each with finite expectation $\mu_i:=\Exp(X_i)$ and variance ${\sigma_i}^2:=\Var(X_i)$ $\forall i=1,\ldots,n$. Then: $$\frac{\sum_{i=1}^n(X_i-\mu_i)}{\sqrt{\sum_{i=1}^n{\sigma_i}^2}}\overset{\text{d}}{\longrightarrow} Z$$ where $Z\sim N(0,1)$. \end{theorem} diff --git a/Mathematics/3rd/Statistics/Statistics.tex b/Mathematics/3rd/Statistics/Statistics.tex index 6fa7e16..5ba7a47 100644 --- a/Mathematics/3rd/Statistics/Statistics.tex +++ b/Mathematics/3rd/Statistics/Statistics.tex @@ -312,7 +312,7 @@ \end{proposition} \subsubsection{Confidence intervals for the relative frequency} \begin{proposition} - Let $(\mathcal{X},\mathcal{F},\{X_1,\ldots,X_n\sim\text{Bern}(p)\ \text{i.i.d.}:p\in(0,1)\})$ be a parametric statistical model, $\vf{x}_n\in\mathcal{X}$ be a realization of $(X_1,\ldots,X_n)$ and $\alpha\in[0,1]$. Let $\hat{p}=\overline{x}_n$. Then, an asymptotic confidence interval for $p$ of confidence level $1-\alpha$ is: + Let $(\mathcal{X},\mathcal{F},\{X_1,\ldots,X_n\sim\text{Ber}(p)\ \text{i.i.d.}:p\in(0,1)\})$ be a parametric statistical model, $\vf{x}_n\in\mathcal{X}$ be a realization of $(X_1,\ldots,X_n)$ and $\alpha\in[0,1]$. Let $\hat{p}=\overline{x}_n$. 
Then, an asymptotic confidence interval for $p$ of confidence level $1-\alpha$ is: $$p\in\left(\hat{p}-z_{1-\frac{\alpha}{2}}\sqrt{\frac{\hat{p}(1-\hat{p})}{n}},\hat{p}+z_{1-\frac{\alpha}{2}}\sqrt{\frac{\hat{p}(1-\hat{p})}{n}}\right)$$ \end{proposition} \subsubsection{Confidence intervals for \texorpdfstring{$N(\mu,\sigma^2)$}{N(mu,sigma2)}} diff --git a/Mathematics/5th/Advanced_dynamical_systems/Advanced_dynamical_systems.tex b/Mathematics/5th/Advanced_dynamical_systems/Advanced_dynamical_systems.tex index c689df6..d53a6fb 100644 --- a/Mathematics/5th/Advanced_dynamical_systems/Advanced_dynamical_systems.tex +++ b/Mathematics/5th/Advanced_dynamical_systems/Advanced_dynamical_systems.tex @@ -75,5 +75,77 @@ x=(0,1,\ldots,m-1,10,\ldots,1(m-1),20,\ldots,2(m-1),\ldots) $$ \end{proof} + \subsubsection{A hyperbolic automorphism of \texorpdfstring{$T^2$}{T2}} + \begin{proposition} + Consider $\vf{A}=\begin{pmatrix} + 2 & 1 \\ + 1 & 1 + \end{pmatrix}\in \GL_2(\RR)$. Then, $\vf{A}(\ZZ^2)=\ZZ^2$ and this induces an automorphism $\vf{\tilde{A}}$ of $T^2=\quot{\RR^2}{\ZZ^2}$. + \end{proposition} + \begin{definition} + We define the set of periodic points of $\vf{\tilde{A}}$ as $\Per\vf{\tilde{A}}$. + \end{definition} + \begin{lemma} + $\Per\vf{\tilde{A}}=\quot{\QQ^2}{\ZZ^2}$. Thus, $\Per\vf{\tilde{A}}$ is dense in $T^2$. + \end{lemma} + \begin{proof} + Let $\vf{x}\in \Per\vf{\tilde{A}}$. Then, $\exists k\in\NN$ and $\vf{n}\in\ZZ^2$ such that $\vf{A}^k\vf{x}=\vf{x}+\vf{n}$. One can easily check that $\sigma(\vf{\tilde{A}})=\left\{\frac{3}{2}\pm \frac{\sqrt{5}}{2}\right\}=:\{\lambda_{\pm}\}$ with $\lambda_-<1<\lambda_+$. Thus, + $$ + \det(\vf{A}^k-\vf{I})=({\lambda_+}^k-1)({\lambda_-}^k-1)\ne 0 + $$ + and so the equation $\vf{A}^k\vf{x}=\vf{x}+\vf{n}$ has a unique (rational) solution. + \end{proof} + \begin{remark} + The \emph{hyperbolicity} comes from the fact that there is one eigenvector with eigenvalue greater than $1$ and another with eigenvalue less than $1$. 
+ \end{remark} + \begin{theorem} + The iterates of $\vf{\tilde{A}}$ smear every domain $F\subseteq T^2$ uniformly over $T^2$, that is, for every domain $G\subseteq T^2$, we have that the following limit exists: + $$ + \abs{(\vf{\tilde{A}}^{-n} F)\cap G}\overset{n\to\infty}{\longrightarrow} \abs{F}\abs{G} + $$ + This property of $\vf{\tilde{A}}$ is called \emph{mixing}. + \end{theorem} + \begin{proof} + We can prove a more general property in terms of functions in the torus (and then apply it to $f=\indi{F}$ and $g=\indi{G}$): + $$ + \lim_{n\to\infty}\int_{T^2} f(\vf{\tilde{A}}^n \vf{x}) g(\vf{x})\dd{\vf{x}}=\int_{T^2} f(\vf{x})\dd{\vf{x}}\int_{T^2} g(\vf{x})\dd{\vf{x}} + $$ + We will prove this for the orthonormal basis of Fourier series $\{\exp{2\pi i \vf{p}\cdot \vf{x}}\}_{\vf{p}\in\ZZ^2}$. Note that: + $$ + \int_{T^2} \exp{2\pi i (\transpose{(\vf{\tilde{A}}^n)}\vf{p})\cdot \vf{x}}\dd{\vf{x}}=\begin{cases} + 1 & \text{if }\vf{p}=\vf{0} \\ + 0 & \text{if }\vf{p}\ne \vf{0} + \end{cases} + $$ + Therefore, since $\transpose{(\vf{\tilde{A}}^n)}\vf{p}$ takes infinitely many values for $\vf{p}\ne \vf{0}$, we have that if $g=\exp{2\pi i \vf{q} \cdot \vf{x}}$ then: + $$ + \lim_{n\to\infty}\int_{T^2} \exp{2\pi i(\transpose{(\vf{\tilde{A}}^n)}\vf{p}+\vf{q})\cdot \vf{x}}\dd{\vf{x}}=0 + $$ + So for any $\vf{p}, \vf{q}\in\ZZ^2$ we have the equality. Then, we use that any function nice enough can be approximated with its Fourier series. + \end{proof} + \begin{theorem} + On the torus $T^2$ there exist two direction fields invariant with respect to the automorphism $\vf{\tilde{A}}$. The integral curves of each of these direction fields are everywhere dense on the torus. The automorphism $\vf{\tilde{A}}$ converts the integral curves of each field into integral curves of the same field, expanding by $\lambda_+$ for the first field and contracting by $\lambda_-$ for the second.
+ \end{theorem} + \begin{proof} + Let $\vf{e}_+$ and $\vf{e}_-$ be the eigenvectors of $\vf{A}$ with eigenvalues $\lambda_+$ and $\lambda_-$ respectively. Let $\vf{x}\in T^2$ and + $$ + \function{\vf\gamma_+}{\RR}{T^2}{t}{\vf{x}+t \vf{e}_+}\quad + \function{\vf\gamma_-}{\RR}{T^2}{t}{\vf{x}+t \vf{e}_-} + $$ + be the expanding and contracting curves and let $\vf{\xi}_{\vf{x}}=\im(\vf\gamma_+)$, $\vf{\eta}_{\vf{x}}=\im(\vf\gamma_-)$. + \end{proof} + \begin{definition} + Let $\vf{A},\vf{B}:T^2\rightarrow T^2$ be $\mathcal{C}^1$ functions. We say that $\vf{B}$ is \emph{$\mathcal{C}^0$-close} to $\vf{A}$ if for all $\varepsilon>0$: + $$ + \sup_{\vf{x}\in T^2}\norm{\vf{A}(\vf{x})-\vf{B}(\vf{x})}<\varepsilon + $$ + We say that $\vf{B}$ is \emph{$\mathcal{C}^1$-close} to $\vf{A}$ if for all $\varepsilon>0$, $\vf{B}$ is $\mathcal{C}^0$-close to $\vf{A}$ and: + $$ + \sup_{\vf{x}\in T^2}\norm{\vf{D}\vf{A}(\vf{x})-\vf{D}\vf{B}(\vf{x})}<\varepsilon + $$ + \end{definition} + \begin{theorem}[Structural stability] + Let $\vf{B}$ be a diffeomorphism on $T^2$ $\mathcal{C}^1$-close to $\vf{\tilde{A}}$. Then, $\vf{B}$ is conjugate to $\vf{\tilde{A}}$. + \end{theorem} \end{multicols} \end{document} \ No newline at end of file diff --git a/Mathematics/5th/Montecarlo_methods/Montecarlo_methods.tex b/Mathematics/5th/Montecarlo_methods/Montecarlo_methods.tex new file mode 100644 index 0000000..f564089 --- /dev/null +++ b/Mathematics/5th/Montecarlo_methods/Montecarlo_methods.tex @@ -0,0 +1,125 @@ +\documentclass[../../../main_math.tex]{subfiles} + +\begin{document} +\changecolor{MM} +\begin{multicols}{2}[\section{Montecarlo methods}] + The goal of Montecarlo methods is to compute $\Exp(X)$, where $X$ is a random variable. In dimension 1, deterministic methods are more efficient but in higher dimensions ($d\geq 4$), Montecarlo methods are more competitive. + \subsection{Foundations} + As always, we consider a probability space $(\Omega,\mathcal{F},\mathbb{P})$ and a random variable $Y\in L^1$.
+ \subsubsection{Principle} + \begin{definition} + The main idea will be to approximate $\Exp(Y)$ by $\frac{1}{n}\sum_{i=1}^n Y_i:=\overline{Y}_n$, where $Y_i$ are \iid random variables with same law as $Y$. The variable $\overline{Y}_n$ is called the \emph{Montecarlo estimator} of $\Exp(Y)$. + \end{definition} + \begin{lemma} + The Montecarlo estimator is consistent, i.e.\ $\overline{Y}_n\overset{\text{a.s.}}{\longrightarrow}\Exp(Y)$, and unbiased, i.e.\ $\Exp(\overline{Y}_n)=\Exp(Y)$. + \end{lemma} + \begin{proof} + Use the \mnameref{P:stronglawKolmo}. + \end{proof} + \begin{lemma} + Assume $Y\in L^2$ and let $\overline{Y}_n$ be the Montecarlo estimator of $\Exp(Y)$. Then: + $$ + \norm{\overline{Y}_n-\Exp(Y)}_{2}=\sqrt{\frac{\Var(Y)}{n}} + $$ + \end{lemma} + \begin{proof} + \begin{multline*} + \norm{\overline{Y}_n-\Exp(Y)}_{2}=\sqrt{\Exp\left(\left(\overline{Y}_n-\Exp(Y)\right)^2\right)}=\\=\sqrt{\Var(\overline{Y}_n)}=\sqrt{\frac{\Var(Y)}{n}} + \end{multline*} + \end{proof} + \begin{lemma} + Let $Y\in L^2$ and $\overline{Y}_n$ be the Montecarlo estimator of $\Exp(Y)$. Then: + $$ + \sqrt{n}(\overline{Y}_n - \Exp(Y))\overset{\text{d}}{\longrightarrow}N(0,\Var(Y)) + $$ + \end{lemma} + \begin{proof} + Use \mnameref{P:central_limit_thm}. + \end{proof} + \begin{remark} + In practice, we do not know $\Var(Y)$, so we use an estimator of it, such as ${\overline{\sigma}_n}^2=\frac{1}{n-1}\sum_{i=1}^n {(Y_i-\overline{Y}_n)}^2$, which is a consistent unbiased estimator of $\Var(Y)$. Thus: + $$ + \frac{\sqrt{n}}{\overline{\sigma}_n}(\overline{Y}_n - \Exp(Y))\overset{\text{d}}{\longrightarrow}N(0,1) + $$ + by \mnameref{P:slutsky}. + \end{remark} + \begin{lemma} + Let $Y\in L^2$ and $\overline{Y}_n$ be the Montecarlo estimator of $\Exp(Y)$. 
Then, a confidence interval for $\Exp(Y)$ of level $1-\alpha$ is: + $$ + \text{CI}_\alpha:=\left(\overline{Y}_n-z_{1-\alpha/2}\frac{\overline{\sigma}_n}{\sqrt{n}},\overline{Y}_n+z_{1-\alpha/2}\frac{\overline{\sigma}_n}{\sqrt{n}}\right) + $$ + where $z_{1-\alpha/2}$ is the quantile of order $1-\alpha/2$ of the standard normal distribution. + \end{lemma} + \subsubsection{Random number generator} + In this chapter we will assume that we already know how to simulate sequences of \iid random variables with uniform distribution on $[0,1]$. + \begin{remark} + In summary, the computer generates a sequence ${(x_i)}_{0\leq i\leq m}$, with $m$ as large as possible, in the following way: $x_{i+1}=f(x_i)$ and then sets $u_i=\frac{x_i}{m}$. The value $x_0$ is called the \emph{seed} of the sequence and $f$ is chosen with periodicity as high as possible. In the early days of computers, $f(x)=ax+b\mod{m}$, which had periodicity $m\sim 2^{31}-1$. Nowadays, \emph{Mersenne Twister algorithm} is used, which has periodicity $m\sim 2^{19937}-1$. + \end{remark} + \subsubsection{Simulation of random variables} + \begin{lemma} + Let $U, {(U_i)}_{0\leq i\leq d}\sim U([0,1])$. Then: + \begin{itemize} + \item If $a,b\in\RR$ with $a<b$, then $a+(b-a)U\sim U([a,b])$. + \end{itemize} + \end{lemma} + % NOTE(review): a span was lost here in extraction (text between two `<' signs was eaten); the closing of the lemma and the hypotheses of the following proposition have been reconstructed minimally --- verify against the original file. + \begin{proposition} + Let $X$ be a real random variable with distribution function $F$ and let $a,b\in\RR$ with $a<b$ be such that $\Prob(a<X\leq b)>0$. Then: + $$ + F^{-1}\left(F(a)+(F(b)-F(a))U\right)\sim \mathcal{L}(X\mid a< X\leq b) + $$ + \end{proposition} + \begin{proposition}[Acceptance-rejection method] + Let ${(X_i)}_{i\geq 1}$ be \iid $\RR^d$-valued random variables, $D\in \mathcal{B}(\RR^d)$ be such that $\Prob(X_1\in D)>0$ and set: + $$ + \nu := \inf\{ i\geq 1 : X_i\in D\} + $$ + Then, $X_\nu\sim \mathcal{L}(X_1\mid X_1\in D)$. + \end{proposition} + \begin{remark} + The principle of the acceptance-rejection method is to simulate conditional distributions by rejecting samples that do not satisfy a prescribed condition.
+ \end{remark} + \begin{proposition} + Let $f$ be a pdf of some random variable, ${(X_i)}_{i\geq 1}$ be \iid with pdf $g$ and ${(U_i)}_{i\geq 1}$ be \iid $U([0,1])$ independent of ${(X_i)}_{i\geq 1}$. Assume that $\exists c\geq 1$ such that $f(x)\almoste{\leq} cg(x)$ and set: + $$ + \nu := \inf\{ i\geq 1 : cg(X_i)U_i\leq f(X_i)\} + $$ + Then, $X_\nu$ admits $f$ as pdf. + \end{proposition} + \begin{proposition} + Let $f$ be a pdf of some random variable and $a_1,a_2\in\RR$ with $a_2>0$ be such that + $$ + D:=\{ (u,v)\in\RR_{>0}\times \RR:0Mathematics
  • - +
  • diff --git a/main_math.tex b/main_math.tex index 048a926..90e8f13 100644 --- a/main_math.tex +++ b/main_math.tex @@ -110,14 +110,14 @@ \chapter{Fifth year} \subfile{Mathematics/5th/Advanced_topics_in_functional_analysis_and_PDEs/Advanced_topics_in_functional_analysis_and_PDEs.tex} \cleardoublepage -\subfile{Mathematics/5th/Introduction_to_evolution_PDEs/Introduction_to_evolution_PDEs.tex} -\cleardoublepage +% \subfile{Mathematics/5th/Introduction_to_evolution_PDEs/Introduction_to_evolution_PDEs.tex} +% \cleardoublepage \subfile{Mathematics/5th/Introduction_to_nonlinear_elliptic_PDEs/Introduction_to_nonlinear_elliptic_PDEs.tex} \cleardoublepage -% \subfile{Mathematics/5th/Limit_theorems_and_large_deviations/Limit_theorems_and_large_deviations.tex} -% \cleardoublepage +\subfile{Mathematics/5th/Montecarlo_methods/Montecarlo_methods.tex} +\cleardoublepage \subfile{Mathematics/5th/Stochastic_calculus/Stochastic_calculus.tex} \cleardoublepage diff --git a/preamble_formulas.sty b/preamble_formulas.sty index 7dffaf5..69b9a45 100644 --- a/preamble_formulas.sty +++ b/preamble_formulas.sty @@ -97,6 +97,7 @@ {LTLD}{\sta} % limit theorems and large deviations {SC}{\sta} % stochastic calculus {INLP}{\phy} % Instabilities and nonlinear phenomena + {MM}{\sta} % Montecarlo methods }{\col}% } \ExplSyntaxOff