diff --git a/Mathematics/3rd/Probability/Probability.tex b/Mathematics/3rd/Probability/Probability.tex index eb165da..2fcc6d2 100644 --- a/Mathematics/3rd/Probability/Probability.tex +++ b/Mathematics/3rd/Probability/Probability.tex @@ -730,7 +730,7 @@ Check the proof of \mnameref{RFA:monotone}. \end{sproof} \begin{theorem}[Dominated convergence theorem]\label{P:dominated} - Let $(\Omega,\mathcal{A},\Prob)$ be a probability space and $(X_n)$ be sequence of random variables such that $\displaystyle\lim_{n\to\infty}X_n\overset{\text{a.s.}}{=}X$, for some random variable $X$. Suppose that there exists an integrable random variable $Y$ such that $$|X_n|\leq Y\quad\forall n\geq 1$$ Then: $$\lim_{n\to\infty} \Exp(X_n)=\Exp(X)$$ + Let $(\Omega,\mathcal{A},\Prob)$ be a probability space and $(X_n)$ be a sequence of random variables such that $\displaystyle\lim_{n\to\infty}X_n\overset{\text{a.s.}}{=}X$, for some random variable $X$. Suppose that there exists an integrable random variable $Y$ such that $$\abs{X_n}\leq Y\quad\forall n\geq 1$$ Then: $$\lim_{n\to\infty} \Exp(X_n)=\Exp(X)$$ \end{theorem} \begin{sproof} Check the proof of \mnameref{RFA:dominated}. @@ -1162,21 +1162,21 @@ Let $(\Omega,\mathcal{A},\Prob)$ be a probability space and $(X_n)$ be a sequence of random variables. We define the sequence of partial sums $(S_n)$ as: $$S_n:=\sum_{i=1}^nX_i$$ \end{definition} \subsubsection{Weak laws} - \begin{theorem}[Weak law] + \begin{theorem}[Weak law of large numbers]\label{P:weaklaw} Let $(\Omega,\mathcal{A},\Prob)$ be a probability space and $(X_n)$ be a sequence of \iid random variables with finite 2nd moment. 
Then: $$\frac{S_n}{n}\overset{\Prob}{\longrightarrow}\Exp(X_1)\quad\text{and}\quad\frac{S_n}{n}\overset{L^2}{\longrightarrow}\Exp(X_1)$$ \end{theorem} - \begin{theorem}[Weak law] + \begin{theorem}[Weak law of large numbers] Let $(\Omega,\mathcal{A},\Prob)$ be a probability space and $(X_n)$ be a sequence of pairwise uncorrelated random variables with finite 2nd moment. Suppose that: $$\lim_{n\to\infty}\frac{1}{n}\sum_{i=1}^n\Exp(X_i)=\mu<\infty\;\,\text{and}\;\,\lim_{n\to\infty}\frac{1}{n^2}\sum_{i=1}^n\Var(X_i)=0$$ Then: $$\frac{S_n}{n}\overset{\Prob}{\longrightarrow}\mu\quad\text{and}\quad\frac{S_n}{n}\overset{L^2}{\longrightarrow}\mu$$ \end{theorem} \subsubsection{Strong laws} - \begin{theorem}[Kolmogorov's strong law]\label{P:stronglawKolmo} + \begin{theorem}[Kolmogorov's strong law of large numbers]\label{P:stronglawKolmo} Let $(\Omega,\mathcal{A},\Prob)$ be a probability space and $(X_n)$ be a sequence of \iid random variables. \begin{enumerate} \item If $\Exp(X_1)<\infty$, then: $$\frac{S_n}{n}\overset{\text{a.s.}}{\longrightarrow}\Exp(X_1)$$ \item If $\Exp(X_1)=\infty$, then: $$\limsup_{n\to\infty}\frac{|S_n|}{n}\overset{\text{a.s.}}{=}+\infty$$ \end{enumerate} \end{theorem} - \begin{theorem}[Strong law] + \begin{theorem}[Strong law of large numbers] Let $(\Omega,\mathcal{A},\Prob)$ be a probability space and $(X_n)$ be a sequence of \iid random variables such that $\Exp({X_1}^4)<\infty$. 
Then: $$\frac{S_n}{n}\overset{\text{a.s.}}{\longrightarrow}\Exp(X_1)$$ \end{theorem} \begin{corollary} diff --git a/Mathematics/5th/Advanced_topics_in_functional_analysis_and_PDEs/Advanced_topics_in_functional_analysis_and_PDEs.tex b/Mathematics/5th/Advanced_topics_in_functional_analysis_and_PDEs/Advanced_topics_in_functional_analysis_and_PDEs.tex index 88a5f14..904faaa 100644 --- a/Mathematics/5th/Advanced_topics_in_functional_analysis_and_PDEs/Advanced_topics_in_functional_analysis_and_PDEs.tex +++ b/Mathematics/5th/Advanced_topics_in_functional_analysis_and_PDEs/Advanced_topics_in_functional_analysis_and_PDEs.tex @@ -247,13 +247,13 @@ From now on, we will denote $\RR_{\pm}^d:=\RR^{d-1}\times\RR_{\pm}$ and $\RR_0^d:=\RR^{d-1}\times\{0\}$. \end{remark} \begin{theorem} - For all $m\in\NN$ and all $1\leq p<\infty$, $\mathcal{C}^\infty(\overline{\RR_+^d})$ is dense in $W^{m,p}(\RR_+^d)$. + For all $m\in\NN$ and all $1\leq p<\infty$, $\mathcal{C}^\infty(\overline{\RR_{\geq 0}^d})$ is dense in $W^{m,p}(\RR_{\geq 0}^d)$. \end{theorem} \begin{proof} Let $$ \tau_h(u)(x_1,\ldots, x_d):=u(x_1,\ldots, x_{d-1},x_d+h) $$ - be the translation operator and set $u_\varepsilon:=\tau_{\varepsilon}(u)*\phi_\varepsilon$, where $\varepsilon>0$ and $\phi_\varepsilon$ is an approximation of identity. Then, $u_\varepsilon\in \mathcal{C}^\infty(\overline{\RR_+^d})$ by the properties of the convolution. Moreover: + be the translation operator and set $u_\varepsilon:=\tau_{\varepsilon}(u)*\phi_\varepsilon$, where $\varepsilon>0$ and $\phi_\varepsilon$ is an approximation of identity. Then, $u_\varepsilon\in \mathcal{C}^\infty(\overline{\RR_{\geq 0}^d})$ by the properties of the convolution. 
Moreover: \begin{multline*} \norm{\partial^\alpha u_\varepsilon-\partial^\alpha u}_p \leq \norm{\partial^\alpha u_\varepsilon-\partial^\alpha (\tau_{\varepsilon}u)}_p+\norm{\partial^\alpha (\tau_{\varepsilon}u)-\partial^\alpha u}_p \\ \leq \norm{(\partial^\alpha \tau_\varepsilon u)*\phi_\varepsilon-\partial^\alpha (\tau_{\varepsilon}u)}_p+\norm{\tau_{\varepsilon}(\partial^\alpha u)-\partial^\alpha u}_p @@ -264,24 +264,24 @@ The same proof shows that $\mathcal{C}^\infty(\overline{\Omega})$ is dense in $W^{m,p}(\Omega)$, if $\Omega$ is bounded with $\Fr{\Omega}$ of class $\mathcal{C}^1$. This time, one needs to locally translate u along the normal direction. \end{remark} \begin{theorem} - For all $m\in\NN$ and all $1\leq p<\infty$, there is an extension operator $E:W^{m,p}(\RR_+^d)\to W^{m,p}(\RR^d)$. + For all $m\in\NN$ and all $1\leq p<\infty$, there is an extension operator $E:W^{m,p}(\RR_{\geq 0}^d)\to W^{m,p}(\RR^d)$. \end{theorem} \begin{proof} - We only do the proof for $d=1$ and $m=1$ to highlight the main ideas. Let $u\in W^{1,p}(\RR_+)$. We define the \emph{first order reflection}: + We only do the proof for $d=1$ and $m=1$ to highlight the main ideas. Let $u\in W^{1,p}(\RR_{\geq 0})$. We define the \emph{first order reflection}: $$ \bar{u}:=\begin{cases} u(x) & \text{if }x\geq 0 \\ -3u(-x)+4u(-x/2) & \text{if }x<0 \end{cases} $$ - By density, it is enough to prove the result for $u\in \mathcal{C}^1(\RR_+)$. An easy check shows that $\bar{u}\in \mathcal{C}^1(\RR)$. Moreover, we have: + By density, it is enough to prove the result for $u\in \mathcal{C}^1(\RR_{\geq 0})$. An easy check shows that $\bar{u}\in \mathcal{C}^1(\RR)$. 
Moreover, we have: \begin{align*} - {\norm{\bar{u}}_{W^{1,p}(\RR)}}^p & =\int_{\RR}{\abs{\bar{u}}^p}+{\abs{\bar{u}'}^p} \\ + {\norm{\bar{u}}_{W^{1,p}(\RR)}}^p & =\int_{\RR}{\abs{\bar{u}}^p}+{\abs{\bar{u}'}^p} \\ \begin{split} - &=\!\int_{\RR_+}\!{\abs{u}^p}\!+\!\!{\abs{u'}^p}\!+\!\int_{\RR_-}\![{\abs{-3u(-x)+4u(-x/2)}^p}+\\ + &=\!\int_{\RR_{\geq 0}}\!{\abs{u}^p}\!+\!\!{\abs{u'}^p}\!+\!\int_{\RR_{\leq 0}}\![{\abs{-3u(-x)+4u(-x/2)}^p}+\\ &\hspace{2.75cm}+{\abs{3u'(-x)-2u'(-x/2)}^p}] \end{split} \\ - & \leq C{\norm{u}_{W^{1,p}(\RR_+)}}^p + & \leq C{\norm{u}_{W^{1,p}(\RR_{\geq 0})}}^p \end{align*} for some constant $C>0$. Thus, $E$ is a bounded extension operator. \end{proof} @@ -300,7 +300,7 @@ The proof for higher derivatives $m \geq 1$ needs to add more terms in order to make the junction smooth enough. \end{remark} \begin{definition} - We say that a domain $\Omega\subseteq \RR^d$ has boundary of class $\mathcal{C}^k$ if $\forall x\in \Fr{\Omega}$ there is a neighborhood $\varepsilon,\delta>0$ and a $\mathcal{C}^k$-diffeomorphism $\phi:B(x,\varepsilon)\to B(0,\delta)$ so that $\phi(x)=0$ and $\phi(B(x,\varepsilon)\cap \Omega)=B(0,\delta)\cap \RR_+^d$. Note that in particular this implies that $\phi(\Fr{\Omega}\cap B(x,\varepsilon))=B(0,\delta)\cap \RR_0^d$. + We say that a domain $\Omega\subseteq \RR^d$ has boundary of class $\mathcal{C}^k$ if $\forall x\in \Fr{\Omega}$ there is a neighborhood $\varepsilon,\delta>0$ and a $\mathcal{C}^k$-diffeomorphism $\phi:B(x,\varepsilon)\to B(0,\delta)$ so that $\phi(x)=0$ and $\phi(B(x,\varepsilon)\cap \Omega)=B(0,\delta)\cap \RR_{\geq 0}^d$. Note that in particular this implies that $\phi(\Fr{\Omega}\cap B(x,\varepsilon))=B(0,\delta)\cap \RR_0^d$. \end{definition} \begin{theorem} Let $\Omega\subseteq \RR^d$ be a bounded domain with $\mathcal{C}^k$ boundary. Then, $\forall m\leq k$ and all $1\leq p<\infty$, there is an extension operator $E:W^{m,p}(\Omega)\to W^{m,p}(\RR^d)$. 
@@ -320,16 +320,16 @@ \end{theorem} \subsubsection{Trace operators} \begin{theorem} - Let $1\leq p<\infty$ and $u\in W^{1,p}(\RR_+^d)$. Then, the function $u|_{\RR_0^d}:\RR^{d-1}\to\CC$ belongs to $L^p(\RR^{d-1})$. + Let $1\leq p<\infty$ and $u\in W^{1,p}(\RR_{\geq 0}^d)$. Then, the function $u|_{\RR_0^d}:\RR^{d-1}\to\CC$ belongs to $L^p(\RR^{d-1})$. \end{theorem} \begin{definition} We define the \emph{trace operator} as the map: \begin{align*} - \function{T}{W^{1,p}(\RR_+^d)}{L^p(\RR^{d-1})}{u}{u|_{\RR_0^d}} + \function{T}{W^{1,p}(\RR_{\geq 0}^d)}{L^p(\RR^{d-1})}{u}{u|_{\RR_0^d}} \end{align*} \end{definition} \begin{theorem} - Let $1\leq p<\infty$ and $u\in W^{1,p}(\RR_+^d)$. Then, $Tu=0$ if and only if $u\in W_0^{1,p}(\RR_+^d)$. + Let $1\leq p<\infty$ and $u\in W^{1,p}(\RR_{\geq 0}^d)$. Then, $Tu=0$ if and only if $u\in W_0^{1,p}(\RR_{\geq 0}^d)$. \end{theorem} \begin{lemma} Let $\Omega\subseteq \RR^d$ be an open set and $u\in W^{1,p}(\Omega)$ with $1\leq p\leq \infty$. Then, $\norm{\grad \abs{u}}\almoste{\leq}\norm{\grad u}$. diff --git a/Mathematics/5th/Introduction_to_non_linear_elliptic_PDEs/Introduction_to_non_linear_elliptic_PDEs.tex b/Mathematics/5th/Introduction_to_non_linear_elliptic_PDEs/Introduction_to_non_linear_elliptic_PDEs.tex index 5ad4adb..148e449 100644 --- a/Mathematics/5th/Introduction_to_non_linear_elliptic_PDEs/Introduction_to_non_linear_elliptic_PDEs.tex +++ b/Mathematics/5th/Introduction_to_non_linear_elliptic_PDEs/Introduction_to_non_linear_elliptic_PDEs.tex @@ -3,6 +3,49 @@ \begin{document} \changecolor{INLEPDE} \begin{multicols}{2}[\section{Introduction to non linear elliptic PDEs}] - + \subsection{Introduction} + \begin{definition} + Let $a_{ij}$, $b_j$, $c$, $f$ be known functions on $\Omega\subseteq \RR^d$. 
Usually we will denote $\vf{A}=(a_{ij})$ and $\vf{b}=(b_j)$. A \emph{linear second-order PDE} is an equation of the form: + \begin{equation*} + -\sum_{i,j=1}^da_{ij}(x){\partial_{ij}}^2u(x)+\sum_{j=1}^db_j(x)\partial_ju(x)+c(x)u(x)=f(x) + \end{equation*} + where $u:\Omega\to \RR$ is the unknown function. This form is called \emph{non-divergence form}. If we write the equation in the form: + \begin{multline*} + -\sum_{i=1}^d\pdv{}{x_i}\left(\sum_{j=1}^da_{ij}(x)\partial_ju(x)\right)+\sum_{j=1}^db_j(x)\partial_ju(x)+\\+c(x)u(x)=f(x) + \end{multline*} + then we say that the equation is in \emph{divergence form}. Together with the PDE we usually impose boundary conditions on $\partial\Omega$. The \emph{Dirichlet boundary condition} is: + $$ + u|_{\partial\Omega}=g + $$ + and it is called \emph{homogeneous} if $g=0$. The \emph{Neumann boundary condition} is: + $$ + \langle \vf{n},\vf{A} \nabla u\rangle|_{\partial\Omega}=g + $$ + where we have assumed that the boundary of $\Omega$ is smooth enough to define the normal vector $\vf{n}$. The condition is called \emph{homogeneous} if $g=0$. Note that if $\vf{A}=\vf{I}_d$, then the Neumann boundary condition is just $\partial_{\vf{n}} u=g$. + \end{definition} + \begin{definition} + Let $a_{ij},b_j,c$ be known functions on $\Omega\subseteq \RR^d$. We say that the operator $$L=-\sum_{i,j=1}^da_{ij}{\partial_{ij}}^2 + \sum_{j=1}^d b_j\partial_j+c$$ is \emph{uniformly elliptic} if there exists $\theta>0$ such that for all $x\in \Omega$ and all $p\in \RR^d$ we have: + \begin{equation} + Q_x(p)=\sum_{i,j=1}^da_{ij}(x)p_ip_j\geq \theta \sum_{i=1}^{d} {p_i}^2 + \end{equation} + \end{definition} + \begin{remark} + Geometrically speaking, this implies that the sets + $$ + \xi_{x,h}=\{ p\in \RR^d: Q_x(p)=h\} + $$ + are ellipsoids. 
+ \end{remark} + \subsection{Hilbert space methods for divergence form linear PDEs} + In this section, we will assume that $\Omega\subset\RR^d$ is an open, bounded subset, $a_{ij}=a_{ji}$ and $a_{ij},b_j,c\in L^\infty(\Omega)$. + \subsubsection{Fredholm alternative} + \begin{theorem}[Abstract Fredholm alternative] + Let $H$ be Hilbert and $K:H\to H$ be a compact linear operator. Then: + \begin{enumerate} + \item $\ker(\id-K)$ and $\ker(\id-K^*)$ are both finite dimensional and they have the same dimension. + \item $\im(\id-K)={(\ker(\id-K^*))}^\perp$. In particular, $\im(\id-K)$ is closed. + \item Either $\ker(\id-K)\ne\{0\}$ or $\id -K$ is an isomorphism. + \end{enumerate} + \end{theorem} \end{multicols} \end{document} \ No newline at end of file diff --git a/Mathematics/5th/Stochastic_calculus/Stochastic_calculus.tex b/Mathematics/5th/Stochastic_calculus/Stochastic_calculus.tex index d169981..50c3174 100644 --- a/Mathematics/5th/Stochastic_calculus/Stochastic_calculus.tex +++ b/Mathematics/5th/Stochastic_calculus/Stochastic_calculus.tex @@ -25,7 +25,7 @@ \begin{definition} Let ${(X_t)}_{t\in \TT}$, ${(Y_s)}_{s\in \SS}$ be two stochastic processes. We say that they are \emph{jointly Gaussian} if the concatenated process $({(X_t)}_{t\in \TT},{(Y_s)}_{s\in \SS})$ is Gaussian. \end{definition} - \begin{lemma}\label{lemma:indep_joint_gauss} + \begin{lemma}\label{SC:indep_joint_gauss} Two jointly Gaussian stochastic processes ${(X_t)}_{t\in \TT}$, ${(Y_s)}_{s\in \SS}$ are independent if and only if $\forall t\in\TT$, $\forall s\in\SS$, $\cov(X_t,Y_s)=0$. \end{lemma} \begin{proposition} @@ -49,7 +49,7 @@ Let $B={(B_t)}_{t\geq 0}$ be a Brownian motion and $a\geq 0$ fixed. Then, the Brownian motion ${(B_{t+a}-B_a)}_{t\geq 0}$ is independent of ${(B_s)}_{s\in [0,a]}$. 
\end{theorem} \begin{proof} - The processes ${(B_s)}_{s\in[0,a]}$ and ${(B_{t+a}-B_a)}_{t\geq 0}$ are jointly Gaussian, because their coordinates are linear combinations of coordinates of the same Gaussian process $B$. Thus, by \mcref{lemma:indep_joint_gauss} it reduces to compute the following correlation: + The processes ${(B_s)}_{s\in[0,a]}$ and ${(B_{t+a}-B_a)}_{t\geq 0}$ are jointly Gaussian, because their coordinates are linear combinations of coordinates of the same Gaussian process $B$. Thus, by \mcref{SC:indep_joint_gauss} it reduces to compute the following correlation: $$ \cov(B_s,B_{t+a}-B_a)=s\wedge(t+a)-s\wedge a=0 $$ @@ -62,6 +62,14 @@ Let ${(X_t)}_{t\geq 0}$ be a stochastic process. We define the \emph{natural filtration} of $X$ as $\mathcal{F}^X:={(\mathcal{F}_t^X)}_{t\geq 0}$, where $\mathcal{F}_t^X:=\sigma(X_s:s\leq t)$. \end{definition} From now on, we will assume that we work in a filtered probability space $(\Omega,\mathcal{F},\Prob,{(\mathcal{F}_t)}_{t\geq 0})$. + \begin{definition}[Martingale] + A stochastic process ${(X_t)}_{t\geq 0}$ is a \emph{martingale} if: + \begin{enumerate} + \item it is \emph{adapted}, i.e.\ $X_t$ is $\mathcal{F}_t$-measurable for all $t\geq 0$. + \item $\Exp(\abs{X_t})<\infty$ for all $t\geq 0$. + \item $\Exp(X_t\mid \mathcal{F}_s)=X_s$ for all $0\leq s\leq t$. + \end{enumerate} + \end{definition} \begin{proposition} Let $B={(B_t)}_{t\geq 0}$ be a Brownian motion. Then, the following processes are martingales ${(M_t)}_{t\geq 0}$ with respect to the natural filtration induced by $B$: \begin{itemize} @@ -88,5 +96,153 @@ \begin{theorem}[Doob's optional sampling theorem] Let ${(M_t)}_{t\geq 0}$ be a continuous martingale and $T$ be a stopping time. Then, the \emph{stopped process} $M^T:={(M_{t\wedge T})}_{t\geq 0}$ is a continuous martingale. In particular, $\forall t\geq 0$, $\Exp(M_{t\wedge T})=\Exp(M_0)$. 
If $M^T$ is uniformly integrable and $T\overset{\text{a.s.}}{<}\infty$, then taking $t\to\infty$ we have $\Exp(M_T)=\Exp(M_0)$. \end{theorem} + \begin{lemma}[Orthogonality of martingales]\label{SC:orthogonality_martingales} + Let ${(M_t)}_{t\geq 0}$ be a continuous martingale and let $0\leq s\leq t$. Then: + $$ + \Exp({(M_t-M_s)}^2\mid \mathcal{F}_s)=\Exp({M_t}^2-{M_s}^2\mid \mathcal{F}_s) + $$ + \end{lemma} + \begin{proof} + We have that: + \begin{multline*} + \Exp({(M_t-M_s)}^2\mid \mathcal{F}_s) =\Exp({M_t}^2-2M_tM_s+{M_s}^2\mid \mathcal{F}_s) =\\=\Exp({M_t}^2+{M_s}^2\!\mid \!\mathcal{F}_s)-2M_s\Exp(M_t\!\mid\! \mathcal{F}_s) =\Exp({M_t}^2-{M_s}^2\!\mid\! \mathcal{F}_s) + \end{multline*} + \end{proof} + \subsubsection{Quadratic variation} + \begin{definition} + Let $f:\RR_{\geq 0}\to\RR$ be a function. We define the \emph{absolute variation} of $f$ on the interval $[s,t]$ as: + $$ + V(f,s,t):=\sup_{{(t_k)}_{0\leq k\leq n}\in \mathrm{P}([s,t])}\sum_{k=1}^{n}\abs{f(t_k)-f(t_{k-1})} + $$ + where $\mathrm{P}([s,t])$ is the set of all partitions of $[s,t]$. A function has \emph{finite variation} if $V(f,s,t)<\infty$ for all $0\leq s\leq t$. + \end{definition} + \begin{lemma}\label{SC:properties_variation} + Let $f,g:\RR_{\geq 0}\to\RR$ be functions and $0\leq s\leq t$. Then: + \begin{itemize} + \item $V(f,s,t)=V(f,s,u)+V(f,u,t)$, for all $s\leq u\leq t$. + \item If $f\in C^1$, then $V(f,s,t)=\int_s^t\abs{f'(u)}\dd{u}$. + \item If $f$ is monotone, then $V(f,s,t)=\abs{f(t)-f(s)}$. + \item $\displaystyle V(f+g,s,t)\leq V(f,s,t)+V(g,s,t)$. + \item Finite variation functions form a vector space. + \end{itemize} + \end{lemma} + \begin{proposition}\label{SC:difference_of_increasing} + Let $f:\RR_{\geq 0}\to\RR$. Then, $f$ has finite variation if and only if it can be written as the difference of two non-decreasing functions. + \end{proposition} + \begin{sproof} + \mcref{SC:properties_variation} gives us the implication to the left. 
For the other one, note that the functions $f_1(t):=V(f,0,t)$ and $f_2(t):=V(f,0,t)-f(t)$ are non-decreasing. + \end{sproof} + \begin{theorem}[Quadratic variation] + Let $M={(M_t)}_{t\geq 0}$ be a continuous square-integrable martingale. Then, for each $t\geq 0$ the limit + $$ + {\langle M\rangle}_t:=\lim_{n\to\infty}\sum_{k=1}^{n}\abs{M_{t_k^n}-M_{t_{k-1}^n}}^2 + $$ + exists in $L^1$ and does not depend on the partition ${(t_k^n)}_{0\leq k\leq n}\in \mathrm{P}([0,t])$ chosen as long as the \emph{mesh} $\Delta_n:= \max_{1\leq k\leq n}(t_k^n-t_{k-1}^n)$ goes to $0$ as $n\to\infty$. Moreover, $\langle M\rangle=({\langle M\rangle}_t)_{t\geq 0}$ has the following properties: + \begin{enumerate} + \item\label{SC:quad_var1} ${\langle M\rangle}_0=0$ + \item\label{SC:quad_var2} $\langle M\rangle$ is non-decreasing. + \item\label{SC:quad_var3} The function $t\mapsto {\langle M\rangle}_t$ is continuous. + \item\label{SC:quad_var4} ${({M_t}^2-{\langle M\rangle}_t)}_{t\geq 0}$ is a martingale. + \end{enumerate} + \end{theorem} + \begin{proof} + We omit the proof of the existence and continuity. We will only prove the last property. Let $0\leq s\leq t$ and ${(t_k^n)}_{0\leq k\leq n}\in \mathrm{P}([s,t])$ be such that $\Delta_n\to 0$. Then: + \begin{align*} + \Exp({M_t}^2-{M_s}^2\mid \mathcal{F}_s) & =\sum_{k=1}^n \Exp({M_{t_k^n}}^2-{M_{t_{k-1}^n}}^2\mid \mathcal{F}_s) \\ + & =\sum_{k=1}^n \Exp\left({(M_{t_k^n}-M_{t_{k-1}^n})}^2\mid \mathcal{F}_s\right) \\ + \end{align*} + by the \mnameref{SC:orthogonality_martingales}. Now since we have convergence of $\sum_{k=1}^{n}{(M_{t_k^n}-M_{t_{k-1}^n})}^2$ to ${\langle M\rangle}_t-{\langle M\rangle}_s$ in $L^1$, we get the result: + $$ + \Exp({M_t}^2-{M_s}^2\mid \mathcal{F}_s)=\Exp({\langle M\rangle}_t-{\langle M\rangle}_s\mid \mathcal{F}_s) + $$ + \end{proof} + \begin{proposition} + Let $B$ be a Brownian motion. Then: + $$ + \Prob(\forall s,t\geq 0, V(B,s,t)=\infty)=1 + $$ + But, ${\langle B\rangle}_t=t$ for all $t\geq 0$. 
+ \end{proposition} + \begin{proof} + Let $B={(B_t)}_{t\geq 0}$. Then: + $$ + V(B,s,t)\!\geq\! \sum_{k=1}^{n}\abs{B_{s+k\frac{t-s}{n}}-B_{s+(k-1)\frac{t-s}{n}}}\!=\!\sqrt{\frac{t-s}{n}}\sum_{k=1}^{n}\abs{\xi_k} + $$ + where $\xi_k$ are \iid $N(0,1)$. By the \mnameref{P:weaklaw} we get the result. The second part is similar, but we get convergence instead. + \end{proof} + \begin{proposition}\label{SC:prop_variation_fg} + If a function $f$ has finite variation and $g$ is continuous, then: + $$ + \sum_{k=1}^n(f(t_k)-f(t_{k-1}))(g(t_k)-g(t_{k-1}))\overset{n\to\infty}{\longrightarrow}0 + $$ + \end{proposition} + \begin{proof} + Note that: + \begin{multline*} + \abs{\sum_{k=1}^n(f(t_k)-f(t_{k-1}))(g(t_k)-g(t_{k-1}))}\leq\\\leq V(f,0,t)\max_{\substack{0\leq u\leq v\leq t\\\abs{u-v}\leq \Delta_n}}\abs{g(u)-g(v)} + \end{multline*} + which goes to zero by uniform continuity of $g$ on $[0,t]$. + \end{proof} + \begin{corollary}\label{SC:corollary_finite_variation} + Let $M={(M_t)}_{t\geq 0}$ be a continuous square-integrable martingale with finite variation a.s. Fix $t\geq 0$. By \mcref{SC:prop_variation_fg} we have that ${\langle M\rangle}_t=0$. + Then: + $$ + \Prob(\forall t\geq 0,\ M_t=M_0)=1 + $$ + \end{corollary} + \begin{proof} + By \mnameref{SC:orthogonality_martingales}, we have: + $$ + \Exp({(M_t-M_0)}^2)=\Exp({M_t}^2) - \Exp({M_0}^2) = \Exp({\langle M\rangle}_t)=0 + $$ + where the penultimate equality follows from the fact that ${M_t}^2-{\langle M\rangle}_t$ is a martingale and so it has constant expectation. This shows that $\Prob (\forall t\geq 0,\ M_t=M_0)=1$. Now we can use the fact that $M$ is continuous to conclude using $t\in\QQ$. + \end{proof} + \begin{proposition} + The quadratic variation is the unique process that satisfies \mcref{SC:quad_var1,SC:quad_var2,SC:quad_var3,SC:quad_var4}. + \end{proposition} + \begin{proof} + Let $A$ be another process satisfying such properties. Then, ${M}^2-{\langle M\rangle}$ and ${M}^2-A$ are both martingales. 
Thus, $A-{\langle M\rangle}$ is also a martingale. But it is also continuous and has finite variation (by \mcref{SC:difference_of_increasing}). So by \mcref{SC:corollary_finite_variation}, $A={\langle M\rangle}$. + \end{proof} + \subsubsection{Local martingales} + \begin{definition} + A stochastic process ${(M_t)}_{t\geq 0}$ is a \emph{local martingale} if there exists a sequence of stopping times ${(T_n)}_{n\in\NN}$ (called \emph{localizing sequence}) such that: + \begin{enumerate} + \item $T_n\nearrow \infty$ a.s. + \item $M^{T_n}:={(M_{t\wedge T_n})}_{t\geq 0}$ is a martingale for all $n\in\NN$. + \end{enumerate} + \end{definition} + \begin{remark} + If $M$ is a martingale, then $M$ is a local martingale by taking $T_n=+\infty$ for all $n\in\NN$. + \end{remark} + \begin{remark} + Any local martingale is adapted because it is the pointwise limit of $M^{T_n}$, which are adapted by definition. + \end{remark} + \begin{proposition} + Let $M={(M_t)}_{t\geq 0}$ be a continuous local martingale. Then, if $\forall t\geq 0$ we have + $$ + \Exp\left(\sup_{0\leq s\leq t}\abs{M_s}\right)<\infty + $$ + then $M$ is a martingale. + \end{proposition} + \begin{proof} + We've argued that local martingales are automatically adapted. Moreover: + $$ + \Exp(\abs{M_t})\leq \Exp\left(\sup_{0\leq s\leq t}\abs{M_s}\right)<\infty + $$ + Finally, fix $0\leq s\leq t$. For all $n\in\NN$ we have: + $$ + \Exp(M_{t\wedge T_n}\mid \mathcal{F}_s)=M_{s\wedge T_n} + $$ + And using the \mnameref{P:dominated} with $\abs{M_{t\wedge T_n}}\leq \sup_{0\leq s\leq t}\abs{M_s}$ we conclude the result. + \end{proof} + + \begin{theorem}[L\'evy's characterization of Brownian motion] + Let $M={(M_t)}_{t\geq 0}$ be a stochastic process. Then, the following are equivalent: + \begin{enumerate} + \item $M$ is a continuous local square-integrable martingale with $M_0=0$ and ${\langle M\rangle}_t=t$. + \item $M$ is a ${(\mathcal{F}_t)}_{t\geq 0}$-Brownian motion. 
+ \end{enumerate} + \end{theorem} + \end{multicols} \end{document} \ No newline at end of file