diff --git a/.vscode/ltex.hiddenFalsePositives.en-US.txt b/.vscode/ltex.hiddenFalsePositives.en-US.txt index 5bfa02a..202280a 100644 --- a/.vscode/ltex.hiddenFalsePositives.en-US.txt +++ b/.vscode/ltex.hiddenFalsePositives.en-US.txt @@ -11,3 +11,56 @@ {"rule":"EXTREME_ADJECTIVES","sentence":"^\\QOtherwise, we say that the method is conditionally absolutely stable The motivation behind this definition of stability is on the stiff equations, which are differential equations for which certain numerical methods for solving the equation are numerically unstable, unless the step size is taken extremely small.\\E$"} {"rule":"NUMEROUS_DIFFERENT","sentence":"^\\QThe shooting method is the process of solving the initial value problem for many different values of \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q until one finds the solution \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q that satisfies the desired boundary conditions.\\E$"} {"rule":"UPPERCASE_SENTENCE_START","sentence":"^\\Qorder 1 2 3 4 5 6 7 8 \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q 1 2 3 4 6 7 9 11 Number of stages of an explicit RK method needed for a given order of consistency Step-size control for Runge-Kutta methods.\\E$"} +{"rule":"IF_IS","sentence":"^\\QNote that \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q if \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q if \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q Let \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q and suppose that this expectation is finite.\\E$"} +{"rule":"IF_IS","sentence":"^\\QThen: \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q if \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q if \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q We have that \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q solves the difference equation: \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q with \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q and \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q, whose solution is straightforward (\\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q and \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q are particular solutions for the case \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q and 
\\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q, respectively).\\E$"} +{"rule":"IF_IS","sentence":"^\\QThen: \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q if \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q if \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q We have that \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q solves the difference equation \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Qruin \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Qruin \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q with \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q and \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q, whose solution is straightforward.\\E$"} +{"rule":"ADVERB_OR_HYPHENATED_ADJECTIVE","sentence":"^\\QTherefore, \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q And so the limit has to be \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q (note that the limit does exist because \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q is an increasing bounded sequence).\\E$"} +{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\QNow using SP:2n-n_convinatoria,SP:stirling_polya1 we have: \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q The simple random walk on \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q is always transient.\\E$"} +{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\QBy SP:thmRec,SP:period_classes we have that \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q is recurrent and aperiodic.\\E$"} +{"rule":"EN_UNPAIRED_BRACKETS","sentence":"^\\QWe say that \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q is a stopping time if \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q we have: \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q Intuitively, this condition means that the “decision\" of whether to stop at time \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q must be based only on the information present at time \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q, not on any future information.\\E$"} +{"rule":"ENGLISH_WORD_REPEAT_BEGINNING_RULE","sentence":"^\\QIf \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q is transient, then \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"} +{"rule":"ENGLISH_WORD_REPEAT_BEGINNING_RULE","sentence":"^\\QIf \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q, then: \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q Let 
\\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q be a time-homogeneous Markov chain, \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q be a filtration space defined with \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q and \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q be a stopping time.\\E$"} +{"rule":"ENGLISH_WORD_REPEAT_BEGINNING_RULE","sentence":"^\\QThen, \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"} +{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\QThat is, if the random walk is in \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q, \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q are random variables such that: \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q with probability \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q with probability \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q [mode=image|tex,width=0.75]Images/randomWalk A simple random walk of 10000 steps in \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"} +{"rule":"FILE_EXTENSIONS_CASE","sentence":"^\\QAnd this last expression is the joint pdf of \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q variables.\\E$"} +{"rule":"IF_IS","sentence":"^\\QThen, for any \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q, the following limits exist: \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q if \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q if \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q Note that if the limits are finite we have \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q and \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"} +{"rule":"IF_IS","sentence":"^\\QWe define the infinitesimal transition scheme as: \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q if \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q if \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q Let \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q be a CTHMC with infinitesimal generator \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q and assume that \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q is finite.\\E$"} +{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\Q[mode=image|tex,width=0.75]Images/brownianMotion A Brownian motion simulated with 7500 increments.\\E$"} +{"rule":"IF_IS","sentence":"^\\Q\\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q if \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q if 
\\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q Let \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q and \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q be two stochastic processes.\\E$"} +{"rule":"ENGLISH_WORD_REPEAT_BEGINNING_RULE","sentence":"^\\QThen, the trajectories are almost surely non-decreasing and have jumps of size at most \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"} +{"rule":"FINAL_ADVERB_COMMA","sentence":"^\\Q\\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q in \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q in \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q The Brownian trajectories pass through every point \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q infinitely many times almost surely.\\E$"} +{"rule":"SENTENCE_WHITESPACE","sentence":"^\\QRn.\\E$"} +{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\QIf \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q, then the center of the osculating circle at \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q has coordinates \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q given by: \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q [mode=image|tex,width=0.8]Images/oscu-circle Osculating circle of a cycloid at a certain point Let \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q be an open interval, \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q be a curve and \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q be an arc-length parametrization of \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q of class \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q, \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q and suppose that \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"} +{"rule":"IF_IS","sentence":"^\\QThen: \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q if \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q if \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q for some \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"} +{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\Q[mode=image|tex,width=]Images/involute-evolute Construction of the evolute and involute of a curve Curvature of plane curves.\\E$"} +{"rule":"EN_A_VS_AN","sentence":"^\\QRecall that an Euclidean motion is a function that preserves the distance, that is, if \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q is an Euclidean motion, then 
\\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"} +{"rule":"EN_A_VS_AN","sentence":"^\\QLet \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q be an Euclidean motion.\\E$"} +{"rule":"ENGLISH_WORD_REPEAT_BEGINNING_RULE","sentence":"^\\QThen, \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q we have: \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q Let \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q be an open interval, \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q be a curve, \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q be an arc-length parametrization of \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q of class \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q and \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q, where \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\QSO\\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q and \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"} +{"rule":"ENGLISH_WORD_REPEAT_BEGINNING_RULE","sentence":"^\\QThen, \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q is arc-length parametrized and the TNB frame of \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q is: \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q And the curvature and torsion of \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q are: \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q Fundamental theorem of curves.\\E$"} +{"rule":"EN_A_VS_AN","sentence":"^\\QMoreover, if \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q is another curve arc-length parametrized by \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q satisfying these restrictions, then there exists an Euclidean motion that carries \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q into \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"} +{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\Q[mode=image|tex,width=0.45]Images/theorem_immersions Let \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q with \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q, \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q be an open set, \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q be a submersion at \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q and \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q be the projection map into the first coordinate.\\E$"} +{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\Q[mode=image|tex,width=0.45]Images/theorem_submersions Submanifolds of \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"} 
+{"rule":"UPPERCASE_SENTENCE_START","sentence":"^\\Qif \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q and \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q such that \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q then this root is at most double.\\E$"} +{"rule":"ADVERB_OR_HYPHENATED_ADJECTIVE","sentence":"^\\QLet \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q be an open bounded connected set such that \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q is of class \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q, \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q and \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"} +{"rule":"ADVERB_OR_HYPHENATED_ADJECTIVE","sentence":"^\\QThen: \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Qg \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q Let \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q be a compact oriented surface without boundary.\\E$"} +{"rule":"ADVERB_OR_HYPHENATED_ADJECTIVE","sentence":"^\\QLet \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q be a compact oriented surface without boundary and \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q be a vector field tangent to \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q with isolated singularities whose indexes are \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q, \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"} +{"rule":"ADVERB_OR_HYPHENATED_ADJECTIVE","sentence":"^\\QThen: \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q Let \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q be a compact oriented surface without boundary and \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q be a vector field tangent to \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"} +{"rule":"ADVERB_OR_HYPHENATED_ADJECTIVE","sentence":"^\\QLet \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q be a compact oriented surface with boundary, \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q be an open neighbourhood of \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q and \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"} +{"rule":"ADVERB_OR_HYPHENATED_ADJECTIVE","sentence":"^\\QLet \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q be a compact oriented surface, \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q be an open neighbourhood of \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q and \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"} +{"rule":"ENGLISH_WORD_REPEAT_BEGINNING_RULE","sentence":"^\\QWe denote by 
\\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q the set of all differentiable vector fields on \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q that are tangent to \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"} +{"rule":"ENGLISH_WORD_REPEAT_BEGINNING_RULE","sentence":"^\\QWe say that \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q is tangent to \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q if \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"} +{"rule":"UPPERCASE_SENTENCE_START","sentence":"^\\Qa hyperbolic point if \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"} +{"rule":"UPPERCASE_SENTENCE_START","sentence":"^\\Qa plane point if \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q and \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"} +{"rule":"UPPERCASE_SENTENCE_START","sentence":"^\\Qa parabolic point if \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q but \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"} +{"rule":"UPPERCASE_SENTENCE_START","sentence":"^\\Qwith initial conditions \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q, \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q and \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"} +{"rule":"SENTENCE_WHITESPACE","sentence":"^\\QR3.\\E$"} +{"rule":"ENGLISH_WORD_REPEAT_BEGINNING_RULE","sentence":"^\\QWe denote by \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q the set of all differentiable vector fields defined on \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"} +{"rule":"ENGLISH_WORD_REPEAT_BEGINNING_RULE","sentence":"^\\QThen, given \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q we have that: \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q where \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"} +{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\QThen \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q by RFA:measureB,RFA:measureC.\\E$"} +{"rule":"COMMA_COMPOUND_SENTENCE","sentence":"^\\QGiven a function \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q, we say that \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q exists and it is finite if \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q is integrable on \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"} +{"rule":"SENTENCE_WHITESPACE","sentence":"^\\Q\\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q, \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q, 
\\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"} +{"rule":"SENTENCE_WHITESPACE","sentence":"^\\Q\\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q and \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Qa.e.\\E$"} +{"rule":"TO_TWO","sentence":"^\\QNow extend this to positive simple functions and the to positive measurable functions.\\E$"} +{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\QThe equality is held if: \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q and \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q [mode=image|tex,width=0.7]Images/young We say that \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q are Hölder conjugates if \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"} +{"rule":"SENTENCE_WHITESPACE","sentence":"^\\QLp. spaces.\\E$"} +{"rule":"COMMA_COMPOUND_SENTENCE","sentence":"^\\QWe will denote \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q and we will say that \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q is the Fourier series of \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"} diff --git a/Mathematics/3rd/Differential_geometry/Differential_geometry.tex b/Mathematics/3rd/Differential_geometry/Differential_geometry.tex index d02bee3..922572d 100644 --- a/Mathematics/3rd/Differential_geometry/Differential_geometry.tex +++ b/Mathematics/3rd/Differential_geometry/Differential_geometry.tex @@ -32,7 +32,7 @@ \end{enumerate} \end{proposition} \begin{definition} - Let $I\subseteq\RR$ be an open interval and $C\subset\RR^n$ be a curve. A \emph{parametrization} of $C$ of class $\mathcal{C}^k$ is a function $\vf{\alpha}:I\rightarrow\RR^n$ of class $\mathcal{C}^k$ such that $\vf\alpha(I)=C$. The image of $\vf\alpha$, $C$, is called the \emph{trace} of $\vf\alpha$ and it is sometimes denoted by $\vf\alpha^*:=\im(\vf\alpha)$\footnote{Sometimes $\vf\alpha$ is referred to the curve as well as to the parametrization of it.}. + Let $I\subseteq\RR$ be an open interval and $C\subset\RR^n$ be a curve. A \emph{parametrization} of $C$ of class $\mathcal{C}^k$ is a function $\vf{\alpha}:I\rightarrow\RR^n$ of class $\mathcal{C}^k$ such that $\vf\alpha(I)=C$. 
The image of $\vf\alpha$, $C$, is called the \emph{trace} of $\vf\alpha$, and it is sometimes denoted by $\vf\alpha^*:=\im(\vf\alpha)$\footnote{Sometimes $\vf\alpha$ is referred to the curve as well as to the parametrization of it.}. \end{definition} \begin{definition} Let $I\subseteq\RR$ be an open interval, $C\subset\RR^n$ be a curve and $\vf{\alpha}:I\rightarrow\RR^n$ be a parametrization of $C$ of class $\mathcal{C}^1$. We define the \emph{tangent vector} of $\vf{\alpha}$ at $t_0\in\RR$ as $\vf{\alpha}'(t_0)$. We say that $\vf{\alpha}$ is \emph{regular} if $\vf{\alpha}'(t)\ne 0$ $\forall t\in I$. In that last case, we define the \emph{tangent line} of $\vf{\alpha}$ at $\vf{\alpha}(t_0)$ as the following parametrized line in $\RR^n$: $$s\longmapsto \vf{\alpha}(t_0)+s\vf{\alpha}'(t_0)$$ @@ -41,7 +41,7 @@ Let $C\subset\RR^n$ be a curve. We say that $C$ is a \emph{plane curve} if it is contained in a plane of $\RR^n$. \end{definition} \begin{definition}\label{DG:reparam} - Let $I,J\subseteq\RR$ be open intervals, $C\subset\RR^n$ be a curve, $\vf{\alpha}:I\rightarrow\RR^n$ be a regular parametrization of $C$ of class $\mathcal{C}^1$ and $h:J\rightarrow I$ be a diffeomorphism. Then, $\vf{\beta}:=\vf{\alpha}\circ h:J\rightarrow\RR^n$ is a regular parametrization of $C$ satisfying: $$\vf{\beta}'(s)=\vf{\alpha}'(h(s))h'(s)\quad\forall s\in J$$ It is said that $\vf{\beta}$ is a \emph{reparametrization} of $\vf{\alpha}$ and $h$ is a \emph{change of parameter}. Moreover, the reparametrization is \emph{positive} if $h'(s)>0$ $\forall s\in J$ and it is \emph{negative} if $h'(s)<0$ $\forall s\in J$. + Let $I,J\subseteq\RR$ be open intervals, $C\subset\RR^n$ be a curve, $\vf{\alpha}:I\rightarrow\RR^n$ be a regular parametrization of $C$ of class $\mathcal{C}^1$ and $h:J\rightarrow I$ be a diffeomorphism. 
Then, $\vf{\beta}:=\vf{\alpha}\circ h:J\rightarrow\RR^n$ is a regular parametrization of $C$ satisfying: $$\vf{\beta}'(s)=\vf{\alpha}'(h(s))h'(s)\quad\forall s\in J$$ It is said that $\vf{\beta}$ is a \emph{reparametrization} of $\vf{\alpha}$ and $h$ is a \emph{change of parameter}. Moreover, the reparametrization is \emph{positive} if $h'(s)>0$ $\forall s\in J$, and it is \emph{negative} if $h'(s)<0$ $\forall s\in J$. \end{definition} \subsubsection{Length of curves} \begin{definition} @@ -163,7 +163,7 @@ \end{enumerate} \end{proposition} \begin{theorem}[Frenet-Serret formulas] - Let $I\subseteq\RR$ be an open interval, $C\subset\RR^3$ be a curve and $\vf{\alpha}:I\rightarrow\RR^3$ be a regular arc-length parametrization of $C$ of class $\mathcal{C}^3$ such that $\vf{\alpha}''(s)\ne 0$ $\forall s\in I$. Then\footnote{Note that an inversion of the orientation of $\vf\alpha$ would change the sign of $\T\alpha$ and $\B\alpha$ but it would preserve the sign of $\N\alpha$, $k_{\alpha}$ and $\ta\alpha$.}: + Let $I\subseteq\RR$ be an open interval, $C\subset\RR^3$ be a curve and $\vf{\alpha}:I\rightarrow\RR^3$ be a regular arc-length parametrization of $C$ of class $\mathcal{C}^3$ such that $\vf{\alpha}''(s)\ne 0$ $\forall s\in I$. Then\footnote{Note that an inversion of the orientation of $\vf\alpha$ would change the sign of $\T\alpha$ and $\B\alpha$, but it would preserve the sign of $\N\alpha$, $k_{\alpha}$ and $\ta\alpha$.}: $$ \begin{pmatrix} \T\alpha \\ @@ -183,7 +183,7 @@ $$ \end{theorem} \begin{definition} - Let $I\subseteq\RR$ be an open interval, $C\subset\RR^3$ be a curve and $\vf{\alpha}:I\rightarrow\RR^3$ be a regular arc-length parametrization of $C$ of class $\mathcal{C}^3$ such that $\vf{\alpha}''(s)\ne 0$ $\forall s\in I$. and $s_0\in I$. 
We define the following planes of $\RR^3$: + Let $I\subseteq\RR$ be an open interval, $C\subset\RR^3$ be a curve and $\vf{\alpha}:I\rightarrow\RR^3$ be a regular arc-length parametrization of $C$ of class $\mathcal{C}^3$ such that $\vf{\alpha}''(s)\ne 0$ $\forall s\in I$ and $s_0\in I$. We define the following planes of $\RR^3$: \begin{itemize} \item \emph{Osculating plane}: plane generated by $\T\alpha(s_0)$ and $\N\alpha(s_0)$ that contains $\vf{\alpha}(s_0)$. \item \emph{Normal plane}: plane generated by $\N\alpha(s_0)$ and $\B\alpha(s_0)$ that contains $\vf{\alpha}(s_0)$. @@ -506,7 +506,7 @@ $$\function{\vf{\dd{f}}_p}{T_pS_1}{T_{\vf{f}(p)}S_2}{\vf{v}}{{(\vf{f}\circ\vf\alpha)}'(0)}$$ where $\vf\alpha:(-\varepsilon,\varepsilon)\rightarrow S_1$ is a parametrization of a curve of class $\mathcal{C}^\infty$ such that $\vf\alpha(0)=p$ and $\vf\alpha'(0)=\vf{v}$. \end{definition} \begin{proposition} - Let $S_1,S_2\subseteq\RR^3$ be surfaces, $p\in S_1$ and $\vf{f}:S_1\rightarrow S_2$ be a differentiable function. Then, $\vf{\dd{f}}_p$ is linear. Moreover if $(V_1,\vf\varphi_1(u,v))$ and $(V_2,\vf\varphi_2(\tilde{u},\tilde{v}))$ are parametrizations of $S_1$ and $S_2$ respectively, $\tilde{u}=f_1(u,v)$, $\tilde{v}=f_2(u,v)$\footnote{That is, $f_1$ and $f_2$ are the component functions of ${\vf\varphi_2}^{-1}\circ\vf{f}\circ\vf\varphi_1$.} and $\mathcal{B}_1=\left(\pdv{\vf\varphi_1}{u},\pdv{\vf\varphi_1}{v}\right)$, $\mathcal{B}_2=\left(\pdv{\vf\varphi_2}{\tilde{u}},\pdv{\vf\varphi_2}{\tilde{v}}\right)$, we have that: $$[\vf{\dd{f}}_p]_{\mathcal{B}_1,\mathcal{B}_2}= + Let $S_1,S_2\subseteq\RR^3$ be surfaces, $p\in S_1$ and $\vf{f}:S_1\rightarrow S_2$ be a differentiable function. Then, $\vf{\dd{f}}_p$ is linear. 
Moreover, if $(V_1,\vf\varphi_1(u,v))$ and $(V_2,\vf\varphi_2(\tilde{u},\tilde{v}))$ are parametrizations of $S_1$ and $S_2$ respectively, $\tilde{u}=f_1(u,v)$, $\tilde{v}=f_2(u,v)$\footnote{That is, $f_1$ and $f_2$ are the component functions of ${\vf\varphi_2}^{-1}\circ\vf{f}\circ\vf\varphi_1$.} and $\mathcal{B}_1=\left(\pdv{\vf\varphi_1}{u},\pdv{\vf\varphi_1}{v}\right)$, $\mathcal{B}_2=\left(\pdv{\vf\varphi_2}{\tilde{u}},\pdv{\vf\varphi_2}{\tilde{v}}\right)$, we have that: $$[\vf{\dd{f}}_p]_{\mathcal{B}_1,\mathcal{B}_2}= \renewcommand\arraystretch{2} \begin{pmatrix} \displaystyle\pdv{f_1}{u}({\vf{\varphi}}^{-1}(p)) & \displaystyle\pdv{f_1}{v}({\vf{\varphi}}^{-1}(p)) \\ @@ -574,7 +574,7 @@ \begin{definition} Let $S_1,S_2\subseteq\RR^3$ be surfaces and $\vf{f}:S_1\rightarrow S_2$ be a differentiable function. We say that $\vf{f}$ is a \emph{local isometry} if the differential function $\vf{\dd{f}}_p$ is an isometry $\forall p\in S_1$. That is, for each $p\in S_1$ we have: $$\langle\vf{v},\vf{w}\rangle_1=\langle\vf{\dd{f}}_p(\vf{v}),\vf{\dd{f}}_p(\vf{w})\rangle_2\quad\forall\vf{v},\vf{w}\in T_pS_1\footnote{Here, ${\langle\cdot,\cdot\rangle}_i$ represents the first fundamental form of $S_i$, $i=1,2$.}$$ - We say that $\vf{f}$ is an \emph{isometry} if it is a local isometry and it is invertible. + We say that $\vf{f}$ is an \emph{isometry} if it is a local isometry, and it is invertible. \end{definition} \begin{proposition} Let $S_1,S_2\subseteq\RR^3$ be surfaces and $\vf{f}:S_1\rightarrow S_2$ be a local isometry. Then, $\vf{\dd{f}}_p$ is an isomorphism. @@ -627,7 +627,7 @@ Let $S\subseteq\RR^3$ be a surface oriented with a normal unit field $\vf\nu$ and $p\in S$. Then, the Weingarten endomorphism is auto-adjoint with respect to the first fundamental form. 
That is: $${\langle \vf{W}_p(\vf{u}),\vf{v}\rangle}_p={\langle \vf{u},\vf{W}_p(\vf{v})\rangle}_p\quad\forall\vf{u},\vf{v}\in T_pS$$ \end{proposition} \begin{proposition} - Let $S\subseteq\RR^3$ be an orientable surface and $p\in S$. Then, the Weingarten endomorphism has real eigenvalues and it diagonalizes in an orthonormal basis of $T_pS$. + Let $S\subseteq\RR^3$ be an orientable surface and $p\in S$. Then, the Weingarten endomorphism has real eigenvalues, and it diagonalizes in an orthonormal basis of $T_pS$. \end{proposition} \begin{definition} Let $S\subseteq\RR^3$ be an orientable surface and $p\in S$. We define the \emph{principal directons} of $S$ at $p$ as the eigenspaces of $\vf{W}_p$. We define the \emph{principal curvatures} of $S$ at $p$ as the eigenvalues of $\vf{W}_p$. @@ -671,7 +671,7 @@ Let $S\subseteq\RR^3$ be an orientable surface and $\vf\alpha:I\rightarrow S$ be a regular parametrization of a curve $C$ of class $\mathcal{C}^\infty$. Let $\vf\nu(t):=(\vf\nu\circ\vf\alpha)(t)$. Then, $C$ is a line of curvature of $S$ if and only if $$\vf\nu'(t)=\lambda(t)\vf\alpha'(t)$$ where $\lambda(t)$ is a differentiable function. In this case, $-\lambda(t)$ is the principal curvature of $S$ in the direction of $\vf\alpha'(t)$. \end{proposition} \begin{proposition}[Euler's formula] - Let $S\subseteq\RR^3$ be an orientable surface, $p\in S$ and $(\vf{v}_1,\vf{v}_2)$ be an orthonormal basis of $T_pS$, where $\vf{v}_i$ is an eigenvectors of eigenvalue $k_i$ of $\vf{W}_p$ for $i=1,2$. Then: $$k_\text{n}(\cos\theta \vf{v}_1+\sin\theta \vf{v}_2)=k_1{\left(\cos\theta\right)}^2+k_2{\left(\sin\theta\right)}^2$$ + Let $S\subseteq\RR^3$ be an orientable surface, $p\in S$ and $(\vf{v}_1,\vf{v}_2)$ be an orthonormal basis of $T_pS$, where $\vf{v}_i$ is an eigenvector of eigenvalue $k_i$ of $\vf{W}_p$ for $i=1,2$. 
Then: $$k_\text{n}(\cos\theta \vf{v}_1+\sin\theta \vf{v}_2)=k_1{\left(\cos\theta\right)}^2+k_2{\left(\sin\theta\right)}^2$$ Hence, we will denote $k_\text{n}(\theta):=k_1{\left(\cos\theta\right)}^2+k_2{\left(\sin\theta\right)}^2$. \end{proposition} \begin{corollary} @@ -793,7 +793,7 @@ \vf\varphi_{vu} & =\Gamma_{21}^1\vf\varphi_u+\Gamma_{21}^2\vf\varphi_v+f\vf\nu \\ \vf\varphi_{vv} & =\Gamma_{22}^1\vf\varphi_u+\Gamma_{22}^2\vf\varphi_v+g\vf\nu \end{align*} - for some coefficients $\Gamma_{ij}^k$, $i,j,k\in\{1,2\}$. This coefficients are called \emph{Christoffel symbols}\footnote{Observe that $\Gamma_{ij}^k=\Gamma_{ji}^k$ $\forall i,j,k\in\{1,2\}$.}. + for some coefficients $\Gamma_{ij}^k$, $i,j,k\in\{1,2\}$. These coefficients are called \emph{Christoffel symbols}\footnote{Observe that $\Gamma_{ij}^k=\Gamma_{ji}^k$ $\forall i,j,k\in\{1,2\}$.}. \end{definition} \begin{proposition} Let $S\subseteq\RR^3$ be an orientable surface and $(V,\vf\varphi(u,v))$ be a parametrization of $S$. Then: @@ -863,7 +863,7 @@ \end{proposition} \subsubsection{Parallel transport} \begin{definition} - Let $S\subseteq\RR^3$ be a surface and $U\subseteq S$ be an open set. A \emph{vector field} tangent to $S$ defined on $U$ is a correspondence $\vf{X}$ that at each point $p\in U$ it assigns a tangent vector $\vf{X}(p)=:\vf{X}_p\in T_pS$. We say that $\vf{X}$ is \emph{differentiable} at $p\in U$ if there is a parametrization $\vf\varphi(u,v)$ of $S$ whose image contains $p$ such that $$\vf{X}=a\vf\varphi_u+b\vf\varphi_v$$ for some differentiable functions $a(u,v)$, $b(u,v)$ at $p$. We say that $\vf{X}$ is \emph{differentiable} if it is differentiable at each point $p\in U$\footnote{From now on, all the vector fields considered will be differentiable so sometimes we will omit to say it explicitly.}. + Let $S\subseteq\RR^3$ be a surface and $U\subseteq S$ be an open set. 
A \emph{vector field} tangent to $S$ defined on $U$ is a correspondence $\vf{X}$ that at each point $p\in U$ it assigns a tangent vector $\vf{X}(p)=:\vf{X}_p\in T_pS$. We say that $\vf{X}$ is \emph{differentiable} at $p\in U$ if there is a parametrization $\vf\varphi(u,v)$ of $S$ whose image contains $p$ such that $$\vf{X}=a\vf\varphi_u+b\vf\varphi_v$$ for some differentiable functions $a(u,v)$, $b(u,v)$ at $p$. We say that $\vf{X}$ is \emph{differentiable} if it is differentiable at each point $p\in U$\footnote{From now on, all the vector fields considered will be differentiable, so sometimes we will omit to say it explicitly.}. \end{definition} \begin{definition} Let $S\subseteq\RR^3$ be a surface, $\vf{X}$ be a differentiable vector field tangent to $S$, $p\in S$ and $\vf{w}\in T_pS$. Let $\vf\alpha:(-\varepsilon,\varepsilon)\rightarrow S$ a parametrized curve of class $\mathcal{C}^\infty$ with $\vf\alpha(0)=p$ and $\vf\alpha'(0)=\vf{w}$. We denote $\vf{X}(t):=(\vf{X}\circ\vf\alpha)(t)$. We define the \emph{covariant derivative} of $\vf{X}$ at the point $p$ in the direction of $\vf{w}$, denoted as $\frac{\text{D}\vf{X}}{\dd{t}}(0)$, as the orthogonal projection $\vf\pi^\perp$ of $\vf{X}'(0)$ over the vector field $T_pS$. That is: $$\frac{\text{D}\vf{X}}{\dd{t}}(0)=\vf\pi^\perp\left(\vf{X}'(0)\right)$$ @@ -885,7 +885,7 @@ Let $S\subseteq\RR^3$ be a surface and $\vf{X}$, $\vf{Y}$ be vector fields tangent to $S$ along a curve $\vf\alpha:I\rightarrow S$ of class $\mathcal{C}^\infty$ such that they are parallel. Then, $t\mapsto\langle \vf{X}(t),\vf{Y}(t)\rangle$ is constant. In particular, the norms $\|\vf{X}(t)\|$, $\|\vf{Y}(t)\|$ as well as the angle between $\vf{X}(t)$ and $\vf{Y}(t)$ are constant. \end{proposition} \begin{proposition} - Let $S\subseteq\RR^3$ be a surface, $(V,\vf\varphi(u,v))$ is a parametrization of $S$ and $\vf\alpha: I\rightarrow S$ be a parametrized curve of class $\mathcal{C}^\infty$ such that $\vf\alpha=\vf\varphi(u(t),v(t))$. 
Then, given $t_0\in I$ and $\vf{w}\in T_{\vf\alpha(t_0)}S$ there exists a unique parallel vector field $\vf{X}=a\vf\varphi_u+b\vf\varphi_v$ along $\vf\alpha$ such that $\vf{X}(t_0)=\vf{w}$. This vector field is called \emph{parallel transport} of the vector $\vf{w}$ along $\vf{\alpha}$ and it is defined on the entire interval $I$. It can be found by solving this system of odes: + Let $S\subseteq\RR^3$ be a surface, $(V,\vf\varphi(u,v))$ is a parametrization of $S$ and $\vf\alpha: I\rightarrow S$ be a parametrized curve of class $\mathcal{C}^\infty$ such that $\vf\alpha=\vf\varphi(u(t),v(t))$. Then, given $t_0\in I$ and $\vf{w}\in T_{\vf\alpha(t_0)}S$ there exists a unique parallel vector field $\vf{X}=a\vf\varphi_u+b\vf\varphi_v$ along $\vf\alpha$ such that $\vf{X}(t_0)=\vf{w}$. This vector field is called \emph{parallel transport} of the vector $\vf{w}$ along $\vf{\alpha}$, and it is defined on the entire interval $I$. It can be found by solving this system of odes: $$\left\{ \begin{aligned} a'+\Gamma_{11}^1au'+\Gamma_{12}^1av'+\Gamma_{21}^1bu'+\Gamma_{22}^1bv' & =0 \\ @@ -1015,7 +1015,7 @@ \begin{definition} Let $S\subseteq\RR^3$ be a regular surface. A \emph{vector field} defined on $S$ is a correspondence $\vf{X}$ that at each point $p\in S$ it assigns a vector $\vf{X}(p)=:\vf{X}_p\in T_p\RR^3$. If there is a parametrization $\vf\varphi(u,v)$ of $S$, we can write $$\vf{X}=\vf{X}(u,v)=\sum_{i=1}^3X^i(u,v)\pdv{}{x^i}$$ - where $X^i(u,v):=(X^i\circ\vf\varphi)(u,v)$. We say that $\vf{X}$ is \emph{differentiable} if it the functions $X^i(u,v)$ are differentiable. We say that $\vf{X}$ is \emph{tangent} to $S$ if $\vf{X}_p\in T_pS$ $\forall p\in S$. In this case we can write: $$\vf{X}=\tilde{X}^1\vf\varphi_u+\tilde{X}^2\vf\varphi_v$$ + where $X^i(u,v):=(X^i\circ\vf\varphi)(u,v)$. We say that $\vf{X}$ is \emph{differentiable} if the functions $X^i(u,v)$ are differentiable. We say that $\vf{X}$ is \emph{tangent} to $S$ if $\vf{X}_p\in T_pS$ $\forall p\in S$. 
In this case we can write: $$\vf{X}=\tilde{X}^1\vf\varphi_u+\tilde{X}^2\vf\varphi_v$$ \end{definition} \begin{proposition} Let $S\subseteq\RR^3$ be a regular surface and $\vf{X}$, $\vf{Y}$ be tangent differential vector fields to $S$ such that at some point $p\in S$, the vectors $\vf{X}_p$, $\vf{Y}_p$ are linearly independent. Then, there exists a local parametrization $(V,\vf\varphi(u,v))$ of $S$ such that $p\in\vf\varphi(V)$ and $$\vf{X}=\lambda\vf\varphi_u\qquad\vf{Y}=\mu\vf\varphi_v$$ for some differentiable functions $\lambda$, $\mu$. @@ -1029,7 +1029,7 @@ \end{definition} \begin{definition} Let $V$ be a vector space of dimension $n$ and $\omega:V\times\overset{(k)}{\cdots}\times V\longrightarrow \RR$ be a $k$-linear map. We say that $\omega$ is \emph{alternating} if $$\omega(\vf{u}_{\sigma(1)},\ldots,\vf{u}_{\sigma(k)})=\sign(\sigma)\omega(\vf{u}_1,\ldots,\vf{u}_k)\qquad\forall\sigma\in \text{S}_k$$ - We denote by $\Lambda^kV^*$ the vector space of the alternating $k$-linear maps. The elements of $\Lambda^kV^*$ are called \emph{multilinear forms}\footnote{Here $V^*$ denotes the dual space of $V$ (see \mcref{LA:dual}).}. By agreement we denote $\Lambda^0V^*:=\RR$ and: $$\Lambda^*V^*:=\bigoplus_{k=0}^{n}\Lambda^kV^*$$ + We denote by $\Lambda^kV^*$ the vector space of the alternating $k$-linear maps. The elements of $\Lambda^kV^*$ are called \emph{multilinear forms}\footnote{Here $V^*$ denotes the dual space of $V$ (see \mcref{LA:dual}).}. By agreement, we denote $\Lambda^0V^*:=\RR$ and: $$\Lambda^*V^*:=\bigoplus_{k=0}^{n}\Lambda^kV^*$$ \end{definition} \begin{definition} Let $V$ be a vector space, $\alpha\in\Lambda^pV^*$ and $\beta\in\Lambda^qV^*$. We define its \emph{exterior product} as the multilinear map $\alpha\wedge\beta$ defined as: @@ -1150,7 +1150,7 @@ \subsection{Integration} \subsubsection{Submanifolds of \texorpdfstring{$\RR^n$}{Rn}} \begin{definition} - Let $M\subseteq\RR^n$ be a submanifold and $p\in M$. 
If $\vf\alpha:(-\varepsilon,\varepsilon)\rightarrow\RR^3$ is a parametrization of a curve of class $\mathcal{C}^\infty$ such that $\vf\alpha(0)=p$, we say that $\vf\alpha'(0)$ is a \emph{tangent vector} to $M$ at $p$. The set of all such vectors is called \emph{tangent space} to $M$ at $p$ and it is denoted as $T_pM$. Moreover, $T_pM$ is a vector space of dimension $\dim M$. + Let $M\subseteq\RR^n$ be a submanifold and $p\in M$. If $\vf\alpha:(-\varepsilon,\varepsilon)\rightarrow\RR^3$ is a parametrization of a curve of class $\mathcal{C}^\infty$ such that $\vf\alpha(0)=p$, we say that $\vf\alpha'(0)$ is a \emph{tangent vector} to $M$ at $p$. The set of all such vectors is called \emph{tangent space} to $M$ at $p$, and it is denoted as $T_pM$. Moreover, $T_pM$ is a vector space of dimension $\dim M$. \end{definition} \begin{definition} Let $M\subseteq\RR^n$ be a submanifold of dimension $k$ and $U\subseteq S$ be an open set. A \emph{vector field} defined on $U$ is a correspondence $\vf{X}$ that at each point $p\in U$ it assigns a tangent vector $\vf{X}(p)=:\vf{X}_p\in T_p\RR^n$. We say that $\vf{X}$ is \emph{differentiable} at $p\in U$ if there is a parametrization $\vf\varphi(u^1,\ldots,u^k)$ of $M$ whose image contains $p$ such that $$\vf{X}=\sum X^i\vf\varphi_{u^i}$$ for some functions $X^1,\ldots,X^k$ differentiable at $p$. We say that $\vf{X}$ is \emph{differentiable} if it is differentiable at each point $p\in U$. We say that $\vf{X}$ is \emph{tangent} to $M$ if $\vf{X}_p\in T_pM$ $\forall p\in U$. We denote by $\mathcal{X}(U)$ the set of all differentiable vector fields on $U$ that are tangent to $M$. @@ -1247,10 +1247,10 @@ Let $M\subseteq\RR^n$ be an orientated submanifold of dimension $k$ with $\Fr{M}\ne\varnothing$. We say that a basis $(\vf{e}_1,\ldots,\vf{e}_{k-1})$ of $T_p\Fr{M}$ is \emph{positive} if $(\vf\nu_{\Fr{M}},\vf{e}_1,\ldots,\vf{e}_{k-1})$ is a positive basis of $T_pM$. 
This choice determines an orientation on $\Fr{M}$, which is called \emph{orientation induced by $M$}. \end{definition} \begin{proposition} - Let $M\subseteq\RR^n$ be an orientated submanifold of dimension $k$ with $\Fr{M}\ne\varnothing$, $\eta_M$ be the volume element of $M$ and $\vf\nu_{\Fr{M}}$ be the unit normal exterior vector field. Then the the volume element of $\Fr{M}$ associated with the orientation of $\Fr{M}$ induced by the one of $M$ is: $$\eta_{\Fr{M}}=\iota_{\vf\nu_{\Fr{M}}}\eta_M$$ + Let $M\subseteq\RR^n$ be an orientated submanifold of dimension $k$ with $\Fr{M}\ne\varnothing$, $\eta_M$ be the volume element of $M$ and $\vf\nu_{\Fr{M}}$ be the unit normal exterior vector field. Then, the volume element of $\Fr{M}$ associated with the orientation of $\Fr{M}$ induced by the one of $M$ is: $$\eta_{\Fr{M}}=\iota_{\vf\nu_{\Fr{M}}}\eta_M$$ \end{proposition} \begin{proposition} - Let $S\subseteq\RR^3$ be a regular surface oriented with a vector field $\vf\nu_S$. Then, the area element of $S$ is given by $\eta_S=\iota_{\vf\nu}\eta$, where $\eta=\dd{x}\wedge\dd{y}\wedge\dd{z}$. Moreover if $\vf\varphi(u,v)$ is a local parametrization of $S$ compatible with the orientation, then: $$\eta_S=\vf\varphi^*\eta_S=\sqrt{E_{\vf\varphi}G_{\vf\varphi}-{F_{\vf\varphi}}^2}\dd{u}\wedge\dd{v}$$ + Let $S\subseteq\RR^3$ be a regular surface oriented with a vector field $\vf\nu_S$. Then, the area element of $S$ is given by $\eta_S=\iota_{\vf\nu}\eta$, where $\eta=\dd{x}\wedge\dd{y}\wedge\dd{z}$. 
Moreover, if $\vf\varphi(u,v)$ is a local parametrization of $S$ compatible with the orientation, then: $$\eta_S=\vf\varphi^*\eta_S=\sqrt{E_{\vf\varphi}G_{\vf\varphi}-{F_{\vf\varphi}}^2}\dd{u}\wedge\dd{v}$$ \end{proposition} \subsubsection{Integration of differential forms} \begin{definition} @@ -1282,7 +1282,7 @@ \begin{definition} Let $M\subseteq\RR^n$ be an orientated submanifold of dimension $k$, $\{(V_\alpha,\vf\varphi_\alpha):\alpha\in A\}$ and an atlas of $M$ compatible with the orientation. Given $\omega\in\Omega_\text{c}^k(M)$, let $\{\rho_1,\ldots,\rho_m\}$ be a partition of unity of $K$ subordinated to $\{\vf\varphi(V_\alpha)\}$. We define the \emph{integral} of $\omega$ on $M$ as: $$\int_M\omega=\sum_{i= 1}^m\int_M\rho_i\omega=\sum_{i= 1}^m\int_{U_\alpha}{\vf\varphi_\alpha}^*(\rho_i\omega)$$ - where $\alpha\in A$ is such that $\supp(\rho_i)\subset \vf\varphi(V_\alpha)$\footnote{It can be seen that this definition doesn't depend neither on the atlas nor on the partition of unity chosen.}. + where $\alpha\in A$ is such that $\supp(\rho_i)\subset \vf\varphi(V_\alpha)$\footnote{It can be seen that this definition doesn't depend on either the atlas or the partition of unity chosen.}. \end{definition} \begin{proposition} Let $M\subseteq\RR^n$ be an orientated submanifold of dimension $k$, $\{(V_i,\vf\varphi_i):i=1,\ldots,m\}$ be a finite set of parametrizations of $M$ compatibles with the orientation and such that: @@ -1368,7 +1368,7 @@ \end{definition} \begin{theorem} Let $S\subset\RR^3$ be an oriented surface, $(V,\vf\varphi(u,v))$ be a parametrization of $S$ compatible with the orientation and $R\subseteq S$ be a simple region such that $R\subseteq\vf\varphi(V)$ and that $\Fr{R}$ is positively-oriented. Let $\vf\alpha:[0,\ell]\rightarrow\Fr{R}$ be a parametrization of $\Fr{R}$ which is differentiable on the intervals $[t_i,t_{i+1}]$, $i=0,\ldots,n$. 
Suppose $\tau_i:[t_i,t_{i+1}]\rightarrow\RR$ is a differentiable determination of the angle between $\vf\varphi_u$ and $\vf\alpha'(t)$, $t\in[t_i,t_{i+1}]$.
\end{proposition} \begin{proof} - Let $\mathcal{D}_0$ be a union of disjoint $0$-th dyadic cubes whose closure is contained in $U$. Now let $\mathcal{D}_1$ be a family of disjoint $1$-th dyadic cubes contained in $U\setminus\mathcal{D}_0$ whose closure is in $U\setminus\overline{\mathcal{D}_0}$. In general, let $\mathcal{D}_n$ be a family of disjoint $n$-th dyadic cubes contained in $U\setminus\bigcup_{k=0}^{n-1}\mathcal{D}_k$ whose closure is in $U\setminus\bigcup_{k=0}^{n-1}\overline{\mathcal{D}_k}$. By construction we have that $\bigcup_{n=0}^\infty \overline{\mathcal{D}_0}\subseteq U$ and the $\mathcal{D}_n$ are clearly pairwise disjoint. Moreover if $x\in U$, $\delta:=d(x,U^c)>0$ and so it will be contained (at least) in a $m$-th dyadic cube, with $2^{-m}\sqrt{n}<\delta$. Hence, $\bigcup_{n=0}^\infty \mathcal{D}_0=U$ + Let $\mathcal{D}_0$ be a union of disjoint $0$-th dyadic cubes whose closure is contained in $U$. Now let $\mathcal{D}_1$ be a family of disjoint $1$-th dyadic cubes contained in $U\setminus\mathcal{D}_0$ whose closure is in $U\setminus\overline{\mathcal{D}_0}$. In general, let $\mathcal{D}_n$ be a family of disjoint $n$-th dyadic cubes contained in $U\setminus\bigcup_{k=0}^{n-1}\mathcal{D}_k$ whose closure is in $U\setminus\bigcup_{k=0}^{n-1}\overline{\mathcal{D}_k}$. By construction, we have that $\bigcup_{n=0}^\infty \overline{\mathcal{D}_0}\subseteq U$ and the $\mathcal{D}_n$ are clearly pairwise disjoint. Moreover, if $x\in U$, $\delta:=d(x,U^c)>0$ and so it will be contained (at least) in an $m$-th dyadic cube, with $2^{-m}\sqrt{n}<\delta$. Hence, $\bigcup_{n=0}^\infty \mathcal{D}_0=U$ \end{proof} \begin{definition} Let $A\subseteq\RR^n$ be a set. We denote by $\mathcal{I}(A)$ the set of sequences of intervals that cover $A$. Analogously, we denote by $\mathcal{I}_0(A)$ the set of sequences of open intervals that cover $A$. @@ -190,7 +190,7 @@ Let's prove first that $\mathcal{M}(\RR^n)$ is a $\sigma$-algebra. 
\begin{itemize} \item $\RR^n\in\mathcal{M}(\RR^n)$ because $\RR^n$ is open and $0=\om{\mathcal{M}(\RR^n)\setminus\RR^n}<\varepsilon$ $\forall \varepsilon>0$ - \item Let $(A_n)\in\mathcal{M}(\RR^n)$. Then, $\forall \varepsilon>0$ for each $n\in\NN$ there exists an open set $U_n\supseteq A_n$ such that $\om{U_n\setminus A_n}<\frac{\varepsilon}{2^n}$. Now, $U:=\bigcup_{n=1}^\infty U_n$ is open an satisfy: + \item Let $(A_n)\in\mathcal{M}(\RR^n)$. Then, $\forall \varepsilon>0$ for each $n\in\NN$ there exists an open set $U_n\supseteq A_n$ such that $\om{U_n\setminus A_n}<\frac{\varepsilon}{2^n}$. Now, $U:=\bigcup_{n=1}^\infty U_n$ is open and satisfy: \begin{align*} \om{\bigcup_{n=1}^\infty U_n\setminus\bigcup_{n=1}^\infty A_n} & \leq \om{\bigcup_{n=1}^\infty (U_n\setminus A_n)} \\ & \leq\sum_{n=1}^{\infty}\om{U_n\setminus A_n}\leq\sum_{n=1}^{\infty}\frac{\varepsilon}{2^n}=\varepsilon @@ -264,7 +264,7 @@ Let $u$, $v$ be two finite measurable real functions, $U\subseteq\RR^2$ be an open set such that $(u(x),v(x))\in U$ $\forall x\in\RR^n$ and $\varphi:U\rightarrow\RR$ be a continuous function. Then, $\varphi(u(x),v(x))$ is also measurable. \end{proposition} \begin{sproof} - For any open set $V\subseteq\RR$, $\varphi^{-1}(V)$ is open and therefore it is a countable union of intervals of the form $I\times J$. Now, use the fact that $\{(u,v)\in I\times J\}=\{u\in I\}\cap\{v\in J\}$. + For any open set $V\subseteq\RR$, $\varphi^{-1}(V)$ is open, and therefore it is a countable union of intervals of the form $I\times J$. Now, use the fact that $\{(u,v)\in I\times J\}=\{u\in I\}\cap\{v\in J\}$. \end{sproof} \begin{proposition} Let $f$, $g$ be two measurable real functions. Then, so are $f\pm g$, $fg$ and $f/g$ if $g(x)\ne 0$ $\forall x\in\RR^n$. @@ -276,7 +276,7 @@ Let $f$, $g$ be two real functions such that $f$ is measurable and $f\almoste{=} g$. Then, $g$ is also measurable. \end{proposition} \begin{sproof} - Let $N:=\{f\ne g\}$ which is null. 
Moreover note that $\forall r\in\RR$: + Let $N:=\{f\ne g\}$ which is null. Moreover, note that $\forall r\in\RR$: $$\{g>r\}=\{f>r\} \cup (\{g>r\}\cap N)$$ which is measurable. \end{sproof} @@ -409,7 +409,7 @@ The inequality $\int_Ef_m(x)\dd{x}\leq \int_Ef(x)\dd{x}$ is obvious. We need to prove the other one. To do so it suffices to show that $\forall \varepsilon>0$ and $\forall s\in\mathcal{S}(f\indi{E})$ we have $(1-\varepsilon)\int_E s\leq {\displaystyle\lim_{m\to\infty}}\int_Ef_m(x)\dd{x}$. Let $E_m:=\{f_m\geq (1-\varepsilon)s\}$. Note that $E_m\nearrow E$ and moreover: $$\int_Ef_m\geq\int_{E_m}f_m\geq(1-\varepsilon)\int_{E_m}s$$ - Since $\mu_s$ is a measure we can use \mcref{RFA:incresingseq} to conclude that $\int_{E_m}s\nearrow \int_{E}s$. Therefore $\forall\varepsilon>0$ we have: $$(1-\varepsilon)\int_{E}s\leq \lim_{m\to\infty}\int_Ef_m$$ + Since $\mu_s$ is a measure we can use \mcref{RFA:incresingseq} to conclude that $\int_{E_m}s\nearrow \int_{E}s$. Therefore, $\forall\varepsilon>0$ we have: $$(1-\varepsilon)\int_{E}s\leq \lim_{m\to\infty}\int_Ef_m$$ \end{proof} \begin{proposition} Let $E\subseteq\RR^n$ be a measurable set with $\m{E}>0$, $f, g, (f_m)\geq 0$ be non-negative measurable functions. Then: @@ -507,10 +507,10 @@ \end{proposition} \subsubsection{Integral calculus in one variable and Riemann integral} \begin{definition} - Given a function $f:\RR\rightarrow\RR$, we say that $\int_a^bf(x)\dd{x}$ \emph{exists and it is finite} if $f$ is integrable on $(\min\{a,b\},\max\{a,b\})$\footnote{Note that if $f$ is measurable, the integral always exists but it may be $\pm\infty$.}. + Given a function $f:\RR\rightarrow\RR$, we say that $\int_a^bf(x)\dd{x}$ \emph{exists and it is finite} if $f$ is integrable on $(\min\{a,b\},\max\{a,b\})$\footnote{Note that if $f$ is measurable, the integral always exists, but it may be $\pm\infty$.}. 
\end{definition} \begin{theorem}[Mean value theorem for integrals] - Let $f:\RR\rightarrow\RR_{\geq 0}$ be an positive integrable function over $(a,b)$ and $g:(a,b)\rightarrow\RR$ be a measurable and bounded function such that $\alpha\leq g(x)\leq\beta$ almost everywhere on $(a,b)$. Then, $\exists\gamma\in[\alpha,\beta]$ such that: $$\int_a^bg(x)f(x)\dd{x}=\gamma\int_a^bf(x)\dd{x}$$ + Let $f:\RR\rightarrow\RR_{\geq 0}$ be a positive integrable function over $(a,b)$ and $g:(a,b)\rightarrow\RR$ be a measurable and bounded function such that $\alpha\leq g(x)\leq\beta$ almost everywhere on $(a,b)$. Then, $\exists\gamma\in[\alpha,\beta]$ such that: $$\int_a^bg(x)f(x)\dd{x}=\gamma\int_a^bf(x)\dd{x}$$ Moreover if $g$ is continuous, $\exists\xi\in(a,b)$ such that: $$\int_a^bg(x)f(x)\dd{x}=g(\xi)\int_a^bf(x)\dd{x}$$ In particular, taking $f=1$, we get: $$\int_a^bg(x)\dd{x}=g(\xi)(b-a)$$ \end{theorem} @@ -574,7 +574,7 @@ \begin{lemma} Let $E\subseteq\RR^{p+q}$ be a measurable set. Then: \begin{enumerate} - \item There exits a null set $N\subset \RR^q$ such that $E(y)$ is measurable $\forall y\in\RR^q\setminus N$ (that is $E(y)$ is measurable almost everywhere $\forall y\in\RR^q$). + \item There exists a null set $N\subset \RR^q$ such that $E(y)$ is measurable $\forall y\in\RR^q\setminus N$ (that is $E(y)$ is measurable almost everywhere $\forall y\in\RR^q$). \item The function $$\Phi(y)=\begin{cases} \m{E(y)} & \text{if } y\in\RR^q\setminus N \\ 0 & \text{if } y\in N @@ -740,14 +740,14 @@ \begin{proof} \begin{enumerate} \item Let $x\in\overline{F}$. Then, $\exists(x_n)\in F$ such that $\displaystyle \lim_{n\to\infty}x_n=x$. In particular $(x_n)$ is Cauchy and since $F$ is complete, we conclude $x\in F$. - \item Let $(x_n)\in F$ be a Cauchy sequence. In particular it is a Cauchy sequence in $E$ and so $\exists x\in E$ with $\displaystyle \lim_{n\to\infty}x_n=x$. Moreover this $x$ satisfies $x\in \overline{F}$ and since $F$ is closed, $x\in F$. 
+ \item Let $(x_n)\in F$ be a Cauchy sequence. In particular, it is a Cauchy sequence in $E$ and so $\exists x\in E$ with $\displaystyle \lim_{n\to\infty}x_n=x$. Moreover, this $x$ satisfies $x\in \overline{F}$ and since $F$ is closed, $x\in F$. \end{enumerate} \end{proof} \begin{definition} Let $(E,\|\cdot\|)$ be a normed vector space and $A\subseteq E$ be a subset. We say that $A$ is a \emph{total subset} if $\langle A\rangle$ is dense in $E$. \end{definition} \begin{remark} - The linear span $\langle A\rangle$ exclude the infinite linear combinations of elements in $A$, even if $A$ is a in a subspace of infinite dimension. + The linear span $\langle A\rangle$ exclude the infinite linear combinations of elements in $A$, even if $A$ is a subspace of infinite dimension. \end{remark} \begin{definition} A metric space is called \emph{separable} if it contains a countable dense subset. @@ -779,7 +779,7 @@ \end{align*} \end{definition} \begin{proposition} - Let $E$ be a normed vector space and $F$ be a closed subspace. The quotient space $\quot{E}{F}$ is a normed vector space. Moreover the map $$\function{\pi}{E}{\quot{E}{F}}{x}{\overline{x}}$$ is continuous with $\norm{\overline{x}}_{\quot{E}{F}}\leq \norm{x}_E$. + Let $E$ be a normed vector space and $F$ be a closed subspace. The quotient space $\quot{E}{F}$ is a normed vector space. Moreover, the map $$\function{\pi}{E}{\quot{E}{F}}{x}{\overline{x}}$$ is continuous with $\norm{\overline{x}}_{\quot{E}{F}}\leq \norm{x}_E$. \end{proposition} \begin{sproof} Note that $\overline{x}=x+F$ and so $$\norm{\overline{x}}_{\quot{E}{F}}=d(0,x+F)=0\iff 0\in x+ F\iff \overline{x}=\overline{0}$$ @@ -892,7 +892,7 @@ Let $K\subseteq \KK^n$ be a compact set and $f,g\in\mathcal{C}(K)$. Then: $$\norm{fg}_K\leq\norm{f}_K\norm{g}_K$$ \end{proposition} \begin{definition} - Let $K\subseteq \KK^n$ be a compact set and $A\subseteq \mathcal{C}(K)$ be a subset. 
We say that $A$ is a \emph{subalgebra} if $A$ is a vector subspace and it is stable under the product, that is if $\forall f,g\in A$ we have $fg\in A$. + Let $K\subseteq \KK^n$ be a compact set and $A\subseteq \mathcal{C}(K)$ be a subset. We say that $A$ is a \emph{subalgebra} if $A$ is a vector subspace, and it is stable under the product, that is if $\forall f,g\in A$ we have $fg\in A$. \end{definition} \begin{proposition} Let $K\subseteq \KK^n$ be a compact set and $A\subseteq \mathcal{C}(K)$ be a subalgebra. Then, $\overline{A}$ is also a subalgebra. @@ -937,7 +937,7 @@ Let $K\subseteq \KK^n$ be a compact set, $A\subseteq \mathcal{C}(K)$ be a separating subalgebra that vanishes nowhere, $x\in K$ and $f\in\mathcal{C}(K)$. Then, $\forall \varepsilon>0$ $\exists g_x\in\overline{A}$ such that $g_x(x)=f(x)$ and $g_x0$, $\norm{f-g}_K<\varepsilon$. For each $x\in K$, let $g_x$ be the function of \mcref{RFA:lemmaStone} that satisfies $g_xf-\varepsilon$. The compactness of $K$ implies $K\subset\bigcup_{i=1}^mN_{x_i}$ for certain $x_i\in K$, $i=1,\ldots,m$. Finally take $g:=\sup\{g_{x_i}:i=1,\ldots,m\}\in\overline{A}$ that satisfies $\norm{f-g}_K<\varepsilon$. - \item[$\KK=\CC$:] Note that $A_0:=\{\Re f:f\in A\}=\{\Im f:f\in A\}$ because $A$ is self-conjugate. Moreover $A_0$ is a separating subalgebra that vanishes nowhere. By the case $\KK=\RR$ we know that exists sequences $(u_n),(v_n)\in A_0$ such that $\displaystyle\lim_{n\to\infty}\norm{\Re f-u_n}_K=\lim_{n\to\infty}\norm{\Im f-v_n}_K=0$. And it suffice to consider $g_n:=u_n+\ii v_n$ that converges uniformly to $f$. + \item[$\KK=\RR$:] Let $f\in\mathcal{C}(K)$. We should find $g\in\mathcal{A}$ such that $\forall \varepsilon>0$, $\norm{f-g}_K<\varepsilon$. For each $x\in K$, let $g_x$ be the function of \mcref{RFA:lemmaStone} that satisfies $g_xf-\varepsilon$. The compactness of $K$ implies $K\subset\bigcup_{i=1}^mN_{x_i}$ for certain $x_i\in K$, $i=1,\ldots,m$. 
Finally, take $g:=\sup\{g_{x_i}:i=1,\ldots,m\}\in\overline{A}$ that satisfies $\norm{f-g}_K<\varepsilon$. + \item[$\KK=\CC$:] Note that $A_0:=\{\Re f:f\in A\}=\{\Im f:f\in A\}$ because $A$ is self-conjugate. Moreover, $A_0$ is a separating subalgebra that vanishes nowhere. By the case $\KK=\RR$ we know that exists sequences $(u_n),(v_n)\in A_0$ such that $\displaystyle\lim_{n\to\infty}\norm{\Re f-u_n}_K=\lim_{n\to\infty}\norm{\Im f-v_n}_K=0$. And it suffices to consider $g_n:=u_n+\ii v_n$ that converges uniformly to $f$. \end{itemize} \end{proof} \begin{definition} @@ -985,7 +985,7 @@ \begin{theorem} Let $E\subseteq\RR^n$ be a measurable space, $({f}_k)\in L^p(E)$ be a sequence of functions and $1\leq p<\infty$. Then: \begin{enumerate} - \item\label{RFA:thmLpBanachA} If $\displaystyle\lim_{k\to\infty}f_k(x)\almoste{=}{f}(x)$ with $\abs{{f}_k}\almoste\leq g\in L^p(E)$, then ${f}\in L^p(E)$ and $\displaystyle\lim_{k\to\infty}\norm{{f}_k-{f}}_p=0$ and we will write ${f}_k\overset{L^p}{\rightarrow}{f}$. + \item\label{RFA:thmLpBanachA} If $\displaystyle\lim_{k\to\infty}f_k(x)\almoste{=}{f}(x)$ with $\abs{{f}_k}\almoste\leq g\in L^p(E)$, then ${f}\in L^p(E)$ and $\displaystyle\lim_{k\to\infty}\norm{{f}_k-{f}}_p=0$, and we will write ${f}_k\overset{L^p}{\rightarrow}{f}$. \item\label{RFA:thmLpBanachB} If $\sum_{k=1}^\infty\norm{{f}_k}_p<\infty$, then $\sum_{k=1}^\infty\abs{f_k(x)}\almoste{<}\infty$ and $\exists f\in L^p(E)$ such that $\sum_{k=1}^\infty f_k(x)\almoste{=}f(x)$ and $\sum_{k=1}^N{f}_k\overset{L^p}{\rightarrow}{f}$. In particular, $(L^p,\norm{\cdot}_p)$ is a Banach space. \item\label{RFA:thmLpBanachC} If ${f}_k\overset{L^p}{\rightarrow}{f}$, then $\exists(f_{k_j})$ such that $\displaystyle\lim_{j\to\infty}f_{k_j}(x)\almoste{=}{f}(x)$. 
\end{enumerate} @@ -998,7 +998,7 @@ \norm{g_N}_p & ={\left(\int_E{\abs{g_N}}^p\right)}^{1/p}\leq \sum_{k=1}^N\norm{{f}_k}_p \\ & \leq\sum_{k=1}^\infty\norm{{f}_k}_p<\infty \end{align*} - where we have use the \mref{RFA:triangularineq}. Thus, ${\norm{g_N}_p}^p<\infty$ and by the \mnameref{RFA:monotone} we have ${\norm{g}_p}^p<\infty$ which implies: + where we have used the \mref{RFA:triangularineq}. Thus, ${\norm{g_N}_p}^p<\infty$ and by the \mnameref{RFA:monotone} we have ${\norm{g}_p}^p<\infty$ which implies: $$\sum_{k=1}^\infty\abs{f_k(x)}\almoste{<}\infty$$ Now use \mcref{RFA:thmLpBanachA} to show that $F_N\overset{L^p}{\rightarrow}{f}$, where $F_N=\sum_{k=1}^{N}f_k$. \item The Cauchy condition for $(f_k)$ implies that $\forall m\in\NN$ $\exists k_m$ such that if $p,q>k_m$ then $\norm{f_p-f_q}_p<\frac{1}{2^m}$. Now consider the partial sequence defined by the series of partial sums: @@ -1174,7 +1174,7 @@ Let $X,Y\subseteq \RR^n$ be measurable spaces and $K\in L^2(X\times Y)$. We define the \emph{Hilbert-Schmidt operator with kernel $K$} as the operator $T:L^2(Y)\rightarrow L^2(X)$ defined by: $$Tf(x)\almoste{=}\int_YK(x,y)f(y)\dd{y}$$ \end{definition} \begin{proposition}\label{RFA:fredholm} - Let $X$, $Y$ be compact metric spaces spaces and $K\in\mathcal{C}(X\times Y)$. The Fredholm operator $T$ with kernel $K$ is compact and satisfies $\norm{T}\leq\norm{K}_{X\times Y}\m{Y}$ + Let $X$, $Y$ be compact metric spaces and $K\in\mathcal{C}(X\times Y)$. The Fredholm operator $T$ with kernel $K$ is compact and satisfies $\norm{T}\leq\norm{K}_{X\times Y}\m{Y}$ \end{proposition} \begin{sproof} It is a direct application of \mnameref{RFA:arzela}. The proof of the equicontinuity follows from the inequality @@ -1281,7 +1281,7 @@ Let $E$ be a normed vector space and $x\in E$ with $x\ne 0$. Then, $\norm{\hat{x}}_{E^{**}}= \norm{x}_E$. \end{proposition} \begin{proposition} - Let $E$ be a normed vector space. 
The function $$\function{J}{E}{E^{**}}{x}{\hat{x}}$$ is linear, continuous, injective and isometric. Thus, $J(E)=E\subseteq E^{**}$. Moreover if $J$ is surjective, we have $E=E^{**}$. In this case, $E$ is called \emph{reflexive}. + Let $E$ be a normed vector space. The function $$\function{J}{E}{E^{**}}{x}{\hat{x}}$$ is linear, continuous, injective and isometric. Thus, $J(E)=E\subseteq E^{**}$. Moreover, if $J$ is surjective, we have $E=E^{**}$. In this case, $E$ is called \emph{reflexive}. \end{proposition} \begin{definition} Let $E$, $F$ be normed vector spaces and $T\in\mathcal{L}(E,F)$. We define the \emph{dual map}, $T^*\in\mathcal{L}(F^*,E^*)$, of $T$ as $T^*(v)=v\circ T$. @@ -1294,7 +1294,7 @@ \end{proposition} \subsubsection{Spectrum and eigenvalues} \begin{proposition} - Let $E$ be a Banach space and $T\in\mathcal{L}(E)$. Then, $\forall \alpha\in\KK$, $\im(T-\alpha\id)$ and $\ker(T-\alpha\id)$ are invariant over $T$. Moreover if $\alpha\ne 0$, the function $$\function{S}{\ker(T-\alpha\id)}{\ker(T-\alpha\id)}{x}{\alpha x}$$ is an isomorphism. + Let $E$ be a Banach space and $T\in\mathcal{L}(E)$. Then, $\forall \alpha\in\KK$, $\im(T-\alpha\id)$ and $\ker(T-\alpha\id)$ are invariant over $T$. Moreover, if $\alpha\ne 0$, the function $$\function{S}{\ker(T-\alpha\id)}{\ker(T-\alpha\id)}{x}{\alpha x}$$ is an isomorphism. \end{proposition} \begin{proof} Let $y\in \im(T-\alpha \id)$. Then, $y=Tx-\alpha x$ for some $x\in E$ and so: $$Ty=T(Tx-\alpha x)=(T-\alpha\id)(Tx)\in\im(T-\alpha\id)$$ @@ -1322,7 +1322,7 @@ Now let's see that ${\sigma(T)}^c$ is open. Let $\beta\notin\sigma(T)$ and $\alpha,z\in E$ be such that $\abs{\alpha-\beta}<\frac{1}{\norm{{(T-\beta\id)}^{-1}}}$. Now consider the function: $$g(x)={(T-\beta \id)}^{-1}(\alpha z-\beta x +z)$$ - We have that $g$ is a contraction and by the \mnameref{DE:fixedpoint} $\exists!x\in E$ such that $Tx-(\alpha-\beta) x=z$, i.e. $T-(\alpha-\beta)\id$ is bijective. 
This is true $\forall\alpha\in E$ satisfying $\abs{\alpha-\beta}<\frac{1}{\norm{{(T-\beta\id)}^{-1}}}$. Hence ${\sigma(T)}^c$ is open. + We have that $g$ is a contraction and by the \mnameref{DE:fixedpoint} $\exists!x\in E$ such that $Tx-(\alpha-\beta) x=z$, i.e. $T-(\alpha-\beta)\id$ is bijective. This is true $\forall\alpha\in E$ satisfying $\abs{\alpha-\beta}<\frac{1}{\norm{{(T-\beta\id)}^{-1}}}$. Hence, ${\sigma(T)}^c$ is open. Finally, since $\sigma(T)$ is a bounded closed subset of $\CC$, it is compact. \end{proof} @@ -1413,7 +1413,7 @@ $$\dotp{x}{y}=\frac{1}{4}\left({\norm{x+y}}^2-{\norm{x-y}}^2\right)$$ \end{lemma} \begin{sproof} - Expand the right hand side of the equalities using the definition of norm. + Expand the right-hand side of the equalities using the definition of norm. \end{sproof} \begin{proposition} Let $(H,\dotp{\cdot}{\cdot})$ be a pre-Hilbert space. Then: @@ -1423,7 +1423,7 @@ \end{itemize} \end{proposition} \begin{definition} - Let $(H,\dotp{\cdot}{\cdot})$ be a pre-Hilbert space and $x,y\in H$. We say that $x$ and $y$ are \emph{orthogonal} if $\dotp{x}{y}=0$. Moreover we define the \emph{orthogonal complement} of a subspace $A\subseteq H$ as: $$A^\perp:=\{x\in H:\dotp{x}{a}=0\ \forall a\in A\}$$ + Let $(H,\dotp{\cdot}{\cdot})$ be a pre-Hilbert space and $x,y\in H$. We say that $x$ and $y$ are \emph{orthogonal} if $\dotp{x}{y}=0$. Moreover, we define the \emph{orthogonal complement} of a subspace $A\subseteq H$ as: $$A^\perp:=\{x\in H:\dotp{x}{a}=0\ \forall a\in A\}$$ \end{definition} \begin{lemma} Let $(H,\dotp{\cdot}{\cdot})$ be a pre-Hilbert space and $A\subseteq H$ be a subspace. Then: $$A^\perp=\bigcap_{a\in A}\ker\dotp{\cdot}{a}$$ @@ -1489,7 +1489,7 @@ \begin{enumerate} \item The equality $F\cap F^\perp=\{0\}$ follows from noting that $\dotp{u}{u}=0$ $\forall u\in F\cap F^\perp$. Now let $x\in H$ and $y=P_Fx$. We need to show that $z:=x-y\in F^\perp$. Let $u\in F$. 
Then, $\exists\lambda\in\KK$ such that $\norm{\lambda}= 1$ and $\lambda\dotp{u}{z}=\abs{\dotp{u}{z}}$. Now consider $f(t)=\norm{z-vt}^2$, where $v=\lambda u\in F$. Note that $f$ has a minimum at the origin because: $$f(t)=\norm{x-y -vt}^2\geq\norm{x-y}^2=\norm{z}^2=f(0)$$ - because $y+vt\in F$ and $y$ is the minimizer of $x$ in $F$. Thus, $0=f'(0)=-2\dotp{v}{z}$ and so $\dotp{u}{z}=0$ $\forall u\in F$. Moreover $z$ is the minimizer of $x$ in $F^\perp$ because $\forall w\in F^\perp$ we have by the \mnameref{RFA:pythagorean}: + because $y+vt\in F$ and $y$ is the minimizer of $x$ in $F$. Thus, $0=f'(0)=-2\dotp{v}{z}$ and so $\dotp{u}{z}=0$ $\forall u\in F$. Moreover, $z$ is the minimizer of $x$ in $F^\perp$ because $\forall w\in F^\perp$ we have by the \mnameref{RFA:pythagorean}: $$\norm{x-w}^2=\norm{x-z}^2+\norm{z-w}^2\geq \norm{z-w}^2$$ \item We have just seen the implication to the right. For the other one note that by \mcref{RFA:projA} we can write $x=P_Fx+P_{F^\perp}x$. But, $x-y=P_Fx+P_{F^\perp}x -y\in F^\perp$ and $F\cap F^\perp=\{0\}$, so $y=P_Fx$. \end{enumerate} @@ -1595,7 +1595,7 @@ \end{proposition} \subsubsection{Orthonormal systems} \begin{definition} - Let $H$ be a Hilbert space. An \emph{orthogonal system} on $H$ is a nonempty subset $E\subseteq H$ such that its vectors are pairwise orthogonal. If moreover $\norm{e}=1 $ $\forall e\in E$, we will say that $E$ is a \emph{orthonormal system}. + Let $H$ be a Hilbert space. An \emph{orthogonal system} on $H$ is a nonempty subset $E\subseteq H$ such that its vectors are pairwise orthogonal. If moreover $\norm{e}=1 $ $\forall e\in E$, we will say that $E$ is an \emph{orthonormal system}. \end{definition} \begin{definition} Let $H$ be a Hilbert space and $E$ be an orthonormal system. We say that $E$ is \emph{complete} if $E^\perp =\{0\}$. @@ -1645,7 +1645,7 @@ \end{enumerate} \end{theorem} \begin{proposition} - Let $X,Y\subseteq \RR^n$ be measurable spaces and $K\in L^2(X\times Y)$. 
The Hilbert-Schmidt operator $T$ with kernel $K$ is compact. Moreover if $K(x,y)=K(y,x)$ $\forall (x,y)\in X\times Y$, then $T$ is self-adjoint. + Let $X,Y\subseteq \RR^n$ be measurable spaces and $K\in L^2(X\times Y)$. The Hilbert-Schmidt operator $T$ with kernel $K$ is compact. Moreover, if $K(x,y)=K(y,x)$ $\forall (x,y)\in X\times Y$, then $T$ is self-adjoint. \end{proposition} \subsubsection{Spectral theorem} \begin{proposition} @@ -1660,7 +1660,7 @@ Let $H$ be a Hilbert space and $T\in\mathcal{L}(H)$ be compact and self-adjoint. Then, $\exists \alpha\in \KK$ such that $\alpha$ is eigenvalue of $T$ with $\abs{\alpha}=\norm{T}$. \end{theorem} \begin{corollary} - Let $H$ be a Hilbert space and $T\in\mathcal{L}(H)$ be compact and self-adjoint. Suppose $\alpha_1$ is an eigenvalue of $T$ with $\abs{\alpha_1}=\norm{T}$. Now consider $T_1:=T|_{{\ker(T-\alpha_1\id)}^\perp}$. By \mcref{RFA:spectraltheorem}, we obtain an eigenvalue $\alpha_2$ of $T_1$ (an therefore of $T$) such that $\abs{\alpha_1}\geq \abs{\alpha_2}$. Iterating the process, we get a sequence of eigenvalues $(\alpha_n)$ with the property that $\abs{\alpha_1}\geq \abs{\alpha_2}\geq\abs{\alpha_3}\geq\cdots$. + Let $H$ be a Hilbert space and $T\in\mathcal{L}(H)$ be compact and self-adjoint. Suppose $\alpha_1$ is an eigenvalue of $T$ with $\abs{\alpha_1}=\norm{T}$. Now consider $T_1:=T|_{{\ker(T-\alpha_1\id)}^\perp}$. By \mcref{RFA:spectraltheorem}, we obtain an eigenvalue $\alpha_2$ of $T_1$ (and therefore of $T$) such that $\abs{\alpha_1}\geq \abs{\alpha_2}$. Iterating the process, we get a sequence of eigenvalues $(\alpha_n)$ with the property that $\abs{\alpha_1}\geq \abs{\alpha_2}\geq\abs{\alpha_3}\geq\cdots$. \end{corollary} \begin{theorem} Let $H$ be a Hilbert space and $T\in\mathcal{L}(H)$ be compact and self-adjoint. The sequence $(\alpha_n)$ of eigenvalues of $T$ (each repeated according its multiplicity) is a sequence of real numbers. If the sequence is countable (i.e. 
not finite), then $\displaystyle\lim_{n\to\infty}\alpha_n=0$. Moreover, for each eigenvalue $\alpha_n$, $\dim\ker(T-\alpha_n\id)<\infty$. diff --git a/Mathematics/4th/Stochastic_processes/Stochastic_processes.tex b/Mathematics/4th/Stochastic_processes/Stochastic_processes.tex index 9938fbf..2279638 100644 --- a/Mathematics/4th/Stochastic_processes/Stochastic_processes.tex +++ b/Mathematics/4th/Stochastic_processes/Stochastic_processes.tex @@ -67,7 +67,7 @@ $$\Exp\left(\sum_{n=1}^NZ_n\right)=\mu\Exp(N)$$ \end{theorem} \begin{proof} - Note that $\Exp\left(\sum_{n=1}^NZ_n\right)=\Exp\left(\sum_{n=1}^\infty Z_n\indi{N\geq n}\right)$ and it is integrable because: + Note that $\Exp\left(\sum_{n=1}^NZ_n\right)=\Exp\left(\sum_{n=1}^\infty Z_n\indi{N\geq n}\right)$, and it is integrable because: \begin{multline*} \Exp\left(\sum_{n=1}^\infty \abs{Z_n\indi{N\geq n}}\right) =\sum_{n=1}^\infty \Exp(\abs{Z_n})\Exp(\indi{N\geq n}) \leq \\ \leq A\sum_{n=1}^\infty \Prob(N\geq n) =A\Exp(N)<\infty @@ -122,7 +122,7 @@ $$ g_{X}^{(k)}(s_n)=\Exp(X(X-1)\cdots (X-k+1){(s_n)}^{X-k}) $$ - for all $n\in\NN$. Moreover note that $X(X-1)\cdots (X-k+1){(s_n)}^{X-k}\nearrow X(X-1)\cdots (X-k+1)$. Now use the \mnameref{P:monotone}. + for all $n\in\NN$. Moreover, note that $X(X-1)\cdots (X-k+1){(s_n)}^{X-k}\nearrow X(X-1)\cdots (X-k+1)$. Now use the \mnameref{P:monotone}. \end{sproof} \begin{center} \def\arraystretch{1.3} @@ -149,11 +149,11 @@ Let $T\subseteq \RR^n$ be a set, $(E,\mathcal{E})$ be a measurable space and $(\Omega,\mathcal{A},\Prob)$ be a probability space. A \emph{stochastic process} on $(\Omega,\mathcal{A},\Prob)$ with \emph{parameter set} $T$ and \emph{state space} $(E,\mathcal{E})$ is a family of random variables ${\{X_t\}}_{t\in T}$ from $(\Omega,\mathcal{A})$ to $(E,\mathcal{E})$. That is, $X_t:\Omega\to E$ satisfies ${X_t}^{-1}(B)\in\mathcal{A}$ for all $B\in\mathcal{E}$ and all $t\in T$. 
\end{definition} \begin{remark} - In general we wil consider stochastic processes with parameter sets $T=\NN,\NN\cup\{0\},\ZZ,\RR,\RR_{\geq 0}$ and state spaces $(\NN\cup\{0\},\mathcal{P}(\NN \cup \{0\}))$ or $(\RR,\mathcal{B}(\RR))$. + In general, we will consider stochastic processes with parameter sets $T=\NN,\NN\cup\{0\},\ZZ,\RR,\RR_{\geq 0}$ and state spaces $(\NN\cup\{0\},\mathcal{P}(\NN \cup \{0\}))$ or $(\RR,\mathcal{B}(\RR))$. \end{remark} \subsubsection{Galton-Watson process} \begin{model}\label{SP:galtonwatsonModel} - Let $(X_n)$, $n\in\NN\cup\{0\}$ be a sequence of discrete random vairables representing the number of new individuals of a certain population at the $n$-th generation. Suppose they are defined as $$X_{n+1}=\sum_{k=1}^{X_n}Z_{n+1}^{(k)}$$ and $X_0=1$. Here $Z_{n+1}^{(k)}$ has support $\NN\cup\{0\}$ $\forall n,k$ and represent the number of descendants (to the next generation) of the $k$-th individual of the $n$-th generation. Suppose that $Z_{n+1}^{(k)}\sim Z$ are \iid and independent from $(X_n)$. We would like to study the probability $\rho$ of extinction of this population: $$\rho=\Prob(\{X_n=0:\text{for some $n\in\NN$}\})=\Prob\left(\bigcup_{n=1}^\infty\{X_n=0\}\right)$$ + Let $(X_n)$, $n\in\NN\cup\{0\}$ be a sequence of discrete random variables representing the number of new individuals of a certain population at the $n$-th generation. Suppose they are defined as $$X_{n+1}=\sum_{k=1}^{X_n}Z_{n+1}^{(k)}$$ and $X_0=1$. Here $Z_{n+1}^{(k)}$ has support $\NN\cup\{0\}$ $\forall n,k$ and represents the number of descendants (to the next generation) of the $k$-th individual of the $n$-th generation. Suppose that $Z_{n+1}^{(k)}\sim Z$ are \iid and independent of $(X_n)$. 
We would like to study the probability $\rho$ of extinction of this population: $$\rho=\Prob(\{X_n=0:\text{for some $n\in\NN$}\})=\Prob\left(\bigcup_{n=1}^\infty\{X_n=0\}\right)$$ \end{model} \begin{lemma}\label{SP:lemmaGaltonWatson} Let $(Z_n)$ be a sequence of \iid random variables distributed as $Z$ with support $\NN\cup\{0\}$, and $N$ be a random variable also with support $\NN\cup\{0\}$ and independent to $(Z_n)$. Let $X=\sum_{k=1}^NZ_k$. Then, $\forall s\in[-1,1]$ we have: $$g_X(s)=g_N(g_Z(s))$$ @@ -201,12 +201,12 @@ \begin{proof} First suppose $\Prob(Z=0)+\Prob(Z=1)=1$. Thus, $Z\almoste{\leq}1$ and so $\Exp(Z)\leq 1$. Moreover, $g_Z(s)=\Prob(Z=0)+s\Prob(Z=1)$, which is a line with slope $\Prob(Z=1)<1$. Hence, it has a unique fixed point, which is $s=1$. - Now assume $\Prob(Z=0)+\Prob(Z=1)<1$. Then, $\exists k\geq 2$ with $\Prob(Z=k)>0$. Hence, ${g_Z}'(s)>0$ and ${g_Z}''(s)>0$ $\forall s\in (0,1)$. Now consider $f(s)=g(s)-s$. Note that $f$ is strictly convex in $(0,1)$ and $f(0)=g(0)=\Prob(Z=0)>0$. Finally note that $$\lim_{t\to 1^-} f'(s)=\lim_{t\to 1^-}g(s)-1=\Exp(Z)-1$$ - and so $\displaystyle\lim_{t\to 1^-} f'(s)$ is negative in the first case and positive in the second case. This imply that $f$ has no zeros on $(0,1)$ in the first case and exactly 1 zero in $(0,1)$ in the second case. + Now assume $\Prob(Z=0)+\Prob(Z=1)<1$. Then, $\exists k\geq 2$ with $\Prob(Z=k)>0$. Hence, ${g_Z}'(s)>0$ and ${g_Z}''(s)>0$ $\forall s\in (0,1)$. Now consider $f(s)=g(s)-s$. Note that $f$ is strictly convex in $(0,1)$ and $f(0)=g(0)=\Prob(Z=0)>0$. Finally, note that $$\lim_{t\to 1^-} f'(s)=\lim_{t\to 1^-}g(s)-1=\Exp(Z)-1$$ + and so $\displaystyle\lim_{t\to 1^-} f'(s)$ is negative in the first case and positive in the second case. This implies that $f$ has no zeros on $(0,1)$ in the first case and exactly 1 zero in $(0,1)$ in the second case. 
It's missing to see that in the second case the probability of extinction $\rho$ is given by the fixed point in $(0,1)$, rather than 1. We have that: $$\rho=\lim_{n\to\infty}g_{X_n}(0)=\lim_{n\to\infty}{g_Z}^n(0)$$ - Since ${g_Z}'>0$, we have that ${g_Z}$ is increasing and so it is ${g_Z}^n$ $\forall n\in\NN$. Moreover if ${g_Z}(x_0)=x_0$, we have that ${g_Z}^n(x_0)=x_0$ $\forall n\in\NN$. Therefore $$0<{g_Z}(0)<{g_Z}^2(0)<\cdots<{g_Z}^n(0)<\cdots 0$, we have that ${g_Z}$ is increasing and so it is ${g_Z}^n$ $\forall n\in\NN$. Moreover, if ${g_Z}(x_0)=x_0$, we have that ${g_Z}^n(x_0)=x_0$ $\forall n\in\NN$. Therefore, $$0<{g_Z}(0)<{g_Z}^2(0)<\cdots<{g_Z}^n(0)<\cdots 0$, then $d(i)\mid n$. Since $i\leftrightarrow j$, then $\exists r,s\in I$ such that $p_{ij}^{(r)}>0$ and $p_{ji}^{(s)}>0$. So as in \mcref{SP:corolariChapKolmo}, we have $p_{ii}^{(r+s)}>0$. Thus, $d(i)\mid r+s$. Moreover if $p_{jj}^{(n)}>0$, then: + Suppose $i\ne j$. We will see that if $p_{jj}^{(n)}>0$, then $d(i)\mid n$. Since $i\leftrightarrow j$, then $\exists r,s\in I$ such that $p_{ij}^{(r)}>0$ and $p_{ji}^{(s)}>0$. So as in \mcref{SP:corolariChapKolmo}, we have $p_{ii}^{(r+s)}>0$. Thus, $d(i)\mid r+s$. Moreover, if $p_{jj}^{(n)}>0$, then: $$p_{ii}^{(r+n+s)}\geq p_{ij}^{(r)}p_{jj}^{(n)}p_{ji}^{(s)}>0$$ So $d(i)\mid r+n+s$. Thus, $d(i)\mid n$ and so $d(j)\geq d(i)$ because $d(j)$ is the greatest common divisor of all such $n$. Repeating the argument exchanging $i$ and $j$ we get $d(j)= d(i)$. \end{proof} @@ -449,7 +449,7 @@ $$C_\alpha:=\{j\in I:\exists n\in\NN\cup\{0\}\text{ with }p_{ij}^{(n d+\alpha)}>0\}$$ Clearly $C_0\cup\cdots\cup C_{d-1}=I$. Let's see that $C_\alpha\cap C_\beta=\varnothing$ if $\alpha\ne\beta$. Suppose $k\in C_\alpha\cap C_\beta$. Note that since the chain is irreducible, $\exists m\in\NN\cup\{0\}$ such that $p_{ki}^{(m)}>0$. And so, as in \mcref{SP:corolariChapKolmo} we have $p_{kk}^{(n d+\alpha+m)}>0$ because $k\in C_\alpha$. Thus, $d\mid \alpha+m$. 
The same argument with $\beta$ implies $d\mid \beta+m$. So $d\mid \beta -\alpha$ and $\beta=\alpha$ because $\alpha,\beta\in\{0,\cdots,d-1\}$. - Finally if $j\in C_\alpha$ is such that $p_{jk}>0$ for $k\in I$, then as in \mcref{SP:corolariChapKolmo} we have $p_{ik}^{(n d+\alpha +1)}>0$. So, if $\alpha+1\leq d-1$, then $k\in C_{\alpha+1}$. Otherwise $k\in C_0=C_{[\alpha+1]_d}$. + Finally, if $j\in C_\alpha$ is such that $p_{jk}>0$ for $k\in I$, then as in \mcref{SP:corolariChapKolmo} we have $p_{ik}^{(n d+\alpha +1)}>0$. So, if $\alpha+1\leq d-1$, then $k\in C_{\alpha+1}$. Otherwise, $k\in C_0=C_{[\alpha+1]_d}$. \end{proof} \subsubsection{Stopping time and strong Markov property} \begin{proposition}\label{SP:MarkovImproved} @@ -462,7 +462,7 @@ for all $n\geq 0$. \end{proposition} \begin{proof} - By \mcref{SP:lema2Markov} it suffices to prove the statement for $B=\{i_0\}\times\cdots\times\{i_{n-1}\}$. Moreover since $A$ is countable we can suppose $A=\{j_1\}\times\cdots\times\{j_k\}$. We will prove it by induction on $k$ the homogeneous equality (the other one is even easier). The case $k=1$ is by definition. Now suppose $k\geq 2$. Then, denoting $C:=\{X_0=i_0,\ldots,X_{n-1}=i_{n-1},X_n=i\}$ we have: + By \mcref{SP:lema2Markov} it suffices to prove the statement for $B=\{i_0\}\times\cdots\times\{i_{n-1}\}$. Moreover, since $A$ is countable we can suppose $A=\{j_1\}\times\cdots\times\{j_k\}$. We will prove it by induction on $k$ the homogeneous equality (the other one is even easier). The case $k=1$ is by definition. Now suppose $k\geq 2$. Then, denoting $C:=\{X_0=i_0,\ldots,X_{n-1}=i_{n-1},X_n=i\}$ we have: \begin{multline*} \Prob(X_{n+1}=j_1,\ldots,X_{n+k+1}=j_{k+1}\mid C) \\ =\Prob(X_{n+k+1}=j_{k+1}\mid C,X_{n+1}=j_1,\ldots,X_{n+k}=j_k)\cdot\\ @@ -477,7 +477,7 @@ Let $(\Omega,\mathcal{A},\Prob)$ be a probability space and let $I$ be a finite or countable set. 
For each $i\in I$, let $\mathcal{F}_i$ be a sub $\sigma$-algebra of $\mathcal{A}$, that is a subset of $\mathcal{A}$ which also $\sigma$-algebra. We say that $(\mathcal{F}_i)_{i\in I}$ is \emph{filtration} if for all $i\in I$ we have $\mathcal{F}_i\subseteq\mathcal{F}_{i+1}$. The tuple $(\Omega,\mathcal{A},(\mathcal{F}_i)_{i\in I},\Prob)$ is called a \emph{filtration space}. \end{definition} \begin{definition} - Let $(\Omega,\mathcal{A},\Prob)$ be a probability space and $\vf{X}$ be a random vector. the \emph{$\sigma$-algebra generated by $\vf{X}$} is: + Let $(\Omega,\mathcal{A},\Prob)$ be a probability space and $\vf{X}$ be a random vector. The \emph{$\sigma$-algebra generated by $\vf{X}$} is: $$\sigma(\vf{X}):=\{\vf{X}^{-1}(B):B\in\mathcal{B}(\RR^n)\}$$ \end{definition} \begin{proposition} @@ -549,9 +549,9 @@ for all $n\geq 0$. \end{corollary} \subsubsection{Recurrence and transience} - From now on we will omit saying that a stopping time $\tau$ is defined in a filtration space $(\Omega,\mathcal{F},(\mathcal{F}_n)_{n\geq 0},\Prob)$. Moreover given a Markov chain $(X_n)$, we will denote by $\Prob_i(A):=\Prob(A\mid X_0=i)$ and $\Exp_i(A):=\Exp(A\mid X_0=i)$, for any event $A$. + From now on we will omit saying that a stopping time $\tau$ is defined in a filtration space $(\Omega,\mathcal{F},(\mathcal{F}_n)_{n\geq 0},\Prob)$. Moreover, given a Markov chain $(X_n)$, we will denote by $\Prob_i(A):=\Prob(A\mid X_0=i)$ and $\Exp_i(A):=\Exp(A\mid X_0=i)$, for any event $A$. \begin{definition} - Let $(X_n)$ be a time-homogeneous Markov chain, $i,j\in I$ and consider the stopping time $\tau_j$ of \mcref{SP:tau_i}. We define $f_{ij}:=\Prob_i(\tau_j<\infty)$. We say that $i$ is \emph{transient} if $f_{ii}<1$ and \emph{recurrent} if $f_{ii}=1$. Finally we define $N_i$ as: + Let $(X_n)$ be a time-homogeneous Markov chain, $i,j\in I$ and consider the stopping time $\tau_j$ of \mcref{SP:tau_i}. We define $f_{ij}:=\Prob_i(\tau_j<\infty)$. 
We say that $i$ is \emph{transient} if $f_{ii}<1$ and \emph{recurrent} if $f_{ii}=1$. Finally, we define $N_i$ as: $$ N_i:=\abs{\{n\in\NN:X_n=i\}}=\sum_{n=1}^{\infty}\indi{\{X_n=i\}} $$ @@ -564,7 +564,7 @@ $$ \tau_i^k:= \inf\{n>\tau_i^{k-1}:X_n=i\} $$ - with the convention that $\tau_i^1=\tau_i$ and $\tau_i^0=0$. Moreover we define the time difference $T_i^k:=\tau_i^k-\tau_i^{k-1}$. + with the convention that $\tau_i^1=\tau_i$ and $\tau_i^0=0$. Moreover, we define the time difference $T_i^k:=\tau_i^k-\tau_i^{k-1}$. \end{definition} \begin{lemma} Let $(X_n)$ be a time-homogeneous Markov chain. Then, $\tau_i^k$ is a stopping time $\forall k\in\NN$ and moreover $T_i^k$ are \iid random variables distributed as $\tau_i$. @@ -699,7 +699,7 @@ p_{jj}^{(n+r+s)} \geq p_{ji}^{(s)}p_{ii}^{(n)}p_{ij}^{(r)}=:C p_{ii}^{(n)} $$ And so, $\sum p_{ii}^{(n)}=\infty\implies \sum p_{jj}^{(n)} = \infty$ by \mcref{SP:thmRecA}. - \item Similarly as before, since $i\to j$, $\exists r\geq 1$ such that $p_{ij}^{(r)}>0$. Thus, $p_{ii}^{(n+r)}\geq p_{ij}^{(r)}p_{jj}^{(n)}$. So, $\sum p_{jj}^{(n)}=\infty\implies \sum p_{ii}^{(n)} = \infty$. + \item Similarly, as before, since $i\to j$, $\exists r\geq 1$ such that $p_{ij}^{(r)}>0$. Thus, $p_{ii}^{(n+r)}\geq p_{ij}^{(r)}p_{jj}^{(n)}$. So, $\sum p_{jj}^{(n)}=\infty\implies \sum p_{ii}^{(n)} = \infty$. \item It follows from \mcref{SP:thmRecA} and \mcref{SP:lemaPrerec}. \end{enumerate} \end{proof} @@ -714,7 +714,7 @@ \begin{equation*} p_{ii}^{(2n)}=\binom{2n}{n}p^nq^n \end{equation*} - because we choose $n$ steps to the right from a total of $2n$ and the rest must be steps to the left. Finally using \mcref{MA:stirling} one can check that: + because we choose $n$ steps to the right from a total of $2n$ and the rest must be steps to the left. 
Finally, using \mcref{MA:stirling} one can check that: \begin{equation}\label{SP:stirling_polya1} p_{ii}^{(2n)}\sim \frac{1}{\sqrt{n}}{(4pq)}^n \end{equation} @@ -803,7 +803,7 @@ \end{enumerate} \end{proposition} \begin{proof} - By \mcref{SP:thmRec,SP:period_classes} we have that $j$ is recurrent and aperiodic. Thus, by \mnameref{SP:ergotic2}, the limits $\displaystyle \lim_{n\to\infty}p_{ij}^{(n)}$ and $\displaystyle \lim_{n\to\infty}p_{ji}^{(n)}$ exist. Moreover since $i\leftrightarrow j$ $\exists r,s\in\NN$ such that $p_{ij}^{(r)}, p_{ji}^{(s)}>0$. By \mcref{SP:corolariChapKolmo} we have that $p_{jj}^{(n+r+s)}\geq C p_{ii}^{(n)}$. If $i$ is positive recurrent then: + By \mcref{SP:thmRec,SP:period_classes} we have that $j$ is recurrent and aperiodic. Thus, by \mnameref{SP:ergotic2}, the limits $\displaystyle \lim_{n\to\infty}p_{ij}^{(n)}$ and $\displaystyle \lim_{n\to\infty}p_{ji}^{(n)}$ exist. Moreover, since $i\leftrightarrow j$ $\exists r,s\in\NN$ such that $p_{ij}^{(r)}, p_{ji}^{(s)}>0$. By \mcref{SP:corolariChapKolmo} we have that $p_{jj}^{(n+r+s)}\geq C p_{ii}^{(n)}$. If $i$ is positive recurrent then: $$ \lim_{n\to\infty}p_{jj}^{(n+r+s)}\geq C\lim_{n\to\infty}p_{ii}^{(n)}>0 $$ @@ -816,7 +816,7 @@ $$ \end{theorem} \begin{proof} - $(Y_n)=(X_{nd})$ is a time-homogeneous Markov chain and $i\in I$ is recurrent and aperiodic. Thus by \mnameref{SP:ergotic2} we have that $\displaystyle\lim_{n\to\infty}p_{ii}^{(nd)}=\frac{1}{\Exp_i(\tau_i^Y)}$. But: + $(Y_n)=(X_{nd})$ is a time-homogeneous Markov chain and $i\in I$ is recurrent and aperiodic. Thus, by \mnameref{SP:ergotic2} we have that $\displaystyle\lim_{n\to\infty}p_{ii}^{(nd)}=\frac{1}{\Exp_i(\tau_i^Y)}$. But: $$ \tau_i^Y=\inf\{ n\geq 1: Y_n=i\}=\frac{1}{d}\inf\{n\geq 1: X_{n}=i\}=\frac{\tau_i}{d} $$ @@ -849,7 +849,7 @@ $$ \end{definition} \begin{remark} - In general we cannot guarantee neither existence nor uniqueness of a stationary distributions. 
+ In general, we can guarantee neither existence nor uniqueness of stationary distributions. \end{remark} \begin{lemma} Let $(X_n)$ be a time-homogeneous Markov chain, $\vf\nu$ be a stationary distribution and suppose $\vf\pi_0=\vf\nu$. Then, $\vf\pi_n=\vf\nu$ $\forall n\in\NN$. @@ -860,7 +860,7 @@ $ \end{proof} \begin{theorem} - Let $(X_n)$ be a time-homogeneous irreducible and aperiodic Markov chain. Then, $(X_n)$ is positive recurrent if and only if it admits a stationary distribution. Moreover this distribution is unique and it is given by $\nu_i=\frac{1}{\mu_i}$. + Let $(X_n)$ be a time-homogeneous irreducible and aperiodic Markov chain. Then, $(X_n)$ is positive recurrent if and only if it admits a stationary distribution. Moreover, this distribution is unique, and it is given by $\nu_i=\frac{1}{\mu_i}$. \end{theorem} \begin{proof} We will only proof the case when $I$ is finite. By \mcref{SP:coroClassificationStates} we only need to prove the impication to the right. Since $\displaystyle\lim_{n\to\infty}p_{ij}^{(n)}=\frac{1}{\mu_j}$ $\forall i,j\in I$ we have that $\vf\nu={(1/\mu_i)}_{i\in I}\geq 0$ satisfies: @@ -916,7 +916,7 @@ $$ p_j(t)=\sum_{i\in I}p_i(0)\Prob(X_t=j\mid X_0=i)=\sum_{i\in I}p_i(0)p_{ij}(t) $$ - The formula is a consequences of the \mnameref{P:compound}. + The formula is a consequence of the \mnameref{P:compound}. \end{sproof} \subsubsection{Poisson process} \begin{definition} @@ -967,7 +967,7 @@ $$ \bigcap_{0\leq s < t}\{N_s\leq N_t\}= \bigcap_{\substack{0\leq s < t\\s,t\in\QQ}}\{N_s\leq N_t\} $$ - because the trajectories are càd. Finally since $\Prob(N_s\leq N_t)=\Prob(N_t-N_s\geq 0)=1$, the intersection has probability $1$. Now, let: + because the trajectories are càd. Finally, since $\Prob(N_s\leq N_t)=\Prob(N_t-N_s\geq 0)=1$, the intersection has probability $1$. 
Now, let: \begin{align*} A & :=\{\omega\in\Omega:N(\omega)\text{ has jumps of size }\geq 2\} \\ A_R & :=\{\omega\in\Omega:N(\omega)\text{ has jumps of size }\geq 2\text{ in $[0,R]$}\} \\ @@ -1098,7 +1098,7 @@ \begin{multline*} p_{ij}(t+h)-p_{ij}(t)=\sum_{\substack{k\in I\\k\ne i}}p_{ik}(h)p_{kj}(t)+p_{ii}(h)p_{ij}(t)-p_{ij}(t)=\\=\sum_{\substack{k\in I\\k\ne i}}(q_{ik}h+\o{h})p_{kj}(t)+(1+q_{ii} h+\o{h})p_{ij}(t)-p_{ij}(t)=\\=\sum_{k\in I}q_{ik}h p_{kj}(t)+\o{h} \end{multline*} - Dividing by $h$ and taking limits we get the result with the right derivative. Now take $t>0$ and $h<0$. Then in a similar way: + Dividing by $h$ and taking limits we get the result with the right derivative. Now take $t>0$ and $h<0$. Then, similarly: $$ p_{ij}(t)-p_{ij}(t+h)=-\sum_{k\in I}q_{ik}h p_{kj}(t+h)+\o{h} $$ @@ -1223,7 +1223,7 @@ \subsection{Brownian motion} \subsubsection{Gaussian processes} \begin{proposition}\label{SP:gaussian_vector} - Let $\vf{x}\in \RR^n$ be a random vector. Then, $\vf{x}$ is a \emph{gaussian vector}, that is it distributes as a $n$-dimensional normal, if and only if there exists $k\in\NN$, $\vf{A}\in\mathcal{M}_{n\times k}(\RR)$, $\vf{z}\in\RR^k$ with \iid components distributed as $N(0,1)$, and $\vf\mu\in\RR^n$ such that: $$\vf{x}=\vf{A}\vf{z}+\vf\mu$$ + Let $\vf{x}\in \RR^n$ be a random vector. Then, $\vf{x}$ is a \emph{gaussian vector}, that is it distributes as an $n$-dimensional normal, if and only if there exists $k\in\NN$, $\vf{A}\in\mathcal{M}_{n\times k}(\RR)$, $\vf{z}\in\RR^k$ with \iid components distributed as $N(0,1)$, and $\vf\mu\in\RR^n$ such that: $$\vf{x}=\vf{A}\vf{z}+\vf\mu$$ \end{proposition} \begin{definition} A stochastic process ${(X_t)}_{t\geq 0}$ is called a \emph{gaussian process} if for all $t_1,\ldots,t_n\geq 0$ the random vector $(X_{t_1},\ldots,X_{t_n})$ is gaussian. 
@@ -1278,7 +1278,7 @@ 0 & \cdots & 0 & t_n- t_{n-1} \end{pmatrix} $$ - for all $00$ $\exists (\tau_n)$ such that $\Prob(\tau_n)$ - \item If $d\geq 3$, then $B$ is transient, that is $\forall T>0$ - \end{enumerate} -\end{theorem} -\begin{theorem}[Law of the iterated logarithm] - Let $B$ be a standard Brownian motion. Then: - $$ - \limsup_{t\to\infty}\frac{B_t}{\sqrt{2t\log\log t}}\almoste{=}\liminf_{t\to\infty}\frac{B_t}{\sqrt{2t\log\log t}}\almoste{=}1 - $$ -\end{theorem} -\begin{corollary} - Let $B$ be a standard Brownian motion. Then: - $$ - \limsup_{h\to 0} \frac{B_h}{\sqrt{2h\log\log\frac{1}{h}}}\almoste{=}\liminf_{h\to 0} \frac{B_h}{\sqrt{2h\log\log\frac{1}{h}}}\almoste{=}1 - $$ -\end{corollary} -\begin{sproof} - Recall that $xB_{1/x}$ is a standard Brownian motion. -\end{sproof} -\begin{proposition} - Let $S_n=\sum_{i=1}^n X_i$ be a simple random walk with $\Prob(X_i=1)=\Prob(X_i=-1)=\frac{1}{2}$ and $B=(B_t)$ be a standard Brownian motion. We define the following sequence of stochastic processes: - $$ - {Y_t^n}:=\frac{1}{\sqrt{n}}\left[S_{\floor{nt}}+(nt-\floor{nt})X_{\floor{nt}+1}\right] - $$ - Then, $Y_t^n\overset{\mathrm{d}}{\longrightarrow} B_t$. -\end{proposition} -\begin{definition}[Finite-dimensional distributions] - Let $(\Omega, \mathcal{A}, \Prob)$ be a probability space and $X: I\times\Omega\rightarrow (E,\mathcal{E})$ be a stochastic process. The \emph{finite-dimensional distributions} of $X$ are the probability measures $\Prob_{t_1,\dots,t_n}$ defined on $(E^n,\mathcal{E}^n)$ by: - $$ - \Prob_{t_1,\dots,t_n}(B):=\Prob((X_{t_1},\dots,X_{t_n})\in B) - $$ - for all $B\in\mathcal{E}^n$. -\end{definition} -\begin{lemma} - Let $(\Omega, \mathcal{A}, \Prob)$ be a probability space and $X: I\times\Omega\rightarrow (E,\mathcal{E})$ be a stochastic process. 
Then, the finite-dimensional distributions satisfy the following \emph{consistency condition}: - \begin{enumerate} - \item For all $n\in\NN$, $t_1,\dots,t_n\in I$, $B_1,\dots,B_n\in\mathcal{E}$ and $\sigma\in\S_n$, we have: - $$ - \Prob_{t_1,\dots,t_n}(B_1\times\dots\times B_n)=\Prob_{t_{\sigma(1)},\dots,t_{\sigma(n)}}(B_{\sigma(1)}\times\dots\times B_{\sigma(n)}) - $$ - \item For all $n\in\NN$, $t_1,\dots,t_n\in I$ and $B_1,\dots,B_{n-1}\in\mathcal{E}$, we have: - $$ - \Prob_{t_1,\dots,t_n}(B_1\times\dots\times B_{n-1}\times E)=\Prob_{t_1,\dots,t_{n-1}}(B_1\times\dots\times B_{n-1}) - $$ - \end{enumerate} -\end{lemma} -\begin{theorem}[Kolmogorov extension theorem] - Let $I$ be a set and $\{\Prob_{t_1,\dots,t_n}:n\in\NN,t_1,\ldots,t_n\in I\}$ be a family of probabilities defined on $\mathcal{B}(\RR^n)$ satisfying the consistency conditions. Then, there exists a probability space $(\Omega, \mathcal{A}, \Prob)$ and a stochastic process $X: I\times\Omega\rightarrow (\RR,\mathcal{B}(\RR))$ such that $\Prob_{t_1,\dots,t_n}$ is the finite-dimensional distribution of $X$ for all $n\in\NN$ and $t_1,\dots,t_n\in I$. -\end{theorem} + \end{proof} + \begin{definition} + A $d$-dimensional Brownian motion is a $d$-dimensional stochastic process $\vf{B}=(B^1,\dots,B^d)$ such that $\forall i\in\{1,\dots,d\}$, $B^i$ is a standard Brownian motion, and it is independent of the other components. + \end{definition} + \begin{theorem} + Let $\vf{B}$ be a $d$-dimensional Brownian motion. Then: + \begin{enumerate} + \item If $d=2$, then $B$ is recurrent, that is $\forall \vf{x}\in\RR^2$ and $\forall \delta>0$ $\exists (\tau_n)$ such that $\Prob(\tau_n)$ + \item If $d\geq 3$, then $B$ is transient, that is $\forall T>0$ + \end{enumerate} + \end{theorem} + \begin{theorem}[Law of the iterated logarithm] + Let $B$ be a standard Brownian motion. 
Then: + $$ + \limsup_{t\to\infty}\frac{B_t}{\sqrt{2t\log\log t}}\almoste{=}\liminf_{t\to\infty}\frac{B_t}{\sqrt{2t\log\log t}}\almoste{=}1 + $$ + \end{theorem} + \begin{corollary} + Let $B$ be a standard Brownian motion. Then: + $$ + \limsup_{h\to 0} \frac{B_h}{\sqrt{2h\log\log\frac{1}{h}}}\almoste{=}\liminf_{h\to 0} \frac{B_h}{\sqrt{2h\log\log\frac{1}{h}}}\almoste{=}1 + $$ + \end{corollary} + \begin{sproof} + Recall that $xB_{1/x}$ is a standard Brownian motion. + \end{sproof} + \begin{proposition} + Let $S_n=\sum_{i=1}^n X_i$ be a simple random walk with $\Prob(X_i=1)=\Prob(X_i=-1)=\frac{1}{2}$ and $B=(B_t)$ be a standard Brownian motion. We define the following sequence of stochastic processes: + $$ + {Y_t^n}:=\frac{1}{\sqrt{n}}\left[S_{\floor{nt}}+(nt-\floor{nt})X_{\floor{nt}+1}\right] + $$ + Then, $Y_t^n\overset{\mathrm{d}}{\longrightarrow} B_t$. + \end{proposition} + \begin{definition}[Finite-dimensional distributions] + Let $(\Omega, \mathcal{A}, \Prob)$ be a probability space and $X: I\times\Omega\rightarrow (E,\mathcal{E})$ be a stochastic process. The \emph{finite-dimensional distributions} of $X$ are the probability measures $\Prob_{t_1,\dots,t_n}$ defined on $(E^n,\mathcal{E}^n)$ by: + $$ + \Prob_{t_1,\dots,t_n}(B):=\Prob((X_{t_1},\dots,X_{t_n})\in B) + $$ + for all $B\in\mathcal{E}^n$. + \end{definition} + \begin{lemma} + Let $(\Omega, \mathcal{A}, \Prob)$ be a probability space and $X: I\times\Omega\rightarrow (E,\mathcal{E})$ be a stochastic process. 
Then, the finite-dimensional distributions satisfy the following \emph{consistency condition}: + \begin{enumerate} + \item For all $n\in\NN$, $t_1,\dots,t_n\in I$, $B_1,\dots,B_n\in\mathcal{E}$ and $\sigma\in\S_n$, we have: + $$ + \Prob_{t_1,\dots,t_n}(B_1\times\dots\times B_n)=\Prob_{t_{\sigma(1)},\dots,t_{\sigma(n)}}(B_{\sigma(1)}\times\dots\times B_{\sigma(n)}) + $$ + \item For all $n\in\NN$, $t_1,\dots,t_n\in I$ and $B_1,\dots,B_{n-1}\in\mathcal{E}$, we have: + $$ + \Prob_{t_1,\dots,t_n}(B_1\times\dots\times B_{n-1}\times E)=\Prob_{t_1,\dots,t_{n-1}}(B_1\times\dots\times B_{n-1}) + $$ + \end{enumerate} + \end{lemma} + \begin{theorem}[Kolmogorov extension theorem] + Let $I$ be a set and $\{\Prob_{t_1,\dots,t_n}:n\in\NN,t_1,\ldots,t_n\in I\}$ be a family of probabilities defined on $\mathcal{B}(\RR^n)$ satisfying the consistency conditions. Then, there exists a probability space $(\Omega, \mathcal{A}, \Prob)$ and a stochastic process $X: I\times\Omega\rightarrow (\RR,\mathcal{B}(\RR))$ such that $\Prob_{t_1,\dots,t_n}$ is the finite-dimensional distribution of $X$ for all $n\in\NN$ and $t_1,\dots,t_n\in I$. + \end{theorem} \end{multicols} \end{document} \ No newline at end of file diff --git a/README.md b/README.md index 2812bb4..71aef6c 100644 --- a/README.md +++ b/README.md @@ -48,20 +48,12 @@ Summary of each subject in Mathematics and Physics degree at UAB (Universitat Au - All - Arithmetic - All - - Commutative algebra - - All - Dynamical systems - Proofs - - Harmonic analysis - - All - Linear models - Proofs - More organization!!! - Add interpretation of the correlation - - Numerical calculus - - All - - Numeric integration of PDEs - - All - PDEs - Initial proofs - Entropy condition @@ -77,7 +69,5 @@ Summary of each subject in Mathematics and Physics degree at UAB (Universitat Au - General theorems on linear maps - Hilbert - Lp is Banach - - Stochastic processes - All - - All Feel free to help us with this summaries!