Commit
updated many things
victorballester7 committed Sep 27, 2023
1 parent 5cad9d4 commit 7097fe4
Showing 8 changed files with 269 additions and 5 deletions.
2 changes: 1 addition & 1 deletion .gitignore
@@ -18,4 +18,4 @@
*.table

!**/Images/*.pdf
.vscode/**
**/.vscode/**
1 change: 1 addition & 0 deletions .vscode/ltex.hiddenFalsePositives.en-US.txt
@@ -64,3 +64,4 @@
{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\QThe equality is held if: \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q and \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q [mode=image|tex,width=0.7]Images/young We say that \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q are Hölder conjugates if \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"}
{"rule":"SENTENCE_WHITESPACE","sentence":"^\\QLp. spaces.\\E$"}
{"rule":"COMMA_COMPOUND_SENTENCE","sentence":"^\\QWe will denote \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q and we will say that \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q is the Fourier series of \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"}
{"rule":"ENGLISH_WORD_REPEAT_BEGINNING_RULE","sentence":"^\\QThen \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q is a continuous square-integrable martingale with: \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q Let \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q be a Brownian motion.\\E$"}
2 changes: 1 addition & 1 deletion Mathematics/3rd/Probability/Probability.tex
@@ -1031,7 +1031,7 @@
Let $\Omega$ be a set and $(A_n)\subset\Omega$ be a sequence of subsets. We say that $(A_n)$ has a \emph{limit} if: $$\liminf_{n\to\infty}A_n=\limsup_{n\to\infty}A_n$$
In that case, $\displaystyle A:=\limsup_{n\to\infty}A_n$ is called the limit of the sequence.
\end{definition}
\begin{lemma}[First Borel-Cantelli lemma]
\begin{lemma}[First Borel-Cantelli lemma]\label{P:borel-cantelli1}
Let $(\Omega,\mathcal{A},\Prob)$ be a probability space and $(A_n)\subset\mathcal{A}$ be a sequence of events such that: $$\sum_{n=1}^\infty\Prob(A_n)<\infty$$
Then, $\displaystyle\Prob\left(\limsup_{n\to\infty} A_n\right)=0$.
\end{lemma}
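\begin{remark}
A short proof: by continuity from above and countable subadditivity,
$$
\Prob\left(\limsup_{n\to\infty} A_n\right)=\lim_{N\to\infty}\Prob\left(\bigcup_{n\geq N}A_n\right)\leq\lim_{N\to\infty}\sum_{n=N}^\infty\Prob(A_n)=0
$$
since the tail of a convergent series tends to zero.
\end{remark}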
@@ -4,7 +4,7 @@
\changecolor{ADS}
\begin{multicols}{2}[\section{Advanced dynamical systems}]
\subsection{Introduction}
\subsubsection{Rotations in \texorpdfstring{$\S^1$}{S1}}
\subsubsection{Maps in \texorpdfstring{$\S^1$}{S1}}
\begin{proposition}
Let $\alpha=\frac{p}{q}\in\QQ$ and let $R_\alpha:\S^1\to \S^1$ be the rotation of angle $\alpha$. Then, all the points of $\S^1$ are periodic for $R_\alpha$ with period $q$.
\end{proposition}
@@ -17,5 +17,63 @@
\begin{proof}
Let $\varepsilon>0$, $x,y\in \S^1$. Partition $\S^1$ into $N\leq \frac{1}{\varepsilon}+1$ intervals of length at most $\varepsilon$. By the pigeonhole principle, $\exists m,n\in \NN$ with $m< n\leq N+1$ such that ${R_\alpha}^m x$ and ${R_\alpha}^nx$ lie in the same interval. Thus, $\abs{{R_\alpha}^{n-m}x-x}<\varepsilon$, and this quantity is nonzero because $\alpha\notin\QQ$. Hence, applying ${R_\alpha}^{n-m}$ repeatedly, we will eventually have $\abs{{R_\alpha}^{k(n-m)}x - y}<\varepsilon$ for some $k\in \NN$.
\end{proof}
\begin{corollary}
Let $\alpha\in\RR\setminus\QQ$ and $A\subset \S^1$ be a non-empty closed invariant set for $R_\alpha$. Then, $A=\S^1$.
\end{corollary}
\begin{proof}
Let $x\in \S^1$ and $y\in A$. By the density of the orbit of $y$ (previous proposition), $\forall k\in\NN$ $\exists n_k\in\NN$ such that $R_\alpha^{n_k}y\in(x-\frac{1}{k},x+\frac{1}{k})$. Thus, $R_\alpha^{n_k}y\to x$ and $x\in A$ because $A$ is closed and $R_\alpha^{n_k}y\in A$ $\forall k\in\NN$. Hence, $A=\S^1$.
\end{proof}
\begin{definition}
Consider the set $$\Sigma_m
:= \{(x_1,x_2,\ldots):x_i\in\{0,1,\ldots,m-1\}\}$$
We define the \emph{shift map} as:
$$
\function{\sigma_m}{\Sigma_m}{\Sigma_m}{(x_1,x_2,\ldots)}{(x_2,x_3,\ldots)}
$$
\end{definition}
\begin{remark}
Note that some elements of $[0,1]$ have two different base-$m$ representations, which correspond to two different elements of $\Sigma_m$. So we can think of $\Sigma_m$ as the quotient space $\quot{\Sigma_m}{\sim}$, where $(x_1,x_2,\ldots)\sim (y_1,y_2,\ldots)$ if and only if $\sum_{i=1}^\infty \frac{x_i}{m^i}=\sum_{i=1}^\infty \frac{y_i}{m^i}$.
\end{remark}
\begin{proposition}
Let $m\in\NN$. Consider the \emph{expansion map}
$$
\function{E_m}{\S^1}{\S^1}{x}{mx}
$$
Then, if $\phi:\Sigma_m\to \S^1$ is the map $\phi(x_1,x_2,\ldots)=\sum_{i=1}^\infty \frac{x_i}{m^i}$, we have that $E_m\circ \phi=\phi\circ \sigma_m$. Moreover, $\phi$ induces a bijection between $\quot{\Sigma_m}{\sim}$ and $\S^1$, and thus it is a conjugacy between $E_m$ and $\sigma_m$.
\end{proposition}
\begin{proof}
Let $x=(x_1,x_2,\ldots)\in \Sigma_m$. Then, $\phi\circ \sigma_m(x)=\sum_{i=1}^\infty \frac{x_{i+1}}{m^i}$. Moreover:
\begin{multline*}
E_m\circ \phi(x)=E_m\left(\sum_{i=1}^\infty \frac{x_i}{m^i}\right)=\sum_{i=1}^\infty \frac{x_i}{m^{i-1}}=\\=x_1+\sum_{i=1}^\infty\frac{x_{i+1}}{m^i}\equiv\sum_{i=1}^\infty\frac{x_{i+1}}{m^i}\pmod 1
\end{multline*}
\end{proof}
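\begin{remark}
For example, for $m=10$ and $x=(1,4,1,5,9,\ldots)\in\Sigma_{10}$ we have $\phi(x)=0.14159\ldots$, and both $E_{10}(\phi(x))$ and $\phi(\sigma_{10}(x))$ equal $0.4159\ldots$: on base-$m$ expansions, the expansion map acts by dropping the first digit, which is exactly the shift.
\end{remark}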
\begin{remark}
Note that $E_m$ preserves the Lebesgue measure \textit{backwards}: $\abs{{E_m}^{-1}(A)}=\abs{A}$ for all measurable $A\subseteq \S^1$, but $\abs{E_m(A)}\ne \abs{A}$ in general.
\end{remark}
\begin{definition}
We define the following distance in $\Sigma_m$: $d(x,x'):=0$ if $x=x'$ and, for all $x,x'\in\Sigma_m$ with $x\neq x'$:
$$
d(x,x'):=\frac{1}{2^\ell}\quad\text{with }\ell:=\min\{i:x_i\ne x_i'\}
$$
\end{definition}
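\begin{remark}
For instance, in $\Sigma_2$ the sequences $(0,1,1,0,\ldots)$ and $(0,1,0,0,\ldots)$ first differ at $i=3$, so their distance is $\frac{1}{2^3}$. In general, $d(x,x')\leq\frac{1}{2^{\ell+1}}$ if and only if $x$ and $x'$ agree on their first $\ell$ coordinates.
\end{remark}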
\begin{proposition}
Periodic points of $E_m$ are dense in $\S^1$.
\end{proposition}
\begin{proof}
By conjugacy it suffices to show that periodic points of $\sigma_m$ are dense in $\Sigma_m$. Let $x\in \Sigma_m$ and $\varepsilon>0$. Then, $\varepsilon>\frac{1}{2^\ell}$ for some $\ell\in\NN$, and the point
$$
y=(x_1,\ldots,x_\ell,x_1,\ldots,x_\ell,x_1,\ldots,x_\ell,\ldots)
$$
is periodic for $\sigma_m$ and satisfies $d(x,y)\leq\frac{1}{2^{\ell+1}}<\varepsilon$. So periodic points of $\sigma_m$ are dense in $\Sigma_m$.
\end{proof}
\begin{proposition}
There exists $x\in \S^1$ whose positive orbit under $E_m$ is dense in $\S^1$.
\end{proposition}
\begin{proof}
By conjugacy, it suffices to exhibit a point of $\Sigma_m$ with dense orbit under $\sigma_m$. Take
$$
x=(0,1,\ldots,m-1,00,01,\ldots,(m-1)(m-1),000,\ldots)
$$
i.e.\ the sequence obtained by concatenating all words of length 1 over $\{0,\ldots,m-1\}$, then all words of length 2, and so on. Every finite word appears in $x$, so for any $y\in\Sigma_m$ and any $\ell\in\NN$ some iterate $\sigma_m^k(x)$ agrees with $y$ in its first $\ell$ coordinates, i.e.\ $d(\sigma_m^k(x),y)<\frac{1}{2^\ell}$. Hence the orbit of $x$ is dense.
\end{proof}
\end{multicols}
\end{document}
@@ -3,6 +3,33 @@
\begin{document}
\changecolor{INLP}
\begin{multicols}{2}[\section{Instabilities and nonlinear phenomena}]

\subsection{Review of bifurcations}
\subsubsection{Hopf bifurcation}
\begin{definition}
The normal form of a Hopf bifurcation is:
\begin{equation*}
\begin{cases}
\dot{x}=\mu x-\omega y-x(x^2+y^2) \\
\dot{y}=\omega x+\mu y-y(x^2+y^2)
\end{cases}
\end{equation*}
Or in polar coordinates:
\begin{equation*}
\begin{cases}
\dot{r}=\mu r-r^3 \\
\dot{\theta}=\omega
\end{cases}
\end{equation*}
\end{definition}
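\begin{remark}
The polar form can be checked directly from the Cartesian one: using $r^2=x^2+y^2$ and $\theta=\arctan(y/x)$,
\begin{align*}
r\dot{r}      & =x\dot{x}+y\dot{y}=\mu(x^2+y^2)-{(x^2+y^2)}^2=\mu r^2-r^4 \\
r^2\dot{\theta} & =x\dot{y}-y\dot{x}=\omega(x^2+y^2)=\omega r^2
\end{align*}
Hence, for $\mu\leq 0$ the origin attracts every orbit of the radial equation, whereas for $\mu>0$ the origin becomes unstable and a stable limit cycle of radius $r=\sqrt{\mu}$ appears (supercritical Hopf bifurcation).
\end{remark}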
\subsubsection{Homoclinic bifurcation}
\begin{definition}
The normal form of a homoclinic bifurcation is:
\begin{equation*}
\begin{cases}
\dot{x}=y \\
\dot{y}=-\mu- x+x^2-xy
\end{cases}
\end{equation*}
\end{definition}
\end{multicols}
\end{document}
138 changes: 138 additions & 0 deletions Mathematics/5th/Stochastic_calculus/Stochastic_calculus.tex
@@ -124,6 +124,30 @@
\Exp({(M_t-M_s)}^2\mid \mathcal{F}_s) =\Exp({M_t}^2-2M_tM_s+{M_s}^2\mid \mathcal{F}_s) =\\=\Exp({M_t}^2+{M_s}^2\!\mid \!\mathcal{F}_s)-2M_s\Exp(M_t\!\mid\! \mathcal{F}_s) =\Exp({M_t}^2-{M_s}^2\!\mid\! \mathcal{F}_s)
\end{multline*}
\end{proof}
\begin{theorem}[Doob's maximal inequality]\label{SC:doob_maximal}
If $M$ is a continuous square-integrable martingale, then $\forall a,t\geq 0$ we have:
$$
\Prob\left(\sup_{0\leq s\leq t}\abs{M_s}\geq a\right)\leq \frac{\Exp({M_t}^2)}{a^2}
$$
\end{theorem}
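\begin{remark}
This can be deduced from the maximal inequality for non-negative submartingales applied to $M^2$, which is a submartingale by the conditional Jensen inequality:
$$
\Prob\left(\sup_{0\leq s\leq t}\abs{M_s}\geq a\right)=\Prob\left(\sup_{0\leq s\leq t}{M_s}^2\geq a^2\right)\leq\frac{\Exp({M_t}^2)}{a^2}
$$
\end{remark}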
\begin{proposition}\label{SC:limit_of_martingales}
Let $(M^n)$ be a sequence of continuous square-integrable martingales and suppose that for each $t\geq 0$, the limit $\displaystyle M_t:=\lim_{n\to\infty}M_t^n$ exists in $L^2$. Then, $M={(M_t)}_{t\geq 0}$ is a continuous square-integrable martingale.
\end{proposition}
\begin{proof}
By \mnameref{SC:doob_maximal} applied to $M^n-M^m$ we have that for fixed $t\geq 0$ and $k\in\NN$:
$$
\Prob\left(\sup_{0\leq s\leq t}\abs{M_s^n-M_s^m}\geq \frac{1}{2^k}\right)\leq 4^k\,\Exp({(M_t^n-M_t^m)}^2)\leq \frac{1}{2^k}
$$
where in the last inequality we have used that ${(M_t^n)}$ converges in $L^2$ (hence is Cauchy) and chosen $n,m$ large enough so that the inequality holds. Thus, there is an increasing sequence $(n_k)$ such that:
$$
\Prob\left(\sup_{0\leq s\leq t}\abs{M_s^{n_{k+1}}-M_s^{n_k}}\geq \frac{1}{2^k}\right)\leq \frac{1}{2^k}
$$
By \mnameref{P:borel-cantelli1}, almost surely only finitely many of these events occur, and so almost surely
$$
\sum_{k=1}^{\infty}\sup_{0\leq s\leq t}\abs{M_s^{n_{k+1}}-M_s^{n_k}}<\infty
$$
which ensures that $(M^{n_k})$ converges almost surely in the space of continuous functions on $[0,t]$ equipped with the topology of uniform convergence. The limit is necessarily a version of $M$, because for each $t\geq 0$ we have $M_t^n\to M_t$ in $L^2$; in particular, $M$ admits a continuous version. The martingale property of $M$ follows by taking $L^2$-limits in $\Exp(M_t^n\mid\mathcal{F}_s)=M_s^n$, since conditional expectation is continuous on $L^2$.
\end{proof}
\subsubsection{Quadratic variation}
\begin{definition}
Let $f:\RR_{\geq 0}\to\RR$ be a function. We define the \emph{absolute variation} of $f$ on the interval $[s,t]$ as:
@@ -295,5 +319,119 @@
\item $M$ is a ${(\mathcal{F}_t)}_{t\geq 0}$-Brownian motion.
\end{enumerate}
\end{theorem}
\subsection{Stochastic integration}
\subsubsection{Wiener isometry}
\begin{definition}
Let $H$, $H'$ be Hilbert spaces. A map $I:H\to H'$ is called an \emph{isometry} if it is linear and $\forall x\in H$ we have:
$$
\norm{I(x)}_{H'}=\norm{x}_H
$$
We speak of a \emph{partial isometry} when $I$ is only defined on a subspace of $H$.
\end{definition}
\begin{theorem}
Let $H$, $H'$ be Hilbert spaces, $V\subseteq H$ be a dense subspace and $I:V\to H'$ be a partial isometry. Then, $I$ admits a unique continuous extension to $H$, and this extension is an isometry.
\end{theorem}
\begin{proof}
Let $x\in H\setminus V$. Then, $\exists(x_n)\subset V$ such that $x_n\to x$. Clearly, any continuous extension must satisfy $I(x):=\lim_{n\to\infty}I(x_n)$, so we take this as a definition. The limit exists because $(I(x_n))$ is Cauchy (since $(x_n)$ is Cauchy and $I$ preserves norms), and the definition does not depend on the chosen sequence $(x_n)$. With this definition, the extension is automatically linear and norm-preserving (by continuity).
\end{proof}
\begin{definition}
Let ${(B_t)}_{t\geq 0}$ be a Brownian motion and $f\in\mathcal{S}(\RR_{\geq 0})$ be a simple function such that $f=\sum_{k=1}^na_k\indi{(t_{k-1},t_k]}$ with $0=t_0\leq t_1\leq \cdots\leq t_n$. We define the \emph{Wiener integral} of $f$ as:
$$
I(f):=\sum_{k=1}^na_k(B_{t_k}-B_{t_{k-1}})
$$
\end{definition}
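\begin{remark}
For such a simple $f$, the increments $B_{t_k}-B_{t_{k-1}}$, $1\leq k\leq n$, are independent centered Gaussian variables with variances $t_k-t_{k-1}$, so:
$$
\Exp(I(f))=0\qquad\qquad\Exp\left({I(f)}^2\right)=\sum_{k=1}^n a_k^2(t_k-t_{k-1})=\norm{f}_{L^2(\RR_{\geq 0})}^2
$$
This is precisely the isometry property that the next theorem extends to all of $L^2(\RR_{\geq 0})$.
\end{remark}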
\begin{remark}
Recall that simple functions are dense in $L^p$ (\mcref{RFA:continuousdenseLp}).
\end{remark}
\begin{theorem}
Let ${(B_t)}_{t\geq 0}$ be a Brownian motion on $(\Omega, \mathcal{F}, \Prob)$. Then, there exists a unique linear and continuous map $I:L^2(\RR_{\geq 0})\to L^2(\Omega, \mathcal{F}, \Prob)$ such that for all $0\leq s\leq t$:
$$
I(\indi{(s,t]})=B_t-B_s
$$
Moreover, $I$ is an isometry. The map $I$ is called \emph{Wiener isometry} (or \emph{Wiener integral}) and denoted by $I(f)=\int_0^\infty f(u)\dd{B_u}$.
\end{theorem}
\begin{remark}
Recall that the $L^2$-limit of a sequence of Gaussian variables is Gaussian.
\end{remark}
\begin{proposition}
Let ${(B_t)}_{t\geq 0}$ be a Brownian motion. Then, the following are satisfied:
\begin{itemize}
\item For any $f\in L^2(\RR_{\geq 0})$ we have:
$$
\int_0^\infty f(u)\dd{B_u}\overset{L^2}{=}\lim_{n\to\infty}\sum_{k=1}^{n^2}a_{n,k}(f)(B_{\frac{k+1}{n}}-B_{\frac{k}{n}})
$$
where $a_{n,k}(f):=n\int_{\frac{k}{n}}^{\frac{k+1}{n}}f(u)\dd{u}$ is the average of $f$ on the interval $[\frac{k}{n},\frac{k+1}{n}]$.
\item For any $f\in L^2(\RR_{\geq 0})$, the Wiener integral $\int_0^\infty f(u)\dd{B_u}$ is a Gaussian variable with zero mean and variance $\int_0^\infty{f(u)}^2\dd{u}$.
\item For any $f,g\in L^2(\RR_{\geq 0})$ we have:
$$
\cov\left(\int_0^\infty f(u)\dd{B_u},\int_0^\infty g(u)\dd{B_u}\right)=\int_0^\infty f(u)g(u)\dd{u}
$$
\end{itemize}
\end{proposition}
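\begin{remark}
The last item follows from the isometry property by polarization: since the Wiener integral is centered,
\begin{multline*}
\cov\left(I(f),I(g)\right)=\Exp\left(I(f)I(g)\right)=\\=\frac{1}{4}\left(\Exp\left({I(f+g)}^2\right)-\Exp\left({I(f-g)}^2\right)\right)=\\=\frac{1}{4}\left(\norm{f+g}_{L^2}^2-\norm{f-g}_{L^2}^2\right)=\int_0^\infty f(u)g(u)\dd{u}
\end{multline*}
\end{remark}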
\subsubsection{The Wiener integral as a process}
\begin{definition}
Let $f\in L_{\text{loc}}^2(\RR_{\geq 0})$ and $0\leq s\leq t$. We define the \emph{Wiener integral} of $f$ as:
$$
\int_s^t f(u)\dd{B_u}:=\int_0^\infty f(u)\indi{(s,t]}(u)\dd{B_u}
$$
\end{definition}
\begin{lemma}[Chasles relation]
Let $f\in L_{\text{loc}}^2(\RR_{\geq 0})$ and $0\leq r\leq s\leq t$. Then:
$$
\int_r^t f(u)\dd{B_u}=\int_r^s f(u)\dd{B_u}+\int_s^t f(u)\dd{B_u}
$$
\end{lemma}
\begin{proposition}
Let ${(B_t)}_{t\geq 0}$ be a Brownian motion and $f\in L_{\text{loc}}^2(\RR_{\geq 0})$. Then, the associated process $M^f={(M_t^f)}_{t\geq 0}$ defined as:
$$
M_t^f:=\int_0^t f(u)\dd{B_u}
$$
is a centered Gaussian process with covariance function:
$$
\cov(M_s^f,M_t^f)=\int_0^{s\wedge t}{f(u)}^2\dd{u}
$$
\end{proposition}
\begin{proof}
We will only prove that $M^f$ is Gaussian (the computation of the mean and covariance functions is easy; see the remark below). Let $n\in\NN$, $(t_1,\ldots,t_n)\in\RR_{\geq 0}^n$ and $(\lambda_1,\ldots,\lambda_n)\in\RR^n$. Then:
$$
\sum_{k=1}^n\lambda_k M_{t_k}^f=\int_0^\infty g(u)\dd{B_u}
$$
with $g(u)=\sum_{k=1}^n\lambda_k f(u)\indi{(0,t_k]}(u)\in L^2(\RR_{\geq 0})$, and the right-hand side is Gaussian because it is a Wiener integral.
\end{proof}
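\begin{remark}
As for the covariance: using the previous proposition and $\indi{(0,s]}\indi{(0,t]}=\indi{(0,s\wedge t]}$,
$$
\cov(M_s^f,M_t^f)=\int_0^\infty {f(u)}^2\indi{(0,s]}(u)\indi{(0,t]}(u)\dd{u}=\int_0^{s\wedge t}{f(u)}^2\dd{u}
$$
\end{remark}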
\begin{theorem}
Let $f\in L_{\text{loc}}^2(\RR_{\geq 0})$. Then, $M^f$ is a continuous square-integrable martingale with:
$$
{\langle M^f\rangle}_t=\int_0^t{f(u)}^2\dd{u}
$$
\end{theorem}
\begin{proof}
The integrability and square-integrability are clear because $M^f$ is Gaussian. Note that $t\mapsto M_t^f$ is continuous when $f=\indi{(0,a]}$, because the Brownian motion is continuous, and hence also when $f$ is simple. Using \mcref{SC:limit_of_martingales} we get the result for any $f\in L_{\text{loc}}^2(\RR_{\geq 0})$. Now let's prove that $M^f$ is a martingale. We have:
\begin{align*}
M_t^f & =\lim_{n\to\infty}\sum_{k=1}^{n^2}a_{n,k}(f\indi{(0,t]})(B_{\frac{k+1}{n}}-B_{\frac{k}{n}}) \\
        & =\lim_{n\to\infty}\sum_{k=1}^{n^2}a_{n,k}(f\indi{(0,t]})(B_{\frac{k+1}{n}\wedge t}-B_{\frac{k}{n}\wedge t})
\end{align*}
and the last expression is $\mathcal{F}_t$-measurable, so $M^f$ is adapted. Finally, if $0\leq s\leq t$, since $M_t^f-M_s^f$ is independent of $\mathcal{F}_s$ we have:
$$
\Exp(M_t^f-M_s^f\mid \mathcal{F}_s)=\Exp(M_t^f-M_s^f)=0
$$
Moreover, $({(M_t^f)}^2)_{t\geq 0}$ is clearly adapted and:
\begin{multline*}
\Exp\left({(M_t^f)}^2-{(M_s^f)}^2\mid \mathcal{F}_s\right)=\Exp\left({(M_t^f-M_s^f)}^2\mid\mathcal{F_s}\right)=\\=\Exp\left({(M_t^f-M_s^f)}^2\right)=\norm{I(f\indi{(s,t]})}_{L^2(\Omega)}^2=\\=\norm{f\indi{(s,t]}}_{L^2(\RR_{\geq 0})}^2=\int_s^t {f(u)}^2\dd{u}
\end{multline*}
where the first equality is due to \mnameref{SC:orthogonality_martingales} and then we used the isometry property of $I$. This implies that ${\left({(M_t^f)}^2-\int_0^t{f(u)}^2\dd{u}\right)}_{t\geq 0}$ is a martingale and by the uniqueness of the quadratic variation we get the result.
\end{proof}
\begin{proposition}
Let ${(B_t)}_{t\geq 0}$ be a Brownian motion. For any $f\in L_{\text{loc}}^2(\RR_{\geq 0})$, the process $Z^f={(Z_t^f)}_{t\geq 0}$ defined as:
$$
Z_t^f:=\exp{\int_0^t f(u)\dd{B_u}-\frac{1}{2}\int_0^t{f(u)}^2\dd{u}}
$$
is a continuous square-integrable martingale.
\end{proposition}
\begin{proof}
The integrability and adaptedness pose no problem. Now fix $0\leq s\leq t$. We previously saw that $\int_s^t f(u)\dd{B_u}$ is independent of $\mathcal{F}_s$ and so:
$$
\Exp\left(Z_t^f\mid \mathcal{F}_s\right)=Z_s^f\Exp\left(\exp{\int_s^t f(u)\dd{B_u}-\frac{1}{2}\int_s^t{f(u)}^2\dd{u}}\right)=Z_s^f
$$
where the last equality follows because $\int_s^t f(u)\dd{B_u}$ is a centered Gaussian variable with variance $\sigma^2=\int_s^t {f(u)}^2\dd{u}$, and $\Exp\left(\exp{X}\right)=\exp{\frac{\sigma^2}{2}}$ for $X\sim N(0,\sigma^2)$.
\end{proof}
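\begin{remark}
For the constant function $f\equiv\lambda\in\RR$ this recovers the classical \emph{exponential martingale} of Brownian motion, $Z_t=\exp{\lambda B_t-\frac{\lambda^2 t}{2}}$.
\end{remark}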
\end{multicols}
\end{document}
41 changes: 40 additions & 1 deletion Physics/Advanced/Fluid_mechanics/Fluid_mechanics.tex
@@ -525,7 +525,7 @@
Follow the proof of \mnameref{FLM:conservationofmomentum} adapted to the stress tensor.
\end{sproof}
\begin{remark}
This equation together with the continuity equation and energy equation completely describe the flow if a compressible viscous fluid. In the case of an incompressible homogeneous fluid with $\rho=\rho_0=\const$, the
This equation together with the continuity equation and energy equation completely describe the flow in a compressible viscous fluid. In the case of an incompressible homogeneous fluid with $\rho=\rho_0=\const$, the
complete set of equations becomes the \emph{Navier-Stokes equations for incompressible flow}:
$$
\begin{cases}
@@ -567,5 +567,44 @@
and $D_2$ that are related by a scale factor $\lambda$ so that $L_1 = \lambda L_2$. Let choices
of $U_1$ and $U_2$ be made for each flow, and let the viscosities be $\nu_1$ and $\nu_2$, respectively. If $\mathrm{Re}_1=\mathrm{Re}_2$, then the dimensionless velocity fields $\vf{u}_1$ and $\vf{u}_2$ satisfy exactly the same equation on the same region.
\end{definition}
\begin{remark}
This idea of the similarity of flows is used in the design of experimental models. For example, suppose we are contemplating a new design for an aircraft wing, and we wish to know the behavior of a fluid flow around it. Rather than build the wing itself, it may be faster and more economical to perform the initial tests on a scaled-down version. We design our model so that it has the same geometry as the full-scale wing, and we choose values for the undisturbed velocity, coefficient of viscosity, and so on, such that the Reynolds number for the flow in our experiment matches that of the actual flow. We can then expect the results of our experiment to be relevant to the actual flow over the full-scale wing.
\end{remark}
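\begin{remark}
For instance, with the usual definition $\mathrm{Re}=\frac{UL}{\nu}$, a model ten times smaller than the full-scale wing, tested in the same fluid (same $\nu$), must be run at ten times the full-scale velocity in order to keep the Reynolds number unchanged.
\end{remark}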
\begin{theorem}[Helmholtz-Hodge decomposition theorem]
Let $D\subset\RR^3$ be a region with smooth boundary and $\vf{w}$ be a vector field on $D$. Then, there exists a unique decomposition:
$$
\vf{w}=\vf{u}+\grad p
$$
where $\vf{u}$ has zero divergence and is parallel to $\Fr{D}$, i.e.\ $\vf{u}\cdot\vf{n}=0$ on $\Fr{D}$.
\end{theorem}
\begin{proof}
We use the fact that the following problem has existence and uniqueness of solutions up to the addition of a constant to $p$:
$$
\begin{cases}
\displaystyle\laplacian p =\div \vf{w} & \text{in $D$} \\
\displaystyle \pdv{p}{\vf{n}}=\vf{w}\cdot \vf{n} & \text{on $\Fr{D}$}
\end{cases}
$$
Now let $\vf{u}:=\vf{w}-\grad p$, where $p$ is the solution of the above problem. Then, $\div \vf{u}=0$ and $\vf{u}\cdot\vf{n}=0$. To prove uniqueness, first note that if $\vf{w}=\vf{u}+\grad p$, then:
$$
\int_D\vf{u}\cdot \grad p=0
$$
which follows automatically from the \mnameref{FSV:divergencethm}, the hypothesis on $\vf{u}$ and the relation:
$$
\div(\vf{u}p)=\vf{u}\cdot\grad p+p\div\vf{u}
$$
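Indeed, integrating this identity over $D$ and using the \mnameref{FSV:divergencethm} together with $\div\vf{u}=0$ in $D$ and $\vf{u}\cdot\vf{n}=0$ on $\Fr{D}$:
$$
\int_D\vf{u}\cdot\grad p=\int_D\div(p\vf{u})-\int_D p\div\vf{u}=\int_{\Fr{D}}p\,\vf{u}\cdot\vf{n}=0
$$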
Now, if moreover we have $\vf{w}=\vf{u}'+\grad p'$, then $\vf{0}= \vf{u}-\vf{u}' + \grad(p-p')$ and therefore taking the inner product with $\vf{u}-\vf{u}'$ and integrating we get:
\begin{multline*}
0=\int_D\norm{\vf{u}-\vf{u}'}^2+\int_D(\vf{u}-\vf{u}')\cdot\grad(p-p')= \\
=\int_D\norm{\vf{u}-\vf{u}'}^2
\end{multline*}
Hence, $\vf{u}=\vf{u}'$ and therefore $\grad p=\grad p'$, which implies that $p-p'=\const$.
\end{proof}
\begin{definition}
Let $D\subset\RR^3$ be a region with smooth boundary. Given a vector field $\vf{w}=\vf{u}+\grad p$ on $D$, we can define the orthogonal projection operator $\vf{P}$ as:
$$
\vf{P}(\vf{w}):=\vf{u}
$$
\end{definition}
\end{multicols}
\end{document}
1 change: 1 addition & 0 deletions preamble_formulas.sty
@@ -96,6 +96,7 @@
{INLEPDE}{\ana} % introduction to non linear elliptic pdes
{LTLD}{\sta} % limit theorems and large deviations
{SC}{\sta} % stochastic calculus
{INLP}{\phy} % Instabilities and nonlinear phenomena
}{\col}%
}
\ExplSyntaxOff