updated montecarlo and finished stochastic calculus
victorballester7 committed Oct 19, 2023
1 parent 8008c40 commit fa136db
Showing 2 changed files with 73 additions and 2 deletions.
9 changes: 7 additions & 2 deletions Mathematics/5th/Montecarlo_methods/Montecarlo_methods.tex
@@ -190,6 +190,7 @@
$$
\Var(Y+b(X-\Exp(X)))\ll \Var(Y)
$$
We define $Y(b):=Y+b(X-\Exp(X))$.
This suggests the following estimator:
$$
\overline{Y}_n(b):=\frac{1}{n}\sum_{i=1}^n [Y_i+b(X_i-\Exp(X))]
@@ -207,13 +208,17 @@
If $b=0$, the control variate estimator $\overline{Y}_n(b)$ coincides with the classical estimator $\overline{Y}_n$. Otherwise, the computational cost of $\overline{Y}_n(b)$ is higher than that of $\overline{Y}_n$, but it does not depend on the choice of $b\neq 0$.
\end{remark}
\begin{proposition}
The minimum of $\Var(Y(b))$ is attained for $$\hat{b}= \frac{\cov(Y,X)}{\Var(X)}$$ and in that case, $\Var(Y(\hat{b}))=\Var(Y)(1-{\rho_{XY}}^2)$, where $\rho_{XY}$ is the correlation between $X$ and $Y$.
The minimum of $\Var(Y(b))$ is attained for $$\hat{b}= -\frac{\cov(Y,X)}{\Var(X)}$$ and in that case, $\Var(Y(\hat{b}))=\Var(Y)(1-{\rho_{XY}}^2)$, where $\rho_{XY}$ is the correlation between $X$ and $Y$.
\end{proposition}
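% Editorial addition (sketch, not in the original notes): the computation behind the proposition.
A short derivation justifying the sign of $\hat{b}$: since
$$
\Var(Y(b))=\Var(Y)+2b\cov(X,Y)+b^2\Var(X)
$$
is a quadratic polynomial in $b$, it is minimized at $\hat{b}=-\frac{\cov(X,Y)}{\Var(X)}$, and substituting back gives $\Var(Y(\hat{b}))=\Var(Y)-\frac{{\cov(X,Y)}^2}{\Var(X)}=\Var(Y)\left(1-{\rho_{XY}}^2\right)$.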
\begin{remark}
Usually, $\hat{b}$ is unknown, but we can use an estimator of it, such as:
$$
\hat{b}_n:=\frac{\sum_{i=1}^n (Y_i-\overline{Y}_n)(X_i-\overline{X}_n)}{\sum_{i=1}^n {(X_i-\overline{X}_n)}^2}
\hat{b}_n:=-\frac{\sum_{i=1}^n (Y_i-\overline{Y}_n)(X_i-\overline{X}_n)}{\sum_{i=1}^n {(X_i-\overline{X}_n)}^2}
$$
but if we know $\Exp(X)$ and $\Var(X)$ explicitly, we can use them in the formula for $\hat{b}_n$ (replacing $\overline{X}_n$ by $\Exp(X)$ and the denominator by $n\Var(X)$).
\end{remark}
\begin{remark}
The result above tells us that we should pick $X$ strongly correlated with $Y$, but simple enough that $\Exp(X)$ is known explicitly.
\end{remark}
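% Editorial addition: a standard illustration, not part of the original notes.
A standard illustration of this principle: to estimate $\Exp(e^U)$ with $U$ uniform on $[0,1]$, take $Y=e^U$ and $X=U$, for which $\Exp(U)=\frac{1}{2}$ is known. Here
$$
\cov(e^U,U)=1-\frac{e-1}{2},\qquad \Var(U)=\frac{1}{12},\qquad \Var(e^U)=\frac{e^2-1}{2}-{(e-1)}^2,
$$
so ${\rho_{XY}}^2\approx 0.98$ and the control variate reduces the variance by a factor of roughly $60$.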
\begin{definition}
Let $\vf{X}$ be a random vector such that $\Exp(\vf{X})$ is known, and $\vf{b}\in \RR^d$. We define the \emph{multiple control variate estimator} as:
66 changes: 66 additions & 0 deletions Mathematics/5th/Stochastic_calculus/Stochastic_calculus.tex
@@ -926,6 +926,72 @@
\end{enumerate}
\end{proposition}
\subsubsection{Generator of a diffusion}
\begin{definition}[Generator]
The \emph{generator} of the semigroup ${(P_t)}_{t\geq 0}$ is the linear operator $L$ defined by:
$$
(Lf)(x):=\lim_{t\to 0}\frac{P_tf(x)-f(x)}{t}
$$
for all $f\in L^\infty(\RR)$ and $x\in\RR$ such that the limit exists. The functions $f$ for which this limit exists for every $x\in\RR$ form a vector space, denoted by $\text{Dom}(L)$.
\end{definition}
\begin{theorem}
Let $f\in \mathcal{C}_\text{b}^2(\RR)$. Then:
\begin{enumerate}
\item $Lf$ is well-defined and it is given $\forall x\in\RR$ by:
$$
Lf(x)=\frac{1}{2}\sigma^2(x)f''(x)+b(x)f'(x)
$$
\item For all $t\geq 0$, we have $P_tf\in \text{Dom}(L)$ and it satisfies \emph{Kolmogorov's equation}:
$$
\dv{}{t}P_tf=P_t(Lf)=L(P_tf)
$$
\item The process ${(M_t)}_{t\geq 0}$ defined as
$$
M_t:=f(X_t)-f(X_0)-\int_0^t Lf(X_s)\dd{s}
$$
is a continuous square-integrable martingale.
\end{enumerate}
\end{theorem}
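% Editorial addition: a quick sanity check, not part of the original notes.
As a quick sanity check, for standard Brownian motion ($b\equiv 0$, $\sigma\equiv 1$) the theorem gives, for $f\in\mathcal{C}_\text{b}^2(\RR)$,
$$
Lf=\frac{1}{2}f''\qquad\text{and}\qquad M_t=f(B_t)-f(B_0)-\frac{1}{2}\int_0^t f''(B_s)\dd{s},
$$
and Kolmogorov's equation becomes the heat equation $\dv{}{t}P_tf=\frac{1}{2}{(P_tf)}''$.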
\subsubsection{Connection with PDEs}
For this section, recall the SDE of a diffusion:
\begin{equation}\label{SC:sde_pde}
\begin{cases}
\dd{X_t^x}=b(X_t^x)\dd{t}+\sigma(X_t^x)\dd{B_t} \\
X_0^x=x
\end{cases}
\end{equation}
where $b,\sigma:\RR\to\RR$ are Lipschitz functions. Now, fix $f\in L^\infty(\RR)$ and consider the PDE:
\begin{equation}\label{SC:pde_sde}
\begin{cases}
\pdv{v}{t}(t,x)=b(x)\pdv{v}{x}(t,x)+\frac{1}{2}\sigma^2(x)\pdv[2]{v}{x}(t,x) \\
v(0,x)=f(x)
\end{cases}
\end{equation}
where $v\in \mathcal{C}^{1,2}([0,\infty)\times\RR)$.
\begin{theorem}\hfill
\begin{enumerate}
\item If $v$ is a bounded solution to the PDE of \mcref{SC:pde_sde}, then we must have $\forall (t,x)\in [0,\infty)\times\RR$:
\begin{equation}\label{SC:sol_v}
v(t,x)=\Exp(f(X_t^x))
\end{equation}
\item Conversely, if $b,\sigma, f\in \mathcal{C}_\text{b}^2(\RR)$, then the function $v$ defined in \mcref{SC:sol_v} is a bounded solution of \mcref{SC:pde_sde}.
\end{enumerate}
\end{theorem}
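% Editorial addition: an example, not part of the original notes.
For instance, with $b\equiv 0$ and $\sigma\equiv 1$ we have $X_t^x=x+B_t$, and \mcref{SC:sol_v} becomes the classical heat-kernel representation
$$
v(t,x)=\Exp(f(x+B_t))=\int_\RR f(y)\frac{1}{\sqrt{2\pi t}}\exp\left(-\frac{{(y-x)}^2}{2t}\right)\dd{y},
$$
which satisfies $\pdv{v}{t}=\frac{1}{2}\pdv[2]{v}{x}$ for $t>0$ with initial condition $v(0,\cdot)=f$.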
\begin{remark}
The interest of this connection between SDEs and PDEs is twofold: on the one hand, one can use tools from PDE theory to understand the distribution of $X_t^x$; on the other hand, the probabilistic representation of \mcref{SC:sol_v} offers a practical way to solve the PDE of \mcref{SC:pde_sde} numerically, by simulation.
\end{remark}
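% Editorial addition: a sketch of the second point; the notation $\overline{X}^{x,(i)}_t$ for a simulated (e.g.\ Euler-discretized) path is not from the notes.
Concretely, one approximates
$$
v(t,x)=\Exp(f(X_t^x))\approx \frac{1}{n}\sum_{i=1}^n f\left(\overline{X}^{x,(i)}_t\right)
$$
using $n$ independent simulated paths of \mcref{SC:sde_pde}, in the spirit of Monte Carlo estimation.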
\begin{theorem}[Feynman--Kac formula]
Let $v\in \mathcal{C}^{1,2}([0,\infty)\times\RR)$ be a bounded solution to the PDE
$$
\begin{cases}
\pdv{v}{t}(t,x)=-h(x)v(t,x)+b(x) \pdv{v}{x}(t,x)+\frac{1}{2}\sigma^2(x)\pdv[2]{v}{x}(t,x) \\
v(0,x)=f(x)
\end{cases}
$$
where $f,h:\RR\to\RR$ are measurable, with $h$ non-negative. Then, we have the representation
$$
v(t,x)=\Exp\left(f(X_t^x)\exp\left(-\int_0^t h(X_s^x)\dd{s}\right)\right)
$$
for all $(t,x)\in [0,\infty)\times\RR$.
\end{theorem}
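% Editorial addition: a consistency check, not part of the original notes.
A simple consistency check: if $h\equiv\lambda$ for a constant $\lambda\geq 0$, the exponential weight factors out and
$$
v(t,x)=e^{-\lambda t}\,\Exp(f(X_t^x))=e^{-\lambda t}P_tf(x),
$$
which indeed satisfies $\pdv{v}{t}=-\lambda v+b(x)\pdv{v}{x}+\frac{1}{2}\sigma^2(x)\pdv[2]{v}{x}$ by Kolmogorov's equation.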
\end{multicols}
\end{document}