\lecture{17}{2023-06-15}{}
\subsection{Doob's Martingale Convergence Theorem}
\begin{definition}[Stochastic process]
A \vocab{stochastic process} is a collection of random
variables $(X_t)_{t \in T}$ for some index set $T$.
In this lecture we will consider the case $T = \N$.
\end{definition}
\begin{definition}[Previsible process]
Consider a filtration $(\cF_n)_{n \ge 0}$.
A stochastic process $(C_n)_{n \ge 1}$
is called \vocab[Stochastic process!previsible]{previsible}
iff $C_n$ is $\cF_{n-1}$-measurable for all $n \ge 1$.
\end{definition}
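For instance, with respect to the natural filtration $\cF_n = \sigma(X_0, \ldots, X_n)$
of a stochastic process $(X_n)_{n \ge 0}$,
any process of the form $C_n = f_n(X_0, \ldots, X_{n-1})$ with $f_n$ measurable is previsible:
the $n$-th value only uses information available strictly before time $n$.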
\begin{goal}
Can we improve our expected winnings in a fair game by choosing a clever ``gambling strategy''?

Consider a stochastic process $(X_n)_{n \in \N}$.
Note that the increments $X_{n+1} - X_n$ can be thought of as the win
or loss per round of a game.
Suppose that there is another stochastic process
$(C_n)_{n \ge 1}$ such that $C_n$ is determined
by the information gathered up until time $n-1$,
i.e.~$C_n$ is previsible.
Think of $C_n$ as our strategy for playing the game.
Then $C_n(X_n - X_{n-1})$ defines the win in the $n$-th game,
while
\begin{equation}
Y_n \coloneqq \sum_{j=1}^n C_j(X_j - X_{j-1})
\label{eqn:cumulative-win-process}
\end{equation}
defines the cumulative win process.
\end{goal}
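For instance, if $C_n \equiv 1$ for all $n$, then $Y_n = X_n - X_0$:
betting one unit in every round simply reproduces the original game.
More interesting strategies arise when $C_n$ reacts to the past behaviour of $(X_n)_n$,
as in the upcrossing strategy \eqref{eqn:upcrossing-strategy} below.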
\begin{lemma}
\label{lem:gambling-strategy}
If $(C_n)_{n \ge 1}$ is previsible, $(X_n)_{n \ge 0}$
is a martingale,
and for each $n$ there exists a constant $K_n$ such that $|C_n(\omega)| \le K_n$
for all $\omega$,
then $(Y_n)_{n \ge 1}$ defined in \eqref{eqn:cumulative-win-process}
is also a martingale.
\end{lemma}
\begin{remark}
The assumption that $C_n$ is bounded can be weakened to
$C_n \in L^p(\bP)$, $X_n \in L^q(\bP)$ with $\frac{1}{p} + \frac{1}{q} = 1$.
If $C_n \ge 0$, the assumption of $(X_n)_{n\ge 0}$
being a martingale can be weakened to it being
a sub-/supermartingale.
Then $(Y_n)_{n \ge 1}$ is a sub-/supermartingale as well.
\end{remark}
\begin{refproof}{lem:gambling-strategy}
It is clear that $Y_n$ is $\cF_n$-measurable.
Suppose that $C_n \in L^p(\bP)$ and $X_n \in L^{q}(\bP)$
for all $n$ (under the assumption of the lemma this holds with $p = \infty$ and $q = 1$).
We have
\begin{IEEEeqnarray*}{rCl}
\|Y_n\|_{L^1}
&\le& \sum_{i=1}^{n} \|C_i(X_i - X_{i-1})\|_{L^1}\\
&\overset{\text{Hölder}}{\le}& \sum_{i=1}^{n} \|C_i\|_{L^p} \|(X_i - X_{i-1})\|_{L^q} \\
&<&\infty
\end{IEEEeqnarray*}
and
\begin{IEEEeqnarray*}{rCl}
\bE[Y_{n+1} - Y_n | \cF_n]
&=& \bE[C_{n+1} (X_{n+1} - X_n) | \cF_n]\\
&=& C_{n+1} (\bE[X_{n+1} | \cF_n] - X_n)\\
&=& 0.
\end{IEEEeqnarray*}
Here we used that $C_{n+1}$ is $\cF_n$-measurable,
so it can be pulled out of the conditional expectation.
\end{refproof}
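The same computation also gives the variant stated in the remark:
if $C_n \ge 0$ and $(X_n)_{n \ge 0}$ is a supermartingale, then
\[
\bE[Y_{n+1} - Y_n | \cF_n] = C_{n+1} \left( \bE[X_{n+1} | \cF_n] - X_n \right) \le 0,
\]
so $(Y_n)_{n \ge 1}$ is a supermartingale; the submartingale case is analogous.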
Suppose we have $(X_n)$ adapted, $X_n \in L^1(\bP)$,
$(C_n)_{n \ge 1}$ previsible.
We play according to the following principle:
Pick two real numbers $a < b$.
Wait until $X_n < a$, then start playing.
Stop playing once $X_n > b$.
I.e.~define
\begin{equation}
\begin{aligned}
C_1 &\coloneqq 0,\\
C_n &\coloneqq
\One_{\{C_{n-1} = 1\}} \cdot \One_{\{X_{n-1} \le b\}}
+ \One_{\{C_{n-1} = 0\} } \One_{\{X_{n-1} < a\}}, \qquad n \ge 2.
\end{aligned}
\label{eqn:upcrossing-strategy}
\end{equation}
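Note that $C_n$ only takes the values $0$ and $1$;
by induction it is a (bounded) previsible process,
since $C_n$ is a function of $C_{n-1}$ and $X_{n-1}$ and hence $\cF_{n-1}$-measurable.
Moreover, $C_n = 1$ exactly while an upcrossing of $[a,b]$ is being attempted.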
\begin{definition}
Fix $N \in \N$ and let
\[U_N^X([a,b])(\omega) \coloneqq \# \{\text{upcrossings of $[a,b]$ made by $n \mapsto X_n(\omega)$ by time $N$}\},\]
i.e.~$U_N^X([a,b])(\omega)$ is the largest $k \in \N_0$ such that we can find a
sequence
$0 \le s_1 < t_1 < s_2 < t_2 < \ldots < s_k < t_k \le N$
such that $X_{s_j}(\omega) < a$ and $X_{t_j}(\omega) > b$ for all $1 \le j \le k$.
\end{definition}
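For example, for $a = 0$, $b = 1$ and a path with values
$X_0 = 2$, $X_1 = -1$, $X_2 = 3$, $X_3 = -0.5$, $X_4 = 2$, $X_5 = 0.5$,
we have $U_5^X([0,1]) = 2$,
witnessed by $(s_1, t_1) = (1,2)$ and $(s_2, t_2) = (3,4)$;
no third upcrossing is possible, since the path drops below $a = 0$ only twice.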
Clearly $U_N^X([a,b]) \uparrow$ as $N$ increases.
It follows that the monotonic limit
\[U_\infty([a,b]) \coloneqq \lim_{N \to \infty} U_N([a,b])\]
exists pointwise.
\begin{lemma} % Lemma 1
\label{lec17l1}
\[
\{\omega | \liminf_{N \to \infty} Z_N(\omega) < a < b <
\limsup_{N \to \infty} Z_N(\omega)\} \subseteq
\{\omega: U^{Z}_\infty([a,b])(\omega) = \infty\}
\]
for every sequence of measurable functions $(Z_n)_{n \ge 1}$.
\end{lemma}
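This follows directly from the definitions; a short sketch of the argument:
\begin{proof}
If $\liminf_{N \to \infty} Z_N(\omega) < a$, then $Z_N(\omega) < a$ for infinitely many $N$,
and if $\limsup_{N \to \infty} Z_N(\omega) > b$, then $Z_N(\omega) > b$ for infinitely many $N$.
Alternating between these two infinite sets of indices,
we find for every $k \in \N$ times $0 \le s_1 < t_1 < \ldots < s_k < t_k$
with $Z_{s_j}(\omega) < a$ and $Z_{t_j}(\omega) > b$,
hence $U^{Z}_\infty([a,b])(\omega) = \infty$.
\end{proof}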
\begin{lemma} % 2
\label{lec17l2}
Let $Y_n(\omega) \coloneqq \sum_{j=1}^n C_j(X_j - X_{j-1})$,
where $C_n$ is defined as in \eqref{eqn:upcrossing-strategy}.
Then \[Y_N \ge (b-a) U_N([a,b]) - (X_N - a)^{-}.\]
\end{lemma}
\begin{proof}
Every completed upcrossing of $[a,b]$ increases the value of $Y$ by at least $(b-a)$,
since play starts at a level below $a$ and stops at a level above $b$.
If an upcrossing is still in progress at time $N$,
the loss incurred during this last stretch of play is at most $(X_N - a)^{-}$.
\end{proof}
\begin{lemma} %3
\label{lec17l3}
Suppose $(X_n)_n$ is a supermartingale.
Then in the above setup
\[(b-a) \bE[U_N([a,b])] \le \bE[(X_N - a)^-].\]
\end{lemma}
\begin{proof}
Since $0 \le C_n \le 1$,
\autoref{lem:gambling-strategy} together with the remark following it
shows that $(Y_n)_{n \ge 1}$ is a supermartingale.
Hence $\bE[Y_N] \le \bE[Y_1] = 0$.
From \autoref{lec17l2} it follows that
\[
(b-a) \bE[U_N([a,b])] \le \bE[Y_N] + \bE[(X_N-a)^-] \le \bE[(X_N-a)^-].
\]
\end{proof}
\begin{corollary}
Let $(X_n)_n$ be a
\vocab[Supermartingale!bounded in $L^1$]{supermartingale bounded in $L^1(\bP)$},
i.e.~$\sup_n \bE[|X_n|] < \infty$.
Then $(b-a) \bE[U_\infty([a,b])] \le |a| + \sup_n \bE[|X_n|]$.
In particular, $\bP[U_\infty([a,b]) = \infty] = 0$.
\end{corollary}
\begin{proof}
By \autoref{lec17l3}
we have that
\[(b-a) \bE[U_N([a,b])] \le \bE[(X_N - a)^-] \le \bE[ | X_N| ] + |a| \le \sup_n \bE[|X_n|] + |a|.\]
Since $U_N(\cdot) \ge 0$ and $U_N(\cdot ) \uparrow U_\infty(\cdot )$,
by the monotone convergence theorem
\[
\bE[U_N([a,b])] \uparrow \bE[U_\infty([a,b])],
\]
and the claim follows by letting $N \to \infty$.
Since the right-hand side is finite, $\bP[U_\infty([a,b]) = \infty] = 0$.
\end{proof}
Let us now consider the case that our process $(X_n)_{n \ge 1}$ is a supermartingale
bounded in $L^1(\bP)$.
\begin{theorem}[Doob's martingale convergence theorem]
\label{doobmartingaleconvergence}
\label{doob}
Any supermartingale bounded in $L^1$ converges almost surely to a
random variable, which is almost surely finite.
In particular, any non-negative supermartingale converges a.s.~to a finite random variable.
\end{theorem}
\begin{refproof}{doobmartingaleconvergence}
Let
\[
\Lambda \coloneqq \{\omega | X_n(\omega) \text{ does not converge to a limit in } [-\infty,\infty]\}.
\]
We have
\begin{IEEEeqnarray*}{rCl}
\Lambda &=& \{\omega | \liminf_N X_N(\omega) < \limsup_N X_N(\omega)\}\\
&=& \bigcup_{a,b \in \Q} \underbrace{\{\omega | \liminf_N X_N(\omega) < a < b < \limsup_N X_N(\omega)\}}_{\Lambda_{a,b}}.
\end{IEEEeqnarray*}
We have $\Lambda_{a,b} \subseteq \{\omega : U_{\infty}([a,b])(\omega) = \infty\}$
by \autoref{lec17l1}.
Since $(X_n)_n$ is bounded in $L^1$, the corollary following \autoref{lec17l3} gives $\bP(\Lambda_{a,b}) = 0$,
hence $\bP(\Lambda) = 0$ as a countable union of null sets.
Thus there exists a $[-\infty,\infty]$-valued random variable $X_\infty$ such that $X_n \xrightarrow{a.s.} X_\infty$.
\begin{claim}
$\bP[X_\infty \in \{\pm \infty\}] = 0$.
\end{claim}
\begin{subproof}
It suffices to show that $\bE[|X_\infty|] < \infty$.
We have
\begin{IEEEeqnarray*}{rCl}
\bE[|X_\infty|] &=& \bE[\liminf_{n \to \infty} |X_n|]\\
&\overset{\text{Fatou}}{\le }& \liminf_n \bE[|X_n|]\\
&\le & \sup_n \bE[|X_n|]\\
&<& \infty.
\end{IEEEeqnarray*}
\end{subproof}
The second part follows from
\begin{claim}
Any non-negative supermartingale is bounded in $L^1$.
\end{claim}
\begin{subproof}
We need to show $\sup_n \bE[|X_n|] < \infty$.
Since the supermartingale is non-negative, we have $\bE[|X_n|] = \bE[X_n]$,
and since it is a supermartingale, $\bE[X_n] \le \bE[X_0]$.
Hence $\sup_n \bE[|X_n|] \le \bE[X_0] < \infty$.
\end{subproof}
\end{refproof}
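A classical example illustrating \autoref{doob}:
let $(X_n)_{n \ge 0}$ be a simple symmetric random walk started at an integer $X_0 = x > 0$
and stopped when it first hits $0$.
Then $(X_n)_n$ is a non-negative martingale,
so by \autoref{doob} it converges a.s.~to a finite random variable $X_\infty$.
Since the increments are $\pm 1$ until the walk is stopped,
convergence is only possible if the walk is eventually absorbed, i.e.~$X_\infty = 0$ a.s.
In particular $\bE[X_\infty] = 0 \ne x = \bE[X_n]$,
so the convergence does not hold in $L^1$.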