lecture 17
parent 284ddd423a
commit 5304f38fbb
3 changed files with 172 additions and 5 deletions
@@ -221,8 +221,7 @@ Typically $\cF_n = \sigma(X_1, \ldots, X_n)$ for a sequence of random variables.
\begin{example}
\begin{itemize}
\item The simple random walk:
Let $\xi_1, \xi_2, \ldots$ be iid with
$\bP[\xi_i = 1] = \bP[\xi_i = -1] = \frac{1}{2}$,
@@ -231,7 +230,8 @@ Typically $\cF_n = \sigma(X_1, \ldots, X_n)$ for a sequence of random variables.
Then $X_n$ is $\cF_n$-measurable.
Showing that $(X_n)_n$ is a martingale
is left as an exercise.
\item See exercise sheet 9.
\item The branching process (next lecture).
\end{itemize}
\end{example}
\begin{example}
See exercise sheet 9.
\todo{Copy}
\end{example}
166 inputs/lecture_17.tex Normal file
@@ -0,0 +1,166 @@
\lecture{17}{2023-06-15}{}

\begin{definition}[Stochastic process]
% TODO
\end{definition}

\begin{goal}
What about a ``gambling strategy''?

Consider a stochastic process $(X_n)_{n \in \N}$.

Note that the increments $X_{n+1} - X_n$ can be thought of as the win
or loss per round of a game.
Suppose that there is another stochastic process
$(C_n)_{n \ge 1}$ such that $C_n$ is determined
by the information gathered up to time $n-1$,
i.e.~$C_n$ is measurable with respect to $\cF_{n-1}$.
A process $(C_n)_{n \ge 1}$ with this property is called
\vocab[Stochastic process!previsible]{previsible}.
Think of $(C_n)$ as our strategy for playing the game:
$C_n$ is the stake in the $n$-th round.
Then $C_n(X_n - X_{n-1})$ is the win (or loss) in the $n$-th round,
while
\[
Y_n \coloneqq \sum_{j=1}^n C_j(X_j - X_{j-1})
\]
defines the cumulative win process.
\end{goal}
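For instance (a sanity check not spelled out in the lecture), the constant strategy $C_j \equiv 1$, i.e.~always betting one unit, recovers the original game up to an additive constant:
\[
Y_n = \sum_{j=1}^n (X_j - X_{j-1}) = X_n - X_0.
\]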
\begin{lemma}
Suppose that $(C_n)_{n \ge 1}$ is previsible,
that $(X_n)_{n \ge 0}$ is a (sub-/super-) martingale,
that for every $n$ there exists a constant $K_n$ such that $|C_n(\omega)| \le K_n$ for all $\omega$,
and (in the sub-/supermartingale case) that $C_n \ge 0$.
Then $(Y_n)_{n \ge 1}$ is also a (sub-/super-) martingale.
\end{lemma}
\begin{proof}
Exercise. \todo{Copy}
\end{proof}
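A minimal sketch of the martingale case (not copied from the lecture; the sub-/supermartingale cases are analogous, using $C_n \ge 0$): since $C_n$ is $\cF_{n-1}$-measurable and bounded,
\begin{IEEEeqnarray*}{rCl}
\bE[Y_n - Y_{n-1} \mid \cF_{n-1}] &=& \bE[C_n (X_n - X_{n-1}) \mid \cF_{n-1}]\\
&=& C_n \, \bE[X_n - X_{n-1} \mid \cF_{n-1}]\\
&=& 0,
\end{IEEEeqnarray*}
and $Y_{n-1}$ is $\cF_{n-1}$-measurable, so $\bE[Y_n \mid \cF_{n-1}] = Y_{n-1}$.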
\begin{remark}
The assumption of $K_n$ being constant can be weakened to
$C_n \in L^p(\bP)$, $X_n \in L^q(\bP)$ with $\frac{1}{p} + \frac{1}{q} = 1$.
\end{remark}

Suppose we have $(X_n)_n$ adapted with $X_n \in L^1(\bP)$
and $(C_n)_{n \ge 1}$ previsible.
We play according to the following principle:
pick two real numbers $a < b$,
wait until $X_n < a$, then start playing,
and stop playing once $X_n > b$.
I.e.~define
\begin{itemize}
\item $C_1 \coloneqq 0$,
\item $C_n \coloneqq \One_{\{C_{n-1} = 1\}} \cdot \One_{\{X_{n-1} \le b\}} + \One_{\{C_{n-1} = 0\}} \cdot \One_{\{X_{n-1} < a\}}$ for $n \ge 2$.
\end{itemize}
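Unrolling the recursion for the first few rounds (a quick check that is not part of the notes) shows that $C_n$ only depends on $X_1, \ldots, X_{n-1}$, e.g.
\[
C_2 = \One_{\{X_1 < a\}}, \qquad
C_3 = \One_{\{X_1 < a,\ X_2 \le b\}} + \One_{\{X_1 \ge a,\ X_2 < a\}},
\]
so $(C_n)_{n \ge 1}$ is indeed previsible; moreover it is $\{0,1\}$-valued, hence bounded and non-negative.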

\begin{definition}
Fix $N \in \N$ and let
\[U_N^X([a,b])(\omega) \coloneqq \#\{\text{upcrossings of $[a,b]$ made by $n \mapsto X_n(\omega)$ by time $N$}\},\]
i.e.~$U_N^X([a,b])(\omega)$ is the largest $k \in \N_0$ such that we can find a
sequence
$0 \le s_1 < t_1 < s_2 < t_2 < \ldots < s_k < t_k \le N$
such that $X_{s_j}(\omega) < a$ and $X_{t_j}(\omega) > b$ for all $1 \le j \le k$.
\end{definition}
Clearly $U_N^X([a,b]) \uparrow$ as $N$ increases,
so the monotone limit $U_\infty^X([a,b]) \coloneqq \lim_{N \to \infty} U_N^X([a,b])$ exists pointwise.
(We drop the superscript $X$ when the process is clear from context.)
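As an illustration (example values chosen here, not from the lecture), take $a = 0$, $b = 1$ and a path with
\[
X_0 = 0, \quad X_1 = -1, \quad X_2 = 2, \quad X_3 = -2, \quad X_4 = 3.
\]
Then $(s_1, t_1) = (1,2)$ and $(s_2, t_2) = (3,4)$ witness $U_4^X([0,1]) = 2$, and no larger $k$ fits into five time points.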
\begin{lemma} % Lemma 1
For every sequence of measurable functions $(Z_N)_{N \ge 1}$ and all $a < b$,
\[
\{\omega \mid \liminf_{N \to \infty} Z_N(\omega) < a < b <
\limsup_{N \to \infty} Z_N(\omega)\} \subseteq
\{\omega \mid U^{Z}_\infty([a,b])(\omega) = \infty\}.
\]
\end{lemma}
\begin{lemma} % 2
Let $Y_N(\omega) \coloneqq \sum_{j=1}^N C_j(X_j - X_{j-1})$.
Then $Y_N \ge (b - a) U_N([a,b]) - (X_N - a)^{-}$.
\end{lemma}
\begin{proof}
Every completed upcrossing of $[a,b]$ increases the value of $Y$ by at least $(b-a)$,
while the last interval of play contributes a loss of at most $(X_N - a)^{-}$.
\end{proof}

\begin{lemma} %3
Suppose $(X_n)_n$ is a supermartingale.
Then, in the above setup, $(b-a) \bE[U_N([a,b])] \le \bE[(X_N - a)^-]$.
\end{lemma}
\begin{proof}
Obvious from lemma 2 % TODO REF
and the supermartingale property.
\end{proof}
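In slightly more detail (an expansion of the argument, not copied from the lecture): the strategy $(C_n)$ is previsible, bounded and non-negative, so by the first lemma of this lecture $(Y_N)_N$ is again a supermartingale, and $\bE[Y_N] \le \bE[Y_1] = 0$ since $C_1 = 0$. Taking expectations in lemma 2 then gives
\[
0 \ge \bE[Y_N] \ge (b-a) \bE[U_N([a,b])] - \bE[(X_N - a)^-].
\]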
\begin{corollary}
Let $(X_n)_n$ be a \vocab[Supermartingale!bounded]{supermartingale bounded in $L^1(\bP)$},
i.e.~$\sup_n \bE[|X_n|] < \infty$.
Then $(b-a) \bE[U_\infty([a,b])] \le |a| + \sup_n \bE[|X_n|]$.
In particular, $\bP[U_\infty([a,b]) = \infty] = 0$.
\end{corollary}
\begin{proof}
By lemma 3 % TODO REF
we have that
\[(b-a) \bE[U_N([a,b])] \le \bE[(X_N - a)^-] \le \bE[|X_N|] + |a| \le \sup_n \bE[|X_n|] + |a|.\]
Since $U_N(\cdot) \ge 0$ and $U_N(\cdot) \uparrow U_\infty(\cdot)$,
by the monotone convergence theorem
\[
\bE[U_N([a,b])] \uparrow \bE[U_\infty([a,b])].
\]
\end{proof}

Assume now that our process $(X_n)_{n \ge 1}$ is a supermartingale
bounded in $L^1(\bP)$.
Let
\[
\Lambda \coloneqq \{\omega \mid X_n(\omega) \text{ does not converge to anything in $[-\infty,\infty]$}\}.
\]
We have
\begin{IEEEeqnarray*}{rCl}
\Lambda &=& \{\omega \mid \liminf_N X_N(\omega) < \limsup_N X_N(\omega)\}\\
&=& \{\omega \mid \exists a, b \in \Q : \liminf_N X_N(\omega) < a < b < \limsup_N X_N(\omega)\} \\
&=& \bigcup_{a, b \in \Q, a < b} \underbrace{\{\omega \mid \liminf_N X_N(\omega) < a < b < \limsup_N X_N(\omega)\}}_{\Lambda_{a,b}}.
\end{IEEEeqnarray*}

We have $\Lambda_{a,b} \subseteq \{\omega \mid U_{\infty}([a,b])(\omega) = \infty\}$ by lemma 1. % TODO REF
By the corollary above % TODO REF
we have $\bP(\Lambda_{a,b}) = 0$, hence $\bP(\Lambda) = 0$ as a countable union of null sets.
Hence there exists a random variable $X_\infty$ with values in $[-\infty,\infty]$ such that $X_n \xrightarrow{a.s.} X_\infty$.

\begin{claim}
$\bP[X_\infty \in \{\pm \infty\}] = 0$.
\end{claim}
\begin{subproof}
It suffices to show that $\bE[|X_\infty|] < \infty$, since an integrable random variable is a.s.~finite.
We have
\begin{IEEEeqnarray*}{rCl}
\bE[|X_\infty|] &=& \bE[\liminf_{n \to \infty} |X_n|]\\
&\overset{\text{Fatou}}{\le}& \liminf_n \bE[|X_n|]\\
&\le& \sup_n \bE[|X_n|]\\
&<& \infty.
\end{IEEEeqnarray*}
\end{subproof}

We have thus shown:
\begin{theorem}[Doob's martingale convergence theorem]
\label{doobmartingaleconvergence}
Any supermartingale bounded in $L^1$ converges almost surely to a
random variable, which is almost surely finite.
In particular, any non-negative supermartingale converges a.s.~to a finite random variable.
\end{theorem}
The second part follows from the following claim:
\begin{claim}
Any non-negative supermartingale is bounded in $L^1$.
\end{claim}
\begin{subproof}
We need to show $\sup_n \bE[|X_n|] < \infty$.
Since the supermartingale is non-negative, we have $\bE[|X_n|] = \bE[X_n]$,
and since it is a supermartingale, $\bE[X_n] \le \bE[X_0] < \infty$.
\end{subproof}

\todo{rearrange proof}

\begin{example}[Branching process]
% TODO
\end{example}

@@ -40,6 +40,7 @@
\input{inputs/lecture_14.tex}
\input{inputs/lecture_15.tex}
\input{inputs/lecture_16.tex}
\input{inputs/lecture_17.tex}

\cleardoublepage