Merge branch 'main' of https://gitlab.com/jrpie-notes/probability-theory
This commit is contained in:
commit
eeb3208de4
5 changed files with 160 additions and 19 deletions
|
@ -16,7 +16,7 @@ First, let us recall some basic definitions:
|
||||||
\item $\bP$ is a \vocab{probability measure}, i.e.~$\bP$ is a function $\bP: \cF \to [0,1]$
|
\item $\bP$ is a \vocab{probability measure}, i.e.~$\bP$ is a function $\bP: \cF \to [0,1]$
|
||||||
such that
|
such that
|
||||||
\begin{itemize}
|
\begin{itemize}
|
||||||
\item $\bP(\emptyset) = 1$, $\bP(\Omega) = 1$,
|
\item $\bP(\emptyset) = 0$, $\bP(\Omega) = 1$,
|
||||||
\item $\bP\left( \bigsqcup_{n \in \N} A_n \right) = \sum_{n \in \N} \bP(A_n)$
|
\item $\bP\left( \bigsqcup_{n \in \N} A_n \right) = \sum_{n \in \N} \bP(A_n)$
|
||||||
for mutually disjoint $A_n \in \cF$.
|
for mutually disjoint $A_n \in \cF$.
|
||||||
\end{itemize}
|
\end{itemize}
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
By the tower property (\autoref{cetower})
|
By the tower property (\autoref{cetower})
|
||||||
it is clear that $(\bE[X | \cF_n])_n$
|
it is clear that $(\bE[X | \cF_n])_n$
|
||||||
is a martingale.
|
is a martingale.
|
||||||
|
|
||||||
First step:
|
First step:
|
||||||
Assume that $X$ is bounded.
|
Assume that $X$ is bounded.
|
||||||
Then, by \autoref{cejensen}, $|X_n| \le \bE[|X| | \cF_n]$,
|
Then, by \autoref{cejensen}, $|X_n| \le \bE[|X| | \cF_n]$,
|
||||||
|
@ -84,7 +84,7 @@ we need the following theorem, which we won't prove here:
|
||||||
L^p &\longrightarrow & (L^q)^\ast \\
|
L^p &\longrightarrow & (L^q)^\ast \\
|
||||||
f &\longmapsto & (g \mapsto \int g f \dif \bP)
|
f &\longmapsto & (g \mapsto \int g f \dif \bP)
|
||||||
\end{IEEEeqnarray*}
|
\end{IEEEeqnarray*}
|
||||||
|
|
||||||
We also have $(L^1)^\ast \cong L^\infty$,
|
We also have $(L^1)^\ast \cong L^\infty$,
|
||||||
however $ (L^\infty)^\ast \not\cong L^1$.
|
however $ (L^\infty)^\ast \not\cong L^1$.
|
||||||
\end{fact}
|
\end{fact}
|
||||||
|
@ -95,7 +95,7 @@ we need the following theorem, which we won't prove here:
|
||||||
$(X_{n_k})_k$ such that for all $Y \in L^q$ ($\frac{1}{p} + \frac{1}{q} = 1$ )
|
$(X_{n_k})_k$ such that for all $Y \in L^q$ ($\frac{1}{p} + \frac{1}{q} = 1$ )
|
||||||
\[
|
\[
|
||||||
\int X_{n_k} Y \dif \bP \to \int XY \dif \bP
|
\int X_{n_k} Y \dif \bP \to \int XY \dif \bP
|
||||||
\]
|
\]
|
||||||
(Note that this argument does not work for $p = 1$,
|
(Note that this argument does not work for $p = 1$,
|
||||||
because $(L^\infty)^\ast \not\cong L^1$).
|
because $(L^\infty)^\ast \not\cong L^1$).
|
||||||
|
|
||||||
|
@ -116,14 +116,14 @@ we need the following theorem, which we won't prove here:
|
||||||
\subsection{Stopping times}
|
\subsection{Stopping times}
|
||||||
|
|
||||||
\begin{definition}[Stopping time]
|
\begin{definition}[Stopping time]
|
||||||
A random variable $T: \Omega \to \N \cup \{\infty\}$ on a filtered probability space $(\Omega, \cF, \{\cF_n\}_n, \bP)$ is called a \vocab{stopping time},
|
A random variable $T: \Omega \to \N_0 \cup \{\infty\}$ on a filtered probability space $(\Omega, \cF, \{\cF_n\}_n, \bP)$ is called a \vocab{stopping time},
|
||||||
if
|
if
|
||||||
\[
|
\[
|
||||||
\{T \le n\} \in \cF_n
|
\{T \le n\} \in \cF_n
|
||||||
\]
|
\]
|
||||||
for all $n \in \N$.
|
for all $n \in \N$.
|
||||||
Equivalently, $\{T = n\} \in \cF_n$ for all $n \in \N$.
|
Equivalently, $\{T = n\} \in \cF_n$ for all $n \in \N$.
|
||||||
|
|
||||||
\end{definition}
|
\end{definition}
|
||||||
|
|
||||||
\begin{example}
|
\begin{example}
|
||||||
|
@ -131,21 +131,21 @@ we need the following theorem, which we won't prove here:
|
||||||
\end{example}
|
\end{example}
|
||||||
|
|
||||||
\begin{example}[Hitting times]
|
\begin{example}[Hitting times]
|
||||||
For an adapted process $(X_n)_n$
|
For an adapted process $(X_n)_n$
|
||||||
with values in $\R$ and $A \in \cB(\R)$, the \vocab{hitting time}
|
with values in $\R$ and $A \in \cB(\R)$, the \vocab{hitting time}
|
||||||
\[
|
\[
|
||||||
T \coloneqq \inf \{n \in \N : X_n \in A\}
|
T \coloneqq \inf \{n \in \N : X_n \in A\}
|
||||||
\]
|
\]
|
||||||
is a stopping time,
|
is a stopping time,
|
||||||
as
|
as
|
||||||
\[
|
\[
|
||||||
\{T \le n \} = \bigcup_{k=1}^n \{X_k \in A\} \in \cF_n.
|
\{T \le n \} = \bigcup_{k=1}^n \{X_k \in A\} \in \cF_n.
|
||||||
\]
|
\]
|
||||||
|
|
||||||
However, the last exit time
|
However, the last exit time
|
||||||
\[
|
\[
|
||||||
T \coloneqq \sup \{n \in \N : X_n \in A\}
|
T \coloneqq \sup \{n \in \N : X_n \in A\}
|
||||||
\]
|
\]
|
||||||
is not a stopping time.
|
is not a stopping time.
|
||||||
|
|
||||||
\end{example}
|
\end{example}
|
||||||
|
@ -158,7 +158,7 @@ we need the following theorem, which we won't prove here:
|
||||||
Then
|
Then
|
||||||
\[
|
\[
|
||||||
T \coloneqq \inf \{n \in \N : S_n \ge A \lor S_n \le B\}
|
T \coloneqq \inf \{n \in \N : S_n \ge A \lor S_n \le B\}
|
||||||
\]
|
\]
|
||||||
is a stopping time.
|
is a stopping time.
|
||||||
\end{example}
|
\end{example}
|
||||||
|
|
||||||
|
@ -173,11 +173,11 @@ we need the following theorem, which we won't prove here:
|
||||||
are stopping times.
|
are stopping times.
|
||||||
|
|
||||||
Note that $T_1 - T_2$ is not a stopping time.
|
Note that $T_1 - T_2$ is not a stopping time.
|
||||||
|
|
||||||
\end{example}
|
\end{example}
|
||||||
|
|
||||||
\begin{remark}
|
\begin{remark}
|
||||||
There are two ways to interpret the interaction between a stopping time $T$
|
There are two ways to interpret the interaction between a stopping time $T$
|
||||||
and a stochastic process $(X_n)_n$.
|
and a stochastic process $(X_n)_n$.
|
||||||
\begin{itemize}
|
\begin{itemize}
|
||||||
\item The behaviour of $ X_n$ until $T$,
|
\item The behaviour of $ X_n$ until $T$,
|
||||||
|
@ -193,22 +193,22 @@ we need the following theorem, which we won't prove here:
|
||||||
If we look at a process
|
If we look at a process
|
||||||
\[
|
\[
|
||||||
S_n = \sum_{i=1}^{n} X_i
|
S_n = \sum_{i=1}^{n} X_i
|
||||||
\]
|
\]
|
||||||
for some $(X_n)_n$, then
|
for some $(X_n)_n$, then
|
||||||
\[
|
\[
|
||||||
S^T = (\sum_{i=1}^{T \wedge n} X_i)_n
|
S^T = (\sum_{i=1}^{T \wedge n} X_i)_n
|
||||||
\]
|
\]
|
||||||
and
|
and
|
||||||
\[
|
\[
|
||||||
S_T = \sum_{i=1}^{T} X_i.
|
S_T = \sum_{i=1}^{T} X_i.
|
||||||
\]
|
\]
|
||||||
\end{example}
|
\end{example}
|
||||||
|
|
||||||
\begin{theorem}
|
\begin{theorem}
|
||||||
If $(X_n)_n$ is a supermartingale and $T$ is a stopping time,
|
If $(X_n)_n$ is a supermartingale and $T$ is a stopping time,
|
||||||
then $X^T$ is also a supermartingale,
|
then $X^T$ is also a supermartingale,
|
||||||
and we have $\bE[X_{T \wedge n}] \le \bE[X_0]$ for all $n$.
|
and we have $\bE[X_{T \wedge n}] \le \bE[X_0]$ for all $n$.
|
||||||
If $(X_n)_n$ is a martingale, then so is $X^T$
|
If $(X_n)_n$ is a martingale, then so is $X^T$
|
||||||
and $\bE[X_{T \wedge n}] \le \bE[X_0]$.
|
and $\bE[X_{T \wedge n}] \le \bE[X_0]$.
|
||||||
\end{theorem}
|
\end{theorem}
|
||||||
\begin{proof}
|
\begin{proof}
|
||||||
|
|
136
inputs/lecture_21.tex
Normal file
136
inputs/lecture_21.tex
Normal file
|
@ -0,0 +1,136 @@
|
||||||
|
\lecture{21}{2023-06-29}{}
|
||||||
|
% TODO: replace bf
|
||||||
|
|
||||||
|
This is the last lecture relevant for the exam.
|
||||||
|
 (Apart from lecture 22, which will be a repetition).
|
||||||
|
|
||||||
|
\begin{goal}
|
||||||
|
We want to see an application of the
|
||||||
|
optional stopping theorem \ref{optionalstopping}.
|
||||||
|
\end{goal}
|
||||||
|
|
||||||
|
\begin{notation}
|
||||||
|
Let $E$ be a complete, separable metric space (e.g.~$E = \R$).
|
||||||
|
Suppose that for all $x \in E$ we have a probability measure
|
||||||
|
$\bfP(x, \dif y)$ on $E$.
|
||||||
|
% i.e. $\mu(A) \coloneqq \int_A \bP(x, \dif y)$ is a probability measure.
|
||||||
|
 Such a probability measure is called
|
||||||
|
a \vocab{transition probability measure}.
|
||||||
|
\end{notation}
|
||||||
|
 \begin{example}
|
||||||
|
$E =\R$,
|
||||||
|
\[\bfP(x, \dif y) = \frac{1}{\sqrt{2 \pi} } e^{- \frac{(x-y)^2}{2}} \dif y\]
|
||||||
|
is a transition probability measure.
|
||||||
|
 \end{example}
|
||||||
|
\begin{example}[Simple random walk as a transition probability measure]
|
||||||
|
$E = \Z$, $\bfP(x, \dif y)$
|
||||||
|
assigns mass $\frac{1}{2}$ to $y = x+1$ and $y = x -1$.
|
||||||
|
\end{example}
|
||||||
|
|
||||||
|
\begin{definition}
|
||||||
|
For every bounded, measurable function $f : E \to \R$,
|
||||||
|
$x \in E$
|
||||||
|
define
|
||||||
|
\[
|
||||||
|
(\bfP f)(x) \coloneqq \int_E f(y) \bfP(x, \dif y).
|
||||||
|
\]
|
||||||
|
This $\bfP$ is called a \vocab{transition operator}.
|
||||||
|
\end{definition}
|
||||||
|
\begin{fact}
|
||||||
|
If $f \ge 0$, then $(\bfP f)(\cdot ) \ge 0$.
|
||||||
|
|
||||||
|
If $f \equiv 1$, we have $(\bfP f) \equiv 1$.
|
||||||
|
\end{fact}
|
||||||
|
|
||||||
|
\begin{notation}
|
||||||
|
Let $\bfI$ denote the \vocab{identity operator},
|
||||||
|
i.e.
|
||||||
|
\[
|
||||||
|
(\bfI f)(x) = f(x)
|
||||||
|
\]
|
||||||
|
for all $f$.
|
||||||
|
Then for a transition operator $\bfP$ we write
|
||||||
|
\[
|
||||||
|
\bfL \coloneqq \bfI - \bfP.
|
||||||
|
\]
|
||||||
|
\end{notation}
|
||||||
|
|
||||||
|
\begin{goal}
|
||||||
|
Take $E = \R$.
|
||||||
|
Suppose that $A^c \subseteq \R$ is a bounded domain.
|
||||||
|
Given a bounded function $f$ on $\R$,
|
||||||
|
we want a function $u$ which is bounded,
|
||||||
|
such that
|
||||||
|
 $\bfL u = 0$ on $A^c$ and $u = f$ on $A$.
|
||||||
|
\end{goal}
|
||||||
|
|
||||||
|
We will show that $u(x) = \bE_x[f(X_{T_A})]$
|
||||||
|
is the unique solution to this problem.
|
||||||
|
|
||||||
|
\begin{definition}
|
||||||
|
Let $(\Omega, \cF, \{\cF_n\}_n, \bP_x)$
|
||||||
|
be a filtered probability space, where for every $x \in \R$,
|
||||||
|
$\bP_x$ is a probability measure.
|
||||||
|
Let $\bE_x$ denote expectation with respect to $\bfP(x, \cdot )$.
|
||||||
|
Then $(X_n)_{n \ge 0}$ is a \vocab{Markov chain} starting at $x \in \R$
|
||||||
|
with \vocab[Markov chain!Transition probability]{transition probability}
|
||||||
|
$\bfP(x, \cdot )$ if
|
||||||
|
\begin{enumerate}[(i)]
|
||||||
|
\item $\bP_x[X_0 = x] = 1$,
|
||||||
|
\item for all bounded, measurable $f: \R \to \R$,
|
||||||
|
\[\bE_x[f(X_{n+1}) | \cF_n] \overset{\text{a.s.}}{=}%
|
||||||
|
\bE_{x}[f(X_{n+1}) | X_n] = %
|
||||||
|
\int f(y) \bfP(X_n, \dif y).\]
|
||||||
|
\end{enumerate}
|
||||||
|
(Recall $\cF_n = \sigma(X_1,\ldots, X_n)$.)
|
||||||
|
\end{definition}
|
||||||
|
\begin{example}
|
||||||
|
Suppose $B \in \cB(\R)$ and $f = \One_B$.
|
||||||
|
Then the first equality of (ii) simplifies to
|
||||||
|
\[
|
||||||
|
\bP_x[X_{n+1} \in B | \cF_n] = \bP_x[X_{n+1} \in B | \sigma(X_n)].
|
||||||
|
\]
|
||||||
|
\end{example}
|
||||||
|
|
||||||
|
\begin{definition}[Conditional probability]
|
||||||
|
\[
|
||||||
|
\bP[A | \cG] \coloneqq \bE[\One_A | \cG].
|
||||||
|
\]
|
||||||
|
\end{definition}
|
||||||
|
|
||||||
|
\begin{example}
|
||||||
|
 Let $\xi_i$ be i.i.d.~with $\bP[\xi_i = 1] = \bP[\xi_i = -1] = \frac{1}{2}$
|
||||||
|
and define $X_n \coloneqq \sum_{i=1}^{n} \xi_i$.
|
||||||
|
|
||||||
|
Intuitively, conditioned on $X_n$, $X_{n+1}$ should
|
||||||
|
be independent of $\sigma(X_1,\ldots, X_{n-1})$.
|
||||||
|
|
||||||
|
For a set $B$, we have
|
||||||
|
\[
|
||||||
|
\bP_0[X_{n+1} \in B| \sigma(X_1,\ldots, X_n)]
|
||||||
|
= \bE[\One_{X_n + \xi_{n+1} \in B} | \sigma(X_1,\ldots, X_n)]
|
||||||
|
= \bE[\One_{X_n + \xi_{n+1} \in B} | \sigma(X_n)].
|
||||||
|
\]
|
||||||
|
|
||||||
|
\begin{claim}
|
||||||
|
$\bE[\One_{X_{n+1} \in B} | \sigma(X_1,\ldots, X_n)] = \bE[\One_{X_{n+1} \in B} | \sigma(X_n)]$.
|
||||||
|
\end{claim}
|
||||||
|
\begin{subproof}
|
||||||
|
The rest of the lecture was very chaotic...
|
||||||
|
\end{subproof}
|
||||||
|
\end{example}
|
||||||
|
|
||||||
|
|
||||||
|
%TODO
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
{ \huge\color{red}
|
||||||
|
New information after this point is not relevant for the exam.
|
||||||
|
}
|
||||||
|
Stopping times and optional stopping are very relevant for the exam,
|
||||||
|
the Markov property is not.
|
|
@ -40,6 +40,10 @@
|
||||||
\RequirePackage{mkessler-faktor}
|
\RequirePackage{mkessler-faktor}
|
||||||
\RequirePackage{mkessler-mathsymb}
|
\RequirePackage{mkessler-mathsymb}
|
||||||
\RequirePackage[extended]{mkessler-mathalias}
|
\RequirePackage[extended]{mkessler-mathalias}
|
||||||
|
% \makeatletter
|
||||||
|
% \expandafter\MakeAliasesForwith\expandafter\mathbf\expandafter{\expandafter bf\expandafter}\expandafter{\mkessler@mathalias@all}
|
||||||
|
% \makeatother
|
||||||
|
|
||||||
\RequirePackage{mkessler-refproof}
|
\RequirePackage{mkessler-refproof}
|
||||||
|
|
||||||
% mkessler-mathfont has already been imported
|
% mkessler-mathfont has already been imported
|
||||||
|
|
|
@ -44,6 +44,7 @@
|
||||||
\input{inputs/lecture_18.tex}
|
\input{inputs/lecture_18.tex}
|
||||||
\input{inputs/lecture_19.tex}
|
\input{inputs/lecture_19.tex}
|
||||||
\input{inputs/lecture_20.tex}
|
\input{inputs/lecture_20.tex}
|
||||||
|
\input{inputs/lecture_21.tex}
|
||||||
|
|
||||||
\cleardoublepage
|
\cleardoublepage
|
||||||
|
|
||||||
|
|
Reference in a new issue