lecture 16

This commit is contained in:
Josia Pietsch 2023-06-13 17:59:57 +02:00
parent 684e920f14
commit b299e37c96
Signed by: jrpie
GPG key ID: E70B571D66986A2D
4 changed files with 278 additions and 3 deletions

View file

@ -229,7 +229,7 @@ Assume $Y = \One_B$, then $Y$ simple, then take the limit (using that $Y$ is bou
of $\sigma(\sigma(X), \cG)$, then
\[
\bE[X | \sigma(\cG, \cH)] \overset{\text{a.s.}}{=} \bE[X | \cG].
\]
\end{theorem}
\begin{example}
If $X$ is independent of $\cG$,
@ -249,7 +249,6 @@ Assume $Y = \One_B$, then $Y$ simple, then take the limit (using that $Y$ is bou
\bE[S_n | \cF_n] + \bE[X_{n+1} | \cF_n]\\
&\overset{\text{a.s.}}{=}& S_n + \bE[X_{n+1} | \cF_n]\\
&\overset{\text{\autoref{ceprop12}}}{=}& S_{n} + \bE[X_n]\\
&=& S_n.
\end{IEEEeqnarray*}
\end{example}

247
inputs/lecture_16.tex Normal file
View file

@ -0,0 +1,247 @@
\lecture{16}{2023-06-13}{}
\subsection{Conditional expectation}
\begin{theorem}
\label{ceprop11}
\label{ceroleofindependence}
Let $X$ be a random variable,
and let $\cG, \cH$ be $\sigma$-algebras.
If $\cH$ is independent of $\sigma\left( \sigma(X), \cG \right)$,
then
\[
\bE[X | \sigma(\cG, \cH)] \overset{\text{a.s.}}{=} \bE[X | \cG].
\]
In particular, if $X$ is independent of $\cG$,
then
\[
\bE[X | \cG] \overset{\text{a.s.}}{=} \bE[X].
\]
\end{theorem}
\todo{Definition of independence wrt a $\sigma$-algebra}
\begin{proof}
Let $\cH$ be independent of $\sigma(\sigma(X), \cG)$.
Then for all $H \in \cH$, we have that $\One_H$
and any random variable measurable with respect to either $\sigma(X)$
or $\cG$ must be independent.
It suffices to consider the case of $X \ge 0$.
Let $G \in \cG$ and $H \in \cH$.
By assumption, $X \One_G$ and $\One_H$ are independent.
Let $Z \coloneqq \bE[X | \cG]$.
Then
\begin{IEEEeqnarray*}{rCl}
\underbrace{\bE[X;G \cap H]}_{\coloneqq \int_{G \cap H} X \dif \bP} &=& \bE[(X \One_G) \One_H]\\
&=& \bE[X \One_G] \bE[\One_H]\\
&=& \bE[Z \One_G] \bP(H)\\
&=& \bE[Z; G \cap H].
\end{IEEEeqnarray*}
The identity above means that the measures $A \mapsto \bE[X; A]$
and $A \mapsto \bE[Z; A]$
agree on the $\sigma$-algebra $\sigma(\cG, \cH)$ for events
of the form $G \cap H$.
Since sets of this form generate $\sigma(\cG, \cH)$,
these two measures must agree on $\sigma(\cG, \cH)$.
The claim of the theorem follows by the uniqueness of conditional expectation.
To deduce the second statement, choose $\cG = \{\emptyset, \Omega\}$.
\end{proof}
\subsection{The Radon-Nikodym theorem}
First, let us recall some basic facts:
\begin{fact}
Let $(\Omega, \cF, \mu)$ be a \vocab[Measure space!$\sigma$-finite]{$\sigma$-finite
measure space},
i.e.~$\Omega$ can be decomposed into countably many subsets of finite measure.
Let $f: \Omega \to [0, \infty)$ be measurable.
Define $\nu(A) \coloneqq \int_A f \dif \mu$.
Then $\nu$ is also a $\sigma$-finite measure on $(\Omega, \cF)$.\todo{Application of mct}
Moreover, $\nu$ is finite iff $f$ is integrable.
\end{fact}
Note that in this setting, if $\mu(A) = 0$ it follows that $\nu(A) = 0$.
The Radon-Nikodym theorem is the converse of that:
\begin{theorem}[Radon-Nikodym]
\label{radonnikodym}
Let $\mu$ and $\nu$ be two $\sigma$-finite measures
on $(\Omega, \cF)$.
Suppose
\[
\forall A \in \cF . ~ \mu(A) = 0 \implies \nu(A) = 0.
\]
Then
\begin{enumerate}[(1)]
\item there exists $Z: \Omega \to [0, \infty)$ measurable,
such that
\[\forall A \in \cF . ~ \nu(A) = \int_A Z \dif \mu.\]
\item Such a $Z$ is unique up to equality a.e.~(w.r.t. $\mu$).
\item $Z$ is integrable w.r.t.~$ \mu$ iff $\nu$ is a finite measure.
\end{enumerate}
Such a $Z$ is called the \vocab{Radon-Nikodym derivative}.
\end{theorem}
\begin{definition}
Whenever the property $\forall A \in \cF .~ \mu(A)= 0 \implies \nu(A) = 0$ holds,
we say that $\nu$ is \vocab{absolutely continuous}
w.r.t.~$\mu$.
This is written as $\nu \ll \mu$.
\end{definition}
With \autoref{radonnikodym} we get a very short proof of the existence
of conditional expectation:
\begin{proof}[Second proof of \autoref{conditionalexpectation}]
Let $(\Omega, \cF, \bP)$ as always, $X \in L^1(\bP)$ and $\cG \subseteq \cF$.
It suffices to consider the case of $X \ge 0$.
For all $G \in \cG$, define $\nu(G) \coloneqq \int_G X \dif \bP$.
Obviously, $\nu \ll \bP$ on $\cG$.
Then apply \autoref{radonnikodym}.
\end{proof}
\begin{refproof}{radonnikodym}
We will only sketch the proof. A full proof can be found in the notes.
\paragraph{Step 1: Uniqueness}
See notes.
\paragraph{Step 2: Reduction to the finite measure case}
See notes.
\paragraph{Step 3: Getting hold of $Z$}
Assume now that $\mu$ and $\nu$ are two finite measures.
Let
\[\cC \coloneqq \{f: \Omega \to [0,\infty] \mid \forall A \in \cF.~\int_A f \dif \mu \le \nu(A)\}.\]
We have $\cC \neq \emptyset$ since $0 \in \cC$.
The goal is to find a maximal function $Z$ in $\cC$.
Obviously its integral will also be maximal.
\begin{enumerate}[(a)]
\item If $f,g \in \cC$, then $f \lor g$ (the pointwise maximum)
is also in $\cC$.
\item Suppose $\{f_n\}_{n \ge 1}$ is an increasing sequence in $\cC$.
Let $f$ be the pointwise limit.
Then $f \in \cC$.
\item For all $f \in \cC$, we have
\[
\int_\Omega f \dif \mu \le \nu(\Omega) < \infty.
\]
\end{enumerate}
Define $\alpha \coloneqq \sup \{ \int f \dif \mu : f \in \cC\} \le \nu(\Omega) < \infty$.
Let $f_n \in \cC, n\in \N$ be a sequence
with $\int f_n \dif \mu \to \alpha$.
Define $g_n \coloneqq \max \{f_1,\ldots,f_n\} \in \cC$.
Applying (b), we get that the pointwise limit, $Z$,
is an element of $\cC$.
\paragraph{Step 4: Showing that our choice of $Z$ works}
Define $\lambda(A) \coloneqq \nu(A) - \int_A Z \dif \mu \ge 0$.
$\lambda$ is a measure.
\begin{claim}
$\lambda = 0$.
\end{claim}
\begin{subproof}
Call $G \in \cF$ \emph{good} if the following hold:
\begin{enumerate}[(i)]
\item $\lambda(G) - \frac{1}{k}\mu(G) > 0$.
\item $\forall B \subseteq G, B \in \cF. ~ \lambda(B) - \frac{1}{k}\mu(B) \ge 0$.
\end{enumerate}
Suppose we know that for all $A \in \cF, k \in \N$
we have
$\lambda(A) \le \frac{1}{k} \mu(A)$.
Then $\lambda(A) = 0$ since $\mu$ is finite.
Assume the claim does not hold.
Then there must be some $k \in \N$, $A \in \cF$
such that $\lambda(A) - \frac{1}{k} \mu(A) > 0$.
Fix this $A$ and $k$.
Then $A$ satisfies condition (i) of being good,
but it need not satisfy (ii).
The tricky part is to make $A$ smaller such that it also
satisfies (ii).\todo{Copy from notes}
\end{subproof}
\end{refproof}
\section{Martingales}
We have already worked with martingales, but we will define them
rigorously now.
\begin{definition}[Filtration]
A \vocab{filtration} is a sequence $(\cF_n)$ of $\sigma$-algebras
such that $\cF_n \subseteq \cF_{n+1}$ for all $n \ge 1$.
\end{definition}
Intuitively, we can think of $\cF_n$ as the set of information
we have gathered up to time $n$.
Typically $\cF_n = \sigma(X_1, \ldots, X_n)$ for a sequence of random variables.
\begin{definition}
Let $(\cF_n)$ be a filtration and
$X_1,\ldots,X_n$ be random variables such that $X_i \in L^1(\bP)$.
Then we say that $(X_n)_{n \ge 1}$ is an $(\cF_n)_n$-\vocab{martingale}
if
\begin{itemize}
\item $X_n$ is $\cF_n$-measurable for all $n$
($X_n$ is \vocab[Sequence!adapted to a filtration]{adapted to the filtration} $\cF_n$ ).
\item $\bE[X_{n+1} | \cF_n] \overset{\text{a.s.}}{=} X_n$
for all $n$.
\end{itemize}
$(X_n)$ is called a \vocab{sub-martingale},
if it is adapted to $\cF_n$ but
\[
\bE[X_{n+1} | \cF_n] \overset{\text{a.s.}}{\ge} X_n.
\]
It is called a \vocab{super-martingale}
if it is adapted but $\bE[X_{n+1} | \cF_n] \overset{\text{a.s.}}{\le} X_n$.
\end{definition}
\begin{corollary}
Suppose that $f: \R \to \R$ is a convex function such that $f(X_n) \in L^1(\bP)$ for all $n$.
Suppose that $(X_n)_n$ is a martingale\footnote{In this form it means that there is some filtration, which we don't explicitly specify.}.
Then $(f(X_n))_n$ is a sub-martingale.
\end{corollary}
\begin{proof}
Apply \autoref{cejensensinequality}.
\end{proof}
\begin{corollary}
If $(X_n)_n$ is a martingale,
then $\bE[X_n] = \bE[X_1]$ for all $n \ge 1$.
\end{corollary}
\begin{example}
\begin{itemize}
\item The simple random walk:
Let $\xi_1, \xi_2, \ldots$ be iid with
$\bP[\xi_i = 1] = \bP[\xi_i = -1] = \frac{1}{2}$,
$X_n \coloneqq \xi_1 + \ldots + \xi_n$
and $\cF_n \coloneqq \sigma(\xi_1, \ldots, \xi_n) = \sigma(X_1, \ldots, X_n)$.
Then $X_n$ is $\cF_n$-measurable.
Showing that $(X_n)_n$ is a martingale
is left as an exercise.
\item See exercise sheet 9.
\item The branching process (next lecture).
\end{itemize}
\end{example}

View file

@ -39,6 +39,7 @@
\input{inputs/lecture_13.tex}
\input{inputs/lecture_14.tex}
\input{inputs/lecture_15.tex}
\input{inputs/lecture_16.tex}
\cleardoublepage

28
temp_lenovo Normal file
View file

@ -0,0 +1,28 @@
coretemp-isa-0000
Adapter: ISA adapter
Package id 0: +47.0°C (high = +86.0°C, crit = +100.0°C)
Core 0: +47.0°C (high = +86.0°C, crit = +100.0°C)
Core 1: +42.0°C (high = +86.0°C, crit = +100.0°C)
BAT0-acpi-0
Adapter: ACPI interface
in0: 11.88 V
curr1: 1.93 A
thinkpad-isa-0000
Adapter: ISA adapter
fan1: 1969 RPM
CPU: +42.0°C
GPU: +0.0°C
temp3: +0.0°C
temp4: +0.0°C
temp5: +0.0°C
temp6: +0.0°C
temp7: +0.0°C
temp8: +0.0°C
acpitz-acpi-0
Adapter: ACPI interface
temp1: +42.0°C (crit = +100.0°C)
temp2: +26.8°C (crit = +99.0°C)