diff --git a/inputs/lecture_13.tex b/inputs/lecture_13.tex
index 7823b41..2ddbf0b 100644
--- a/inputs/lecture_13.tex
+++ b/inputs/lecture_13.tex
@@ -46,7 +46,7 @@
 We will not prove \autoref{lindebergclt} or \autoref{lyapunovclt}
 in this lecture. However, they are quite important.
 We will now sketch the proof of \autoref{levycontinuity},
-details can be found in the notes.\todo{Complete this}
+details can be found in the notes.\notes
 A generalized version of \autoref{levycontinuity} is the following:
 \begin{theorem}[A generalized version of Levy's continuity \autoref{levycontinuity}]
 \label{genlevycontinuity}
@@ -120,7 +120,7 @@ A generalized version of \autoref{levycontinuity} is the following:
 We will prove \autoref{levycontinuity} assuming \autoref{lec10_thm1}.
-\autoref{lec10_thm1} will be shown in the notes.\todo{TODO}
+\autoref{lec10_thm1} will be shown in the notes.\notes
 
 We will need the following:
 \begin{lemma}
 \label{lec13_lem1}
@@ -239,7 +239,7 @@ We still need to show that $\mu_n \implies \mu$.
 Then $a_n \to a$.
 \end{fact}
 \begin{subproof}
-    \todo{in the notes}
+    \notes
 \end{subproof}
 Assume that $\mu_n$ does not converge to $\mu$.
 By \autoref{lec10_thm1}, pick a continuity point $x_0$ of $F$,
diff --git a/inputs/lecture_14.tex b/inputs/lecture_14.tex
index 441f31a..d189f3e 100644
--- a/inputs/lecture_14.tex
+++ b/inputs/lecture_14.tex
@@ -87,7 +87,8 @@ We now want to generalize this to arbitrary random variables.
     \]
 \end{definition}
 
-\paragraph{Plan}
+\subsection{Existence of conditional expectation}
+
 We will give two different proofs of \autoref{conditionalexpectation}.
 The first one will use orthogonal projections.
 The second will use the Radon-Nikodym theorem.
@@ -108,7 +109,7 @@ and then do the harder proof.
 \end{enumerate}
 \end{lemma}
 \begin{proof}
-    \todo{Notes}
+    \notes
 \end{proof}
 
 \begin{refproof}{conditionalexpectation}
@@ -162,7 +163,7 @@ and then do the harder proof.
     $0 \overset{\text{a.s.}}{\le} Z_n \uparrow$.
 \end{claim}
 \begin{subproof}
-    \todo{Notes}
+    \notes
 \end{subproof}
 
 Define $Z(\omega) \coloneqq \limsup_{n \to \infty} Z_n(\omega)$.
diff --git a/inputs/lecture_15.tex b/inputs/lecture_15.tex
index 1230ff4..235948f 100644
--- a/inputs/lecture_15.tex
+++ b/inputs/lecture_15.tex
@@ -1,4 +1,5 @@
 \lecture{15}{2023-06-06}{}
+\subsection{Properties of conditional expectation}
 
 We want to derive some properties of conditional expectation.
 
@@ -12,16 +13,20 @@ We want to derive some properties of conditional expectation.
 \begin{proof}
     Apply (b) from the definition for $G = \Omega \in \cG$.
 \end{proof}
-\begin{theorem} % Thm 2
+\begin{theorem}
 \label{ceprop2}
-    If $X$ is $\cG$-measurable, then $X = \bE[X | \cG]$ a.s..
+    If $X$ is $\cG$-measurable, then $X \overset{\text{a.s.}}{=} \bE[X | \cG]$.
 \end{theorem}
 \begin{proof}
     Let $Y$ be a version of $\bE[X | \cG]$ and suppose $\bP[X \neq Y] > 0$.
     Without loss of generality $\bP[X > Y] > 0$.
     Hence $\bP[X > Y + \frac{1}{n}] > 0$ for some $n \in \N$.
     Let $A \coloneqq \{X > Y + \frac{1}{n}\} \in \cG$.
-    % TODO
+    Then
+    \[
+        \int_A X \dif \bP \ge \frac{1}{n}\bP(A) + \int_A Y \dif \bP,
+    \]
+    contradicting property (b) from \autoref{conditionalexpectation}.
 \end{proof}
 
 \begin{example}
@@ -40,7 +45,7 @@ We want to derive some properties of conditional expectation.
     \]
 \end{theorem}
 \begin{proof}
-    Trivial % TODO
+    Trivial.\todo{add details}
 \end{proof}
 
 \begin{theorem}[Positivity]
@@ -52,7 +57,7 @@ We want to derive some properties of conditional expectation.
 \begin{proof}
     Let $W$ be a version of $\bE[X | \cG]$.
     Suppose $\bP[W < 0] > 0$.
-    Then $G \coloneqq \{W < -\frac{1}{n}\} \in \cG$
-    For some $n \in \N$, we have $\bP[G] > 0$.
+    Then, for some $n \in \N$, the set
+    \[G \coloneqq \{W < -\frac{1}{n}\} \in \cG\]
+    satisfies $\bP[G] > 0$.
     However, it follows that
     \[
@@ -106,7 +111,7 @@ We want to derive some properties of conditional expectation.
     \]
 \end{theorem}
 \begin{proof}
-    \todo{in the notes}
+    \notes
 \end{proof}
 
 \begin{theorem}[Conditional dominated convergence theorem]
 \label{ceprop7}
@@ -118,14 +123,14 @@ We want to derive some properties of conditional expectation.
 \end{theorem}
 \begin{proof}
-    \todo{in the notes}
+    \notes
 \end{proof}
 
 Recall
-\begin{theorem}[Jensen's inequality]
+\begin{fact}[Jensen's inequality]
     If $c : \R \to \R$ is convex and $\bE[|c \circ X|] < \infty$,
     then $\bE[c \circ X] \ge c(\bE[X])$.
-\end{theorem}
+\end{fact}
 
 For conditional expectation, we have
 \begin{theorem}[Conditional Jensen's inequality]
@@ -150,7 +155,7 @@ For conditional expectation, we have
     Hence
     \[
         \bE[c(X) | \cG] \ge a_n \bE[X | \cG] + \bE[b_n | \cG]
-        = a_n \bE[X | \cG] + b_n \text{a.s.}
+        = a_n \bE[X | \cG] + b_n \text{ a.s.}
     \]
     for all $n$.
     Using that a countable union of sets of measure zero has measure zero,
@@ -162,14 +167,14 @@ For conditional expectation, we have
 \end{refproof}
 
 Recall
-\begin{theorem}[Hölder's inequality]
+\begin{fact}[Hölder's inequality]
     Let $p,q \ge 1$ such that $\frac{1}{p} + \frac{1}{q} = 1$.
     Suppose $X \in L^p(\bP)$ and $Y \in L^q(\bP)$.
     Then
     \[
         \bE(X Y) \le \underbrace{\bE(|X|^p)^{\frac{1}{p}}}_{\text{\reflectbox{$\coloneqq$}} \|X\|_{L^p}} \bE(|Y|^q)^{\frac{1}{q}}.
     \]
-\end{theorem}
+\end{fact}
 
 \begin{theorem}[Conditional Hölder's inequality]
 \label{ceprop9}
@@ -193,7 +198,7 @@ Recall
     Suppose $\cF \supset \cG \supset \cH$ are sub-$\sigma$-algebras.
     Then
     \[
-        \bE\left[\bE[X | \cG] \mid \cH\right] = \bE[X | \cH].
+        \bE\left[\bE[X | \cG] \mid \cH\right] \overset{\text{a.s.}}{=} \bE[X | \cH].
     \]
 \end{theorem}
 \begin{proof}
@@ -219,7 +224,8 @@ Assume $Y = \One_B$, then $Y$ simple, then take the limit (using that $Y$ is bou
 \begin{definition}
     Let $\cG$ and $\cH$ be $\sigma$-algebras.
     We call $\cG$ and $\cH$ \vocab[$\sigma$-algebra!independent]{independent},
-    \todo{TODO}
+    if $\bP(G \cap H) = \bP(G) \bP(H)$
+    for all events $G \in \cG$, $H \in \cH$.
 \end{definition}
 
 \begin{theorem}[Role of independence]
@@ -251,7 +257,7 @@
 
     For $\bE[S_{n+1} | \cF_n]$ we obtain
     \begin{IEEEeqnarray*}{rCl}
-        \bE[S_{n+1} | \cF_n] &\overset{\autoref{celinearity}}{=}&
+        \bE[S_{n+1} | \cF_n] &\overset{\text{\autoref{celinearity}}}{=}&
         \bE[S_n | \cF_n] + \bE[X_{n+1} | \cF_n]\\
         &\overset{\text{a.s.}}{=}& S_n + \bE[X_{n+1} | \cF_n]\\
         &\overset{\text{\autoref{ceprop12}}}{=}& S_{n} + \bE[X_n]\\
diff --git a/inputs/lecture_16.tex b/inputs/lecture_16.tex
index 91182b0..e95dd4e 100644
--- a/inputs/lecture_16.tex
+++ b/inputs/lecture_16.tex
@@ -68,7 +68,8 @@ The Radon Nikodym theorem is the converse of that:
     Such a $Z$ is called the \vocab{Radon-Nikodym derivative}.
 \end{theorem}
 \begin{definition}
-    Whenever the property $\forall A \in \cF, \mu(A)= 0 \implies \nu(A) = 0$,
+    Whenever the property $\forall A \in \cF, \mu(A) = 0 \implies \nu(A) = 0$
+    holds for two measures $\mu$ and $\nu$,
     we say that $\nu$ is \vocab{absolutely continuous} w.r.t.~$\mu$.
     This is written as $\nu \ll \mu$.
@@ -86,16 +87,19 @@ of conditional expectation:
 
 \begin{refproof}{radonnikodym}
-    We will only sketch the proof. A full proof can be found in the notes.
+    We will only sketch the proof.
+    A full proof can be found in the official notes.
 
-    \paragraph{Step 1: Uniqueness}
-    See notes.
+    \paragraph{Step 1: Uniqueness} \notes
 
     \paragraph{Step 2: Reduction to the finite measure case}
-    See notes.
+    \notes
 
     \paragraph{Step 3: Getting hold of $Z$}
     Assume now that $\mu$ and $\nu$ are two finite measures.
     Let
-    \[\cC \coloneqq \{f: \Omega \to [0,\infty] | \forall A \in \cF.~\int_A f \dif \mu \le \nu(A)\}.\]
+    \[
+        \cC \coloneqq \left\{f: \Omega \to [0,\infty] \middle| %
+        \forall A \in \cF.~\int_A f \dif \mu \le \nu(A)\right\}.
+    \]
     We have $\cC \neq \emptyset$ since $0 \in \cC$.
     The goal is to find a maximal function $Z$ in $\cC$.
@@ -145,7 +149,7 @@ of conditional expectation:
     but it need not satisfy (ii).
 
     The tricky part is to make $A$ smaller such that it also
-    satisfies (ii).\todo{Copy from notes}
+    satisfies (ii).\notes
 \end{subproof}
 \end{refproof}
@@ -153,6 +157,8 @@ of conditional expectation:
 
 \section{Martingales}
 
+\subsection{Definition}
+
 We have already worked with martingales,
 but we will define them rigorously now.
@@ -168,15 +174,16 @@ Typically $\cF_n = \sigma(X_1, \ldots, X_n)$ for a sequence of random variables.
     Let $(\cF_n)_n$ be a filtration and $X_1, X_2, \ldots$ be random variables
     such that $X_i \in L^1(\bP)$.
     Then we say that $(X_n)_{n \ge 1}$ is an $(\cF_n)_n$-\vocab{martingale}
-    if
+    if the following hold:
     \begin{itemize}
-        \item $X_n$ is $\cF_n$-measurable for all $n$
-        ($X_n$ is \vocab[Sequence!adapted to a filtration]{adapted to the filtration} $\cF_n$ ).
+        \item $X_n$ is $\cF_n$-measurable for all $n$.
+
+            ($X_n$ is \vocab[Sequence!adapted to a filtration]{adapted to the filtration} $\cF_n$).
         \item $\bE[X_{n+1} | \cF_n] \overset{\text{a.s.}}{=} X_n$ for all $n$.
     \end{itemize}
 
-    $(X_n)$ is called a \vocab{sub-martingale},
+    $(X_n)_n$ is called a \vocab{sub-martingale},
     if it is adapted to $\cF_n$ but
     \[
         \bE[X_{n+1} | \cF_n] \overset{\text{a.s.}}{\ge} X_n.
     \]
     $(X_n)_n$ is called a \vocab{super-martingale},
     if it is adapted but $\bE[X_{n+1} | \cF_n] \overset{\text{a.s.}}{\le} X_n$.
 \end{definition}
 \begin{corollary}
-    Suppose that $f: \R \to \R$ is a convex function such that $f(xn) \in L^1(\bP)$.
-    Suppose that $(X_n)_n$ is a martingal\footnote{In this form it means, that there is some filtration, that we don't explicitly specify}.
+    Suppose that $f: \R \to \R$ is a convex function such that $f(X_n) \in L^1(\bP)$.
+    Suppose that $(X_n)_n$ is a martingale\footnote{Stated in this form, it means that there is some filtration that we don't specify explicitly}.
     Then $(f(X_n))_n$ is a sub-martingale.
 \end{corollary}
 \begin{proof}
-    Apply \autoref{cejensensinequality}.
+    Apply \autoref{cjensen}.
 \end{proof}
 
 \begin{corollary}
diff --git a/inputs/lecture_17.tex b/inputs/lecture_17.tex
index ae63e06..c8ad703 100644
--- a/inputs/lecture_17.tex
+++ b/inputs/lecture_17.tex
@@ -1,5 +1,8 @@
 \lecture{17}{2023-06-15}{}
 
+\subsection{Doob's martingale convergence theorem}
+
+
 \begin{definition}[Stochastic process]
     A \vocab{stochastic process} is a collection of random variables
     $(X_t)_{t \in T}$ for some index set $T$.
@@ -64,6 +67,7 @@ such that $X_{s_j}(\omega) < a$ and $X_{t_j}(\omega) > b$ for all $1 \le j \le
 Clearly $U_N^X([a,b]) \uparrow$ as $N$ increases.
 It follows that the monotonic limit
 $U_\infty([a,b]) \coloneqq \lim_{N \to \infty} U_N([a,b])$ exists pointwise.
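+% Small worked example (added for illustration; the concrete numbers are
+% made up and not from the lecture):
+\begin{example}
+    Take $a = 0$, $b = 1$ and suppose the first six values of $(X_n)_n$ are
+    $-1,\, 2,\, \frac{1}{2},\, -\frac{1}{2},\, 3,\, 0$.
+    Then $(s_1, t_1) = (1,2)$ and $(s_2, t_2) = (4,5)$ satisfy
+    $X_{s_j} < a$ and $X_{t_j} > b$, so $U_6^X([0,1]) = 2$;
+    no third upcrossing is completed by time $6$.
+\end{example}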
 \begin{lemma} % Lemma 1
+    \label{lec17l1}
     \[
         \{\omega | \liminf_{N \to \infty} Z_N(\omega) < a < b < \limsup_{N \to \infty} Z_N(\omega)\}
         \subseteq
@@ -73,8 +77,9 @@ It follows that the monotonic limit $U_\infty([a,b]) \coloneqq \lim_{N \to \inf
 \end{lemma}
 
 \begin{lemma} % 2
+    \label{lec17l2}
     Let $Y_n(\omega) \coloneqq \sum_{j=1}^n C_j(X_j - X_{j-1})$.
-    Then $Y_N \ge (b -a) U_N([a,b]) - (X_N - a)^{-}$.
+    Then \[Y_N \ge (b-a) U_N([a,b]) - (X_N - a)^{-}.\]
 \end{lemma}
 \begin{proof}
     Every upcrossing of $[a,b]$ increases the value of $Y$ by $(b-a)$,
@@ -82,21 +87,24 @@ It follows that the monotonic limit $U_\infty([a,b]) \coloneqq \lim_{N \to \inf
 \end{proof}
 
 \begin{lemma} %3
+    \label{lec17l3}
     Suppose $(X_n)_n$ is a supermartingale.
-    Then in the above setup, $(b-a) \bE[U_N([a,b])] \le \bE[(X_n - a)^-]$.
+    Then in the above setup
+    \[(b-a) \bE[U_N([a,b])] \le \bE[(X_N - a)^-].\]
 \end{lemma}
 \begin{proof}
-    Obvious from lemma 2 % TODO REF
+    This is obvious from \autoref{lec17l2} and the supermartingale property.
 \end{proof}
 
 \begin{corollary}
-    Let $(X_n)_n$ be a \vocab[Supermartingale!bounded]{supermartingale bounded in $L^1(\bP)$ },
-    i.e.~$\sup_n \bE[|X_n| ] < \infty$.
+    Let $(X_n)_n$ be a
+    \vocab[Supermartingale!bounded]{supermartingale bounded in $L^1(\bP)$},
+    i.e.~$\sup_n \bE[|X_n|] < \infty$.
     Then $(b-a) \bE(U_\infty) \le |a| + \sup_n \bE(|X_n|)$.
     In particular, $\bP[U_\infty = \infty] = 0$.
 \end{corollary}
 \begin{proof}
-    By lemma 3 % TODO REF
+    By \autoref{lec17l3} we have that
     \[(b-a) \bE[U_N([a,b])] \le \bE[|X_N|] + |a| \le \sup_n \bE[|X_n|] + |a|.\]
     Since $U_N(\cdot) \ge 0$ and $U_N(\cdot) \uparrow U_\infty(\cdot)$,
diff --git a/inputs/lecture_18.tex b/inputs/lecture_18.tex
index 9a3d5f4..a0cd1ce 100644
--- a/inputs/lecture_18.tex
+++ b/inputs/lecture_18.tex
@@ -1,10 +1,9 @@
 \lecture{18}{2023-06-20}{}
 
-Recall our key lemma for supermartingales from last time:
+Recall our key lemma \ref{lec17l3} for supermartingales from last time:
 \[
     (b-a) \bE[U_N([a,b])] \le \bE[(X_N - a)^-].
 \]
-% TODO Ref
 What happens for submartingales?
 If $(X_n)_{n \in \N}$ is a submartingale, then $(-X_n)_{n \in \N}$
 is a supermartingale.
@@ -14,11 +13,14 @@ Hence the same holds for submartingales, i.e.
     a.s.~to a limit, which is a.s.~finite.
 \end{lemma}
 
+\subsection{Doob's $L^p$ inequality}
+
+
 \begin{question}
     What about $L^p$ convergence of martingales?
 \end{question}
 
-\begin{example}[Branching process]
+\begin{example}[\vocab{Branching process}]
     Let $(Z_n)_{n \ge 1}$ be i.i.d.~$\pm 1$ with
     $\bP[Z_n = 1] = p \in (0,1)$.
 
     Define $X_{n+1} \coloneqq u^{Z_{n+1}} X_{n}$.
 
-    \paragraph{Exercise}
-    Given $u \ge 0$, find $p = p(u)$
-    such that $(X_n)_n$ is a martingale w.r.t.~the canonical filtration.
-
-    % TODO
+    \begin{exercise}
+        Given $u \ge 0$, find $p = p(u)$
+        such that $(X_n)_n$ is a martingale w.r.t.~the canonical filtration.
+    \end{exercise}
+    \todo{TODO}
 
     By \autoref{doobmartingaleconvergence},
     there is an
@@ -51,10 +53,11 @@ Hence the same holds for submartingales, i.e.
     $N_0(\epsilon)$ (possibly random) such that
     for all $n > N_0(\epsilon)$
     \[
-        \left( \frac{X_n}{x} \right)^{\frac{1}{n}} \le u^{2p - 1}(1 + \epsilon) \implies x [\underbrace{u^{2p - 1} (1+\epsilon)}_{<1}]^n \xrightarrow{n \to \infty} 0.
+        \left( \frac{X_n}{x} \right)^{\frac{1}{n}} \le u^{2p - 1}(1 + \epsilon) %
+        \implies x [\underbrace{u^{2p - 1} (1+\epsilon)}_{<1}]^n \xrightarrow{n \to \infty} 0.
     \]
-    Hence it can not converge in $L^1$.
- % TODO Confusion + Thus it can not converge in $L^1$. + % TODO Make this less confusing \end{example} @@ -75,16 +78,21 @@ consider $L^2$. Since $\bE[X_n | \cF_{n-1}] = X_{n-1}$ a.s., by induction $\bE[X_n | \cF_{k}] = X_k$ a.s.~for all $k \le n$. Play with conditional expectation. - % TODO Exercise + \todo{Exercise} \end{proof} -\begin{fact}[Parallelogram identity] - % TODO +\begin{fact}[\vocab{Parallelogram identity}] + Let $X, Y \in L^2$. + Then + \[ + 2 \bE[X^2] + 2 \bE[Y^2] = \bE[(X+Y)^2] + \bE[(X-Y)^2]. + \] \end{fact} -\begin{theorem} +\begin{theorem}\label{martingaleconvergencel2} Suppose that $(X_n)_n$ is a martingale bounded in - $L^2$, i.e.~$\sup_n \bE[X_n^2] < \infty$. + $L^2$,\\ + i.e.~$\sup_n \bE[X_n^2] < \infty$. Then there is a random variable $X_\infty$ such that \[ X_n \xrightarrow{L^2} X_\infty. @@ -100,7 +108,7 @@ consider $L^2$. \bE[X_n^2] = \bE[X_0^2] + \sum_{j=1}^{n} \bE[Y_j^2] \] by \autoref{martingaleincrementsorthogonal} - (this is known as the \vocab{parallelogram identity}). % TODO Move + % (this is known as the \vocab{parallelogram identity}). % TODO how exactly is this used here? In particular, \[ \sup_n \bE[X_n^2] < \infty \iff \sum_{j=1}^{\infty} \bE[Y_j^2] < \infty. @@ -141,7 +149,7 @@ First, we need a very important inequality: \end{enumerate} \end{theorem} -We first need +In order to prove \autoref{dooblp}, we first need \begin{lemma} \label{dooplplemma} Let $p > 1$ and $X,Y$ non-negative random variable @@ -176,7 +184,7 @@ We first need Suppose now $Y \not\in L^p$. Then look at $Y_M = Y \wedge M$. - Apply the case of $Y \in L^p$ and use the monotone convergence theorem. + Apply the above to $Y_M \in L^p$ and use the monotone convergence theorem. \end{proof} \begin{refproof}{dooblp} @@ -185,9 +193,13 @@ We first need \[ E_j = \{|X_1| \le \ell, |X_2| \le \ell, \ldots, |X_{j-1}| \le \ell, |X_j| \ge \ell\}. \] - Then $\bP[E_j] \overset{\text{Markov}}{\le } \frac{1}{\ell} \int_{E_j} |X_j| \dif \bP (\ast\ast)$. + Then + \begin{equation} + \bP[E_j] \overset{\text{Markov}}{\le } \frac{1}{\ell} \int_{E_j} |X_j| \dif \bP + \label{lec18eq2star} + \end{equation} Since $(X_n)_n$ is a sub-martingale, $(|X_n|)_n$ is also a sub-martingale - (by \autoref{jensen}). + (by \autoref{cjensen}). Hence \begin{IEEEeqnarray*}{rCl} \bE[\One_{E_j}(|X_n| - |X_{j}|) | \cF_j] @@ -196,14 +208,14 @@ We first need \end{IEEEeqnarray*} By the law of total expectation, \autoref{totalexpectation}, it follows that - \[ - \bE[\One_{E_j} (|X_n| - |X_j|)] \ge 0 (\ast\ast\ast). - \] + \begin{equation} + \bE[\One_{E_j} (|X_n| - |X_j|)] \ge 0. \label{lec18eq3star} + \end{equation} Now \begin{IEEEeqnarray*}{rCl} \bP(E) &=& \sum_{j=1}^n \bP(E_j)\\ - &\overset{(\ast\ast) (\ast\ast\ast)}{\le }& \frac{1}{\ell} \left( \int_{E_1} |X_n| \dif \bP + \ldots + \int_{E_n} |X_n| \dif \bP \right)\\ + &\overset{\eqref{lec18eq2star}, \eqref{lec18eq3star}}{\le }& \frac{1}{\ell} \left( \int_{E_1} |X_n| \dif \bP + \ldots + \int_{E_n} |X_n| \dif \bP \right)\\ &=& \frac{1}{\ell} \int_E |X_n| \dif \bP \end{IEEEeqnarray*} diff --git a/inputs/lecture_19.tex b/inputs/lecture_19.tex index 1d56682..f66d521 100644 --- a/inputs/lecture_19.tex +++ b/inputs/lecture_19.tex @@ -54,7 +54,7 @@ However, some subsets can be easily described, e.g. where we have applied Markov's inequality. % TODO REF Since $\sup_n \bE[|X_n|^{1+\delta}] < \infty$, - we have that $\sup_n \bE[|X_n|] < \infty$ by Jensen. % TODO REF + we have that $\sup_n \bE[|X_n|] < \infty$ by Jensen (\autoref{cjensen}). 
     Hence, choose $k$ large enough to make the relevant term less than $\epsilon$.
 \end{proof}
@@ -94,7 +97,7 @@ However, some subsets can be easily described, e.g.
         &\ge& \epsilon
     \end{IEEEeqnarray*}
     where the assumption that $X$ is in $L^1$ was used to apply
-    the reverse of Fatou's lemma.
+    the reverse of Fatou's lemma. % TODO reverse fatou
     This yields a contradiction since $\bP(F) = 0$.
 \item We want to apply part (a) to $F = \{|X| > k\}$.
@@ -120,7 +123,10 @@ However, some subsets can be easily described, e.g.
 \begin{proof}
     Fix $\epsilon > 0$.
     Choose $\delta > 0$ such that
-    \[\forall F \in \cF.~ \bP(F) < \delta \implies \bE[|X| \One_F] <\epsilon. (\ast)\]
+    \begin{equation}
+        \forall F \in \cF.~\bP(F) < \delta \implies \bE[|X| \One_F] < \epsilon.
+        \label{lec19eqstar}
+    \end{equation}
     Let $Y = \bE[X | \cG]$ for some sub-$\sigma$-algebra $\cG$.
     Then, by \autoref{cjensen}, $|Y| \le \bE[|X| | \cG]$.
     Hence $\bE[|Y|] \le \bE[|X|]$.
@@ -132,7 +135,7 @@ However, some subsets can be easily described, e.g.
     \begin{IEEEeqnarray*}{rCl}
         \bE[|Y| \One_{\{|Y| > k\}}] &<& \epsilon
     \end{IEEEeqnarray*}
-    by $(\ast)$, since $\bP[|Y| > k] < \delta$.
+    by \eqref{lec19eqstar}, since $\bP[|Y| > k] < \delta$.
 \end{proof}
 
 \begin{theorem}
@@ -155,7 +158,7 @@ However, some subsets can be easily described, e.g.
         \end{cases}
     \end{IEEEeqnarray*}
 
-    $\phi$ is $1$-Lipshitz. % TODO
+    $\phi$ is $1$-Lipschitz. % TODO
     We have
     \begin{IEEEeqnarray*}{rCl}
     \autoref{lec19f4} part (b).
     Similarly $\int_{|X| > k} |X - \phi(X)| \dif \bP < \epsilon$.
-    Since $\phi$ is Lipshitz,
+    Since $\phi$ is Lipschitz,
     $X_n \xrightarrow{\bP} X \implies \phi(X_n) \xrightarrow{\bP} \phi(X)$.
     By the bounded convergence theorem % TODO
     $|\phi(X_n)| \le k \implies \int |\phi(X_n) - \phi(X)| \dif \bP \to 0$.
@@ -232,6 +235,3 @@ Let $(\Omega, \cF, \bP)$ as always and let $(\cF_n)_n$ always be a filtration.
     Then there exists a random variable $X \in L^p$,
     such that $X_n = \bE[X | \cF_n]$ for all $n$.
 \end{theorem}
-
-
-
diff --git a/inputs/lecture_20.tex b/inputs/lecture_20.tex
index 6551f96..9207a55 100644
--- a/inputs/lecture_20.tex
+++ b/inputs/lecture_20.tex
@@ -1,3 +1,4 @@
+\lecture{20}{2023-06-27}{}
 \begin{refproof}{ceismartingale}
     By the tower property (\autoref{cetower})
     it is clear that $(\bE[X | \cF_n])_n$
     is a martingale.
 
     First step:
     Assume that $X$ is bounded.
-    Then, by \autoref{cejensen}, $|X_n| \le \bE[|X| | \cF_n]$,
+    Then, by \autoref{cjensen}, $|X_n| \le \bE[|X| | \cF_n]$,
     hence $\sup_{\substack{n \in \N \\ \omega \in \Omega}} |X_n(\omega)| < \infty$.
     Thus $(X_n)_n$ is a martingale in $L^{\infty} \subseteq L^2$.
-    By the convergence theorem for martingales in $L^2$ % TODO REF
+    By the convergence theorem for martingales in $L^2$
+    (\autoref{martingaleconvergencel2})
     there exists a random variable $Y$,
     such that $X_n \xrightarrow{L^2} Y$.
@@ -42,28 +44,31 @@
     \begin{IEEEeqnarray*}{rCl}
         \int |X - X'|^p \dif \bP &=& \int_{\{|X| > M\}} |X|^p \dif \bP \xrightarrow{M \to \infty} 0
     \end{IEEEeqnarray*}
-    as $\bP$ is \vocab{regular}, \todo{Definition?}
-    i.e.~$\forall \epsilon > 0 \exists k . \bP[|X|^p \in [-k,k] \ge 1-\epsilon$.
+    as $\bP$ is \vocab[Measure!regular]{regular}, \todo{Make this a definition?}
+    i.e.~$\forall \epsilon > 0.~\exists k.~\bP[|X|^p \in [-k,k]] \ge 1-\epsilon$.
 
-    % Take some $\epsilon > 0$ and $M$ large enough such that
-    % \[
-    % \int |X - X'| \dif \bP < \epsilon.
-    % \]
-    % Let $(X_n')_n$ be the martingale given by $(\bE[X' | \cF_n])_n$.
-    % Then $X_n' \xrightarrow{L^p} X'$ by the first step.
-    % It is
-    % \begin{IEEEeqnarray*}{rCl}
-    % \|X_n - X_n'\|_{L^p}^p &=& \bE[\bE[X - X' | \cF_n]^{p}]\\
-    % &\overset{\text{Jensen}}{\le}& \bE[\bE[(X- X')^p | \cF_n]\\
-    % &=& \|X - X'\|_{L^p}^p\\
-    % &<& \epsilon.
-    % \end{IEEEeqnarray*}
+    Take some $\epsilon > 0$ and $M$ large enough such that
+    \[
+        \int |X - X'|^p \dif \bP < \epsilon^p.
+    \]
+
+    Let $(X_n')_n$ be the martingale given by $(\bE[X' | \cF_n])_n$.
+    Then $X_n' \xrightarrow{L^p} X'$ by the first step.
+
+    We have
+    \begin{IEEEeqnarray*}{rCl}
+        \|X_n - X_n'\|_{L^p}^p
+        &=& \bE[|\bE[X - X' | \cF_n]|^{p}]\\
+        &\overset{\text{Jensen}}{\le}& \bE[\bE[|X - X'|^p | \cF_n]]\\
+        &=& \|X - X'\|_{L^p}^p\\
+        &<& \epsilon^p.
+    \end{IEEEeqnarray*}
 
     Hence
     \[
-        \|X_n - X\|_{L^p} \le |X_n - X_n'|_{L^p} + |X_n' - X'|_{L^p} + | X - X'|_{L^p} \le 3 \epsilon.
+        \|X_n - X\|_{L^p} %
+        \le \|X_n - X_n'\|_{L^p} + \|X_n' - X'\|_{L^p} + \|X - X'\|_{L^p} %
+        \le 3 \epsilon.
     \]
     Thus $X_n \xrightarrow{L^p} X$.
 \end{refproof}
@@ -226,15 +231,16 @@ we need the following theorem, which we won't prove here:
     We have
     \begin{IEEEeqnarray*}{rCl}
-        \bE[X^T_n - X^T_{n-1} | \cF_{n-1}]
-        &=& \bE[X_n \One_{\{T \ge n\}} + \sum_{k=1}^{n-1} X_k \One_{\{ T = k\} } - X_{n-1}(\One_{T \ge n} + \One_{\{T = n-1\}})
-        + \sum_{k=1}^{n-2} X_k \One_{\{T = k\} } | \cF_{n-1}]\\
+        &&\bE[X^T_n - X^T_{n-1} | \cF_{n-1}]\\
+        &=& \bE\left[X_n \One_{\{T \ge n\}} + \sum_{k=1}^{n-1} X_k \One_{\{T = k\}}
+        - X_{n-1}(\One_{\{T \ge n\}} + \One_{\{T = n-1\}})\right.\\
+        &&\left.+ \sum_{k=1}^{n-2} X_k \One_{\{T = k\}} \middle| \cF_{n-1}\right]\\
         &=& \bE[(X_n - X_{n-1}) \One_{\{T \ge n\}} | \cF_{n-1}]\\
-        &=& \One_{\{ T \ge n\}} (\bE[X_n | \cF_{n-1}] - X_{n-1})\\
-        && \begin{cases}
+        &=& \One_{\{T \ge n\}} (\bE[X_n | \cF_{n-1}] - X_{n-1})
+        \begin{cases}
             \le 0\\
             = 0 \text{ if $(X_n)_n$ is a martingale}.
-        \end{cases}.
+        \end{cases}
     \end{IEEEeqnarray*}
 \end{proof}
diff --git a/inputs/lecture_21.tex b/inputs/lecture_21.tex
index a8ae54e..8573ec7 100644
--- a/inputs/lecture_21.tex
+++ b/inputs/lecture_21.tex
@@ -1,4 +1,5 @@
 \lecture{21}{2023-06-29}{}
+\subsection{An application of the optional stopping theorem}
 
 This is the last lecture relevant for the exam.
 (Apart from lecture 22, which will be a repetition.)
@@ -60,7 +61,7 @@ Suppose that $A^c \subseteq \R$ is a bounded domain.
 Given a bounded function $f$ on $\R$,
 we want a function $u$ which is bounded,
 such that
-$Lu = 0$ on $A^c$ and $u = f$ on $A$.
+$\mathbf{L}u = 0$ on $A^c$ and $u = f$ on $A$.
 \end{goal}
 
 We will show that $u(x) = \bE_x[f(X_{T_A})]$
 is the unique solution to this problem.
@@ -98,12 +99,18 @@
     Intuitively, conditioned on $X_n$,
     $X_{n+1}$ should be independent of $\sigma(X_1,\ldots, X_{n-1})$.
 
-    \begin{claim}
+    \begin{claim*}
         For a set $B$, we have
-        $\bE[\One_{X_{n+1} \in B} | \sigma(X_1,\ldots, X_n)] = \bE[\One_{X_{n+1} \in B} | \sigma(X_n)]$.
-    \end{claim}
+        \[
+            \bE[\One_{X_{n+1} \in B} | \sigma(X_1,\ldots, X_n)] %
+            = \bE[\One_{X_{n+1} \in B} | \sigma(X_n)].
+        \]
+    \end{claim*}
     \begin{subproof}
-        The rest of the lecture was very chaotic...
+        \todo{TODO}
+        % We have $\sigma(\One_{X_{n+1} \in B}) \subseteq \sigma(X_{n}, \xi_{n+1})$.
+        % $\sigma(X_1,\ldots,X_{n-1})$
+        % is independent of $\sigma( \sigma(\One_{X_{n+1} \in B}), X_n)$.
+        % Hence the claim follows from \autoref{ceroleofindependence}.
 \end{subproof}
 \end{example}
diff --git a/inputs/lecture_3.tex b/inputs/lecture_3.tex
index 8708d78..960d9f3 100644
--- a/inputs/lecture_3.tex
+++ b/inputs/lecture_3.tex
@@ -1,5 +1,5 @@
 \lecture{3}{}{}
-\todo{Lecture 3 needs to be finished}
+\todo{My battery died during this lecture, so this still needs to be finished}
 \begin{notation}
     Let $\cB_n$ denote $\cB(\R^n)$.
 \end{notation}
diff --git a/inputs/prerequisites.tex b/inputs/prerequisites.tex
index 8bfd880..5d2ed8f 100644
--- a/inputs/prerequisites.tex
+++ b/inputs/prerequisites.tex
@@ -121,6 +121,7 @@ How do we prove that something happens almost surely?
 The first thing that should come to mind is:
 \begin{lemma}[Borel-Cantelli]
+    \label{borelcantelli}
     If we have a sequence of events $(A_n)_{n \ge 1}$
     such that $\sum_{n \ge 1} \bP(A_n) < \infty$,
     then $\bP[A_n \text{ for infinitely many $n$}] = 0$
diff --git a/wtheo.sty b/wtheo.sty
index 56f2b50..c3e5b72 100644
--- a/wtheo.sty
+++ b/wtheo.sty
@@ -104,3 +104,4 @@
 \newcommand*\dif{\mathop{}\!\mathrm{d}}
 \newcommand\lecture[3]{\hrule{\color{darkgray}\hfill{\tiny[Lecture #1, #2]}}}
+\newcommand\notes{\todo{TODO: copy from official notes}}
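+% \notes marks a passage whose details still have to be copied over from the
+% official lecture notes; it expands to a \todo reminder.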