\lecture{21}{2023-06-29}{} % TODO: replace bf
This is the last lecture relevant for the exam.
(Apart from lecture 22, which will be a repetition.)
\begin{goal}
  We want to see an application of the optional stopping theorem \ref{optionalstopping}.
\end{goal}
\begin{notation}
  Let $E$ be a complete, separable metric space (e.g.~$E = \R$).
  Suppose that for all $x \in E$ we have a probability measure $\bfP(x, \dif y)$ on $E$.
  % i.e. $\mu(A) \coloneqq \int_A \bP(x, \dif y)$ is a probability measure.
  Such a probability measure is called a \vocab{transition probability measure}.
\end{notation}
\begin{example}
  $E = \R$,
  \[
    \bfP(x, \dif y) = \frac{1}{\sqrt{2 \pi}} e^{- \frac{(x-y)^2}{2}} \dif y
  \]
  is a transition probability measure.
\end{example}
\begin{example}[Simple random walk as a transition probability measure]
  $E = \Z$, $\bfP(x, \dif y)$ assigns mass $\frac{1}{2}$ to $y = x+1$ and $y = x-1$.
\end{example}
\begin{definition}
  For every bounded, measurable function $f \colon E \to \R$ and $x \in E$ define
  \[
    (\bfP f)(x) \coloneqq \int_E f(y) \bfP(x, \dif y).
  \]
  This $\bfP$ is called a \vocab{transition operator}.
\end{definition}
\begin{fact}
  If $f \ge 0$, then $(\bfP f)(\cdot) \ge 0$.
  If $f \equiv 1$, we have $(\bfP f) \equiv 1$.
\end{fact}
\begin{notation}
  Let $\bfI$ denote the \vocab{identity operator}, i.e.
  \[
    (\bfI f)(x) = f(x)
  \]
  for all $f$.
  Then for a transition operator $\bfP$ we write
  \[
    \bfL \coloneqq \bfI - \bfP.
  \]
\end{notation}
\begin{goal}
  Take $E = \R$.
  Suppose that $A^c \subseteq \R$ is a bounded domain.
  Given a bounded function $f$ on $\R$, we want a function $u$ which is bounded, such that $\bfL u = 0$ on $A^c$ and $u = f$ on $A$.
\end{goal}
We will show that $u(x) = \bE_x[f(X_{T_A})]$ is the unique solution to this problem.
\begin{definition}
  Let $(\Omega, \cF, \{\cF_n\}_n, \bP_x)$ be a filtered probability space, where for every $x \in \R$, $\bP_x$ is a probability measure.
  Let $\bE_x$ denote expectation with respect to $\bP_x$.
  Then $(X_n)_{n \ge 0}$ is a \vocab{Markov chain} starting at $x \in \R$ with \vocab[Markov chain!Transition probability]{transition probability} $\bfP(x, \cdot)$ if
  \begin{enumerate}[(i)]
    \item $\bP_x[X_0 = x] = 1$,
    \item for all bounded, measurable $f\colon \R \to \R$,
      \[
        \bE_x[f(X_{n+1}) | \cF_n] \overset{\text{a.s.}}{=}%
        \bE_{x}[f(X_{n+1}) | X_n] = %
        \int f(y) \bfP(X_n, \dif y).
      \]
  \end{enumerate}
  (Recall $\cF_n = \sigma(X_1,\ldots, X_n)$.)
\end{definition}
\begin{example}
  Suppose $B \in \cB(\R)$ and $f = \One_B$.
  Then the first equality of (ii) simplifies to
  \[
    \bP_x[X_{n+1} \in B | \cF_n] = \bP_x[X_{n+1} \in B | \sigma(X_n)].
  \]
\end{example}
\begin{definition}[Conditional probability]
  \[
    \bP[A | \cG] \coloneqq \bE[\One_A | \cG].
  \]
\end{definition}
\begin{example}
  Let $\xi_i$ be i.i.d.~with $\bP[\xi_i = 1] = \bP[\xi_i = -1] = \frac{1}{2}$ and define $X_n \coloneqq \sum_{i=1}^{n} \xi_i$.
  Intuitively, conditioned on $X_n$, $X_{n+1}$ should be independent of $\sigma(X_1,\ldots, X_{n-1})$.
  For a set $B$, we have
  \[
    \bP_0[X_{n+1} \in B| \sigma(X_1,\ldots, X_n)] = \bE[\One_{X_n + \xi_{n+1} \in B} | \sigma(X_1,\ldots, X_n)] = \bE[\One_{X_n + \xi_{n+1} \in B} | \sigma(X_n)].
  \]
  \begin{claim}
    $\bE[\One_{X_{n+1} \in B} | \sigma(X_1,\ldots, X_n)] = \bE[\One_{X_{n+1} \in B} | \sigma(X_n)]$.
  \end{claim}
  \begin{subproof}
    The rest of the lecture was very chaotic...
  \end{subproof}
\end{example}
%TODO
{
  \huge\color{red}
  New information after this point is not relevant for the exam.
}
Stopping times and optional stopping are very relevant for the exam; the Markov property is not.