$$
% create the definition symbol
\def\bydef{\stackrel{\Delta}{=}}
%\def\circconv{\otimes}
\def\circconv{\circledast}
\newcommand{\qed}{\mbox{ } \Box}
\newcommand{\infint}{\int_{-\infty}^{\infty}}
% z transform
\newcommand{\ztp}{ ~~ \mathop{\mathcal{Z}}\limits_{\longleftrightarrow} ~~ }
\newcommand{\iztp}{ ~~ \mathop{\mathcal{Z}^{-1}}\limits_{\longleftrightarrow} ~~ }
% fourier transform pair
\newcommand{\ftp}{ ~~ \mathop{\mathcal{F}}\limits_{\longleftrightarrow} ~~ }
\newcommand{\iftp}{ ~~ \mathop{\mathcal{F}^{-1}}\limits_{\longleftrightarrow} ~~ }
% laplace transform
\newcommand{\ltp}{ ~~ \mathop{\mathcal{L}}\limits_{\longleftrightarrow} ~~ }
\newcommand{\iltp}{ ~~ \mathop{\mathcal{L}^{-1}}\limits_{\longleftrightarrow} ~~ }
\newcommand{\ftrans}[1]{ \mathcal{F} \left\{#1\right\} }
\newcommand{\iftrans}[1]{ \mathcal{F}^{-1} \left\{#1\right\} }
\newcommand{\ztrans}[1]{ \mathcal{Z} \left\{#1\right\} }
\newcommand{\iztrans}[1]{ \mathcal{Z}^{-1} \left\{#1\right\} }
\newcommand{\ltrans}[1]{ \mathcal{L} \left\{#1\right\} }
\newcommand{\iltrans}[1]{ \mathcal{L}^{-1} \left\{#1\right\} }
% coordinate vector relative to a basis (linear algebra)
\newcommand{\cvrb}[2]{\left[ \vec{#1} \right]_{#2} }
% change of coordinate matrix (linear algebra)
\newcommand{\cocm}[2]{ \mathop{P}\limits_{#2 \leftarrow #1} }
% Transformed vector set
\newcommand{\tset}[3]{\{#1\lr{\vec{#2}_1}, #1\lr{\vec{#2}_2}, \dots, #1\lr{\vec{#2}_{#3}}\}}
% sum transformed vector set
\newcommand{\tsetcsum}[4]{{#1}_1#2(\vec{#3}_1) + {#1}_2#2(\vec{#3}_2) + \cdots + {#1}_{#4}#2(\vec{#3}_{#4})}
\newcommand{\tsetcsumall}[4]{#2\lr{{#1}_1\vec{#3}_1 + {#1}_2\vec{#3}_2 + \cdots + {#1}_{#4}\vec{#3}_{#4}}}
\newcommand{\cvecsum}[3]{{#1}_1\vec{#2}_1 + {#1}_2\vec{#2}_2 + \cdots + {#1}_{#3}\vec{#2}_{#3}}
% function def
\newcommand{\fndef}[3]{#1:#2 \to #3}
% vector set
\newcommand{\vset}[2]{\{\vec{#1}_1, \vec{#1}_2, \dots, \vec{#1}_{#2}\}}
% absolute value
\newcommand{\abs}[1]{\left| #1 \right|}
% vector norm
\newcommand{\norm}[1]{\left|\left| #1 \right|\right|}
% trans
\newcommand{\trans}{\mapsto}
% evaluate integral
\newcommand{\evalint}[3]{\left. #1 \right|_{#2}^{#3}}
% slist
\newcommand{\slist}[2]{{#1}_{1},{#1}_{2},\dots,{#1}_{#2}}
% vectors
\newcommand{\vc}[1]{\textbf{#1}}
% real
\newcommand{\Real}[1]{{\Re \mit{e}\left\{{#1}\right\}}}
% imaginary
\newcommand{\Imag}[1]{{\Im \mit{m}\left\{{#1}\right\}}}
\newcommand{\mcal}[1]{\mathcal{#1}}
\newcommand{\bb}[1]{\mathbb{#1}}
\newcommand{\N}{\mathbb{N}}
\newcommand{\Z}{\mathbb{Z}}
\newcommand{\Q}{\mathbb{Q}}
\newcommand{\R}{\mathbb{R}}
\newcommand{\C}{\mathbb{C}}
\newcommand{\I}{\mathbb{I}}
\newcommand{\Th}[1]{\mathop\mathrm{Th(#1)}}
\newcommand{\intersect}{\cap}
\newcommand{\union}{\cup}
\newcommand{\intersectop}{\bigcap}
\newcommand{\unionop}{\bigcup}
\newcommand{\setdiff}{\backslash}
\newcommand{\iso}{\cong}
\newcommand{\aut}[1]{\mathop{\mathrm{Aut(#1)}}}
\newcommand{\inn}[1]{\mathop{\mathrm{Inn(#1)}}}
\newcommand{\Ann}[1]{\mathop{\mathrm{Ann(#1)}}}
\newcommand{\dom}[1]{\mathop{\mathrm{dom} #1}}
\newcommand{\cod}[1]{\mathop{\mathrm{cod} #1}}
\newcommand{\id}{\mathrm{id}}
\newcommand{\st}{\ |\ }
\newcommand{\mbf}[1]{\mathbf{#1}}
\newcommand{\enclose}[1]{\left\langle #1\right\rangle}
\newcommand{\lr}[1]{\left( #1\right)}
\newcommand{\lrsq}[1]{\left[ #1\right]}
\newcommand{\op}{\mathrm{op}}
\newcommand{\dotarr}{\dot{\rightarrow}}
%Category Names:
\newcommand{\Grp}{\mathbf{Grp}}
\newcommand{\Ab}{\mathbf{Ab}}
\newcommand{\Set}{\mathbf{Set}}
\newcommand{\Matr}{\mathbf{Matr}}
\newcommand{\IntDom}{\mathbf{IntDom}}
\newcommand{\Field}{\mathbf{Field}}
\newcommand{\Vect}{\mathbf{Vect}}
\newcommand{\thm}[1]{\begin{theorem} #1 \end{theorem}}
\newcommand{\clm}[1]{\begin{claim} #1 \end{claim}}
\newcommand{\cor}[1]{\begin{corollary} #1 \end{corollary}}
\newcommand{\ex}[1]{\begin{example} #1 \end{example}}
\newcommand{\prf}[1]{\begin{proof} #1 \end{proof}}
\newcommand{\prbm}[1]{\begin{problem} #1 \end{problem}}
\newcommand{\soln}[1]{\begin{solution} #1 \end{solution}}
\newcommand{\rmk}[1]{\begin{remark} #1 \end{remark}}
\newcommand{\defn}[1]{\begin{definition} #1 \end{definition}}
\newcommand{\ifff}{\Leftrightarrow}
% For the set of reals and integers
\newcommand{\rr}{\R}
\newcommand{\reals}{\R}
\newcommand{\ii}{\Z}
\newcommand{\cc}{\C}
\newcommand{\nn}{\N}
\newcommand{\nats}{\N}
% For terms being indexed.
% Puts them in standard font face and creates an index entry.
% arg: The term being defined.
% \newcommand{\pointer}[1]{#1\index{#1}}
% For bold terms to be indexed, but defined elsewhere.
% Puts them in bold face and creates an index entry.
% arg: The term being defined.
\newcommand{\strong}[1]{\textbf{#1}}
% For set names.
% Puts them in italics. In math mode, yields decent spacing.
% arg: The name of the set.
\newcommand{\set}[1]{\textit{#1}}
$$
\documentclass{article}
\usepackage{latex2html5}
\usepackage{writer}
\usepackage{auto-pst-pdf}
\usepackage{pstricks-add}
\usepackage{graphicx}
\usepackage{hyperref}
\definecolor{lightblue}{rgb}{0.0,0.24313725490196078,1.0}
\title{{\Huge ee120}}
\author{
\textbf{Dan Lynch} \\
UC Berkeley \\
EECS Department \\
D@nLynch.com \\
}
\date{1st of December 2012}
\begin{document}
\maketitle
\newpage
\tableofcontents
\newpage
\section{Week 1}
\subsection{LTI Systems}
\subsubsection{LTI Systems}
Linear Time-Invariant (LTI) Systems have properties of \emph{linearity} and \emph{time-invariance}. Consider a system with input signal $x \in X$, where $X$ is the set of input signals (input-signal space), and the output signal $y \in Y$, where $Y$ is the set of output signals (output-signal space).
$$ x \to \fbox{F} \to y $$
Then we say that $F$ maps from $X$ to $Y$, or $F: X \to Y$.
\subsubsection{Linearity}
There are two properties that make a system linear: \emph{homogeneity} and \emph{additivity}.
{\bf Scaling (homogeneity) property}
Given $\alpha \in \C$ (or $\alpha \in \R$), if you scale the input signal by $\alpha$, then the output signal is also scaled by $\alpha$.
$$\alpha x \to \fbox{F} \to \alpha y$$
{\bf Additivity property}
Given two input signals and their respective outputs after passing them through a system $F$, the signal representing the sum of the input signals corresponds to the output signal representing the sum of the original output signals.
\begin{align*}
x_1 \to &\fbox{F} \to y_1 \\
x_2 \to &\fbox{F} \to y_2 \\
\Rightarrow x_1 + x_2 \to &\fbox{F} \to y_1 + y_2
\end{align*}
\emph{Superposition} combines these two properties and characterizes linear systems:
$$ \alpha_1 x_1 + \alpha_2 x_2 \to \fbox{F} \to \alpha_1 y_1 + \alpha_2 y_2 $$
\begin{example}
In discrete-time (DT), consider a system that computes a 3-day moving average of a stock quote:
$$ y(n) = \frac{x(n) + x(n-1) + x(n-2)}{3} $$
where $x(n)$ is the closing price of the stock on day $n$, and $y(n)$ is the 3-day moving average. It can be shown that this system is linear.
\end{example}
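As a sanity check, here is a minimal numerical sketch of the moving-average example (assuming NumPy is available; the test signals and scalars are arbitrary choices) verifying that superposition holds:
\begin{verbatim}
import numpy as np

def moving_average(x):
    # y(n) = (x(n) + x(n-1) + x(n-2)) / 3, taking x = 0 before n = 0
    xp = np.concatenate(([0.0, 0.0], np.asarray(x, dtype=float)))
    return (xp[2:] + xp[1:-1] + xp[:-2]) / 3.0

rng = np.random.default_rng(0)
x1, x2 = rng.standard_normal(10), rng.standard_normal(10)
a1, a2 = 2.0, -0.5

lhs = moving_average(a1 * x1 + a2 * x2)                  # scale, sum, filter
rhs = a1 * moving_average(x1) + a2 * moving_average(x2)  # filter, then combine
assert np.allclose(lhs, rhs)                             # superposition holds
\end{verbatim}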
\begin{example}
In continuous-time (CT), consider a resistor whose voltage is given by Ohm's Law, $V=IR$, where $I$ is the current and $R$ is the resistance.
%\begin{center}
%\includegraphics{images/circuits/resistor.ps}
%\end{center}
Let $\hat{x} = \alpha x$; then $\hat{y} = R\hat{x} = \alpha R x = \alpha y$. Let $\tilde{x} = x_1 + x_2$; then $\tilde{y}=R\tilde{x}=Rx_1+Rx_2=y_1+y_2$. Therefore the system is linear. Note that we never used the constancy of $R$, so the system is linear even if $R$ varies with time.
\end{example}
\subsubsection{Time-Invariance}
Note that we use time when describing signals, but position or other independent variables may be used. Hence this property, time-invariance (TI), is more generally called \emph{shift invariance}.
$$ x \to \fbox{F} \to y $$
Let us define $\hat{x}(t) = x(t-T)$, and suppose $X$ is closed under shifts, that is, $x, \hat{x} \in X$. Then when we pass this signal through $F$, we get the output signal $\hat{y}$:
$$ \hat{x} \to \fbox{F} \to \hat{y} $$
where $\hat{y}(t) = y(t-T)$. Both previous examples can be shown to be TI. In the example of the resistor, the reason for time-invariance is the fact that $R$ is constant. Conversely, to show that a resistor whose voltage is determined by $y(t) = R(t)x(t)$, with a time-varying resistance, is not TI, we show that $\hat{y}(t) \neq y(t-T)$:
\begin{align*}
y(t) &= R(t)x(t) \\
\hat{y}(t) &= R(t)\hat{x}(t) \\
\hat{y}(t) &= R(t)x(t-T) \neq R(t-T)x(t-T) = y(t-T) \\
\hat{y}(t) &\neq y(t-T)
\end{align*}
\subsubsection{Time-Domain Behavior}
The properties of LTI systems are important in two aspects:
\begin{enumerate}
\item time-domain behavior
\item frequency-domain behavior
\end{enumerate}
Let's first look at the time domain. Consider a signal $x: \Z \to \R$.
%\img{images/stem/stemex.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3.5,-1.35)(3.5,2)
\psline{-*}(-3,0)(-3,0)
\rput(-3,-0.4){-3}
\psline{-*}(-2,0)(-2,0)
\rput(-2,-0.4){-2}
\psline{-*}(-1,0)(-1,1)
\rput(-1,1.3){$x(-1)$}
\rput(-1,-0.4){-1}
\psline{-*}(0,0)(0,-1)
\rput(0.45,-1){$x(0)$}
\rput(0.000,0.4){0}
\psline{-*}(1,0)(1,1.5)
\rput(1.4,1.5){$x(1)$}
\rput(1,-0.4){1}
\psline{-*}(2,0)(2,1)
\rput(2.4,1){$x(2)$}
\rput(2,-0.4){2}
\psline{-*}(3,0)(3,0)
\rput(3,-0.4){3}
% axes
\psline{->}(-3,0)(3,0)
\rput(3.3,-0.3){ $n$ }
\rput(3.4,1.5){ $x(n)$ }
\end{pspicture}
\end{center}
How do we split the signal into its components? We can write the signal as a sum of scaled and shifted versions of the most fundamental signal, the Kronecker delta. Therefore, we can write the signal as $x(n) = \sum_k x(k)\delta(n-k)$. The \emph{Kronecker Delta} function, also known as \emph{Discrete-Time Impulse Signal} (aka Unit Impulse), is defined by
$$\delta(n) = \begin{cases}
1 & n=0 \\
0 & \mbox{otherwise} \\
\end{cases}
$$
%\img{images/kronecker.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3.5,-1)(3.5,2)
\psline{-*}(-3,0)(-3,0)
\rput(-3,-0.4){-3}
\psline{-*}(-2,0)(-2,0)
\rput(-2,-0.4){-2}
\psline{-*}(-1,0)(-1,0)
\rput(-1,-0.4){-1}
\psline{-*}(0.000,0)(0.000,1)
\rput(0.25,1){(1)}
\rput(0.000,-0.4){0}
\psline{-*}(1,0)(1,0)
\rput(1,-0.4){1}
\psline{-*}(2,0)(2,0)
\rput(2,-0.4){2}
\psline{-*}(3,0)(3,0)
\rput(3,-0.4){3}
% axes
\psline{->}(-3,0)(3.428,0)
\rput(3.4,-0.3){ $n$ }
\rput(3,1.857){ $\delta(n)$ }
\end{pspicture}
\end{center}
In the case of the signal below, we have $x(n) = x(-1)\delta(n+1) + x(0)\delta(n) + x(1)\delta(n-1) + x(2)\delta(n-2)$.
%\img{images/stem/stemex.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3.5,-1.35)(3.5,2)
\psline{-*}(-3,0)(-3,0)
\rput(-3,-0.4){-3}
\psline{-*}(-2,0)(-2,0)
\rput(-2,-0.4){-2}
\psline{-*}(-1,0)(-1,1)
\rput(-1,1.2){$x(-1)$}
\rput(-1,-0.4){-1}
\psline{-*}(0,0)(0,-1)
\rput(0.4,-1.2){$x(0)$}
\rput(0,0.4){0}
\psline{-*}(1,0)(1,1.5)
\rput(1.4,1.5){$x(1)$}
\rput(1,-0.4){1}
\psline{-*}(2,0)(2,1)
\rput(2.4,1){$x(2)$}
\rput(2,-0.4){2}
\psline{-*}(3,0)(3,0)
\rput(3,-0.4){3}
% axes
\psline{->}(-3,0)(3,0)
\rput(3.4,-0.3){ $n$ }
\rput(3,1.5){ $x(n)$ }
\end{pspicture}
\end{center}
%%%
If $\delta(n) \to \fbox{F} \to f(n)$, then $f(n)$ is the \emph{impulse response} of the LTI system $F$. This is important because for any LTI system in discrete-time, if the response to the unit impulse ($\delta(n)$) is known, then the output can be determined for any input.
\begin{align*}
x(n)& = x(-1)\delta(n+1) + x(0)\delta(n) + x(1)\delta(n-1) + x(2)\delta(n-2) \\
y(n)& = x(-1)f(n+1) + x(0)f(n) + x(1)f(n-1) + x(2)f(n-2)
\end{align*}
More generally, any input signal can be represented as a linear combination of scaled and shifted impulses.
\begin{align*}
x(n) &= \sum \limits_{m\in\Z} x(m)\delta(n-m) \\
y(n) &= \sum \limits_{m\in\Z} x(m)f(n-m) \\
\end{align*}
%%% CONVOLUTION SUM
The output signal $y$ can be written as a sum, also known as the convolution sum:
$$y(n) = \sum \limits_{m\in\Z} x(m)f(n-m) = (x * f)(n)$$
Convolution is commutative. Substituting $k = n-m$ (so $m = n-k$):
$$y(n) = \sum \limits_{k\in\Z} f(k)x(n-k) = (f*x)(n)$$
Hence, $(x*f)(n) = (f*x)(n)$:
\begin{align*}
x \to & \fbox{f} \to y = x * f \\
f \to & \fbox{x} \to y = f * x
\end{align*}
Any function can be thought of as a signal, and any signal may be thought of as a response---but the input signal and the impulse response must share the same domain.
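The convolution sum translates directly into code. Here is a minimal sketch (assuming NumPy; the finite-length sequences are arbitrary test signals) that implements the sum literally and checks commutativity against \texttt{np.convolve}:
\begin{verbatim}
import numpy as np

def conv_sum(x, f):
    # y(n) = sum_m x(m) f(n-m), for finite-length x and f starting at n = 0
    y = np.zeros(len(x) + len(f) - 1)
    for m, xm in enumerate(x):
        y[m:m + len(f)] += xm * np.asarray(f)  # add a scaled, shifted copy of f
    return y

x = [1.0, -1.0, 2.0]
f = [0.5, 0.5]
assert np.allclose(conv_sum(x, f), conv_sum(f, x))     # (x*f)(n) = (f*x)(n)
assert np.allclose(conv_sum(x, f), np.convolve(x, f))  # matches the library
\end{verbatim}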
{\bf Continuous-time}
We also have a continuous-time impulse, the Dirac Delta, which allows us to characterize continuous-time signals. This function can be characterized by integration: $ \int_{-\infty}^{\infty} \delta(\tau)d\tau = 1 $
%\img{images/dirac/dirac.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3,-1)(3,2)
\rput(-3,-0.4){-3}
\rput(-2,-0.4){-2}
\rput(-1,-0.4){-1}
\psline{->}(0,0)(0,1)
\rput(1,1){(1)}
\rput(0,-0.4){0}
\rput(1,-0.4){1}
\rput(2,-0.4){2}
\rput(3,-0.4){3}
\psline{->}(-3.428,0)(3.428,0)
\rput(3.428,-0.3){ $t$ }
\end{pspicture}
\end{center}
Hence, the input signal can be written as scaled and shifted versions of the Dirac delta:
$$ x(t) = \infint x(\tau)\delta(t-\tau) d\tau $$
This leads us to the convolution integral. The corresponding convolution formula in continuous-time is given by
$$ y(t) = \infint x(\tau) f(t-\tau) d\tau $$
\subsubsection{Frequency-Domain Behavior}
Let's look at the frequency-domain behavior of LTI systems. To do this, we pass in a pure frequency by letting $x(n) = e^{i\omega n}$, or $x(t) = e^{i\omega t}$. We know that we can write the output as a convolution sum, so let's start with this and pass in a pure frequency:
\begin{align*}
y(n) &= \sum \limits_{k\in\Z} f(k) x(n-k) \\
y(n) &= \sum \limits_{k\in\Z} f(k) e^{i\omega(n-k)} \\
y(n) &= \sum \limits_{k\in\Z} f(k) e^{i\omega n}e^{-i\omega k} \\
y(n) &= \lr{\sum \limits_{k\in\Z} f(k)e^{-i\omega k}} e^{i\omega n} \\
y(n) &= F(\omega) e^{i\omega n} \\
\end{align*}
The same complex exponential comes out of the system, scaled by $F(\omega)$, the frequency response of the system.
\begin{nicebox}
$$ F(\omega) = \sum \limits_{k \in \Z} f(k) e^{-i\omega k} $$
\end{nicebox}
The meaning of $F(\omega)$, the frequency response, has to do with frequency content. It tells us how much the system ``likes'' $\omega$, by suppressing or amplifying it. This motivates the term ``filter'', since certain frequencies are removed, kept, or amplified. In general, $F(\omega) \in \C$, since it is a linear combination of complex exponentials.
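To make this concrete, here is a small sketch (assuming NumPy; the impulse response and the test frequency are arbitrary choices) that evaluates $F(\omega)$ by summing $f(k)e^{-i\omega k}$ directly and confirms that a pure frequency passes through scaled by $F(\omega)$:
\begin{verbatim}
import numpy as np

def freq_response(f, omegas):
    # F(w) = sum_k f(k) e^{-iwk}, for f supported on k = 0 .. len(f)-1
    k = np.arange(len(f))
    return np.array([np.sum(f * np.exp(-1j * w * k)) for w in omegas])

f = np.array([1.0, 0.5, 0.25])   # an arbitrary FIR impulse response
w0 = 0.7                         # an arbitrary test frequency
n = np.arange(40)
y = np.convolve(np.exp(1j * w0 * n), f)[:40]
F0 = freq_response(f, [w0])[0]
# once n >= len(f)-1, the whole impulse response overlaps the input,
# so the output is exactly F(w0) e^{i w0 n}
assert np.allclose(y[2:], F0 * np.exp(1j * w0 * n[2:]))
\end{verbatim}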
\begin{example}
Given the impulse response in the graph below,
\begin{enumerate}
\item Express $y(n)$ in terms of $x$
\item Determine expression for $F(\omega)$
\item Plot $\abs{F(\omega)}$
\item Plot $ \angle F(\omega)$
\end{enumerate}
% \img{images/stem/stemex2.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-2.25,-1)(3.5,2)
% line
\psline{-*}(-2.000,0)(-2.000,0)
% position
\rput(-2.000,-0.4){-2}
% line
\psline{-*}(-1.000,0)(-1.000,0)
% position
\rput(-1.000,-0.4){-1}
% line
\psline{-*}(0.000,0)(0.000,0.5)
% value
\rput(0.36,0.5){(0.5)}
% position
\rput(0.000,-0.4){0}
% line
\psline{-*}(1.000,0)(1.000,0.5)
% value
\rput(1.36,0.5){(0.5)}
% position
\rput(1.000,-0.4){1}
% line
\psline{-*}(2.000,0)(2.000,0)
% position
\rput(2.000,-0.4){2}
% axes
\psline{->}(-2,0)(3,0)
\rput(3,-0.3){ $n$ }
\rput(2.5,0.5){ $f(n)$ }
\end{pspicture}
\end{center}
\begin{align*}
f(n) &= \frac{1}{2}\delta(n) + \frac{1}{2}\delta(n-1) \\
y(n) &= \frac{1}{2}x(n) + \frac{1}{2}x(n-1)
\end{align*}
What about the frequency response? We know that $F(\omega) = \sum \limits_{\ell \in \Z} f(\ell)e^{-i\omega\ell}$. Think about how many non-zero terms there are:
\begin{align*}
F(\omega) &= \sum \limits_{\ell \in \Z} f(\ell)e^{-i\omega\ell} \\
F(\omega) &= \sum \limits_{\ell = 0}^1 f(\ell)e^{-i\omega\ell} \quad \mbox{only looking at } \ell \in \{0,1\}\\
F(\omega) &= f(0)e^{0} + f(1)e^{-i\omega}\\
F(\omega) &= \frac{1}{2} + \frac{1}{2}e^{-i\omega}
\end{align*}
Note that we could have used another method to find $F(\omega)$
\begin{align*}
y(n) &= \frac{1}{2}x(n) + \frac{1}{2}x(n-1) \\
y(n) &= \frac{1}{2}e^{i\omega n} + \frac{1}{2}e^{i\omega (n-1)} \quad \mbox{let } x(n) = e^{i\omega n} \\
y(n) &= \lr{\frac{1}{2} + \frac{1}{2}e^{-i\omega}}e^{i\omega n} \\
F(\omega)e^{i\omega n} &= \lr{\frac{1}{2} + \frac{1}{2}e^{-i\omega}}e^{i\omega n} \quad \mbox{since } y(n) = F(\omega)e^{i\omega n} \\
F(\omega) &= \frac{1}{2} + \frac{1}{2}e^{-i\omega}
\end{align*}
We can simplify this expression for $F(\omega)$ further by exploiting the symmetry in the coefficients: we can factor out half of the exponent:
\begin{align*}
F(\omega) &= \frac{1}{2} + \frac{1}{2}e^{-i\omega} \\
F(\omega) &= e^{-i\omega/2} \left[ \frac{1}{2}e^{i\omega/2} + \frac{1}{2}e^{-i\omega/2} \right] \quad \mbox{factor out } e^{-i\omega/2} \\
F(\omega) &= e^{-i\omega/2} \frac{\left[ e^{i\omega/2} + e^{-i\omega/2} \right]}{2} \\
F(\omega) &= e^{-i\omega/2} \cos(\omega/2) \\
F(\omega) &= \cos(\omega/2) e^{-i\omega/2}
\end{align*}
Since $\cos(\omega/2)$ is not always positive, it cannot serve as a magnitude everywhere, so we need to take a slight detour. Note that $F(\omega + 2\pi) = F(\omega)$ in \emph{discrete time}.
\end{example}
\begin{proof}
\begin{align*}
F(\omega + 2\pi) &= \sum \limits_{\ell \in \Z} f(\ell)e^{-i(\omega + 2\pi)\ell} \\
F(\omega + 2\pi) &= \sum \limits_{\ell \in \Z} f(\ell)e^{-i\omega} e^{-i 2\pi\ell} \\
F(\omega + 2\pi) &= \sum \limits_{\ell \in \Z} f(\ell)e^{-i\omega} \quad \mbox{since } \ell \in \Z \to e^{-i 2\pi\ell} = 1 \\
F(\omega + 2\pi) &= F(\omega)
\end{align*}
\end{proof}
So we only plot on some contiguous interval of length $2\pi$. With $F(\omega) = \cos(\omega/2)e^{-i\omega/2}$, this gives $\left| F(\omega) \right| = \left| \cos(\omega/2) \right| = \cos(\omega/2)$ for $\left| \omega \right| \leq \pi$, where the cosine is nonnegative.
% \img{images/magres.ps}
% \psset{xunit=1, yunit=1, algebraic, arrowscale=1.5}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3.5,-1.25)(3.5,2)
\psplot[algebraic,linewidth=1.5pt]{-3.14}{3.14}{cos(x/2)}
\psaxes[showorigin=false,labels=none, Dx=3.14](0,0)(-3.25,0)(3.25,0)
\rput(3.14, -0.35){$\pi$}
\rput(-3.14, -0.35){$-\pi$}
\rput(0, -0.35){$0$}
\rput(1.25,1.25 ){$\left| F(\omega) \right| =\cos(\omega/2)$}
\end{pspicture}
\end{center}
To determine the phase, we can recognize a pattern by looking at the frequency response in terms of its magnitude and phase:
\begin{align*}
F(\omega) &= \abs{F(\omega)}e^{i\angle F(\omega)}
\end{align*}
if $F(\omega) = \cos(\omega/2) e^{-i\omega/2}$, then by pattern recognition, $\abs{F(\omega)} = \cos(\omega/2)$, and $\angle F(\omega) = -\omega/2$. In this case, we have established that the cosine is positive on $(-\pi,\pi)$; where the cosine is negative, we would have to shift the phase by $\pm \pi$ to keep the magnitude nonnegative.
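As a quick check of the low-pass character, evaluate the closed form at the extremes of the interval:
\begin{align*}
F(0) &= \cos(0)e^{0} = 1 \quad \mbox{(DC passes unchanged)} \\
F(\pi) &= \cos(\pi/2)e^{-i\pi/2} = 0 \quad \mbox{(the highest DT frequency is blocked)}
\end{align*}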
\newpage
\section{Week 2}
\subsection{Infinite Duration Impulse Response}
When we looked at filters, we found that the system $y(n)=\frac{1}{2}x(n)+\frac{1}{2}x(n-1)$ is a low-pass filter. This was an example of a \emph{Finite Duration Impulse Response} filter, otherwise known as a finite impulse response (FIR) filter. FIR filters have no recursion. On the other hand, consider the \emph{Infinite Duration Impulse Response} (IIR) filter characterized by the Linear Constant-Coefficient Difference Equation (LCCDE):
$$ y(n) = \alpha y(n-1) + x(n), \quad \alpha \in (0,1), $$
where the system is causal and initially at rest.
Consider what would happen if the system were not initially at rest, that is, what if $y(-1) = 1 \neq 0$? This violates linearity because the system does not satisfy the zero-input-zero-output (ZIZO) property of linear systems. Let $x(n) = 0$ $ \forall n \in \Z$:
$$ y(0) = \alpha y(-1) + x(0) = \alpha \neq 0 $$
Since the zero signal did not produce a zero output, the system doesn't satisfy ZIZO, and is therefore not linear. What about Time Invariance (TI)? Let $\hat{x}(n) = x(n-N)$:
$$ \hat{y}(n) = \alpha \hat{y}(n-1) + \hat{x}(n) $$
To be valid, $\hat{x}$ and $\hat{y}$ must also satisfy the initial conditions (note that if the system were not recursive, the initial conditions would not be so important). So, the following must hold:
$$
\hat{y}(-1) = 1
$$
Since $\hat{y}(-1) = y(-1-N)$, we would need $y(-1-N) = 1$ to hold for every shift $N$, which is not guaranteed; therefore the system is not TI. Let's determine the frequency response of the system (causal and initially at rest).
The first method is to let $x(n) = e^{i\omega n}$ and $y(n) = H(\omega)e^{i\omega n}$, then plug into the recursive formula:
\begin{align*}
y(n) &= \alpha y(n-1) + x(n) \\
H(\omega)e^{i\omega n} &= \alpha H(\omega)e^{i\omega (n-1)} + e^{i\omega n} \\
H(\omega)e^{i\omega n} &= \alpha H(\omega)e^{i\omega n} e^{-i\omega} + e^{i\omega n} \\
H(\omega) &= \alpha H(\omega) e^{-i\omega} + 1 \quad \mbox{ divide by } e^{i\omega n} \\
H(\omega) - \alpha H(\omega) e^{-i\omega} &= 1 \\
H(\omega) \left(1 - \alpha e^{-i\omega} \right) &= 1 \\
H(\omega) &= \frac{1}{1 - \alpha e^{-i\omega}} \\
\end{align*}
We implicitly used the fact that $\left| \alpha \right| < 1$; otherwise $H(\omega) = \sum \limits_{n\in\Z} h(n)e^{-i\omega n}$ would diverge.
Alternatively, we could have used the following formula for the frequency response:
\begin{nicebox}
$$H(\omega) = \sum \limits_{n \in \Z} h(n) e^{-i\omega n}$$
\end{nicebox}
\begin{align*}
H(\omega) &= \sum \limits_{n=-\infty}^{\infty} h(n) e^{-i\omega n} \\
H(\omega) &= \sum \limits_{n=-\infty}^{\infty} \alpha^n u(n) e^{-i\omega n} \\
H(\omega) &= \sum \limits_{n=0}^{\infty} \alpha^n e^{-i\omega n} \quad \mbox{ change limits since } h(n) = \alpha^n u(n)\\
H(\omega) &= \sum \limits_{n=0}^{\infty} \left( \alpha e^{-i\omega} \right)^n \quad \mbox{ group as one exponential } \\
H(\omega) &= \frac{1}{1-\alpha e^{-i \omega}} \quad \mbox{ since } \left| \alpha e^{-i \omega} \right| = \left| \alpha \right| \left| e^{-i\omega} \right| = \left| \alpha \right| < 1
\end{align*}
Another method uses properties of the Discrete-Time Fourier Transform (DTFT). Recall that convolution in the time domain corresponds to multiplication in the frequency domain:
\begin{nicebox}
$$y(n) = (x*h)(n) \ftp Y(\omega) = X(\omega)H(\omega) $$
\end{nicebox}
This implies the following relationship:
$$ H(\omega) = \frac{Y(\omega)}{X(\omega)} $$
Let's derive the Fourier transform pair for $y(n-1)$:
\begin{align*}
\tilde{y}(n) &= y(n-1) \\
\tilde{y}(n) &\ftp \tilde{Y}(\omega) = \sum \limits_{n\in\Z} \tilde{y}(n)e^{-i\omega n} \\
y(n-1) &\ftp \tilde{Y}(\omega) = \sum \limits_{n\in\Z} y(n-1)e^{-i\omega n} \\
y(n-1) &\ftp \tilde{Y}(\omega) = \sum \limits_{m\in\Z} y(m)e^{-i\omega (m+1)} \quad \mbox{let } m = n-1 \\
y(n-1) &\ftp \tilde{Y}(\omega) = \lr{\sum \limits_{m\in\Z} y(m)e^{-i\omega m}} e^{-i\omega} \\
y(n-1) &\ftp \tilde{Y}(\omega) = Y(\omega)e^{-i\omega}
\end{align*}
We can generalize this to
\begin{nicebox}
$$y(n-N) \ftp Y(\omega)e^{-i\omega N}$$
\end{nicebox}
Now we can solve the recursive formula using the Fourier transform:
\begin{align*}
y(n) &= \alpha y(n-1) + x(n) \\
\ftrans{y(n)} &= \ftrans{\alpha y(n-1) + x(n)} \\
Y(\omega) &= \ftrans{\alpha y(n-1)} + \ftrans{x(n)} \\
Y(\omega) &= \alpha Y(\omega)e^{-i\omega} + X(\omega) \\
H(\omega) &= \alpha H(\omega)e^{-i\omega} + 1 \quad \mbox{divide by } X(\omega) \\
H(\omega) \left(1 - \alpha e^{-i\omega} \right) &= 1 \\
H(\omega) &= \frac{1}{1-\alpha e^{-i \omega}}
\end{align*}
One method to find the impulse response is to use a little intuition and analyze the recursive formula. First, let's solve for a few values of $h(n)$:
\begin{align*}
h(n) &= \alpha h(n-1) + \delta(n) \\
h(0) &= \alpha h(-1) + \delta(0) \\
h(0) &= 0 + 1 \\
h(1) &= \alpha h(0) + \delta(1) \\
h(1) &= \alpha + 0 \\
h(2) &= \alpha h(1) + \delta(2) \\
h(2) &= \alpha^2 \\
h(3) &= \alpha^3
\end{align*}
Remember that because the system is causal and initially at rest, $h(n) = 0$ for all $n < 0$. This suggests that $h(n) = \begin{cases} \alpha^n & n \geq 0 \\ 0 & n < 0 \end{cases}$
We can simplify this expression by using the \emph{Discrete-Time Unit Step} function, $u(n) = \begin{cases} 0 & n < 0 \\ 1 & n\geq0 \end{cases}$.
Now we can define the impulse response as $ h(n) = \alpha^n \cdot u(n)$. Given that $H(\omega) = \frac{1}{1 - \alpha e^{-i\omega}}$, we can find the magnitude response $\left| H(\omega) \right|$.
\begin{align*}
H(\omega) &= \frac{1}{1 - \alpha e^{-i\omega}} \\
H(\omega) &= \frac{e^{i\omega}}{e^{i\omega} - \alpha } \quad \mbox{multiply by } e^{i\omega} \\
\left| H(\omega) \right| &= \left| \frac{e^{i\omega}}{e^{i\omega} - \alpha } \right| \\
\left| H(\omega) \right| &= \frac{\left| e^{i\omega} \right|}{\left| e^{i\omega} - \alpha \right| } \\
\left| H(\omega) \right| &= \frac{1}{\left| e^{i\omega} - \alpha \right| } \\
\end{align*}
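The pieces above can be checked numerically. The following sketch (assuming NumPy; the value $\alpha = 0.8$ and the truncation length are arbitrary choices) simulates the recursion to recover $h(n) = \alpha^n u(n)$, then compares the truncated DTFT sum with the closed form and with $1/\abs{e^{i\omega}-\alpha}$:
\begin{verbatim}
import numpy as np

alpha, N = 0.8, 60
h = np.zeros(N)
prev = 0.0                              # initially at rest: h(-1) = 0
for n in range(N):
    prev = alpha * prev + (1.0 if n == 0 else 0.0)
    h[n] = prev
assert np.allclose(h, alpha ** np.arange(N))    # h(n) = alpha^n u(n)

w = np.linspace(-np.pi, np.pi, 7)
H_sum = np.array([np.sum(h * np.exp(-1j * wk * np.arange(N))) for wk in w])
H_closed = 1.0 / (1.0 - alpha * np.exp(-1j * w))
assert np.allclose(H_sum, H_closed, atol=1e-4)  # truncated DTFT matches
assert np.allclose(np.abs(H_closed), 1.0 / np.abs(np.exp(1j * w) - alpha))
\end{verbatim}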
%\img{images/filters/eminusalpha.ps}
\begin{center}
\begin{pspicture}(-5,-5)(5,5)
% y-axis
\rput(0.3,3.75){ $Im$ }
\psline{->}(0,-3.75)(0,3.75)
% x-axis
\rput(3.75,0.3){ $Re$ }
\psline{->}(-3.75,0)(3.75,0)
% the circle
\pscircle(0,0){ 3 }
% quadrants
\rput(0.3,3.3){ $i^1$ }
\rput(0.3,-3.3){ $i^2$ }
\rput(-3.3,0.3){ $i^3$ }
\rput(3.3,0.3){ $i^0$ }
% new vector
\rput(1.75,2){$e^{i\omega}$}
\psline[linewidth=1.25 pt]{->}(2.121,2.121)
% new vector
\rput(0.725,-0.2){$\alpha$}
\psline[linewidth=1.25 pt]{->}(1.500,0.000)
% new vector
\rput(2.3,1){$e^{i\omega}-\alpha$}
\psline[linewidth=1.5 pt]{->}(1.500,0.000)(2.121,2.121)
\rput(-0.75,-4.25){$1+\alpha$}
\rput(2.25,-4.25){$1-\alpha$}
\psline{<->}(-3,-4)(1.5,-4)
\psline{<->}(1.5,-4)(3,-4)
\psline[linestyle=dashed](3,-4.5)(3,0)
\psline[linestyle=dashed](-3,-4.5)(-3,0)
\psline[linestyle=dashed](1.5,-4.5)(1.5,0)
\end{pspicture}
\end{center}
By inspection, we can see that this represents a low-pass filter. When $\omega = 0$, we have $\left| e^{i\omega} - \alpha \right| = \left| 1 - \alpha \right|$, and when $\omega = \pm\pi$, we have $\left| e^{i\omega} - \alpha \right| = \left| 1 + \alpha \right|$. Looking at the unit circle, the vector $e^{i\omega}-\alpha$ has its maximum length when $\omega = \pm \pi$, so the magnitude response is smallest there, since the magnitude of this vector appears in the denominator. The magnitude response is largest when the denominator is smallest, at $\omega = 0$. This is why the filter is a low-pass filter. We can also note the curvature: when $\omega$ is near $\pm \pi$, the magnitude of $e^{i\omega}-\alpha$ changes much less than when $\omega$ is near 0.
%\img{images/filters/lpf.ps}
%\psset{xunit=1, yunit=1, algebraic, arrowscale=1.5}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3.5,-1)(3.75,3.5)
\psplot[algebraic,linewidth=1.5pt,plotpoints=1000]{-3.14}{3.14}{cos(x)+1.3}
\psaxes[showorigin=false,labels=none, Dx=3.14](0,0)(-3.25,0)(3.25,2.5)
\psline[linestyle=dashed](-3.14,0.3)(3.14,0.3)
\psline[linestyle=dashed](-3.14,2.3)(3.14,2.3)
\rput(3.6,2.3){$\frac{1}{1-\alpha}$}
\rput(3.6,0.3){$\frac{1}{1+\alpha}$}
\rput(3.14, -0.35){$\pi$}
\rput(-3.14, -0.35){$-\pi$}
\rput(0, -0.35){$0$}
\end{pspicture}
\end{center}
Design-oriented analysis allows you to generalize to similar systems. Notice that in the above example, if we wanted to make a high-pass filter, all we would need to do is take $\alpha \in (-1,0)$. Recall from the vector diagram that $e^{i\omega}-\alpha$ points from $\alpha$ to $e^{i\omega}$; with $\alpha$ negative, it points from a point on the negative real axis to $e^{i\omega}$. The magnitude response would still have a value of $\frac{1}{1-\alpha}$ at $\omega = 0$, and $\frac{1}{1+\alpha}$ at $\omega = \pm \pi$. The only difference is that the values at $\omega = \pm \pi$ are now the maxima.
To make an LPF or HPF sharper, let $\abs{\alpha}$ approach 1, without reaching 1. This is because we need a safety margin: numerical approximations and noise could send $\alpha$ outside the unit circle and we would have an unstable system ($\sum \alpha^n$ diverges when $\abs{\alpha} \geq 1$).
Let's define a new frequency response $H_N(\omega) = \frac{1}{1-\alpha e^{-i\omega N}}$. Notice that this corresponds to the difference equation $y(n) - \alpha y(n-N) = x(n)$:
\begin{align*}
H_N(\omega) &= \frac{1}{1-\alpha e^{-i\omega N}} \\
H_N(\omega) - \alpha H_N(\omega) e^{-i\omega N} &= 1 \\
H_N(\omega) e^{i\omega n} - \alpha H_N(\omega)e^{i\omega (n-N)} &= e^{i\omega n} \\
y(n) - \alpha y(n-N) &= x(n) \quad \mbox{identifying } y(n) = H_N(\omega)e^{i\omega n},\ x(n) = e^{i\omega n} \\
\end{align*}
To graph the magnitude response for a particular value of $N$, we can analyze the vectors representing the numerator and denominator. But, we can also perform a simple trick, since we know the graph of $H_1(\omega)$:
%\img{images/comb/comb1.ps}
%\psset{xunit=1, yunit=1, algebraic, arrowscale=1.5}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3.5,-1)(3.75,3.5)
\psplot[algebraic,linewidth=1.5pt,plotpoints=1000]{-3.14}{3.14}{cos(x)+1.3}
\psaxes[showorigin=false,labels=none, Dx=1.62](0,0)(-3.25,0)(3.25,2.5)
\psline[linestyle=dashed](-3.14,0.3)(3.14,0.3)
\psline[linestyle=dashed](-3.14,2.3)(3.14,2.3)
\rput(3.6,2.3){$\frac{1}{1-\alpha}$}
\rput(3.6,0.3){$\frac{1}{1+\alpha}$}
\rput(0,3){$N=1$}
\rput(3.14, -0.35){$\pi$}
\rput(1.62, -0.35){$\pi/2$}
\rput(-1.62, -0.35){$-\pi/2$}
\rput(-3.14, -0.35){$-\pi$}
\rput(0, -0.35){$0$}
\end{pspicture}
\end{center}
Now let $H_2(\omega) = H_1(2\omega)$; the graph of $H_1$ is compressed horizontally by a factor of 2, so we divide the frequency-axis labels by 2:
%\img{images/comb/comb2.ps}
%\psset{xunit=1, yunit=1, algebraic, arrowscale=1.5}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3.5,-1)(3.75,3.5)
\psplot[algebraic,linewidth=1.5pt,plotpoints=1000]{-3.14}{3.14}{cos(4*x/2)+1.3}
\psaxes[showorigin=false,labels=none, Dx=1.62](0,0)(-3.25,0)(3.25,2.5)
\psline[linestyle=dashed](-3.14,0.3)(3.14,0.3)
\psline[linestyle=dashed](-3.14,2.3)(3.14,2.3)
\rput(3.6,2.3){$\frac{1}{1-\alpha}$}
\rput(3.6,0.3){$\frac{1}{1+\alpha}$}
\rput(0,3){$N=2$}
\rput(3.14, -0.35){$\pi$}
\rput(1.62, -0.35){$\pi/2$}
\rput(-1.62, -0.35){$-\pi/2$}
\rput(-3.14, -0.35){$-\pi$}
\rput(0, -0.35){$0$}
\end{pspicture}
\end{center}
Notice that there will always be $N$ peaks and $N$ valleys over one period. In general, we can use the first graph to find the graph of the magnitude response for any value of $N$: the higher the $N$, the more oscillations (see the numerical check after the $N=4$ figure below).
%\img{images/comb/comb4.ps}
%\psset{xunit=1, yunit=1, algebraic, arrowscale=1.5}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3.5,-1)(3.75,3.5)
\psplot[algebraic,linewidth=1.5pt,plotpoints=1000]{-3.14}{3.14}{cos(8*x/2)+1.3}
\psaxes[showorigin=false,labels=none, Dx=1.62](0,0)(-3.25,0)(3.25,2.5)
\psline[linestyle=dashed](-3.14,0.3)(3.14,0.3)
\psline[linestyle=dashed](-3.14,2.3)(3.14,2.3)
\rput(3.6,2.3){$\frac{1}{1-\alpha}$}
\rput(3.6,0.3){$\frac{1}{1+\alpha}$}
\rput(0,3){$N=4$}
\rput(3.14, -0.35){$\pi$}
\rput(1.62, -0.35){$\pi/2$}
\rput(-1.62, -0.35){$-\pi/2$}
\rput(-3.14, -0.35){$-\pi$}
\rput(0, -0.35){$0$}
\end{pspicture}
\end{center}
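The peak/valley structure is easy to confirm numerically. A minimal sketch (assuming NumPy; the value $\alpha = 0.8$ and the frequency grid are arbitrary choices) checks that $\abs{H_N(\omega)}$ attains $\frac{1}{1-\alpha}$ at its peaks and $\frac{1}{1+\alpha}$ at its valleys for several $N$:
\begin{verbatim}
import numpy as np

def H_N(w, alpha, N):
    # frequency response of y(n) - alpha y(n-N) = x(n)
    return 1.0 / (1.0 - alpha * np.exp(-1j * w * N))

alpha = 0.8
w = np.linspace(-np.pi, np.pi, 8001)   # grid hits multiples of pi/4 exactly
for N in (1, 2, 4):
    mag = np.abs(H_N(w, alpha, N))
    assert np.isclose(mag.max(), 1.0 / (1.0 - alpha))  # peaks at w = 2*pi*k/N
    assert np.isclose(mag.min(), 1.0 / (1.0 + alpha))  # valleys halfway between
\end{verbatim}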
\subsection{Basic Circuits}
Consider a circuit with a resistor and capacitor in series.
%\img{images/circuits/combo/combo.ps}
Assume the capacitor is initially uncharged. The voltage across the capacitor is given by
$$ y(t) = \frac{1}{C} \int_{-\infty}^t f(\tau) d\tau $$
where $C$ is the capacitance, and $f(\tau)$ is the current. If we look at a resistor, the voltage across the resistor is given by
$$ g(t) = R f(t) $$
where $R$ is the resistance and $f(t)$ is the current. Thus, the current can also be written as $f(t) = \frac{g(t)}{R}$. The voltage across the resistor can also be given by $x-y$, thus we can solve for the equation that governs the current:
\begin{align*}
g(t) &= x(t) - y(t) \\
Rf(t) &= x(t) - y(t) \\
f(t) &= \frac{x(t)-y(t)}{R}
\end{align*}
Now plug $f(t)$ into the voltage equation for the capacitor:
\begin{align*}
y(t) &= \frac{1}{C} \int_{-\infty}^t f(\tau) d\tau \\
y(t) &= \frac{1}{C} \int_{-\infty}^t \frac{x(\tau)-y(\tau)}{R} d\tau \\
\frac{d}{dt}y(t) &= \frac{d}{dt}\frac{1}{C} \int_{-\infty}^t \frac{x(\tau)-y(\tau)}{R} d\tau \\
\frac{d}{dt}y(t) &= \frac{1}{RC} \lr{x(t)-y(t)} \\
RC\frac{d}{dt}y(t) + y(t) &= x(t) \\
\end{align*}
We can also write this governing equation compactly as
$$ RC \dot{y} + y = x $$
We can determine the frequency response using a few methods:
\begin{enumerate}
\item Letting $x(t) = e^{i\omega t}$, and $y(t) = H(\omega) e^{i\omega t}$
\item Using the Continuous-time Fourier Transform (CTFT)
\end{enumerate}
First, let's solve for the frequency response using the first method:
\begin{align*}
RC\dot{y}(t) + y(t) &= x(t) \\
RCi\omega H(\omega)e^{i\omega t} + H(\omega)e^{i\omega t} &= e^{i\omega t} \\
RCi\omega H(\omega) + H(\omega) &= 1 \\
H(\omega) &= \frac{1}{1 + i\omega RC}
\end{align*}
Now for the CTFT method. Recall the synthesis equation for the CTFT:
\begin{nicebox}
$$x(t) = \frac{1}{2\pi} \infint X(\omega) e^{i\omega t}d\omega $$
\end{nicebox}
Let's determine the Fourier transform pair for differentiation in the time domain:
\begin{align*}
x(t) &= \frac{1}{2\pi} \infint X(\omega) e^{i\omega t}d\omega \\
\frac{d}{dt}x(t) &= \frac{1}{2\pi} \frac{d}{dt}\infint X(\omega) e^{i\omega t}d\omega \\
\frac{d}{dt}x(t) &= \frac{1}{2\pi} \infint X(\omega) \frac{d}{dt}e^{i\omega t}d\omega \\
\frac{d}{dt}x(t) &= \frac{1}{2\pi} \infint X(\omega) i\omega e^{i\omega t}d\omega \\
\frac{d}{dt}x(t) &= \frac{1}{2\pi} \infint \tilde{X}(\omega) e^{i\omega t}d\omega \quad \mbox{where } \tilde{X}(\omega) = i\omega X(\omega) \\
\end{align*}
This tells us that differentiation in the time domain corresponds to multiplication by $i\omega$ in the frequency domain:
\begin{nicebox}
$$ \frac{d}{dt}x(t) \ftp i\omega X(\omega) $$
\end{nicebox}
Using this new relationship, let's solve for $H(\omega)$:
\begin{align*}
RC\dot{y}(t) + y(t) &= x(t) \\
RCi\omega Y(\omega) + Y(\omega) &= X(\omega) \\
RCi\omega H(\omega) + H(\omega) &= 1 \quad \mbox{since } H(\omega) =\frac{Y(\omega)}{X(\omega)} \\
H(\omega) &= \frac{1}{1 + i\omega RC}
\end{align*}
How do we plot $\abs{H(\omega)}$? What type of filter is it? Note that in continuous-time, we analyze this function along the $i\omega$ axis rather than around the unit circle. Let's first massage the equation for $H(\omega)$:
\begin{align*}
H(\omega) &= \frac{1}{1 + i\omega RC} \\
H(\omega) &= \frac{1/RC}{1/RC + i\omega} \\
H(\omega) &= \frac{1/RC}{i\omega - \lr{-1/RC}}
\end{align*}
Now let's look at the vector that this creates:
%\img{images/iomegaaxis/overrc/overrc.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3,-3)(3,3)
% y-axis
\rput(0.3,2.5){ $i\omega$ }
\psline{->}(0,-1.5)(0,2.5)
% x-axis
\rput(2.5,0.3){ $\Re$ }
\psline{->}(-2.5,0)(2.5,0)
\rput(-2,0.4){$-1/RC$}
\psline(-2,-0.2)(-2,0.2)
\rput(0.4,-1){$i\omega$}
\psline(-0.2,-1)(0.2,-1)
% new vector
\rput(-1.7,-0.8){$i\omega-\left(-1/RC\right)$}
\psline[linewidth=1.5 pt]{->}(-2,0)(0,-1)
\end{pspicture}
\end{center}
We can see that at $\omega=0$, the vector $i\omega - \lr{-1/RC}$ is shortest, so the magnitude response is largest there. As $\omega \to \pm\infty$, the vector grows and the magnitude response decays. To get the curvature of the plot, consider how the vector's length changes as $\omega$ varies.
% \img{images/graphs/respmag/respmag.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3,-3)(3,3)
\fileplot[linewidth=1.5pt]{magresp.dat}
% function name
\rput(3.14159265,1){$\left|H(\omega)\right|$}
% x-axis
\psline(-3.54159265, 0)(3.54159265, 0)
\rput(3.54159265,-0.3){$\omega$}
\rput(0.85840735, 1.2){$(1/\sqrt{2})$}
\rput(-1.14159265, 1.2){$(1/\sqrt{2})$}
\rput(0.85840735, -0.4){$1/RC$}
\rput(-1.14159265, -0.4){$-1/RC$}
\psline[linestyle=dashed](0.85840735, 0)(0.85840735, 0.85)
\psline[linestyle=dashed](-1.14159265, 0)(-1.14159265, 0.85)
\psline[linestyle=dashed](-1.14159265, 0.85)(0.85840735, 0.85)
% begin x-axis labels:
\psline(-3.14159265, -0.1)(-3.14159265, 0.1)
\psline(-1.14159265, -0.1)(-1.14159265, 0.1)
\psline(0.85840735, -0.1)(0.85840735, 0.1)
\psline(2.85840735, -0.1)(2.85840735, 0.1)
% end x-axis labels:
\end{pspicture}
\end{center}
When does the magnitude response have a value of $\frac{1}{\sqrt{2}}$? Using the Pythagorean theorem, we can see that this occurs at $\omega = \pm \frac{1}{RC}$.
\begin{align*}
\abs{H(\omega)} &= \abs{\frac{1/RC}{i\omega - \lr{-1/RC}}} \\
\abs{H(1/RC)} &= \frac{\abs{1/RC}}{\abs{i \lr{1/RC} - \lr{-1/RC}}} \\
\abs{H(1/RC)} &= \frac{1}{\abs{i + 1}} = \frac{1}{\sqrt{2}}
\end{align*}
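A quick numerical check confirms the half-power point and the phase there (the component values below are illustrative assumptions, not values from the text):
\begin{verbatim}
import numpy as np

R, C = 1e3, 1e-6               # assumed values: 1 kOhm, 1 uF -> 1/RC = 1000
H = lambda w: 1.0 / (1.0 + 1j * w * R * C)

wc = 1.0 / (R * C)
assert np.isclose(abs(H(wc)), 1.0 / np.sqrt(2))   # half-power point
assert np.isclose(np.angle(H(wc)), -np.pi / 4)    # phase is -pi/4 there
assert abs(H(0)) == 1.0                           # unit DC gain of the LPF
\end{verbatim}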
To increase the bandwidth of this filter, what do we do? Note that we can decrease either $R$ or $C$ and $\frac{1}{RC}$ will increase. Now, to plot the phase response of the system:
\begin{align*}
\angle H(\omega) &= \angle \frac{1/RC}{i\omega - \lr{-1/RC}} \\
\angle H(\omega) &= \angle \lr{1/RC} - \angle \lr{i\omega - \lr{-1/RC}} \\
\angle H(\omega) &= 0 - \tan^{-1}(\omega RC) \\
\angle H(\omega) &= -\tan^{-1}(\omega RC) \\
\end{align*}
% \img{images/graphs/respmag/freq/phaseresp.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3,-3)(3,3)
\fileplot[linewidth=1.5pt]{phaseresp.dat}
% function name
\rput(3.14159265,0.99783018390619){$\angle H(\omega)$}
% x-axis
\psline{->}(-3.54159265, 0)(3.54159265, 0)
\rput(3.54159265,-0.3){$\omega$}
% y-axis
\psline{->}(0,-2)(0,2)
% begin x-axis labels:
\psline(-0.1, -1.570796325)(0.1, -1.570796325)
\rput(3.2, -1.570796325){$-\frac{\pi}{2}$}
\psline(-0.1, 1.570796325)(0.1, 1.570796325)
\rput(3.2, 1.570796325){$\frac{\pi}{2}$}
% end y-axis labels:
\psline[linestyle=dashed](-3.14, -1.570796325)(3.14, -1.570796325)
\psline[linestyle=dashed](-3.14, 1.570796325)(3.14, 1.570796325)
\end{pspicture}
\end{center}
What if we take the voltage across the resistor instead of the capacitor?
% \img{images/circuits/combo/variation/combo.ps}
If $\omega = 0$, the capacitor acts as an open circuit: no current flows around the loop, and there is no voltage drop across the resistor. Therefore, there is zero gain; in other words, $G(0)=0$. As $\omega \to \infty$, the capacitor acts as a short circuit, so the voltage drop across the resistor, $y$, becomes $x$. In other words, as $\omega \to \infty$, $G(\omega) \to 1$. So this is a high-pass filter.
% \img{images/circuits/combo/both/combo.ps}
The voltage drop across the circuit is the sum of the voltage drops across the resistor and the capacitor, $y_r + y_c$.
\begin{align*}
x(t) &= y_r(t) + y_c(t) \\
X(\omega) &= Y_r(\omega) + Y_c(\omega) \\
X(\omega) &= G(\omega)X(\omega) + H(\omega)X(\omega) \\
1 &= G(\omega) + H(\omega) \\
G(\omega) &= 1 - H(\omega) = 1 - \frac{1}{1+i\omega RC} = \frac{i\omega RC}{1 + i\omega RC} \\
G(\omega) &= \frac{i\omega RC}{1 + i\omega RC}
\end{align*}
Note that as $\omega \to \infty$, the $i\omega RC$ term dominates and the 1 in the denominator becomes negligible. Thus, as $\omega \to \infty$, $G(\omega) \to 1$.
\subsection{Decomposition of Signals}
Suppose we have a periodic signal with fundamental period 2.
% \img{images/deltas/periodic/periodic.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3,-1)(4,4)
\rput(-2.5,1.5){$\cdots$}
\rput(3.5,1.5){$\cdots$}
% line
\psline{-*}(-2,0)(-2,2)
% value
\rput(-1.7,2){(2)}
% position
\rput(-2,-0.3){-2}
% line
\psline{-*}(-1,0)(-1,3)
% value
\rput(-0.7,3){(3)}
% position
\rput(-1,-0.3){-1}
% line
\psline{-*}(0,0)(0,2)
% value
\rput(0.3,2){(2)}
% position
\rput(0,-0.3){0}
% line
\psline{-*}(1,0)(1,3)
% value
\rput(1.3,3){(3)}
% position
\rput(1,-0.3){1}
% line
\psline{-*}(2,0)(2,2)
% value
\rput(2.3,2){(2)}
% position
\rput(2,-0.3){2}
% line
\psline{-*}(3,0)(3,3)
% value
\rput(3.3,3){(3)}
% position
\rput(3,-0.3){3}
% axes
\psline{->}(-3,0)(4,0)
\rput(4,-0.3){ $n$ }
\rput(4,3){ $x(n)$ }
\end{pspicture}
\end{center}
We can then write the signal $x$ as a linear combination of two signals $\Psi_0(n)$, and $\Psi_1(n)$:
% \img{images/deltas/bases/periodic.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-6,-1)(6,6)
\rput(-4.4, 0.5){$\cdots$}
\rput(5.4, 0.5){$\cdots$}
\rput(-4.4, 3.5){$\cdots$}
\rput(5.4, 3.5){$\cdots$}
% line
\psline{-*}(-4,3)(-4,4)
% value
\rput(-3.7,4){(1)}
% position
\rput(-4,2.7){-4}
% line
\psline{-*}(-3,3)(-3,3)
% position
\rput(-3,2.7){-3}
% line
\psline{-*}(-2,3)(-2,4)
% value
\rput(-1.7,4){(1)}
% position
\rput(-2,2.7){-2}
% line
\psline{-*}(-1,3)(-1,3)
% position
\rput(-1,2.7){-1}
% line
\psline{-*}(0,3)(0,4)
% value
\rput(0.3,4){(1)}
% position
\rput(0,2.7){0}
% line
\psline{-*}(1,3)(1,3)
% position
\rput(1,2.7){1}
% line
\psline{-*}(2,3)(2,4)
% value
\rput(2.3,4){(1)}
% position
\rput(2,2.7){2}
% line
\psline{-*}(3,3)(3,3)
% position
\rput(3,2.7){3}
% line
\psline{-*}(4,3)(4,4)
% value
\rput(4.3,4){(1)}
% position
\rput(4,2.7){4}
% axes
\psline{->}(-5,3)(5,3)
\rput(5,2.7){ $n$ }
\rput(5,4){ $\Psi_0(n)$ }
% line
\psline{-*}(-4,0)(-4,0)
% position
\rput(-4,-0.3){-4}
% line
\psline{-*}(-3,0)(-3,1)
% value
\rput(-2.7,1){(1)}
% position
\rput(-3,-0.3){-3}
% line
\psline{-*}(-2,0)(-2,0)
% position
\rput(-2,-0.3){-2}
% line
\psline{-*}(-1,0)(-1,1)
% value
\rput(-0.7,1){(1)}
% position
\rput(-1,-0.3){-1}
% line
\psline{-*}(0,0)(0,0)
% position
\rput(0,-0.3){0}
% line
\psline{-*}(1,0)(1,1)
% value
\rput(1.3,1){(1)}
% position
\rput(1,-0.3){1}
% line
\psline{-*}(2,0)(2,0)
% position
\rput(2,-0.3){2}
% line
\psline{-*}(3,0)(3,1)
% value
\rput(3.3,1){(1)}
% position
\rput(3,-0.3){3}
% line
\psline{-*}(4,0)(4,0)
% position
\rput(4,-0.3){4}
% axes
\psline{->}(-5,0)(5,0)
\rput(5,-0.3){ $n$ }
\rput(5,1){ $\Psi_1(n)$ }
\end{pspicture}
\end{center}
Therefore, we can write
$$x(n) = 2\Psi_0(n) + 3\Psi_1(n)$$
Here is another way to decompose the signal:
\begin{align*}
\left[ \begin{array}{r} 2 \\ 3 \\ \end{array} \right] &= 2 \left[ \begin{array}{r} 1 \\ 0 \\ \end{array} \right] + 3 \left[ \begin{array}{r} 0 \\ 1 \\ \end{array} \right] \\
x &= x(0)\Psi_0 + x(1)\Psi_1
\end{align*}
Fourier analysis is a way of decomposing signals in terms of some special signals that tell us information:
% \img{images/projections/bases/bases.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3,-6)(3,6)
% y-axis
\psline{->}(0,-6)(0,-1)
% x-axis
\psline{->}(-2.5,-3.5)(2.5,-3.5)
% new vector
\rput(0.8,-4.8){$\Psi_1 = \left[ \begin{array}{r} 1 \\ -1 \\ \end{array} \right]$}
\psline[linewidth=1.5 pt]{->}(0,-3.5)(1,-4.5)
% new vector
\rput(2.22487174976714,-2){$\vec{x}$}
\psline[linewidth=1.5 pt]{->}(0,-3.5)(2.42487174976714,-2.10000107243568)
% new vector
\rput(0.8,-2.5){$\Psi_0 = \left[ \begin{array}{r} 1 \\ 1 \\ \end{array} \right]$}
\psline[linewidth=1.5 pt]{->}(0,-3.5)(1,-2.5)
% y-axis
\psline{->}(0,0)(0,5)
% x-axis
\psline{->}(-2.5,2.5)(2.5,2.5)
% new vector
\rput(2.22487174976714,3.95){$\vec{x}$}
\psline[linewidth=1.5 pt]{->}(0,2.5)(2.42487174976714,3.89999892756432)
% new vector
\rput(1,2){$\Psi_0 = \left[ \begin{array}{r} 1 \\ 0 \\ \end{array} \right]$}
\psline[linewidth=1.5 pt]{->}(0,2.5)(0,3.5)
% new vector
\rput(0,3.5){$\Psi_1 = \left[ \begin{array}{r} 0 \\ 1 \\ \end{array} \right]$}
\psline[linewidth=1.5 pt]{->}(0,2.5)(1,2.5)
\end{pspicture}
\end{center}
What we want is to write $x$ as a linear combination of these signals:
$$ x = X_0\Psi_0 + X_1\Psi_1 $$
So to find $X_0$, we can project $\vec{x}$ onto $\Psi_0$:
\begin{align*}
\langle\vec{x}, \Psi_0\rangle &= \langle X_0\Psi_0 + X_1\Psi_1, \Psi_0\rangle \\
\langle\vec{x}, \Psi_0\rangle &= X_0\langle\Psi_0,\Psi_0\rangle +X_1\langle\Psi_1,\Psi_0\rangle \\
\langle\vec{x}, \Psi_0\rangle &= X_0\langle\Psi_0,\Psi_0\rangle + 0\quad \mbox{(because of orthogonality)}\\
X_0 &= \frac{\langle\vec{x}, \Psi_0\rangle }{\langle\Psi_0,\Psi_0\rangle}\\
\end{align*}
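For instance, using the orthogonal pair $\Psi_0 = \left[ \begin{array}{r} 1 \\ 1 \\ \end{array} \right]$, $\Psi_1 = \left[ \begin{array}{r} 1 \\ -1 \\ \end{array} \right]$ from the figure and the earlier vector $\vec{x} = \left[ \begin{array}{r} 2 \\ 3 \\ \end{array} \right]$:
\begin{align*}
X_0 &= \frac{\langle\vec{x}, \Psi_0\rangle}{\langle\Psi_0,\Psi_0\rangle} = \frac{2+3}{2} = \frac{5}{2}, \quad X_1 = \frac{\langle\vec{x}, \Psi_1\rangle}{\langle\Psi_1,\Psi_1\rangle} = \frac{2-3}{2} = -\frac{1}{2} \\
\vec{x} &= \frac{5}{2}\left[ \begin{array}{r} 1 \\ 1 \\ \end{array} \right] - \frac{1}{2}\left[ \begin{array}{r} 1 \\ -1 \\ \end{array} \right] = \left[ \begin{array}{r} 2 \\ 3 \\ \end{array} \right]
\end{align*}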
Fourier analysis says if we have a signal with period $p$, then we can represent the signal with complex exponentials that are harmonically related by a fundamental frequency $\omega_0 = 2\pi/p$:
\begin{nicebox}
$$ x(n) = \sum \limits_{k=0}^{p-1}X_ke^{ik\omega_0 n} $$
\end{nicebox}
We can also write this relationship as
\begin{align*}
x(n) &= \sum \limits_{k=0}^{p-1}X_k\Psi_k(n) \quad \mbox{where } e^{ik\omega_0 n} = \Psi_k(n) \\
\end{align*}
Or even more simplified:
$$ x = \sum \limits_{k=\langle p\rangle}X_k\Psi_k $$
% Examples
Let $p=2$, then $\omega_0 = 2\pi/2 = \pi$.
\begin{align*}
\Psi_0(n) &= e^{i0\omega_0n} = 1 \\
\Psi_1(n) &= e^{i\omega_0n} = e^{i\pi n} = \lr{-1}^n \\
\end{align*}
% \img{images/deltas/single/single.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3,-1)(4,4)
% line
\psline{-*}(0,0)(0,2)
% value
\rput(0.3,2){(2)}
% position
\rput(0,-0.3){0}
% line
\psline{-*}(1,0)(1,3)
% value
\rput(1.3,3){(3)}
% position
\rput(1,-0.3){1}
% axes
\psline{->}(-1,0)(2,0)
\rput(2,-0.3){ $n$ }
\rput(2,3){ $x(n)$ }
\end{pspicture}
\end{center}
How can we determine $\Psi_0$ and $\Psi_1$? Since the fundamental period is two, we need only one contiguous interval of length 2. Let's use 0 and 1:
\begin{align*}
\Psi_0(0) = 1 \\
\Psi_0(1) = 1 \\
\Psi_1(0) = \lr{-1}^0 = 1 \\
\Psi_1(1) = \lr{-1}^1 = -1 \\
\end{align*}
Hence the basis vectors are as shown:
% \img{images/deltas/bases/single/single.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3,-2)(5,5)
% line
\psline{-*}(0,3)(0,4)
% value
\rput(0.3,4){(1)}
% position
\rput(0,2.7){0}
% line
\psline{-*}(1,3)(1,4)
% value
\rput(1.3,4){(1)}
% position
\rput(1,2.7){1}
% axes
\psline{->}(-1,3)(2,3)
\rput(2,2.7){ $n$ }
\rput(2,4){ $\Psi_0(n)$ }
% line
\psline{-*}(0,0)(0,1)
% value
\rput(0.3,1){(1)}
% position
\rput(0,-0.3){0}
% line
\psline{-*}(1,0)(1,-1)
% value
\rput(1.3,-1){(-1)}
% position
\rput(1,0.3){1}
% axes
\psline{->}(-1,0)(2,0)
\rput(2,-0.3){ $n$ }
\rput(2,1){ $\Psi_1(n)$ }
\end{pspicture}
\end{center}
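Projecting the period-2 signal onto these basis vectors gives its DFS coefficients:
\begin{align*}
X_0 &= \frac{1}{2}\lr{x(0)\Psi_0^*(0) + x(1)\Psi_0^*(1)} = \frac{2+3}{2} = \frac{5}{2} \\
X_1 &= \frac{1}{2}\lr{x(0)\Psi_1^*(0) + x(1)\Psi_1^*(1)} = \frac{2-3}{2} = -\frac{1}{2} \\
x(n) &= \frac{5}{2} - \frac{1}{2}\lr{-1}^n
\end{align*}
As a check, $x(0) = \frac{5}{2}-\frac{1}{2} = 2$ and $x(1) = \frac{5}{2}+\frac{1}{2} = 3$, as required.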
What frequencies can be present in a signal $\vec{x}$? The harmonics of a signal with period $p$ are $\{0,\omega_0,2\omega_0,\dots,(p-1)\omega_0\}$. Notice that $p\omega_0$ is missing. This is because $e^{ip\omega_0n}=e^{i2\pi n} = e^{i0n}$. Keep in mind that $\omega_0 = 2\pi/p$. In fact, the complex exponential basis vectors are periodic both in $n$ and in their indices $k$.
\begin{align*}
\Psi_k(n+p) &= e^{ik\omega_0(n+p)}=e^{ik\omega_0n}e^{ik\omega_0p}=e^{ik\omega_0n} = \Psi_k(n) \quad \mbox{since } e^{ik\omega_0p} = e^{ik2\pi} = 1 \\
\Psi_{k+p}(n) &= e^{i(k+p)\omega_0n} = e^{ik\omega_0n}e^{ip\omega_0n} = \Psi_k(n) \quad \mbox{since } e^{ip\omega_0n} = e^{i2\pi n} = 1
\end{align*}
So we know that $\Psi_{-1}(n) = e^{-i\omega_0n} = \Psi_{p-1}(n) = e^{i(p-1)\omega_0n}$. In general, then, we can write out a discrete Fourier series expansion over any contiguous set of $p$ integers. The notation used is
$$ x(n) = \sum \limits_{k=\langle p \rangle} X_ke^{ik\omega_0n}$$
\subsection{Inner Products and Fourier Series}
\begin{claim}
For a signal written in terms of vectors that represent complex exponentials,
$$x(n) = \sum_{k=\langle p\rangle}X_k\Psi_k(n)$$
the spanning set of complex exponential vectors is an orthogonal set; in other words,
$$ \Psi_k \perp \Psi_\ell \quad k \neq \ell$$
\end{claim}
\begin{proof}
For $\Psi_k(n) = e^{ik\omega_0n}$ and $\Psi_\ell(n) = e^{i\ell\omega_0n}$, we can write
$\Psi_k =
\left[
\begin{array}{c}
\Psi_k(0) \\
\Psi_k(1) \\
\vdots \\
\Psi_k(p-1) \\
\end{array}
\right]
$, and
$\Psi_\ell =
\left[
\begin{array}{c}
\Psi_\ell(0) \\
\Psi_\ell(1) \\
\vdots \\
\Psi_\ell(p-1) \\
\end{array}
\right]
$.
Note that for an aperiodic signal, the inner product can be written as $\langle\Psi_k,\Psi_\ell\rangle = \sum \limits_{n=-\infty}^{\infty}\Psi_k(n)\Psi_\ell^*(n)$ or $\int_{-\infty}^{\infty}\Psi_k(t)\Psi_\ell^*(t)dt$. For this proof, we will use the summation over one period of a periodic signal. We can take the inner product
\begin{align*}
\langle\Psi_k,\Psi_\ell\rangle &= \sum \limits_{n=0}^{p-1}\Psi_k(n)\Psi_\ell^*(n) \\
\langle\Psi_k,\Psi_\ell\rangle &= \sum \limits_{n=0}^{p-1} e^{ik\omega_0n}e^{-i\ell\omega_0n} \\
\langle\Psi_k,\Psi_\ell\rangle &= \sum \limits_{n=0}^{p-1} e^{i(k-\ell)\omega_0n} \\
\langle\Psi_k,\Psi_\ell\rangle &= \sum \limits_{n=0}^{p-1} \lr{e^{i(k-\ell)\omega_0}}^n \\
\end{align*}
Before we arrive at our answer, let's take a slight mathematical detour. Recall that we can write an infinite geometric sum $S$ as
\begin{align*}
S &= \sum \limits_{k=0}^{\infty}\alpha^k \\
S &= 1 + \alpha + \alpha^2 + \cdots \\
\alpha S &= \alpha + \alpha^2 + \alpha^3 + \cdots \\
(1-\alpha)S &= 1 \\
S &= \frac{1}{1-\alpha} \quad \mbox{if } \abs{\alpha} < 1
\end{align*}
or for a finite sum
\begin{align*}
S &= \sum \limits_{n=A}^{B}\alpha^n \\
S &= \alpha^A + \alpha^{A+1} + \cdots + \alpha^B \\
\alpha S &= \alpha^{A+1} + \alpha^{A+2} + \cdots + \alpha^{B+1} \\
(1-\alpha)S &= \alpha^{B+1} - \alpha^A \\
S &= \frac{\alpha^{B+1}-\alpha^{A}}{1-\alpha} \quad \mbox{if }\alpha \neq 1 \\
S &= \begin{cases}
\frac{\alpha^{B+1}-\alpha^{A}}{1-\alpha} & \alpha\neq1 \\
B-A+1 & \alpha=1 \\
\end{cases}
\end{align*}
Now back to our proof:
\begin{align*}
\langle\Psi_k,\Psi_\ell\rangle &= \sum \limits_{n=0}^{p-1} \lr{e^{i(k-\ell)\omega_0}}^n \\
\langle\Psi_k,\Psi_\ell\rangle &= \begin{cases} \frac{\lr{e^{i(k-\ell)\omega_0}}^p-\lr{e^{i(k-\ell)\omega_0}}^0}{1-e^{i(k-\ell)\omega_0}} & k\neq\ell \\ (p-1)-0+1 & k=\ell \\ \end{cases} \\
\langle\Psi_k,\Psi_\ell\rangle &= \begin{cases} 0 & k\neq\ell\\ p & k=\ell \\ \end{cases} \quad \mbox{since } \lr{e^{i(k-\ell)\omega_0}}^p = e^{i2\pi(k-\ell)} = 1
\end{align*}
\end{proof}
We can generalize the result of the above proof further as
$$ \langle\Psi_k,\Psi_\ell\rangle = p\delta(k-\ell) $$
This works well because the inner product of a vector with itself is the \emph{norm} squared, and the inner product with any other orthogonal vector is 0.
$$\langle\Psi_k,\Psi_k\rangle = \norm{\Psi_k}^2 = p$$
A few properties about the inner product:
\begin{align*}
\langle f,g\rangle &= f^Tg^* \\
\langle \alpha f,g \rangle &= \alpha\langle f,g\rangle \\
\langle f,\beta g \rangle &= \beta^* \langle f,g\rangle \\
\langle \sum_k \alpha_k f_k, g\rangle &= \sum_k \alpha_k\langle f_k,g\rangle \\
\end{align*}
So to find $X_\ell$, we can project $\vec{x}$ onto $\Psi_\ell$.
\begin{align*}
\langle\vec{x}, \Psi_\ell\rangle &= \langle X_0\Psi_0 + \cdots + X_\ell\Psi_\ell + \cdots + X_{p-1}\Psi_{p-1}, \Psi_\ell\rangle \\
\langle\vec{x}, \Psi_\ell\rangle &= X_0\langle\Psi_0,\Psi_\ell\rangle + \cdots + X_\ell\langle\Psi_\ell,\Psi_\ell\rangle + \cdots + X_{p-1}\langle\Psi_{p-1},\Psi_\ell\rangle \\
\langle\vec{x}, \Psi_\ell\rangle &= X_\ell\langle\Psi_\ell,\Psi_\ell\rangle \quad \mbox{(all other inner products were 0)}\\
X_\ell &= \frac{\langle\vec{x}, \Psi_\ell\rangle }{\langle\Psi_\ell,\Psi_\ell\rangle}\\
X_\ell &= \frac{\langle\vec{x}, \Psi_\ell\rangle }{p}\\
X_\ell &= \frac{\vec{x}^T\Psi_\ell^* }{p}\\
X_\ell &= \frac{1}{p}\sum \limits_{n=0}^{p-1}x(n)\Psi_\ell^*(n) \\
X_\ell &= \frac{1}{p}\sum \limits_{n=0}^{p-1}x(n)e^{-i\ell\omega_0n} \\
\end{align*}
With the discrete Fourier series, we can write signals in two distinct forms. We can write a signal in the time domain as a sum of complex exponentials, which tells us what happens in the frequency domain:
\begin{nicebox}
\begin{align*}
x(n) &= \sum \limits_{k=\langle p\rangle}X_ke^{ik\omega_0n} \quad &\mbox{(synthesis equation)}
\end{align*}
\end{nicebox}
The coefficient in the frequency domain tells us how much a given frequency contributes to the overall signal; it is given by the running sum of the product of the time-domain signal and the complex exponential at the frequency of interest:
\begin{nicebox}
\begin{align*}
X_k &= \frac{1}{p} \sum \limits_{n=\langle p\rangle}x(n)e^{-ik\omega_0n} \quad &\mbox{(analysis equation)}
\end{align*}
\end{nicebox}
\newpage
\section{Week 3}
\subsection{Discrete Fourier Series}
We can write a periodic signal $x$ as a linear combination of terms:
\begin{align*}
x &= X_0\Psi_0 + X_1\Psi_1 + \cdots + X_{p-1}\Psi_{p-1}
\end{align*}
Since we are familiar with linear algebraic language, we can write this in a much more compact form. First we can write $X$ as a $p \times 1$ column vector:
\begin{align*}
X = \left[ \begin{array}{c} X_0 \\ X_1 \\ \vdots \\ X_{p-1} \\ \end{array} \right]
\end{align*}
We know that $\Psi_k(n) = e^{ik\omega_0 n}$, where $\omega_0 = 2\pi/p$. We can then write $\Psi_k$ as:
\begin{align*}
\Psi_k = \left[ \begin{array}{c} \Psi_k(0) \\ \Psi_k(1) \\ \vdots \\ \Psi_k(p-1) \\ \end{array} \right] = \left[ \begin{array}{c} 1 \\ e^{ik\omega_0} \\ \vdots \\ e^{ik\omega_0(p-1)} \\ \end{array} \right] \\
\end{align*}
Remember that the $k$th basis signal is $\Psi_k(n) = e^{ik\omega_0 n}$, indexed by $k$ and evaluated at $n = 0, 1, \dots, p-1$. We don't need an entry for $n = p$ because $e^{ik\omega_0 p}=e^{ik2\pi} = 1$, the same as for $n = 0$. We have already shown that these basis signals are orthogonal, hence we can write:
\begin{align*}
\Psi_k^T\Psi_\ell^* &= p\delta(k-\ell) = \begin{cases} 0 & k\neq\ell \\ p & k=\ell \\ \end{cases} \\
\lr{\Psi_k^T}^*\Psi_\ell &= p\delta(k-\ell) \quad \mbox{take the conjugate of both sides}\\
\Psi_k^H\Psi_\ell &= p\delta(k-\ell) \\
\end{align*}
Here we can define the \emph{Hermitian Transpose} as the conjugate of the transpose:
\begin{nicebox}
$$ \lr{\Psi_k^T}^* = \Psi_k^H $$
\end{nicebox}
Now we can continue with our signal $x$. Let's go back to the initial equation, $x = X_0\Psi_0 + X_1\Psi_1 + \cdots + X_{p-1}\Psi_{p-1}$, and write it in a slightly different form: a matrix whose columns are the basis vectors, multiplied by a vector of coefficients:
\begin{align*}
x &=
\left[
\begin{array}{cccc}
\Psi_0(0) & \Psi_1(0) & \cdots & \Psi_{p-1}(0) \\
\Psi_0(1) & \Psi_1(1) & \cdots & \Psi_{p-1}(1) \\
\vdots & \vdots & \ddots & \vdots \\
\Psi_0(p-1) & \Psi_1(p-1) & \cdots & \Psi_{p-1}(p-1) \\
\end{array}
\right]
\left[ \begin{array}{c} X_0 \\ X_1 \\ \vdots \\ X_{p-1} \\ \end{array} \right]
\\
x &= \left[ \begin{array}{rrrr} \Psi_0 & \Psi_1 & \cdots & \Psi_{p-1} \\ \end{array} \right] \left[ \begin{array}{c} X_0 \\ X_1 \\ \vdots \\ X_{p-1} \\ \end{array} \right] \\
x &= \Psi X
\end{align*}
This is the synthesis equation: it tells us exactly how to obtain $x$ as a linear combination of the coefficients in $X$ and the exponential vectors in the matrix $\Psi$. How do we solve for $X$? What we are after are the coefficients, which determine the linear combination of the complex exponential basis vectors that reproduces the signal over one period $p$. To do this, we use some more linear algebra:
\begin{align*}
x &= \Psi X \\
\Psi^{-1}x &= \Psi^{-1}\Psi X \\
\Psi^{-1}x &= X \\
X &= \Psi^{-1}x
\end{align*}
But in our case, we don't need to invert $\Psi$ explicitly. Taking inverses is numerically expensive, so any opportunity to work around it should be taken. We can instead multiply by the Hermitian transpose of $\Psi$:
$$
\Psi^H = \left[ \begin{array}{c} \Psi_0^H \\ \Psi_1^H \\ \vdots \\ \Psi_{p-1}^H \\ \end{array} \right]
$$
where each $\Psi_k^H$ is a $1 \times p$ row:
$$
\Psi_k^H = \left[ \begin{array}{rrrr} \Psi_k(0)^H & \Psi_k(1)^H & \cdots & \Psi_k(p-1)^H \\ \end{array} \right]
$$
Hence we have the matrix:
$$
\Psi^H =
\left[
\begin{array}{cccc}
\Psi_0^H(0) & \Psi_0^H(1) & \cdots & \Psi_0^H(p-1) \\
\Psi_1^H(0) & \Psi_1^H(1) & \cdots & \Psi_1^H(p-1) \\
\vdots & \vdots & \ddots & \vdots \\
\Psi_{p-1}^H(0) & \Psi_{p-1}^H(1) & \cdots & \Psi_{p-1}^H(p-1) \\
\end{array}
\right]
$$
So when we multiply $\Psi^H\Psi$, we get:
$$
\Psi^H\Psi =
\left[
\begin{array}{cccc}
\Psi_0^H(0) & \Psi_0^H(1) & \cdots & \Psi_0^H(p-1) \\
\Psi_1^H(0) & \Psi_1^H(1) & \cdots & \Psi_1^H(p-1) \\
\vdots & \vdots & \ddots & \vdots \\
\Psi_{p-1}^H(0) & \Psi_{p-1}^H(1) & \cdots & \Psi_{p-1}^H(p-1) \\
\end{array}
\right]
\left[
\begin{array}{cccc}
\Psi_0(0) & \Psi_1(0) & \cdots & \Psi_{p-1}(0) \\
\Psi_0(1) & \Psi_1(1) & \cdots & \Psi_{p-1}(1) \\
\vdots & \vdots & \ddots & \vdots \\
\Psi_0(p-1) & \Psi_1(p-1) & \cdots & \Psi_{p-1}(p-1) \\
\end{array}
\right]
$$
Let's simplify things a bit and keep a compact form by using row vectors and column vectors in our matrices:
\begin{align*}
\Psi^H\Psi &= \left[ \begin{array}{c} \Psi_0^H \\ \Psi_1^H \\ \vdots \\ \Psi_{p-1}^H \\ \end{array} \right] \left[ \begin{array}{rrrr} \Psi_0 & \Psi_1 & \cdots & \Psi_{p-1} \\ \end{array} \right] \\
\Psi^H\Psi &= \left[
\begin{array}{cccc}
\Psi_0^H\Psi_0 & \Psi_0^H\Psi_1 & \cdots & \Psi_0^H\Psi_{p-1} \\
\Psi_1^H\Psi_0 & \Psi_1^H\Psi_1 & \cdots & \Psi_1^H\Psi_{p-1} \\
\vdots & \vdots & \ddots & \vdots \\
\Psi_{p-1}^H\Psi_0 & \Psi_{p-1}^H\Psi_1 & \cdots & \Psi_{p-1}^H\Psi_{p-1} \\
\end{array}
\right] \\
\Psi^H\Psi &= \left[
\begin{array}{cccc}
p & 0 & \cdots & 0 \\
0 & p & \cdots & 0 \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \cdots & p \\
\end{array}
\right] \\
\Psi^H\Psi &= pI \\
\end{align*}
We know that we get $p$ down the diagonal because of orthogonality as shown earlier:
$$
\Psi_k^H\Psi_\ell = p\delta(k-\ell) = \begin{cases} 0 & k\neq\ell \\ p & k=\ell \\ \end{cases}
$$
Now let's find our analysis equation:
\begin{align*}
x &= \Psi X \\
\Psi^H x &= \Psi^H\Psi X \\
\Psi^H x &= pIX \\
\Psi^H x &= pX \\
\frac{1}{p}\Psi^H x &= X \\
X &= \frac{1}{p}\Psi^H x \\
\end{align*}
Now we have our analysis equation in linear-algebra form. To conclude, as long as the basis vectors are mutually orthogonal and each has inner product $p$ with itself, these equations hold.
\begin{nicebox}
\begin{align*}
X &= \frac{1}{p}\Psi^H x \quad \quad & \mbox{(analysis equation)} \\
x &= \Psi X \quad \quad & \mbox{(synthesis equation)}
\end{align*}
\end{nicebox}
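These two matrix equations are easy to exercise numerically. Here is a minimal sketch (assuming NumPy; the choice $p = 4$ and the test signal are arbitrary):
\begin{verbatim}
import numpy as np

p = 4
n = np.arange(p)
w0 = 2 * np.pi / p
# columns are the basis signals Psi_k(n) = e^{i k w0 n}
Psi = np.exp(1j * w0 * np.outer(n, np.arange(p)))

x = np.array([2.0, 3.0, 2.0, 3.0])       # one period of a test signal
X = Psi.conj().T @ x / p                 # analysis:  X = (1/p) Psi^H x
assert np.allclose(Psi @ X, x)           # synthesis: x = Psi X
assert np.allclose(Psi.conj().T @ Psi, p * np.eye(p))   # Psi^H Psi = pI
\end{verbatim}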
\subsection{Examples}
\begin{example}
Find the DFS coefficients of the infinite duration impulse train:
% \img{images/deltas/periodic/dfs/dfs.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3.5,-3)(3.5,3)
\rput(-3.6,0.4){$\cdots$}
\rput(4,0.4){$\cdots$}
% line
\psline{-*}(-3,0)(-3,1)
% value
\rput(-2.7,1){(1)}
% position
\rput(-3,-0.3){-p}
% line
\psline{-*}(0,0)(0,1)
% value
\rput(0.3,1){(1)}
% position
\rput(0,-0.3){0}
% line
\psline{-*}(3,0)(3,1)
% value
\rput(3.3,1){(1)}
% position
\rput(3,-0.3){p}
% axes
\psline{->}(-4,0)(4,0)
\rput(4,-0.3){ $n$ }
\rput(4,1){ $x(n)$ }
\end{pspicture}
\end{center}
$$ x(n) = \sum \limits_{\ell=-\infty}^\infty \delta(n-\ell p)$$
Let's use the analysis equation:
\begin{align*}
X_k &= \frac{1}{p} \sum \limits_{n=0}^{p-1}x(n)e^{-ik\omega_0 n} \\
X_k &= \frac{1}{p} \sum \limits_{n=0}^{p-1}\sum \limits_{\ell=-\infty}^\infty \delta(n-\ell p)e^{-ik\omega_0 n} \\
X_k &= \frac{1}{p} \sum \limits_{n=0}^{p-1}\delta(n)e^{-ik\omega_0 n} \\
X_k &= \frac{1}{p} e^{0} \\
X_k &= \frac{1}{p}
\end{align*}
This implies that all frequencies contribute equally. The frequencies present are $0, \omega_0, 2\omega_0, \dots, (p-1)\omega_0$. A flat spectrum like this is characteristic of white noise.
\end{example}
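As a quick numerical check of this example, assuming \texttt{numpy} ($p=6$ is an arbitrary choice): one period of the impulse train is a single delta at $n=0$, and every DFS coefficient comes out to $1/p$:
\begin{verbatim}
import numpy as np

p = 6
n = np.arange(p)
x = (n == 0).astype(float)         # one period of the impulse train
X = np.array([(x * np.exp(-1j * 2 * np.pi * k * n / p)).sum() / p
              for k in range(p)])
print(X.real)                      # every entry is 1/p
\end{verbatim}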
\begin{example}
Determine the signal $x: \Z \to \R$ given the following information:
\begin{enumerate}
\item $x(n+4\ell) = x(n) $ $\forall \ell \in \Z$
\item $\sum \limits_{n=-1}^2 x(n) = 2 $
\item $\sum \limits_{n=-1}^2 (-1)^nx(n) = 4 $
\item $\sum \limits_{n=-1}^2 x(n)\cos\lr{\frac{\pi}{2}n} = \sum \limits_{n=-1}^2 x(n)\sin\lr{\frac{\pi}{2}n} = 0$
\end{enumerate}
Since $x(n+4\ell) = x(n)$, we can use $p=4$ as a period. This does not necessarily mean that $4$ is the fundamental period. Let's start by solving for $X_0$ using the analysis equation:
\begin{align*}
X_0 &= \frac{1}{4}\sum \limits_{n=-1}^2 x(n)e^{0} \\
X_0 &= \frac{1}{4}2 = \frac{1}{2} \\
\end{align*}
Now let's solve for $X_2$:
\begin{align*}
X_2 &= \frac{1}{4}\sum \limits_{n=-1}^2 x(n)e^{-i\pi n} \\
X_2 &= \frac{1}{4}\sum \limits_{n=-1}^2 x(n)(-1)^n \\
X_2 &= \frac{1}{4} 4 = 1 \\
\end{align*}
Note that if we compute either $X_{-1}$ or $X_1$, we get the same answer, and for the same reason:
\begin{align*}
X_{-1} &= \frac{1}{4}\sum \limits_{n=-1}^2 x(n)e^{-i\frac{\pi}{2} n} \\
X_{-1} &= \frac{1}{4}\sum \limits_{n=-1}^2 x(n)\lr{\cos\lr{\frac{\pi}{2}n} + i\sin\lr{\frac{\pi}{2}n}} \\
X_{-1} &= \frac{1}{4}\sum \limits_{n=-1}^2 x(n)\cos\lr{\frac{\pi}{2}n} + \frac{i}{4}\sum \limits_{n=-1}^2 x(n)\sin\lr{\frac{\pi}{2}n} \\
X_{-1} &= 0
\end{align*}
So we have
\begin{align*}
X_{-1} &= 0 \\
X_{0} &= \frac{1}{2} \\
X_{1} &= 0 \\
X_{2} &= 1 \\
\end{align*}
Now synthesize $x$:
\begin{align*}
x(n) &= X_0 + X_2e^{i\pi n} \\
x(n) &= \frac{1}{2} + (-1)^n
\end{align*}
This signal actually has a fundamental period of two, and it oscillates at the frequency $2\pi/2 = \pi$, the highest frequency available in discrete time. Note that it is safe to assume a period that is any integer multiple of the fundamental period.
% \img{images/deltas/periodic/dfsex/final.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-1.5,-1)(3.5,3)
% line
\psline{-*}(-2,0)(-2,-0.5)
% value
\rput(-1.5,-0.5){(-1/2)}
% position
\rput(-2,0.3){-2}
% line
\psline{-*}(-1,0)(-1,1.5)
% value
\rput(-0.5,1.5){(3/2)}
% position
\rput(-1,-0.3){-1}
% line
\psline{-*}(0,0)(0,-0.5)
% value
\rput(0.5,-0.5){(-1/2)}
% position
\rput(0,0.3){0}
% line
\psline{-*}(1,0)(1,1.5)
% value
\rput(1.5,1.5){(3/2)}
% position
\rput(1,-0.3){1}
% line
\psline{-*}(2,0)(2,-0.5)
% value
\rput(2.5,-0.5){(-1/2)}
% position
\rput(2,0.3){2}
% line
\psline{-*}(3,0)(3,1.5)
% value
\rput(3.5,1.5){(3/2)}
% position
\rput(3,-0.3){3}
% axes
\psline{->}(-3,0)(4,0)
\rput(4,-0.3){ $n$ }
\rput(4.2,1.5){ $x(n)$ }
\rput(-3,0.75){$\cdots$}
\rput(4,0.75){$\cdots$}
\end{pspicture}
\end{center}
\end{example}
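We can also verify this answer numerically, assuming \texttt{numpy}: the synthesized signal $x(n) = \frac{1}{2} + (-1)^n$ should satisfy all four given conditions over the period $n=-1,\dots,2$:
\begin{verbatim}
import numpy as np

n = np.arange(-1, 3)               # one period: n = -1, 0, 1, 2
x = 0.5 + (-1.0) ** n

print(x.sum())                             # condition 2: equals 2
print(((-1.0) ** n * x).sum())             # condition 3: equals 4
print((x * np.cos(np.pi * n / 2)).sum())   # condition 4: equals 0
print((x * np.sin(np.pi * n / 2)).sum())   # condition 4: equals 0
\end{verbatim}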
\newpage
\section{Week 4}
\subsection{Continuous-time Fourier Series}
The Continuous-Time Fourier Series (CTFS), often called simply the Fourier Series (FS), tells us that we can write a signal as a linear combination of orthogonal basis functions. In general, we can use any orthogonal set of functions, but we will mainly use complex exponentials.
\begin{align*}
x(t) &= \sum \limits_{k=-\infty}^{\infty} X_k e^{ik\omega_0 t} \\
x(t) &= \sum \limits_{k=-\infty}^{\infty} X_k \Psi_k(t) \\
\end{align*}
This brings us to our synthesis equation:
$$
x = \sum \limits_{k=-\infty}^{\infty}X_k\Psi_k
$$
We use the inner product to find the spectral coefficient $X_k$, which brings us to our analysis equation:
$$
X_k = \frac{\langle x,\Psi_k\rangle}{\langle \Psi_k,\Psi_k\rangle}
$$
But how do we define our inner product? In discrete-time, we used
\begin{nicebox}
$$
\langle f,g\rangle = \sum \limits_{n=\langle p\rangle} f(n)g^*(n) \quad \mbox{(Discrete-time inner product)}
$$
\end{nicebox}
We can simplify this using linear algebraic language.
\begin{align*}
f &= \left[ \begin{array}{c} f(0) \\ f(1) \\ \vdots \\ f(p-1) \\ \end{array} \right] \\
g &= \left[ \begin{array}{c} g(0) \\ g(1) \\ \vdots \\ g(p-1) \\ \end{array} \right]
\end{align*}
Therefore, we can write $\langle f,g \rangle$ as the multiplication of a transposed vector with another vector:
$$ \langle f,g\rangle = f^Tg^* $$
But now that we are in continuous time, we need more: we have a continuum of values, not a discrete, countable set. Hence, we define the inner product as an integral:
\begin{nicebox}
$$
\langle f,g\rangle = \int_{\langle p\rangle} f(t)g^*(t) dt \quad \mbox{(Continuous-time inner product)}
$$
\end{nicebox}
Now lets look at $\langle \Psi_k, \Psi_\ell\rangle$:
\begin{align*}
\langle\Psi_k, \Psi_\ell\rangle &= \int_{\langle p\rangle} \Psi_k(t)\Psi_\ell^*(t) dt \\
\langle\Psi_k, \Psi_\ell\rangle &= \int_{\langle p\rangle} e^{ik\omega_0 t}e^{-i\ell \omega_0 t} dt \\
\langle\Psi_k, \Psi_\ell\rangle &= \int_{\langle p\rangle} e^{i(k-\ell)\omega_0 t} dt \\
\end{align*}
We have two cases: the exponent is zero when $k=\ell$, and non-zero when $k\neq \ell$.
I) $k=\ell$
This case is simpler. We are integrating $1$ over a contiguous interval of length $p$, hence the value is $p$:
\begin{align*}
\langle \Psi_\ell, \Psi_\ell\rangle &= \int_{\langle p\rangle} dt = p
\end{align*}
II) $k\neq\ell$
This case requires a bit more work. Let's integrate.
\begin{align*}
\langle\Psi_k, \Psi_\ell\rangle &= \int_{\langle p\rangle} e^{i(k-\ell)\omega_0 t} dt \\
\langle\Psi_k, \Psi_\ell\rangle &= \left.\frac{e^{i(k-\ell)\omega_0 t}}{i(k-\ell)\omega_0} \right|_{\langle p\rangle} \\
\langle\Psi_k, \Psi_\ell\rangle &= \frac{e^{i(k-\ell)\omega_0 p}- e^{0}}{i(k-\ell)\omega_0} \\
\langle\Psi_k, \Psi_\ell\rangle &= \frac{e^{i(k-\ell)2\pi}- e^{0}}{i(k-\ell)\omega_0} \\
\langle\Psi_k, \Psi_\ell\rangle &= \frac{0}{i(k-\ell)\omega_0} = 0\\
\end{align*}
Let's look at this from another perspective and change to sines and cosines instead of integrating:
\begin{align*}
\langle\Psi_k, \Psi_\ell\rangle &= \int_{\langle p\rangle} e^{i(k-\ell)\omega_0 t} dt \\
\langle\Psi_k, \Psi_\ell\rangle &= \int_{\langle p\rangle} \cos((k-\ell)\omega_0 t) + i\sin((k-\ell)\omega_0 t) dt \\
\langle\Psi_k, \Psi_\ell\rangle &= \int_{\langle p\rangle} \cos((k-\ell)\omega_0 t) dt + \int_{\langle p\rangle}i\sin((k-\ell)\omega_0 t) dt \\
\end{align*}
Consider the case where $k-\ell=1$. We are integrating over a full period of the cosine function:
% \img{images/trigonometric/cos1/cos.ps}
Notice that there is an equal amount of positive and negative area, hence the value is zero. If $k-\ell=2$, then we are integrating $\cos(2\omega_0 t)$, which has period $p/2$, hence we integrate over two full periods of the cosine function, again giving us zero:
% \img{images/trigonometric/cos2/cos.ps}
If you were to let $k-\ell=N$, where $N\in \Z$, then you would be integrating over $N$ periods since the function would have a period of $p/N$. This same phenomenon occurs with the sine function.
% \img{images/trigonometric/sin1/sin.ps}
Therefore, any time we have $k-\ell \neq 0$, we are integrating over integer multiples of the period, hence
$$\langle \Psi_k, \Psi_\ell\rangle = p\delta(k-\ell)$$
Looking back at the analysis equation, we have
$$ X_k = \frac{\langle x,\Psi_k\rangle}{\langle\Psi_k, \Psi_k\rangle} = \frac{1}{p} \int_{\langle p\rangle}x(t)e^{-ik\omega_0 t} dt $$
Note that $X_k$ depends on both $k$ and $\omega_0$, so you may sometimes see the notation $X(k\omega_0)$.
To sum everything up, we have two important equations regarding the continuous-time Fourier series (FS):
\begin{nicebox}
\begin{align*}
x(t) &= \sum \limits_{k=-\infty}^{\infty}X_ke^{ik\omega_0t} \quad &\mbox{(synthesis equation)} \\
X_k &= \frac{1}{p} \int_{\langle p\rangle}x(t)e^{-ik\omega_0t}dt \quad &\mbox{(analysis equation)}
\end{align*}
\end{nicebox}
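Here is a sketch of these two equations, assuming \texttt{numpy} (the $p$-periodic triangle-shaped test signal and the truncation at $\abs{k}\leq 25$ are arbitrary choices). It approximates the analysis integral by a Riemann sum, then resynthesizes the signal from the coefficients:
\begin{verbatim}
import numpy as np

p = 2.0
w0 = 2 * np.pi / p
t = np.linspace(0, p, 4000, endpoint=False)
dt = t[1] - t[0]
x = np.abs(t - p / 2)              # one period of a triangle-shaped signal

K = 25                             # truncate the infinite sum at |k| <= K
ks = np.arange(-K, K + 1)
Xk = np.array([(x * np.exp(-1j * k * w0 * t)).sum() * dt / p for k in ks])

xhat = sum(Xk[i] * np.exp(1j * ks[i] * w0 * t) for i in range(len(ks)))
print(np.max(np.abs(x - xhat.real)))   # small truncation error
\end{verbatim}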
Because we can write $x(t)$ using infinitely many coefficients $X_k$, the sequence $X_k$ is not necessarily periodic, while $x(t)$ is $p$-periodic in continuous time. Note that we can write $x(t)$ using any orthogonal set of basis vectors:
\begin{align*}
x(t) &= \sum \limits_{k=-\infty}^{\infty}X_k\Psi_k(t) \\
x(t) &= \sum \limits_{k=0}^{\infty}A_k \cos(k\omega_0 t) + \sum \limits_{k=1}^{\infty}B_k \sin(k\omega_0 t)
\end{align*}
Consider the following bases:
\begin{align*}
\Psi_k(t) &= \cos(k\omega_0 t) \quad k\in\Z_{\oplus} \\
\Phi_\ell(t) &= \sin(\ell\omega_0 t) \quad \ell \in \Z_+
\end{align*}
In order to use these as bases, we need to show the following:
\begin{align*}
\langle \Psi_k, \Psi_m\rangle &= 0 \quad \mbox{if } k\neq m \\
\langle \Psi_k, \Phi_\ell\rangle &= 0 \quad \forall\mbox{ } k,\ell \\
\langle \Phi_\ell, \Phi_r\rangle &= 0 \quad \mbox{if } \ell\neq r \\
\end{align*}
Then to determine the coefficients, we can use the inner product.
\subsection{FS Examples}
The continuous-time Fourier series is defined as
\begin{nicebox}
\begin{align*}
x(t) &= \sum \limits_{k=-\infty}^{\infty}X_ke^{ik\omega_0t} \quad &\mbox{(synthesis)} \\
X_k &= \frac{1}{p} \int_{\langle p\rangle }x(t)e^{-ik\omega_0t}dt \quad &\mbox{(analysis)}
\end{align*}
\end{nicebox}
\begin{example}
1) Given an impulse train $x(t)$ of Dirac deltas spaced $p$ apart, find the spectral coefficients and the corresponding frequencies present in the signal.
% \includegraphics{images/pulsetrains/continuous.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-1.5,-1)(6.5,2)
\psline{->}(-1,0)(6,0)
\rput(6,-0.25){$t$}
\rput(6,1){$x(t)$}
\rput(6,0.5){$\cdots$}
\rput(-1,0.5){$\cdots$}
\psline{->}(0,0)(0,1)
\rput(0,-0.25){-p}
\psline{->}(2.5,0)(2.5,1)
\rput(2.5,-0.25){0}
\psline{->}(5,0)(5,1)
\rput(5,-0.25){p}
\end{pspicture}
\end{center}
\begin{align*}
X_k &= \frac{1}{p} \int_{\langle p\rangle }x(t)e^{-ik\omega_0t}dt \\
X_k &= \frac{1}{p} \int_{-p/2}^{p/2}x(t)e^{-ik\omega_0t}dt \\
X_k &= \frac{1}{p} \int_{-p/2}^{p/2}\delta(t)e^{-ik\omega_0t}dt \\
X_k &= \frac{1}{p} \cdot e^{0} \\
X_k &= \frac{1}{p} \\
\end{align*}
Therefore, $X_k = \frac{1}{p}$ $\forall k$: the signal is made up of every harmonic of $\omega_0$, and so we may write the signal in its continuous-time Fourier series expansion as
$$ x(t) = \frac{1}{p} \sum \limits_{k=-\infty}^{\infty}e^{ik\omega_0 t} $$
Note that there are infinitely many harmonics of the fundamental frequency, not $p$ harmonics. This is a unique feature of the impulse train in continuous time. How do we know that $e^{ik\frac{\omega_0}{2}t}$ doesn't contribute to the signal? For $k=1$, $e^{i\frac{\omega_0}{2}t}$ has period $2p$, which contradicts the $p$-periodicity of the signal $x$.
\end{example}
\begin{example}
2) Consider a periodic signal in continuous time defined over one period by $x(t) = \begin{cases} 1 & -T/2 \leq t \leq T/2 \\ 0 & \mbox{otherwise} \end{cases}$, where $T \in \R$ and $T/2 \lt p/2$.
%\includegraphics{images/idealpass/periodicpass.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-2,-1)(5,2)
\psline{->}(-1,0)(5,0)
% 0
\psline(2,0.1)(2,-0.1)
% -p
\psline(0,0.1)(0,-0.1)
% p
\psline(4,0.1)(4,-0.1)
\psline(1,0.1)(1,-0.1)
\rput(1,-0.25){$-\frac{p}{2}$}
\psline(3,0.1)(3,-0.1)
\rput(3,-0.25){$\frac{p}{2}$}
\rput(5,-0.25){$t$}
\rput(5,1){$x(t)$}
% dots
\rput(-1.5,0.5){$\cdots$}
\rput(5,0.5){$\cdots$}
% height
\psline[linestyle=dashed](-1.25,1)(-0.5,1)
\psline{->}(-1,0.6)(-1,1)
\psline{->}(-1,0.4)(-1,0)
\rput(-1,0.5){$1$}
\psline(1.5,0)(1.5,1)
\psline(1.5,1)(2.5,1)
\psline(2.5,1)(2.5,0)
\psline(3.5,0)(3.5,1)
\psline(3.5,1)(4.5,1)
\psline(4.5,1)(4.5,0)
\psline(-0.5,0)(-0.5,1)
\psline(-0.5,1)(0.5,1)
\psline(0.5,1)(0.5,0)
\rput(2,-0.25){0}
\rput(0,-0.25){$-p$}
\rput(4,-0.25){$p$}
\rput(1.5,-0.25){$-\frac{T}{2}$}
\rput(2.5,-0.25){$\frac{T}{2}$}
\end{pspicture}
\end{center}
\begin{align*}
X_k &= \frac{1}{p} \int_{\langle p\rangle }x(t)e^{-ik\omega_0 t}dt \\
X_k &= \frac{1}{p} \int_{-T/2}^{T/2}e^{-ik\omega_0 t}dt \\
X_k &= \frac{1}{p} \lr{ \left. \frac{e^{-ik\omega_0 t}}{-ik\omega_0} \right|_{-T/2}^{T/2}} \\
X_k &= \frac{1}{p} \frac{e^{-ik\omega_0 T/2} - e^{ik\omega_0 T/2}}{-ik\omega_0} \\
X_k &= \frac{1}{p} \frac{e^{ik\omega_0 T/2} - e^{-ik\omega_0 T/2}}{ik\omega_0} \\
X_k &= \frac{1}{p} \frac{2i\sin(k\omega_0 T/2)}{ik\omega_0} \\
X_k &= \frac{1}{k\pi} \sin(k\omega_0 T/2) \\
\end{align*}
It seems we have found a link between the Fourier series coefficients and a function of a continuous frequency variable:
$$ X_k = \left. X(\omega) \right|_{\omega=k\omega_0} \quad \mbox{where} \quad X(\omega) = \frac{2}{p}\frac{\sin(\omega T/2)}{\omega} $$
Since $X_k$ has a dependence on $k$, not all frequencies contribute equally. As $k\to \infty$, $\abs{X_k}\to 0$.
\end{example}
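A numerical check of this example, assuming \texttt{numpy} ($p=4$ and $T=1$ are arbitrary choices), compares the closed-form coefficients against direct numerical integration of the analysis equation:
\begin{verbatim}
import numpy as np

p, T = 4.0, 1.0
w0 = 2 * np.pi / p
t = np.linspace(-p / 2, p / 2, 8000, endpoint=False)
dt = t[1] - t[0]
x = (np.abs(t) <= T / 2).astype(float)

for k in range(1, 6):
    numeric = (x * np.exp(-1j * k * w0 * t)).sum() * dt / p
    closed = np.sin(k * w0 * T / 2) / (k * np.pi)
    print(k, numeric.real, closed)     # the two columns agree
\end{verbatim}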
\subsection{Approximations}
Given a periodic signal $x$ in CT, we can estimate the signal using the formula:
$$ \hat{x}(t) = \sum \limits_{k=-N}^{N}\alpha_k e^{ik\omega_0 t} $$
How can we determine the coefficients that make the estimate closest in terms of error energy? Let $W$ be the $(2N+1)$-dimensional subspace spanned by the set of orthogonal basis vectors $\Psi_0, \Psi_1, \Psi_{-1}, \dots, \Psi_{N}, \Psi_{-N}$. If $x$ is a vector that does not lie in $W$, then we can project $x$ onto $W$, producing an approximation vector $\hat{x}$ at distance $\norm{\mathcal{E}_N}$ from the vector $x$. The error vector $\mathcal{E}_N$ is orthogonal to $\hat{x}$, and indeed to all of $W$.
% \img{images/bases/projtheory/proj.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-1,-3)(9,4)
\pscustom[fillstyle=solid,fillcolor=gray!40,linestyle=none]{
\psline[linewidth=1 pt](0,0)(4,1.2)
\psline[linewidth=1 pt](4,1.2)(8.4,0)
\psline[linewidth=1 pt](8.4,0)(4,-1.2)
\psline[linewidth=1 pt](4,-1.2)(0,0)
}
\psline[linewidth=1 pt](0,0)(4,1.2)
\psline[linewidth=1 pt](4,1.2)(8.4,0)
\psline[linewidth=1 pt](8.4,0)(4,-1.2)
\psline[linewidth=1 pt](4,-1.2)(0,0)
\rput(0.78,0){$W$}
% new vector
\rput(6,3.3){$x$}
\psline[linewidth=1.5 pt,linecolor=red]{->}(2.2,0.2)(6,3)
% new vector
\rput(6.35,1.5){$\mathcal{E}_N$}
\psline[linewidth=1.5 pt]{->}(6,0)(6,3)
% new vector
\rput(4,-0.3){$\hat{x}_N$}
\psline[linewidth=1.5 pt]{->}(2.2,0.2)(6,0)
% new vector
\psline[linewidth=1.5 pt](2.2,0.2)(6,0)
\end{pspicture}
\end{center}
We want to minimize $\norm{\mathcal{E}_N}$ to make the best approximation. Since $\mathcal{E}_N = x - \hat{x}_N$, we have $\norm{\mathcal{E}_N} = \norm{x-\hat{x}_N}$. Because $\mathcal{E}_N \perp W$, we can use the inner product and the properties of orthogonality to solve for the coefficients. Since $\dim W = 2N+1$, we have $2N+1$ equations and $2N+1$ coefficients.
\begin{align*}
\langle \hat{x}_N, \Psi_\ell\rangle &= \langle \sum \limits_{k=-N}^{N}\alpha_k e^{ik\omega_0 t} , \Psi_\ell\rangle \\
\langle \hat{x}_N, \Psi_\ell\rangle &= \sum \limits_{k=-N}^{N}\alpha_k \langle e^{ik\omega_0 t} , \Psi_\ell\rangle \\
\langle \hat{x}_N, \Psi_\ell\rangle &= \alpha_\ell \langle e^{i\ell\omega_0 t} , \Psi_\ell\rangle \\
\langle \hat{x}_N, \Psi_\ell\rangle &= \alpha_\ell \langle \Psi_\ell , \Psi_\ell\rangle \\
\alpha_\ell &= \frac{\langle \hat{x}_N, \Psi_\ell\rangle }{\langle \Psi_\ell , \Psi_\ell\rangle } \\
\end{align*}
Moreover, since $\mathcal{E}_N \perp \Psi_\ell$, we have $\langle x, \Psi_\ell\rangle = \langle \hat{x}_N, \Psi_\ell\rangle$, so $\alpha_\ell = \langle x, \Psi_\ell\rangle / \langle \Psi_\ell, \Psi_\ell\rangle = X_\ell$: the best coefficients are exactly the Fourier series coefficients.
If $\hat{x}(t) = \sum \limits_{k=0}^{M-1}\alpha_k \Psi_k$, where $x$ is a $p$-periodic signal, which $\Psi_k$'s would you choose? Since we can only keep $M$ terms, pick the FS coefficients largest in magnitude, since they dominate the error energy, and go down from there; the chosen index set does not have to be contiguous, so blindly using $\{0,1,\dots,M-1\}$ may waste terms, as the sketch below shows.
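Here is the sketch, assuming \texttt{numpy} (the rectangular pulse from the last example and the values of $K$ and $M$ are arbitrary choices). For this pulse, the contiguous index set wastes terms on coefficients that are exactly zero, so the largest-magnitude choice gives a smaller error energy:
\begin{verbatim}
import numpy as np

p, T = 4.0, 1.0
w0 = 2 * np.pi / p
t = np.linspace(-p / 2, p / 2, 4000, endpoint=False)
dt = t[1] - t[0]
x = (np.abs(t) <= T / 2).astype(float)

K, M = 40, 9
ks = np.arange(-K, K + 1)
Xk = np.array([(x * np.exp(-1j * k * w0 * t)).sum() * dt / p for k in ks])

def approx(idx):
    return sum(Xk[i] * np.exp(1j * ks[i] * w0 * t) for i in idx).real

best = np.argsort(-np.abs(Xk))[:M]     # M largest-magnitude coefficients
contiguous = [i for i in range(len(ks)) if abs(ks[i]) <= (M - 1) // 2]
print(np.sum((x - approx(best)) ** 2))        # smaller error energy
print(np.sum((x - approx(contiguous)) ** 2))
\end{verbatim}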
\subsection{DTFT}
Recall that we can write the frequency response of a system as
$$ H(\omega) = \sum \limits_{n\in \Z} h(n) e^{-i\omega n} $$
Also, recall the $2\pi$-periodicity of the frequency response:
$$ H(\omega + 2\pi) = H(\omega) \quad \forall \omega $$
The Fourier series was written as $ x(t) = \sum \limits_{k=-\infty}^{\infty}X_ke^{ik\omega_0t} $, with $x(t+p) = x(t)$. We can combine these principles to come up with the DTFT: $x$ was $p$-periodic, $H$ is $2\pi$-periodic.
\begin{align*}
x &\leftrightarrow e^{in\omega_0 t} \leftrightarrow X_n \\
H &\leftrightarrow e^{-in\Omega_0\omega} \leftrightarrow h(n) \quad \mbox{where } \Omega_0 = 1 \\
\end{align*}
$X_n$ and $h(n)$ are the coefficients. $x$ is a periodic signal and so is $H$. $x(t)$ and $H(\omega)$ are functions over continuous variables.
\begin{align*}
x(t) & = \sum \limits_{n=-\infty}^{\infty}X_ne^{in\omega_0t} = \sum \limits_{n=-\infty}^{\infty} X_n\Psi_n\\
H(\omega) &= \sum \limits_{n=-\infty}^{\infty}h(n)e^{-in\Omega_0\omega} = \sum \limits_{n=-\infty}^{\infty} H_n\Phi_n
\end{align*}
Assume for now that $\langle \Phi_n, \Phi_m\rangle = 0 $ when $n\neq m$. We want to find $h(n)$ in terms of $H(\omega)$. So we can project $H$ onto $\Phi_m$ to determine $h(m)$, or $H_m$:
\begin{align*}
\langle H, \Phi_m\rangle &= \langle \sum \limits_{n\in\Z} h(n)\Phi_n, \Phi_m\rangle \\
\langle H, \Phi_m\rangle &= \sum \limits_{n\in\Z} h(n)\langle \Phi_n, \Phi_m\rangle \\
\langle H, \Phi_m\rangle &= h(m)\langle \Phi_m, \Phi_m\rangle \\
h(m) &= \frac{\langle H, \Phi_m\rangle }{\langle \Phi_m, \Phi_m\rangle } \\
\end{align*}
We can always rely on the ratio of two inner products. $H(\omega)$ is a $2\pi$-periodic function over a continuous variable $\omega$, and $\Phi_m(\omega)$ is also a $2\pi$-periodic function over a continuous variable $\omega$.
\begin{align*}
\langle H, \Phi_m\rangle &= \int_{\langle 2\pi\rangle }H(\omega)\Phi_m^*(\omega)d\omega \\
\langle H, \Phi_m\rangle &= \int_{\langle 2\pi\rangle }H(\omega)e^{i\omega m}d\omega \\
\end{align*}
We still need to prove orthogonality and determine the value of $\langle \Phi_m, \Phi_m\rangle $:
\begin{align*}
\langle \Phi_n, \Phi_m\rangle &= \int_{\langle 2\pi\rangle }\Phi_n(\omega)\Phi_m^*(\omega)d\omega \\
\langle \Phi_n, \Phi_m\rangle &= \int_{\langle 2\pi\rangle }e^{-i\omega n}e^{i\omega m}d\omega \\
\langle \Phi_n, \Phi_m\rangle &= \int_{\langle 2\pi\rangle }e^{i\omega(m-n)}d\omega \\
\end{align*}
We have two cases: the exponent is zero when $m=n$, and non-zero when $m\neq n$. The first is simpler: we are integrating $1$ over a contiguous interval of length $2\pi$, hence the value is $2\pi$:
\begin{align*}
\langle \Phi_m, \Phi_m\rangle &= \int_{\langle 2\pi\rangle } d\omega = 2\pi
\end{align*}
The second case requires a bit more work. Let's integrate.
\begin{align*}
\langle \Phi_n, \Phi_m\rangle &= \int_{\langle 2\pi\rangle } e^{i(m-n)\omega} d\omega \\
\langle \Phi_n, \Phi_m\rangle &= \left.\frac{e^{i(m-n)\omega}}{i(m-n)} \right|_{\langle 2\pi\rangle } \\
\langle \Phi_n, \Phi_m\rangle &= \frac{e^{i(m-n)2\pi}- e^{0}}{i(m-n)} \\
\langle \Phi_n, \Phi_m\rangle &= \frac{0}{i(m-n)} = 0\\
\end{align*}
Now we can come to a conclusion regarding the DTFT.
\begin{nicebox}
\begin{align*}
h(n) &= \frac{1}{2\pi} \int_{\langle 2\pi\rangle } H(\omega)e^{i\omega n}d\omega \quad &\mbox{(synthesis equation)} \\
H(\omega) &= \sum \limits_{n=-\infty}^{\infty}h(n)e^{-i\omega n} \quad &\mbox{(analysis equation)}
\end{align*}
\end{nicebox}
$H(\omega)$ is $2\pi$-periodic, but $h(n)$ is not necessarily periodic. We can make a meaningful interpretation of the synthesis equation if we write it as a linear combination of complex exponentials:
$$ h(n) = \int_{\langle 2\pi\rangle }\lr{\frac{H(\omega)d\omega}{2\pi}}e^{i\omega n} $$
A discrete-time function can be broken down into a linear combination of complex exponentials. How much of each complex exponential is present in the function is determined by the coefficient, and that coefficient is determined by $H(\omega)$. So the Fourier transform motivates the term \emph{filter}, since a system can break signals up into their constituent frequencies and filter them in or out of the output signal.
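To make the boxed pair concrete, here is a sketch assuming \texttt{numpy}. It uses the decaying exponential $h(n) = \alpha^n u(n)$ with $\alpha = 0.7$ and a truncation at $n \lt 200$ (both arbitrary choices), evaluates the analysis sum on a frequency grid, and then recovers $h(n)$ by numerically integrating the synthesis equation:
\begin{verbatim}
import numpy as np

alpha = 0.7
n = np.arange(0, 200)              # alpha^n u(n) decays fast; truncate
h = alpha ** n

w = np.linspace(-np.pi, np.pi, 4096, endpoint=False)
dw = w[1] - w[0]
H = (h[None, :] * np.exp(-1j * np.outer(w, n))).sum(axis=1)  # analysis

# synthesis: h(m) = (1/2pi) * integral of H(w) e^{i w m} dw
for m in [0, 1, 5]:
    hm = (H * np.exp(1j * w * m)).sum() * dw / (2 * np.pi)
    print(m, hm.real, alpha ** m)  # the two columns agree
\end{verbatim}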
\subsection{DTFT Examples}
\begin{example}
1) Find the DTFT of $x(n) = \alpha^n u(n)$ where $\abs{\alpha} \lt 1$.
\begin{align*}
X(\omega) &= \sum \limits_{n=-\infty}^{\infty}\alpha^n u(n)e^{-i\omega n} \\
X(\omega) &= \sum \limits_{n=0}^{\infty}\alpha^n e^{-i\omega n} \\
X(\omega) &= \frac{1}{1-\alpha e^{-i\omega}} \\
\end{align*}
If $\alpha \gt 0$, this is a low-pass filter.
If $\alpha \lt 0$, this is a high-pass filter.
What if $\alpha \in \C$? Let's consider an example where $\abs{\alpha} \lt 1$ and $\alpha$ points in the $\pi/4$ direction. First, analyze the vectors on the unit circle:
% \img{images/unitcircles/complexalpha/unit.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3,-3)(9,3)
% y-axis
\rput(0.3,2.5){ $\Im \mit{m}$ }
\psline{->}(0,-2.5)(0,2.5)
% x-axis
\rput(2.5,0.3){ $\Re \mit{e}$ }
\psline{->}(-2.5,0)(2.5,0)
% quadrants
\rput(0.3,2.2){ $\pi/2$ }
\rput(0.3,-2.5){ $\pi$ }
\rput(-2.5,0.3){ $3\pi/2$ }
\rput(2.2,0.3){ $0$ }
% the circle
\pscircle(0,0){2}
% new vector
\rput(1,1.2){$\alpha$}
\psline[linewidth=1.5 pt]{->}(0,0)(1.2,1.2)
% new vector
\rput(1.55,1.45){$e^{i\omega}-\alpha$}
\psline[linewidth=2.5 pt, linecolor=blue]{->}(1.2,1.2)(1.73205124983367,0.999999233974511)
% new vector
\rput(1.53205124983367,0.65){$e^{i\omega}$}
\psline[linewidth=1.5 pt]{->}(0,0)(1.73205124983367,0.999999233974511)
% y-axis
\rput(6.3,2.5){ $\Im \mit{m}$ }
\psline{->}(6,-2.5)(6,2.5)
% x-axis
\rput(8.5,0.3){ $\Re \mit{e}$ }
\psline{->}(3.5,0)(8.5,0)
% quadrants
\rput(6.3,2.2){ $\pi/2$ }
\rput(6.3,-2.5){ $\pi$ }
\rput(3.5,0.3){ $3\pi/2$ }
\rput(8.2,0.3){ $0$ }
% the circle
\pscircle(6,0){2}
% new vector
\rput(7,0.65){$\alpha$}
\psline[linewidth=1.5 pt]{->}(6,0)(7.2,1.2)
% new vector
\rput(5,0.5){$e^{i\omega}-\alpha$}
\psline[linewidth=2.5 pt, linecolor=blue]{->}(7.2,1.2)(4.12061274158933,-0.684034745406704)
% new vector
\rput(3.92061274158933,-0.684034745406704){$e^{i\omega}$}
\psline[linewidth=1.5 pt]{->}(6,0)(4.12061274158933,-0.684034745406704)
\end{pspicture}
\end{center}
Notice that the denominator vector $e^{i\omega} - \alpha$ has maximum length at $\omega = -3\pi/4$, giving the magnitude response a minimum there. The same vector has minimum length at $\omega = \pi/4$, giving the magnitude response a maximum at $\pi/4$.
% \img{images/magnituderesp/complexalpha/mag.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-1,-1)(3,3)
% \fileplot[linewidth=1.5pt]{xomega.dat}
% function name
\rput(3.9269908125,2.79932218491997){$X(\omega)$}
% x-axis
\psline(-4.3269908125, 0)(4.3269908125, 0)
\rput(4.3269908125,-0.3){$\omega$}
% begin x-axis labels:
\psline(-3.9269908125, -0.1)(-3.9269908125, 0.1)
\rput(-3.9269908125, -0.3){$-\frac{5\pi}{4}$}
\psline(-3.14159265, -0.1)(-3.14159265, 0.1)
\rput(-3.14159265, -0.3){$-\pi$}
\psline(-2.3561944875, -0.1)(-2.3561944875, 0.1)
\rput(-2.3561944875, -0.3){$-\frac{3\pi}{4}$}
\psline(-1.570796325, -0.1)(-1.570796325, 0.1)
\rput(-1.570796325, -0.3){$-\frac{\pi}{2}$}
\psline(-0.7853981625, -0.1)(-0.7853981625, 0.1)
\rput(-0.7853981625, -0.3){$-\frac{\pi}{4}$}
\psline(0.7853981625, -0.1)(0.7853981625, 0.1)
\rput(0.7853981625, -0.3){$\frac{\pi}{4}$}
\psline(1.570796325, -0.1)(1.570796325, 0.1)
\rput(1.570796325, -0.3){$\frac{\pi}{2}$}
\psline(2.3561944875, -0.1)(2.3561944875, 0.1)
\rput(2.3561944875, -0.3){$\frac{3\pi}{4}$}
\psline(3.14159265, -0.1)(3.14159265, 0.1)
\rput(3.14159265, -0.3){$\pi$}
\psline(3.9269908125, -0.1)(3.9269908125, 0.1)
\rput(3.9269908125, -0.3){$\frac{5\pi}{4}$}
% end x-axis labels:
% y-axis
\psline(0,-0.4)(0,3.4)
% begin x-axis labels:
\psline(-0.1, 1)(0.1, 1)
\psline(-0.1, 2)(0.1, 2)
\psline(-0.1, 3)(0.1, 3)
% end y-axis labels:
\end{pspicture}
\end{center}
Remember that we only need to graph over one period of length $2\pi$, but graphing more demonstrates the periodicity of the magnitude response.
\end{example}
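A numerical sketch of this example, assuming \texttt{numpy} ($\alpha = 0.8e^{i\pi/4}$ is an arbitrary choice in the $\pi/4$ direction), confirms where the magnitude response peaks and dips:
\begin{verbatim}
import numpy as np

alpha = 0.8 * np.exp(1j * np.pi / 4)
w = np.linspace(-np.pi, np.pi, 10001)
mag = 1.0 / np.abs(np.exp(1j * w) - alpha)

print(w[np.argmax(mag)])           # approximately  pi/4  ( 0.785...)
print(w[np.argmin(mag)])           # approximately -3pi/4 (-2.356...)
\end{verbatim}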
\begin{example}
2) Consider an ideal low-pass filter that kills all frequencies except frequencies in $[-A,A]$. What is the impulse response $h(n)$?
% \includegraphics{images/idealpass/idealpass.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-1,-1)(4,1.5)
%axis
\psline(1,0)(3,0)
% 0
\psline(2,0.1)(2,-0.1)
% pi and -pi
\psline(1,0.1)(1,-0.1)
\rput(1,-0.25){$-\pi$}
\psline(3,0.1)(3,-0.1)
\rput(3,-0.25){$\pi$}
\rput(3.3,-0.25){$\omega$}
\rput(3,1){$H(\omega)$}
\rput(1.35,1){$(1)$}
\psline(1.5,0)(1.5,1)
\psline(1.5,1)(2.5,1)
\psline(2.5,1)(2.5,0)
\rput(2,-0.25){0}
\rput(1.5,-0.25){-A}
\rput(2.5,-0.25){A}
\end{pspicture}
\end{center}
Use the discrete-time Fourier transform.
\begin{align*}
h(n) &= \frac{1}{2\pi} \int_{\langle 2\pi\rangle}H(\omega)e^{i\omega n} d\omega \\
h(n) &= \frac{1}{2\pi} \int_{-A}^{A}e^{i\omega n} d\omega \\
h(n) &= \frac{1}{2\pi} \lr{\left. \frac{e^{i\omega n}}{in}\right|_{-A}^{A}} \\
h(n) &= \frac{1}{2\pi} \lr{\frac{e^{iAn}-e^{-iAn}}{in}} \\
h(n) &= \frac{1}{2\pi} \frac{2i\sin (An)}{in} \\
h(n) &= \frac{1}{\pi} \frac{\sin (An)}{n} \\
\end{align*}
Note that we could have also written this as
\begin{align*}
h(n) &= \frac{1}{2\pi}\left[ \int_{-A}^{A}\cos(\omega n) d\omega + i \int_{-A}^{A}\sin(\omega n) d\omega\right] \\
h(n) &= \frac{1}{2\pi}\int_{-A}^{A}\cos(\omega n) d\omega \\
h(n) &= \frac{1}{2\pi} \lr{\left. \frac{\sin(\omega n)}{n} \right|_{-A}^{A}} \\
h(n) &= \frac{1}{\pi} \frac{\sin (An)}{n} \\
\end{align*}
Since sine is an odd function, integrating it from $-A$ to $A$ gives zero, so we are left with integrating the cosine only. What happens to the impulse response $h(n)$ as $A$ approaches $\pi$?
$$ \lim_{A\to\pi} \frac{\sin (An)}{\pi n} = \frac{\sin (\pi n)}{\pi n} $$
Now for all $n \neq 0$, $h(n) = 0$, because we are feeding the sine function integer multiples of $\pi$, and $h(0)=1$ by L'Hopital's rule. This means that as $A$ approaches $\pi$, the impulse response becomes the identity element of convolution:
$$ \lim_{A\to\pi} \frac{\sin (An)}{\pi n} = \delta(n) $$
Another important thing to note is that this filter cannot be realized exactly in practice: signals that are absolutely summable have DTFTs that are continuous functions of $\omega$, and our $H(\omega)$ is discontinuous. Absolute summability means
$$ \sum \limits_{n=-\infty}^{\infty}\abs{x(n)} \lt \infty $$
Since $\frac{1}{\pi} \frac{\sin (An)}{n}$ decays on the order of $\frac{1}{n}$, it is not absolutely summable. Its energy, however, is finite:
$$ \sum \limits_{n=-\infty}^{\infty}\abs{x(n)}^2 \lt \infty $$
This is because $\sum\lr{\frac{1}{n}}^2$ converges. Functions that are not absolutely summable but are square summable (have finite energy) have DTFTs that are discontinuous. For such functions you cannot use the analysis equation, but you can use the synthesis equation.
\end{example}
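The summability claims are easy to probe numerically, assuming \texttt{numpy} ($A = \pi/4$ is an arbitrary choice): the partial absolute sums keep growing like a logarithm, while the partial energy sums settle down:
\begin{verbatim}
import numpy as np

A = np.pi / 4
for N in [10**3, 10**4, 10**5]:
    n = np.arange(1, N)
    h = np.sin(A * n) / (np.pi * n)
    print(N, np.abs(h).sum(), (h ** 2).sum())
# the middle column grows without bound; the last column converges
\end{verbatim}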
\begin{example}
3) Find the DTFT of $x(n)=e^{i\omega_0 n}$.
\begin{align*}
\sum \limits_{n=-\infty}^{\infty}\abs{x(n)} \\
\sum \limits_{n=-\infty}^{\infty}\abs{e^{i\omega_0 n}} \\
\sum \limits_{n=-\infty}^{\infty}1 = \infty \\
\end{align*}
This is neither absolutely summable nor square summable. The discontinuity gets even worse: $X(\omega)$ contains Dirac deltas! We know that this signal represents a Dirac delta in the frequency domain centered at $\omega_0$, giving a spectrum of $A\delta(\omega -\omega_0)$ for some constant $A$.
% \includegraphics{images/pulsetrains/freqpulse.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-1,-1)(5,5)
\psline{->}(-1,0)(6,0)
\rput(6,-0.25){$\omega$}
\rput(6,1){$X(\omega)$}
\rput(6,0.5){$\cdots$}
\rput(-1,0.5){$\cdots$}
\rput(2.8,1){$(A)$}
\psline{->}(0,0)(0,1)
\rput(0,-0.25){$\omega_0-2\pi$}
\psline{->}(2.5,0)(2.5,1)
\rput(2.5,-0.25){$\omega_0$}
\psline{->}(5,0)(5,1)
\rput(5,-0.25){$\omega_0+2\pi$}
\end{pspicture}
\end{center}
So we can use the synthesis equation over an interval of $2\pi$:
\begin{align*}
x(n) &= \frac{1}{2\pi} \int_{\langle 2\pi\rangle} X(\omega) e^{i\omega n}d\omega \\
e^{i\omega_0 n} &= \frac{1}{2\pi} \int_{\langle 2\pi\rangle} A \delta(\omega-\omega_0) e^{i\omega n}d\omega \\
e^{i\omega_0 n} &= \frac{A}{2\pi} \int_{-\pi}^{\pi} \delta(\omega-\omega_0) e^{i\omega n}d\omega \\
e^{i\omega_0 n} &= \frac{A}{2\pi} e^{i\omega_0 n} \\
A &= 2\pi \\
\end{align*}
This tells us that $X(\omega) = 2\pi\delta(\omega - \omega_0)$, but we know that $X(\omega + 2\pi k) = X(\omega) $ $ \forall k \in \Z$, so we must write the frequency response in the form:
$$ X(\omega) = \sum \limits_{k=-\infty}^{\infty} 2\pi \delta(\omega-\omega_0 +2\pi k) $$
If we restricted the domain, for example, $-\pi \leq \omega \lt \pi$, then $X(\omega) = 2\pi\delta(\omega-\omega_0)$.
\end{example}
\newpage
\section{Week 5}
\subsection{DTFT of Complex Exponentials}
Recall the DTFT equations:
\begin{nicebox}
\begin{align*}
x(n) &= \frac{1}{2\pi} \int_{\langle 2\pi\rangle } X(\omega)e^{i\omega n}d\omega \quad &\mbox{(synthesis equation)} \\
X(\omega) &= \sum \limits_{n=-\infty}^{\infty}x(n)e^{-i\omega n} \quad &\mbox{(analysis equation)}
\end{align*}
\end{nicebox}
\begin{example}
1) Find the DTFT of the signal $x(n) = 1$ $\forall n$.
\begin{align*}
x(n) &= \frac{1}{2\pi} \int_{\langle 2\pi\rangle } X(\omega)e^{i\omega n}d\omega \\
1 &= \frac{1}{2\pi} \int_{\langle 2\pi\rangle } X(\omega)e^{i\omega n}d\omega \\
e^{i0n} &= \frac{1}{2\pi} \int_{\langle 2\pi\rangle } X(\omega)e^{i\omega n}d\omega \\
e^{i0n} &= \frac{1}{2\pi} \int_{\langle 2\pi\rangle } A\delta(\omega)e^{i\omega n}d\omega \\
e^{i0n} &= \frac{1}{2\pi} Ae^{i0n} \\
1 &= \frac{1}{2\pi} A \\
A &= 2\pi \\
\end{align*}
Hence, $X(\omega) = 2\pi\delta(\omega)$ for $\abs{\omega} \lt \pi$, or $X(\omega) = 2\pi\sum\limits_{k\in\Z}\delta(\omega-2\pi k)$ in general. Let's take this a little further, utilizing the analysis equation:
\begin{align*}
X(\omega) &= \sum \limits_{n=-\infty}^{\infty}x(n)e^{-i\omega n} \\
X(\omega) &= \sum \limits_{n=-\infty}^{\infty}e^{-i\omega n} \quad \mbox{let }x(n) = 1 \\
X(\omega) &= \sum \limits_{n=-\infty}^{\infty}e^{-i\omega n} = 2\pi\sum\limits_{k\in\Z}\delta(\omega-2\pi k) \\
\end{align*}
Hence we have the relation known as \emph{Poisson's Identity}:
$$
\sum \limits_{n\in\Z}e^{-i\omega n} = 2\pi\sum\limits_{k\in\Z}\delta(\omega-2\pi k) \\
$$
Notice that the left side is a sum of continuous functions while the right side is a train of Dirac deltas. Hence, the equality is not pointwise; rather, the two sides act the same way on other functions when integrated against them. This relationship is not consistent with our intuition and therefore must be derived.
\end{example}
\begin{example}
2) Find the DTFT of the signal $x(n)=\sin(2\pi n/3)$.
\begin{align*}
x(n) &= \sin(2\pi n/3) \\
x(n) &= \frac{1}{2i}e^{i2\pi n/3} - \frac{1}{2i}e^{-i2\pi n/3} \\
\ftrans{x(n)} &= \ftrans{\frac{1}{2i}e^{i2\pi n/3}} - \ftrans{\frac{1}{2i}e^{-i2\pi n/3}} \\
\end{align*}
Now, since the DTFT is linear, we can look at these individually. Consider an arbitrary complex exponential:
\begin{align*}f(n) &= \frac{1}{2\pi}\int_{\langle 2\pi\rangle }F(\omega)e^{i\omega n}d\omega \\
Re^{i\omega_0 n} &= \frac{1}{2\pi}\int_{\langle 2\pi\rangle }A\delta(\omega - \omega_0)e^{i\omega n}d\omega \\
Re^{i\omega_0 n} &= \frac{1}{2\pi}Ae^{i\omega_0 n} \\
R &= \frac{1}{2\pi}A \\
A &= 2\pi R \\
\end{align*}
This shows that $F(\omega) = 2\pi R \delta(\omega - \omega_0)$:
\begin{nicebox}
$$
Re^{i\omega_0 n} \ftp 2\pi R \delta(\omega - \omega_0)
$$
\end{nicebox}
So in our case we can use the derived formula to find the coefficients:
\begin{align*}
\ftrans{x(n)} &= \ftrans{\frac{1}{2i}e^{i2\pi n/3}} - \ftrans{\frac{1}{2i}e^{-i2\pi n/3}} \\
X(\omega) &= \frac{\pi}{i}\delta(\omega - 2\pi/3) - \frac{\pi}{i}\delta(\omega + 2\pi/3) \\
\end{align*}
Remember that we have to restrict our domain of $\omega$ to $\abs{\omega} \lt \pi$ for the equation to hold.
\end{example}
\subsection{Circular Convolution}
We know the following pairs:
\begin{align*}
x(n) &\ftp X(\omega) \\
y(n) &\ftp Y(\omega)
\end{align*}
What is the transform of $x(n)y(n)$? Multiplication in the time domain does correspond to convolution in the frequency domain, but because of the $2\pi$-periodicity of the frequency responses it is not ordinary convolution.
\begin{align*}
\mbox{let } q(n) = x(n)y(n) \\
Q(\omega) &= \sum \limits_{n\in\Z}x(n)y(n)e^{-i\omega n} \\
Q(\omega) &= \sum \limits_{n\in\Z}\lr{\frac{1}{2\pi}\int_{\langle 2\pi\rangle}X(\lambda)e^{i\lambda n}d\lambda}y(n)e^{-i\omega n} \\
Q(\omega) &= \frac{1}{2\pi}\int_{\langle 2\pi\rangle} X(\lambda) \lr{\sum \limits_{n\in\Z}y(n)e^{i\lambda n}e^{-i\omega n}}d\lambda \\
Q(\omega) &= \frac{1}{2\pi}\int_{\langle 2\pi\rangle} X(\lambda) \lr{\sum \limits_{n\in\Z}y(n) e^{-i(\omega-\lambda) n}} d\lambda \\
Q(\omega) &= \frac{1}{2\pi}\int_{\langle 2\pi\rangle} X(\lambda) Y(\omega - \lambda) d\lambda \\
\end{align*}
Thus, the Fourier transform pair is:
\begin{nicebox}
$$
x(n)y(n) \ftp \frac{1}{2\pi}\int_{\langle 2\pi\rangle} X(\lambda) Y(\omega - \lambda) d\lambda
$$
\end{nicebox}
So we can define this result as \emph{circular convolution}:
\begin{nicebox}
$$
(X \circconv Y)(\omega) = \int_{\langle 2\pi\rangle} X(\lambda) Y(\omega - \lambda) d\lambda
$$
\end{nicebox}
Do not confuse this with regular convolution:
\begin{nicebox}
$$
(X * Y)(\omega) = \int_{\R} X(\lambda) Y(\omega - \lambda) d\lambda
$$
\end{nicebox}
We use ordinary convolution with non-periodic functions and circular convolution with periodic functions. How can we compute a circular convolution?
\begin{enumerate}
\item Keep one function as it is.
\item Restrict the other function to one period.
\item Perform a normal convolution.
\item Divide by $2\pi$ (if necessary, as in the example above).
\end{enumerate}
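Here is a numerical sketch of this recipe, assuming \texttt{numpy} (the two box-shaped periodic spectra are arbitrary choices). Sampling one period of each function on a uniform grid turns the periodic convolution integral into a cyclic convolution of the sampled periods, which the FFT computes directly:
\begin{verbatim}
import numpy as np

N = 2048
w = np.linspace(0, 2 * np.pi, N, endpoint=False)
ww = np.where(w > np.pi, w - 2 * np.pi, w)  # wrap grid to (-pi, pi]
dw = w[1] - w[0]

X = (np.abs(ww) < 1.0).astype(float)   # one period of a 2pi-periodic box
Y = (np.abs(ww) < 0.5).astype(float)   # one period of a narrower box

# cyclic convolution of the sampled periods, scaled by the grid step
circ = np.real(np.fft.ifft(np.fft.fft(X) * np.fft.fft(Y))) * dw

print(circ[0])      # value at w = 0: the overlap length, about 1.0
print(circ.max())   # plateau of the resulting trapezoid, also about 1.0
\end{verbatim}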
\begin{example}
1) Consider the following two $2\pi$-periodic frequency responses:
% \img{images/boxes/circularconv/a/boxes.ps}
% \img{images/boxes/circularconv/b/boxes.ps}
How would we perform a circular convolution? Limit $Y(\lambda)$ to one period and then perform a single convolution.
% \img{images/boxes/circularconv/b/oneperiod/boxes.ps}
\end{example}
\begin{example}
2) Let's look at an example of DT amplitude modulation. This system takes in $x(n)$ and multiplies it by $y(n)$:
% \img{images/modulators/dtampmod/am.ps}
\begin{center}
\begin{pspicture}(-3,-3)(3,3)
% in from x
\rput(-3.2,0){$x(n)$}
\psline[linewidth=1.25 pt, arrowscale=1.1]{->}(-2.7,0)(-0.25,0)
% out to y
\rput(4.25,0){$q(n)=x(n)y(n)$}
\psline[linewidth=1.25 pt, arrowscale=1.1]{->}(0.25,0)(2.7,0)
% up arrow
\rput(0,-2){$y(n)=e^{i\omega_0 n}$}
\psline[linewidth=1.25 pt, arrowscale=1.1]{->}(0,-1.65)(0,-0.25)
% multiplier
\pscircle(0,0){0.25}
\psline(-0.175,0.175)(0.175,-0.175)
\psline(0.175,0.175)(-0.175,-0.175)
% box
\pspolygon(-1.5,-2.65)(-1.5,1)(1.5,1)(1.5,-2.65)
\end{pspicture}
\end{center}
$y(n) = e^{i\omega_0 n}$, and $x(n)$ is defined by its frequency response, $X(\lambda)$, which is shown below:
% \img{images/boxes/circularconv/a/simple/boxes.ps}
$Y(\lambda)$ can be shown to be an infinite impulse train, such that when restricted to one period for our circular convolution method, we have a single Dirac delta:
% \img{images/pulsetrains/diracs/freq/yomega.ps}
% \img{images/pulsetrains/diracs/freq/limited.ps}
Now we perform regular convolution. Any time we convolve a signal with a single Dirac delta at a particular frequency, we simply shift the signal so that it is centered at that frequency:
% \img{images/boxes/circularconv/a/convolved/boxes.ps}
This is equal to $\frac{1}{2\pi}\int_{\langle 2\pi\rangle}X(\lambda)Y(\omega - \lambda)d\lambda = \frac{1}{2\pi}\lr{X \circconv Y }\lr{\omega}$.
\end{example}
\subsection{CTFT}
We define the continuous-time Fourier transform pair as:
\begin{nicebox}
\begin{align*}
x(t) &= \frac{1}{2\pi}\int_{-\infty}^{\infty}X(\omega)e^{i\omega t}d\omega \quad &\mbox{(synthesis equation)} \\
X(\omega) &= \int_{-\infty}^{\infty}x(t)e^{-i\omega t}dt \quad &\mbox{(analysis equation)}
\end{align*}
\end{nicebox}
Neither $x(t)$ nor $X(\omega)$ has to be periodic.
\begin{example}
1) If $x(t) = \begin{cases} B & \abs{t} \lt A \\ 0 & \mbox{otherwise} \\ \end{cases}$, what is $\ftrans{x(t)}$?
\begin{align*}
X(\omega) &= \int_{-\infty}^{\infty} x(t) e^{-i\omega t}dt \\
X(\omega) &= B\int_{-A}^{A} e^{-i\omega t}dt \\
X(\omega) &= B\left. \frac{e^{-i\omega t}}{-i\omega} \right|_{-A}^{A} \\
X(\omega) &= B\frac{e^{i\omega A} - e^{-i\omega A}}{i\omega} \\
X(\omega) &= \frac{2B\sin(\omega A)}{\omega} \\
\end{align*}
Note that $\lim_{\omega \to 0} \frac{2B\sin(\omega A)}{\omega} = \lim_{\omega \to 0} \frac{2AB\cos(\omega A)}{1} = 2AB$ by L'Hopital's Rule.
%\img{images/sinc/timedomain/new.ps}
Also note that $\lim_{A \to \infty} X(\omega) = 2\pi B\delta(\omega)$ in the distributional sense.
\end{example}
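A numerical check of this example, assuming \texttt{numpy} ($A = 1.5$ and $B = 2$ are arbitrary choices), compares the closed form against direct integration of the analysis equation:
\begin{verbatim}
import numpy as np

A, B = 1.5, 2.0
t = np.linspace(-A, A, 20001)
dt = t[1] - t[0]

for w in [0.5, 1.0, 3.0]:
    numeric = (B * np.exp(-1j * w * t)).sum() * dt
    print(w, numeric.real, 2 * B * np.sin(w * A) / w)  # columns agree
\end{verbatim}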
\begin{example}
2) If $x$ is $p$-periodic, then
\begin{align*}
x(t) &= \sum_{k\in\Z}X_ke^{ik\omega_0 t} \\
e^{i\omega_0 t} &\ftp 2\pi \delta(\omega - \omega_0)
\end{align*}
We can find $X(\omega)$:
\begin{align*}
X(\omega) &= \int_{-\infty}^{\infty} x(t) e^{-i\omega t}dt \\
X(\omega) &= \int_{-\infty}^{\infty} \lr{\sum_{k\in\Z}X_ke^{ik\omega_0 t}} e^{-i\omega t}dt \\
X(\omega) &= \sum_{k\in\Z} X_k \int_{-\infty}^{\infty} e^{ik\omega_0 t} e^{-i\omega t}dt \\
X(\omega) &= \sum_{k\in\Z} X_k \ftrans{ e^{ik\omega_0 t} } \\
X(\omega) &= \sum_{k\in\Z} X_k 2\pi\delta(\omega - k\omega_0) \\
\end{align*}
This tells us that we have an amount $X_k$ of frequency $k\omega_0$:
$$
X(\omega) = \sum_{k\in\Z} X_k 2\pi\delta(\omega - k\omega_0)
$$
\end{example}
\newpage
\section{Week 6}
\subsection{Useful Concepts for AM}
There are three things that should be understood before going into amplitude modulation:
1) A complex exponential in the time domain corresponds to a shifted and scaled Dirac delta in the frequency domain:
\begin{nicebox}
$$
e^{i\omega_0 t} \ftp 2\pi \delta (\omega -\omega_0)
$$
\end{nicebox}
All of the signal's energy lives at the single frequency $\omega_0$:
% \img{images/dirac/singles/2pi.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-2,-1)(3,2)
\psline{->}(-2,0)(2,0)
\rput(2,-0.25){$\omega$}
\rput(2,1){$X(\omega)$}
\psline{->}(0,0)(0,1)
\rput(0.3,1){$(2\pi)$}
\rput(0,-0.25){$\omega_0$}
\end{pspicture}
\end{center}
2) Using the above formula, we can determine the Fourier transform pair for a cosine function in the time domain:
\begin{align*}
\cos(\omega_0 t) = \frac{1}{2}\lr{e^{i\omega_0 t} + e^{-i\omega_0 t}} &\ftp \frac{1}{2} \lr{ 2\pi\delta (\omega - \omega_0) + 2\pi \delta(\omega + \omega_0) }
\end{align*}
\begin{nicebox}
$$
\cos(\omega_0 t) \ftp \pi\delta(\omega + \omega_0) + \pi\delta(\omega - \omega_0)
$$
\end{nicebox}
% \img{images/dirac/singles/doublefreq.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-2,-1)(3,2)
\psline{->}(-2,0)(2,0)
\rput(2,-0.25){$\omega$}
\rput(2,1){$X(\omega)$}
\psline{->}(-1,0)(-1,1)
\rput(-1.3,1){$(\pi)$}
\rput(-1,-0.25){$-\omega_0$}
\psline{->}(1,0)(1,1)
\rput(1.3,1){$(\pi)$}
\rput(1,-0.25){$\omega_0$}
\end{pspicture}
\end{center}
3) Multiplication in the time domain corresponds to convolution in the frequency domain.
We have shown that convolution in the time domain is multiplication in the frequency domain:
$$(f*g)(t) \ftp F(\omega)G(\omega)$$
Consider the Modulation Property of the Continuous-Time Fourier Transform:
\begin{nicebox}
\begin{align*}
f(t)g(t) \ftp \frac{1}{2\pi} (F*G)(\omega)
\end{align*}
\end{nicebox}
\begin{proof}
\begin{align*}
\iftrans{(F*G)(\omega)} &= \frac{1}{2\pi}\int_{-\infty}^{\infty}(F*G)(\omega)e^{i\omega t}d\omega \\
\iftrans{(F*G)(\omega)} &= \frac{1}{2\pi}\int_{-\infty}^{\infty}\int_{-\infty}^{\infty}F(W)G(\omega-W) dW e^{i\omega t}d\omega \\
\iftrans{(F*G)(\omega)} &= \frac{1}{2\pi}\int_{-\infty}^{\infty}F(W)\int_{-\infty}^{\infty}G(\omega-W) e^{i\omega t} d\omega dW \\
\iftrans{(F*G)(\omega)} &= \frac{1}{2\pi}\int_{-\infty}^{\infty}F(W)\int_{-\infty}^{\infty}G(\Omega) e^{i(\Omega+W) t} d\Omega dW \\
\iftrans{(F*G)(\omega)} &= \frac{1}{2\pi}\int_{-\infty}^{\infty}F(W)\lr{\int_{-\infty}^{\infty}G(\Omega) e^{i\Omega t} d\Omega }e^{iWt}dW \\
\iftrans{(F*G)(\omega)} &= \frac{1}{2\pi}\int_{-\infty}^{\infty}F(W)e^{iWt}dW \lr{\int_{-\infty}^{\infty}G(\Omega) e^{i\Omega t} d\Omega}\\
\iftrans{(F*G)(\omega)} &= \frac{1}{2\pi}\lr{2\pi f(t)}\lr{2\pi g(t)} \\
\iftrans{(F*G)(\omega)} &= 2\pi f(t)g(t) \\
\iftrans{\frac{1}{2\pi}(F*G)(\omega)} &= f(t)g(t) \\
\end{align*}
This shows that multiplication in the time domain corresponds to convolution in the frequency domain, scaled by $\frac{1}{2\pi}$.
\begin{nicebox}
$$
f(t)g(t) \ftp \frac{1}{2\pi} (F * G) (\omega)
$$
\end{nicebox}
\end{proof}
\subsection{Amplitude Modulation - Sending}
Consider a voice signal that we need to transfer over a long distance, where its frequency spectrum lives within the frequency band $(-A,A)$.
% \img{images/triangles/triangle.ps}
\begin{center}
\begin{pspicture}(-3.2,-3.2)(6,6)
% axis
\psline{->}(-3,0)(3,0)
\psline(-2,0)(0,1)
\psline(0,1)(2,0)
\rput(-2,-0.25){$-A$}
\rput(0,-0.25){0}
\rput(2,-0.25){$A$}
\rput(3.25,-0.25){$\omega$}
\rput(-2,1){$X(\omega)$}
\rput(1.12,1){$(1)$}
\end{pspicture}
\end{center}
In reality this signal is not practical to send directly. Generally an antenna must be approximately a quarter of the wavelength of the wave it transmits; any antenna shorter than $\lambda/4$ is inefficient. The speed of light can be given in terms of a frequency $f$ in Hertz multiplied by the corresponding wavelength $\lambda$:
$$c=f\lambda = 2.9979 \times 10^8 m/s $$
Let's say the spectrum of one's voice occupies the range from $-3.3$ kHz to $3.3$ kHz. Then we can approximate the length of the antenna needed by first determining the wavelength:
\begin{align*}
\lambda &= c/f \\
\lambda &\approx 91 km\\
\end{align*}
This tells us we would need roughly $23km$ of antenna to transmit this person's voice. This is why we use amplitude modulation---to transmit a signal at higher frequencies.
Let's consider an amplitude modulator with a sinusoidal carrier given by
%\img{images/modulators/amcos.ps}
The reason this is called amplitude modulation is that, given an input signal $x(t)$, multiplying it by $\cos(\omega_0 t)$ gives a new signal $y(t)$: a cosine wave oscillating between $\pm x(t)$, with $x(t)$ serving as an envelope. \\ \\
Consider an arbitrary input signal, the information-bearing modulating signal:
% \img{images/modulators/ampmod/signal.ps}
Here we have an oscillator signal, the carrier signal whose amplitude will be modulated:
%\img{images/modulators/ampmod/cos.ps}
Here is the final result of the multiplication of the two above signals, where the amplitude of the carrier signal is modulated by the info-bearing signal:
% \img{images/modulators/ampmod/amp.ps}
The info-bearing input signal has a lower frequency and thus acts as an envelope for the carrier signal. \\ \\
$x(t)$ is the \emph{information-bearing} signal (a.k.a. the modulating signal), and $h(t) = \cos(\omega_0 t)$ is the \emph{carrier} signal. The output signal $y(t) = x(t)\cos(\omega_0 t)$, the product of these two signals, is the modulated signal. We can begin solving for the spectrum of $y$ by first finding the spectrum of $h$.
\begin{align*}
h(t) &= \cos(\omega_0 t) \\
h(t) &= \frac{1}{2}e^{i\omega_0 t} + \frac{1}{2}e^{-i\omega_0 t} \\
\ftrans{h(t)} &= \frac{1}{2}\ftrans{e^{i\omega_0 t}} + \frac{1}{2}\ftrans{e^{-i\omega_0 t}} \\
H(\omega) &= \frac{1}{2}2\pi\delta(\omega-\omega_0) + \frac{1}{2}2\pi\delta(\omega+\omega_0) \\
H(\omega) &= \pi\delta(\omega-\omega_0) + \pi\delta(\omega+\omega_0) \\
\end{align*}
$H(\omega)$ is two Dirac deltas shifted symmetrically to $\pm\omega_0$,
% \img{images/dirac/doublefreq.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-1,-1)(5,5)
\psline{->}(-2,0)(2,0)
\rput(2,-0.25){$\omega$}
\rput(2,1){$H(\omega)$}
\psline{->}(-1,0)(-1,1)
\rput(-1.3,1){$(\pi)$}
\rput(-1,-0.25){$-\omega_0$}
\psline{->}(1,0)(1,1)
\rput(1.3,1){$(\pi)$}
\rput(1,-0.25){$\omega_0$}
\end{pspicture}
\end{center}
which gives us an output spectrum consisting of shifted copies of the input spectrum at half the original height:
% \img{images/triangles/double.ps}
\begin{center}
\begin{pspicture}(-3.2,-3.2)(6,6)
% axis
\psline{->}(-5,0)(5,0)
\psline(-3,0)(-2,1)
\psline(-2,1)(-1,0)
\psline(1,0)(2,1)
\psline(2,1)(3,0)
\rput(-3,-0.25){$-\omega_0-A$}
\rput(-2,-1.25){$-\omega_0$}
\rput(-1,-0.25){$-\omega_0+A$}
\rput(3,-0.25){$\omega_0+A$}
\rput(2,-1.25){$\omega_0$}
\rput(1,-0.25){$\omega_0-A$}
\psline[linestyle=dashed](-2,-1)(-2,1)
\psline[linestyle=dashed](2,-1)(2,1)
\rput(5.25,-0.25){$\omega$}
\rput(-3.5,1){$Y(\omega)$}
\rput(2.6,1){$(1/2)$}
\rput(-1.4,1){$(1/2)$}
\end{pspicture}
\end{center}
where we must require that $-\omega_0+A \lt \omega_0-A$, i.e. $A\lt\omega_0$, so that the two copies do not overlap. Further complications are avoided when $\omega_0 \gg A$.
\subsection{Amplitude Modulation - Receiving}
What do we do at the receiver? How can we demodulate this signal? One way would be to pass it through an amplitude modulator using a complex exponential $e^{-i\omega_0 t}$; then we would get the spectrum shifted back to the left, and we could pass it through a low-pass filter with a pass-band gain of 2 to restore the original height. But what if we had an oscillator using a cosine function at the receiving end instead of a complex exponential?
The following scheme is called \emph{synchronous demodulation}: we have an identical cosine at the receiving end. Looking at what happened upon transmission, the spectrum was shifted by $\pm \omega_0$ and the amplitude was cut in half. Applying this again, we get a signal $q(t) = y(t)\cos(\omega_0 t)$, whose spectrum is the already-shifted spectrum shifted again by $\pm \omega_0$. This gives us three disjoint triangular components in the frequency spectrum: the shift to the left by $\omega_0$ gives two triangles of height $1/4$ centered at $-2\omega_0$ and $0$, and the shift to the right gives two triangles of height $1/4$ centered at $2\omega_0$ and $0$. The two triangles centered at $0$ add to form the original triangle halved. So we can now put the signal $q(t)$ through a low-pass filter and amplify the result by 2 to retrieve the original signal.
% \img{images/triangles/three.ps}
\begin{center}
\begin{pspicture}(-5,-3.2)(6,6)
% axis
\psline{->}(-5,0)(5,0)
\psline(-4,0)(-3,0.5)
\psline(-3,0.5)(-2,0)
\psline(2,0)(3,0.5)
\psline(3,0.5)(4,0)
\psline(-1,0)(0,1)
\psline(0,1)(1,0)
\rput(-3,-0.25){$-2\omega_0$}
\rput(0,-0.25){$0$}
\rput(3,-0.25){$2\omega_0$}
\rput(5.25,-0.25){$\omega$}
\rput(-3.5,1){$Q(\omega)$}
\rput(0.5,1){$(1/2)$}
\rput(3.7,0.5){$(1/4)$}
\rput(-2.3,0.5){$(1/4)$}
\end{pspicture}
\end{center}
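Here is a simulation sketch of synchronous demodulation, assuming \texttt{numpy} (the sample rate, the two-tone test signal, the $100$ Hz carrier, and the $50$ Hz cutoff are all arbitrary choices; the ideal low-pass filter is implemented by zeroing FFT bins):
\begin{verbatim}
import numpy as np

fs = 1000.0                        # simulation sample rate (Hz)
t = np.arange(0, 1.0, 1 / fs)
x = np.cos(2 * np.pi * 3 * t) + 0.5 * np.sin(2 * np.pi * 7 * t)
w0 = 2 * np.pi * 100               # carrier well above the signal band

y = x * np.cos(w0 * t)             # transmitted signal
q = y * np.cos(w0 * t)             # synchronous receiver

Q = np.fft.fft(q)                  # ideal low-pass: kill |f| > 50 Hz
f = np.fft.fftfreq(len(q), 1 / fs)
Q[np.abs(f) > 50] = 0.0
g = 2 * np.real(np.fft.ifft(Q))    # pass-band gain of 2

print(np.max(np.abs(g - x)))       # tiny: x(t) is recovered
\end{verbatim}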
At the receiving end the local oscillator potentially has a phase difference and a frequency difference relative to the transmitter. This demodulation scheme assumes the same frequency and phase at the sender and receiver. What happens if at the receiver we have $q(t) = y(t)\cos((\omega_0+\Delta\omega)t + \theta)$?
% \img{images/modulators/receiver/amcos.ps}
\begin{center}
\begin{pspicture}(-3,-3)(3,3)
% in from x
\rput(-3.1,0.3){$x(t)\cos(\omega_0t)$}
\psline[linewidth=1.25 pt, arrowscale=1.1]{->}(-2.7,0)(-0.25,0)
% out to y
\rput(4.2,0.3){$x(t)\cos(\omega_0t)\cos((\omega_0+\Delta\omega) t+\theta)$}
\psline[linewidth=1.25 pt, arrowscale=1.1]{->}(0.25,0)(2.7,0)
% up arrow
\rput(0,-2){$\cos((\omega_0+\Delta\omega) t+\theta)$}
\psline[linewidth=1.25 pt, arrowscale=1.1]{->}(0,-1.65)(0,-0.25)
% multiplier
\pscircle(0,0){0.25}
\psline(-0.175,0.175)(0.175,-0.175)
\psline(0.175,0.175)(-0.175,-0.175)
% box
\pspolygon(-1.6,-2.65)(-1.6,1)(1.6,1)(1.6,-2.65)
\end{pspicture}
\end{center}
\begin{align*}
y(t) &= x(t)\cos(\omega_0 t) \\
r(t) &= y(t)\cos((\omega_0+\Delta\omega) t + \theta) \\
r(t) &= x(t)\cos(\omega_0 t)\cos((\omega_0+\Delta\omega) t + \theta) \\
\end{align*}
We can use the trigonometric identity:
$$ \cos\alpha \cos\beta = \frac{1}{2}\lr{\cos\lr{\alpha+\beta}+\cos\lr{\alpha-\beta}} $$
\begin{align*}
r(t) &= x(t)\cos(\omega_0 t)\cos((\omega_0+\Delta\omega) t + \theta) \\
r(t) &= \frac{x(t)}{2} \lr{\cos((2\omega_0 + \Delta\omega)t + \theta) + \cos(\Delta \omega t + \theta)} \\
\end{align*}
After we pass this signal through a low-pass filter (with a pass-band gain of 2), we get rid of the first term, which oscillates at the frequency $2\omega_0+\Delta\omega$. So if $g(t)$ denotes our signal after the low-pass filter, then $g(t) = x(t)\cos(\Delta\omega t + \theta)$. We hope this is equal to $x(t)$.
Let's consider the different cases:
1) No frequency jitter ($\Delta\omega = 0$), so that $g(t) = x(t)\cos(\theta)$.
If $\theta \approx \frac{\pi}{2}$, you get an extremely weak baseband signal. When $\theta = 0$, you have the synchronous demodulation scheme.
2) The phase is zero, but there is frequency jitter, so $g(t) = x(t)\cos(\Delta\omega t)$.
This is amplitude modulation by a very slow carrier signal, where we will have overlap in the frequency domain, causing echoes and other problems.
\subsection{Quadrature Multiplexing}
The previous method of transmission was known as \emph{Coherent Detection}, or \emph{Coherent Demodulation}, or \emph{Synchronous Detection/Demodulation}. The transmitter and receiver may be hundreds of miles apart, and in the case of short wave radio, thousands of miles apart. What you transmit is not what you receive: the atmosphere degrades the signal in various ways---all kinds of interference and climate issues can affect the integrity of the signal. The major assumption we made in the last discussion is that we send $y(t)$ and receive exactly $y(t)$. In the city in particular, the signal you transmit gets reflected off buildings, so at the receiver you get not only the original signal but also echoes of that signal with attenuated amplitudes. Allowing frequency jitter and a phase difference is only a small step toward generality; there are all kinds of things that can go wrong. Can we use a better method? Let's look at \emph{Quadrature Multiplexing}, otherwise known as \emph{Quadrature Amplitude Modulation}:
%\img{images/modulators/quadmult/quad.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-1,-1)(6,4)
%text
\rput(0.8,3){$x_1(t)$}
%text
\rput(0.8,1.5){$x_2(t)$}
%text
\rput(5.5,2.25){$y(t)$}
%text
\rput(2.75,0.5){$\sin(\omega_0t)$}
%text
\rput(2.75,2){$\cos(\omega_0t)$}
% wire with arrow
\psline{->}(1.2,3)(2.5,3)
% wire with arrow
\psline{->}(1.2,1.5)(2.5,1.5)
% wire with arrow
\psline{}(3,3)(4.5,3)
% wire with arrow
\psline{}(3,1.5)(4.5,1.5)
% wire with arrow
\psline{->}(4.5,3)(4.5,2.5)
% wire with arrow
\psline{->}(4.5,1.5)(4.5,2)
% wire with arrow
\psline{->}(4.75,2.25)(5.25,2.25)
% wire with arrow
\psline{->}(2.75,2.25)(2.75,2.75)
% wire with arrow
\psline{->}(2.75,0.75)(2.75,1.25)
% multiplier
\pscircle(2.75,3){0.25}
\psline(2.575,3.175)(2.925,2.825)
\psline(2.925,3.175)(2.575,2.825)
% multiplier
\pscircle(2.75,1.5){0.25}
\psline(2.575,1.675)(2.925,1.325)
\psline(2.925,1.675)(2.575,1.325)
% adder
\pscircle(4.5,2.25){0.25}
\psline(4.25,2.25)(4.75,2.25)
\psline(4.5,2.5)(4.5,2)
\end{pspicture}
\end{center}
This was used to transmit color signals in the standard television system before HDTV. It works in the following way: the system transmits two signals, carrying them on the same frequency, and the receiver separates the two signals:
% \img{images/modulators/quadmult/rec.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-1,-1)(6,4)
%frame
\rput(4.5,3){$\mbox{LPF}$}
\psframe(5,2.75)(4,3.25)
%frame
\rput(4.5,1.5){$\mbox{LPF}$}
\psframe(5,1.25)(4,1.75)
%text
\rput(6,3){$g_1(t)$}
%text
\rput(6,1.5){$g_2(t)$}
%text
\rput(0.8,2.25){$y(t)$}
%text
\rput(2.75,0.5){$\sin(\omega_0t)$}
%text
\rput(2.75,2){$\cos(\omega_0t)$}
% wire with arrow
\psline{->}(1.2,2.25)(1.5,2.25)
% wire with arrow
\psline{->}(1.5,3)(2.5,3)
% wire with arrow
\psline{->}(1.5,1.5)(2.5,1.5)
% wire with arrow
\psline{->}(5,3)(5.5,3)
% wire with arrow
\psline{->}(5,1.5)(5.5,1.5)
% wire with arrow
\psline{}(3,3)(4,3)
% wire with arrow
\psline{}(3,1.5)(4,1.5)
% wire with arrow
\psline{}(1.5,3)(1.5,1.5)
% wire with arrow
\psline{->}(2.75,2.25)(2.75,2.75)
% wire with arrow
\psline{->}(2.75,0.75)(2.75,1.25)
% multiplier
\pscircle(2.75,3){0.25}
\psline(2.575,3.175)(2.925,2.825)
\psline(2.925,3.175)(2.575,2.825)
% multiplier
\pscircle(2.75,1.5){0.25}
\psline(2.575,1.675)(2.925,1.325)
\psline(2.925,1.675)(2.575,1.325)
\end{pspicture}
\end{center}
Hopefully $g_1(t)$ and $g_2(t)$ are equal to $x_1(t)$ and $x_2(t)$ respectively. Let's do the math for a received signal $y(t) = x_1(t)\cos(\omega_0t) + x_2(t)\sin(\omega_0t)$ by passing it through the cosine branch of the receiving system:
\begin{align*}
y(t) &= x_1(t)\cos(\omega_0t) + x_2(t)\sin(\omega_0t) \\
y(t)\cos(\omega_0t) &= x_1(t)\cos^2(\omega_0t) + x_2(t)\sin(\omega_0t)\cos(\omega_0t) \\
r_1(t) &= x_1(t)\lr{\frac{1+\cos(2\omega_0t)}{2}} + x_2(t)\frac{1}{2}\sin(2\omega_0t) \\
r_1(t) &= \frac{x_1(t)}{2} + \frac{x_1(t)}{2}\cos(2\omega_0t) + \frac{x_2(t)}{2}\sin(2\omega_0t) \\
\end{align*}
Once we pass $r_1(t)$ through a low-pass filter with a gain of 2 to get $g_1(t)$, we can recover the signal $x_1(t)$ because all terms with frequency $2\omega_0$ will be cut out.
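Here is a simulation sketch of quadrature multiplexing, assuming \texttt{numpy} (the sample rate, test signals, carrier frequency, and cutoff are arbitrary choices, with the same FFT-masking low-pass filter as in the earlier sketch):
\begin{verbatim}
import numpy as np

fs = 1000.0
t = np.arange(0, 1.0, 1 / fs)
x1 = np.cos(2 * np.pi * 2 * t)
x2 = np.sin(2 * np.pi * 5 * t)
w0 = 2 * np.pi * 100

y = x1 * np.cos(w0 * t) + x2 * np.sin(w0 * t)

def lowpass_gain2(r, cutoff=50.0):
    R = np.fft.fft(r)
    f = np.fft.fftfreq(len(r), 1 / fs)
    R[np.abs(f) > cutoff] = 0.0
    return 2 * np.real(np.fft.ifft(R))

g1 = lowpass_gain2(y * np.cos(w0 * t))   # recovers x1
g2 = lowpass_gain2(y * np.sin(w0 * t))   # recovers x2
print(np.max(np.abs(g1 - x1)), np.max(np.abs(g2 - x2)))
\end{verbatim}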
\subsection{Large Carrier AM}
Recall we can take a signal $x(t)$:
% \img{images/modulators/envelope/justx.ps}
and use amplitude modulation to send the signal across a network:
% \img{images/modulators/envelope/combined.ps}
If you recall, we assumed that the transmitter and receiver were in sync. If we want to bypass the oscillator synchronization issue entirely, we can hook up a receiver that tracks the envelope of the incoming signal. Consider the following circuit:
% \img{images/modulators/envelope/circuit/combo.ps}
What happens is that $y(t)$ comes in, we have a capacitor and a resistor, and something proportional to $x(t)$ shows up at the other end. How does this work? A diode is a device that conducts like a short circuit when the voltage before it exceeds the voltage after it, and acts like an open circuit otherwise. The moment the incoming voltage exceeds the capacitor voltage, the diode conducts and charges the capacitor. This allows us to track the envelope of the incoming signal.
% \img{images/modulators/envelope/track/combined.ps}
When the incoming signal $y(t)$ is near one of its peaks, it is in the positive part of its cycle and the diode conducts. While it conducts, the capacitor charges up until the capacitor voltage exceeds the modulated signal; at that point the diode opens. So initially the capacitor tracks the peak, and as the signal comes down, the capacitor's voltage slowly drops as it discharges through the resistor. While the diode is open, nothing gets through until the next cycle.
Note that this assumes that $x(t) \gt 0$ $\forall t$; if the signal is ever negative, this method breaks down, so we need to shift the signal up. This brings us to \emph{large carrier amplitude modulation}: you also send a copy of the carrier signal along with the modulated signal. If $\abs{x(t)} \leq k \in \R_{\oplus}$, then modulate the carrier with $A + x(t)$, where $A \gt k$ so that $A + x(t)$ stays positive. Then we can utilize envelope tracking, and the receiver must shift the signal down after the tracking. This system works as follows:
% \img{images/modulators/largecarrier/largec.ps}
\begin{center}
\begin{pspicture}(1,1)(6.5,4.5)
%text
\rput(1.3,3){$x(t)$}
%text
\rput(5.75,3){$y(t)$}
\rput(3.75,4){$y(t) = \left( A + x(t) \right) \cos(\omega_0t)$}
%text
\rput(2.75,2){$\cos(\omega_0t)$}
\pspolygon(4.5,1.8)(5,1.8)(4.75,2.25)
\rput(4.75,2){$A$}
% wire with arrow
\psline{->}(1.5,3)(2.5,3)
% wire with arrow
\psline{->}(3,3)(4.5,3)
% wire with arrow
\psline{->}(5,3)(5.5,3)
% wire with arrow
\psline{->}(2.75,2.25)(2.75,2.75)
% wire with arrow
\psline{->}(3.25,2)(4.5,2)
% wire with arrow
\psline{->}(4.75,2.25)(4.75,2.75)
% multiplier
\pscircle(2.75,3){0.25}
\psline(2.575,3.175)(2.925,2.825)
\psline(2.925,3.175)(2.575,2.825)
% adder
\pscircle(4.75,3){0.25}
\psline(4.5,3)(5,3)
\psline(4.75,3.25)(4.75,2.75)
\end{pspicture}
\end{center}
Given a generic signal with bandwidth $2A$, let's determine what $Y(\omega)$ looks like.
% \img{images/modulators/largecarrier/freqdomain/double.ps}
\begin{center}
\begin{pspicture}(-4,-1)(4,3)
% axis
\psline{->}(-5,0)(5,0)
\psline(-3,0)(-2,1)
\psline(-2,1)(-1,0)
\psline(1,0)(2,1)
\psline(2,1)(3,0)
\rput(-2, 2.3){$(A\pi)$}
\rput(2, 2.3){$(A\pi)$}
\psline{->}(-2,0)(-2,2)
\psline{->}(2,0)(2,2)
\rput(-3,-0.35){$-\omega_0-A$}
\rput(-2,-0.25){$-\omega_0$}
\rput(0,-0.35){$0$}
\rput(-1,-0.35){$-\omega_0+A$}
\rput(3,-0.35){$\omega_0+A$}
\rput(2,-0.25){$\omega_0$}
\rput(1,-0.35){$\omega_0-A$}
\psline(-3,-0.1)(-3,0.1)
\psline(-2,-0.1)(-2,0.1)
\psline(-1,-0.1)(-1,0.1)
\psline(0,-0.1)(0,0.1)
\psline(1,-0.1)(1,0.1)
\psline(2,-0.1)(2,0.1)
\psline(3,-0.1)(3,0.1)
\rput(5.25,-0.25){$\omega$}
\rput(-3.5,2){$Y(\omega)$}
\rput(2.6,1){$(1/2)$}
\rput(-1.4,1){$(1/2)$}
\end{pspicture}
\end{center}
\subsection{Frequency Division Multiplexing}
The FCC controls the spectrum from something on the order of 10 kHz up to 2.75 GHz. The frequency band is under FCC control like public land: the FCC sells licenses to frequency ranges, radio stations and cable companies pay for them, and in the end that frequency spectrum belongs to you and me.
\emph{Frequency Division Multiplexing} enables simultaneous transmission of multiple signals.
% \img{images/modulators/freqdiv/freq.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(0,-3)(5.5,4)
%text
\rput(0.8,3){$x_1(t)$}
%text
\rput(0.8,1.5){$x_2(t)$}
%text
\rput(0.8,-1){$x_n(t)$}
%text
\rput(0.8,0){$\vdots$}
%text
\rput(2.75,0){$\vdots$}
%text
\rput(5.5,2.25){$y(t)$}
%text
\rput(2.75,-2){$\cos(\omega_n t)$}
%text
\rput(2.75,0.5){$\cos(\omega_2 t)$}
%text
\rput(2.75,2){$\cos(\omega_1 t)$}
% wire with arrow
\psline{->}(1.2,3)(2.5,3)
% wire with arrow
\psline{->}(1.2,1.5)(2.5,1.5)
% wire with arrow
\psline{->}(1.2,-1)(2.5,-1)
% wire with arrow
\psline{}(3,3)(4.5,3)
% wire with arrow
\psline{}(3,1.5)(4.5,1.5)
% wire with arrow
\psline{}(3,-1)(4.5,-1)
% wire with arrow
\psline{->}(4.5,3)(4.5,2.5)
% wire with arrow
\psline{->}(4.5,-1)(4.5,2)
% wire with arrow
\psline{->}(4.75,2.25)(5.25,2.25)
% wire with arrow
\psline{->}(2.75,2.25)(2.75,2.75)
% wire with arrow
\psline{->}(2.75,0.75)(2.75,1.25)
% wire with arrow
\psline{->}(2.75,-1.75)(2.75,-1.25)
% multiplier
\pscircle(2.75,3){0.25}
\psline(2.575,3.175)(2.925,2.825)
\psline(2.925,3.175)(2.575,2.825)
% multiplier
\pscircle(2.75,1.5){0.25}
\psline(2.575,1.675)(2.925,1.325)
\psline(2.925,1.675)(2.575,1.325)
% multiplier
\pscircle(2.75,-1){0.25}
\psline(2.575,-0.825)(2.925,-1.175)
\psline(2.925,-0.825)(2.575,-1.175)
% adder
\pscircle(4.5,2.25){0.25}
\psline(4.25,2.25)(4.75,2.25)
\psline(4.5,2.5)(4.5,2)
\end{pspicture}
\end{center}
We multiply each incoming signal by one of a set of mutually orthogonal functions---in this case, cosines with frequencies $\omega_1, \omega_2, \dots, \omega_n$. So you take the frequency spectrum, split it into different ranges, and distribute the different frequency bands to different stations. Say we are talking about three radio stations, so we have multiple signals awaiting transmission:
% \img{images/modulators/freqdiv/stations/justd.ps}
% \img{images/modulators/freqdiv/stations/justamadou.ps}
% \img{images/modulators/freqdiv/stations/justbabak.ps}
What happens when we use frequency division multiplexing? We get the following spectra for $Y(\omega)$:
% \img{images/modulators/freqdiv/stations/all.ps}
How would we get a particular station? Let's suppose we want the station at $\omega_2$. A crude solution would be a tunable band-pass filter: the receiver has a knob, and by turning that knob you determine which frequency---which station---you are interested in. So with that knob you would have to control not only the demodulation frequency but also the band-pass filter. For practical reasons this is hard: when the frequency bands are packed this tightly, it's like the city of Berkeley---there is no empty land, the lots are small, and they all sit right up against each other, so there is not a whole lot of wiggle room. An ideal band-pass filter has vertical edges; a real one has to roll off, and the higher you go in frequency, the more difficult it is for circuit designers to build sharp band-pass filters.
There has to be another solution: let the knob control a \emph{local oscillator} that maps whatever part of the spectrum you are interested in to one fixed \emph{intermediate frequency}. Then you need only one band-pass filter, designed once, around that intermediate frequency. So regardless of where the station sits in the spectrum, tuning the knob grabs that part of the spectrum and maps it to the intermediate frequency; the built-in band-pass filter selects it, and then you demodulate. The intermediate frequency is not necessarily baseband, but it is fixed, close to zero, and well within the range of our filter.
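As a one-line sketch of why the mixing step works (this is just the modulation property we have been using all along; $\omega_{\mathrm{IF}}$ denotes the fixed intermediate frequency):
$$
y(t)\cos(\omega_c t) \ftp \frac{1}{2}\lrsq{Y(\omega - \omega_c) + Y(\omega + \omega_c)}
$$
so tuning the local oscillator to $\omega_c = \omega_2 - \omega_{\mathrm{IF}}$ slides the band centered at $\omega_2$ down to $\omega_{\mathrm{IF}}$, where the single fixed band-pass filter grabs it.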
\newpage
\section{Week 7}
\subsection{Sampling}
\subsubsection{Sampling using a pulse train}
The sampling theorem is what relates DT to CT. Consider a system where we multiply the input signal $x(t)$ by a pulse train $q(t)$.
%\img{images/sampling/mult/justboxes.ps}
%\img{images/sampling/mult/justfunction.ps}
When we multiply the signals, we get the original signal scaled by the pulse train. The figure below shows the case where $A=1$:
%\img{images/sampling/mult/both.ps}
In essence we are tossing out values from the signal; that is the critical thing that happens when you sample. How much can we afford to throw out and still recover the signal? This is what the sampling theorem tells us. Let's start by looking at the frequency spectrum when we have a bandlimited input signal, given by the following spectrum:
%\img{images/triangles/voice/triangle.ps}
We know that multiplication in the time domain corresponds to convolution in the frequency domain:
$$
X_q(\omega) = \frac{1}{2\pi} (X * Q)(\omega)
$$
What is $Q(\omega)$? Why not just plug in $q(t)$ into the CTFT?
\begin{align*}
Q(\omega) &= \int_{-\infty}^{\infty} q(t) e^{-i\omega t}dt \\
\end{align*}
No (nonzero) periodic signal can be plugged into the analysis equation to get a Fourier transform---it has neither finite energy nor absolute integrability. For periodic signals we instead talk about energy per period. So we have to go through a roundabout way to figure out what the transform is; in general, no periodic signal can go directly into the CTFT.
To bypass this, we can :
\begin{enumerate}
\item Express $q(t)$ in terms of its Fourier series coefficients.
\begin{align*}
q(t) &= \sum \limits_{k\in\Z} Q_k e^{ik\omega_s t} \quad \mbox{where } \omega_s = 2\pi/T \\
\end{align*}
\item Then use the CTFT on these terms.
\begin{align*}
Q(\omega) &= 2\pi \sum \limits_{k\in\Z} Q_k \delta(\omega - k\omega_s) \\
\end{align*}
\end{enumerate}
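As a quick sanity check of this two-step recipe, apply it to the simplest periodic signal, a plain cosine:
\begin{align*}
\cos(\omega_0 t) &= \frac{1}{2}e^{i\omega_0 t} + \frac{1}{2}e^{-i\omega_0 t} \quad \Rightarrow \quad Q_{\pm1} = \frac{1}{2}, \quad Q_k = 0 \mbox{ otherwise} \\
\ftrans{\cos(\omega_0 t)} &= 2\pi\lr{\frac{1}{2}\delta(\omega - \omega_0) + \frac{1}{2}\delta(\omega + \omega_0)} = \pi\delta(\omega - \omega_0) + \pi\delta(\omega + \omega_0) \\
\end{align*}
which is the familiar transform of the cosine.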
Let's derive the CTFT for our periodic pulse train $q(t)$. We first equate the Fourier series synthesis equation with the CTFT synthesis equation:
\begin{align*}
q(t) &= \sum \limits_{k \in \Z} Q_k e^{ik\omega_s t} = \frac{1}{2\pi} \int_{\R} Q(\omega) e^{i\omega t} d\omega \\
\end{align*}
Then, positing an impulsive transform $Q(\omega) = \sum_{k\in\Z} A_k \delta(\omega - k\omega_s)$ with unknown strengths $A_k$, we can solve:
\begin{align*}
\sum \limits_{k \in \Z} Q_k e^{ik\omega_s t} &= \frac{1}{2\pi} \int_{\R} Q(\omega) e^{i\omega t} d\omega \\
\sum \limits_{k \in \Z} Q_k e^{ik\omega_s t} &= \frac{1}{2\pi} \int_{\R} \sum \limits_{k\in \Z} A_k \delta(\omega - k\omega_s) e^{i\omega t} d\omega \\
\sum \limits_{k \in \Z} Q_k e^{ik\omega_s t} &= \sum \limits_{k\in \Z} \frac{A_k}{2\pi} \int_{\R} \delta(\omega - k\omega_s) e^{i\omega t} d\omega \\
\sum \limits_{k \in \Z} Q_k e^{ik\omega_s t} &= \sum \limits_{k\in \Z} \frac{A_k}{2\pi} e^{ik\omega_s t} \\
\end{align*}
Now we can equate the coefficients:
\begin{align*}
\sum \limits_{k \in \Z} Q_k e^{ik\omega_s t} &= \sum \limits_{k\in \Z} \frac{A_k}{2\pi} e^{ik\omega_s t} \\
Q_k &= \frac{A_k}{2\pi} \\
2\pi Q_k &= A_k \\
\end{align*}
This tells us what the Fourier transform is:
\begin{align*}
Q(\omega) &= 2\pi \sum \limits_{k\in \Z} Q_k \delta(\omega - k\omega_s) \\
\end{align*}
Now let's find the Fourier series coefficients:
\begin{align*}
Q_k &= \frac{1}{T} \int_{\langle T \rangle} q(t) e^{-ik\omega_st}dt \\
Q_k &= \frac{A}{T} \int_{-\Delta/2}^{\Delta/2} e^{-ik\omega_st}dt \\
Q_k &= \frac{A}{T} \left. \frac{e^{-ik\omega_st}}{-ik\omega_s} \right|^{\Delta/2}_{-\Delta/2} \\
Q_k &= \frac{A}{T} \frac{2\sin( k \omega_s \Delta/2)}{k\omega_s} \\
Q_k &= \frac{A\sin( k \omega_s \Delta/2)}{k\pi} \\
\end{align*}
% image of Q_k as kronecker deltas
%\img{images/sampling/samples/qk/graph.ps}
Now that we know $Q_k$, we can determine $Q(\omega)$:
\begin{align*}
Q(\omega) &= 2\pi \sum \limits_{k\in\Z} Q_k \delta(\omega - k\omega_s) \\
Q(\omega) &= 2\pi \sum \limits_{k\in\Z} \frac{A\sin( k \omega_s \Delta/2)}{k\pi} \delta(\omega - k\omega_s) \\
\end{align*}
% image of Q(\omega) as dirac deltas spaces by \omega_s
% \img{images/sampling/samples/qw/graph.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-5,-1)(5,3)
\rput(4,1.79990000199998){$Q(\omega)$}
% x-axis
\psline{->}(-4.6, 0)(4.4, 0)
\rput(4.4,-0.3){$\omega$}
% begin x-axis labels:
\psline(-4.2, -0.1)(-4.2, 0.1)
\rput(-4.2, -0.3){$-7\omega_s$}
\psline(-3.6, -0.1)(-3.6, 0.1)
\rput(-3.6, -0.3){$-6\omega_s$}
\psline(-3, -0.1)(-3, 0.1)
\rput(-3, -0.3){$-5\omega_s$}
\psline(-2.4, -0.1)(-2.4, 0.1)
\rput(-2.4, -0.3){$-4\omega_s$}
\psline(-1.8, -0.1)(-1.8, 0.1)
\rput(-1.8, -0.3){$-3\omega_s$}
\psline(-1.2, -0.1)(-1.2, 0.1)
\rput(-1.2, -0.3){$-2\omega_s$}
\psline(-0.6, -0.1)(-0.6, 0.1)
\rput(-0.6, -0.3){$-\omega_s$}
\psline(0, -0.1)(0, 0.1)
\rput(0, -0.3){$0$}
\psline(0.6, -0.1)(0.6, 0.1)
\rput(0.6, -0.3){$\omega_s$}
\psline(1.2, -0.1)(1.2, 0.1)
\rput(1.2, -0.3){$2\omega_s$}
\psline(1.8, -0.1)(1.8, 0.1)
\rput(1.8, -0.3){$3\omega_s$}
\psline(2.4, -0.1)(2.4, 0.1)
\rput(2.4, -0.3){$4\omega_s$}
\psline(3, -0.1)(3, 0.1)
\rput(3, -0.3){$5\omega_s$}
\psline(3.6, -0.1)(3.6, 0.1)
\rput(3.6, -0.3){$6\omega_s$}
% end x-axis labels:
\fileplot[linewidth=1.0pt,linestyle=dashed,linecolor=blue!40]{sinc.dat}
%dirac
\psline[linewidth=1.25pt]{->}(-3.6, 0)(-3.6, 0.16534747163524)
%dirac
\psline[linewidth=1.25pt]{->}(-3, 0)(-3, -0.0698538745497315)
%dirac
\psline[linewidth=1.25pt]{->}(-2.4, 0)(-2.4, -0.3113014402612)
%dirac
\psline[linewidth=1.25pt]{->}(-1.8, 0)(-1.8, -0.184383518039522)
%dirac
\psline[linewidth=1.25pt]{->}(-1.2, 0)(-1.2, 0.422164487844469)
%dirac
\psline[linewidth=1.25pt]{->}(-0.6, 0)(-0.6, 1.16504885745903)
%dirac
\psline[linewidth=1.25pt]{->}(0, 0)(0, 1.49990000199998)
\rput(0, 1.7){$(2\pi Q_0)$}
%dirac
\psline[linewidth=1.25pt]{->}(0.6, 0)(0.6, 1.16504885745903)
%dirac
\psline[linewidth=1.25pt]{->}(1.2, 0)(1.2, 0.422164487844469)
%dirac
\psline[linewidth=1.25pt]{->}(1.8, 0)(1.8, -0.184383518039522)
%dirac
\psline[linewidth=1.25pt]{->}(2.4, 0)(2.4, -0.3113014402612)
%dirac
\psline[linewidth=1.25pt]{->}(3, 0)(3, -0.0698538745497315)
%dirac
\psline[linewidth=1.25pt]{->}(3.6, 0)(3.6, 0.16534747163524)
\end{pspicture}
\end{center}
Now we are ready to perform the convolution:
$$
X_q(\omega) = \frac{1}{2\pi} (X * Q)(\omega)
$$
Recall that the spectrum of our input signal was a triangle. What happens when the triangle is convolved with scaled and shifted impulses? We get scaled and shifted triangles:
% image of X_q(\omega) with the signal X convolved with Q
%\img{images/sampling/samples/xq/axis.ps}
Recoverability comes down to the replicated spectra not overlapping: the sampling frequency $\omega_s$ must be large enough that when the triangle is replicated at the multiples of $\omega_s$, adjacent copies don't overlap.
%\img{images/sampling/impulse/triangles/zoom2/impulse.ps}
%\img{images/sampling/impulse/triangles/zoom/threetris2.ps}
Notice that $D \leq \omega_s - D \Rightarrow 2D \leq \omega_s$, where $2D$ is the bandwidth of the spectrum. In this course we refer to bandwidth as the total footprint of the signal on the $\omega$ axis; some books use half of this value, in which case they would say you need twice the bandwidth to recover the original signal. Note that $2D \leq \omega_s$ is sufficient, but not necessary; compressed sensing is an area of research that studies recovery below this rate.
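To see the criterion numerically, here is a minimal sketch (not from lecture) in Python with numpy; the tone frequency and sampling rates are illustration values. A 300 Hz tone sampled at 1000 Hz shows its spectral peak at 300 Hz, while the same tone sampled at 400 Hz shows a folded peak at 100 Hz:
\begin{verbatim}
import numpy as np

def peak_freq(f0, fs, n=4000):
    """Sample an f0-Hz tone at rate fs; return the spectral peak."""
    x = np.sin(2*np.pi*f0*np.arange(n)/fs)
    spec = np.abs(np.fft.rfft(x*np.hanning(n)))
    return np.fft.rfftfreq(n, 1/fs)[np.argmax(spec)]

print(peak_freq(300.0, 1000.0))  # ~300 Hz: fs > 2*f0, no aliasing
print(peak_freq(300.0, 400.0))   # ~100 Hz: folded to |300 - 400|
\end{verbatim}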
\subsubsection{Ideal Sampling with an impulse train}
What if we multiply by an impulse train instead of a pulse train? Note that this is an idealization: in real life you cannot generate true impulses, and physically realizable signals only approximate them. Consider a system where we multiply the input signal $x(t)$ by an impulse train $g(t)$.
% \img{images/sampling/impulse/impulse/impuls.eps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-5,-1)(5,3)
% function name
\rput(4,1.7){$g(t) = \sum \limits_{k=-\infty}^{\infty} \delta(t-kT)$}
\rput(-4.4, 0.5){$\cdots$}
\rput(4.4, 0.5){$\cdots$}
%dirac
\psline[linewidth=1.25pt]{->}(-4, 0)(-4, 1)
%dirac
\psline[linewidth=1.25pt]{->}(-3, 0)(-3, 1)
%dirac
\psline[linewidth=1.25pt]{->}(-2, 0)(-2, 1)
%dirac
\psline[linewidth=1.25pt]{->}(-1, 0)(-1, 1)
%dirac
\psline[linewidth=1.25pt]{->}(0, 0)(0, 1)
\rput(0,1.3){(1)}
%dirac
\psline[linewidth=1.25pt]{->}(1, 0)(1, 1)
%dirac
\psline[linewidth=1.25pt]{->}(2, 0)(2, 1)
%dirac
\psline[linewidth=1.25pt]{->}(3, 0)(3, 1)
%dirac
\psline[linewidth=1.25pt]{->}(4, 0)(4, 1)
% x-axis
\psline{->}(-4.4, 0)(4.9, 0)
\rput(4.4,-0.3){$t$}
% begin x-axis labels:
\psline(-4, -0.1)(-4, 0.1)
\rput(-4, -0.3){$-4T$}
\psline(-3, -0.1)(-3, 0.1)
\rput(-3, -0.3){$-3T$}
\psline(-2, -0.1)(-2, 0.1)
\rput(-2, -0.3){$-2T$}
\psline(-1, -0.1)(-1, 0.1)
\rput(-1, -0.3){$-T$}
\psline(0, -0.1)(0, 0.1)
\rput(0, -0.3){$0$}
\psline(1, -0.1)(1, 0.1)
\rput(1, -0.3){$T$}
\psline(2, -0.1)(2, 0.1)
\rput(2, -0.3){$2T$}
\psline(3, -0.1)(3, 0.1)
\rput(3, -0.3){$3T$}
\psline(4, -0.1)(4, 0.1)
\rput(4, -0.3){$4T$}
% end x-axis labels:
\end{pspicture}
\end{center}
How does this impulse train $g(t)$ relate to the pulse train $q(t)$? It is the limit of pulses that get narrower while keeping unit area (amplitude $A = 1/\Delta$):
$$
g(t) = \lim_{\Delta \to 0} q(t)
$$
% \img{images/sampling/impulse/multiplied/impulse.ps}
%% image of multiplication
%%% graph of x_g(t) and x(t)
What is the Fourier transform of the impulse train? Its Fourier series coefficients are $G_k = \frac{1}{T} \int_{\langle T \rangle} \delta(t) e^{-ik\omega_s t} dt = \frac{1}{T}$ for every $k$, so:
%% image of impulse train
%% image of new impulse train
\begin{align*}
G(\omega) &= 2\pi \sum \limits_{k\in\Z} G_k \delta(\omega - k\omega_s) \\
G(\omega) &= 2\pi \sum \limits_{k\in\Z} \frac{1}{T} \delta(\omega - k\omega_s) \\
G(\omega) &= \frac{2\pi}{T} \sum \limits_{k\in\Z} \delta(\omega - k\omega_s) \\
G(\omega) &= \omega_s \sum \limits_{k\in\Z} \delta(\omega - k\omega_s) \\
\end{align*}
% \img{images/sampling/impulse/ftrans/impulse.ps}
\begin{center}
\begin{pspicture}(-5,-1)(5,3)
% function name
\rput(3.5,1.7){$G(\omega) = \omega_s \sum \limits_{k=-\infty}^{\infty} \delta(\omega - k\omega_s)$}
\rput(-4.4, 0.5){$\cdots$}
\rput(4.4, 0.5){$\cdots$}
%dirac
\psline[linewidth=1.25pt]{->}(-4, 0)(-4, 1)
%dirac
\psline[linewidth=1.25pt]{->}(-3, 0)(-3, 1)
%dirac
\psline[linewidth=1.25pt]{->}(-2, 0)(-2, 1)
%dirac
\psline[linewidth=1.25pt]{->}(-1, 0)(-1, 1)
%dirac
\psline[linewidth=1.25pt]{->}(0, 0)(0, 1)
\rput(0,1.3){$\left(\frac{2\pi}{T}\right)$}
%dirac
\psline[linewidth=1.25pt]{->}(1, 0)(1, 1)
%dirac
\psline[linewidth=1.25pt]{->}(2, 0)(2, 1)
%dirac
\psline[linewidth=1.25pt]{->}(3, 0)(3, 1)
%dirac
\psline[linewidth=1.25pt]{->}(4, 0)(4, 1)
% x-axis
\psline{->}(-4.4, 0)(4.9, 0)
\rput(4.6,-0.3){$\omega$}
% begin x-axis labels:
\psline(-4, -0.1)(-4, 0.1)
\rput(-4, -0.3){$-4\omega_s$}
\psline(-3, -0.1)(-3, 0.1)
\rput(-3, -0.3){$-3\omega_s$}
\psline(-2, -0.1)(-2, 0.1)
\rput(-2, -0.3){$-2\omega_s$}
\psline(-1, -0.1)(-1, 0.1)
\rput(-1, -0.3){$-\omega_s$}
\psline(0, -0.1)(0, 0.1)
\rput(0, -0.3){$0$}
\psline(1, -0.1)(1, 0.1)
\rput(1, -0.3){$\omega_s$}
\psline(2, -0.1)(2, 0.1)
\rput(2, -0.3){$2\omega_s$}
\psline(3, -0.1)(3, 0.1)
\rput(3, -0.3){$3\omega_s$}
\psline(4, -0.1)(4, 0.1)
\rput(4, -0.3){$4\omega_s$}
% end x-axis labels:
\end{pspicture}
\end{center}
Convolution of a function with a shifted Dirac shifts the function by the Dirac's offset and scales it by the Dirac's strength. We have found that periodic signals have impulsive Fourier transforms with uniformly spaced impulses. Note that the strength of each impulse here is $\frac{2\pi}{T} = \omega_s$, and the spacing is $\omega_s$. Recall that the impulse train has coefficients that don't depend on $k$: all frequencies contribute equally. Now we can find the convolution of the spectra of the original input signal $x(t)$ and $g(t)$:
%% image of X_g(\omega)
% \img{images/sampling/impulse/triangles/impulse.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-5,-1)(5,3)
% function name
\rput(4,1.7){$X_g(\omega) = \frac{1}{2\pi}(X * G) (\omega)$}
\rput(-4.5, 0.5){$\cdots$}
\rput(4.5, 0.5){$\cdots$}
%fill plot for triangle.dat
\pscustom[fillstyle=solid,fillcolor=blue!40,linestyle=none]{
\fileplot[linewidth=0pt]{triangle.dat}
}
\fileplot[linewidth=1.5pt]{triangle.dat}
\rput(0,1.4){$\left(\frac{1}{T}\right)$}
% x-axis
\psline{->}(-4.4, 0)(4.9, 0)
\rput(4.6,-0.3){$\omega$}
% begin x-axis labels:
\psline(-4, -0.1)(-4, 0.1)
\rput(-4, -0.3){$-4\omega_s$}
\psline(-3, -0.1)(-3, 0.1)
\rput(-3, -0.3){$-3\omega_s$}
\psline(-2, -0.1)(-2, 0.1)
\rput(-2, -0.3){$-2\omega_s$}
\psline(-1, -0.1)(-1, 0.1)
\rput(-1, -0.3){$-\omega_s$}
\psline(0, -0.1)(0, 0.1)
\rput(0, -0.3){$0$}
\psline(1, -0.1)(1, 0.1)
\rput(1, -0.3){$\omega_s$}
\psline(2, -0.1)(2, 0.1)
\rput(2, -0.3){$2\omega_s$}
\psline(3, -0.1)(3, 0.1)
\rput(3, -0.3){$3\omega_s$}
\psline(4, -0.1)(4, 0.1)
\rput(4, -0.3){$4\omega_s$}
% end x-axis labels:
\end{pspicture}
\end{center}
We can simply apply a low pass filter to recover the signal from its samples. But we cannot overlook the fact that we must sample fast enough for this to work. Let's take a closer look at the spectrum and its boundaries:
% \img{images/sampling/impulse/triangles/zoom/threetris.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-5,-1)(5,3)
% function name
\rput(2.5,1.3){$X_g(\omega) = \frac{1}{2\pi}(X * G) (\omega)$}
\rput(-2.6, 0.5){$\cdots$}
\rput(3, 0.5){$\cdots$}
%fill plot for triangle.dat
\pscustom[fillstyle=solid,fillcolor=blue!40,linestyle=none]{
\fileplot[linewidth=0pt]{1.dat}
\fileplot[linewidth=0pt]{2.dat}
\fileplot[linewidth=0pt]{3.dat}
}
\fileplot[linewidth=1.5pt]{1.dat}
\fileplot[linewidth=1.5pt]{2.dat}
\fileplot[linewidth=1.5pt]{3.dat}
\rput(0,1.3){$\left(1/T_s\right)$}
% x-axis
\psline{->}(-2.7, 0)(3, 0)
\rput(3.1,-0.2){$\omega$}
% critical points
\rput(-0.5,-0.2){$-D$}
\rput(0.5,-0.2){$D$}
\psline(-0.5, -0.05)(-0.5, 0.05)
\psline(0.5, -0.05)(0.5, 0.05)
\rput(-2.5,-0.2){$-\omega_s - D$}
\rput(-1.5,-0.2){$-\omega_s + D$}
\psline(-2.5, -0.05)(-2.5, 0.05)
\psline(-1.5, -0.05)(-1.5, 0.05)
\rput(2.5,-0.2){$\omega_s + D$}
\rput(1.5,-0.2){$\omega_s - D$}
\psline(2.5, -0.05)(2.5, 0.05)
\psline(1.5, -0.05)(1.5, 0.05)
% begin x-axis labels:
\psline(-2, -0.05)(-2, 0.05)
\rput(-2, -0.2){$-\omega_s$}
\psline(-1, -0.05)(-1, 0.05)
\psline(0, -0.05)(0, 0.05)
\rput(0, -0.2){$0$}
\psline(1, -0.05)(1, 0.05)
\psline(2, -0.05)(2, 0.05)
\rput(2, -0.2){$\omega_s$}
\end{pspicture}
\end{center}
We must have $D \leq \omega_s - D$. This implies the same condition, $2D \leq \omega_s$. This is called the \emph{Nyquist criterion}: if you have a bandlimited signal, you can sample it at a rate equal to its bandwidth or faster and still recover it. In conclusion, a set of sufficient conditions to recover a signal $x$ from its samples $x(nT)$ is:
\begin{enumerate}
\item $x$ is bandlimited
\item $\omega_s = \frac{2\pi}{T} \geq 2D$
\end{enumerate}
Note that in reality you want to make room for the rolloff of the low pass filter, since the Nyquist rate assumes an ideal low pass filter. What happens if you don't sample fast enough?
% \img{images/sampling/impulse/triangles/aliased/impulse.ps}
Notice that the higher frequency content gets ``folded'' into the lower frequency range. This phenomenon is called aliasing.
\subsection{Aliasing}
\subsubsection{The Carriage Wheel Effect}
In old movies with carriage wheels, sometimes it appears that a wheel is moving backwards, or forwards but slower than it should. Suppose you flash a strobe at a particular frequency on a carriage wheel. If the strobe light and the carriage wheel share the same frequency $\omega_0$, the wheel seems to stand still---you see a ``frozen'' image. Let's see what happens when we sample at other rates. We can model the carriage wheel as $e^{i\omega_0 t}$ or $\sin(\omega_0 t)$.
%\img{images/sampling/aliasing/carriagewheel/wheel.ps}
We can sample $x(t)$ by multiplying it with $g(t) = \sum \limits_{k \in \Z} \delta(t - kT_s)$, where $\omega_s = \frac{2\pi}{T_s}$:
%\img{images/sampling/system/sys.ps}
The spectrum of our signal $x$ is a pair of Diracs with imaginary strengths:
\begin{align*}
x(t) &= \sin(\omega_0 t) \\
x(t) &= \frac{e^{i\omega_0 t} - e^{-i\omega_0 t}}{2i} \\
X(\omega) &= \ftrans{\frac{e^{i\omega_0 t} - e^{-i\omega_0 t}}{2i}} \\
X(\omega) &= \frac{\pi}{i}\delta(\omega-\omega_0) - \frac{\pi}{i}\delta(\omega+\omega_0) \\
\end{align*}
%\img{images/dirac/sin/sin.ps}
Note that the Nyquist criterion tells us we must sample at $\omega_s \geq 2\omega_0$ to recover the signal, since $2\omega_0$ is the bandwidth. Let's deliberately undersample at $\omega_s = \frac{3}{2}\omega_0$. The frequency spectrum of the signal $g$ is an impulse train where each impulse has strength $\omega_s$ and is shifted by $\omega_s$, given by $G(\omega) = \omega_s \sum \limits_{k\in\Z} \delta(\omega - k\omega_s)$.
%\img{images/sampling/aliasing/sin/gw.ps}
Multiplication in the time domain corresponds to convolution in the frequency domain, hence we have $\ftrans{x(t)g(t)} = \frac{1}{2\pi} \lr{ X * G } (\omega)$. This gives us shifted versions of the frequency spectra of $x$ at every dirac in the frequency spectra of $g$:
\begin{align*}
\ftrans{x(t)g(t)} &= \frac{1}{2\pi} \lr{ X * G } (\omega) = X_g(\omega) \\
X_g(\omega) &= \frac{1}{2\pi} \infint X(\Omega) G(\omega - \Omega) d\Omega \\
X_g(\omega) &= \frac{1}{2\pi} \infint X(\Omega) \omega_s \sum \limits_{k \in \Z} \delta(\omega - \Omega - k\omega_s) d\Omega \\
X_g(\omega) &= \frac{\omega_s}{2\pi} \sum \limits_{k \in \Z} \infint X(\Omega) \delta(\omega - \Omega - k\omega_s) d\Omega \\
X_g(\omega) &= \frac{1}{T_s} \sum \limits_{k \in \Z} X(\omega - k\omega_s) \\
X_g(\omega) &= \frac{1}{T_s} \sum \limits_{k \in \Z} \lr{\frac{\pi}{i}\delta(\omega - k\omega_s -\omega_0) - \frac{\pi}{i}\delta(\omega - k\omega_s+\omega_0)} \\
X_g(\omega) &= \frac{1}{T_s} \sum \limits_{k \in \Z} \lr{\frac{\pi}{i}\delta(\omega - \omega_0(3k/2+1)) - \frac{\pi}{i}\delta(\omega - \omega_0(3k/2-1))} \\
\end{align*}
%\img{images/sampling/aliasing/sin/xg.ps}
So what is it that we see? The human visual system behaves like a low pass filter. We have spatial and temporal frequency responses that look something like a family of curves:
%\img{images/hvs/hvs.ps}
This captures the lower frequencies of $X_g(\omega)$, where $\omega \in \left( -\omega_0 , \omega_0 \right)$.
%\img{images/sampling/aliasing/sin/xgframed.ps}
What does the final signal look like? Let $y(t)$ be what you see after the signal is passed through the human visual system (modeled here as an ideal low pass filter with cutoff $\omega_0$ and, for simplicity, gain $T_s$):
%\img{images/hvs/sys/sys.ps}
\begin{align*}
X_g(\omega) &= \frac{1}{T_s} \sum \limits_{k \in \Z} \lr{\frac{\pi}{i}\delta(\omega - \omega_0(3k/2+1)) - \frac{\pi}{i}\delta(\omega - \omega_0(3k/2-1))} \\
Y(\omega) &= \frac{\pi}{i}\delta(\omega + \omega_0/2) - \frac{\pi}{i}\delta(\omega - \omega_0/2) \\
y(t) &= \iftrans{Y(\omega)} = \frac{1}{2i} e^{-i(\omega_0/2)t} - \frac{1}{2i} e^{i(\omega_0/2)t} \\
y(t) &= -\sin\lr{\frac{\omega_0}{2} t} = \sin\lr{-\frac{\omega_0}{2}t} \\
\end{align*}
The human visual system filters out the higher frequencies, and you see the wheel turning at half speed in the reverse direction: the negative sign indicates time reversal, and the division by 2 indicates the slower speed.
\subsubsection{Anti-Aliasing}
Suppose two signals $x_1(t)$ and $x_2(t)$ pass through the same sample points generated by sampling $x_1(t)$, and assume $x_2(t)$ has higher frequency content, so $X_2(\omega)$ has a larger bandwidth than $X_1(\omega)$. Even though $x_2$ is bandlimited, it is not bandlimited enough for this rate, and we would need to increase $\omega_s$ (decrease $T_s$) to recover the signal $x_2(t)$.
%\img{images/sampling/sincs/func/graph.ps}
%In this case we are looking at two trigonometric functions, which happen to be $\cos(\omega_0 t)$ and $\cos((\omega_0 + 2\pi)t)$. This demonstrates why in discrete time we have $2\pi$-periodicity of trigonometric functions. \\ \\
We saw previously what happens when you sample a bandlimited signal $x$ below the Nyquist rate, so that $\omega_s \lt 2D$:
%\img{images/sampling/aliasing/overlap/overlap.ps}
The higher frequencies fold into the lower frequencies: where the triangles collide, the replicas add up, and we get echoes that make the higher frequency content of the signal unrecoverable.
%\img{images/sampling/impulse/triangles/aliased/impulse.ps}
After we sample below the Nyquist rate, we pass the signal through a low pass filter.
%\img{images/sampling/aliasing/aliasing/antialiassys/orig.ps}
If we had an ideal low pass filter that only kept values from $-\frac{\omega_s}{2}$ to $\frac{\omega_s}{2}$, then we would bandlimit the signal, in this case, directly where the overlap occurs:
%\img{images/sampling/aliasing/overlap/lpf.ps}
We would end up with ``garbled'' information in the intervals $(-\omega_s/2,-\omega_s + D)$ and $(\omega_s - D, \omega_s/2)$.
%\img{images/sampling/aliasing/aliasing/triangles/filtered.ps}
How can we solve this? With \emph{anti-alias filtering}: bandlimit the signal $x$ in a pre-processing stage, and then sample the bandlimited version:
%\img{images/sampling/aliasing/aliasing/antialiassys/anti.ps}
If we set the cutoff frequency of the low pass filter to $\omega_c \leq \frac{\omega_s}{2}$, then the sampling rate meets the Nyquist criterion for the bandlimited signal. We then have our anti-aliased signal $x_{aa}(t)$ and its corresponding frequency spectrum:
%\img{images/sampling/aliasing/aliasing/antialiased/filtered.ps}
Now we sample this signal, resulting in the following spectrum:
%\img{images/sampling/impulse/triangles/aliased/anti/freq.ps}
Now if we use an ideal low pass filter with a cutoff frequency of $\omega_s/2$, we can fully recover the anti-aliased version of our signal $x$. This is preferable because the data is more reliable than before, when we had ``garbled'' information in the intervals $(-\omega_s/2,-\omega_s + D)$ and $(\omega_s - D, \omega_s/2)$. That is the difference between sampling first versus bandlimiting first and then sampling.
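Here is a minimal numerical sketch of that comparison (not from lecture) in Python with numpy; the windowed-sinc filter and the specific frequencies are illustration choices, not the ideal filters drawn above. A 100 Hz tone plus an 1850 Hz tone is downsampled by 4 from 8000 Hz, with and without an anti-alias filter; without it, the 1850 Hz tone folds to 150 Hz and corrupts the band we keep:
\begin{verbatim}
import numpy as np

fs, M = 8000, 4                # original rate; downsample by M
t = np.arange(0, 1.0, 1/fs)
x = np.sin(2*np.pi*100*t) + np.sin(2*np.pi*1850*t)  # 1850 > fs/(2M)

# windowed-sinc low-pass with cutoff fs/(2M) = 1000 Hz
taps = 101
k = np.arange(taps) - taps//2
h = (np.sinc(k/M)/M)*np.hamming(taps)
x_aa = np.convolve(x, h, mode="same")

naive  = x[::M]                # sample first: 1850 Hz folds to 150 Hz
x_filt = x_aa[::M]             # bandlimit first: no folding

def amp(sig, f):               # amplitude of the f-Hz component
    S = np.abs(np.fft.rfft(sig))/(len(sig)/2)
    return S[int(round(f*len(sig)*M/fs))]

print(amp(naive, 150.0), amp(x_filt, 150.0))   # ~1.0 vs ~0.0
\end{verbatim}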
\subsubsection{Example}
Let's revisit $x(t) = \sin(\omega_0 t)$. If we sample that signal at a particular rate, we create a discrete time signal---essentially a sampled version of the continuous time signal.
%\img{images/sampling/sin/graph.ps}
Let $x_d(n) = x(nT_s) = \sin(n\omega_0 T_s)$.
%\img{images/sampling/sin/justk.ps}
What if we let our sampling frequency be $\omega_s = \frac{3}{2}\omega_0$? We know we will have aliasing based on the Nyquist rate since we would need $2\omega_0$ to recover the signal safely. Our sampling period would be $T_s = \frac{4\pi}{3\omega_0}$. This gives us $x_d(n) = \sin(4\pi n/3)$.
\begin{align*}
\hat{x}(t) &= \sin(-\omega_0t/2) \\
\hat{x}_d(n) &= \hat{x}(nT_s) \\
\hat{x}_d(n) &= \sin(-\omega_0nT_s/2) \\
\hat{x}_d(n) &= \sin(-2\pi n/3) \\
\hat{x}_d(n) &= \sin(-2\pi n/3 + 2\pi n) = \sin(4\pi n/3) = x_d(n) \\
\hat{x}_d(n) &= x_d(n) \\
\end{align*}
The result is that the two signals, when sampled, produce the same discrete time signal.
%\img{images/sampling/ratevsresolution/sin/graph.ps}
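A two-line numerical confirmation of this (Python with numpy assumed; the wheel frequency is arbitrary):
\begin{verbatim}
import numpy as np

w0 = 2*np.pi*5.0                 # arbitrary wheel frequency
Ts = 4*np.pi/(3*w0)              # sampling period for ws = (3/2) w0
n = np.arange(12)
print(np.allclose(np.sin(w0*n*Ts), np.sin(-w0*n*Ts/2)))  # True
\end{verbatim}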
\newpage
\section{Week 8}
\subsection{Overview of Sampling}
There are cases where you want to use digital signal processing on a continuous time signal. You have to convert the input signal to a discrete time signal, do the processing in discrete time, then finally reconstruct the continuous time signal. This is DT processing of CT signals, and it allows us to use digital circuits.
% \img{images/conversions/processing/dtpofct.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-1,-2)(8,1.5)
%frame
\rput(1,0){$C/D$}
\psframe(1.5,-0.25)(0.5,0.25)
%frame
\rput(3,0){$H(\Omega)$}
\psframe(3.5,-0.25)(2.5,0.25)
%frame
\rput(5,0){$D/C$}
\psframe(5.5,-0.25)(4.5,0.25)
%text
\rput(-0.5,0){$x(t)$}
%text
\rput(6.5,0){$y(t)$}
%text
\rput(1,-1.2){$T_s$}
%text
\rput(5,-1.2){$T_r$}
%text
\rput(2,0.2){$x_d(n)$}
%text
\rput(4,0.2){$y_d(n)$}
% wire with arrow
\psline{->}(0,0)(0.5,0)
% wire with arrow
\psline{->}(1.5,0)(2.5,0)
% wire with arrow
\psline{->}(3.5,0)(4.5,0)
% wire with arrow
\psline{->}(5.5,0)(6,0)
% wire with arrow
\psline{->}(1,-1)(1,-0.25)
% wire with arrow
\psline{->}(5,-1)(5,-0.25)
% wire with arrow
\psline{}(0.25,-1.5)(5.75,-1.5)
% wire with arrow
\psline{}(0.25,0.7)(5.75,0.7)
% wire with arrow
\psline{}(0.25,0.7)(0.25,-1.5)
% wire with arrow
\psline{}(5.75,0.7)(5.75,-1.5)
\end{pspicture}
\end{center}
Here $T_s$ is the sampling period, $T_r$ is the reconstruction period, C/D means CT-to-DT conversion, and D/C means DT-to-CT conversion. We may also want to utilize analog circuits to process digital signals.
% \img{images/conversions/processing/ctpofdt.ps}
\begin{center}
\begin{pspicture}(-1,-2)(8,1.5)
%frame
\rput(1,0){$D/C$}
\psframe(1.5,-0.25)(0.5,0.25)
%frame
\rput(3,0){$H(\omega)$}
\psframe(3.5,-0.25)(2.5,0.25)
%frame
\rput(5,0){$C/D$}
\psframe(5.5,-0.25)(4.5,0.25)
%text
\rput(-0.5,0){$x_d(n)$}
%text
\rput(6.5,0){$y_d(n)$}
%text
\rput(1,-1.2){$T_r$}
%text
\rput(5,-1.2){$T_s$}
%text
\rput(2,0.2){$x(t)$}
%text
\rput(4,0.2){$y(t)$}
% wire with arrow
\psline{->}(0,0)(0.5,0)
% wire with arrow
\psline{->}(1.5,0)(2.5,0)
% wire with arrow
\psline{->}(3.5,0)(4.5,0)
% wire with arrow
\psline{->}(5.5,0)(6,0)
% wire with arrow
\psline{->}(1,-1)(1,-0.25)
% wire with arrow
\psline{->}(5,-1)(5,-0.25)
% wire with arrow
\psline{}(0.25,-1.5)(5.75,-1.5)
% wire with arrow
\psline{}(0.25,0.7)(5.75,0.7)
% wire with arrow
\psline{}(0.25,0.7)(0.25,-1.5)
% wire with arrow
\psline{}(5.75,0.7)(5.75,-1.5)
\end{pspicture}
\end{center}
So far, we have talked about taking a continuous time signal and sampling it with an impulse train, giving $x_g(t)$. Then there is a box that converts the signal---let's call it the ``Dirac to Kronecker'' box---and puts out $x_d(n)$. If we look closely at the C/D box, we have the following system:
% \img{images/conversions/ctod/cd.ps}
\begin{center}
\begin{pspicture}(-1,-2)(8,1.5)
%frame
\rput(5,0.2){Dirac}
\rput(5,0){to}
\rput(5,-0.2){Kronecker}
\psframe(4.3,-0.5)(5.7,0.5)
%text
\rput(-0.5,0){$x(t)$}
%text
\rput(7,0){$x_d(n)$}
%text
\rput(2,-1.2){$g(t)=\sum\limits_{k=-\infty}^{\infty}\delta(t-kT_s)$}
%text
\rput(3.25,0.2){$x_g(t)$}
% wire with arrow
\psline{->}(0,0)(1.75,0)
% wire with arrow
\psline{->}(2.25,0)(4.3,0)
% wire with arrow
\psline{->}(5.7,0)(6.6,0)
% wire with arrow
\psline{->}(2,-1)(2,-0.25)
% wire with arrow
\psline{}(0.25,-1.7)(6,-1.7)
% wire with arrow
\psline{}(0.25,0.7)(6,0.7)
% wire with arrow
\psline{}(0.25,0.7)(0.25,-1.7)
% wire with arrow
\psline{}(6,0.7)(6,-1.7)
% multiplier
\pscircle(2,0){0.25}
\psline(1.825,0.175)(2.175,-0.175)
\psline(2.175,0.175)(1.825,-0.175)
\end{pspicture}
\end{center}
Lets start out relating the discrete time signal $x_d(n)$ to the continuous time signal $x(t)$. When we look at the system, we start with $x(t)$ and multiply that by an impulse train $g(t)$ to obtain $x_g(t)$.
% \img{images/sampling/impulse/multiplied/dirac.ps}
\begin{center}
\begin{pspicture}(-5,-1.5)(6,2.5)
% function name
\rput(4,1.7){$x_g(t)$}
\fileplot[linewidth=1.0pt,linestyle=dashed,linecolor=blue!40]{function.dat}
%dirac
\psline[linewidth=1.25pt]{->}(-3, 0)(-3, 1.7)
%dirac
\psline[linewidth=1.25pt]{->}(-2, 0)(-2, 1.2176)
%dirac
\psline[linewidth=1.25pt]{->}(-1, 0)(-1, 0.5408)
%dirac
\psline[linewidth=1.25pt]{->}(0, 0)(0, 0.152)
%dirac
\psline[linewidth=1.25pt]{->}(1, 0)(1, -0.7408)
%dirac
\psline[linewidth=1.25pt]{->}(2, 0)(2, -0.4816)
%dirac
\psline[linewidth=1.25pt]{->}(3, 0)(3, -0.064)
%dirac
\psline[linewidth=1.25pt]{->}(4, 0)(4, 1.2032)
% x-axis
\psline{->}(-4.4, 0)(4.9, 0)
\rput(4.4,-0.3){$t$}
% begin x-axis labels:
\psline(-4, -0.1)(-4, 0.1)
\rput(-4, -0.3){$-4T_s$}
\psline(-3, -0.1)(-3, 0.1)
\rput(-3, -0.3){$-3T_s$}
\psline(-2, -0.1)(-2, 0.1)
\rput(-2, -0.3){$-2T_s$}
\psline(-1, -0.1)(-1, 0.1)
\rput(-1, -0.3){$-T_s$}
\psline(0, -0.1)(0, 0.1)
\rput(0, -0.3){$0$}
\psline(1, -0.1)(1, 0.1)
\rput(1, 0.3){$T_s$}
\psline(2, -0.1)(2, 0.1)
\rput(2, 0.3){$2T_s$}
\psline(3, -0.1)(3, 0.1)
\rput(3, -0.3){$3T_s$}
\psline(4, -0.1)(4, 0.1)
\rput(4, -0.3){$4T_s$}
% end x-axis labels:
\end{pspicture}
\end{center}
We pass this signal into the ``Dirac to Kronecker'' box. From there we get our discrete time signal.
% \img{images/sampling/impulse/multiplied/kronecker.ps}
\begin{center}
\begin{pspicture}(-5,-1.5)(6,2.5)
% function name
\rput(4,1.7){$x_d(n)$}
\fileplot[linewidth=1.0pt,linestyle=dashed,linecolor=blue!40]{function.dat}
%dirac
\psline[linewidth=1.25pt]{-*}(-3, 0)(-3, 1.7)
%dirac
\psline[linewidth=1.25pt]{-*}(-2, 0)(-2, 1.2176)
\rput(-2, 1.5){$(x(-2T_s))$}
%dirac
\psline[linewidth=1.25pt]{-*}(-1, 0)(-1, 0.5408)
\rput(-1, 0.8){$(x(-T_s))$}
%dirac
\psline[linewidth=1.25pt]{-*}(0, 0)(0, 0.152)
\rput(0, 0.4){$(x(0))$}
%dirac
\psline[linewidth=1.25pt]{-*}(1, 0)(1, -0.7408)
\rput(1, -1){$(x(T_s))$}
%dirac
\psline[linewidth=1.25pt]{-*}(2, 0)(2, -0.4816)
%dirac
\psline[linewidth=1.25pt]{-*}(3, 0)(3, -0.064)
%dirac
\psline[linewidth=1.25pt]{-*}(4, 0)(4, 1.2032)
% x-axis
\psline{->}(-4.4, 0)(4.9, 0)
\rput(4.4,-0.3){$n$}
% begin x-axis labels:
\psline(-4, -0.1)(-4, 0.1)
\rput(-4, -0.3){$-4$}
\psline(-3, -0.1)(-3, 0.1)
\rput(-3, -0.3){$-3$}
\psline(-2, -0.1)(-2, 0.1)
\rput(-2, -0.3){$-2$}
\psline(-1, -0.1)(-1, 0.1)
\rput(-1, -0.3){$-1$}
\psline(0, -0.1)(0, 0.1)
\rput(0, -0.3){$0$}
\psline(1, -0.1)(1, 0.1)
\rput(1, 0.3){$1$}
\psline(2, -0.1)(2, 0.1)
\rput(2, 0.3){$2$}
\psline(3, -0.1)(3, 0.1)
\rput(3, -0.3){$3$}
\psline(4, -0.1)(4, 0.1)
\rput(4, -0.3){$4$}
% end x-axis labels:
\end{pspicture}
\end{center}
Notice the relationship between $x_d(n)$ and $x(t)$:
\begin{align*}
x_d(n) &= \left. x(t) \right|_{t = nT_s} \\
x_d(n) &= x(nT_s) \\
\end{align*}
Let's express $x_g(t)$ in terms of $x_d(n)$; this will give us intuition for plotting $X_d(\Omega)$. Looking at the two plots above for $x_g(t)$ and $x_d(n)$, we can see that $x_g(t)$ is just a train of shifted and scaled Diracs, each with strength $x(nT_s) = x_d(n)$.
$$
x_g(t) = \sum \limits_{n \in \Z} x(nT_s) \delta(t - nT_s) = \sum \limits_{n \in \Z} x_d(n) \delta(t - nT_s)
$$
Now let's solve for the Fourier transform of $x_g(t)$ in terms of $x_d(n)$:
\begin{align*}
x_g(t) &= \sum \limits_{n \in \Z} x_d(n) \delta(t - nT_s) \\
\ftrans{x_g(t)} &= \sum \limits_{n \in \Z} x_d(n) e^{-i\omega nT_s} \\
X_g(\omega) &= \sum \limits_{n \in \Z} x_d(n) e^{-i (\omega T_s) n} \\
X_g(\omega) &= X_d(\omega T_s) \\
\end{align*}
This gives us the relation:
$$
X_g(\omega) = \left. X_d(\Omega) \right|_{\Omega = \omega T_s}
$$
Note that we are using $\Omega$ to denote radians per sample and $\omega$ radians per second. We can check if the units make sense:
$$
\Omega\,\frac{\mathrm{rad}}{\mathrm{sample}} = \omega\,\frac{\mathrm{rad}}{\mathrm{sec}} \cdot T_s\,\frac{\mathrm{sec}}{\mathrm{sample}}
$$
Here is a way to solve for the frequency response $X_d(\Omega)$ explicitly, by first solving for $X_g(\omega)$ and then using the relation $X_g(\omega) = \left. X_d(\Omega) \right|_{\Omega = \omega T_s}$:
\begin{align*}
x_g(t) &= x(t)g(t) \\
\ftrans{x_g(t)} &= \frac{1}{2\pi} \lr{X(\omega) * G(\omega)} \\
X_g(\omega) &= \frac{1}{2\pi} \lr{\infint X(\omega - \Omega) G(\Omega) d\Omega} \\
X_g(\omega) &= \frac{1}{2\pi} \lr{\infint X(\omega - \Omega) 2\pi \sum \limits_{k\in\Z} G_k \delta(\Omega - k\omega_s)d\Omega} \\
X_g(\omega) &= \sum \limits_{k\in\Z} G_k \infint X(\omega - \Omega) \delta(\Omega - k\omega_s)d\Omega \\
X_g(\omega) &= \sum \limits_{k\in\Z} G_k X(\omega - k\omega_s) \\
X_g(\omega) &= \frac{1}{T_s} \sum \limits_{k\in\Z} X(\omega - k\omega_s) \\
\end{align*}
If you look at the relation between $X_d(\Omega)$ and $X_g(\omega)$,
\begin{align*}
X_g(\omega) &= \left. X_d(\Omega) \right|_{\Omega = \omega T_s} \\
X_d(\Omega) &= \left. X_g(\omega) \right|_{\omega = \Omega/T_s} \\
\end{align*}
this relation can help us solve explicitly for $X_d(\Omega)$:
\begin{align*}
X_d(\omega T_s) &= X_g(\omega) = \frac{1}{T_s} \sum \limits_{k\in\Z} X(\omega - k\omega_s) \\
X_d(\Omega) &= X_g\lr{\frac{\Omega}{T_s}} = \frac{1}{T_s} \sum \limits_{k\in\Z} X\lr{\frac{\Omega}{T_s} - k\omega_s} \quad \mbox{let } \Omega = \omega T_s \\
X_d(\Omega) &= \frac{1}{T_s} \sum \limits_{k\in\Z} X\lr{\frac{\Omega}{T_s} - k\omega_s} \\
X_d(\Omega) &= \frac{1}{T_s} \sum \limits_{k\in\Z} X\lr{\frac{\Omega - 2\pi k}{T_s}} \\
\end{align*}
% \img{images/sampling/impulse/triangles/zoom/twopi.ps}
If you look at either the calculations or the graph, you will see that all we did was multiply $\omega$ by $T_s$; hence $\Omega = \omega T_s$. So each replica of the original spectrum, which was at a multiple of $\omega_s$, now sits at a multiple of $2\pi$. That is a big relief, because the DTFT has to be periodic with period $2\pi$. The scaling is done on the horizontal (frequency) axis---not on the vertical axis; the vertical peak is still $\frac{1}{T_s}$. This result is frequency rescaling. Looking at the plot of $X_g(\omega)$, you can see this very clearly: we have only multiplied the $\omega$ axis by $T_s$:
% \img{images/sampling/impulse/triangles/zoom/threetris.ps}
If we want to relate $X_d(\Omega)$ to $X(\omega)$, we find that
$$
X_d(\Omega) = \left. \frac{1}{T_s} X(\omega) \right|_{\omega=\frac{\Omega}{T_s}} \quad \forall \abs{\Omega} \lt \pi
$$
We cannot forget to limit $\Omega$ when writing the signal in this form. The sampling creates more replicas, so we must limit the domain to allow this definition.
\begin{align*}
X_d(\Omega) &= \frac{1}{T_s} \sum \limits_{k\in\Z} X\lr{\frac{\Omega - 2\pi k}{T_s}} \\
X_d(\Omega) &= \frac{1}{T_s} X\lr{\frac{\Omega}{T_s}} \quad \mbox{(restricted domain)}\\
\end{align*}
Here we can see that we are effectively scaling the frequency and the amplitude.
\subsection{Time Domain Representation of a Low Pass Filter}
Suppose we pass our sampled signal $x_g(t)$ through a low pass filter.
% \img{images/sampling/lowpass/sys.ps}
\begin{center}
\begin{pspicture}(-3,-5)(8,3)
% in from x
\rput(-3.2,0){$x(t)$}
\psline[linewidth=1.25 pt, arrowscale=1.1]{->}(-2.7,0)(-0.25,0)
% out to y
\rput(2,0.3){$x_g(t)$}
\rput(8.6,0){$y_g(t)$}
\psline[linewidth=1.25 pt, arrowscale=1.1]{->}(0.25,0)(2.7,0)
\psline[linewidth=1.25 pt, arrowscale=1.1]{->}(6,0)(8,0)
% up arrow
\psline[linewidth=1.25 pt, arrowscale=1.1]{->}(0,-1.65)(0,-0.25)
% multiplier
\pscircle(0,0){0.25}
\psline(-0.175,0.175)(0.175,-0.175)
\psline(0.175,0.175)(-0.175,-0.175)
\rput(3.2, 0.5){$H(\omega)$}
\rput(5.3, 0.6){$(T_s)$}
\psline{->}(3.25, 0)(5.5,0)
\psline(3.75, 0.0)(3.75, 0.5)
\psline(4.75, 0.0)(4.75, 0.5)
\psline(3.75, 0.5)(4.75, 0.5)
\psline(3.75, 0.1)(3.75, -0.1)
\rput(3.75, -0.3){$-\frac{\omega_s}{2}$}
\psline(4.75, 0.1)(4.75, -0.1)
\rput(4.75, -0.3){$\frac{\omega_s}{2}$}
\psframe(2.65, -0.75)(6, 1)
% impulses
\rput(2.3,-1.7){$g(t) = \sum \limits_{k = -\infty}^{\infty}\delta(t-kT_S)$}
\rput(-1.1,-2.1){$(1)$}
\rput(-1.3,-2.5){$\cdots$}
\rput(1.3,-2.5){$\cdots$}
\psline{->}(-1.5,-3)(1.5,-3)
\psline[linewidth=1.25pt]{->}(-0.75,-3)(-0.75,-2)
\rput(-0.75,-3.3){$-T_s$}
\psline[linewidth=1.25pt]{->}(0,-3)(0,-2)
\rput(0,-3.3){$0$}
\psline[linewidth=1.25pt]{->}(0.75,-3)(0.75,-2)
\rput(0.75,-3.3){$T_s$}
% box
\psframe(-1.75,-3.65)(7, 1.2)
\end{pspicture}
\end{center}
We know that $Y_g(\omega) = H(\omega)X_g(\omega)$, and hence, $y_g(t) = (h * x_g)(t)$. Suppose $H(\omega)$ filters out values that are outside of $(-\omega_s/2, \omega_s/2)$ and has a gain of $T_s$. What is $h(t)$?
\begin{align*}
h(t) &= \frac{1}{2\pi} \int_{-\infty}^{\infty} H(\omega) e^{i\omega t} d\omega \\
h(t) &= \frac{1}{2\pi} \int_{-\omega_s/2}^{\omega_s/2} T_s e^{i\omega t} d\omega \\
h(t) &= \frac{T_s}{2\pi} \left. \frac{e^{i\omega t}}{it} \right|_{-\omega_s/2}^{\omega_s/2} \\
h(t) &= \frac{1}{\omega_s} \frac{e^{i(\omega_s/2) t} - e^{-i(\omega_s/2) t}}{it} \\
h(t) &= \frac{1}{\omega_s} \frac{2\sin((\omega_s/2) t)}{t} \\
h(t) &= \frac{\sin(\frac{\omega_s}{2}t)}{\frac{\omega_s}{2}t} \\
\end{align*}
Recall the relation between $x_g(t)$ and $x_d(n)$, $x_g(t) = \sum \limits_{n\in\Z} x_d(n) \delta(t - nT_s)$.
\begin{align*}
(h * x_g)(t) &= \infint h(\tau) x_g(t - \tau) d\tau \\
(h * x_g)(t) &= \infint h(\tau) \sum \limits_{n\in\Z} x_d(n) \delta(t - \tau - nT_s) d\tau \\
(h * x_g)(t) &= \sum \limits_{n\in\Z} x_d(n) \infint h(\tau) \delta(t - \tau - nT_s) d\tau \\
(h * x_g)(t) &= \sum \limits_{n\in\Z} x_d(n) h(t - nT_s) \\
\end{align*}
In this case, $y_g(t) = (h * x_g)(t) = x(t)$, because the low pass filter removes the spectral replicas and keeps the original. This means that $x$ is being expressed as a linear combination of its samples with shifted impulse responses $h(t - nT_s)$, where $h$ is a sinc function. Our input signal $x$ was bandlimited and we sampled it at or above the Nyquist rate: if you take a bandlimited signal and sample it at or above the Nyquist rate, you can represent that signal as a linear combination of shifted sincs. Before, in Fourier analysis, we represented functions as linear combinations of complex exponentials; now that we are dealing with the space of bandlimited signals (in this case, bandlimited to $(-\omega_s/2, \omega_s/2)$), we can represent functions as linear combinations of sincs.
% \img{images/sampling/sincs/impulseresp/graph.ps}
Notice that this sinc function has a very special property: it is 1 at 0 and 0 at every other sample point $t = kT_s$, $k \neq 0$. We can shift the sinc to the right or the left, and at any given sample point every shifted sinc is zero except the one whose peak sits there. This allows us to take the sinc as a basis function and replicate it at each point where we took a sample of $x(t)$:
% \img{images/sampling/sincs/sincs/multiplied/g3.ps}
Notice that, at the sample points, every value of the shifted sinc except its peak lands on a zero crossing.
% \img{images/sampling/sincs/sincs/multiplied/g2.ps}
% \img{images/sampling/sincs/sincs/multiplied/g1.ps}
The final result is the addition of the sinc functions:
% \img{images/sampling/sincs/sincs/multiplied/graph.ps}
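Here is a minimal numerical sketch of this sinc interpolation (not from lecture; Python with numpy assumed). The bandlimited test signal and the rates are illustration choices, and the infinite sum is truncated to finitely many samples:
\begin{verbatim}
import numpy as np

Ts = 0.1                        # sampling period; ws/2 = pi/Ts
def x(t):                       # bandlimited test signal (bandwidth
    return np.sinc(t/0.4)**2    # 2*pi/0.4 rad/s, well below ws/2)

n = np.arange(-60, 61)          # truncate to finitely many samples
samples = x(n*Ts)

t = np.linspace(-1.0, 1.0, 201)
x_hat = sum(samples[k]*np.sinc((t - n[k]*Ts)/Ts)
            for k in range(len(n)))
print(np.max(np.abs(x_hat - x(t))))   # tiny reconstruction error
\end{verbatim}
Here np.sinc is the normalized sinc, $\mathrm{sinc}(u) = \sin(\pi u)/(\pi u)$, which matches $h(t)$ above under the substitution $u = t/T_s$.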
We have used the fact that these sincs form an orthogonal basis---in the space of bandlimited signals. It is hard to prove this directly, but you can do so by first showing that the inner product in the time domain is equal to $\frac{1}{2\pi}$ times the inner product in the frequency domain:
\begin{align*}
\langle x,y\rangle &\bydef \infint x(t) y^*(t) dt \\
\langle X,Y\rangle &\bydef \infint X(\omega) Y^*(\omega) d\omega \\
\end{align*}
We want to show that $\langle x,y\rangle = \frac{1}{2\pi} \langle X,Y\rangle$.
\begin{proof}
\begin{align*}
\langle x,y\rangle &= \infint x(t) y^*(t) dt \\
\langle x,y\rangle &= \infint x(t) \lr{\frac{1}{2\pi} \infint Y(\omega) e^{i\omega t} d\omega}^* dt \\
\langle x,y\rangle &= \frac{1}{2\pi} \infint x(t) \infint Y^*(\omega) e^{-i\omega t} d\omega dt \\
\langle x,y\rangle &= \frac{1}{2\pi} \infint Y^*(\omega) \lr{\infint x(t) e^{-i\omega t} dt} d\omega \\
\langle x,y\rangle &= \frac{1}{2\pi} \infint Y^*(\omega) X(\omega) d\omega\\
\langle x,y\rangle &= \frac{1}{2\pi} \langle X,Y\rangle \\
\end{align*}
Now to show orthogonality of the sinc functions:
\begin{align*}
\phi_k(t) = \mathrm{sinc}(t-k) \bydef \frac{\sin(\pi(t-k))}{\pi(t-k)} \\
\end{align*}
We want to show that $\langle \phi_k, \phi_\ell\rangle = c\,\delta(k-\ell)$ for some constant $c \neq 0$, where $\delta$ here denotes the Kronecker delta. Since $\phi_k(t) = \phi_0(t-k)$, we have $\Phi_k(\omega) = \Phi_0(\omega) e^{-i\omega k}$, where $\Phi_0(\omega)$ is $1$ on $(-\pi,\pi)$ and $0$ elsewhere.
\begin{align*}
\langle \Phi_k, \Phi_\ell\rangle &= \infint \Phi_k(\omega) \Phi_\ell^*(\omega) d\omega \\
\langle \Phi_k, \Phi_\ell\rangle &= \infint \lr{\Phi_0(\omega) e^{-i\omega k}} \lr{\Phi_0(\omega) e^{-i\omega \ell}}^* d\omega \\
\langle \Phi_k, \Phi_\ell\rangle &= \infint \abs{\Phi_0(\omega)}^2 e^{-i\omega k} e^{i\omega \ell} d\omega \\
\langle \Phi_k, \Phi_\ell\rangle &= \int_{-\pi}^{\pi} e^{-i\omega (k-\ell)} d\omega \\
\langle \Phi_k, \Phi_\ell\rangle &= 2\pi \delta(k-\ell) \\
2\pi\langle \phi_k, \phi_\ell\rangle &= 2\pi \delta(k-\ell) \quad \mbox{ (by the proof above)} \\
\langle \phi_k, \phi_\ell\rangle &= \delta(k-\ell) \\
\end{align*}
Therefore, $\phi_k \perp \phi_\ell \mbox{ } \forall k \neq \ell$.
\end{proof}
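A quick numerical check of this orthogonality (not from lecture; Python with numpy assumed; the integrals are approximated by Riemann sums on a truncated grid, so the results are only approximately $1$ and $0$):
\begin{verbatim}
import numpy as np

dt = 0.01
t = np.arange(-200, 200, dt)            # truncated fine grid
phi = lambda k: np.sinc(t - k)          # np.sinc(u) = sin(pi u)/(pi u)
inner = lambda k, l: np.sum(phi(k)*phi(l))*dt
print(inner(0, 0), inner(0, 1), inner(3, 7))   # ~1, ~0, ~0
\end{verbatim}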
\newpage
\section{Week 9}
\subsection{Sampling}
\subsubsection{Relations in Sampling}
We have been going over sampling for some time now. Let's determine some relationships within this system:
% \img{images/conversions/processing/dtpofct.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-1,-1.75)(6.5,1)
%frame
\rput(1,0){$C/D$}
\psframe(1.5,-0.25)(0.5,0.25)
%frame
\rput(3,0){$H(\Omega)$}
\psframe(3.5,-0.25)(2.5,0.25)
%frame
\rput(5,0){$D/C$}
\psframe(5.5,-0.25)(4.5,0.25)
%text
\rput(-0.5,0){$x(t)$}
%text
\rput(6.5,0){$y(t)$}
%text
\rput(1,-1.2){$T_s$}
%text
\rput(5,-1.2){$T_r$}
%text
\rput(2,0.2){$x_d(n)$}
%text
\rput(4,0.2){$y_d(n)$}
% wire with arrow
\psline{->}(0,0)(0.5,0)
% wire with arrow
\psline{->}(1.5,0)(2.5,0)
% wire with arrow
\psline{->}(3.5,0)(4.5,0)
% wire with arrow
\psline{->}(5.5,0)(6,0)
% wire with arrow
\psline{->}(1,-1)(1,-0.25)
% wire with arrow
\psline{->}(5,-1)(5,-0.25)
% wire with arrow
\psline{}(0.25,-1.5)(5.75,-1.5)
% wire with arrow
\psline{}(0.25,0.7)(5.75,0.7)
% wire with arrow
\psline{}(0.25,0.7)(0.25,-1.5)
% wire with arrow
\psline{}(5.75,0.7)(5.75,-1.5)
\end{pspicture}
\end{center}
How is the transform $Y_d(\Omega)$ related to $X_d(\Omega)$? We can use the fact that convolution in time is multiplication in the frequency domain:
$$
Y_d(\Omega) = H_d(\Omega)X_d(\Omega)
$$
After we pass $y_d(n)$ through the ``Kronecker to Dirac'' box, we have $y_g(t) = \sum \limits_{n\in\Z} y_d(n)\delta(t - n T_r)$. This gives us the relationship between $y_g(t)$ and $y_d(n)$, but what is $Y_g(\omega)$ in terms of $Y_d(\Omega)$?
\begin{align*}
y_g(t) &= \sum \limits_{n\in\Z} y_d(n)\delta(t - n T_r) \\
Y_g(\omega) &= \sum \limits_{n\in\Z} y_d(n) e^{-i\omega n T_r} \\
Y_g(\omega) &= \sum \limits_{n\in\Z} y_d(n) e^{-i\lr{\omega T_r} n} \\
Y_g(\omega) &= Y_d(\omega T_r) = \left. Y_d(\Omega) \right|_{\Omega = \omega T_r}
\end{align*}
The Kronecker-to-Dirac conversion requires that you specify a reconstruction period $T_r$, which may or may not equal $T_s$. Remember, when the signal goes through the Kronecker-to-Dirac box, all that happens is a rescaling of the frequency axis---here, going from radians per sample to radians per second.
Let's follow the process from start to finish, and then derive the various relationships. We started off with a signal $x(t)$, and sampling replicated its spectrum at every multiple of $\omega_s$, with the amplitude scaled:
% image of train
% \imgsize{0.75}{images/sampling/impulse/triangles/zoom/threetris.ps}
This gives us a relation between $X_g(\omega)$ and $X(\omega)$:
\begin{nicebox}
$$
X_g(\omega) = \frac{1}{T_s} X(\omega) \quad \abs{\omega} \lt \frac{\omega_s}{2}
$$
\end{nicebox}
We then scaled the frequency axis:
% image of scaling
% \imgsize{0.75}{images/sampling/impulse/triangles/zoom/twopi.ps}
This gives us a relation between $X_d(\Omega)$ and $X_g(\omega)$:
\begin{nicebox}
$$
X_d(\Omega) = X_g\lr{\frac{\Omega}{T_s}}
$$
$$
X_g(\omega) = X_d(\omega T_s)
$$
\end{nicebox}
Now we pass this signal through a discrete-time filter, $H_d(\Omega)$:
% image of Y_d(omega)
% \imgsize{0.75}{images/sampling/impulse/triangles/zoom/circles/yd.ps}
This gives us a relation between $Y_d(\Omega)$ and $X_d(\Omega)$:
\begin{nicebox}
$$
Y_d(\Omega) = H_d(\Omega)X_d(\Omega)
$$
\end{nicebox}
We then scale the frequency axis again:
% image of Y_g
% \imgsize{0.75}{images/sampling/impulse/triangles/zoom/circles/yg.ps}
This gives us a relation between $Y_g(\omega)$ and $Y_d(\Omega)$:
\begin{nicebox}
$$
Y_d(\Omega) = Y_g\lr{\frac{\Omega}{T_r}}
$$
$$
Y_g(\omega) = Y_d(\omega T_r)
$$
\end{nicebox}
At last, we pass this through a low pass filter with a gain of $G$ to obtain $Y(\omega)$:
% \imgsize{0.75}{images/sampling/impulse/triangles/zoom/circles/y.ps}
This gives us a relation between $Y(\omega)$ and $Y_g(\omega)$:
\begin{nicebox}
$$
Y(\omega) = G Y_g(\omega) \quad \abs{\omega} \lt \frac{\omega_r}{2}
$$
\end{nicebox}
Starting with the relationship between $Y(\omega)$ and $Y_g(\omega)$, let's work backwards and determine a relationship between $Y(\omega)$ and $X(\omega)$. For simplicity, let's restrict the domain of $\omega$: consider $\abs{\omega} \lt \frac{\omega_s}{2}$.
\begin{align*}
Y(\omega) &= G Y_g(\omega) \\
Y(\omega) &= G Y_g(\omega) = \left. GY_d(\Omega) \right|_{\Omega = \omega T_r} \\
Y(\omega) &= G Y_g(\omega) = GY_d(\omega T_r) \\
\end{align*}
We can then use what we know about $Y_d(\Omega)$ to expand this further. We know that $Y_d(\Omega) = H_d(\Omega)X_d(\Omega)$ and $Y(\omega) = GY_d(\omega T_r)$. Combining these equations gives us
\begin{align*}
Y(\omega) &= G Y_d(\omega T_r) \\
Y(\omega) &= G H_d(\omega T_r)X_d(\omega T_r) \\
\end{align*}
Recall that $X_d(\Omega) = X_g\lr{\frac{\Omega}{T_s}}$, and that $X_g(\omega)$ is just an amplitude-scaled replica of $X(\omega)$: $X_g(\omega) = \frac{1}{T_s} X(\omega)$ for $\abs{\omega} \lt \frac{\omega_s}{2}$. Combining these relations:
\begin{align*}
Y(\omega) &= G H_d(\omega T_r)X_d(\omega T_r) \\
Y(\omega) &= G H_d(\omega T_r)X_g\lr{\frac{\omega T_r}{T_s}} \\
Y(\omega) &= G H_d(\omega T_r)\frac{1}{T_s}X\lr{\frac{\omega T_r}{T_s}} \\
\end{align*}
So we finally arrive at an equation relating $Y(\omega)$ to $X(\omega)$ from start to finish:
\begin{nicebox}
$$
Y(\omega) = \frac{G}{T_s} H_d(\omega T_r)X\lr{\frac{T_r}{T_s} \omega}
$$
\end{nicebox}
Let's look at the results in various cases. Consider the case where there is no aliasing, and the reconstruction period, the sampling period, and the gain of the final low pass filter are all equal. In other words, the signal is bandlimited, the sampling is fast enough, and $T_r = T_s = G = T$.
\begin{align*}
Y(\omega) &= \frac{G}{T_s} H_d(\omega T_r)X\lr{\frac{T_r}{T_s} \omega} \quad \abs{\omega} \lt \frac{\omega_s}{2}\\
Y(\omega) &= H_d(\omega T)X\lr{\omega} \quad \abs{\omega} \lt \frac{\omega_s}{2}\\
\end{align*}
Basically we find here that $H(\omega) = H_d(\omega T)$. Let's look at another, more interesting example: consider the case where $H_d(\Omega) = 1$ and $T_s = G \neq T_r$.
\begin{align*}
Y(\omega) &= \frac{G}{T_s} H_d(\omega T_r)X\lr{\frac{T_r}{T_s} \omega} \quad \abs{\omega} \lt \frac{\omega_s}{2}\\
Y(\omega) &= X\lr{\frac{T_r}{T_s} \omega} \\
\end{align*}
What is $y(t)$? We can look at some Fourier transform pairs to find a relation:
\begin{align*}
x(t) &\ftp X(\omega) \\
x(\alpha t) &\ftp \frac{1}{\abs{\alpha}} X\lr{\frac{\omega}{\alpha}} \\
\abs{\alpha} x(\alpha t) &\ftp X\lr{\frac{\omega}{\alpha}} \\
\end{align*}
Now we can solve for the time-domain representation:
\begin{align*}
Y(\omega) &= X\lr{\frac{T_r}{T_s} \omega} \\
Y(\omega) &= X\lr{\frac{\omega}{T_s/T_r}} \\
y(t) &= \abs{\frac{T_s}{T_r}}x\lr{\frac{T_s}{T_r}t} \\
\end{align*}
\begin{example}
What if $T_s = 2T_r$? We get $y(t) = 2 x(2t)$. The signal will be twice as loud and twice as fast. If $G=T_s/2$, then $y(t) = x(2t)$.
\end{example}
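Here is a numerical sketch of the $T_s = 2T_r$, $G = T_s$ case (not from lecture; Python with numpy assumed; the bandlimited test signal is an illustration choice). It reconstructs with the sinc interpolation from last week, with gain $G/T_r = 2$, and compares the result against $2x(2t)$:
\begin{verbatim}
import numpy as np

Ts = 0.1; Tr = Ts/2; G = Ts    # reconstruct twice as fast, gain Ts
def x(t):                      # bandlimited illustration signal
    return np.sinc(t/0.4)**2

n = np.arange(-80, 81)
xd = x(n*Ts)                   # samples taken with period Ts

t = np.linspace(-0.5, 0.5, 101)
y = (G/Tr)*sum(xd[k]*np.sinc((t - n[k]*Tr)/Tr)
               for k in range(len(n)))
print(np.max(np.abs(y - 2*x(2*t))))   # ~0: y(t) = 2 x(2t)
\end{verbatim}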
\subsection{Z-Transforms}
\subsubsection{Why use Z-Transforms}
In the next phase of the course we are going to introduce some new transforms in order to handle a class of problems that we could not handle previously with the Fourier transform. Recall some of the Fourier transform issues. We talked about
(a) {\bf absolutely summable signals}
These signals have Fourier transforms that are well-defined and infinitely differentiable. Say we have $x(n) = \alpha^n u(n)$ with $\abs{\alpha} \lt 1$. What is its frequency response? The transform of this particular signal is $\frac{1}{1-\alpha e^{-i\omega}}$. Written in terms of $e^{i\omega}$, this expression is perfectly smooth and infinitely differentiable as a function of $\omega$. All finite-length signals also have this property; for example, $x(n) = \delta(n+1) + \delta(n) + \delta(n-1)$. With finite length signals we never have an issue with the convergence of the Fourier transform. \\
Absolutely summable signals can be defined as
\begin{nicebox}
$$l^1 : \{ x: \Z \to \C \st \sum_{n \in \Z} \abs{x(n)} \lt \infty \}$$
\end{nicebox}
Another class of signals that we have discussed are
(b) {\bf square summable (finite-energy signals)}
\begin{nicebox}
$$l^2 : \{ x: \Z \to \C \st \sum_{n \in \Z} \abs{x(n)}^2 \lt \infty \}$$
\end{nicebox}
Consider a function that is square summable but not absolutely summable, that is, $x \in l^2 \setminus l^1$. An example is $x(n) = \frac{1}{\pi n} \sin(A n)$, whose transform $X(\omega)$ (an ideal low pass characteristic) has discontinuities. We have also discussed signals that are
(c) {\bf neither absolutely summable nor square summable}
If $x \notin l^1 \cup l^2 \Rightarrow$ ``All bets are off''. There are only two cases:
i) DTFT Defined $\Rightarrow$ DTFT will have diracs.
An example of this type of signal is $x(n) = e^{i\omega_0 n}$, whose DTFT is $X(\omega) = 2\pi\delta(\omega - \omega_0)$ for $\abs{\omega} \lt \pi$ (assuming $\abs{\omega_0} \lt \pi$).
ii) DTFT not defined.
An example of this type of signal is $x(n) = \alpha^n u(n)$ where $\abs{\alpha} \gt 1$.
There is another class of signals called bounded signals, which are given by
\begin{nicebox}
$$l^\infty : \{ x: \Z \to \C \st \abs{x(n)} \lt M \lt \infty \mbox{ } \forall n \}$$
\end{nicebox}
\subsubsection{Right and Left-sided Functions}
In the time domain, convolution can be well-defined even if one of the signals satisfies $x \notin l^1 \cup l^2$ (neither absolutely summable nor square summable) and has no DTFT. In this category we cannot talk about a frequency response or Fourier transform. Consider what happens when such a signal is passed through a system with a finite impulse response:
$$
x(n) = \alpha^n u(n) \to \fbox{FIR} \to y(n)
$$
We know that $y(n)$ is the convolution of the two signals.
\begin{align*}
y(n) &= \sum\limits_{k\in\Z}x(k)h(n-k) \\
y(n) &= \sum\limits_{k\in\Z}\alpha^{k}u(k)h(n-k) \\
\end{align*}
This system has a well-defined response to this growing exponential. Why is the output well-defined (finite for every $n$) even though the input is unbounded and has no transform? We can't talk about a frequency response, or about multiplying $X(\omega)$ by the filter's frequency response, because $X(\omega)$ does not exist. But this system has a well-defined response! Suppose for now that the support of $h$ is contained in $\{-1,0,1\}$ (it is an FIR filter).
\begin{align*}
y(n) &= \sum\limits_{k\in\Z}h(k)x(n-k) \\
y(n) &= h(-1)x(n+1) + h(0)x(n) + h(1)x(n-1) \\
\end{align*}
We have a finite number of terms, so we are only summing a finite number of the values of the input. Another example is when you have two right-sided functions.
\begin{definition}
A function is \emph{right-sided} $\iff$ there is a finite $N$ such that $x(n) = 0 \mbox{ } \forall n \lt N$
\end{definition}
% \img{images/rightleftsided/def/r.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-4,-1)(4,3)
\rput(3.3,0.5){$\cdots$}
\psline{-*}(-3,0)(-3,0)
\psline{-*}(-2,0)(-2,0)
\psline{-*}(-1,0)(-1,0)
\psline{-*}(0,0)(0,1)
\rput(0,-0.3){N}
\psline{-*}(1,0)(1,0.1)
\psline{-*}(2,0)(2,0.4)
\psline{-*}(3,0)(3,0.9)
% axes
\psline{->}(-4,0)(4,0)
\rput(4,-0.3){ $n$ }
\rput(4,1){ $x(n)$ }
\end{pspicture}
\end{center}
Note that $x$ is causal if $N=0$. This implies that causal signals are a subset of right-sided signals.
\begin{definition}
A function is \emph{left-sided} $\iff$ there is a finite $M$ such that $x(n) = 0 \mbox{ } \forall n \gt M$
\end{definition}
% \img{images/rightleftsided/def/l.ps}
\begin{center}
\begin{pspicture}(-4,-1)(4,3)
\psline{-*}(-3,0)(-3,0.7)
\psline{-*}(-2,0)(-2,0.4)
\psline{-*}(-1,0)(-1,1)
\psline{-*}(0,0)(0,0.4)
\rput(0,-0.3){M}
\psline{-*}(1,0)(1,0)
\psline{-*}(2,0)(2,0)
\psline{-*}(3,0)(3,0)
% axes
\psline{->}(-4,0)(4,0)
\rput(4,-0.3){ $n$ }
\rput(4,1){ $x(n)$ }
\rput(-3.3,0.5){ $\cdots$ }
\end{pspicture}
\end{center}
Note that $x$ is anti-causal if $M=0$. This implies that anti-causal signals are a subset of left-sided signals. Let's continue with some examples.
\begin{example}
Let's look at a situation where convolution is well-defined but at least one of the functions doesn't have a DTFT.
$$
x(n) = \alpha^n u(n) \to \bbox[5px,border:1px solid black]{\beta^n u(n)} \to y(n)
$$
Note that the convolution is well-defined even if $\abs{\alpha} \gt 1$ or $\abs{\beta} \gt 1$, or both.
\begin{align*}
y(n) &= \sum\limits_{k\in\Z} \alpha^k u(k) \beta^{n-k} u(n-k) \\
y(n) &= \sum\limits_{k = 0}^{\infty} \alpha^k \beta^{n-k} u(n-k) \\
y(n) &= \begin{cases} \sum\limits_{k=0}^{n}\alpha^k\beta^{n-k} & n\geq0 \\ 0 & \mbox{otherwise} \\ \end{cases}
\end{align*}
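For $n \geq 0$ and $\alpha \neq \beta$, this geometric sum has a closed form (a standard identity, stated here for completeness; when $\alpha = \beta$ the sum is $(n+1)\alpha^n$):
\begin{align*}
y(n) &= \beta^n \sum \limits_{k=0}^{n} \lr{\frac{\alpha}{\beta}}^k = \beta^n \cdot \frac{1 - \lr{\alpha/\beta}^{n+1}}{1 - \alpha/\beta} = \frac{\beta^{n+1} - \alpha^{n+1}}{\beta - \alpha} \\
\end{align*}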
We can also see that the ``overlap'' of the signals is finite because we are convolving two right-sided signals. In the sum over $k$, $x(k)$ zeros out all terms with $k \lt N$, and $h(n-k)$ zeros out all terms with $k \gt n-M$. There are two finite endpoints, so the overlap is finite.
% \img{images/rightleftsided/finite/comb.ps}
\begin{center}
\begin{pspicture}(-3,-4)(6,3)
\psframe[linestyle=dashed](0.5,-3.5)(3.5,1.5)
% axes
\psline{->}(-2,-3)(6,-3)
\rput(6,-3.3){ $k$ }
\rput(-1.75,-1.5){ $\cdots$ }
\rput(6,-1.5){ $h(n-k)$ }
% axes
\psline{->}(-2,0)(6,0)
\rput(6,-0.3){ $k$ }
\rput(6,2.5){ $x(k)$ }
\rput(5.75,1.5){ $\cdots$ }
\psline{-*}(-1,-3)(-1,-0.5)
\psline{-*}(0,-3)(0,-1.4)
\psline{-*}(1,-3)(1,-2.1)
\psline{-*}(2,-3)(2,-2.6)
\psline{-*}(3,-3)(3,-2.9)
\rput(3,-3.2){n-M}
\psline{-*}(1,0)(1,0.1)
\rput(1,-0.3){N}
\psline{-*}(2,0)(2,0.4)
\psline{-*}(3,0)(3,0.9)
\psline{-*}(4,0)(4,1.6)
\psline{-*}(5,0)(5,2.5)
\end{pspicture}
\end{center}
\end{example}
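As a quick numerical sanity check, here is a small Python sketch of our own (the truncation length and parameter values are arbitrary assumptions, not from the course): it convolves truncated versions of the two right-sided exponentials and compares against the closed form derived above.
\begin{verbatim}
# Sketch: verify y(n) = sum_{k=0}^{n} alpha^k beta^{n-k} numerically
# for x(n) = alpha^n u(n), h(n) = beta^n u(n); alpha > 1, so x has no DTFT.
import numpy as np

alpha, beta, L = 1.5, 0.5, 20
n = np.arange(L)
x = alpha ** n                     # truncated x(n), n = 0..L-1
h = beta ** n                      # truncated h(n)

y_conv = np.convolve(x, h)[:L]     # exact for n < L since both are causal
y_closed = np.array([sum(alpha**k * beta**(m - k) for k in range(m + 1))
                     for m in n])
assert np.allclose(y_conv, y_closed)
\end{verbatim}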
\subsubsection{The Z-Transform}
Consider an LTI system with an impulse response $h(n)$:
$$
x(n) \to \bbox[5px,border:1px solid black]{h(n)} \to y(n)
$$
What happens when $x(n) = z^n$, where $z = r e^{i\omega}$? Plug these two functions into the convolution sum and solve:
\begin{align*}
y(n) &= \sum \limits_{k\in\Z} h(k) z^{n-k} \\
y(n) &= z^n \lr{\sum \limits_{k\in\Z} h(k) z^{-k}} \\
\end{align*}
We get $y(n) = z^n \sum_{k\in\Z} h(k) z^{-k}$. We did exactly this in EE20 when we talked about the eigenfunction property of the complex exponential. It turns out $e^{i\omega n}$ does not exhaust the space of complex exponentials with that property: \emph{any} complex number raised to the $n^{th}$ power comes out the other end scaled by some number. With that said, $\sum \limits_{k\in\Z} h(k) z^{-k}$ defines the transfer function of the system, $\hat{H}(z)$, a.k.a.\ the system function. This function is the \emph{Z-Transform} of the impulse response:
\begin{nicebox}
$$
\hat{H}(z) = \sum \limits_{n\in\Z} h(n) z^{-n}
$$
\end{nicebox}
Just as we had the frequency response with Fourier transforms, we have the transfer function with Z-transforms.
\begin{nicebox}
\begin{align*}
h(n) &\ftp H(\omega) = \sum \limits_{n\in\Z} h(n) e^{-i\omega n} \\
h(n) &\ztp \hat{H}(z) = \sum \limits_{n\in\Z} h(n) z^{-n}
\end{align*}
\end{nicebox}
Let's expand this with $z = re^{i\omega}$:
$$
\hat{H}(z) = \sum \limits_{n\in\Z} h(n) r^{-n}e^{-i\omega n} = \ftrans{h(n)r^{-n}}
$$
This $r$ allows us to leave the nest, which has been the unit circle. In the Z-transform we allow ourselves to go inside or outside the unit circle, and with that flexibility it becomes possible to deal with a function $h(n)$ that does not have a Fourier transform, because $h(n)r^{-n}$ does. We can potentially pick values of $r$, not on the unit circle, such that multiplying the function $h(n)$ by $r^{-n}$ tames whatever growth might occur.
\begin{example}
Consider an impulse response given by $h(n) = 3^n u(n)$. $H(\omega)$ is undefined. Let's use the transfer function:
\begin{align*}
\hat{H}(z) &= \sum \limits_{n\in\Z} h(n)z^{-n} \\
\hat{H}(z) &= \sum \limits_{n\in\Z} 3^n u(n) z^{-n} \\
\hat{H}(z) &= \sum \limits_{n = 0}^{\infty} 3^n z^{-n} \\
\hat{H}(z) &= \sum \limits_{n = 0}^{\infty} \lr{3z^{-1}}^n \\
\hat{H}(z) &= \frac{1}{1 - 3z^{-1}} \mbox { if } \abs{3z^{-1}} \lt 1 \\
\end{align*}
The sum converges provided $\abs{3z^{-1}} \lt 1$, which reduces to $\abs{z} \gt 3$. This makes sense because the radius is the magnitude of $z$. If we look at the sum and explicitly solve with $r$, we get the same result:
\begin{align*}
\hat{H}(z) &= \sum \limits_{n = 0}^{\infty} 3^n \lr{re^{i\omega}}^{-n} \\
\hat{H}(z) &= \sum \limits_{n = 0}^{\infty} \lr{\frac{3}{r}}^n e^{-i\omega n} \\
\end{align*}
We find that the Z-transform is associated with a \emph{Region of Convergence}, an $RoC$. This region contains the set of points $z$ in the complex plane for which the Z-transform converges. Note that when you break $z$ into its polar form $re^{i\omega}$, only the radius $r$ determines convergence. If you find a particular radius for which the infinite series converges, any point on that circle is perfectly fine as far as convergence is concerned.
\end{example}
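We can watch this region of convergence numerically. The following Python sketch is our own illustration (the test points are assumptions chosen on either side of $\abs{z} = 3$): partial sums of $\sum_{n \geq 0} 3^n z^{-n}$ settle only when $\abs{z} \gt 3$.
\begin{verbatim}
# Sketch: partial sums of sum_{n>=0} (3/z)^n converge iff |z| > 3.
import numpy as np

def partial_sum(z, N):
    return np.sum((3.0 / z) ** np.arange(N))

for z in (4.0, 2.0):               # z = 4 is in the RoC, z = 2 is not
    print(z, [partial_sum(z, N) for N in (10, 20, 40)])
# at z = 4 the sums approach 1/(1 - 3/4) = 4; at z = 2 they blow up
\end{verbatim}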
{\bf Question}
1) For a function to have both a Z-transform and a DTFT, what must be true about the $RoC$ of the function?
2) How can you get $H(\omega)$ from $\hat{H}(z)$?
{\bf Answer}
1) The $RoC$ must include the unit circle. That is, $\{ z \st \abs{z} = 1 \} \subset RoC(h)$.
2) $H(\omega) = \left. \hat{H}(z) \right|_{z = e^{i\omega}}$
\subsubsection{Examples}
1) Given a signal $x(n) = \lr{\frac{3}{4}}^nu(n)$, find the Z-transform and the DTFT.
First, the Z-transform:
\begin{align*}
x(n) &= \lr{\frac{3}{4}}^n u(n) \\
\hat{X}(z) &= \sum \limits_{n\in\Z}\lr{\frac{3}{4}}^n u(n)z^{-n} \\
\hat{X}(z) &= \sum \limits_{n = 0}^{\infty}\lr{\frac{3}{4}}^n z^{-n} \\
\hat{X}(z) &= \sum \limits_{n = 0}^{\infty}\lr{\frac{3}{4} z^{-1}}^n \\
\hat{X}(z) &= \frac{1}{1 - \frac{3}{4} z^{-1}} \mbox{ if } \abs{\frac{3}{4} z^{-1}} \lt 1 \\
\end{align*}
The convergence of this sum relies on $\abs{\frac{3}{4} z^{-1}} \lt 1$, which reduces to $\abs{z} \gt \frac{3}{4}$. Now let's find the DTFT:
\begin{align*}
X(\omega) &= \sum \limits_{n \in \Z} \lr{\frac{3}{4}}^n u(n) e^{-i\omega n} \\
X(\omega) &= \sum \limits_{n \in \Z_\oplus} \lr{\frac{3}{4}}^n e^{-i\omega n} \\
X(\omega) &= \sum \limits_{n \in \Z_\oplus} \lr{\frac{3}{4} e^{-i\omega}}^n \\
X(\omega) &= \frac{1}{1 - \frac{3}{4} e^{-i\omega}} \\
\end{align*}
Note that we could have also used the fact that $X(\omega) = \left. \hat{X}(z)\right|_{z = e^{i\omega}}$.
2) Given a signal $g(n) = -\lr{\frac{3}{4}}^n u(-n-1)$, what is $\hat{G}(z)$?
We know that $u(-n-1) = 0$ whenever $-n-1 \lt 0$; that is, $u(-n-1) = 0 \mbox{ } \forall n \gt -1$.
\begin{align*}
\hat{G}(z) &= \sum \limits_{n = -\infty}^{\infty}-\lr{\frac{3}{4}}^n u(-n-1)z^{-n} \\
\hat{G}(z) &= -\sum \limits_{n = -\infty}^{-1}\lr{\frac{3}{4}}^n z^{-n} \\
\hat{G}(z) &= -\sum \limits_{n = -\infty}^{-1}\lr{\frac{3}{4}z^{-1}}^n \\
\hat{G}(z) &= -\sum \limits_{n = 1}^{\infty}\lr{\frac{3}{4}z^{-1}}^{-n} \quad \mbox{ let } n = -n \\
\hat{G}(z) &= -\sum \limits_{n = 1}^{\infty}\lr{\frac{4}{3}z}^n \\
\hat{G}(z) &= -\sum \limits_{n = 0}^{\infty}\lr{\frac{4}{3}z}^{n+1} \quad \mbox{ shift index by } 1 \\
\hat{G}(z) &= -\frac{4}{3}z \sum \limits_{n = 0}^{\infty}\lr{\frac{4}{3}z}^n\\
\hat{G}(z) &= -\frac{\frac{4}{3}z}{1- \frac{4}{3}z} \quad \mbox{ if } \abs{\frac{4}{3}z} \lt 1\\
\end{align*}
The region of convergence reduces to $\abs{z} \lt \frac{3}{4}$, and the expression for $\hat{G}(z)$ reduces to $\frac{4z}{4z-3}$.
\newpage
\section{Week 10}
\subsection{Region of Convergence}
We can summarize the Z-Transform as follows:
\begin{nicebox}
\begin{align*}
x(n) &\ztp \hat{X}(z) = \sum \limits_{n=-\infty}^{\infty} x(n) z^{-n} \\
RoC(x) &= \{ z \in \C \st \sum \limits_{n=-\infty}^{\infty} \abs{x(n) z^{-n}} \lt \infty \}
\end{align*}
\end{nicebox}
Since the convention used is $z = Re^{i\omega}$, we can also write the region of convergence as
\begin{nicebox}
\begin{align*}
RoC(x) &= \{ z \in \C \st \sum \limits_{n=-\infty}^{\infty} \abs{x(n) R^{-n}} \lt \infty \}
\end{align*}
\end{nicebox}
This is because only $R \bydef \abs{z}$ matters in defining the region of convergence. The math makes this clear:
\begin{align*}
\abs{x(n)z^{-n}} = \abs{x(n)R^{-n}e^{-i\omega n}} = \abs{x(n)R^{-n}}
\end{align*}
\begin{example}
$x(n) = \alpha^n u(n)$ is causal. Determine the Z-Transform.
\begin{align*}
\hat{X}(z) &= \sum \limits_{n=-\infty}^{\infty} x(n) z^{-n} \\
\hat{X}(z) &= \sum \limits_{n=-\infty}^{\infty} \alpha^n u(n) z^{-n} \\
\hat{X}(z) &= \sum \limits_{n= 0}^{\infty} \lr{\alpha z^{-1}}^n \\
\hat{X}(z) &= \frac{1}{1 - \alpha z^{-1} } \quad \mbox{ if } \abs{\alpha z^{-1}} \lt 1 \\
\end{align*}
We find that the Z-Transform is $\frac{1}{1 - \alpha z^{-1} }$ with region of convergence $\abs{z} \gt \abs{\alpha}$.
%\img{images/roc/roc/causal.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3,-3)(3,3)
\pscustom[fillstyle=solid,fillcolor=blue!40,linestyle=none]{
\psframe(-2,-2)(2,2)
}
% the circle
\pscustom[fillstyle=solid,fillcolor=white,linestyle=none]{
\pscircle(0,0){ 1.0 }
}
\pscircle[linestyle=dashed](0,0){ 1 }
\rput(1.33,-0.45){ $\alpha$ }
\pscurve{->}(1.3,-0.3)(1.2,-0.1)(1,0)
% y-axis
\rput(0.3,2.5){ $\Im$ }
\psline{->}(0,-2.5)(0,2.5)
% x-axis
\rput(2.5,0.3){ $\Re$ }
\psline{->}(-2.5,0)(2.5,0)
% quadrants
\rput(0.3,2.2){ $\pi/2$ }
\rput(0.3,-2.5){ $\pi$ }
\rput(-2.5,0.3){ $3\pi/2$ }
\rput(2.2,0.3){ $0$ }
\end{pspicture}
\end{center}
\end{example}
\begin{example}
Given $y(n) = -\alpha^n u(-n-1)$, determine the Z-Transform.
\begin{align*}
\hat{Y}(z) &= \sum \limits_{n=-\infty}^{\infty} y(n) z^{-n} \\
\hat{Y}(z) &= \sum \limits_{n=-\infty}^{\infty} -\alpha^n u(-n-1) z^{-n} \\
\hat{Y}(z) &= -\sum \limits_{n=-\infty}^{-1} \lr{\alpha z^{-1}}^n \\
\hat{Y}(z) &= -\sum \limits_{n= 1}^{\infty} \lr{\alpha^{-1} z}^n \quad \mbox{ let } n = -n \\
\hat{Y}(z) &= -\alpha^{-1} z \sum \limits_{n= 0}^{\infty} \lr{\alpha^{-1} z}^n \\
\hat{Y}(z) &= -\frac{\alpha^{-1} z}{1 - \alpha^{-1} z} \quad \mbox{ if } \abs{\alpha^{-1} z} \lt 1 \\
\hat{Y}(z) &= \frac{1}{1 - \alpha z^{-1}} \\
\end{align*}
We find that the Z-Transform is $\frac{1}{1 - \alpha z^{-1} }$ with region of convergence $\abs{z} \lt \abs{\alpha}$.
%\img{images/roc/roc/anticausal.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3,-3)(3,3)
\pscustom[fillstyle=solid,fillcolor=white,linestyle=none]{
\psframe(-2,-2)(2,2)
}
% the circle
\pscustom[fillstyle=solid,fillcolor=blue!40,linestyle=none]{
\pscircle(0,0){ 1.0 }
}
\pscircle[linestyle=dashed](0,0){ 1 }
\rput(1.33,-0.45){ $\alpha$ }
\pscurve{->}(1.3,-0.3)(1.2,-0.1)(1,0)
% y-axis
\rput(0.3,2.5){ $\Im$ }
\psline{->}(0,-2.5)(0,2.5)
% x-axis
\rput(2.5,0.3){ $\Re$ }
\psline{->}(-2.5,0)(2.5,0)
% quadrants
\rput(0.3,2.2){ $\pi/2$ }
\rput(0.3,-2.5){ $\pi$ }
\rput(-2.5,0.3){ $3\pi/2$ }
\rput(2.2,0.3){ $0$ }
\end{pspicture}
\end{center}
\end{example}
\begin{claim}
If $x$ is causal ($x(n) = 0 \mbox{ } \forall n \lt 0$), and the circle of radius $R$ is in $RoC(x)$, then every radius $\tilde{R} \gt R$ is also in $RoC(x)$. In other words, causal signals have $RoC$'s that extend from outside a circle all the way to infinity.
\end{claim}
\begin{proof}
First determine a relationship between $\tilde{R}^{-n}$ and $R^{-n}$:
\begin{align*}
\tilde{R} &\gt R \\
\tilde{R}^{-1} &\lt R^{-1} \\
\tilde{R}^{-n} &\lt R^{-n} \quad \mbox{for } n \gt 0 \\
\end{align*}
We need to consider the absolute summability of the series defining the transform: $\sum \limits_{n=-\infty}^{\infty} \abs{x(n) z^{-n}} = \sum \limits_{n=0}^{\infty} \abs{x(n) R^{-n}}$.
If the sum converges absolutely for $R$, then certainly when $R^{-n}$ is replaced with the smaller term $\tilde{R}^{-n}$, the sum will still converge. Therefore, if $R \in RoC(x)$, then $\tilde{R} \in RoC(x)$.
\end{proof}
\begin{example}
Given our signal from a few examples ago, $x(n) = \alpha^n u(n)$, find the Z-transform of $g(n) = x(n+1) = \alpha^{n+1} u(n+1)$. This example brings out one of the properties of the Z-transform which you will use frequently: the shifting property. Let's solve it for an arbitrary shift.
\begin{align*}
\hat{G}(z) &= \sum \limits_{n=-\infty}^{\infty} g(n) z^{-n} \\
\hat{G}(z) &= \sum \limits_{n=-\infty}^{\infty} x(n-N) z^{-n} \\
\hat{G}(z) &= \sum \limits_{m=-\infty}^{\infty} x(m) z^{-\lr{m+N}} \quad \mbox{ let } m = n-N \\
\hat{G}(z) &= z^{-N}\sum \limits_{m=-\infty}^{\infty} x(m) z^{-m} \\
\hat{G}(z) &= z^{-N}\hat{X}(z)\\
\end{align*}
This defines the shifting property for the Z-transform:
\begin{nicebox}
\begin{align*}
x(n) &\ztp \hat{X}(z) \\
x(n-N) &\ztp z^{-N}\hat{X}(z) \\
\end{align*}
\end{nicebox}
In our case we have $N = -1$, which means that $\hat{G}(z) = z\hat{X}(z) = \frac{z}{1-\alpha z^{-1}}$, where $\abs{\alpha z^{-1}} \lt 1$. What happens at infinity? Let's write out $\hat{G}(z)$ term by term:
$$
\hat{G}(z) = z + \alpha + \alpha^2 z^{-1} + \alpha^3 z^{-2} + \cdots
$$
Notice that as $z \to \infty$, $\hat{G}(z)$ blows up. This demonstrates how right-sided signals that are not causal have $RoC$s of the form $R \lt \abs{z} \lt \infty$. An analogous statement holds for left-sided signals that are not anti-causal: their $RoC$s are of the form $0 \lt \abs{z} \lt R$, and the first term to blow up as $z \to 0$ is $x(1)z^{-1}$.
\end{example}
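The shifting property is easy to spot-check numerically. The sketch below is our own (the test point $z_0$ is an arbitrary assumption, chosen to lie in the $RoC$):
\begin{verbatim}
# Sketch: verify Z{x(n-N)}(z0) = z0^{-N} Xhat(z0) for x(n) = alpha^n u(n).
import numpy as np

alpha, N, z0 = 0.5, 3, 1.2 + 0.5j   # |z0| > alpha, so z0 is in the RoC
n = np.arange(200)                  # truncation; terms decay geometrically

X  = np.sum(alpha**n * z0**(-n))            # Xhat(z0)
Xs = np.sum(alpha**n * z0**(-(n + N)))      # Z of x(n-N), support n >= N
assert np.isclose(Xs, z0**(-N) * X)
\end{verbatim}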
\subsection{Two-sided signals}
We have discussed right- and left-sided signals, but what about two-sided signals? Consider the figure of an annular region of convergence of a Z-transform:
%\img{images/roc/twosided/twosided.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3,-3)(3,3)
\pscustom[fillstyle=solid,fillcolor=white,linestyle=none]{
\psframe(-2,-2)(2,2)
}
\pscustom[fillstyle=solid,fillcolor=blue!40,linestyle=none]{
\pscircle(0,0){ 1.75 }
}
\pscustom[fillstyle=solid,fillcolor=white,linestyle=none]{
\pscircle(0,0){ 0.75 }
}
\pscircle[linestyle=dashed](0,0){ 1.75 }
\pscircle[linestyle=dashed](0,0){ 0.75 }
\rput(1.1,-0.5){ $R_1$ }
\rput(2,-0.5){ $R_2$ }
\pscurve{->}(2,-0.3)(1.96,-0.1)(1.75,0)
\pscurve{->}(1.05,-0.3)(0.95,-0.1)(0.75,0)
% y-axis
\rput(0.3,2.5){ $\Im$ }
\psline{->}(0,-2.5)(0,2.5)
% x-axis
\rput(2.5,0.3){ $\Re$ }
\psline{->}(-2.5,0)(2.5,0)
% quadrants
\rput(0.3,2.2){ $\pi/2$ }
\rput(0.3,-2.5){ $\pi$ }
\rput(-2.5,0.3){ $3\pi/2$ }
\rput(2.2,0.3){ $0$ }
\end{pspicture}
\end{center}
The causal part of the signal is represented by the region outside of $R_1$, and the anti-causal part of the signal corresponds to the region inside $R_2$. The intersection is the region of convergence for the transform. If, for a given signal, the anti-causal and causal regions do not overlap, the Z-transform is not defined.
\begin{example}
Find the Z-transform for $f(n) = \lr{\frac{1}{2}}^n u(n) - \lr{\frac{3}{4}}^n u(-n-1)$.
\begin{align*}
\hat{F}(z) &= \frac{1}{1 - \frac{1}{2}z^{-1}} + \frac{1}{1-\frac{3}{4}z^{-1}} \quad \frac{1}{2} \lt \abs{z} \lt \frac{3}{4} \\
\hat{F}(z) &= \frac{z}{z - \frac{1}{2}} + \frac{z}{z-\frac{3}{4}} \\
\hat{F}(z) &= \frac{2z\lr{z - \frac{5}{8}}}{\lr{z-\frac{1}{2}}\lr{z-\frac{3}{4}}} \\
\end{align*}
%\img{images/roc/examples/twosided/twosided.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3,-3)(3,3)
\pscustom[fillstyle=solid,fillcolor=white,linestyle=none]{
\psframe(-2,-2)(2,2)
}
\pscustom[fillstyle=solid,fillcolor=blue!40,linestyle=none]{
\pscircle(0,0){ 1.5 }
}
\pscustom[fillstyle=solid,fillcolor=white,linestyle=none]{
\pscircle(0,0){ 1 }
}
\pscircle[linestyle=dashed](0,0){ 1.5 }
\pscircle[linestyle=dashed](0,0){ 1 }
\rput(0.45,-0.55){ $\frac{1}{2}$ }
\pscurve{->}(0.5,-0.5)(0.75,-0.25)(1,0)
\rput(1.75,-0.55){ $\frac{3}{4}$ }
\pscurve{->}(1.75,-0.3)(1.65,-0.1)(1.5,0)
% y-axis
\rput(0.3,2.5){ $\Im$ }
\psline{->}(0,-2.5)(0,2.5)
% x-axis
\rput(2.5,0.3){ $\Re$ }
\psline{->}(-2.5,0)(2.5,0)
% quadrants
\rput(0.3,2.2){ $\pi/2$ }
\rput(0.3,-2.5){ $\pi$ }
\rput(-2.5,0.3){ $3\pi/2$ }
\rput(2.2,0.3){ $0$ }
\end{pspicture}
\end{center}
The poles are where the denominator is zero, $z = \frac{1}{2}$, and $z = \frac{3}{4}$. The zeros come from the numerator, $z = 0$, and $z=\frac{5}{8}$. Below is the pole-zero diagram for $\hat{F}(z)$.
%\img{images/roc/examples/twosided/polezeros.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3,-3)(3,3)
\pscustom[fillstyle=solid,fillcolor=white,linestyle=none]{
\psframe(-2,-2)(2,2)
}
\pscustom[fillstyle=solid,fillcolor=blue!40,linestyle=none]{
\pscircle(0,0){ 1.5 }
}
\pscustom[fillstyle=solid,fillcolor=white,linestyle=none]{
\pscircle(0,0){ 1 }
}
\pscircle[linestyle=dashed](0,0){ 1.5 }
\pscircle[linestyle=dashed](0,0){ 1 }
\rput[scale=1.25](0,0){${\bf O}$}
\rput[scale=1.25](1.25,0){${\bf O}$}
\rput[scale=1.25](1.5,0){${\bf X}$}
\rput[scale=1.25](1,0){${\bf X}$}
% y-axis
\rput(0.3,2.5){ $\Im$ }
\psline{->}(0,-2.5)(0,2.5)
% x-axis
\rput(2.5,0.3){ $\Re$ }
\psline{->}(-2.5,0)(2.5,0)
% quadrants
\rput(0.3,2.2){ $\pi/2$ }
\rput(0.3,-2.5){ $\pi$ }
\rput(-2.5,0.3){ $3\pi/2$ }
\rput(2.2,0.3){ $0$ }
\end{pspicture}
\end{center}
The region of convergence is bounded by the poles. The zeros play no role. However, there can be pole/zero cancellations. Recall that the Z-transform can only exist when there is a non-trivial overlap for the region of convergence. For example, the signal $q(n) = \lr{\frac{3}{4}}^n u(n) - \lr{\frac{1}{2}}^n u(-n-1)$ has no overlap for the region of convergence, hence its Z-transform doesn't exist.
\end{example}
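The pole-zero bookkeeping can be checked with \texttt{numpy.roots}. This is a sketch of our own; the coefficient lists just expand the factored forms above:
\begin{verbatim}
# Sketch: zeros and poles of F(z) = 2z(z - 5/8) / ((z - 1/2)(z - 3/4)).
import numpy as np

num = np.polymul([2, 0], [1, -5/8])     # 2z * (z - 5/8)
den = np.polymul([1, -1/2], [1, -3/4])  # (z - 1/2)(z - 3/4)
print("zeros:", np.roots(num))          # 0 and 5/8
print("poles:", np.roots(den))          # 1/2 and 3/4, bounding the annulus
\end{verbatim}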
\subsection{Poles and Zeros}
Let's look at the poles and zeros of a finite-duration signal $v(n) = \delta(n-N)$.
\begin{align*}
\hat{V}(z) &= \sum \limits_{n=-\infty}^{\infty} \delta(n-N) z^{-n} \\
\hat{V}(z) &= z^{-N}
\end{align*}
Let's consider the cases for different values of $N$:
I) $N \gt 0$
$$
\hat{V}(z) = z^{-N} = \frac{1}{z^N}
$$
There are $N$ poles at zero; hence the region of convergence must exclude $0$.
II) $N = 0$
$$
\hat{V}(z) = 1
$$
The $RoC$ includes all $z$ including $0$ and $\infty$.
III) $N \lt 0$
$$
\hat{V}(z) = z^{-N} = z^M \mbox { where } M = -N \gt 0
$$
We have $M$ poles at $\infty$, so $\infty$ is excluded from the $RoC$.
\begin{example}
Find the poles for the Z-transform of the signal $r(n) = \delta(n-1) + 2\delta(n-2) + 3\delta(n-3)$.
%\img{images/deltas/examples/one.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-1,-1)(5,2)
% line
\psline{-*}(1,0)(1,0.5)
% value
\rput(1.5,0.5){(1)}
% position
\rput(1,-0.3){1}
% line
\psline{-*}(2,0)(2,1)
% value
\rput(2.3,1){(2)}
% position
\rput(2,-0.3){2}
% line
\psline{-*}(3,0)(3,1.5)
% value
\rput(3.5,1.5){(3)}
% position
\rput(3,-0.3){3}
% axes
\psline{->}(0,0)(4,0)
\rput(4,-0.3){ $n$ }
\rput(4,1.25){ $r(n)$ }
\end{pspicture}
\end{center}
\begin{align*}
\hat{R}(z) &= \sum \limits_{n = -\infty}^{\infty} r(n) z^{-n} \\
\hat{R}(z) &= \sum \limits_{n = -\infty}^{\infty}\lr{\delta(n-1) + 2\delta(n-2) + 3\delta(n-3)} z^{-n} \\
\hat{R}(z) &= z^{-1} + 2z^{-2} + 3z^{-3} \\
\hat{R}(z) &= \frac{z^2 + 2z + 3}{z^3} \\
\end{align*}
We have a triple pole at 0, so $RoC(r): 0 \lt \abs{z}$.
\end{example}
\begin{example}
Find the poles for the anti-causal signal $g(n)$ represented in the figure below.
%\img{images/deltas/examples/two.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-1,-1)(5,2)
% line
\psline{-*}(3,0)(3,0.5)
% value
\rput(3.5,0.5){(1)}
% position
\rput(3,-0.3){0}
% line
\psline{-*}(2,0)(2,1)
% value
\rput(2.3,1){(2)}
% position
\rput(2,-0.3){-1}
% line
\psline{-*}(1,0)(1,1.5)
% value
\rput(1.5,1.5){(3)}
% position
\rput(1,-0.3){-2}
% axes
\psline{->}(0,0)(4,0)
\rput(4,-0.3){ $n$ }
\rput(4,1.5){ $g(n)$ }
\end{pspicture}
\end{center}
\begin{align*}
\hat{G}(z) &= \sum \limits_{n = -\infty}^{\infty} g(n) z^{-n} \\
\hat{G}(z) &= 1 + 2z + 3z^2 \\
\end{align*}
The poles are at infinity, $RoC(g): \abs{z} \lt \infty$.
\end{example}
\begin{example}
Find the poles for the two-sided signal $h(n)$ represented in the figure below.
%\img{images/deltas/examples/three.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-1,-1)(5,2)
% line
\psline{-*}(3,0)(3,0.5)
% value
\rput(3.5,0.5){(1)}
% position
\rput(3,-0.3){1}
% line
\psline{-*}(2,0)(2,1)
% value
\rput(2.3,1){(2)}
% position
\rput(2,-0.3){0}
% line
\psline{-*}(1,0)(1,1.5)
% value
\rput(1.5,1.5){(3)}
% position
\rput(1,-0.3){-1}
% axes
\psline{->}(0,0)(4,0)
\rput(4,-0.3){ $n$ }
\rput(4,1.5){ $h(n)$ }
\end{pspicture}
\end{center}
\begin{align*}
\hat{H}(z) &= \sum \limits_{n = -\infty}^{\infty} h(n) z^{-n} \\
\hat{H}(z) &= z^{-1} + 2 + 3z \\
\hat{H}(z) &= \frac{1 + 2z + 3z^2}{z} \\
\end{align*}
Poles are at 0 and $\infty$, $RoC(h): 0 \lt \abs{z} \lt \infty$.
\end{example}
\subsection{Rational Z-Transforms}
We can write $\hat{R}(z)$ as a rational function, the ratio of two polynomials $P(z)$ and $Q(z)$:
$$
\hat{R}(z) = \frac{P(z)}{Q(z)} = \frac{b_M z^M + b_{M-1}z^{M-1} + \cdots + b_0}{a_N z^N + a_{N-1} z^{N-1} + \cdots + a_0}
$$
For rational Z-transforms, we say that
\begin{enumerate}
\item $\hat{R}$ is strictly proper if $M \lt N$ (the degree of the numerator is less than the degree of the denominator).
\item $\hat{R}$ is proper if $M \leq N$.
\item $\hat{R}$ is improper if $M \gt N$.
\item The roots of $Q(z)$ are the poles.
\item The roots of $P(z)$ are the zeros.
\item If $M \gt N$, there are $M - N$ poles at $\infty$.
\item If $N \gt M$, there are $N - M$ zeros at $\infty$.
\item The number of poles is equal to the number of zeros if we account for what happens at infinity.
\end{enumerate}
In general, the signals that produce rational Z-transforms are obtained from, and only from, linear combinations of $\alpha^n u(n)$, $\alpha^n u(-n)$, $n^k \alpha^n u(n)$, and $n^k \alpha^n u(-n)$, where $k \in \N$, and shifted versions thereof.
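The degree rules above are mechanical enough to code up. The helper below is a hypothetical sketch of our own (the function name and its dictionary output are our inventions, not anything from the course):
\begin{verbatim}
# Sketch: finite poles/zeros via numpy.roots, plus the count at infinity
# inferred from deg P - deg Q. (Hypothetical helper, our own naming.)
import numpy as np

def pole_zero_report(P, Q):          # coefficient lists, highest power first
    M, N = len(P) - 1, len(Q) - 1
    rep = {"zeros": np.roots(P), "poles": np.roots(Q)}
    if M > N: rep["poles at infinity"] = M - N
    if N > M: rep["zeros at infinity"] = N - M
    return rep

# (z-1)(z-2)/(z-3): M = 2 > N = 1, so one pole at infinity
print(pole_zero_report([1, -3, 2], [1, -3]))
\end{verbatim}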
\begin{example}
\begin{align*}
\hat{R}(z) &= \frac{(z-1)(z-2)}{(z-3)} \\
\end{align*}
Here $M \gt N$, so we have a pole at infinity. Poles are $3,\infty$. Zeros are $1,2$.
\end{example}
\begin{example}
\begin{align*}
\hat{R}(z) &= \frac{(z-3)}{(z-1)(z-2)} \\
\end{align*}
Here $N\gt M$, so we have a zero at infinity. Poles are $1,2$. Zeros are $3,\infty$.
\end{example}
\begin{example}
\begin{align*}
\hat{R}(z) &= \frac{z^2 + 1}{z} \\
\end{align*}
Can $r$ be causal?
\begin{align*}
\hat{R}(z) &= \frac{z^2 + 1}{z} \\
\hat{R}(z) &= \frac{(z-i)(z+i)}{z} \\
\end{align*}
We have zeros at $\pm i$ and poles at $0,\infty$. If there is a pole at infinity, then $r$ cannot be causal. If there is a pole at 0, then $r$ cannot be anti-causal. We can also see this fact using an inverse transform $\hat{R}(z) = z + \frac{1}{z} \iztp \delta(n-1) + \delta(n+1)$.
\end{example}
\subsection{Relation to Fourier Transform}
Let's look into the relation between the Z-transform and the Fourier transform:
\begin{nicebox}
\begin{align*}
X(\omega) &= \sum \limits_{n=-\infty}^{\infty} x(n) e^{-i\omega n} \\
\hat{X}(z) &= \sum \limits_{n=-\infty}^{\infty} x(n) z^{-n} \\
\end{align*}
\end{nicebox}
There is an $RoC(x)$ affiliated with the Z-Transform. If the $RoC$ includes the unit circle, then the following statement is true:
\begin{nicebox}
$$
X(\omega) = \left. \hat{X}(z) \right|_{z = e^{i\omega}}
$$
\end{nicebox}
\begin{example}
Let's look at the function $x(n) = \alpha^n u(n)$ in two cases: first $\abs{\alpha} \lt 1$, then $\abs{\alpha} \gt 1$.
\begin{align*}
\hat{X}(z) &= \sum \limits_{n=-\infty}^{\infty} \alpha^n u(n) z^{-n} \\
\hat{X}(z) &= \sum \limits_{n = 0}^{\infty} \lr{\alpha z^{-1}}^n \\
\hat{X}(z) &= \frac{1}{1- \alpha z^{-1}} \quad \mbox{ if } \abs{\alpha z^{-1}} \lt 1 \\
\end{align*}
This means that $RoC(x): \abs{z} \gt \abs{\alpha}$.
I) $\abs{\alpha} \lt 1$: the Fourier transform exists.
This works because the $RoC$ includes all values such that $\abs{z} \gt \abs{\alpha}$. Since $1 \gt \abs{\alpha}$, the unit circle is in the $RoC$.
\begin{align*}
X(\omega) &= \left. \hat{X}(z) \right|_{z = e^{i\omega}} \\
X(\omega) &= \frac{1}{1-\alpha e^{-i\omega }} \\
\end{align*}
II) $\abs{\alpha} \gt 1$: the function doesn't have a Fourier transform.
Since $\abs{\alpha} \gt 1$, the $RoC$ requires $\abs{z} \gt \abs{\alpha} \gt 1$. This means that the unit circle is excluded from the region of convergence.
\end{example}
\subsection{BIBO Stability and the RoC}
If a system is BIBO stable, then the unit circle is included in the $RoC$. First recall that a system is BIBO stable if and only if its impulse response is absolutely summable.
\begin{align*}
\sum \abs{h(n)} \lt \infty \iff BIBO
\end{align*}
We can see that absolute summability depends only on $R$, and taking $R = 1$ recovers $\sum \abs{h(n)}$, so the $RoC$ must include the unit circle for stability:
\begin{align*}
\sum \limits_{n \in \Z} \abs{h(n) z^{-n}} = \sum \limits_{n \in \Z} \abs{h(n) R^{-n}} \lt \infty
\end{align*}
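A one-line numerical illustration (our own sketch, with arbitrary example values of $\alpha$): for $h(n) = \alpha^n u(n)$ the absolute sum is finite exactly when $\abs{\alpha} \lt 1$, i.e., when the $RoC$ $\abs{z} \gt \abs{\alpha}$ contains the unit circle.
\begin{verbatim}
# Sketch: truncated absolute sums of h(n) = alpha^n u(n).
import numpy as np
for alpha in (0.9, 1.1):
    print(alpha, np.sum(np.abs(alpha ** np.arange(500.0))))
# 0.9 settles near 1/(1-0.9) = 10; 1.1 grows without bound
\end{verbatim}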
\subsection{Systems in Cascade}
When cascading two systems $g$ and $h$, what happens if we pass in a signal $x(n) = z^n$?
%\img{images/cascading/ztrans/cascade.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-2,-2)(5,1)
%frame
\rput(1,0){$g$}
\psframe(1.5,-0.25)(0.5,0.25)
%frame
\rput(3,0){$h$}
\psframe(3.5,-0.25)(2.5,0.25)
%text
\rput(-0.5,0){$x(n)$}
%text
\rput(2,0.3){$q(n)$}
%text
\rput(4.5,0){$y(n)$}
% wire with arrow
\psline{->}(0,0)(0.5,0)
% wire with arrow
\psline{->}(1.5,0)(2.5,0)
% wire with arrow
\psline{->}(3.5,0)(4,0)
\end{pspicture}
\end{center}
Let $f = g * h$. If we follow the path of the signal piecewise, we can first solve the system for $q(n)$, and then pass $q(n)$ through $h$ to determine $y$.
\begin{align*}
q(n) &= x(n) * g(n) \\
q(n) &= \sum \limits_{k=-\infty}^{\infty} g(k) x(n-k) \\
q(n) &= \sum \limits_{k=-\infty}^{\infty} g(k) z^{n-k} \\
q(n) &= z^n \sum \limits_{k=-\infty}^{\infty} g(k) z^{-k} \\
q(n) &= z^n \hat{G}(z) \\
\end{align*}
Now the second part of the system:
\begin{align*}
y(n) &= q(n) * h(n) \\
y(n) &= z^n \hat{G}(z) * h(n) \\
y(n) &= \sum \limits_{k=-\infty}^{\infty} h(k) z^{n-k} \hat{G}(z) \\
y(n) &= z^n \hat{G}(z) \sum \limits_{k=-\infty}^{\infty} h(k) z^{-k} \\
y(n) &= z^n \hat{G}(z) \hat{H}(z) \\
\end{align*}
We find that convolution in the time domain is multiplication in the transform domain---a relationship analogous to the one you know from the Fourier transform:
\begin{nicebox}
$$
f(n) = (g * h)(n) \ztp \hat{F}(z) = \hat{G}(z) \hat{H}(z)
$$
\end{nicebox}
\begin{proof}
\begin{align*}
f(n) &= (g*h)(n) \\
\hat{F}(z) &= \sum \limits_{n\in\Z}(g*h)(n)z^{-n} \\
\hat{F}(z) &= \sum \limits_{n\in\Z} \sum \limits_{k\in\Z}g(k)h(n-k)z^{-n} \\
\hat{F}(z) &= \sum \limits_{k\in\Z} g(k) \sum \limits_{n\in\Z}h(n-k)z^{-n} \quad \mbox{swap summations}\\
\hat{F}(z) &= \sum \limits_{k\in\Z} g(k) \sum \limits_{m\in\Z}h(m)z^{-(m+k)} \quad \mbox{let } m = n-k\\
\hat{F}(z) &= \sum \limits_{k\in\Z} g(k) z^{-k} \sum \limits_{m\in\Z}h(m)z^{-m} \\
\hat{F}(z) &= \hat{G}(z) \hat{H}(z) \\
\end{align*}
For the system $f = g * h$, we have $RoC(g) \cap RoC(h) \subseteq RoC(f)$. If $\hat{G}(z)$ and $\hat{H}(z)$ are rational and there are no pole-zero cancellations, then $RoC(g) \cap RoC(h) = RoC(f)$.
\end{proof}
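A numerical spot check of the convolution property (our own sketch; any $z_0$ in both $RoC$s should work, and the two exponentials here are arbitrary assumptions):
\begin{verbatim}
# Sketch: Z{g*h}(z0) = Ghat(z0) * Hhat(z0) for two causal exponentials.
import numpy as np

L, z0 = 300, 1.5                    # z0 lies in both RoCs (> 1/2 and > 4/5)
n = np.arange(L)
g, h = 0.5 ** n, 0.8 ** n
f = np.convolve(g, h)[:L]           # (g*h)(n), exact for n < L

Z = lambda x: np.sum(x * z0 ** (-np.arange(len(x), dtype=float)))
assert np.isclose(Z(f), Z(g) * Z(h))
\end{verbatim}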
\begin{example}
Two systems are in cascade, $\hat{G}$ and $\hat{H}$. Find the overall $RoC$ of the system.
\begin{align*}
\hat{G}(z) = \frac{1}{1 - \frac{1}{2}z^{-1}} \quad \mbox{(causal)} \\
\hat{H}(z) = \frac{z - \frac{1}{2}}{z + \frac{1}{3}} \quad \mbox{(causal)} \\
\end{align*}
First find $\hat{F}$ and solve for the poles and zeros:
\begin{align*}
\hat{F}(z) &= \hat{G}(z)\hat{H}(z) \\
\hat{F}(z) &= \lr{\frac{z}{z - \frac{1}{2}}} \lr{\frac{z - \frac{1}{2}}{z + \frac{1}{3}}} \\
\hat{F}(z) &= \frac{z\lr{z - \frac{1}{2}}}{\lr{z - \frac{1}{2}} \lr{z + \frac{1}{3}}} \\
\hat{F}(z) &= \frac{z}{\lr{z + \frac{1}{3}}} \\
\end{align*}
So we have a pole at $-\frac{1}{3}$ and a zero at 0.
\begin{align*}
\hat{F}(z) = \frac{z}{\lr{z + \frac{1}{3}}} \iztp \lr{-\frac{1}{3}}^n u(n) \quad \mbox{ where } \abs{z} \gt \frac{1}{3}
\end{align*}
Looking at the regions of convergence we find that the pole-zero cancellation changed the region of convergence.
\begin{align*}
RoC(g): &\frac{1}{2} \lt \abs{z} \\
RoC(h): &\frac{1}{3} \lt \abs{z} \\
RoC(f): &\frac{1}{3} \lt \abs{z} \\
\end{align*}
In this case $RoC(f) \supset RoC(g) \cap RoC(h)$.
In general, when given two systems in cascade, there is a good method for solving for the system function without calculating it explicitly. However, you must be given the input/output pair. For example, given a system
\begin{align*}
x \to &\fbox{h} \to y \\
y(n) = (x*h)(n) &\ztp \hat{Y}(z) = \hat{X}(z)\hat{H}(z) \\
\end{align*}
We can use the relation $\hat{H}(z) = \frac{\hat{Y}(z)}{\hat{X}(z)}$. You should then be able to determine the system, but $RoC(x)$ and $RoC(h)$ must have non-trivial overlap, because $RoC(y) \supseteq RoC(x) \cap RoC(h)$.
\end{example}
\subsection{Shifting Property}
Let's look into the shifting property for the Z-transform:
\begin{nicebox}
\begin{align*}
x(n) &\ztp \hat{X}(z) \\
x(n-N) &\ztp z^{-N}\hat{X}(z) \\
\end{align*}
\end{nicebox}
There are situations that modify the $RoC$.
I) $N \gt 0$ (shift to the right)
\begin{align*}
x(n-N) &\ztp z^{-N}\hat{X}(z) \\
x(n-N) &\ztp \frac{\hat{X}(z)}{z^{N}} \\
\end{align*}
The factor $z^{-N}$ contributes up to $N$ poles at $z=0$, so $0$ may be excluded from the $RoC$.
II) $N \lt 0$ (shift to the left)
The factor $z^{-N} = z^{\abs{N}}$ potentially adds $\abs{N}$ poles at $z=\infty$ and $\abs{N}$ zeros at $0$. For both cases of $N$, the $RoC$ may exclude $0$, $\infty$, or neither if there are cancellations.
\subsection{A recursive example}
\begin{align*}
y(n) &= \alpha y(n-1) + x(n) - x(n-1) \\
\end{align*}
Let's utilize the fact that $\hat{H}(z) = \frac{\hat{Y}(z)}{\hat{X}(z)}$:
\begin{align*}
y(n) &= \alpha y(n-1) + x(n) - x(n-1) \\
\hat{Y}(z) &= \alpha z^{-1}\hat{Y}(z) + \hat{X}(z) - z^{-1}\hat{X}(z) \\
\hat{Y}(z)\lr{1 - \alpha z^{-1}} &= \hat{X}(z)\lr{1 - z^{-1}} \\
\hat{H}(z) &= \frac{\lr{1 - z^{-1}}}{\lr{1 - \alpha z^{-1}}} \\
\hat{H}(z) &= \frac{\lr{z - 1}}{\lr{z - \alpha}} \\
\end{align*}
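To tie the algebra back to the time domain, here is a sketch of our own that iterates the difference equation for an impulse input and compares against the inverse transform of $\hat{H}(z) = \frac{z-1}{z-\alpha}$, namely $h(n) = \alpha^n u(n) - \alpha^{n-1}u(n-1)$ (the value of $\alpha$ is an arbitrary assumption):
\begin{verbatim}
# Sketch: impulse response of y(n) = alpha*y(n-1) + x(n) - x(n-1).
alpha, L = 0.5, 10
y, ym1, xm1 = [], 0.0, 0.0               # y(n-1) and x(n-1) registers
for n in range(L):
    xn = 1.0 if n == 0 else 0.0          # x = unit impulse
    yn = alpha * ym1 + xn - xm1
    y.append(yn)
    ym1, xm1 = yn, xn

h = [alpha**n - (alpha**(n-1) if n >= 1 else 0.0) for n in range(L)]
assert all(abs(a - b) < 1e-12 for a, b in zip(y, h))
\end{verbatim}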
\newpage
\section{Week 11}
\subsection{Properties of the Z-transform}
\subsubsection{Modulation by Complex Exponentials}
What happens when we modulate a function in time with a complex exponential?
\begin{align*}
g(n) &\ztp \hat{G}(z) \quad R_1 \lt \abs{z} \lt R_2 \\
h(n) &= z_0^ng(n) \ztp \hat{H}(z) = \mbox{ } ?
\end{align*}
The result must be consistent with our result for the fourier transform version.
\begin{align*}
\hat{H}(z) &= \sum \limits_{n\in\Z} h(n) z^{-n} \\
\hat{H}(z) &= \sum \limits_{n\in\Z} z_0^ng(n) z^{-n} \\
\hat{H}(z) &= \sum \limits_{n\in\Z} g(n) \lr{\frac{z}{z_0}}^{-n} \\
\hat{H}(z) &= \hat{G}\lr{\frac{z}{z_0}} \quad \quad R_1 \lt \abs{\frac{z}{z_0}} \lt R_2
\end{align*}
The $RoC$ simplifies to $\{ z \st \abs{z_0}R_1 \lt \abs{z} \lt \abs{z_0}R_2\}$, so that $RoC(h) = \abs{z_0}RoC(g)$.
\begin{example}
Given the following transform pair, determine what happens to the poles and zeros.
$$
e^{i\omega_0 n} g(n) \ztp \hat{G}(e^{-i\omega_0 }z)
$$
We know that the $RoC$ is the same as $RoC(g)$, since $\abs{e^{i\omega_0}} = 1$. Assume that $z=p$ is a pole of $\hat{G}$. Then $\hat{H}(z) = \hat{G}(e^{-i\omega_0}z)$ has a pole where $e^{-i\omega_0} z = p$, i.e., at $z = e^{i\omega_0} p$: the original pole rotated by $\omega_0$.
Suppose that $\omega_0 = \frac{\pi}{4}$, and $g(n) = u(n)$.
\begin{align*}
u(n) &\ztp \hat{U}(z) \\
u(n) &\ztp \frac{1}{1-z^{-1}} = \frac{z}{z-1}\\
e^{i\omega_0 n}u(n) &\ztp \hat{U}(e^{-i\omega_0}z) \\
e^{i\omega_0 n}u(n) &\ztp \frac{e^{-i\omega_0}z}{e^{-i\omega_0}z -1} \\
e^{i\omega_0 n}u(n) &\ztp \frac{z}{z - e^{i\omega_0}} \\
\end{align*}
%\img{images/roc/rotated/rot.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-4,-4)(3,3)
\pscustom[fillstyle=solid,fillcolor=blue!40,linestyle=none]{
\psframe(-2,-2)(2,2)
}
% the circle
\pscustom[fillstyle=solid,fillcolor=white,linestyle=none]{
\pscircle(0,0){ 1.0 }
}
\pscircle[linestyle=dashed](0,0){ 1 }
\rput(1.33,-0.45){ original pole }
\pscurve{->}(1.3,-0.3)(1.2,-0.1)(1,0)
\rput(1,0){ x }
\rput(1.3,0.3){ new pole }
\pscurve{->}(1.3,0.4)(0.9,0.8)(0.7,0.7)
\rput(0.7,0.7){ x }
% y-axis
\rput(0.3,2.5){ $\Im$ }
\psline{->}(0,-2.5)(0,2.5)
% x-axis
\rput(2.5,0.3){ $\Re$ }
\psline{->}(-2.5,0)(2.5,0)
% quadrants
\rput(0.3,2.2){ $\pi/2$ }
\rput(0.3,-2.5){ $\pi$ }
\rput(-2.5,0.3){ $3\pi/2$ }
\rput(2.2,0.3){ $0$ }
\end{pspicture}
\end{center}
Here the $RoC$ stays the same since the magnitudes are equal.
\end{example}
\begin{example}
Determine what happens to the $RoC$ for $z_0 = 2$.
\begin{align*}
h(n) = 2^n u(n) &\ztp \hat{U}\lr{ \frac{z}{2} } \\
h(n) = 2^n u(n) &\ztp \frac{1}{1-2z^{-1}} \\
\end{align*}
The $RoC$ gets scaled:
%\img{images/roc/scaled/scale.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-4,-4)(3,3)
\pscustom[fillstyle=solid,fillcolor=blue!40,linestyle=none]{
\psframe(-2.2,-2.2)(2.2,2.2)
}
% the circle
\pscustom[fillstyle=solid,fillcolor=white,linestyle=none]{
\pscircle(0,0){ 2.0 }
}
\pscircle[linestyle=dashed](0,0){ 2 }
\pscircle[linestyle=dashed](0,0){ 1 }
\rput(1.33,-0.65){ original pole }
\pscurve{->}(1.3,-0.4)(1.2,-0.1)(1,0)
\rput(1,0){ x }
\rput(2.3,-0.3){ new pole }
\pscurve{->}(2.3,-0.3)(2.2,-0.1)(2,0)
\rput(2,0){ x }
% y-axis
\rput(0.3,2.5){ $\Im$ }
\psline{->}(0,-2.5)(0,2.5)
% x-axis
\rput(2.5,0.3){ $\Re$ }
\psline{->}(-2.5,0)(2.5,0)
% quadrants
\rput(0.3,2.2){ $\pi/2$ }
\rput(0.3,-2.5){ $\pi$ }
\rput(-2.5,0.3){ $3\pi/2$ }
\rput(2.2,0.3){ $0$ }
\end{pspicture}
\end{center}
\end{example}
\begin{example}
Determine what happens to the $RoC$ for $z_0 = 2e^{-i\frac{\pi}{4}}$.
\begin{align*}
f(n) = \lr{2e^{-i\frac{\pi}{4}}}^n u(n) &\ztp \hat{U}\lr{ \frac{z}{2e^{-i\frac{\pi}{4}}} } \\
f(n) = \lr{2e^{-i\frac{\pi}{4}}}^n u(n) &\ztp \frac{1}{1-\lr{2e^{-i\frac{\pi}{4}}}z^{-1}} \\
\end{align*}
The $RoC$ gets rotated and scaled, a combination of the above two cases.
\end{example}
\subsubsection{Time Reversal}
What happens when we time-reverse a function?
\begin{align*}
g(n) &\ztp \hat{G}(z) \quad R_1 \lt \abs{z} \lt R_2 \\
h(n) &= g(-n) \ztp \hat{H}(z) = \mbox{ } ?
\end{align*}
\begin{align*}
\hat{H}(z) &= \sum \limits_{n\in\Z} h(n) z^{-n} \\
\hat{H}(z) &= \sum \limits_{n\in\Z} g(-n) z^{-n} \\
\hat{H}(z) &= \sum \limits_{n\in\Z} g(n) z^{n} \mbox{ let } n = -n \\
\hat{H}(z) &= \sum \limits_{n\in\Z} g(n) \lr{\frac{1}{z}}^{-n} \\
\hat{H}(z) &= \hat{G}\lr{z^{-1}} \quad R_1 \lt \abs{z^{-1}} \lt R_2\\
\end{align*}
The $RoC$ is changed: we must take the reciprocal, $RoC(h) = \frac{1}{RoC(g)}$. This gives us $\{ z \st \frac{1}{R_2} \lt \abs{z} \lt \frac{1}{R_1} \}$.
\begin{example}
Find what happens to the $RoC$ for $h(n) = u(-n)$.
\begin{align*}
u(n) &\ztp \frac{1}{1-z^{-1}} \quad \abs{z} \gt 1 \\
h(n) = u(-n) &\ztp \frac{1}{1-z} \quad \abs{z} \lt 1\\
\end{align*}
All poles and zeros are inverted. If $z=p$ is a pole of $\hat{G}(z)$, then $z=\frac{1}{p}$ is a pole of $\hat{H}(z)$. The same is true for every zero.
\end{example}
\subsubsection{Trigonometric Example}
Determine the Z-transform and $RoC$ for $h(n) = \cos(\omega_0 n) u(n)$.
\begin{align*}
h(n) = \cos(\omega_0 n) u(n) &\ztp \hat{H}(z) \\
h(n) = \frac{1}{2} e^{i\omega_0 n}u(n) + \frac{1}{2} e^{-i\omega_0 n} u(n) &\ztp \frac{1}{2} \hat{U}\lr{\frac{z}{e^{i\omega_0}}} + \frac{1}{2} \hat{U}\lr{\frac{z}{e^{-i\omega_0}}} \\
\end{align*}
We can use the fact that $\hat{U}(z) = \frac{z}{z - 1}$.
\begin{align*}
\hat{H}(z) &= \frac{1}{2} \lr{\frac{z}{z - e^{i\omega_0}}} + \frac{1}{2} \lr{\frac{z}{z - e^{-i\omega_0}}} \\
\hat{H}(z) &= \frac{z}{2}\lr{\frac{2z - e^{-i\omega_0} - e^{i\omega_0}}{\lr{z-e^{i\omega_0}}\lr{z-e^{-i\omega_0}}}} \\
\hat{H}(z) &= \frac{z\lr{z - \cos\omega_0}}{\lr{z-e^{i\omega_0}}\lr{z-e^{-i\omega_0}}} \\
\end{align*}
Let's use $\omega_0 = \frac{\pi}{4}$.
%\img{images/roc/cosine/rot.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3,-3)(3,3)
\pscustom[fillstyle=solid,fillcolor=blue!40,linestyle=none]{
\psframe(-2,-2)(2,2)
}
% the circle
\pscustom[fillstyle=solid,fillcolor=white,linestyle=none]{
\pscircle(0,0){ 1.0 }
}
\pscircle[linestyle=dashed](0,0){ 1 }
%\rput(1.33,-0.45){ original pole }
%\pscurve{->}(1.3,-0.3)(1.2,-0.1)(1,0)
%\rput(1.3,0.3){ new pole }
%\pscurve{->}(1.3,0.4)(0.9,0.8)(0.7,0.7)
\rput(0.7,0.7){ {\bf X} }
\rput(0.7,-0.7){ {\bf X} }
\rput(0,0){ {\bf 0} }
\rput(0.7,0){ {\bf 0} }
% y-axis
\rput(0.3,2.5){ $\Im$ }
\psline{->}(0,-2.5)(0,2.5)
% x-axis
\rput(2.5,0.3){ $\Re$ }
\psline{->}(-2.5,0)(2.5,0)
% quadrants
\rput(0.3,2.2){ $\pi/2$ }
\rput(0.3,-2.5){ $\pi$ }
\rput(-2.5,0.3){ $3\pi/2$ }
\rput(2.2,0.3){ $0$ }
\end{pspicture}
\end{center}
We have zeros at $0$ and $z = \cos\omega_0 = \frac{1}{\sqrt{2}}$, and poles at $e^{\pm i \omega_0}$. There is no Fourier transform, since the unit circle is not included in the $RoC$ (the poles lie on it).
\subsubsection{Differentiation in the Z Domain}
Let's see what happens when we differentiate in the Z domain, for $g(n) \ztp \hat{G}(z)$:
\begin{align*}
\hat{G}(z) &= \sum \limits_{n\in\Z} g(n) z^{-n} \\
\frac{d}{dz}\hat{G}(z) &= -\sum \limits_{n\in\Z} ng(n) z^{-n-1} \\
\frac{d}{dz}\hat{G}(z) &= -z^{-1}\sum \limits_{n\in\Z} ng(n) z^{-n} \\
\end{align*}
This gives us the differentiation property of the Z-transform:
\begin{nicebox}
\begin{align*}
g(n) &\ztp \hat{G}(z) \\
ng(n) &\ztp -z \frac{d}{dz} \hat{G}(z) \\
\end{align*}
\end{nicebox}
We can see that differentiation of a rational function will produce a rational Z-transform. If you recall, the set of signals that produce rational Z-transforms are obtained from, and only from, linear combinations of $\alpha^n u(n)$, $\alpha^n u(-n)$, $n^k \alpha^n u(n)$, and $n^k \alpha^n u(-n)$, where $k \in \N$, and shifted versions thereof.
\subsubsection{Dilation}
What if we have a signal that upsamples another signal by dilating it in the time domain?
$$ h(n) = \begin{cases} g\lr{\frac{n}{N}} & n\bmod N=0 \\ 0 & \mbox{otherwise} \\ \end{cases} $$
The input signal becomes dilated in the time domain by a factor of $N$---the system inserts $N-1$ zeros between samples.
\begin{align*}
\hat{H}(z) &= \sum \limits_{n\in\Z} h(n) z^{-n} \\
\hat{H}(z) &= \sum \limits_{n\in N\Z} g\lr{\frac{n}{N}} z^{-n} \\
\end{align*}
Note that we have performed a \emph{dilation} of the integers. A dilation of a set $X$ by $c \in \Z$ is the set $cX = \{ cx \st x \in X \}$. Here we have dilated the integers so that we are summing over the set $N\Z = \{Nn \st n \in \Z \}$.
\begin{align*}
\hat{H}(z) &= \sum \limits_{mN\in N\Z} g(m) z^{-mN} \quad \mbox{ let } m = \frac{n}{N} \\
\hat{H}(z) &= \sum \limits_{m\in \Z} g(m) z^{-mN} \\
\end{align*}
We can now let $m$ run over all integers because, as $n$ ranges over $N\Z$, $m = \frac{n}{N}$ ranges over all of $\Z$.
\begin{align*}
\hat{H}(z) &= \sum \limits_{m\in \Z} g(m) \lr{z^{N}}^{-m}\\
\hat{H}(z) &= \hat{G}(z^N) \\
\end{align*}
Let's look at the $RoC$ for a dilated signal.
\begin{align*}
RoC(g) &: R_1 \lt \abs{z} \lt R_2 \\
RoC(h) &: R_1 \lt \abs{z^N} \lt R_2 \\
RoC(h) &: \sqrt[N]{R_1} \lt \abs{z} \lt \sqrt[N]{R_2} \\
\end{align*}
We find that $RoC(h) = \sqrt[N]{RoC(g)}$. But what happens to the poles and the zeros? If $z=p$ is a pole of $\hat{G}(z)$, then $\hat{H}(z)$ has $N$ poles at the $N^{th}$ roots of $p$. For demonstrative purposes, assume that $N = 3$, and $\frac{1}{8}$ is a pole of $\hat{G}(z)$.
%\img{images/roc/dilated/orig.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3,-3)(3,3)
\pscustom[fillstyle=solid,fillcolor=white,linestyle=none]{
\psframe(-2,-2)(2,2)
}
\rput(2,1.5){ $\hat{G}(z)$ }
\pscircle[linestyle=dashed](0,0){ 1 }
\rput(1, 0){{\bf X}}
\rput(1.3,-0.5){ $\frac{1}{8}$ }
\pscurve{->}(1.3,-0.3)(1.2,-0.1)(1,0)
% y-axis
\rput(0.3,2.5){ $\Im$ }
\psline{->}(0,-2.5)(0,2.5)
% x-axis
\rput(2.5,0.3){ $\Re$ }
\psline{->}(-2.5,0)(2.5,0)
% quadrants
\rput(0.3,2.2){ $\pi/2$ }
\rput(0.3,-2.5){ $\pi$ }
\rput(-2.5,0.3){ $3\pi/2$ }
\rput(2.2,0.3){ $0$ }
\end{pspicture}
\end{center}
\begin{align*}
z = \frac{1}{8} = \frac{1}{8}e^{i2\pi k}
\end{align*}
Multiplying by $e^{i2\pi k} = 1$ leaves the value unchanged; then we take the cube root.
\begin{align*}
z &= \sqrt[3]{\frac{1}{8}e^{i2\pi k}} \\
z &= \frac{1}{2}e^{i\frac{2\pi}{3} k} \\
\end{align*}
This gives us the set $\{ \frac{1}{2}, \frac{1}{2}e^{i2\pi/3}, \frac{1}{2}e^{i4\pi/3} \}$. Notice that the $RoC$ has been pushed out to $\abs{z} \gt \frac{1}{2}$ and now we have 3 poles:
%\img{images/roc/dilated/dilated.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3,-3)(3,3)
\pscustom[fillstyle=solid,fillcolor=white,linestyle=none]{
\psframe(-2,-2)(2,2)
}
\rput(2,1.5){ $\hat{H}(z)$ }
\pscircle[linestyle=dashed](0,0){ 1.75 }
\rput(1.75, 0){{\bf X}}
\rput(-0.875, 1.516){{\bf X}}
\rput(-0.875, -1.516){{\bf X}}
\rput(2,-0.6){ $\frac{1}{2}$ }
\pscurve{->}(2,-0.3)(1.96,-0.1)(1.75,0)
% y-axis
\rput(0.3,2.5){ $\Im$ }
\psline{->}(0,-2.5)(0,2.5)
% x-axis
\rput(2.5,0.3){ $\Re$ }
\psline{->}(-2.5,0)(2.5,0)
% quadrants
\rput(0.3,2.2){ $\pi/2$ }
\rput(0.3,-2.5){ $\pi$ }
\rput(-2.5,0.3){ $3\pi/2$ }
\rput(2.2,0.3){ $0$ }
\end{pspicture}
\end{center}
(1) If $g(n)$ is stable, what can you say about $H(\omega)$?
If $g(n)$ is stable, then $G(\omega) = \left. \hat{G}(z) \right|_{z = e^{i\omega}}$, and $g$ is absolutely summable. We aren't adding any new values of $g$ to $h$; we are simply interspersing the same values with zeros, which implies that $h$ is also absolutely summable. Inserting zeros between adjacent samples does not change stability.
$$ \sum \limits_{n \in \Z} \abs{g(n)} \lt \infty \Rightarrow \sum \limits_{n \in \Z} \abs{h(n)} \lt \infty $$
$H(\omega)$ is also well-defined.
\begin{align*}
\hat{H}(z) &= \hat{G}(z^N) \\
H(\omega) &= \hat{G}(e^{i\omega N}) = G(\omega N) \\
\end{align*}
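The dilation property can also be spot-checked numerically. This sketch is our own ($z_0$ is an assumed test point, chosen so that $z_0^N$ lies in $RoC(g)$):
\begin{verbatim}
# Sketch: verify Hhat(z0) = Ghat(z0^N) for an upsampled-by-N signal.
import numpy as np

N, z0 = 3, 1.4
g = 0.5 ** np.arange(100.0)          # g(n) = (1/2)^n u(n)
h = np.zeros(300); h[::N] = g        # insert N-1 zeros between samples

Z = lambda x, z: np.sum(x * z ** (-np.arange(len(x), dtype=float)))
assert np.isclose(Z(h, z0), Z(g, z0 ** N))
\end{verbatim}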
\subsubsection{Initial Value Theorem}
Given the following information and pole-zero diagram, find an explicit formula for $\hat{H}(z)$ and the value of $h(0)$.
\begin{enumerate}
\item $\hat{H}(z)$ is rational
\item the system is causal
\item given $x(n) = 1$, $y(n) = -4/3$
\end{enumerate}
%\img{images/roc/puzzle/puzzle.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3,-3)(3,3)
\pscustom[fillstyle=solid,fillcolor=white,linestyle=none]{
\psframe(-2,-2)(2,2)
}
\rput(2,1.5){ $\hat{H}(z)$ }
\pscircle[linestyle=dashed](0,0){ 1 }
\rput(1.5, 0){{\bf 0}}
\rput(0.75, 0){{\bf X}}
\rput(-0.5, 0){{\bf X}}
\rput(1.5,-0.4){ $\frac{3}{2}$ }
\rput(0.75,-0.4){ $\frac{3}{4}$ }
\rput(-0.5,-0.4){ $-\frac{1}{2}$ }
% y-axis
\rput(0.3,2.5){ $\Im$ }
\psline{->}(0,-2.5)(0,2.5)
% x-axis
\rput(2.5,0.3){ $\Re$ }
\psline{->}(-2.5,0)(2.5,0)
% quadrants
\rput(0.3,2.2){ $\pi/2$ }
\rput(0.3,-2.5){ $\pi$ }
\rput(-2.5,0.3){ $3\pi/2$ }
\rput(2.2,0.3){ $0$ }
\end{pspicture}
\end{center}
Looking at the pole-zero diagram, we can write an explicit formula for $\hat{H}(z)$:
\begin{align*}
\hat{H}(z) &= H_0 \frac{ \lr{z - \frac{3}{2} } } { \lr{ z + \frac{1}{2}} \lr{ z - \frac{3}{4} } } \\
\end{align*}
We can also determine the $RoC$ of $h$ because the system is causal. This tells us $RoC(h) = \{ z \st \abs{z} \gt \frac{3}{4} \}$. Let's find $H_0$. Recall from the Fourier transform days that for $x(n) = 1 = e^{i 0 n}$ we found $y(n) = H(0)e^{i 0 n}$. Here we can't use this exact trick. Recall instead that when the input of a system is $z_0^n$, we get $y(n) = z_0^n\hat{H}(z_0)$. The trick here is to let $z_0 = 1$ (note that $\hat{H}(1) = H(0)$):
\begin{align*}
y(n) &= z_0^n \hat{H}(z_0) \\
-\frac{4}{3} &= 1^n \hat{H}(1) \\
-\frac{4}{3} &= \hat{H}(1) \\
\end{align*}
Now we can use $\hat{H}(1) = -\frac{4}{3}$ to find $H_0$:
\begin{align*}
\hat{H}(z) &= H_0 \frac{ \lr{z - \frac{3}{2} } } { \lr{ z + \frac{1}{2}} \lr{ z - \frac{3}{4} } } \\
\hat{H}(1) &= H_0 \frac{ \lr{ 1- \frac{3}{2} } } { \lr{ 1 + \frac{1}{2}} \lr{ 1 - \frac{3}{4} } } \\
\hat{H}(1) &= H_0 \frac{ \lr{ - \frac{1}{2} } } { \lr{ \frac{3}{2}} \lr{ \frac{1}{4} } } \\
-\frac{4}{3} &= -H_0\frac{4}{3} \\
H_0 &= 1 \\
\end{align*}
This gives the exact formula for $\hat{H}$:
$$
\hat{H}(z) = \frac{ \lr{z - \frac{3}{2} } } { \lr{ z + \frac{1}{2}} \lr{ z - \frac{3}{4} } } \\
$$
How can we find $h(0)$? If $h$ is causal, then $h(n) = 0$ for $n \lt 0$.
\begin{align*}
\hat{H}(z) &= \sum \limits_{n \in \Z} h(n) z^{-n} \\
\hat{H}(z) &= h(0) + \frac{h(1)}{z} + \frac{h(2)}{z^2} + \cdots \\
\end{align*}
This brings us to the \emph{Initial Value Theorem}:
$$
h(0) = \lim_{z \to \infty} \hat{H}(z) \\
$$
The initial value theorem applies when the system is causal. There is a corresponding theorem, the final value theorem, for anti-causal systems.
\begin{align*}
h(0) &= \lim_{z \to \infty} \hat{H}(z) \\
h(0) &= \lim_{z \to \infty} \frac{ \lr{z - \frac{3}{2} } } { \lr{ z + \frac{1}{2}} \lr{ z - \frac{3}{4} } } \\
h(0) &= 0 \\
\end{align*}
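Numerically, the limit is easy to watch (a sketch of our own, evaluating $\hat{H}$ at increasingly large real $z$):
\begin{verbatim}
# Sketch: h(0) = lim_{z->oo} H(z) for H(z) = (z - 3/2)/((z + 1/2)(z - 3/4)).
H = lambda z: (z - 1.5) / ((z + 0.5) * (z - 0.75))
for z in (1e2, 1e4, 1e6):
    print(z, H(z))                   # tends to 0, matching h(0) = 0
\end{verbatim}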
\subsubsection{Inverse Transforms}
There are quite a few methods to find inverse Z-transforms, a few of which we will discuss here.
I) Identify familiar forms.
We have used particular forms involving Z-transforms over and over, so we can use pattern recognition:
\begin{example}
\begin{align*}
\hat{X}(z) &= \frac{1}{1 - \alpha z^{-1}} \quad \abs{\alpha} \lt \abs{z} \\
x(n) &= \alpha^n u(n)
\end{align*}
\end{example}
\begin{example}
\begin{align*}
\hat{X}(z) &= z^{-5} \\
x(n) &= \delta(n-5)
\end{align*}
\end{example}
II) Power Series Expansions
Recall the formula
$$
\sum \limits_{n=0}^{\infty} \beta^n = \frac{1}{1 - \beta} \quad \abs{\beta} \lt 1
$$
\begin{example}
\begin{align*}
\hat{X}(z) &= \frac{1}{1 - \alpha z^{-1}} \quad \abs{\alpha} \lt \abs{z} \\
\hat{X}(z) &= \sum \limits_{n=0}^{\infty} \lr{\alpha z^{-1}}^n \\
\hat{X}(z) &= \sum \limits_{n=0}^{\infty} \alpha^n z^{-n}\\
\hat{X}(z) &= \sum \limits_{n=-\infty}^{\infty} \alpha^n u(n) z^{-n}\\
x(n) &= \alpha^n u(n) \\
\end{align*}
\end{example}
Recall the expansion for $\log(1 + \beta)$:
$$
\log(1 + \beta) = \sum \limits_{n=1}^{\infty} \frac{ (-1)^{n+1} \beta^n } { n} \quad \abs{\beta} \lt 1
$$
\begin{example}
\begin{align*}
\hat{X}(z) &= \log( 1 + \alpha z^{-1}) \quad \abs{\alpha} \lt \abs{z} \\
\hat{X}(z) &= \sum \limits_{n=1}^{\infty} \frac{ (-1)^{n+1} \lr{\alpha z^{-1}}^n } { n} \\
\hat{X}(z) &= \sum \limits_{n=1}^{\infty} \lr{\frac{ (-1)^{n+1} \alpha^n } { n}} z^{-n} \\
\end{align*}
This tells us that
$$x(n) = \begin{cases} 0 & n\leq 0 \\ \frac{(-1)^{n+1}\alpha^n}{n} & n \geq 1 \\ \end{cases}$$
Hence, $x(n) = \frac{(-1)^{n+1}\alpha^n}{n} u(n-1)$. Note that $\log( 1 + \alpha z^{-1})$ is not rational in $z$, and indeed $x(n)$ is not a linear combination of shifted one-sided exponentials multiplied by polynomials.
\end{example}
III) Differentiation
From the above example, let $y(n) = nx(n) = (-1)^{n+1}\alpha^n u(n-1)$.
\begin{align*}
y(n) &= (-1)^{n+1} \alpha^n u(n-1) \\
y(n) &= -(-1)^n \alpha^n u(n-1) \\
y(n) &= -(-\alpha)^n u(n-1) \\
y(n) &= \alpha (-\alpha)^{n-1} u(n-1) \\
\end{align*}
So $y(n)$ is a shifted and scaled version of $(-\alpha)^n u(n)$, whose transform is $\hat{G}(z) = \frac{1}{1 + \alpha z^{-1}}$. The shifting property then gives
\begin{align*}
\hat{Y}(z) = \frac{\alpha z^{-1}}{1 + \alpha z^{-1}} \\
\end{align*}
Or, we could use the differentiation property:
\begin{nicebox}
\begin{align*}
x(n) &\ztp \hat{X}(z) \\
n x(n) &\ztp -z \frac{d}{dz} \hat{X}(z) \\
\end{align*}
\end{nicebox}
\begin{align*}
\hat{X} &= \log(1 + \alpha z^{-1}) \\
\frac{d}{dz}\hat{X} &= \frac{1}{1 + \alpha z^{-1}} \lr{-\alpha z^{-2}} \\
-z \frac{d}{dz}\hat{X} &= \frac{\alpha z^{-1}}{1 + \alpha z^{-1}} \\
\end{align*}
We arrive at the same answer.
IV) Complex Integration (outside the scope of this course)
$$
x(n) = \frac{1}{2\pi i} \oint_c \hat{X}(z)z^{n-1}dz \ztp \hat{X}(z) = \sum \limits_{n\in\Z} x(n) z^{-n}
$$
where $c$ is a closed contour within the $RoC$.
\newpage
\section{Week 12}
\subsection{Partial Fraction Decomposition}
Another method for finding inverse Z-transforms is partial fraction decomposition. Let's start with an example.
\begin{align*}
\hat{H}(z) &= \frac{z - \frac{3}{2}}{ \lr{ z + \frac{1}{2}} \lr{ z - \frac{3}{4} }} \quad h \mbox{ is causal } \\
\end{align*}
Since the system is causal and the largest pole magnitude is $\frac{3}{4}$, we know that $RoC(h) = \{ z \st \abs{z} \gt \frac{3}{4} \}$, which includes the unit circle and hence implies BIBO stability. In this case, $\hat{H}(z)$ is strictly proper and the poles are distinct. This is the most straightforward case of partial fraction decomposition.
\begin{align*}
\hat{H}(z) &= \frac{z - \frac{3}{2}}{ \lr{ z + \frac{1}{2}} \lr{ z - \frac{3}{4} }} \\
\hat{H}(z) &= \frac{A}{ \lr{ z + \frac{1}{2}}} + \frac{B}{ \lr{ z - \frac{3}{4} }} \\
\hat{H}(z) &= \frac{Az^{-1}}{ \lr{ 1 + \frac{1}{2}z^{-1}}} + \frac{Bz^{-1}}{ \lr{ 1 - \frac{3}{4}z^{-1} }} \\
h(n) &= A\lr{-\frac{1}{2}}^{n-1} u(n-1) + B\lr{\frac{3}{4}}^{n-1} u(n-1)
\end{align*}
Note that if the order of the numerator is greater than the order of the denominator, then the $RoC$ doesn't include infinity which implies that the system cannot be causal. To solve for $A$ and $B$, we have to continue with the partial fraction decomposition to set up a system of equations:
\begin{align*}
\hat{H}(z) &= \frac{A}{ \lr{ z + \frac{1}{2}}} + \frac{B}{ \lr{ z - \frac{3}{4} }} \\
\hat{H}(z) &= \frac{ A\lr{z - \frac{3}{4}} + B\lr{ z + \frac{1}{2} } } { \lr{ z + \frac{1}{2}} \lr{ z - \frac{3}{4} }} \\
\frac{z - \frac{3}{2}}{ \lr{ z + \frac{1}{2}} \lr{ z - \frac{3}{4} }} &= \frac{ A\lr{z - \frac{3}{4}} + B\lr{ z + \frac{1}{2} } } { \lr{ z + \frac{1}{2}} \lr{ z - \frac{3}{4} }} \\
z - \frac{3}{2} &= A\lr{z - \frac{3}{4}} + B\lr{ z + \frac{1}{2} } \\
z - \frac{3}{2} &= \lr{A + B}z - \frac{3}{4}A + \frac{1}{2}B \\
\end{align*}
Equating the coefficients gives us the system of equations:
\begin{align*}
A + B &= 1 \\
-\frac{3}{4}A + \frac{1}{2}B &= -\frac{3}{4} \\
\end{align*}
Solving the system gives $A = \frac{8}{5}$, and $B = -\frac{3}{5}$. Putting this all together gives us $\hat{H}(z)$:
$$
\hat{H}(z) = \frac{\frac{8}{5}}{z + \frac{1}{2}} - \frac{ \frac{3}{5} }{ z - \frac{3}{4} } \iztp h(n) = \frac{8}{5}\lr{-\frac{1}{2}}^{n-1}u(n-1) - \frac{3}{5}\lr{\frac{3}{4}}^{n-1} u(n-1)
$$
A partial fraction shortcut is to use the Heaviside method.
\begin{align*}
\hat{H}(z) &= \frac{A}{ \lr{ z + \frac{1}{2}}} + \frac{B}{ \lr{ z - \frac{3}{4} }} \\
\lr{z + \frac{1}{2}} \hat{H}(z) &= A + \frac{B\lr{z + \frac{1}{2}}}{ \lr{ z - \frac{3}{4} }} \\
\left. \lr{z + \frac{1}{2}} \hat{H}(z) \right|_{z=-\frac{1}{2}} &= A \\
\end{align*}
To solve for $B$, do the corresponding multiplication by $\lr{z - \frac{3}{4}}$ and evaluate at $z = \frac{3}{4}$. This isolates each parameter instead of solving a system of equations.
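The Heaviside evaluations are quick to check numerically (our own sketch, with the cancelled factor removed before substituting):
\begin{verbatim}
# Sketch: A = (z + 1/2) H(z) at z = -1/2, B = (z - 3/4) H(z) at z = 3/4.
A = (-0.5 - 1.5) / (-0.5 - 0.75)     # remaining factor (z - 3/2)/(z - 3/4)
B = (0.75 - 1.5) / (0.75 + 0.5)      # remaining factor (z - 3/2)/(z + 1/2)
print(A, B)                          # 1.6 = 8/5 and -0.6 = -3/5
\end{verbatim}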
\begin{example}
Find the inverse transform of the following system.
\begin{align*}
\hat{X}(z) &= \frac{ 1 + 2z^{-1} + z^{-2}}{\lr{ 1 - \frac{1}{2}z^{-1}} \lr{ 1 - z^{-1}}} \quad \abs{z} \gt 1 \\
\end{align*}
Note that the order of the numerator is equal to the order of the denominator. This causes some issues when using partial fraction decomposition. One method is to add an additional constant term to the decomposition:
\begin{align*}
\hat{X}(z) &= A + \frac{B}{z - \frac{1}{2}} + \frac{C}{z-1} \\
x(n) &= A\delta(n) + B\lr{\frac{1}{2}}^{n-1} u(n-1) + C u(n-1) \\
\end{align*}
We can use the initial value theorem to determine $A$ since $x$ is causal: $x(0) = \lim_{z\to\infty} \hat{X}(z) = 1 = A$. Then use the Heaviside method to solve for $B$ and $C$. Another, more algorithmic way to find these coefficients is long division.
\begin{center}
\polyset{vars=z}
\polylongdiv{z^2 + 2z + 1}{z^2 -\frac{3}{2}z + \frac{1}{2}}
\end{center}
\begin{align*}
\hat{X}(z) &= \frac{ 1 + 2z^{-1} + z^{-2}}{\lr{ 1 - \frac{1}{2}z^{-1}} \lr{ 1 - z^{-1}}} \\
\hat{X}(z) &= \frac{ z^2 + 2z + 1}{\lr{ z - \frac{1}{2}} \lr{ z - 1}} \\
\hat{X}(z) &= 1 + \frac{7z + 1}{2\lr{z^2 - \frac{3}{2}z + \frac{1}{2}}} \quad \mbox{ after division } \\
\hat{X}(z) &= 1 + \frac{7z + 1}{2\lr{z-\frac{1}{2}}\lr{z-1}} \\
\end{align*}
Now we have the first coefficient $A = 1$ and a strictly proper rational function in $z$, which we already know how to handle---using partial fraction decomposition or the Heaviside shortcut.
\end{example}
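The division step can be reproduced with \texttt{numpy.polydiv} (a sketch of our own):
\begin{verbatim}
# Sketch: (z^2 + 2z + 1) / (z^2 - (3/2)z + 1/2) = 1 with remainder
# (7/2)z + 1/2, matching the expression above.
import numpy as np
q, r = np.polydiv([1, 2, 1], [1, -1.5, 0.5])
print(q, r)                          # [1.]  [3.5 0.5]
\end{verbatim}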
\begin{example}
Consider the following problem:
\begin{align*}
\hat{G}(z) &= \frac{ z^3 + 2z^2 + z }{ z^2 -\frac{3}{2}z + \frac{1}{2} } \\
\end{align*}
The system is not causal because the order of the numerator is higher than that of the denominator: this arrangement implies that there are poles at $\infty$. Nevertheless, we can still solve the problem:
\begin{align*}
\hat{G}(z) &= \frac{ z^3 + 2z^2 + z }{ z^2 -\frac{3}{2}z + \frac{1}{2} } \\
\hat{G}(z) &= Az + B + \frac{C}{z - \frac{1}{2}} + \frac{D}{z - 1} \\
g(n) &= A\delta(n+1) + B\delta(n) + C\lr{\frac{1}{2}}^{n-1}u(n-1) + D u(n-1)\\
\end{align*}
How do we find the coefficients $A$ and $B$? Long division takes care of them.
\begin{center}
\polyset{vars=z}
\polylongdiv{z^3 + 2z^2 + z}{z^2 -\frac{3}{2}z + \frac{1}{2}}
\end{center}
\begin{align*}
\hat{G}(z) &= \frac{ z^3 + 2z^2 + z }{ z^2 -\frac{3}{2}z + \frac{1}{2} } \\
\hat{G}(z) &= z + \frac{7}{2} + \frac{ 23z - 7 }{ 4\lr{z - \frac{1}{2}} \lr{z-1}} \quad \mbox{after division} \\
\end{align*}
This gives us $A=1$ and $B = \frac{7}{2}$. Now we just have to solve for $C$ and $D$ using the equation:
\begin{align*}
\frac{ 23z - 7 }{ 4\lr{z - \frac{1}{2}} \lr{z-1}} &= \frac{C}{ \lr{z - \frac{1}{2}}} + \frac{D}{ \lr{z-1}} \\
\end{align*}
Now just solve for $C$ and $D$ as usual.
\end{example}
\begin{example}
Suppose we have a problem with repeated poles.
\begin{align*}
\hat{F}(z) &= \frac{ \frac{2}{3} z^2 -\frac{1}{3}z - \frac{1}{2} }{\lr{z - \frac{1}{2}}^2 \lr{z - 2}} \\
\end{align*}
Assume that the system is causal. How do we find the inverse Z-transform? We can break it up into fractions with constant numerators, one for each power of the repeated factor in the denominator:
\begin{align*}
\hat{F}(z) &= \frac{A}{z-\frac{1}{2}} + \frac{B}{\lr{z - \frac{1}{2}}^2} + \frac{C}{z - 2} \\
\end{align*}
We can use a combination of the Heaviside method and regular partial fraction decomposition to solve this problem:
\begin{align*}
B &= \left. \lr{z - \frac{1}{2}}^2 \hat{F}(z) \right|_{z = \frac{1}{2}} \\
C &= \left. \lr{z - 2} \hat{F}(z) \right|_{z = 2} \\
\end{align*}
To find $A$, put all terms over a common denominator and compare coefficients of the numerators:
\begin{align*}
\frac{2}{3}z^2 - \frac{1}{3}z - \frac{1}{2} &= (A + C)z^2 + \lr{-\frac{5}{2}A + B - C}z + \lr{A -2B + \frac{1}{4}C}
\end{align*}
Since we already know the value of $C$, the only equation we need is $A + C = \frac{2}{3}$. Another method for getting $A$ is differentiation:
\begin{align*}
\hat{F}(z) &= \frac{A}{z-\frac{1}{2}} + \frac{B}{\lr{z - \frac{1}{2}}^2} + \frac{C}{\lr{z - 2}} \\
\lr{z - \frac{1}{2}}^2 \hat{F}(z) &= A\lr{z-\frac{1}{2}} + B + C\frac{\lr{z - \frac{1}{2}}^2}{\lr{z - 2}} \\
\frac{d}{dz}\lr{z - \frac{1}{2}}^2 \hat{F}(z) &= A + C\lr{\frac{2\lr{z - \frac{1}{2}}\lr{z-2} - \lr{z-\frac{1}{2}}^2 }{\lr{z - 2}^2}} \\
\left. \frac{d}{dz}\lr{z - \frac{1}{2}}^2 \hat{F}(z) \right|_{z = \frac{1}{2}}&= A \\
\end{align*}
After differentiating, we can use the Heaviside method. If we had a cubed term, we would have to differentiate again, and so on with higher powers.
\end{example}
\newpage
\section{Week 13}
\subsection{Steady-State}
\begin{example}
Consider a causal LTI system given by the recursive formula:
$$
y(n) - \alpha y(n-1) = x(n) \quad \abs{\alpha} \lt 1
$$
Suppose that $x(n) = u(n)$. Determine $y(n)$. You will find that $y(n) = y_{tr}(n) + y_{ss}(n)$, where $y_{tr}(n)$ is the transient term, and $y_{ss}(n)$ is the steady-state term.
\begin{align*}
y(n) - \alpha y(n-1) &= x(n) \\
\hat{Y}(z) - \alpha z^{-1} \hat{Y}(z) &= \hat{X}(z) \\
\hat{Y}(z)(1 - \alpha z^{-1}) &= \hat{X}(z) \\
\hat{H}(z) &= \frac{1}{(1 - \alpha z^{-1})} \\
\end{align*}
We know that $\hat{Y}(z) = \hat{H}(z)\hat{X}(z)$ and $\hat{X}(z) = \frac{1}{1 - z^{-1}}$.
\begin{align*}
\hat{Y}(z) &= \hat{H}(z)\hat{X}(z) \\
\hat{Y}(z) &= \lr{\frac{1}{1 - \alpha z^{-1}}} \lr{\frac{1}{1 - z^{-1}}} \\
\hat{Y}(z) &= \frac{z^2}{\lr{ z - 1} \lr{ z - \alpha }} \\
\end{align*}
We know that $RoC(h) = \{ z \st \abs{\alpha} \lt \abs{z} \}$, $RoC(x) = \{ z \st \abs{z} \gt 1 \}$. The $RoC(y)$ is the intersection of these two regions, hence $RoC(y) = \{ z \st \abs{z} \gt 1 \}$. Now we can break $\hat{Y}(z)$ into parts to determine the inverse transform.
\begin{align*}
\hat{Y}(z) &= \frac{z^2}{\lr{ z - 1} \lr{ z - \alpha }} \\
\frac{\hat{Y}(z)}{z} &= \frac{z}{\lr{ z - 1} \lr{ z - \alpha }} \\
\frac{\hat{Y}(z)}{z} &= \frac{A}{z - 1} + \frac{B}{z-\alpha} \\
\end{align*}
Using the Heaviside method we can find $A$ and $B$:
\begin{align*}
A &= \left. \frac{z}{z-\alpha}\right|_{z = 1} = \frac{1}{1-\alpha} \\
B &= \left. \frac{z}{z-1}\right|_{z = \alpha} = \frac{\alpha}{\alpha - 1} \\
\end{align*}
Now we can solve for the inverse Z-transform:
\begin{align*}
\frac{\hat{Y}(z)}{z} &= \frac{\frac{1}{1-\alpha}}{z - 1} + \frac{\frac{\alpha}{\alpha-1}}{z-\alpha} \\
z^{-1}\hat{Y}(z) &= \frac{\frac{1}{1-\alpha}z^{-1}}{1 - z^{-1}} + \frac{\frac{\alpha}{\alpha-1}z^{-1}}{1-\alpha z^{-1}} \\
\hat{Y}(z) &= \frac{\frac{1}{1-\alpha}}{1 - z^{-1}} + \frac{\frac{\alpha}{\alpha-1}}{1-\alpha z^{-1}} \\
y(n) &= \frac{1}{1-\alpha} u(n) + \frac{\alpha}{\alpha-1} \alpha^n u(n) \\
\end{align*}
Note that $y_{ss}(n) = \frac{1}{1-\alpha} u(n)$ is the steady-state term, since it emanates from the pole of the input at $z=1$; as $n \to \infty$, $y_{ss}(n) \to \frac{1}{1-\alpha}$. The transient term is $y_{tr}(n) = \frac{\alpha}{\alpha-1}\alpha^n u(n)$, which emanates from the system pole at $z = \alpha$; as $n \to \infty$, $y_{tr}(n) \to 0$. If $x(n) = 1$ instead of $u(n)$, the output is exactly the steady-state value $\frac{1}{1-\alpha}$ for all $n$:
\begin{align*}
y(n) &= z_0^n \hat{H}(z_0) \\
y(n) &= 1^n \hat{H}(1) = \frac{1}{1-\alpha} \\
\end{align*}
In general, if you have a BIBO stable system and pass a suddenly applied input through it, you will have a term due to the system pole and a term due to the input. Consider two simplified cases, one where $x(n) = 1$ and another where $x(n) = u(n)$: as $n$ approaches infinity, the responses become indistinguishable. What if $x(n) = \beta^n u(n)$?
I) $\abs{\alpha} \lt \abs{\beta}$
\begin{align*}
y(n) &= k_1 \beta^n u(n) + k_2 \alpha^n u(n) \\
\end{align*}
$\beta^n$ persists longer as $n$ approaches $\infty$. How can you find $k_1$?
\begin{align*}
k_1 &= \left. \hat{H}(z) \right|_{z = \beta} = \frac{\beta}{\beta - \alpha} \\
\end{align*}
We can also determine the coefficient through the Z domain representation:
\begin{align*}
\hat{Y}(z) &= \hat{H}(z) \hat{X}(z) \\
\hat{Y}(z) &= \frac{z}{z - \alpha} \cdot \frac{z}{z - \beta} \\
\frac{\hat{Y}(z)}{z} &= \frac{A}{z - \alpha} + \frac{B}{z - \beta} \\
\frac{\hat{Y}(z)}{z} &= \frac{k_2}{z - \alpha} + \frac{\frac{\beta}{\beta - \alpha}}{z - \beta} \\
\end{align*}
\end{example}
\subsection{Summary of Z-Transforms}
\begin{nicebox}
{\bf Time-Shifting (Translation) Property }
\begin{align*}
x(n - N) &\ztp z^{-N} \hat{X}(z)
\end{align*}
The $RoC$ is the same as $RoC(x)$ except possibly addition or deletion of 0 or $\infty$.
\end{nicebox}
\begin{nicebox}
{\bf Convolution in the Time Domain }
\begin{align*}
x(n) * h(n) &\ztp \hat{H}(z)\hat{X}(z)
\end{align*}
$RoC \supseteq RoC(x) \cap RoC(h)$.
\end{nicebox}
\begin{nicebox}
{\bf Modulation with a Complex Exponential }
\begin{align*}
z_0^n x(n) &\ztp \hat{X}\lr{\frac{z}{z_0}}
\end{align*}
$RoC = \abs{z_0} \cdot RoC(x)$
\end{nicebox}
\begin{nicebox}
{\bf Time-Reversal Property }
\begin{align*}
x(-n) &\ztp \hat{X}(z^{-1})
\end{align*}
$RoC : \frac{1}{RoC(x)}$
\end{nicebox}
\begin{nicebox}
{\bf Z-domain Differentiation Property }
\begin{align*}
n x(n) &\ztp -z \frac{d}{dz} \hat{X}(z)
\end{align*}
The $RoC$ is the same as $RoC(x)$ except possibly addition or deletion of 0 or $\infty$.
\end{nicebox}
\begin{nicebox}
{\bf Conjugation Property }
\begin{align*}
x^*(n) &\ztp \hat{X}^*(z^*)
\end{align*}
$RoC = RoC(x)$
\end{nicebox}
\begin{nicebox}
{\bf Dilation }
$$
\begin{cases}
x\lr{\frac{n}{N}} & n\bmod N=0 \\
0 & \mbox{otherwise} \\
\end{cases}
\ztp
\hat{X}(z^N)
$$
$RoC = \sqrt[N]{RoC(x)}$
\end{nicebox}
\newpage
\section{Week 14}
\subsection{Zero-State and Zero-Input Responses of LTI Systems}
We talked about splitting an output into transient and steady-state components. Now we are going to talk about zero-input and zero-state responses of a system. Consider the following system:
\begin{align*}
y(n) - 0.9y(n-1) = x(n) \quad \lr{\mbox{system is causal}}\\
\end{align*}
The order of the system is the number of delay elements required for implementation. So we need one initial condition (in this case) to determine the response. What if $y(-1) \neq 0$? This means that you could pass in the zero signal $x(n) = 0$ and not get a zero output; the system is not ZIZO (zero-in, zero-out).
\begin{problem}
What is the zero input response of the system?
\end{problem}
\begin{solution}
\begin{align*}
\mbox{let } x(n) = 0 \quad n \geq 0 \\
\end{align*}
The system's causality implies that we should write $y(n)$ in terms of its past values and solve in the forward direction ( $n=0,1,2,\dots$ ):
\begin{align*}
y(n) = 0.9y(n-1) + x(n) \\
\end{align*}
Remember that we have let $x(n) = 0$ $ \forall $ $n \geq 0$.
\begin{align*}
y(0) &= (0.9) y(-1) \\
y(1) &= (0.9)^2 y(-1) \\
y(2) &= (0.9)^3 y(-1) \\
&\vdots \\
y(n) &= (0.9)^{n+1} y(-1) = y_{ZIR}(n)\\
\end{align*}
This result is called the \emph{zero-input response}, or zero-input solution (homogeneous solution). We have turned off the input but don't get a zero output.
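A minimal numerical sketch (with an arbitrarily chosen initial condition $y(-1) = 2$) confirms that the forward recursion with the input turned off reproduces $(0.9)^{n+1} y(-1)$:
\begin{verbatim}
import numpy as np

y_init = 2.0                # arbitrary nonzero y(-1)
N = 20
y = np.zeros(N)
prev = y_init
for n in range(N):
    y[n] = 0.9 * prev       # x(n) = 0 for all n >= 0
    prev = y[n]

closed = y_init * 0.9 ** (np.arange(N) + 1)
print(np.allclose(y, closed))   # True
\end{verbatim}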
\end{solution}
\begin{problem}
What if $y(-1) = 0$ and $x(n) = u(n)$? In other words, what is the zero-state response of the system?
\end{problem}
\begin{solution}
First we transform everything into the Z-domain and find the system function. We then pass a unit step through this system.
\begin{align*}
y(n) &= 0.9y(n-1) + x(n) \quad \abs{z} \gt 0.9 \\
\hat{Y}(z) &= z^{-1}0.9\hat{Y}(z) + \hat{X}(z) \\
\hat{Y}(z)(1 - 0.9z^{-1}) &= \hat{X}(z) \\
\hat{H}(z) &= \frac{1}{1 - 0.9z^{-1}} \\
\hat{H}(z) &= \frac{z}{z - 0.9} \\
\end{align*}
We know the Z-transform of the unit step:
\begin{align*}
\hat{X}(z) &= \frac{1}{1 - z^{-1}} \quad \abs{z} \gt 1 \\
\hat{X}(z) &= \frac{z}{z - 1} \quad \abs{z} \gt 1 \\
\end{align*}
Convolution in the time domain is multiplication in the Z-domain:
\begin{align*}
\hat{Y}(z) &= \hat{H}(z) \hat{X}(z) \\
\hat{Y}(z) &= \frac{z^2}{(z - 0.9)(z-1)} \quad \abs{z} \gt 1 \\
\end{align*}
Now we break $\hat{Y}(z)$ into its components:
\begin{align*}
\hat{Y}(z) &= \frac{z^2}{(z - 0.9)(z-1)} \\
\frac{\hat{Y}(z)}{z} &= \frac{A}{(z-0.9)} + \frac{B}{(z-1)} \\
\frac{z}{(z - 0.9)(z-1)} &= \frac{A}{(z-0.9)} + \frac{B}{(z-1)} \\
A &= \left. \frac{z}{z-1} \right|_{z=0.9} = -9\\
B &= \left. \frac{z}{z-0.9} \right|_{z=1} = 10\\
\end{align*}
Now putting it all together:
\begin{align*}
\frac{\hat{Y}(z)}{z} &= \frac{A}{(z-0.9)} + \frac{B}{(z-1)} \\
\frac{\hat{Y}(z)}{z} &= \frac{-9}{(z-0.9)} + \frac{10}{(z-1)} \\
\hat{Y}(z) &= \frac{-9z}{(z-0.9)} + \frac{10z}{(z-1)} \\
y(n) &=-9(0.9)^n u(n) +10 u(n) = y_{ZSR}(n) \\
\end{align*}
This result is the \emph{zero-state response} of the system, (a.k.a. the particular solution).
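Again as a sanity check (not part of the lecture), simulating the recursion from rest reproduces the zero-state response we just derived:
\begin{verbatim}
import numpy as np

N = 50
y = np.zeros(N)
prev = 0.0                  # zero state: y(-1) = 0
for n in range(N):
    y[n] = 0.9 * prev + 1.0 # x(n) = u(n)
    prev = y[n]

closed = -9 * 0.9 ** np.arange(N) + 10
print(np.allclose(y, closed))   # True
\end{verbatim}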
\end{solution}
\begin{problem}
What if $y(-1) \neq 0$ and $x(n) = u(n)$? In other words, what is the total response of the system?
\end{problem}
\begin{solution}
By superposition, the general solution is the sum of the homogeneous and particular solutions.
\begin{align*}
y(n) &= y_{\small ZSR}(n) + y_{ZIR}(n) \\
\end{align*}
Is there a way to solve for all of these in one step? Start off by truncating both sides of the equation:
\begin{align*}
y(n) &= 0.9y(n-1) + x(n) \\
y(n)u(n) &= 0.9y(n-1)u(n) + x(n)u(n) \\
\end{align*}
Then Z-transform both sides:
\begin{align*}
\sum \limits_{n=-\infty}^{\infty} y(n)u(n) z^{-n} &= \sum \limits_{n=-\infty}^{\infty}0.9y(n-1)u(n) z^{-n} + \sum \limits_{n=-\infty}^{\infty}x(n)u(n) z^{-n} \\
\sum \limits_{n=0}^{\infty} y(n)z^{-n} &= \sum \limits_{n=0}^{\infty}0.9y(n-1) z^{-n} + \sum \limits_{n=0}^{\infty}x(n) z^{-n} \\
\end{align*}
We need to add a new definition to our toolbox, the \emph{Unilateral Z-transform}:
\begin{nicebox}
\begin{align*}
\mathcal{Y}(z) \bydef \sum \limits_{n=0}^{\infty} y(n) z^{-n}
\end{align*}
\end{nicebox}
This is used in contrast to the \emph{Bilateral Z-transform} which is what we have been using up until this point.
\begin{align*}
\sum \limits_{n=0}^{\infty} y(n)z^{-n} &= 0.9\sum \limits_{n=0}^{\infty}y(n-1) z^{-n} + \sum \limits_{n=0}^{\infty}x(n) z^{-n} \\
\mathcal{Y}(z) &= 0.9\sum \limits_{m=-1}^{\infty}y(m) z^{-(m+1)} + \mathcal{X}(z) \quad \mbox{let } m = n -1\\
\mathcal{Y}(z) &= z^{-1}0.9\sum \limits_{m=-1}^{\infty}y(m) z^{-m} + \mathcal{X}(z) \\
\mathcal{Y}(z) &= 0.9y(-1) + z^{-1}0.9\sum \limits_{m=0}^{\infty}y(m) z^{-m} + \mathcal{X}(z) \quad \mbox{ (evaluate first term in sum)} \\
\mathcal{Y}(z) &= 0.9y(-1) + z^{-1}0.9\mathcal{Y}(z) + \mathcal{X}(z) \\
\end{align*}
We have successfully captured the initial condition, $y(-1)$, in the formula. Note that for causal signals, $\hat{X}(z) = \mathcal{X}(z)$: the bilateral Z-transform is equal to the unilateral Z-transform.
\begin{align*}
\mathcal{Y}(z) &= 0.9y(-1) + z^{-1}0.9\mathcal{Y}(z) + \mathcal{X}(z) \\
\mathcal{Y}(z)(1 - 0.9z^{-1}) &= 0.9y(-1) + \mathcal{X}(z) \\
\mathcal{Y}(z) &= \frac{0.9y(-1)}{(1 - 0.9z^{-1})} + \frac{1}{(1 - 0.9z^{-1})}\mathcal{X}(z) \\
\mathcal{Y}(z) &= \frac{0.9y(-1)}{(1 - 0.9z^{-1})} + \hat{H}(z)\mathcal{X}(z) \\
\mathcal{Y}(z) &= \frac{0.9y(-1)}{(1 - 0.9z^{-1})} + \hat{H}(z)\hat{X}(z) \\
\mathcal{Y}(z) &= \frac{0.9y(-1)}{(1 - 0.9z^{-1})} + \hat{Y}_{ZSR}(z) \\
\mathcal{Y}(z) &= \hat{Y}_{ZIR}(z) + \hat{Y}_{ZSR}(z) \\
\end{align*}
Now we can transform these responses back to the time domain:
\begin{align*}
\hat{Y}_{ZIR}(z) &= \frac{0.9y(-1)}{(1 - 0.9z^{-1})} \\
y_{ZIR}(n) &= 0.9y(-1)(0.9)^n u(n) \\
y_{ZIR}(n) &= y(-1)(0.9)^{n+1} u(n) \\
\end{align*}
\begin{align*}
\hat{Y}_{ZSR}(z) &= \hat{H}(z)\hat{X}(z) \\
y_{ZSR}(n) &= -9(0.9)^n u(n) + 10 u(n) \\
\end{align*}
Now for the final result:
$$
y(n) = y(-1)(0.9)^{n+1} u(n) - 9(0.9)^n u(n) + 10 u(n)
$$
Here we find all of the various responses:
\begin{align*}
y_{ZIR}(n) &= y(-1)(0.9)^{n+1} u(n) \\
y_{ZSR}(n) &= -9(0.9)^n u(n) + 10 u(n) \\
y_{tr}(n) &= y(-1)(0.9)^{n+1} u(n) -9(0.9)^n u(n)\\
y_{ss}(n) &= 10 u(n) \\
\end{align*}
Note that the steady-state response is the transfer function evaluated at $z = 1$, times the unit step:
\begin{align*}
y_{ss}(n) &= \left. \hat{H}(z) \right|_{z=1} u(n) = 10 u(n)
\end{align*}
The natural response of the system, the contribution from the pole (which is inside of the unit circle), dies out. What remains is the response from the input $x(n) = 1$ $\forall n$. In other words, $H$ is BIBO, so the natural response or response due to the poles of the system decays, so for $n$ sufficiently large, the system can't distinguish between $x(n) = 1$ $\forall n$ and $x(n) = u(n)$.
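The full decomposition can also be checked numerically; the sketch below (with an arbitrarily chosen $y(-1) = 3$) verifies that a single forward simulation equals $y_{ZIR}(n) + y_{ZSR}(n)$ and settles at the steady-state value 10:
\begin{verbatim}
import numpy as np

y_init = 3.0                # arbitrary y(-1)
N = 60
y = np.zeros(N)
prev = y_init
for n in range(N):
    y[n] = 0.9 * prev + 1.0 # x(n) = u(n)
    prev = y[n]

n = np.arange(N)
y_zir = y_init * 0.9 ** (n + 1)
y_zsr = -9 * 0.9 ** n + 10
print(np.allclose(y, y_zir + y_zsr))  # True: superposition holds
print(y[-1])                          # close to the steady-state value 10
\end{verbatim}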
\end{solution}
\begin{example}
Given the following system, find zero-input response.
\begin{align*}
y(n) - \alpha y(n-1) + \beta y(n-2) &= x(n) \\
\end{align*}
Take the unilateral Z-transform:
\begin{align*}
\mathcal{Y}(z) - \alpha \sum \limits_{n=0}^{\infty} y(n-1)z^{-n} + \beta \sum \limits_{n=0}^{\infty} y(n-2)z^{-n} = \mathcal{X}(z) \\
\mathcal{Y}(z) - \alpha z^{-1} \sum \limits_{m=-1}^{\infty} y(m)z^{-m} + \beta z^{-2} \sum \limits_{\ell=-2}^{\infty} y(\ell)z^{-\ell} = \mathcal{X}(z) \\
\mathcal{Y}(z) - \alpha y(-1) - \alpha z^{-1} \sum \limits_{m=0}^{\infty} y(m)z^{-m} + \beta y(-2) + \beta y(-1) z^{-1} + \beta z^{-2} \sum \limits_{\ell=0}^{\infty} y(\ell)z^{-\ell} = \mathcal{X}(z) \\
\mathcal{Y}(z) - \alpha y(-1) - \alpha z^{-1} \mathcal{Y}(z) + \beta y(-2) + \beta y(-1) z^{-1} + \beta z^{-2} \mathcal{Y}(z) = \mathcal{X}(z) \\
\end{align*}
Since we are finding the zero-input response (homogeneous solution), we set $x(n) = 0$, so that $\mathcal{X}(z) = 0$:
\begin{align*}
\mathcal{Y}(z) - \alpha y(-1) - \alpha z^{-1} \mathcal{Y}(z) + \beta y(-2) + \beta y(-1) z^{-1} + \beta z^{-2} \mathcal{Y}(z) &= 0 \\
\mathcal{Y}(z)\lr{ 1 - \alpha z^{-1} + \beta z^{-2} } &= \alpha y(-1) - \beta y(-2) - \beta y(-1) z^{-1} \\
\mathcal{Y}(z) &= \frac{\alpha y(-1) - \beta y(-2) - \beta y(-1) z^{-1}}{\lr{ 1 - \alpha z^{-1} + \beta z^{-2} }} \\
y_{ZIR}(n) &= \iztrans{\frac{\alpha y(-1) - \beta y(-2) - \beta y(-1) z^{-1}}{\lr{ 1 - \alpha z^{-1} + \beta z^{-2} }}} \\
\end{align*}
\end{example}
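Rather than inverting $\mathcal{Y}(z)$ symbolically, we can check the result numerically: assuming distinct poles, $y_{ZIR}(n)$ must be a combination $c_1 r_1^n + c_2 r_2^n$ of the roots of $z^2 - \alpha z + \beta$, with $c_1, c_2$ fit to the initial conditions. The sketch below (with arbitrary $\alpha$, $\beta$, and initial conditions) compares this modal form against the forward recursion:
\begin{verbatim}
import numpy as np

alpha, beta = 1.2, 0.32      # arbitrary; z^2 - 1.2z + 0.32 = (z-0.8)(z-0.4)
ym1, ym2 = 1.0, -0.5         # arbitrary initial conditions y(-1), y(-2)

# direct recursion with x(n) = 0: y(n) = alpha y(n-1) - beta y(n-2)
N = 25
y = np.zeros(N)
p2, p1 = ym2, ym1            # hold y(n-2), y(n-1)
for n in range(N):
    y[n] = alpha * p1 - beta * p2
    p2, p1 = p1, y[n]

# modal form: y_zir(n) = c1 r1^n + c2 r2^n, coefficients fit to the ICs
r1, r2 = np.roots([1, -alpha, beta])
M = np.array([[r1**-1, r2**-1], [r1**-2, r2**-2]])
c1, c2 = np.linalg.solve(M, [ym1, ym2])
closed = c1 * r1 ** np.arange(N) + c2 * r2 ** np.arange(N)
print(np.allclose(y, closed))   # True
\end{verbatim}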
\subsection{The Laplace Transform}
For the same reasons we discussed Z-transforms as a way to manipulate functions where the Fourier Transform could not (in discrete time), we now discuss the Laplace Transform for continuous time.
\begin{nicebox}
The \emph{Laplace Transform} of a function $h(t)$ is defined as
\begin{align*}
\hat{H}(s) &= \int_{-\infty}^{\infty} h(t) e^{-st} dt \quad s \in \C \\
\end{align*}
where $ s = \sigma + i\omega$. The associated Region of Convergence is
\begin{align*}
RoC(h) &\bydef \{ s = \sigma + i\omega \in \C \st \int_{-\infty}^{\infty} \abs{h(t)e^{-st}} dt \lt \infty \} \\
\end{align*}
\end{nicebox}
We can now handle systems that don't have Fourier Transforms. Because $s = \sigma + i\omega$, we can ``veer off'' the $i\omega$ axis:
\begin{align*}
\hat{H}(s) &= \int_{-\infty}^{\infty} h(t) e^{-st} dt \\
\hat{H}(s) &= \int_{-\infty}^{\infty} h(t) e^{-(\sigma + i\omega)t} dt \\
\hat{H}(s) &= \int_{-\infty}^{\infty} h(t) e^{-\sigma t} e^{-i\omega t} dt \\
\hat{H}(s) &= \ftrans{h(t) e^{-\sigma t}} \\
\end{align*}
If $h(t)$ is growing exponentially, we can tame that growth with $e^{-\sigma t}$. In the definition of the Laplace transform, we see that $h(t)e^{-st}$ must be absolutely integrable. Does the $\omega$ in $s = \sigma + i\omega$ matter?
\begin{align*}
&\int_{-\infty}^{\infty} \abs{h(t)e^{-st}} dt \\
&\int_{-\infty}^{\infty} \abs{h(t)e^{-\sigma t}}\abs{e^{-i\omega t}} dt \\
&\int_{-\infty}^{\infty} \abs{h(t)e^{-\sigma t}} dt \\
\end{align*}
Clearly only $\sigma$ matters for absolute integrability since $\abs{e^{-i\omega t}} = 1$. So what determines the $RoC$ is $\sigma = \Real{s}$.
\begin{claim}
Right-sided signals have right-sided $RoC$s.
\end{claim}
\begin{proof}
Let $x$ be right-sided such that $x(t) = 0 $ $ \forall t \lt T$. Then
\begin{align*}
\hat{X}(s) &= \int_{T}^{\infty} x(t) e^{-st} dt \\
\hat{X}(s) &= \int_{T}^{0} x(t) e^{-st} dt + \int_{0}^{\infty} x(t) e^{-st} dt \\
\end{align*}
We only care about the integral from 0 to $\infty$, since the integral over the finite interval from $T$ to 0 is finite and thus converges.
\begin{align*}
&\mbox{Suppose } \sigma_0 \in RoC(x) \\
&\Rightarrow \int_0^\infty \abs{x(t) e^{-\sigma_0 t} } dt \lt \infty \\
\end{align*}
Let $\sigma_1 \gt \sigma_0$ be arbitrary. Then we have the following relations:
\begin{align*}
\sigma_1 &\gt \sigma_0 \\
-\sigma_1 &\lt -\sigma_0 \\
-\sigma_1t &\lt -\sigma_0t \quad (\mbox{for } t \in (0,\infty))\\
e^{-\sigma_1t} &\lt e^{-\sigma_0t} \\
\end{align*}
Therefore $e^{-\sigma_1t}$ decays faster than $e^{-\sigma_0t}$. Now we use this to show how this affects absolute integrability:
\begin{align*}
e^{-\sigma_1t} &\lt e^{-\sigma_0t} \\
\abs{x(t)e^{-\sigma_1t}} &\lt \abs{x(t)e^{-\sigma_0t}} \\
\int_0^\infty \abs{x(t)e^{-\sigma_1t}} dt &\lt \int_0^\infty \abs{x(t)e^{-\sigma_0t}} dt \lt \infty \\
\int_0^\infty \abs{x(t)e^{-\sigma_1t}} dt &\lt \infty \\
\end{align*}
Therefore if $\sigma_1 \gt \sigma_0$ and $\sigma_0 \in RoC(x)$, then $\sigma_1 \in RoC(x)$.
\end{proof}
Below is the $RoC$ of a right-sided signal. In this case the $RoC$ includes the $i\omega$-axis, or $\Imag{s}$-axis:
% \imgsize{0.75}{images/roc/laplace/causal.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3,-1)(3,3)
\pscustom[fillstyle=solid,fillcolor=blue!40,linestyle=none]{\psframe(-1,-2)(2,2)}
\psline[linestyle=dashed](-1,-2.2)(-1,2.2)
\psline[linestyle=dashed](-0.5,-2.2)(-0.5,2.2)
\rput(-0.5,-2.4){$\sigma_1$}
\rput(-1,-2.4){$\sigma_0$}
% x-axis
\rput(2.5,0.3){ $\Real{s}$ }
\psline{->}(-2.5,0)(2.5,0)
% y-axis
\rput(0.6,2.5){ $\Imag{s}$ }
\psline{->}(0,-2.5)(0,2.5)
\end{pspicture}
\end{center}
Here is an $RoC$ for a left-sided signal. In this case the $RoC$ includes the $i\omega$-axis:
% \imgsize{0.75}{images/roc/laplace/anticausal.ps}
\begin{center}
\begin{pspicture}(-3,-1)(3,3)
\newcommand{\Real}[1]{{\Re \mit{e}\left\{{#1}\right\}}}
\newcommand{\Imag}[1]{{\Im \mit{m}\left\{{#1}\right\}}}
\pscustom[fillstyle=solid,fillcolor=blue!40,linestyle=none]{
\psframe(-2,-2)(1,2)
}
\psline[linestyle=dashed](1,-2.2)(1,2.2)
\rput(1,-2.4){$\sigma_0$}
% x-axis
\rput(2.5,0.3){ $\Real{s}$ }
\psline{->}(-2.5,0)(2.5,0)
% y-axis
\rput(0.6,2.5){ $\Imag{s}$ }
\psline{->}(0,-2.5)(0,2.5)
\end{pspicture}
\end{center}
Here is an example $RoC$ for a two-sided system:
% \imgsize{0.75}{images/roc/laplace/leftright.ps}
\begin{center}
\begin{pspicture}(-3,-1)(3,3)
\newcommand{\Real}[1]{{\Re \mit{e}\left\{{#1}\right\}}}
\newcommand{\Imag}[1]{{\Im \mit{m}\left\{{#1}\right\}}}
\pscustom[fillstyle=solid,fillcolor=blue!40,linestyle=none]{
\psframe(-1,-2)(1,2)
}
\psline[linestyle=dashed](-1,-2.2)(-1,2.2)
\psline[linestyle=dashed](1,-2.2)(1,2.2)
\rput(1,-2.4){$\sigma_1$}
\rput(-1,-2.4){$\sigma_0$}
% x-axis
\rput(2.5,0.3){ $\Real{s}$ }
\psline{->}(-2.5,0)(2.5,0)
% y-axis
\rput(0.6,2.5){ $\Imag{s}$ }
\psline{->}(0,-2.5)(0,2.5)
\end{pspicture}
\end{center}
\begin{example}
Find the Laplace Transform for the function $h(t) = e^{-2t} u(t)$.
% \img{images/graphs/expun/graph.ps}
\begin{center}
\begin{pspicture}(-3,-1)(3,3)
\pscustom[fillstyle=solid,fillcolor=blue!40,linestyle=none]{
\fileplot[linewidth=0pt]{fillgraph.dat}
}
\fileplot[linewidth=1.5pt]{graph.dat}
% function name
\rput(1.7,1.30815){$h(t) = e^{-2t} u(t)$}
% x-axis
\psline{->}(-1.4, 0)(2.4, 0)
\rput(2.4,-0.3){$t$}
% begin x-axis labels:
\psline(-1, -0.1)(-1, 0.1)
\psline(-0.5, -0.1)(-0.5, 0.1)
\psline(0.5, -0.1)(0.5, 0.1)
\psline(1, -0.1)(1, 0.1)
\psline(1.5, -0.1)(1.5, 0.1)
\psline(2, -0.1)(2, 0.1)
% end x-axis labels:
% y-axis
\psline{->}(0,-0.4)(0,1.4)
% begin x-axis labels:
\psline(-0.1, 1)(0.1, 1)
% end y-axis labels:
\end{pspicture}
\end{center}
\begin{align*}
\hat{H}(s) &= \int_{-\infty}^{\infty} e^{-2t} u(t) e^{-st} dt \\
\hat{H}(s) &= \int_{0}^{\infty} e^{-2t}e^{-st} dt \\
\hat{H}(s) &= \int_{0}^{\infty} e^{-(2+s)t} dt \\
\hat{H}(s) &= \left. \frac{e^{-(2+s)t}}{-(2+s)} \right|_{0}^{\infty} \\
\hat{H}(s) &= \lim_{t \to \infty} \frac{e^{-(2+s)t}}{-(2+s)} - \frac{1}{-(2+s)} \\
\hat{H}(s) &= \frac{1}{(2+s)} \\
\end{align*}
For absolute integrability, we need $\Real{ -(2+s) } \lt 0$; otherwise the limit in the formula above would not converge to zero as $t$ approaches $\infty$. In other words,
\begin{align*}
\Real{-(2+s)} \lt 0 \\
-2 - \sigma \lt 0 \\
-2 \lt \sigma \\
\end{align*}
So $RoC(h) : \{ s \st \Real{s} \gt -2 \}$:
% \img{images/roc/laplace/example/causal.ps}
\begin{center}
\begin{pspicture}(-3,-1)(3,3)
\pscustom[fillstyle=solid,fillcolor=blue!40,linestyle=none]{
\psframe(-1,-2)(2,2)
}
\psline[linestyle=dashed](-1,-2)(-1,2)
\rput(-1,2.2){(-2)}
\rput(-1.75,1){$RoC(h)$}
% x-axis
\rput(2.5,0.3){ $\Real{s}$ }
\psline{->}(-2.5,0)(2.5,0)
% y-axis
\rput(0.6,2.5){ $\Imag{s}$ }
\psline{->}(0,-2.5)(0,2.5)
\end{pspicture}
\end{center}
Notice that the Fourier Transform for this function is related to the Laplace Transform because the $RoC$ includes the $i\omega$-axis:
\begin{align*}
\ftrans{h(t)} &= \frac{1}{2 + i\omega} \\
\ltrans{h(t)} &= \frac{1}{2 + s} \\
\ftrans{h(t)} &= \left. \ltrans{h(t)} \right|_{s = i\omega} \\
\end{align*}
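Since the $RoC$ includes part of the real axis, we can sanity-check the transform numerically at a few real values of $s$ with ordinary quadrature (a sketch assuming SciPy is available):
\begin{verbatim}
import numpy as np
from scipy.integrate import quad

def laplace_at(sigma):
    # evaluate the defining integral on the real axis, s = sigma
    val, _ = quad(lambda t: np.exp(-2*t) * np.exp(-sigma*t), 0, np.inf)
    return val

for sigma in [0.0, 1.0, 5.0]:   # points inside RoC: Re{s} > -2
    print(laplace_at(sigma), 1/(2 + sigma))   # the pairs agree
\end{verbatim}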
\end{example}
\subsection{The Shifting Property}
What if we shift a signal in time? What happens to its Laplace Transform?
\begin{align*}
y(t) &= x(t + T) \quad T \gt 0 \\
\hat{Y}(s) &= \int_{-\infty}^{\infty} x(t + T)e^{-st}dt \\
\hat{Y}(s) &= \int_{-\infty}^{\infty} x(t)e^{-s(t-T)}dt \quad \mbox{let } t = t + T \\
\hat{Y}(s) &= e^{sT}\int_{-\infty}^{\infty} x(t)e^{-st}dt \\
\hat{Y}(s) &= e^{sT}\hat{X}(s) \\
\end{align*}
This result is the Shifting Property of the Laplace Transform:
\begin{nicebox}
\begin{align*}
x(t) &\ltp \hat{X}(s) \\
x(t+T) &\ltp e^{sT}\hat{X}(s) \\
\end{align*}
The $RoC$ excludes $\infty$ if the signal is not causal, or excludes $-\infty$ if the signal is not anti-causal.
\end{nicebox}
\begin{example}
Consider the causal signal $x(t) = e^{-3t}u(t)$. What is the Laplace Transform for $x(t+T)$?
\begin{align*}
y(t) &= x(t+T) \\
y(t) &= e^{-3(t+T)}u(t+T) \\
\end{align*}
Note that the signal is no longer causal, although it is still right-sided. We know the transform for $x(t)$ and can utilize the shifting property:
\begin{align*}
x(t)&\ltp \hat{X}(s) = \frac{1}{3+s} \\
x(t+T) &\ltp e^{sT}\hat{X}(s) = \frac{e^{sT}}{3+s} \\
\end{align*}
The $RoC$ is the same in this case, except that it excludes $\infty$:
$$
RoC(y) : \{ s \st -3 \lt \Real{s} \lt \infty \}
$$
\end{example}
\subsection{Stability}
Let's consider the signal whose Laplace Transform is $\hat{X}(s) = \frac{1}{s + 3}$. If the signal is causal, then we have
\begin{align*}
x(t) &= e^{-3t}u(t) \\
RoC(x) &= \{ s \st \Real{s} \gt -3 \} \\
\end{align*}
This signal decays as $t$ approaches $\infty$. This is a stable signal, which can be confirmed by the fact that the signal's region of convergence includes the $i\omega$-axis. If the signal were anti-causal, then we have
\begin{align*}
x(t) &= -e^{-3t}u(-t) \\
RoC(x) &= \{ s \st \Real{s} \lt -3 \} \\
\end{align*}
Notice that the region of convergence is flipped, and doesn't contain the $i\omega$-axis. This signal is unstable. This brings us to the statement:
\begin{claim}
$x$ is a stable signal if and only if the region of convergence contains the $i\omega$-axis.
\end{claim}
\begin{proof}
Assume that $x$ is stable. This means that $x$ is absolutely integrable.
\begin{align*}
&\infint \abs{x(t)} dt \lt \infty \\
&\infint \abs{x(t)}\abs{e^{-i\omega t}} dt \lt \infty \quad \mbox{multiply by 1}\\
&\infint \abs{x(t) e^{-(\sigma + i\omega) t}} dt \lt \infty \quad \mbox{for } \sigma = 0\\
&\infint \abs{x(t) e^{-st}} dt \lt \infty \\
&\Rightarrow \mbox{for the chosen values of $s = i\omega$, the Laplace Transform converges}
\end{align*}
Therefore the stable signal $x$'s absolute integrability implies that $i\omega \in RoC(x)$ since $\infint \abs{x(t) e^{-st}} dt \lt \infty$ for $\sigma = 0$. The reverse direction is practically the same. Assume that $i\omega \in RoC(x)$. This implies that $\infint \abs{x(t) e^{-st}} dt \lt \infty$ for $s = 0 + i\omega$.
\begin{align*}
&\infint \abs{x(t) e^{-st}} dt \lt \infty \\
&\infint \abs{x(t) e^{-i\omega t}} dt \lt \infty \quad s = i\omega \\
&\infint \abs{x(t)}\abs{e^{-i\omega t}} dt \lt \infty \\
&\infint \abs{x(t)}dt \lt \infty \\
&\Rightarrow \mbox{ $x$ is absolutely integrable } \\
\end{align*}
\end{proof}
Let's compare the CTFT and the Laplace Transform:
\begin{align*}
X(\omega) &= \infint x(t) e^{-i\omega t} dt \\
\hat{X}(s) &= \infint x(t) e^{-s t} dt \\
\end{align*}
If $i\omega \in RoC(x)$, then
\begin{align*}
X(\omega) &= \left. \hat{X}(s) \right|_{s = i\omega} \\
\end{align*}
\begin{example}
Given the following signal,
\begin{align*}
x(t) &= e^{-3t}u(t) \ltp \frac{1}{s + 3} \quad \Real{s} \gt -3 \\
\end{align*}
we can evaluate the Laplace Transform at $s = i\omega$ to find the Fourier Transform:
\begin{align*}
\ftrans{x(t)} &= \left. \hat{X}(s) \right|_{s=i\omega} = \frac{1}{3 + i\omega} \\
\end{align*}
\end{example}
\subsection{Trigonometric Toneburst}
Let's look at the Laplace Transform of a \emph{Toneburst}, $x(t) = \cos(\omega_0 t) u(t)$:
\begin{align*}
x(t) &= \cos(\omega_0 t) u(t) \\
\hat{X}(s) &= \int_{-\infty}^{\infty} \cos(\omega_0 t) u(t) e^{-st} dt \\
\hat{X}(s) &= \int_{0}^{\infty} \cos(\omega_0 t) e^{-st} dt \\
\hat{X}(s) &= \int_{0}^{\infty} \lr{ \frac{1}{2} e^{i\omega_0 t} + \frac{1}{2}e^{-i\omega_0 t} } e^{-st} dt \\
\hat{X}(s) &= \frac{1}{2} \int_{0}^{\infty} e^{i\omega_0 t} e^{-st} dt + \frac{1}{2} \int_{0}^{\infty} e^{-i\omega_0 t} e^{-st} dt \\
\hat{X}(s) &= \frac{1}{2} \int_{0}^{\infty} e^{-(s - i\omega_0) t} dt + \frac{1}{2} \int_{0}^{\infty} e^{-(s + i\omega_0) t} dt \\
\hat{X}(s) &= \left. \frac{1}{2} \frac{e^{-(s - i\omega_0) t}}{-(s - i\omega_0)} \right|_0^\infty + \left. \frac{1}{2} \frac{e^{-(s + i\omega_0) t}}{-(s + i\omega_0)} \right|_0^\infty \\
\hat{X}(s) &= \lim_{t \to \infty} \frac{1}{2} \frac{e^{-(s - i\omega_0) t}}{-(s - i\omega_0)} - \frac{1}{2} \frac{1}{-(s - i\omega_0)} + \lim_{t \to \infty} \frac{1}{2} \frac{e^{-(s + i\omega_0) t}}{-(s + i\omega_0)} - \frac{1}{2} \frac{1}{-(s + i\omega_0)} \\
\end{align*}
Before evaluating, we must note conditions for the limits to converge for the evaluation at infinity. The limit $\lim_{t \to \infty} \frac{1}{2} \frac{e^{-(s - i\omega_0) t}}{-(s - i\omega_0)} $ converges to 0 if and only if $\Real{s - i\omega_0} \gt 0$. The limit $\lim_{t \to \infty} \frac{1}{2} \frac{e^{-(s + i\omega_0) t}}{-(s + i\omega_0)}$ converges to 0 if and only if $\Real{s +i\omega_0} \gt 0$. Both of these simplify to $\Real{s} \gt 0$. Thus, for $\Real{s} \gt 0$ we have
\begin{align*}
\hat{X}(s) &= 0 - \frac{1}{2} \frac{1}{-(s - i\omega_0)} + 0 - \frac{1}{2} \frac{1}{-(s + i\omega_0)} \\
\hat{X}(s) &=\frac{1}{2} \frac{1}{(s - i\omega_0)} + \frac{1}{2} \frac{1}{(s + i\omega_0)} \\
\hat{X}(s) &=\frac{1}{2} \lr{ \frac{1}{(s - i\omega_0)} + \frac{1}{(s + i\omega_0)}} \\
\hat{X}(s) &=\frac{1}{2} \lr{ \frac{2s}{(s^2 + \omega_0^2)} } \\
\hat{X}(s) &=\frac{s}{(s^2 + \omega_0^2)} \\
\end{align*}
This gives us the transform pair:
\begin{nicebox}
$$
\cos(\omega_0 t) u(t) \ltp \frac{s}{(s^2 + \omega_0^2)} \quad \Real{s} \gt 0
$$
\end{nicebox}
Note that we have two finite poles at $s = \pm i\omega_0$. The zeros are at 0 and $\infty$. The $RoC$ is to the right of the right-most pole, which is on the $i\omega$-axis. Here is a question: what is the Laplace Transform of $\cos(\omega_0 t)$ (for all $t$)? It doesn't have a Laplace transform, although it does have a Fourier Transform.
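The toneburst pair is easy to confirm symbolically; SymPy's one-sided \texttt{laplace\_transform} (which matches our $u(t)$ truncation) recovers the same transform and the same $RoC$ boundary (a sketch assuming SymPy is available):
\begin{verbatim}
import sympy as sp

t, s = sp.symbols('t s')
w0 = sp.symbols('omega0', positive=True)

F, a, _ = sp.laplace_transform(sp.cos(w0 * t), t, s)
print(F)   # s/(omega0**2 + s**2)
print(a)   # 0, i.e., the RoC is Re{s} > 0
\end{verbatim}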
\subsection{Convolution in Time}
Let's see what happens with convolution in the time domain.
\begin{align*}
\lr{x * h}(t) &\ltp \infint \lr{x * h}(t) e^{-st} dt \\
\lr{x * h}(t) &\ltp \infint \infint x(\tau) h(t - \tau) d\tau e^{-st} dt \\
\lr{x * h}(t) &\ltp \infint x(\tau) \infint h(t - \tau) e^{-st} dt d\tau \\
\lr{x * h}(t) &\ltp \infint x(\tau) \infint h(t) e^{-s(t+\tau)} dt d\tau \quad \mbox{let } t = t - \tau \\
\lr{x * h}(t) &\ltp \infint x(\tau)e^{-s\tau} d\tau \infint h(t) e^{-st} dt \\
\lr{x * h}(t) &\ltp \hat{X}(s) \hat{H}(s) \\
\end{align*}
So we have the convolution property:
\begin{nicebox}
\begin{align*}
h(t) &= f(t) * g(t) \\
\hat{H}(s) &= \hat{F}(s)\hat{G}(s) \\
\end{align*}
The $RoC(h) \supseteq RoC(f) \cap RoC(g)$.
\end{nicebox}
This property is what we can use to solve for the system function $\hat{H}(s)$ when given an input $x(t)$ and an output $y(t)$: we have the relation $\hat{H}(s) = \frac{\hat{Y}(s)}{\hat{X}(s)}$.
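As a concrete check of the convolution property, take $f(t) = e^{-t}u(t)$ and $g(t) = e^{-2t}u(t)$. Their transforms multiply to $\frac{1}{(s+1)(s+2)} = \frac{1}{s+1} - \frac{1}{s+2}$, whose causal inverse is $e^{-t} - e^{-2t}$; a Riemann-sum convolution reproduces this (a numerical sketch, not part of the lecture):
\begin{verbatim}
import numpy as np

dt = 1e-3
t = np.arange(0, 10, dt)
f = np.exp(-t)               # e^{-t} u(t)
g = np.exp(-2 * t)           # e^{-2t} u(t)

h_num = np.convolve(f, g)[:len(t)] * dt   # Riemann-sum convolution
h_ana = np.exp(-t) - np.exp(-2 * t)       # inverse of 1/((s+1)(s+2))
print(np.max(np.abs(h_num - h_ana)))      # small: discretization error only
\end{verbatim}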
\subsection{Integration in the Time Domain}
When we integrate a function in time, what happens to its Laplace Transform?
\begin{align*}
x(t) &\ltp \hat{X}(s) \\
y(t) &= \int_{-\infty}^t x(\tau) d\tau \\
\end{align*}
We can solve this by using the fact that $y(t) = x(t) * u(t) = \infint x(\tau) u(t - \tau) d\tau$:
\begin{align*}
x(t) &\ltp \hat{X}(s) \\
u(t) &\ltp \frac{1}{s} \\
y(t) &\ltp \frac{\hat{X}(s)}{s} \quad \mbox{ (using the convolution property)} \\
\end{align*}
The $RoC(u) = \{ s \st \Real{s} \gt 0 \}$, and the new region of convergence is at least the intersection of $RoC(x)$ and $RoC(u)$. Hence, we have our Integration in Time property:
\begin{nicebox}
\begin{align*}
\int_{-\infty}^t x(\tau) d\tau &\ltp \frac{\hat{X}(s)}{s}
\end{align*}
$RoC(y) \supseteq RoC(x) \cap \{ s \st \Real{s} \gt 0 \} $
\end{nicebox}
\subsection{Differentiation in the Time Domain}
Now, lets find out what happens when we differentiate in the time domain. Recall the formula for the inverse Laplace Transform:
\begin{align*}
x(t) &= \frac{1}{2\pi i} \int_{\sigma_0 -i\infty}^{\sigma_0 + i\infty} \hat{X}(s) e^{st} ds \\
\end{align*}
We shall use this esoteric formula to solve for the differentiation property, although we most likely will not encounter it elsewhere in this course.
\begin{align*}
x(t) &= \frac{1}{2\pi i} \int_{\sigma_0 -i\infty}^{\sigma_0 + i\infty} \hat{X}(s) e^{st} ds \\
\frac{d}{dt}x(t) &= \frac{d}{dt} \frac{1}{2\pi i} \int_{\sigma_0 -i\infty}^{\sigma_0 + i\infty} \hat{X}(s) e^{st} ds \\
\frac{d}{dt}x(t) &= \frac{1}{2\pi i} \int_{\sigma_0 -i\infty}^{\sigma_0 + i\infty} \hat{X}(s) \frac{d}{dt}e^{st} ds \\
\frac{d}{dt}x(t) &= \frac{1}{2\pi i} \int_{\sigma_0 -i\infty}^{\sigma_0 + i\infty} \lr{s \hat{X}(s)} e^{st} ds = \iltrans{s\hat{X}(s)} \\
\end{align*}
This gives us the time differentiation property of the Laplace Transform:
\begin{nicebox}
\begin{align*}
\frac{d}{dt} x(t) \ltp s\hat{X}(s) \\
\end{align*}
$RoC \supseteq RoC(x)$
\end{nicebox}
\subsection{Multiplication by Complex Exponential}
What happens if we multiply a signal in the time-domain by a complex exponential?
\begin{align*}
x(t) &\ltp \infint x(t) e^{-st} dt = \hat{X}(s) \\
e^{s_0 t} x(t) &\ltp \infint e^{s_0 t} x(t) e^{-st} dt \\
e^{s_0 t} x(t) &\ltp \infint x(t) e^{-(s - s_0) t} dt = \hat{X}(s - s_0)\\
\end{align*}
What happens to the region of convergence?
\begin{align*}
\alpha \lt &\Real{s} \lt \beta \\
\alpha \lt &\Real{s - s_0} \lt \beta \\
\alpha + \Real{s_0} \lt &\Real{s} \lt \beta + \Real{s_0} \\
\end{align*}
Therefore the new $RoC$ is equal to $RoC(x) + \Real{s_0}$. This gives us the following property:
\begin{nicebox}
\begin{align*}
e^{s_0 t} x(t) &\ltp \hat{X}(s-s_0) \quad RoC = RoC(x) + \Real{s_0} \\
\end{align*}
\end{nicebox}
\newpage
\section{Week 15}
\subsection{More on the Differentiation Property}
We derived the differentiation property previously using the inverse transform formula.
\begin{nicebox}
\begin{align*}
\frac{d}{dt} x(t) \ltp s\hat{X}(s) \\
\end{align*}
$RoC \supseteq RoC(x)$
\end{nicebox}
Now we can solve this another way. Suppose we have a differentiator:
$$
e^{st} \to \fbox{$\frac{d}{dt}$} \to se^{st}
$$
With input $x(t) = e^{st}$, the output is $y(t) = se^{st}$. We know that when we pass an exponential as input, we get the system function multiplied by that exponential; we get a scaled version of the same exponential:
\begin{align*}
y(t) &= \infint h(\tau) x(t-\tau) d\tau \\
y(t) &= \infint h(\tau) e^{s(t-\tau)} d\tau \quad \mbox{ let } x(t) = e^{st} \\
y(t) &= \lr{ \infint h(\tau) e^{-s\tau} d\tau } e^{st} \\
y(t) &= \hat{H}(s) e^{st} \\
\end{align*}
Using this information, and the convolution property ($\hat{Y}(s) = \hat{H}(s)\hat{X}(s)$), we can see that $\hat{H}(s) = s$.
\begin{align*}
y(t) &= (x*h)(t) = \frac{d}{dt} x(t) \\
\hat{Y}(s) &= \hat{H}(s)\hat{X}(s) \\
\hat{Y}(s) &= s\hat{X}(s) \\
\end{align*}
A side note on the differentiator: The time domain representation is the \emph{unit doublet}.
\begin{align*}
h(t) &= \dot{\delta}(t) = \frac{d}{dt} \delta(t) \\
\end{align*}
\subsection{Frequency Differentiation}
The dual of differentiation in the time domain is differentiation in the Laplace domain.
\begin{align*}
\hat{X}(s) &= \infint x(t) e^{-st} dt \\
\frac{d}{ds} \hat{X}(s) &= \frac{d}{ds} \infint x(t) e^{-st} dt \\
\frac{d}{ds} \hat{X}(s) &= \infint x(t) \frac{d}{ds} e^{-st} dt \\
\frac{d}{ds} \hat{X}(s) &= \infint \lr{-tx(t)} e^{-st} dt \\
\end{align*}
This gives us the Frequency Differentiation property:
\begin{nicebox}
\begin{align*}
-tx(t) \ltp \frac{d}{ds}\hat{X}(s) \\
\end{align*}
$RoC = RoC(x)$
\end{nicebox}
\begin{example}
Given the Laplace Transform $\hat{Y}(s) = \frac{1}{(s+a)^2}$, determine the time domain representation of the signal, $y(t)$. Assume the signal is causal.
\begin{align*}
\hat{Y}(s) &= \frac{1}{(s+a)^2} \\
\hat{Y}(s) &= -\frac{d}{ds}\frac{1}{(s+a)} \\
\end{align*}
We already know the inverse transform of the causal signal whose Laplace transform is $\frac{1}{s + a}$. This signal is given by $e^{-at}u(t)$, where the $RoC = \{ s \st \Real{s} \gt -\Real{a} \}$.
\begin{align*}
\iltrans{\hat{Y}(s)} &= -\lr{ -t e^{-at}u(t) } \\
y(t) &= t e^{-at}u(t) \\
\end{align*}
We now have a double pole at $-a$, but the $RoC$ stays the same.
\end{example}
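SymPy can confirm this inverse transform directly (assuming the causal, right-sided inverse, which is what \texttt{inverse\_laplace\_transform} returns):
\begin{verbatim}
import sympy as sp

t, s = sp.symbols('t s')
a = sp.symbols('a', positive=True)

print(sp.inverse_laplace_transform(1 / (s + a)**2, s, t))
# t*exp(-a*t)*Heaviside(t)
\end{verbatim}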
\subsection{Rational Laplace Transforms}
Let's compare continuous time and discrete time representations of shifted delta functions:
\begin{align*}
\delta(n) &\ztp 1 & \delta(t) &\ltp 1 \\
\delta(n-N) &\ztp z^{-N} & \delta(t-T) &\ltp e^{-sT}\\
\end{align*}
We can see that the $Z$-transform will give us a rational function in $z$ when we shift, whereas the Laplace Transform will not produce a rational function in $s$. A rational function in $s$ is a polynomial in $s$ divided by another polynomial in $s$:
$$
\frac{P(s)}{Q(s)} = \frac{ b_m s^m + b_{m-1} s^{m-1} + \cdots + b_1 s + b_0 }{a_n s^n + a_{n-1} s^{n-1} + \cdots + a_1 s + a_0}
$$
The shifting property of the Laplace Transform,
$$x(t-T) \ltp \hat{X}(s) e^{-sT}$$
tells us that we produce a transform which is not rational. Recall that in discrete time, a rational $Z$-transform can be produced by linear combinations of $\alpha^n u(n)$, $\alpha^n u(-n)$, $n^k \alpha^n u(n)$, $n^k \alpha^n u(-n)$, where $k \in \N$, and shifted versions thereof. Note that the differentiation property for Laplace Transforms,
$$ \frac{d}{dt} x(t) \ltp s\hat{X}(s) $$
tells us that we can produce a rational transform in $s$. In continuous time, a rational Laplace transform can be produced by linear combinations of $e^{-\alpha t} u(t)$, $e^{-\alpha t}u(-t)$, $t^k e^{-\alpha t}u(t)$, $t^k e^{-\alpha t}u(-t)$, where $k \in \N$, and differentiated versions thereof with respect to $t$. The only difference between CT and DT when producing rational transforms is that we have shifted versions in DT and differentiated versions in CT.
\begin{example}
Consider two causal signals, one in discrete time, and one in continuous time: $y(n) = \alpha y(n-1) + x(n)$ and $\dot{y}(t) = -\alpha y(t) + x(t)$. One is a first order difference equation, the other a first order differential equation.
\begin{align*}
y(n) &= \alpha y(n-1) + x(n) & \dot{y}(t) &= -\alpha y(t) + x(t) \\
\hat{Y}(z) &= \alpha z^{-1}\hat{Y}(z) + \hat{X}(z) & s\hat{Y}(s) &= -\alpha \hat{Y}(s) + \hat{X}(s) \\
\hat{H}(z) &= \frac{1}{1 - \alpha z^{-1}} & \hat{H}(s) &= \frac{1}{s + \alpha} \\
h(n) &= \alpha^n u(n) & h(t) &= e^{-\alpha t}u(t) \\
\end{align*}
\end{example}
\begin{example}
Consider a first order RC-circuit. The voltage across its capacitor is governed by the following equation: $y(t) = \frac{1}{C} \int_{-\infty}^t g(\tau) d\tau$, where $g(t)$ is the current, and $C$ is the capacitance.
% \img{images/circuits/combo/combo.ps}
Ohm's law tells us that $I = \frac{V}{R}$, where $V$ is the voltage drop across the circuit, and $R$ is the resistance. In our case, $I = g(t)$, and $V = x(t) - y(t)$. This gives us $g(t) = \frac{x(t) - y(t)}{R}$. Now we can solve for $y(t)$:
\begin{align*}
y(t) &= \frac{1}{C} \int_{-\infty}^t g(\tau) d\tau \\
y(t) &= \frac{1}{C} \int_{-\infty}^t \frac{x(\tau) - y(\tau)}{R} d\tau \\
y(t) &= \frac{1}{RC} \int_{-\infty}^t x(\tau)d\tau - \frac{1}{RC} \int_{-\infty}^t y(\tau) d\tau \\
\frac{d}{dt} y(t) &= \frac{d}{dt} \frac{1}{RC} \int_{-\infty}^t x(\tau)d\tau - \frac{d}{dt} \frac{1}{RC} \int_{-\infty}^t y(\tau) d\tau \\
\end{align*}
Now we have an equation we can solve using Laplace Transforms:
\begin{align*}
\dot{y}(t) &= \frac{1}{RC}x(t) - \frac{1}{RC}y(t) \\
s\hat{Y}(s) &= \frac{1}{RC} \hat{X}(s) - \frac{1}{RC} \hat{Y}(s) \\
\hat{Y}(s) \lr{ s + \frac{1}{RC}} &= \frac{1}{RC} \hat{X}(s) \\
\hat{H}(s) &= \frac{\frac{1}{RC}}{ \lr{ s + \frac{1}{RC}}} \\
\end{align*}
So we find that the system function is $\hat{H}(s) = \frac{\frac{1}{RC}}{ \lr{ s + \frac{1}{RC}}} $ with $RoC(h) = \{ s \st \Real{s} \gt -\frac{1}{RC} \}$.
% \img{images/roc/laplace/circuit/causal.ps}
We know the time domain representation (assuming causal):
\begin{align*}
\hat{H}(s) &= \frac{\frac{1}{RC}}{ \lr{ s + \frac{1}{RC}}} \\
h(t) &= \frac{1}{RC} e^{-\frac{1}{RC}t}u(t) \\
\end{align*}
Here the $RoC$ includes the $i\omega$-axis, thus,
$$
H(\omega) = \left. \hat{H}(s) \right|_{s = i\omega}
$$
We can find the magnitude response to determine what type of filter this circuit is:
$$
\abs{H(\omega)} = \frac{\frac{1}{RC}}{\abs{i\omega + \frac{1}{RC}}}
$$
% \img{images/roc/laplace/circuit/mag.ps}
At $\omega = 0$, $\abs{H(\omega)}$ is at a maximum. As $\omega$ approaches $\pm \infty$, the magnitude approaches 0. This first order circuit is thus a low-pass filter.
% \img{images/graphs/ctlowpass/graph.ps}
\end{example}
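A short numerical sketch (with arbitrarily chosen component values) makes the low-pass behavior concrete: the magnitude is 1 at DC, drops to $1/\sqrt{2}$ at the cutoff $\omega_c = \frac{1}{RC}$, and keeps falling beyond it.
\begin{verbatim}
import numpy as np

R, C = 1e3, 1e-6             # example values: 1 kOhm, 1 uF
wc = 1 / (R * C)             # cutoff: 1000 rad/s here
w = np.array([0.0, wc, 10 * wc])
H = (1 / (R * C)) / np.abs(1j * w + 1 / (R * C))
print(H)   # [1.0, 0.707..., 0.0995...]: maximal at DC -> low-pass
\end{verbatim}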
\subsection{Generalized Differentiation}
Consider the causal system represented by the following $n^{th}$ order differential equation:
\begin{align*}
a_n \frac{d^n}{dt^n}y(t) + a_{n-1} \frac{d^{n-1}}{dt^{n-1}}y(t) + \cdots + a_1 \frac{d}{dt} y(t) + a_0 y(t) = b_n \frac{d^n}{dt^n}x(t) + b_{n-1} \frac{d^{n-1}}{dt^{n-1}}x(t) + \cdots + b_1 \frac{d}{dt} x(t) + b_0 x(t) \\
\end{align*}
We can use the generalized differentiation property of the Laplace Transform:
\begin{nicebox}
$$
\frac{d^k}{dt^k} x(t) \ltp s^k \hat{X}(s)
$$
\end{nicebox}
This allows us to solve for the system function $\hat{H}(s) = \frac{\hat{Y}(s)}{\hat{X}(s)}$ of an $n^{th}$ order differential equation:
\begin{align*}
\hat{H}(s) &= \frac{ b_n s^n + b_{n-1} s^{n-1} + \cdots + b_1 s + b_0}{a_n s^n + a_{n-1} s^{n-1} + \cdots + a_1 s + a_0}
\end{align*}
\subsection{Example}
Given the following system function, determine all of the possible inverse Laplace Transforms:
$$
\hat{H}(s) = \frac{ (s-1) }{ (s+1)(s-2) }
$$
We can see that the system has two poles, one at -1 and the other at 2. The system has one zero at 1, and the other at $\infty$. We know that the system can be broken up into components:
\begin{align*}
\hat{H}(s) = \frac{A}{s+1} + \frac{B}{s-2} \\
\end{align*}
We can find the coefficients using the Heaviside method:
\begin{align*}
A &= \left. (s+1)\hat{H}(s) \right|_{s = -1} = \frac{2}{3} \\
B &= \left. (s-2)\hat{H}(s) \right|_{s = 2} = \frac{1}{3} \\
\hat{H}(s) &= \frac{2/3}{s+1} + \frac{1/3}{s-2} \\
\end{align*}
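The Heaviside coefficients can be double-checked symbolically, for instance with SymPy's partial fraction routine (a sketch assuming SymPy is available):
\begin{verbatim}
import sympy as sp

s = sp.symbols('s')
H = (s - 1) / ((s + 1) * (s - 2))
print(sp.apart(H, s))   # 1/(3*(s - 2)) + 2/(3*(s + 1))
\end{verbatim}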
We now have three cases with non-trivial regions of convergence. The first is where we have two right-sided signals. In this case $RoC = \{ s \st \Real{s} \gt 2 \}$:
% \imgsize{0.75}{images/roc/laplace/allpossible/causal.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3,-1)(3,3)
\newcommand{\Real}[1]{{\Re \mit{e}\left\{{#1}\right\}}}
\newcommand{\Imag}[1]{{\Im \mit{m}\left\{{#1}\right\}}}
\pscustom[fillstyle=solid,fillcolor=blue!40,linestyle=none]{
\psframe(2,-2)(2.5,2)
}
\psline[linestyle=dashed](-1,-2.2)(-1,2.2)
\psline[linestyle=dashed](2,-2.2)(2,2.2)
\rput(2,-0.4){$2$}
\rput(1,-0.4){$1$}
\rput(-1,-0.4){$-1$}
\rput(2,0){$ {\bf X} $}
\rput(1,0){$ {\bf 0} $}
\rput(-1,0){$ {\bf X} $}
% x-axis
\rput(2.5,0.3){ $\Real{s}$ }
\psline{->}(-2.5,0)(2.5,0)
% y-axis
\rput(0.6,2.5){ $\Imag{s}$ }
\psline{->}(0,-2.5)(0,2.5)
\end{pspicture}
\end{center}
This system is causal but not BIBO stable. Here the possible inverse transform is
\begin{align*}
h(t) &= \frac{2}{3} e^{-t}u(t) + \frac{1}{3} e^{2t}u(t) \\
\end{align*}
Here $ \frac{1}{3} e^{2t}u(t)$ is the unstable term. The next possibility is where we have two left-sided signals. In this case $RoC = \{ s \st \Real{s} \lt -1 \}$:
% \imgsize{0.75}{images/roc/laplace/allpossible/anticausal.ps}
\begin{center}
\begin{pspicture}(-3,-1)(3,3)
\newcommand{\Real}[1]{{\Re \mit{e}\left\{{#1}\right\}}}
\newcommand{\Imag}[1]{{\Im \mit{m}\left\{{#1}\right\}}}
\pscustom[fillstyle=solid,fillcolor=blue!40,linestyle=none]{
\psframe(-2,-2)(-1,2)
}
\psline[linestyle=dashed](-1,-2.2)(-1,2.2)
\psline[linestyle=dashed](2,-2.2)(2,2.2)
\rput(2,-0.4){$2$}
\rput(1,-0.4){$1$}
\rput(-1,-0.4){$-1$}
\rput(2,0){$ {\bf X} $}
\rput(1,0){$ {\bf 0} $}
\rput(-1,0){$ {\bf X} $}
% x-axis
\rput(2.5,0.3){ $\Real{s}$ }
\psline{->}(-2.5,0)(2.5,0)
% y-axis
\rput(0.6,2.5){ $\Imag{s}$ }
\psline{->}(0,-2.5)(0,2.5)
\end{pspicture}
\end{center}
This system is anti-causal and not BIBO stable. Here the inverse transform is
\begin{align*}
h(t) &= -\frac{2}{3} e^{-t}u(-t) - \frac{1}{3} e^{2t}u(-t) \\
\end{align*}
In this case $- \frac{1}{3} e^{2t}u(-t)$ is the stable term. The last possibility is where we have right- and left-sided signals whose regions of convergence have a non-trivial overlap. In this case $RoC = \{ s \st -1 \lt \Real{s} \lt 2 \}$:
% \imgsize{0.75}{images/roc/laplace/allpossible/leftright.ps}
\begin{center}
\begin{pspicture}(-3,-1)(3,3)
\newcommand{\Real}[1]{{\Re \mit{e}\left\{{#1}\right\}}}
\newcommand{\Imag}[1]{{\Im \mit{m}\left\{{#1}\right\}}}
\pscustom[fillstyle=solid,fillcolor=blue!40,linestyle=none]{
\psframe(-1,-2)(2,2)
}
\psline[linestyle=dashed](-1,-2.2)(-1,2.2)
\psline[linestyle=dashed](2,-2.2)(2,2.2)
\rput(2,-0.4){$2$}
\rput(1,-0.4){$1$}
\rput(-1,-0.4){$-1$}
\rput(2,0){$ {\bf X} $}
\rput(1,0){$ {\bf 0} $}
\rput(-1,0){$ {\bf X} $}
% x-axis
\rput(2.5,0.3){ $\Real{s}$ }
\psline{->}(-2.5,0)(2.5,0)
% y-axis
\rput(0.6,2.5){ $\Imag{s}$ }
\psline{->}(0,-2.5)(0,2.5)
\end{pspicture}
\end{center}
This system is two-sided and is BIBO stable. Here the inverse transform is
\begin{align*}
h(t) &= \frac{2}{3} e^{-t}u(t) - \frac{1}{3} e^{2t}u(-t) \\
\end{align*}
Now both terms are stable, which is consistent with the fact that the $RoC$ includes the $i\omega$-axis, implying BIBO stability.
\subsection{Steady State and Transient Responses of CT-LTI Systems}
Assume that we have a causal, stable system that is rational in $s$. If we pass a complex exponential as input, we know that we get the frequency response of the system multiplied by the same exponential:
$$
x(t) = e^{i\omega t} \to \fbox{$H$} \to y(t) = H(\omega) e^{i\omega t}
$$
What if we apply the toneburst, $e^{i\omega_0 t} u(t)$, a suddenly-applied sinusoid? The system is describable by an LCCDE,
$$
\hat{H}(s) = \frac{ B(s) }{ A(s) } = \frac{ B(s) }{ (s-p_1) (s-p_2) \cdots (s-p_n) }
$$
The stability of the system implies that all of the poles are to the left of the $i\omega$-axis. In other words, the stability implies $\Real{p_\ell} \lt 0 $ $ \forall \ell$. We can now apply the toneburst to this system. We know that the transform of the toneburst is given by
$$
e^{i\omega_0 t} u(t) \ltp \frac{1}{s - i\omega_0} \quad \Real{s} \gt 0
$$
We also know that convolution in the time domain corresponds to multiplication in the frequency domain, thus
\begin{align*}
\hat{Y}(s) &= \hat{H}(s) \hat{X}(s) \\
\hat{Y}(s) &= \frac{ B(s) }{ A(s)(s - i\omega_0) } = \frac{ B(s) }{ (s-p_1) (s-p_2) \cdots (s-p_n)(s - i\omega_0) } \quad RoC \supseteq RoC(h) \cap RoC(x) \\
\end{align*}
The new region of convergence is $\Real{s} \gt 0$. Let's apply partial fraction decomposition to determine the time domain expression. For simplicity, assume that there are no duplicate poles.
\begin{align*}
\hat{Y}(s) &= \frac{R_1}{s - p_1} + \frac{R_2}{s - p_2} + \cdots + \frac{R_n}{s - p_n} + \frac{Q}{s - i\omega_0} \\
y(t) &= R_1 e^{p_1 t}u(t) + R_2 e^{p_2 t} u(t) + \cdots + R_n e^{p_n t} u(t) + Q e^{i\omega_0 t} u(t) \\
\end{align*}
Since all poles $p_\ell$ lie to the left of the $i\omega$-axis, and the pole contributed by the input term $Q e^{i\omega_0 t} u(t)$ lies on the $i\omega$-axis, we can see why the intersection of the regions of convergence is $\{ s \st \Real{s} \gt 0 \}$: the right-most pole has real part zero. All of the poles to the left of the $i\omega$-axis are a part of the \emph{transient response}. This is because $\Real{p_\ell} \lt 0 \quad \forall \ell$, so the terms associated with these poles decay.
\begin{align*}
y_{tr}(t) &= R_1 e^{p_1 t}u(t) + R_2 e^{p_2 t} u(t) + \cdots + R_n e^{p_n t} u(t) \\
y_{ss}(t) &= Q e^{i\omega_0 t} u(t) \\
\end{align*}
When we look at the transient response, we see that the terms emanate from the stable poles of the system, while the \emph{steady state response} is due to the pole of the input. The steady state response's pole lies to the right of the transient response's right-most pole. Whenever you introduce an input with a pole to the right of the system's right-most pole, that input pole produces the steady state response. \\
How can we find $Q$?
\begin{align*}
\hat{Y}(s) &= \frac{R_1}{s - p_1} + \frac{R_2}{s - p_2} + \cdots + \frac{R_n}{s - p_n} + \frac{Q}{s - i\omega_0} \\
\hat{Y}(s) &= \sum \limits_{\ell = 1}^{n} \frac{R_\ell}{s - p_\ell} + \frac{Q}{s - i\omega_0} \\
(s - i\omega_0)\hat{Y}(s) &= \sum \limits_{\ell = 1}^{n} \frac{R_\ell(s-i\omega_0)}{s - p_\ell} + Q \\
\end{align*}
If we evaluate $\hat{Y}(s)(s - i\omega_0)$, we can get the coefficient $Q$:
\begin{align*}
Q &= \left. (s - i\omega_0)\hat{Y}(s) \right|_{s = i\omega_0} \\
Q &= \left. \frac{ B(s) }{ (s-p_1) (s-p_2) \cdots (s-p_n) } \right|_{s = i\omega_0} \\
Q &= \left.\hat{H}(s) \right|_{s = i\omega_0} \\
Q &= H(\omega_0) \\
\end{align*}
Now we have our steady state response
\begin{align*}
y_{ss}(t) &= Q e^{i\omega_0 t} u(t) \\
y_{ss}(t) &= H(\omega_0) e^{i\omega_0 t} u(t) \\
\end{align*}
If we apply a one-sided exponential to the system, $e^{i\omega_0 t} u(t)$, as $t$ becomes large enough, the system cannot distinguish between the one-sided exponential and the infinite duration exponential, $e^{i\omega_0 t}$. The coefficient of the steady state response should then be the frequency response of the system evaluated at the frequency of the input; this is what we knew from before when applying the infinite duration exponential as input to an LTI system:
\begin{align*}
e^{i\omega_0 t} \to \fbox{$H$} \to H(\omega_0) e^{i\omega_0 t}
\end{align*}
In other words, if $y(t) = y_{tr}(t) + y_{ss}(t)$, then as time goes on, we have
\begin{align*}
y(t) \to y_{ss}(t) = H(\omega_0) e^{i\omega_0 t} u(t)
\end{align*}
which for large $t$ is indistinguishable from $H(\omega_0) e^{i\omega_0 t}$.
If $\omega_0 = 0$, then $x(t) = u(t)$, and we have $y_{ss}(t) = H(0)u(t)$, where $H(0)$ is the DC gain of the system.
\subsection{Example DC Motor}
Consider an example model of a DC motor with inertia $M$ and damping $D$. The system function is given by
\begin{align*}
\hat{G}(s) &= \frac{1}{Ms^2 + Ds} = \frac{1/M}{s(s + D/M)} \\
\end{align*}
For this example, consider $M = 1$ and $D = 16$.
(a) Is this system BIBO stable?
No: the right-most pole of the system lies on the $i\omega$-axis (at $s = 0$), so the $i\omega$-axis is not in the region of convergence.
(b) What is the LCCDE of the system?
\begin{align*}
\hat{G}(s) &= \frac{ \hat{Y}(s) }{ \hat{Q}(s) } \\
\frac{ \hat{Y}(s) }{ \hat{Q}(s) } &= \frac{1}{s^2 + 16s} \\
\lr{s^2 + 16s} \hat{Y}(s) &= \hat{Q}(s) \\
\end{align*}
Recall the differentiation property of the Laplace Transform:
\begin{nicebox}
\begin{align*}
\frac{d}{dt} x(t) \ltp s\hat{X}(s) \\
\end{align*}
$RoC \supseteq RoC(x)$
\end{nicebox}
We can use this to solve for the time domain expression:
\begin{align*}
\lr{s^2 + 16s} \hat{Y}(s) &= \hat{Q}(s) \\
s^2\hat{Y}(s) + 16s\hat{Y}(s) &= \hat{Q}(s) \\
\ddot{y}(t) + 16\dot{y}(t) &= q(t) \\
\end{align*}
(c) We stabilize the system via the feedback loop shown below; determine the overall system function $\hat{H}(s)$.
% image of feedback
% \img{images/feedback/motor/motor.ps}
\begin{center}
\begin{pspicture}(0,-4)(12,4)
\rput(0.25,0){$x$}
\rput(2.3,0.35){$\epsilon$}
\rput(8,0){$y$}
\rput(5.75,0){$\hat{G}(s)$}
\rput(3.1,0){$K$}
\rput(1.5,-1.75){$H$}
% plus or minus for adder
\rput(2.1,-0.5){$-$}
\rput(1.45,0.35){$+$}
\psframe(0.75,-2)(7.25,1)
\pspolygon(2.8,0.3)(2.8,-0.3)(4,0)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(0.5,0)(1.5,0)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(1.75,-1.25)(1.75,-0.25)
%\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(5.5,-2)(4,-2)
\psline[linewidth=1.25 pt](6.75,-1.25)(1.75,-1.25)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(2,0)(2.8,0) % out to y
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(4,0)(5,0) % out to y
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(6.5,0)(7.85,0) % out to y
\psline[linewidth=1.25 pt](6.75,0)(6.75,-1.25)
\psframe(5, -.5)(6.5,0.5)
\pscircle(1.75,0){0.25}
\psline(1.5,0)(2,0)
\psline(1.75,0.25)(1.75,-0.25)
\end{pspicture}
\end{center}
$x$ is the desired position of the system, $y$ is the actual position of the system, and $\epsilon$ is the error signal, given by $x - y$. This system is called a proportional feedback controller.
\begin{align*}
\ltrans{\epsilon(t)} \hat{K}(s) \hat{G}(s) &= \hat{Y}(s) \\
\ltrans{ x(t) - y(t) } \hat{K}(s) \hat{G}(s) &= \hat{Y}(s) \\
\hat{X}(s) \hat{K}(s) \hat{G}(s) - \hat{Y}(s) \hat{K}(s) \hat{G}(s) &= \hat{Y}(s) \\
\hat{X}(s) \hat{K}(s) \hat{G}(s) &= \hat{Y}(s) ( 1 + \hat{K}(s) \hat{G}(s) ) \\
\hat{H}(s) &= \frac{ \hat{K}(s) \hat{G}(s) }{ 1 + \hat{K}(s) \hat{G}(s) } \\
\end{align*}
In this case, we have $\hat{K}(s) = K \gt 0$.
\begin{align*}
\hat{H}(s) &= \frac{ K \hat{G}(s) }{ 1 + K \hat{G}(s) } \\
\hat{H}(s) &= \frac{ K \frac{1}{s^2 + 16} }{ ( 1 + K \frac{1}{s^2 + 16} ) } \\
\hat{H}(s) &= \frac{K}{s^2 + 16s + K} \\
\end{align*}
Another way to solve this is using Black's formula, $\frac{\mbox{Forward Gain}}{1 - \mbox{Loop Gain}}$:
\begin{align*}
\frac{\mbox{Forward Gain}}{1 - \mbox{Loop Gain}} \\
\frac{K \hat{G}(s) }{1 + K\hat{G}(s) } \\
\end{align*}
(d) Is $H$ guaranteed to be BIBO stable for $K \gt 0$?
We need to look at the poles of the system. We can determine the poles by finding the roots of $s^2 + 16s + K$, using the quadratic formula:
\begin{align*}
\frac{-16 \pm \sqrt{ 16^2 - 4K} } { 2} \\
-8 \pm \sqrt{ 64 - K }
\end{align*}
We have two cases that we need to analyze:
Case I: $64 - K \geq 0$.
Here $\sqrt{ 64 - K } \in \R$ and $\sqrt{ 64 - K } \lt 8$ (since $K \gt 0$). This means that $-8 \pm \sqrt{ 64 - K }$ is always less than 0, i.e., to the left of the $i\omega$-axis.
Case II: $64 - K \lt 0$.
In this case $\sqrt{ 64 - K }$ is imaginary, and the poles are $-8 \pm i\sqrt{K - 64}$. These are also to the left of the $i\omega$-axis.
In both cases, we have a causal, stable system whose poles are always to the left of the $i\omega$-axis.
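A quick numerical check of the pole locations over a few values of $K$ (spanning both cases) supports this:
\begin{verbatim}
import numpy as np

for K in [10.0, 64.0, 200.0]:     # covers 64 - K > 0, = 0, and < 0
    poles = np.roots([1, 16, K])
    print(K, poles, np.all(poles.real < 0))   # always True for K > 0
\end{verbatim}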
(e) If $x(t) = u(t)$, what is $\hat{Y}(s)$?
\begin{align*}
\hat{Y}(s) &= \hat{H}(s)\hat{X}(s) \\
\hat{Y}(s) &= \frac{ K } { (s^2 + 16s + K)s } \\
\end{align*}
(f) Let $K = 63$. What is the coefficient of the steady-state response?
\begin{align*}
\hat{Y}(s) &= \frac{63}{s(s+7)(s+9)} \\
\hat{Y}(s) &= \frac{A}{s} + \frac{B}{(s+7)} + \frac{C}{(s+9)} \\
A &= \left. s \hat{Y}(s) \right|_{s = 0} = \hat{H}(0) = 1 \\
\end{align*}
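The same conclusion falls out of a simulation: the closed-loop step response settles at $\hat{H}(0) = 1$, the coefficient of the steady-state term (a sketch assuming SciPy's signal module):
\begin{verbatim}
import numpy as np
from scipy import signal

K = 63.0
sys = signal.TransferFunction([K], [1, 16, K])  # H(s) = 63/(s^2 + 16s + 63)
t, y = signal.step(sys, T=np.linspace(0, 3, 500))
print(y[-1])   # ~1.0: the step response settles at H(0) = 1
\end{verbatim}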
\subsection{Summary of Laplace Transforms}
\begin{nicebox}
{\bf Time-Shifting (Translation) Property }
\begin{align*}
x(t+T) &\ltp e^{sT}\hat{X}(s) \\
\end{align*}
The $RoC$ excludes $\infty$ if the signal is not causal, or excludes $-\infty$ if the signal is not anti-causal.
\end{nicebox}
\begin{nicebox}
{\bf Convolution in the Time Domain }
\begin{align*}
h(t) &= f(t) * g(t) \\
\hat{H}(s) &= \hat{F}(s)\hat{G}(s) \\
\end{align*}
The $RoC(h) \supseteq RoC(f) \cap RoC(g)$.
\end{nicebox}
\begin{nicebox}
{\bf Modulation with a Complex Exponential }
\begin{align*}
e^{s_0 t} x(t) &\ltp \hat{X}(s-s_0) \\
\end{align*}
$RoC = RoC(x) + \Real{s_0} $
\end{nicebox}
\begin{nicebox}
{\bf Integration in the Time Domain }
\begin{align*}
\int_{-\infty}^t x(\tau) d\tau &\ltp \frac{\hat{X}(s)}{s}
\end{align*}
$RoC(y) \supseteq RoC(x) \cap \{ s \st \Real{s} \gt 0 \} $
\end{nicebox}
\begin{nicebox}
{\bf Differentiation in the Time Domain }
\begin{align*}
\frac{d}{dt} x(t) \ltp s\hat{X}(s) \\
\end{align*}
$RoC \supseteq RoC(x)$
\end{nicebox}
\begin{nicebox}
{\bf Generalized Differentiation in the Time Domain }
\begin{align*}
\frac{d^k}{dt^k} x(t) \ltp s^k \hat{X}(s) \quad k \in \N \\
\end{align*}
$RoC \supseteq RoC(x)$
\end{nicebox}
\begin{nicebox}
{\bf Frequency Differentiation }
\begin{align*}
-tx(t) \ltp \frac{d}{ds}\hat{X}(s) \\
\end{align*}
$RoC = RoC(x)$
\end{nicebox}
\begin{nicebox}
{\bf Conjugation Property }
\begin{align*}
x^*(t) &\ltp \hat{X}^*(s^*)
\end{align*}
$RoC = RoC(x)$
\end{nicebox}
\newpage
\end{document}