$$
% create the definition symbol
\def\bydef{\stackrel{\Delta}{=}}
%\def\circconv{\otimes}
\def\circconv{\circledast}
\newcommand{\qed}{\mbox{ } \Box}
\newcommand{\infint}{\int_{-\infty}^{\infty}}
% z transform
\newcommand{\ztp}{ ~~ \mathop{\mathcal{Z}}\limits_{\longleftrightarrow} ~~ }
\newcommand{\iztp}{ ~~ \mathop{\mathcal{Z}^{-1}}\limits_{\longleftrightarrow} ~~ }
% fourier transform pair
\newcommand{\ftp}{ ~~ \mathop{\mathcal{F}}\limits_{\longleftrightarrow} ~~ }
\newcommand{\iftp}{ ~~ \mathop{\mathcal{F}^{-1}}\limits_{\longleftrightarrow} ~~ }
% laplace transform
\newcommand{\ltp}{ ~~ \mathop{\mathcal{L}}\limits_{\longleftrightarrow} ~~ }
\newcommand{\iltp}{ ~~ \mathop{\mathcal{L}^{-1}}\limits_{\longleftrightarrow} ~~ }
\newcommand{\ftrans}[1]{ \mathcal{F} \left\{#1\right\} }
\newcommand{\iftrans}[1]{ \mathcal{F}^{-1} \left\{#1\right\} }
\newcommand{\ztrans}[1]{ \mathcal{Z} \left\{#1\right\} }
\newcommand{\iztrans}[1]{ \mathcal{Z}^{-1} \left\{#1\right\} }
\newcommand{\ltrans}[1]{ \mathcal{L} \left\{#1\right\} }
\newcommand{\iltrans}[1]{ \mathcal{L}^{-1} \left\{#1\right\} }
% coordinate vector relative to a basis (linear algebra)
\newcommand{\cvrb}[2]{\left[ \vec{#1} \right]_{#2} }
% change of coordinate matrix (linear algebra)
\newcommand{\cocm}[2]{ \mathop{P}\limits_{#2 \leftarrow #1} }
% Transformed vector set
\newcommand{\tset}[3]{\{#1\lr{\vec{#2}_1}, #1\lr{\vec{#2}_2}, \dots, #1\lr{\vec{#2}_{#3}}\}}
% sum transformed vector set
\newcommand{\tsetcsum}[4]{{#1}_1#2(\vec{#3}_1) + {#1}_2#2(\vec{#3}_2) + \cdots + {#1}_{#4}#2(\vec{#3}_{#4})}
\newcommand{\tsetcsumall}[4]{#2\lr{{#1}_1\vec{#3}_1 + {#1}_2\vec{#3}_2 + \cdots + {#1}_{#4}\vec{#3}_{#4}}}
\newcommand{\cvecsum}[3]{{#1}_1\vec{#2}_1 + {#1}_2\vec{#2}_2 + \cdots + {#1}_{#3}\vec{#2}_{#3}}
% function def
\newcommand{\fndef}[3]{#1:#2 \to #3}
% vector set
\newcommand{\vset}[2]{\{\vec{#1}_1, \vec{#1}_2, \dots, \vec{#1}_{#2}\}}
% absolute value
\newcommand{\abs}[1]{\left| #1 \right|}
% vector norm
\newcommand{\norm}[1]{\left|\left| #1 \right|\right|}
% trans
\newcommand{\trans}{\mapsto}
% evaluate integral
\newcommand{\evalint}[3]{\left. #1 \right|_{#2}^{#3}}
% slist
\newcommand{\slist}[2]{{#1}_{1},{#1}_{2},\dots,{#1}_{#2}}
% vectors
\newcommand{\vc}[1]{\textbf{#1}}
% real
\newcommand{\Real}[1]{{\Re \mit{e}\left\{{#1}\right\}}}
% imaginary
\newcommand{\Imag}[1]{{\Im \mit{m}\left\{{#1}\right\}}}
\newcommand{\mcal}[1]{\mathcal{#1}}
\newcommand{\bb}[1]{\mathbb{#1}}
\newcommand{\N}{\mathbb{N}}
\newcommand{\Z}{\mathbb{Z}}
\newcommand{\Q}{\mathbb{Q}}
\newcommand{\R}{\mathbb{R}}
\newcommand{\C}{\mathbb{C}}
\newcommand{\I}{\mathbb{I}}
\newcommand{\Th}[1]{\mathop\mathrm{Th(#1)}}
\newcommand{\intersect}{\cap}
\newcommand{\union}{\cup}
\newcommand{\intersectop}{\bigcap}
\newcommand{\unionop}{\bigcup}
\newcommand{\setdiff}{\backslash}
\newcommand{\iso}{\cong}
\newcommand{\aut}[1]{\mathop{\mathrm{Aut(#1)}}}
\newcommand{\inn}[1]{\mathop{\mathrm{Inn(#1)}}}
\newcommand{\Ann}[1]{\mathop{\mathrm{Ann(#1)}}}
\newcommand{\dom}[1]{\mathop{\mathrm{dom} #1}}
\newcommand{\cod}[1]{\mathop{\mathrm{cod} #1}}
\newcommand{\id}{\mathrm{id}}
\newcommand{\st}{\ |\ }
\newcommand{\mbf}[1]{\mathbf{#1}}
\newcommand{\enclose}[1]{\left\langle #1\right\rangle}
\newcommand{\lr}[1]{\left( #1\right)}
\newcommand{\lrsq}[1]{\left[ #1\right]}
\newcommand{\op}{\mathrm{op}}
\newcommand{\dotarr}{\dot{\rightarrow}}
%Category Names:
\newcommand{\Grp}{\mathbf{Grp}}
\newcommand{\Ab}{\mathbf{Ab}}
\newcommand{\Set}{\mathbf{Set}}
\newcommand{\Matr}{\mathbf{Matr}}
\newcommand{\IntDom}{\mathbf{IntDom}}
\newcommand{\Field}{\mathbf{Field}}
\newcommand{\Vect}{\mathbf{Vect}}
\newcommand{\thm}[1]{\begin{theorem} #1 \end{theorem}}
\newcommand{\clm}[1]{\begin{claim} #1 \end{claim}}
\newcommand{\cor}[1]{\begin{corollary} #1 \end{corollary}}
\newcommand{\ex}[1]{\begin{example} #1 \end{example}}
\newcommand{\prf}[1]{\begin{proof} #1 \end{proof}}
\newcommand{\prbm}[1]{\begin{problem} #1 \end{problem}}
\newcommand{\soln}[1]{\begin{solution} #1 \end{solution}}
\newcommand{\rmk}[1]{\begin{remark} #1 \end{remark}}
\newcommand{\defn}[1]{\begin{definition} #1 \end{definition}}
\newcommand{\ifff}{\Leftrightarrow}
% For the set of reals and integers
\newcommand{\rr}{\R}
\newcommand{\reals}{\R}
\newcommand{\ii}{\Z}
\newcommand{\cc}{\C}
\newcommand{\nn}{\N}
\newcommand{\nats}{\N}
% For terms being indexed.
% Puts them in standard font face and creates an index entry.
% arg: The term being defined.
% \newcommand{\pointer}[1]{#1\index{#1}}
% For bold terms to be indexed, but defined elsewhere.
% Puts them in bold face and creates an index entry.
% arg: The term being defined.
\newcommand{\strong}[1]{\textbf{#1}}
% For set names.
% Puts them in italics. In math mode, yields decent spacing.
% arg: The name of the set.
\newcommand{\set}[1]{\textit{#1}}
$$
\documentclass{article}
\usepackage{latex2html5}
\usepackage{writer}
\usepackage{auto-pst-pdf}
\usepackage{pstricks-add}
\usepackage{graphicx}
\usepackage{hyperref}
\definecolor{lightblue}{rgb}{0.0,0.24313725490196078,1.0}
\title{{\Huge ee20}}
\author{
\textbf{Dan Lynch} \\
UC Berkeley \\
EECS Department \\
D@nLynch.com \\
}
\date{1st of December 2012}
\begin{document}
\maketitle
\newpage
\tableofcontents
\newpage
\section{Week 1}
\subsection{Signals}
\subsubsection{Continuous-Time Signals}
\begin{definition}
If the function $x \in X$ is defined as $x : A \to B$, and $A$ is uncountable, then we say that the signal $x$ is a \emph{Continuous-Time Signal} (a.k.a. CT Signal).
\end{definition}
An example of a CT signal:
$$x : \mathbb{R} \to \mathbb{R}$$
In describing a system $\fbox{F}$ that uses CT signals, $F$ can be defined as $F : \left[ \mathbb{R} \to \mathbb{R} \right] \to \left[ \mathbb{R} \to \mathbb{R} \right]$, which means the signal is a \emph{real-valued} continuous-time signal. $F$ can also be defined as $F : \left[ \mathbb{R} \to \mathbb{C} \right] \to \left[ \mathbb{R} \to \mathbb{C} \right]$, which means the signal is a \emph{complex-valued} CT signal. Here, $\left[ \mathbb{R} \to \mathbb{R} \right] \cup \left[ \mathbb{R} \to \mathbb{C} \right] $ represents the set of all real- or complex-valued CT signals.
\subsubsection{Discrete-Time Signals}
\begin{definition}
If the function $x \in X$ is defined as $x : A \to B$, and $A$ is countable, then we say that the signal $x$ is a \emph{Discrete-Time Signal} (a.k.a. DT Signal).
\end{definition}
An example of a DT signal:
$$x : \mathbb{Z}_+ \to \mathbb{R}$$
In describing a system $\fbox{F}$ that uses DT signals, $F$ can be defined as $F : X \to Y$, where the sets $X$ and $Y$, for example, can each be a set of functions such as $\left[ \mathbb{Z} \to \mathbb{C} \right]$ or $\left[ \mathbb{Z} \to \mathbb{R} \right]$.
\subsubsection{Analog Signals}
\begin{definition}
If the function $x \in X$ is defined as $x : A \to B$, and both $A$ and $B$ are uncountable, then we say $x$ is an \emph{analog signal}.
\end{definition}
An example of an analog signal:
$$x : \mathbb{R} \to (0,1)$$
This signal is analog because the interval between 0 and 1 is not countable (which can be proved using Cantor's Diagonalization argument).
\subsubsection{Digital Signals}
\begin{definition}
If the function $x \in X$ is defined as $x : A \to B$, and $A$ is countable and $B$ is finite, then we say the signal $x$ is a \emph{digital signal}.
\end{definition}
An example of a digital signal:
$$x : \mathbb{Z} \to \{ 0, 1 \}$$
\subsection{Systems}
In describing systems, we typically will use the notation
$$x \to \fbox{F} \to y,$$
where $x$ is the input signal, $\fbox{F}$ is the system, and $y$ is the output signal.
The system $F$ can be described as
$$F : X \to Y,$$
where $X$ and $Y$ are sets of functions $x_i$ and $y_i$, where $x_i \in X$ is a function, and $y_i \in Y$ is also a function for some $i \in \Z$.
The domain and codomain of a function $x \in X$ determines the type of signal. The input signal $x$ can be defined by $x : A \to B$.
When we refer to $x$, we mean a function in entirety. $x(t)$ refers to a specific value of $x$ at $t$.
\subsection{Kronecker Delta}
The \emph{Kronecker Delta} function is also known as the \emph{Discrete-Time Impulse Signal} (a.k.a. the Unit Impulse).
The delta function can be defined by:
$$
\delta(n) = \left\{ \begin{array}{rl}
1 & n = 0 \\
0 &\mbox{ otherwise}
\end{array} \right.
$$
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-4,-1)(4,3)
% line
\psline{-*}(-2.571,0)(-2.571,0)
% position
\rput(-2.571,-0.4){-3}
% line
\psline{-*}(-1.714,0)(-1.714,0)
% position
\rput(-1.714,-0.4){-2}
% line
\psline{-*}(-0.857,0)(-0.857,0)
% position
\rput(-0.857,-0.4){-1}
% line
\psline{-*}(0.000,0)(0.000,1)
% value
\rput(0.25,1){(1)}
% position
\rput(0.000,-0.4){0}
% line
\psline{-*}(0.857,0)(0.857,0)
% position
\rput(0.857,-0.4){1}
% line
\psline{-*}(1.714,0)(1.714,0)
% position
\rput(1.714,-0.4){2}
% line
\psline{-*}(2.571,0)(2.571,0)
% position
\rput(2.571,-0.4){3}
% axes
\psline{->}(-3.428,0)(3.428,0)
\rput(3.428,-0.3){ $n$ }
\rput(2.928,1.857){ $\delta(n)$ }
\end{pspicture}
\end{center}
$\delta$ is important because any discrete-time signal can be expressed as a linear combination of \emph{scaled} and \emph{shifted} versions of it. For example, $\hat{\delta}(n)=\delta(n-1)$ is $\delta$ shifted one sample to the right.
\newpage
\section{Week 2}
\subsection{Complex Numbers}
\subsubsection{Cartesian Representation}
$$z = \alpha + i\beta,$$ where $\alpha,\beta \in \R$. $\alpha$ is the real part, denoted by $\mbox{Re}(z)$; $\beta$ is the imaginary part, denoted by $\mbox{Im}(z)$.
\subsubsection{Polar Representation}
$$z = Re^{i\theta},$$ where $R$ is the \emph{magnitude}, or norm, and $\theta$ is the \emph{phase}, or angle.
$$R = \left| z \right| \geq 0$$
$$\theta = \angle z$$
Note that $\theta$ is undefined if $R=0$; in other words, $\left| z \right| = 0 \Rightarrow \theta$ is undefined.
Generally, keep these in mind:
\begin{enumerate}
\item $R \in \R_{\oplus}$
\item $\left| z \right| = 0 \Rightarrow \theta $ is undefined.
\item Calculators return $\tan^{-1}$ values in the interval $\left[-\frac{\pi}{2}, \frac{\pi}{2}\right]$, so check the quadrant
\end{enumerate}
\subsubsection{Complex Conjugate}
\begin{definition}
Let $z = x + iy = Re^{i \theta}$, then the \emph{complex conjugate} of $z$ is defined:
$$z^* = x-iy=Re^{-i \theta}$$
\end{definition}
Some properties of the complex conjugate:
\begin{claim}
The product of a complex number and its conjugate is the magnitude squared.
$$ z \cdot z^* = R^2 = \left| z \right|^2 $$
\end{claim}
\begin{proof}
\begin{align*}
z \cdot z^* &= (a + ib)(a - ib) \quad \mbox{Using rectangular representation}\\
z \cdot z^* &= a^2 +aib - aib -i^2b^2 \\
z \cdot z^* &= a^2 +b^2 \\
z \cdot z^* &= \left( \sqrt{a^2 +b^2} \right)^2 \\
z \cdot z^* &= R^2
\end{align*}
\end{proof}
\begin{proof}
\begin{align*}
z \cdot z^* &= Re^{i \theta} \cdot Re^{-i \theta} \quad \mbox{using Polar representation} \\
z \cdot z^* &= R^2e^{i(\theta -\theta)} \\
z \cdot z^* &= R^2e^0 \\
z \cdot z^* &= R^2
\end{align*}
\end{proof}
\begin{claim}
The sum of a complex number and its conjugate is twice the complex number's real part.
$$ z + z^* = 2\mbox{Re}(z) $$
\end{claim}
\begin{proof}
\begin{align*}
z + z^* &= (a + ib) + (a - ib) \\
z + z^* &= 2a
\end{align*}
\end{proof}
\begin{claim}
The difference of a complex number and its conjugate is twice the complex number's imaginary part.
$$ z - z^* = 2i\mbox{Im}(z) $$
\end{claim}
\begin{proof}
\begin{align*}
z - z^* &= (a + ib) - (a - ib) \\
z - z^* &= 2ib
\end{align*}
\end{proof}
\begin{claim}
The division of a complex number by its conjugate results in a doubling of the angle $\theta$:
$$ z / z^* = e^{i2 \theta} $$
\end{claim}
\begin{proof}
\begin{align*}
z / z^* &= Re^{i\theta} / \left( Re^{-i\theta} \right) = e^{i\theta} / e^{-i\theta} \\
z / z^* &= e^{i\theta} \cdot e^{i\theta} \\
z / z^* &= e^{i(\theta + \theta)} = e^{i2 \theta}
\end{align*}
\end{proof}
\subsubsection{Choosing a Representation}
$z_1 + z_2$ is easy in Cartesian coordinates.
$$ z_1 + z_2 = (\alpha_1 + \alpha_2) + i(\beta_1 + \beta_2) $$
$z_1 \cdot z_2$ is easy in Polar coordinates.
$$z_1 \cdot z_2 = R_1R_2e^{i(\theta_1 + \theta_2)} $$
\begin{problem}
Solve $z^3 - 1= 0$ in terms of the magnitude and phase.
\end{problem}
\begin{solution}
To solve for the magnitude is simple: $z^3 - 1= 0 \Rightarrow z^3 = 1 \Rightarrow \left| z \right|^3 = \left| z^3 \right| = 1 \Rightarrow \left| z \right| = 1$. The phase requires a little more work:
\begin{align*}
z^3 - 1&= 0 \\
(e^{i\theta})^3 - 1&= 0 \quad \mbox{(Polar Representation)} \\
e^{i3\theta} - 1&= 0 \\
e^{i3\theta} &= 1 \\
\cos(3\theta) + i\sin(3 \theta) &= 1 \quad \mbox{(Euler's Formula)} \\
3\theta &= 0 + 2 \pi k \mbox{ where } k \in \Z \\
\theta &= 0 + \frac{2 \pi}{3} k
\end{align*}
So the solution set gives us $ \theta \in \{ 0, \frac{2\pi}{3}, \frac{4\pi}{3} \}$.
\end{solution}
\subsubsection{Useful Information}
A brief note for when $z$ is real:
If $z \in \R_{\oplus}$, then $\angle z = 2\pi k$, where $k\in\Z$; the standard is to use $\angle z = 0$. Also, when $z$ is a positive real number, $\left| z \right| = z$. If the notation $z \gt 0 $ or $z \lt 0$ is used, this means that $z$ is real, \emph{not} imaginary. For example, if $z<0$, then $\angle z = \pi$ and $\left| z \right| = -z$. For instance, $-2 = 2e^{i\pi}$, and $2=2e^{i0}$.
Some useful formulae:
$$ \frac{z_1}{z_2} = \frac{R_1}{R_2}e^{i(\theta_1 -\theta_2)}$$
$$ \left| z_1 \cdot z_2 \right| = \left| z_1 \right| \cdot \left| z_2 \right| $$
$$ \left| \frac{z_1}{z_2} \right| = \frac{\left| z_1 \right|}{\left| z_2 \right|} $$
$$ \angle \left( z_1 \cdot z_2 \right) = \angle z_1 + \angle z_2 $$
$$ \angle \frac{z_1}{z_2} = \angle z_1 - \angle z_2 $$
\subsubsection{Polar Representation Ambiguity}
Polar representations have a $2 \pi k$ ambiguity in phase, where $k \in \Z$. This means that $e^{i(\theta - 2\pi)} = e^{i\theta} = e^{i(\theta+2\pi)} = \cdots$
Usually we will use the range $[0, 2\pi)$ or $[-\pi,\pi)$.
\begin{problem}
Given the complex number $z = e^{i\frac{2\pi}{3}}$, determine $\sqrt[4]{z}$. Be mindful of how many fourth roots $z$ has and identify each
of them.
\end{problem}
\begin{solution}
\begin{align*}
z &= e^{i\frac{2\pi}{3}} \\
z &= e^{i(\frac{2\pi}{3} + 2\pi k) } \mbox{ for some } k \in \Z \\
\sqrt[4]{z} &= e^{i(\frac{2\pi}{3} + 2\pi k) \frac{1}{4}} \\
\sqrt[4]{z} &= e^{i(\frac{\pi}{6} + \frac{\pi}{2} k)}
\end{align*}
So within the range $[0,2\pi)$, we find that $\sqrt[4]{z} = e^{i\frac{\pi}{6}}, e^{i\frac{2\pi}{3}}, e^{i\frac{7\pi}{6}}, e^{i\frac{5\pi}{3}}$.
\end{solution}
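As a quick numerical sanity check, here is a minimal sketch (plain Python, using only the standard \texttt{cmath} module) that computes the four roots from the formula above and raises each back to the fourth power to recover $z$:
\begin{verbatim}
# Check the four fourth roots of z = e^{i 2pi/3} numerically.
import cmath

z = cmath.exp(1j * 2 * cmath.pi / 3)
for k in range(4):
    r = cmath.exp(1j * (cmath.pi/6 + k * cmath.pi/2))  # e^{i(pi/6 + pi k/2)}
    print(r, abs(r**4 - z))  # the difference is ~0 for every k
\end{verbatim}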
\subsubsection{The Imaginary Unit}
\begin{align*}
i &= \sqrt{-1} \\
i &= 0 + 1i \\
i &= 1e^{i\frac{\pi}{2}} \\
i^0 &= 1 \\
i^1 &= i \\
i^2 &= -1 \\
i^3 &= -i \\
i^4 &= 1 \\
\frac{1}{i} &= \frac{1}{i} \cdot \frac{i}{i} = \frac{i}{i^2} = \frac{i}{-1} = -i \\
i^i &= (e^{i\frac{\pi}{2}})^i = e^{i^2\frac{\pi}{2}} = e^{-\frac{\pi}{2}}
\end{align*}
%\includegraphics{images/imag.ps}
\begin{center}
\begin{pspicture}(-3,-3)(3,3)
% y-axis
\rput(0.3,2.8){ $Im$ }
\psline{->}(0,-2.5)(0,2.5)
% x-axis
\rput(2.8,0.3){ $Re$ }
\psline{->}(-2.5,0)(2.5,0)
% the circle
\pscircle(0,0){ 2 }
% quadrants
\rput(0.3,2.3){ $i^1$ }
\rput(0.3,-2.3){ $i^3$ }
\rput(-2.3,0.3){ $i^2$ }
\rput(2.3,0.3){ $i^0$ }
\end{pspicture}
\end{center}
\subsubsection{Derivatives of Complex Exponentials}
$$\frac{d}{dt}e^{it}$$
When taking the derivative of a \emph{complex-valued exponential}, we insist on the same property as a \emph{real-valued exponential}:
$$\frac{d}{dt}e^{\omega t} = \omega e^{\omega t} $$
$$\frac{d}{dt}e^{it} = i \cdot e^{it} = e^{i\frac{\pi}{2}} \cdot e^{it} = e^{i(t + \frac{\pi}{2})}$$
\begin{nicebox}
$z \cdot i$ is rotation by $90^\circ$
\end{nicebox}
$$ z \cdot i = e^{i\theta} \cdot i = e^{i\theta} \cdot e^{i\frac{\pi}{2}} = e^{i(\theta + \frac{\pi}{2})}$$
$$ (\alpha + i\beta)i = i\alpha + i^2\beta = -\beta + i\alpha $$
% \includegraphics{images/euler/multrot.ps}
% \caption{ Multiplication and Rotation }
\begin{center}
\begin{pspicture}(-4,-4)(4,4)
% y-axis
\rput(0.3,3.75){ $Im$ }
\psline{->}(0,-3.75)(0,3.75)
% x-axis
\rput(3.75,0.3){ $Re$ }
\psline{->}(-3.75,0)(3.75,0)
% the circle
\pscircle(0,0){ 3 }
% quadrants
\rput(0.3,3.3){ $i^1$ }
\rput(0.3,-3.3){ $i^3$ }
\rput(-3.3,0.3){ $i^2$ }
\rput(3.3,0.3){ $i^0$ }
% new vector
\psline[linewidth=1.5 pt]{->}(0,0)(2.598,1.500)
\rput(2,1.500){$\alpha+i\beta$}
%\pcline[linewidth=1.5 pt]{->}(0,0)(2.598,1.500)
%\Aput{\small $\alpha+i\beta$}
% new vector
\psline[linewidth=1.5 pt]{->}(0,0)(-1.500,2.598)
\rput(-1.500,1.5){$-\beta+i\alpha$}
%\pcline[linewidth=1.5 pt]{->}(0,0)(-1.500,2.598)
%\Aput{\small $-\beta+i\alpha$}
\end{pspicture}
\end{center}
\subsubsection{Converting Between Representations}
Converting from Cartesian to Polar (taking care to place the angle in the correct quadrant when $\alpha < 0$):
$$ \alpha + i\beta = \sqrt{ \alpha^2 + \beta^2 }\, e^{i\tan^{-1}(\frac{\beta}{\alpha})} $$
Converting from Polar to Cartesian:
$$ Re^{i\theta} = R\left( \cos \theta + i \sin \theta \right) $$
\begin{problem}
Express $z = 1 + i\sqrt{3}$ in polar coordinates.
\end{problem}
\begin{solution}
\begin{align*}
z &= 2\left(\frac{1}{2} + i\frac{\sqrt{3}}{2}\right) \quad \mbox{If you see a way to pull out typical trig values, do it} \\
z &= 2e^{i\frac{\pi}{3}} \quad \mbox{Since we know $\theta = \frac{\pi}{3}$ when $\sin \theta = \frac{\sqrt{3}}{2}$ and $\cos \theta = \frac{1}{2}$}
\end{align*}
\end{solution}
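Both conversions can also be checked numerically. Here is a minimal sketch using Python's standard \texttt{cmath} module, whose \texttt{polar} and \texttt{rect} functions implement the two formulas above:
\begin{verbatim}
# Convert z = 1 + i*sqrt(3) between Cartesian and polar forms.
import cmath

z = 1 + 1j * 3 ** 0.5
R, theta = cmath.polar(z)     # Cartesian -> polar: R = |z|, theta = angle
print(R, theta)               # 2.0, ~pi/3 (about 1.047)
print(cmath.rect(R, theta))   # polar -> Cartesian: recovers ~(1 + 1.732i)
\end{verbatim}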
\subsection{The Complex Exponential}
\subsubsection{Exponentials as Vectors}
$$x(t) = e^{it}$$
Think of this as the position of a particle on the complex plane. The derivative is defined in the same way, $\frac{d}{dt}e^{st} = se^{st}$ where $s \in \C$, but we will also introduce some new notation. The derivative with respect to time is defined as
$$\dot{x}(t)=\frac{d}{dt}x(t)=ie^{it}$$
At $t = 0$, the \emph{velocity vector} is $\dot{x}(0)=i$, and the \emph{position vector} is $x(0)=1 + 0i$. The vectors $\dot{x}(t)$ and $x(t)$ are orthogonal, that is,
$$\forall t, \dot{x}(t) \perp x(t). $$
$z=e^{i \theta t}$ is on the unit circle, that is, $\left| e^{i \theta t}\right| = 1$ for all $t$. $z$ must stay on the unit circle: if the magnitude of $z$ got smaller over time, this would imply a velocity vector $\dot{x}(t)$ pointing inward, which contradicts the orthogonality of $x(t)$ and $\dot{x}(t)$.
\subsubsection{Euler's Formula}
\begin{center}
\begin{pspicture}(-3.5,-3.5)(3.5,3.5)
% y-axis
\rput(0.3,3.75){ $Im$ }
\psline{->}(0,-3.75)(0,3.75)
% x-axis
\rput(3.75,0.3){ $Re$ }
\psline{->}(-3.75,0)(3.75,0)
% i sin \theta
\userline[linestyle=dashed](0,2.553)(1.576,2.553){0}{(x>0) ? -3 * sin( atan(-y/x) ) : 3 * sin( atan(-y/x) )}{(x>0) ? 3 * cos( atan(-y/x) ) : -3 * cos( atan(-y/x) ) }{ (x>0) ? -3 * sin( atan(-y/x) ) : 3 * sin( atan(-y/x) )}
% cos \theta
\userline[linestyle=dashed](1.576,0)(1.576,2.553){(x>0) ? 3 * cos( atan(-y/x) ) : -3 * cos( atan(-y/x) ) }{0}{(x>0) ? 3 * cos( atan(-y/x) ) : -3 * cos( atan(-y/x) ) }{ (x>0) ? -3 * sin( atan(-y/x) ) : 3 * sin( atan(-y/x) )}
\userline[linewidth=1.5 pt,linecolor=blue]{->}(0,0.000)(2.121,2.121){(x>0) ? 3 * cos( atan(-y/x) ) : -3 * cos( atan(-y/x) ) }{ (x>0) ? -3 * sin( atan(-y/x) ) : 3 * sin( atan(-y/x) )}
% the circle
\pscircle(0,0){ 3 }
% quadrants
\rput(0.3,3.3){ $i^1$ }
\rput(0.3,-3.3){ $i^3$ }
\rput(-3.3,0.3){ $i^2$ }
\rput(3.3,0.3){ $i^0$ }
\end{pspicture}
\end{center}
Leonhard Euler demonstrated a relationship between the base of the natural logarithm, sine, cosine, and the imaginary unit, now known as \emph{Euler's Formula}:
\begin{nicebox}
$$ e^{i \theta} = \cos \theta + i \sin \theta $$
\end{nicebox}
%\begin{figure}[htp]
%\centering
%\includegraphics{images/euler/eulers.ps}
%\caption{ A visual look at Euler's Formula }
%\label{ fig:eulers }
%\end{figure}
\begin{center}
\begin{pspicture}(-3.5,-3.5)(3.5,3.5)
% y-axis
\rput(0.3,3.75){ $Im$ }
\psline{->}(0,-3.75)(0,3.75)
% x-axis
\rput(3.75,0.3){ $Re$ }
\psline{->}(-3.75,0)(3.75,0)
% i sin \theta
%\userline[linestyle=dashed](0,2.553)(1.576,2.553){0}{(x>0) ? -3 * sin( atan(-y/x) ) : 3 * sin( atan(-y/x) )}{(x>0) ? 3 * cos( atan(-y/x) ) : -3 * cos( atan(-y/x) ) }{ (x>0) ? -3 * sin( atan(-y/x) ) : 3 * sin( atan(-y/x) )}
\psline[linestyle=dashed](0,2.553)(1.576,2.553)
\rput(2,1.2765){$i\sin\theta $}
% cos \theta
%\userline[linestyle=dashed](1.576,0)(1.576,2.553){(x>0) ? 3 * cos( atan(-y/x) ) : -3 * cos( atan(-y/x) ) }{0}{(x>0) ? 3 * cos( atan(-y/x) ) : -3 * cos( atan(-y/x) ) }{ (x>0) ? -3 * sin( atan(-y/x) ) : 3 * sin( atan(-y/x) )}
\psline[linestyle=dashed](1.576,0)(1.576,2.553)
\rput(0.788,2.68065){$ \cos\theta$}
%\userline[linewidth=1.5 pt,linecolor=blue]{->}(0,0.000)(2.121,2.121){(x>0) ? 3 * cos( atan(-y/x) ) : -3 * cos( atan(-y/x) ) }{ (x>0) ? -3 * sin( atan(-y/x) ) : 3 * sin( atan(-y/x) )}
% the circle
\pscircle(0,0){ 3 }
% quadrants
\rput(0.3,3.3){ $i^1$ }
\rput(0.3,-3.3){ $i^3$ }
\rput(-3.3,0.3){ $i^2$ }
\rput(3.3,0.3){ $i^0$ }
% magnitude
\psline{->}(0,0)(1.576,2.553)
\rput(0.5,1.3){$\mathbf{|z|}$}
% angle line
\psarc(0,0){0.3}{360}{60}
% angle omega
\rput(0.4,0.2){$\mathbf{\theta}$}
% z = e^{i \omega}
\rput(3,2.8){$\mathbf{z} = e^{i \theta} = \cos\theta + i\sin\theta$}
\end{pspicture}
\end{center}
Euler's formula can be derived by looking at the complex exponential on the unit circle. The horizontal component of $e^{i \theta}$ is $ \cos \theta$, and the vertical component is $i\sin \theta$.
The formula can also be derived using infinite series.
\begin{align*}
e^\theta &= \sum \limits_{n = 0}^{\infty} \frac{\theta^n}{n!} = 1 + \theta + \frac{\theta^2}{2!} + \frac{\theta^3}{3!} + \frac{\theta^4}{4!} + \cdots \\
\sin \theta &= \sum \limits_{n = 0}^{\infty} \frac{(-1)^n\theta^{2n+1}}{(2n+1)!} = \theta - \frac{\theta^3}{3!} + \frac{\theta^5}{5!} - \cdots \\
\cos \theta &= \sum \limits_{n = 0}^{\infty} \frac{(-1)^n\theta^{2n}}{(2n)!} = 1 - \frac{\theta^2}{2!} + \frac{\theta^4}{4!} - \cdots
\end{align*}
Given the above formulae, we can derive $e^{i\theta}$ in terms of $\sin \theta$ and $\cos \theta$:
\begin{align*}
e^{i\theta} &= \sum \limits_{n = 0}^{\infty} \frac{(i\theta)^n}{n!} = 1 + i\theta + \frac{i^2\theta^2}{2!} + \frac{i^3\theta^3}{3!} + \frac{i^4\theta^4}{4!} + \cdots \\
e^{i\theta} &= \sum \limits_{n = 0}^{\infty} \frac{(i\theta)^n}{n!} = 1 + i\theta - \frac{\theta^2}{2!} - \frac{i\theta^3}{3!} + \frac{\theta^4}{4!} + \frac{i\theta^5}{5!} -\frac{\theta^6}{6!} - \cdots \\
e^{i\theta} &= \sum \limits_{n = 0}^{\infty} \frac{(i\theta)^n}{n!} = \left[ 1 - \frac{\theta^2}{2!} + \frac{\theta^4}{4!} - \cdots \right] + \left[ i\theta - \frac{i\theta^3}{3!} + \frac{i\theta^5}{5!} - \cdots\right] \\
e^{i\theta} &= \sum \limits_{n = 0}^{\infty} \frac{(i\theta)^n}{n!} = \sum \limits_{n = 0}^{\infty} \frac{(-1)^n\theta^{2n}}{(2n)!} + i\sum \limits_{n = 0}^{\infty} \frac{(-1)^n\theta^{2n+1}}{(2n+1)!} \\
e^{i \theta} &= \cos \theta + i \sin \theta
\end{align*}
We can also use this formula to come up with useful identities:
For $\cos \theta$:
\begin{align*}
e^{-i \theta} &= \cos (-\theta) + i \sin(- \theta) \\
e^{-i \theta} &= \cos \theta - i \sin \theta \quad \mbox{(since cosine is even and sine is odd)} \\
e^{i \theta} + e^{-i \theta} &= (\cos \theta + i \sin \theta) + (\cos \theta - i \sin \theta) \\
e^{i \theta} + e^{-i \theta} &= 2\cos \theta \\
\frac{e^{i \theta} + e^{-i \theta}}{2} &= \cos \theta
\end{align*}
This demonstrates that a cosine wave defined by $\cos \omega_0t = \frac{1}{2}e^{i\omega_0t} + \frac{1}{2}e^{-i\omega_0t}$ has two frequencies, namely $\omega_0$ and $-\omega_0$. The magnitude represents how much each frequency contributes. The same goes for the sine wave:
For $\sin \theta$:
\begin{align*}
e^{i \theta} - e^{-i \theta} &= (\cos \theta + i \sin \theta) - (\cos \theta - i \sin \theta) \\
e^{i \theta} - e^{-i \theta} &= 2i\sin \theta \\
\frac{e^{i \theta} - e^{-i \theta}}{2i} &= \sin \theta
\end{align*}
For example, consider the A major triad defined by $x(t) = \sin(\omega_0t) + \sin(\omega_1t) + \sin(\omega_2t)$, where
\begin{align*}
\omega_0 &= 2\pi\cdot 440 = \mbox{A} \\
\omega_1 &= 2\pi \cdot 554 = \mbox{C\#} \\
\omega_2 &= 2\pi \cdot 554 \cdot (\sqrt[12]{2})^3 = \mbox{E}
\end{align*}
Since each sine contributes the pair of frequencies $\pm\omega_k$, there exist 6 frequencies in the A major triad!
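A short numerical sketch (assuming NumPy is available) of the identity above, checking that a sinusoid really is the sum of two complex exponentials at $\pm\omega_0$:
\begin{verbatim}
# Verify sin(w0 t) = (e^{i w0 t} - e^{-i w0 t}) / 2i on a time grid.
import numpy as np

w0 = 2 * np.pi * 440                       # the A in the triad above
t = np.linspace(0, 0.01, 1000)
lhs = np.sin(w0 * t)
rhs = (np.exp(1j * w0 * t) - np.exp(-1j * w0 * t)) / 2j
print(np.allclose(lhs, rhs))               # True
\end{verbatim}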
\subsubsection{Radial Frequency}
The complex exponential can be written in the form
$$e^{i \omega t},$$
where $\omega$ represents the \emph{radial frequency}, and is defined by
$$\omega = 2\pi f,$$
where $f$ is measured in \emph{cycles per second} (\emph{Hz}), and $2\pi$ is measured in \emph{radians per cycle}. This gives us a measure for $\omega$ of
$$2\pi\frac{\mbox{radians}}{\mbox{cycle}} \cdot f\frac{\mbox{cycles}}{\mbox{second}} = \omega\frac{\mbox{radians}}{\mbox{second}}$$
\subsubsection{Derivatives}
If we view the complex exponential as a particle traveling around the unit circle, it does not have to be traveling around at a constant rate. For instance, we can represent $\theta$ as a function. If $\theta(t) = \omega_0t + \phi_0$, the particle goes around at a constant rate. We call $\theta(t)$ the \emph{instantaneous angle}, and $\omega(t)$ the \emph{instantaneous frequency} at time $t$:
$$ \omega(t) = \dot{\theta}(t) = \frac{d\theta}{dt} $$
The prototype of a \emph{pure frequency} is $e^{i \omega_0t}$, and represents a phasor going around the unit circle at a constant rate $\omega_0 \frac{\mbox{rad}}{\mbox{sec}}$. If $z = e^{i \theta(t)}$ and $\theta(t) = \omega_0t + \phi_0$, then $\phi_0$ represents the \emph{phase shift}, or initial phase offset.
\subsection{Signals to Decompose Signals}
In Continuous Time, we use the \emph{Dirac Delta} function (technically not a function: its height at $t=0$ is infinite, and it is characterized by having unit area).
%\begin{figure}[htp]
%\centering
%\includegraphics{images/dirac/dirac.ps}
%\caption{ The Dirac Delta }
%\label{ fig:dirac }
%\end{figure}
\begin{center}
\begin{pspicture}(-3,-1)(3,1.2)
% position
\rput(-2.571,-0.4){-3}
% position
\rput(-1.714,-0.4){-2}
% position
\rput(-0.857,-0.4){-1}
% line
\psline{->}(0.000,0)(0.000,1)
% value
\rput(0.35,1){(1)}
% position
\rput(0.000,-0.4){0}
% position
\rput(0.857,-0.4){1}
% position
\rput(1.714,-0.4){2}
% position
\rput(2.571,-0.4){3}
% axes
\psline{->}(-3.428,0)(3.428,0)
\rput(3.428,-0.3){ $t$ }
\end{pspicture}
\end{center}
In Discrete Time, we use the \emph{Kronecker Delta} function.
%\begin{figure}[htp]
%\centering
%\includegraphics{images/kronecker.ps}
%\caption{ The Unit Impulse, a.k.a Kronecker Delta }
%\label{ fig:kronecker }
%\end{figure}
\begin{center}
\begin{pspicture}(-3,-1)(3,1.2)
% line
\psline{-*}(-2.571,0)(-2.571,0)
% position
\rput(-2.571,-0.4){-3}
% line
\psline{-*}(-1.714,0)(-1.714,0)
% position
\rput(-1.714,-0.4){-2}
% line
\psline{-*}(-0.857,0)(-0.857,0)
% position
\rput(-0.857,-0.4){-1}
% line
\psline{-*}(0.000,0)(0.000,1)
% value
\rput(0.25,1){(1)}
% position
\rput(0.000,-0.4){0}
% line
\psline{-*}(0.857,0)(0.857,0)
% position
\rput(0.857,-0.4){1}
% line
\psline{-*}(1.714,0)(1.714,0)
% position
\rput(1.714,-0.4){2}
% line
\psline{-*}(2.571,0)(2.571,0)
% position
\rput(2.571,-0.4){3}
% axes
\psline{->}(-3.428,0)(3.428,0)
\rput(3.428,-0.3){ $n$ }
\rput(2.928,1.857){ $\delta(n)$ }
\end{pspicture}
\end{center}
The Kronecker Delta function is used in the linear combination:
$$ x(n) = \sum_{m \in \Z} x(m) \delta(n-m) $$
Another signal that we use to decompose signals is the \emph{Complex Exponential}.
This signal is given by
$$ e^{i \omega t}, \quad t \in \R$$
$$ e^{i \omega n}, \quad n \in \Z$$
\newpage
\section{Week 3}
\subsection{Properties of Systems}
\subsubsection{Linearity}
A system $F$ defined by $y = F(x)$, such that
$$x \to \fbox{F} \to y $$
is \emph{linear} if it satisfies two properties. The first property is \emph{homogeneity}: $\alpha y = F(\alpha x)$, such that
$$\alpha x \to \fbox{F} \to \alpha y $$
where $\alpha \in \C$. The second property is \emph{additivity}: if $y_1 = F(x_1)$ and $y_2 = F(x_2)$, then
$$x_1 + x_2 \to \fbox{F} \to y_1 + y_2. $$
Together, these two properties form \emph{superposition}, where we have
$$\alpha_1 x_1 + \alpha_2 x_2 \to \fbox{F} \to \alpha_1 y_1 + \alpha_2 y_2 $$
Let's look at some examples:
\begin{problem}
Consider a circuit where the voltage across the circuit is given by Ohm's Law, $V=IR$, where $I$ is the current, and $R$ is the resistance.
%\begin{center}
%\includegraphics{images/circuits/resistor.ps}
%\end{center}
\begin{enumerate}
\item If the resistance is constant, is $y(t)=Rx(t)$ linear?
\item What if $R$ varies with time, $y(t)=R(t)x(t)$?
\end{enumerate}
\end{problem}
\begin{solution}
Let $\hat{x} = \alpha x$; then $\hat{y} = R\hat{x} = \alpha R x = \alpha y$. Let $\tilde{x} = x_1 + x_2$; then $\tilde{y}=R\tilde{x}=Rx_1+Rx_2=y_1+y_2$. Therefore the system is linear. Since we never used the constancy of $R$, we can observe that in both cases we have linearity.
\end{solution}
\begin{problem}
Are the following systems linear?
\begin{enumerate}
\item $y(t) = x^2(t)$
\item $y(t) = \Re(x(t))$
\item $y(t)=\log _i e$
\end{enumerate}
\end{problem}
\begin{solution}
None of the systems are linear.
\begin{enumerate}
\item Not linear. Let $\hat{x} = \alpha x(t)$, then $\hat{y} = \left( \alpha x(t) \right)^2= \alpha^2 x^2(t) \neq \alpha y(t)$
\item Not linear. Write $x(t) = a(t) + ib(t)$ and let $\hat{x} = \alpha x$, so $\hat{y} = \Re(\alpha x(t))$ where $\alpha \in \C$. When $\alpha = i$, we have $\hat{y} = \Re(i a(t) - b(t))=-b(t)$, while $\alpha y(t) = i\Re(x(t)) = ia(t)$. Since $\hat{y} \neq \alpha y(t)$, homogeneity fails for complex scalars.
\item Not linear, but a fun problem to solve: the output is a nonzero constant, independent of the input, so zero input does not give zero output. Let $\log _i e=y$, then $i^y = e \Rightarrow e^{i \frac{\pi}{2}y} = e \Rightarrow i \frac{\pi}{2}y = 1 \Rightarrow y = \frac{2}{\pi i} = -\frac{2i}{\pi}$
\end{enumerate}
\end{solution}
\subsubsection{ZIZO}
Consider the system
$$ x \to \fbox{F} \to y \mbox{.}$$
If we multiply the input signal by $0$, we should have a $0$ output:
$$ 0 \cdot x \to \fbox{F} \to 0 \cdot y $$
This property, known as \emph{ZIZO}, can be stated as follows:
$$ \mbox{system is linear} \Rightarrow \mbox{zizo}$$
ZIZO simply means \emph{zero input zero output}. Consider the system defined by:
%\begin{center}
%\includegraphics{images/modulators/nonzizo.ps}
%\end{center}
\begin{center}
\begin{pspicture}(-3,-3)(4,1.2)
% in from x
\rput(-3.2,0){$x(t)$}
\psline[linewidth=1.25 pt, arrowscale=1.25]{->}(-2.7,0)(-0.25,0)
% out to y
\rput(3.9,0){$y(t)$}
\psline[linewidth=1.25 pt, arrowscale=1.25]{->}(0.25,0)(1.25,0)
% extended
\psline[linewidth=1.25 pt, arrowscale=1.25]{->}(1.75,0)(3.5,0)
% up arrow
\rput(1.5,-2){$g(t)$}
\psline[linewidth=1.25 pt, arrowscale=1.25]{->}(1.5,-1.65)(1.5,-0.25)
% up arrow
\rput(0,-2){$f(t)$}
\psline[linewidth=1.25 pt, arrowscale=1.25]{->}(0,-1.65)(0,-0.25)
% multiplier
\psline{-}(-0.1315,-0.2125)(0.1315,0.2125)
\psline{-}(-0.1315,0.2125)(0.1315,-0.2125)
\pscircle(0,0){0.25}
% adder
\psline{-}(1.25,0)(1.75,0)
\psline{-}(1.5,0.25)(1.5,-0.25)
\pscircle(1.5,0){0.25}
% box
\pspolygon(-1.25,-2.65)(-1.25,1)(2.65,1)(2.65,-2.65)
\end{pspicture}
\end{center}
Here $y(t) = x(t)f(t) + g(t)$. Zero input still gives us $g(t)$, so the system is not linear, by the contrapositive:
$$ \lnot \mbox{zizo} \Rightarrow \lnot \mbox{system is linear} $$
Although some systems may not be linear, they may be \emph{incrementally linear}, or \emph{affine}. For example, consider the two outputs given by
$y_1(t)=f(t)x_1(t)+g(t)$ and $y_2(t)=f(t)x_2(t)+g(t)$. Then $y_1(t) - y_2(t)=f(t)\left[x_1(t)-x_2(t)\right]$: the difference between the two outputs depends linearly on the difference between the inputs.
\subsubsection{Time Invariance}
Consider the system
$$x \to \fbox{F} \to y $$
where
$$y = F(x) \mbox{, or } y(t) = (F(x))(t)$$
Let $\widehat{x}(t)=x(t-T)$ be a time-shifted version of $x(t)$. Then the system $F$ is \emph{Time Invariant} if $\widehat{y}(t) = y(t-T)$ for all $x\in X$ and all $T \in \R$. There is an assumption that all time shifts of $x$ are signals in $X$; in other words, $X$ is closed under time shifts, and so is $Y$.
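Before working through the examples below, here is a minimal discrete-time sketch of the test (assuming NumPy; \texttt{np.roll} stands in for a time shift by treating the finite grid as circular), applied to a hypothetical two-point averaging system:
\begin{verbatim}
# Time-invariance test: shifting the input should shift the output.
import numpy as np

def F(x):                          # hypothetical system: two-point average
    return 0.5 * (x + np.roll(x, 1))

n = np.arange(64)
x = np.sin(0.3 * n)
N = 5
print(np.allclose(F(np.roll(x, N)),    # response to the shifted input
                  np.roll(F(x), N)))   # shifted response: True if TI
\end{verbatim}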
\begin{problem}
Is the system $y(t) = \cos (\omega_0t)x(t)$ time invariant?
%\begin{center}
%\includegraphics{images/modulators/amcos.ps}
%\end{center}
\begin{center}
\begin{pspicture}(-3,-3)(3,1.2)
% in from x
\rput(-3.2,0){$x(t)$}
\psline[linewidth=1.25 pt, arrowscale=1.1]{->}(-2.7,0)(-0.25,0)
% out to y
\rput(3.2,0){$y(t)$}
\psline[linewidth=1.25 pt, arrowscale=1.1]{->}(0.25,0)(2.7,0)
% up arrow
\rput(0,-2){$\cos(\omega_0 t)$}
\psline[linewidth=1.25 pt, arrowscale=1.1]{->}(0,-1.65)(0,-0.25)
% multiplier
\pscircle(0,0){0.25}
\psline(-0.175,0.175)(0.175,-0.175)
\psline(0.175,0.175)(-0.175,-0.175)
% box
\pspolygon(-1.5,-2.65)(-1.5,1)(1.5,1)(1.5,-2.65)
\end{pspicture}
\end{center}
\end{problem}
\begin{solution}
$y(t) = \cos (\omega_0t)x(t)$ is time varying.
Let $\widehat{x}(t) = x(t-T)$. Then $\widehat{y}(t) = \cos( \omega_0 t)\widehat{x}(t) = \cos(\omega_0t)x(t-T) \neq y(t-T)$
\end{solution}
\begin{problem}
Is the system time invariant if it involves a capacitor whose voltage $y(t)$ is defined by:
\begin{enumerate}
\item $y(t) = \frac{1}{C}\int^t_{-\infty}x(\tau)d\tau$
\item $y(t) = \frac{1}{C}\int^t_0x(\tau)d\tau$
\end{enumerate}
%\begin{center}
%\includegraphics{images/circuits/capacitor.ps}
%\end{center}
\end{problem}
\begin{solution}
\begin{enumerate}
\item $y(t) = \frac{1}{C}\int^t_{-\infty}x(\tau)d\tau$ is time invariant.
Let $\widehat{x}(t) = x(t-T)$. Then $$\widehat{y}(t) = \frac{1}{C} \int^t_{-\infty}\widehat{x}(\tau)d\tau = \frac{1}{C} \int^t_{-\infty} x(\tau-T) d\tau .$$ Let $\lambda = \tau -T \Rightarrow d\lambda = d\tau$, and the upper limit of the integral changes from $t$ to $t-T$, since $\tau = -\infty \Rightarrow \lambda = -\infty$ and $\tau = t \Rightarrow \lambda = t - T$. So now we have $\frac{1}{C} \int ^{t-T}_{-\infty}x(\lambda)d\lambda = y(t-T)$
\item $y(t) = \frac{1}{C}\int^t_0x(\tau)d\tau$ is time varying.
When we do a variable change, the integral becomes $\frac{1}{C} \int^{t-T}_{-T}x(\lambda)d\lambda \neq y(t-T)$
\end{enumerate}
\end{solution}
\begin{problem}
Given a system involving a resistor with voltage $y(t)$, is the system LTI when the voltage is given by:
\begin{enumerate}
\item $y(t) = Rx(t)$
\item $y(t) = R(t)x(t)$
\end{enumerate}
%\begin{center}
%\includegraphics{images/circuits/resistor.ps}
%\end{center}
\end{problem}
\begin{solution}
\begin{enumerate}
\item $y(t) = Rx(t)$ is time invariant.
Let $\widehat{x}(t) = x(t-T)$. Then $\widehat{y}(t) = R\widehat{x} = R x(t-T) = y(t-T)$
\item $y(t) = R(t)x(t)$ is time varying.
Let $\widehat{x}(t) = x(t-T)$. Then $\widehat{y}(t) = R(t)\widehat{x}(t) = R(t) x(t-T) \neq y(t-T)$
\end{enumerate}
\end{solution}
\begin{problem}
Determine if the following signals are time-invariant.
\begin{enumerate}
\item $y(t) = x^2(t)$
\item $y(n) = \frac{x(n) + x(n-1)}{2}$
\end{enumerate}
\end{problem}
\begin{solution}
Here are the solutions:
\begin{enumerate}
\item $y(t) = x^2(t)$ is time invariant.
Let $\widehat{x}(t) = x(t-T)$. Then $$\widehat{y}(t) = \widehat{x}^2(t) = x^2(t-T) = y(t-T).$$ Therefore it is time invariant for all $T \in \R$ and all $x \in X$.
\item $y(n) = \frac{x(n) + x(n-1)}{2}$ is time invariant.
Let $\widehat{x}(n) = x(n-N)$. Then $\widehat{y}(n) = \frac{\widehat{x}(n) + \widehat{x}(n-1)}{2} = \frac{x(n-N) + x(n-N-1)}{2} = y(n-N)$
\end{enumerate}
\end{solution}
\newpage
\section{Week 4}
\subsection{Linear Time-Invariant Systems}
Let $h(n)$ denote the \emph{impulse response} of the system $H$, i.e., its output when the input is the unit impulse $\delta(n)$. For example, let $x(n)=\delta (n) + 2\delta (n-1)$; then $y(n) = h(n) + 2h(n-1)$, such that
$$ x(n) = \delta(n) + 2\delta(n-1) \to \fbox{H} \to h(n) + 2h(n-1) = y(n)$$
This follows from linearity and time invariance. An arbitrary signal can then be decomposed into a linear combination of shifted impulses:
$$ x(n) =\cdots + x(-1)\delta(n+1) + x(0)\delta(n) + x(1)\delta(n-1) + \cdots $$
$$ y(n) =\cdots + x(-1)h(n+1) + x(0)h(n) + x(1)h(n-1) + \cdots $$
Since we are talking about DT signals, this brings us to a summation formula. The \emph{convolution sum} is denoted as
$$ y(n) = \sum \limits _{m=-\infty}^\infty x(m)h(n-m) = (x * h)(n)$$
The convolution of the input signal with the impulse response gives us the output of a linear time-invariant system. But what about the convolution of the impulse response with the input signal? Let $k = n - m$, then we write the formula as follows:
$$ y(n) = \sum \limits _{k=\infty}^{-\infty} x(n-k)h(k) = \sum \limits _{k=-\infty}^{\infty} h(k)x(n-k) = (h * x)(n)$$
Notice that the limits switched, but since it is a summation over all of $\Z$, the order does not affect the value. This demonstrates an important property of convolution sums, \emph{commutativity}:
$$ x * h = h * x $$
The roles of the input signal and the impulse response can be interchanged:
$$ x \to \fbox{h} \to y $$
$$\mbox{ or }$$
$$ h \to \fbox{x} \to y$$
Both produce the same output signal of the LTI system. Note: if a system is described by an impulse response $h$, this usually implies that it is an LTI system.
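Here is a minimal sketch (assuming NumPy) of the convolution sum for finite-length signals that start at $n=0$; it also checks commutativity numerically:
\begin{verbatim}
# Direct implementation of y(n) = sum_m x(m) h(n-m) for finite signals.
import numpy as np

def conv(x, h):
    y = np.zeros(len(x) + len(h) - 1)
    for m, xm in enumerate(x):
        y[m:m + len(h)] += xm * np.asarray(h, float)  # add x(m) * shifted h
    return y

x = [3, 1, 2]
h = [1, 1, 1]
print(conv(x, h))                            # [3. 4. 6. 3. 2.]
print(np.allclose(conv(x, h), conv(h, x)))   # commutativity: True
\end{verbatim}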
\subsection{Convolution}
As shown earlier, the \emph{convolution sum} can be defined as
\begin{nicebox}
\begin{align*}
y(n) &= \sum \limits _{m \in \Z} x(m)h(n-m) = (x * h)(n)
\end{align*}
\end{nicebox}
or using the commutative property of convolution we can write
\begin{nicebox}
\begin{align*}
y(n) &= \sum \limits _{k \in \Z} h(k)x(n-k) = (h * x)(n)
\end{align*}
\end{nicebox}
where $h(n)$ denotes the \emph{impulse response} of the system $H$. The input signal decomposes as
$$x(n) =\cdots + x(-1)\delta(n+1) + x(0)\delta(n) + x(1)\delta(n-1) + \cdots \mbox{,}$$
and the corresponding output signal of the LTI system is
$$ y(n) =\cdots + x(-1)h(n+1) + x(0)h(n) + x(1)h(n-1) + \cdots \mbox{.} $$
Note that $x * \delta = \delta * x = x$. In other words, $\delta(n)$ is the identity element of the convolution sum.
\begin{problem}
Determine the output signal of the following:
%\begin{figure}[htp]
%\centering
%\includegraphics{images/conv50.ps}
%\caption{ $h(n) = \delta(n-50)$, $x(n)=\delta(n) + 2\delta(n-1)$ }
%\label{ fig:conv50 }
%\end{figure}
$h(n) = \delta(n-50)$, $x(n)=\delta(n) + 2\delta(n-1)$
\begin{center}
\begin{pspicture}(-3,-1)(3,4.3)
% line
\psline{-*}(0,0)(0,1)
% value
\rput(0.25,1){(1)}
% position
\rput(0,-0.4){50}
% axes
\psline{->}(-3,0)(3,0)
\rput(3,-0.3){ $n$ }
\rput(2.5,4){ $x(n)$ }
\rput(3,1.7){ $n$ }
\rput(2.5,1.2){ $h(n)$ }
\psline{-*}(0.000,2)(0.000,3)
\psline{-*}(1.000,2)(1.000,4)
% value
\rput(0.25,3){(1)}
\rput(1.25,4){(2)}
% position
\rput(0,1.6){0}
\rput(1,1.6){1}
% axes
\psline{->}(-3,2)(3,2)
\end{pspicture}
\end{center}
\end{problem}
\begin{solution}
There are many ways to look at this; here we go over an algebraic approach, with $h(n) = \delta(n-50)$ and $x(n)=\delta(n) + 2\delta(n-1)$:
\begin{align*}
y(n) &= \sum \limits _{m \in \Z} x(m)h(n-m) = (x * h)(n) \\
y(n) &= x(0)h(n) + x(1)h(n-1) \\
y(n) &= 1\cdot h(n) + 2\cdot h(n-1) \\
y(n) &= \delta(n-50) + 2\delta(n-51)
\end{align*}
This shows us that $y(n) = x(n-50) = \delta(n-50) + 2\delta(n-51)$. Note that this could have been done with one term if we use the formula $y(n) = (h * x)(n) = \sum_{k\in\Z} h(k)x(n-k)$. In that form we could have done $h(50)x(n-50) = \delta(n-50) + 2\delta(n-51)$. Here the system can be represented as $ x(n) \to \fbox{ $\delta(n-50)$} \to x(n-50) = y(n) $.
%\begin{figure}[htp]
%\centering
%\includegraphics{images/conv50ans.ps}
%\caption{ $y(n) = x(n-50)$ }
%\label{ fig:conv50ans }
%\end{figure}
\begin{center}
\begin{pspicture}(-1,-1)(3,3)
% line
\psline{-*}(0.000,0)(0.000,1)
% value
\rput(0.25,1){(1)}
% position
\rput(0.000,-0.4){50}
% line
\psline{-*}(1.000,0)(1.000,2)
% value
\rput(1.25,2){(2)}
% position
\rput(1.000,-0.4){51}
% axes
\psline{->}(-1.5,0)(3,0)
\rput(2.5,-0.3){ $n$ }
\rput(2,2){ $y(n)$ }
\end{pspicture}
\end{center}
\end{solution}
\begin{problem}
$h(n) = \delta(n) + \delta(n-1)$, $\quad x(n)=\delta(n) + 2\delta(n-1)$.
\end{problem}
\begin{solution}
Here is our system: $$ x(n) \to \fbox{ $\delta(n) + \delta(n-1)$} \to x(n) + x(n-1) = y(n) $$
\begin{align*}
y(n) &= \sum \limits _{m \in \Z} x(m)h(n-m) = (x * h)(n) \\
y(n) &= x(0)h(n) + x(1)h(n-1) \\
y(n) &= 1\cdot h(n) + 2\cdot h(n-1) \\
y(n) &= \left[ \delta(n) + \delta(n-1) \right] + 2 \left[ \delta(n-1) + \delta(n-2) \right] \\
y(n) &= \delta(n) + 3\delta(n-1) + 2\delta(n-2)
\end{align*}
\end{solution}
Here is a cool method for solving a convolution problem in discrete time:
Let the impulse response $h(n) = 5\delta(n) + 3\delta(n-1) +5\delta(n-2) + 2\delta(n-3)$ be represented as a vector,
$
\left[
\begin{array}{c}
5 \\
3 \\
5 \\
2
\end{array}
\right]
$.
Now, let the input signal $x(n) = 3\delta(n) + 2\delta(n-1) + \delta(n-2)$ be represented as a matrix whose columns are shifted copies of $x$:
$
\left[
\begin{array}{cccc}
3 & 0 & 0 & 0 \\
2 & 3 & 0 & 0 \\
1 & 2 & 3 & 0 \\
0 & 1 & 2 & 3 \\
0 & 0 & 1 & 2 \\
0 & 0 & 0 & 1
\end{array}
\right]
$.
Then multiply the matrices:
$
\left[
\begin{array}{cccc}
3 & 0 & 0 & 0 \\
2 & 3 & 0 & 0 \\
1 & 2 & 3 & 0 \\
0 & 1 & 2 & 3 \\
0 & 0 & 1 & 2 \\
0 & 0 & 0 & 1
\end{array}
\right]
\cdot
\left[
\begin{array}{c}
5 \\
3 \\
5 \\
2
\end{array}
\right]
=
\left[
\begin{array}{c}
15 \\
19 \\
26 \\
19 \\
9 \\
2
\end{array}
\right]
= 15\delta(n) + 19\delta(n-1) + 26\delta(n-2) + 19\delta(n-3) + 9\delta(n-4) + 2\delta(n-5)
$
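A sketch of the same method in code (assuming NumPy and SciPy are available), building the banded matrix with \texttt{scipy.linalg.toeplitz} and comparing against ordinary convolution:
\begin{verbatim}
# Convolution as a Toeplitz-matrix product.
import numpy as np
from scipy.linalg import toeplitz

h = np.array([5, 3, 5, 2])               # impulse response as a vector
x = np.array([3, 2, 1])                  # input signal
col = np.r_[x, np.zeros(len(h) - 1)]     # first column: x padded with zeros
row = np.r_[x[0], np.zeros(len(h) - 1)]  # first row: x(0), then zeros
X = toeplitz(col, row)                   # the 6x4 banded matrix shown above
print(X @ h)                             # [15. 19. 26. 19.  9.  2.]
print(np.convolve(x, h))                 # same answer
\end{verbatim}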
\subsection{Frequency Response}
When determining $y(n) = (x * h)(n)$, what if we use $x(n) = e^{i\omega n}$?
\begin{align*}
y(n) &= \sum \limits_{k\in\Z}h(k)x(n-k) \\
y(n) &= \sum \limits_{k\in\Z}h(k)e^{i \omega (n-k)} \\
y(n) &= \left( \sum \limits_{k\in\Z}h(k)e^{-i \omega k} \right) e^{i \omega n} \\
y(n) &= H(\omega)e^{i \omega n}
\end{align*}
Since the summation part of the formula has no dependency on $n$, it relies entirely on the frequency $\omega$, thus $H(\omega)$ is the \emph{frequency response}. $H(\omega)$ is generally complex-valued. The magnitude $\left| H(\omega) \right|$ is known as the \emph{magnitude response}, or magnitude of frequency response, and the phase $\angle H(\omega)$ is the \emph{phase response}.
A system where the input signal is a complex exponential can be described as follows:
$$ e^{i \omega n} \to \fbox{H} \to H(\omega) e^{i \omega n}$$
We say that complex exponentials are eigenfunctions of LTI systems ($A\vec{v}=\lambda\vec{v}$). Note that $\omega$ plots are continuous, even though the output signal is in discrete time. Generally we plot $\omega$ over an interval of length $2\pi$, from $-\pi$ to $\pi$.
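The eigenfunction property is easy to verify numerically. The following sketch (assuming NumPy; the three-tap impulse response is a made-up example) compares the convolution sum against $H(\omega)e^{i\omega n}$:
\begin{verbatim}
# Complex exponentials are eigenfunctions of LTI systems.
import numpy as np

h = np.array([1.0, 2.0, 1.0])   # a hypothetical impulse response
w = np.pi / 4                   # test frequency
H = np.sum(h * np.exp(-1j * w * np.arange(len(h))))  # H(w) = sum h(k)e^{-iwk}

n = np.arange(20)
x = np.exp(1j * w * n)
y = sum(h[k] * np.exp(1j * w * (n - k)) for k in range(len(h)))
print(np.allclose(y, H * x))    # True: output is H(w) times the input
\end{verbatim}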
\begin{problem}
Can the system be LTI?
$$ e^{i\frac{\pi}{4}n} \to \fbox{F} \to e^{i\frac{\pi}{3}n} $$
\end{problem}
\begin{solution}
Not linear time-invariant, since LTI systems cannot create new frequencies. They can, however, eliminate frequencies in the input.
\end{solution}
\subsection{Additional Problems}
\begin{problem}
Determine if the following signals are time-invariant.
\begin{enumerate}
\item $y(t) = \Re(x(t))$
\item $y(t) = \cos ( x(t) )$
\item $y(n) = \sum \limits_{k=0}^{n}x(k)$
\item $y(n) = x(4n+1)$
\item $y(t) = \int^{2t}_{-\infty}x(\tau)d\tau$
\end{enumerate}
\end{problem}
\begin{solution}
Here are the solutions:
\begin{enumerate}
\item $y(t) = \Re(x(t))$ is time invariant.
Let $\hat{x}(t) = x(t-T)$, then $\hat{y}(t) = \Re(\hat{x}(t)) = \Re(x(t-T)) = \frac{x(t-T) + x^*(t-T)}{2} = y(t-T) $
\item $y(t) = \cos ( x(t) )$ is time invariant.
Let $\hat{x}(t) = x(t-T)$, then $\hat{y}(t) = \cos(\hat{x}(t)) = \cos(x(t-T)) = y(t-T)$
\item $y(n) = \sum \limits_{k=0}^{n} x(k)$ is not time invariant.
Let $\hat{x}(n) = x(n-N)$. Then we have:
\begin{align*}
\hat{y}(n) &= \sum \limits_{k=0}^{n} \hat{x}(k) \quad \mbox{note that the sum uses }k \mbox{, } n \mbox{ is in the limit} \\
\hat{y}(n) &= \sum \limits_{k=0}^{n} x(k-N)\\
\hat{y}(n) &= \sum \limits_{m=-N}^{n-N} x(m) \quad \mbox{change of variable } (m = k-N) \\
\hat{y}(n) &\neq y(n-N)
\end{align*}
\item $y(n) = x(4n+1)$ is not time invariant. This one can be slightly tricky:
Let $\hat{x}(n) = x(n-N)$, then $\hat{y}(n) = \hat{x}(4n+1) = x((4n + 1) - N) \neq x(4(n-N) + 1) = y(n-N)$
\item $y(t) = \int^{2t}_{-\infty}x(\tau)d\tau$ is not time invariant.
Let $\hat{x}(t) = x(t-T)$, then
\begin{align*}
\hat{y}(t) &= \int^{2t}_{-\infty}\hat{x}(\tau)d\tau \\
\hat{y}(t) &= \int^{2t}_{-\infty}x(\tau -T)d\tau \\
\hat{y}(t) &= \int^{2t-T}_{-\infty}x(\lambda)d\lambda \quad \mbox{variable change } (\lambda = \tau - T) \\
\hat{y}(t) &\neq \int^{2t-2T}_{-\infty}x(\lambda)d\lambda = y(t-T)
\end{align*}
\end{enumerate}
\end{solution}
\subsection{Filters}
What will a system do in response to a pure frequency? In other words, when we pass a complex exponential through a system, what does its frequency response tell us about that system?
\begin{problem}
Determine the frequency response when the impulse response is defined as $h(n) = \delta(n-N)$, $N \in \Z$, where the system takes an arbitrary signal $x(n)$:
$$ x(n) \to \fbox{$h(n)$} \to y(n) $$
\end{problem}
\begin{solution}
\begin{align*}
y(n) &= \sum \limits_{\ell \in \Z} h(\ell)x(n-\ell) = 1 \cdot x(n-N) \\
H(\omega_0) &= \sum \limits_{\ell \in \Z} h(\ell)e^{-i\omega_0\ell} = 1 \cdot e^{-i\omega_0N}
\end{align*}
If $N=0$, then $h(n)=\delta(n)$ and nothing happens. Since $\left| H(\omega_0) \right| = 1$, the filter does not alter the amplitude of any frequency; it passes the signal through and only alters \emph{phase}:
$$\angle H(\omega) = -\omega N$$
\end{solution}
\begin{problem}
Given a \emph{two-point moving average filter} $y(n) = \frac{x(n) + x(n-1)}{2}$, determine the impulse response $h(n)$ and frequency response $H(\omega)$. Also sketch a graph of the magnitude response.
\end{problem}
\begin{solution}
To find the impulse response, simply let $x(n) = \delta(n)$ to get $h(n) = \frac{1}{2}\delta(n) + \frac{1}{2}\delta(n-1)$.
What about the frequency response? Well, we know that $H(\omega) = \sum \limits_{\ell \in \Z} h(\ell)e^{-i\omega\ell}$. First thing to think about --- how many non-zero terms?
\begin{align*}
H(\omega) &= \sum \limits_{\ell \in \Z} h(\ell)e^{-i\omega\ell} \\
H(\omega) &= \sum \limits_{\ell = 0}^1 h(\ell)e^{-i\omega\ell} \quad \mbox{only looking at } \ell \in \{0,1\}\\
H(\omega) &= h(0)e^{0} + h(1)e^{-i\omega}\\
H(\omega) &= \frac{1}{2} + \frac{1}{2}e^{-i\omega}\\
H(\omega) &= e^{-i\omega/2} \left[ \frac{1}{2}e^{i\omega/2} + \frac{1}{2}e^{-i\omega/2} \right] \quad \mbox{factor out } e^{-i\omega/2} \\
H(\omega) &= e^{-i\omega/2} \frac{\left[ e^{i\omega/2} + e^{-i\omega/2} \right]}{2} \\
H(\omega) &= e^{-i\omega/2} \cos(\omega/2) \\
H(\omega) &= \cos(\omega/2) e^{-i\omega/2}
\end{align*}
Since $\cos(\omega/2)$ is not always positive, we need to take a slight detour: in \emph{discrete time}, $H(\omega + 2\pi) = H(\omega)$.
\begin{proof}
\begin{align*}
H(\omega + 2\pi) &= \sum \limits_{\ell \in \Z} h(\ell)e^{-i(\omega + 2\pi)\ell} \\
H(\omega + 2\pi) &= \sum \limits_{\ell \in \Z} h(\ell)e^{-i\omega\ell} e^{-i 2\pi\ell} \\
H(\omega + 2\pi) &= \sum \limits_{\ell \in \Z} h(\ell)e^{-i\omega\ell} \quad \mbox{since } \ell \in \Z \Rightarrow e^{-i 2\pi\ell} = 1 \\
H(\omega + 2\pi) &= H(\omega)
\end{align*}
\end{proof}
So we only plot over some contiguous interval of length $2\pi$. We have $H(\omega) = \cos(\omega/2)e^{-i\omega/2}$, which gives us $\left| H(\omega) \right| = \left| \cos(\omega/2) \right| = \cos(\omega/2)$ for $\left| \omega \right| \leq \pi$.
%\begin{figure}[htp]
%\centering
%\includegraphics{images/magres.ps}
%\caption{ Magnitude response of a low pass filter }
%\label{ fig:magres }
%\end{figure}
%\psset{xunit=1, yunit=1, algebraic, arrowscale=1.5}
\begin{center}
\begin{pspicture}(-3.5,-3.25)(3.5,2)
\psplot[algebraic,linewidth=1.5pt]{-3.14}{3.14}{cos(x/2)}
\psaxes[showorigin=false,labels=none, Dx=3.14](0,0)(-3.25,0)(3.25,0)
\rput(3.14, -0.35){$\pi$}
\rput(-3.14, -0.35){$-\pi$}
\rput(0, -0.35){$0$}
\rput(1.25,1.25 ){$\left| H(\omega) \right| =\cos(\omega/2)$}
\end{pspicture}
\end{center}
If we look at the graph of the magnitude response $\left| H(\omega) \right|$, we find that it favors lower frequencies. Therefore, we can determine that this system is a \emph{Low Pass Filter}: it zeros out any frequencies near $\pi$ or $-\pi$, and is roughly 1 for values near 0. In general, values of $\omega$ near $0$ represent lower frequencies, values near $\pi$ and $-\pi$ represent higher frequencies, and values near $\pi/2$ and $-\pi/2$ represent the frequencies in the range between. The highest-frequency discrete-time signal is $(-1)^n = e^{i\pi n}$.
\end{solution}
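A quick numerical check of this result (assuming NumPy): evaluate $H(\omega)=\frac{1}{2}+\frac{1}{2}e^{-i\omega}$ on a grid and compare its magnitude to $\cos(\omega/2)$:
\begin{verbatim}
# |H(w)| = cos(w/2) for the two-point moving average, |w| <= pi.
import numpy as np

w = np.linspace(-np.pi, np.pi, 501)
H = 0.5 + 0.5 * np.exp(-1j * w)
print(np.allclose(np.abs(H), np.cos(w / 2)))  # True
\end{verbatim}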
\begin{problem}
Consider the \emph{High Pass Filter} defined by $y(n) = \frac{x(n)-x(n-1)}{2}$. Determine its impulse response $g(n)$ and frequency response $G(\omega)$. Graph its magnitude response $\left| G(\omega) \right|$. Also find the phase response $\angle G(\omega)$.
\end{problem}
\begin{solution}
Plugging in $x(n) = \delta(n)$, we get $g(n) = \frac{1}{2}\delta(n) - \frac{1}{2}\delta(n-1)$. Solving for the frequency response we get:
\begin{align*}
G(\omega) &= \sum \limits_{\ell \in \Z} g(\ell)e^{-i\omega\ell} \\
G(\omega) &= \frac{1}{2} - \frac{1}{2}e^{-i\omega} \\
G(\omega) &= e^{-i\omega/2} \left[ \frac{1}{2}e^{i\omega/2} - \frac{1}{2}e^{-i\omega/2} \right] \\
G(\omega) &= e^{-i\omega/2} i\sin(\omega/2) \\
G(\omega) &= e^{-i\omega/2} e^{i\pi/2}\sin(\omega/2) \\
G(\omega) &= \sin(\omega/2)e^{i(\pi/2 -\omega/2)}
\end{align*}
We can then come up with the phase response; for $\omega \in (-\pi,0)$, $\sin(\omega/2)$ is negative, so we absorb the sign by subtracting $\pi$ from the phase:
$$\angle G(\omega) = \begin{cases} \frac{\pi}{2}-\frac{\omega}{2} & 0 \leq \omega \lt \pi \\ (\frac{\pi}{2}-\frac{\omega}{2})-\pi & -\pi \lt \omega \lt 0 \\ \end{cases}$$
%\begin{figure}[htp]
%\centering
%\includegraphics{images/magres2.ps}
%\caption{ Magnitude response of a high pass filter }
%\label{ fig:magres2 }
%\end{figure}
%\psset{xunit=1, yunit=1, algebraic, arrowscale=1.5}
\begin{center}
\begin{pspicture}(-3.5,-1.25)(3.5,2)
\psplot[algebraic,linewidth=1.5pt]{-3.14}{3.14}{abs(sin(x/2))}
\psaxes[showorigin=false,labels=none, Dx=3.14](0,0)(-3.25,0)(3.25,0)
\rput(3.14, -0.35){$\pi$}
\rput(-3.14, -0.35){$-\pi$}
\rput(0, -0.35){$0$}
\rput(1,1.25 ){$\left| G(\omega) \right| =\left| \sin(\omega/2) \right|$}
\end{pspicture}
\end{center}
\end{solution}
\newpage
\section{Week 5}
\subsection{LPFs and HPFs}
When we looked at filters, we found that the system $y(n)=\frac{x(n)+x(n-1)}{2}$ is a low pass filter. This is an example of a \emph{Finite-Duration Impulse Response} (FIR) filter. FIR filters have no recursion.
The \emph{Region of Support} of $h$ is the set of values of $n$ for which $h(n) \neq 0$; for an FIR filter, it is finite.
\begin{problem}
Here is an example of recursion: a feedback loop represented by the first-order difference equation
$$ y(n) = \alpha y(n-1) + x(n) \mbox{, where } \alpha \in (0,1) \mbox{ and the initial condition is } y(-1) = 0.$$
Determine the frequency response $H(\omega)$ and the magnitude response $\left| H(\omega) \right|$.
\end{problem}
\begin{solution}
First, let's solve for a few values of $h(n)$:
\begin{align*}
h(n) &= \alpha h(n-1) + \delta(n) \\
h(0) &= \alpha h(-1) + \delta(0) \\
h(0) &= 0 + 1 \\
h(1) &= \alpha h(0) + \delta(1) \\
h(1) &= \alpha + 0 \\
h(2) &= \alpha h(1) + \delta(2) \\
h(2) &= \alpha^2 \\
h(3) &= \alpha^3\\
\end{align*}
This seems to show that $h(n) = \begin{cases} \alpha^n & n \geq 0 \\ 0 & n\lt 0 \end{cases}$
We can simplify this expression by using the \emph{Discrete-Time Unit Step} function, $u(n) = \begin{cases} 0 & n\lt 0 \\ 1 & n\geq0 \end{cases}$. Now we can define the impulse response as
$$ h(n) = \alpha^n \cdot u(n) $$
This is an example of an \emph{Infinite-Duration Impulse Response} (IIR). Let's write $y(n)$ in terms of its input signal $x$, removing the recursive form, and then find the frequency response in two ways.
\emph{Step 1: convolve $h$ with an arbitrary input signal $x(n)$.}
\begin{align*}
y(n) &= (x*h)(n) \\
y(n) &= \sum \limits_{k=-\infty}^{\infty}h(k)x(n-k) \\
y(n) &= \sum \limits_{k=0}^{\infty}\alpha^k x(n-k) \quad \mbox{ change limits since } h(n) = \alpha^n u(n)\\
y(n) &= x(n) + \alpha x(n-1) + \cdots
\end{align*}
\emph{Step 2: find the frequency response using $H(\omega) = \sum \limits_{n=-\infty}^{\infty} h(n)e^{-i\omega n}$.}
\begin{align*}
H(\omega) &= \sum \limits_{n=-\infty}^{\infty} h(n) e^{-i\omega n} \\
H(\omega) &= \sum \limits_{n=-\infty}^{\infty} \alpha^n u(n) e^{-i\omega n} \\
H(\omega) &= \sum \limits_{n=0}^{\infty} \alpha^n e^{-i\omega n} \quad \mbox{ change limits since } h(n) = \alpha^n u(n)\\
H(\omega) &= \sum \limits_{n=0}^{\infty} \left( \alpha e^{-i\omega} \right)^n \quad \mbox{ group as one exponential } \\
H(\omega) &= \frac{1}{1-\alpha e^{-i \omega}} \quad \mbox{ since } \left| \alpha e^{-i \omega} \right| = \left| \alpha \right| \left| e^{-i\omega} \right| = \left| \alpha \right| \lt 1\\
\end{align*}
Since $\left| \alpha e^{-i \omega} \right| \lt 1$, the sum $\sum \limits_{n=0}^{\infty} \left( \alpha e^{-i\omega } \right)^n$ converges to $ \frac{1}{1-\alpha e^{-i \omega}}$.
\emph{Step 3: find the frequency response using the recursive formula.}
Let $x(n) = e^{i\omega n}$ and $y(n) = H(\omega)e^{i\omega n}$, then plug into the recursive formula:
\begin{align*}
y(n) &= \alpha y(n-1) + x(n) \\
H(\omega)e^{i\omega n} &= \alpha H(\omega)e^{i\omega (n-1)} + e^{i\omega n} \\
H(\omega)e^{i\omega n} &= \alpha H(\omega)e^{i\omega n} e^{-i\omega} + e^{i\omega n} \\
H(\omega) &= \alpha H(\omega) e^{-i\omega} + 1 \quad \mbox{ divide by } e^{i\omega n} \\
H(\omega) - \alpha H(\omega) e^{-i\omega} &= 1 \\
H(\omega) \left(1 - \alpha e^{-i\omega} \right) &= 1 \\
H(\omega) &= \frac{1}{1 - \alpha e^{-i\omega}}
\end{align*}
We implicitly used the fact that $\left| \alpha \right| \lt 1$: otherwise, $H(\omega) = \sum \limits_{n\in\Z} h(n)e^{-i\omega n}$ would be divergent.
Given that $H(\omega) = \frac{1}{1 - \alpha e^{-i\omega}}$, we can find the magnitude response $\left| H(\omega) \right|$.
\begin{align*}
H(\omega) &= \frac{1}{1 - \alpha e^{-i\omega}} \\
H(\omega) &= \frac{e^{i\omega}}{e^{i\omega} - \alpha } \quad \mbox{multiply by } e^{i\omega} \\
\left| H(\omega) \right| &= \left| \frac{e^{i\omega}}{e^{i\omega} - \alpha } \right| \\
\left| H(\omega) \right| &= \frac{\left| e^{i\omega} \right|}{\left| e^{i\omega} - \alpha \right| } \\
\left| H(\omega) \right| &= \frac{1}{\left| e^{i\omega} - \alpha \right| } \\
\end{align*}
%\begin{center}
%\includegraphics{images/filters/eminusalpha.ps}
%\end{center}
\begin{center}
\begin{pspicture}(-4,-5)(4,4)
% y-axis
\rput(0.3,3.75){ $Im$ }
\psline{->}(0,-3.75)(0,3.75)
% x-axis
\rput(3.75,0.3){ $Re$ }
\psline{->}(-3.75,0)(3.75,0)
% the circle
\pscircle(0,0){ 3 }
% quadrants
\rput(0.3,3.3){ $i^1$ }
\rput(0.3,-3.3){ $i^3$ }
\rput(-3.3,0.3){ $i^2$ }
\rput(3.3,0.3){ $i^0$ }
% new vector
\rput(1.75,2){$e^{i\omega}$}
\psline[linewidth=1.25 pt]{->}(0,0)(2.121,2.121)
% new vector
\rput(0.725,-0.2){$\alpha$}
\psline[linewidth=1.25 pt]{->}(0,0)(1.500,0.000)
% new vector
\rput(2.3,1){$e^{i\omega}-\alpha$}
\psline[linewidth=1.5 pt]{->}(1.500,0.000)(2.121,2.121)
\rput(-0.75,-4.25){$1+\alpha$}
\rput(2.25,-4.25){$1-\alpha$}
\psline{<->}(-3,-4)(1.5,-4)
\psline{<->}(1.5,-4)(3,-4)
\psline[linestyle=dashed](3,-4.5)(3,0)
\psline[linestyle=dashed](-3,-4.5)(-3,0)
\psline[linestyle=dashed](1.5,-4.5)(1.5,0)
\end{pspicture}
\end{center}
By inspection, we can find that this represents a low pass filter. When $\omega = 0$, we have $\left| e^{i\omega} - \alpha \right| = \left| 1 - \alpha \right|$, and when $\omega = \pi$ or $\omega = -\pi$, then $\left| e^{i\omega} - \alpha \right| = \left| 1 + \alpha \right|$, as can be seen on the unit circle. The vector $e^{i\omega}-\alpha$ has maximum length when $\omega = \pm \pi$, where the magnitude response is least, since the magnitude of this vector is in the denominator; the magnitude response is greatest when the denominator is least, at $\omega = 0$. This is why the filter is a low pass filter. We can also take note of the curvature by acknowledging that when $\omega$ is near $\pm \pi$, the magnitude of $e^{i\omega}-\alpha$ changes much less than when $\omega$ is near 0.
%\begin{center}
%\includegraphics{images/filters/lpf.ps}
%\end{center}
%\psset{xunit=1, yunit=1, algebraic, arrowscale=1.5}
\begin{center}
\begin{pspicture}(-3.5,-1)(3.75,3.5)
\psplot[algebraic,linewidth=1.5pt,plotpoints=1000]{-3.14}{3.14}{cos(x)+1.3}
\psaxes[showorigin=false,labels=none, Dx=3.14](0,0)(-3.25,0)(3.25,2.5)
\psline[linestyle=dashed](-3.14,0.3)(3.14,0.3)
\psline[linestyle=dashed](-3.14,2.3)(3.14,2.3)
\rput(3.6,2.3){$\frac{1}{1-\alpha}$}
\rput(3.6,0.3){$\frac{1}{1+\alpha}$}
\rput(3.14, -0.35){$\pi$}
\rput(-3.14, -0.35){$-\pi$}
\rput(0, -0.35){$0$}
\end{pspicture}
\end{center}
\end{solution}
Design-oriented analysis allows you to generalize to similar systems. Notice that in the above example, if we wanted to make a high-pass filter, all we would need to do is take $\alpha \in (-1,0)$. In the vector picture, $\alpha$ then sits on the negative real axis, and the vector $e^{i\omega}-\alpha$ points from $\alpha$ to $e^{i\omega}$. The magnitude response would still have a value of $\frac{1}{1-\alpha}$ at $\omega = 0$, and $\frac{1}{1+\alpha}$ at $\omega = \pm \pi$. The only difference is that the values at $\omega = \pm \pi$ are now the maxima.
To make an LPF or HPF sharper, simply let $\abs{\alpha}$ approach 1, but not equal 1. We need a safety margin, since numerical approximations and noise could push the pole outside of the unit circle and leave us with an unstable system ($\sum \alpha^n$ diverges when $\abs{\alpha} \geq 1$).
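As a quick sanity check, we can evaluate $\abs{H(\omega)} = \frac{1}{\abs{e^{i\omega}-\alpha}}$ on a frequency grid and confirm the low-pass/high-pass behavior numerically. This is a minimal sketch in Python with NumPy; the grid size and the values $\alpha = \pm 0.9$ are arbitrary choices, not from the notes.
\begin{verbatim}
import numpy as np

w = np.linspace(-np.pi, np.pi, 1001)            # frequency grid
for alpha in (0.9, -0.9):                       # LPF, then HPF
    mag = 1.0 / np.abs(np.exp(1j * w) - alpha)  # |H(w)| = 1/|e^{iw} - alpha|
    w_peak = w[np.argmax(mag)]                  # frequency of the maximum
    print(f"alpha={alpha:+.1f}: |H| peaks at w={w_peak:+.2f}, "
          f"|H(0)|={mag[500]:.2f}, |H(pi)|={mag[-1]:.2f}")
\end{verbatim}
For $\alpha = 0.9$ the peak sits at $\omega = 0$ (value $\frac{1}{1-\alpha} = 10$); for $\alpha = -0.9$ it moves to $\omega = \pm\pi$, exactly as the vector picture predicts.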
\section{Week 6}
\subsection{Discussion}
\subsubsection{Comb Filter}
If we look at the low-pass filter $H(\omega) = \frac{1}{1-\alpha e^{-i\omega}}$, we find that we have a filter whose magnitude response looks like
%\begin{center}
%\includegraphics{images/filters/lpf.ps}
%\end{center}
\psset{unit=1cm, algebraic, arrowscale=1.5}
\begin{center}
\begin{pspicture}(-3.5,-1.25)(3.5,3)
\psplot[algebraic,linewidth=1.5pt,plotpoints=1000]{-3.14}{3.14}{cos(x)+1.3}
\psaxes[showorigin=false,labels=none, Dx=3.14](0,0)(-3.25,0)(3.25,2.5)
\psline[linestyle=dashed](-3.14,0.3)(3.14,0.3)
\psline[linestyle=dashed](-3.14,2.3)(3.14,2.3)
\rput(3.6,2.3){$\frac{1}{1-\alpha}$}
\rput(3.6,0.3){$\frac{1}{1+\alpha}$}
\rput(3.14, -0.35){$\pi$}
\rput(-3.14, -0.35){$-\pi$}
\rput(0, -0.35){$0$}
\end{pspicture}
\end{center}
Let's define a new frequency response $H_N(\omega) = \frac{1}{1 - \alpha e^{-i\omega N}}$. Notice that this corresponds to the difference equation $y(n) - \alpha y(n-N) = x(n)$:
\begin{align*}
H_N(\omega) &= \frac{1}{1-\alpha e^{-i\omega N}} \\
H_N(\omega) - \alpha H_N(\omega) e^{-i\omega N} &= 1 \\
H_N(\omega) e^{i\omega n} - \alpha H_N(\omega) e^{i\omega (n-N)} &= e^{i\omega n} \quad \mbox{multiply by } e^{i\omega n} \\
y(n) - \alpha y(n-N) &= x(n) \quad \mbox{where } y(n) = H_N(\omega)e^{i\omega n},\ x(n) = e^{i\omega n} \\
\end{align*}
To graph the magnitude response for a particular value of $N$, we can analyze the vectors representing the numerator and denominator. But, we can also perform a simple trick, since we know the graph of $H_1(\omega)$:
%\begin{center}
%\includegraphics{images/comb/comb1.ps}
%\end{center}
\psset{unit=1cm, algebraic, arrowscale=1.5}
\begin{center}
\begin{pspicture}(-3.5,-1.25)(3.5,3)
\psplot[algebraic,linewidth=1.5pt,plotpoints=1000]{-3.14}{3.14}{cos(x)+1.3}
\psaxes[showorigin=false,labels=none, Dx=1.62](0,0)(-3.25,0)(3.25,2.5)
\psline[linestyle=dashed](-3.14,0.3)(3.14,0.3)
\psline[linestyle=dashed](-3.14,2.3)(3.14,2.3)
\rput(3.6,2.3){$\frac{1}{1-\alpha}$}
\rput(3.6,0.3){$\frac{1}{1+\alpha}$}
\rput(0,3){$N=1$}
\rput(3.14, -0.35){$\pi$}
\rput(1.62, -0.35){$\pi/2$}
\rput(-1.62, -0.35){$-\pi/2$}
\rput(-3.14, -0.35){$-\pi$}
\rput(0, -0.35){$0$}
\end{pspicture}
\end{center}
Now let $H_2(\omega) = H_1(2\omega)$; the graph is the $N=1$ graph compressed horizontally by a factor of 2, so we divide the labels on the frequency axis by 2:
%\begin{center}
%\includegraphics{images/comb/comb2.ps}
%\end{center}
\psset{unit=1cm, algebraic, arrowscale=1.5}
\begin{center}
\begin{pspicture}(-3.5,-1.25)(3.5,3)
\psplot[algebraic,linewidth=1.5pt,plotpoints=1000]{-3.14}{3.14}{cos(4*x/2)+1.3}
\psaxes[showorigin=false,labels=none, Dx=1.62](0,0)(-3.25,0)(3.25,2.5)
\psline[linestyle=dashed](-3.14,0.3)(3.14,0.3)
\psline[linestyle=dashed](-3.14,2.3)(3.14,2.3)
\rput(3.6,2.3){$\frac{1}{1-\alpha}$}
\rput(3.6,0.3){$\frac{1}{1+\alpha}$}
\rput(0,3){$N=2$}
\rput(3.14, -0.35){$\pi$}
\rput(1.62, -0.35){$\pi/2$}
\rput(-1.62, -0.35){$-\pi/2$}
\rput(-3.14, -0.35){$-\pi$}
\rput(0, -0.35){$0$}
\end{pspicture}
\end{center}
Notice that there will always be $N$ peaks and $N$ valleys in one period. In general, we can use the first graph to find the graph of the magnitude response for any value of $N$: the higher the $N$, the more oscillations.
%\begin{center}
%\includegraphics{images/comb/comb4.ps}
%\end{center}
\psset{unit=1cm, algebraic, arrowscale=1.5}
\begin{center}
\begin{pspicture}(-3.5,-1.25)(3.5,3)
\psplot[algebraic,linewidth=1.5pt,plotpoints=1000]{-3.14}{3.14}{cos(8*x/2)+1.3}
\psaxes[showorigin=false,labels=none, Dx=1.62](0,0)(-3.25,0)(3.25,2.5)
\psline[linestyle=dashed](-3.14,0.3)(3.14,0.3)
\psline[linestyle=dashed](-3.14,2.3)(3.14,2.3)
\rput(3.6,2.3){$\frac{1}{1-\alpha}$}
\rput(3.6,0.3){$\frac{1}{1+\alpha}$}
\rput(0,3){$N=4$}
\rput(3.14, -0.35){$\pi$}
\rput(1.62, -0.35){$\pi/2$}
\rput(-1.62, -0.35){$-\pi/2$}
\rput(-3.14, -0.35){$-\pi$}
\rput(0, -0.35){$0$}
\end{pspicture}
\end{center}
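We can confirm the ``$N$ peaks per period'' claim numerically by counting local maxima of $\abs{H_N(\omega)}$ around one full period. A minimal sketch in Python with NumPy; $\alpha = 0.8$ and the grid size are arbitrary choices.
\begin{verbatim}
import numpy as np

alpha = 0.8
w = np.linspace(-np.pi, np.pi, 4000, endpoint=False)  # one full period
for N in (1, 2, 4):
    mag = 1.0 / np.abs(1 - alpha * np.exp(-1j * w * N))
    # treat the grid as circular, since |H_N| has period 2*pi
    peaks = np.sum((mag > np.roll(mag, 1)) & (mag > np.roll(mag, -1)))
    print(f"N={N}: local maxima per period = {peaks}")   # prints N
\end{verbatim}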
\subsubsection{Notch Filter}
\begin{problem}
Graph the magnitude response and determine the first difference equation for the output signal $y(n)$ if the frequency response is given by:
$$
H(\omega) = \frac{ \left( e^{i\omega} - e^{i\frac{\pi}{4}}\right) \left(e^{i\omega} - e^{-i\frac{\pi}{4}}\right)}{ \left( e^{i\omega} - \alpha e^{i\frac{\pi}{4}}\right) \left(e^{i\omega} - \alpha e^{-i\frac{\pi}{4}}\right) }
$$
\end{problem}
\begin{solution}
We can break this problem down to its four component vectors and find that it is a \emph{notch filter} that zeros out frequencies at $\pm \pi/4$ and is approximately 1 everywhere else.
To determine the difference equation, let's simplify the expression:
\begin{align*}
H(\omega) &= \frac{ \left( e^{i\omega} - e^{i\frac{\pi}{4}}\right) \left(e^{i\omega} - e^{-i\frac{\pi}{4}}\right)}{ \left( e^{i\omega} - \alpha e^{i\frac{\pi}{4}}\right) \left(e^{i\omega} - \alpha e^{-i\frac{\pi}{4}}\right) } \\
H(\omega) &= \frac {e^{i2\omega} - e^{i\omega}e^{-i\pi/4}-e^{i\omega}e^{i\pi/4}+1}{e^{i2\omega} - \alpha e^{i\omega}e^{-i\pi/4}-\alpha e^{i\omega}e^{i\pi/4}+\alpha^2} \\
H(\omega) &= \frac {e^{i2\omega} - e^{i\omega} \left( e^{-i\pi/4}+e^{i\pi/4} \right) +1}{e^{i2\omega} - \alpha e^{i\omega} \left( e^{-i\pi/4}+e^{i\pi/4}\right)+\alpha^2} \\
H(\omega) &= \frac {e^{i2\omega} - e^{i\omega} \left( 2\cos(\frac{\pi}{4}) \right) +1}{e^{i2\omega} - \alpha e^{i\omega} \left( 2\cos(\frac{\pi}{4})\right)+\alpha^2} \\
H(\omega) &= \frac {e^{i2\omega} - e^{i\omega} \left( \sqrt{2} \right) +1}{e^{i2\omega} - \alpha e^{i\omega} \left( \sqrt{2}\right)+\alpha^2} \\
H(\omega) &= \frac {1 - \sqrt{2}e^{-i\omega} + e^{-i2\omega}}{1 - \alpha \sqrt{2}e^{-i\omega}+\alpha^2e^{-i2\omega}} \quad \mbox{multiply by } e^{-i2\omega} \mbox{ on top and bottom}\\
\end{align*}
Now we have a much simpler form:
$$H(\omega) = \frac {1 - \sqrt{2}e^{-i\omega} + e^{-i2\omega}}{1 - \alpha \sqrt{2}e^{-i\omega}+\alpha^2e^{-i2\omega}} $$
Let's cross-multiply (imagine a 1 beneath $H(\omega)$):
\begin{align*}
H(\omega) - \alpha\sqrt{2}H(\omega)e^{-i\omega} + \alpha^2H(\omega)e^{-i2\omega} &= 1 - \sqrt{2}e^{-i\omega} + e^{-i2\omega} \\
H(\omega)e^{i\omega n} - \alpha\sqrt{2}H(\omega)e^{i\omega(n-1)} + \alpha^2H(\omega)e^{i\omega(n-2)} &= e^{i\omega n} - \sqrt{2}e^{i\omega(n-1)} + e^{i\omega(n-2)} \\
y(n) - \alpha\sqrt{2}y(n-1) + \alpha^2y(n-2) &= x(n) - \sqrt{2}x(n-1) + x(n-2)
\end{align*}
\end{solution}
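We can check both the notch and the difference equation at once by handing the coefficients we just derived to SciPy's \texttt{freqz}. This is a hedged sketch, not part of the notes; $\alpha = 0.95$ is an arbitrary choice close to 1.
\begin{verbatim}
import numpy as np
from scipy.signal import freqz

alpha = 0.95
b = [1.0, -np.sqrt(2), 1.0]                 # x(n) - sqrt(2)x(n-1) + x(n-2)
a = [1.0, -alpha * np.sqrt(2), alpha**2]    # y(n) coefficients
for w0 in (0.0, np.pi / 4, np.pi / 2, np.pi):
    w, H = freqz(b, a, worN=[w0])
    print(f"w = {w0:.3f}: |H| = {abs(H[0]):.4f}")  # ~1, 0, ~1, ~1
\end{verbatim}
The magnitude is zero at $\omega = \pi/4$ and close to 1 at the other test frequencies, as the vector analysis predicts.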
\subsection{Difference Equations}
Looking at the difference equation:
$$y(n)=\alpha y(n-1)+x(n)$$
First note that we need zero initial conditions for this to be LTI. Suppose instead we had an initial condition $y(-1)=k$ for some nonzero $k \in \R$. Let $\hat{x}(n) = x(n-N)$, and let $\hat{y}$ be the response to $\hat{x}$; then $\hat{y}$ must satisfy the difference equation $\hat{y}(n) = \alpha \hat{y}(n-1) + \hat{x}(n)$ and the condition $\hat{y}(-1)=k$. Time invariance would require $\hat{y}(n)=y(n-N)$, which gives $\hat{y}(-1)=y(-1-N)$; but nothing forces $y(-1-N)$ to equal $k$. The system cannot be LTI.
The following three equations are interrelated closely. We should be able to traverse between them:
$$
h(n)=\alpha^n u(n) \iff H(\omega) = \frac{1}{1-\alpha e^{-i\omega}} \iff y(n)=\alpha y(n-1)+x(n)
$$
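Traversing from the difference equation back to the impulse response can be checked numerically: drive the recursion with an impulse and compare against $\alpha^n u(n)$. A minimal sketch (SciPy's \texttt{lfilter} implements exactly this kind of recursion; $\alpha = 0.5$ is an arbitrary choice):
\begin{verbatim}
import numpy as np
from scipy.signal import lfilter

alpha, L = 0.5, 20
x = np.zeros(L); x[0] = 1.0                   # x(n) = delta(n)
y = lfilter([1.0], [1.0, -alpha], x)          # y(n) = alpha y(n-1) + x(n)
print(np.allclose(y, alpha ** np.arange(L)))  # h(n) = alpha^n u(n) -> True
\end{verbatim}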
\subsection{Dirac Delta}
The continuous-time impulse, Dirac Delta:
%\begin{figure}[htp]
%\centering
%\includegraphics{images/dirac/dirac.ps}
%\caption{ The Dirac Delta }
%\label{ fig:dirac }
%\end{figure}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-3,-1)(3,1.4)
% position
\rput(-2.571,-0.4){-3}
% position
\rput(-1.714,-0.4){-2}
% position
\rput(-0.857,-0.4){-1}
% line
\psline{->}(0.000,0)(0.000,1)
% value
\rput(0.35,1){(1)}
% position
\rput(0.000,-0.4){0}
% position
\rput(0.857,-0.4){1}
% position
\rput(1.714,-0.4){2}
% position
\rput(2.571,-0.4){3}
% axes
\psline{->}(-3.428,0)(3.428,0)
\rput(3.428,-0.3){ $t$ }
\end{pspicture}
\end{center}
The (1) in the figure represents the area under the impulse, or the \emph{strength} of the impulse. It is not the height as in discrete-time, as the height of the Dirac Delta is infinite.
A definition, but not a very useful one, is
$$ \delta(t) = \begin{cases} \infty & t=0 \\ 0 & \mbox{otherwise} \end{cases} $$
Let's look at some others that may prove more useful. Consider the unit-area pulse, whose minimum $t$ value is $-\Delta/2$, whose maximum $t$ value is $\Delta/2$, and whose height is $1/\Delta$. This can be defined more concretely as
$$
\delta_\Delta(t)=
\begin{cases}
0 & t<-\frac{\Delta}{2} \\
\frac{1}{\Delta} & -\frac{\Delta}{2}\leq t \leq\frac{\Delta}{2} \\
0 & t\gt\frac{\Delta}{2} \\
\end{cases}
$$
%\begin{figure}[htp]
%\centering
%\includegraphics{images/dirac/dirac-unitarea.ps}
%\caption{ The Unit Area }
%\label{ fig:dirac-unitarea }
%\end{figure}
\begin{center}
\begin{pspicture}(-3,-3)(3,3)
%\psaxes[labels=none, Dx=1]{->}(0,0)(-3,0)(3,0)
%\pscustom[fillstyle=solid,fillcolor=blue!40,linestyle=none,algebraic]{
\psplot{-1}{1}{ 1 }
\psline(1,0)(-1,0)
%}
\psplot[algebraic,linewidth=2pt,algebraic]{-1}{1}{1}
\psline[linewidth=2pt](1,0)(1,1)
\psline[linewidth=2pt](-1,0)(-1,1)
\rput(-1.175,-0.35){$-\frac{\Delta}{2}$}
\rput(0,-0.35){0}
\rput(1,-0.35){$\frac{\Delta}{2}$}
\rput(3.25,-0.25){$t$}
\psline{<->}(1.5,0)(1.5,1)
\rput(1.725,0.5){$\frac{1}{\Delta}$}
\end{pspicture}
\end{center}
Then $\delta(t) = \lim_{\Delta \to 0}\delta_\Delta(t)$. The unit-area pulse tends to infinite height and zero width, but its area does not change.
Let's consider a triangle whose base spans $t$ from $-\Delta$ to $\Delta$, and whose height is $1/\Delta$. This can be defined as
$$
\delta_\Delta(t)=
\begin{cases}
0 & t\lt -\Delta \\
\frac{1}{\Delta^2}t+\frac{1}{\Delta} & -\Delta\leq t \lt 0 \\
-\frac{1}{\Delta^2}t+\frac{1}{\Delta} & 0\leq t \leq\Delta \\
0 & t\gt\Delta \\
\end{cases}
$$
%\begin{figure}[htp]
%\centering
%\includegraphics{images/dirac/dirac-triangle.ps}
%\caption{ A triangle with an area of 1 }
%\label{ fig:dirac-triangle }
%\end{figure}
{\bf A triangle with an area of 1}
\begin{center}
\begin{pspicture}(-3,-1)(3,1)
%\psaxes[labels=none,Dx=2]{->}(0,0)(-3,0)(3,0)
%\pscustom[fillstyle=solid,fillcolor=blue,linestyle=none,algebraic]{
\psplot{-2}{0}{ 1/4*x+1/2}
\psline(0,0)(-2,0)
\psplot{0}{2}{ -1/4*x+1/2}
\psline(2,0)(0,0) % flipped for some reason
%}
\psplot[algebraic,linewidth=2pt,algebraic]{-2}{0}{1/4*x + 1/2 }
\psplot[algebraic,linewidth=2pt,algebraic]{0}{2}{-1/4*x + 1/2 }
\rput(-2,-0.25){$-\Delta$}
\rput(0,-0.25){0}
\rput(2,-0.25){$\Delta$}
\rput(3.25,-0.25){$t$}
\psline{<->}(2.35,0)(2.35,0.5)
\rput(2.65,0.25){$\frac{1}{\Delta}$}
\end{pspicture}
\end{center}
Again, $\delta(t) = \lim_{\Delta \to 0}\delta_\Delta(t)$. Let's consider the Gaussian probability density function, where $\delta_\Delta(t) = \frac{1}{\sqrt{2\pi\Delta^2}}e^{-t^2/2\Delta^2}$.
%\begin{figure}[htp]
%\centering
%\includegraphics{images/dirac/dirac-gaussian.ps}
%\caption{ Gaussian Probability Density Function }
%\label{ fig:dirac-gaussian }
%\end{figure}
The standard deviation is $\Delta$, and the maximum height, at $t=0$, is $\frac{1}{\sqrt{2\pi\Delta^2}}$. In this case $\int_{-\infty}^{\infty}\delta_\Delta(t)dt=1$, and again, $\delta(t) = \lim_{\Delta \to 0}\delta_\Delta(t)$.
The truth is, we don't care about what the delta \emph{is}; we care about what it does to other functions when it comes into contact with them.
Take an arbitrary function $f(t)$ and multiply it by the unit-area pulse $\delta_\Delta(t)$. The product is zero outside $[-\Delta/2, \Delta/2]$, and inside that interval its height is $f(t)/\Delta$. If $\Delta$ is sufficiently small, then we can approximate the height as $f(0)/\Delta$ from $-\Delta/2$ to $\Delta/2$. Thus, the area under the product is equal to $f(0)$:
$$ \int_{-\infty}^{\infty}f(t)\delta_\Delta(t)dt = f(0)$$
Let's take the limit of this integral as $\Delta$ approaches 0:
\begin{align*}
\int_{-\infty}^{\infty}f(t)\delta_\Delta(t)dt &= f(0) \\
\lim_{\Delta \to 0}\int_{-\infty}^{\infty}f(t)\delta_\Delta(t)dt &= f(0) \\
\int_{-\infty}^{\infty}f(t)\lim_{\Delta \to 0}\delta_\Delta(t)dt &= f(0) \quad \mbox{exchange the limit and the integral} \\
\int_{-\infty}^{\infty}f(t)\delta(t)dt &= f(0)
\end{align*}
The value of the function is picked up wherever the impulse is active. This is a sampling property of the Dirac Delta. We can generalize this formula slightly:
$$ \int_{-\infty}^{\infty}f(t)\delta(t-t_0)dt=f(t_0) $$
On a graph, the resulting impulse would have strength $f(t_0)$. This is called the \emph{sifting property} of the Dirac Delta.
\begin{problem}
What are the values of these integrals?
\begin{enumerate}
\item $\int_{-\infty}^{\infty}\cos(\omega_0t)\delta(t-T)dt$
\item $\int_{-\infty}^{\infty}\delta(t)e^{i\omega_0 t}dt$
\end{enumerate}
\end{problem}
\begin{solution}
These can be fairly simple to integrate:
\begin{enumerate}
\item $\int_{-\infty}^{\infty}\cos(\omega_0t)\delta(t-T)dt = \cos(\omega_0T)$
\item $\int_{-\infty}^{\infty}\delta(t)e^{i\omega_0 t}dt = 1$
\end{enumerate}
\end{solution}
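The sifting property can also be seen numerically by replacing $\delta$ with a narrow Gaussian $\delta_\Delta$ and integrating. A sketch (the test function $\cos(2t)$, the width $\Delta = 10^{-3}$, and the grid are all arbitrary choices):
\begin{verbatim}
import numpy as np

dt, Delta = 1e-4, 1e-3
t = np.arange(-5, 5, dt)
f = np.cos(2.0 * t)                          # arbitrary smooth test function
for t0 in (0.0, 1.0, -2.0):
    # Gaussian approximation to delta(t - t0), standard deviation Delta
    d = np.exp(-(t - t0)**2 / (2 * Delta**2)) / np.sqrt(2 * np.pi * Delta**2)
    print(f"t0={t0:+.1f}: integral = {np.sum(f * d) * dt:.4f}, "
          f"f(t0) = {np.cos(2 * t0):.4f}")
\end{verbatim}
Each Riemann-sum integral agrees with $f(t_0)$ to several decimal places.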
The sifting property lets us write any signal as a weighted combination of shifted impulses:
$$ x(t) = \int_{-\infty}^{\infty}x(\tau)\delta(t-\tau)d\tau $$
If we know the response of an LTI system $S$ to an impulse,
$$ \delta(t) \to \fbox{S} \to h(t) \mbox{,}$$
then linearity and time invariance give us the continuous-time convolution integral:
$$ y(t) = \int_{-\infty}^{\infty}x(\tau)h(t-\tau)d\tau $$
\subsection{Dirac Delta and Unit Step}
Looking at the Dirac Delta, we can see some interesting properties:
\begin{align*}
\int_{-\infty}^{\infty}\delta(t-\tau)f(t)dt &= f(\tau) \\
\int_{\tau^-}^{\tau^+}\delta(t-\tau)f(t)dt &= f(\tau) \\
\int_{\tau-\epsilon}^{\tau+\epsilon}\delta(t-\tau)f(t)dt &= f(\tau) \\
\int_{\tau-\epsilon}^{\tau+\epsilon}\delta(t-\tau)dt &= 1 \\
\end{align*}
What happens if we write an integral like this?
$$\int_{-\infty}^{t}\delta(\tau)d\tau$$
It turns out that this is a very important element in characterizing continuous-time LTI systems, known as the \emph{Continuous-Time Unit Step}, which can be defined as
$$ u(t) = \int_{-\infty}^{t}\delta(\tau)d\tau = \begin{cases} 0 & t<0 \\ 1 & t \geq 0 \\ \end{cases}$$
Note, however, that there is a discontinuity at $t=0$; by convention, we shall include it for $t \geq 0$. The height of the discontinuity happens to be the coefficient in front of $\delta(t)$. We just showed how the CT unit step can be defined in terms of the integral of the Dirac Delta. Notice then that the derivative of the unit step will spawn a Dirac Delta; thus, we can define the Dirac Delta as:
$$ \delta(t) = \dot{u}(t) = \frac{d}{dt}u(t) = \frac{d}{dt}\int_{-\infty}^{t}\delta(\tau)d\tau = \delta(t) $$
If we define $u_\Delta(t)$ as
$$
u_\Delta(t)=
\begin{cases}
0 & t<-\frac{\Delta}{2} \\
\frac{1}{\Delta}t+\frac{1}{2} & -\frac{\Delta}{2}\leq t \leq\frac{\Delta}{2} \\
1 & t>\frac{\Delta}{2} \\
\end{cases}
$$
% \begin{figure}[htp]
% \centering
% \includegraphics{images/dirac/delta-unit-step.ps}
% \caption{ The unit step with the discontinuity from $-\frac{\Delta}{2}$ to $\frac{\Delta}{2}$ }
% \label{ fig:delta-unit-step }
% \end{figure}
\begin{center}
\begin{pspicture}(-3.2,-3.2)(6,6)
%\psaxes[labels=none,Dx=1]{->}(-3,0)(3.25,0)
\pscustom[fillstyle=solid,fillcolor=blue!40,linestyle=none,algebraic]{
\psplot{-1}{1}{ 1/2*x+1/2}
\psline(1,0)(-1,0)
\psplot{1}{3}{ 1 }
\psline(3,0)(1,0) % flipped for some reason
}
\psplot[algebraic,linewidth=2pt,algebraic]{-1}{1}{1/2*x + 1/2 }
\psplot[algebraic,linewidth=2pt,algebraic]{1}{3}{1}
\rput(-1.25,-0.25){$-\frac{\Delta}{2}$}
\rput(0,-0.25){0}
\rput(1,-0.25){$\frac{\Delta}{2}$}
\rput(3.25,-0.25){$t$}
\psline[linestyle=dashed](1,0)(1,1)
\psline{<->}(2.35,0)(2.35,1)
\rput(2.65,0.5){$1$}
\end{pspicture}
\end{center}
then we can take the derivative and we find that $\dot{u}_\Delta(t) = \delta_\Delta(t)$, where $\delta_\Delta(t)$ is the unit-area pulse given by
$$
\dot{u}_\Delta(t) = \delta_\Delta(t)=
\begin{cases}
0 & t<-\frac{\Delta}{2} \\
\frac{1}{\Delta} & -\frac{\Delta}{2}\leq t \leq\frac{\Delta}{2} \\
0 & t>\frac{\Delta}{2} \\
\end{cases}
$$
% \begin{figure}[htp]
% \centering
% \includegraphics{images/dirac/dirac-unitarea.ps}
% \caption{ The unit area }
% \label{ fig:dirac-unit }
% \end{figure}
\begin{center}
\begin{pspicture}(-3.2,-3.2)(6,6)
%\psaxes[labels=none, Dx=1]{->}(-3,0)(3,0)
% \pscustom[fillstyle=solid,fillcolor=blue!40,linestyle=none,algebraic]{
\psplot{-1}{1}{ 1 }
\psline(1,0)(-1,0)
% }
\psplot[algebraic,linewidth=2pt,algebraic]{-1}{1}{1}
\psline[linewidth=2pt](1,0)(1,1)
\psline[linewidth=2pt](-1,0)(-1,1)
\rput(-1.175,-0.35){$-\frac{\Delta}{2}$}
\rput(0,-0.35){0}
\rput(1,-0.35){$\frac{\Delta}{2}$}
\rput(3.25,-0.25){$t$}
\psline{<->}(1.5,0)(1.5,1)
\rput(1.725,0.5){$\frac{1}{\Delta}$}
\end{pspicture}
\end{center}
If we take the limit,
$$ \lim_{\Delta \to 0} \dot{u}_\Delta(t) = \delta(t) = \dot{u}(t) $$
we can clearly see that the derivative relates the CT unit step and the Dirac Delta. What about the discrete-time counterparts? We can write the Kronecker Delta as $\delta(n) = u(n) - u(n-1)$, the difference of a unit step and a shifted version of a unit step. We can write the unit step in discrete-time in two ways:
\begin{align*}
u(n) &= \sum \limits_{k=0}^{\infty}\delta(n-k) \\
u(n) &= \sum \limits_{\ell=-\infty}^{n}\delta(\ell) \quad \mbox{substitute } \ell = n-k
\end{align*}
We can verify that this works because when $n<0$ we get $u(n)=0$, and for $n \geq 0$, we have $u(n)=1$.
\subsection{LTI Systems and Their Impulse Responses}
Let's consider a continuous-time LTI system where $x(t) =\delta(t)$; then we have
\begin{align*}
x \to &\fbox{H} \to y \\
\delta(t) \to &\fbox{H} \to h(t) \\
\end{align*}
where $h(t)$ represents our impulse response. Note that if we mention ``impulse response'', then it is implied that a given system is LTI. We can demonstrate this property here:
$$ \alpha\delta(t-T) \to \fbox{H} \to \alpha h(t-T) $$
We know from the sifting property that we can write any signal as the integral of the product of itself and the Dirac Delta:
$$ x(t) = \int_{-\infty}^{\infty}x(\tau)\delta(t-\tau)d\tau$$
This looks very similar to the DT counterpart, where we write a signal as a linear combination of impulses:
\begin{align*}
x(n) &= \sum \limits_{m=-\infty}^{\infty}x(m)\delta(n-m) \\
y(n) &= \sum \limits_{m=-\infty}^{\infty}x(m)h(n-m) = (x*h)(n)\\
\end{align*}
Just as we wrote signals in DT as sums of weighted delta functions, in CT we can write the convolution integral:
\begin{nicebox}
$$y(t) = \int_{-\infty}^{\infty}x(\tau)h(t-\tau)d\tau = (x*h)(t) $$
\end{nicebox}
The convolution operator still has its commutativity property, which we can demonstrate by a substitution:
\begin{align*}
y(t) &= \int_{-\infty}^{\infty}x(\tau)h(t-\tau)d\tau = (x*h)(t)\\
y(t) &= \int_{\infty}^{-\infty}x(t-\mu)h(\mu)\left(-d\mu\right) \quad \mbox{let } \mu=t-\tau, \mbox{limits have switched}\\
y(t) &= \int_{-\infty}^{\infty}h(\mu)x(t-\mu)d\mu \quad \mbox{absorb negative by switching limits again }\\
y(t) &= \int_{-\infty}^{\infty}h(\mu)x(t-\mu)d\mu = (h*x)(t)\\
\end{align*}
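Both the convolution integral and its commutativity are easy to sanity-check by discretizing the integral as a Riemann sum. A sketch, assuming the arbitrary test signals $x(t) = e^{-t}u(t)$ and $h(t) = e^{-2t}u(t)$, whose convolution works out to $e^{-t} - e^{-2t}$ for $t \geq 0$:
\begin{verbatim}
import numpy as np

dt = 1e-3
t = np.arange(0, 5, dt)
x = np.exp(-t)                            # x(t) = e^{-t} u(t), sampled
h = np.exp(-2 * t)                        # h(t) = e^{-2t} u(t), sampled
xh = np.convolve(x, h)[:len(t)] * dt      # Riemann sum for (x*h)(t)
hx = np.convolve(h, x)[:len(t)] * dt      # same with the roles swapped
exact = np.exp(-t) - np.exp(-2 * t)       # closed form of (x*h)(t)
print(np.allclose(xh, hx))                # commutativity -> True
print(np.max(np.abs(xh - exact)) < 1e-2)  # matches closed form -> True
\end{verbatim}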
\subsection{Strongly-Peaked Functions}
There exists a set of functions that physicists refer to as \emph{strongly-peaked functions}, which are narrow and very tall. Consider Newton's second law, $F=Ma$. Newton actually wrote it as $F=\frac{d}{dt}(Mv)$, where $P=Mv$ is the momentum, which implies that $F = \frac{dP}{dt}$. Then the change in momentum can be written as
$$ \int_{-\epsilon}^{\epsilon}F(\tau)d\tau = P(\epsilon) - P(-\epsilon) $$
Consider a Pinto and a Maserati. If we look at the graphs of their forces over time, we would see that there is far more area under the curve representing the Maserati over the same interval. The book \emph{An Introduction to Mechanics} by Kleppner and Kolenkow (1973) gives an example with a rubber ball: consider a rubber ball whose mass is $0.2kg$, whose speed at impact is $8\frac{m}{s}$, and whose duration of contact is $2\epsilon=10^{-3}s$. We find that $P(-\epsilon)=-1.6kg\frac{m}{s}$, $P(\epsilon)=1.6kg\frac{m}{s}$, and $ P(\epsilon) - P(-\epsilon) = 3.2kg\frac{m}{s}$. This tells us that
$$ 3.2 = \int_{-\epsilon}^{\epsilon}F(\tau)d\tau \mbox{,}$$
and the force graph represents a strongly-peaked function. Take the average height, and call this $F_{\mbox{avg}}$. Then
\begin{align*}
F_{\mbox{avg}} \Delta t &= 3.2 \\
F_{\mbox{avg}} &= 3.2/\Delta t = 3200 N \approx 720 lbs
\end{align*}
In other words, as the contact duration shrinks,
$$\lim_{\Delta t \to 0} F_{\mbox{avg}} = \infty$$
\subsection{CT Frequency Response}
Let's pass a pure frequency in continuous-time through the convolution integral:
\begin{align*}
y(t) &= \int_{-\infty}^{\infty}h(\tau)x(t-\tau)d\tau \\
y(t) &= \int_{-\infty}^{\infty}h(\tau)e^{i \omega (t-\tau)}d\tau \\
y(t) &= \int_{-\infty}^{\infty}h(\tau)e^{i \omega t}e^{-i \omega \tau}d\tau \\
y(t) &= \left( \int_{-\infty}^{\infty}h(\tau)e^{-i \omega \tau}d\tau\right)e^{i \omega t} \\
y(t) &= H(\omega)e^{i \omega t} \\
\end{align*}
In CT (and DT), an LTI system cannot create new frequencies. The golden rule still holds:
\begin{align*}
x & \to \fbox{h} \to y \\
e^{i\omega t} & \to \fbox{h} \to H(\omega)e^{i\omega t}
\end{align*}
Thus, \emph{the continuous-time frequency response} is defined as $$H(\omega) = \int_{-\infty}^{\infty}h(\tau)e^{-i\omega \tau}d\tau$$
Some important distinctions between the DT and CT frequency response:
\begin{enumerate}
\item In discrete-time, we have the $2\pi$ ambiguity:
$$H(\omega + 2\pi)=\sum \limits_{n\in \Z} h(n) e^{-i\omega n} e^{-i2\pi n} = \sum \limits_{n\in \Z} h(n) e^{-i\omega n} = H(\omega)$$
because $n$ is an element of $\Z$.
\item In continuous-time, we have a different story:
$$ H(\omega +2\pi) = \int_{-\infty}^{\infty}h(t)e^{-i\omega t}e^{-i2\pi t}dt \neq H(\omega)$$
because $t$ is an element of $\R$.
\end{enumerate}
\begin{example}
Given an impulse response $h(t) = e^{-\alpha t} u(t)$, where $\alpha \gt 0$, graph the magnitude response and solve for the frequency response.
\begin{align*}
h(t) &= e^{-\alpha t} u(t) \quad \alpha>0 \\
H(\omega) &= \int_{-\infty}^{\infty} h(t)e^{-i\omega t}dt \\
H(\omega) &= \int_{-\infty}^{\infty} \left( e^{-\alpha t} u(t) \right)e^{-i \omega t}dt \\
H(\omega) &= \int_{0}^{\infty} e^{-\left(i \omega + \alpha\right)t}dt \\
H(\omega) &= \left. \frac{e^{-\left(i \omega + \alpha\right)t}}{-\left(i \omega + \alpha\right)} \right|_{0}^{\infty} \\
\end{align*}
Since $\alpha \gt 0$, as $t$ approaches $\infty$, $e^{-\left( i\omega + \alpha\right)t} $ approaches $0$.
\begin{align*}
H(\omega) &= \lim_{t \to \infty} \left( \frac{e^{-\left(i \omega + \alpha\right)t}}{-\left(i \omega + \alpha\right)}\right) - \left( \frac{e^{0}}{-\left(i \omega + \alpha\right)} \right) \\
H(\omega) &= 0 - \left( \frac{1}{-\left(i \omega + \alpha\right)} \right) \\
H(\omega) &= \frac{1}{i \omega + \alpha} \\
\end{align*}
\end{example}
A good way to graph this is to write the equation as $\frac{1}{i\omega - (-\alpha)}$ and analyze the vector $i \omega - (-\alpha)$, which points from $-\alpha$ to $i\omega$.
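As a numerical cross-check of the example, we can approximate the defining integral $H(\omega) = \infint h(\tau)e^{-i\omega\tau}d\tau$ with a Riemann sum and compare it against $\frac{1}{i\omega+\alpha}$. A sketch ($\alpha = 1$ and the truncation at $t = 50$ are arbitrary; $e^{-\alpha t}$ is negligible well before then):
\begin{verbatim}
import numpy as np

alpha, dt = 1.0, 1e-4
t = np.arange(0, 50, dt)
h = np.exp(-alpha * t)                            # h(t) = e^{-alpha t} u(t)
for w in (0.0, 1.0, 5.0):
    H_num = np.sum(h * np.exp(-1j * w * t)) * dt  # Riemann sum for H(w)
    H_exact = 1.0 / (1j * w + alpha)
    print(f"w={w}: numeric {H_num.real:+.4f}{H_num.imag:+.4f}j, "
          f"exact {H_exact.real:+.4f}{H_exact.imag:+.4f}j")
\end{verbatim}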
\section{Week 7}
\subsection{Interconnections of LTI Systems }
\subsubsection{Parallel}
Given a parallel system $h$ composed of two systems $f$ and $g$, determine an expression for $h(t)$ and $H(\omega)$. A parallel system splits the input into two branches, passes each branch through its own system, and sums the branch outputs with an adder to form the output.
%\begin{center}
%\includegraphics{images/parallel/para.ps}
%\end{center}
\begin{center}
\begin{pspicture}(0,-3)(8,3)
\rput(0,0){$x(t)$}
\rput(4,1.5){$f(t)$}
\rput(4,-1.5){$g(t)$}
\rput(8.2,0){$y(t)$}
\rput(1.5,-2){$h(t)$}
\psframe(1,-2.5)(7,2.5)
\psframe(3,1)(5,2)
\psframe(3,-1)(5,-2)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(0.5,0)(1.5,0)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(1.5,1.5)(3,1.5)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(1.5,-1.5)(3,-1.5)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(6.5,1.5)(6.5,0.25)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(6.5,-1.5)(6.5,-0.25)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(6.75,0)(7.75,0)
\psline[linewidth=1.25 pt](1.5,-1.5)(1.5,1.5)
\psline[linewidth=1.25 pt](5,1.5)(6.5,1.5)
\psline[linewidth=1.25 pt](5,-1.5)(6.5,-1.5)
\psline[linewidth=1.25 pt](6,-1.5)(6.5,-1.5)
\pscircle(6.5,0){0.25}
\psline(6.25,0)(6.75,0)
\psline(6.5,0.5)(6.5,-0.5)
\end{pspicture}
\end{center}
Let $x(t) = \delta(t)$
$$
h(t) = f(t) + g(t)
$$
Let $x(t) = e^{i\omega t}$
\begin{align*}
H(\omega)e^{i\omega t} &= F(\omega)e^{i\omega t} + G(\omega)e^{i\omega t} \\
H(\omega)e^{i\omega t} &= e^{i\omega t}\lr{F(\omega) + G(\omega)} \\
H(\omega) &= F(\omega) + G(\omega) \\
\end{align*}
\begin{example}
Consider the system defined in the frequency domain by the equation $$ H(\omega) = \frac{1 - e^{-i\omega T}}{i\omega + \alpha} $$
Write the system as a parallel system composed of two systems $F$ and $G$. Find $h(t)$ in terms of $f(t)$ and $g(t)$.
\begin{align*}
H(\omega) &= \frac{1 - e^{-i\omega T}}{i\omega + \alpha} \\
H(\omega) &= \frac{1}{i\omega + \alpha} - \frac{e^{-i\omega T}}{i\omega + \alpha}\\
\end{align*}
Here is a hint:
\begin{align*}
H(\omega) = \frac{1}{i\omega + \alpha} &\iff h(t) = e^{-\alpha t}u(t) \\
h(t) = \delta(t) &\iff H(\omega) = 1
\end{align*}
Let's see if we can find a relationship:
\begin{align*}
H(\omega) &= \frac{1}{i\omega + \alpha} - \frac{e^{-i\omega T}}{i\omega + \alpha}\\
H(\omega) &= F(\omega) - e^{-i\omega T}F(\omega) \\
H(\omega) &= F(\omega) - G(\omega) \\
h(t) &= f(t) - g(t) \\
h(t) &= e^{-\alpha t}u(t) - g(t) \\
h(t) &= e^{-\alpha t}u(t) - f(t-T) \\
\end{align*}
Let's solve for $G(\omega)$ using $g(t) = f(t-T)$:
\begin{align*}
G(\omega) &= \infint g(t) e^{-i\omega t}dt \\
G(\omega) &= \infint f(t-T) e^{-i\omega t}dt \\
G(\omega) &= \infint f(\lambda) e^{-i\omega (\lambda + T)}d\lambda \quad \mbox{let } \lambda = t - T\\
G(\omega) &= e^{-i\omega T}\infint f(\lambda) e^{-i\omega \lambda}d\lambda \\
G(\omega) &= e^{-i\omega T}F(\omega) \\
\end{align*}
Note that if we let $g(t) = \delta(t-T)$ we get $e^{-i\omega T}$:
\begin{align*}
\infint g(t) e^{-i \omega t} dt = e^{-i\omega T} \iff g(t) = \delta(t-T)
\end{align*}
We can now make the following additions to the statements above:
\begin{align*}
H(\omega) = \frac{1}{i\omega + \alpha} &\iff h(t) = e^{-\alpha t}u(t) \\
h(t) = \delta(t) &\iff H(\omega) = 1 \\
h(t) = \delta(t-T) &\iff H(\omega) = e^{-i\omega T} \\
g(t) = f(t-T) &\iff G(\omega) = e^{-i\omega T}F(\omega)
\end{align*}
So finally we have
\begin{align*}
h(t) &= f(t) - g(t) \\
h(t) &= e^{-\alpha t}u(t) - e^{-\alpha(t-T)}u(t-T) \\
\end{align*}
Now we can see that we could have defined the system like this:
% \begin{center}
% \includegraphics{images/parallel/para2.ps}
% \end{center}
\begin{center}
\begin{pspicture}(0,-3)(8,3)
\rput(0,0){$x(t)$}
\rput(4,1.5){$f(t)$}
\rput(3,-1.5){$f(t)$}
\rput(5.25,-1.5){$\delta(t-T)$}
\rput(8.2,0){$y(t)$}
\rput(1.5,-2){$h(t)$}
\psframe(1,-2.5)(7,2.5)
\psframe(3,1)(5,2)
\psframe(2.5,-2)(3.5,-1)
\psframe(4.5,-2)(6,-1)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(0.5,0)(1.5,0)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(1.5,1.5)(3,1.5)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(1.5,-1.5)(2.5,-1.5)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(3.5,-1.5)(4.5,-1.5)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(6.5,1.5)(6.5,0.25)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(6.5,-1.5)(6.5,-0.25)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(6.75,0)(7.75,0)
\psline[linewidth=1.25 pt](1.5,-1.5)(1.5,1.5)
\psline[linewidth=1.25 pt](5,1.5)(6.5,1.5)
\psline[linewidth=1.25 pt](6,-1.5)(6.5,-1.5)
\pscircle(6.5,0){0.25}
\psline(6.25,0)(6.75,0)
\psline(6.5,0.5)(6.5,-0.5)
\end{pspicture}
\end{center}
We can also describe the same system as a cascade of $f(t)$ and $g(t)$, where $g(t) = \delta(t) - \delta(t-T)$.
\end{example}
\subsubsection{Cascade (series)}
Consider the cascading system:
% \begin{center}
% \includegraphics{images/cascade.ps}
% \end{center}
\begin{center}
\begin{pspicture}(0,-3)(8,3)
\rput(0,0){$x(t)$}
\rput(8.2,0){$y(t)$}
\rput(2.75,0){$f(t)$}
\rput(5.25,0){$g(t)$}
\psframe(1,-1.5)(7,1.5)
\psframe(2,-0.5)(3.5,0.5)
\psframe(4.5,-0.5)(6,0.5)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(0.5,0)(2,0)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(3.5,0)(4.5,0)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(6,0)(7.75,0)
\end{pspicture}
\end{center}
To find $h$, simply let $x$ = $\delta$. When $x$ first enters the system, it goes through $f$ and becomes $f$. Then we pass $f$ through the system $g$. Thus $y$ is the convolution of $f$ and $g$:
$$ h(t) = (f*g)(t) $$
To find $H$, let $x(t) = e^{i \omega t}$. When $x$ first enters the system, it goes through $f$ and becomes $F(\omega)e^{i\omega t}$, since complex exponentials are eigenfunctions of LTI systems. Then the signal enters $g$ and becomes $F(\omega)G(\omega)e^{i\omega t}$. Therefore,
$$H(\omega) = G(\omega)F(\omega)$$
We can take note that convolution in the time domain corresponds to multiplication in the frequency domain:
\begin{align*}
h(t) = (f*g)(t) &\iff H(\omega) = F(\omega)G(\omega) \\
\end{align*}
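This correspondence is easy to verify numerically for finite-length DT impulse responses: convolve in time, evaluate the DTFTs on a grid, and compare. A sketch (the random length-8 sequences are arbitrary test choices):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
f = rng.standard_normal(8)               # arbitrary finite impulse response
g = rng.standard_normal(8)
h = np.convolve(f, g)                    # h = f * g in the time domain
w = np.linspace(-np.pi, np.pi, 50)
dtft = lambda s: s @ np.exp(-1j * np.outer(np.arange(len(s)), w))
print(np.allclose(dtft(h), dtft(f) * dtft(g)))   # H = F G -> True
\end{verbatim}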
\begin{example}
Consider the system
%$$ x(t) \to \fbox{ $\to \fbox{f(t)} \to \fbox{$\delta(t-T)$}\to$ } \to y $$
\begin{center}
\begin{pspicture}(0,-3)(8,3)
\rput(0,0){$x(t)$}
\rput(8.2,0){$y(t)$}
\rput(2.75,0){$f(t)$}
\rput(5.25,0){$\delta(t-T)$}
\psframe(1,-1.5)(7,1.5)
\psframe(2,-0.5)(3.5,0.5)
\psframe(4.5,-0.5)(6,0.5)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(0.5,0)(2,0)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(3.5,0)(4.5,0)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(6,0)(7.75,0)
\end{pspicture}
\end{center}
Let $x$ be $\delta$. When $x$ first enters the system it becomes $f(t)$. Then $f(t)$ enters the system given by $\delta(t-T)$, thus we get the convolution
$$ g(t) = \lr{f * \delta(\cdot-T)}(t) = f(t-T) $$
We can now determine $G(\omega)$ because $\delta(t-T)$ in the time domain corresponds to $e^{-i\omega T}$ in the frequency domain:
$$ G(\omega) = F(\omega)e^{-i\omega T} $$
\end{example}
\begin{example}
Given $f(n)$ and $g(n)$, find $H(\omega)$ and $h(n)$:
\begin{align*}
f(n) &= \sum \limits_{k=0}^{\infty}\alpha^k\delta(n-kN) \\
g(n) &= \delta(n) - \alpha\delta(n-N) \\
\end{align*}
Let's find $G(\omega)$ first, since it is simpler:
\begin{align*}
G(\omega) &= \sum \limits_{n\in\Z} \lr{\delta(n) - \alpha\delta(n-N)}e^{-i\omega n} \\
G(\omega) &= e^{-i\omega 0} - \alpha e^{-i\omega N} \\
G(\omega) &= 1 - \alpha e^{-i\omega N} \\
\end{align*}
This gives us some insight into $F(\omega)$:
\begin{align*}
F(\omega) &= \sum \limits_{n \in \Z} \lr{\sum \limits_{\ell=0}^{\infty} \alpha^\ell \delta(n-\ell N)}e^{-i\omega n} \\
F(\omega) &= \sum \limits_{n \in \Z} \lr{\delta(n) + \alpha \delta(n-N) + \alpha^2 \delta(n-2N) + \cdots }e^{-i\omega n} \\
F(\omega) &= \sum \limits_{n \in \Z}\delta(n)e^{-i\omega n} + \alpha \sum \limits_{n \in \Z}\delta(n-N)e^{-i\omega n} + \alpha^2 \sum \limits_{n \in \Z}\delta(n-2N)e^{-i\omega n} + \cdots \\
F(\omega) &= 1 + \alpha e^{-i\omega N} + \alpha^2 e^{-i\omega 2N} + \cdots \\
F(\omega) &= \sum \limits_{k \in \Z_{\oplus}} \alpha^k e^{-i\omega kN} \\
F(\omega) &= \sum \limits_{k \in \Z_{\oplus}} \lr{\alpha e^{-i\omega N}}^k \\
F(\omega) &= \frac{1}{1 - \alpha e^{-i\omega N}} \quad \mbox{geometric series, for } \abs{\alpha} \lt 1
\end{align*}
Now we can solve for $H(\omega)$:
\begin{align*}
H(\omega) &= F(\omega)G(\omega) \\
H(\omega) &= \frac{1}{1 - \alpha e^{-i\omega N}} \lr{1 - \alpha e^{-i\omega N}} \\
H(\omega) &= 1
\end{align*}
This implies that $h(n) = \delta(n)$: the cascade of $f$ and $g$ is the identity system.
\end{example}
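A numerical check needs a finite truncation of $f$, so the product is not exactly $\delta$; the truncation leaves a single residual tap of size $\alpha^{K+1}$ at the tail. A sketch ($\alpha$, $N$, and the truncation length $K$ are arbitrary choices):
\begin{verbatim}
import numpy as np

alpha, N, K = 0.7, 3, 60
f = np.zeros(K * N + 1)
f[::N] = alpha ** np.arange(K + 1)      # f(n) = sum_k alpha^k delta(n - kN)
g = np.zeros(N + 1)
g[0], g[N] = 1.0, -alpha                # g(n) = delta(n) - alpha delta(n - N)
h = np.convolve(f, g)                   # telescopes to delta(n) + tail term
print(h[0])                             # 1.0
print(np.max(np.abs(h[1:])))            # alpha^(K+1), vanishingly small
\end{verbatim}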
\subsubsection{Feedback}
Feedback systems are usually described in the frequency domain since it is simpler to articulate. Typically we are given a \emph{plant} and a \emph{controller} in a feedback system.
% \begin{center}
% \includegraphics{images/feedback/feedback.ps}
% \end{center}
\begin{center}
\begin{pspicture}(0,-3)(8,3)
\rput(0,0){$x$}
\rput(7.2,0){$y$}
\rput(3.75,0){$F$}
\rput(3.75,-2){$G$}
\rput(1.5,-2.5){$H$}
\psframe(1,-3)(6,1)
\psframe(3,-0.5)(4.5,0.5)
\psframe(3,-2.5)(4.5,-1.5)
% plus or minus for adder
\rput(2.1,-0.5){$-$} % input from loop
\rput(1.45,0.35){$+$} % input signal
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(0.25,0)(1.5,0)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(1.75,-2)(1.75,-0.25)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(4.5,0)(7,0)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(5.5,-2)(4.5,-2)
\psline[linewidth=1.25 pt](3,-2)(1.75,-2)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(2,0)(3,0)
\psline[linewidth=1.25 pt](5.5,0)(5.5,-2)
\pscircle(1.75,0){0.25}
\psline(1.5,0)(2,0)
\psline(1.75,0.25)(1.75,-0.25)
\end{pspicture}
\end{center}
Let's trace a signal through the loop, where $F$ is the plant, $G$ is the controller, and the fed-back signal is negated before being added to the input. Let $x(t) = e^{i\omega t}$; then $y(t) = H(\omega)e^{i\omega t}$. Call $q(t)$ the signal we trace; each line below is the signal at the next point around the loop, starting from the output $y(t)$ being fed back into $G$:
\begin{align*}
q(t) &= G(\omega)H(\omega)e^{i\omega t} \\
q(t) &= e^{i\omega t} - G(\omega)H(\omega)e^{i\omega t} \quad \mbox{the fed-back signal is negated and added to the input} \\
q(t) &= e^{i\omega t} \lr{1-G(\omega)H(\omega)} \\
q(t) &= F(\omega)\lr{1-G(\omega)H(\omega)} e^{i\omega t} \quad \mbox{ goes through $F$}\\
q(t) &= y(t) = H(\omega)e^{i\omega t} = F(\omega)\lr{1-G(\omega)H(\omega)} e^{i\omega t} \\
\end{align*}
Now after the loop has finished, we can solve for $H(\omega)$:
\begin{align*}
H(\omega)e^{i\omega t} &= F(\omega)\lr{1-G(\omega)H(\omega)} e^{i\omega t} \\
H(\omega) &= F(\omega)-G(\omega)F(\omega)H(\omega) \\
H(\omega)+G(\omega)F(\omega)H(\omega) &= F(\omega) \\
H(\omega)\lr{1+G(\omega)F(\omega)} &= F(\omega) \\
H(\omega) &= \frac{F(\omega)}{1+G(\omega)F(\omega)} \\
\end{align*}
This is called \emph{Black's formula}. If we had two positive signs at the adder, the equation would change only slightly:
$$ H(\omega) = \frac{F(\omega)}{1-G(\omega)F(\omega)} $$
To generalize this, we can state that the frequency response of the system is $\frac{\mbox{Forward Gain}}{1 - \mbox{Loop Gain}}$. Note that when traversing the loop, $F(\omega)$ and $G(\omega)$ are in cascade (a series), so the gain around the loop, including the negation at the adder, is $-F(\omega)G(\omega)$; this is what produces the $1+G(\omega)F(\omega)$ in the denominator.
\subsection{Frequency response of CT-LTI Systems}
\begin{example}
Consider an impulse response defined by
$$
h(t)=
\begin{cases}
1 & -T\leq t\leq T \\
0 & \mbox{otherwise}
\end{cases}
$$
\begin{enumerate}
\item Find $y(t)$ in terms of the input $x(t)$ and impulse response.
\end{enumerate}
\begin{align*}
y(t) &= \infint x(\tau)h(t-\tau)d\tau \\
y(t) &= \int_{t-T}^{t+T}x(\tau)d\tau \quad \mbox{ since } h(t-\tau) = 1 \mbox{ exactly when } t-T \leq \tau \leq t+T\\
\end{align*}
We can solve further if necessary:
\begin{align*}
y(t) &= \int_{t-T}^{t+T}e^{i\omega \tau}d\tau \quad \mbox{ let } x(t) = e^{i\omega t} \\
y(t) &= \left. \frac{e^{i\omega \tau}}{i\omega} \right|_{t-T}^{t+T} \\
y(t) &= \frac{e^{i\omega (t+T)}}{i\omega} - \frac{e^{i\omega (t-T)}}{i\omega} \\
y(t) &= \frac{e^{i\omega (t+T)}- e^{i\omega (t-T)}}{i\omega} \\
y(t) &= \frac{e^{i\omega t}\lr{e^{i\omega T} - e^{-i\omega T}}}{i\omega} \\
y(t) &= \frac{2\sin(\omega T)}{\omega}e^{i\omega t} \\
\end{align*}
What is this filter doing? Taking a look at $h(t)$ and the frequency response $ H(\omega) = \infint h(t) e^{-i\omega t}dt $, we can see that this is a (scaled) centered moving average. That is, at each $t$ we integrate $x$ over $[t-T, t+T]$ and plot the resulting area as $y(t)$, continuously.
\end{example}
Next: what is the frequency response, $H(\omega)$?
We know that the frequency response is defined by $$ H(\omega) = \infint h(t) e^{-i\omega t} dt $$
Since we are using $h(t)$ instead of $h(t-\tau)$, notice the limits of the integral are not the same as the last part of the example.
\begin{align*}
H(\omega) &= \int_{-T}^{T}e^{-i\omega t}dt \\
H(\omega) &= \left. \frac{e^{-i\omega t}}{-i\omega} \right|_{-T}^{T} \\
H(\omega) &= \frac{e^{-i\omega T} - e^{-i\omega (-T)} }{-i\omega} \\
H(\omega) &= \frac{e^{i\omega T} - e^{-i\omega T} }{i\omega} \\
H(\omega) &= \frac{ 2\sin(\omega T)}{\omega} \\
\end{align*}
We must take note of $H(\omega)$ when $\omega=0$. In this case, it is true that $H(0)=\int_{-T}^{T}dt=2T=\lim_{\omega \to 0}H(\omega)$, which agrees with our frequency response representation. When graphing this function, note that $H(\omega)$ is an even function of $\omega$: replacing $\omega$ with $-\omega$ reflects the graph over the vertical axis and leaves $\frac{2\sin(\omega T)}{\omega}$ unchanged, unlike the (odd) sine function on its own.
% \begin{center}
% \includegraphics{images/sinc/ctlti.ps}
% \end{center}
\psset{unit=0.75cm}
\begin{center}
\begin{pspicture}(-13,-2)(13,4)
\psplot[algebraic,linewidth=1.5pt]{-12.56}{12.56}{2*sin(x)/x}
\psplot[algebraic,linestyle=dashed]{-12.56}{-0.5}{abs(2/x)}
\psplot[algebraic,linestyle=dashed]{0.5}{12.56}{abs(2/x)}
\psplot[algebraic,linestyle=dashed]{0.5}{12.56}{-abs(2/x)}
\psplot[algebraic,linestyle=dashed]{-12.56}{-0.5}{-abs(2/x)}
\psaxes[showorigin=false,labels=none, Dx=3.14](0,0)(-12.6,0)(12.6,0)
\rput(5.7, 1){$2T$}
\psline[linestyle=dashed](0,2)(5,2)
\psline{<->}(5,0)(5,2)
\rput(3.14, -0.5){$\frac{\pi}{T}$}
\rput(6.28, -0.5){$\frac{2\pi}{T}$}
\rput(9.42, -0.5){$\frac{3\pi}{T}$}
\rput(12.56, -0.5){$\frac{4\pi}{T}$}
\rput(-3.14, -0.5){$\frac{-\pi}{T}$}
\rput(-6.28, -0.5){$\frac{-2\pi}{T}$}
\rput(-9.42, -0.5){$\frac{-3\pi}{T}$}
\rput(-12.56, -0.5){$\frac{-4\pi}{T}$}
\rput(0, -0.5){$0$}
\rput(5,3 ){$H(\omega) = \frac{2\sin(\omega T)}{\omega}$}
\end{pspicture}
\end{center}
When $\omega T = k\pi$ for nonzero $k \in \Z$, that is, at $\omega = k\pi/T$, the response is zero. Thus we have a low-pass filter. So if we have an input signal given by $x(t) = e^{ik\pi t/T}$, its frequency corresponds to one of the zero crossings of the magnitude response (the absolute value of the frequency response), and it produces an output of 0. If $x(t) = 1$, then we have a frequency of 0, and $y(t)$ in this case is $2T$.
In general, averaging will produce a low-pass filter. Notice that as the gap between $(-T,T)$ shrinks, the interval between $(-\pi/T,\pi/T)$ grows, and vice versa.
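We can verify the moving-average frequency response directly: integrate a pure frequency over the window $[-T, T]$ and compare with $\frac{2\sin(\omega T)}{\omega}$. A sketch ($T = 1$; the tested frequencies are arbitrary, with $\omega = \pi/T$ sitting on a zero of $H$):
\begin{verbatim}
import numpy as np

T, dt = 1.0, 1e-4
tau = np.arange(-T, T, dt)                   # the averaging window [-T, T]
for w in (0.0, 1.0, np.pi / T):
    y0 = np.sum(np.exp(1j * w * tau)) * dt   # y(0) for x(t) = e^{iwt}
    H = 2 * T if w == 0.0 else 2 * np.sin(w * T) / w
    print(f"w={w:.3f}: y(0) = {y0.real:+.4f}{y0.imag:+.4f}j, "
          f"H(w) = {H:+.4f}")
\end{verbatim}
Since $y(0) = \int_{-T}^{T} e^{i\omega\tau}d\tau = H(\omega)$, the printed pairs agree up to the discretization error.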
\begin{example}
We can see that we have the following relationship:
$$ h(t) = \delta(t) \iff H(\omega) = 1 \quad \mbox{ because } \infint \delta(t)e^{-i\omega t}dt = 1$$
What happens when we let $h(t) = \delta(t-T)$?
\begin{align*}
H(\omega) &= \infint \delta(t-T)e^{-i\omega t}dt \\
H(\omega) &= e^{-i\omega T}
\end{align*}
We still have the magnitude at 1, since $\abs{H(\omega)} = 1$. We only introduce a phase: $y(t) = x(t-T)$.
\end{example}
\begin{example}
Given a first order equation for the voltage across a circuit that contains a capacitor and a resistor, what is the frequency response?
$$ RC\dot{y}(t) + y(t) = x(t) \quad \mbox{(initial conditions are zero)} $$
Let $y(t) = H(\omega)e^{i\omega t}, x(t) = e^{i\omega t}$, then $\dot{y}(t) = H(\omega)i \omega e^{i\omega t}$. Now we can solve for the frequency response:
\begin{align*}
RC\dot{y}(t) + y(t) &= x(t) \\
RCH(\omega)i\omega e^{i\omega t} + H(\omega)e^{i\omega t} &= e^{i\omega t} \\
H(\omega) \lr{RCi\omega + 1} &= 1 \\
H(\omega) &= \frac{1}{RCi\omega + 1}
\end{align*}
Then we can re-write this as $ \frac{1/RC}{i\omega + 1/RC}$. This looks similar to $\frac{1}{i\omega + \alpha}$, where $H(\omega) = \frac{1}{i\omega + \alpha} \iff h(t) = e^{-\alpha t}u(t)$, which implies $h(t) = \frac{1}{RC} \cdot e^{-t/RC} \cdot u(t)$. The graph of $h(t)$ has a height of $1/RC$ at 0, and is decreasing towards zero as $t$ increases.
When we consider the magnitude response, never separate the $i\omega$!
$$ \left| H(\omega) \right| = \frac{1/RC}{\abs{i\omega + \lr{1/RC}}} $$
Note that $\lim_{\abs{\omega} \to \infty} \frac{1/RC}{\abs{i\omega + \lr{1/RC}}} = 0$. At $\omega = 0$, $\abs{H(\omega)} = 1$.
\end{example}
\begin{example}
Given the equation of an all-pass filter,
$$ \dot{y} + ay = \dot{x} - ax $$
what is the magnitude response and phase response?
Let $x(t) = e^{i\omega t}, y(t) = H(\omega)x(t)$
\begin{align*}
\dot{y} + ay &= \dot{x} - ax \\
i\omega H(\omega)e^{i\omega t} + aH(\omega)e^{i\omega t} &= i\omega e^{i\omega t} - ae^{i\omega t} \\
i\omega H(\omega) + aH(\omega) &= i\omega - a \\
H(\omega)\lr{i\omega + a} &= i\omega - a \\
H(\omega) &= \frac{i\omega - a}{i\omega + a} \\
\abs{H(\omega)} &= \frac{\abs{i\omega - a}}{\abs{i\omega + a}}
\end{align*}
If we analyze these two vectors, they are symmetric with respect to the imaginary axis, thus they have equal magnitudes and the magnitude response is always equal to one. However, the point of the all-pass filter is that the phase response is not so trivial. For $\omega \geq 0$ (and $a \gt 0$):
\begin{align*}
\angle{ \frac{i\omega - a}{i\omega + a} } &= \angle{ \lr{i\omega - a} } - \angle {\lr{i\omega + a} } \\
\angle{ \frac{i\omega - a}{i\omega + a} } &= \pi - \tan^{-1}\lr{\frac{\omega}{a}} - \tan^{-1}\lr{\frac{\omega}{a}} = \pi - 2\tan^{-1}\lr{\frac{\omega}{a}} \\
\end{align*}
\end{example}
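A numerical sketch of this all-pass behavior (with the arbitrary choice $a = 2$; the phase formula is the one derived above, valid for $\omega \geq 0$):
\begin{verbatim}
import numpy as np

a = 2.0
w = np.linspace(0.01, 50, 500)           # positive frequencies
H = (1j * w - a) / (1j * w + a)
print(np.allclose(np.abs(H), 1.0))       # all-pass: |H| = 1 everywhere
phase = np.pi - 2 * np.arctan(w / a)     # derived phase response
print(np.allclose(np.angle(H), phase))   # matches numpy's angle -> True
\end{verbatim}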
\subsection{Discussion}
\begin{example}
A signal $x$ is passed through a system with a special amplitude modulation:
$$
x(t) =
\begin{cases}
-1 & t<0 \\
e^{-t}-1 & t \geq 0
\end{cases}
$$
$$ y(t) = \lr{A + x(t)}\cos(\omega_0 t) \quad \omega_0 \gt\gt 1, A \gt 0$$
Putting these two together, we can form an expression for $y(t)$:
$$
y(t) =
\begin{cases}
(A-1)\cos(\omega_0 t) & t\lt 0 \\
(A-1+e^{-t})\cos(\omega_0 t) & t \geq 0
\end{cases}
$$
For $A = 1$, the output is zero on the interval $(-\infty,0)$. On the right we have a cosine enveloped by $e^{-t}$, with an amplitude of 1 at the origin.
%\includegraphics{images/diswk7a.ps}
\psset{unit=1cm, algebraic, arrowscale=1.5}
\begin{center}
\begin{pspicture}(-7,-3)(7,3)
\psplot[algebraic,linewidth=1.5pt]{-3.14}{0}{0}
\psplot[algebraic,linewidth=1.5pt]{0}{3.14}{Euler^(-x)*cos(x*2)}
\psplot[algebraic,linestyle=dashed]{0}{3.14}{Euler^(-x)}
\psplot[algebraic,linestyle=dashed]{0}{3.14}{-(Euler^(-x))}
\psaxes[showorigin=false,labels=none, Dx=3.14](0,0)(-3.25,-3.2)(3.25,3.2)
\rput(-0.5, 1.3){$1$}
\rput(-0.5, -1.3){$-1$}
\rput(3,3){$A = 1$}
\end{pspicture}
\end{center}
Notice that the output is 0 for all $t \in (-\infty, 0)$.
For $A \gt 1$, we have an ordinary cosine on $(-\infty,0)$, with values ranging from $-(A-1)$ to $(A-1)$. On the positive side of the graph we have a cosine enveloped by $(A-1+e^{-t})$, which gives an amplitude of $A$ at the origin and then tapers down toward $(A-1)$. The reason we require $\omega_0 \gt\gt 1$ is so that many oscillations fit under the envelope.
%\includegraphics{images/diswk7b.ps}
\psset{unit=1cm, algebraic, arrowscale=1.5}
\begin{center}
\begin{pspicture}(-7,-3)(7,3)
\psplot[algebraic,linewidth=1.5pt]{-3.14}{0}{2* cos(x*2)}
\psplot[algebraic,linewidth=1.5pt]{0}{3.14}{(2+Euler^(-x))*cos(x*2)}
\psplot[algebraic,linestyle=dashed]{-3.14}{0}{2}
\psplot[algebraic,linestyle=dashed]{0}{3.14}{2 + Euler^(-x)}
\psplot[algebraic,linestyle=dashed]{0}{3.14}{-2 - Euler^(-x)}
\psplot[algebraic,linestyle=dashed]{-3.14}{0}{-2}
\psaxes[showorigin=false,labels=none, Dx=3.14](0,0)(-3.25,-3.2)(3.25,3.2)
\rput(-0.5, 3){$A$}
\rput(-3.5, 2){$(A-1)$}
\rput(-3.5, -2){$-(A-1)$}
\rput(-0.5, -3){$-A$}
\rput(3,3){$A\gt 1$}
\end{pspicture}
\end{center}
Now there is also a bounded cosine on the interval $(-\infty, 0)$.
\end{example}
\begin{example}
Consider a square area defined by
$$
f(t) = \begin{cases}
1 & \mbox{if } t \in [-1,1] \\
0 & \mbox{otherwise}
\end{cases}
$$
What is the graphical representation of the convolution of $f$ with itself?
It just so happens that we get a triangle supported on $[-2,2]$. At $t = -2$ the overlap area starts to grow, and it keeps growing until $t=0$, when the two squares are completely aligned. Then, as the moving square continues past the other, the overlap shrinks, so the area declines until it reaches zero at $t=2$.
\end{example}
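A sketch of this convolution with NumPy (the sampling step and tolerance are arbitrary choices): the sampled square pulse convolved with itself matches the triangle $2 - \abs{t}$ on $[-2,2]$ up to discretization error.
\begin{verbatim}
import numpy as np

dt = 1e-3
t = np.arange(-1, 1 + dt / 2, dt)           # support of the square pulse
f = np.ones_like(t)                         # f(t) = 1 on [-1, 1]
conv = np.convolve(f, f) * dt               # (f*f) sampled on [-2, 2]
tc = np.arange(len(conv)) * dt - 2.0        # time axis of the result
tri = np.maximum(0.0, 2.0 - np.abs(tc))     # expected triangle, peak 2 at 0
print(np.max(np.abs(conv - tri)) < 2 * dt)  # -> True
\end{verbatim}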
\begin{example}
Consider a frequency response defined by
$$ H(\omega) = \frac{i\omega}{i\omega + \alpha} \quad \alpha =0.01 $$
We can look at the magnitude response $\left| H(\omega) \right| = \frac{\abs{i\omega}}{\abs{i\omega + \alpha}}$ by considering what the two vectors $i\omega$ and $i\omega + \alpha$ are behaving like. If we take the limit, we find that $\lim_{\abs{\omega}\to\infty}\abs{H(\omega)}=1$:
\begin{align*}
&\lim_{\abs{\omega}\to\infty}\abs{H(\omega)} \\
&\lim_{\abs{\omega}\to\infty}\frac{\abs{i\omega}}{\abs{i\omega + \alpha}} \\
&\lim_{\abs{\omega}\to\infty}\frac{1}{1 + \frac{\alpha}{i\omega}} = 1
\end{align*}
Even for moderately small $\abs{\omega}$ the ratio is very close to one, yet at $\omega = 0$ we have a value of zero. Thus we have a notch filter that kills DC signals, i.e.\ zero-frequency components. If we passed the signal $x(t) = 1 + \cos(20\pi t)$ through this system, we would have an output of approximately $y(t) = \cos(20\pi t)$: the constant 1 is a DC component at $\omega = 0$ and is killed, while the cosine at $\omega = \pm 20\pi$ passes through nearly unchanged.
How can we find $h(t)$?
\begin{align*}
H(\omega) &= \frac{i\omega}{i\omega + \alpha} \\
H(\omega) &= \frac{i\omega + \alpha - \alpha}{i\omega + \alpha} \\
H(\omega) &= 1 - \frac{\alpha}{i\omega + \alpha} \\
\end{align*}
Again, we can use pattern matching here, using the pattern
\begin{align*}
H(\omega) = 1 &\iff h(t) = \delta(t) \\
H(\omega) = \frac{\alpha}{i\omega + \alpha} &\iff h(t) = \alpha e^{-\alpha t}u(t)
\end{align*}
Thus, the equation for the impulse response of the system is given by
$$ h(t) = \delta(t) - \alpha e^{-\alpha t}u(t) $$
Now, what happens if we pass $u(t)$ through this system? In other words, what is the output signal of $h(t)$ convolved with $u(t)$? Let's denote the output signal as $s(t)$:
\begin{align*}
s(t) &= (h*u)(t) \\
s(t) &= \infint h(\tau)u(t-\tau)d\tau \\
s(t) &= \int_{-\infty}^{t} h(\tau)d\tau \quad \mbox{since } \tau \in (-\infty, t] \Rightarrow u(t-\tau) = 1\\
s(t) &= \int_{-\infty}^{t} \lr{\delta(\tau) - \alpha e^{-\alpha \tau}u(\tau) }d\tau \\
s(t) &= \int_{-\infty}^{t} \delta(\tau)d\tau - \int_{-\infty}^{t}\alpha e^{-\alpha \tau}u(\tau) d\tau \\
s(t) &= \int_{-\infty}^{t} \delta(\tau)d\tau - \int_{0}^{t}\alpha e^{-\alpha \tau}d\tau \\
s(t) &= u(t) + \left. e^{-\alpha \tau} \right|_{0}^{t} \\
s(t) &= u(t) + \lr{e^{-\alpha t} - 1}u(t) \quad \mbox{ the domain was restricted by } u(t) \\
s(t) &= e^{-\alpha t} u(t) \\
\end{align*}
One thing to note here is that in general you can use the unit step to help you simplify the limits of an integral, but make sure to keep it as a restriction on the final answer.
\end{example}
\section{Week 8}
\subsection{Feedback Systems}
Consider a feedback system where our plant is the system $F(\omega) = \frac{1}{1-\frac{1}{2}e^{-i\omega}}$ and our controller is defined by $G(\omega) = z^{-1}$; before the adder, there is a multiplier of $\alpha$. Note that $z^{-1}$ represents a system such that:
\begin{align*}
q(n) \to &\fbox{$z^{-1}$} \to q(n-1) \\
q(n) \to &\fbox{$D_1$} \to q(n-1) \quad \mbox{a basic delay}\\
\end{align*}
So in order to determine what happens, let's pass $e^{i\omega n}$ through $z^{-1}$:
$$ e^{i\omega n} \to \fbox{$z^{-1}$} \to e^{i\omega (n-1)} = e^{-i\omega} \cdot e^{i\omega n} $$
Therefore we know the frequency response, thus $G(\omega) = e^{-i\omega}$. Since $\alpha$ is simply multiplied before the system finishes the loop, we can just bundle that with $G(\omega)$, so $\alpha e^{-i\omega}$ can be our $G(\omega)$.
Now we can simply apply Black's formula; here the feedback enters the adder with a plus sign, so the denominator carries a minus sign:
\begin{align*}
H(\omega) &= \frac{F(\omega)}{1-G(\omega)F(\omega)} \\
H(\omega) &= \frac{ \frac{1}{1-\frac{1}{2}e^{-i\omega}}}{1-\alpha e^{-i\omega} \frac{1}{1-\frac{1}{2}e^{-i\omega}}} \\
H(\omega) &= \frac{1}{1-(\alpha + \frac{1}{2})e^{-i\omega}} \\
\end{align*}
Notice that we now have the ability to create a low-pass or high-pass filter using this feedback system; originally, we had only a low-pass filter. The system can use $\alpha$ as a parameter to determine which one it is. We need $\abs{\alpha + \frac{1}{2}} \lt 1$ in order for the sum $\sum_{n \geq 0}\lr{\lr{\alpha + \frac{1}{2}}e^{-i\omega}}^n$ to converge (since $h(n) = \lr{\alpha + \frac{1}{2}}^n u(n)$). This tells us the stable range of the parameter, $\alpha \in (-\frac{3}{2},\frac{1}{2})$: within it, $\alpha \in (-\frac{3}{2},-\frac{1}{2})$ gives a high-pass filter (the pole $\alpha+\frac{1}{2}$ is negative), and $\alpha \in (-\frac{1}{2},\frac{1}{2})$ gives a low-pass filter (the pole is positive).
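A sketch illustrating the tunable pole (the two $\alpha$ values are arbitrary picks from the two sub-ranges identified above):
\begin{verbatim}
import numpy as np

w = np.linspace(-np.pi, np.pi, 1001)
for alpha in (0.25, -1.0):                    # pole beta = alpha + 1/2
    beta = alpha + 0.5
    H = 1.0 / (1 - beta * np.exp(-1j * w))
    kind = "low-pass" if np.argmax(np.abs(H)) == 500 else "high-pass"
    print(f"alpha={alpha:+.2f} (pole {beta:+.2f}): {kind}")
\end{verbatim}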
\subsection{Inverse Systems}
An inverse system is a system such that $x \to \fbox{$P(\omega)$} \to \fbox{$P(\omega)^{-1}$} \to y = x$.
%\begin{center}
%\includegraphics{images/inverses/invdiag2.ps}
%\end{center}
\begin{center}
\begin{pspicture}(0,-4)(8,2)
\rput(0,0){$x$}
\rput(7.2,0){$y$}
\rput(3.5,0){$k$}
\rput(3.75,-2){$P(\omega)$}
\rput(1.5,-2.5){$H$}
% plus or minus for adder
\rput(2.1,-0.5){$-$}
\rput(1.45,0.35){$+$}
\psframe(1,-3)(6,1)
\psframe(3,-2.5)(4.5,-1.5)
\pspolygon(3,-0.5)(3,0.5)(4.5,0)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(0.25,0)(1.5,0)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(1.75,-2)(1.75,-0.25)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(4.5,0)(7,0)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(5.5,-2)(4.5,-2)
\psline[linewidth=1.25 pt](3,-2)(1.75,-2)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(2,0)(3,0)
\psline[linewidth=1.25 pt](5.5,0)(5.5,-2)
\pscircle(1.75,0){0.25}
\psline(1.5,0)(2,0)
\psline(1.75,0.25)(1.75,-0.25)
\end{pspicture}
\end{center}
Given the system in the figure, find the frequency response. Note that the plant is a multiplier. After inspection, we find that
$$ H(\omega) = \frac{k}{1+kP(\omega)} $$
Thus, if we let $k$ be large enough, then $kP(\omega) \gt\gt 1$ and $kP(\omega)$ dominates the denominator. Using this information, we can make a good approximation:
$$ H(\omega) = \frac{k}{1+kP(\omega)} \approx \frac{k}{kP(\omega)} = \frac{1}{P(\omega)}$$
This works well as long as $kP(\omega) \gt\gt 1$ holds over our frequency range of interest. Let's look at a system where the controller is a multiplier:
%\begin{center}
%\includegraphics{images/inverses/invdiag.ps}
%\end{center}
\begin{center}
\begin{pspicture}(0,-4)(8,2)
\rput(0,0){$x$}
\rput(7.2,0){$y$}
\rput(3.75,0){$P(\omega)$}
\rput(4,-2){$k$}
\rput(1.5,-2.5){$H$}
% plus or minus for adder
\rput(2.1,-0.5){$-$}
\rput(1.45,0.35){$+$}
\psframe(1,-3)(6,1)
\psframe(3,-0.5)(4.5,0.5)
\pspolygon(4.5,-1.5)(4.5,-2.5)(3,-2)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(0.25,0)(1.5,0)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(1.75,-2)(1.75,-0.25)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(4.5,0)(7,0)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(5.5,-2)(4.5,-2)
\psline[linewidth=1.25 pt](3,-2)(1.75,-2)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(2,0)(3,0)
\psline[linewidth=1.25 pt](5.5,0)(5.5,-2)
\pscircle(1.75,0){0.25}
\psline(1.5,0)(2,0)
\psline(1.75,0.25)(1.75,-0.25)
\end{pspicture}
\end{center}
We have a similar situation. If we let $k$ be large enough, then $kP(\omega) \gt\gt 1$ and we have
$$ H(\omega) = \frac{P(\omega)}{1+kP(\omega)} \approx \frac{P(\omega)}{kP(\omega)} = \frac{1}{k} $$
It is important to note that letting $k$ become large introduces a trade-off: we exchange gain for flatness over a frequency range. $1/k$ is not much gain, but it is flat, whereas $P(\omega)$ alone may have large gain that varies with frequency.
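A sketch of the first configuration (the plant $P(\omega) = \frac{1}{1-\frac{1}{2}e^{-i\omega}}$ is an arbitrary example): as $k$ grows, the loop response $H = \frac{k}{1+kP}$ approaches the inverse $1/P$ roughly like $1/k$.
\begin{verbatim}
import numpy as np

w = np.linspace(-np.pi, np.pi, 1001)
P = 1.0 / (1 - 0.5 * np.exp(-1j * w))   # arbitrary plant to invert
for k in (10.0, 100.0, 1000.0):
    H = k / (1 + k * P)                 # feedback loop, multiplier k forward
    err = np.max(np.abs(H - 1.0 / P))   # distance from the true inverse
    print(f"k={k:6.0f}: max |H - 1/P| = {err:.5f}")
\end{verbatim}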
\subsection{Causality}
\begin{definition}
A system $F$ is causal $\iff$ for all $T\in\R$ and all $x_1,x_2 \in X$ such that $x_1(t) = x_2(t)$ for all $t \leq T$, the corresponding outputs satisfy $y_1(t) = y_2(t)$ for all $t \leq T$. In general, we say that a causal system doesn't use future values; it may use present or past values only. In other words, a causal system is \emph{nonanticipatory}. To process information in real time, a system must be causal.
\end{definition}
\begin{example}
Here is an example of a system defined by its output signal:
$
y(n) =
\begin{cases}
0 & \mbox{if $n$ is odd} \\
1 & \mbox{if $n$ is even} \\
\end{cases}
$
The system must be causal. The method here is to do a brute force proof.
\begin{proof}
Let $x_1(n),x_2(n) \in X$ be arbitrary. Since the output does not depend on the input at all, we have $y_1(n) = y_2(n)$ for all $n$.
\end{proof}
\end{example}
\begin{example}
Here we have a system given by
$$ \delta(n) \to \fbox{F} \to \delta(n+1) + \delta(n) $$
This is only a partial characterization of $F$, thus, the system can be causal, but doesn't have to be. The method here is to show two examples of why both parts of the previous statement are true.
\begin{enumerate}
\item Let $F$ be defined by $y(n) = \begin{cases} 1 & \mbox{if $n\in\{-1,0\}$} \\ 0 & \mbox{otherwise} \\ \end{cases}$. Then this system is causal, because any two arbitrary functions will have the same output.
\item Let $F$ be defined by $y(n) = x(n) + x(n+1)$. This system is not causal since it ``looks into the future''.
\end{enumerate}
\end{example}
\begin{example}
Here we have a system given by
$$ \delta(n) \to \fbox{G} \to \delta(n+1) + \delta(n) $$
where $G$ is linear. Here, the system cannot be causal. The method is to compare with another signal. If we pass in the zero signal, then by the ZIZO (zero in, zero out) property of linear systems the output is the zero signal. The inputs $\delta(n)$ and $0$ agree for all $n \leq -1$, so causality would require their outputs to agree for all $n \leq -1$; but the outputs differ at $n=-1$, therefore the system is not causal. Another method is to scale the input by some $k \neq 1$: then $x_1(n) = x_2(n)$ $\forall n \leq -1$ while $y_1(-1) \neq y_2(-1)$, so again the system is not causal.
\end{example}
Let $P$ be the statement ``the output of the system precedes the input''
Let $L$ be the statement ``the system is linear''
Let $C$ be the statement ``the system is causal''
Then given a system that is linear:
\begin{align*}
&L \wedge C \Rightarrow \lnot P \\
&P \Rightarrow \lnot \left( L \wedge C \right) \\
&P \Rightarrow \lnot L \vee \lnot C \\
&\lnot C \quad \mbox{since we know $L$ is true}
\end{align*}
\begin{example}
Here we have a system given by
$$ \delta(n) \to \fbox{H} \to \delta(n+1) + \delta(n) $$
where $H$ is time invariant. The system cannot be causal. Here, the method is to compare with a time-shifted version of the input signal.
\begin{proof}
Let $x_2(n) = x_1(n-1)$. Then $x_1(n) = x_2(n)$ $\forall n \leq -1 \wedge y_1(-1) \neq y_2(-1)$.
\end{proof}
\end{example}
Let $TI$ be the statement ``the system is time invariant''
Then given $TI$ is true:
\begin{align*}
&TI \wedge C \Rightarrow \lnot P \\
&P \Rightarrow \lnot TI \vee \lnot C\\
&\lnot C \quad \mbox{ since we know $TI$ is true}
\end{align*}
Note that the output preceding the input doesn't preclude causality unless additional information is given, such as time invariance or linearity.
Here are some more examples:
\begin{enumerate}
\item $y(t) = \cos(x(t))$ is causal because it only depends on the input at the current time $t$.
\item $y(t) = x(\cos(t))$ is not causal, since, for example, $y(0) = x(\cos(0)) = x(1)$.
\end{enumerate}
\subsection{Discussion}
Given the feedback system in the figure, find $H(\omega)$ and $h(n)$.
%\begin{center}
%\includegraphics{images/feedback/feedbackdis8.ps}
%\end{center}
\begin{center}
\begin{pspicture}(0,-4)(8,2)
\rput(0,0){$x$}
\rput(7.2,0){$y$}
\rput(3.75,0){$f(n) = \alpha^nu(n)$}
\rput(3.75,-2){$g(n) = \alpha\delta(n)$}
\rput(1.5,-2.5){$H$}
\psframe(1,-3)(6,1)
\psframe(2.5,-0.5)(5,0.5)
\psframe(2.5,-2.5)(5,-1.5)
% plus or minus for adder
\rput(2.1,-0.5){$-$} % input from loop
\rput(1.45,0.35){$+$} % input signal
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(0.25,0)(1.5,0)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(1.75,-2)(1.75,-0.25)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(5,0)(7,0)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(5.5,-2)(5,-2)
\psline[linewidth=1.25 pt](2.5,-2)(1.75,-2)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(2,0)(2.5,0)
\psline[linewidth=1.25 pt](5.5,0)(5.5,-2)
\pscircle(1.75,0){0.25}
\psline(1.5,0)(2,0)
\psline(1.75,0.25)(1.75,-0.25)
\end{pspicture}
\end{center}
We know from previous examples that
$$f(n) = \alpha^nu(n) \iff F(\omega) = \frac{1}{1-\alpha e^{-i\omega}} \mbox{,}$$
and that
$$g(n) = \alpha \delta(n) \iff G(\omega) = \alpha$$
By Black's formula, we know that $H(\omega) = \frac{F(\omega)}{1+F(\omega)G(\omega)}$. Now we can solve for $H(\omega)$:
\begin{align*}
H(\omega) &= \frac{F(\omega)}{1+F(\omega)G(\omega)} \\
H(\omega) &= \frac{ \frac{1}{1-\alpha e^{-i\omega}}}{1+ \frac{1}{1-\alpha e^{-i\omega}}\alpha} \\
H(\omega) &= \frac{1}{1 + \alpha - \alpha e^{-i\omega}} \\
H(\omega) &= \frac{1}{1+\alpha} \cdot \frac{1}{1-\frac{\alpha}{1+\alpha}e^{-i\omega}} \quad \mbox{factor out } \frac{1}{1+\alpha} \\
\end{align*}
Now we know that
\begin{align*}
H(\omega) &= \frac{1}{1+\alpha} \cdot \frac{1}{1-\lr{\frac{\alpha}{1+\alpha}}e^{-i\omega}} \iff h(n) = \frac{1}{1+\alpha} \left( \frac{\alpha}{1+\alpha}\right)^nu(n)
\end{align*}
We can simplify the expression to $h(n) = \frac{\alpha^n}{\left(1+\alpha\right)^{n+1}}u(n)$.
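We can sanity-check this closed form numerically. The following sketch (Python; the value of $\alpha$ and the number of samples are arbitrary test choices) simulates the difference equation $(1+\alpha)y(n) - \alpha y(n-1) = x(n)$, which follows from cross-multiplying $H(\omega)$, and compares its impulse response against the closed form:
\begin{verbatim}
# Sketch: verify h(n) = alpha^n / (1+alpha)^(n+1) u(n) by simulating
# (1+alpha) y(n) - alpha y(n-1) = x(n) with x = delta.
alpha, M = 0.5, 20                       # arbitrary test values
y, prev = [], 0.0
for n in range(M):
    x = 1.0 if n == 0 else 0.0           # x(n) = delta(n)
    cur = (alpha * prev + x) / (1 + alpha)
    y.append(cur)
    prev = cur
closed = [alpha**n / (1 + alpha)**(n + 1) for n in range(M)]
assert all(abs(a - b) < 1e-12 for a, b in zip(y, closed))
\end{verbatim}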
\begin{example}
Given the system in the figure, find $H(\omega)$.
%\begin{center}
%\includegraphics{images/feedback/feedback2dis8.ps}
%\end{center}
\begin{center}
\begin{pspicture}(-2,-4)(8,2)
\rput(-0.5,0){$x$}
\rput(7.7,0){$y$}
\rput(3.75,0){$F$}
\rput(3.75,-1){$G$}
\rput(0.5,-2.5){$H$}
\rput(2.65,-2){$P$}
\rput(4.85,-2){$Q$}
\psframe(0,-3)(7,1)
% for F
\psframe(3,-0.25)(4.5,0.25)
% for G
\psframe(3,-1.25)(4.5,-0.75)
% for P and Q
\psframe(2,-2.25)(3.25,-1.75)
\psframe(4.25,-2.25)(5.5,-1.75)
% plus or minus for adder
\rput(2.1,-0.5){$+$} % input from loop
\rput(1.45,0.35){$+$} % input signal
% plus or minus for adder
\rput(1.1,-0.5){$-$} % input from loop
\rput(0.45,0.35){$+$} % input signal
% input
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(-0.25,0)(0.5,0)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(1,0)(1.5,0)
% to plant
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(2,0)(3,0)
% from plant
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(4.5,0)(7.5,0)
% to control
\psline[linewidth=1.25 pt](5.5,0)(5.5,-1)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(5.5,-1)(4.5,-1)
% from control
\psline[linewidth=1.25 pt](3,-1)(1.75,-1)
% to adder from control
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(1.75,-1)(1.75,-0.25)
% to control
\psline[linewidth=1.25 pt](6.5,0)(6.5,-2)
\psline[linewidth=1.25 pt, arrowscale=1.5]{<-}(3.25,-2)(4.25,-2)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(6.5,-2)(5.5,-2)
% from control
\psline[linewidth=1.25 pt](2,-2)(0.75,-2)
% to adder from control
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(0.75,-2)(0.75,-0.25)
\pscircle(1.75,0){0.25}
\psline(1.5,0)(2,0)
\psline(1.75,0.25)(1.75,-0.25)
\pscircle(0.75,0){0.25}
\psline(0.5,0)(1,0)
\psline(0.75,0.25)(0.75,-0.25)
\end{pspicture}
\end{center}
The best approach is to interpret this as two feedback systems, one nested inside the other. In other words, let the overall system have a plant defined by the inner feedback system, and a controller defined by the cascade of the inner feedback system with $P$ and $Q$. More abstractly,
\begin{align*}
Plant(\omega) &= \frac{F(\omega)}{1-F(\omega)G(\omega)} \\
Controller(\omega) &= P(\omega)Q(\omega)Plant(\omega) \\
\end{align*}
Thus we can solve for the overall system
\begin{align*}
H(\omega) &= \frac{Plant(\omega)}{1+Controller(\omega)} \\
H(\omega) &= \frac{\frac{F(\omega)}{1-F(\omega)G(\omega)}}{1+P(\omega)Q(\omega)\lr{\frac{F(\omega)}{1-F(\omega)G(\omega)}}} \\
H(\omega) &= \frac{F(\omega)}{1-F(\omega)G(\omega)+F(\omega)P(\omega)Q(\omega)}
\end{align*}
\end{example}
\subsection{Review}
\begin{problem}
Find the frequency response of the impulse response $x(t)=
\begin{cases}
1 & \left|t\right|\lt\alpha \\
0 & \mbox{otherwise} \\
\end{cases}
$
\end{problem}
\begin{solution}
\begin{align*}
X(\omega) &= \int_{-\infty}^{\infty} x(t) e^{-i\omega t}dt \\
X(\omega) &= \int_{-\alpha}^{\alpha} e^{-i\omega t}dt \\
X(\omega) &= \left. \frac{e^{-i\omega t}}{-i\omega}\right|_{-\alpha}^{\alpha} \\
X(\omega) &= \frac{e^{-i\omega \alpha} - e^{i\omega \alpha}}{-i \omega} \\
X(\omega) &= \frac{2i\sin(\omega \alpha)}{i \omega} \\
X(\omega) &= \frac{2\sin(\omega \alpha)}{\omega}
\end{align*}
\end{solution}
\begin{problem}
Find the frequency response of the impulse response $w(t)= e^{-\alpha \abs{t}}$.
\end{problem}
\begin{solution}
First break up the impulse response to $w(t) = \begin{cases} e^{-\alpha t} & t \geq 0 \\ e^{\alpha t} & t \lt 0 \end{cases}$ (assume $\alpha \gt 0$ so that the integrals below converge).
Now we can solve for $W(\omega)$.
\begin{align*}
W(\omega) &= \int_{-\infty}^{\infty}w(t) e^{-i\omega t} dt \\
W(\omega) &= \int_{-\infty}^{0}e^{\alpha t}e^{-i\omega t} dt + \int_{0}^{\infty} e^{-\alpha t}e^{-i\omega t} dt \\
W(\omega) &= \int_{-\infty}^{0}e^{t\left( \alpha-i \omega\right)} dt + \int_{0}^{\infty} e^{-t \left( i \omega + \alpha \right)} dt \\
W(\omega) &= \left. \frac{e^{t\lr{\alpha-i\omega}}}{\left( \alpha-i \omega\right)} \right|_{-\infty}^{0} + \left. \frac{e^{-t\lr{i\omega+\alpha}}}{-\left( i \omega + \alpha\right)} \right|_{0}^{\infty} \\
W(\omega) &= \frac{e^0 - 0}{\left( \alpha-i \omega\right)} - \frac{0 - 1}{\left(i \omega +\alpha\right)}\\
W(\omega) &= \frac{1}{\left( \alpha-i \omega\right)} + \frac{1}{\left(i \omega +\alpha\right)}\\
W(\omega) &= \frac{\left(i \omega +\alpha\right) + \left( \alpha-i \omega\right)}{\left( \alpha-i \omega\right)\left(i \omega +\alpha\right)}\\
W(\omega) &= \frac{2\alpha}{\left( \alpha^2 + \omega^2\right)}\\
\end{align*}
\end{solution}
\begin{problem}
Find the frequency response of the impulse response $h(n) = u(n) - u(n-N)$.
\end{problem}
\begin{solution}
\begin{align*}
H(\omega) &= \sum \limits_{n\in\Z} \left( u(n) - u(n-N) \right) e^{-i\omega n} \\
H(\omega) &= \sum \limits_{n\in\Z_\oplus}e^{-i\omega n} - \sum \limits_{n=N}^{\infty} e^{-i\omega n} \\
H(\omega) &= \sum \limits_{n\in\Z_\oplus \setminus \{N, N+1, \dots\} }e^{-i\omega n} \\
H(\omega) &= \sum \limits_{n=0}^{N-1}e^{-i\omega n} \\
H(\omega) &= \frac{ 1- e^{-i\omega N} }{1-e^{-i\omega}} \quad \mbox{since } \sum \limits_{n=k}^{N}\alpha^n = \frac{\alpha^{N+1}-\alpha^k}{\alpha - 1} \\
H(\omega) &= \frac{\lr{e^{-i\omega N/2}} \lr{e^{i\omega N/2} - e^{-i\omega N/2}} }{\lr{{e^{-i\omega/2}}}\lr{e^{i\omega / 2} - e^{-i\omega / 2}}} \quad \mbox{ factor out half of the power} \\
H(\omega) &= e^{-i\omega N/2+i\omega/2} \cdot \frac{2i\sin(\omega N/2)}{2i\sin(\omega/2)} \\
H(\omega) &= e^{-i\omega \lr{N-1}/2} \cdot \frac{\sin(\omega N/2)}{\sin(\omega/2)} \\
\end{align*}
The magnitude response is displayed below for various values of $N$:
%\centering
%\includegraphics{images/reviewGraphs/2.ps}
%\includegraphics{images/reviewGraphs/3.ps}
%\includegraphics{images/reviewGraphs/4.ps}
%\includegraphics{images/reviewGraphs/5.ps}
%\includegraphics[height=2.5in]{images/reviewGraphs/6.ps}
%\includegraphics[height=2.5in]{images/reviewGraphs/7.ps}
%\includegraphics[height=2.5in]{images/reviewGraphs/8.ps}
%\includegraphics[height=2.5in]{images/reviewGraphs/9.ps}
%\psset{xunit=1, yunit=1, algebraic, arrowscale=1.5}
\begin{center}
\begin{pspicture}(-3.5,-2)(3.5,3)
\psplot[algebraic,plotpoints=1000,linewidth=1.5pt]{-3.14}{3.14}{abs(sin(2*x/2) / sin(x/2))}
\psplot[algebraic,plotpoints=1000,linewidth=0.5pt, linestyle=dashed]{-3.14}{3.14}{abs(sin(2*x/2))}
\psplot[algebraic,plotpoints=1000,linewidth=0.5pt,linestyle=dashed]{-3.14}{3.14}{abs(sin(x/2))}
\psaxes[showorigin=false,labels=none, Dx=3.14](0,0)(-3.25,0)(3.25,2)
\rput(3.14, -0.35){$\pi$}
\rput(-3.14, -0.35){$-\pi$}
\rput(0, -0.35){$0$}
\rput(1.5, 2){$N=2$}
\end{pspicture}
\end{center}
\begin{center}
\begin{pspicture}(-3.5,-2)(3.5,3)
\psplot[algebraic,plotpoints=1000,linewidth=1.5pt]{-3.14}{3.14}{abs(sin(3*x/2) / sin(x/2))}
\psplot[algebraic,plotpoints=1000,linewidth=0.5pt,linestyle=dashed]{-3.14}{3.14}{abs(sin(3*x/2))}
\psplot[algebraic,plotpoints=1000,linewidth=0.5pt,linestyle=dashed]{-3.14}{3.14}{abs(sin(x/2))}
\psaxes[showorigin=false,labels=none, Dx=3.14](0,0)(-3.25,0)(3.25,3)
\rput(3.14, -0.35){$\pi$}
\rput(-3.14, -0.35){$-\pi$}
\rput(0, -0.35){$0$}
\rput(1.5, 3){$N=3$}
\end{pspicture}
\end{center}
\begin{center}
\begin{pspicture}(-3.5,-2)(3.5,5)
\psplot[algebraic,plotpoints=1000,linewidth=1.5pt]{-3.14}{3.14}{abs(sin(4*x/2) / sin(x/2))}
\psplot[algebraic,plotpoints=1000,linewidth=0.5pt,linestyle=dashed]{-3.14}{3.14}{abs(sin(4*x/2))}
\psplot[algebraic,plotpoints=1000,linewidth=0.5pt,linestyle=dashed]{-3.14}{3.14}{abs(sin(x/2))}
\psaxes[showorigin=false,labels=none, Dx=3.14](0,0)(-3.25,0)(3.25,4)
\rput(3.14, -0.35){$\pi$}
\rput(-3.14, -0.35){$-\pi$}
\rput(0, -0.35){$0$}
\rput(1.5, 4){$N=4$}
\end{pspicture}
\end{center}
\begin{center}
\begin{pspicture}(-3.5,-2)(3.5,5)
\psplot[algebraic,plotpoints=1000,linewidth=1.5pt]{-3.14}{3.14}{abs(sin(5*x/2) / sin(x/2))}
\psplot[algebraic,plotpoints=1000,linewidth=0.5pt,linestyle=dashed]{-3.14}{3.14}{abs(sin(5*x/2))}
\psplot[algebraic,plotpoints=1000,linewidth=0.5pt,linestyle=dashed]{-3.14}{3.14}{abs(sin(x/2))}
\psaxes[showorigin=false,labels=none, Dx=3.14](0,0)(-3.25,0)(3.25,5)
\rput(3.14, -0.35){$\pi$}
\rput(-3.14, -0.35){$-\pi$}
\rput(0, -0.35){$0$}
\rput(1.5, 5){$N=5$}
\end{pspicture}
\end{center}
\begin{center}
\begin{pspicture}(-3.5,-2)(3.5,5)
\slider{2}{10}{N}{$N$}{2}
\psplot[algebraic,plotpoints=1000,linewidth=1.5pt]{-3.14}{3.14}{abs(sin(N*x/2) / sin(x/2))}
\psplot[algebraic,plotpoints=1000,linewidth=0.5pt,linestyle=dashed]{-3.14}{3.14}{abs(sin(N*x/2))}
\psplot[algebraic,plotpoints=1000,linewidth=0.5pt,linestyle=dashed]{-3.14}{3.14}{abs(sin(x/2))}
\psaxes[showorigin=false,labels=none, Dx=3.14](0,0)(-3.25,0)(3.25,5)
\rput(3.14, -0.35){$\pi$}
\rput(-3.14, -0.35){$-\pi$}
\rput(0, -0.35){$0$}
\end{pspicture}
\end{center}
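As a numerical sanity check (a sketch, not part of the derivation), we can compare the closed form $\abs{H(\omega)} = \abs{\sin(\omega N/2)/\sin(\omega/2)}$ against the direct sum $\sum_{n=0}^{N-1}e^{-i\omega n}$ on a grid of frequencies; $N$ below is an arbitrary test value:
\begin{verbatim}
# Sketch: closed form |sin(w N/2)/sin(w/2)| vs. the direct DTFT sum.
import cmath, math
N = 5                                    # arbitrary test value
for k in range(1, 200):
    w = -math.pi + k * (2 * math.pi / 200)
    if abs(math.sin(w / 2)) < 1e-9:      # skip w = 0, where the limit is N
        continue
    direct = abs(sum(cmath.exp(-1j * w * n) for n in range(N)))
    closed = abs(math.sin(w * N / 2) / math.sin(w / 2))
    assert abs(direct - closed) < 1e-9
\end{verbatim}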
\end{solution}
\begin{problem}
Given the difference equation $y(n) -\alpha y(n-1) = x(n)$, find the frequency response and impulse response, and state whether the system is causal. Then determine the impulse response corresponding to $G(\omega) = \frac{H(\omega)}{e^{i\omega N}}$. Then find the difference equation for this system.
\end{problem}
\begin{solution}
By pattern recognition, we know the following
\begin{align*}
H(\omega) &= \frac{1}{1-\alpha e^{-i\omega}} \\
h(n) &= \alpha^nu(n) \\
\end{align*}
Note that this system is causal, since any two input signals that are identical up to an arbitrary time $N$ produce outputs that are identical up to that time, $\forall N \in \Z$. Let's continue to solve for $G(\omega)$:
\begin{align*}
G(\omega) &= \frac{H(\omega)}{e^{i\omega N}} \\
G(\omega) &= \frac{e^{-i\omega N}}{1-\alpha e^{-i\omega}} \\
\end{align*}
We can infer what the impulse response is, since multiplication in the frequency domain corresponds to convolution in the time domain:
\begin{align*}
g(n) &= \delta(n-N) * \alpha^n u(n) \\
g(n) &= \alpha^{n-N} u(n-N)\\
g(n) &= h(n-N)\\
\end{align*}
To get the difference equation, we simply cross multiply the equation for $G(\omega)$:
\begin{align*}
G(\omega) &= \frac{e^{-i\omega N}}{1-\alpha e^{-i\omega}} \\
G(\omega)\lr{1-\alpha e^{-i\omega}} &= e^{-i\omega N} \\
G(\omega)e^{i\omega n}\lr{1-\alpha e^{-i\omega}} &= e^{-i\omega N}e^{i\omega n} \\
y(n)-\alpha G(\omega)e^{i\omega \lr{n-1}} &= e^{i\omega \lr{n-N}} \\
y(n)-\alpha y(n-1) &= x(n-N) \\
\end{align*}
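The last line can be checked numerically. This sketch (Python; $\alpha$, $N$, and the signal length are arbitrary test values) runs the recursion $y(n) = \alpha y(n-1) + x(n-N)$ with $x = \delta$ and compares the output against $g(n) = \alpha^{n-N}u(n-N)$:
\begin{verbatim}
# Sketch: simulate y(n) = alpha y(n-1) + x(n-N) with x = delta and
# compare against the claimed impulse response g(n) = h(n-N).
alpha, N, M = 0.8, 3, 25                 # arbitrary test values
y, prev = [], 0.0
for n in range(M):
    x_delayed = 1.0 if n == N else 0.0   # x(n-N) with x = delta
    cur = alpha * prev + x_delayed
    y.append(cur)
    prev = cur
g = [alpha**(n - N) if n >= N else 0.0 for n in range(M)]
assert all(abs(a - b) < 1e-12 for a, b in zip(y, g))
\end{verbatim}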
\end{solution}
\begin{problem}
The filter represented by the following LCCDE is a low-pass filter. Find the LCCDE for a high-pass filter.
$$ y(n) = \sum \limits_{k=1}^{M}a_ky(n-k) + \sum \limits_{k=0}^{N}b_kx(n-k) $$
\end{problem}
\begin{solution}
\begin{align*}
y(n) &= \sum \limits_{k=1}^{M}a_ky(n-k) + \sum \limits_{k=0}^{N} b_k x(n-k) \\
L(\omega)e^{i\omega n} &= \sum \limits_{k=1}^{M} a_k L(\omega)e^{i\omega\lr{n-k}} + \sum \limits_{k=0}^{N} b_k e^{i \omega \lr{n-k}} \\
L(\omega)&= \sum \limits_{k=1}^{M}a_k L(\omega)e^{-i\omega k} + \sum \limits_{k=0}^{N} b_k e^{-i \omega k} \\
L(\omega)&= \frac{\sum \limits_{k=0}^{N} b_k e^{-i \omega k}}{1-\sum \limits_{k=1}^{M}a_k e^{-i\omega k}} \\
\end{align*}
Now to transform this from a low-pass to a high-pass filter, we simply shift the frequency response by $\pi$:
\begin{align*}
H(\omega) &= L(\omega + \pi) \\
H(\omega)&= \frac{\sum \limits_{k=0}^{N} b_k e^{-i \lr{\omega+\pi} k}}{1-\sum \limits_{k=1}^{M}a_k e^{-i\lr{\omega+\pi} k}} \\
H(\omega)&= \frac{\sum \limits_{k=0}^{N} b_k e^{-i \omega k}\lr{-1}^k }{1-\sum \limits_{k=1}^{M}a_k e^{-i\omega k}\lr{-1}^k} \\
H(\omega)&= \sum \limits_{k=1}^{M}a_k H(\omega) e^{-i\omega k}\lr{-1}^k + \sum \limits_{k=0}^{N} b_k e^{-i \omega k}\lr{-1}^k \\
y(n) &= \sum \limits_{k=1}^{M}\lr{-1}^k a_k y(n-k) + \sum \limits_{k=0}^{N} \lr{-1}^k b_k x(n-k) \\
\end{align*}
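The modulation-by-$\lr{-1}^n$ trick is easy to see numerically. Here is a sketch (Python) using the one-pole low-pass $h(n) = \alpha^n u(n)$, truncated to a finite length, as an arbitrary test case: modulating $h$ swaps the gains at $\omega = 0$ and $\omega = \pi$, turning the low-pass into a high-pass.
\begin{verbatim}
# Sketch: (-1)^n h(n) swaps the response at w = 0 and w = pi.
import cmath, math
alpha, M = 0.9, 400                      # arbitrary test values
h  = [alpha**n for n in range(M)]        # truncated low-pass
hm = [(-1)**n * v for n, v in enumerate(h)]
mag = lambda g, w: abs(sum(v * cmath.exp(-1j * w * n)
                           for n, v in enumerate(g)))
assert mag(h, 0) > mag(h, math.pi)       # low-pass: large at DC
assert mag(hm, math.pi) > mag(hm, 0)     # modulated: large at pi
\end{verbatim}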
\end{solution}
\newpage
\section{Week 9}
\subsection{Memory in Systems}
A system is memoryless, according to our definition, if $y(t) = f(x(t))$. If $y(t) = f(t,x(t))$, then the system has memory. Memoryless systems are also called \emph{instantaneous systems}.
\begin{example}
Consider an AM Modulator defined by $y(t) = \cos(\omega_0 t)x(t)$. This system has memory because it must keep track of time to know what the value of $\cos(\omega_0 t)$ is.
\end{example}
\begin{example}
Consider a resistor where the voltage across the circuit is given by $y(t) = Rx(t)$. This system is memoryless. If the voltage were instead given by $y(t) = R(t)x(t)$, then the system would have memory.
\end{example}
If we consider an LTI system, what must be true of $h(n)$ for the system to be memoryless? Let's look at an output signal $y(n)$, whose value is the convolution of an arbitrary input $x(n)$ and $h(n)$.
\begin{align*}
y(n) &= \sum \limits_{k\in\Z} h(k)x(n-k) \\
y(n) &= \cdots + h(-1)x(n+1) + h(0)x(n) + h(1)x(n-1) + \cdots \\
\end{align*}
Note that in order to have an instantaneous system without memory, the only term should be $h(0)x(n)$. So
$$y(n) = \sum \limits_{\ell\in\Z} h(\ell)x(n-\ell) = kx(n)$$
where $k$ is a constant. Therefore, $h(n) =k\delta(n)$ in order for the system to be memoryless.
Let $M$ be the statement ``the system is memoryless''.
Let $C$ be the statement ``the system is causal''.
Is every memoryless system causal? In other words, is the following statement a tautology?
$$M \Rightarrow C$$
Since we define a memoryless system as a system whose output can be given by $y(t) = f(x(t))$, then we can prove this by using the definition of causality. Let $y_1(t) = f(x_1(t))$ be a memoryless system, and let $y_2(t) = f(x_2(t))$ be a memoryless system. It is clear that if $x_1(t) = x_2(t)$ $\forall t \leq T$, then $y_1(t) = y_2(t)$ $\forall t \leq T$, therefore, every memoryless system is causal, and the statement $M \Rightarrow C$ is a tautology.
Let $M$ be the statement ``the system is memoryless''.
Let $TI$ be the statement ``the system is time-invariant''.
Is every memoryless system time-invariant? In other words, is the following statement a tautology?
$$M \Rightarrow TI$$
We can also prove this with the definition of time-invariance. Let $y(t) = f(x(t))$, and let $\hat{x}(t) = x(t-T)$. Then we have $\hat{y}(t) = f(\hat{x}(t)) = f(x(t-T)) = y(t-T)$. Therefore, every memoryless system is time-invariant, and the statement $M \Rightarrow TI$ is a tautology.
Note that other textbooks say that this system is time-invariant: $y(t) = x(t)g(t)$. But note that $\hat{x}(t) = x(t-T) \Rightarrow \hat{y}(t) = \hat{x}(t)g(t) \neq y(t-T)$. Therefore, the system is not time-invariant. We can also deduce that the system is not memoryless since $\lnot TI \Rightarrow \lnot M$ is always true.
Given that a system is memoryless and $x(t_0) = x(t_1)$, we can deduce that $y(t_0) = y(t_1)$. In other words, $M \wedge \left( x(t_0)=x(t_1) \right) \Rightarrow y(t_0) = y(t_1)$.
To disprove memorylessness of a system, you can simply compare two input signals that have identical values at some time $t_0$: if $x_1(t_0) = x_2(t_0)$ but $y_1(t_0) \neq y_2(t_0)$, then the system has memory.
\subsection{BIBO Stability}
BIBO stability stands for \emph{Bounded Input, Bounded Output}. For a system that is BIBO stable, the response to every bounded input is itself bounded. That is, the system $x \to \fbox{F} \to y$ is BIBO stable $\iff$ for every bounded $x \in X$, the response $y$ to $x$ is bounded.
$$\mbox{The system } x \to \fbox{F} \to y \mbox{ is BIBO stable}$$
$$\iff$$
$$\abs{x(t)} \lt B_x \mbox{ } \forall t \Rightarrow \exists B_y \gt 0 \mbox{ such that } \abs{y(t)} \lt B_y \mbox{ }\forall t$$
\begin{example}
Is $y(n)=e^{\alpha x(n)}$ $\forall n $ for some $\alpha \in \R$ BIBO stable?
You can start with the assumption that $\abs{x(n)} \lt B_x$, then
\begin{align*}
y(n) &= e^{\alpha x(n)} \\
\abs{y(n)} &= \abs{e^{\alpha x(n)}} \\
\abs{y(n)} &= \abs{e^{\alpha x(n)}} \lt \abs{e^{\alpha B_x}} = B_y \\
\abs{y(n)} &\lt B_y
\end{align*}
Note that without loss of generality we may take $\alpha \gt 0$, since $-B_x \lt x(n) \lt B_x$ yields the bound $e^{-\abs{\alpha} B_x} \lt y(n) \lt e^{\abs{\alpha} B_x}$ for either sign of $\alpha$, which can be simplified to $\abs{y(n)} \lt e^{\abs{\alpha} B_x}$.
\end{example}
As long as you find a value that dominates the output over both the positive and negative range of the input, you have found a bound. The question is always whether or not you can find a bounded output given a bounded input.
\subsection{LTI Systems and BIBO Stability}
Given an LTI system, the system is BIBO stable if and only if the impulse response is absolutely summable or absolutely integrable.
Let $LTI$ be the statement ``$H$ is an LTI system''.
Let $BIBO$ be the statement ``$H$ is BIBO stable''.
Then, given that $LTI$ holds, the following equivalence is true:
$$ BIBO $$
$$ \iff $$
$$\lr{ \sum \limits_{n\in\Z}\abs{h(n)} \lt \infty} \mbox{ (DT)} \quad \vee \quad \lr{\int_{-\infty}^{\infty}\abs{h(t)}dt \lt \infty } \mbox{ (CT)} $$
Here is the proof that absolute summability implies BIBO stability, in discrete time:
\begin{proof}
First we are going to show $\sum \abs{h(n)} \lt \infty \Rightarrow BIBO$. Assume $\sum \limits_{n \in \Z} \abs{h(n)} \lt \infty$:
\begin{align*}
y(n) &= \sum \limits_{k\in\Z}h(k)x(n-k) \\
\abs{y(n)} &= \abs{\sum \limits_{k\in\Z}h(k)x(n-k)} \\
\abs{y(n)} &\leq \sum \limits_{k\in\Z}\abs{h(k)x(n-k)} \\
\abs{y(n)} &\leq \sum \limits_{k\in\Z}\abs{h(k)}\abs{x(n-k)} \\
\end{align*}
To make the input bounded, let $\abs{x(n)} \lt B_x$ $\forall n \in \Z$. Then
\begin{align*}
\abs{y(n)} &\leq \sum \limits_{k\in\Z}\abs{h(k)}B_x \\
\abs{y(n)} &\leq B_x\sum \limits_{k\in\Z}\abs{h(k)} \\
\abs{y(n)} &\leq B_x\cdot k \quad \mbox{where } k = \sum \abs{h(n)} \lt \infty \\
\abs{y(n)} &\leq B_y \\
\end{align*}
So given that an impulse response of a system is absolutely summable, when passed a bounded input signal, the system will always produce a bounded output signal. Therefore, any system whose impulse response is absolutely summable is BIBO stable.
\end{proof}
\begin{example}
Given the system defined by $y(n) - \alpha y(n-1) = x(n)$, with zero initial conditions to make it an LTI system, for what values of $\alpha$ can this system be BIBO stable? \\
Solve this system in the forward direction $y(n) = \alpha y(n-1) + x(n)$:
\begin{align*}
y(n) &= x(n) + \alpha y(n-1) \\
y(n) &= x(n) + \alpha \lr{x(n-1) + \alpha y(n-2)} \\
y(n) &= x(n) + \alpha x(n-1) + \alpha^2 y(n-2) \\
y(n) &= x(n) + \alpha x(n-1) + \alpha^2\lr{x(n-2) + \alpha y(n-3)} \\
y(n) &= x(n) + \alpha x(n-1) + \alpha^2 x(n-2) + \alpha^3 y(n-3) \\
y(n) &= x(n) + \alpha x(n-1) + \alpha^2 x(n-2) + \alpha^3 x(n-3) + \cdots \\
y(n) &= \sum \limits_{k \in \Z_\oplus} \alpha^k x(n-k)
\end{align*}
This shows us that $h(n) = \begin{cases} 0 & n\lt0 \\ \alpha^n & n\geq 0 \end{cases} $, or $h(n) = \alpha^n u(n)$. Now we can determine the values of $\alpha$ for which the system is BIBO stable. We can look at the absolute sum of the impulse response $h(n)$:
$$ \sum \limits_{n \in \Z} \abs{h(n)} = \sum \limits_{n \in \Z_\oplus} \abs{\alpha}^n = \frac{1}{1-\abs{\alpha}} $$
But that last equality is true if and only if $\abs{\alpha} \lt 1$, thus our range of values for which the system is BIBO stable is $\alpha \in (-1,1)$.
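A quick numerical illustration (a sketch; the test values of $\alpha$ and the truncation length are arbitrary): the partial sums of $\sum\abs{\alpha}^k$ settle at $\frac{1}{1-\abs{\alpha}}$ when $\abs{\alpha} \lt 1$ and blow up otherwise.
\begin{verbatim}
# Sketch: partial sums of sum_k |alpha|^k.
s = sum(abs(-0.5)**k for k in range(200))
assert abs(s - 1 / (1 - 0.5)) < 1e-12    # |alpha| < 1: converges to 2
s_div = sum(abs(1.1)**k for k in range(200))
assert s_div > 1e6                       # |alpha| > 1: grows without bound
\end{verbatim}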
\end{example}
It is important to note that you cannot assume that $H(\omega)$ exists to show that a system is BIBO stable, because that presumes BIBO stability.
So far, we showed that absolute summability or absolute integrability is a sufficient condition for BIBO stability. Let's show that it is also necessary. We want to show that BIBO stability implies absolute summability (or absolute integrability); however, proving the contrapositive is much easier (we show the absolute summability case):
$$ \lnot \lr{\sum \limits_{n \in \Z} \abs{h(n)} \lt \infty} \Rightarrow \lnot BIBO $$
Note that a system not being BIBO stable means that $\exists x \in X$ with $\abs{x(n)} \lt B_x$ for some $B_x \lt \infty$ that produces an output signal that is unbounded: no finite $B_y$ bounds $\abs{y(n)}$ for all $n$.
\begin{proof}
Let $h(n)$ be an impulse response to a system such that $\sum \abs{h(n)} = \infty$. Let $\hat{x}(n)$ be an input signal defined by
$$\hat{x}(n) = \begin{cases} \frac{h(n)}{\abs{h(n)}} & h(n) \neq 0 \\ 0 & \mbox{otherwise} \end{cases}$$
Let $x(n) = \hat{x}(-n)$.
$$x(n) = \begin{cases} \frac{h(-n)}{\abs{h(-n)}} & h(-n) \neq 0 \\ 0 & \mbox{otherwise} \end{cases}$$
Then we have our output signal defined by $y(n) = \sum_{k \in \Z}h(k)x(n-k)$. If we look at $y(0)$, we have
$$y(0) = \sum_{k \in \Z}h(k)x(-k)$$
We can use this to show that this bounded input produces an unbounded output.
\begin{align*}
y(0) &= \sum \limits_{k \in \Z}h(k)x(-k) \\
y(0) &= \sum \limits_{k \in \Z}h(k)\frac{h(k)}{\abs{h(k)}} \\
y(0) &= \sum \limits_{k \in \Z}\frac{\lr{h(k)}^2}{\abs{h(k)}} \\
y(0) &= \sum \limits_{k \in \Z}\frac{\abs{h(k)}\abs{h(k)}}{\abs{h(k)}} \\
y(0) &= \sum \limits_{k \in \Z}\abs{h(k)} = \infty \quad \mbox{(given)}\\
\end{align*}
Therefore, if a system's impulse response is not absolutely summable, there is a bounded input for which the system produces an unbounded output, so the system is not BIBO stable.
\end{proof}
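The construction in the proof is easy to see numerically. As a sketch, take $h(n) = \frac{1}{n+1}u(n)$ (an arbitrary example of an impulse response that is not absolutely summable, since the harmonic series diverges); the input $x(n) = \mbox{sign}(h(-n))$ is bounded by 1, yet the truncated sums for $y(0)$ keep growing:
\begin{verbatim}
# Sketch: worst-case bounded input for h(n) = 1/(n+1) u(n).
def y0(M):
    # y(0) = sum_k h(k) x(-k), truncated to k < M; here x(-k) = 1
    return sum(1 / (k + 1) for k in range(M))
print(y0(10**2), y0(10**4), y0(10**6))   # ~5.2, ~9.8, ~14.4: unbounded
\end{verbatim}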
What if $\fndef{h}{\Z}{\C}$? Then we could have proved the same result by letting $\hat{x}(n) = \frac{h^*(n)}{\abs{h^*(n)}} = \frac{h^*(n)}{\abs{h(n)}}$ (and $\hat{x}(n) = 0$ where $h(n) = 0$). This will also ``blow up''.
\subsection{Implications of BIBO for Frequency Response}
Recall that the definition of a frequency response $H(\omega)$ is given by the summation (in DT):
$$ H(\omega) = \sum \limits_{n \in \Z}h(n)e^{-i\omega n} $$
Notice that if we take the absolute value of the terms in the sum, we have
$$\sum \limits_{n \in \Z}\abs{h(n)e^{-i\omega n}} = \sum \limits_{n \in \Z}\abs{h(n)} $$
Therefore, if $\sum \abs{h(n)}$ is divergent, then the frequency response has issues.
\begin{enumerate}
\item $\sum \abs{h(n)} \lt \infty \Rightarrow H(\omega) $ is nice and smooth.
\item $\sum \abs{h(n)} = \infty \wedge \sum \abs{h(n)}^2 \lt \infty \Rightarrow H(\omega) $ is bounded but will have discontinuities. Note that $\sum \abs{h(n)}^2$ represents the energy of the system, thus $\sum \abs{h(n)}^2 \lt \infty$ means that the system has finite energy.
\item $\sum \abs{h(n)} = \infty \wedge \sum \abs{h(n)}^2 = \infty \Rightarrow$ "all bets are off". Either $H(\omega)$ has impulses, or doesn't exist. The system may not have a frequency response.
\end{enumerate}
Let $BIBO$ be the statement that the system produces a bounded output for every bounded input.
Let $FE$ be the statement that the system has finite energy ($\sum \abs{h(n)}^2 \lt \infty$)
Let $F$ be the statement that the system has a frequency response.
Then we can sum up the above statements regarding frequency response:
\begin{align*}
BIBO &\Rightarrow F \\
\lnot BIBO \wedge FE &\Rightarrow F \\
\lnot BIBO \wedge \lnot FE &\Rightarrow F \vee \lnot F \\
\end{align*}
\newpage
\section{Week 10}
\subsection{Fourier Analysis}
\subsubsection{Basics and Overview}
Fourier analysis is a tool we use to break a signal into linear combinations of its constituent complex exponential frequencies. Here is a list of acronyms to get familiar with:
\begin{center}
$
\begin{tabular}{|c|c|}
\hline
DFS & Discrete Fourier Series \\
\hline
DFT (FFT) & Discrete Fourier Transform (a.k.a. Fast Fourier Transform) \\
\hline
FS & Fourier Series \\
\hline
DTFT & Discrete Time Fourier Transform \\
\hline
CTFT & Continuous Time Fourier Transform \\
\hline
\end{tabular}
$
\end{center}
Here is a list of the acronyms organized by the type of signal being analyzed:
\begin{center}
$
\begin{tabular}{|c|c|c|}
\hline
& DT & CT \\
\hline
\hline
periodic & DFS, DFT (a.k.a FFT) & FS \\
\hline
aperiodic & DTFT & CTFT \\
\hline
\end{tabular}
$
\end{center}
\subsubsection{Signals as Vectors}
We will also introduce a new representation of signals in the form of vectors. Consider the vector given by $\vec{x} = \left[ \begin{array}{r} 4 \\ 2 \\ \end{array} \right]$. This vector corresponds to a periodic signal $4\delta(n) + 2\delta(n-1)$. Notice that we can link a Finite Duration signal to a periodic signal by letting the entire duration be one period, and decompose a periodic signal conversely.
Fourier analysis allows us to decompose signals into sums of orthogonal vectors. This signal $\vec{x}$ can also be written as the sum of the two vectors $\Psi_0 = \left[ \begin{array}{r} 1 \\ 0 \\ \end{array} \right]$, and $\Psi_1 = \left[ \begin{array}{r} 0 \\ 1 \\ \end{array} \right] $. Then we can see that $\vec{x} = 4\Psi_0 + 2\Psi_1$.
What if $\Psi_0 = \left[ \begin{array}{r} 1 \\ 1 \\ \end{array} \right]$ and $\Psi_1 = \left[ \begin{array}{r} 1 \\ -1 \\ \end{array} \right]$? \\
\begin{align*}
x(n) &= X_0\Psi_0(n) + X_1\Psi_1(n) \\
\left[ \begin{array}{r} x(0) \\ x(1) \\ \end{array} \right] &= X_0\left[ \begin{array}{r} \Psi_0(0) \\ \Psi_0(1) \\ \end{array} \right] + X_1 \left[ \begin{array}{r} \Psi_1(0) \\ \Psi_1(1) \\ \end{array} \right] \\
\left[ \begin{array}{r} 4 \\ 2 \\ \end{array} \right] &= 3\left[ \begin{array}{r} 1 \\ 1 \\ \end{array} \right] + 1 \left[ \begin{array}{r} 1 \\ -1 \\ \end{array} \right] \\
\end{align*}
In general, we could have used any orthogonal vectors, but the vectors $\{ \left[ \begin{array}{c} 1 \\ 1 \\ \end{array} \right],\left[ \begin{array}{c} 1 \\ -1 \\ \end{array} \right] \}$ are special.
\begin{align*}
\Psi_0 &= \left[ \begin{array}{c} \Psi_0(0) \\ \Psi_0(1) \\ \end{array} \right] = \left[ \begin{array}{c} 1 \\ 1 \\ \end{array} \right] \\
\Psi_1 &= \left[ \begin{array}{c} \Psi_1(0) \\ \Psi_1(1) \\ \end{array} \right] = \left[ \begin{array}{c} 1 \\ -1 \\ \end{array} \right] \\
\end{align*}
Here $\Psi_k(n) = e^{i k \omega_0 n}$, and for period 2 the fundamental frequency is $\omega_0 = 2\pi/2 = \pi$. So when we use the basis $\{ \left[ \begin{array}{c} 1 \\ 1 \\ \end{array} \right],\left[ \begin{array}{c} 1 \\ -1 \\ \end{array} \right] \}$, we can decompose any DT signal of period 2 into linear combinations of $\Psi_0(n) = 1$ and $\Psi_1(n) = \lr{-1}^n$ in the Discrete Fourier Series expansion $X_0e^{i 0 \omega_0 n} + X_1e^{i\omega_0 n}$.
Let $\Psi = \{ \Psi_0, \Psi_1, \cdots , \Psi_{p-1} \}$ be an orthogonal basis. Then we can use the discrete Fourier series expansion to decompose a signal $\vec{x}$ by projecting the signal onto the basis vectors, or writing the signal as a linear combination of the basis vectors:
$$\vec{x} = X_0\Psi_0 + X_1\Psi_1 + \cdots + X_{p-1}\Psi_{p-1}$$
In general, the discrete Fourier series expansion is
$$ \sum \limits_{k=0}^{p-1}X_ke^{ik\omega_0n} = \sum \limits_{k=0}^{p-1}X_k\Psi_k $$
We can determine the coefficients by the fact that the basis is orthogonal. We can take the dot product over the norm squared to find the coefficients. Given that $\vec{x} = \left[ \begin{array}{c} x_0 \\ x_1 \\ \vdots \\ x_{p-1} \\ \end{array} \right]$, we can write this as a projection onto the orthogonal basis:
$$\vec{x} = \frac{\vec{x} \cdot \Psi_0}{\norm{\Psi_0}^2}\Psi_0 + \frac{\vec{x} \cdot \Psi_1}{\norm{\Psi_1}^2}\Psi_1 + \cdots + \frac{\vec{x} \cdot \Psi_{p-1}}{\norm{\Psi_{p-1}}^2}\Psi_{p-1} $$
The dot product is denoted as $a \cdot b$, which can also be written as
$$a^Tb = a_0b_0 + a_1b_1 + \cdots + a_{p-1}b_{p-1} = \sum_{\ell=0}^{p-1}a_\ell b_\ell = a \cdot b$$
Here we can prove that each coefficient of the projection formula is equal to the dot product of the original signal with the respective basis vector, over the norm of that vector squared:
\begin{proof}
We can write the signal as a linear combination of some orthogonal basis $\Psi = \{ \Psi_0, \Psi_1, \cdots , \Psi_{p-1} \}$.
\begin{align*}
\vec{x} &= X_0\Psi_0 + X_1\Psi_1 + \cdots + X_k\Psi_k + \cdots + X_{p-1}\Psi_{p-1} \\
\vec{x}\cdot\Psi_k &= X_0\Psi_0\cdot\Psi_k + X_1\Psi_1\cdot\Psi_k + \cdots + X_k\Psi_k\cdot\Psi_k + \cdots + X_{p-1}\Psi_{p-1}\cdot\Psi_k \\
\vec{x}\cdot\Psi_k &= X_k\Psi_k\cdot\Psi_k \quad \mbox{since the dot product of any orthogonal vectors is zero} \\
\frac{\vec{x}\cdot\Psi_k}{\Psi_k\cdot\Psi_k} &= X_k \\
\frac{\vec{x}\cdot\Psi_k}{\norm{\Psi_k}^2} &= X_k \\
\end{align*}
\end{proof}
When writing a signal as a linear combination of an orthogonal basis, we are writing it as a sum of complex exponentials, where $\Psi_k(n) = e^{ik\omega_0 n}$ is a frequency component of the signal, $\omega_0 = 2\pi/p$ is the fundamental frequency, and $p$ is the period of the signal.
If we have a period of 2, note that $\Psi_k(n+2) = e^{ik\omega_0\lr{n+2}} = e^{ik\omega_0 n}e^{i2k\omega_0} = e^{ik\omega_0 n}e^{i2\pi k} = e^{ik\omega_0 n} = \Psi_k(n)$. In general, for a period $p$, $\Psi_k(n+p) = \Psi_k(n)$.
How do we project complex-valued vectors onto each other? We cannot use the ordinary dot product, because it breaks down for complex vectors. For example, $\left[ \begin{array}{r} 1 \\ i \\ \end{array} \right] \cdot \left[ \begin{array}{r} 1 \\ i \\ \end{array} \right] = 0 \neq 2 = \norm{\left[ \begin{array}{r} 1 \\ i \\ \end{array} \right]}^2$. Say hello to the \emph{inner product}. Now we can define $\langle f,g\rangle = f^Tg^*$. This way we take the conjugate of the second term in the inner product. Now we can accomplish projections using the formula:
$$ X_k = \frac{\langle \vec{x},\Psi_k \rangle}{\langle\Psi_k,\Psi_k\rangle} $$
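Here is a small numerical illustration (a Python sketch) of why the conjugate matters, using the vector $[1, i]^T$ from above: the plain dot product of the vector with itself gives 0, while the inner product gives the correct norm squared, 2.
\begin{verbatim}
# Sketch: dot product vs. inner product for the complex vector [1, i].
v = [1, 1j]
dot   = sum(a * b for a, b in zip(v, v))               # 1 + i^2 = 0
inner = sum(a * b.conjugate() for a, b in zip(v, v))   # 1 + 1 = 2
assert dot == 0 and inner == 2
\end{verbatim}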
\subsubsection{Harmonic Relations and Frequency Content}
Fourier analysis keeps our attention focused on frequency content, the $\omega_0$ in $e^{i\omega_0 n}$. Note that $\omega_0 = \frac{2\pi}{p}$, where $p$ is the \emph{period}, and $\omega_0$ is the \emph{fundamental frequency}. Every signal can be decomposed into linear combinations of harmonically related complex exponentials. Harmonically related simply means each has a frequency that is an integer multiple of the fundamental frequency. Assume for now that this decomposition can be done. Then we can sum over any contiguous interval of length $p$ and a signal can be written as
$$ x(n) = \sum \limits_{k=0}^{p-1}X_ke^{i k \omega_0 n} $$
Let's compare this with the decomposition of a signal as shifted impulses:
$$ x(n) = \sum \limits_{m\in\Z} x(m) \delta(n-m) $$
When we pass this signal through a system $h$, we get the convolution sum. Let's derive this through the properties of a linear time-invariant system $h$:
\begin{align*}
\delta(n) \to &\fbox{h} \to h(n) \\
\delta(n-m) \to &\fbox{h} \to h(n-m) \\
x(m)\delta(n-m) \to &\fbox{h} \to x(m)h(n-m) \\
\sum_m x(m)\delta(n-m) \to &\fbox{h} \to \sum_m x(m)h(n-m) \\
\end{align*}
We know what happens when we pass a complex exponential into an LTI system:
\begin{align*}
e^{i\omega_0n} \to & \fbox{H} \to H(\omega_0)e^{i\omega_0n} \\
e^{ik\omega_0 n}\to & \fbox{H} \to H(k\omega_0)e^{ik\omega_0 n} \\
X_ke^{ik\omega_0 n}\to & \fbox{H} \to X_kH(k\omega_0)e^{ik\omega_0 n} \\
\sum_k X_ke^{ik\omega_0 n}\to & \fbox{H} \to \sum_k X_kH(k\omega_0)e^{ik\omega_0 n} \\
\end{align*}
So we can generalize this for a decomposed signal $x(n) = \sum_{k=0}^{p-1}X_ke^{i k \omega_0 n}$:
$$ x(n) = \sum \limits_{k=0}^{p-1}X_ke^{i k \omega_0 n} \to \fbox{H} \to \sum \limits_{k=0}^{p-1}X_kH(k \omega_0)e^{i k \omega_0 n} $$
\begin{claim}
Given a discrete fourier series expansion of a signal $x(n) = \sum_{k=0}^{p-1}X_ke^{i k \omega_0 n}$, the signal convolved with a signal $h$ can be given by:
$$(x*h)(n) = \sum \limits_{k=0}^{p-1}X_kH(k \omega_0)e^{i k \omega_0 n} $$
\end{claim}
\begin{proof}
\begin{align*}
(x*h)(n) &= \sum \limits_{k=-\infty}^{\infty} h(k) x(n-k) \\
(x*h)(n) &= \sum \limits_{k=-\infty}^{\infty} h(k) \sum_{\ell=0}^{p-1}X_\ell e^{i \ell \omega_0 (n-k)} \\
(x*h)(n) &= \sum \limits_{k=-\infty}^{\infty} h(k) \sum_{\ell=0}^{p-1}X_\ell e^{i \ell \omega_0 n}e^{-i \ell \omega_0k} \\
(x*h)(n) &= \sum_{\ell=0}^{p-1} \sum \limits_{k=-\infty}^{\infty} h(k) X_\ell e^{i \ell \omega_0 n}e^{-i \ell \omega_0k} \\
(x*h)(n) &= \sum_{\ell=0}^{p-1} X_\ell \lr{\sum \limits_{k=-\infty}^{\infty} h(k) e^{-i \ell \omega_0k}} e^{i \ell \omega_0 n}\\
(x*h)(n) &= \sum_{\ell=0}^{p-1} X_\ell H(\ell\omega_0) e^{i \ell \omega_0 n}\\
\end{align*}
\end{proof}
Hence, we have the description of what happens to a signal when passed through a system. This mathematically describes what an audio equalizer does when we pass in a signal described by its constituent frequencies.
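The claim can be checked numerically. This sketch (Python; the periodic signal and the FIR impulse response are arbitrary test choices) compares a direct convolution sample against the spectral-coefficient formula:
\begin{verbatim}
# Sketch: (x*h)(n) equals sum_k X_k H(k w0) e^{i k w0 n}.
import cmath, math
p, n = 4, 16
w0 = 2 * math.pi / p
x = [4, 2, -1, 3] * 8                    # p-periodic test signal
h = [1.0, 0.5, 0.25]                     # arbitrary FIR impulse response
H = lambda w: sum(hk * cmath.exp(-1j * w * k) for k, hk in enumerate(h))
Xk = [sum(x[m] * cmath.exp(-1j * k * w0 * m) for m in range(p)) / p
      for k in range(p)]
y_direct  = sum(hk * x[n - k] for k, hk in enumerate(h))
y_fourier = sum(Xk[k] * H(k * w0) * cmath.exp(1j * k * w0 * n)
                for k in range(p))
assert abs(y_direct - y_fourier) < 1e-9
\end{verbatim}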
\subsubsection{Periodicity}
What frequencies can be present in a signal $\vec{x}$? The harmonics of a signal with period $p$ are $\{0,\omega_0,2\omega_0,\dots,(p-1)\omega_0\}$. Notice that $p\omega_0$ is missing. This is because $e^{ip\omega_0n}=e^{i2\pi n} = e^{i0n}$. Keep in mind that $\omega_0 = 2\pi/p$. In fact, the complex exponential basis vectors are periodic with respect to $n$ and their indices $k$.
\begin{align*}
\Psi_k(n+p) &= e^{ik\omega_0(n+p)}=e^{ik\omega_0n}e^{ik\omega_0p}=e^{ik\omega_0n} = \Psi_k(n) \\
\Psi_{k+p}(n) &= e^{i(k+p)\omega_0n} = e^{ik\omega_0n}e^{ip\omega_0n} = \Psi_k(n)
\end{align*}
So we know that $\Psi_{-1}(n) = e^{-i\omega_0n} = \Psi_{p-1}(n) = e^{i(p-1)\omega_0n }$. In general, then, we can write out a discrete Fourier series expansion over any contiguous set of $p$ integers. The notation used is
$$ x(n) = \sum \limits_{k=\langle p\rangle} X_ke^{ik\omega_0n}$$
\subsubsection{Discrete Fourier Series}
\begin{claim}
For a signal written in terms of vectors that represent complex exponentials,
$$x(n) = \sum_{k=\langle p\rangle}X_k\Psi_k$$
the spanning set that makes up the set of vectors is an orthogonal set, in other words,
$$ \Psi_k \perp \Psi_\ell \quad k \neq \ell$$
\end{claim}
\begin{proof}
For $\Psi_k(n) = e^{ik\omega_0n}$ and $\Psi_\ell(n) = e^{i\ell\omega_0n}$, we can write
$\Psi_k =
\left[
\begin{array}{c}
\Psi_k(0) \\
\Psi_k(1) \\
\vdots \\
\Psi_k(p-1) \\
\end{array}
\right]
$, and
$\Psi_\ell =
\left[
\begin{array}{c}
\Psi_\ell(0) \\
\Psi_\ell(1) \\
\vdots \\
\Psi_\ell(p-1) \\
\end{array}
\right]
$.
Note that for an aperiodic signal, the inner product can be written as $\langle \Psi_k,\Psi_\ell\rangle = \sum \limits_{n=-\infty}^{\infty}\Psi_k(n)\Psi_\ell^*(n) $ or $\int_{-\infty}^{\infty}\Psi_k(t)\Psi_\ell^*(t)dt$. For this proof, we will use the summation for a periodic signal. We can take the inner product
\begin{align*}
\langle \Psi_k,\Psi_\ell\rangle &= \sum \limits_{n=0}^{p-1}\Psi_k(n)\Psi_\ell^*(n) \\
\langle \Psi_k,\Psi_\ell\rangle &= \sum \limits_{n=0}^{p-1} e^{ik\omega_0n}e^{-i\ell\omega_0n} \\
\langle \Psi_k,\Psi_\ell\rangle &= \sum \limits_{n=0}^{p-1} e^{i(k-\ell)\omega_0n} \\
\langle \Psi_k,\Psi_\ell\rangle &= \sum \limits_{n=0}^{p-1} \lr{e^{i(k-\ell)\omega_0}}^n \\
\end{align*}
Before we arrive at our answer, let's take a slight mathematical detour. Recall that we can write a sum $S$ as
\begin{align*}
S &= \sum \limits_{k=0}^{\infty}\alpha^k \\
S &= 1 + \alpha + \alpha^2 + \cdots \\
\alpha S &= \alpha + \alpha^2 + \alpha^3 + \cdots \\
(1-\alpha)S &= 1 \\
S &= \frac{1}{1-\alpha} \quad \mbox {if } \abs{\alpha} \lt 1
\end{align*}
or for a finite sum
\begin{align*}
S &= \sum \limits_{n=A}^{B}\alpha^n \\
S &= \alpha^A + \alpha^{A+1} + \cdots + \alpha^B \\
\alpha S &= \alpha^{A+1} + \alpha^{A+2} + \cdots + \alpha^{B+1} \\
(1-\alpha)S &= \alpha^{A} - \alpha^{B+1} \\
S &= \frac{\alpha^{A}-\alpha^{B+1}}{1-\alpha} \quad \mbox{if }\alpha \neq 1 \\
S &= \begin{cases}
\frac{\alpha^{A}-\alpha^{B+1}}{1-\alpha} & \alpha\neq1 \\
B-A+1 & \alpha=1 \\
\end{cases}
\end{align*}
Now back to our proof:
\begin{align*}
\langle \Psi_k,\Psi_\ell\rangle &= \sum \limits_{n=0}^{p-1} \lr{e^{i(k-\ell)\omega_0}}^n \\
\langle \Psi_k,\Psi_\ell\rangle &= \begin{cases} \frac{1-\lr{e^{i(k-\ell)\omega_0}}^p}{1-e^{i(k-\ell)\omega_0}} & k\neq\ell \\ (p-1)-0+1 & k=\ell \\ \end{cases} \\
\langle \Psi_k,\Psi_\ell\rangle &= \begin{cases} 0 & k\neq\ell\\ p & k=\ell \\ \end{cases}
\end{align*}
The numerator vanishes when $k \neq \ell$ because $\lr{e^{i(k-\ell)\omega_0}}^p = e^{i(k-\ell)2\pi} = 1$.
\end{proof}
We can generalize the result of the above proof further as
$$ \langle \Psi_k,\Psi_\ell\rangle = p\delta(k-\ell) $$
This works well because the inner product of a vector with itself is the \emph{norm} squared, and the inner product with any other orthogonal vector is 0.
$$\langle \Psi_k,\Psi_k\rangle = \norm{\Psi_k}^2 = p$$
A few properties about the inner product:
\begin{align*}
\langle f,g\rangle &= f^Tg^* \\
\langle \alpha f,g\rangle &= \alpha\langle f,g\rangle \\
\langle f,\beta g\rangle &= \beta^* \langle f,g\rangle \\
\langle \sum_k \alpha_k f_k, g\rangle &= \sum_k \alpha_k\langle f_k,g\rangle \\
\end{align*}
So to find $X_\ell$, we can project $\vec{x}$ onto $\Psi_\ell$.
\begin{align*}
\langle \vec{x}, \Psi_\ell\rangle &= \langle X_0\Psi_0 + \cdots + X_\ell\Psi_\ell + \cdots + X_{p-1}\Psi_{p-1}, \Psi_\ell\rangle \\
\langle \vec{x}, \Psi_\ell\rangle &= X_0\langle \Psi_0,\Psi_\ell\rangle + \cdots + X_\ell\langle \Psi_\ell,\Psi_\ell\rangle + \cdots + X_{p-1}\langle \Psi_{p-1},\Psi_\ell\rangle \\
\langle \vec{x}, \Psi_\ell\rangle &= X_\ell\langle \Psi_\ell,\Psi_\ell\rangle \quad \mbox{(all other inner products were 0)}\\
X_\ell &= \frac{\langle \vec{x}, \Psi_\ell\rangle }{\langle \Psi_\ell,\Psi_\ell\rangle }\\
X_\ell &= \frac{\langle \vec{x}, \Psi_\ell\rangle }{p}\\
X_\ell &= \frac{\vec{x}^T\Psi_\ell^* }{p}\\
X_\ell &= \frac{1}{p}\sum \limits_{n=0}^{p-1}x(n)\Psi_\ell^*(n) \\
X_\ell &= \frac{1}{p}\sum \limits_{n=0}^{p-1}x(n)e^{-i\ell\omega_0n} \\
\end{align*}
With the discrete Fourier series, we can write signals in two distinct forms. We can write a signal in the time domain as a series of complex exponentials, which tells us what happens in the frequency domain:
\begin{nicebox}
\begin{align*}
x(n) &= \sum \limits_{k=\langle p\rangle }X_ke^{ik\omega_0n} \quad &\mbox{(synthesis equation)}
\end{align*}
\end{nicebox}
The coefficient in the frequency domain tells us the amount that a certain frequency contributes to the overall signal, which is given by the running sum of the product of the signal in the time domain and the frequency of interest:
\begin{nicebox}
\begin{align*}
X_k &= \frac{1}{p} \sum \limits_{n=\langle p\rangle }x(n)e^{-ik\omega_0n} \quad &\mbox{(analysis equation)}
\end{align*}
\end{nicebox}
\begin{example}
Find the period and DFS expansion of $x(n) = \sin(2\pi n/3)$.
To find the period, simply set the inside of the sine function to $2\pi$:
\begin{align*}
\frac{2\pi p}{3} &= 2\pi \\
p &= 3
\end{align*}
Here is a cool method for solving. Luckily, the sine is easily written as the sum of complex exponentials:
\begin{align*}
x(n) &= \sin(2\pi n/3) \\
x(n) &= \frac{e^{i2\pi n/3}-e^{-i2\pi n/3}}{2i} \\
x(n) &= \frac{-1}{2i}e^{-i2\pi n/3} + 0 \cdot e^{i0n} + \frac{1}{2i} e^{i2\pi n/3} \quad \mbox{since } p = 3
\end{align*}
So quickly we can see that we have $e^{-i2\pi n/3}, e^{i0n}, e^{i2\pi n/3}$. This tells us we have $X_{-1} = \frac{-1}{2i}, X_0 = 0, X_1 = \frac{1}{2i}$.
\end{example}
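We can confirm the coefficients from this example with the analysis equation applied numerically (a sketch in Python): the projection recovers $X_0 = 0$, $X_1 = \frac{1}{2i}$, and $X_2 = X_{-1} = \frac{-1}{2i}$.
\begin{verbatim}
# Sketch: analysis equation for x(n) = sin(2 pi n / 3), p = 3.
import cmath, math
p = 3
w0 = 2 * math.pi / p
x = [math.sin(2 * math.pi * n / 3) for n in range(p)]
Xk = [sum(x[n] * cmath.exp(-1j * k * w0 * n) for n in range(p)) / p
      for k in range(p)]
assert abs(Xk[0]) < 1e-12
assert abs(Xk[1] - 1 / 2j) < 1e-12
assert abs(Xk[2] + 1 / 2j) < 1e-12       # X_2 = X_{-1}
\end{verbatim}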
\newpage
\section{Week 11}
\subsection{Spectral Coefficients}
Recall that a signal $x:\Z \to \C$ is periodic with fundamental period $p \in \N$ such that
$$x(n+p) = x(n) \mbox{ } \forall n \in \Z$$
We can decompose a signal $x$ into a linear combination of complex exponentials, where $\omega_0$ is the fundamental frequency, given by
$$ \omega_0 = \frac{2\pi}{p}$$
The signal $x$ can be written in terms of the spectral coefficients as weights on complex exponentials containing harmonics of the fundamental frequency $\omega_0$
$$ x(n) = \sum \limits_{k=\langle p \rangle}X_k\Psi_k(n) = \sum \limits_{k=\langle p \rangle}X_ke^{ik\omega_0 n} \quad \mbox{(synthesis equation)} $$
Note that, strictly speaking, the synthesis equation may disagree with $x$ at isolated points, but the energy of the difference is zero, so in practice we can use the equals sign. To learn more about this, research the Dirichlet conditions and the Gibbs phenomenon.
We can determine the coefficients with the analysis equation:
$$ X_k = \frac{1}{p} \sum \limits_{n=\langle p \rangle}x(n)e^{-ik\omega_0 n} \quad \mbox{(analysis equation)} $$
\begin{example}
Given $x(n) = \sin(2\pi n/3)$ with fundamental period 3, why can't $e^{i\frac{\omega_0}{2}n}$ be a term in $x(n)$?
Assume there exists such a frequency and that it contributes to the signal.
\begin{align*}
e^{i\frac{\omega_0}{2}n} &= e^{i\frac{2\pi}{2p}n} \\
e^{i\frac{\omega_0}{2}n} &= e^{i\frac{2\pi}{6}n} \\
\Rightarrow p &= 6
\end{align*}
By assuming the frequency exists, we determined the period and arrived at a contradiction. We know that in general the period $p = 2\pi k/\omega_0$ must be an integer for some $k \in \Z$, but quoting this fact alone is not recommended as a proof.
\end{example}
\begin{example}
Is $x(n) = \cos(n)$ a periodic signal?
$\cos(n + 2\pi) = \cos(n)$ and $p = 2\pi \notin \Z$. Therefore this cosine function is \emph{quasi-periodic}, but not periodic.
\end{example}
\begin{example}
If $x(n) = \cos(\omega_0 n)$, what must $\omega_0$ be for this signal to be periodic?
$$x(n+p) = \cos(\omega_0 n + \omega_0 p) $$
This implies that $\omega_0 p = 2\pi k$ for some $k \in \Z$. Then $\omega_0 = \frac{2\pi k}{p}$, so $\omega_0$ must be a rational multiple of $2\pi$.
\end{example}
Note that this constraint is unique to discrete time: a discrete-time sinusoid is periodic only if its frequency is a rational multiple of $2\pi$. In continuous time, $\cos(\omega_0 t)$ is periodic for every $\omega_0$.
\begin{example}
Given a signal that has a Kronecker delta every $p$ units, determine the harmonics and spectral coefficients.
\begin{center}
\includegraphics{images/pulsetrains/discrete.ps}
\end{center}
We can determine the $k^{th}$ spectral coefficient with the analysis equation:
$$ X_k = \frac{1}{p} \sum \limits_{n=\langle p \rangle}x(n)e^{-ik\omega_0 n} = \frac{1}{p} \sum \limits_{n=\langle p \rangle}\delta(n-\ell p)e^{-ik\omega_0 n} = e^0 \cdot \frac{1}{p} = \frac{1}{p}$$
So in this case we find that $X_k = 1/p $ $ \forall k$. This means that all of the frequencies in the period, namely $0,\omega_0, 2\omega_0, \dots, (p-1)\omega_0$, contribute equally to the signal.
\end{example}
Given a continuous-time signal $x(t)$ with period $p$, we can still break the signal into a linear combination of complex exponentials, but there is a slight difference in the synthesis equation. In discrete time, we sum over $p$ terms, whereas in continuous time, we sum over a countably infinite set of terms:
$$ x(t) = \sum \limits_{k=-\infty}^{\infty}X_ke^{ik\omega_0 t} $$
This also implies that we have a countably infinite set of frequencies $\{\dots, -2\omega_0,-\omega_0, 0, \omega_0, 2\omega_0,\dots\}$.
We can also write $x(t) = \sum X_k \Psi_k(t)$, and find the spectral coefficient in the same manner as in discrete-time using the inner product, thus
$$ X_k = \frac{\langle \vec{x},\Psi_k \rangle}{\langle \Psi_k,\Psi_k \rangle} $$
Recall that the inner product is defined as
\begin{nicebox}
\begin{align*}
\langle f,g \rangle &= \sum \limits_{n=\langle p \rangle}f(n)g^*(n) &\quad \mbox{(Discrete-time)} \\
\langle f,g \rangle &= \int_{\langle p \rangle}f(t)g^*(t)dt \quad &\mbox{(Continuous-time)}
\end{align*}
\end{nicebox}
\begin{claim}
For $k \neq \ell$, $\Psi_k(t) \perp \Psi_\ell(t)$
\end{claim}
\begin{proof}
\begin{align*}
\langle \Psi_k,\Psi_\ell \rangle &= \int_{\langle p \rangle} \Psi_k(t) \Psi_\ell^*(t) dt \\
\langle \Psi_k,\Psi_\ell \rangle &= \int_{\langle p \rangle} e^{i\omega_0 k t} e^{-i\omega_0 \ell t} dt \\
\langle \Psi_k,\Psi_\ell \rangle &= \int_{\langle p \rangle} e^{i\omega_0 t (k - \ell)} dt \\
\langle \Psi_k,\Psi_\ell \rangle &=
\begin{cases}
\int_{\langle p \rangle}dt & \mbox{ if } k = \ell\\
\int_{\langle p \rangle}e^{i\omega_0 t(k-\ell)}dt & \mbox{ if } k \neq \ell \\
\end{cases} \\
\langle \Psi_k,\Psi_\ell \rangle &=
\begin{cases}
p & \mbox{ if } k = \ell\\
0 & \mbox{ if } k \neq \ell \\
\end{cases}
\end{align*}
When $k \neq \ell$, the integral produces zero because we are integrating a complex exponential over a whole number of periods. Consider integrating a cosine function from 0 to $2\pi$: you sum up as much positive area as negative, and everything cancels out.
\end{proof}
\subsection{Discussion}
\begin{enumerate}
\item Low-Pass Filter
$$ H(\omega) = \frac{1}{1-\alpha e^{-i\omega}} \iff h(n) = \alpha^nu(n) $$
for $\alpha \in (0,1)$
\item High-Pass Filter
$$ H(\omega) = \frac{1}{1-\alpha e^{-i\omega}} \iff h(n) = \alpha^nu(n) $$
for $\alpha \in (-1,0)$
\item All-Pass Filter
$$ H(\omega) = \frac{\alpha - e^{-i\omega}}{1-\alpha e^{-i\omega}} \iff h(n) = \alpha^{n+1}u(n) - \alpha^{n-1}u(n-1) $$
\begin{claim}
$\abs{H(\omega)} = \abs{\frac{\alpha - e^{-i\omega}}{1-\alpha e^{-i\omega}}} = 1$
\end{claim}
\begin{proof}
\begin{align*}
H(\omega) &= \frac{\alpha - e^{-i\omega}}{1-\alpha e^{-i\omega}} \\
\abs{H(\omega)} &= \abs{\frac{\alpha - e^{-i\omega}}{1-\alpha e^{-i\omega}}} \\
\abs{H(\omega)} &= \frac{\abs{e^{-i\omega}-\alpha}}{\abs{1-\alpha e^{-i\omega}}} \\
\abs{H(\omega)} &= \frac{\abs{e^{-i\omega}-\alpha}}{\abs{e^{-i\omega}}\abs{e^{i\omega}-\alpha}} \\
\abs{H(\omega)} &= \frac{\abs{e^{-i\omega}-\alpha}}{\abs{e^{i\omega}-\alpha}} \\
\abs{H(\omega)} &= 1 \quad \mbox{ since complex conjugates have equal magnitudes} \\
\end{align*}
\end{proof}
\item Continuous-time High-Pass Filter / Notch Filter
$$ H(\omega) = \frac{i\omega}{i\omega + \alpha} \iff h(t) = \delta(t) - \alpha e^{-\alpha t}u(t)$$
This filter becomes a notch filter if $\alpha \approx 0$ (a narrow dip at $\omega = 0$), and a broader high-pass as $\alpha$ grows toward 1.
\item Continuous-time Low-Pass Filter / Anti-notch Filter
$$ H(\omega) = \frac{1}{i\omega + \alpha} \iff h(t) = e^{-\alpha t}u(t) $$
This filter becomes an anti-notch filter if $\alpha \approx 0$, and low-pass if $\alpha \approx 1$.
\item Notch Filter / Low Pass Filter
$$ H(\omega) = \frac{e^{i\omega}+1}{e^{i\omega}+\alpha} \iff h(n) = \lr{-\alpha}^nu(n)+\lr{-\alpha}^{n-1}u(n-1) $$
This filter is a notch if $\alpha \approx 1$, and is low-pass when $0 \lt \alpha \ll 1$.
\item Comb Filter
$$ H(\omega) = \frac{e^{i\omega N}}{e^{i\omega N}-\alpha} \iff h(n) = \sum \limits_{k=0}^\infty\alpha^k\delta(n-kN) $$
for $\abs{\alpha} \lt 1$
\end{enumerate}
We can generalize a system's effect on a complex exponential:
$$ e^{i\omega_0 n} \to \fbox{H} \to \abs{H(\omega_0)}e^{i\angle H(\omega_0)}e^{i\omega_0 n} = \abs{H(\omega_0)}e^{i\lr{\omega_0 n + \angle H(\omega_0)}} $$
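As a sanity check on the all-pass entry in the list above, here is a sketch (Python; $\alpha$ is an arbitrary real test value) confirming $\abs{H(\omega)} = 1$ on a frequency grid:
\begin{verbatim}
# Sketch: |(alpha - e^{-iw}) / (1 - alpha e^{-iw})| = 1 for real alpha.
import cmath, math
alpha = 0.7                              # arbitrary test value
for k in range(100):
    w = -math.pi + k * 2 * math.pi / 100
    z = cmath.exp(-1j * w)
    assert abs(abs((alpha - z) / (1 - alpha * z)) - 1) < 1e-12
\end{verbatim}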
\newpage
\section{Week 12}
\subsection{Continuous-Time Fourier Series}
The continuous-time Fourier series is defined as
\begin{nicebox}
\begin{align*}
x(t) &= \sum \limits_{k=-\infty}^{\infty}X_ke^{ik\omega_0t} \quad &\mbox{(synthesis)} \\
X_k &= \frac{1}{p} \int_{\langle p\rangle}x(t)e^{-ik\omega_0t}dt \quad &\mbox{(analysis)}
\end{align*}
\end{nicebox}
\begin{example}
Given a signal $x(t) = \sin(2\pi t/3)$, find all frequencies in the signal.
\begin{align*}
x(t) &= \sin(2\pi t/3) \\
x(t) &= \frac{1}{2i}e^{i2\pi t/3} - \frac{1}{2i}e^{-i2\pi t/3} \\
\end{align*}
Remember, all other spectral coefficients are 0, since we are in continuous-time!
\end{example}
\begin{example}
Given an impulse train $x(t)$ of Dirac deltas every $p$ units of time, find the spectral coefficients and corresponding frequencies within the signal.
% \begin{center}
% \includegraphics{images/pulsetrains/continuous.ps}
% \end{center}
\begin{align*}
X_k &= \frac{1}{p} \int_{\langle p\rangle}x(t)e^{-ik\omega_0t}dt \\
X_k &= \frac{1}{p} \int_{-p/2}^{p/2}x(t)e^{-ik\omega_0t}dt \\
X_k &= \frac{1}{p} \int_{-p/2}^{p/2}\delta(t)e^{-ik\omega_0t}dt \\
X_k &= \frac{1}{p} \cdot e^{0} \\
X_k &= \frac{1}{p} \\
\end{align*}
Therefore, we can determine that $\forall k$, $X_k = \frac{1}{p}$, and the signal is made up of every harmonic of $\omega_0$, and so we may write the signal in its continuous-time fourier series expansion as
$$ x(t) = \frac{1}{p} \sum \limits_{k=-\infty}^{\infty}e^{ik\omega_0 t} $$
Note that there are infinitely many harmonics of the fundamental frequency, not $p$ harmonics. This is a unique feature of the impulse train in continuous time.
\end{example}
\begin{example}
Consider a periodic signal in continuous time defined by $x(t) = \begin{cases} 1 & -T/2 \leq t \leq T/2 \\ 0 & \mbox{otherwise} \end{cases}$, where $T \in \R$ and $T/2 \lt p/2$.
% \begin{center}
% \includegraphics{images/idealpass/periodicpass.ps}
% \end{center}
\begin{align*}
X_k &= \frac{1}{p} \int_{\langle p\rangle}x(t)e^{-ik\omega_0 t}dt \\
X_k &= \frac{1}{p} \int_{-T/2}^{T/2}e^{-ik\omega_0 t}dt \\
X_k &= \frac{1}{p} \lr{ \left. \frac{e^{-ik\omega_0 t}}{-ik\omega_0} \right|_{-T/2}^{T/2}} \\
X_k &= \frac{1}{p} \frac{e^{-ik\omega_0 T/2} - e^{ik\omega_0 T/2}}{-ik\omega_0} \\
X_k &= \frac{1}{p} \frac{e^{ik\omega_0 T/2} - e^{-ik\omega_0 T/2}}{ik\omega_0} \\
X_k &= \frac{1}{p} \frac{2i\sin(k\omega_0 T/2)}{ik\omega_0} \\
X_k &= \frac{1}{k\pi} \sin(k\omega_0 T/2) \\
\end{align*}
It seems we have found a link between the analysis equation and a frequency response: the spectral coefficients are samples of a function of a continuous frequency variable.
$$ X_k = \left. X(\omega) \right|_{\omega=k\omega_0} \quad \mbox{where} \quad X(\omega) = \frac{2}{p}\frac{\sin(\omega T/2)}{\omega} $$
\end{example}
\subsection{Discrete-Time Fourier Transform}
In continuous-time, we have a \emph{continuous-time fourier series} (CTFS) expansion of a signal denoted as
$$ x(t) = \sum \limits_{k=-\infty}^{\infty}X_k e^{i k \omega_0 t} = \sum \limits_{k=-\infty}^{\infty} X_k \Psi_k(t) $$
where $x$ is periodic with fundamental period $p$, and where the fundamental frequency $\omega_0 = 2\pi/p$ and $x(t+p) = x(t)$ $\forall t$.
When we pass a complex exponential through a system $H$, we find the following relationship:
$$ e^{i\omega n} \to \fbox{H} \to H(\omega)e^{i\omega n} $$
where the frequency response of the system $H(\omega)$ is defined as
$$ H(\omega) = \sum \limits_{n=-\infty}^{\infty} h(n) e^{-i\omega n} = \sum \limits_{n=-\infty}^{\infty} h(n) \Psi_n(\omega) $$
Well, it turns out that $H(\omega)$ is the \emph{Discrete-Time Fourier Transform} (DTFT) of $h(n)$, also known as the analysis equation. Note that $\Psi_n(\omega) = e^{-in\Omega_0\omega}$, but we leave out $\Omega_0$ because the period in $\omega$ is $2\pi$, so $\Omega_0 = 2\pi/2\pi = 1$.
We know that $H(\omega)$ is periodic, and thus $H(\omega + 2\pi k) = H(\omega)$. Consider the following
\begin{align*}
H(\omega) &= \sum \limits_{n=-\infty}^{\infty} H_n \Psi_n(\omega) \\
\end{align*}
So we are looking for the spectral coefficients $h(n) = H_n$ $ \forall n$. We can solve for this, but first we must
\begin{enumerate}
\item Define the appropriate inner product
\item Prove mutual orthogonality of $\Psi_k$ and $\Psi_\ell$.
\end{enumerate}
The continuous-time Fourier series lives in the universe of $p$-periodic functions of a continuous variable $t$.
\begin{nicebox}
\begin{align*}
\langle f,g\rangle &= \int_{\langle p\rangle}f(t)g^*(t)dt
\end{align*}
\end{nicebox}
The discrete-time Fourier transform lives in the universe of $2\pi$-periodic functions of a continuous variable $\omega$.
\begin{nicebox}
\begin{align*}
\langle F,G\rangle &= \int_{\langle 2\pi \rangle}F(\omega)G^*(\omega)d\omega
\end{align*}
\end{nicebox}
\begin{claim}
$\Psi_k(\omega) \perp \Psi_\ell(\omega)$ if $k \neq \ell$
\end{claim}
\begin{proof}
\begin{align*}
\langle \Psi_k,\Psi_\ell\rangle &= \int_{\langle 2\pi\rangle }\Psi_k(\omega)\Psi_{\ell}^*(\omega)d\omega \\
\langle \Psi_k,\Psi_\ell\rangle &= \int_{\langle 2\pi\rangle }e^{-ik\omega}e^{i\ell\omega}d\omega \\
\langle \Psi_k,\Psi_\ell\rangle &= \int_{\langle 2\pi\rangle }e^{-i\lr{k-\ell}\omega}d\omega \\
\langle \Psi_k,\Psi_\ell\rangle &= \int_{\langle 2\pi\rangle }\cos\lr{(k-\ell)\omega}d\omega -i\int_{\langle 2\pi\rangle }\sin\lr{(k-\ell)\omega}d\omega\\
\langle \Psi_k,\Psi_\ell\rangle &= 0 -i0\\
\end{align*}
\end{proof}
The last statement is true because anytime we integrate over a full period $2\pi$, $\forall m \in \Z\setminus \{0\}$,
$$ \int_{\langle 2\pi\rangle}\cos\lr{m\omega}d\omega = \int_{\langle 2\pi\rangle}\sin\lr{m\omega}d\omega = 0 $$
The reason that this works for all $m \in \Z\setminus \{0\}$ is because we will integrate over $\abs{m}$ cycles, which is still always zero. Now that we have a defined inner product and can assume orthogonality, we can solve for the impulse response, otherwise known as spectral coefficients:
\begin{align*}
H &= \sum \limits_{k=-\infty}^{\infty}h(k)\Psi_k \\
\langle H,\Psi_n\rangle &= \langle \sum \limits_{k=-\infty}^{\infty}h(k)\Psi_k, \Psi_n\rangle \\
\langle H,\Psi_n\rangle &= h(n)\langle \Psi_n, \Psi_n\rangle \\
\frac{\langle H,\Psi_n\rangle}{\langle \Psi_n, \Psi_n\rangle}&= h(n) \\
\end{align*}
The spectral coefficient $h(n)$ (a.k.a. the impulse response) is the ratio of inner products: the inner product of $H$ and $\Psi_n$, over the norm of $\Psi_n$ squared. We can show that $\norm{\Psi_n}^2 = 2\pi$:
\begin{align*}
\norm{\Psi_n}^2 &= \langle \Psi_n,\Psi_n \rangle \\
\norm{\Psi_n}^2 &= \int_{\langle 2\pi\rangle}e^{-i\omega n}e^{i\omega n}d\omega \\
\norm{\Psi_n}^2 &= \int_{\langle 2\pi\rangle}d\omega \\
\norm{\Psi_n}^2 &= 2\pi
\end{align*}
Now we can solve for the impulse response:
\begin{align*}
h(n) &= \frac{\langle H,\Psi_n\rangle}{\langle \Psi_n, \Psi_n\rangle} \\
h(n) &= \frac{\langle H,\Psi_n\rangle}{\norm{\Psi_n}^2} \\
h(n) &= \frac{1}{2\pi} \int_{\langle 2\pi\rangle} H(\omega)\Psi_n^*(\omega)d\omega \\
\end{align*}
$h(n)$ demonstrates the inverse transform relationship. Here we have the DTFT transform pair:
\begin{nicebox}
\begin{align*}
h(n) &= \frac{1}{2\pi} \int_{\langle 2\pi\rangle } H(\omega)e^{i\omega n}d\omega \quad &\mbox{(synthesis equation)} \\
H(\omega) &= \sum \limits_{n=-\infty}^{\infty}h(n)e^{-i\omega n} \quad &\mbox{(analysis equation)}
\end{align*}
\end{nicebox}
If we rearrange the synthesis equation slightly, we can analyze its meaning.
$$ h(n) = \int_{\langle 2\pi \rangle}\frac{H(\omega)d\omega}{2\pi}e^{i\omega n} $$
We can write a signal as a linear combination of an uncountably infinite set of complex exponentials; that is, a continuum of frequencies can potentially contribute to $h(n)$, not just integer multiples of a fundamental frequency.
What distinguishes each frequency within $h(n)$ is $H(\omega)$. The coefficients of the linear combination are $\frac{H(\omega)d\omega}{2\pi}$, but $d\omega/2\pi$ is common to all frequencies, so $H(\omega)$ is what determines how much of frequency $\omega$ is present.
\begin{example}
$x(n) = \delta(n)$, find $X(\omega)$.
We can use the analysis equation:
$$ X(\omega) = \sum \limits_{n=-\infty}^{\infty}\delta(n)e^{-i\omega n} = 1$$
Thus, this is a Fourier transform pair:
$$ \delta(n) \ftp 1 $$
\end{example}
\begin{example}
Consider an ideal low-pass filter that kills all frequencies except frequencies in $[-A,A]$. What is the impulse response $h(n)$?
\begin{center}
\includegraphics{images/idealpass/idealpass.ps}
\end{center}
Use the discrete-time fourier transform.
\begin{align*}
h(n) &= \frac{1}{2\pi} \int_{\langle 2\pi \rangle}H(\omega)e^{i\omega n} d\omega \\
h(n) &= \frac{1}{2\pi} \int_{-A}^{A}e^{i\omega n} d\omega \\
h(n) &= \frac{1}{2\pi} \lr{\left. \frac{e^{i\omega n}}{in}\right|_{-A}^{A}} \\
h(n) &= \frac{1}{2\pi} \lr{\frac{e^{iAn}-e^{-iAn}}{in}} \\
h(n) &= \frac{1}{2\pi} \frac{2i\sin (An)}{in} \\
h(n) &= \frac{1}{\pi} \frac{\sin (An)}{n} \\
\end{align*}
Note that we could have also written this as
\begin{align*}
h(n) &= \frac{1}{2\pi}\left[ \int_{-A}^{A}\cos(\omega n) d\omega + i \int_{-A}^{A}\sin(\omega n) d\omega\right] \\
h(n) &= \frac{1}{2\pi}\int_{-A}^{A}\cos(\omega n) d\omega \\
h(n) &= \left. \frac{1}{2\pi} \frac{\sin(\omega n)}{n} \right|_{-A}^{A} \\
h(n) &= \frac{1}{\pi} \frac{\sin (An)}{n} \\
\end{align*}
Since sine is an odd function, integrating it from $-A$ to $A$ gives zero, so we are left with only the cosine integral. What happens to the impulse response $h(n)$ as $A$ approaches $\pi$?
$$ \lim_{A\to\pi} \frac{\sin (An)}{\pi n} = \frac{\sin (\pi n)}{\pi n} $$
Now for all $n \neq 0$, $h(n) = 0$, because we are feeding a sine function multiples of $\pi$, and $h(0)=1$ by l'Hopital's rule. This means that as $A$ approaches $\pi$, the impulse response becomes the identity element of convolution:
$$ \lim_{A\to\pi} \frac{\sin (An)}{\pi n} = \delta(n) $$
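Numerically (a sketch; the sample range is arbitrary), at $A = \pi$ the samples of $\frac{\sin(An)}{\pi n}$ away from $n=0$ vanish up to roundoff, while $h(0) = A/\pi = 1$:
\begin{verbatim}
# Sketch: sin(A n)/(pi n) at A = pi is delta(n), up to roundoff.
import math
A = math.pi
h = lambda n: A / math.pi if n == 0 else math.sin(A * n) / (math.pi * n)
assert abs(h(0) - 1) < 1e-12
assert all(abs(h(n)) < 1e-12 for n in range(1, 50))
\end{verbatim}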
\end{example}
\begin{example}
Find the frequency response for $x(n) = \delta(n-1)$.
$$ X(\omega) = \sum \limits_{n=-\infty}^{\infty}\delta(n-1)e^{-i\omega n} = e^{-i\omega} $$
Now we know the following fourier transform pairs:
\begin{align*}
\delta(n) &\ftp 1 \\
\delta(n-1) &\ftp e^{-i\omega} \\
\delta(n-N) &\ftp e^{-i\omega N} \\
\end{align*}
\end{example}
\begin{example}
Find the impulse response $g(n)$ of the frequency response $G(\omega)$.
$$ G(\omega) = \begin{cases}
e^{-i\omega/2} & \mbox{if }\abs{\omega}<\pi \\
\mbox{periodically replicates outside} \\
\end{cases}
$$
\begin{align*}
g(n) &= \frac{1}{2\pi} \int_{\langle 2\pi\rangle}e^{-i\omega/2}e^{i\omega n}d\omega \\
g(n) &= \frac{1}{2\pi} \int_{-\pi}^{\pi}e^{i\omega\lr{n - 1/2}}d\omega \\
g(n) &= \frac{1}{2\pi} \left. \lr{\frac{e^{i\omega\lr{n-1/2}}}{i\lr{n-1/2}}}\right|_{-\pi}^{\pi} \\
g(n) &= \frac{1}{2\pi} \frac{2i\sin(\pi\lr{n-1/2})}{i(n-1/2)} \\
g(n) &= \frac{\sin(\pi\lr{n-1/2})}{\pi(n-1/2)} \\
\end{align*}
This is an example of a \emph{Half-Sample Delay System}.
\begin{example}
$$ F(\omega) = \begin{cases}
e^{-i\omega \alpha} & \mbox{if }\abs{\omega}\lt \pi \\
\mbox{periodically replicates outside} \\
\end{cases}
$$
From the previous example, we know that
$$ f(n) = \frac{\sin(\pi(n-\alpha))}{\pi(n-\alpha)} $$
Show that $f(n)$ reduces to $\delta(n-\alpha)$ whenever $\alpha \in \Z$.
\end{example}
\section{Week 13}
\subsection{Properties of the DTFT}
\subsubsection{Translation - Time-Shifting}
Given $x(n) \ftp X(\omega)$, what is the transform of a time-shifted version $x(n-n_0)$?
We can use the DTFT directly:
\begin{align*}
\hat{X}(\omega) &= \sum \limits_{n=-\infty}^{\infty}\hat{x}(n)e^{-i\omega n} \\
\hat{X}(\omega) &= \sum \limits_{n=-\infty}^{\infty}x(n-n_0)e^{-i\omega n} \\
\hat{X}(\omega) &= \sum \limits_{k=-\infty}^{\infty}x(k)e^{-i\omega (k+n_0)} \quad \mbox{let }k=n-n_0 \\
\hat{X}(\omega) &= \sum \limits_{k=-\infty}^{\infty}x(k)e^{-i\omega k}e^{-i\omega n_0} \\
\hat{X}(\omega) &= X(\omega)e^{-i\omega n_0} \\
\end{align*}
Therefore, time-shifting the signal by $n_0$ in the time domain corresponds to multiplication by $e^{-i\omega n_0}$ in the frequency domain:
\begin{nicebox}
$$ x(n-n_0) \ftp X(\omega)e^{-i\omega n_0} $$
\end{nicebox}
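Here is a minimal NumPy sketch (with an arbitrary finite-support test signal, purely illustrative) that checks the boxed pair by evaluating both sides on a frequency grid:
\begin{verbatim}
import numpy as np

def dtft(sig, idx, w):
    # X(w) = sum_n x(n) e^{-i w n} for a finite-support signal
    return (sig[None, :] * np.exp(-1j * np.outer(w, idx))).sum(axis=1)

rng = np.random.default_rng(0)
x = rng.standard_normal(8)            # arbitrary signal supported on n = 0..7
n = np.arange(8)
n0 = 3                                # arbitrary shift
w = np.linspace(-np.pi, np.pi, 101)

X = dtft(x, n, w)                     # transform of x(n)
X_shift = dtft(x, n + n0, w)          # transform of x(n - n0): same values on n0..n0+7
assert np.allclose(X_shift, X * np.exp(-1j * w * n0))
\end{verbatim}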
\subsubsection{Frequency-Shifting}
Given $x(n) \ftp X(\omega)$, what is the inverse transform of $X(\omega-\omega_0)$?
We can determine this using the synthesis equation:
\begin{align*}
\hat{x}(n) &= \frac{1}{2\pi} \int_{\langle 2 \pi \rangle}\hat{X}(\omega)e^{i\omega n}d\omega \\
\hat{x}(n) &= \frac{1}{2\pi} \int_{\langle 2 \pi \rangle}X(\omega-\omega_0)e^{i\omega n}d\omega \\
\hat{x}(n) &= \frac{1}{2\pi} \int_{\langle 2 \pi \rangle}X(\Omega)e^{i(\Omega + \omega_0) n}d\Omega \quad \mbox{let } \Omega = \omega-\omega_0 \\
\hat{x}(n) &= \frac{1}{2\pi} \int_{\langle 2 \pi \rangle}X(\Omega)e^{i\Omega n}e^{i\omega_0 n}d\Omega \\
\hat{x}(n) &= \lr{\frac{1}{2\pi} \int_{\langle 2 \pi \rangle}X(\Omega)e^{i\Omega n}d\Omega}e^{i\omega_0 n} \\
\hat{x}(n) &= x(n)e^{i\omega_0 n} \\
\end{align*}
Therefore frequency-shifting in the frequency domain by $\omega_0$ corresponds to multiplication in the time domain by $e^{i\omega_0 n}$:
\begin{nicebox}
$$ X(\omega - \omega_0) \iftp x(n)e^{i\omega_0 n} $$
\end{nicebox}
\subsubsection{Time-Reversal}
Given $x(n) \ftp X(\omega)$, what is the transform of a time-reversed version $x(-n)$?
We can use the DTFT directly:
\begin{align*}
\hat{X}(\omega) &= \sum \limits_{n=-\infty}^{\infty} \hat{x}(n) e^{-i\omega n} \\
\hat{X}(\omega) &= \sum \limits_{n=-\infty}^{\infty} x(-n) e^{-i\omega n} \\
\hat{X}(\omega) &= \sum \limits_{m=-\infty}^{\infty} x(m) e^{i\omega m} \quad \mbox{let }m=-n\\
\hat{X}(\omega) &= \sum \limits_{m=-\infty}^{\infty} x(m) e^{-i(-\omega) m} \\
\hat{X}(\omega) &= X(-\omega)\\
\end{align*}
Therefore time-reversal in the time-domain corresponds to frequency reversal in the frequency domain:
\begin{nicebox}
$$ x(-n) \ftp X(-\omega) $$
\end{nicebox}
\subsubsection{Conjugation}
Given $x(n) \ftp X(\omega)$, what is the transform of the conjugate signal $x^*(n)$?
We can use the DTFT directly:
\begin{align*}
\hat{X}(\omega) &= \sum \limits_{n=-\infty}^{\infty}\hat{x}(n)e^{-i\omega n} \\
\hat{X}(\omega) &= \sum \limits_{n=-\infty}^{\infty}x^*(n)e^{-i\omega n} \\
\hat{X}^*(\omega) &= \lr{\sum \limits_{n=-\infty}^{\infty}x^*(n)e^{-i\omega n}}^* \\
\hat{X}^*(\omega) &= \sum \limits_{n=-\infty}^{\infty}x(n)e^{i\omega n} \\
\hat{X}^*(\omega) &= \sum \limits_{n=-\infty}^{\infty}x(n)e^{-i(-\omega) n} \\
\hat{X}^*(\omega) &= X(-\omega) \\
\hat{X}(\omega) &= X^*(-\omega) \\
\end{align*}
Therefore conjugation in the time domain corresponds to conjugate frequency reversal in the frequency domain:
\begin{nicebox}
$$ x^*(n) \ftp X^*(-\omega) $$
\end{nicebox}
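The same kind of numerical check works here; the sketch below (arbitrary complex-valued test signal, illustrative only) compares the transform of $x^*(n)$ against $X^*(-\omega)$:
\begin{verbatim}
import numpy as np

def dtft(sig, idx, w):
    return (sig[None, :] * np.exp(-1j * np.outer(w, idx))).sum(axis=1)

rng = np.random.default_rng(1)
x = rng.standard_normal(6) + 1j * rng.standard_normal(6)   # complex test signal
n = np.arange(6)
w = np.linspace(-np.pi, np.pi, 201)

lhs = dtft(np.conj(x), n, w)       # transform of x*(n)
rhs = np.conj(dtft(x, n, -w))      # X*(-w)
assert np.allclose(lhs, rhs)
\end{verbatim}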
Note that you can also factor out the conjugation:
\begin{align*}
\hat{X}(\omega) &= \sum \limits_{n=-\infty}^{\infty}x^*(n)e^{-i\omega n} \\
\hat{X}(\omega) &= \lr{\sum \limits_{n=-\infty}^{\infty}x(n)e^{i\omega n}}^* \\
\hat{X}(\omega) &= \lr{X(-\omega)}^* \\
\hat{X}(\omega) &= X^*(-\omega) \\
\end{align*}
\subsubsection{Complex Exponential}
Given $x(n) \ftp X(\omega)$, what is the transform of $x(n)=e^{i\omega_0 n}$?
We know that a single complex exponential contains only the frequency $\omega_0$, so its transform should be concentrated there: a Dirac delta centered at $\omega_0$. This gives us a frequency response of $A\delta(\omega -\omega_0)$ for some constant $A$.
%\includegraphics{images/pulsetrains/freqpulse.ps}
\begin{center}
\begin{pspicture}(-2,-1)(6,1.5)
\psline{->}(-1,0)(6,0)
\rput(6,-0.25){$\omega$}
\rput(6,1){$X(\omega)$}
\rput(6,0.5){$\cdots$}
\rput(-1,0.5){$\cdots$}
\rput(2.8,1){$(A)$}
\psline{->}(0,0)(0,1)
\rput(0,-0.25){$\omega_0-2\pi$}
\psline{->}(2.5,0)(2.5,1)
\rput(2.5,-0.25){$\omega_0$}
\psline{->}(5,0)(5,1)
\rput(5,-0.25){$\omega_0+2\pi$}
\end{pspicture}
\end{center}
So we can use the synthesis equation over an interval of $2\pi$:
\begin{align*}
x(n) &= \frac{1}{2\pi} \int_{\langle 2 \pi \rangle } X(\omega) e^{i\omega n}d\omega \\
e^{i\omega_0 n} &= \frac{1}{2\pi} \int_{\langle 2 \pi \rangle } A \delta(\omega-\omega_0) e^{i\omega n}d\omega \\
e^{i\omega_0 n} &= \frac{A}{2\pi} \int_{-\pi}^{\pi} \delta(\omega-\omega_0) e^{i\omega n}d\omega \\
e^{i\omega_0 n} &= \frac{A}{2\pi} e^{i\omega_0 n} \\
A &= 2\pi \\
\end{align*}
This tells us that $X(\omega) = 2\pi\delta(\omega - \omega_0)$, but we know that $X(\omega + 2\pi k) = X(\omega) $ $ \forall k \in \Z$, so we must write the frequency response in the form:
$$ X(\omega) = \sum \limits_{k=-\infty}^{\infty} 2\pi \delta(\omega-\omega_0 +2\pi k) $$
If we restrict the domain to, for example, $-\pi \leq \omega \lt \pi$, then $X(\omega) = 2\pi\delta(\omega-\omega_0)$.
\subsubsection{Convolution}
Given $f(n) \ftp F(\omega)$ and $g(n) \ftp G(\omega)$, what is the transform of a convolved signal $(f*g)(n)$?
We can use a combination of the convolution sum and the DTFT:
\begin{align*}
H(\omega) &= \sum \limits_{n=-\infty}^{\infty} h(n) e^{-i\omega n} \\
H(\omega) &= \sum \limits_{n=-\infty}^{\infty} (f*g)(n) e^{-i\omega n} \\
H(\omega) &= \sum \limits_{n=-\infty}^{\infty} \lr{\sum \limits_{\ell = -\infty}^{\infty}f(\ell)g(n-\ell)}e^{-i\omega n} \\
H(\omega) &= \sum \limits_{\ell=-\infty}^{\infty} \sum \limits_{n = -\infty}^{\infty}f(\ell)g(n-\ell)e^{-i\omega n} \quad \mbox{swap the summations}\\
H(\omega) &= \sum \limits_{\ell=-\infty}^{\infty} f(\ell)\sum \limits_{m = -\infty}^{\infty}g(m)e^{-i\omega (m+\ell)} \quad \mbox{let } m = n -\ell\\
H(\omega) &= \sum \limits_{\ell=-\infty}^{\infty} f(\ell)\sum \limits_{m = -\infty}^{\infty}g(m)e^{-i\omega m} e^{-i\omega\ell} \\
H(\omega) &= \sum \limits_{\ell=-\infty}^{\infty} f(\ell)e^{-i\omega\ell} \sum \limits_{m = -\infty}^{\infty}g(m)e^{-i\omega m} \\
H(\omega) &= F(\omega)G(\omega)
\end{align*}
Therefore we can conclude that convolution in the time-domain corresponds to multiplication in the frequency domain:
\begin{nicebox}
$$ (f*g)(n) \ftp F(\omega)G(\omega) $$
\end{nicebox}
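As a sanity check of the convolution property, the following NumPy sketch (arbitrary short test signals) convolves in the time domain and compares against the product of the transforms:
\begin{verbatim}
import numpy as np

def dtft(sig, w):
    n = np.arange(len(sig))
    return (sig[None, :] * np.exp(-1j * np.outer(w, n))).sum(axis=1)

f = np.array([1.0, 2.0, 3.0])            # supported on n = 0..2
g = np.array([0.5, -1.0, 0.25, 2.0])     # supported on n = 0..3
h = np.convolve(f, g)                    # (f*g)(n), supported on n = 0..5
w = np.linspace(-np.pi, np.pi, 301)

assert np.allclose(dtft(h, w), dtft(f, w) * dtft(g, w))
\end{verbatim}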
\subsection{DT-LTI Systems}
\subsubsection{Causal LCCDEs}
We know that the frequency response of a system is the Fourier transform of its impulse response. When we pass a signal through such a system, the transform of the output is the product of the transforms of the input and the impulse response. In other words, convolution in the time domain corresponds to multiplication in the frequency domain:
$$ y(n) = (x*h)(n) \ftp Y(\omega) = X(\omega)H(\omega) $$
This tells us that the frequency response of the system can be described as the ratio of the output transform to the input transform:
$$ H(\omega) = \frac{Y(\omega)}{X(\omega)} $$
\begin{example}
Find the frequency response of the causal linear constant coefficient difference equation (LCCDE) given by
$$ \sum \limits_{k=0}^{N}a_ky(n-k) = \sum \limits_{\ell=0}^{M}b_\ell x(n-\ell) $$
We can use the following properties of the DTFT:
\begin{align*}
x(n) &\ftp X(\omega) \\
x(n-\ell) &\ftp e^{-i\ell\omega}X(\omega) \\
y(n-k) &\ftp e^{-i k \omega}Y(\omega) \\
\end{align*}
Now we can solve the LCCDE:
\begin{align*}
\sum \limits_{k=0}^{N}a_ky(n-k) &= \sum \limits_{\ell=0}^{M}b_\ell x(n-\ell) \\
\sum \limits_{k=0}^{N}a_ke^{-i k \omega}Y(\omega) &= \sum \limits_{\ell=0}^{M}b_\ell e^{-i \ell \omega}X(\omega) \\
Y(\omega) \lr{\sum \limits_{k=0}^{N}a_ke^{-i k \omega}} &= X(\omega) \lr{\sum \limits_{\ell=0}^{M}b_\ell e^{-i \ell \omega}} \\
\frac{Y(\omega)}{X(\omega)} &= \frac{\sum \limits_{\ell=0}^{M}b_\ell e^{-i \ell \omega}}{\sum \limits_{k=0}^{N}a_ke^{-i k \omega}} \\
H(\omega) &= \frac{\sum \limits_{\ell=0}^{M}b_\ell e^{-i \ell \omega}}{\sum \limits_{k=0}^{N}a_ke^{-i k \omega}} \\
\end{align*}
It turns out that $H(\omega)$ is a ratio of polynomials in $e^{-i\omega}$; this is characteristic of systems described by LCCDEs. Note that if we define $z = e^{i\omega}$, we can write this expression as
$$ \hat{H}(z) = \frac{\sum \limits_{\ell=0}^{M}b_\ell z^{-\ell}}{\sum \limits_{k=0}^{N}a_k z^{-k}} = \frac{P(z)}{Q(z)} $$
\end{example}
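To see the ratio-of-polynomials structure concretely, here is a sketch that evaluates $H(\omega)$ for a hypothetical first-order LCCDE (the coefficients below are invented for the demo) and cross-checks it against the DTFT of the impulse response produced by running the recursion:
\begin{verbatim}
import numpy as np

# Hypothetical system: y(n) - 0.5 y(n-1) = x(n) + 2 x(n-1)
a = np.array([1.0, -0.5])                 # a_k coefficients
b = np.array([1.0, 2.0])                  # b_l coefficients
w = np.linspace(-np.pi, np.pi, 257)

z_inv = np.exp(-1j * w)                   # z^{-1} = e^{-i w}
H = (sum(bl * z_inv**l for l, bl in enumerate(b))
     / sum(ak * z_inv**k for k, ak in enumerate(a)))

# Cross-check: run the recursion on an impulse; the response decays like 0.5^n,
# so truncating at N = 200 terms is far below floating-point precision.
N = 200
h = np.zeros(N)
for n in range(N):
    h[n] = 0.5 * (h[n-1] if n > 0 else 0) + (1 if n == 0 else 0) + (2 if n == 1 else 0)
H_sim = (h[None, :] * np.exp(-1j * np.outer(w, np.arange(N)))).sum(axis=1)
assert np.allclose(H, H_sim, atol=1e-8)
\end{verbatim}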
\begin{example}
What is the LCCDE of the frequency response given by $H(\omega) = \frac{1}{1-\alpha e^{-i\omega}}$?
\begin{align*}
H(\omega) &= \frac{1}{1-\alpha e^{-i\omega}} \\
\frac{Y(\omega)}{X(\omega)} &= \frac{1}{1-\alpha e^{-i\omega}} \\
Y(\omega) - \alpha e^{-i\omega}Y(\omega) &= X(\omega) \\
y(n) - \alpha y(n-1) &= x(n) \quad \mbox{inverse Fourier transform of both sides} \\
\end{align*}
\end{example}
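A quick simulation (illustrative, assuming $\alpha = 0.8$) confirms the correspondence: running the recursion on an impulse reproduces $\alpha^n u(n)$, whose truncated DTFT matches $\frac{1}{1-\alpha e^{-i\omega}}$:
\begin{verbatim}
import numpy as np

alpha, N = 0.8, 400                  # |alpha| < 1; N truncates the geometric tail
y = np.zeros(N)
for n in range(N):                   # y(n) = alpha y(n-1) + x(n), with x(n) = delta(n)
    y[n] = (alpha * y[n - 1] if n > 0 else 0.0) + (1.0 if n == 0 else 0.0)

assert np.allclose(y, alpha ** np.arange(N))      # impulse response alpha^n u(n)

w = np.linspace(-np.pi, np.pi, 101)
H = (y[None, :] * np.exp(-1j * np.outer(w, np.arange(N)))).sum(axis=1)
assert np.allclose(H, 1.0 / (1.0 - alpha * np.exp(-1j * w)), atol=1e-8)
\end{verbatim}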
\subsubsection{Delay-Adder-Gain Block Diagrams}
A \emph{Delay-Adder-Gain} (DAG) block diagram implements an LCCDE using three kinds of components: adders, gains (multipliers), and delay blocks. The diagram below implements the LCCDE given by $y(n) = \alpha y(n-1) + x(n)$. The minimum number of delay blocks needed is the order of the filter.
%\includegraphics{images/feedback/dag.ps}
\begin{center}
\begin{pspicture}(0,-3.5)(8,2)
\rput(0,0){$x$}
\rput(7.2,0){$y$}
\rput(5.5,-1.25){$z^{-1}$}
\rput(3.5,-2){$\alpha$}
\rput(1.5,-2.5){$H$}
% plus or minus for adder
\rput(2.1,-0.5){$-$}
\rput(1.45,0.35){$+$}
\psframe(1,-3)(6.5,1)
\pspolygon(4,-1.5)(4,-2.5)(2.5,-2)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(0.25,0)(1.5,0)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(1.75,-2)(1.75,-0.25)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(5.5,-2)(4,-2)
\psline[linewidth=1.25 pt](2.5,-2)(1.75,-2)
\psline[linewidth=1.25 pt, arrowscale=1.5]{->}(2,0)(7,0)
\psline[linewidth=1.25 pt](5.5,-1.7)(5.5,-2)
\psline[linewidth=1.25 pt]{->}(5.5,0)(5.5,-0.75)
\psframe(5, -1.7)(6,-0.75)
\pscircle(1.75,0){0.25}
\psline(1.5,0)(2,0)
\psline(1.75,0.25)(1.75,-0.25)
\end{pspicture}
\end{center}
Notice that the DAG block diagram shows the delay block with $z^{-1}$. This is because $y(n-1) \ftp e^{-i\omega}Y(\omega) = z^{-1}Y(\omega)$. The DAG shows a system that is rational in $z=e^{i\omega}$. Here is an example of a signal whose DTFT is not rational in $z=e^{i\omega}$:
$$ x(n) = \frac{\sin(An)}{\pi n} $$
The Fourier transform of this signal is the ideal low-pass filter:
%\includegraphics{images/idealpass/idealpass.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(1,-1)(3,1.3)
%axis
\psline(1,0)(3,0)
% 0
\psline(2,0.1)(2,-0.1)
% pi and -pi
\psline(1,0.1)(1,-0.1)
\rput(1,-0.25){$-\pi$}
\psline(3,0.1)(3,-0.1)
\rput(3,-0.25){$\pi$}
\rput(3.3,-0.25){$\omega$}
\rput(3,1){$H(\omega)$}
\rput(1.35,1){$(1)$}
\psline(1.5,0)(1.5,1)
\psline(1.5,1)(2.5,1)
\psline(2.5,1)(2.5,0)
\rput(2,-0.25){0}
\rput(1.5,-0.25){-A}
\rput(2.5,-0.25){A}
\end{pspicture}
\end{center}
We call this filter "ideal" because we cannot implement it: the cutoff/rolloff of a realizable filter can never be perfectly vertical like the one shown in the frequency domain. If a filter has an LCCDE it is implementable; otherwise it is not.
\subsubsection{Rational DTFTs}
We know $x(n) = \alpha^n u(n) \ftp \frac{1}{1-\alpha e^{-i\omega}}$ where $\abs{\alpha} \lt 1$. What about a translated version of $x(n)$? We know that $x(n - n_0) \ftp e^{-i\omega n_0}X(\omega)$, so we can determine that
$$ x(n-n_0) = \alpha^{n-n_0}u(n-n_0) \ftp \frac{e^{-i\omega n_0}}{1-\alpha e^{-i\omega}} $$
Any shifted version of this signal has a Fourier transform that is rational in $z=e^{i\omega}$.
\begin{example}
Given an impulse response $g(n) = -2^nu(-n-1)$, find the frequency response $G(\omega)$.
First note that $u(-n-1) = \begin{cases} 1 & -n-1\geq0 \\ 0 & \mbox{otherwise} \end{cases}$, which translates to $u(-n-1) = \begin{cases} 1 & n\leq-1 \\ 0 & \mbox{otherwise} \end{cases}$. Then we can start to solve for the frequency response:
\begin{align*}
G(\omega) &= \sum \limits_{n=-\infty}^{\infty}-2^n u(-n-1) e^{-i\omega n} \\
G(\omega) &= \sum \limits_{n=-\infty}^{-1}-2^n e^{-i\omega n} \\
G(\omega) &= -\sum \limits_{n=-\infty}^{-1}\lr{2 e^{-i\omega }}^n \\
G(\omega) &= -\sum \limits_{m=1}^{\infty}\lr{2 e^{-i\omega }}^{-m} \quad \mbox{let }m=-n\\
G(\omega) &= -\sum \limits_{m=1}^{\infty}\lr{\frac{1}{2} e^{i\omega }}^m \\
G(\omega) &= -\sum \limits_{m=0}^{\infty}\lr{\frac{1}{2} e^{i\omega }}^{m+1} \quad \mbox{shift limits}\\
G(\omega) &= -\frac{1}{2}e^{i\omega}\sum \limits_{m=0}^{\infty}\lr{\frac{1}{2} e^{i\omega }}^{m} \\
G(\omega) &= -\frac{1}{2}e^{i\omega} \frac{1}{1-\frac{1}{2}e^{i\omega}} \\
G(\omega) &= \frac{1}{1-2e^{-i\omega}} \\
\end{align*}
Note that we could have instead worked with $g(-n) = -\lr{\frac{1}{2}}^{n}u(n-1)$ and then used the DTFT property $g(-n) \ftp G(-\omega)$ to solve the problem.
\end{example}
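Since $\abs{\frac{1}{2}e^{i\omega}} \lt 1$, the left-sided sum converges quickly, so a truncated numerical sum makes a convincing check (a sketch, truncating at $n = -60$):
\begin{verbatim}
import numpy as np

N = 60
n = np.arange(-N, 0)                  # support of g is n <= -1
g = -(2.0 ** n)                       # g(n) = -2^n u(-n-1)
w = np.linspace(-np.pi, np.pi, 101)

G = (g[None, :] * np.exp(-1j * np.outer(w, n))).sum(axis=1)
assert np.allclose(G, 1.0 / (1.0 - 2.0 * np.exp(-1j * w)), atol=1e-12)
\end{verbatim}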
\subsubsection{Differentiation in Frequency Domain}
What happens when we differentiate in the frequency domain?
\begin{align*}
\frac{d H(\omega)}{d\omega} &= \frac{d}{d\omega}\sum \limits_{n=-\infty}^{\infty}h(n)e^{-i\omega n} \\
\frac{d H(\omega)}{d\omega} &= \sum \limits_{n=-\infty}^{\infty}h(n) (-in) e^{-i\omega n} \\
\frac{d H(\omega)}{d\omega} &= -i \cdot \sum \limits_{n=-\infty}^{\infty}n \cdot h(n) e^{-i\omega n} \\
i \cdot \frac{d H(\omega)}{d\omega} &= \sum \limits_{n=-\infty}^{\infty}n \cdot h(n) e^{-i\omega n} \\
\end{align*}
This gives us the DTFT differentiation property:
\begin{nicebox}
$$ nh(n) \ftp i\frac{d H(\omega)}{d\omega} $$
\end{nicebox}
\begin{example}
$f(n) = n \alpha^n u(n)$ for $\abs{\alpha} \lt 1$. Find the frequency response. Write $f(n) = nh(n)$ with $h(n) = \alpha^n u(n)$, so that $H(\omega) = \frac{1}{1-\alpha e^{-i\omega}}$.
\begin{align*}
F(\omega) &= \ftrans{nh(n)} \\
F(\omega) &= i\frac{d H(\omega)}{d\omega} \\
F(\omega) &= i\frac{d}{d\omega} \lr{\frac{1}{1-\alpha e^{-i\omega}}} \\
F(\omega) &= i \cdot \frac{0 - i\alpha e^{-i\omega}}{\lr{1-\alpha e^{-i\omega}}^2} \\
F(\omega) &= \frac{\alpha e^{-i\omega}}{\lr{1-\alpha e^{-i\omega}}^2} \\
\end{align*}
\end{example}
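The boxed differentiation property can be checked the same way; this sketch (assuming $\alpha = 0.7$, an arbitrary choice) sums the series $\sum n\alpha^n e^{-i\omega n}$ directly and compares with the closed form:
\begin{verbatim}
import numpy as np

alpha, N = 0.7, 300                   # |alpha| < 1; the tail of n alpha^n is negligible
n = np.arange(N)
f = n * alpha ** n                    # f(n) = n alpha^n u(n)
w = np.linspace(-np.pi, np.pi, 101)

F = (f[None, :] * np.exp(-1j * np.outer(w, n))).sum(axis=1)
F_closed = alpha * np.exp(-1j * w) / (1 - alpha * np.exp(-1j * w)) ** 2
assert np.allclose(F, F_closed, atol=1e-8)
\end{verbatim}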
\subsection{Discussion}
\subsubsection{Conjugate Symmetry}
\begin{claim}
If a signal is real-valued, then its Fourier transform is conjugate symmetric. In other words,
$$ x(n) \in \R \mbox{ } \forall n \Rightarrow X(\omega) = X^*(-\omega) $$
\end{claim}
\begin{proof}
We can use the definition of the fourier transform of a signal $x(n)$:
\begin{align*}
X(\omega) &= \sum \limits_{n=-\infty}^{\infty} x(n) e^{-i\omega n} \\
X(-\omega) &= \sum \limits_{n=-\infty}^{\infty} x(n) e^{i\omega n} \\
X^*(-\omega) &= \sum \limits_{n=-\infty}^{\infty} x^*(n) e^{-i\omega n} \\
X^*(-\omega) &= \sum \limits_{n=-\infty}^{\infty} x(n) e^{-i\omega n} \quad \mbox{since $x(n)$ is real} \\
X^*(-\omega) &= X(\omega) \\
\end{align*}
Here is a simpler proof, using the fact that $\ftrans{x^*(n)} = X^*(-\omega)$ together with $x(n) = x^*(n)$ for real-valued signals:
\begin{align*}
x(n) &\ftp X(\omega) \\
x^*(n) &\ftp X^*(-\omega) \\
x(n) = x^*(n) &\Rightarrow X(\omega) = X^*(-\omega) \\
\end{align*}
\end{proof}
This gives us the conjugate symmetry property (for real-valued signals $x(n)$):
\begin{nicebox}
$$x \in \left[ \Z \to \R \right]$$
$$\Rightarrow $$
$$X(\omega) = X^*(-\omega)$$
\end{nicebox}
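Numerically, conjugate symmetry is easy to observe for any real test signal (an illustrative sketch):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(2)
x = rng.standard_normal(7)            # arbitrary real-valued signal on n = 0..6
n = np.arange(7)
w = np.linspace(-np.pi, np.pi, 201)

X_pos = (x[None, :] * np.exp(-1j * np.outer(w, n))).sum(axis=1)    # X(w)
X_neg = (x[None, :] * np.exp(-1j * np.outer(-w, n))).sum(axis=1)   # X(-w)
assert np.allclose(X_pos, np.conj(X_neg))                          # X(w) = X*(-w)
\end{verbatim}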
\subsubsection{Signals of Even Functions}
\begin{claim}
If $x(n)$ is a real-valued, even signal, then its Fourier transform is even and real-valued. In other words,
\begin{nicebox}
$$x \in \left[ \Z \to \R \right] \mbox{ AND } x(n) = x(-n) \mbox{ } \forall n$$
$$\Rightarrow$$
$$X(-\omega) = X(\omega) \mbox{ AND } X \in \left[ \R \to \R \right]$$
\end{nicebox}
\end{claim}
To show that the frequency response is even, we show that $X(\omega) = X(-\omega)$. To show that the frequency response is real-valued, we need to show that it equals its own conjugate, namely $X(\omega) = X^*(\omega)$.
\begin{proof}
First show that the frequency response is even by noting some properties of the DTFT:
\begin{align*}
x(n) &\ftp X(\omega) \\
x(-n) &\ftp X(-\omega) \\
\end{align*}
Putting it all together,
\begin{align*}
x(n) &= x(-n) \\
\ftrans{x(n)} &= \ftrans{x(-n)} \\
X(\omega) &= X(-\omega)
\end{align*}
Now to show that the frequency response is real, we simply note that $X(\omega) = X^*(-\omega)$ because $x$ is real-valued, then
\begin{align*}
X(-\omega) &= X(\omega) = X^*(-\omega) = X^*(\omega) \\
X(\omega) &= X^*(\omega) \\
\Rightarrow X(\omega) &\in \R \mbox{ } \forall \omega
\end{align*}
\end{proof}
Now we make the general claim that the Fourier transform of the even part of a real-valued signal equals the real part of the signal's Fourier transform:
\begin{align*}
\ftrans{x_e(n)} &= \ftrans{\frac{x(n) + x(-n)}{2}} \\
\ftrans{x_e(n)} &= \frac{1}{2}X(\omega)+\frac{1}{2}X(-\omega) \\
\ftrans{x_e(n)} &= \frac{1}{2}X(\omega)+\frac{1}{2}X^*(\omega) \quad \mbox{since } X(-\omega) = X^*(\omega) \mbox{ for real-valued } x \\
\ftrans{x_e(n)} &= \Real{X(\omega)} \\
\end{align*}
\begin{nicebox}
$$x \in \left[ \Z \to \R \right] $$
$$\Rightarrow$$
$$x_e(n) \ftp \Real{X(\omega)} $$
\end{nicebox}
\subsubsection{Signals of Odd Functions}
\begin{claim}
If $x(n)$ is a real-valued, odd signal, then its Fourier transform is odd and purely imaginary. In other words,
\begin{nicebox}
$$x \in \left[ \Z \to \R \right] \mbox{ AND } x(n) = -x(-n) \mbox{ } \forall n$$
$$\Rightarrow$$
$$-X(-\omega) = X(\omega) \mbox{ AND } X \in \left[\R \to i\cdot\R \right]$$
\end{nicebox}
\end{claim}
To show that the frequency response is odd, we need to show $X(\omega) = -X(-\omega)$. To show that the frequency response is purely imaginary, we need to show that the frequency response is equal to the negative of the conjugate of the frequency response, namely $X(\omega) = -X^*(\omega)$.
\begin{proof}
First show that the frequency response is odd by noting some properties of the DTFT:
\begin{align*}
x(n) &\ftp X(\omega) \\
x(-n) &\ftp X(-\omega) \\
-x(-n) &\ftp -X(-\omega) \\
\end{align*}
Putting it all together,
\begin{align*}
x(n) &= -x(-n) \\
\ftrans{x(n)} &= \ftrans{-x(-n)} \\
X(\omega) &= -X(-\omega)
\end{align*}
Now we can use conjugate symmetry to show that the frequency response is purely imaginary: if $x$ is real-valued, then $X(\omega) = X^*(-\omega)$.
\begin{align*}
X(\omega) &= -X(-\omega) \quad \mbox{since $X$ is odd}\\
X(\omega) &= X^*(-\omega) \quad \mbox{since $x$ is real}\\
X^*(-\omega) &= -X(-\omega) \\
X^*(\omega) &= -X(\omega) \\
\end{align*}
\end{proof}
Here is a more rigorous proof:
\begin{proof}
To show that the frequency response is odd:
\begin{align*}
X(\omega) &= \sum \limits_{n=-\infty}^{\infty}x(n)e^{-i\omega n} \\
X(\omega) &= \sum \limits_{m=-\infty}^{\infty}x(-m)e^{i\omega m} \quad \mbox{let }m=-n \\
X(\omega) &= -\sum \limits_{m=-\infty}^{\infty}x(m)e^{i\omega m} \quad \mbox{since $x$ is odd} \\
X(\omega) &= -X(-\omega) \\
\end{align*}
To show that the frequency response is purely imaginary:
\begin{align*}
X(\omega) &= \sum \limits_{n=-\infty}^{\infty}x(n)e^{-i\omega n} \\
X(\omega) &= \sum \limits_{m=-\infty}^{\infty}x(-m)e^{i\omega m} \quad \mbox{let }m=-n \\
X^*(\omega) &= \sum \limits_{m=-\infty}^{\infty}x^*(-m)e^{-i\omega m} \\
X^*(\omega) &= \sum \limits_{m=-\infty}^{\infty}x(-m)e^{-i\omega m} \quad \mbox{since $x$ is real-valued}\\
X^*(\omega) &= -\sum \limits_{m=-\infty}^{\infty}x(m)e^{-i\omega m} \quad \mbox{since $x$ is odd}\\
X^*(\omega) &= -X(\omega)\\
\end{align*}
Since the frequency response is equal to the negative of its conjugate, it is purely imaginary.
\end{proof}
Now we make the general claim that the Fourier transform of the odd part of a real-valued signal equals $i$ times the imaginary part of the signal's Fourier transform:
\begin{align*}
\ftrans{x_o(n)} &= \ftrans{\frac{x(n) - x(-n)}{2}} \\
\ftrans{x_o(n)} &= \frac{1}{2}X(\omega)-\frac{1}{2}X(-\omega) \\
\ftrans{x_o(n)} &= \frac{1}{2}X(\omega)-\frac{1}{2}X^*(\omega) \quad \mbox{since } X(-\omega) = X^*(\omega) \mbox{ for real-valued } x \\
\ftrans{x_o(n)} &= i\Imag{X(\omega)} \\
\end{align*}
\begin{nicebox}
$$x \in \left[ \Z \to \R \right]$$
$$\Rightarrow$$
$$x_o(n) \ftp i\Imag{X(\omega)} $$
\end{nicebox}
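Both boxed decompositions can be verified together on a random real signal with symmetric support (a sketch; the support $n=-4,\dots,4$ is arbitrary):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(3)
x = rng.standard_normal(9)            # real signal on n = -4..4
n = np.arange(-4, 5)
w = np.linspace(-np.pi, np.pi, 201)

def dtft(sig):
    return (sig[None, :] * np.exp(-1j * np.outer(w, n))).sum(axis=1)

x_rev = x[::-1]                       # x(-n) on the same symmetric support
x_e, x_o = (x + x_rev) / 2, (x - x_rev) / 2
X = dtft(x)
assert np.allclose(dtft(x_e), X.real)         # even part <-> Re{X(w)}
assert np.allclose(dtft(x_o), 1j * X.imag)    # odd part  <-> i Im{X(w)}
\end{verbatim}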
\subsubsection{Additional Problems}
\begin{example}
Given a real-valued signal $x(n)$ with even part $x_e(n) = \frac{1}{2}\lr{x(n) + x(-n)}$, prove that $\ftrans{x_e(n)} = \frac{1}{2}\lr{X(\omega) + X^*(\omega)} = \Real{X(\omega)}$.
\begin{align*}
x(n) &\ftp X(\omega) \\
\frac{1}{2}x(n) &\ftp \frac{1}{2}X(\omega) \\
\frac{1}{2}x(-n) &\ftp \frac{1}{2}X(-\omega) \\
\end{align*}
We showed that if a real-valued signal is even, then its Fourier transform is also even and purely real. This implies that the inverse Fourier transform of the real part of a frequency response is the even part of the signal. In other words,
\begin{align*}
\iftrans{\Real{X(\omega)}} &= x_e(n) \\
\Real{X(\omega)} &= \ftrans{x_e(n)} \\
\end{align*}
Also note that if $x$ is real, then we can use the conjugate symmetric property, $X(\omega) = X^*(-\omega)$.
\begin{align*}
x_e &= \frac{1}{2}\lr{x(n) + x(-n)} \\
\ftrans{x_e} &= \ftrans{\frac{1}{2}\lr{x(n) + x(-n)}} \\
\ftrans{x_e} &= \frac{1}{2}\ftrans{x(n)} + \frac{1}{2}\ftrans{x(-n)} \\
\ftrans{x_e} &= \frac{1}{2}X(\omega) + \frac{1}{2}X(-\omega) \\
\ftrans{x_e} &= \frac{1}{2}X(\omega) + \frac{1}{2}X^*(\omega) \\
\ftrans{x_e} &= \Real{X(\omega)} \\
\end{align*}
\end{example}
\begin{example}
Given a real-valued signal $x(n)$ with odd part $x_o(n) = \frac{1}{2}\lr{x(n) - x(-n)}$, prove that $\ftrans{x_o(n)} = \frac{1}{2}\lr{X(\omega) - X^*(\omega)} = i\Imag{X(\omega)}$.
\begin{align*}
x(n) &\ftp X(\omega) \\
\frac{1}{2}x(n) &\ftp \frac{1}{2}X(\omega) \\
\frac{1}{2}x(-n) &\ftp \frac{1}{2}X(-\omega) \\
\end{align*}
We showed that if a real-valued signal is odd, then its Fourier transform is also odd and purely imaginary. This implies that the inverse Fourier transform of $i$ times the imaginary part of a frequency response is the odd part of the signal. In other words,
\begin{align*}
\iftrans{i\Imag{X(\omega)}} &= x_o(n) \\
i\Imag{X(\omega)} &= \ftrans{x_o(n)} \\
\end{align*}
Also note that if $x$ is real, then we can use the conjugate symmetric property, $X(\omega) = X^*(-\omega)$.
\begin{align*}
x_o &= \frac{1}{2}\lr{x(n) - x(-n)} \\
\ftrans{x_o} &= \ftrans{\frac{1}{2}\lr{x(n) - x(-n)}} \\
\ftrans{x_o} &= \frac{1}{2}\ftrans{x(n)} - \frac{1}{2}\ftrans{x(-n)} \\
\ftrans{x_o} &= \frac{1}{2}X(\omega) - \frac{1}{2}X(-\omega) \\
\ftrans{x_o} &= \frac{1}{2}X(\omega) - \frac{1}{2}X^*(\omega) \\
\ftrans{x_o} &= i\Imag{X(\omega)} \\
\end{align*}
\end{example}
\begin{example}
For a signal $x(n) \in \R $ $ \forall n$, given the following facts, determine $x(n)$.
\begin{align*}
(1)& \quad x(n) = 0 \quad \forall n \gt 0 \\
(2)& \quad x(0) \gt 0 \\
(3)& \quad \Imag{X(\omega)} = \sin(\omega) - \sin(2\omega)\\
(4)& \quad \frac{1}{2\pi} \int_{-\pi}^{\pi}\abs{X(\omega)}^2d\omega=3 \\
\end{align*}
Note that Parseval's relation for aperiodic signals is:
$$ \sum \limits_{n\in\Z}\abs{x(n)}^2 = \frac{1}{2\pi} \int_{-\pi}^{\pi}\abs{X(\omega)}^2d\omega$$
Start with the third fact:
\begin{align*}
\Imag{X(\omega)} &= \sin(\omega) - \sin(2\omega) \\
i\Imag{X(\omega)} &= i\lr{\sin(\omega) - \sin(2\omega)} \\
\ftrans{x_o(n)} &= i\lr{\sin(\omega) - \sin(2\omega)} \\
\ftrans{x_o(n)} &= i\lr{\frac{1}{2i}e^{i\omega}-\frac{1}{2i}e^{-i\omega}-\frac{1}{2i}e^{i2\omega}+\frac{1}{2i}e^{-i2\omega}} \\
\ftrans{\frac{x(n)-x(-n)}{2}} &= i\lr{\frac{1}{2i}e^{i\omega}-\frac{1}{2i}e^{-i\omega}-\frac{1}{2i}e^{i2\omega}+\frac{1}{2i}e^{-i2\omega}} \\
\frac{1}{2}\ftrans{x(n)} - \frac{1}{2}\ftrans{x(-n)}&= \frac{1}{2}\lr{e^{i\omega}-e^{-i\omega}-e^{i2\omega}+e^{-i2\omega}} \\
\ftrans{x(n)} - \ftrans{x(-n)}&= \lr{e^{i\omega}-e^{-i\omega}-e^{i2\omega}+e^{-i2\omega}} \\
\end{align*}
How can we eliminate the $x(-n)$ term? Take the inverse transform of both sides:
\begin{align*}
x(n) - x(-n) &= \iftrans{e^{i\omega}-e^{-i\omega}-e^{i2\omega}+e^{-i2\omega}} \\
x(n) - x(-n) &= \delta(n+1)-\delta(n-1)-\delta(n+2)+\delta(n-2) \\
\end{align*}
Now consider only $n \lt 0$, where $x(-n) = 0$ because $-n \gt 0$:
$$ x(n) = \delta(n+1)-\delta(n-1)-\delta(n+2)+\delta(n-2) \quad \forall n \lt 0 $$
But notice that the $\delta(n-1)$ and $\delta(n-2)$ terms vanish on this domain: $\delta(n-1)$ is nonzero only at $n=1$, and $\delta(n-2)$ only at $n=2$, and neither point satisfies $n \lt 0$. Dropping those terms,
\begin{align*}
x(n) &= \delta(n+1)-\delta(n-1)-\delta(n+2)+\delta(n-2) \\
x(n) &= \delta(n+1)-\delta(n+2) \\
\end{align*}
Now we can use the fourth fact, $\frac{1}{2\pi} \int_{-\pi}^{\pi}\abs{X(\omega)}^2d\omega=3$, in combination with Parseval's relation.
\begin{align*}
\frac{1}{2\pi} \int_{-\pi}^{\pi}\abs{X(\omega)}^2d\omega &= 3 \\
\sum \limits_{n\in\Z}\abs{x(n)}^2 &= 3 \\
\sum \limits_{n=-\infty}^{0}\abs{x(n)}^2 &= 3 \\
\sum \limits_{n=-\infty}^{-1}\abs{x(n)}^2 + \abs{x(0)}^2 &= 3 \\
\abs{x(-2)}^2 + \abs{x(-1)}^2 + \abs{x(0)}^2 &= 3 \\
2 + \abs{x(0)}^2 &= 3 \\
\abs{x(0)}^2 &= 1 \\
x(0) &= 1 \quad \mbox{because of (2)}\\
\end{align*}
The remaining value $x(0) = 1$ contributes a $\delta(n)$ term. Therefore our signal is $x(n) = \delta(n) + \delta(n+1)-\delta(n+2)$.
\end{example}
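It is worth verifying the answer against the given facts; the sketch below checks facts (3) and (4) directly (facts (1) and (2) hold by inspection):
\begin{verbatim}
import numpy as np

# x(n) = delta(n) + delta(n+1) - delta(n+2), supported on n = -2..0
n = np.array([-2, -1, 0])
x = np.array([-1.0, 1.0, 1.0])
w = np.linspace(-np.pi, np.pi, 201)

X = (x[None, :] * np.exp(-1j * np.outer(w, n))).sum(axis=1)
assert np.allclose(X.imag, np.sin(w) - np.sin(2 * w))   # fact (3)
assert np.isclose(np.sum(np.abs(x) ** 2), 3.0)          # fact (4), via Parseval
\end{verbatim}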
\section{Week 14}
\subsection{CTFT}
\subsubsection{Continuous-Time Fourier Transform}
The Continuous-Time Fourier Transform is defined as
\begin{nicebox}
\begin{align*}
x(t) &= \frac{1}{2\pi}\int_{-\infty}^{\infty}X(\omega)e^{i\omega t}d\omega \quad &\mbox{(synthesis equation)} \\
X(\omega) &= \int_{-\infty}^{\infty}x(t)e^{-i\omega t}dt \quad &\mbox{(analysis equation)}
\end{align*}
\end{nicebox}
The reason that we integrate over all real numbers in the synthesis equation is that in continuous time $X(\omega)$ doesn't have the periodicity of the DTFT. Note that the $2\pi$ is an artifact of the units we are using for $\omega$, namely the fact that $\omega = 2\pi f$, where $f$ is in $Hz$. If we used $f = \omega/2\pi$ instead of $\omega$, we could write the CTFT as
\begin{align*}
x(t) &= \int_{-\infty}^{\infty}X(f)e^{i 2\pi f t}df \quad &\mbox{(synthesis equation)} \\
X(f) &= \int_{-\infty}^{\infty}x(t)e^{-i 2\pi f t}dt \quad &\mbox{(analysis equation)}
\end{align*}
This notation is used more frequently in communications and circuit design, since $Hz$ is more intuitive than radians per second. If we look at the synthesis equation and rearrange some terms, we can see that we have a linear combination of complex exponentials whose weights are determined by $X(\omega)$, the part of the integral that distinguishes the contribution of each frequency:
$$ x(t) = \int_{-\infty}^{\infty}\lr{\frac{X(\omega)d\omega}{2\pi}}e^{i\omega t} $$
\begin{example}
Find the Fourier Transform of $x(t) = \delta(t)$.
We can use the analysis equation to show that $\delta(t) \ftp 1$:
\begin{align*}
X(\omega) &= \int_{-\infty}^{\infty}x(t)e^{-i\omega t}dt \quad \mbox{(analysis)} \\
X(\omega) &= \int_{-\infty}^{\infty}\delta(t)e^{-i\omega t}dt \\
X(\omega) &= e^{0} \\
X(\omega) &= 1 \\
\end{align*}
\begin{nicebox}
$$ \delta(t) \ftp 1 $$
\end{nicebox}
\end{example}
\begin{example}
Find the Fourier Transform of $x(t) = \delta(t-T)$.
\begin{align*}
X(\omega) &= \int_{-\infty}^{\infty}x(t)e^{-i\omega t}dt \quad \mbox{(analysis)} \\
X(\omega) &= \int_{-\infty}^{\infty}\delta(t-T)e^{-i\omega t}dt \\
X(\omega) &= e^{-i\omega T} \\
\end{align*}
\begin{nicebox}
$$\delta(t-T) \ftp e^{-i\omega T} $$
\end{nicebox}
\end{example}
\begin{example}
Find the Fourier Transform of $x(t) = 1$.
% \img{images/dirac/single.ps}
\begin{center}
\begin{pspicture}(-3,-1)(3,1.5)
\psline{->}(-2,0)(2,0)
\rput(2,-0.25){$\omega$}
\rput(2,1){$X(\omega)$}
\psline{->}(0,0)(0,1)
\rput(0.3,1){$(A)$}
\rput(0,-0.25){0}
\end{pspicture}
\end{center}
We know that the only frequency present in $x(t) = 1$ is $\omega = 0$; we just don't know the amplitude.
\begin{align*}
x(t) &= \frac{1}{2\pi}\int_{-\infty}^{\infty}X(\omega)e^{i\omega t}d\omega \quad \mbox{(synthesis)} \\
1 &= \frac{1}{2\pi}\int_{-\infty}^{\infty}A\delta(\omega)e^{i\omega t}d\omega \\
1 &= \frac{A}{2\pi} \\
A &= 2\pi \\
\end{align*}
\begin{nicebox}
$$1 \ftp 2\pi\delta(\omega) $$
\end{nicebox}
\end{example}
\begin{example}
Find the Fourier Transform of $x(t) = e^{i\omega_0 t}$.
We know that the only frequency in the signal is $\omega_0$, and again, we only need to find the amplitude of a shifted Dirac delta.
% \img{images/dirac/singlefreq.ps}
\begin{center}
\begin{pspicture}(-2.5,-1)(2.5,1.5)
\psline{->}(-2,0)(2,0)
\rput(2,-0.25){$\omega$}
\rput(2,1){$X(\omega)$}
\psline{->}(0,0)(0,1)
\rput(0.3,1){$(A)$}
\rput(0,-0.25){$\omega_0$}
\end{pspicture}
\end{center}
\begin{align*}
x(t) &= \frac{1}{2\pi}\int_{-\infty}^{\infty}X(\omega)e^{i\omega t}d\omega \quad \mbox{(synthesis)} \\
e^{i\omega_0 t} &= \frac{1}{2\pi}\int_{-\infty}^{\infty}A\delta(\omega-\omega_0)e^{i\omega t}d\omega \\
e^{i\omega_0 t} &= \frac{A}{2\pi}e^{i\omega_0 t} \\
A &= 2\pi \\
\end{align*}
\begin{nicebox}
$$e^{i\omega_0 t} \ftp 2\pi\delta(\omega-\omega_0) $$
\end{nicebox}
\end{example}
\begin{example}
Find the impulse response of the ideal low-pass filter with a passband amplification of $\pi/b$:
% \img{images/idealpass/pioverb.ps}
\begin{center}
\begin{pspicture}(0,-1)(4,1.5)
%axis
\psline{->}(1,0)(3,0)
% 0
\psline(2,0.1)(2,-0.1)
\rput(3.3,-0.25){$\omega$}
\rput(3,1){$H(\omega)$}
\rput(1.25,1){$(\pi/b)$}
\psline(1.5,0)(1.5,1)
\psline(1.5,1)(2.5,1)
\psline(2.5,1)(2.5,0)
\rput(2,-0.25){0}
\rput(1.5,-0.25){$-a$}
\rput(2.5,-0.25){$a$}
\end{pspicture}
\end{center}
\begin{align*}
h(t) &= \frac{1}{2\pi}\int_{-\infty}^{\infty}H(\omega)e^{i\omega t}d\omega \\
h(t) &= \frac{1}{2\pi}\int_{-a}^{a}\frac{\pi}{b}e^{i\omega t}d\omega \\
h(t) &= \frac{1}{2b} \lr{\left. \frac{e^{i\omega t}}{it}\right|_{-a}^{a}} \\
h(t) &= \frac{1}{2b} \frac{e^{i a t} - e^{-i a t}}{it} \\
h(t) &= \frac{\sin(at)}{tb} \\
\end{align*}
% \img{images/sinc/sinc.ps}
\psset{unit=0.5cm}
\begin{center}
\begin{pspicture}(-13,-1.25)(13,3)
\psplot[algebraic,linewidth=1.5pt,plotpoints=1000]{-12.56}{12.56}{2*sin(x)/x}
\psaxes[showorigin=false,labels=none, Dx=3.14](0,0)(-12.6,0)(12.6,0)
\rput(5.7, 1){$a/b$}
\psline[linestyle=dashed](0,2)(5,2)
\psline{<->}(5,0)(5,2)
\rput(3.14, -0.5){$\frac{\pi}{a}$}
\rput(6.28, -0.5){$\frac{2\pi}{a}$}
\rput(9.42, -0.5){$\frac{3\pi}{a}$}
\rput(12.56, -0.5){$\frac{4\pi}{a}$}
\rput(-3.14, -0.5){$\frac{-\pi}{a}$}
\rput(-6.28, -0.5){$\frac{-2\pi}{a}$}
\rput(-9.42, -0.5){$\frac{-3\pi}{a}$}
\rput(-12.56, -0.5){$\frac{-4\pi}{a}$}
\rput(0, -0.5){$0$}
\end{pspicture}
\end{center}
What is $\int_{-\infty}^{\infty}\frac{\sin(at)}{tb}dt$? At first glance, this problem seems to require complex analysis, but we can simply note that $H(\omega) = \int_{-\infty}^{\infty}\frac{\sin(at)}{tb}e^{-i\omega t}dt$, so $\int_{-\infty}^{\infty}\frac{\sin(at)}{tb}e^{0}dt = H(0) = \pi/b$. In general, the Fourier transform evaluated at $\omega = 0$ is the integral of the signal.
\end{example}
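Here is a numerical check of the $H(0)$ trick (a sketch with arbitrary values $a=2$, $b=3$; the truncated integral converges slowly because of the oscillating tail):
\begin{verbatim}
import numpy as np

def trap(y, dx):
    # composite trapezoid rule over uniformly spaced samples
    return (y[0] / 2 + y[1:-1].sum() + y[-1] / 2) * dx

a, b = 2.0, 3.0
t = np.linspace(-2000.0, 2000.0, 2_000_001)
h = (a / b) * np.sinc(a * t / np.pi)        # equals sin(a t)/(b t), with h(0) = a/b
print(trap(h, t[1] - t[0]), np.pi / b)      # both approximately 1.0472
\end{verbatim}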
\subsection{Amplitude Modulation}
\subsubsection{Modulation Property of CTFT}
We have shown that convolution in the time domain is multiplication in the frequency domain:
$$(f*g)(t) \ftp F(\omega)G(\omega)$$
Consider the Modulation Property of the Continuous-Time Fourier Transform:
\begin{nicebox}
\begin{align*}
f(t)g(t) \ftp \frac{1}{2\pi} (F*G)(\omega)
\end{align*}
\end{nicebox}
\begin{proof}
\begin{align*}
\iftrans{(F*G)(\omega)} &= \frac{1}{2\pi}\int_{-\infty}^{\infty}(F*G)(\omega)e^{i\omega t}d\omega \\
\iftrans{(F*G)(\omega)} &= \frac{1}{2\pi}\int_{-\infty}^{\infty}\int_{-\infty}^{\infty}F(W)G(\omega-W) dW e^{i\omega t}d\omega \\
\iftrans{(F*G)(\omega)} &= \frac{1}{2\pi}\int_{-\infty}^{\infty}F(W)\int_{-\infty}^{\infty}G(\omega-W) e^{i\omega t} d\omega dW \\
\iftrans{(F*G)(\omega)} &= \frac{1}{2\pi}\int_{-\infty}^{\infty}F(W)\int_{-\infty}^{\infty}G(\Omega) e^{i(\Omega+W) t} d\Omega dW \\
\iftrans{(F*G)(\omega)} &= \frac{1}{2\pi}\int_{-\infty}^{\infty}F(W)\lr{\int_{-\infty}^{\infty}G(\Omega) e^{i\Omega t} d\Omega }e^{iWt}dW \\
\iftrans{(F*G)(\omega)} &= \frac{1}{2\pi}\int_{-\infty}^{\infty}F(W)e^{iWt}dW \lr{\int_{-\infty}^{\infty}G(\Omega) e^{i\Omega t} d\Omega}\\
\iftrans{(F*G)(\omega)} &= \frac{1}{2\pi}\lr{2\pi f(t)}\lr{2\pi g(t)} \\
\iftrans{(F*G)(\omega)} &= 2\pi f(t)g(t) \\
\iftrans{\frac{1}{2\pi}(F*G)(\omega)} &= f(t)g(t) \\
\end{align*}
\end{proof}
This shows that multiplication in the time domain corresponds to convolution in the frequency domain, scaled by $\frac{1}{2\pi}$.
\subsubsection{Complex Exponential Carrier}
Consider an AM modulator with output $y(t) = x(t)g(t)$, where the carrier $g(t) = e^{i\omega_0 t}$ and $\omega_0$ is large and positive:
% \img{images/modulators/am.ps}
\begin{center}
\begin{pspicture}(-3,-3)(3,3)
% in from x
\rput(-3.2,0){ $x(t)$}
\psline[linewidth=1.25 pt, arrowscale=1.1]{->}(-2.7,0)(-0.25,0)
% out to y
\rput(3.2,0){$y(t)$}
\psline[linewidth=1.25 pt, arrowscale=1.1]{->}(0.25,0)(2.7,0)
% up arrow
\rput(0,-2){ $g(t)$}
\psline[linewidth=1.25 pt, arrowscale=1.1]{->}(0,-1.65)(0,-0.25)
% multiplier
\pscircle(0,0){0.25}
\psline(-0.175,0.175)(0.175,-0.175)
\psline(0.175,0.175)(-0.175,-0.175)
% box
\pspolygon(-1.5,-2.65)(-1.5,1)(1.5,1)(1.5,-2.65)
\end{pspicture}
\end{center}
Suppose the spectrum of the input signal is given by the following figure:
% \img{images/triangles/triangle.ps}
\begin{center}
\begin{pspicture}(-3.2,-3.2)(3,3)
% axis
\psline{->}(-3,0)(3,0)
\psline(-2,0)(0,1)
\psline(0,1)(2,0)
\rput(-2,-0.25){$-A$}
\rput(0,-0.25){0}
\rput(2,-0.25){$A$}
\rput(3.25,-0.25){$\omega$}
\rput(-2,1){$X(\omega)$}
\rput(1.12,1){$(1)$}
\end{pspicture}
\end{center}
We can determine that $Y(\omega) = \frac{1}{2\pi}(X*G)(\omega)$. Since $g(t) = e^{i\omega_0 t}$, it represents a single frequency in the frequency domain. In other words, the spectrum of $g(t)$, namely $G(\omega)$, is a single Dirac delta shifted by $\omega_0$ with an amplitude of $2\pi$ (see the previous sections for the derivation).
% \img{images/dirac/2pi.ps}
\begin{center}
\begin{pspicture}(-1,-1)(3,3)
\psline{->}(-2,0)(2,0)
\rput(2,-0.25){$\omega$}
\rput(2,1){$G(\omega)$}
\psline{->}(0,0)(0,1)
\rput(0.3,1){$(2\pi)$}
\rput(0,-0.25){$\omega_0$}
\end{pspicture}
\end{center}
In other words,
$$ g(t) = e^{i\omega_0 t} \ftp 2\pi \delta(\omega-\omega_0) $$
When doing amplitude modulation with a single complex exponential of frequency $\omega_0$, we find that the output spectrum is $Y(\omega) = X(\omega-\omega_0)$. This is because $(X*G)(\omega)=2\pi X(\omega - \omega_0)$, and the $2\pi$ cancels. Here is how it works out:
\begin{align*}
Y(\omega) &= \frac{1}{2\pi}(X*G)(\omega) \\
Y(\omega) &= \frac{1}{2\pi}\int_{-\infty}^{\infty}X(\Omega)G(\omega-\Omega)d\Omega \\
Y(\omega) &= \frac{1}{2\pi}\int_{-\infty}^{\infty}X(\Omega)2\pi\delta(\omega-\Omega-\omega_0)d\Omega \\
Y(\omega) &= \int_{-\infty}^{\infty}X(\Omega) \delta(\omega-\omega_0-\Omega)d\Omega \\
Y(\omega) &= X(\omega-\omega_0)
\end{align*}
Thus we have a spectrum for $Y(\omega)$ that looks like
% \img{images/triangles/yomega.ps}
\begin{center}
\begin{pspicture}(-3.2,-3.2)(3,3)
% axis
\psline{->}(-3,0)(3,0)
\psline(-2,0)(0,1)
\psline(0,1)(2,0)
\rput(-2,-0.25){$\omega_0-A$}
\rput(0,-0.25){$\omega_0$}
\rput(2,-0.25){$\omega_0+A$}
\rput(3.25,-0.25){$\omega$}
\rput(-2,1){$Y(\omega)=X(\omega-\omega_0)$}
\rput(1.12,1){$(1)$}
\end{pspicture}
\end{center}
\subsubsection{Sinusoidal Carriers}
Generally an antenna must be approximately a quarter of the wavelength of the wave it transmits. The speed of light equals a frequency $f$ in Hertz multiplied by the corresponding wavelength $\lambda$:
$$c=f\lambda = 2.9979 \times 10^8 m/s $$
Suppose the spectrum of one's voice occupies the range from $-3.3kHz$ to $3.3kHz$; then we can approximate the length of the antenna needed by first determining the wavelength:
\begin{align*}
\lambda &= c/f \\
\lambda &\approx 91 km\\
\end{align*}
This tells us we would need about $23km$ of antenna to transmit this person's voice. This is why we use amplitude modulation: to transmit a signal at higher frequencies.
Lets consider an amplitude modulator with a sinusoidal carrier given by
% \img{images/modulators/amcos.ps}
\begin{center}
\begin{pspicture}(-3,-3)(3,2)
% in from x
\rput(-3.2,0){$x(t)$}
\psline[linewidth=1.25 pt, arrowscale=1.1]{->}(-2.7,0)(-0.25,0)
% out to y
\rput(3.2,0){$y(t)$}
\psline[linewidth=1.25 pt, arrowscale=1.1]{->}(0.25,0)(2.7,0)
% up arrow
\rput(0,-2){$\cos(\omega_0 t)$}
\psline[linewidth=1.25 pt, arrowscale=1.1]{->}(0,-1.65)(0,-0.25)
% multiplier
\pscircle(0,0){0.25}
\psline(-0.175,0.175)(0.175,-0.175)
\psline(0.175,0.175)(-0.175,-0.175)
% box
\pspolygon(-1.5,-2.65)(-1.5,1)(1.5,1)(1.5,-2.65)
\end{pspicture}
\end{center}
where $x(t)$ is the \emph{information-bearing} signal (a.k.a. the modulating signal), and $h(t) = \cos(\omega_0 t)$ is the \emph{carrier} signal. The output $y(t) = x(t)\cos(\omega_0 t)$, the product of these two signals, is the modulated signal. We can begin solving for the spectrum of $y$ by first finding the spectrum of $h$.
\begin{align*}
h(t) &= \cos(\omega_0 t) \\
h(t) &= \frac{1}{2}e^{i\omega_0 t} + \frac{1}{2}e^{-i\omega_0 t} \\
\ftrans{h(t)} &= \frac{1}{2}\ftrans{e^{i\omega_0 t}} + \frac{1}{2}\ftrans{e^{-i\omega_0 t}} \\
H(\omega) &= \frac{1}{2}2\pi\delta(\omega-\omega_0) + \frac{1}{2}2\pi\delta(\omega+\omega_0) \\
H(\omega) &= \pi\delta(\omega-\omega_0) + \pi\delta(\omega+\omega_0) \\
\end{align*}
$H(\omega)$ is two Dirac deltas, shifted symmetrically by $\pm\omega_0$,
% \img{images/dirac/doublefreq.ps}
\begin{center}
\begin{pspicture}(-3,-1)(3,3)
\psline{->}(-2,0)(2,0)
\rput(2,-0.25){$\omega$}
\rput(2,1){$H(\omega)$}
\psline{->}(-1,0)(-1,1)
\rput(-1.3,1){$(\pi)$}
\rput(-1,-0.25){$-\omega_0$}
\psline{->}(1,0)(1,1)
\rput(1.3,1){$(\pi)$}
\rput(1,-0.25){$\omega_0$}
\end{pspicture}
\end{center}
which gives an output spectrum consisting of shifted copies of the input spectrum at half the original height:
% \img{images/triangles/double.ps}
\begin{center}
\begin{pspicture}(-5,-2)(5,3)
% axis
\psline{->}(-5,0)(5,0)
\psline(-3,0)(-2,1)
\psline(-2,1)(-1,0)
\psline(1,0)(2,1)
\psline(2,1)(3,0)
\rput(-3,-0.25){$-\omega_0-A$}
\rput(-2,-1.25){$-\omega_0$}
\rput(-1,-0.25){$-\omega_0+A$}
\rput(3,-0.25){$\omega_0+A$}
\rput(2,-1.25){$\omega_0$}
\rput(1,-0.25){$\omega_0-A$}
\psline[linestyle=dashed](-2,-1)(-2,1)
\psline[linestyle=dashed](2,-1)(2,1)
\rput(5.25,-0.25){$\omega$}
\rput(-3.5,1){$Y(\omega)$}
\rput(2.6,1){$(1/2)$}
\rput(-1.4,1){$(1/2)$}
\end{pspicture}
\end{center}
where we must require that $-\omega_0+A \lt \omega_0-A$, so that $A \lt \omega_0$. This saves us a few complications when $\omega_0 \gg A$. How can we demodulate this signal? One way would be to pass it through an amplitude modulator using a complex exponential $e^{-i\omega_0 t}$, which shifts the spectrum back to the left. We can then pass it through a low-pass filter with a passband amplification of 2 to get the original height back. But what if we had an oscillator using a cosine function at the receiving end instead of a complex exponential?
The following scheme is called \emph{synchronous demodulation}: we have an identical cosine at the receiving end. Upon transmission, the spectrum was shifted by $\pm \omega_0$ and the amplitude was cut in half. Applying this again gives a signal $q(t) = y(t)\cos(\omega_0 t)$, whose spectrum is the already-shifted spectrum shifted again by $\pm \omega_0$. This produces three disjoint triangular components in the frequency spectrum: the shift to the left by $\omega_0$ gives two triangles of height $1/4$ centered at $-2\omega_0$ and $0$, and the shift to the right gives two triangles of height $1/4$ centered at $2\omega_0$ and $0$. The two triangles centered at 0 add to form the original triangle halved. So we can now put the signal $q(t)$ through a low-pass filter and amplify by 2 to retrieve the original signal.
% \img{images/triangles/three.ps}
\begin{center}
\begin{pspicture}(-5,-1)(5,3)
% axis
\psline{->}(-5,0)(5,0)
\psline(-4,0)(-3,0.5)
\psline(-3,0.5)(-2,0)
\psline(2,0)(3,0.5)
\psline(3,0.5)(4,0)
\psline(-1,0)(0,1)
\psline(0,1)(1,0)
\rput(-3,-0.25){$-2\omega_0$}
\rput(0,-0.25){$0$}
\rput(3,-0.25){$2\omega_0$}
\rput(5.25,-0.25){$\omega$}
\rput(-3.5,1){$Q(\omega)$}
\rput(0.5,1){$(1/2)$}
\rput(3.7,0.5){$(1/4)$}
\rput(-2.3,0.5){$(1/4)$}
\end{pspicture}
\end{center}
The reason this is called amplitude modulation is that, given an input signal $x(t)$, multiplying by $\cos(\omega_0 t)$ gives a new signal $y(t)$: a cosine wave oscillating between $\pm x(t)$, where $x(t)$ serves as an envelope.
What happens if at the receiver we have $q(t) = y(t)\cos(\omega_0t + \theta)$?
\begin{align*}
y(t) &= x(t)\cos(\omega_0 t) \\
q(t) &= y(t)\cos(\omega_0 t + \theta) \\
q(t) &= x(t)\cos(\omega_0 t) \cos(\omega_0 t + \theta) \\
\end{align*}
We can use the trigonometric identity:
$$ \cos\alpha \cos\beta = \frac{1}{2}\lr{\cos\lr{\alpha+\beta}+\cos\lr{\alpha-\beta}} $$
\begin{align*}
q(t) &= x(t)\cos(\omega_0 t) \cos(\omega_0 t + \theta) \\
q(t) &= \frac{x(t)}{2}\lr{\cos(2\omega_0 t + \theta) + \cos(-\theta)} \\
q(t) &= x(t)\lr{\frac{1}{2}\cos(2\omega_0 t + \theta) + \frac{1}{2}\cos(\theta)} \\
\end{align*}
If $\theta \approx \frac{\pi}{2}$, you get an extremely weak baseband. When $\theta = 0$, you have a synchronous demodulation scheme:
$$ q(t) = \frac{x(t)}{2} + \frac{1}{2}\cos(2\omega_0 t)x(t) $$
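The whole synchronous-demodulation chain is easy to simulate; the sketch below (invented parameters: a 3 Hz test tone, a 500 Hz carrier, and an ideal low-pass implemented by zeroing FFT bins) recovers the message exactly when $\theta = 0$:
\begin{verbatim}
import numpy as np

fs = 10_000.0                                # sample rate in Hz
t = np.arange(0, 1.0, 1 / fs)                # one second of samples
x = np.cos(2 * np.pi * 3 * t)                # message: a 3 Hz tone
w0 = 2 * np.pi * 500                         # carrier frequency >> message bandwidth

y = x * np.cos(w0 * t)                       # amplitude modulation
q = y * np.cos(w0 * t)                       # synchronous demodulation, theta = 0

Q = np.fft.rfft(q)                           # ideal low-pass: zero bins above 100 Hz
freqs = np.fft.rfftfreq(len(q), 1 / fs)
Q[freqs > 100] = 0
x_rec = 2 * np.fft.irfft(Q, n=len(q))        # passband gain of 2 restores the height
assert np.allclose(x_rec, x, atol=1e-6)
\end{verbatim}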
\subsection{Discussion}
\subsubsection{CTFT Time-Shifting}
What is the Fourier Transform of the signal $x(t-t_0)$?
\begin{align*}
\hat{X}(\omega) &= \int_{-\infty}^{\infty}x(t-t_0)e^{-i\omega t}dt \\
\hat{X}(\omega) &= \int_{-\infty}^{\infty}x(\tau)e^{-i\omega (\tau + t_0)}d\tau \\
\hat{X}(\omega) &= \int_{-\infty}^{\infty}x(\tau)e^{-i\omega \tau} e^{-i\omega t_0}d\tau \\
\hat{X}(\omega) &= e^{-i\omega t_0}\int_{-\infty}^{\infty}x(\tau)e^{-i\omega \tau}d\tau \\
\hat{X}(\omega) &= e^{-i\omega t_0}X(\omega) \\
\end{align*}
Therefore we can say that time-shifting in the time domain corresponds to multiplication by a complex exponential in Fourier space:
\begin{nicebox}
$$x(t-t_0) \ftp e^{-i\omega t_0}X(\omega) $$
\end{nicebox}
\subsubsection{CTFT Duality}
What is the Fourier Transform of the Fourier Transform of the signal $x(t)$?
$$\mbox{If } x(t) \ftp X(\omega), \mbox{ then } X(t) \ftp ? $$
\begin{align*}
x(t) &= \frac{1}{2\pi} \int_{\R}X(\omega)e^{i\omega t}d\omega \\
x(\omega) &= \frac{1}{2\pi} \int_{\R}X(t)e^{i\omega t}dt \quad \mbox{relabel } t \leftrightarrow \omega \\
x(-\omega) &= \frac{1}{2\pi} \int_{\R}X(t)e^{-i\omega t}dt \quad \mbox{replace } \omega \mbox{ with } -\omega \\
2\pi x(-\omega) &= \int_{\R}X(t)e^{-i\omega t}dt \\
2\pi x(-\omega) &= \ftrans{X(t)} \\
\end{align*}
If you take the Fourier transform of the Fourier transform, you get the original signal back, time-reversed and scaled by $2\pi$.
\begin{nicebox}
$$x(t) \ftp X(\omega)$$
$$\Rightarrow$$
$$X(t) \ftp 2\pi x(-\omega)$$
\end{nicebox}
\begin{example}
What is the Fourier transform of $x(t) = 1$?
We know that $\ftrans{\delta(t)} = 1$, so we can apply duality and find that $\ftrans{1} = 2\pi\delta(-\omega) = 2\pi\delta(\omega)$. In other words, the constant signal 1 corresponds to a Dirac delta.
\end{example}
\begin{example}
What is the Fourier transform of $\cos(\omega_0 t)$?
First note that $\cos(\omega_0 t) = \frac{1}{2}e^{i\omega_0 t} + \frac{1}{2}e^{-i\omega_0 t}$. Then recall a few properties of the CTFT:
\begin{align*}
\delta(t) &\ftp 1 \\
\delta(t-t_0) &\ftp e^{-i\omega t_0} \\
\end{align*}
Then by duality, we can say that
\begin{align*}
e^{-i\omega_0 t} &\ftp 2\pi\delta(\omega_0+\omega) &\quad \mbox{since } \delta(-\omega_0-\omega) = \delta(\omega_0+\omega) \\
e^{i\omega_0 t} &\ftp 2\pi\delta(\omega_0-\omega) \\
\end{align*}
Now putting it all together:
\begin{align*}
\cos(\omega_0 t) &= \frac{1}{2}e^{i\omega_0 t} + \frac{1}{2}e^{-i\omega_0 t} \\
\ftrans{\cos(\omega_0 t)} &= \frac{1}{2}\ftrans{e^{i\omega_0 t}} + \frac{1}{2}\ftrans{e^{-i\omega_0 t}} \\
\ftrans{\cos(\omega_0 t)} &= \frac{1}{2}2\pi\delta(\omega-\omega_0) + \frac{1}{2}2\pi\delta(\omega+\omega_0) \\
\ftrans{\cos(\omega_0 t)} &= \pi\delta(\omega-\omega_0) + \pi\delta(\omega+\omega_0) \\
\end{align*}
\end{example}
\subsubsection{CTFT Frequency Shifting}
What is the Fourier Transform of the signal $e^{i\omega_0 t}x(t)$?
\begin{align*}
\hat{X}(\omega) &= \int_{-\infty}^{\infty}\hat{x}(t)e^{-i\omega t}dt \\
\hat{X}(\omega) &= \int_{-\infty}^{\infty}e^{i\omega_0 t}x(t)e^{-i\omega t}dt \\
\hat{X}(\omega) &= \int_{-\infty}^{\infty}x(t)e^{-i(\omega-\omega_0) t}dt \\
X(\omega-\omega_0) &= \int_{-\infty}^{\infty}x(t)e^{-i(\omega-\omega_0) t}dt \\
\end{align*}
Therefore we can say that multiplication by a complex exponential in the time domain corresponds to a frequency shift in Fourier space:
\begin{nicebox}
$$e^{i\omega_0 t}x(t) \ftp X(\omega-\omega_0) $$
\end{nicebox}
\end{document}