$$
% create the definition symbol
\def\bydef{\stackrel{\Delta}{=}}
%\def\circconv{\otimes}
\def\circconv{\circledast}
\newcommand{\qed}{\mbox{ } \Box}
\newcommand{\infint}{\int_{-\infty}^{\infty}}
% z transform
\newcommand{\ztp}{ ~~ \mathop{\mathcal{Z}}\limits_{\longleftrightarrow} ~~ }
\newcommand{\iztp}{ ~~ \mathop{\mathcal{Z}^{-1}}\limits_{\longleftrightarrow} ~~ }
% fourier transform pair
\newcommand{\ftp}{ ~~ \mathop{\mathcal{F}}\limits_{\longleftrightarrow} ~~ }
\newcommand{\iftp}{ ~~ \mathop{\mathcal{F}^{-1}}\limits_{\longleftrightarrow} ~~ }
% laplace transform
\newcommand{\ltp}{ ~~ \mathop{\mathcal{L}}\limits_{\longleftrightarrow} ~~ }
\newcommand{\iltp}{ ~~ \mathop{\mathcal{L}^{-1}}\limits_{\longleftrightarrow} ~~ }
\newcommand{\ftrans}[1]{ \mathcal{F} \left\{#1\right\} }
\newcommand{\iftrans}[1]{ \mathcal{F}^{-1} \left\{#1\right\} }
\newcommand{\ztrans}[1]{ \mathcal{Z} \left\{#1\right\} }
\newcommand{\iztrans}[1]{ \mathcal{Z}^{-1} \left\{#1\right\} }
\newcommand{\ltrans}[1]{ \mathcal{L} \left\{#1\right\} }
\newcommand{\iltrans}[1]{ \mathcal{L}^{-1} \left\{#1\right\} }
% coordinate vector relative to a basis (linear algebra)
\newcommand{\cvrb}[2]{\left[ \vec{#1} \right]_{#2} }
% change of coordinate matrix (linear algebra)
\newcommand{\cocm}[2]{ \mathop{P}\limits_{#2 \leftarrow #1} }
% Transformed vector set
\newcommand{\tset}[3]{\{#1\lr{\vec{#2}_1}, #1\lr{\vec{#2}_2}, \dots, #1\lr{\vec{#2}_{#3}}\}}
% sum transformed vector set
\newcommand{\tsetcsum}[4]{{#1}_1#2(\vec{#3}_1) + {#1}_2#2(\vec{#3}_2) + \cdots + {#1}_{#4}#2(\vec{#3}_{#4})}
\newcommand{\tsetcsumall}[4]{#2\lr{{#1}_1\vec{#3}_1 + {#1}_2\vec{#3}_2 + \cdots + {#1}_{#4}\vec{#3}_{#4}}}
\newcommand{\cvecsum}[3]{{#1}_1\vec{#2}_1 + {#1}_2\vec{#2}_2 + \cdots + {#1}_{#3}\vec{#2}_{#3}}
% function def
\newcommand{\fndef}[3]{#1:#2 \to #3}
% vector set
\newcommand{\vset}[2]{\{\vec{#1}_1, \vec{#1}_2, \dots, \vec{#1}_{#2}\}}
% absolute value
\newcommand{\abs}[1]{\left| #1 \right|}
% vector norm
\newcommand{\norm}[1]{\left|\left| #1 \right|\right|}
% trans
\newcommand{\trans}{\mapsto}
% evaluate integral
\newcommand{\evalint}[3]{\left. #1 \right|_{#2}^{#3}}
% slist
\newcommand{\slist}[2]{{#1}_{1},{#1}_{2},\dots,{#1}_{#2}}
% vectors
\newcommand{\vc}[1]{\textbf{#1}}
% real part
\newcommand{\Real}[1]{{\Re\mathit{e}\left\{{#1}\right\}}}
% imaginary part
\newcommand{\Imag}[1]{{\Im\mathit{m}\left\{{#1}\right\}}}
\newcommand{\mcal}[1]{\mathcal{#1}}
\newcommand{\bb}[1]{\mathbb{#1}}
\newcommand{\N}{\mathbb{N}}
\newcommand{\Z}{\mathbb{Z}}
\newcommand{\Q}{\mathbb{Q}}
\newcommand{\R}{\mathbb{R}}
\newcommand{\C}{\mathbb{C}}
\newcommand{\I}{\mathbb{I}}
\newcommand{\Th}[1]{\mathop{\mathrm{Th}(#1)}}
\newcommand{\intersect}{\cap}
\newcommand{\union}{\cup}
\newcommand{\intersectop}{\bigcap}
\newcommand{\unionop}{\bigcup}
\newcommand{\setdiff}{\backslash}
\newcommand{\iso}{\cong}
\newcommand{\aut}[1]{\mathop{\mathrm{Aut}(#1)}}
\newcommand{\inn}[1]{\mathop{\mathrm{Inn}(#1)}}
\newcommand{\Ann}[1]{\mathop{\mathrm{Ann}(#1)}}
\newcommand{\dom}[1]{\mathop{\mathrm{dom} #1}}
\newcommand{\cod}[1]{\mathop{\mathrm{cod} #1}}
\newcommand{\id}{\mathrm{id}}
\newcommand{\st}{\ |\ }
\newcommand{\mbf}[1]{\mathbf{#1}}
\newcommand{\enclose}[1]{\left\langle #1\right\rangle}
\newcommand{\lr}[1]{\left( #1\right)}
\newcommand{\lrsq}[1]{\left[ #1\right]}
\newcommand{\op}{\mathrm{op}}
\newcommand{\dotarr}{\dot{\rightarrow}}
%Category Names:
\newcommand{\Grp}{\mathbf{Grp}}
\newcommand{\Ab}{\mathbf{Ab}}
\newcommand{\Set}{\mathbf{Set}}
\newcommand{\Matr}{\mathbf{Matr}}
\newcommand{\IntDom}{\mathbf{IntDom}}
\newcommand{\Field}{\mathbf{Field}}
\newcommand{\Vect}{\mathbf{Vect}}
\newcommand{\thm}[1]{\begin{theorem} #1 \end{theorem}}
\newcommand{\clm}[1]{\begin{claim} #1 \end{claim}}
\newcommand{\cor}[1]{\begin{corollary} #1 \end{corollary}}
\newcommand{\ex}[1]{\begin{example} #1 \end{example}}
\newcommand{\prf}[1]{\begin{proof} #1 \end{proof}}
\newcommand{\prbm}[1]{\begin{problem} #1 \end{problem}}
\newcommand{\soln}[1]{\begin{solution} #1 \end{solution}}
\newcommand{\rmk}[1]{\begin{remark} #1 \end{remark}}
\newcommand{\defn}[1]{\begin{definition} #1 \end{definition}}
\newcommand{\ifff}{\Leftrightarrow}
% For the set of reals and integers
\newcommand{\rr}{\R}
\newcommand{\reals}{\R}
\newcommand{\ii}{\Z}
\newcommand{\cc}{\C}
\newcommand{\nn}{\N}
\newcommand{\nats}{\N}
% For terms being indexed.
% Puts them in standard font face and creates an index entry.
% arg: The term being defined.
% \newcommand{\pointer}[1]{#1\index{#1}}
% For bold terms to be indexed, but defined elsewhere.
% Puts them in bold face and creates an index entry.
% arg: The term being defined.
\newcommand{\strong}[1]{\textbf{#1}}
% For set names.
% Puts them in italics. In math mode, yields decent spacing.
% arg: The name of the set.
\newcommand{\set}[1]{\textit{#1}}
$$
\documentclass{article}
\usepackage{latex2html5}
\usepackage{writer}
\usepackage{auto-pst-pdf}
\usepackage{pstricks-add}
\usepackage{graphicx}
\usepackage{hyperref}
\definecolor{lightblue}{rgb}{0.0,0.24313725490196078,1.0}
\title{{\Huge math club}}
\author{
\textbf{Dan Lynch} \\
UC Berkeley \\
EECS Department \\
D@nLynch.com \\
}
\date{1st of December 2012}
\begin{document}
\maketitle
\newpage
\tableofcontents
\newpage
\section{intro}
\subsection{fourier series}
Given a periodic signal $x$ in CT, we can estimate the signal using the formula:
$$ \hat{x}(t) = \sum \limits_{k=-N}^{N}\alpha_k e^{ik\omega_0 t} $$
How can we determine the coefficients that make the estimate closest in terms of error energy? Let $W\subseteq\R^{2N+1}$ be the subspace spanned by the set of orthogonal basis vectors $\Psi_0, \Psi_1, \Psi_{-1}, \dots, \Psi_{N}, \Psi_{-N}$. If $x$ is a vector that is not in the column space of $W$, then we can project $x$ onto the column space of $W$, producing an approximation vector $\hat{x}$, which has a distance of $\norm{\mathcal{E}_N}$ from the vector $x$. The vectors $\mathcal{E}_N$ and $\hat{x}$ are orthogonal.
% \img{images/bases/projtheory/proj.ps}
\psset{unit=1cm}
\begin{center}
\begin{pspicture}(-1,-3)(9,4)
\pscustom[fillstyle=solid,fillcolor=gray!40,linestyle=none]{
\psline[linewidth=1 pt](0,0)(4,1.2)
\psline[linewidth=1 pt](4,1.2)(8.4,0)
\psline[linewidth=1 pt](8.4,0)(4,-1.2)
\psline[linewidth=1 pt](4,-1.2)(0,0)
}
\psline[linewidth=1 pt](0,0)(4,1.2)
\psline[linewidth=1 pt](4,1.2)(8.4,0)
\psline[linewidth=1 pt](8.4,0)(4,-1.2)
\psline[linewidth=1 pt](4,-1.2)(0,0)
\rput(0.78,0){$W$}
% new vector
\rput(6,3.3){$x$}
\psline[linewidth=1.5 pt,linecolor=red]{->}(2.2,0.2)(6,3)
% new vector
\rput(6.35,1.5){$\mathcal{E}_N$}
\psline[linewidth=1.5 pt]{->}(6,0)(6,3)
% new vector
\rput(4,-0.3){$\hat{x}_N$}
\psline[linewidth=1.5 pt]{->}(2.2,0.2)(6,0)
% new vector
\psline[linewidth=1.5 pt](2.2,0.2)(6,0)
\end{pspicture}
\end{center}
We want to minimize $\norm{\mathcal{E}_N}$ to make the best approximation. $\mathcal{E}_N = x - \hat{x}_N$, hence $\norm{\mathcal{E}_N} = \norm{x-\hat{x}_N}$. Since $\mathcal{E}_N \perp W$, we can use the inner product and the properties of orthogonality to solve for the coefficients. Since $W \subseteq \R^{2N+1}$, we have $2N+1$ equations and $2N+1$ coefficients.
\begin{align*}
\langle \hat{x}_N, \Psi_\ell\rangle &= \langle \sum \limits_{k=-N}^{N}\alpha_k e^{ik\omega_0 t} , \Psi_\ell\rangle \\
\langle \hat{x}_N, \Psi_\ell\rangle &= \sum \limits_{k=-N}^{N}\alpha_k \langle e^{ik\omega_0 t} , \Psi_\ell\rangle \\
\langle \hat{x}_N, \Psi_\ell\rangle &= \alpha_\ell \langle e^{i\ell\omega_0 t} , \Psi_\ell\rangle \\
\langle \hat{x}_N, \Psi_\ell\rangle &= \alpha_\ell \langle \Psi_\ell , \Psi_\ell\rangle \\
\alpha_\ell &= \frac{\langle \hat{x}_N, \Psi_\ell\rangle }{\langle \Psi_\ell , \Psi_\ell\rangle } \\
\end{align*}
If $\hat{x}(t) = \sum \limits_{k=0}^{M-1}\alpha_k \Psi_k$, where $x$ is a $p$-periodic signal, which $\Psi_k$'s would you choose? Since we are taking a subset, the larger exponentials are better since they dominate. Otherwise, using $\{0,1,\dots,M-1\}\subset\Z$ is not using very much. Pick the largest in magnitude FS coefficient, and then go down from there, but it doesn't have to be contiguous.
\textbf{Continuous-Time Fourier Series}
The Continuous-Time Fourier Series (CTFS), often called just Fourier Series (FS), tells us that we can write a signal as a linear combination of orthogonal functions, or bases. In general, we can use any bases of orthogonal functions, but we will mainly use complex exponentials.
\begin{align*}
x(t) &= \sum \limits_{k=-\infty}^{\infty} X_k e^{ik\omega_0 t} \\
x(t) &= \sum \limits_{k=-\infty}^{\infty} X_k \Psi_k(t) \\
\end{align*}
This brings us to our synthesis equation:
$$
x = \sum \limits_{k=-\infty}^{\infty}X_k\Psi_k
$$
We use the inner product to find the spectral coefficient $X_k$, which brings us to our analysis equation:
$$
X_k = \frac{\langle x,\Psi_k\rangle}{\langle \Psi_k,\Psi_k\rangle}
$$
But how do we define our inner product? In discrete-time, we used
\begin{nicebox}
$$
\langle f,g\rangle = \sum \limits_{n=\langle p\rangle} f(n)g^*(n) \quad \mbox{(Discrete-time inner product)}
$$
\end{nicebox}
We can simplify this using linear algebraic language.
\begin{align*}
f &= \left[ \begin{array}{c} f(0) \\ f(1) \\ \vdots \\ f(p-1) \\ \end{array} \right] \\
g &= \left[ \begin{array}{c} g(0) \\ g(1) \\ \vdots \\ g(p-1) \\ \end{array} \right]
\end{align*}
Therefore, we can write $\langle f,g \rangle$ as multiplication of a transposed vector with another vector:
$$ \langle f,g\rangle = f^Tg^* $$
But now that we are in continuous-time, we need more. We have a continuum of values, not a discrete, countable set. Hence, we define the inner product as an integral:
\begin{nicebox}
$$
\langle f,g\rangle = \int_{\langle p\rangle} f(t)g^*(t) dt \quad \mbox{(Continuous-time inner product)}
$$
\end{nicebox}
Now let's look at $\langle \Psi_k, \Psi_\ell\rangle$:
\begin{align*}
\langle\Psi_k, \Psi_\ell\rangle &= \int_{\langle p\rangle} \Psi_k(t)\Psi_\ell^*(t) dt \\
\langle\Psi_k, \Psi_\ell\rangle &= \int_{\langle p\rangle} e^{ik\omega_0 t}e^{-i\ell \omega_0 t} dt \\
\langle\Psi_k, \Psi_\ell\rangle &= \int_{\langle p\rangle} e^{i(k-\ell)\omega_0 t} dt \\
\end{align*}
We have two cases, where the exponent is zero when $k=\ell$, and then when it is non-zero and $k\neq \ell$.
I) $k=\ell$
This one is simpler. We are integrating $1$ over a contiguous interval of length $p$, hence the value is $p$:
\begin{align*}
\langle \Psi_\ell, \Psi_\ell\rangle &= \int_{\langle p\rangle} dt = p
\end{align*}
II) $k\neq\ell$
This case requires a bit more work. Let's integrate.
\begin{align*}
\langle\Psi_k, \Psi_\ell\rangle &= \int_{\langle p\rangle} e^{i(k-\ell)\omega_0 t} dt \\
\langle\Psi_k, \Psi_\ell\rangle &= \left.\frac{e^{i(k-\ell)\omega_0 t}}{i(k-\ell)\omega_0} \right|_{\langle p\rangle} \\
\langle\Psi_k, \Psi_\ell\rangle &= \frac{e^{i(k-\ell)\omega_0 p}- e^{0}}{i(k-\ell)\omega_0} \\
\langle\Psi_k, \Psi_\ell\rangle &= \frac{e^{i(k-\ell)2\pi}- e^{0}}{i(k-\ell)\omega_0} \\
\langle\Psi_k, \Psi_\ell\rangle &= \frac{0}{i(k-\ell)\omega_0} = 0\\
\end{align*}
Let's look at this from another perspective and change to sines and cosines instead of integrating:
\begin{align*}
\langle\Psi_k, \Psi_\ell\rangle &= \int_{\langle p\rangle} e^{i(k-\ell)\omega_0 t} dt \\
\langle\Psi_k, \Psi_\ell\rangle &= \int_{\langle p\rangle} \cos((k-\ell)\omega_0 t) + i\sin((k-\ell)\omega_0 t) dt \\
\langle\Psi_k, \Psi_\ell\rangle &= \int_{\langle p\rangle} \cos((k-\ell)\omega_0 t) dt + \int_{\langle p\rangle}i\sin((k-\ell)\omega_0 t) dt \\
\end{align*}
Consider the case where $k-\ell=1$. We are integrating over a full period of the cosine function:
% \img{images/trigonometric/cos1/cos.ps}
Notice that there is an equal amount of positive and negative area, hence the value is zero. If $k-\ell=2$, then we are integrating $\cos(2\omega_0 t)$, which has a period $p/2$, hence we would integrate over two full periods of the cosine function giving us zero:
% \img{images/trigonometric/cos2/cos.ps}
If you were to let $k-\ell=N$, where $N\in \Z$, then you would be integrating over $N$ periods since the function would have a period of $p/N$. This same phenomenon occurs with the sine function.
% \img{images/trigonometric/sin1/sin.ps}
Therefore, any time we have $k-\ell \neq 0$, we are integrating over integer multiples of the period, hence
$$\langle \Psi_k, \Psi_\ell\rangle = p\delta(k-\ell)$$
Looking back at the analysis equation, we have
$$ X_k = \frac{\langle x,\Psi_k\rangle}{\langle\Psi_k, \Psi_k\rangle} = \frac{1}{p} \int_{\langle p\rangle}x(t)e^{-ik\omega_0 t} dt $$
Note that since $\omega_0$ and $k$ are both required, the equation relies on them, and sometimes you may see $X(k\omega_0)$.
To sum everything up, we have two important equations regarding the continuous-time fourier series (FS):
\begin{nicebox}
\begin{align*}
x(t) &= \sum \limits_{k=-\infty}^{\infty}X_ke^{ik\omega_0t} \quad &\mbox{(synthesis equation)} \\
X_k &= \frac{1}{p} \int_{\langle p\rangle}x(t)e^{-ik\omega_0t}dt \quad &\mbox{(analysis equation)}
\end{align*}
\end{nicebox}
Because we can write $x(t)$ as an infinite number of $X_k$s, $X_k$ is not necessarily periodic, but $x(t)$ is $p$-periodic in continuous-time. Note that we can write $x(t)$ using any orthogonal set of basis vectors:
\begin{align*}
x(t) &= \sum \limits_{k=-\infty}^{\infty}X_k\Psi_k(t) \\
x(t) &= \sum \limits_{k=0}^{\infty}A_k \cos(k\omega_0 t) + \sum \limits_{k=1}^{\infty}B_k \sin(k\omega_0 t)
\end{align*}
Consider the following bases:
\begin{align*}
\Psi_k(t) &= \cos(k\omega_0 t) \quad k\in\Z_{\oplus} \\
\Phi_\ell(t) &= \sin(\ell\omega_0 t) \quad \ell \in \Z_+
\end{align*}
In order to use these as bases, we need to show the following:
\begin{align*}
\langle \Psi_k, \Psi_m\rangle &= 0 \quad \mbox{if } k\neq m \\
\langle \Psi_k, \Phi_\ell\rangle &= 0 \quad \forall\mbox{ } k,\ell \\
\langle \Phi_\ell, \Phi_r\rangle &= 0 \quad \mbox{if } \ell\neq r \\
\end{align*}
Then to determine the coefficients, we can use the inner product.
\newpage
\bibliographystyle{cell}
\bibliography{sources}
\end{document}
\section{mission}
\newpage
\bibliographystyle{cell}
\bibliography{sources}
\end{document}