\documentclass[12pt]{article} 
%\renewcommand\baselinestretch{1.667} 
\topmargin= -0.4in 
\textheight=10.4in 
%\textheight=10.5in 
%\textheight=10.5in 
%\textwidth=6in 
\textwidth=6.5in 
\oddsidemargin=0.0in 
\pagestyle{empty} 

\begin{document} 
	
%\vspace*{1.2in} 
 
\begin{center}CALIFORNIA STATE UNIVERSITY, LONG BEACH\\ 
             \vspace*{.10in} 
               Department of Mathematics and Statistics\\ 
             \vspace*{.15in} 
              NUMERICAL ANALYSIS COMPREHENSIVE EXAM\\ 
              \vspace*{.10in} 
               Sample Exam 3\\ 
\end{center} 
 
\vspace{.4in} 
\noindent  
Choose any \underline{Five Problems}.  You may use only a 
\underline{non-graphing calculator}. 

\vspace*{.15in}
\noindent
1. a) Let $x_{0},  x_{1},\ldots, x_{n}$ be $(n+1)$ distinct points, 
and let $f(x)$ be defined at these points. Prove that the polynomial 
$P_{n}(x)$ of degree $\leq n$  which interpolates $f(x)$ at these 
$(n+1)$ points exists and is unique. Develop all details needed in your
proof and clearly define any special symbols you use. 
 
\vspace*{.15in} 
\noindent
b) Let $x_{0},x_{1}, \ldots , x_{n}$ be $(n+1)$ distinct points in 
the interval $[a,b]$.
Show that a quadrature \linebreak formula 
$Q(f) = \sum_{i=0}^{n} A_{i}f(x_{i})$
for approximating $\int_{a}^{b} f(x) dx$ is exact for all polynomials
of degree $\leq n$ \underline{if and only if} $A_{i} = \int_{a}^{b} l_{i}(x) dx,\
0 \leq i \leq n$, where $l_{i}(x)$ is the Lagrange polynomial
$$
 l_{i}(x) \equiv \prod_{j=0,j\neq i}^{n} \left(
   \frac{x-x_{j}}{x_{i} - x_{j}} \right) .
$$
Explain key details in your proof. 
%b) Let $x_{0},x_{1}, \ldots , x_{n}$ be $(n+1)$ distinct points in 
%interval $[a,b]$.
%Define the quadrature formula $Q(f)$ for approximating 
%$\int_{a}^{b} f(x) dx$ by $Q(f) \equiv \sum_{i=0}^{n} A_{i}f(x_{i})$.
%Let each coefficient $A_{i}$ have the value 
%$A_{i} = \int_{a}^{b} l_{i}(x) dx$, where %$\l_{i}(x)$ 
%is the Lagrange polynomial
%$$
% l_{i}(x) \equiv \prod_{j=0,j\neq i}^{n} \left(
%   \frac{x-x_{j}}{x_{i} - x_{j}} \right) .
%$$
%Let $P(x)$ be \underline{any} polynomial of degree $\leq n$.  Prove that
%$Q(P) \ = \ \int_{a}^{b} P(x) dx$.  Explain key details in your proof.

\vspace*{.25in}
\noindent
2. a) Let $\| \cdot \|_{*}$ be a \underline{new} norm  defined on  
$\mbox{\bf R}^{n}$ by 
$\| x \|_{*} \equiv c \max \{|x_{1}|, \ldots, |x_{n}| \} $ for a constant
$c > 0$ and for all $x \in \mbox{\bf R}^{n}$, i.e. 
$\| x \|_{*} =  c \| x \|_{\infty}$.  
Let $\|A\|_{*}$ be the corresponding 
natural matrix norm induced on $A \in \mbox{\bf R}^{n \times n}$.   
Prove that 
$$
\|A\|_{*} =\displaystyle{\max_{1 \leq i \leq n}} \sum_{j=1}^{n}
|a_{ij}|.
$$
Include all details.

\underline{Hint}: The proof proceeds just like the one for 
$\|A\|_{\infty}$ with only small changes. 

\vspace*{.15in} 
\noindent
b) Let $A \in \mbox{\bf R}^{n \times n}$.  Let $\| \cdot \|$ be any
norm on $\mbox{\bf R}^{n}$ and $\|A\|$ be the corresponding 
natural matrix norm induced on $A$.  
Suppose that $A$ is nonsingular, $Ax = b$, and $r=b-A\tilde{x}$
for $x,\tilde{x} \in \mbox{\bf R}^{n}$. Prove that
$$
\frac{\|x-\tilde{x}\|}{\|x\|} \leq K(A) \frac{\|r\|}{\|b\|}
$$
where $K(A)$ is the condition number of $A$. 

\vspace*{.15in}
\noindent
c) Let $A \in \mbox{\bf R}^{n \times n}$.  Let $\| \cdot \|$ be 
 \underline{any} 
norm on $\mbox{\bf R}^{n}$ and $\|A\|$ be the corresponding 
natural matrix norm induced on $A$.  
Let $r_{\sigma}(A)$ be the spectral radius of
$A$.  Prove that if $r_{\sigma}(A) < 1$ then 
$\lim_{k \rightarrow \infty} \|A^{k}\| = 0$.  You may use other facts that
you know about $r_{\sigma}(A)$ in your proof. 

\newpage

\noindent 
3. a) Let $M \in \mbox{\bf R}^{n \times n}$ with 
$x^{(k)}, x^{(k+1)}$, and  $g \in \mbox{\bf R}^{n}$ in the matrix
iteration formula 

\noindent
$x^{(k+1)} = M x^{(k)} + g$. 
Assume the spectral radius $ r_{\sigma}(M) < 1$ and let
$x \in \mbox{\bf R}^{n}$ be a fixed-point of the iteration. 
Give a direct proof 
(without quoting a similar result) that $x^{(k)}$ converges to $x$ as 
$k \rightarrow \infty$ for all $x^{(0)} \in \mbox{\bf R}^{n}$.

\underline{Hint}:  Derive a suitable form for $( x^{(k)} - x )$.   

\vspace*{.15in}
\noindent
b) Let $A \in \mbox{\bf R}^{n \times n}$ be strictly diagonally dominant. 
Let the matrix iteration formula from part (a) now represent the Jacobi  
iteration $x^{(k+1)} = M_{J} x^{(k)} + g$ for solving $Ax = b$, 
$b \in \mbox{\bf R}^{n}$.  Prove that $ r_{\sigma}(M_{J}) < 1$.

%\underline{Hint}: Form $M_{J}$.  Recall Gerschgorin's theorem.

\vspace*{.30in}
\noindent
4. Let $\Pi_{n}$ be the vector space of all polynomials of degree $\leq n$.

\vspace*{.10in}
\noindent
a) The polynomial $P_{3}(x) = x^{3} - \frac{3}{5}x$ is orthogonal to $\Pi_{2}$
relative to the weight function $w(x) \equiv 1$ on $[-1,1]$. Obtain 
the Gaussian quadrature formula that is based on $P_{3}(x)$. Also,
state the precision of this quadrature formula.

\vspace*{.15in}
\noindent
b) Let $P_{n+1}(x) \in \Pi_{n+1}$ be orthogonal to $\Pi_{n}$ relative to
a weight function $w(x) \geq 0$ on $[a,b]$. Denote by  
$x_{0},x_{1},\ldots ,x_{n}$ the
$(n+1)$ real roots of $P_{n+1}$, all in $(a,b)$. Let $Q(f) = 
\sum_{i=0}^{n}A_{i}f(x_{i})$ be a quadrature formula to approximate
$\int_{a}^{b} w(x)f(x) dx$.  Prove that if $Q(f)$ has precision 
$\geq n$, then the precision of $Q(f)$ is actually at least $2n+1$. 

\vspace*{.15in}
\noindent
c) Let $f(x)$ be a smooth function and define 
$F(x) \equiv  \int_{a}^{x} f(t) \, dt$. Recall the Trapezoidal
rule quadrature formula is $Q(f) = \frac{h}{2} [ f(a) + f(a+h) ]$ . 
Derive the error term of the Trapezoidal rule    
by applying Taylor series expanded about $a$ to
$F(a+h)$ and $Q(f)$ in $ \int_{a}^{a+h} f(t) \, dt - Q(f)$.

\vspace*{.30in}
\noindent
5.  a) Let $Q \in \mbox{\bf R}^{n \times n}$ be an orthogonal
matrix.  Prove that $\|Q\|_{2} = 1$.  Also, in the 
2-norm, prove that the condition number $K(Q) = 1$.  

\vspace*{.15in}
\noindent
b) Let  
$$   
   A =  \left[  
   \begin{array}{rr} -1 &  0\\ 
                       1 &  0\\  
                       1 &  3\\
                       1 &  3   
  \end{array} \right]. 
$$     

\vspace*{.10in}  
\noindent
Use the $QR$ method to obtain the $QR$ factors of the matrix $A$ above.
You do not have to actually form $Q$ if you \underline{identify $Q$}
by matrix multiplication.  
Explicitly compute $R$.
Show all work.   
 
\underline{Note}:  You may choose to solve part (c) below at the same
time you solve part (b).

\vspace*{.15in} 
\noindent 
c)  For the matrix $A$ in part (b), consider the system of equations
$Ax  =  b$ where $b = (2,0,6,0)^{T}$.  Use the $QR$ factors which you
found in part (b) to obtain the actual least-squares solution 
$x^{*} = (x_{1}^{*} , x_{2}^{*})^{T}$
of this system.  If you could not solve part (b), explain fully how
you would have proceeded to solve this least-squares problem.  

\newpage
\noindent
6. Consider solving numerically the initial-value problem 
$y^{'}(x) = f(x,y),\ y(x_{0}) = y_{0}$, by using the linear multi-step method
    $$ 
     w_{i+1} =  \, \frac{3}{4}w_{i} + 
               \frac{1}{4}w_{i-2}  
              + \, \frac{3}{2}h \, f(x_{i},w_{i}) \, . 
    $$ 

\vspace*{.15in}
\noindent
a) Determine by a direct calculation 
(without quoting a general result) the order of the local 
truncation error of this method. 

\vspace*{.15in}
\noindent
b) Analyze the method for consistency, stability, and convergence.
Determine whether the method is strongly stable, weakly stable, or
unstable. Explain all your conclusions and state any theorems you use
in your analysis.

\vspace*{.15in}
\noindent
c)  Discuss the advantages and disadvantages of using  implicit
multi-step methods versus explicit multi-step methods. 

\vspace*{.3in}
\noindent
7. a) Show that the central difference formula
$$
D(h) = \frac{y(x_{0}+h) - y(x_{0}-h)}{2h}
$$
has an error expansion of the form 
$$
D(h) = y^{'}(x_{0})+ k_{1}h^{2} + k_{2}h^{4} + k_{3}h^{6} + \cdots 
$$
assuming $y$ is sufficiently differentiable.  You do not have to 
compute the precise values of the constants $k_{1}, k_{2}, k_{3}, \ldots$ .
  
\vspace*{.15in}
\noindent
b) Use extrapolation applied to $D(h)$ to develop 
an $O(h^{4})$ accurate approximation of $y^{'}(x_{0})$. 
Write out fully your new approximation of $y^{'}(x_{0})$ 
as a single difference formula which uses nodes at 
$x_{0} \pm h$ and $x_{0} \pm 2h$.  

\vspace*{.15in}
\noindent
c)  Show how one would derive  
an $O(h^{6})$ accurate approximation of $y^{'}(x_{0})$.
To do this, look carefully at the form of the error expansion.
Your work should show 
that the new approximation would actually be $O(h^{6})$ accurate.
DO NOT actually write out the difference formula with the nodes 
as you did in part (b).  

\end{document}