\documentclass[12pt]{article} 
%\renewcommand\baselinestretch{1.667} 
\topmargin= -0.4in 
\textheight=10.4in 
%\textheight=10.5in 
%\textheight=10.5in 
%\textwidth=6in 
\textwidth=6.5in 
\oddsidemargin=0.0in 
\pagestyle{empty} 

\begin{document} 

%\vspace*{1.2in} 
 
\begin{center}CALIFORNIA STATE UNIVERSITY, LONG BEACH\\ 
             \vspace*{.10in} 
               Department of Mathematics and Statistics\\ 
             \vspace*{.15in} 
              NUMERICAL ANALYSIS COMPREHENSIVE EXAM\\ 
              \vspace*{.10in} 
               Sample Exam 1\\ 
\end{center} 
 
\vspace{.50in} 
\noindent
Choose any \underline{Five Problems}.  You may use only a 
\underline{non-graphing calculator}.  

\vspace*{.40in}
\noindent
1. a) Let $A \in \mbox{\bf R}^{n \times n}$.
Prove that 
$$
\|A\|_{\infty} =\displaystyle{\max_{1 \leq i \leq n}} \sum_{j=1}^{n}
|a_{ij}|.
$$
Include all details.

\vspace*{.15in}
\noindent
b) Let $A, I \in \mbox{\bf R}^{n \times n}$
 with $A$ nonsingular and $I$ the identity matrix. 
Let $\| \cdot \|$ be any natural matrix norm on $\mbox{\bf R}^{n \times n}$. Prove that 
$\| I \| = 1$ and $K(A) \geq 1$ where $K(A)$ is the condition number
of $A$.

\vspace*{.15in}
\noindent
c) Let $A \in \mbox{\bf R}^{n \times n}$ be nonsingular and let
$\| \cdot \|$ be any natural matrix norm on $\mbox{\bf R}^{n \times n}$.
Let $\lambda$ be an eigenvalue of $A$.  Prove that 
$1/ \| A^{-1} \| \, \leq  \, | \lambda | \, \leq \, \| A \| $.

\vspace*{.40in}
\noindent
2. a) Let $x_{0},  x_{1},\ldots, x_{n}$ be $(n+1)$ distinct points, 
and let $f(x)$ be defined at these points. Prove that the polynomial 
$P_{n}(x)$ of degree $\leq n$  which interpolates $f(x)$ at these 
$(n+1)$ points exists and is unique. Develop all details needed in your
proof and clearly define any special symbols you use. 
 
\vspace*{.15in} 
\noindent 
b) Let $f(x) = \frac{12}{x}$. Construct and completely write out 
the Newton form of $P_{3}(x)$, the polynomial of degree $\leq 3$ 
which interpolates $f(x)$ at $x=1,2,3,4$.

\vspace*{.15in}
\noindent
c)  Let $x_{0},x_{1}, \ldots ,x_{k}$ be distinct points.  Let 
$p_{k-1}(x) \in \Pi_{k-1}$ interpolate a function $f(x)$ at 
$x_{0},x_{1}, \ldots ,x_{k-1}$ and let
$q_{k-1}(x) \in \Pi_{k-1}$ interpolate $f(x)$ at 
$x_{1},x_{2}, \ldots ,x_{k}$. Define
$$
P(x) \equiv 
 p_{k-1}(x)+(\frac{x-x_{0}}{x_{k}-x_{0}})(q_{k-1}(x)-p_{k-1}(x)) \quad .
$$
Show that $P(x)$ interpolates $f(x)$ at all $(k+1)$ distinct points
$x_{0},\ x_{1},\ldots, x_{k}$. Also, show directly from the
above formula for $P(x)$ that the lead
coefficient of $P(x)$ is the divided difference 
 $f[x_{0},x_{1}, \ldots ,x_{k}]$.
To show this, you may use whatever you know about $p_{k-1}(x)$
and $q_{k-1}(x)$.

\newpage
\noindent
3. a) 
Prove that every Householder matrix  $P \in \mbox{\bf R}^{n \times n}$ 
is both symmetric and orthogonal.

\vspace*{.15in}
\noindent
b) Use the $QR$ method to obtain the least-squares solution to the
system
\begin{eqnarray*}
 -x_{1} +  3x_{2}& = & \quad 0\\
 2x_{1} +  4x_{2}& = & \quad 0\\
2x_{1} +  5x_{2}& = & \quad 15
\end{eqnarray*}

\noindent
Your work should show your construction of the necessary Householder
matrices, but you need not explicitly compute $Q$.

\vspace*{.15in}
\noindent
c) For the general least-squares problem associated with an $m\times n$ 
system $Ax \ = \ b$, $m > n$, 
what advantages does the $SVD$ method have over the $QR$
method when the rank of $A$ is $<n$?   If the columns of $A$ are
clearly linearly independent, which method would you choose for
solving the least-squares problem?  Why?  
Also, 
explain why either of these methods is preferred over solving
$A^{T}Ax = A^{T}b$ directly.
  
\vspace*{.30in}
\noindent
4.  a) Let $g \in C^{k}$.  Assume the iteration $x_{n+1} = g(x_{n})$ converges to a fixed-point $p$.  \hfill

\noindent  
If $g^{'}(p) = 0,\ g^{''}(p) = 0, \ldots, g^{(k-1)}(p) = 0$,
but $g^{(k)}(p) \neq 0$, give a proof to show that the convergence 
is order $k$.

\vspace*{.15in}
\noindent
b)  Let $f \in C^{2}[a,b]$ with $f(p) = 0$ and $f^{'}(p) \neq 0$ for
some $p \in (a,b)$.  Consider the iteration $x_{n+1} = g(x_{n})$
where $g(x) = x - \phi(x)f(x)$ for some function $\phi(x) \in
C^{2}[a,b]$.  What condition must $\phi(x)$ satisfy so that the rate
of convergence of the iteration is at least quadratic?  Explain your
answer.

\vspace*{.15in}
\noindent
c) Let $g \in C^{1}[a,b]$ and let $p$ be a fixed-point of $g$.
 For the iteration $x_{n+1} = g(x_{n})$, assume $x_{n} \in [a,b]$ for
$n = 0, 1, 2, \ldots $.  Prove that
$$
   | x_{n} - p | \leq {\lambda}^{n}| x_{0} - p |
$$
where $\lambda  =  \displaystyle{\max_{a \leq x \leq b}}|g^{\prime}(x)|$. 

\vspace*{.3in}
\noindent
5. Consider solving numerically the initial-value problem 
$y^{'}(x) = f(x,y),\ y(x_{0}) = y_{0}$, by using the linear multi-step method
    $$ 
     w_{i+1} =  \, w_{i} \, - \, w_{i-1} + w_{i-2}   
              + \, h \, [f(x_{i},w_{i})  + 
                f(x_{i-1},w_{i-1})] \, . 
    $$ 

\vspace*{.15in}
\noindent
a) Show directly (without quoting a general result) that the order of the local truncation error is at least
   $O(h^{2})$.

\vspace*{.15in}
\noindent
b) Analyze the method for consistency, stability, and convergence.
Determine whether the method is strongly stable, weakly stable, or
unstable. Explain all your conclusions and state any theorems you use
in your analysis.

\vspace*{.15in}
\noindent
c)  Discuss the advantages and disadvantages of using  implicit
multi-step methods versus explicit multi-step methods.

\newpage
\noindent
6. Let $\Pi_{n}$ be the vector space of all polynomials of 
degree $\leq n$ on $[a,b]$. 

\vspace*{.15in}
\noindent
a) Let $w(x) \geq 0$ be a weight function in $C[a,b]$.  Let
$f,\ g \in C[a,b]$.  Write out each of the following three definitions
on $C[a,b]$:
the meaning of the standard inner product $\langle f,g \rangle$ relative to 
$w(x)$, 
the meaning of the 2-norm, and 
the meaning of $f$ and $g$ being orthogonal.

\vspace*{.15in}
\noindent
b)  Using the definitions in part (a), 
let $\{ \phi_{0},\phi_{1}, \ldots, \phi_{n}\}$
be an \underline{orthonormal} basis of polynomials in $\Pi_{n}$ relative to 
$w(x)$ on $C[a,b]$.  Let $f \in C[a,b]$.    
Give an inner product argument to prove that there exists a unique 
$r_{n}^{*}(x) \in \Pi_{n}$ which minimizes $\|f-r\|_{2}$ over all 
$r(x) \in \Pi_{n}$. Explain why your proof yields both existence and
uniqueness of $r_{n}^{*}(x)$.

\vspace*{.15in}
\noindent
c) What advantages are there to using an orthonormal basis of 
$\Pi_{n}$ to compute $r_{n}^{*}(x)$ instead of the standard basis 
$\{1,x,x^{2},\ldots,x^{n}\}$?

\vspace*{.4in}
\noindent
7. a) Let $CT(h)$ denote Composite Trapezoidal rule with step size $h$
to approximate $\int_{a}^{b}
f(x) \, dx$.
Assuming $f$ is sufficiently smooth, the error expansion for $CT(h)$
has the form 
$$CT(h) = \int_{a}^{b} f(x)\, dx + k_{1}h^{2} + k_{2}h^{4} + k_{3}h^{6}
  + \cdots 
$$
Show how one obtains an $O(h^{6})$ accurate approximation of $\int_{a}^{b}
f(x) \, dx$. Include all details to verify that the approximation is
$O(h^{6})$ accurate.

\vspace*{.15in}
\noindent
b)  Use Romberg integration to approximate $\int_{1/2}^{1} \frac{1}{x} \, dx$. Specifically, compute the Romberg table up to $R_{33}$. Carry at least
6 decimal digits in all your calculations.

\end{document}