\documentclass[reqno]{amsart}
\usepackage{amssymb}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
Eighth Mississippi State - UAB Conference on Differential Equations and
Computational Simulations.
{\em Electronic Journal of Differential Equations},
Conf. 19 (2010),  pp. 245--255.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu}
\thanks{\copyright 2010 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document} \setcounter{page}{245}
\title[\hfilneg EJDE-2010/Conf/19/\hfil Bifurcation of solutions]
{Bifurcation of solutions of separable parameterized
equations into lines}

\author[Y.-Q. Shen, T. J. Ypma\hfil EJDE/Conf/19 \hfilneg]
{Yun-Qiu Shen, Tjalling J. Ypma}  % in alphabetical order

\address{Yun-Qiu Shen \newline
Department of Mathematics \\
Western Washington University \\
Bellingham, WA 98225-9063, USA}
\email{yunqiu.shen@wwu.edu}

\address{Tjalling J. Ypma \newline
Department of Mathematics \\
Western Washington University \\
Bellingham, WA 98225-9063, USA}
\email{tjalling.ypma@wwu.edu}

\thanks{Published September 25, 2010.}
\subjclass[2000]{65P30, 65H10, 34C23, 37G10}
\keywords{Separable parameterized equations; bifurcation;
rank deficiency;\hfill\break\indent
Golub-Pereyra variable projection method;
bordered matrix; singular value decomposition; \hfill\break\indent
Newton's method}

\begin{abstract}
 Many applications give rise to separable parameterized equations
 of the form $A(y, \mu)z+b(y, \mu)=0$, where $y \in \mathbb{R}^n$,
 $z \in \mathbb{R}^N$ and the parameter $\mu \in \mathbb{R}$;
 here $A(y, \mu)$ is an $(N+n) \times N$ matrix and
 $b(y, \mu) \in \mathbb{R}^{N+n}$. Under the assumption that
 $A(y,\mu)$ has full rank we showed in \cite{shen:ypma:5} that
 bifurcation points can be located by solving a reduced equation
 of the form $f(y, \mu)=0$. In this paper we extend that method
 to the case that $A(y,\mu)$ has rank deficiency one at the
 bifurcation point. At such a point the solution curve $(y,\mu,z)$
 branches into infinitely many additional solutions, which form
 a straight line. A numerical method for reducing the problem to a
 smaller space and locating such a bifurcation point is given.
 Applications to equilibrium solutions of nonlinear ordinary
 differential equations and solutions of discretized partial
 differential equations are provided.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{example}[theorem]{Example}


\section{Introduction}

Equilibrium solutions of certain nonlinear ordinary differential
equations with parameters,
and numerical solutions of certain boundary value problems,
give rise to separable nonlinear parameterized equations of the form
\begin{equation}
A(y, \mu)z+b(y, \mu)=0  \label{eq:1.1}
\end{equation}
where $y \in {\mathbb{R}}^n, z \in {\mathbb{R}}^N$ and the
parameter $ \mu \in {\mathbb{R}}$. Here $A(y, \mu)$ is an $(N+n)
\times N$ matrix and $b(y, \mu) \in {\mathbb{R}}^{N+n}$; in
practice usually $N \gg n$. We refer to the recent survey
\cite{golu:pere:2} for numerous examples, references, and variants
of the Golub-Pereyra variable projection method for the solution
of separable equations. Further references to related material
appear in \cite{golu:leve, golu:pere:1,luke, morr,
mull:stok,shen:ypma,ypma:shen}. Recently in \cite{shen:ypma:5} we
extended a variant of the Golub-Pereyra variable projection method
\cite{shen:ypma,ypma:shen} to \eqref{eq:1.1} for bifurcation
problems specified by a parameter $\mu$. Under the assumption that
$A(y,\mu)$ has full rank we show there that curve following and
bifurcation at a point $(y, \mu,z)$ can be located within a
smaller space, for $(y, \mu)$ only, by solving a reduced equation
of the form $f(y, \mu) = 0$.

 In this paper we extend both the technique and
the analysis of \cite{shen:ypma:5} to the case when $A(y, \mu)$
has rank deficiency one at the bifurcation point
$(y^*,\mu^*,z^*)$. That implies a line of solutions in the
nullspace of $A(y^*,\mu^*)$ satisfying \eqref{eq:1.1} through the
bifurcation point. We call this phenomenon bifurcation into a
line. We show that \eqref{eq:1.1} can be reduced to solving only
for $(y, \mu, w)$, for a variable $w \in \mathbb{R}$ defined
below, by using a variant of the matrix bordering techniques of
\cite{beyn,grie:redd,rabi:redd,shen,shen:2,shen:3,shen:ypma:3} and
an extension of the algorithms of \cite{shen:ypma:2,shen:ypma:4}
to eliminate $N-1$ components of the linear variable $z$. We show
that bifurcation phenomena, including the line of solutions, are
preserved in this smaller space and that the relevant points can
be located by solving a smaller separable nonlinear system of the
form
\begin{equation}
f(y,\mu,w) \equiv \alpha(y,\mu)w+ \beta(y,\mu) = 0,  \label{eq:1.2}
\end{equation}
where $w \in \mathbb{R}$ and $\alpha(y,\mu), \beta(y,\mu) \in {\mathbb{R}}^{n+1}$.
If $N$ is much larger than $n$ the reduction of
dimension from $N+n+1$ to $n+2$ is significant. The precise form of $f(y,\mu,w)$ and how to
solve \eqref{eq:1.2} are detailed below.

 In the next section we present the theoretical basis of our method.
A reduction from \eqref{eq:1.1} to \eqref{eq:1.2} is presented
and the preservation of the bifurcation is proved. In
Section~\ref{sc:3} we derive expressions for the derivatives
involved in our method, and derive an extended system for the
location of the bifurcation point in the smaller dimensional
space. In the last section we give some numerical examples to
illustrate the method.

\section{Analysis}
Write
\begin{equation}
\widetilde{f} (y, \mu, z) \equiv A(y, \mu)z+b(y, \mu)=0  \label{eq:2.1}
\end{equation}
where $y \in {\mathbb{R}}^n, z \in {\mathbb{R}}^N, \mu \in
{\mathbb{R}}$ and $A(y, \mu)$ is an $(N+n) \times N$ matrix and
$b(y, \mu) \in {\mathbb{R}}^{N+n}$.  Assume both $A$ and $b$ are
$C^2$-Lipschitzian in $y$ and $\mu$; thus $\widetilde{f}(y,\mu,z)$
is also $C^2$-Lipschitzian in $y$ and $\mu$. Suppose a curve
following technique leads to a bifurcation point $(y^*,
\mu^*,z^*)$ of the solution set where the column rank deficiency
of $A(y^*, \mu^*)$ is one. At this point bifurcation into a
straight line in the nullspace of $A(y^*,\mu^*)$ and other curves
occurs.

The singular value decomposition of $A(y, \mu)$ at any point  $(y,
\mu)$ is defined to be \cite{golu:vanl,watk}
\begin{equation}
A(y, \mu)=U \Sigma V^T \equiv [u_1,\dots,u_{N+n}]
\begin{bmatrix}
\operatorname{diag}(\sigma_1, \dots, \sigma_N) \\
0_{n \times N}
\end{bmatrix}
[v_1,\dots,v_N]^T \label{eq:2.2}
\end{equation}
where $\{ u_1,\dots,u_{N+n} \}$ and $\{ v_1,\dots,v_N \}$
form orthonormal bases for ${\mathbb{R}}^{N+n}$ and
${\mathbb{R}}^N$ respectively and the singular values satisfy
$\sigma_1 \ge \dots \ge \sigma_N \ge 0$. The rank deficiency one
at $(y^*,\mu^*)$ implies that the corresponding singular values
satisfy $\sigma^*_{N-1} \ne 0$ and
$\sigma^*_N=0$. If $(\bar{y}, \bar{\mu})$ is near
$(y^*,\mu^*)$ then the corresponding singular values satisfy
$\bar{\sigma}_{N-1} \ne 0$ and $\bar{\sigma}_N$
is small by continuity of the singular values.

For a given (fixed) $(\bar y, \bar{\mu})$ near $(y^*, \mu^*)$
define the $N$-vector $\mathit{l}$ and
the $(N+n) \times (n+1)$ matrix $R$ from the singular value
decomposition of $A(\bar{y}, \bar{\mu})=\bar{U} \bar{\Sigma} {\bar V}^T$ by
\begin{equation}
\mathit{l} \equiv \bar{v}_N, \quad
R \equiv [\bar{u}_N, \dots, \bar{u}_{N+n}].
\label{eq:2.3}
\end{equation}
Let $M(y,\mu)$ be the $(N+n+1) \times (N+n+1)$ bordered matrix
\begin{equation}
M(y, \mu) \equiv  \begin{bmatrix} A(y, \mu) & R \\
\mathit{l}^T & O \end{bmatrix}.  \label{eq:2.4}
\end{equation}
Note that in \eqref{eq:2.4} and henceforth the vector $\mathit{l}$
and the matrix $R$ are fixed;
they are determined by the selected
point $(\bar y, \bar\mu)$; only the term $A(y, \mu)$ varies with
$y$ and $\mu$.

\begin{lemma}\label{lm:2.1}
The matrix $M(y, \mu)$ is nonsingular for all $(y, \mu)$ in a small neighborhood
of $(y^*, \mu^*)$.
\end{lemma}

\begin{proof}
 Let $\bar{y}, \bar{\mu}$ be as above. Observe that since
 $\bar{\sigma}_{N-1} \ne 0$
\[
 M(\bar y, \bar{\mu})
 = \begin{bmatrix} \bar{U} & 0 \\ 0 & 1 \end{bmatrix}
\begin{bmatrix}
\operatorname{diag}(\bar{\sigma}_1, \dots,
\bar{\sigma}_{N-1}) & 0 & 0 \\
0 & \begin{bmatrix}
\bar{\sigma}_N \\ 0 \end{bmatrix}
 & I_{n+1} \\ 0 & 1 & 0 \end{bmatrix}
\begin{bmatrix}
\bar{V}^T & 0 \\ 0 & I_{n+1} \end{bmatrix}
\]
is nonsingular.  From the
Neumann Lemma \cite{orte:rhei} $M(y, \mu)$ is nonsingular provided
$\| M(\bar y, \bar {\mu})^{-1}[M(\bar y, \bar {\mu})-M(y, \mu)]\| < 1$,
and since
\[
\| M(\bar y, \bar {\mu})-M(y, \mu) \|
 = \Big\| \begin{bmatrix} A(\bar y, \bar {\mu})-A(y, \mu) & 0 \\
  0 & 0 \end{bmatrix} \Big\|
\] this holds for all $(y, \mu)$ and $(\bar y, \bar{\mu})$
sufficiently near $(y^*, \mu^*)$ by continuity of $A(y, \mu)$.
\end{proof}

Assume $z$ satisfies \eqref{eq:1.1}. Define
$w=\mathit{l}^Tz \in \mathbb{R}$ and $\tilde z=z-w \mathit{l}$.
Then
\begin{equation}
\begin{bmatrix} A(y,\mu) \\ \mathit{l}^T \end{bmatrix} z
+ \begin{bmatrix} b(y,\mu) \\ -w \end{bmatrix}
=\begin{bmatrix}0 \\ 0 \end{bmatrix}
=\begin{bmatrix} A(y,\mu) \\ \mathit{l}^T \end{bmatrix}
(\tilde z + w\mathit{l})+ \begin{bmatrix} b(y,\mu) \\ -w \end{bmatrix}.  \label{eq:2.5}
\end{equation}
Denote
\begin{equation}
\widetilde A(y, \mu)=\begin{bmatrix} A(y, \mu) \\ \mathit{l}^T
\end{bmatrix}, \quad
\widetilde b(y, \mu,w)
=\begin{bmatrix} b(y, \mu) \\ -w \end{bmatrix}. \label{eq:2.6}
\end{equation}
By Lemma \ref{lm:2.1} the first $N$ columns of $M(y,\mu)$ form
the full rank matrix $\widetilde A(y, \mu)$. Thus \eqref{eq:2.5}
 has the unique least squares solution
$\widetilde{z}+w\mathit{l}=-[\widetilde A(y, \mu)]^+\widetilde{b}(y, \mu,w)$, where $[\widetilde{A}(y, \mu)]^+$
denotes the pseudo-inverse of $\widetilde A(y, \mu)$.
Extending the algorithm of \cite{shen:ypma,ypma:shen} we have
the following result.


\begin{theorem}\label{th:2.1}
$(y, \mu, w, \tilde z)$ is a solution of \eqref{eq:2.5} if
and only if it satisfies
\begin{equation}
f(y,\mu,w) \equiv C^T(y, \mu) \widetilde{b}(y,\mu,w)= 0, \quad
\tilde z = -w\mathit{l}- [\widetilde{A}(y, \mu)]^+
\widetilde{b}(y,\mu,w),  \label{eq:2.7}
\end{equation}
where $C^T(y,\mu)$ is the transpose of the $(N+n+1) \times (n+1)$
matrix $C(y,\mu)$ satisfying
\begin{equation}
C^T(y, \mu)M(y, \mu) = C^T(y, \mu) \begin{bmatrix}
A(y, \mu) & R \\ \mathit{l}^T & 0 \end{bmatrix}
= -[0_{(n+1) \times N}, I_{n+1}].
\label{eq:2.8}
\end{equation}
\end{theorem}

\begin{proof} ($\Longrightarrow$) Assume \eqref{eq:2.5} holds.
Multiply both sides of \eqref{eq:2.5} by $C^T(y, \mu)$ and
use \eqref{eq:2.8} to give the first
part of \eqref{eq:2.7}. Since $z=\tilde z +w\mathit{l}$ is a
solution of \eqref{eq:2.5}
for the given $(y,\mu,w)$ it is the unique least squares solution of a
linear equation with a full rank coefficient matrix, so we have the
second part of \eqref{eq:2.7}.

($\Longleftarrow$) Assume \eqref{eq:2.7} holds.
 From \eqref{eq:2.8} we have
\[
0= C^T(y, \mu) \widetilde b(y, \mu,w)
=-[0, I_{n+1}]M^{-1}(y, \mu) \widetilde{b}(y, \mu,w)
\]
which implies
\begin{equation}
\begin{aligned}
\widetilde{b}(y, \mu,w)
&=M(y, \mu) \begin{bmatrix} I_N & 0 \\ 0 & I_{n+1} \end{bmatrix}
M^{-1}(y,\mu) \widetilde{b}(y, \mu,w)   \\
&=M(y, \mu) \begin{bmatrix} [I_N, 0]M^{-1}(y,\mu)
 \widetilde{b}(y, \mu,w)
 \\ 0 \end{bmatrix}   \\
&= \widetilde{A}(y, \mu) [I_N, 0]M^{-1}(y,\mu) \widetilde{b}
(y, \mu,w).
\end{aligned}  \label{eq:2.9}
\end{equation}
We then obtain, using \eqref{eq:2.7} and \eqref{eq:2.9},
\begin{align*}
&\widetilde A(y, \mu)(\tilde{z}+w\mathit{l})
+ \widetilde b(y, \mu,w)   \\
&=  \widetilde A(y, \mu) \left( -{\widetilde A}^+
 (y, \mu)\widetilde b(y, \mu,w) \right)
+ \widetilde{b}(y, \mu,w)   \\
&=  [I_{N+n+1}-\widetilde{A}(y, \mu)\widetilde{A}^+(y, \mu)]\widetilde{A}(y, \mu)
[I_N, 0]M^{-1}(y,\mu)\widetilde{b}(y, \mu,w) \\
&=  [\widetilde{A}(y, \mu)- \widetilde{A}(y, \mu)\widetilde{A}^+
(y, \mu)\widetilde{A}(y, \mu)][I_N, 0]M^{-1}(y,\mu)\widetilde{b}
 (y, \mu,w) = 0
\end{align*}
as claimed.
\end{proof}

 We only need to solve the first equation of \eqref{eq:2.7} for
$(y,\mu,w)$ since it preserves bifurcations of \eqref{eq:2.5}
by the following theorem.

\begin{theorem} \label{th:2.2}
 For any fixed parameter $\mu$, the points $(y, \mu,w)$ and
$(\bar{y},\mu, \bar{w})$ are two distinct
solutions of the first part of  \eqref{eq:2.7} if and only
if $(y, \mu,z)$ and $(\bar y, \mu, \bar z)$ are two distinct solutions
of  \eqref{eq:2.5} with $z=\tilde{z}+w \mathit{l}$ and
$\bar z = \tilde{\bar{z}}+ \bar{w}\mathit{l}$, where
 $\tilde{z}$ and $\tilde{\bar{z}}$ are determined by the second
part of  \eqref{eq:2.7} respectively.
\end{theorem}

 \begin{proof} $( \Longrightarrow )$ If not, then
$(y,\mu,w) \ne (\bar{y},\mu, \bar{w})$ but
$(y, \mu,z)=( \bar{y}, \mu, \bar{z})$
which implies that $y=\bar{y}$, $z= \bar{z}$ and thus $w \ne \bar{w}$.
Thus $0=z-\bar{z}=(\tilde{z}-\tilde{\bar z})+(w-\bar{w})\mathit{l}$
by the definition of $z$ and $\bar{z}$.
 From Theorem \ref{th:2.1}, the fact that $(y,\mu,z)$ satisfies the
first equation of \eqref{eq:2.7} and $\tilde z$ satisfies the second
equation of \eqref{eq:2.7} implies that $(y,\mu,w,\tilde{z})$
satisfies \eqref{eq:2.5}; similarly so does
$(\bar{y},\mu,\bar{w}, \tilde{\bar z})$.
Therefore,
\begin{equation}
\widetilde{A}(y, \mu)(\tilde z + w\mathit{l})
+ \widetilde{b}(y,\mu,w)=0
=\widetilde{A}(\bar{y}, \mu)(\tilde{\bar z} + \bar{w}\mathit{l})
+\widetilde{b}(\bar{y}, \mu, \bar{w}).
\end{equation}
Subtracting the left side of the equation from the right side and
using $y = \bar{y}$ and $z = \bar{z}$ we have $w=\bar{w}$,
 contradicting $w \ne \bar{w}$. So
$(y,\mu,z) \ne ( \bar{y}, \mu, \bar{z})$.

 $( \Longleftarrow )$ If not, then
$(y,\mu,z) \ne ( \bar{y}, \mu, \bar{z})$
but $(y,\mu,w) = (\bar{y},\mu, \bar{w})$ which implies that
$w= \bar{w}$, $y= \bar{y}$ but
$z \ne \bar{z}$. By the second equation of \eqref{eq:2.7}
$\tilde{z} = \tilde{\bar{z}}$.
Thus $z=\tilde{z}+w\mathit{l}=\tilde{\bar{z}}
+\bar{w}\mathit{l}=\bar{z}$, which is a contradiction. So
$(y,\mu,w) \ne (\bar{y},\mu, \bar{w})$.
\end{proof}


Write
\begin{equation}
C^T(y,\mu) \equiv [(P^T(y, \mu))_{(n+1) \times
(N+n)},(q^T(y,\mu))_{n+1}].  \label{eq:2.10}
\end{equation}
Then the reduced equation $f(y,\mu,w)=0$ of \eqref{eq:2.7} becomes
\begin{equation}
f(y,\mu,w) \equiv - q^T(y,\mu)w+ P^T(y,\mu) b(y,\mu) \label{eq:2.11}
\end{equation}
which is of the form of a separable equation in a smaller space.
Essentially, the $N$-vector $z$ has been replaced by
the scalar $w$ while the matrix
$(A(y,\mu))_{(N+n) \times N}$ is replaced by the
$(n+1)$-vector $-q^T(y,\mu)$.

Finally we have the following results showing preservation
of the line of solutions.

\begin{theorem} \label{th:2.3}
 Let $(y^*, \mu^*, z^*)$ be a bifurcation point of  \eqref{eq:2.1}
with $A(y^*,\mu^*)$ having rank deficiency one and let
$A(y,\mu)$ have full rank at every solution point $(y,\mu,z)$
of \eqref{eq:2.1} in a small neighborhood of $(y^*, \mu^*, z^*)$
with $(y,\mu) \ne (y^*, \mu^*)$. Let $(\hat{y},\hat{\mu},\hat{z})$
be a solution point of \eqref{eq:2.1} in this neighborhood.
Then $(\hat{y},\hat{\mu})=(y^*,\mu^*)$ if and only if
$q^T(\hat{y},\hat{\mu})=0 \in \mathbb{R}^{n+1}$.
\end{theorem}

\begin{proof} First note from \eqref{eq:2.8} that $C(y,\mu)$ has
full rank $n+1$ since $M(y,\mu)$ is nonsingular for all $(y,\mu)$
in a neighborhood of $(y^*,\mu^*)$.

 $( \Longrightarrow )$  From \eqref{eq:2.8} the rows of
$C^T(y^*,\mu^*)=[P^T(y^*,\mu^*),q^T(y^*,\mu^*)]$ form a basis for the
left nullspace of $\widetilde A(y^*, \mu^*)$, which is
$(n+1)$-dimensional because the matrix $M(y^*,\mu^*)$ of \eqref{eq:2.4}
has full rank. Since $A(y^*,\mu^*)$ has column rank deficiency one,
the matrix $A(y^*,\mu^*)$ also has an $(n+1)$-dimensional
left nullspace. Suppose its basis consists of the vectors
$\varphi_1,\dots, \varphi_{n+1}$, and denote by $\Phi$ the matrix
with these vectors as columns. Then
the rows of $[ \Phi^T,0_{(n+1) \times 1}] $ also form a basis for
the left nullspace of $\widetilde{A}(y^*, \mu^*)$, so
$[P^T(y^*,\mu^*),q^T(y^*,\mu^*)]=G [ \Phi^T,0]$
for some nonsingular $(n+1) \times (n+1)$ matrix $G$,  which implies
that
\begin{equation}
P^T(y^*,\mu^*)=G \Phi^T, \quad q^T(y^*,\mu^*)=0.
\end{equation}

 $( \Longleftarrow )$ The $n+1$ rows of
$C^T(\hat{y},\hat{\mu})=[P^T(\hat{y},\hat{\mu}),0]$ form a basis
of the left nullspace of $\widetilde A(\hat{y},\hat{\mu})$,
which implies that the $n+1$ rows of $P^T(\hat{y},\hat{\mu})$
form a basis for the left nullspace of the $(N+n) \times N$
matrix $A(\hat{y},\hat{\mu})$. Thus $A(\hat{y},\hat{\mu})$ has
rank deficiency one which implies that
$(\hat{y},\hat{\mu})=(y^*,\mu^*)$.
\end{proof}

\begin{theorem} \label{th:2.4}
 (a) If $(y^*,\mu^*,z^*)$ is a bifurcation point of \eqref{eq:1.1}
which bifurcates solutions into a straight line then $(y^*,\mu^*,w^*)$
with $w^*=\mathit{l}^Tz^*$ is a bifurcation point of \eqref{eq:2.11}
which bifurcates solutions into a straight line;

 (b) If $(y^*,\mu^*,w^*)$ is a bifurcation point of \eqref{eq:2.11}
which bifurcates solutions into a straight line then $(y^*,\mu^*,z^*)$
with $z^*=-[\widetilde{A}(y^*, \mu^*)]^+ \widetilde{b}(y^*,\mu^*,w^*)$
is a bifurcation point of \eqref{eq:1.1} which bifurcates solutions
into a straight line.
\end{theorem}

\begin{proof}
  From Theorems \ref{th:2.1} and \ref{th:2.2} $(y^*,\mu^*,z^*)$ is
a bifurcation point of \eqref{eq:1.1} if and only if $(y^*,\mu^*,w^*)$
is a bifurcation point of \eqref{eq:2.11}.  From Theorem \ref{th:2.3}
there is a line of solutions of \eqref{eq:1.1} through
$(y^*,\mu^*,z^*)$ if and only if there is a line of solutions
of \eqref{eq:2.11} through $(y^*,\mu^*,w^*)$.
\end{proof}

\section{Computation} \label{sc:3}

We shall need derivatives of $f(y,\mu,w)$ to solve \eqref{eq:2.11}.
Denote the vectors
$f'_j \equiv \frac{\partial f}{\partial {y_j}}$ for $j=1,\dots,n$,
$f'_{n+1} \equiv \frac{\partial f}{\partial \mu}$ and
$f'_{n+2} \equiv \frac{\partial f}{\partial w}$.
We use similar notation for the vectors of second partial
derivatives $f''_{j,k}=(f'_j)'_k$ of $f$ with respect
to the $j$th and $k$th independent variables of $(y,\mu,w)$,
and also for the derivatives of other functions of $(y,\mu,w)$.

\begin{theorem} \label{th:3.1}
Let $\zeta(y,\mu,w) \in \mathbb{R}^N$ and
$\psi(y,\mu,w) \in \mathbb{R}^{n+1}$ satisfy
\begin{equation}
\begin{bmatrix} A(y, \mu) & R \\ \mathit{l}^T & 0 \end{bmatrix}
\begin{bmatrix} \zeta(y,\mu,w) \\ \psi(y,\mu,w) \end{bmatrix}
= - \begin{bmatrix} b(y,\mu) \\ -w \end{bmatrix}.  \label{eq:3.1}
\end{equation}
Then for $j,k=1,\dots,n+1$, we have
\begin{itemize}
\item[(a)]
\begin{equation}
f(y,\mu,w)= \psi(y,\mu,w) \label{eq:3.2}
\end{equation}
\item[(b)]  $f'_j=\psi'_j = P^T(y, \mu) [ A'_j \zeta(y,\mu,w)+b'_j]$,
\begin{equation}
 \zeta'_j=-[I_N,0_{N \times (n+1)}]M^{-1}(y,\mu) \begin{bmatrix}
A'_j \zeta(y,\mu,w)+b'_j \\ 0 \end{bmatrix};
\label{eq:3.3}
\end{equation}

\item[(c)] $f'_{n+2}=\psi'_{n+2}=-q^T(y,\mu)$,
\begin{equation}
 \zeta'_{n+2} = [I_N,0] M^{-1}(y, \mu) e_{N+n+1}; \label{eq:3.4}
\end{equation}

\item[(d)]
\begin{equation}
 f''_{j,k}= \psi''_{j,k}   = P^T(y,\mu)[A''_{j,k}\zeta(y,\mu,w) +A^{
\prime}_j \zeta'_{k}
+A'_k \zeta'_{j}+b''_{j,k}];  \label{eq:3.5}
\end{equation}

\item[(e)] $f''_{n+2,j}= \psi''_{n+2,j} = P^T(y, \mu) A'_j
\zeta'_{n+2}$,
\begin{equation}
  f''_{j,n+2}=\psi''_{j,n+2}
= P^T(y, \mu) A'_j \zeta'_{n+2}; \label{eq:3.6}
\end{equation}

\item[(f)]
\begin{equation}
f''_{n+2,n+2}=\psi''_{n+2,n+2} = 0 \in \mathbb{R}^{n+1}. \label{eq:3.7}
\end{equation}
\end{itemize}
where $e_{N+n+1}$ is the $(N+n+1)$-th unit vector of the standard
basis in $\mathbb {R}^{N+n+1}$.
\end{theorem}


\begin{proof} (a) Using \eqref{eq:2.7}, \eqref{eq:2.8}
and \eqref{eq:3.1} we obtain
\begin{equation}
 f(y,\mu,w)=C^T(y,\mu)\widetilde{b}(y,\mu,w)= -[0,I_{n+1}]M^{-1}(y,\mu)\widetilde{b}(y,\mu,w)
=\psi(y,\mu,w).
\end{equation}

 (b) Differentiating both sides of \eqref{eq:3.1}
with respect to $y_j$ or $\mu$, we have
\begin{equation}
\begin{bmatrix} A'_j & 0 \\ 0 & 0 \end{bmatrix}
\begin{bmatrix} \zeta(y,\mu,w) \\ \psi(y,\mu,w) \end{bmatrix}
+ M(y,\mu) \begin{bmatrix} \zeta'_j \\ \psi'_j
\end{bmatrix}
= - \begin{bmatrix} b'_j \\ 0 \end{bmatrix}  \label{eq:3.8}
\end{equation}
which implies
\[
\begin{bmatrix} \zeta'_j \\
\psi'_j \end{bmatrix} = -M^{-1}(y,\mu) \begin{bmatrix} A'_j \zeta(y,\mu,w)+b'_j \\ 0
\end{bmatrix}.
\]
Using \eqref{eq:2.8}, \eqref{eq:2.10} and \eqref{eq:3.2} we thus have
\begin{align*}
f'_j &=  \psi'_j
=-[0, I_{n+1}]M^{-1}(y,\mu) \begin{bmatrix}
A'_j \zeta(y,\mu,w)+b'_j \\ 0 \end{bmatrix} \\
&=  P^T(y, \mu) [A'_j \zeta(y,\mu,w)+b'_j]
\end{align*}
and
\[
\zeta'_j=-[I_N,0]M^{-1}(y,\mu) \begin{bmatrix}
A'_j \zeta(y,\mu,w)+b'_j \\ 0 \end{bmatrix}.
\]

 (c) Differentiating both sides of \eqref{eq:3.1} with respect to
 $w$ gives
\begin{equation}
M(y, \mu) \begin{bmatrix} \zeta'_{n+2} \\ \psi'_{n+2}
\end{bmatrix} =  e_{N+n+1}
\label{eq:3.9}
\end{equation}
which with \eqref{eq:2.8}, \eqref{eq:2.10} and \eqref{eq:3.2} gives
\[
f'_{n+2}=\psi'_{n+2} = [0,I_{n+1}] M^{-1}(y, \mu) e_{N+n+1}
 = -C^T(y, \mu)e_{N+n+1}
= -q^T(y,\mu)
\]
and $ \zeta'_{n+2} = [I_N,0] M^{-1}(y, \mu) e_{N+n+1}$.

 (d) Differentiating both sides of \eqref{eq:3.8} with respect to
 $y_k$ or $\mu$ with \eqref{eq:2.8}, \eqref{eq:2.10} and \eqref{eq:3.2}
we obtain \eqref{eq:3.5}.

 (e) Differentiating both sides of \eqref{eq:3.9} with respect
to $y_j$ or $\mu$ with \eqref{eq:2.8}, \eqref{eq:2.10}
and \eqref{eq:3.2} we obtain the first part of \eqref{eq:3.6}.
Direct differentiation of both sides of the first part
of \eqref{eq:3.3} with respect to $w$ gives the second part
of \eqref{eq:3.6}.

 (f) Differentiating both sides of the first part
of \eqref{eq:3.4} with respect to $w$ we obtain \eqref{eq:3.7}.
\end{proof}

Let $(y^*,\mu^*,w^*)$ be a bifurcation solution of \eqref{eq:2.11}.
By \eqref{eq:3.2} $\psi(y^*,\mu^*,w^*) =f(y^*,\mu^*,w^*)=0$.
Thus \eqref{eq:2.5} and \eqref{eq:3.1} imply that
\begin{equation}
\begin{aligned}
z^* &=  -[\widetilde{A}(y^*, \mu^*)]^+ \widetilde{b}(y^*,\mu^*,w^*)  \\
&=  \zeta(y^*,\mu^*,w^*)\\
& = -[I_N,0_{N \times (n+1)}]M^{-1}(y^*,\mu^*)
 \widetilde{b}(y^*,\mu^*,w^*) \qquad
\end{aligned}\label{eq:3.10}
\end{equation}
by the uniqueness of the linear least squares solution for the
full rank matrix $\widetilde{A}(y^*,\mu^*)$. This expression is
used to obtain $z^*$, without using the pseudo-inverse,
after $y^*,\mu^*$ and $w^*$ have been found.

 From Theorem \ref{th:3.1} we obtain the $(n+1) \times (n+2)$
Jacobian matrix
\begin{equation}
f'(y,\mu,w)
=[P^T(y,\mu)K(y,\mu,w),-q^T(y,\mu)]
= C^T(y, \mu) \begin{bmatrix} K(y,\mu,w)& 0 \\ 0 & -1 \end{bmatrix}
\label{eq:3.11}
\end{equation}
where the $(N+n) \times (n+1)$ matrix
\begin{equation}
K(y,\mu,w) \equiv [A'_1 \zeta(y,\mu,w), \dots, A'_{n+1}\zeta(y,\mu,w)]+b'(y,\mu)  \label{eq:3.12}
\end{equation}
and $P(y, \mu)$ and $q(y,\mu)$ are defined in \eqref{eq:2.11}.

Since $(y^*,\mu^*,w^*)$ is a bifurcation point of \eqref{eq:2.11},
$f'(y^*,\mu^*,w^*)$ has a rank deficiency $d \ge 1$. To solve the
equation \eqref{eq:2.11} we use the technique of
\cite{shen:2,shen:3,shen:ypma:3,shen:ypma:5}.

Let the singular value decomposition of the $(n+1) \times (n+2)$
 matrix $f'(\bar{y},\bar{\mu},\bar{w})$ be
\begin{equation}
\begin{aligned}
& f'(\bar{y},\bar{\mu},\bar{w})
=\widetilde{U} \widetilde{\Sigma} {\widetilde V}^T   \\
& \equiv [\widetilde{u}_1,\dots,\widetilde{u}_{n+1}]
[ \operatorname{diag}(\widetilde{\sigma}_1, \dots, \widetilde
{\sigma}_{n+1}) , 0_{(n+1) \times 1}]
[\widetilde{v}_1,\dots,\widetilde{v}_{n+2}]^T.
\end{aligned}\label{eq:3.13}
\end{equation}
Let the $(n+2) \times (d+1)$ matrix $\widetilde{L}$ and the
$(n+1) \times d$ matrix $\widetilde{R}$ be
\begin{equation}
\widetilde{L} = [\widetilde{v}_{n+2-d},\dots,\widetilde{v}_{n+2}],
\quad
\widetilde{R} =[\widetilde {u}_{n+2-d},\dots,\widetilde{u}_{n+1}].
  \label{eq:3.14}
\end{equation}
An argument similar to that used in Lemma \ref{lm:2.1} leads to

\begin{lemma}\label{lm:3.1}
The $(n+d+2) \times (n+d+2)$ bordered matrix
\begin{equation}
\widetilde{M}(y, \mu,w) \equiv \begin{bmatrix}
f'(y,\mu,w) & \widetilde{R} \\ \widetilde{L}^T & 0 \end{bmatrix}
\label{eq:3.15}
\end{equation}
is nonsingular for all $(y, \mu,w)$ in a small
neighborhood of $(y^*, \mu^*,w^*)$.
\end{lemma}

As in \cite{shen:3,shen:ypma:3} an extended system of $n+d+2$
equations in $n+d+2$ variables for \eqref{eq:2.11} is
\begin{equation}
F(y,\mu,w,\lambda)=
\begin{bmatrix}
f(y,\mu,w)+ \widetilde{R} \lambda \\ g(y,\mu,w)
\end{bmatrix} = \begin{bmatrix}
0 \\ 0 \end{bmatrix}  , \label{eq:3.16}
\end{equation}
with the Jacobian matrix
\begin{equation}
F'(y,\mu,w,\lambda)=
\begin{bmatrix}
f'(y,\mu,w) & \widetilde{R}\\ -\eta^T(y,\mu,w) \sum_{k=1}^{n+1} [\xi_k(y,\mu,w) \nabla^2f_k(y,\mu,w)] & 0
\end{bmatrix}. \label{eq:3.17}
\end{equation}
Here $\lambda \in \mathbb{R}^{d}$ is a Lagrange-type auxiliary vector,
$\xi(y,\mu,w) \in \mathbb{R}^{n+1}$ and
$g(y,\mu,w) \in \mathbb{R}^{d+1}$ satisfy
\begin{equation}
\widetilde{M}^T(y,\mu,w) \begin{bmatrix} \xi(y,\mu,w) \\
g(y,\mu,w) \end{bmatrix}
= \begin{bmatrix} 0 \\ \gamma \end{bmatrix}
\label{eq:3.18}
\end{equation}
with a randomly selected nonzero vector $\gamma \in \mathbb{R}^d$,
and $\eta(y,\mu,w) \in \mathbb{R}^{(n+1) \times (d+1)}$ satisfies
\begin{equation}
\eta(y,\mu,w)= \begin{bmatrix} I_{n+1} & 0 \end{bmatrix}
\widetilde{M}^{-1}(y,\mu,w) \begin{bmatrix} 0 \\ I_{d+1} \end{bmatrix}.
\label{eq:3.19}
\end{equation}
It has been proved that under certain conditions for almost
every choice of $\gamma$ the matrix $F'(y^*,\mu^*,w^*,0)$
is nonsingular (see \cite{shen:3,shen:ypma:3} for details).
Thus Newton's method can be used to solve \eqref{eq:3.16}
with quadratic convergence. The solution of \eqref{eq:2.11}
corresponds to a solution of \eqref{eq:3.16} with $\lambda^*=0$.

Convergence of Newton's method depends on the initial  point being
near the solution. We can choose a good initial point by using a
curve following technique on a solution branch of \eqref{eq:2.11}
connecting to the bifurcation point \cite{chow:shen,shen:ypma:5}.
That starting point corresponds to a starting point of
\eqref{eq:3.16} with $\lambda^{(0)}=0$. The correct rank
deficiency $d$ can be obtained by looking for small singular
values $\widetilde{\sigma}_{n+2-d},\dots,\widetilde{\sigma}_{n+1}$
in \eqref{eq:3.13}, since the singular values are continuous and
the corresponding values at $(y^*,\mu^*,w^*)$ are zero. If the
rank deficiency $d$ is under-estimated then convergence becomes
linear, while if the rank deficiency $d$ is over-estimated then
the computed solution may not be the desired solution with
$\lambda^*=0$.


\section{Examples}

We present two examples to illustrate our method.


\begin{example}\label{ex:1} \rm
\begin{equation}
\frac{d}{dt} \begin{bmatrix}  z_1 \\ y \\ z_2 \end{bmatrix} =
\begin{bmatrix}  -\mu & 0 \\ -1 & \mu y \\ 0 & - \mu \end{bmatrix}
\begin{bmatrix} z_1 \\ z_2 \end{bmatrix} + \begin{bmatrix} y \\ -\mu y \\ \mu -\mu y^2 \end{bmatrix}.
\label{eq:5.1}
\end{equation}
\end{example}
This simple example with $N=2$ and $n=1$ is a case of  the Sherman
system \cite{sher} with one parameter $\mu$ while the other two
parameters are fixed. The system is derived from a nuclear spin
generator; for more details see \cite{niko:bozh:nede:zlat}. There
is one equilibrium solution $y=0,z_1=0, z_2=1$ for any value of
$\mu$. At the point $(\bar y,\bar \mu,\bar z_1,\bar
z_2)=(0.02,0.02,0.02,1.02)$ near the bifurcation point $(y^*,
\mu^*,z_1^*, z_2^*)=(0,0,0,1)$ the singular value decomposition of
$A(\bar y, \bar{\mu})$ is
\begin{equation}
\begin{bmatrix}  -0.0200 & 0.0004 &-0.9998 \\ -0.9998 & 0.0000
& 0.0200 \\
0.0000 & 1.0000 & 0.0004 \end{bmatrix}
\begin{bmatrix}  1.0002 & 0 \\ 0 & 0.0200 \\
0 & 0 \end{bmatrix}
\begin{bmatrix}  1.0000 & -0.0004 \\ -0.0004 & -1.0000
\end{bmatrix}^T.
\end{equation}
$A(y^*,\mu^*)$ has rank deficiency one as indicated by the singular
values of $A(\bar{y}, \bar{\mu})$ since
$\bar{\sigma}_2=0.0200$ is very small relative to
$\bar{\sigma}_1=1.0002$. This gives
\begin{equation}
\mathit{l}=\begin{bmatrix} -0.0004 \\ -1.0000 \end{bmatrix}, \quad
R= \begin{bmatrix} 0.0004 & -0.9998 \\ 0.0000 & 0.0200 \\
1.0000 & 0.0004 \end{bmatrix}
\end{equation}
and $\bar w=\mathit{l}^T \bar z=-1.0200$. The singular value
decomposition of $f'(\bar y,\bar \mu, \bar w)$ is
\[
\begin{bmatrix}  0.0004 & 1.0000 \\ 1.0000 & -0.0004 \end{bmatrix}
\begin{bmatrix}  0.9998 & 0 & 0 \\ 0 & 0.0286 & 0 \end{bmatrix}
\begin{bmatrix}  1.0000 & 0.0003 & 0.0003 \\ -0.0004 & 0.7142 & 0.6999 \\
0.0000 & -0.6999 & 0.7142 \end{bmatrix}^T
\]
which suggests that the rank deficiency of $f'(y^*,\mu^*,w^*)$ is
$d=1$ since $\widetilde \sigma_2=0.0286$ is very small relative to
$\widetilde \sigma_1=0.9998$. Hence
\[
\widetilde L=\begin{bmatrix} 0.0003 & 0.0003 \\ 0.7142 & 0.6999 \\
-0.6999 & 0.7142 \end{bmatrix}, \quad
\widetilde R = \begin{bmatrix} 1.0000 \\ -0.0004 \end{bmatrix}.
\]
We choose $\gamma=0.6742$ at random.
We list in Table \ref{tb:1} the data beginning with
$(y^{(0)},\mu^{(0)},w^{(0)},\lambda^{(0)})=(0.02,0.02,-1.02,0)$
and using
\begin{equation}
\| d^{(k)}\|_2 = \|(y^{(k+1)},\mu^{(k+1)},w^{(k+1)},\lambda^{(k+1)})-(y^{(k)},
\mu^{(k)},w^{(k)}, \lambda^{(k)})\|_2.
\end{equation}

\begin{table}[thbp]
\caption{Newton iterations for computing a bifurcation
point in Example \ref{ex:1}}
\label{tb:1}
\begin{center}
\begin{tabular}{||c||c|c|c|c||c||}
\hline\hline
$k$ & $y^{(k)}$ & $\mu^{(k)}$ & $w^{(k)}$ & $\lambda^{(k)}$ & $\|d^{(k)}\|_2$ \\ \hline\hline
0 & ~2.0000e-02 & ~2.0000e-02 & -1.0200e+00 & ~0.0000e+00 & 3.4423e-02 \\
1 & -8.1505e-06 & -6.2170e-09 & -1.0004e+00 & ~4.0814e-04 & 5.7221e-04 \\
2 & -2.9878e-15 & -8.4432e-19 & -1.0000e+00 & -2.4929e-12 & 1.3296e-10 \\
3 & ~0.0000e+00 & -4.7698e-26 & -1.0000e+00 & ~0.0000e+00 & ------------ \\
\hline\hline
\end{tabular}
\end{center}
\end{table}

 The last line of the table is an approximation of
$(y^*,\mu^*,w^*,\lambda^*)$;
using \eqref{eq:3.10} then gives
$z^*  \approx (0.0000e+00,1.0000e+00)^T$.

\begin{example} \label{ex:2} \rm
Let $(u,v)=(u(t,x),v(t))$ be a solution of the system
\begin{equation}
\begin{gathered}
 \frac{\partial u}{\partial t}=\mu u
 + \frac{\partial^2u}{\partial x^2} -c_1uv,
\quad \frac{dv}{dt}=(\mu-\mu_0)v-c_2v^2+c_3v \int_{0}^1 u(t,x)dx,  \\
 x \in (0,1), \quad u(t,0)=u(t,1)=0 \label{eq:4.2}
\end{gathered}
\end{equation}
where $c_1,c_2$ and $c_3$ are positive constants.
\end{example}
Let $h=1/(N+1)$, where $N$ is an integer. Denote
the numerical approximations of the steady state solution $u(t,x)$
at the interior points $x_j=jh$ by
$u_j$, $j=1,2,\dots,N$. Using the central difference scheme
$u_{xx} \approx (u_{j+1}+u_{j-1}-2u_j)/h^2$, the trapezoidal rule for
the integral,
and denoting $z=[u_1,\dots,u_N]^T$ and $y=v$ we obtain the following separable system for
the steady state solutions of the differential equations:
\begin{equation}
\begin{bmatrix} E(y,\mu) \\ B(y)
\end{bmatrix} z +
\begin{bmatrix}
0_{N \times 1}  \\ (\mu-\mu_0)y-c_2y^2 \\
\end{bmatrix} = 0, \label{eq:4.3}
\end{equation}
where $B(y)=c_3hy[1,\dots,1]$ and the $N \times N$ matrix
\begin{equation}
E(y, \mu) =  \begin{bmatrix} -2+h^2(\mu-c_1y) & 1 & {} & {}
\\ 1 & -2+h^2(\mu-c_1y) & \ddots & {} \\
{} & \ddots & \ddots & 1 \\ {} & {} & 1 & -2+h^2(\mu-c_1y)
\end{bmatrix}.  \label{eq:4.4}
\end{equation}
The matrix $A(y^*,\mu^*)$ has rank deficiency one when
$(y^*,\mu^*)=(0,\mu_0)$ for
$\mu_0=4(N+1)^2\sin^2(\pi/(2N+2))$, as may be seen by the fact
that $\operatorname{Null}(E(0,\mu_0))$ has dimension one.
The point $(y^*,\mu^*,z^*)=(0,\mu_0,0)$ is a bifurcation point.

Equation \eqref{eq:4.3} for $(y,\mu,z) \in \mathbb{R}^{N+2}$
reduces to a system of the form \eqref{eq:2.11} in
$(y,\mu,w) \in \mathbb{R}^3$.
As a numerical example we choose $N=19$, so $h=0.05$ and
$\mu_0=9.84932752388982$.
We choose $c_1=c_2=c_3=1$. Suppose we know a point
$(\bar y, \bar \mu, \bar z)
=(0.001, 9.848, (0.0002, \dots, 0.0002))$ near the bifurcation
point $(y^*,\mu^*,z^*)=(0,\mu_0,0)$.
The 19 singular values of $A(\bar y, \bar \mu)$ are
\[
3.9507,3.8774,\dots,0.5612,0.3573,0.1934,0.0733,0.0002.
\]
The last number drops abruptly, which suggests that $A(y^*,\mu^*)$
has rank deficiency one.
We obtain $\bar w =8.0361 \times 10^{-4}$ from
$\bar{w}= \mathit{l}^T \bar{z}$.
The singular value decomposition of $f'(\bar y, \bar \mu, \bar w)$
gives two singular values $\widetilde{\sigma}_1=0.0033$,
$\widetilde{\sigma}_2=0.0000$, which implies $d=2$.
Thus $\lambda^{(0)}=0 \in \mathbb{R}^2$.
We randomly choose $\gamma=(0.3721, 0.6843)^T$.
Table~\ref{tb:2} gives data for
three iterations and shows quadratic convergence.
The computed approximation $z^*$ satisfies
$\|z^*\|_2 \approx 2.2462 \times 10^{-14}$.
If the underestimate $d=1$ is used then the rate of convergence
is reduced to linear as mentioned above.

\begin{table}[thbp]
\caption{Newton iterations for computing a bifurcation point in Example
\protect\ref{ex:2}}
\label{tb:2}
\begin{center}
\begin{tabular}{||c||c|c|c|c||c||}
\hline\hline
$k$ & $y^{(k)}$ & $\mu^{(k)}$ & $w^{(k)}$ & $\lambda_1^{(k)}$ & $ \|d^{(k)}\|_2$ \\ \hline\hline
0 & ~1.0000e-03 & 9.8480e+00 & ~8.0361e-04 & ~0.0000e+00 & 1.8461e-03 \\
1 & -1.1537e-10 & 9.8493e+00 & ~1.3079e-09 & ~3.2664e-09 & 2.1661e-06 \\
2 & -2.0761e-16 & 9.8493e+00 & -4.5227e-14 & -9.2603e-22 & 2.3200e-14 \\
3 & -1.0311e-16 & 9.8493e+00 & -2.2462e-14 & ~2.8124e-30 & ----------- \\ \hline\hline
\end{tabular}
\end{center}
\end{table}


\begin{thebibliography}{99}

\bibitem{beyn} {W.-J. Beyn};
 \emph{Numerical methods for dynamical
systems}, in Advances in Numerical Analysis, Vol. 1: Nonlinear Partial
Differential Equations and Dynamical Systems, W. Light ed., Clarendon Press,
Oxford, 1991, pp.~175--236.

\bibitem{chow:shen} {S.-N. Chow and Y.-Q. Shen}; \emph{Bifurcations
via singular value decompositions}, Appl. Math. Comp. 28(3) (1988),
pp.~231--245.

\bibitem{golu:leve} {G.~H. Golub and R.~J. LeVeque}; \emph{Extensions
and uses of the variable projection algorithm for solving nonlinear least
squares problems}, in Proceedings of the Army Numerical Analysis and
Computing Conference, US Army Research Office, Washington DC, 1979,
pp.~1--12.

\bibitem{golu:pere:1} {G.~H. Golub and V. Pereyra}; \emph{The
differentiation of pseudo-inverses and nonlinear least squares problems
whose variables separate}, SIAM J. Numer. Anal. 10(2) (1973), pp.~413--432.

\bibitem{golu:pere:2} {G.~H. Golub and V. Pereyra}; \emph{Separable
nonlinear least squares: the variable projection method and its applications},
Inverse Problems 19 (2003), Topic Review, R1--R26.

\bibitem{golu:vanl} {G.~H. Golub and C.~F. Van Loan}; \emph{Matrix
Computations}, 3rd ed., Johns Hopkins, Baltimore, 1996.

\bibitem{grie:redd} {A. Griewank and G.~W. Reddien};
\emph{Characterization and computation of generalized turning points}, SIAM J.
Numer. Anal. 21 (1984), pp.~176--185.

\bibitem{luke} {G.~G. Lukeman};
 \emph{Separable Overdetermined Nonlinear
Systems: An Application of the Shen-Ypma Algorithm}, VDM Verlag Dr. M\"{u}ller,
Saarbr\"{u}cken, 2009.

\bibitem{morr} {D.~B. Morrison}; \emph{Application of the Shen-Ypma
algorithm for nonlinear systems with some linear unknowns}, MSc Thesis,
Dalhousie Univ., Halifax, Nova Scotia, 1998.

\bibitem{mull:stok} {K. M. Mullen and I. M. van Stokkum}; \emph{The
variable projection algorithm in time-resolved spectroscopy, microscopy and
mass spectrometry applications}, Numer. Algorithms, 51(3) (2009), pp.~319--340.

\bibitem{niko:bozh:nede:zlat} {S. Nikolov, B. Bozhov, V. Nedev and V.
Zlatanov}; \emph{The Sherman system: bifurcation, regular and chaotic behaviour},
C. R. Acad. Bulgare Sci. 56(5) (2003), pp.~19--24.

\bibitem{orte:rhei} {J.~M. Ortega and W.~C. Rheinboldt}; \emph{
Iterative Solution of Nonlinear Equations in Several Variables}, Academic
Press, New York, 1970.

\bibitem{rabi:redd} {P.~J. Rabier and G.~W. Reddien}; \emph{
Characterization and computation of singular points with maximum rank
deficiency}, SIAM J. Numer. Anal. 23 (1986), pp.~1040--1051.

\bibitem{shen} {Y.-Q. Shen}; \emph{Computation of a simple
bifurcation point using one singular value decomposition nearby}, Computing
58(4) (1997), pp.~335--350.

\bibitem{shen:2} {Y.-Q. Shen}; \emph{Computation of a Hopf
bifurcation point using one singular value decomposition nearby}, in: Differential
Equations and Applications, P. W. Bates, S.-N. Chow, K. Lu and X. Pan eds.,
International Press, Cambridge, 1997, pp.~277--285.

\bibitem{shen:3} {Y.-Q. Shen}; \emph{Computation of a multiple
bifurcation point using one singular value decomposition nearby}, Dynam.
Cont. Disc. Impul. Syst. 6(1) (1999), pp.~53--68.

\bibitem{shen:ypma} {Y.-Q. Shen and T.~J. Ypma}; \emph{Solving
nonlinear systems of equations with only one nonlinear variable}, J. Comput.
Appl. Math. 30(2) (1990), pp.~235--246.

\bibitem{shen:ypma:2} {Y.-Q. Shen and T.~J. Ypma}; \emph{Solving
separable nonlinear equations with Jacobians of rank deficiency one}, in
LNCS 3314: Computational and Informational Sciences, J. Zhang, J.~-H. He and
Y. Fu eds., Springer, New York, 2004, pp.~99--104.

\bibitem{shen:ypma:3} {Y.-Q. Shen and T.~J. Ypma}; \emph{A uniform
approach to computing dynamical equilibria}, Can. Appl. Math. Q. 14(3)
(2006), pp.~343--359.

\bibitem{shen:ypma:4} {Y.-Q. Shen and T.~J. Ypma}; \emph{Solving
rank-deficient separable nonlinear equations}, Appl. Numer. Math. 57(5-7)
(2007), pp.~609--615.

\bibitem{shen:ypma:5} {Y.-Q. Shen and T.~J. Ypma}; \emph{Numerical
bifurcation of separable parameterized equations}, Elect. Trans. Numer.
Anal., 34 (2009), pp.~31--43.

\bibitem{sher} {S. Sherman}; \emph{A third-order nonlinear system from
a nuclear spin generator}, Contrib. Diff. Equations 2 (1963), pp.~197--227.

\bibitem{watk} {D.~S. Watkins}; \emph{Fundamentals of Matrix
Computations}, Wiley,  New York, 1991.

\bibitem{ypma:shen} {T.~J. Ypma and Y.-Q. Shen}; \emph{Solving N+m
nonlinear equations with only m nonlinear variables}, Computing 44(3)
(1990), pp.~259--271.

\end{thebibliography}

\end{document}
