\documentclass[reqno]{amsart}
\usepackage{hyperref}
\usepackage[all]{xy}
\usepackage{graphicx}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2013 (2013), No. 40, pp. 1--20.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu}
\thanks{\copyright 2013 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2013/40\hfil A step-like approximation]
{A step-like approximation and a new numerical schema
for the Korteweg-de Vries equation in the soliton region}

\author[J. Baggett, O. Bastille, A. Rybkin\hfil EJDE-2013/40\hfilneg]
{Jason Baggett, Odile Bastille, Alexei Rybkin}  % in alphabetical order

\address{Jason Baggett \newline
Department of Mathematics and Statistics \\
University of Alaska Fairbanks\\
PO Box 756660, Fairbanks, AK 99775, USA}
\email{jabaggett@alaska.edu}

\address{Odile Bastille \newline
Department of Mathematics and Statistics \\
University of Alaska Fairbanks\\
PO Box 756660, Fairbanks, AK 99775, USA}
\email{orbastille@alaska.edu}

\address{Alexei Rybkin \newline
Department of Mathematics and Statistics \\
University of Alaska Fairbanks\\
PO Box 756660, Fairbanks, AK 99775, USA}
\email{arybkin@alaska.edu}


\thanks{Submitted September 27, 2011. Published February 4, 2013.}
\thanks{Supported in part by the NSF under grants DMS
0707476 and DMS 1009673}
\subjclass[2000]{35P25, 35Q53, 37K15, 37K10, 37K40, 42C40, 65N25}
\keywords{KdV equation; Haar wavelets; potential fragmentation;
\hfill\break\indent  inverse scattering transform}

\begin{abstract}
 We discuss a numerical schema for solving the initial value problem for
 the Korteweg-de Vries equation in the soliton region which is based
 on a new method of evaluation of bound state data. Using a
 step-like approximation of the initial profile and a fragmentation principle
 for the scattering data, we obtain an explicit procedure for
 computing the bound state data. Our method demonstrates improved accuracy
 for discontinuous initial data.
 We also discuss some generalizations of this algorithm and how it might be
 improved by using Haar and other wavelets.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{remark}[theorem]{Remark}
\allowdisplaybreaks


\section{Introduction}

In this article, we consider the initial value problem for the 
Korteweg-de Vries (KdV) equation
\begin{equation}
u_{t}-6uu_{x}+u_{xxx}=0,\quad u(x,0)=V(x)  \label{KdV}
\end{equation}
on the whole line. The function $V(x)$, called the initial profile, is assumed
to be bounded (but not necessarily continuous), compactly supported (i.e.
zero outside of a finite interval), and nonpositive, $V(x)\le 0$ (the last
assumption is made for simplicity only). In particular, we are concerned with
numerical algorithms for \eqref{KdV} for large times $t$.

The KdV equation is ``exactly solvable'' by
relating it to the Schr\"{o}dinger equation
\begin{equation}
-\phi _{xx}+V(x)\phi =\lambda \phi   \label{Schr}
\end{equation}
through the so-called Inverse Scattering Transform (IST) (see, e.g. 
\cite{AC91}). In a sense, the IST linearizes the KdV (as well as some other
nonlinear evolution PDEs) and provides us with an extremely powerful tool to
analyze its solutions. The main feature of the IST is that it solves 
\eqref{KdV} purely in terms of the so-called scattering data associated 
with \eqref{Schr}  (see Section 4 for more detail).

The scattering data (more precisely, the right scattering data)
of the Schr\"{o}dinger equation
consists of finitely many bound states $\{-\kappa _j^2\}_{j=1}^J$, the
corresponding (left) norming constants $\{c_j\}_{j=1}^J$, and the (right)
reflection coefficient $R$. The bound states are precisely the eigenvalues $
\lambda $ of the Schr\"{o}dinger equation that give square-integrable
solutions $\phi $. The right and left reflection coefficients $R$ and $L$,
respectively, and the transmission coefficient $T$ come from the asymptotic
behavior of the left and right scattering solutions to the Schr\"{o}dinger
equation $\phi _r$ and $\phi _l$, respectively, where for $\lambda =k^2$
\begin{equation} \label{eq1.1}
\phi _r(x,k)=
\begin{cases}
e^{-ikx}+R(k)e^{ikx}+o(1) & x\to \infty  \\
T(k)e^{-ikx}+o(1) & x\to -\infty ,
\end{cases}
\end{equation}
and
\begin{equation} \label{eq1.2}
\phi _l(x,k)=
\begin{cases}
e^{ikx}+L(k)e^{-ikx}+o(1) & x\to -\infty  \\
T(k)e^{ikx}+o(1) & x\to \infty .
\end{cases}
\end{equation}
If $\lambda =-\kappa ^2$ is a bound state, then 
$\phi _l( x,i\kappa) $ is square-integrable. 
The corresponding (left) norming constant is
defined by
\begin{equation*}
c=\Big( \int_{-\infty }^{\infty }|\phi _l(x,i\kappa )T^{-1}(i\kappa
)|^2dx\Big) ^{-1/2}.
\end{equation*}
The IST procedure for \eqref{KdV} consists of two main steps: computing
the scattering data $\{ -\kappa _j^2,c_j,R(k)\} $ for $V$ and 
then constructing $u(x,t) $ using the
scattering data. Unfortunately, neither step is explicit in general and
numerical algorithms based upon the (full scale) IST have not so far shown
a noticeable improvement over conventional methods of direct numerical
integration of the KdV (see, e.g. \cite{BV05, HKRT11}). However, direct
numerical computations work best for small $t$ and become impractical for
large times. The real power of the IST, on the other hand, is exactly in
capturing the large time behavior of solutions to \eqref{KdV} (i.e.
solitons) which is of particular interest in applications. That is to say,
the long-time asymptotic behavior of $u(x,t)$ is
explicitly known in terms of $\{ -\kappa _j^2,c_j,R(k)\} $ to any degree of accuracy in all physically important
regions of the plane $(x,t) $ (see, e.g. the recent expository
paper \cite{GrTe2009} and the
extensive literature cited therein). Loosely speaking, for our initial
profile $V$ the solution $u(x,t) $ evolves into a finite number
of solitons and a dispersive tail. In the present paper we are concerned
with the soliton part. It is well-known (see, e.g. \cite{GrTe2009}) that for
our $V$, in the soliton region, i.e. $x/t\geq C$ with some $C>0$, for any
positive $l$,
\begin{equation}
u(x,t)=-2\sum_{j=1}^{J}\kappa _j^2\operatorname{sech}^2(\kappa _jx-4\kappa
_j^{3}t+\gamma _j)+O\big( t^{-l}\big) ,  \label{soliton sltn}
\end{equation}
where
\begin{equation*}
\gamma _j=\ln \Big( \frac{\sqrt{2\kappa _j}}{c_j}\prod_{m=1}^{j-1}
\frac{\kappa _j+\kappa _{m}}{\kappa _j-\kappa _{m}}\Big) ,
\end{equation*}
and the problem essentially boils down to effective computation of
 $\kappa_j$ and $c_j$ without the full machinery of the IST.

With our assumption that $V$ has compact support, the reflection
coefficients $R$ and $L$ are meromorphic functions in the upper half
 complex plane
with simple poles at $\{ i\kappa _j\}$, and the (left) norming
constants $\{ c_j\} $ can be retrieved from the residues of $R$
at these poles. The poles of $R$ can be numerically approximated by using
root finders. However, computing residues is numerically more difficult. It
is more convenient to introduce a related function $B$ which is a rotation
of the left reflection coefficient $L$. Then $B$ has the same poles as $R$,
and its corresponding residues are equal to the residues of $R$ times a
computable scaling factor. We give a new algorithm for computing the
residues of $B$ as presented below.

Our approach is based on approximating the potential $V$ by a piecewise
constant function with $N$ pieces (using, e.g., a Haar basis). The reflection and
transmission coefficients $L_n$, $R_n$, and $T_n$ for the $n$-th block
can be explicitly derived in terms of elementary functions. The scattering
coefficients $L,R$, and $T$ for the whole approximation can then be
recursively computed. More specifically, let
\begin{equation*}
\Lambda =
\begin{pmatrix}
1/T & -R/T \\
L/T & 1/\overline{T}
\end{pmatrix}.
\end{equation*}
Then the reflection and transmission coefficients $L$, $R$, and $T$ for the
total potential can be derived from the principle of potential fragmentation
(see, e.g. \cite{A92, AS02}):
\begin{equation*}
\Lambda =\Lambda _N\dots\Lambda _{2}\Lambda _1,
\end{equation*}
where bars denote complex conjugation and $\Lambda _n$ are
the transition matrices
\begin{equation*}
\Lambda _n=
\begin{pmatrix}
1/T_n & -R_n/T_n \\
L_n/T_n & 1/\overline{T_n}
\end{pmatrix}.
\end{equation*}
This gives us a recursive formula based on the M\"obius transforms for the
left and right reflection coefficients and also for the function $B$. Using
this recursive formula for $B$, we can derive a recursive matrix formula for
the residues of $B$ at the poles in the upper-half plane. Consequently, we
are able to evaluate the bound state data $\{ \kappa_j,c_j\} $ and hence solve \eqref{KdV} numerically in the soliton
region by \eqref{soliton sltn}.

Recursive methods similar in nature to the one we employ are quite common in
physics and applied mathematics and have been used in a variety of physical
situations related to wave propagation. For instance, back in the 1950s one such
method was used in the particularly influential paper \cite{Parratt54} for the study of solids by reflection of X-rays. We also mention  \cite
{Synolakis1998} where some important results on the run-up of solitary waves on
bathymetries with piecewise linear topographies were obtained. In the
mathematical literature what we call potential fragmentation is also
referred to as layer stripping. We just name \cite{SWG96} where layer
stripping was used in the context of radar imaging for both theoretical and
computational purposes. However, besides \cite{A92, AS02}, we could not
locate any literature where fragmentation is used in connection
with bound states. To deal with bound state data in this context one needs
to use analyticity properties of the scattering coefficients in order to
work with complex momentum $k$ as opposed to real $k$ considered in the
previous literature. Computing residues of bulky, recursively generated
analytic functions does not look promising at first sight. A
simple and effective way of handling these recursion relations, which produces
surprisingly accurate results, is the main contribution of the present paper.

We do not provide any error estimates; instead, the accuracy is
verified on explicitly solvable examples. We also give a comparison of
computing bound states as poles of $R$ and $B$ and computing norming
constants with our algorithm as opposed to other common algorithms. Although
our algorithm is slower than standard (unrelated to fragmentation) methods
for obtaining bound state data, we demonstrate that it tends to be more
accurate, especially for discontinuous initial profiles when conventional
ODE solvers suffer from the Gibbs phenomenon. We also provide a comparison
of the asymptotic solution to the KdV with numerically integrated
solutions. These comparisons are presented in the main body of the paper.

Lastly, we also note that there are many ways to improve our algorithm. For
example, one can produce step-like approximations of our potentials $V(x)$
using Haar wavelets. The Haar wavelets are piecewise constant functions that
form an orthogonal system. Wavelets are closely related to Fourier series,
and they exhibit many properties that are numerically desirable.
Furthermore, it is quite natural to consider piecewise linear
approximations. The partial scattering coefficients $L_n$, $R_n$, and $
T_n$ are still explicitly computable in terms of Airy functions.
One can
also use better root finders. For instance, it is not difficult to create an algorithm
that exploits the fact that when an extra fragment is added to the potential the existing
bound states move in a predictable way and new ones can only emerge from zero.

The referees pointed out the recent papers \cite{AMS09,Mee2007}.
 The former is devoted to the
numerical solution to the initial value problem for the focusing Nonlinear
Schr\"{o}dinger (NLS) equation. In this very interesting paper, the authors
developed a new numerical algorithm for NLS which is also based upon the
IST. Their method uses the structure of the Gelfand-Levitan-Marchenko kernel
in an optimal way and produces very accurate numerical solutions to NLS with
certain initial data. Being more subtle, the method requires more steps and
appears to work best when the number of bound states is at most one.
Extending their method to many bound states is left in \cite{AMS09} as an
open problem. It would also be very interesting to compare their method
adjusted to the KdV setting with ours. That could probably be done by
applying the algorithms from \cite{Mee2007} developed for
numerical solution of the Marchenko integral equations for the 1D Schr\"{o}
dinger equation.  It is worth mentioning that \cite{Mee2007} is one of very
few papers devoted to numerical aspects of the Gelfand-Levitan-Marchenko
inverse scattering procedure. The approach of \cite{Mee2007} is based upon
structured matrix systems.

The paper is organized as follows. In Section 2, we briefly introduce 
our main notation. In Section 3, we provide a brief overview of scattering
theory for the one-dimensional time-independent Schr\"odinger equation. 
In Section 4, we discuss the Inverse Scattering Transform and the 
asymptotic behavior of the KdV equation. In Section 5, we determine
 explicitly the scattering data in the case where the potential consists 
of a single well. In Section 6, using potential fragmentation we deduce 
some recursive relationships that yield the scattering data in the case
 where the potential consists of multiple wells.  
In Section 7, we provide some numerical simulations to compare
 calculating the scattering data using our recursive methods as opposed 
to using traditional methods. Finally in Section 8, 
we discuss some possible improvements for our proposed algorithms; 
in particular, we discuss modifying the algorithm to utilize Haar wavelets.

\section{Notation}

We will denote the upper-half complex plane by $\mathbb{C}^{+}$. For a
function $f:\mathbb{C}\to \mathbb{C}$, we will let $\overline{f}(z)$
denote complex conjugation and $\widetilde{f}(z)=f(-z)$ reflection. As is
customary in analysis, we will let $L^2(\mathbb{R})$ be the class of
functions $f$ such that $\int_{\mathbb{R}}|f|^2<\infty $. We will let $
L_1^{1}(\mathbb{R})$ denote the class of functions $f$ such that $\int_{
\mathbb{R}}(1+|x|)|f(x)|<\infty $. Given functions 
$f,g\in L^2(\mathbb{R})$, we define the $L^2$ inner product to be
\begin{equation*}
\left\langle f,g\right\rangle _{2}=\int_{\mathbb{R}}f\overline{g},
\end{equation*}
and the $L^2$-norm $\Vert \cdot \Vert _{2}$ to be the norm with respect to
this inner product, i.e.
\begin{equation*}
\Vert f\Vert _{2}=\Big[ \int_{\mathbb{R}}|f|^2\Big] ^{1/2}.
\end{equation*}
For a set $A\subseteq \mathbb{R}$, $\chi _{A}$ will denote the
characteristic function on $A$; i.e.
\begin{equation*}
\chi _{A}(x)=
\begin{cases}
1 & \text{if }x\in A \\
0 & \text{otherwise}.
\end{cases}
\end{equation*}

\section{Direct scattering theory for the Schr\"{o}dinger equation
on the line}

Consider the KdV equation
\begin{equation*}
u_{t}-6uu_{x}+u_{xxx}=0.
\end{equation*}
A particular stable solution of the KdV equation is given by
\begin{equation*}
u(x,t)=-2\kappa ^2\operatorname{sech}^2(\kappa x-4\kappa ^{3}t+\gamma )
\end{equation*}
where $\kappa $ and $\gamma $ are real constants. Solutions of this form are
called \emph{solitons}. For more general initial profiles $u(x,0)$, one applies the inverse scattering formalism, or IST (see e.g. \cite{AC91}). The linearization process of the IST consists of first finding a pair of linear operators $L$, $B$, known as the \emph{Lax pair}, satisfying
\begin{equation*}
\begin{cases}
L\phi=\lambda \phi  \\
\phi_t=B\phi
\end{cases}.
\end{equation*}
The operators $L,B$ are constructed in terms of $u(x,t)$ and 
its partial derivatives such that the nonlinear evolution equation 
one wishes to solve then appears as a compatibility condition for
\begin{equation*}
L_t=BL-LB
\end{equation*}
leading to $\lambda_t=0$. The eigenvalue problem associated to 
$L$ in $L^2(\mathbb{R})$ is known as the scattering problem whereas
 $B$ is used to determine the time evolution of the system.
 In the KdV case, one easily verifies that $L$ and $B$ can be chosen as 
follows:
\begin{gather*}
L= - \frac{\partial^2}{\partial x^2}+ u(x,t) \\
B= -4 \frac{\partial^3}{\partial x^3}+3\Big(u(x,t) \frac{\partial}{\partial x}
+ \frac{\partial}{\partial x} u(x,t) \Big).
\end{gather*}
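
For the reader's convenience we record the computation behind this claim: a
direct application of the Leibniz rule shows that the commutator collapses to
a multiplication operator,
\begin{equation*}
BL-LB=6uu_{x}-u_{xxx},
\end{equation*}
while $L_{t}$ is multiplication by $u_{t}$; thus the operator identity
$L_{t}=BL-LB$ holds if and only if $u$ solves \eqref{KdV}.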

The equation $L\phi =\lambda \phi $ is the time-independent Schr\"odinger
equation $-\phi _{xx}+u(x,t)\phi =\lambda \phi $ with potential $u(x,t)$.
In direct scattering, one considers the initial
profile $u(x,0)=V(x)$ and solves
\begin{equation*}
-\phi _{xx}+V(x)\phi =\lambda \phi .
\end{equation*}
Let us define $H$ as the Schr\"odinger operator associated to the initial 
profile $V$, i.e. $H=-\frac{d^2}{d x^2}+V(x)$. Suppose further that 
$V\in L_1^{1}(\mathbb{R})$.
Under this condition, for each $\lambda >0$, there is a
nontrivial solution (generalized eigenfunction) to $H\phi =\lambda \phi $ 
that behaves asymptotically like a sinusoid (see e.g. \cite{Deift79}). 
However, these eigenfunctions
$\phi $ are not contained in $L^2(\mathbb{R})$. We call the set of such
 $\lambda $
the continuous spectrum of $H$. There may also be finitely many negative 
eigenvalues, called bound states \cite{Deift79}. The negative eigenvalues give
square-integrable eigenfunctions $\phi $.
The set of bound states is called the discrete spectrum. The continuous spectrum
gives rise to a component of the solution of the KdV which acts like a
solution to the linear equation $u_{t}+u_{xxx}=0$. This part of the solution
is the dispersive portion of the wave and becomes of negligible amplitude for 
large times. The discrete spectrum gives rise to the solitons. 
This portion of the solution of the
KdV is structurally stable in that each soliton's shape and velocity are
preserved over time (outside of brief elastic interactions)
as it moves to the right. Thus, for large times we focus on the discrete
spectrum.


\begin{figure}[tbp]
\begin{center}
\includegraphics[width=0.45\textwidth, viewport = 140 520 315 680]{fig1a.pdf}
\quad
\includegraphics[width=0.45\textwidth, viewport = 140 520 315 680]{fig1b.pdf} \\
 (A) Wave $e^{-ikx}$ radiating from $\infty$ \hfil
 (B) $R(k)e^{ikx}$ is reflected \\
\includegraphics[width=0.45\textwidth, viewport = 140 520 315 680]{fig1c.pdf}
\quad
\includegraphics[width=0.45\textwidth, viewport = 140 520 315 680]{fig1d.pdf} \\ 
 (C) $T(k)e^{-ikx}$ is transmitted \hfil
 (D) Wave coming from $-\infty$
\end{center}
\caption{The scattering solutions $\protect\phi _{r}(x)$ and 
$\protect\phi _{l}(x)$ as waves radiating from $\pm \infty $}
\label{fig:wave}
\end{figure}


Suppose $\lambda =k^2>0$ with $k\in \mathbb{R}$. 
In the left and right scattering solutions to the Schr\"odinger equation 
given by respectively \eqref{eq1.1} and \eqref{eq1.2}, one can
view $\phi _r$ as a wave $e^{-ikx}$ radiating from infinity, and 
$R(k)e^{ikx}$ is the portion of the wave that is reflected while $T(k)e^{-ikx}
$ is the portion that is transmitted (see Figure \ref{fig:wave}). 
Hence the terminology of \emph{right reflection coefficient} for $R(k)$ 
and \emph{transmission coefficient} for $T(k)$. Similarly, for $\phi_l$,
$T(k)$ is the same transmission coefficient and $L(k)$ is the \emph{left
reflection coefficient}.


\section{The classical inverse scattering transform}


Since $V(x)\in L_1^{1}(\mathbb{R})$, there are finitely
 many bound states $\lambda =-\kappa ^2$; i.e., $k=i\kappa $ with $\kappa >0$. Let $J$ denote 
the number of
bound states, and let
\begin{equation*}
\kappa _1>\kappa _{2}>\dots>\kappa _{J}>0.
\end{equation*}
Let $c_j$ denote the left norming constant at $k=i\kappa _j$; i.e.,
\begin{equation*}
c_j=\Vert \phi _l(x,i\kappa_j )T^{-1}(i\kappa_j )\Vert _{2}^{-1}.
\end{equation*}

Once we know the scattering data for the Schr\"{o}dinger operator, we can
use the IST to obtain the soliton solutions
of the KdV equation.

\centerline{
  \xymatrix{u(x,0)\ar[rrr]^{\text{direct scattering}} 
  &&&S(0)\ar[d]^{\text{time evolution}}\\
    u(x,t) &&&S(t)\ar[lll]^{\text{inverse scattering}}\\
  } }

In the Direct Scattering step, we map the initial potential $u(x,0)
$ into the scattering data
\begin{equation*}
S(0)=\{\{-\kappa _j^2,c_j\}_{j=1}^{J},R(k),k\in \mathbb{R}\}.
\end{equation*}

Next, we evolve the scattering data over time in a simple fashion:

\begin{itemize}
\item $\kappa _j(t)=\kappa _j$,

\item $c_j(t)=c_je^{4\kappa _j^{3}t}$,

\item $R(k,t)=R(k)e^{8ik^{3}t}$.
\end{itemize}
Then the scattering data becomes
\begin{equation*}
S(t)=\{\{-\kappa _j(t)^2,c_j(t)\}_{j=1}^{J},R(k,t),k\in \mathbb{R}\}.
\end{equation*}

We can reclaim the solution to the KdV using Inverse Scattering as follows
(a worked one-soliton example is given after the list):

\begin{itemize}
\item Form the Gelfand-Levitan-Marchenko (GLM) kernel:
\begin{equation*}
F(x,t)=\sum_{j=1}^{J}c_j^2(t)e^{-\kappa _jx}+\frac{1}{2\pi }
\int_{-\infty }^{\infty }e^{ikx}R(k,t)dk.
\end{equation*}

\item Solve the Gelfand-Levitan-Marchenko equation for $K(x,y,t)$, 
$y\geq x$:
\begin{equation*}
K(x,y,t)+F(x+y,t)+\int_{x}^{\infty }F(s+y,t)K(x,s,t)ds=0.
\end{equation*}

\item The solution to the KdV equation is
\begin{equation*}
u(x,t)=-2\frac{d }{d x}K(x,x,t).
\end{equation*}
\end{itemize}
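
As a consistency check, consider the classical reflectionless one-soliton
case: a single bound state $-\kappa ^2$ with norming constant $c$ and
$R\equiv 0$, so that $F(x,t)=c^2(t)e^{-\kappa x}$ with
$c^2(t)=c^2e^{8\kappa ^{3}t}$. Seeking a separable solution
$K(x,y,t)=g(x,t)e^{-\kappa y}$ of the Gelfand-Levitan-Marchenko equation gives
\begin{equation*}
g(x,t)=-\frac{c^2(t)e^{-\kappa x}}{1+\frac{c^2(t)}{2\kappa }e^{-2\kappa x}},
\end{equation*}
and a short computation then yields
\begin{equation*}
u(x,t)=-2\frac{d}{dx}K(x,x,t)
=-2\kappa ^2\operatorname{sech}^2\Big( \kappa x-4\kappa ^{3}t
+\ln \frac{\sqrt{2\kappa }}{c}\Big) ,
\end{equation*}
which is exactly \eqref{soliton sltn} with $J=1$.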

Luckily, for large times $t$ we can simplify the GLM kernel.
 Slightly adjusting arguments used to prove the Riemann-Lebesgue lemma,
 we have that
\begin{equation*}
\int_{-\infty }^{\infty }e^{i\left(kx+8k^3t\right)}R(k)dk\to 0\text{ as }t\to
\infty
\end{equation*}
for every $x$. 
Thus, for large times we can approximate the GLM kernel by
\begin{equation*}
F(x,t)\approx \sum_{j=1}^{J}c_j^2(t)e^{-\kappa _jx}.
\end{equation*}
Let
\begin{equation*}
C(x,0)=
\begin{pmatrix}
c_{11}(x) & c_{12}(x) & \ldots  & c_{1,J}(x) \\
c_{21}(x) & c_{22}(x) & \ldots  & c_{2,J}(x) \\
\vdots  &  & \ddots  &  \\
c_{J,1}(x) & c_{J,2}(x) & \ldots  & c_{J,J}(x)
\end{pmatrix}
\end{equation*}
where
\begin{equation*}
c_{mj}(x)=\frac{c_{m}c_j}{\kappa _{m}+\kappa _j}e^{-(\kappa _{m}+\kappa
_j)x}.
\end{equation*}
The matrix $C$ evolves in time by
\begin{equation*}
c_{mj}(x,t)=c_{mj}(x)e^{4(\kappa _{m}^{3}+\kappa _j^{3})t}.
\end{equation*}
Then for large times, our solution to the KdV is \cite{AC91}
\begin{equation*}
u(x,t)\approx -2\frac{\partial ^2}{\partial x^2}\ln [\det (I+C(x,t))].
\end{equation*}
From this, one obtains the asymptotic formula \eqref{soliton sltn}.
Notice that the large time solution $u(x,t)$ of the KdV behaves like a
finite sum of single solitons. Moreover, we no longer need to do the full
IST to solve the KdV for large times. We need only find the bound states $
-\kappa _j^2$ and norming constants $c_j$.
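
To make this point concrete, the following minimal Python sketch (our
illustration only; the computations reported below were done in MATLAB, and
the function name is hypothetical) evaluates the soliton sum
\eqref{soliton sltn} directly from the bound state data $\{\kappa _j,c_j\}$.
\begin{verbatim}
import numpy as np

def soliton_solution(x, t, kappas, cs):
    """Soliton part of u(x,t):
    -2 * sum_j kappa_j^2 * sech^2(kappa_j*x - 4*kappa_j^3*t + gamma_j),
    with kappa_1 > ... > kappa_J and cs the left norming constants c_j."""
    x = np.asarray(x, dtype=float)
    u = np.zeros_like(x)
    for j in range(len(kappas)):
        kap, c = kappas[j], cs[j]
        # gamma_j = ln( sqrt(2 kap)/c * prod_{m<j} (kap+kappa_m)/(kap-kappa_m) );
        # a negative product only shifts the argument by i*pi, which leaves
        # sech^2 unchanged, so its absolute value is taken below.
        prod = np.prod([(kap + kappas[m])/(kap - kappas[m]) for m in range(j)])
        gamma = np.log(np.sqrt(2.0*kap)/c*abs(prod))
        u = u - 2.0*kap**2/np.cosh(kap*x - 4.0*kap**3*t + gamma)**2
    return u
\end{verbatim}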

The coefficients $R$ and $T$ can be analytically continued to $\mathbb{C}^+$, and their poles
are precisely $i\kappa _j$. That is, all of the poles of $R$
and $T$ in $\mathbb{C}^{+}$ lie on the imaginary axis and correspond with the bound
states. Better yet, these poles are actually simple \cite{A94, MD05}.
Furthermore, if we assume that $V$ is supported on $(-\infty,0)$, then \cite{AK01}
\begin{equation}
\operatorname{Res}_{k=i\kappa _j}R(k)=ic_j^2.  \label{ResR}
\end{equation}
Consequently, in this case, the bound states and norming
constants can be obtained from knowledge of $R(k)$ for $k\in \mathbb{C}^{+}$, and we can approximate the solution of the KdV for large times from only the
knowledge of $R(k)$ for $k\in \mathbb{C}^{+}$.

\section{The scattering quantities for a block (well) potential}

Consider the case when our potential $V$ is a single nonpositive well which
is $-a^2$ on the interval $[-b,0]$ and 0 elsewhere, i.e. $V(x)=-a^2\chi
_{[-b,0]}(x)$ (see Figure \ref{fig:one_block}).

\begin{figure}[tbp]
\begin{center}
\includegraphics[width=0.7\textwidth, viewport = 140 530 500 690]{fig2}
\end{center}
\caption{The setup for a single block potential}
\label{fig:one_block}
\end{figure}

In this case, we can obtain an exact solution to the Schr\"{o}dinger
equation. Moreover, using the continuity of the solution $\phi $ and its
derivative $\phi _{x}$, we can set up a system of equations and solve for $R$
and $T$. Doing this, we obtain
\begin{equation}
R(k)=\omega ^2\frac{1-\xi }{\xi -\omega ^{4}}\,,\quad
L(k)=\omega ^2 \frac{1-\xi }{\xi -\omega ^{4}}e^{-iab(\omega -1/\omega )}\,,\quad 
T(k)=\frac{1-\omega ^{4}}{\xi -\omega ^{4}}e^{i\frac{ab}{\omega }}
\label{oneblock}
\end{equation}
where
\begin{equation*}
\omega =\frac{k}{a}+\sqrt{\big( \frac{k}{a}\big) ^2+1}\,,\quad
\xi =e^{i(\omega +1/\omega )ab}.
\end{equation*}
These formulas for $R$, $L$, and $T$ are actually meromorphic in $\mathbb{C}$ 
if we choose the branch cut along the imaginary axis between $-ia$ and $ia$. 
Using these formulas, $R$, $L$, and $T$ can be analytically continued in 
$\mathbb{C}^{+}$.
The only difficulty lies in considering the branch cut. However, we have
that $\omega (-\overline{k})=-\overline{\omega (k)}$ and 
$\xi (-\overline{k} )=\overline{\xi (k)}$. It follows that 
$R(-\overline{k})=\overline{R(k)}$
and $T(-\overline{k})=\overline{T(k)}$. For $k\in i\mathbb{R}$, 
we have that $R(k)=R(-\overline{k})=\overline{R(k)}$ and 
$T(k)=T(-\overline{k})=\overline{T(k)}$,
so $R$ and $T$ are real-valued for $k\in i\mathbb{R}$. For $k=+0+i\kappa $,
we have that $-\overline{k}=-0+i\kappa $. 
Therefore, $R(-0+i\kappa )=\overline{
R(+0+i\kappa )}$ and since $R$ is real-valued on $i\mathbb{R}$,
 $\overline{R(+0+i\kappa )}=R(+0+i\kappa )$. 
Hence, $R(-0+i\kappa )=R(+0+i\kappa )$, so $
R$ is continuous along the branch cut between $-ia$ and $ia$. 
It follows that $R$ is meromorphic in $\mathbb{C}$. Similarly, 
$T$ is meromorphic in $\mathbb{C}$ as well.

Consider the poles $i\kappa _j$ and residues $ic_j^2$ of $R$. The
poles of $R$ and $T$ satisfy $\xi =\omega ^{4}$. The quantities $\kappa _j$
and $c_j$ can be explicitly computed by the following formulas:
\begin{equation}
\frac{ab}{\pi }\sqrt{1-\big( \frac{\kappa _j}{a}\big) ^2}-\frac{2}{
\pi }\arctan \frac{\kappa _j}{a\sqrt{1-\big( \frac{\kappa _j}{a}\big)
^2}}=j-1  \label{bound_oneblock}
\end{equation}
and
\begin{equation}
c_j^2=\frac{2\kappa _j\big( 1-( \frac{\kappa _j}{a})^2\big) }{2+b\kappa _j}  \label{norm_oneblock}
\end{equation}
for $j=1,\dots,\lceil \frac{ab}{\pi }\rceil $.
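
For illustration, the following Python sketch (again our own illustration,
not the MATLAB code behind the tables in Section 7) computes the bound state
data of a single block directly from \eqref{bound_oneblock} and
\eqref{norm_oneblock}, using the fact that the left-hand side of
\eqref{bound_oneblock} decreases monotonically from $ab/\pi $ to $-1$ as
$\kappa _j/a$ runs over $(0,1)$.
\begin{verbatim}
import numpy as np
from scipy.optimize import brentq

def block_bound_state_data(a, b):
    """kappa_j and c_j^2 for the single well V(x) = -a^2 on [-b, 0]."""
    def lhs(y):                          # y = kappa/a, 0 < y < 1
        return (a*b/np.pi)*np.sqrt(1.0 - y**2) \
               - (2.0/np.pi)*np.arctan(y/np.sqrt(1.0 - y**2))
    J = int(np.ceil(a*b/np.pi))          # number of bound states
    kappas, csq = [], []
    for j in range(1, J + 1):
        # lhs decreases from a*b/pi to -1, so the root of lhs(y) = j - 1
        # can be bracketed on (0, 1)
        y = brentq(lambda y: lhs(y) - (j - 1), 1e-12, 1.0 - 1e-12)
        kappa = a*y
        kappas.append(kappa)
        csq.append(2.0*kappa*(1.0 - y**2)/(2.0 + b*kappa))
    return np.array(kappas), np.array(csq)

# Example: V(x) = -4 on [-4, 0], i.e. a = 2, b = 4 (three bound states)
kappas, csq = block_bound_state_data(2.0, 4.0)
\end{verbatim}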

\section{The potential fragmentation and the scattering quantities for
potentials composed of blocks}
\label{sec:multiblocks} 

We define the scattering matrix to be
\begin{equation*}
S=
\begin{pmatrix}
T & R \\
L & T
\end{pmatrix}
\end{equation*}
The matrix $S$ is unitary; i.e., $S^{-1}=S^{\ast }$ where $S^{\ast }$ is the
conjugate transpose of $S$ \cite{A94}. This gives us a few identities,
namely \cite{AKM96, MD05}
\begin{equation}
L\overline{T}+T\overline{R}=0  \label{LTTR}
\end{equation}
for $k\in \mathbb{R}$. If we were to shift our potential to the right by $p$, then
the scattering matrix would change as follows \cite{A92}:
\begin{gather}
L(k)\quad  \to \quad L(k)e^{2ikp}  \label{shiftL} \\
T(k)\quad  \to \quad T(k)  \label{shiftT} \\
R(k)\quad  \to \quad R(k)e^{-2ikp}  \label{shiftR}
\end{gather}

Now suppose that our potential $V$ consists of $N$ nonpositive blocks. Let
 $R_n$, $L_n$, $T_n$ be the reflection and transmission coefficients on the
$n$-th block: $V_n(x)=-a_n^2$ on $[-b_n,-b_{n-1}]$ where $b_{0}=0$. Let
$R_n^0,L_n^0,T_n^0$ be the reflection and transmission
coefficients on the $n$-th block shifted to the origin: $V_n(x)=-a_n^2$
on $[-(b_n-b_{n-1}),0]$. Let $R_{1,2,\dots,n}$, $L_{1,2,\dots,n}$, $
T_{1,2,\dots,n}$ be the reflection and transmission coefficients on the first 
$n$ blocks. $R,L,T$ without subscripts or superscripts will denote the
reflection and transmission coefficients for the overall potential.

Let
\begin{equation}
\Lambda = \begin{pmatrix}
1/T & -R/T \\
L/T & 1/\overline{T}
\end{pmatrix}.  \label{transition_matrix}
\end{equation}
The fragmentation principle, or layer stripping principle as it is also
known \cite{A92, AS02, AKM96, SWG96}, says that for $k\in \mathbb{R}$
\begin{equation}
\Lambda =\Lambda _N\dots\Lambda _{2}\Lambda _1
\label{potentialfragmentation}
\end{equation}
where $\Lambda _n$ are the transition matrices
\begin{equation*}
\Lambda _n=
\begin{pmatrix}
1/T_n & -R_n/T_n \\
L_n/T_n & 1/\overline{T_n}
\end{pmatrix}.
\end{equation*}
Note that blocks with $a_n=0$ may simply be ignored since for them
 $\Lambda _n$ is the identity matrix. Also note that for $f\in \{R,L,T\}$,
for any potential and for all $k\in \mathbb{C}$, we have
\begin{equation}
\widetilde{f}(k)=f(-k)=\overline{f(\overline{k})}.  \label{tilbar}
\end{equation}
Using this fact, $\widetilde{f}$ and $\overline{f}$ with
$f\in \{R,L,T\}$ are interchangeable for $k\in \mathbb{R}$, but
not for general complex $k$.

We can use potential fragmentation to come up with some recursive formulas.
Using \eqref{LTTR}-\eqref{tilbar}, we obtain that for $k\in \mathbb{R}$
\begin{equation}
R_{1,\dots,n+1}=-\frac{L_{1,\dots,n}}{\widetilde{R}_{1,\dots,n}}
\frac{ R_{n+1}^0e^{2ikb_n}-\widetilde{L}_{1,\dots,n}}{
1-R_{n+1}^0L_{1,\dots,n}e^{2ikb_n}}.  \label{fragR}
\end{equation}
A similar expression may be obtained for the left reflection coefficient:
\begin{equation}
L_{1,\dots,n+1}=-\frac{R_{n+1}^0}{\widetilde{R}_{n+1}^0}\frac{
L_{1,\dots,n}e^{2ikb_n}-\widetilde{R}_{n+1}^0}{
1-R_{n+1}^0L_{1,\dots,n}e^{2ikb_n}}e^{-2ikb_{n+1}}.  \label{fragL}
\end{equation}
Since $|
L_{1,\dots,n}| =| R_{1,\dots,n}| $ for $k\in \mathbb{R}$, we have
$L_{1,\dots,n}=R_{1,\dots,n}e^{-2ik\beta _n}$ for some $\beta
_n:\mathbb{R}\to \mathbb{R}$. Equations \eqref{fragR} and \eqref{fragL} 
then give us
that
\begin{equation}
R_{1,\dots,n+1}=-\frac{R_{1,\dots,n}}{\widetilde{R}_{1,\dots,n}}\frac{
R_{n+1}^0e^{2ik(b_n-\beta _n)}-\widetilde{R}_{1,\dots,n}}{
1-R_{n+1}^0R_{1,\dots,n}e^{2ik(b_n-\beta _n)}},  \label{fragR2}
\end{equation}
where $\beta _1=b_1$ and
\begin{equation}
e^{-2ik\beta _{n+1}}=\frac{R_{n+1}^0}{\widetilde{R}_{n+1}^0}\frac{
\widetilde{R}_{1,\dots,n}}{R_{1,\dots,n}}\frac{R_{1,\dots,n}e^{2ik(b_n-\beta
_n)}-\widetilde{R}_{n+1}^0}{R_{n+1}^0e^{2ik(b_n-\beta _n)}-
\widetilde{R}_{1,\dots,n}}e^{-2ikb_{n+1}}.  \label{expbetaN}
\end{equation}
Define $A_n=\frac{L_{1,\dots,n}}{R_{1,\dots,n}}e^{2ikb_n}$. Then $
A_n=e^{2ik(b_n-\beta _n)}$ for $k\in \mathbb{R}$. Equations \eqref{fragR2} and
\eqref{expbetaN} give us that
\begin{equation}
R_{1,\dots,n+1}=-\frac{R_{1,\dots,n}}{\widetilde{R}_{1,\dots,n}}\frac{
A_nR_{n+1}^0-\widetilde{R}_{1,\dots,n}}{1-A_nR_{n+1}^0R_{1,\dots,n}}
\label{recR}
\end{equation}
and
\begin{equation}
A_{n+1}=\frac{R_{n+1}^0}{\widetilde{R}_{n+1}^0}
\frac{\widetilde{R}_{1,\dots,n}}{R_{1,\dots,n}}\frac{A_nR_{1,\dots,n}
-\widetilde{R}_{n+1}^0}{A_nR_{n+1}^0-\widetilde{R}_{1,\dots,n}}  \label{recA}
\end{equation}
where $A_1=1$. Let us next define $
B_n=A_nR_{1,\dots,n}=L_{1,\dots,n}e^{2ikb_n}$. Then we get the following
recursive formula:
\begin{equation}
B_{n+1}=-\frac{R_{n+1}^0}{\widetilde{R}_{n+1}^0}
\frac{B_n-\widetilde{R}_{n+1}^0}{1-R_{n+1}^0B_n}  \label{recB}
\end{equation}
where $B_1=R_1$. Notice that $B_{n+1}$ is a M\"{o}bius transform of
$B_n$, and that the recursive formula for $B_n$ is much simpler than the
recursive formula for $R_{1,\dots,n}$. Moreover, this formula only depends on 
$B_n$, $R_{n+1}^0$, and $\widetilde{R}_{n+1}^0$. From \eqref{oneblock},
\begin{equation}
R_n^0=\frac{\omega _n^2(1-\xi _n)}{\xi _n-\omega _n^{4}}.
\label{Rn0}
\end{equation}
where $h_n=b_n-b_{n-1}$ is the width for the $n$-th block, 
\[
\omega _n(k)=\frac{k}{a_n}+\sqrt{\big(\frac{k}{a_n}\big)^2+1},
\]
 and $\xi
_n(k)=e^{ia_nh_n(\omega _n(k)+1/\omega _n(k))}$, as in the definition of $\xi$ in the previous section. For $k\in \mathbb{R}$, we
have that $\overline{R_n^0}=\widetilde{R}_n^0$. By taking the
complex conjugate of \eqref{Rn0}, we obtain that for $k\in \mathbb{R}$,
\begin{equation}
\widetilde{R}_n^0=\frac{\omega _n^2(1-\xi _n)}{\xi _n\omega
_n^{4}-1}.  \label{CRn0}
\end{equation}
Since $R_n^0$ is meromorphic in $\mathbb{C}$, $\widetilde{R}_n^0$ is
meromorphic in $\mathbb{C}$ as well (in particular, both are meromorphic in 
$\mathbb{C}^{+}$ where the poles of interest lie). Since the formula 
in \eqref{CRn0} is
meromorphic in $\mathbb{C}$, it follows that \eqref{CRn0} holds for all 
$k\in \mathbb{C}$.
Continuing inductively using equations \eqref{recR}-\eqref{recB}, 
it follows that $R_{1,\dots,n},A_n,$ and $B_n$ can be
continued to meromorphic functions in $\mathbb{C}$ (and in particular,
 $\mathbb{C}^{+}$) for all $1\leq n\leq N$.
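
The recursion \eqref{recB}, together with the single-block coefficients
\eqref{Rn0} and \eqref{CRn0}, is straightforward to implement. The Python
sketch below (an illustration with our own, hypothetical function names)
evaluates $B_N(k)$ for a step-like potential described by the depths $a_n$
and widths $h_n$ of its blocks; on the positive imaginary axis the principal
branch of the square root used here agrees with the boundary values
$R(+0+i\kappa )$ employed in the previous section.
\begin{verbatim}
import numpy as np

def R0(k, a, h):
    """Reflection coefficient R_n^0 of a single block of depth -a^2 and
    width h placed at the origin."""
    w = k/a + np.sqrt((k/a)**2 + 1.0 + 0j)
    xi = np.exp(1j*a*h*(w + 1.0/w))
    return w**2*(1.0 - xi)/(xi - w**4)

def R0_tilde(k, a, h):
    """The companion coefficient (the meromorphic continuation of the
    conjugated reflection coefficient)."""
    w = k/a + np.sqrt((k/a)**2 + 1.0 + 0j)
    xi = np.exp(1j*a*h*(w + 1.0/w))
    return w**2*(1.0 - xi)/(xi*w**4 - 1.0)

def B_total(k, depths, widths):
    """B_N(k) via the Moebius recursion, starting from B_1 = R_1;
    depths[n-1] = a_n and widths[n-1] = h_n, blocks numbered from the
    block adjacent to the origin outward to the left."""
    B = R0(k, depths[0], widths[0])
    for a, h in zip(depths[1:], widths[1:]):
        r, rt = R0(k, a, h), R0_tilde(k, a, h)
        B = -(r/rt)*(B - rt)/(1.0 - r*B)
    return B
\end{verbatim}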

Since $B_n=A_nR_{1,\dots,n}=L_{1,\dots,n}e^{2ikb_n}$, we have that $B_n$
has the same poles $k=i\kappa _j$ in $\mathbb{C}^{+}$ as $L_{1,\dots,n}$.
Consequently, $B_n$ and $R_{1,\dots,n}$ have the same poles in $\mathbb{C}^{+}$.
Since the poles $k=i\kappa _j$ in $\mathbb{C}^{+}$ of $L_{1,\dots,n}$ and $R_{1,\dots,n}
$ are simple, we have that $A_n=\frac{L_{1,\dots,n}}{R_{1,\dots,n}}e^{2ikb_n}
$ is analytic in $\mathbb{C}^{+}$ and nonzero at all $k=i\kappa _j$. It follows
from equation \eqref{ResR} then that
\begin{equation} \label{resR2}
\operatorname{Res}_{k=i\kappa _j}B_n
 =A_n(i\kappa _j)\operatorname{Res}_{k=i\kappa _j}R_{1,\dots,n}
 =ic_j^2A_n(i\kappa _j).
\end{equation}
The value of $A_n(i\kappa _j)$ can be determined via the recursive
formula \eqref{recA}. If we can determine an algorithm for determining the
residues of $B_N$, then this would effectively give us an algorithm for
calculating the (left) norming constants.

Now suppose $B_n$ has the form
\begin{equation}
B_n=-\frac{R_n^0}{\widetilde{R}_n^0}\frac{p_n}{q_n}.
\label{recB2}
\end{equation}
Applying \eqref{recB}, we get a linear system of recurrence relations for 
$p_n$ and $q_n$:
\begin{equation} \label{pqrec}
\begin{aligned}
p_{n+1}  &=  -R_n^0p_n  -  \widetilde{R}_{n+1}^0\widetilde{R}_n^0 q_n,
 \\
q_{n+1}  &=  R_{n+1}^0R_n^0p_n  + \widetilde{R}_n^0q_n,
\end{aligned}
\end{equation}
or in matrix form
\begin{equation}
\begin{pmatrix}
p_{n+1} \\
q_{n+1}
\end{pmatrix}
=M_n
\begin{pmatrix}
p_n \\
q_n
\end{pmatrix}
=M_n\dots M_{2}M_1
\begin{pmatrix}
p_1 \\
q_1
\end{pmatrix}
\label{mateq}
\end{equation}
where
\begin{equation*}
M_{m}=
\begin{pmatrix}
-R_{m}^0 & -\widetilde{R}_{m+1}^0\widetilde{R}_{m}^0 \\
R_{m+1}^0R_{m}^0 & \widetilde{R}_{m}^0
\end{pmatrix}.
\end{equation*}

Let $N$ denote the number of blocks. If $q_N(k)=0$ but $k$ is not a pole
of $B_N$, then $p_N=0$ as well. From \eqref{mateq}, this means that
$\det (M_n)=0$ for some $1\leq n\leq N-1$ or $
\begin{pmatrix}
p_1 \\
q_1
\end{pmatrix}
=0$. Since $B_1=R_1$, from \eqref{recB2} we have that
$\frac{p_1}{q_1}=-\widetilde{R}_1$. Our choice of $p_1$ and
 $q_1$ is arbitrary, as
long as this ratio is preserved, since our resulting solution of $B_{n+1}$
is independent of our choice for $p_1$ and $q_1$. Some choices for our
initial vector may be preferable for numerical computations, but for our
purposes we will choose $
\begin{pmatrix}
p_1 \\
q_1
\end{pmatrix}
=
\begin{pmatrix}
-\widetilde{R}_1 \\
1
\end{pmatrix}
$, because it is nonzero for all $k$. Thus, if $q_N=0$ but $k$ is not a
pole of $B_N$, then $\det (M_{N-1}\dots M_{2}M_1)=0$. Equivalently, if
 $q_N=0$ and $\det (M_{N-1}\dots M_{2}M_1)\neq 0$, then $k$ is a pole of
 $R_{1,\dots,N}$.

We claim that $\det (M_{N-1}\dots M_{2}M_1)(k)=0$ for some
 $k\in \mathbb{C}^{+}$ if
and only if $k=ia_N$, or for some $1\leq n\leq N-1$ and some $0\leq j\leq
\lfloor \frac{a_nh_n}{\pi }\rfloor $,
\begin{equation}
k=i\sqrt{a_n^2-\big( \frac{\pi j}{h_n}\big) ^2}.  \label{detM}
\end{equation}
We have that $\det (M_{N-1}\dots M_{2}M_1)=0$ if and only if
$\det (M_n)=0 $ for some $1\leq n\leq N-1$. Moreover,
\begin{equation*}
\det (M_n)=R_n^0\widetilde{R}_n^0(R_{n+1}^0\widetilde{R}_{n+1}^0-1).
\end{equation*}
Thus, $\det (M_n)=0$ if and only if $R_n^0=0$ (equivalently 
$\widetilde{R}_n^0=0$) or $R_{n+1}^0\widetilde{R}_{n+1}^0=1$. The
second case occurs when
\begin{equation*}
\omega _{n+1}^{4}(1-\xi _{n+1})^2=(\xi _{n+1}-\omega _{n+1}^{4})(\xi
_{n+1}\omega _{n+1}^{4}-1).
\end{equation*}
After some algebra and noting that $\xi _{n+1}=e^{-ia_{n+1}h_{n+1}(\omega
_{n+1}+1/\omega _{n+1})}\neq 0$, this simplifies to $\omega _{n+1}^{4}=1$. 
A simple calculation then gives us that $\omega _{n+1}^{4}=1$ for 
$k\in \mathbb{C}^{+}$ if and only if $k=ia_{n+1}$. 
After a lengthy computation using \eqref{Rn0},
we obtain that $R_n^0(k)=0$ for $k\in \mathbb{C}^{+}$ if and only if equation
\eqref{detM} holds.

Now suppose that $q_N(k)=0$, $\det (M_{N-1}\dots M_{2}M_1)(k)\neq 0$ at $
k=i\kappa $, and that $k$ is not a pole of $\widetilde{R}_N^0$. Then $k$
is a pole of $B_N$, and consequently a pole of $R=R_{1,\dots,N}$ as well.
Consequently, $k^2$ is a bound state of the Schr\"{o}dinger equation.
Since $\det (M_{N-1}\dots M_{2}M_1)(k)\neq 0$, we have that $p_N(k)\neq 0$.
Since $k$ is not a pole of $\widetilde{R}_N^0$ and since $R_N^0$ and
$\widetilde{R}_N^0$ have the same zeros with the same multiplicity, $-
\frac{R_N^0}{\widetilde{R}_N^0}p_n\neq 0$. However, $q_N=0$ and
the poles of $B_N$ are simple, so
\begin{equation}
\operatorname{Res}_{k=i\kappa }B_N=-\frac{R_N^0}{\widetilde{R}_N^0}\frac{p_N}{
q_N'}.  \label{resB}
\end{equation}
To find $q_N'$, we can differentiate \eqref{pqrec} to acquire
\begin{equation}
\begin{pmatrix}
p_{n+1}' \\
q_{n+1}'
\end{pmatrix}
=M_n
\begin{pmatrix}
p_n' \\
q_n'
\end{pmatrix}
+M_n'
\begin{pmatrix}
p_n \\
q_n
\end{pmatrix}
.  \label{p'q'rec}
\end{equation}

Therefore, for the poles of $R$ where $\det (M_{N-1}\dots M_{2}M_1)\neq 0$ and
that are not poles of $\widetilde{R}_N^0$, the residues of $R$ can be
recovered through \eqref{resR2} and \eqref{resB}.
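
For completeness, here is a Python sketch of this residue computation (again
our own illustration, reusing the hypothetical helpers \texttt{R0} and
\texttt{R0\_tilde} from the previous sketch); for brevity, $q_N'$ is
approximated by a central difference in $k$ instead of propagating
\eqref{p'q'rec}.
\begin{verbatim}
import numpy as np

def pq(k, depths, widths):
    """Propagate (p_n, q_n) through the matrix recursion, starting from
    (p_1, q_1) = (-R~_1, 1); uses R0 and R0_tilde from the previous sketch."""
    def coeffs(m):
        return R0(k, depths[m], widths[m]), R0_tilde(k, depths[m], widths[m])
    p, q = -coeffs(0)[1], 1.0 + 0j
    for m in range(len(depths) - 1):
        r, rt = coeffs(m)
        r1, rt1 = coeffs(m + 1)
        p, q = -r*p - rt1*rt*q, r1*r*p + rt*q     # multiply by M_m
    return p, q

def residue_B(kappa, depths, widths, eta=1e-6):
    """Res_{k = i kappa} B_N = -(R_N^0/R~_N^0) p_N / q_N'(i kappa);
    here q_N' is approximated by a central difference in k."""
    k = 1j*kappa
    p, _ = pq(k, depths, widths)
    _, q_plus = pq(k + eta/2, depths, widths)
    _, q_minus = pq(k - eta/2, depths, widths)
    rN, rtN = R0(k, depths[-1], widths[-1]), R0_tilde(k, depths[-1], widths[-1])
    return -(rN/rtN)*p/((q_plus - q_minus)/eta)
\end{verbatim}
The left norming constants then follow from \eqref{resR2},
$c_j^2=\operatorname{Res}_{k=i\kappa _j}B_N/\big(iA_N(i\kappa _j)\big)$, once
$A_N(i\kappa _j)$ is evaluated via \eqref{recA}.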


\section{Numerical simulations}

Tables \ref{tab:bound_block} and \ref{tab:bound_wave1} give a comparison of
the algorithms listed below for calculating bound states, while tables 
\ref{tab:norm_block} and \ref{tab:norm_wave1} give a comparison of the
algorithms listed below for calculating norming constants. The exact bound
states in table \ref{tab:bound_block} were calculated using equation
\eqref{bound_oneblock}. All calculations were performed using MATLAB, all
integrals were approximated using the trapezoidal method, and all
differential equations were numerically integrated using the Runge-Kutta 4-5
algorithm.

There are two commonly used numerical methods for approximating the bound
states:

\begin{itemize}
\item[(1)] Matrix methods - Estimate the Schr\"{o}dinger operator $H=-\frac{
d^2}{dx^2}+V(x)$ using a finite-dimensional matrix and find the
eigenvalues of the matrix. In particular, \cite{T00} describes how this can
be done using the Fourier basis. In tables \ref{tab:bound_block} and \ref
{tab:bound_wave1}, 
a $512\times 512$ matrix is used.

\item[(2)] Shooting Method - The Shooting Method involves iteratively
choosing values of $\lambda $ and \textquotedblleft
shooting\textquotedblright\ from both end points to a point $c\in [a,b]$.
Define the miss-distance function to be the Wronskian determinant
a,b]$. Define the miss-distance function to be the Wronskian determinant
\begin{equation*}
D(\lambda )=
\begin{vmatrix}
u_{L}(c,\lambda ) & u_{R}(c,\lambda ) \\
u_{L}'(c,\lambda ) & u_{R}'(c,\lambda )
\end{vmatrix}
.
\end{equation*}
where $u_{L}$ is the solution integrated from the left endpoint and $u_{R}$
is the solution integrated from the right endpoint. If $\lambda $ is an eigenvalue that satisfies
the boundary value problem, then $D(\lambda )=0$. For more details, see for
example \cite{P93}.
\end{itemize}

There are of course other existing methods for approximating bound states;
see for example \cite{FV87, K07, KL81, C05}. However, we will only focus on
these two.

If the potential is approximated using finitely many blocks, then one can
use the following algorithms for estimating bound states:

\begin{itemize}
\item[(3)] Use the recursive formulas \eqref{recR} and \eqref{recA} to find
the bound states as zeros of $1/R_{1,\dots,N}$.

\item[(4)] Similarly, one can use the recursive formula \eqref{recB} to find
the bound states as zeros of $1/B_N$ (a sketch of this approach is given after the list).

\item[(5)] Using \eqref{mateq}, the bound states can be found as zeros of $
q_N$. One must also check the values of $k$ listed in \eqref{detM} where $
\det (M_{N-1}\dots M_1)=0$.
\end{itemize}
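
The following Python sketch illustrates algorithm (4) (our illustration,
reusing the hypothetical helper \texttt{B\_total} from the sketch in Section
\ref{sec:multiblocks}); it is subject to the root-finding caveats discussed
below.
\begin{verbatim}
import numpy as np
from scipy.optimize import brentq

def bound_states_from_B(depths, widths, ngrid=4000):
    """Algorithm (4): bound states kappa_j as zeros of 1/B_N(i kappa).
    On the positive imaginary axis the recursion returns real values
    (up to roundoff), so sign changes can be bracketed on a grid."""
    f = lambda kap: (1.0/B_total(1j*kap, depths, widths)).real
    kap_max = max(depths)                # kappa_j cannot exceed max_n a_n
    grid = np.linspace(1e-6, kap_max - 1e-6, ngrid)
    vals = np.array([f(kap) for kap in grid])
    kappas = []
    for i in range(ngrid - 1):
        if vals[i]*vals[i + 1] < 0:      # bracketed sign change
            root = brentq(f, grid[i], grid[i + 1])
            if abs(f(root)) < 1e-8:      # discard sign changes at poles of 1/B_N
                kappas.append(root)
    return np.array(sorted(kappas, reverse=True))
\end{verbatim}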

Theoretically, algorithms (3)-(5) are very similar to one another; all of
them involve finding bound states as zeros of functions that are multiples
of each other by a well-behaved nonzero multiplier. However, computationally
these are different from one another. Algorithm (3) involves a pair of
recursive equations that must be calculated in tandem. Algorithm (4)
involves a simple single recursive equation and is the least computationally
expensive of these three. Algorithm (5) involves a recursive matrix
equation. A natural question is how these three differ from one another
computationally in terms of accuracy and stability.

Algorithm (1) seems to be the fastest of these algorithms, followed closely
by (2). Moreover, algorithm (1) has great accuracy when the initial
potential is smooth. However, for discontinuous potentials, the Gibbs
phenomenon severely hinders the accuracy of the algorithm. Algorithms
(2)-(5) all rely on finding roots of some function, so they inherit
the problems that root finders tend to have. In
particular, for general potentials, the exact number of bound states is
unknown, hence one does not know how many roots to search for. In practice,
one could partition the positive imaginary axis and search for roots in each
interval. However, the question is still open as to how small the width of
each interval needs to be. This is further complicated by the fact that in the
case of a single block, the bound states are known to cluster towards zero as
the width of the block goes to infinity (this can be easily derived from \eqref{bound_oneblock}).
Lastly, for root finders that do not incorporate the bisection method, given a good initial
approximation of a bound state, the root finder might converge to the wrong bound state.
This leads to multiple approximations to the same bound state that appear to be different bound
states; the clustering effect makes it difficult to spot these repeats in the case noted above.
However, for the bound states that are calculated, algorithms (3)-(5) are extremely accurate.
Algorithms (3)-(5) also seem to be much slower than algorithms (1) and (2), with (5) being the
slowest and most computationally expensive.

In summary, the commonly used algorithms (1) and (2) for calculating bound
states are much faster than the other algorithms. Moreover, algorithm (1)
tends to be extremely accurate, especially when the potential is smooth.
However, although algorithms (3)-(5) are much slower, they also tend to be
very accurate, especially with discontinuous potentials.

Supposing the bound states have been calculated, tables \ref{tab:norm_block}
and \ref{tab:norm_wave1} give a comparison of some of the various algorithms
for computing (left) norming constants. First is the algorithm described in
the present paper:

\begin{itemize}
\item[(i)] The potential is approximated using finitely many blocks, and the
norming constants are calculated as residues via equations \eqref{resB} and
\eqref{resR2}.
\end{itemize}

Next we have the obvious algorithm using the definition of the left norming
constant:

\begin{itemize}
\item[(ii)] Suppose $V$ has compact support $[A,B]$. Then $\phi
(x,k)=\phi _l(x,k)/T(k)$ satisfies $\phi (x,k)=e^{ikx}$ for $x\geq B$. One
can numerically integrate the Schr\"{o}dinger equation from $B$ to $A$. Then
$c^2=\Vert \phi \Vert _{2}^{-2}$, where the norm is evaluated by numerical integration.
\end{itemize}

The following algorithms were also suggested to the authors by Paul Sacks:
letting $a=1/T$ and $b=-R/T$, then $R=-\frac{b}{a}$ and the transition
matrix $\Lambda $ given in \eqref{transition_matrix} becomes
\begin{equation*}
\Lambda =
\begin{pmatrix}
a & b \\
\widetilde{b} & \widetilde{a}
\end{pmatrix}.
\end{equation*}
Moreover, $b$ is analytic everywhere in $\mathbb{C}^{+}$, and the simple
 poles of $T$ in $\mathbb{C}^{+}$ are simple zeros of $a$.
 Consequently, \eqref{ResR} gives us that
\begin{equation*}
c_j^2=i\frac{b(i\kappa _j)}{a'(i\kappa _j)}.
\end{equation*}
The derivative $a'$ with respect to $k$ can be approximated using
the central difference
\begin{equation*}
a'(k)\approx \frac{a(k+\eta /2)-a(k-\eta /2)}{\eta }.
\end{equation*}
The question then becomes how one evaluates $a(k)$ and $b(k)$. Here are two
approaches:

\begin{itemize}
\item[(iii)] The potential is approximated using a finite number of blocks,
and $a$ and $b$ are calculated using potential fragmentation
\eqref{potentialfragmentation}. The transition matrices are evaluated using
equation \eqref{oneblock}.

\item[(iv)] Supposing the potential has compact support $[\alpha ,\beta ]$,
the Schr\"odinger equation can be numerically integrated from $\alpha $ to
$\beta $ with the initial conditions $\phi (\alpha ,k)=e^{-ik\alpha }$,
$\phi '(\alpha ,k)=-ike^{-ik\alpha }$ (this procedure is sketched after the
list). Then $\phi (x,k)=\phi _r(x,k)/T(k)$, so for $x\geq
\beta $
\begin{equation*}
\phi (x,k)=a(k)e^{-ikx}-b(k)e^{ikx}.
\end{equation*}
Consequently, $a$ and $b$ can be retrieved from
\begin{equation*}
\begin{pmatrix}
a(k) \\
b(k)
\end{pmatrix}
=\frac{1}{2}
\begin{pmatrix}
e^{ik\beta } & \frac{ie^{ik\beta }}{k} \\
-e^{-ik\beta } & \frac{ie^{-ik\beta }}{k}
\end{pmatrix}
\begin{pmatrix}
\phi (\beta ,k) \\
\phi '(\beta ,k)
\end{pmatrix}.
\end{equation*}
\end{itemize}
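
A minimal Python sketch of algorithm (iv) combined with the central-difference
formula above (our illustration; the integrator, step counts and function
names are not those of the original MATLAB code) might look as follows.
\begin{verbatim}
import numpy as np

def a_b(k, V, alpha, beta, nsteps=4000):
    """Algorithm (iv): integrate -phi'' + V(x) phi = k^2 phi from alpha to
    beta with phi(alpha) = exp(-ik alpha), phi'(alpha) = -ik exp(-ik alpha),
    then recover a = 1/T and b = -R/T from phi(beta) and phi'(beta)."""
    h = (beta - alpha)/nsteps
    def rhs(x, y):                       # y = (phi, phi')
        return np.array([y[1], (V(x) - k**2)*y[0]])
    y = np.array([np.exp(-1j*k*alpha), -1j*k*np.exp(-1j*k*alpha)])
    x = alpha
    for _ in range(nsteps):              # classical RK4 in complex arithmetic
        s1 = rhs(x, y)
        s2 = rhs(x + h/2, y + h/2*s1)
        s3 = rhs(x + h/2, y + h/2*s2)
        s4 = rhs(x + h, y + h*s3)
        y = y + h/6*(s1 + 2*s2 + 2*s3 + s4)
        x = x + h
    phi, dphi = y
    a = 0.5*(np.exp(1j*k*beta)*phi + 1j*np.exp(1j*k*beta)/k*dphi)
    b = 0.5*(-np.exp(-1j*k*beta)*phi + 1j*np.exp(-1j*k*beta)/k*dphi)
    return a, b

def norming_constant_sq(kappa, V, alpha, beta, eta=1e-3):
    """c_j^2 = i b(i kappa)/a'(i kappa), with a' by the central difference."""
    k = 1j*kappa
    _, b = a_b(k, V, alpha, beta)
    a_plus, _ = a_b(k + eta/2, V, alpha, beta)
    a_minus, _ = a_b(k - eta/2, V, alpha, beta)
    return (1j*b*eta/(a_plus - a_minus)).real
\end{verbatim}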

Algorithms (ii) and (iv) seem to be the fastest of these four algorithms.
For smooth potentials, algorithm (iii) seems to be the most accurate with
the other three algorithms being approximately the same order of accuracy.
However, for discontinuous potentials, algorithm (i) seems to be the most accurate and
algorithm (iv) is the least accurate. Since algorithms (ii) and (iv) involve numerically
integrating the Schr\"odinger equation, these two algorithms should do well with smooth
potentials and horribly with discontinuous ones. Since algorithms (i) and (iii) involve
approximating the initial potential with step functions, one would expect that these
algorithms would do better for discontinuous potentials than with smooth ones. What is
surprising is that these algorithms seem to do about as well if not better than algorithms
 (ii) and (iv) even for smooth potentials.  Moreover, the accuracy of algorithms (i) and (iii)
increases when the bound states are approximated using algorithms (3)-(5). Furthermore,
as we discuss in the next section, algorithms (i) and (iii) can be revised to use higher order
spline interpolants of the initial potential, leading to even greater accuracy for smooth
potentials with very little change in computational time.

Lastly, Figures \ref{fig:plot1} and \ref{fig:plot2} compare the asymptotic
formula given in \cite{AC91} with the numerically integrated solution
obtained by using the split step Fourier method. In Figure \ref{fig:plot1},
the initial potential is smooth, giving great accuracy for the split step
Fourier method. However, in Figure \ref{fig:plot2}, the potential is
discontinuous, giving extra noise in the solution from the split step
Fourier method. Despite this, the soliton solutions closely match the
asymptotic solution.


\begin{table}[tbp]
\caption{$V(x)=-4\protect\chi _{[-4,0]}(x)$, domain chosen $[-10,10]$%
, spatial step size $h=0.01$}
\label{tab:bound_block} \scriptsize 
\begin{tabular}{rrrrrr}
Algor. & $\kappa_1$ & $\kappa_2$ & $\kappa_3$ & Rel. Error & Time
(sec) \\ \hline
Exact & 1.899448036751944 & 1.571342556813314 & 0.876610362727433 & 0 & 
0.004355000 \\ 
(1) & 1.898826427139628 & 1.568514453040000 & 0.867505110670815 & 
365 E-5 & 0.126239000 \\ 
(2) & 1.899418261950639 & 1.572105829640451 & 0.872097420881459 & 
175 E-5  & 0.505034000 \\ 
(3) & 1.899448036751942 & 1.571342556813313 & 0.876610362727439 & 
3 e-15 & 4.168762000 \\ 
(4) & 1.899448036751949 & 1.571342556813312 & 0.876610362727428 & 
3 e-15 & 5.425778000 \\ 
(5) & 1.899448036751942 & 1.571342556813315 & 0.876610362727434 & 
1 e-15 & 10.268152000
\end{tabular}
\end{table}

\begin{table}[tbp]
\caption{$V(x)=-4\protect\chi _{[-4,0]}(x)$, domain chosen $[-4,0]$,
spatial step size $h=0.01$, energy step size $\protect\eta =0.001$, exact
bound states used}
\label{tab:norm_block}\scriptsize 
\begin{tabular}{rrrrrr}
Algor.& $c_1^2$ & $c_2^2$ & $c_3^2$ & Rel. Error & Time (sec) \\ 
\hline
Exact & 0.038798932148319 & 0.145167980693995 & 0.257227284424067 & 0 & 
0.005992000 \\ 
(i) & 0.038798932148326 & 0.145167980694058 & 0.257227284424741 & 
227 E-14 & 2.008827000 \\ 
(ii) & 0.039619680931665 & 0.160080616838866  & 0.364236083957119 & 
363 E-3& 0.032151000	\\
(iii) & 0.038798937542783 & 0.145168027811526 & 0.257226712349713 & 
193 E-8  & 2.070128000 \\ 
(iv) & 0.051311576782601 & 0.109225786002665 & -0.041977058580690 & 
1.012 & 0.147137000
\end{tabular}
\end{table}

\begin{table}[tbp]
\caption{$V(x)=-2\operatorname{sech} ^2(x)$, domain chosen $[-5,5]$, spatial step
size $h=0.01$}
\label{tab:bound_wave1}{\scriptsize 
\begin{tabular}{rrrr}
Algorithm & $\kappa$ & Relative Error & Time (sec) \\ \hline
Exact & 1.000000000000000 & 0 & 0 \\ 
(1) & 1.000181385743159 & 0.000181385743159 & 0.123699000 \\ 
(2) & 1.000010661550817 & 0.000010661550817 & 0.165820000 \\ 
(3) & 0.999997769556372 & 0.000002230443628 & 8.624536000 \\ 
(4) & 0.999997769556371 & 0.000002230443629 & 8.780760000 \\ 
(5) & 0.999997769556372 & 0.000002230443628 & 14.264264000
\end{tabular}
}
\end{table}

\begin{table}[tbp]
\caption{$V(x)=-2\operatorname{sech}^2(x)$, domain chosen $[-5,5]$, spatial step
size $h=0.01$, energy step size $\protect\eta =0.001$, exact bound state used
}
\label{tab:norm_wave1}{\scriptsize 
\begin{tabular}{rrrr}
Algorithm & $c^2$ & Relative Error & Time (sec) \\ \hline
Exact & 2.000000000000000 & 0 & 0 \\ 
(i) & 2.004086813877857 & 0.002043406938928 & 3.460512000 \\ 
(ii) & 1.996830443518782 &  0.001584778240609 & 0.023956000 \\
(iii) & 1.999683571279579 & 0.000158214360211 & 1.631856000 \\ 
(iv) & 1.993946894122799 & 0.003026552938601 & 0.088449000
\end{tabular}
}
\end{table}

\begin{figure}[tbp]
\begin{center}
\includegraphics[width=4in]{fig3.pdf}
\end{center}
\caption{$V(x)=-10\operatorname{sech}^2(x)$, $t=0.3$}
\label{fig:plot1}
\end{figure}

\begin{figure}[tbp]
\begin{center}
\includegraphics[width=4in]{fig4.pdf} 
\end{center} 
\caption{$V(x)=-4\protect\chi _{[-4,0]}(x)$, $t=0.3$}
\label{fig:plot2}
\end{figure}

\section{Haar systems and a KdV large-time solver }

Suppose now that $V$ is bounded, nonpositive, and has compact support. 
Then $V$ can be well approximated using finitely many nonpositive blocks. 
For such potentials $V$, we now summarize the algorithm for solving 
the KdV for large times:

\begin{itemize}
\item Approximate the potential $V(x)$ using $N$ nonpositive blocks

\item Bound states are found as zeros of $1/R_{1,\dots,N}$ with initial
estimates, for example, derived from a spectral matrix estimate of the 
Sch\"{o}dinger operator

\item The norming constants are calculated as residues of $B_N$ at the
bound states using the previously described recursive formulas

\item The solution to the KdV is obtained from the formula \eqref{soliton sltn}.
\end{itemize}

There are a number of possible improvements to this algorithm. For example,
the number of bound states is known for a single block, so the results in
\cite{AKM98} could possibly be implemented to obtain an accurate estimate 
for the number of bound states for the potential. As another example, 
instead of piecewise-constant functions, one could instead use higher order spline
interpolants of the potential. All of the recursive formulas in section 
\ref{sec:multiblocks} were derived from potential fragmentation, which holds for
arbitrary potentials; the only things that would change would be the formula
for $R_n^0$, the initial values in the recursive formulas, and the
values for $k$ in \eqref{detM}. For example, in the case of piecewise-linear
spline interpolants, the formula for $R_n^0$ would involve the Airy
functions.

Another possible route for improvement would be the use of Haar wavelets
 to obtain a step-like approximation. We will only consider Haar wavelets 
in the current paper. For a great exposition on Haar and other wavelets, 
see \cite{O06}. Consider the \emph {scaling function}
\begin{equation*}
\varphi(x) = \varphi_0(x)
 = \begin{cases} 1 & \text{if } 0 < x \leq 1,	\\
0 & \text{otherwise},
\end{cases}
\end{equation*}
and the \emph {mother wavelet}
\begin{equation*}
	w(x) = \begin{cases} 1 & \text{if } 0 < x \leq 1/2,	\\
 -1 & \text{if } 1/2 < x \leq 1,	\\
 0 & \text{otherwise}.
\end{cases}
\end{equation*}
We form the Haar wavelets as follows: let
\begin{equation*}
	w_{j,0}(x) = w(2^j x).
\end{equation*}
Then $w_{j,0}$ has support $[0,2^{-j}]$. Next, we translate $w_{j,0}$ 
so as to fill up the entire interval $[0,1]$ with $2^j$ subintervals 
of length $2^{-j}$:
\begin{equation*}
	w_{j,k}(x) = \varphi_{2^j+k}(x) = w_{j,0}(x-2^{-j}k) = w(2^j x-k),	\quad
 k = 0,1,\dots,2^j-1.
\end{equation*}
Then $w_{j,k}$ has support $[2^{-j}k,2^{-j}(k+1)]$. 
The collection of \emph {Haar wavelets}
\begin{equation*}
	\mathcal{H}_{2^n} = \{ \varphi_m  :  0 \leq m \leq 2^n-1 \}
\end{equation*}
forms an orthogonal system of dimension $2^n$ with respect to the $L^2$ inner product; 
the collection $\mathcal{H}_\infty$ forms a complete orthogonal system 
for $L^2([0,1])$. For $\mathcal{H}_{2^n}$, let $\boldsymbol{\varphi}_r$ 
denote the vector in $\mathbb{R}^{2^n}$ corresponding to $\varphi_r$; i.e., 
the entries of $\boldsymbol{\varphi}_r$ are the function values of 
$\varphi_r$ on the $2^n$ intervals.

By translating and scaling, suppose without loss of generality that $V$ 
has compact support $[0,1]$. Since $V$ is bounded, we have that 
$V \in L^2([0,1])$, so $V$ can be expressed in terms of the Haar basis:
\begin{equation*}
		V = \sum_{r=0}^\infty c_r \varphi_r
\end{equation*}
where
\begin{equation*}
	c_r = \frac{\left\langle V, \varphi_r \right\rangle_2}{\|\varphi_r\|_2^2}.
\end{equation*}

Let $V_0$ denote the piecewise-constant approximation of $V$ on the $2^n$ 
intervals mentioned above, and let $\mathbf{V}$ denote the corresponding 
column vector in $\mathbb{R}^{2^n}$. Then $V_0$ can be represented as a 
linear combination of the Haar wavelets in $\mathcal{H}_{2^n}$:
\begin{equation*}
	V_0 = \sum_{r=0}^{2^n-1} c_r \varphi_r
\end{equation*}
where the coefficients $c_r$ are as described above. Letting $\mathbf{c}$ 
denote the column vector of coefficients $c_r$, the \emph {discrete
 wavelet transform} (DWT) is the map $H_{2^n} : \mathbf{V} \mapsto \mathbf{c}$; 
that is, $H_{2^n}$ is a change of basis from the standard basis to the Haar basis. Letting $W_{2^n}$ denote the matrix whose $r$-th column is $\boldsymbol{\varphi}_r$, we have that
\begin{equation*}
	\mathbf{V} = W_{2^n} \mathbf{c},
\end{equation*}
so
\begin{equation*}
	\mathbf{c} = W_{2^n}^{-1} \mathbf{V},
\end{equation*}
implying that $H_{2^n} = W_{2^n}^{-1}$. (Note: often, the columns are 
normalized so that $W_{2^n}$ is an orthogonal matrix. In this case, 
$H_{2^n} = W_{2^n}^*$ where $*$ denotes the transpose).
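
For concreteness, the following Python sketch (our illustration, with
hypothetical function names) builds $W_{2^n}$ from the definitions above and
computes the Haar coefficients of a sampled potential.
\begin{verbatim}
import numpy as np

def haar_matrix(n):
    """W_{2^n}: the r-th column holds the values of phi_r on the 2^n
    subintervals of [0,1]."""
    N = 2**n
    x = (np.arange(N) + 0.5)/N           # midpoints of the subintervals
    W = np.zeros((N, N))
    W[:, 0] = 1.0                        # scaling function phi_0
    for j in range(n):
        for k in range(2**j):
            col = 2**j + k               # phi_{2^j + k} = w_{j,k}
            left = (x >= k/2**j) & (x < (k + 0.5)/2**j)
            right = (x >= (k + 0.5)/2**j) & (x < (k + 1.0)/2**j)
            W[left, col] = 1.0
            W[right, col] = -1.0
    return W

# Discrete wavelet transform of a sampled potential and its reconstruction
n = 3
W = haar_matrix(n)
V = -4.0*np.ones(2**n)                   # e.g. samples of V(x) = -4 on [0,1]
c = np.linalg.solve(W, V)                # c = W^{-1} V
V0 = W @ c                               # back to the standard basis
\end{verbatim}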

The Discrete Wavelet Transform is analogous to the Fast Fourier 
Transform (FFT), which expresses $\mathbf{V}$ in the orthogonal basis 
corresponding to the Fourier basis $\{ e^{irx} : -2^{n-1} < r \leq 2^{n-1} \}$ 
in $L^2([-\pi,\pi])$. However, the Fourier basis is not localized, 
unlike the Haar basis, so the Fourier basis has difficulty capturing 
data concentrated in a relatively small region. The Fourier basis tends 
to accurately approximate smoother functions, while exhibiting the so 
called \emph {Gibbs phenomenon} at discontinuities. On the other hand, 
the Haar basis tends to accurately approximate discontinuous functions, 
while only slowly converging to smoother functions.

In the context of solving the KdV, Haar wavelets may be implemented 
in a couple of ways. One approach would be to approximate the potential using 
Haar wavelets, since this generally gives more accurate piecewise-constant 
interpolants than, say, the midpoint rule. Then the interpolating potential 
would be changed to the standard basis and used in our algorithm.


\subsection{Acknowledgements}
This work was done as part of the REU program run by the third author 
in the summer of 2009, and was supported by NSF grants DMS 0707476 
and DMS 1009673. The government support is highly appreciated.

We are grateful to the other participants who have also contributed 
to this project: Lyman Gillispie and Sigourney Walker. 
We would particularly like to thank Paul Sacks for providing us 
algorithms (iii) and (iv) for calculating norming constants.
 We are also grateful to Constantine Khroulev and Anton Kulchitsky 
for useful consultations and discussions. And last but not least 
we would like to thank the anonymous referee for bringing our attention 
to \cite{AMS09,Mee2007,Parratt54}, and numerous valuable comments.


\begin{thebibliography}{10}

\bibitem{AC91} Ablowitz, Mark J.; Clarkson, Peter A.;
\newblock {\em Solitons, nonlinear evolution equations and inverse scattering},
  volume 149 of {\em London Mathematical Society Lecture Note Series}.
\newblock Cambridge University Press, Cambridge, UK.

\bibitem{AK01} Aktosun, Tuncay; Klaus, Martin;
\newblock Inverse theory: Problem on the line.
\newblock pages 770--785.
\newblock Chapter 2.2.4.

\bibitem{AKM96} Aktosun, Tuncay; Klaus, Martin; van der Mee, Cornelis;
\newblock Factorization of scattering matrices due to partitioning of
  potentials in one-dimensional Schr\"{o}dinger-type equations.
\newblock {\em J. Math. Phys.}, 37(12):5897--5915.

\bibitem{AKM98} Aktosun, Tuncay; Klaus, Martin; van der Mee, Cornelis;
\newblock On the number of bound states for the one-dimensional Schr\"{o}dinger
  equation.
\newblock {\em J. Math. Phys.}, 39(9):4249--4256.

\bibitem{AS02} Aktosun, Tuncay; Sacks, Paul E.;
\newblock Potential splitting and numerical solution of the inverse scattering
  problem on the line.
\newblock {\em Math. Methods Appl. Sci.}, 25(4):347--355.

\bibitem{A94}  Aktosun, Tuncay;
\newblock Bound states and inverse scattering for the {S}chr\"{o}dinger
  equation in one dimension.
\newblock {\em J. Math. Phys.}, 35(12):6231--6236.

\bibitem{A92} Aktosun, Tuncay.
\newblock A factorization of the scattering matrix for the {S}chr{\"{o}}dinger
  equation and for the wave equation in one dimension.
\newblock {\em J. Math. Phys.}, 33(11):3865--3869.

\bibitem{AMS09} Aric\`{o}, Antonio; van der Mee, Cornelis; Seatzu, Sebastiano;
\newblock Structured matrix solution of the nonlinear {S}chr\"{o}dinger
  equation by the inverse scattering transform.
\newblock {\em Electron. J. Diff. Eqns}, 2009(15):1--21.

\bibitem{BV05} Belashov, Vasily Y.; Vladimirov, Sergey V.;
\newblock {\em Solitary waves in dispersive complex media}, volume 149 of {\em
  Solid-State Sciences}.
\newblock Springer, Springer-Verlag Berlin.

\bibitem{C05} Chanane, Bilal;
\newblock Computation of the eigenvalues of {S}turm-{L}iouville problems with
  parameter dependent boundary conditions using the regularized sampling
  method.
\newblock {\em Math. Comp.}, 74(252):1793--1801.

\bibitem{Deift79} Deift, Percy; Trubowitz, Eugene;
\newblock Inverse scattering on the line.
\newblock {\em Comm. Pure Appl. Math.}, 32(2):121--251.

\bibitem{FV87} Fack, Veerle; Vanden Berghe, Guido;
\newblock (Extended) {N}umerov method for computing eigenvalues of specific
  {S}chr\"odinger equations.
\newblock {\em J. Phys. A.}, 20(13):4153--4160.

\bibitem{GrTe2009} Grunert, Katrin; Teschl, Gerald;
\newblock Long-time asymptotics for the {K}orteweg-de {V}ries equation via
  nonlinear steepest descent.
\newblock {\em Math. Phys. Anal. Geom.}, 12(3):287--324.

\bibitem{HKRT11} Holden, Helge; Karlsen, Kenneth H.;
 Risebro, Nils H.; Tao, Terence;
\newblock Operator splitting for the KdV equation.
\newblock {\em Math. Comp.}, 80(274):821--846.

\bibitem{Synolakis1998} K{\^a}no{\u{g}}lu, Utku; Synolakis, Costas E.;
\newblock Long wave runup on piecewise linear topographies.
\newblock {\em J. Fluid Mech.}, 374:1--28, 1998.

\bibitem{K07} Katatbeh, Qutaibeh D.;
\newblock Spectral bisection algorithm for solving the Schr\"odinger equation using
  upper and lower solutions.
\newblock {\em Electron. J. Diff. Eqns}, 2007(129):11 pp.

\bibitem{KL81} Korsch, Hans J.; Laurent, Helmut;
\newblock Milne's differential equation and numerical solutions of the
  schr\"{o}dinger equation i. bound-state energies for single- and
  double-minimum potentials.
\newblock {\em J. Phys. B: At. Mol. Phys.}, 14(22):4213--4230.

\bibitem{Mee2007} van der Mee, Cornelis; Seatzu, Sebastiano; Theis, Daniela;
\newblock Structured matrix algorithms for inverse scattering on the line.
\newblock {\em Calcolo}, 44(2):59--87, 2007.

\bibitem{MD05} Munteanu, Ligia; Donescu, Stefania;
\newblock {\em Introduction to soliton theory: applications to mechanics},
  volume 143 of {\em Fundamental Theories of Physics}.
\newblock Kluwer Academic Publishers, Dordrecht, Netherlands.

\bibitem{O06} Olver, Peter J.;
\newblock Chapter 13: Fourier analysis.
\newblock Lecture notes and book preprints, available at
\url{http://www.math.umn.edu/~olver/am_/fa.pdf}

\bibitem{Parratt54} Parratt, Lyman G.;
\newblock Surface studies of solids by total reflection of {X}-rays.
\newblock {\em Phys. Rev.}, 95(2):359--369, 1954.

\bibitem{P93} Pryce, John D.;
\newblock {\em Numerical solution of Sturm-Liouville problems}.
\newblock Oxford Science Publications. Oxford University Press, Oxford, UK.

\bibitem{SWG96} Sylvester, John; Winebrenner, Dale; Gylys-Colwell, Fred;
\newblock Layer stripping for the {H}elmholtz equation.
\newblock {\em SIAM J. Appl. Math.}, 56(3):736--754.

\bibitem{T00} Trefethen, Lloyd N.;
\newblock {\em Spectral Methods in MATLAB}.
\newblock SIAM, Philadelphia, PA.


\end{thebibliography}



\end{document}

