\documentclass[reqno]{amsart}
\usepackage{hyperref}
\usepackage{graphicx}

\AtBeginDocument{{\noindent\small
2014 Madrid Conference on Applied Mathematics in honor of Alfonso Casal,\\
\emph{Electronic Journal of Differential Equations},
Conference 22 (2015),  pp. 117--137.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu}
\thanks{\copyright 2015 Texas State University.}
\vspace{9mm}}

\begin{document} \setcounter{page}{117}
\title[\hfilneg EJDE-2015/Conf/22 \hfil Transparent boundary conditions]
{Transparent boundary conditions for the wave equation in
one dimension and for a \\ Dirac-like equation}

\author[M. P. Velasco, D. Usero, S. Jim\'enez, L. V\'azquez \hfil EJDE-2015/conf/22 \hfilneg]
{M. Pilar Velasco, David Usero, Salvador Jim\'enez, Luis V\'azquez}

\address{M. Pilar Velasco \newline
\'Area de Matem\'aticas, Estad\'istica e Investigaci\'on operativa\\
 Centro Universitario de la Defensa, Academia General Militar,
50090 Zaragoza, Spain}
\email{velascom@unizar.es}

\address{David Usero \newline
 Secci\'on Departamental de Matem\'atica Aplicada,
Facultad de  CC. Qu\'imicas\\
 Universidad Complutense de Madrid, 28040 Madrid, Spain}
\email{umdavid@mat.ucm.es}

\address{Salvador Jim\'enez \newline
Departamento de Matem\'atica Aplicada a las TT. II., E.T.S.I.
Telecomunicaci\'on\\
Universidad Polit\'ecnica de Madrid, 28040 Madrid, Spain}
\email{s.jimenez@upm.es}

\address{Luis V\'azquez \newline
Departamento de Matem\'atica Aplicada, Facultad de Inform\'atica\\
 Universidad Complutense de Madrid, 28040 Madrid, Spain}
\email{lvazquez@fdi.ucm.es}


\thanks{Published November 20, 2015.}
\subjclass[2010]{35L05, 65M06}
\keywords{Wave equation; transparent boundary condition;
 Dirac-type equation}

\begin{abstract}
 We present a method to achieve transparent boundary conditions
 for the one-dimensional wave equation, and show its numerical implementation
 using a finite-difference method. We also present an alternative method for
 building the same transparent boundary conditions using a Dirac-like equation
 and a Spinor-like formalism. Finally, we extend our method to the 
 three-dimensional wave equation with radial symmetry.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\allowdisplaybreaks

\section{Introduction}

Frequently in the study of the partial differential equations that model
real phenomena it is necessary to fix artificial boundary conditions for
limiting the area of study and obtaining unique and well-posed solutions.
However, these artificial conditions can affect the solutions of the
equations and cause non-desired effects. For example, in the particular case
of the study of traveling waves by the wave equation the presence of artificial
boundary conditions produces the appearance of reflected waves related to
the transmitted wave and these reflected waves can spoil the perception of
the phenomenon.

 To solve this problem and avoid the reflection effect caused by the
artificial boundary conditions, in this work we propose transparent boundary
conditions for the wave equation in one dimension. The purpose of these
transparent boundary conditions is to eliminate the reflected wave
and to ensure that the whole traveling wave is transmitted.

 In Section 2 we analyze the wave equation and describe the movement
of the traveling wave by supposing that the whole traveling wave is
transmitted without reflection, at continuous and discrete level.
Considering the data of the previous section, in Section 3, we determine
what conditions the traveling wave should satisfy at the boundary
for a complete transmission, at continuous and
discrete level again, and we obtain numerical simulations that check the
efficiency of our study. An alternative theoretical support for the construction
of transparent boundary conditions by using Dirac-type equations and a
spinor-like formalism is shown in Section 4.
Finally, in Section 5, we extend the problem to a particular case in three
dimensions and we use the obtained results for achieving transparent
boundary conditions in the case of the three-dimensional wave equation
with radial symmetry.

Note that we use explicit schemes for the numerical implementations since 
they are quite simple and it is possible with them to preserve at local 
level the dispersion relations. Implicit schemes could be used as long as 
they also preserve this and all the necessary values lay inside the 
appropriate region.

 The following step in this area that will be analyzed in future works
is to extend these results for the cases of the wave equation in two
 and three dimensions.


\section{Transparent boundary conditions for
the wave equation in one-dimension}

The solutions to the wave equation possess the property of superposition of
traveling waves. We shall use this to build transparent boundary conditions.

\subsection*{Continuous level}

We consider the classical initial value problem for the wave equation in the
whole (one-dimensional) space
\begin{equation}
\begin{gathered}
u_{tt} -c^2 u_{xx} = 0 \,,\quad
t\ge 0,\;  x\in(-\infty,+\infty)\,,\\
u(0,x) = f(x) \,,\\
u_t(0,x) = g(x) \,,
\end{gathered} \label{sist1}
\end{equation}
where $f$ and $g$ are suitable functions.

According to the D'Alembert formula, the solution to \eqref{sist1} is
\begin{equation}
u(t,x)=\frac{1}{2}\left[f(x-ct) +f(x+ct)\right]
+\frac{1}{2c}\int_{x-ct}^{x+ct}g(s)\,ds\,.
\end{equation}
Let us suppose that there exists $G$, a primitive function (antiderivative) of $g$; then we have:
\begin{equation}
\begin{aligned}
u(t,x) &= \frac{1}{2}\left[f(x+ct) +f(x-ct) \right]
+\frac{1}{2c}\left[G(x+ct)-G(x-ct) \right]
 \\
&= \frac{1}{2}f(x+ct)+\frac{1}{2c}G(x+ct)
+ \frac{1}{2}f(x-ct)-\frac{1}{2c}G(x-ct)\,.
\end{aligned}
\end{equation}
This corresponds to the superposition of two traveling waves $v(t,x)$ and
 $w(t,x)$ given by:
\begin{gather}
v(t,x) = \frac{1}{2}f(x+ct)+\frac{1}{2c}G(x+ct)\,,\\
w(t,x) = \frac{1}{2}f(x-ct)-\frac{1}{2c}G(x-ct)\,,
\end{gather}
such that
\begin{equation}
u(t,x)=v(t,x)+w(t,x)\,.
\end{equation}
Each one represents a given profile moving, undisturbed, in one specific
direction, $v$ to the left and $w$ to the right, with speed $c$. For instance:
\begin{equation}
\begin{aligned}
v(t+\tau,x) &=
\frac{1}{2}f\Big(x+c(t+\tau)\Big) + \frac{1}{2c}G\Big(x+c(t+\tau)\Big)\\
&= \frac{1}{2}f\Big((x+c\tau)+ct\Big) + \frac{1}{2c}G\Big((x+c\tau)+ct\Big) \\
&=  v(t,x+c\tau),
\end{aligned}
\end{equation}
which indicates that at a given time the profile is the same but shifted in
position. If we refer this to the initial profile, we have:
\begin{gather}
v(t,x) = v(0,x+ct), \\
w(t,x) = w(0,x-ct).
\end{gather}

Alternatively, we could have used Fourier techniques to split the
initial data of \eqref{sist1} into the components traveling to the left and to
the right. Let $\hat f(\kappa)$ and $\hat g(\kappa)$ be the Fourier transforms
over the whole space of, respectively, $f$ and $g$. We define:
\begin{equation}
\begin{gathered}
v_0(x)=\frac{1}{\sqrt{2\pi}}\int_0^{\infty}
\hat f(-\kappa) e^{-i\kappa x} d\kappa\,,\\
v'_0(x)=\frac{1}{\sqrt{2\pi}}\int_0^{\infty}
\hat g(-\kappa) e^{-i\kappa x} d\kappa\,,
\end{gathered}
\end{equation}
and
\begin{equation}
\begin{gathered}
w_0(x)=\frac{1}{\sqrt{2\pi}}\int_0^{\infty}
\hat f(\kappa) e^{i\kappa x} d\kappa\,,\\
w'_0(x)=\frac{1}{\sqrt{2\pi}}\int_0^{\infty}
\hat g(\kappa) e^{i\kappa x} d\kappa\,.
\end{gathered}
\end{equation}

Let us consider a special case for \eqref{sist1} where both functions $f$
and $G$ (and, accordingly, $g$) have compact support, and that there exists a
value $L>0$ such that
\begin{equation}
\forall x,\quad  |x|>L \to
\begin{cases}
f(x)=0\,,\\ G(x)=0\,,
\end{cases}
\end{equation}
which implies that
\begin{equation}
u(t,-L)=v(t,-L)\,,\quad u(t,L)=w(t,L)\quad \forall t\ge 0\,.
\end{equation}
This means that the region $(-\infty,-L)$ will only ``see'' a perturbation
given by $v$, while the region $(L,\infty)$ will only ``see'' a perturbation
given by $w$, and that only after a certain time. Besides, the central region
$(-L,L)$ will become undisturbed ($u$ and $u_t$ being zero for all its
points) after some
time, since both profiles will exit by its left side or by its right side.

For this central region we can substitute \eqref{sist1} by an equivalent
problem:
\begin{equation}
\begin{gathered}
\varphi_{tt} -c^2 \varphi_{xx} = 0 \,,\quad t\ge 0,\; x\in[-L,L]\,,\\
\varphi(0,x) = f(x)\,,\\
\varphi_t(0,x) = g(x)\,,\\
\varphi(t,-L) = v(0,ct-L)\,,\\
\varphi(t,L) = w(0,L-ct)\,,\\
\end{gathered}
\label{sist2}
\end{equation}
since we have
\begin{equation}
u(t,x) = \varphi(t,x)\,,\quad
\forall t\ge 0,\; \forall x\in[-L,L]\,.
\end{equation}
We may say that the boundary conditions on $\varphi$, both at $L$ and at $-L$,
are ``transparent''. Any other kind of boundary conditions would induce a
solution $\varphi$ different from $u$ at some location inside $[-L,L]$ after
some given time.

\subsection*{Discrete level}

In practice, we cannot simulate \eqref{sist1} by finite differences due to the
infinite range of $x$-values, but we can simulate a problem such as
\eqref{sist2}: we build a discrete time-space mesh given by values
$t_n=n\Delta t$, $x_l=l\Delta x$, with $n\in\mathbb{N}$ and $l\in\mathbb{Z}$,
 and compute the values
$\varphi(t_n,x_l)$, that we denote by $\varphi^n_l$.

The standard discretized equation is given by the centered, second order
expression or scheme:
\begin{equation}
\frac{\varphi^{n+1}_{l}-2\varphi^{n}_{l}+\varphi^{n-1}_{l}}{\Delta t^2}
-c^2\frac{\varphi^{n}_{l+1}-2\varphi^{n}_{l}+\varphi^{n}_{l-1}}{\Delta x^2}=0\,,
\end{equation}
that we may express as
\begin{equation}
\varphi^{n+1}_{l}=2\varphi^{n}_{l}-\varphi^{n-1}_{l}
-\gamma^2\left(\varphi^{n}_{l+1}-2\varphi^{n}_{l}+\varphi^{n}_{l-1}\right),
\label{scheme}
\end{equation}
with
\begin{equation}
\gamma = \frac {c\Delta t}{\Delta x}\,.
\end{equation}
The local truncation error for $\varphi^{n+1}_{l}$ in \eqref{scheme} is:
\begin{equation}
-\frac{\gamma^2}{12}\,(1-\gamma^2)\Delta x^4\, \varphi_{xxxx}(\tilde t,\tilde
x)\,,
\end{equation}
for some intermediate values $\tilde t$ and $\tilde x$ that depend on $n$
and $l$. This expression can be obtained, for instance, expanding in
Taylor series around $\varphi^n_l$ the different terms involved and using
the mean value theorem. We see that in order to compute a given value
$\varphi^{n+1}_l$ we need some previous (in time)
neighbouring values. We sketch this dependence with the diagram represented
 in Figure \ref{fig1}.

\begin{figure}[htb]
\begin{center}
\includegraphics[width=0.3\textwidth]{fig1} % molecula
\end{center}
\caption{Values needed to compute $\varphi^{n+1}_l$.}
\label{fig1}
\end{figure}

Two sets of values, for $n=0$ and $n=1$, must be known to start the
computations. They are obtained from the initial conditions. In general this
can be done assuming that the solution satisfies the equation at the initial
time (which is not required in theory) and performing a Taylor series expansion:
for all $l$,
\begin{equation}
\begin{aligned}
\varphi^0_l&=f(x_l)\,,\\
 \varphi^1_l&=\varphi^0_l+\Delta t\, g(x_l) + c^2\frac{\Delta
t^2}{2}f''(x_l)
+c^2\frac{\Delta t^3}{6}g''(x_l)+O(\Delta t^4)\,.
\end{aligned}
\end{equation}
Truncating the $O(\Delta t^4)$ term, we obtain an approximation to the initial
data of the same order in $\Delta t$ as the truncation error of
\eqref{scheme}. On the other hand, in the case where both $v$ and $w$
are known, we may choose the exact values: for all $l$,
\begin{equation}
\begin{aligned}
\varphi^0_l&=f(x_l)\,,\\
 \varphi^1_l&=u(\Delta t,x_l)=v(0,x_l+c\Delta t)+w(0,x_l-c\Delta t)
\,.
\end{aligned}
\end{equation}
The boundary conditions are: for all $n$,
\begin{equation}
\begin{aligned}
\varphi^n_{-\ell} &= v(0,ct_n-L)\,,\\
\varphi^n_{\ell} &= w(0,L-ct_n)\,,
\end{aligned}
\end{equation}
where $t_n=n\Delta t$ and it is necessary to choose $\Delta x$ in such a way that
$\ell\equiv L/\Delta x$ is a natural number.

If we consider, for instance, the case $l=-\ell+1$ (that is, the leftmost
position where the solution is to be computed) we see from Figure \ref{fig1}
that the boundary value $\varphi^n_{-\ell}$ is necessary to compute
$\varphi^{n+1}_{-\ell+1}$. It is clear that it is not possible to compute these
boundary values from the numerical scheme \eqref{scheme} and that we actually need
to provide them by an independent mechanism.

For stability reasons it is convenient to choose $c\Delta t$ and $\Delta x$
fulfilling certain relation, and the best choice corresponds to $\gamma=1$
(or, equivalently, rescale the equation to
have $c=1$ and choose $\Delta t=\Delta x$), since in this case the numerical
solution is exact, in the sense that $\varphi^n_l$ is computed with no local
truncation error (provided the initial conditions are exact), and the only
possible errors arise from the numerical round-off in the computations.
\label{gamaesuno}


\section{A different way to build transparent conditions}

\subsection{Continuous level}\label{clasico}
From the previous analysis, we see that building exact transparent boundary
conditions amounts to determine the values of $v$ and $w$. We can also
assume (we have seen it in the discrete case but it is clear that it should
also be the same in the continuous case) that the boundary conditions cannot be
deduced from the evolution equation, short of solving it.

But we may try a different approach. Instead of building $v$ and $w$ from the
initial data, we  try to identify them as the solution to some specific
equations. It is easy to check that $v$ and $w$ satisfy the equations:
\begin{gather}
v_t-cv_x=0\,, \label{eqleft}\\
w_t+cw_x=0\,, \label{eqright}
\end{gather}
and thus problem \eqref{sist1} can be stated equivalently as
\begin{equation}
\begin{gathered}
u(t,x)=v(t,x)+w(t,x),\quad
t\ge 0,\;  x\in(-\infty,+\infty)\,,\\
\begin{cases}
v_t-cv_x=0\,,\\
v(0,x) = \frac{1}{2}f(x)+\frac{1}{2c}G(x)\,,
\end{cases}
\\
\begin{cases}
w_t+cw_x=0\,,\\
w(0,x) = \frac{1}{2}f(x)-\frac{1}{2c}G(x) \,.
\end{cases}
\end{gathered}
\label{sist3}
\end{equation}
Since both \eqref{eqleft} and \eqref{eqright} are first order partial
differential equations, only an initial condition is necessary, and in order to
have full equivalence with \eqref{sist1} we have to impose that both $v$
and $w$ are sufficiently regular and satisfy their corresponding equation,
 either \eqref{eqleft} or \eqref{eqright}, at the initial time.

The interesting thing about \eqref{sist3} is that, if we build the
corresponding problem for initial data with compact support, in the same way as
we did for \eqref{sist1} with \eqref{sist2} we have:
\begin{gather}
\varphi(t,x)=\phi(t,x)+\psi(t,x), \quad
t\ge 0,\; x\in[-L,L]\,, \nonumber \\
\begin{cases}
\phi_t-c\phi_x=0\,,\\
\phi(0,x) = \frac{1}{2}f(x)+\frac{1}{2c}G(x)\,,\\
\phi(t,-L) = \phi(0,ct-L)\,,\\
 \phi(t,L) =0\,,
\end{cases}
\nonumber \\
\begin{cases}
\psi_t+c\psi_x=0\,,\\
\psi(0,x) = \frac{1}{2}f(x)-\frac{1}{2c}G(x)\,,\\
\psi(t,L) = \psi(0,L-ct)\,,\\
 \psi(t,-L) =0 \,,
\end{cases} \label{sist4}
\end{gather}
with
\begin{equation}
\phi(t,x)=v(t,x)\,,\quad \psi(t,x)=w(t,x)\quad \forall x\in[-L,L]\,.
\end{equation}
But, although we have two boundary conditions, in fact only one 
is necessary and
\eqref{sist4} is equivalent to
\begin{equation}
\begin{gathered}
\varphi(t,x)=\phi(t,x)+\psi(t,x),\quad
t\ge 0,\; x\in[-L,L]\,,\\
\begin{cases}
\phi_t-c\phi_x=0\,,\\
\phi(0,x) = \frac{1}{2}f(x)+\frac{1}{2c}G(x)\,,\\
 \phi(t,L) =0\,,
\end{cases}
\\
\begin{cases}
\psi_t+c\psi_x=0\,,\\
\psi(0,x) = \frac{1}{2}f(x)-\frac{1}{2c}G(x)\,,\\
 \psi(t,-L) =0 \,,
\end{cases}
\end{gathered}
\label{sist5}
\end{equation}
where both values $\phi(t,-L)$ and $\psi(t,L)$ are provided for all times by
the solutions. Thus, we see that this formulation enables us to build the
appropriate boundary conditions to our original problem \eqref{sist1}. Also, if
the values of both $v$ and $w$ (or, equivalently, $\phi$ and $\psi$) are known
at the initial time, all references to both $f$ or $G$ can be suppressed in this
new problem.


\subsection{Discrete level}

At discrete level the new first order equations \eqref{eqleft} and
\eqref{eqright} may be represented in different ways, either explicitly or
implicitly, and some authors have considered different approaches \cite{eh,ii}.
If we want to have a scheme that gives a similar accuracy as
\eqref{scheme}, we may choose a representation of the time derivative given by
the second order centered difference:
\begin{equation}
\frac{\phi^{n+1}_{l}-\phi^{n-1}_{l}}{2\Delta t}\,.
\end{equation}
In the case of the spatial derivative, if we also use the
centered second order representation given by
\begin{equation}
\frac{\phi^{n}_{l+1}-\phi^{n}_{l-1}}{2\Delta x}\,,
\end{equation}
we end with the ``leap-frog'' numerical scheme, which is known to be unstable,
a property that renders it useless for our needs.

We start looking for a stable scheme. Let us consider the so-called
downwind method:
\begin{gather}
\frac{\phi^{n+1}_{l}-\phi^{n}_{l}}{\Delta t} - c
\frac{\phi^{n}_{l+1}-\phi^{n}_{l}}{\Delta x}=0
\\
\iff \phi^{n+1}_{l} = (1- \gamma) \phi^{n}_{l} + \gamma
\phi^{n}_{l+1}\,.
\end{gather}
It is stable provided $0<\gamma\le 1$.
The inconvenience is that it is only first-order
accurate, instead of second-order as \eqref{scheme}.
But we see that only values at $l$ and
to its right are needed. We represent the corresponding grid in Figure \ref{fig2}.

\begin{figure}[htb]
\begin{center}
\includegraphics[width=0.3\textwidth]{fig2} % molecula2
\end{center}
\caption{Values needed to compute $\phi^{n+1}_l$.}
\label{fig2}
\end{figure}

The standard second-order stable (for $\gamma\le 1$) numerical scheme is
the Lax-Wendroff method, given by
\begin{equation}
\phi^{n+1}_l = \phi^n_l + \frac{\gamma}{2} \left(\phi^n_{l+1}-\phi^n_{l-1}\right)
+ \frac{\gamma^2}{2} \left(\phi^n_{l+1}-2\phi^n_{l}+\phi^n_{l-1}\right),
\end{equation}
but it has the inconvenience of using values on the left side of $l$.
In fact, most of the good properties of this scheme
(for instance, it is conservative) comes from the fact that it has a
symmetric disposition of the points it uses.

We may try to build a second-order scheme that only uses points to the right. We
have, for instance
\begin{equation}
\phi^{n+1}_l = \frac{2-3\gamma+\gamma^2}{2}\, \phi^n_l
+ \gamma(2-\gamma) \phi^n_{l+1}
+ \frac{\gamma}{2}(\gamma-1)\phi^n_{l+2}\,,
\end{equation}
that has a truncation error
\begin{equation}
-\gamma \frac{2-3\gamma+\gamma^2}{6}\,
\Delta x^3\, \phi_{xxx}(\tilde t,\tilde x)\,.
\end{equation}
Although this looks fine, we have to understand that \eqref{scheme}
being of second-order implies that the truncation error in
$\varphi^{n+1}_l$ is ${\mathcal O}(\Delta x^4)$, while
it is only ${\mathcal O}(\Delta x^3)$ for $\phi^{n+1}_l$.
This is due to the fact of the continuous equation being of first order,
and it means that we need not a second-order
scheme but a third-order one to obtain the same kind of precision.
For instance:
\begin{equation} \label{scheme1}
\begin{aligned}
\phi^{n+1}_l
&=  \frac{6-11\gamma+6\gamma^2-\gamma^3}{6}\, \phi^n_l
+ \frac{6\gamma-5\gamma^2+\gamma^3}{2}\, \phi^n_{l+1}
 \\
&\quad + \frac{4\gamma^2-3\gamma-\gamma^3}{2}\, \phi^n_{l+2}
+ \frac{2\gamma-3\gamma^2+\gamma^3}{6}\, \phi^n_{l+3}\,,
\end{aligned}
\end{equation}
with truncation error
\begin{equation}
-\frac{\gamma}{24}(6 -11\gamma +6\gamma^2 -\gamma^3)\,
\Delta x^4\, \phi_{xxxx}(\tilde t,\tilde x)\,.
\end{equation}
This scheme is stable provided
\begin{equation}
0<\gamma\,,\quad
\frac{6-11\gamma+6\gamma^2-\gamma^3}{6}\le 1\,,
\end{equation}
which is achieved if $0<\gamma \le 1$ (although there are other possibilities).
We represent the new grid in Figure \ref{fig3}.

\begin{figure}[htb]
\begin{center}
\includegraphics[width=0.6\textwidth]{fig3} % molecula3
\end{center}
\caption{Values needed to compute $\phi^{n+1}_l$ with the third-order
scheme (left side).}
\label{fig3}
\end{figure}

With such a numerical scheme, we may compute terms with no reference
to any left hand-side boundary condition. We may combine it with the usual
(and much simpler) method \eqref{scheme}: we compute the values at time $n+1$
with \eqref{scheme} and then, using \eqref{scheme1} with $l=\ell$,
the value on the left boundary at time step $n+1$.

In this way we have transparent boundary conditions at discrete level. A similar
approach can be used on the right boundary. Changing the sign of $c$
(that is, changing the sign of $\gamma$), $\phi$ by $\psi$ and inverting the
relative positions with respect to $l$ (which induces some changes in the
underlying Taylor expansions), we end with:
\begin{equation} \label{scheme2}
\begin{aligned}
\psi^{n+1}_l
&=  \frac{6-11\gamma+6\gamma^2-\gamma^3}{6}\, \psi^n_l
+ \frac{6\gamma-5\gamma^2+\gamma^3}{2}\, \psi^n_{l-1} \\
&\quad + \frac{4\gamma^2-3\gamma-\gamma^3}{2}\, \psi^n_{l-2}
+ \frac{2\gamma-3\gamma^2+\gamma^3}{6}\, \psi^n_{l-3}
\,.
\end{aligned} %\label{schemeright}
\end{equation}
The corresponding grid is represented in Figure \ref{fig4}.

\begin{figure}[htb]
\begin{center}
\includegraphics[width=0.6\textwidth]{fig4} % molecula4
\end{center}
\caption{Values needed to compute $\psi^{n+1}_l$ with the third-order scheme
(right side).}
\label{fig4}
\end{figure}

In both cases, we have to assume that the only signal that is near each one of these
boundaries travels in the appropriate direction. We can ensure this choosing $L$ (and, thus,
$\ell$) sufficiently far away from the initial support. If we rescale the equations
in order to have $c=1$, the support is enlarged by one step in both space
directions for every step in time. Thus, we only use the auxiliary schemes when
$\varphi^n_{-\ell+1}$ or $\varphi^n_{\ell-1}$ are no longer zero.

By the way: if $\gamma=1$ \eqref{scheme1} and \eqref{scheme2} become,
respectively
\begin{equation}
\begin{gathered}
\phi^{n+1}_l =  \phi^n_{l+1}\,,\\
\psi^{n+1}_l =  \psi^n_{l-1}\,,
\end{gathered}
\end{equation}
and the solution is, again, exact up to round-off errors, as was the case of
the numerical method for the second order equation (see Section
\ref{gamaesuno}).

\subsection{Numerical simulations}
\label{clasico-num}

The idea now is to simulate the solution to \eqref{sist1} computing
\eqref{scheme} and starting, for instance, with the exact initial data, and
using both \eqref{scheme1} and \eqref{scheme2} to compute the boundary values.
For this, we choose a region $[-L,L]$ wide enough, such that when the
perturbation reaches any of its two ends, it is only the appropriate traveling
wave that is seen there and, thus: $\varphi_{-\ell}^n=\phi_{-\ell}^n$,
$\varphi_{\ell}^n=\psi_{\ell}^n$.

In the practical implementation, we may keep the values at both boundaries to
zero until the wave has arrived and compute from that moment the corresponding
values. Besides, only two data (those on the boundary) have to be computed,
one with \eqref{scheme1} and one with \eqref{scheme2}, and, thus, the additional
computational effort is minimal: it is not necessary to keep extra variables nor
to simulate new equations in the whole spatial region.


\begin{figure}[htb]
\begin{center}
\includegraphics[width=0.7\textwidth]{fig5} %perfilC1
\end{center}
\caption{Simulation of transparent boundary conditions for the wave equation,
$c=1$, $\gamma=1$}
\label{perfilC1}
\end{figure}

In Figure \ref{perfilC1} we represent the simulation with $c=1$, $\gamma=1$ of
the evolution of an initial profile given by
\begin{equation}
u(t,x) =
\begin{cases}
\frac{1-\cos(\pi(x-ct)/2)}{4} + \frac{1-\cos(\pi(x+ct)/2)}{4}\,,
&\text{if } 0<|x-t|\le 1,\\ \rule{0pt}{12pt}
0 &\text{otherwise}.
\end{cases}
\end{equation}
It corresponds to two similar traveling waves, one moving to the left, one to
the right. We see that there is no disturbance due to the boundary
conditions and the signal vanishes as it passes through the border.
To check the influence of the numerical errors, in Figure \ref{perfilC2} we
represent the evolution of the same profile but simulated with
$c =1/2$, $\gamma=0.5\,$.

\begin{figure}[htb]
\begin{center}
\includegraphics[width=0.7\textwidth]{fig6} %perfilC2
\end{center}
\caption{Simulation of transparent boundary conditions for the wave equation, $c
=1/2$, $\gamma=0.5$}
\label{perfilC2}
\end{figure}

We see no difference in behaviour, although the number of iterations has
doubled (since the new step-time has been halved), and the
signal leaves the central zone with no effect caused by the border.


\section{Spinor-like formalism}
\label{section13}

\emph{One-way} wave equations are partial differential equations that permit
wave propagation only in certain directions. Engquist and Majda \cite{em}
derived a theory to construct absorbing boundary conditions by factoring
the wave equation
\begin{equation}\label{waveEQ11D}
 u_{tt}-c^2u_{xx}=0
\end{equation}
into two different \emph{one-way} differential equations
(for a detailed factorization see \cite{ce,eh,em,th})
\begin{equation}\label{waveEQ21D}
 u_{t} \pm c\sqrt{\partial_{xx}}\,u=0\,,
\end{equation}
where $\sqrt{\partial_{xx}}$ represents a pseudo-differential operator that is not
local in the space variable. Due to this fact, this operator,
in one and higher dimensions, must be approximated using a wide variety
of equations involving higher order derivatives \cite{th,eh, trh} and
in some cases this gives rise to an ill-posed boundary problem \cite{h, tr}.
More recently Ionescu and Igel \cite{ii} proposed a different factorization
of the wave equation valid only for spherical coordinates.

In 1928, in a completely different context, in order to avoid this complex
formulation,
and in his search for a covariant expression of the Schr{\"o}dinger equation
\cite{dirac}, Dirac proposed a
matrix and vector (or {\it spinor}) formalism. In two dimensions it amounts to
look for matrices $2\times 2$, $A$ and $D$, such that:
\begin{equation}
\begin{aligned}
\frac{\partial^2}{\partial x^2}-\frac{1}{c^2}\frac{\partial^2}{\partial t^2}
&=  \Big(A\partial_x +\frac{i}{c}D\partial_t\Big)^2 \\
&=  A^2\partial_{xx} + \frac{i}{c}(AD+DA)\partial_{tx}
-\frac{1}{c^2}D^2\partial_{tt}.
\end{aligned} \label{dirac}
\end{equation}
To recover the wave equation from the previous expression, $A$ and $D$ must
satisfy the following algebra:
\begin{equation}
\begin{gathered}
A^2=I,\\ D^2=I,\\ AD+DA=O.
\end{gathered}\label{sistema}
\end{equation}
Solutions are obtained taking $A$ and $D$ among the three Pauli
matrices:
\begin{equation}
\sigma_1 = \begin{pmatrix} 0&1\\ 1&0\end{pmatrix},\quad
\sigma_2 = \begin{pmatrix}0&-i\\ i&0\end{pmatrix},\quad
\sigma_3 = \begin{pmatrix}1&0\\ 0&-1\end{pmatrix},
\end{equation}
for instance:
\begin{equation}
A=\begin{pmatrix} 0&-i\\ i&0\end{pmatrix},\quad D
=\begin{pmatrix}1 &0\\ 0 &-1\end{pmatrix}.
\label{gama0y1}
\end{equation}
This kind of idea has been used in different contexts, resulting in
interesting formulations for differential models
 (see, for instance, \cite{v,pv,v2}).

The method is very general and in principle it could be extended to 
decompose differential operators of at least second order and it also 
could be applied to hyperbolic and parabolic problems in the spirit 
of computing general roots of an operator indicated in \cite{v}.
A priori the main problem could be an algebraic one associated with the
implementation of a certain algebra, as indicated in \cite{v}
 with the Sylvester algebra. In this process a new 
differential equation is generated where the unknown function now 
is multicomponent.

The decomposition of the differential operators can be used for nonlinear 
problems but in that case the boundary includes some border effects 
that cannot be addressed directly by this linear approach.

We shall apply similar ideas in what follows to transform the wave equation
into a set of one-way differential equations. Choosing the representation
for the matrices, it is possible to decompose the problem into waves
traveling in opposite directions as is done with the other implementations.

We start by splitting the initial data of \eqref{sist1} into the components
traveling to the left and to the right, as given in the previous section, and
define
\begin{equation}
U_0(x)=\begin{pmatrix}v_0(x)\\ w_0(x)\end{pmatrix},\quad 
U'_0(x) =\begin{pmatrix}v'_0(x)\\ w'_0(x)\end{pmatrix}.
\end{equation}
We also define $U$, a two-component vector,
\begin{equation}
U(t,x)=\begin{pmatrix}v(t,x)\\ w(t,x)\end{pmatrix},
\end{equation}
with $v$ and $w$ two real functions. We shall show that they correspond to
the left and right traveling components of the solution of \eqref{sist1}, when
the following problem is considered
\begin{equation}
\begin{gathered}
U_{t} \pm c M U_{x} = \mathcal{O} \,,\quad 
t\ge 0,\; x\in(-\infty,+\infty)\,,\\
U(0,x) = U_0(x) \,,\\
U_t(0,x) = U'_0(x) \,,
\end{gathered}
\label{spin1dim}
\end{equation}
where $\mathcal{O}$ stands for the null vector and $M$ is an involutory 
matrix, i.e., such that $M^2=I$.
We have that
\begin{equation}
u(t,x)=v(t,x)+w(t,x)\,,\quad 
\forall t\ge 0,\; \forall x\in(-\infty,\infty),
\end{equation}
with $u(t,x)$ the solution of \eqref{sist1}.
Indeed, assuming $U$ to be regular enough,  on the one hand we have
\begin{equation}
\begin{aligned}
U_{t} \pm c M U_{x} = \mathcal{O} 
&\to U_{tt} \pm c M U_{xt} =\mathcal{O}  \\
&\iff U_{tt} \pm c M U_{tx} =\mathcal{O}  \\
&\iff U_{tt} - c^2 M^2 U_{xx} =\mathcal{O}  \\
&\iff \left\{\begin{array}{l} v_{tt}-c^2 v_{xx}=0\,,\\ w_{tt}-c^2 w_{xx}=0\,,
\end{array}\right\}
\end{aligned}
\end{equation}
and on the other hand,
\begin{equation}
\left\{\begin{array}{l}
v_0(x)+w_0(x)=f(x)\,,\\
v'_0(x)+w'_0(x)=g(x)\,,
\end{array}
\right\}
\iff
\left\{\begin{array}{l}
v(0,x)+w(0,x)=u(0,x)\,,\\
\rule{0pt}{12pt}
v_t(0,x)+w_t(0,x)=u_t(0,x)\,.
\end{array}
\right\}
\end{equation}

We see that equation \eqref{waveEQ21D} can be expressed using the Dirac-like
equation of the initial value and boundary problem \eqref{spin1dim}, considering a
two-dimensional real quantity $U(t,x)$ as the variable. Due to its transformation
properties, and following with the Dirac analogy, we may call it a {\it
spinor}. We may now transform our original problem
\eqref{sist1} into \eqref{spin1dim}. Since the sign that appears in the equation
is irrelevant, we have chosen a positive sign, and our ``Dirac equation'' for
this case is, finally,
\begin{equation}\label{D-WaveEQ1D}
 U_t + c MU_x=\mathcal{O}.
\end{equation}
Although our procedure looks different from \eqref{dirac} and \eqref{sistema},
it can be shown to be similar, just considering an appropriate $M$.
For instance, if we chose $A$ and $D$ as in \eqref{gama0y1}, we have $
M=-iAD^{-1}$, a real involutory matrix.

Involutory matrices of dimensions $2\times 2$ are of two kinds:
 $M=\pm I$ or
\begin{equation}\label{A1D}
 M= \pm  \begin{pmatrix}
 a& \beta \\
 \delta & -a \\
 \end{pmatrix}
\end{equation}
with $\beta\delta=1-a^2$. Choosing a specific matrix $M$ is equivalent to 
fixing the Dirac gauge. We shall consider in what follows only symmetric matrices, 
for the sake of simplicity. This supposes that, besides the somewhat trivial choices
$\pm I$, matrix $M$ is of the form
\begin{equation}
M=\begin{pmatrix}
\sin\alpha &\cos\alpha\\
\cos\alpha &-\sin\alpha
\end{pmatrix}
\end{equation}
where $\alpha$ is some angle to be fixed if necessary.
An involutory, symmetric, matrix is orthogonal and we see that our choice of $M$
corresponds to the matrix of a reflection.

\subsection{Incident wave at $x=-L$}
\label{section131}

An incident wave at $x=-L$ with negative wave number is represented in this case
by
\begin{equation}\label{IncidPW1D}
 U_{I}=\begin{pmatrix} a\\ b\end{pmatrix}e^{i(\omega t + k x)}, \quad k>0.
\end{equation}
Such a wave induces, due to reflection at the boundary $x=-L$, 
a reflected plane wave traveling backwards of the form
\begin{equation}\label{ReflPW1D}
 U_{R}=T\begin{pmatrix} a\\ b\end{pmatrix}
e^{i(\omega t + k x)}+R\begin{pmatrix} a\\ b\end{pmatrix}e^{i(\omega t - k x )},
\end{equation}
where $T$ and $R$ are, respectively, the transmission and the reflection 
coefficients.

It can be checked that at the boundary $x=-L$ we have
\begin{equation} \label{ders1D}
\begin{gathered}
\partial_x U_{R}=i k (Te^{-ikL}-Re^{ikL})
\begin{pmatrix} a\\ b\end{pmatrix}e^{i\omega t}, \\
\partial_t U_{R}=i \omega (Te^{-ikL}+Re^{ikL})
\begin{pmatrix} a\\ b\end{pmatrix}e^{i\omega t}.
\end{gathered} 
\end{equation}
Introducing \eqref{ReflPW1D} and \eqref{ders1D} into \eqref{D-WaveEQ1D} 
and setting $x=-L$, we obtain the linear algebraic system
\begin{equation}\label{syst1D}
[\omega(Te^{-ikL}+Re^{ikL}) I-ck(Te^{-ikL}-Re^{ikL})M]
\begin{pmatrix} a\\ b\end{pmatrix} =0\,.
\end{equation}
This equation results from considering that the superposition of transmitted 
and reflected waves coincides at the boundary and that the Dirac equation 
\eqref{D-WaveEQ1D} holds.

Different representations of matrix $M$ result in different equations.
Trivial cases are $M=I$, that gives the condition $R=0$, and
$M=-I$, that gives the condition $T=0$. These two cases are trivial
since they correspond to the Dirac form of the scalar square root of the
 wave equation and represent waves traveling in one single direction. 
They are, however, useless for our purpose, since what we want is to decompose
the wave packet into its left- and right-traveling parts.

A nontrivial case arises when other possible forms of matrix $M$ are used, and
the necessary condition for system \eqref{syst1D} to have a
nontrivial solution is the vanishing of the determinant of its matrix,
\begin{equation}
\begin{aligned}
&\big|\omega(Te^{-ikL}+Re^{ikL})I-ck(Te^{-ikL}-Re^{ikL})M\big|=0
\\
&\iff (\omega^2-c^2k^2)(T^2e^{-2ikL}+R^2e^{2ikL})+2RT(\omega^2+c^2k^2)=0\,.
\end{aligned} \label{det21D}
\end{equation}
Given the dispersion relation for the plane waves,
\begin{equation}
 \omega^2=c^2k^2,
\end{equation}
this equation has two solutions, $R=0$, $T\in\mathbb{R}$, and $T=0$, $R\in\mathbb{R}$.

In the two trivial cases $M=\pm I$, since the system has a 
diagonal matrix, all the spinors are solutions of \eqref{syst1D}.

In other representations of matrix $M$, special spinor solutions can be 
found for null values of $T$ and $R$. These spinors are the 
``special directions'' for the operators
``transmission'' and ``reflection''.
If we look for the spinors associated with every value of the reflection coefficient
obtained with the other realizations of matrix $M$, we have for $R=0$,
\begin{equation}\label{abR01D}
 \Psi_T=\begin{pmatrix} a\\ b\end{pmatrix} 
=\begin{pmatrix}
 1 \\
 \frac{1-\sin{\alpha}}{\cos{\alpha}}
 \end{pmatrix}a \,,
\end{equation}
and for $R=1$,
\begin{equation}%\label{abR11D}
 \Psi_R=\begin{pmatrix} a\\ b\end{pmatrix} 
=\begin{pmatrix}
 1 \\
 \frac{-1-\sin{\alpha}}{\cos{\alpha}}
 \end{pmatrix}a \,.
\end{equation}

Then matrix $M$ can be diagonalized in the basis of vectors
\begin{equation}
\mathcal{B}=\{U_1,U_2\},\quad U_1
=\begin{pmatrix}-\cos\alpha\\
\sin\alpha-1\end{pmatrix},\quad
 U_2=\begin{pmatrix}-\cos\alpha \\ \sin\alpha+1\end{pmatrix},
\end{equation}
with canonical form
\begin{equation}\label{canonical}
D=\begin{pmatrix}1 &0\\ 0&-1\end{pmatrix}.
\end{equation}
It is possible to see that at $x=-L$, the component along vector
$U_2$ is not transmitted to the left, while the component along
$U_1$ passes without distortion. We may, in this way, obtain
transparent boundary conditions at $x=-L$. Let us consider our
solution to \eqref{spin1dim}. We decompose the vector in the basis
$\mathcal B$:
\[
U=a_1U_1+a_2U_2 \iff \begin{pmatrix}a_1\\ a_2\end{pmatrix}=NU\,,\quad
N=\frac{1}{2\cos\alpha}\begin{pmatrix}-\sin\alpha-1 &-\cos\alpha\\
\sin\alpha-1 &\cos\alpha\end{pmatrix},
\]
and we can establish the dynamics for each component:
\begin{equation}
\begin{gathered}
(a_1)_{tt} -c^2 (a_1)_{xx} = 0 \,,\quad
t\ge 0,\; x\in(-\infty,+\infty)\,,\\
 a_1(0,x) = -\frac{\sin\alpha+1}{2\cos\alpha} v_0(x)-\frac12 w_0(x) \,,\\
  (a_1)_t(0,x) =
-\frac{\sin\alpha+1}{2\cos\alpha} v'_0(x)-\frac12 w'_0(x) \,,
\end{gathered}
\end{equation}
and
\begin{equation}
\begin{gathered}
(a_2)_{tt} -c^2 (a_2)_{xx} = 0 \,,\quad
t\ge 0,\; x\in(-\infty,+\infty)\,,\\
 a_2(0,x) = \frac{\sin\alpha-1}{2\cos\alpha} v_0(x)+\frac12
w_0(x)\,,\\
(a_2)_t(0,x) =
\frac{\sin\alpha-1}{2\cos\alpha} v'_0(x)+\frac12 w'_0(x)\,,
\end{gathered}
\end{equation}
or, in spinor form, if we define $V=NU$:
\begin{equation}\label{components}
\begin{gathered}
V_{t} +c D V_{x} = 0 \,,\quad 
t\ge 0,\; x\in(-\infty,+\infty)\,,\\
V(0,x) = NU_0(x) \,,\\
V_t(0,x) = NU'_0(x) \,,
\end{gathered}
\end{equation}
where
\begin{equation}
N^{-1}=\begin{pmatrix}-\cos\alpha &-\cos\alpha \\ \sin\alpha-1
&\sin\alpha+1\end{pmatrix},
\end{equation}
and $D=NMN^{-1}$ is the canonical form \eqref{canonical}.

Finally we have
\begin{equation}
\begin{aligned}
u(t,x) &=  \begin{pmatrix} 1 &1\end{pmatrix} 
\begin{pmatrix}v\\ w\end{pmatrix} 
= \begin{pmatrix} 1 &1\end{pmatrix}N^{-1} 
\begin{pmatrix}a_1\\ a_2\end{pmatrix} \\
&=  -(1-\sin\alpha+\cos\alpha)a_1(t,x)+(1+\sin\alpha-\cos\alpha)\,
a_2(t,x).
\end{aligned}
\end{equation}
The special spinor problem \eqref{components} represents a pair of
independent waves, $a_1(x,t)=a_1(x+ct)$ traveling to the left, and
another $a_2(x,t)=a_2(x-ct)$ traveling to the right.

When the signal reaches the left boundary, the $a_1$ component vanishes and if
we want the whole signal also to vanish to the left of $-L$, we may choose
$\alpha=2k\pi$ or $\alpha=\frac{3\pi}{2}+2k\pi$, $k=1,2,3,\ldots$

The first choice ($\alpha=2k\pi$, $k=1,2,3,\ldots$) corresponds to
\begin{equation}
M=\begin{pmatrix} 
0 & 1\\
1 & 0
\end{pmatrix},
\end{equation}
which is equivalent to the coupled formulation of the wave equation
\begin{equation}
\begin{gathered}
v_t + c w_x =0 \\
w_t + c v_x = 0.
\end{gathered}
\end{equation}
In this case the basis of spinors is:
\begin{equation}\label{abmixed}
\Psi_T=\begin{pmatrix}
 1 \\
 1
 \end{pmatrix} \,,
\quad 
 \Psi_R=\begin{pmatrix}
 1 \\
 -1
 \end{pmatrix} \,.
\end{equation}

For the second choice ($\alpha=\frac{3\pi}{2}+2k\pi$, $k=1,2,3,\ldots\,$), we
particularize the computation for the case in which the waves are decomposed.
By reformulating from the beginning we obtain the spinors:
\begin{equation}%\label{abR01D}
 \Psi_T=\begin{pmatrix}
 1 \\
 0
 \end{pmatrix}a \,,
\end{equation}
and for $R=1$,
\begin{equation}%\label{abR11D}
 \Psi_R=\begin{pmatrix}
 0 \\
 1
 \end{pmatrix}a \,.
\end{equation}
Expressing now the spinor formalism for these new values, we obtain the
matrix
\begin{equation}
M=\begin{pmatrix}
-1 & 0\\
0 & 1
\end{pmatrix}.
\end{equation}
This uncouples the equations into two one-dimensional wave equations, that are
just the ones treated in Section \ref{clasico}. We thus see that the spinor 
formalism is a natural way to decompose the original problem into its components 
and to obtain the first-order differential equations we have used before. 
Numerical simulations can then be performed using the Lax-Wendroff scheme with 
the third-order scheme at the corresponding boundary end. 
In Figures \ref{LWperfilC1} and \ref{LWperfilC2}, we have represented the
numerical solution for the left and right-traveling components 
(same initial values as before): there are no reflections at the boundaries 
when the waves reach them.

\begin{figure}[htb]
\begin{center}
\includegraphics[width=0.7\textwidth]{fig7} % LWperfil1
\end{center}
\caption{Simulation of transparent boundary conditions for the Dirac equation, $c
=1$, $\gamma=1$, left-traveling wave.}
\label{LWperfilC1}
\end{figure}

\begin{figure}[htb]
\begin{center}
\includegraphics[width=0.7\textwidth]{fig8} % LWperfil2
\end{center}
\caption{Simulation of transparent boundary conditions for the Dirac equation, $c
=1$, $\gamma=1$, right-traveling wave.}
\label{LWperfilC2}
\end{figure}


\subsection{Incident wave at $x=L$}

We treat now the case of the incident wave at the opposite boundary. 
All the computations are similar to what is done in Section~\ref{section131}, 
but they involve an incident wave at $x=L$ with positive wave number, 
represented by
\begin{equation}\label{IncidPW1D-L}
 U_{I}=\begin{pmatrix} a\\ b\end{pmatrix}e^{i(\omega t - k x)}, \quad k>0.
\end{equation}
The reflected wave at the other boundary is now:
\begin{equation}\label{ReflPW1D-L}
 U_{R}=T\begin{pmatrix} a\\ b\end{pmatrix}e^{i(\omega t - k x)}
+R\begin{pmatrix} a\\ b\end{pmatrix}e^{i(\omega t + k x )},
\end{equation}
and at the boundary $x=L$ we have
\begin{equation} \label{ders1D-L}
\begin{gathered}
\partial_x U_{R}=i k (-Te^{-ikL}+Re^{ikL})
\begin{pmatrix} a\\ b\end{pmatrix}e^{i\omega t}, \\
\partial_t U_{R}=i \omega (Te^{-ikL}+Re^{ikL})
\begin{pmatrix} a\\ b\end{pmatrix}e^{i\omega t}.
\end{gathered} 
\end{equation}
We follow what was done in Section~\ref{section131}, and obtain at
$x=L$ the linear algebraic system
\begin{equation}\label{syst1D-L}
[\omega(Te^{-ikL}+Re^{ikL}) I-ck(-Te^{-ikL}+Re^{ikL})M]
\begin{pmatrix} a\\ b\end{pmatrix} =0\,.
\end{equation}
Here again, different choices for the matrix $M$ will result in different equations.
The trivial case $M= I$ gives the condition $T=0$ while the second 
trivial case, with $M=- I$, gives the condition $R=0$.

With some other form for matrix $M$ we obtain nontrivial cases. We annihilate the
determinant of system \eqref{syst1D-L} to obtain a necessary condition for a 
nontrivial solution:
\begin{equation} \label{det1D-L}
\begin{aligned}
&\big|\omega(Te^{-ikL}+Re^{ikL}) I-ck(-Te^{-ikL}+Re^{ikL})M
\big|=0
 \\
&\iff (\omega^2-c^2k^2)(T^2e^{-2ikL}+R^2e^{2ikL})+2RT(\omega^2+c^2k^2)=0\,.
\end{aligned}
\end{equation}
The two solutions are, as in the other boundary, $R=0$ and $T=0$.

In the two trivial cases mentioned above, $M=\pm  I$, all the spinors are
solutions of \eqref{syst1D-L}. In the nontrivial case, we have for $R=0$:
\begin{equation}\label{abR01D-L}
 \Psi_T=\begin{pmatrix} a\\ b\end{pmatrix} 
=\begin{pmatrix}
 1 \\
 \frac{-1-\sin{\alpha}}{\cos{\alpha}}
 \end{pmatrix}a \,,
\end{equation}
and for $R=1$:
\begin{equation}\label{abR11D-L}
 \Psi_R=\begin{pmatrix} a\\ b\end{pmatrix} 
=\begin{pmatrix}
 1 \\
 \frac{1-\sin{\alpha}}{\cos{\alpha}}
 \end{pmatrix} a \,.
\end{equation}

The basis of spinors in which $M$ is diagonal is now
\begin{equation}
\mathcal{B}=\{U_1,U_2\},\quad U_1
=\begin{pmatrix}-\cos\alpha\\
1+\sin\alpha\end{pmatrix},\quad
 U_2=\begin{pmatrix}-\cos\alpha \\ \sin\alpha-1\end{pmatrix},
\end{equation}
and the canonical form is
\begin{equation}\label{canonical-L}
D=\begin{pmatrix}-1 &0\\ 0&1\end{pmatrix}.
\end{equation}
At $x=L$, the component along vector
$U_2$ is not transmitted to the right, while the component along $U_1$ passes without
distortion, which corresponds to the transparent boundary conditions at $x=L$. 
Decomposing the solution to \eqref{spin1dim} in this new basis, we have
\[
U=a_1U_1+a_2U_2 
\iff \begin{pmatrix}a_1\\ a_2\end{pmatrix}=NU\,,\quad
N=\frac{1}{2\cos\alpha}\begin{pmatrix} \sin\alpha-1 &\cos\alpha\\
-(1+\sin\alpha) &-\cos\alpha\end{pmatrix}.
\]
The dynamics for each component corresponds to
\begin{equation}
\begin{gathered}
(a_1)_{tt} -c^2 (a_1)_{xx} = 0 \,,\quad
t\ge 0,\; x\in(-\infty,+\infty)\,,\\
 a_1(0,x) = \frac{\sin\alpha-1}{2\cos\alpha} v_0(x)+\frac12 w_0(x) \,,\\
  (a_1)_t(0,x) =
\frac{\sin\alpha-1}{2\cos\alpha} v'_0(x)+\frac12 w'_0(x) \,,
\end{gathered}
\end{equation}
and to
\begin{equation}
\begin{gathered}
(a_2)_{tt} -c^2 (a_2)_{xx} = 0 \,,\quad
t\ge 0,\; x\in(-\infty,+\infty)\,,\\
 a_2(0,x) = -\frac{1+\sin\alpha}{2\cos\alpha} v_0(x)-\frac12
w_0(x)\,,\\
(a_2)_t(0,x) =
-\frac{1+\sin\alpha}{2\cos\alpha} v'_0(x)-\frac12 w'_0(x)\,.
\end{gathered}
\end{equation}
Represented in spinor form, with $V=NU$, this gives
\begin{equation}\label{components-L}
\begin{gathered}
V_{t} +c D V_{x} = 0 \,,\quad
t\ge 0,\; x\in(-\infty,+\infty)\,,\\
V(0,x) = NU_0(x) \,,\\
V_t(0,x) = NU'_0(x) \,,
\end{gathered}
\end{equation}
where
\begin{equation}
N^{-1}=\begin{pmatrix}-\cos\alpha &-\cos\alpha \\ 1+\sin\alpha
&\sin\alpha-1\end{pmatrix},
\end{equation}
and $D=NMN^{-1}$ is the canonical form \eqref{canonical-L}.

Although this looks exactly the same as \eqref{components}, 
it is necessary to point out that, here, the diagonal matrix $D$ is 
the opposite of \eqref{canonical}. Also, the matrix
$N$ and its inverse $N^{-1}$ have changed. This is clearly seen in our final 
representation of the solution
\begin{equation}
\begin{aligned}
u(t,x) &=  \begin{pmatrix} 1 &1\end{pmatrix} 
\begin{pmatrix}v\\ w\end{pmatrix}
= \begin{pmatrix} 1 &1\end{pmatrix}N^{-1} 
\begin{pmatrix}a_1\\ a_2\end{pmatrix} \\
&=  (1+\sin\alpha-\cos\alpha)\, a_1(t,x)-(1-\sin\alpha+\cos\alpha)\,
a_2(t,x).
\end{aligned}
\end{equation}

The interpretation is now that problem \eqref{components-L}, 
in spinor form, represents a
pair of independent waves, $a_1(x,t)=a_1(x+ct)$ traveling to the right, and
another $a_2(x,t)=a_2(x-ct)$ traveling to the left, so that when the signal
reaches the right boundary, the $a_1$ component is lost.
If we want the whole signal to vanish to the right of
$L$, we may choose $\alpha=\frac{\pi}{2}+2n\pi$ or 
$\alpha=\pi+2n\pi$, $n=1,2,3,\ldots$

As before, if we choose $\alpha=\frac{\pi}{2}+2n\pi$, $n=1,2,3,\ldots\,$, 
it is necessary to reformulate the spinors:
\begin{equation}%\label{abR01D}
 \Psi_T=\begin{pmatrix}
 0 \\
 1
 \end{pmatrix}a \,,
\end{equation}
and for $R=1$,
\begin{equation}%\label{abR11D}
 \Psi_R=\begin{pmatrix}
 1 \\
 0
 \end{pmatrix}a\,.
\end{equation}
With the new spinors, matrix $M$ is now
\begin{equation}
M=\begin{pmatrix}
1 & 0\\
0 & -1
\end{pmatrix}.
\end{equation}
Once again, this uncouples the equations into two one-dimensional wave equations, 
that are just the ones treated in Section \ref{clasico}.

The other values of $\alpha=\pi+2n\pi$, $n=1,2,3,\ldots\,$, give rise, again, to
the coupled formulation for the wave equation
\begin{equation}
 \begin{gathered}
v_t - c w_x =0 \\
w_t - c v_x = 0,
\end{gathered} 
\end{equation}
with the same spinors $\Psi_T$ and $\Psi_R$ written in \eqref{abmixed}.

\section{Wave equation in three dimensions with radial symmetry}



\begin{figure}[htb]
\begin{center}
\includegraphics[width=13cm]{fig9} % radial1
\end{center}
\caption{Computation of $\mathcal U(t,r)$ with a transparent boundary condition at
$r=10$.}
\label{radial1}
\end{figure}

\begin{figure}[htb]
\begin{center}
\includegraphics[width=13cm]{fig10} % radial2 -- was fig9, which duplicates the previous figure; presumably a typo
\end{center}
\caption{Computation of $u(t,r)$ with a transparent boundary condition at
$r=10$.}
\label{radial2}
\end{figure}


Let us consider the wave equation in three dimensions with radial symmetry.
We have
\begin{equation}
\begin{aligned}
u_{tt}-c^2\Delta u=0
&\iff u_{tt}-c^2\frac{1}{r^2}\;\frac{\partial}{\partial r}\left(r^2 u_r\right)=0
 \\
&\iff u_{tt}-c^2\big(u_{rr}+\frac{2}{r}\,u_r\big)=0\,.
\end{aligned}
\end{equation}
Performing the change: $\mathcal U=ru$, we have
\begin{equation}
\mathcal U_{tt}-c^2\mathcal U_{rr}=0\,,
\label{WaveEqU}
\end{equation}
and we may use the previous results in one spatial dimension to create the 
corresponding transparent boundary conditions. If we consider an initial 
value problem with radial symmetry, we can express it in terms of the
 new function as:
\begin{equation}
\left\{\begin{array}{l}
u_{tt}-c^2\left(u_{rr}+\frac{2}{r}\,u_r\right) = 0 \,,\\
t\ge 0,\; r\in[0,+\infty)\,,\\
u(0,r) = f(r) \,,\\
u_t(0,r) = g(r) \,,
\end{array}
\right\}
\iff
\left\{\begin{array}{l}
\mathcal U_{tt}-c^2\mathcal U_{rr}=0 \,,\\
t\ge 0,\; r\in[0,+\infty)\,,\\
\mathcal U(0,r) = rf(r) \,,\\
\mathcal U_t(0,r) = rg(r) \,,
\end{array}
\right\}
\label{sistr1}
\end{equation}
In this case, if we suppose that the initial data is regular at the origin, and
thus that $\forall t,\,u_r(t,0)=0$ due to the radial symmetry, we have a 
left boundary condition given by
\begin{equation}
 \mathcal U(t,0)=\lim_{r\to 0} ru(t,r)=0,\quad \forall t\,.
\end{equation}
On the other hand, since
\begin{equation}
\mathcal U_r(t,r)=u(t,r)+ru_r(t,r),
\end{equation}
we have, also by the regularity of the functions,
\begin{equation}
 \mathcal U_r(t,0)=\lim_{r\to 0} \Bigl(u(t,r)+ru_r(t,r)\Bigr)=u(t,0),\quad
\forall t\,.
\end{equation}
This may be used to reconstruct $u(t,r)$ from $\mathcal U(t,r)$ at $r=0$, since
this value cannot be obtained directly by inverting the change. 
Some other conditions can be derived for the derivatives of $\mathcal U$ at $r=0$.
 For instance,
\[
\mathcal U_{t}(t,0)=0\,,\quad
\mathcal U_{rr}(t,0)=0 \,,\quad
\mathcal U_{tt}(t,0)=0 \quad \forall t\,.
\]
In this radial symmetry case, the waves can only travel towards the right 
from the origin $r=0$,
but this does not exclude the possibility of the initial data having some
traveling component moving leftwards, towards the origin. Nevertheless, if
the initial data is of compact support, after some time all the signal will be
traveling to the right of the origin. We may thus establish a transparent
boundary condition at some suitable distance.


As for the numerical simulations, we may use the previous schemes, 
\eqref{scheme} for the general case with a left boundary condition at 
the origin, and \eqref{scheme2} at the right boundary.

In Figures \ref{radial1} and \ref{radial2} we represent, respectively, 
the profiles of $\mathcal U$ and $u$ for the initial data,
\begin{equation}
u(t,r) = \begin{cases}
\frac{1-\cos(\pi(r-ct)/2)}{4} - \frac{1-\cos(\pi(r+ct)/2)}{8}\,,
&\text{if } 0<|r-t|\le 1,\\ 
0 &\text{otherwise},
\end{cases}
\end{equation}
computed with $c=1$ and $\gamma=1$. This last
figure shows the decay in the amplitude of the solution as the signal 
travels away from the origin.

It is clear that we may also apply the spinor formalism of 
Section \ref{section13} to
equation \eqref{WaveEqU} and achieve the same results but under the 
alternative formulation.


\subsection*{Acknowledgments}

This work was partially supported by the Spanish Ministerio de Ciencia
e Innovaci\'on under grant AYA2011-29967-C05-02. 
We thank Prof.~Pedro J. Pascual for many fruitful discussions.


\begin{thebibliography}{99}

\bibitem{eh} B. Engquist, L. Halpern;
\emph{Long-time Behaviour of Absorbing Boundary Conditions},
 Mathematical Methods in the Applied Sciences, 13 (1990), 189--203.

\bibitem{ii} D.-C. Ionescu, H. Igel;
\emph{Transparent Boundary Conditions for Wave Propagation on Unbounded Domains}, 
Computational Science -- ICCS 2003 Lecture Notes in Computer Science, 2659
 (2003), 807--816.

\bibitem{ce} R. Clayton, B. Engquist;
\emph{Absorbing boundary conditions for acoustic and elastic wave equations}, 
Bull. Seis. Sot. Am., 67 (1977), 1524--1540.

\bibitem{em} B. Engquist, A. Majda;
\emph{Absorbing boundary conditions for the numerical simulation of waves}, 
Math. Comp., 31 (139) (1977), 629--651.

\bibitem{th} A. Taflove, S. C. Hagness;
\emph{Computational Electrodynamics: The finite difference time-domain method}, 
Artech House, 2000.

\bibitem{trh} L. Trefethen, L. Halpern;
\emph{Well-Posedness of One-Way wave equations and Absorbing Boundary Conditions}, 
Math. Comp., 47(176) (1986), 421--435.

\bibitem{h} L. Halpern;
\emph{Absorbing Boundary Conditions for the Discretization Schemes of 
the One-Dimensional Wave Equation}, Math. Comp., 28(158) (1982), 415--429.

\bibitem{tr} L. Trefethen;
\emph{Finite Difference and Spectral Methods for Ordinary and Partial 
Differential Equations}, Cornell University [Department of Computer 
Science and Center for Applied Mathematics], 1996.

\bibitem{dirac} P. A. M. Dirac;
\emph{The Quantum Theory of the Electron}, Proceedings of the Royal Society 
of London, Series A, 117(778) (1928), 610--624.

\bibitem{v} L. V\'azquez;
\emph{Fractional diffusion equation with internal degrees of freedom}, 
Jour. of Comp. Math., 21 (2003), 491--494.

\bibitem{pv} T. Pierantozzi, L. V\'azquez;
\emph{An interpolation between the wave and diffusion equations through
 the fractional evolution equations Dirac like}, 
Jour. of Math. Phys., 46 (2005), 113512.

\bibitem{v2} L. V\'azquez;
\emph{A Fruitful Interplay: From Nonlocality to Fractional Calculus}, 
in V.V. Konotop, F. Abdullaev (Eds.), Nonlinear Waves, 
Classical and Quantum Aspects, NATO Science Series, Kluwer Academic 
Publishers 2004, pp. 129--133.

\end{thebibliography}

\end{document}
\

