\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2016 (2016), No. 311, pp. 1--16.\newline
ISSN: 1072-6691. URL: \url{http://ejde.math.txstate.edu} or \url{http://ejde.math.unt.edu}
\thanks{\copyright 2016 Texas State University.}
\vspace{8mm}}

\begin{document}
\title[\hfilneg EJDE-2016/311\hfil Advance-delay differential-difference equations]
{Systems of advance-delay differential-difference equations and
  transformation groups}

\author[S. I. Iakovlev, V. Iakovleva \hfil EJDE-2016/311\hfilneg]
{Serguei I. Iakovlev, Valentina Iakovleva}

\address{Serguei I. Iakovlev \newline
Departamento de Matematicas,
Universidad Simon Bolivar,
 Apartado Postal 89000,
  Caracas 1080-A, Venezuela}
\email{serguei@usb.ve, serguei.rusa@gmail.com}

\address{Valentina Iakovleva\newline
Departamento de Matematicas,
Universidad Simon Bolivar,
 Apartado Postal 89000,
  Caracas 1080-A, Venezuela}
\email{romanova@usb.ve, valentina.iakovleva1@gmail.com}

\thanks{Submitted April 26, 2016. Published December 7, 2016.}
\subjclass[2010]{34K99, 47A10}
\keywords{Advance-delay differential-difference equations;
\hfill\break\indent step derivation method; uniqueness;
transformation group; infinitesimal generator; 
\hfill\break\indent point spectrum}

\begin{abstract}
 A linear system of mixed-type differential difference equations is studied.
 A step derivation method and some conditions on an initial function are
 used to guarantee  existence, uniqueness and smoothness of the solution.
 Further, a transformation group is defined on a complete countably normed
 space of initial functions and  the spectrum of the infinitesimal generator
 of this group is studied.
 The same technique applies to a linear system of retarded differential
 difference equations. A problem to extend solutions of such a system to
 the left on the real line is solved.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{corollary}[theorem]{Corollary}
\allowdisplaybreaks

\section{Introduction}

In this article a system of linear advance-delay differential difference 
equations is studied. Such differential difference equations of mixed type are 
also known as forward-back equations.
There are a number of interesting papers on applications of such equations,
for example, in physics \cite{bDel}, in economics \cite{kt}, and in biology \cite{cbh}.
In \cite{ISV} it is shown how such advance-delay differential difference 
equations appear and are investigated in the context of constructing 
eigenfunctions of the Steklov smoothing operator.
Among other articles in which forward-back equations
are studied we can also mention the following papers
(\cite{rus,rus1,mv,iv,fl,IV}).

This article consists of four sections. 
In Section 2 a step derivation method (analogous to the step integration method) 
is used to prove existence and uniqueness theorems for an initial value problem. 
To guarantee existence, uniqueness, and smoothness of the solution being 
constructed, the initial vector-valued function should satisfy a denumerable 
number of special conditions.

In Section 3 a transformation group associated with this system is defined
on a linear complete countably normed space of initial functions.
A strong differentiability of this group is established.
Next the spectrum of its infinitesimal generator is studied.
It is shown that this spectrum is a purely point one and consists of
a countable number of eigenvalues of finite multiplicity.
The corresponding eigenfunctions appear to be initial functions that generate 
solutions of exponential type to the homogeneous system of differential 
difference equations under investigation.

Section 4 is devoted to a system of linear retarded differential-difference equations.
This system is obtained and in essence considered as a particular case of 
the main system of advance-delay differential-difference equations studied in 
Sections 2 and 3. We consider the problem of extending solutions of such 
retarded equations to the left on $(-\infty\,,0\, ]$. This problem is solved 
by the same step derivation method. Further statements concerning these retarded 
equations turn out to be quite analogous to the ones
proved for the advance-delay equations.

\section{Existence and uniqueness theorems}

We study the following inhomogeneous system of $l$ linear differential 
difference equations of mixed type
\begin{equation}\label{1.1}
x'(t)=Ax(t+a)+Bx(t-a)+Cx(t)+f(t)\,, \quad t \in \mathbb{R}\,.
\end{equation}
Here $x(t)\in \mathbb{C}^{l}$,  $A,B,C$ are square complex $l\times l$ matrices
with $A$ and  $B$ invertible,
$a > 0$ is fixed, and $f\in C^{\infty} (\mathbb{R}\,,\mathbb{C}^{l})$.

A vector valued function $x(t)$ is assumed to be a solution of equation 
\eqref{1.1} in the sense of Carath\'{e}odory:
 $x(t)$ is absolutely continuous
in $\mathbb{R}$, i.e., $x\in AC(\mathbb{R}\,,\mathbb{C}^{l})$,
and satisfies equation  \eqref{1.1} almost everywhere in $\mathbb{R}$.
(In this case the absolute continuity of $x$ means the absolute continuity of 
every component of $x$ on any closed bounded interval of the real line.)

 A special case of this system is a  differential difference equation 
$2h\mu u'(t)=u(t+h)-u(t-h)$, where $u(t) \in \mathbb{C}$, $h>0$, and 
$\mu$ is a complex parameter, that appears in studying
the eigenvalue-eigenfunction problem for Steklov's smoothing operator \cite{ISV}.

The next simple proposition says that every absolutely continuous solution of  
\eqref{1.1} is in reality infinitely differentiable, and consequently 
 \eqref{1.1} holds at every point $t \in \mathbb{R}$.

\begin{proposition} \label{prop1.1}
If a function $x\in AC(\mathbb{R}\,,\mathbb{C}^{l})$
and satisfies the differential difference equation  \eqref{1.1}
almost everywhere in $\mathbb{R}$,
then $x\in C^{\infty} (\mathbb{R}\,,\mathbb{C}^{l})$
and  \eqref{1.1} is fulfilled at every point $t \in \mathbb{R}$.
\end{proposition}

\begin{proof}
If $F$ is one of the primitives of $f$, then  \eqref{1.1} can be written 
in the form
\begin{equation} \label{1.01}
\Bigl(x(t)-(A\int_0^{t}x(\tau+a)\,d\tau  +B\int_0^{t}x(\tau-a)\,d\tau 
 +C\int_0^{t}x(\tau)\,d\tau+F(t))\Bigr)'=0\,.
\end{equation}
Integrating \eqref{1.01} from $0$ to $t$, we obtain
\begin{equation}\label{1.02}
x(t)=A\int_0^{t}x(\tau+a)\,d\tau
+ B\int_0^{t}x(\tau-a)\,d\tau + C\int_0^{t}x(\tau)\,d\tau
+ F(t) + c_{x}
\end{equation}
with the constant $c_{x}=x(0)-F(0)$. As $x(t)$ is continuous, the
right-hand side of  \eqref{1.02},
and consequently its left-hand side equal to $x$,
are of class $C^{1} (\mathbb{R})$. If we assume now that
$x\in C^{n}(\mathbb{R})$ for some arbitrary $n \in \mathbb{N}$,
then the right-hand side of  \eqref{1.02} will be  of class
$C^{n+1}(\mathbb{R})$ and hence $x\in C^{n+1}(\mathbb{R})$ as well.
Thus by induction on $n$ we infer that the function
$x\in C^{n}(\mathbb{R})$ for all $n \in \mathbb{N}$,
that is, $x\in C^{\infty} (\mathbb{R})$.
Now differentiating  \eqref{1.02} with respect to $t$
we establish that  \eqref{1.1} holds at every point $t \in \mathbb{R}$.
\end{proof}


Let $x|_{[-a,a]}=:\varphi$ be an ``initial function'',
which in view of Proposition \ref{prop1.1} is necessarily smooth,
i.e., $\varphi\in C^{\infty}[-a,a]$.
Let us show that if we know the initial function $\varphi\in C^{\infty}[-a,a]$,
then by a step derivation method (an analogue to the step integration method 
\cite{BC})
we can construct the function $x(t)$ on the whole real line $\mathbb{R}$.
For this purpose we write  \eqref{1.1} in the form
\begin{equation}\label{1.2}
x(t+a)=A^{-1}x'(t)-A^{-1}Bx(t-a)-A^{-1}Cx(t)-A^{-1}f(t)\,.
\end{equation}
Setting $t+a=:\tau$,  from \eqref{1.2} we find that
\begin{equation}\label{1.3}
x(\tau)=A^{-1}x'(\tau-a)-A^{-1}Bx(\tau-2a)-A^{-1}Cx(\tau-a)-A^{-1}f(\tau-a)\,.
\end{equation}
Now we follow an induction process to construct the solution of  \eqref{1.1}
on the interval $[ a,na]$, $n\in\mathbb{N}$. For $n=2$,
 if $\tau\in [ a,2a]$, then $(\tau-a)\in[ 0 \,,a]$ and $(\tau-2a)\in[-a ,0]$.
 On the interval $[-a,a]$ the function $x(t)$ is known.
 Therefore the right-hand side of  \eqref{1.3} is uniquely determined for
  $\tau\in [ a,2a]$. Hence the values of $x(\tau)$ are found for $\tau\in [a,2a]$.
  Now suppose that we know $x(\tau)$ on the interval $[a,ma]$ 
(for arbitrary and fixed $m\in\mathbb{N}$).
  Then the right-hand side of \eqref{1.3} is determined for 
$\tau\in [ ma,(m+1)a]$.
  It follows that the function $x(\tau)$ on the left-hand side of \eqref{1.3}
  is found for $\tau\in [ma,(m+1)a]$.
Hence the values of $x(\tau)$ are already determined on the interval 
$[a,(m+1)a]$.
  Thus the method of steps allows us to find the solution of  \eqref{1.1}
  on the interval $ [a,na]$ for any $n\in\mathbb{N}$, i.e., the function $x(\tau)$
is determined for all $\tau\geq a$ starting from the initial function 
$\varphi=x|_{[-a,a]}$.

Similarly, rewriting  \eqref{1.1} in the form
\begin{equation}\label{1.4}
x(t-a)=B^{-1}x'(t)-B^{-1}Ax(t+a)-B^{-1}Cx(t)-B^{-1}f(t)\;
\end{equation}
and making the change of variable $s:=t-a$, we obtain
\begin{equation}\label{1.5}
x(s)=B^{-1}x'(s+a)-B^{-1}Ax(s+2a)-B^{-1}Cx(s+a)-B^{-1}f(s+a)\,.
\end{equation}
Now by applying the method of steps to \eqref{1.5},
we also find the function $x(s)$ for all $s\leq -a$ starting from 
the initial function $\varphi=x|_{[-a,a]}$.

Suppose now that we have an arbitrary function $\varphi\in C^{\infty}[-a,a]$.
Let us try to consider $\varphi$ as an ``initial function''.
By applying the method of steps to this function, we construct a function
$x\in C^{\infty}[ma\,,(m+1)a]$, $m\in\mathbb{Z}$, satisfying  \eqref{1.1} only 
in each open interval  $(ma,(m+1)a)$, $m\in\mathbb{Z}$. 
But at the points $t=ka$, $k\in \mathbb{Z}\setminus{\{0\}}$,
 the function $x(t)$ and its derivatives can have jump discontinuities. 

\begin{theorem} \label{thm1.1}
Let $\varphi$ be an arbitrary function from $C^{\infty}[-a,a]$.
A solution $x(t)$ of equation \eqref{1.1} satisfying the initial condition
$x|_{[-a,a]}=\varphi$ and constructed by the step derivation method
belongs to $C^{\infty}(\mathbb{R})$
if and only if the following conditions are fulfilled
\begin{equation} \label{1.6}
\varphi^{(n+1)}(0)=A\varphi^{(n)}(a)+B\varphi^{(n)}(-a)
+C\varphi^{(n)}(0)+f^{(n)}(0)\,,\quad n=0,1,2,\ldots
\end{equation}
\end{theorem}

\begin{proof}
Necessity. Let $x\in C^{\infty}(\mathbb{R})$ be a solution to \eqref{1.1}
and $x(t)=\varphi(t)$ for $t\in[-a,a]$.
By taking the $n$-th derivative of  \eqref{1.1} and setting $t=0$, we obtain
\begin{equation}\label{1.7}
 x^{(n+1)}(0)=Ax^{(n)}(a)+Bx^{(n)}(-a)+Cx^{(n)}(0)+f^{(n)}(0)\,.
\end{equation}
As $x|_{[-a,a]}=\varphi$, equation \eqref{1.7} is exactly  \eqref{1.6}.

Sufficiency. Let us assume that \eqref{1.6} is true. 
From \eqref{1.3} it follows that
\begin{equation} \label{1.8}
\begin{aligned}
x^{(n)}(\tau)
&=A^{-1}x^{(n+1)}(\tau-a)-A^{-1}Bx^{(n)}(\tau-2a)\\
&\quad -A^{-1}Cx^{(n)}(\tau-a)-A^{-1}f^{(n)}(\tau-a)
\end{aligned}
\end{equation}
for arbitrary $n\in \mathbb{N}_0:=\mathbb{N}\cup\{0\}$. Thus,
\begin{equation} \label{1.9}
x^{(n)}(a^{+})=A^{-1}\varphi^{(n+1)}(0)-A^{-1}B\varphi^{(n)}(-a)
-A^{-1}C\varphi^{(n)}(0)-A^{-1}f^{(n)}(0)\,,
\end{equation}
where, as usual,
$ x^{(n)}(a^{\pm}) :=  \lim_{\varepsilon \to 0^{+}}
 x^{(n)}(a \pm \varepsilon) $.
Hence the equality $x^{(n)}(a^{+}) = \varphi^{(n)}(a) $ follows from
\eqref{1.9} and condition  \eqref{1.6}.
On the other hand,   $x^{(n)}(a^{-}) = \varphi^{(n)}(a) $ because $x$
coincides with $\varphi$ on $[-a, a]$.
Therefore, we see that  the continuity condition  $x^{(n)}(a^+)=x^{(n)}(a^-)$,
$n\in \mathbb{N}_0$, is valid.
Similarly with the help of  \eqref{1.5} the continuity of
$x^{(n)}(t)$ at $t=-a$ is established.

We finish the proof by induction on $k$. To be exact, let us assume that
for all $n\in \mathbb{N}_0$ the equality $x^{(n)}(ka^+)=x^{(n)}(ka^{-})$ is true
for $k=\pm1,\pm2,\ldots,\pm m$. Then according to  \eqref{1.8} 
for arbitrary $n\in \mathbb{N}_0$
we have
\begin{equation} \label{1.10}
\begin{aligned}
x^{(n)}((m+1)a^+)&=A^{-1}x^{(n+1)}(ma^+)-A^{-1}Bx^{(n)}((m-1)a^+)\\
                 &\quad -A^{-1}Cx^{(n)}(ma^+)-A^{-1}f^{(n)}(ma)\,,
\end{aligned}
\end{equation}
and
\begin{equation} \label{1.11}
\begin{aligned}
x^{(n)}((m+1)a^-)&=A^{-1}x^{(n+1)}(ma^-)-A^{-1}Bx^{(n)}((m-1)a^-)\\
                 &\quad -A^{-1}Cx^{(n)}(ma^-)-A^{-1}f^{(n)}(ma)\,.
\end{aligned}
\end{equation}
By the induction hypothesis $x^{(n+1)}(ma^+)=x^{(n+1)}(ma^{-})$,
$x^{(n)}(ma^+)=x^{(n)}(ma^{-})$ and
$x^{(n)}((m-1)a^+)=x^{(n)}((m-1)a^{-})$. Therefore we see that the right-hand sides
of  \eqref{1.10} and  \eqref{1.11} are equal.
It follows that $x^{(n)}((m+1)a^+)=x^{(n)}((m+1)a^-)$ for any
$n\in \mathbb{N}_0$ as well.
In the same manner by using the relation  \eqref{1.5} we establish
the validity of the equality
$x^{(n)}(-(m+1)a^-)=x^{(n)}(-(m+1)a^+)$ for any $n\in \mathbb{N}_0$.

Thus it is shown that for all $n\in \mathbb{N}_0$ the equality 
$x^{(n)}(ka^+)=x^{(n)}(ka^-)$ is also true for $k=\pm(m+1)$.
Consequently, by induction we infer that the equality 
$x^{(n)}(ka^+)=x^{(n)}(ka^-)$ is true for all $k\in \mathbb{Z}$.
So the function $x(t)$ and all its derivatives are continuous at the 
points $t=ka$, $k\in \mathbb{Z}$.
Hence $x\in C^{\infty}(\mathbb{R})$. The theorem is proved.
\end{proof}

\begin{remark}  \rm
 Let an initial function $\varphi\in C^{\infty}[-a,a]$
 generate by the step derivation method a function $x(t)$, $t\in \mathbb{R}$.
 It follows from the proof of Theorem~\ref{thm1.1} that
 if the function $x(t)$ is infinitely differentiable at the point $t=a$,
 then $x(t)$ is also necessarily infinitely differentiable at every point
 $t=\pm ma$, $m\in \mathbb{N}$, and hence $x\in C^{\infty}(\mathbb{R})$.
 Indeed, as is easily seen the condition of continuity
 $x^{(n)}(a^+)=x^{(n)}(a^-)\equiv \varphi^{(n)}(a)$, $n\in \mathbb{N}_0$,
 is equivalent to condition  \eqref{1.6} which is sufficient for $x=x(t)$, 
$t\in \mathbb{R}$, to be smooth. The same is true if we replace the point 
$t=a$ by $t=-a$.
\end{remark}

Actually Theorem~\ref{thm1.1} together with the preceding application 
of the step derivation method  is the existence and uniqueness theorem 
for equation  \eqref{1.1}. To be exact, the uniqueness means the following: 
any two solutions of \eqref{1.1} coinciding on the interval $[-a,a]$ are 
identically equal on the whole line. This uniqueness is an immediate consequence 
of the fact that the procedure of the step derivation method 
of constructing a solution to equation  \eqref{1.1} from an initial 
function is uniquely determined. In particular, if $f=0$ and $\varphi=0$, 
then a corresponding solution to equation \eqref{1.1} is also identically
 equal to zero.

Starting from the above reasoning we can now formulate and prove
the following general existence and uniqueness theorem.
Let us first introduce a linear operator $D_{a}$ acting on
$C^{\infty} (\mathbb{R},\mathbb{C}^{l})$ so that
\begin{equation*}
(D_{a}x)(t)=Ax(t+a)+Bx(t-a)+Cx(t)\,,\quad 
x\in C^{\infty} (\mathbb{R}\,,\mathbb{C}^{l})\,.
\end{equation*}
Then the differential difference equation  \eqref{1.1} is written as follows
\begin{equation}\label{1.12}
x'(t)=(D_{a}x)(t)+f(t)\,.
\end{equation}

\begin{theorem}[Existence and uniqueness] \label{thm1.2}
Consider an interval $I_{\tau}:=[\tau-a,\tau+a]$,
where $\tau\in \mathbb{R}$ is arbitrary and fixed.
Let a function $\psi\in C^{\infty} (I_{\tau},\mathbb{C}^{l})$.
Then the initial value problem
\begin{equation}\label{1.13}
 \begin{gathered}
 x'(t)=(D_{a}x)(t)+f(t)\,,\quad t\in \mathbb{R}\,,\\
x|_{[\tau-a,\tau+a]} =\psi
      \end{gathered}
\end{equation}
has a solution $x(t)\in C^{\infty} (\mathbb{R},\mathbb{C}^{l})$
if and only if the initial function $\psi$ satisfies the following conditions
\begin{equation}\label{1.14}
\psi^{(n+1)}(\tau)=(D_{a}\psi^{(n)})(\tau)+f^{(n)}(\tau)\,,\quad
 n\in \mathbb{N}_0\,.
\end{equation}
Such a solution is unique, that is, any two smooth solutions to  \eqref{1.12}
coinciding on an arbitrary (closed) interval of length $2a$
are equal on the whole line.
\end{theorem}

\begin{proof}
Necessity. Let $x\in C^{\infty}(\mathbb{R})$ satisfy  \eqref{1.13}.
Then by repeated differentiation of   \eqref{1.12} we obtain
\begin{equation}\label{1.15}
x^{(n+1)}(t)=(D_{a}x^{(n)})(t)+f^{(n)}(t)\,.
\end{equation}
As $x|_{I_{\tau}}=\psi$, evaluating  \eqref{1.15} at  $t=\tau$ gives \eqref{1.14}.

Sufficiency. Let $\varphi(t):=\psi(t+\tau)$, $t\in [-a,a]$, and
$f_{\tau}(t):=f(t+\tau)$, $t\in \mathbb{R}$.
Since $\psi$ satisfies  \eqref{1.14},
it is easily seen that $\varphi$ satisfies the condition
\begin{equation}\label{1.16}
\varphi^{(n+1)}(0)=(D_{a}\varphi^{(n)})(0)+f_{\tau}^{(n)}(0)\,,
\quad n\in \mathbb{N}_0\,.
\end{equation}
Hence by Theorem \ref{thm1.1} there exists a smooth solution $x(t)$, 
$t\in \mathbb{R}$, to the  initial value problem
\begin{equation}\label{1.17}
\begin{gathered}
 x'(t)=(D_{a}x)(t)+f_{\tau}(t)\\
x|_{[-a,a]}=\varphi\,.
      \end{gathered}
\end{equation}

\noindent
Now let $y(t):=x(t-\tau)$, where $t\in \mathbb{R}$.
Then we have
$$y'(t)=x'(t-\tau)=(D_{a}x)(t-\tau)+ f_{\tau}(t-\tau)= (D_{a}y)(t)+f(t) \,.$$
Thus the function $y=y(t)$ satisfies the equation $y'(t)=(D_{a}y)(t)+f(t)$.
And also
\begin{align*}
 y(t)|_{[\tau-a\,,\,\tau+a]}
&=x(t-\tau)|_{[\tau-a\,,\,\tau+a]}
=x(s)|_{[-a\,,\,+a]} \\
&=\varphi(s)|_{[-a\,,\,+a]}
=\psi(s+\tau)|_{[-a\,,\,+a]} \\
&= \psi(t)|_{[\tau-a\,,\,\tau+a]}\,. 
\end{align*}
That is, $y(t)|_{[\tau-a,\tau+a]}=\psi(t)$.
It follows that the function $x=y(t)$
 is a smooth solution to the initial value problem  \eqref{1.13}.

Uniqueness. Let $x=y(t)$ and $x=z(t)$ be two solutions of \eqref{1.12}
coinciding on the interval $[\tau-a,\tau+a]$.
Then $y_{\tau}(t):=y(t+\tau)$ and $z_{\tau}(t):=z(t+\tau)$
are also solutions to the equation
$x'(t)=(D_{a}x)(t)+f_{\tau}(t)$ already coinciding on the interval $[-a,a]$.
Consequently as indicated before  Theorem \ref{thm1.2}
$y_{\tau}(t)=z_{\tau}(t)$ for all $t\in \mathbb{R}$, that is,
$y(t+\tau)=z(t+\tau)$, $t\in \mathbb{R}$.
This proves that $y(t)=z(t)$ on the whole line.
\end{proof}

For $f=0$ system of equations \eqref{1.1} becomes a linear homogeneous system
\begin{equation}\label{1.18}
x'(t)=Ax(t+a)+Bx(t-a)+Cx(t)\,.
\end{equation}
In this particular case, which we will consider in the next section,
the assertions of Theorem \ref{thm1.2} have the  form

\begin{corollary}\label{cor1.1}
Let a function $\varphi\in C^{\infty} (I_{\tau},\mathbb{C}^{l})$,
where $I_{\tau}:=[\tau-a,\tau+a]$.
Then the initial-value problem
\begin{equation}\label{1.19}
\begin{gathered}
 x'(t)=(D_{a}x)(t)\,,\quad t\in \mathbb{R}\,,\\
x|_{[\tau-a,\tau+a]}=\varphi
      \end{gathered}
\end{equation}
has a solution $x(t)\in C^{\infty} (\mathbb{R},\mathbb{C}^{l})$
if and only if the initial function $\varphi$ satisfies the following conditions
\begin{equation}\label{1.20}
\varphi^{(n+1)}(\tau)=(D_{a}\varphi^{(n)})(\tau)\,,\quad n\in \mathbb{N}_0\,;
\end{equation}
that is,
\begin{equation}\label{1.21}
\varphi^{(n+1)}(\tau)=A\varphi^{(n)}(\tau+a)
+B\varphi^{(n)}(\tau-a)+C\varphi^{(n)}(\tau)\,,\quad n=0,1,2,\ldots
\end{equation}
Such a solution is unique in the sense that any two smooth solutions
to system of equations  \eqref{1.18} equal on an arbitrary (closed) interval
of length $2a$ coincide on the whole real line.
\end{corollary}

\section{Transformation group}

 In a linear space $\Phi=C^{\infty}({[-a,a]}\,,\mathbb{C}^{l})$
we define a denumerable system $\mathcal{N}$ of semi-norms
$\|\cdot\|_{m}$, $m\in \mathbb{N}_0$,
setting for $\varphi =(\varphi_{1},\ldots,\varphi_{l})\in \Phi$
\begin{gather}\label{2.1}
\|\varphi\|_0:=\max \{ |\varphi_{i}(x)|:
x\in [-a,a]\,,\;i=1,\ldots ,l\}\,, \\
\label{2.2}
\|\varphi\|_{m}:=\max \{ |\varphi_{i}^{(m)}(x)|:
x\in [-a,a]\,,\; i=1,\ldots,l\}\,,\quad m\geq 1\,.
\end{gather}

As $\|\varphi\|_0=0$ implies $\varphi=0$ for any $\varphi\in \Phi$,
this system $\mathcal{N}$ of semi-norms is separating.
Hence by definition \cite{Sch} a pair $(\Phi\,,\mathcal{N})$ is a countably
 normed space.
We recall that in $\Phi$ as in any countably normed space there is a 
convergence $\varphi_{n}\overset{\Phi}{\to}\varphi$ as 
$n\to \infty$
for the elements $\varphi_{n}\,,\varphi \in \Phi$ if
$\|\varphi_{n}-\varphi\|_{m}\to 0$ as $n\to \infty$ for all $m$.
As is easily seen in our case this convergence means a uniform convergence 
of the components
$(\varphi_n)_i=: \varphi_{n i}$, $\varphi_{i}$ $(i=1,\ldots ,l)$
and their derivatives of any order on the interval $[-a,a]$, i.e.,
\begin{equation*}
\varphi^{(m)}_{n\,i}(x)\underset{n\to \infty}{\to}
\varphi_{i}^{(m)}(x)\,,\quad i=1,\ldots ,l\,,
\end{equation*}
uniformly on $[-a,a]$ for each $m\in \mathbb{N}_0$.
Similarly a sequence of elements $\varphi_{n}\in \Phi$ is said to be fundamental 
if $\|\varphi_{n}-\varphi_{k}\|_{m}\to 0$ as $n,k\to \infty$ for all 
$m\in \mathbb{N}_0$.
This is equivalent to the assertion that every sequence of complex-valued functions
$\varphi^{(m)}_{n\,i}$ ($i=1,\ldots ,l$ and $m\in \mathbb{N}_0$) is fundamental
in $C[-a,a]$ endowed with the uniform norm.
Since $C[-a,a]$ is complete with respect to the uniform convergence,
every sequence $\varphi^{(m)}_{n\,i}$ ($n\in \mathbb{N}$) converges uniformly
on the interval $[-a,a]$ to a continuous function $\varphi_{i\,m}$.
Let $\varphi_i=: \varphi_{i\,0}$. Then applying successively for $m=1,2,\ldots$
the Theorem on Interchange of Limit and Derivative we conclude that 
$\varphi_{i\,m}=\varphi^{(m)}_{i}$.
Hence $\varphi_i\in C^{\infty}[-a,a]$ and so $\varphi\in \Phi$.
It follows from above that $\varphi_{n}\overset{\Phi}{\to}\varphi$.
Consequently the countably normed space $\Phi$ is complete.

Note that in old literature (see for instance \cite{gs}) in the definition 
of countably normed spaces a family $\mathcal{N}$ was required to be a 
countable family of norms rather than seminorms.
Nowadays it is sufficient for $\mathcal{N}$ to be only a countable separating 
collection of seminorms.
However, in our case we can replace the family of seminorms $\mathcal{N}$
by an ``equivalent'' family $\mathcal{N'}$ consisting of norms
$\|\cdot\|'_{m}:=\sum_{k=0}^{m}\|\cdot\|_{k}$, $m\in \mathbb{N}_0$.
A pair $(\Phi\,,\mathcal{N'})$ will be a countably normed space
in accordance with the old definition.
At the same time as is easily checked the convergence in
$(\Phi\,,\mathcal{N}')$  coincides with the one defined above in
$(\Phi\,,\mathcal{N})$, i.e., the systems $\mathcal{N}$ and
$\mathcal{N'}$ are really equivalent.

So according to the modern definition a countably normed space is just
a locally convex topological linear space
in which a separating collection of seminorms is countable.
It is to be noted that  complete countably normed spaces are
a natural example of an $F$-space (i.e., a Fr{\'e}chet space).
In $F$-spaces some important results of the Banach space theory remain true.
For example, Banach's Theorem on Inverse Operator
(also called Banach's Theorem on Isomorphism) is valid in $F$-spaces,
and hence in complete countably normed spaces \cite{M} as well.
We will need this theorem in this section below.

We define in the complete countably normed space $\Phi$ a subset
\begin{equation*}\label{2.3}
\mathfrak{I}:=\{ \varphi\in \Phi: \varphi^{(m+1)}(0)
=A\varphi^{(m)}(a)+B\varphi^{(m)}(-a)+C\varphi^{(m)}(0)\,,
\; m\in \mathbb{N}_0 \}\,.
\end{equation*}

It is easily seen that $\mathfrak{I}$ is a linear closed subspace of $\Phi$.
Here the closure of $\mathfrak{I}$ follows from the fact that if
$\varphi_{n}\in \mathfrak{I}$ and
$\varphi_{n}\overset{\Phi}{\to}\varphi$ as $n\to\infty$,
then for every $m\in \mathbb{N}_0 $ the relation
\begin{equation*}
\varphi^{(m+1)}_{n}(0)=A\varphi^{(m)}_{n}(a)
+B\varphi^{(m)}_{n}(-a)+C\varphi^{(m)}_{n}(0)
\end{equation*}
also holds for the limit function $\varphi$.
We call $\mathfrak{I}$ an initial space.
Note that the initial space $\mathfrak{I}$ is not empty
because it contains all  functions from $C^{\infty}[-a,a]$
identically equal to zero in some neighborhoods of the points
$t=\pm a$ and $t=0$.
Note that $\mathfrak{I}$, being a closed subspace of $\Phi$,
is a complete countably normed space as well.

By Corollary \ref{cor1.1} if $\varphi\in \mathfrak{I}$,
then there exists a unique solution $x(t)$, $t\in \mathbb{R}$,
to the initial-value problem
\begin{equation}\label{2.4}
 \begin{gathered}
 x'(t)=(D_{a}x)(t)\,,\quad t\in \mathbb{R}\,,\\
x|_{[-a\,,+a]}=\varphi \,.
      \end{gathered}
\end{equation}

Let  $x=x(t)$ satisfy  \eqref{2.4} with $\varphi\in \mathfrak{I}$.
For every fixed $t\in \mathbb{R}$ we define a function
$\varphi_{t}=\varphi_{t}(\theta):=x(\theta+t)$ of a variable $\theta\in [-a,a]$.

\begin{proposition} \label{prop3.1}
For all $t\in \mathbb{R}$ the function $\varphi_{t}\in \mathfrak{I}$.
\end{proposition}

\begin{proof}
By differentiating repeatedly $m$ times, $m\in \mathbb{N}_0$, the equation
\begin{equation*}
x'(\tau)=Ax(\tau+a)+Bx(\tau-a)+Cx(\tau)\,, \quad \tau\in \mathbb{R}\,,
\end{equation*}
and putting $\tau=\theta+t$, we obtain
\begin{equation*}
x^{(m+1)}(\theta+t)=Ax^{(m)}(\theta+t+a)+Bx^{(m)}(\theta+t-a)+Cx^{(m)}(\theta+t)\,;
\end{equation*}
that is,
\begin{equation*}
\varphi_{t}^{(m+1)}(\theta)=A\varphi_{t}^{(m)}(\theta+a)
+B\varphi_{t}^{(m)}(\theta-a)+C\varphi_{t}^{(m)}(\theta)\,.
\end{equation*}
In particular, for $\theta=0$ we have the relation
\begin{equation*}
\varphi_{t}^{(m+1)}(0)=A\varphi_{t}^{(m)}(a)+B\varphi_{t}^{(m)}(-a)
+C\varphi_{t}^{(m)}(0)\,.
\end{equation*}
According to the definition of the subspace $\mathfrak{I}$
the function $\varphi_{t}\in \mathfrak{I}$.
\end{proof}

Let $T(t):\mathfrak{I}\to \mathfrak{I}$, $t\in \mathbb{R}$,
be a linear map such that $T(t)\varphi:=\varphi_{t}$ for
$\varphi\in \mathfrak{I}$, i.e., $(T(t)\varphi)(\theta)=x(\theta+t)$.
It will be shown below that the map $T(t)$, $t\in \mathbb{R}$,
is a one--parameter group of linear transformations
of the complete countably normed space $\mathfrak{I}$.
To be exact, the following relations hold:
\begin{itemize}
\item[(1)] $T(0)=I$ (the identity operator),

\item[(2)] $T(t_1+t_2)=T(t_1)\, T(t_2)$ for all $t_1,t_2\in \mathbb{R}$.
\end{itemize}
Obviously only the group relation (2) needs to be checked.
According to Corollary \ref{cor1.1} a one-to-one mapping
$X:\mathfrak{I}\to C^{\infty} (\mathbb{R}\,,\mathbb{C}^{l})$
is defined so that for $\varphi\in \mathfrak{I}$
we have $(X\varphi)(t)=x(t)$, where $x(t)$ is a unique solution
of the initial value problem  \eqref{2.4}.
We call $X $ a resolution operator.
By using the resolution operator the action of the operator
$T(t)$ on an arbitrary function $\varphi \in \mathfrak{I}$
is written as follows
\begin{equation}\label{2.5}
(T(t)\varphi)(\theta)=(X\varphi)(\theta+t)\,.
\end{equation}

\begin{lemma} \label{lem3.1}
Let $z=z(t)$ be an arbitrary solution of the homogeneous equation
$x'(t)=(D_{a}x)(t)$. For an arbitrary but fixed $\tau\in \mathbb{R}$
let us consider an initial function $\varphi_{\tau}(\theta):=z(\theta+\tau)$.
Then a function $y(t)=z(t+\tau)$ will be a unique solution to the 
initial-value problem
\begin{equation}\label{2.6}
\begin{gathered}
 x'(t)=(D_{a}x)(t)\,,\quad t\in\mathbb{R}\,,\\
x(\theta)|_{[-a,+a]}=\varphi_{\tau}(\theta)\,.
      \end{gathered}
\end{equation}
In other words, if $z'(t)=(D_{a}z)(t)$,  then
\begin{equation}\label{2.7}
(Xz(\theta+\tau))(t)=z(t+\tau)\,.
\end{equation}
\end{lemma}

\begin{proof}
It is clear that the function $y(t)$ is a solution to  \eqref{2.6}.
But according to Corollary \ref{cor1.1} problem \eqref{2.6}
has only one solution.
\end{proof}

\begin{theorem} \label{thm3.0}
The group relation $T(t_1+t_2)=T(t_1) T(t_2)$ holds for all $t_1,t_2\in \mathbb{R}$.
\end{theorem}

\begin{proof}
By definition $(T(\tau)\varphi)(\theta)=x(\theta+\tau)$,
where $x=x(t)$ is a solution of  \eqref{2.4}.
Consequently, $(T(t_1+t_2)\varphi)(\theta)=x(\theta+t_1+t_2)$ and
$(T(t_1)\varphi)(\theta)=x(\theta+t_1)=:\varphi_{t_1}(\theta)$.
Then
\begin{equation*}
((T(t_2)\, T(t_1))\varphi)(\theta)=(T(t_2)\varphi_{t_1})(\theta)
=(T(t_2)x(\theta+t_1))(\theta)\,.
\end{equation*}
In view of  \eqref{2.5} and  \eqref{2.7} we have
\begin{equation*}
(T(t_2)x(\theta+t_1))(\theta)
=(Xx(\theta+t_1))(t)\big|_{t=\theta+t_2}
=x(t+t_1)\big|_{t=\theta+t_2}=x(\theta+t_2+t_1)\,.
\end{equation*}
It follows that $(T(t_2)\, T(t_1)\varphi)(\theta)
=x(\theta+t_2+t_1)=(T(t_1+t_2)\varphi)(\theta)$;
 that is, $T(t_2)\, T(t_1)\varphi=T(t_1+t_2)\varphi$
for all $\varphi \in \mathfrak{I}$ as required to be proved.
\end{proof}

\begin{theorem} \label{thm3.1}
The operator-valued function $T=T(t)$ is strongly differentiable with respect to $t$
and for any $\varphi \in \mathfrak{I}$ we have
\begin{equation}\label{2.8}
\frac{d}{dt}(T(t)\varphi(\theta))=T(t)\varphi'(\theta)\,.
\end{equation}
\end{theorem}

\begin{proof}
We first check formula \eqref{2.8} for $t=0$.
It is necessary to show that there exists a limit
\begin{equation}\label{2.9}
\Phi-\lim_{t\to 0}\frac{T(t)\varphi(\theta)-\varphi(\theta)}{t}=\varphi'(\theta)\,,
\end{equation}
i.e., we need to verify that for every $ m\in \mathbb{N}_0 $
\begin{equation*}
\| \frac{T(t)\varphi(\theta)-\varphi(\theta)}{t}-
\varphi'(\theta)\|_{m}\to 0 \quad
\text{as } t\to 0\,.
\end{equation*}
By  \eqref{2.1} and  \eqref{2.2} we have
\begin{align*}
\| \frac{T(t)\varphi(\theta)-\varphi(\theta)}{t}- \varphi'(\theta)\|_{m}
&=\| \frac{x(\theta+t)-x(\theta)}{t}- x'(\theta)\|_{m}\\
&=\| \frac{x^{(m)}(\theta+t)-x^{(m)}(\theta)}{t}- x^{(m+1)}(\theta)\|_0\,.
\end{align*}
For each $i=1,\ldots , l$ by the Mean Value  Theorem for some
$\delta_{i}\in(0\,,1)$ (depending on $t$ and $\theta$)
we have
\[
\frac{x_{i}^{(m)}(\theta+t)-x_{i}^{(m)}(\theta)}{t}- x_{i}^{(m+1)}
(\theta)=x_{i}^{(m+1)}(\theta+\delta_{i} t)-x_{i}^{(m+1)}(\theta)\,.
\]
Let $|t|< a$, then $\theta+\delta_{i} t\in [-2a,2a]$.
Therefore,
\begin{align*}
&\| \frac{T(t)\varphi(\theta)-\varphi(\theta)}{t}-
\varphi'(\theta)\|_{m} \\
&=\max_{\theta\in[-a\,,\,a]\,,\,i=1,\ldots ,l}
|x_{i}^{(m+1)}(\theta+\delta_{i}t)-x_{i}^{(m+1)}(\theta)| \\
&\leq \max\{ |x_{i}^{(m+1)}(\tilde{\theta})-x_{i}^{(m+1)}(\theta)|:
\theta,\tilde{\theta}\in[-2a\,,2a]\,,\, |\theta-\tilde{\theta}|< |t|\,,
\,i=1,\ldots , l\}\,.
\end{align*}
The above expression tends to zero as $t\to 0$
by the uniform continuity of the functions $x_{i}^{(m+1)}$
on the closed interval $[-2a,2a]$.

Now to prove \eqref{2.8} for $t\neq 0$ we use the group property of $T(t)$:
\begin{equation*}
\frac{T(t+\delta)\varphi(\theta)-T(t)\varphi(\theta)}{\delta}=
\frac{T(\delta)\varphi_{t}(\theta)-\varphi_{t}(\theta)}{\delta}\,.
\end{equation*}
Since $\varphi_{t}\in \mathfrak{I}$, by applying the already considered 
case of $t=0$, we obtain
\begin{equation*}
\Phi-\lim_{\delta\to 0}\frac{T(\delta )\varphi_{t}(\theta)
-\varphi_{t}(\theta)}{\delta}
= \varphi'_{t}(\theta)=x'(\theta+t)=T(t)\varphi'(\theta)\,.
\end{equation*}
Note that the last equality $x'(\theta+t)=T(t)\varphi'(\theta)$ is true because,
as it follows from the definition of $\mathfrak{I}$, the function $\varphi'$
also belongs to the space $\mathfrak{I}$, and $y(t):=x'(t)$
obviously satisfies  \eqref{2.4} with $\varphi'$ in place of $\varphi$.
The proof is complete.
\end{proof}

\begin{corollary} \label{cor3.1}
The group $T(t)$ is strongly continuous, i.e.,
\begin{equation*}
\Phi-\lim_{t\to t_0}T(t)\varphi=T(t_0)\varphi
\end{equation*}
for all $\varphi\in \mathfrak{I}$ and any $t_0\in \mathbb{R}$.
\end{corollary}

\begin{proof}
In $\mathfrak{I}$ like in any countably normed space
strong differentiability implies strong continuity \cite{gs}.
\end{proof}

\begin{corollary} \label{cor3.2}
A differentiation operator
$D:\mathfrak{I}\to \mathfrak{I}$ such that
$D\varphi(\theta)=\varphi'(\theta)$, $\varphi\in \mathfrak{I}$,
is the generator of the transformation group $T(t)$.
The operator $D$ is continuous.
\end{corollary}

\begin{proof}
Since  $\varphi$ and its derivative $\varphi'$ belong to the space
$\mathfrak{I}$, the operator $D$ maps $\mathfrak{I}$ to $\mathfrak{I}$.
Consequently, by \eqref{2.9} the operator $D$ is the generator of the group $T(t)$.
Further, $\|D\varphi\|_{m}=\|\varphi'\|_{m}=\|\varphi\|_{m+1}$.
So, if
$\varphi_{n}\overset{\Phi}{\to}0$ as $n\to \infty$,
i.e.,
$\|\varphi_{n}\|_{k}\to 0$ as $n\to \infty$ for every $k\in\mathbb{N}_0$,
then
$\|D\varphi_{n}\|_{m}=\|\varphi_{n}\|_{m+1}\to 0$ as $n\to \infty$
for every $m\in\mathbb{N}_0$ as well.
This proves the continuity of the operator $D$.
\end{proof}

\begin{theorem} \label{thm3.2}
The point spectrum $\sigma_{p}(D)$ of the differentiation operator
\noindent
$D:\mathfrak{I}\to \mathfrak{I}$ coincides with the set
$\Lambda=\{ \lambda\in\mathbb{C}: 
\det (e^{\lambda a}A+e^{-\lambda a}B+C-\lambda I)=0 \}$.
If $\lambda\in \Lambda$, the corresponding  eigenfunctions $x_{\lambda}$ 
are of the form
$x_{\lambda}(\theta)=e^{\lambda \theta}H_{\lambda}$, $\theta\in [-a,a]$,
where the vectors $H_{\lambda}\in \mathbb{C}^{l}$ depending on $\lambda$
are solutions of the homogeneous system of linear equations
\begin{equation}\label{2.10}
(e^{\lambda a}A+e^{-\lambda a}B+C-\lambda I)H=0\,.
\end{equation}
\end{theorem}

\begin{proof}
A solution of the eigenvalue-eigenvector problem $Dx=\lambda x$, that is,
$x'(\theta)=\lambda x(\theta)$ is a smooth vector-valued function
$x_{\lambda}(\theta)=e^{\lambda \theta}H$ with $H\in \mathbb{C}^{l}$.
And $x_{\lambda}\in \mathfrak{I}$
if and only if
\begin{equation*}
x^{(k+1)}_{\lambda}(0)=Ax^{(k)}_{\lambda}(a)
+Bx^{(k)}_{\lambda}(-a)+Cx^{(k)}_{\lambda}(0)\,, \quad k\in\mathbb{N}_0\,,
\end{equation*}
that is, if and only if in the case of $\lambda\neq 0$,
\begin{equation*}
\lambda^{k+1}H=(e^{\lambda a}A+e^{-\lambda a}B+C)\lambda^{k}H \,,
\quad k\in\mathbb{N}_0\,,
\end{equation*}
or equivalently
\begin{equation*}
\lambda H=(e^{\lambda a}A+e^{-\lambda a}B+C)H\,,
\end{equation*}
which is  \eqref{2.10}.
The case $\lambda= 0$ is straightforward, since the last equation,
like the previous one, holds for $\lambda=0$ as well.

It is easily seen that if $H_{1},\ldots ,H_{k}$ $(1\leq k\leq l)$
form a basis of solutions of  \eqref{2.10}, then the eigenspace  
for $\lambda$ has the form 
$\mathcal{E}_{\lambda}=\operatorname{gen}(e^{\lambda \theta}H_{1}, \ldots ,
e^{\lambda \theta}H_{k})$
and hence $\dim \mathcal{E}_{\lambda}=k$.
\end{proof}

 Let us show that the spectrum of $D$ is a purely point one.

\begin{lemma} \label{lem3.2}
 Let a function $x=x(\theta)$, $\theta\in [-a,a]$, be a solution of the equation
\begin{equation*}
x'(\theta)=\lambda x(\theta)+f(\theta)\,,
\end{equation*}
where $\lambda \in\mathbb{C}$ and $f\in \mathfrak{I}$.
If the function $x=x(\theta)$ satisfies the condition
\begin{equation}\label{2.11}
x'(0)=Ax(a)+Bx(-a)+Cx(0)\,,
\end{equation}
then $x\in \mathfrak{I}$, i.e., the following equality
\begin{equation}\label{2.12}
x^{(k+1)}(0)- \Big( Ax^{(k)}(a)+Bx^{(k)}(-a)+Cx^{(k)}(0)\Big)=0
\end{equation}
holds for all $k\in \mathbb{N}_0$.
\end{lemma}

\begin{proof}
Since $f\in C^{\infty}[-a,a]$, the function $x\in C^{\infty}[-a,a]$ as well.
We prove the assertion by induction. By  \eqref{2.11} the condition \eqref{2.12} 
is obviously true for $k=0$.
Assume it to be true for $k=n-1$ with some arbitrary $n> 0$. 
We need to establish  \eqref{2.12} for $k=n$.
By differentiating the equation $x'(\theta)=\lambda x(\theta)+f(\theta)$ 
repeatedly $n$ times, we find that
\begin{gather*}
x^{(n+1)}(0)=\lambda x^{(n)}(0)+f^{(n)}(0)\,, \\
x^{(n)}(\pm a)=\lambda x^{(n-1)}(\pm a)+f^{(n-1)}(\pm a)\,.
\end{gather*}
Consequently,
\begin{align*}
&x^{(n+1)}(0)-\bigl(Ax^{(n)}(a)+Bx^{(n)}(-a)+Cx^{(n)}(0)\bigr) \\
&=\bigl(\lambda x^{(n)}(0)+f^{(n)}(0)\bigr)
-A\bigl(\lambda x^{(n-1)}(a)+f^{(n-1)}(a)\bigr) \\
&\quad -B\bigl(\lambda x^{(n-1)}(-a)+f^{(n-1)}(-a)\bigr)
-C\bigl(\lambda x^{(n-1)}(0)+f^{(n-1)}(0)\bigr) \\
&=\lambda\bigl(x^{(n)}(0)-\bigl(Ax^{(n-1)}(a)
 +Bx^{(n-1)}(-a)+Cx^{(n-1)}(0)\bigr)\bigr) \\
&\quad +\bigl(f^{(n)}(0)-\bigl(Af^{(n-1)}(a)
 +Bf^{(n-1)}(-a)+Cf^{(n-1)}(0)\bigr)\bigr)=0
\end{align*}
by the induction hypothesis and the condition $f\in \mathfrak{I}$.
\end{proof}

Now suppose that $\lambda \not \in\sigma_{p}(D)=\Lambda$.
Let us compute the resolvent $R(\lambda)=(D-\lambda I)^{-1}$ of the operator $D$.
Given $f\in \mathfrak{I}$,
the solution of the inhomogeneous equation $Dx=\lambda x+f$, i.e.,
of the differential equation $x'(\theta)=\lambda x(\theta)+f(\theta)$ is given by
\begin{equation}\label{2.13}
x(\theta)=e^{\lambda \theta}\, H_{f}
 +e^{\lambda \theta}\int_0^{\theta}e^{-\lambda \tau}f(\tau)\, d\tau
\end{equation}
with an arbitrary vector $ H_{f}\in \mathbb{C}^{l}$.
By Lemma \ref{lem3.2}  for the solution $x=x(\theta)$ to belong to 
the space $\mathfrak{I}$ it is sufficient to satisfy the condition  \eqref{2.11} 
by choosing the vector $H_{f}$.
From  \eqref{2.13} we find that $x'(0)=\lambda H_{f}+f(0)$.
Substituting this expression for $x'(0)$ and the expressions for
$x(\theta)|_{\pm a}$ also obtained from  \eqref{2.13} in  \eqref{2.11}
we obtain
\begin{equation} \label{2.14}
\begin{aligned}
&(\lambda I -e^{\lambda a}A-e^{-\lambda a}B-C)H_{f} \\
&=-f(0)+e^{\lambda a}A\int_0^{a}e^{-\lambda \tau}f(\tau)\,d\tau
+e^{-\lambda a}B\int_0^{-a}e^{-\lambda \tau}f(\tau)\, d\tau \,.
\end{aligned}
\end{equation}
Since $\lambda \not\in \Lambda =\sigma_{p}(D)$,
the determinant $\det\,(\lambda I - e^{\lambda a}A-e^{-\lambda a}B-C)\neq 0$.
Hence the matrix $(\lambda I - e^{\lambda a}A-e^{-\lambda a}B-C)$ is invertible.
It follows from  \eqref{2.14} that
\begin{equation} \label{2.15}
\begin{aligned}
H_{f}&=(e^{\lambda a}A+e^{-\lambda a}B+C-\lambda I)^{-1}(f(0)\\
&\quad -e^{\lambda a}A\int_0^{a}e^{-\lambda \tau}f(\tau )\, d\tau
 -e^{-\lambda a}B\int_0^{-a}e^{-\lambda \tau}f(\tau )\, d\tau)\,.
\end{aligned}
\end{equation}
Consequently, by  \eqref{2.15} and  \eqref{2.13} for
$\lambda \not \in\sigma_{p}(D)=\Lambda$,
the resolvent
\begin{equation}\label{2.16}
R(\lambda)f(\theta)=e^{\lambda \theta}\, H_{f} +
e^{\lambda \theta}\int_0^{\theta}e^{-\lambda \tau}f(\tau)\, d\tau
\end{equation}
is defined on the whole space $\mathfrak{I}$.
It follows that for $\lambda \not \in\sigma_{p}(D)$ the bounded operator
$D-\lambda I:\mathfrak{I}\to \mathfrak{I}$
is a bijection of the complete countably normed space $\mathfrak{I}$.
By Banach's Theorem on Inverse Operator \cite{M,Y}
(which is also valid in $F$-spaces and hence, in particular,
in complete countably normed spaces)
the inverse operator
$R(\lambda):\mathfrak{I}\to \mathfrak{I}$
is continuous as well.
The function $P(\lambda):=\det (e^{\lambda a}A+e^{-\lambda a}B+C-\lambda I)$,
$\lambda \in\mathbb{C}$, is a quasi-polynomial.
As is known \cite{BC}, quasi-polynomials like
$P(\lambda)$ have a countable set of isolated zeros in $\mathbb{C}$.
Since the set $\sigma_{p}(D)=\Lambda$ coincides with the zeros of $P(\lambda)$,
we conclude that the point spectrum of the operator $D$
consists of a countable number of isolated eigenvalues of finite
multiplicity $k\leq l$.
So the following theorem on the spectrum of the generator $D$ is proved:

\begin{theorem} \label{thm3.3}
The spectrum of the linear operator $D:\mathfrak{I}\to \mathfrak{I}$
is a purely point one and consists of a countable number of
isolated eigenvalues of finite multiplicity $k\leq l$.
For $\lambda \not \in\sigma_{p}(D)$ the resolvent $R(\lambda)$ of $D$
is given by formulas  \eqref{2.16} and  \eqref{2.15}.
\end{theorem}

\begin{remark}  \rm
The continuity of $R(\lambda)$ can be proved without applying Banach's 
Theorem on Inverse Operator.
Indeed, let us denote by $K$ various constants independent of $f$.
From  \eqref{2.15} it follows that $\|H_{f}\|\leq K\|f\|_0$,
where the norm $\|H_{f}\|$ of the vector $H_{f}=(h_{1}, 
\ldots,h_{l})\in \mathbb{C}^{l}$
is defined to be $\max\{|h_{i}|:i=1,\ldots, l\}$.
Hence for the first term on the right-hand side of  \eqref{2.16} we have
$\| e^{\lambda \theta}H_{f}\|_{m}\leq K\|f\|_0$.
Further for the second term in \eqref{2.16}
\begin{equation*}
e^{\lambda \theta}\int_0^{\theta}e^{-\lambda \tau}f(\tau)\, d\tau=:g(\theta)
\end{equation*}
we find that $\|g\|_0\leq K\|f\|_0$.
As $g'(\theta)=\lambda g(\theta)+f(\theta)$ and
$g''(\theta)={\lambda}^{2} g(\theta)+\lambda f(\theta)+f'(\theta)$,
it is shown by induction that for any $m\in \mathbb{N}$
the following relation holds
\begin{equation*}
g^{(m)}(\theta)={\lambda}^{m}g(\theta)
+\sum_{j=0}^{m-1}{\lambda}^{m-1-j}f^{(j)}(\theta)\,.
\end{equation*}
Whence,
\begin{equation*}
\|g\|_{m}=\|g^{(m)}\|_0\,\leq \,|\lambda|^{m}\|g\|_0\,+\,
\sum_{j=0}^{m-1}{|\lambda|}^{m-1-j}\|f\|_{j}\,.
\end{equation*}
That is,
\begin{equation*}
\|g\|_{m}\leq\, K\sum_{j=0}^{m-1}\|f\|_{j}\,, \quad  m\in \mathbb{N}\,.
\end{equation*}
Consequently, for $m\in \mathbb{N}_0$ we have
\begin{equation*}
\|R_{\lambda}f\|_{m}\leq \|e^{\lambda \theta}H_{f}\|_{m}+\|g\|_{m}
\leq K \sum_{j=0}^{m}\|f\|_{j}\,.
\end{equation*}
So for any $m\in \mathbb{N}_0$ there exists  $K=K_{m}$ independent of $f$ such that
\begin{equation*}
\|R_{\lambda}f\|_{m}\leq K \sum_{j=0}^{m}\|f\|_{j}\,.
\end{equation*}
This settles the continuity of the operator $R(\lambda)=(D-\lambda I)^{-1}$.
\end{remark}

It should be noted that the initial space $\mathfrak{I}$ is not exhausted
by finite linear combinations of the eigenfunctions of the operator $D$.
In other words, the set of all eigenfunctions of $D$ is not a Hamel basis 
\cite{ns} for $\mathfrak{I}$.
Indeed the functions from $\Phi=C^{\infty}[-a,a]$
identically equal to zero in some neighborhoods of the points $t=\pm a$ 
and $t=0$ lie in $\mathfrak{I}$.
But any of these functions is not analytical and hence
it can not be a finite linear combination of the eigenfunctions
$x_{\lambda}(\theta)=e^{\lambda \theta}H_{\lambda}$.

A direct substitution and a simple calculation show that functions of 
exponential form
$z_{\lambda}(t)=e^{\lambda t}H$, $t \in \mathbb{R}$,
where $\lambda\in \mathbb{C}$ and $H\in \mathbb{C}^{l}$,
are solutions of the homogeneous differential difference equation \eqref{1.18},
i.e., the equation $x'(t)=Ax(t+a)+Bx(t-a)+Cx(t)$,
if and only if $\lambda\in \Lambda$ and $H=H_{\lambda}$.
In other words, the function $z_{\lambda}(t)$,
$t \in \mathbb{R}$, satisfies  \eqref{1.18} if and only if
its corresponding initial function $z_{\lambda}(\theta)$, $\theta\in [-a,a]$,
coincides with the eigenfunction $x_{\lambda}(\theta)=e^{\lambda \theta}H_{\lambda}$ of the operator $D$.
It follows that the group $T(t)$
acts on the eigenfunction $x_{\lambda}(\theta)$ as follows
\begin{align*}
T(t)x_{\lambda}(\theta)
&=T(t)z_{\lambda}(\theta)=z_{\lambda}(\theta+t) \\
&=e^{\lambda (\theta+t)}H_{\lambda}
= e^{\lambda t}(e^{\lambda \theta}H_{\lambda}) \\
&=e^{\lambda t}x_{\lambda}(\theta)\,.
\end{align*}
Consequently we have $T(t)x_{\lambda}(\theta)=e^{\lambda t}x_{\lambda}(\theta)$.
Thus the eigenfunctions $x_{\lambda}(\theta)$ of
the generator $D$ corresponding to the eigenvalue $\lambda$
are as well the eigenfunctions of the operator $T(t)$ corresponding to 
the eigenvalue $e^{\lambda t}$.
It is well known that in view of spectral mapping theorems this assertion 
is valid in Banach spaces \cite{EN}.

So, between the exponential solutions $z_{\lambda}(t)$, $t \in \mathbb{R}$,
of equation \eqref{1.18} and the eigenfunctions $x_{\lambda}(\theta)$
of the operator $D$ there is a one-to-one correspondence.
But as noted above the initial space
$\mathfrak{I}$ is not exhausted by the finite linear combinations 
of the eigenfunctions
$x_{\lambda}(\theta)$, where  $\lambda\in \Lambda=\sigma_{p}(D)$.
Hence the set of solutions of equation  \eqref{1.18} also contains
a large collection of functions other than a countable set
of the exponential functions
$z_{\lambda}(t)=e^{\lambda t}H_{\lambda}$, $\lambda\in \Lambda$,
and their finite linear combinations.

\section{Retarded differential-difference equations \\ on the whole real line}

Putting $A=0$ in \eqref{1.1}, we obtain a system of linear retarded
differential difference equations of the form
\begin{equation}\label{3.1}
x'(t)=Bx(t-a)+Cx(t)+f(t)\,, \quad t \in \mathbb{R}\,.
\end{equation}

It is well known \cite{JH} that if $f\in C(\mathbb{R})$,
and $\psi \in C[-a\,,0]$ are given functions,
then there is a unique continuous function $x=x(t)$ (depending on $f$ and $\psi$)
defined on $[-a\,,+\infty)$ which coincides with $\psi$ on $[-a\,,0]$ and
satisfies equation \eqref{3.1} for $t\geq 0$ (with $x'(0)=x'(0^{+})$ 
equal to the right-hand derivative).
This solution $x=x(t)$ is explicitly calculated by the method of steps \cite{BC,JH}.
As in the proof of Theorem~\ref{thm1.1} it is easily established that
this solution $x=x(t)$ has a continuous derivative at $t=0$ if and only if
the initial function $\psi$ has a derivative at the point $t=0$ with
\begin{equation}\label{3.2}
\psi'(0)=B\psi(-a)+C\psi(0)+f(0)\,.
\end{equation}

Indeed, since $x \in C[-a\,,+\infty)$, and according to  \eqref{3.1},
 $x'(0^{+})=Bx(-a)+Cx(0)+f(0)$; that is, $x'(0^{+})=B\psi(-a)+C\psi(0)+f(0)$. 
And there must exist $x'(0^{-})=\psi'(0)$.
Hence $x'(0^{+})=x'(0^{-})$ if and only if relation  \eqref{3.2} is valid.
To extend the solution $x(t)$ further to the left, as noted in \cite{JH},
requires more smoothness of $f$ and $\psi$ and additional boundary 
conditions similar to condition \eqref{3.2}.

As in Section 2 let $B,C$ be square complex $l$ by $l$ matrices with $B$ invertible, 
$a>0$ be fixed, and $f\in C^{\infty} (\mathbb{R}\,,{\mathbb{C}}^{l})$.
We are interested in solving the following problem: What conditions should 
the function $\psi$ satisfy for the solution $x=x(t)$ of \eqref{3.1} 
to be extendible to the interval $(-\infty,0]$; that is,
to be defined and to satisfy  \eqref{3.1} on the whole real line $\mathbb{R}$?
Proposition \ref{prop1.1} clearly remains valid in the case of equation \eqref{3.1},
therefore if $f\in C^{\infty} (\mathbb{R})$ and a function $x\in AC(\mathbb{R})$
satisfies equation \eqref{3.1} for almost all $t\in \mathbb{R}$, then
$x\in C^{\infty} (\mathbb{R})$ and equation \eqref{3.1} is fulfilled 
for all $t\in \mathbb{R}$.
So in what follows we consider only smooth solutions 
$x\in C^{\infty} (\mathbb{R})$ of  \eqref{3.1}
defined on the whole real line $\mathbb{R}$.

Let $x$ be such a smooth solution. We define $\psi:=x|_{[-a\,,\,0]}$
to be a corresponding initial function which is necessarily smooth,
i.e., $\psi\in C^{\infty}{[-a,0]}$.
As in  Section 2  we will first show that if we know the initial function $\psi$
then combining the step integration method and the step derivation method
we can restore the solution $x(t)$ on the whole real line $\mathbb{R}$.
The cases $t> 0$ and $t<-a$ should be treated separately.
For $t> 0$ we apply the step integration method \cite{JH} and for 
$t<-a$ the step derivation method.
The corresponding formulas will be used later in proving the main theorem 
of this section.

Let $t> 0$. We rewrite equation \eqref{3.1} as follows
\begin{equation}\label{3.02}
x'(t)-Cx(t)=g(t)\;, \quad t>0\;,
\end{equation}
with $g(t):=Bx(t-a)+f(t)$. The solution of the linear inhomogeneous 
system  \eqref{3.02}
of differential equations of the first order with $x(0)=\psi(0)$ 
is given by the variation-of-constants formula
\begin{equation}\label{3.3}
x(t)=e^{Ct} \psi(0) +\int_0^{t}e^{C(t-s)}[Bx(s-a)+f(s)]\, ds\,.
\end{equation}
Now using the step integration method we will find $x(t)$ for all $t>0$.
Indeed, if $t\in [0\,,a]$, then $s-a \in [-a\,,0]$, and so according 
to  \eqref{3.3}
\begin{equation}\label{3.4}
x(t)=e^{Ct}\, \psi(0) +\int_0^{t}e^{C(t-s)}[B\psi(s-a)+f(s)]\, ds\,.
\end{equation}
Thus formula \eqref{3.4} restores uniquely $x(t)$ on the interval $[0,a]$.
We proceed by induction. Suppose that for arbitrary and fixed $m\in \mathbb{N}$
we know $x(t)$ on the interval $I_{m}=[-a\,,ma]$, and let $x|_{I_{m}}=:\psi_{m}$.
If $t\in [ma,(m+1)a]$, then in  \eqref{3.3} $s-a\in I_{m}$ and therefore
\begin{equation}\label{3.5}
x(t)=e^{Ct} \psi(0) +\int_0^{t}e^{C(t-s)}[B\psi_{m}(s-a)+f(s)]\, ds\,.
\end{equation}
This means that $x$ is found on the interval $[ma\,,(m+1)a]$ as well.
So by induction the solution $x(t)$ is uniquely determined on every interval
$I_{n}=[-a\,,na]$, $n\in \mathbb{N}$, and hence for all $t>0$.

Now let $t<-a$. In this case proceeding exactly as in Section 2
we rewrite equation  \eqref{3.1} in the form
\begin{equation}\label{3.6}
x(t-a)=B^{-1}x'(t)-B^{-1}Cx(t)-B^{-1}f(t)\,.
\end{equation}
After making the change of variable $s:=t-a$ we obtain
\begin{equation}\label{3.7}
x(s)=B^{-1}x'(s+a)-B^{-1}Cx(s+a)-B^{-1}f(s+a)\,.
\end{equation}
If $s\in [-2a,-a]$, then $s+a\in [-a,0]$.
Hence according to \eqref{3.7} for $s\in [-2a,-a]$ we have
\begin{equation}\label{3.8}
x(s)=B^{-1}\psi'(s+a)-B^{-1}C\psi(s+a)-B^{-1}f(s+a)\,,
\end{equation}
that uniquely determines $x(s)$ for $s\in [-2a\,,-a]$.
Now suppose by induction that for arbitrary and fixed $m\in \mathbb{N}$
we know $x(t)$ on the interval $[-ma,-(m-1)a]$.
Then the right-hand side of  \eqref{3.7} is determined for  
$s\in [-(m+1)a,-ma]$.
It follows that the function $x(s)$ on the left-hand side of  
\eqref{3.7} is determined
for $s\in [-(m+1)a,-ma]$ as well. This means by induction that $x(t)$ is restored
on every interval $[-(n+1)a,-na]$,  $n\in \mathbb{N}$, that is, for all $t<-a$.

Now we can take an arbitrary function  $\psi\in C^{\infty}{[-a,0]}$
and consider it as an ``initial function''.
By combining the step integration method and the step derivation method 
described above we construct a function $x(t)$, $t \in \mathbb{R}$.
Using formulas \eqref{3.3} and   \eqref{3.7} it is easily established by induction
that $x\in C^{\infty}{[ma\,,(m+1)a]}$, $m\in \mathbb{Z}$, and
that $x$ satisfies equation \eqref{3.1} for $t\in (ma,(m+1)a)$, 
$m\in \mathbb{Z}$.
At the same time at the points $t_{m}=ma$, $m\in \mathbb{Z}$,
the function $x(t)$ and all its derivatives can have jump discontinuities.
For the constructed function $x=x(t)$ to satisfy equation \eqref{3.1} 
for all $t \in \mathbb{R}$
and to be from $C^{\infty} (\mathbb{R})$ the initial function $\psi$
should satisfy some additional conditions like in Theorem~\ref{thm1.1}.
These conditions are contained in the next theorem.

\begin{theorem} \label{thm4.1}
Let $\psi\in C^{\infty}{[-a,0]}$ and $f\in C^{\infty} (\mathbb{R})$ 
be given functions.
A solution $x=x(t)$ to equation \eqref{3.1} satisfying the initial condition 
$x|_{[-a, 0]}=\psi$
and constructed by the method of steps belongs to $C^{\infty} (\mathbb{R})$
if and only if the following conditions are fulfilled
\begin{equation}\label{3.9}
\psi^{(n+1)}(0)=B\psi^{(n)}(-a)+C\psi^{(n)}(0)+f^{(n)}(0)\,, \quad
n\in \mathbb{N}_0 \,.
\end{equation}
\end{theorem}

\begin{proof} Necessity. As in Theorem \ref{thm1.1}
it suffices to take the $n$-th derivative  of the both sides of  \eqref{3.1}
and put $t=0$. Since $x(t)=\psi(t)$ for $t\in [-a\,,0]$, we are done.

Sufficiency. Since for $t\in [0\,,a]$ the integrand in  \eqref{3.4} is 
infinitely differentiable,
the left-hand side of  \eqref{3.4}, the function $x(t)$, belongs to  
$C^{\infty}{[0,a]}$.
So $x\in C^{\infty}{[-a,0]} \cap C^{\infty}{[0,a]}$.
Hence we have to show that $x^{(n)}(t)$ is continuous at the point
$t=0$ for all $n\in\mathbb{N}_0$.
According to  \eqref{3.4} $x(0^{+})=\psi(0)$, whereas
$x(0^{-})=\psi(0)$ by the initial condition.
Hence $x(0^{-})=x(0^{+})$; that is, the function $x$ is continuous at $t=0$.
Assume by induction that for an arbitrary $k\in\mathbb{N}_0$ we have
$x^{(k)}(0^{+})=x^{(k)}(0^{-})$; that is, $x^{(k)}(0^{+})=\psi^{(k)}(0)$
as $x|_{[-a,0]}=\psi$.
It follows from  \eqref{3.1} that
\begin{equation}\label{3.10}
x^{(k+1)}(0^{+})=Bx^{(k)}(-a^{+})+Cx^{(k)}(0^{+})+f^{(k)}(0^{+}) \,;
\end{equation}
that is,
\begin{equation}\label{3.11}
x^{(k+1)}(0^{+})=B\psi^{(k)}(-a)+C\psi^{(k)}(0)+f^{(k)}(0) \,.
\end{equation}
As obviously $x^{(k+1)}(0^{-})=\psi^{(k+1)}(0)$,
the equality $x^{(k+1)}(0^{+})=x^{(k+1)}(0^{-})$ is fulfilled because of \eqref{3.9}.
Thus, by induction $x^{(n)}(t)$ is continuous at zero for all $n\in\mathbb{N}_0$.
Consequently, condition \eqref{3.9} guarantees that
the solution $x(t)$ constructed by the method of steps belongs
to $C^{\infty}{[-a,a]}$.

We proceed by induction on $m\in \mathbb{N}$. Let $I_{m}:=[-a\,,ma]$.
Assume that $x\in C^{\infty}{(I_{m})}$.
If $t\in I_{m+1}=[-a\,,(m+1)a]$, then in  \eqref{3.3} $s-a\in I_{m}$.
Therefore the integrand in   \eqref{3.3} belongs to $C^{\infty}{(I_{m+1})}$.
It follows that the function $x$ on the left-hand side of  \eqref{3.3}
belongs to $C^{\infty}{(I_{m+1})}$ as well.
By induction we infer that $x\in C^{\infty}{(I_{m})}$ for all $m\in \mathbb{N}$,
that is  $x\in C^{\infty}{[-a,+\infty)}$.

 We pass to the case $t\leq -a$. Here the fact that the function $x(t)$
 constructed by the step derivation method belongs to $C^{\infty}{(-\infty\,,0]}$
 is established in just the same way as in Theorem~\ref{thm1.1}.
 Besides we can also consider the case of equation \eqref{3.1} for $t\leq-a$
 as a particular case of equation \eqref{1.1} with the matrix $A=0$.
 (The existence of $A^{-1}$ was only needed for constructing the solution of
 \eqref{1.1} to the right.)
 Indeed, as is already shown above the function $\varphi:=x|_{[-a, a]}$ 
is infinitely differentiable.
 Since $A=0$ and $\varphi|_{[-a, 0]}=\psi$ the relation \eqref{1.6}
 is written in the form of  \eqref{3.9}.
 Therefore the solution $x(t)$ extended to the left by means of
 the step derivation method will be from $C^{\infty}{(-\infty,a]}$,
 and the proof is complete.
\end{proof}

\begin{remark}  \rm
It follows from the proof of Theorem~\ref{thm4.1} that if the function $x=x(t)$
constructed by the method of steps is infinitely differentiable at the point 
$t=0$ or $t=-a$,
then necessarily  $x\in C^{\infty}{(\mathbb{R})}$.
\end{remark}

In other aspects equation \eqref{3.1} is quite analogous to equation \eqref{1.1}.
To get all other statements concerning \eqref{3.1}
it is only needed to make negligible changes in all the subsequent results
for  \eqref{1.1} obtained in Sections 2 and 3.
To be exact, in place of the intervals $[-a,a]$ and $[\tau-a,\tau+a]$
we should take the intervals $[-a,0]$ and $[\tau-a,\tau]$ respectively
and put the matrix $A$ equal to zero.
The reason is that the existence of the inverse matrix $A^{-1}$
was only needed in Section 2 for constructing solutions of equation \eqref{1.1}
to the right on the real line.



\begin{thebibliography}{99}


\bibitem{BC} R. Bellman, K. L. Cooke;
 \emph{Differential-Difference Equations. A series of Monographs and Textbooks.}
Academic Press, (1963).

\bibitem{bDel} E. Buksman, J. De Luca;
 \emph{Two-degree-of-freedom Hamiltonian for the time-symmetric two-body
problem of the relativistic action-at-a-distance electrodynamics.}
Physical Review E,  \textbf{67} (2003), 026219.

\bibitem{cbh} H. Chi, J. Bell, B. Hassard;
\emph{Numerical solution of a nonlinear advance-delay-differential
equation from nerve conduction theory.} J. Math. Biol. \textbf{24} (1986), 583--601.

\bibitem{JH} Jack K. Hale; \emph{Theory of Functional Differential Equations},
Applied Mathematical Sciences, Vol. 3, Springer-Verlag, (1977).

\bibitem{EN} K. J. Engel, R. Nagel;
\emph{One-Parameter Semigroups for Linear Evolution Equations},
Springer-Verlag, (2000).

\bibitem{fl} N. J. Ford, P. M. Lumb;
\emph{Mixed-type functional differential equations: a numerical approach.}
Journal of computational and applied mathematics. \textbf{229}, No. \textbf{2}, (2009), 471--479.

\bibitem{gs} I. M. Gelfand, G. E. Schilov;
\emph{Generalized Functions.}, V. 2, Academic Press, (1968).

\bibitem{ISV} S. I. Iakovlev, V. Iakovleva;
\emph{Eigenvalue-eigenfunction problem for Steklov's smoothing operator
and differential-difference equations of mixed type.}
Opuscula Mathematica \textbf{33}, No. \textbf{1}, (2013), 81--98.

\bibitem{iv} V. Iakovleva, C. J. Vanegas;
\emph{On the solution of differential equations with delayed and advanced arguments}
Electronic Journal of Differential Equations., Conf. \textbf{13} (2005), 57--63.

\bibitem{IV} V. Iakovleva, C. J. Vanegas;
\emph{Spectral analysis of the semigroup associated
to a mixed functional differential equation.}
International Journal of Pure and Applied Mathematics.
\textbf{72}, No. \textbf{4}, (2011), 491--499.

\bibitem{kt} A. Kaddar, H. Talibi Alaoui;
\emph{Fluctuations in a mixed IS-LM business cycle model.}
Electronic Journal of Differential Equations.  No. \textbf{134} (2008), 1--9.

\bibitem{mv} J. Mallet-Paret, S. M. Verduyn Lunel;
 \emph{Mixed-type functional differential equations,
holomorphic factorization and applications.}  Proc. of Equadiff 2003, Inter.
Conf. on Diff. Equations. HASSELT 2003, World Scientific, Singapore (2005), 73--89.

\bibitem{M} S. Misohata;
 \emph{Theory of Equations with Partial Derivatives}, Mir, Moscow, (1977).

\bibitem{ns} Arch W. Naylor, George R. Sell;
 \emph{Linear Operator Theory in Engineering and Science,
Applied Mathematical Science 40}, Springer, (2000).

\bibitem{rus} A. Rustichini;
 \emph{Functional differential equations of mixed type:
the linear autonomous case.} Journal of Dynamics and Differential Equations.
\textbf{1(2)} (1989), 121--143.

\bibitem{rus1} A. Rustichini;
 \emph{Hopf bifurcation of functional differential equations of mixed type.}
Journal of Dynamics and Differential Equations. \textbf{1(2)} (1989), 145--177.

\bibitem{Sch} M. A. Schubin;
 \emph{Lectures on Equations of Mathematical Physics},
MCNMO, Moscow, (2001). (in Russian)

\bibitem{Y} K. Yosida;
\emph{Functional Analysis}, Springer-Verlag, (1965).

\end{thebibliography}

\end{document}
