\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2008(2008), No. 54, pp. 1--6.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu  (login: ftp)}
\thanks{\copyright 2008 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2008/54\hfil Liapunov exponents]
{Liapunov exponents for higher-order linear
differential equations whose characteristic equations have
variable real roots}


\author[M. I. Gil'\hfil EJDE-2008/54\hfilneg]
{Michael I. Gil'}

\address{Michael I. Gil' \newline
Department of Mathematics \\
Ben Gurion University of the Negev \\
P.O. Box 653, Beer-Sheva 84105, Israel}
\email{gilmi@cs.bgu.ac.il}

\thanks{Submitted December 27, 2007. Published April 15, 2008.}
\thanks{Supported by the Kamea Fund of Israel.}
\subjclass[2000]{34A30, 34D20}
\keywords{Linear differential equations; Liapunov exponents;
\hfill\break\indent exponential stability}

\begin{abstract}
We consider  the  linear  differential equation
$$
\sum_{k=0}^n  a_k(t)x^{(n-k)}(t)=0,\quad t\geq 0, \; n\geq 2,
$$
where $a_0(t)\equiv 1$, $a_k(t)$ are continuous bounded functions.
Assuming that all the roots of the polynomial
$z^n+a_1(t)z^{n-1}+ \dots +a_n(t)$ are real and satisfy the inequality
$r_k(t)<\gamma$ for $t\geq 0$ and $k=1, \dots, n$,
we prove that the solutions of the above equation satisfy
$|x(t)|\leq  \mathop{\rm const} e^{\gamma t}$ for  $t\geq 0$.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}

\section{Introduction and statement of the main result}

Consider the scalar equation
\begin{equation}
\sum_{k=0}^n  a_k(t)D^{n-k}x(t)=0,\quad t>0,
\label{e1.1}
\end{equation}
where $D^kx(t):=\frac{ d^k x(t)}{dt^k}$, $a_0(t)\equiv 1$, and $a_k(t)$
are continuous functions defined and bounded on $[0,\infty)$ for $k=1, \dots, n$.
As initial conditions, we have
\begin{equation}
x^{(k)}(0)=x_{0k} \quad (x_{0k}\in\mathbb{R};\; k= 0,  \dots, n-1).
\label{e1.2}
\end{equation}

A solution of problem \eqref{e1.1}--\eqref{e1.2}
is a function $x(t)$ having continuous derivatives up to order $n$
and satisfying  \eqref{e1.1} and \eqref{e1.2} for all $t>0$.  Put
$$
P(z, t)=\sum_{k=0}^n  a_k(t)z^{n-k}\quad (z\in\mathbb{C}).
$$
Levin \cite[Section 5]{lea} proved the following result,
among other remarkable results:
Suppose that the roots $r_1(t), \dots, r_n(t)$ of
$P(z, t)$ for each $t\geq 0$ are  real and satisfy
\begin{equation}
\nu_0\leq r_1(t)< \nu_1\leq r_2(t)< \nu_2\leq  \dots < \nu_{n-1}
\leq r_n(t)\leq \gamma\quad
(t\geq 0),
\label{e1.3}
\end{equation}
where $\nu_j$ ($j=0, \dots, n-1$) and $\gamma$ are  constants.
Then any   solution $x(t)$
of \eqref{e1.1} satisfies the inequality
\begin{equation}
|x(t)|\leq {\rm const}\; e^{\gamma t}\quad (t\geq 0).
\label{e1.4}
\end{equation}
This result is very useful for various applications, see for
instance \cite{gi04,gi05,lib} and references therein.
The aim of this paper is to prove  the following theorem.

\begin{theorem} \label{thm1.1}
Assume that all the roots $r_k(t)$ of polynomial $P(z,t)$ for each
$t\geq 0$ are real and
\begin{equation}
r_k(t)<\gamma\quad (t\geq 0;\; k=1, \dots, n)
\label{e1.5}
\end{equation}
with a constant $\gamma<\infty$. Then  any solution $x(t)$ of  \eqref{e1.1}
satisfies  inequality \eqref{e1.4}.
\end{theorem}

This theorem is proved in the next section.
Condition \eqref{e1.5} is weaker than  \eqref{e1.3},
since \eqref{e1.3} does not allow the roots to intersect.

Theorem \ref{thm1.1} supplements the very interesting recent investigations
of asymptotic behavior of solutions of differential equations,
cf. \cite{be,ca,car,ho,il,mo}.


Clearly,  Theorem \ref{thm1.1} gives us the exponential stability conditions.
Note that  the problem of stability analysis of various linear
differential equations  continues to attract the attention of many
specialists despite its long history \cite{de,gi05a,hov,li,tu}.
 It is still one of the most burning problems of
 the theory of differential equations.
The basic method for the stability  analysis of  differential equations
 is the direct Liapunov method.
By  this method many very strong  results
 are obtained, but finding Liapunov's functions is often
connected with serious mathematical difficulties.
At the same time, Theorem \ref{thm1.1}, gives us the exact explicit
 stability conditions.

\section{Proof of Theorem \ref{thm1.1}}

Put $R_+:=[0,\infty)$ and denote by $C(R_+)$ the Banach space of functions
continuous and bounded on $R_+$ with the sup norm $\|\cdot\|$.
Let us consider the nonhomogeneous equation
\begin{equation}
\sum_{k=0}^n  a_k(t)D^{n-k}v(t)=f(t), \quad t>0,
\label{e2.1}
\end{equation}
where $f\in C(R_+)$ and with the zero initial conditions
\begin{equation}
v^{(k)}(0)=0 \quad (k=0, 1, \dots, n-1).
\label{e2.2}
\end{equation}
Introduce the set
$$
\mathop{\rm Dom}(L):=\{w\in C(R_+):  w^{(k)}\in C(R_+),\;
 w^{(k)}(0)=0\;( k=0, 1, \dots, n-1)\}.
$$
\begin{lemma} \label{lem2.1}
Under the hypothesis of Theorem \ref{thm1.1}, with $\gamma<0$, problem
\eqref{e2.1}--\eqref{e2.2}  has a unique  solution $v\in \mathop{\rm Dom}(L)$. Moreover,
$$
\|v\|\leq \frac{\|f\|}{|\gamma|^{n}}.
$$
\end{lemma}

\begin{proof}
For  $w$ in $\mathop{\rm Dom}(L)$, define the operator
$$
Lw(t):=P(D, t)w=\sum_{k=0}^n  a_k(t)D^{n-k}w(t).
$$
So that \eqref{e2.1} can be written  as $Lv(t)=f(t)$.
Since the coefficients of equation \eqref{e2.1} are bounded, the roots of
$P(z,t)$ are bounded on $R_+$. Thus,
$$
r_k(t)\geq -\alpha\quad (t\geq 0;\;k=1, 2, \dots, n)
$$
for a finite positive number $\alpha$.
On $\mathop{\rm Dom}(L)$ also define the operator $L_0$ by
$$
L_0f(t):= (D+\alpha )^nf(t)=(\frac{d}{dt}+\alpha)^n f(t).
$$
Then the inverses to $L$ and $L_0$ satisfy the relations
\begin{equation}
L^{-1} = L_0^{-1} L_0 L^{-1}=L_0^{-1} (L L_0^{-1} )^{-1}.
\label{e2.3}
\end{equation}
Below we check that $L_0$ and $L L_0^{-1}$ are really invertible. 
By the Laplace transform for any $y\in C(R_+)$ we have
$$
L_0^{-1} y(t)=\frac 1{2\pi i} \int_{-i\infty}^{i\infty}
\frac{ e^{\lambda t} \tilde y(\lambda)} {(\lambda+\alpha )^n}\;d\lambda
$$
where $\tilde y$ is the Laplace transform of $y$. So
$$
f_0(t):=(LL_0^{-1} y)(t)=\frac 1{2\pi i}
\int_{-i\infty}^{i\infty} \frac{ e^{\lambda t} P(\lambda, t)\tilde
y(\lambda)d\lambda}{(\lambda+\alpha )^n}.
$$
Hence,
$$
f_0(t)=\frac 1{2\pi i} \int_{-i\infty}^{i\infty}  e^{\lambda t} \tilde y(\lambda)
\prod_{k=1}^n \frac{\lambda-r_k(t)}{\lambda+\alpha }\;d\lambda.
$$
Put
$$
F(t, \nu)=\frac 1{2\pi i} \int_{-i\infty}^{i\infty}  e^{\lambda t} \tilde y(\lambda)
\prod_{k=1}^n \frac{\lambda-r_k(\nu)}{\lambda+\alpha }\;d\lambda\quad (t, \nu\geq 0).
$$
Thus $F(t, t)=f_0(t)$.
We can write out
$$
F(t,\nu)=\frac 1{2\pi i} \int_{-i\infty}^{i\infty}
e^{\lambda t} \tilde y_1(\lambda, \nu) \frac{\lambda-r_1(\nu)}{\lambda+\alpha }\;d\lambda
$$
where
$$
\tilde y_j(\lambda, \nu):= \prod_{k=j+1}^n \frac{\lambda-r_k(\nu)}{\lambda +\alpha }\tilde y(\lambda)=
\tilde y_{j+1}(\lambda, \nu) \frac{\lambda-r_{j+1}(\nu)}{\lambda+\alpha }
$$
for $j<n$,  and $\tilde y_n(\lambda, \nu)\equiv \tilde y(\lambda)$.
So
$$
\tilde y_j(\lambda, \nu)= \tilde y_{j+1}(\lambda, \nu)
\big(1-\frac{\alpha+r_{j+1}(\nu)}{\lambda+\alpha }\big).
$$
Let  $y_{j}(t,\nu)$ ($j<n$) be the Laplace original
 of  $\tilde y_j(\lambda, \nu)$  with respect to $\lambda$.
Then by the convolution property,
\begin{equation}
F(t, \nu)=y_1(t, \nu)-(\alpha +r_1(t))\int_0^t e^{-\alpha (t-s)}y_1(s, \nu) ds
\label{e2.4}
\end{equation}
and
\begin{equation}
y_j(t, \nu)=y_{j+1}(t, \nu)-(\alpha+r_{j+1}(\nu))\int_0^t
e^{-\alpha(t-s)}y_{j+1}(s, \nu) ds
\label{e2.5}
\end{equation}
for $j=1, \dots, n-1$.  Besides $y_{n}(t, \nu)\equiv y(t)$. Put
$$
\beta=-\gamma=|\gamma|.
$$
Then  $-r_{j}(\nu)> \beta$ ($\nu\geq 0$) and
\begin{equation}
|y_j(t, \nu)|\geq |y_{j+1}(t, \nu)|-(\alpha-\beta)\int_0^t
e^{-\alpha(t-s)}|y_{j+1}(s, \nu)| ds.
\label{e2.6}
\end{equation}
Thus, with the notation
$$
\eta_j:=\sup_{t\geq 0}|y_{j}(t, t)|\;\;(j<n),\quad \eta_n:=\sup_{t\geq 0}|y(t)|=\|y\|
$$
we have
$$
\eta_{j+1}\leq \eta_j + (\alpha-\beta)\eta_{j+1}\sup_{t\geq 0}\int_0^t
e^{-\alpha(t-s)}ds=\eta_j + \frac{(\alpha-\beta)\eta_{j+1}}{\alpha}.
$$
Consequently,
$$
\eta_{j+1}\leq \frac{\alpha}{\beta}\eta_j\quad (j=1, \dots, n-1;\;n\geq 2).
$$
Thus taking into account that $F(t,t)=f_0(t)$, according to
\eqref{e2.4} and \eqref{e2.5}, we
arrive at
$$
\|y\|=\eta_n\leq
\frac{\eta_{n-1} \alpha}{\beta}
\leq \frac{\eta_{n-2} \alpha^2}{\beta^2}\leq \dots
\leq \eta_1 \frac{\alpha^{n-1}}{\beta^{n-1}}\leq \|f_0\|\frac{\alpha^{n}}{\beta^{n}}.
$$
But $LL_0^{-1} y=f_0$. Consequently, $y=(LL_0^{-1})^{-1} f_0$ and we get
the inequality
$$
\frac{\alpha^{n}}{\beta^{n}} \|f_0\|\geq  \|(LL_0^{-1})^{-1} f_0\|
$$
for an arbitrary $f_0\in C(R_+)$. So
\begin{equation}
\|(LL_0^{-1})^{-1}\|\leq \frac{\alpha^{n}}{\beta^{n}}.
\label{e2.7}
\end{equation}
Furthermore, take into account that
$$
L_0^{-1} y(t)=\frac 1{2\pi i} \int_{-i\infty}^{i\infty} \frac{ e^{\lambda t} \tilde y(\lambda)}
{(\lambda+\alpha )^n}\;d\lambda=\int_0^t \tilde Q(t-s)y(s)ds\quad
(y\in C(R_+)),
$$
where
$$
\tilde Q(t)=\frac 1{2\pi i} \int_{-i\infty}^{i\infty}
\frac{ e^{\lambda t}} {(\lambda+\alpha )^n}\;d\lambda\quad (n\geq 2).
$$
By the Cauchy formula for derivatives, we have
$$
\tilde Q(t)=\frac{t^{n-1}}{(n-1)!}e^{-\alpha t}\quad (t\geq 0).
$$
Hence,
\begin{align*}
\|L_0^{-1} y\|& =\sup_{t\geq 0}|\int_0^t \tilde Q(t-s)y(s)ds|\\
&\leq \|y\|\sup_{t\geq 0}\int_0^t \frac{(t-s)^{n-1}}{(n-1)!}e^{-\alpha (t-s)}ds\\
&=\|y\|\int_0^\infty \frac{s^{n-1}}{(n-1)!}e^{-\alpha s}ds\\
&=   \|y\|\frac{1}{\alpha^{n}}\;\;(y\in C(R_+)).
\end{align*}
So
$$
\|L_0^{-1}\|\leq \frac{1}{\alpha^{n}}.
$$
Now \eqref{e2.3} and \eqref{e2.7} imply
$$
\|L^{-1}\|\leq \|L_0^{-1}\|\|(L L_0^{-1} )^{-1}\|\leq \frac{1}{\beta^{n}}.
$$
Since $\beta=|\gamma|$, this proves the required result.
\end{proof}


\begin{lemma} \label{lem2.2}
Under the hypothesis of Theorem \ref{thm1.1} with $\gamma<0$,
a solution $x(t)$ of  \eqref{e1.1}--\eqref{e1.2}
satisfies the inequality
$$
|x(t)|\leq M_2\|\hat x_0\|_n\quad (t\geq 0).
$$
where
$\|\hat x_0\|_n$ is an arbitrary  norm of the initial vector
$\hat x_0=(x_{00}, \dots, x_{0 n-1})$ and the constant $M_2$
does not depend on the initial vector.
\end{lemma}

\begin{proof} Put
$$
g(t)=\sum_{k=0}^{n-1} v_k t^k e^{-c t}
$$
with a positive $c<|\gamma|$ and real constants $v_k$ ($k\geq 1$),
$v_0=x_{00}$. Clearly
\begin{gather*}
 g'(t)=\sum_{k=0}^{n-1} v_k (-c t^k+kt^{k-1}) e^{-c t},\quad g'(0)=-v_0c+v_1,
\\
 g^{(j)}(t)=e^{-c t}\sum_{k=0}^{n-1} v_k \sum_{l=0}^j C_{j}^l (-c)^{j-l}
\frac{k!}{(k-l)!}t^{k-l}
 \quad (j=2, \dots, n-1)
\end{gather*}
where $C_j^k$ are the binomial coefficients. So
$$
 g^{(j)}(0)=\sum_{k=0}^j v_k k! C_{j}^k (-c)^{j-k} \quad (j=2, \dots, n-1).
$$
Then solving the recursion equation
$$
\sum_{k=0}^j v_k k! C_{j}^k (-c)^{j-k}=x_{0j}
$$
with respect to $v_k$, we get
$$
 g^{(j)}(0)=x_{0j} \;\;(j=0, \dots, n-1).
$$
Now put in \eqref{e1.1} $x(t)=v(t)+g(t)$. Then $v$ is a solution of
problem \eqref{e2.1}, \eqref{e2.2} with
$$
f(t)=-P(D,t)g(t).
$$
It is clear that  all derivatives of $g$ are bounded.
Since $a_k(t)$ are bounded, simple calculations show
that  $\|f\|\leq \mathop{\rm const}\;\|\hat x_0\|_n$. But
by the previous lemma  $\|v\|\leq {\rm const}\;\|f\|$,
and therefore,
$$
\|x\|\leq \|v\|+\|g\|\leq {\rm const}\;\|\hat x_0\|_n,
$$
as claimed.
\end{proof}

\begin{proof}[Proof of Theorem \ref{thm1.1}] In \eqref{e1.1} put
\begin{equation}
x(t)=w(t) \exp[bt]
\label{e2.8}
\end{equation}
with a real constant $b$.
Evidently,
$$
\sum_{k=0}^n  a_{n-k}(t)D^k e^{bt}w= e^{bt}
\sum_{k=0}^n  a_{n-k}(t)\sum_{j=0}^k C_k^j b^{k-j} D^jw=
e^{bt}\sum_{k=0}^n  a_{n-k}(t)(D+b)^kw.
$$
So $w$ satisfies the equation
\begin{equation}
P(D+b, t)w=0.
\label{e2.9}
\end{equation}
Take $b=\gamma+\epsilon$ with a positive $\epsilon$ small enough.
Under \eqref{e1.5} the roots $\tilde r_j(t)$ of $P(z+b, t)$ satisfy
the inequality $\tilde r_j(t)\leq \gamma-b=-\epsilon$.
The previous lemma  asserts that
any solution $w$ of equation \eqref{e2.9} is bounded on $R_+$.
 Now \eqref{e2.8} proves the theorem.
\end{proof}

\begin{thebibliography}{00}

\bibitem{be}
Bela\"{\i}di, Benharrat;
 Estimation of the hyper-order of entire solutions of complex linear ordinary
    differential equations whose coefficients are entire functions.
{\em Electron. J. Qual. Theory Differ. Equ. 2002}, Paper No. 5, 8 p., electronic
    only (2002).

\bibitem{bu} Burton, T. A.;
{\em Stability and periodic solutions of ordinary and functional differential
    equations}. Mathematics in Science and Engineering, Vol. 178.
    Academic     Press, Orlando   1985.

\bibitem{car} Caraballo, T.;
On the decay rate of solutions of non-autonomous differential systems,
{\em Electron. J. Diff. Eqns., Vol. 2001}, No. 05,  1-17 (2001).

\bibitem{ca} Carbonell, F.; Jimenez, J.C.; Biscay, R.;
 A numerical method for the computation of the Lyapunov exponents of
    nonlinear ordinary differential equations.
{\em Appl. Math. Comput. 131}, No.1, 21-37 (2002).

\bibitem{de}   De la Sen, M.;
  Robust stability of a class of linear time-varying systems.
{\em IMA J. Math. Control Inf. 19}, No.4, 399-418 (2002).

\bibitem{gi04} Gil', M. I.;
 A new stability test for nonlinear nonautonomous systems,
{\em Automatica, 42}, (2004), 989-997.

\bibitem{gi05} Gil', M. I.;
{\em Explicit Stability Conditions for Continuous Systems},
Lectures Notes In Control and Information Sci, Vol. 314,
Springer Verlag, 2005.

\bibitem{gi05a} Gil', M. I.;
Stability of nonlinear systems with differentiable linear parts,
{\em Circuits,  Systems and Signal Processing 24}, No 3, (2005), 242-251.

\bibitem{ho}  Hoang Nam;
 The central exponent and asymptotic stability of linear differential
    algebraic equations of index 1.
{\em Vietnam J. Math. 34}, No. 1, 1-15 (2006).

\bibitem{hov} Hovhannisyan, G. R.;
 Asymptotic stability for second-order differential equations with
complex coefficients,
{\em Electron. J. Diff. Eqns., Vol. 2004}, No. 85,  1-20 (2004).

\bibitem{il}  Illarionova, O. G.;
 Stability of the $k$th general exponent of a linear system of differential
 equations. {\em Differ. Equations 32}, No.9, 1173-1176 (1996);
 translation from Differ.     Uravn. 32, No.9, 1171-1174 (1996).

\bibitem{lea} Levin, A. Yu.;
 Non-oscillations of solutions of the equation
$x^{(n)}(t) + p_1(t) x^{(n-1)}(t) + \dots +  p_n(t)x(t)=0$, {\em Russian
Mathematical Surveys, 24(2)}, 43-99 (1969).

\bibitem{lib} Liberzon, M. R.;
 Essays on the absolute stability theory. {\em
Automation and Remote Control, 67}, No. 10, 1610-1644 (2006).

\bibitem{li} Linh, N. M.  and V. N. Phat;
 Exponential stability of nonlinear time-varying differential
equations and applications,
{\em Electron. J. Diff. Eqns., Vol. 2001, No. 34}, 1-13 (2001).

\bibitem{mo} Morozov, O. I.;
 A criterion for upper semistability of the highest Lyapunov exponent of a
    nonhomogeneous linear system.
 {\em Differ. Equations 28}, No.4, 473-478 (1992); translation from Differ.
Uravn. 28, No.4, 587-593 (1992).

\bibitem{tu} Tunc, C.;
 Stability and boundedness of solutions to certain fourth-order
 differential equations,
{\em Electron. J. Diff. Eqns., Vol. 2006}, No. 35,  1-10  (2006).

\end{thebibliography}
\end{document}
