\documentclass[reqno]{amsart}
\usepackage{hyperref}
\usepackage{graphicx}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2011 (2011), No. 133, pp. 1--9.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu}
\thanks{\copyright 2011 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2011/133\hfil Limit cycles and bounded trajectories]
{Limit cycles and bounded trajectories for a
 nonlinear second-order differential equation}

\author[H. Gonz\'alez \hfil EJDE-2011/133\hfilneg]
{Henry Gonz\'alez}

\address{Henry Gonz\'alez \newline
Faculty of Light Industry and Environmental
Protection Engineering \\
Obuda University \\
1034 Budapest, B\'ecsi \'ut 96/B, Hungary}
\email{gonzalez.henry@rkk.uni-obuda.hu}

\thanks{Submitted April 15, 2011. Published October 13, 2011.}
\subjclass[2000]{34C05, 70K05}
\keywords{Ordinary differential equations;
 phase plane analysis; limit cycles; \hfill\break\indent
 maximum deviation trajectories}

\begin{abstract}
 In this article, we determine the trajectories of maximum
 deviation, and the closed trajectories of maximum deviation
 for nonlinear differential equations of the form
 $$
 \ddot y+2a(t,y,\dot y) \dot y+b(t,y, \dot y)y=c(t,y,\dot y)
 $$
 where the coefficients and the right-hand side are piecewise
 continuous functions in $t$ and continuous in $y,\dot y$.
 Also we find necessary and sufficient conditions for
 the boundedness of all the trajectories.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{example}[theorem]{Example}

\section{Introduction}\label{sec.int}

In the application of mathematical  models to real problems, after
linearization, simplifications and other adaptations of the models,
frequently we have to investigate the solutions of differential
systems which contain uncertain parameters. One of the methods to
attack this problem is the extremal principle which consists in
the determination of trajectories that are the solution of some
optimal problems and with their help we can determine the
considered properties of all the trajectories of our differential
system. In 1946, Bulgakov \cite{B} applied this method to an
$n$-th order differential equation with constant coefficients and with
uncertainty in the right-hand side. In that problem,
the maximum deviation was computed for solutions with a fixed end
time, and then the supremum of the maximum deviation was calculated
as the end time tends to infinity.
In recent decades a great number of publications have appeared
related to the application of the extremal
principle to the solution of problems of absolute stability
\cite{AZh,ZhG}, stability radius \cite{Marg}, and others.
In the present article, using the solutions of maximum deviation
for a second-order nonlinear differential equation with
uncertainty, we give a necessary and sufficient condition for the
boundedness of all the trajectories.

This article is organized as follows: In Section \ref{pf} we
formulate the main problem and determine
the trajectories of maximum deviation of our differential system.
In Section \ref{main} we determine the limit cycles of maximum
deviation as functions of the parameters of the system.
Also we show the stability of these cycles, and using these results
we obtain two qualitative behaviours of the phase portrait of the
trajectories of maximum deviation, which allows us to give the
necessary and sufficient condition for the boundedness of all the
trajectories.

\section{Problem formulation and the trajectories of maximum deviation}
\label{pf}

Let $\underline{a}, \overline{a}, \underline{b}, \overline{b},
c_0$ be given real numbers, which satisfy the inequalities
\begin{equation}
0<\underline{a} \le \overline{a},\quad
0<\underline{b} \le \overline{ b},\quad
\overline{a}^2-\underline{b}<0,\quad c_0 >0,
\label{desig}
\end{equation}
and let $\mathcal{E}$ be the family of nonlinear second-order
differential equations
\begin{equation}
\ddot y+2a(t,y,\dot y)\dot y+b(t,y,\dot y)y=c(t,y,\dot y),
\label{equ}
\end{equation}
where $a(t,y,\dot y)$, $b(t,y,\dot y)$, $c(t,y,\dot y)$ are piecewise
continuous functions in the variable $t$ and continuous in $y, \dot y$,
 satisfying the inequalities
\begin{equation}
\underline{a} \le a(t,y,\dot y) \le \overline{a},\quad
\underline{ b} \le b(t,y,\dot y) \le \overline{b},\quad
|c(t,y,\dot y)| \le c_0. \label{acotacion}
\end{equation}

In this article, we give a necessary and sufficient condition in terms
of the parameters $\underline{a}, \overline{a}, \underline{b},
\overline{b}, c_0$, for the boundedness of all the trajectories of
the family $\mathcal{E}$ of equations.
 A similar problem has been analyzed for
the particular case of linear inhomogeneous equations \eqref{equ}
with constant coefficients
$( \underline{a} = \overline{a}$, $\underline{b} = \overline{b} )$
in \cite{ZhH}.

It follows from condition \eqref{desig} for the parameters that
for every real number $\delta$ there is a trajectory
of the family of equations $\mathcal{E}$, which begins in the
point $(\delta, 0)$ and realizes infinitely rotating turns in
positive direction (counterclockwise) around
the origin in the phase plane $y, \dot y$. So we can consider
for every real number $\delta$ the extremal problem
\begin{equation} \label {maxdev}
\begin{gathered}
|y(T)| \to \sup \\
y(.) \text{ is a solution of } \mathcal{E}  \\
y(0)=\delta, \quad \dot y(0)=0 \\
\dot y(T)=0,\quad \dot y(t)\neq 0,\quad t\in (0,T).
\end{gathered}
\end{equation}
This problem has been named the problem of maximum
deviation of the solutions of the family of equations
$\mathcal{E}$. Note that if we consider the same problem with
the functions $a(.), b(.), c(.)$ depending only on $t$,
then, taking into account that according to \eqref{acotacion}
the bounds for these functions, $\underline{a}, \overline{a}$,
$\underline{b}, \overline{b}, c_0$, are constants,
we have that the solution of the corresponding linear problem
is the same as that of the nonlinear one.

The extremal problem \eqref{maxdev}
can be interpreted as an optimal control problem with variable time
$T$ in which the role of the control is played by the
functions $a(t), b(t), c(t), t \in [0,T]$.
We say that a trajectory of the family $\mathcal{E}$ is of maximum
deviation if it is a continuous solution of an equation of
$\mathcal{E}$ and if it is the union of trajectories, which are
optimal for the extremal problem \eqref{maxdev}.
To solve the optimal problem \eqref{maxdev} we apply the Pontryagin
Maximum Principle \cite{P}-\cite{JMS}. In the variables
$x=(x_1,x_2):=(y,\dot y)$ the considered differential equations
of the family $\mathcal E$ are equivalent to the system
\begin{equation}
\begin{gathered}
\dot x_1 = x_2 \\
\dot x_2 = -b(t)x_1-2a(t)x_2+c(t)
\end{gathered} \label{systemx}
\end{equation}
So the conjugate system and the  Pontryagin function are
\begin{gather*}
\dot \psi_0 = 0\\
\dot \psi_1 = b(t)\psi_2 \\
\dot \psi_2 = -\psi_0 -\psi_1 -2a(t)\psi_2,
\end{gather*}
and
\begin{equation}
H(x,\psi,a,b,c)= \psi_0 x_2+\psi_1 x_2 +\psi_2 (-bx_1-2a x_2+c).
\label{pontryaginf}
\end{equation}
The necessary conditions for the optimality have the form:
\begin{itemize}
\item[(1)] $\max H(x(t),\psi(t),a,b,c)=H(x(t),\psi(t),a(t),b(t),c(t))
\equiv 0$,
\item[(2)] $\psi_0 \equiv -1$,
\item[(3)] $\psi_1 (T) \theta_1 +\psi_2 (T) \theta_2 =0$,
\end{itemize}
where $\theta$ is a vector collinear with $(1,0)$.
According to condition (1) and the expression of the
Pontryagin function \eqref{pontryaginf}, we have that the optimal
control $(a^{0}(t),b^{0}(t),c^{0}(t))$ satisfies
\begin{equation}
\begin{gathered}
a^0 (t)=\underline{a} \quad\text{ for all } t \\
b^0 (t)=\begin{cases}
\underline{b}&\text{if }  x_1(t) \psi_2(t)>0\\
\overline{b}&\text{if }  x_1(t) \psi_2(t)<0
\end{cases} \\
c^0(t)=  \begin{cases}
c_0 &\text{if } \psi_2(t)>0\\
-c_0 &\text{if } \psi_2(t)<0
\end{cases}
\end{gathered} \label{optcontrol1}
\end{equation}
From (3) it follows that $\psi_1(T)=0$, so if we write
$\overline{\psi}_1 =\psi_1 +1$, then $\overline{\psi}_1(T)=1$ and
from the fact that $(\overline{\psi}_1(t),\psi_2(t))$ and
$(x_1(t),x_2(t))$ are conjugate variables we have:
$x_1(t)\overline{\psi}_1(t)+x_2(t)\psi_2 (t)\equiv C$, where $C$
is a constant. Evaluating the last equality for $t=T$ we obtain
$C=x_1(T)$. From all that and the necessary condition (1),
\eqref{pontryaginf} and  \eqref{systemx} we have
\begin{gather*}
\dot x_1 (t)\overline{\psi}_1(t)+\dot x_2 (t) \psi_2(t)\equiv 0\\
x_1 (t)\overline{\psi}_1(t)+ x_2 (t) \psi_2(t)\equiv x_1 (T).
\end{gather*}
Solving with respect to $\psi_2(t)$, we have
\begin{equation}
\psi_2(t)=\frac{-x_1(t)\dot x_1 (t)}{x_2 (t)\dot x_1 (t)-x_1 (t)
\dot x_2 (t)}. \label{psi2}
\end{equation}
It is well known that the trajectories of maximum deviation move
all the time in a unique sense around the origin of coordinates,
because if such a trajectory moved in the two possible directions
around the origin, as illustrated in Figure \ref{fig1}, then
we could easily construct a trajectory (for example with the help
of the dashed line in Figure \ref{fig1}) which contradicts the
optimality of the trajectory of maximum deviation.

\begin{figure}[htb]
\centering
\includegraphics[width=0.55\textwidth]{fig1} %ambossentidos
\caption{A trajectory moving in both directions around the origin.}
\label{fig1}
\end{figure}

So the trajectories of maximum deviation move all the time in
positive sense around the origin and then in the expression
\eqref{psi2} the denominator is positive for all $t\in [0,T]$,
and the numerator has the same sign as
$-x_1 (t)\dot x_1 (t)=-x_1(t)\dot y(t)$,
so from this fact and \eqref{optcontrol1} we conclude that the
synthesis of the optimal control for the problem \eqref{maxdev} is
\begin{equation}
\begin{gathered}
a^0 (y,\dot y)=\underline{a} \quad\text{for all } (y,\dot y)
 \in \mathbb{R}^2\\
b^0 (y,\dot y)= \begin{cases}
\underline{b}& \text{if }  y \dot y>0\\
\overline{b}& \text{if }  y \dot y<0
\end{cases}
\\
c^0(y,\dot y)= \begin{cases}
c_0& \text{if } \dot y>0\\
-c_0& \text{if } \dot y<0
\end{cases}
\end{gathered} \label{synthesis}
\end{equation}
Therefore, the trajectories of maximum deviation of $\mathcal{E}$
are the solutions of the equation
\begin{equation}
\ddot y+2a^0(y,\dot y)\dot y+b^0(y,\dot y)y=c^0(y,\dot y),
 \label{maxdevequation}
\end{equation}
where the functions $a^0(y,\dot y), b^0(y,\dot y), c^0(y,\dot y)$
are given by \eqref{synthesis}.

\section{Closed trajectories of maximum deviation and main results}
\label{main}

Now we consider a trajectory of maximum deviation of $\mathcal{E}$
and denote by $(-\delta,0)$, $(0,\alpha)$, $(\gamma,0)$,
$(0, -\theta)$, $(-\epsilon,0)$ the points of intersection of this
trajectory with the coordinate axes;
see Figure 2.

\begin{figure}[htb]
\centering
\includegraphics[width=0.55\textwidth]{fig2} % mdt2
\caption{A trajectory of maximum deviation and its intersection
points with the coordinate axes.}
\label{fig2}
\end{figure}

The numbers $\alpha, \gamma, \theta, \epsilon$ are functions of
the number $\delta$. In the case $\epsilon (\delta)=
\delta$ the considered trajectory is closed and we will denote
it by $C_{\delta}$. The bounded region limited by the
closed trajectory $C_{\delta}$ is invariant for the family of
equations $\mathcal{E}$.

In the qualitative study of the solutions of $\mathcal{E}$ it is
fundamental to determine the closed trajectories of maximum deviation of
$\mathcal{E}$ and their stability in the sense of limit cycles.

From the expressions \eqref{synthesis} it follows that the optimal
trajectories for  \eqref{maxdev} corresponding
to opposite values of $\delta$ are symmetric with respect to the
origin of coordinates and so the equality $\epsilon (\delta)=
\delta$ is fulfilled if and only if the equality
$\gamma (\delta)= \delta$ is fulfilled. Thus to determine the
closed trajectories of maximum deviation of $\mathcal{E}$
we must solve the equation $\gamma (\delta)= \delta$ with
respect to the unknown number $\delta$. Define the functions
$\varphi:\mathbb{R}^+ \to \mathbb{R}^+$ and
$\psi:\mathbb{R}^+ \to \mathbb{R}^+ $ by
\[
\varphi(\delta)=\alpha, \quad\text{and}\quad \psi(\alpha)=\gamma.
\]
From the theorem of existence and uniqueness of solutions
for differential equations it follows that the functions
$\varphi$ and $\psi$ are correctly defined on
$\mathbb{R}^+ =\{ x\in \mathbb{R}, x>0 \}$,
they are injective functions and so the
inverse to $\varphi$: $\delta = \varphi^{-1}(\alpha)$ exists.
Then solving the equation \eqref{maxdevequation}-\eqref{synthesis},
with initial condition $y(0)=0, \dot y(0)=\alpha $ we obtain for
the functions $\varphi^{-1}(\alpha)$ and $\psi (\alpha)$ the
expressions:
\begin{equation}
\begin{gathered}
\varphi^{-1} (\alpha)
=\frac{e^{\underline{a} \tau (\alpha)}}{\overline{b}}
\sqrt{\overline{b}{\alpha}^2 -2\underline{a} c_0 \alpha +{c_{0}}^2}
 -\frac{c_0}{\overline{b}}\\
\psi (\alpha)=\frac{e^{-\underline{a} s (\alpha )}}
{\underline{b}} \sqrt{\underline{b} {\alpha}^2 -2\underline{a}
 c_0 \alpha +{c_{0}}^2} +\frac{c_0}{\underline{b}},
\end{gathered} \label{phipsi}
\end{equation}
where
\begin{equation}
\tau (\alpha)=\begin{cases}
\frac{1}{\overline{\beta}}
\arctan( \frac{\overline{\beta} \alpha}
{c_0 -\underline{a} \alpha})
 & \text{if } \alpha <\frac{c_0}{\underline{a}}\\
\frac{1}{\overline{\beta}} [\pi
-\arctan(\frac{\overline{\beta} \alpha}{-c_0 +\underline{a} \alpha})]
 &\text{if } \alpha >\frac{c_0}{\underline{a}}\\
\frac{\pi}{2\overline \beta} &\text{if } \alpha
=\frac{c_0}{\underline{a}},
\end{cases} \label{t}
\end{equation}
\begin{equation}
 s(\alpha)=\begin{cases}
 \frac{1}{\underline{\beta}}
 [\pi -\arctan(\frac{\underline{\beta} \alpha}
 {c_0 -\underline{a} \alpha})]
& \text{if } \alpha <\frac{c_0}{\underline{a}}\\
 \frac{1}{\underline{\beta}} \arctan(\frac{\underline{\beta} \alpha}
 {-c_0 +\underline{a} \alpha})
&\text{if } \alpha >\frac{c_0}{\underline{a}}\\
\frac{\pi}{2\underline{\beta}} &\text{if } \alpha
=\frac{c_0}{\underline{a}},
\end{cases} \label{s}
\end{equation}
\begin{equation}
\overline{\beta} =\sqrt{\overline{b} -\underline{a}^2}, \quad
\underline{\beta} =\sqrt{\underline{b} -\underline{a}^2}. \label{betas}
\end{equation}
Note that from the conditions on the parameters \eqref{desig}
 it follows that all the square roots that appear in the
expressions \eqref{phipsi}-\eqref{betas} are well defined.

A number $\delta_0 >0$ satisfies the equation
$\gamma (\delta) =\delta$ if and only if it satisfies the equation
$\psi (\alpha) =\varphi^{-1} (\alpha)$. So we define the function
\begin{equation}
f(\alpha)=-\psi (\alpha) +\varphi^{-1} (\alpha),\quad \alpha >0.
\end{equation}
Then using \eqref{phipsi} we have
\begin{equation}
f(\alpha)=\underline{b} e^{\underline{a} \tau (\alpha ) }
\sqrt{\overline{b} {\alpha}^2 -2\underline{a} c_0 \alpha
+{c_{0}}^2} -\overline{b} e^{{-\underline{a}} s(\alpha ) }
\sqrt{\underline{b} {\alpha}^2 -2\underline{a} c_0 \alpha
+{c_{0}}^2}-{c_0}(\underline{b} + \overline{b})
\end{equation}
and the closed trajectories of maximum deviation of the family
$\mathcal{E}$ are determined by the positive roots $\alpha$
of the equation
\begin{equation}
f(\alpha)=0 .\label{closedtraj}
\end{equation}
Now we put
\begin{equation}
\overline M (\alpha )=\overline{b} {\alpha}^2 -2\underline{a} c_0
\alpha +{c_{0}}^2, \; \underline M(\alpha) =\underline{b} {\alpha
}^2 -2\underline{a} c_0 \alpha +{c_{0}}^2. \label{Mm}
\end{equation}
then the derivative of $f(\alpha)$ is
\begin{equation}
f'(\alpha) = \frac{\alpha \underline{b} \overline{b}}
{\sqrt{\underline{M}(\alpha) \overline {M} (\alpha)}}
\big[ e^{\underline{a} \tau (\alpha )}
\sqrt{\underline{M} (\alpha)}-e^{-\underline{a} s(\alpha )}
 \sqrt{\overline{M} (\alpha)} \big].
\end{equation}
Consider now the function $g: \mathbb{R}^{+}\to \mathbb{R}^{+}$
defined by
\begin{equation}
g(\alpha) = \sqrt{\frac{\overline{M} (\alpha)}{\underline{M}
(\alpha)}}
e^{-\underline{a} (s(\alpha )+\tau (\alpha ))}. \label{gfunction}
\end{equation}
It is easy to see that
\begin{equation}
\begin{gathered}
f'(\alpha)>0  \text{ if and only if }  g(\alpha) <1 \\
f'(\alpha)<0  \text{ if and only if }  g(\alpha) >1 \\
f'(\alpha)=0  \text{ if and only if }  g(\alpha) =1 .
\end{gathered}\label{fderivada}
\end{equation}
Easy computations give
\begin{equation}
g'(\alpha)=\frac{(\overline{b} -\underline{b})c_0^2 \alpha
e^{-\underline{a} (s(\alpha )+ \tau (\alpha ))}}
{\underline{M}(\alpha) \sqrt{\underline{M} (\alpha) \overline {M}
 (\alpha )}} \ge 0
\quad\text{for all } \alpha >0. \label{gderivada}
\end{equation}
Note that $\overline{M}(\alpha)/\underline{M}(\alpha)$ is a
rational function; both the numerator and the denominator are
polynomials in $\alpha$ of second degree. So, there exists the
limit
\begin{equation}
\lim_{\alpha \to \infty} \frac{\overline{M}
(\alpha)}{\underline{M} (\alpha)}
=\frac{\overline{b}}{\underline{b}}.\label{limMm}
\end{equation}
From expressions \eqref{t} and \eqref{s} it follows that there
exist the limits:
\begin{equation}
\begin{gathered}
\tau_{\infty}:=\lim_{\alpha \to \infty} \tau (\alpha )
 = \frac{1}{\overline{\beta}} (\pi-\arctan
 \frac{\overline{\beta}} {\underline{a}})\\
s_{\infty}:=\lim_{\alpha \to \infty} s (\alpha)
=\frac{1}{\underline{\beta}} \arctan
\frac{\underline{\beta}}{\underline{a}}.
\end{gathered} \label{tsinfinito}
\end{equation}
Now from \eqref{limMm}, \eqref{tsinfinito} and \eqref{gfunction}
 we conclude that there exists the limit
\begin{equation}
G(\underline{a} ,\underline{b} ,\overline{b})
:= \lim_{\alpha \to \infty} g(\alpha)
=\sqrt{(\overline{b}/\underline{b})}\,
e^{-\underline{a}(s_{\infty}+ \tau_{\infty})}.
\label{G}
\end{equation}
We now consider two cases:

\textbf{Case 1.} $G(\underline{a} ,\underline{b} ,\overline{b})\le 1$.

In this case the function $g(\alpha), \alpha >0$,
according to \eqref{gderivada}, increases monotonically,
so $g(\alpha)<1$ for all $\alpha >0$,
from which it follows, according to \eqref{fderivada}, that the function
$f(\alpha), \alpha \in (0, \infty)$ increases monotonically and
it is easy to calculate that
$ \lim_{\alpha  \to \infty} f(\alpha) = \infty$ and
$ \lim_{\alpha  \to 0} f(\alpha) <0$; so we conclude that in
this case \eqref{closedtraj} has exactly one root $\alpha_1 >0$.

Now let $\delta_1=\varphi^{-1}(\alpha_1)$.
Then $C_{\delta_1}$ is in this case the unique cycle of maximum
deviation of the family of equations $\mathcal{E}$.
The function $\chi:\mathbb{R}^+ \to \mathbb{R}^+$ defined by
\begin{equation}
\chi(\delta)=\epsilon=(\psi \circ \varphi)^2 (\delta)
 \label{chi}
\end{equation}
is a Poincar\'e map for the cycle $C_{\delta_1}$ and simple
calculations show that
\begin{equation}
\chi'(\delta_1)=g^2(\alpha_1)<1
\end{equation}
and so the cycle $C_{\delta_1}$ is a stable cycle.
See figure 3.

\begin{figure}[htb]
\centering
\includegraphics[width=0.55\textwidth]{fig3} % case13
\caption{Case 1: the stable cycle of maximum deviation
$C_{\delta_1}$.}
\label{fig3}
\end{figure}

Then from the invariance of the region bounded by $C_{\delta_1}$,
 we have that all the solutions
of $\mathcal{E}$ with initial condition in this closed bounded
region are bounded.
Now we will prove that in this case, for every point $x$ in the exterior
of the cycle $C_{\delta_1}$, all the trajectories of $\mathcal{E}$
with initial condition in $x$ are also bounded. In order to prove
this, we consider a maximum deviation trajectory with initial point
$(A,0)$, $A>0$, and end point $(B,0)$, $B>0$, which takes a turn around the
origin, and we close this line with the segment joining $(A,0)$ and $(B,0)$.
Then the closed bounded region limited by this closed line is invariant
for the family of equations $\mathcal{E}$, and for every point $x$ in
the exterior of $C_{\delta_1}$, choosing the number $A$
sufficiently large, we can assure that this point is in this bounded
invariant region and then all trajectories of $\mathcal{E}$ with
initial condition in $x$ are bounded (see Figure 4).

\begin{figure}[htb]
\centering
\includegraphics[width=0.55\textwidth]{fig4} % cycleexterior4
\caption{Case 1: bounded invariant region containing a point
exterior to the cycle $C_{\delta_1}$.}
\label{fig4}
\end{figure}

We have proved that in this Case 1 all trajectories of
$\mathcal{E}$ are bounded.

\textbf{Case 2.}
$ G(\underline{a} ,\underline{b} ,\overline{b}) > 1$.
In this case we write the function $f(\alpha)$ in the form
\begin{equation}
f(\alpha )= \underline{b} e^{\underline{a} \tau (\alpha ) }
\sqrt{\overline{M} (\alpha)} \; h(\alpha )-{c_0}(\underline{b} +
\overline{b}), \label{otherf}
\end{equation}
where
\begin{equation}
h(\alpha )=
 1-\frac{\overline{b}}{\underline{b}} e^{{-\underline{a}}
 (s(\alpha ) +\tau (\alpha )) } \sqrt{
 \frac {\underline{M} (\alpha)}{\overline{M} (\alpha)}}.
\end{equation}
Now by differentiation we obtain
\begin{equation}
h'(\alpha)=(\overline{b} -\underline{b})c_0^2 \alpha \ge 0
\end{equation}
for all $\alpha >0 $. Then
\begin{equation}
\max_{\alpha >0}h(\alpha)=\lim_{\alpha \to \infty} h(\alpha)
=1-\lim_{\alpha \to \infty} g(\alpha )<0 ,
\end{equation}
and then from this and the expression \eqref{otherf}
we conclude that $f(\alpha )<0 $ for all $\alpha >0$, and
so in this Case 2, the equation \eqref{closedtraj} does not have
a positive root and $\mathcal{E}$ does not have any cycle of maximum
deviation.
The negativity of the function $f(\alpha)$ means that for all
positive $\delta$ we have $\epsilon (\delta) > \delta$ and so the
trajectories of maximum
deviation of the family $\mathcal{E}$ are all unbounded;
see figure 5.

\begin{figure}[htb]
\centering
\includegraphics[width=0.55\textwidth]{fig5} % case25
\caption{Case 2: unbounded trajectories of maximum deviation.}
\label{fig5}
\end{figure}

From the results for the two possible cases 1 and 2, we have the following
result.

\begin{theorem} \label{thm1}
A necessary and sufficient condition for the boundedness of all
the solutions of the family of equations $\mathcal{E}$ is
the inequality
\begin{equation}
  G(\underline{a}, \underline{b}, \overline{b}) \le 1,
\end{equation}
where the number $G(\underline{a} ,\underline{b} ,\overline{b})$,
depending on the parameters $\underline{a} ,\underline{b} ,\overline{b}$,
can be computed by the expressions \eqref{G} and \eqref{tsinfinito}.
\end{theorem}

\section{Examples}

\begin{example} \label{examp1} \rm
Let  $\underline{a}=1$,  $\underline{b}=2$,
$\overline{b}=4$, and  $\overline{a}\ge 1$, $c_0 > 0$ arbitrary
real numbers. Then by direct computation using Maple we obtained
\begin{equation}
G(\underline{a} ,\underline{b} ,\overline{b}) =0.192429 \le 1
\end{equation}
and so all the trajectories of the corresponding family of
 equations $\mathcal{E}$ are bounded.
\end{example}

\begin{example} \label{examp2} \rm
Let  $\underline{a}=1$,  $\underline{b}=2$,
$\overline{b}=40$, and  $\overline{a}\ge 1$, $c_0 > 0$ arbitrary
real numbers. Then by direct computation using Maple we obtained
\begin{equation}
G(\underline{a} ,\underline{b} ,\overline{b}) =1.545757 > 1
\end{equation}
and so for every point $x$ of the phase plane there is an unbounded
trajectory of the corresponding family of equations
$\mathcal{E}$ with initial point $x$; for example,
the trajectory of maximum deviation with initial point $x$.
\end{example}

\begin{thebibliography}{9}
\bibitem{B} B. V. Bulgakov;
\emph{About the accumulation of perturbations in oscillating linear
systems with constant parameters}.
 Reports of the Academy of Sciences of the USSR, 1946, T. 51, $N^o$ 5.

\bibitem{AZh} V. V. Alexandrov, V. N. Zhermolenko;
\emph{About the absolute stability of second order systems}.
Viestnik of the Moscow University, $N^o$ 5, 1972.

\bibitem{ZhG} V. N. Zhermolenko, H. Gonzalez M.;
\emph{Estabilidad absoluta de los sistemas lineales de segundo orden
con perturbaciones param\'etricas}.
Revista Ciencias Matem\'aticas, Vol. IV, no. 2, 1983.

\bibitem{Marg} M. Margaliot, D. Liberzon;
\emph{Lie-algebraic stability conditions for nonlinear switched
systems and differential inclusions}.
 Systems And Control Letters, 55, 2006, pp. 8-16.

\bibitem{Zh} V. N. Zhermolenko;
\emph{Limit cycles in the phase plane. In the Monograph:
Bulgakov's Problem about maximum deviations
and its applications}. Moscow University Press, 1993.

\bibitem{ZhH} V. N. Zhermolenko, H. Gonzalez M.;
\emph{Sobre el ciclo l\'imite en el problema de la
desviaci\'on m\'axima de sistemas de segundo orden}.
Investigaci\'on Operacional, $N^o$ 2, 1979, pp. 56-69.

\bibitem{P} L. S. Pontriaguin, V. G. Boltianski, P. V. Gamkrelidze,
E. F. Mishenko;
\emph{Mathematical theory of optimal process}.
M. Fizmatgiz, 1961.

\bibitem{JMS} J. Macki, A. Strauss;
\emph{Introduction to Optimal Control Theory}.
Springer-Verlag, New York, 1982.

\end{thebibliography}

\end{document}
