\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2017 (2017), No. 241, pp. 1--8.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu}
\thanks{\copyright 2017 Texas State University.}
\vspace{8mm}}

\begin{document}
\title[\hfilneg EJDE-2017/241\hfil Differential inequalities]
{Application of the steepest descent method to solve differential inequalities}

\author[A. V. Fominyh, V. V. Karelin,  L. N. Polyakova \hfil EJDE-2017/241\hfilneg]
{Alexander V. Fominyh, Vladimir V. Karelin, Lyudmila N. Polyakova}

\address{Alexander V. Fominyh \newline
Saint Petersburg State University,
7-9, University emb.,
199034  St. Petersburg, Russia}
\email{alexfomster@mail.ru}

\address{Vladimir V. Karelin \newline
Saint Petersburg State University,
7-9, University emb.,
199034  St. Petersburg, Russia}
\email{vlkarelin@mail.ru}

\address{Lyudmila N. Polyakova \newline
Saint Petersburg State University,
7-9, University emb.,
199034  St. Petersburg, Russia}
\email{lnpol07@mail.ru}

\thanks{Submitted March 2, 2017. Published October 4, 2017.}
\subjclass[2010]{34A60, 49J52}
\keywords{Differential inequalities; Gateaux gradient;
\hfill\break\indent Steepest descent method}

\begin{abstract}
 In this article we consider the problem of finding the solution of
 a system of differential inequalities. We reduce the original
 problem to the unconstrained minimization of a functional. 
 We find the Gateaux gradient for this functional, and then obtain
 necessary and sufficient conditions for the existence of a minimum.
 Based on these conditions we apply the steepest descent method,
 and present a numerical implementation of the method. 
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\allowdisplaybreaks

\section{Introduction}

Differential inequalities are widely used in obtaining various estimates.
For example, the Wintner theorem can be used to determine the intervals
of the existence of solutions of certain differential equations \cite{hart}.
Various theorems on differential inequalities can be found in many works
on differential equations (see  \cite{sans}). 
Some features of the solutions of differential inequalities were studied 
in the articles \cite{friedman, hay, lili}.
The paper \cite{bekbel} contains numerous applications of differential inequalities.
The application of the Chaplygin theorem to the proof of the theorem
on the continuation of the solution to a point and  the Perron uniqueness theorem
  \cite{ahmsad}, as well as some problems of stability theory 
\cite{alexplat, congfeili, vasnef}, are  important examples of such applications. 
A certain attention has been paid to second order delay differential inequalities 
(see \cite{jleli, koplat}).

In this article a problem of finding a solution of
the nonlinear system of differential inequalities is considered.

We use an optimization approach: the original problem is reduced
 to the unconstrained minimization of a functional on the functional space.
  Some numerical algorithms for solving differential inequalities
  can be found in the articles \cite{belenk, masuizu, vain}.
  In the work \cite{vain} the grid approximation of boundary
  value problems for differential inequalities is investigated.
 The article \cite{belenk} explores the optimal solution (in the sense
   of an integral functional) of a linear system of differential inequalities,
   and the original problem is considered in the form of a
   linear programming problem in a partially ordered space.

\section{Statement of the Problem}

Let us consider a system of differential inequalities
\begin{equation} \label{eq:1}
g_i(x,\dot{x},t) \leq 0, \quad i = \overline{1,\ell}, \quad t \in [0, T],
\end{equation}
with the given initial condition
\begin{equation} \label{eq:2}
x(0) = x_0.
\end{equation}
In system \eqref{eq:1} $T > 0$  is a given moment of time,
 $x$ is an $n$-dimensional vector-function of the phase coordinates,
 which is supposed to be continuous with continuous derivatives in 
the interval $[0, T]$,
$g_i(x, \dot{x}, t)$, $i = \overline{1,\ell}$,
are real continuous scalar functions,
which are supposed to be continuously differentiable at $x$ and at $\dot{x}$.
  In expression \eqref{eq:2} $x_0 \in \mathbb{R}^n$ is a given vector.
  Assume that there exists a solution of system \eqref{eq:1}
  with initial condition \eqref{eq:2}. It is required to find a solution 
$x^* \in C^1_n [0,T]$
of system \eqref{eq:1}, which satisfies initial condition~\eqref{eq:2}.

\section{Reduction to the variational problem}

Denote $z(t) = \dot{x}(t)$, $z \in C_{n}[0, T]$.
 Then from \eqref{eq:2} we get
  \begin{equation} \label{eq:2'}
  x(t) = x_0 + \int_0^t z(\tau) \,d\tau.
  \end{equation}
Using this expression, we  write $g_i(x,z,t)$
instead of
$g_i\big( x_0 + \int_0^t z(\tau) \,d\tau, z, t \big)$,
$i = \overline{1,\ell}$,
for brevity.

Let us introduce the functional
\begin{equation}
 \label{eq:3}
I(z) = \sum_{i=1}^{\ell} \int_0^{T} \big( \max \big\{0, g_i({x,z,t}) \big\} 
\big)^2 dt.
\end{equation}
It is easy to see that this functional
 is nonnegative for all $z \in C_{n}[0, T]$
 and vanishes at the point $z^{*} \in C_{n}[0, T]$
  if and only if the vector-function
  $$
  x^{*}(t) = x_0 + \int ^t_0 z^{*}(\tau) \,d\tau
  $$
   is a solution of problem \eqref{eq:1}, \eqref{eq:2}.

\section{Necessary conditions for a minimum}

Using the same technique as in \cite{fom},
we can prove the following theorem.

\begin{theorem} \label{th:1}
The functional $I(z)$ is Gateaux differentiable
and its Gateaux gradient at the point $z$ is 
\begin{equation} \label{eq:4}
\begin{aligned}
 \nabla I(z, t) 
&= \sum_{i=1}^{\ell} \Big[ \Big( |g_i(x,z,t)| + g_i(x,z,t) \Big)
 \frac {\partial g_i(x,z,t)}{\partial z}  \\
&\quad  +  \int_{t}^{T} \Big( |g_i(x,z,\tau)| + g_i(x,z,\tau) \Big)
 \frac {\partial g_i(x,z,\tau)}{\partial x} \,d\tau \Big]. 
 \end{aligned}
\end{equation}
\end{theorem}

Using  that the maximum of convex functions,
the square of a nonnegative convex function, and the sum of convex functions
 are convex functions, it is not hard to check that the following lemma holds.

\begin{lemma} \label{lm:1}
If the functionals
$g_i(x, z, t),\ i = \overline{1,\ell},$
are convex in $[x, z]$, then the functional $I(z)$ is convex.
\end{lemma}

From the known minimum condition \cite{dem}
we conclude the following theorem.

\begin{theorem} \label{th:2}
For the vector-function $z^{*}$ to be a minimum point of the functional $I$,
 it is necessary, and in the case of the convexity of the functionals
  $  g_i(x, z, t)$, $i = \overline{1,\ell}$,
  in $[x, z]$, also sufficient that
\begin{equation} \label{eq:5}
 \nabla I(z^{*}, t) = 0_n,
  \end{equation}
where $0_{n}$ is a zero element of the space $C_{n}[0, T]$.
\end{theorem}

\section{The steepest descent method}

Let us describe the following steepest descent method \cite{kantakil}
for finding stationary points of the functional $I$.

Fix an arbitrary point $z_1 \in C_{n} [0, T]$.
 Assume that the point $z_{k} \in C_{n} [0, T]$ is already found.
 If the necessary minimum condition \eqref{eq:5} holds,
 then the point $z_{k}$ is a stationary point
 of the functional $I$, and the process terminates.
  Otherwise put
$$
 z_{k+1}(t) = z_{k}(t) - \gamma_{k} \nabla I(z_{k}, t),
  $$
where $\nabla I(z_{k}, t)$ is the Gateaux gradient of the functional $I$
at the point $z_{k}$, and
$$
x_k(t) = x_0 + \int_0^t z_k(\tau) \,d\tau,
 $$
and the value $\gamma_{k}$ is the    solution of the
one-dimensional minimization problem
\begin{equation} \label{eq:6}
\min_{\gamma \geq 0} I(z_{k} -  \gamma \nabla I(z_{k}, t)) =
 I(z_{k} - \gamma_k \nabla I(z_{k}, t)).
 \end{equation}
From \eqref{eq:6} we get
$$
I(z_{k+1}) \leq I(z_{k}).
$$
If the sequence $\{z_{k}\}$ is infinite, then it can be shown,
that under some additional assumptions the described method converges
 \cite{kantakil} in the following sense
$$
 \|\nabla I(z_k)\| 
= \Big(\int^{T}_0 \big( \nabla I(z_k, t),
\nabla I(z_k, t) \big) dt\Big)^{1/2} \to 0, \quad k \to \infty.
 $$
If the sequence $\{z_{k}\}$ is finite,
then its last point is a stationary point of the functional $I$ by construction.

Note, that if the stationary point $\overline{z} \in C_{n}[0, T]$ is obtained,
but $I(\overline{z}) \neq 0$, then one has to take the other initial approximation
and to repeat the iteration process, because the point $\overline{z}$ is not
a global minimum of the functional $I$ in this case.

This approach can be applied to the problem considered in the case when
the initial condition and the final position of the object
are under the following constraints
\begin{gather} \label{eq:7}
 x_i(0) \leq x_{0i}, \quad i = \overline{1,n}, \\
 \label{eq:8}
 x_i(T) \leq x_{Ti}, \quad i = \overline{1,n},
  \end{gather}
where $x_0$, $x_T \in \mathbb{R}^n$ are the given vectors.
Assume that there exists a solution of system \eqref{eq:1} with 
constraints \eqref{eq:7},
 \eqref{eq:8} if such constraints are considered. In this case define
 $$
 x(t) = \overline{x}_0 + {\int_0^t z(\tau) \,d\tau}, \quad
 \overline{x}_{0i} \leq x_{0i},
  \quad i = \overline{1,n},
   $$
   and add to the functional $I$ the convex summand
$$
\sum_{i=1}^{n} \big( \max \big\{ 0, \overline{x}_{0i} - x_{0i} \big\} \big)^2 
+ \sum_{i=1}^{n} \Big( \max \Big\{0, \overline{x}_{0i} 
+ \int_0^T z_i(t) dt - x_{Ti}\Big\} \Big)^2,
 $$
which considers constraints \eqref{eq:7}, \eqref{eq:8}.
 Then besides the vector-function $z^*$ one also has to find the
  vector $\overline{x}_0^*$ to satisfy conditions \eqref{eq:7}, \eqref{eq:8}.

  In this case one should consider the functional
\begin{align*}
J(z, \overline{x}_0)
& = I(z) +  \sum_{i=1}^{n} \big( \max \big\{ 0, \overline{x}_{0i} - x_{0i} 
\big\} \big)^2 \\
&\quad  + \sum_{i=1}^{n} \Big( \max \Big\{0, \overline{x}_{0i} 
+ \int_0^T z_i(t) dt - x_{Ti}\Big\} \Big)^2
\end{align*}
 (instead of the functional $I$) and its gradient $\nabla J$ 
(instead of the gradient $\nabla I$).

\section{``Normal'' form of differential inequalities system}

Let us consider the special case when system \eqref{eq:1} is of the form
$$
\dot{x}_i \leq f_i(x,t), \quad i=\overline{1,n}, \quad t \in [0,T],
$$
and we have initial condition \eqref{eq:2}. 
Suppose the vector-function $f(x,t)$
is continuous and continuously differentiable at $x$. Using \eqref{eq:2'},
we will write $f(x,t)$ instead of
 $f \big(x_0 + \int_0^t z(\tau) \,d\tau, t \big)$
 for brevity.

In this case functional \eqref{eq:3} is of the form
\begin{equation}  \label{eq:10}
I(z) = \sum_{i=1}^{n} \int_0^{T} 
\big( \max \big\{0, z_i - f_i({x,t}) \big\} \big)^2 dt.
\end{equation}

Let us demonstrate how Gateaux gradient  \eqref{eq:4} 
can be obtained.
It is not difficult to see that functional \eqref{eq:10} can be presented
as
\begin{align*}
I(z)& = \sum_{i=1}^{n} I_{1i}(z) + \sum_{i=1}^{n} I_{2i}(z) \\
& = \sum_{i=1}^{n} \int_0^{T} \frac{1}{2} \big( z_i - f_i({x,t}) \big)^2 dt
  + \sum_{i=1}^{n} \int_0^{T} \frac{1}{2} \big( z_i - f_i({x,t}) \big)
   | z_i - f_i({x,t}) | dt.
\end{align*}

Let $v \in C_{n}[0,T]$. Let $(\cdot)'$ denote transposition and
$e_i$, $i = \overline{1,n}$, is the canonical basis in $\mathbb{R}^n$.
We calculate
\begin{equation}  \label{eq:12}
 \begin{aligned}
&I_{1i}(z + \alpha v) \\
&= \int_0^{T} \frac{1}{2}
\Big( z_i + \alpha v_i - f_i(x,t) - \alpha
 \Big( \frac{\partial f_i}{\partial x} \Big)' \int_0^t v(\tau) d \tau
 + o(\alpha) \Big)^2 dt  \\
&  = I_{1i}(z) + \alpha \int_0^{T} \big( z_i - f_i(x,t) \big)
\Big( v_i - \Big( \frac{\partial f_i}{\partial x} \Big)'
\int_0^t v(\tau) d \tau \Big)\, dt
    + o(\alpha). 
    \end{aligned}
     \end{equation}
We introduce the sets
\begin{gather*}
T_{i+}(z) = \{ t \in [0,T] \ | \ z_i - f_i(x,t) > 0 \}, \\
T_{i-}(z) = \{ t \in [0,T] \ | \ z_i - f_i(x,t) < 0 \}, \\
T_{i0}(z) = \{ t \in [0,T] \ | \ z_i - f_i(x,t) = 0 \}.
\end{gather*}
We have
$$ 
I_{2i}(z) = \begin{cases}
    \int_0^{T} \frac{1}{2} \big( z_i - f_i({x,t}) \big)^2 dt,
  & t \in T_{i+}(z) \cup T_{i0}(z) , \\[4pt]
    - \int_0^{T} \frac{1}{2} \big( z_i - f_i({x,t}) \big)^2 dt,
  & t \in T_{i-}(z) .
 \end{cases}
$$
Then
\begin{align*}
I_{2i}(z + \alpha v) 
&=  I_{2i}(z) 
 + \alpha \int_0^{T} \big( z_i - f_i(x,t) \big) \Big( v_i -
\Big( \frac{\partial f_i}{\partial x} \Big)'
\int_0^t v(\tau) d \tau \Big) dt  \\
&\quad + o(\alpha), \quad t \in T_{i+}(z) \cup T_{i0}(z),
\end{align*}
\begin{align*}
 I_{2i}(z + \alpha v) 
&=  I_{2i}(z) - \alpha \int_0^{T} \big( z_i - f_i(x,t) \big)
 \Big( v_i - \Big( \frac{\partial f_i}{\partial x} \Big)'
 \int_0^t v(\tau) d \tau \Big) dt  \\
&\quad + o(\alpha), \quad t \in T_{i-}(z).
\end{align*}
From the above two equations we obtain
\begin{equation} \label{eq:13}
I_{2i}(z+\alpha v) = I_{2i}(z) + \alpha \int_0^{T} | z_i - f_i(x,t)
| \Big( v_i - \Big( \frac{\partial f_i}{\partial x} \Big)'
 \int_0^t v(\tau) d \tau \Big) dt + o(\alpha).
  \end{equation}
From \eqref{eq:12}, \eqref{eq:13} we get
\begin{align*}
  \nabla I(z, t) 
&= \sum_{i=1}^{n} \Big[ \Big( |z_i - f_i(x,t)|
 + z_i - f_i(x,t) \Big) e_i \\
&\quad - \int_{t}^{T} \Big( |z_i - f_i(x,\tau)| + z_i - f_i(x,\tau) \Big)
 \frac {\partial f_i(x,\tau)}{\partial x} \,d\tau \Big].
\end{align*}
Formula \eqref{eq:4} can be proved analogously.

\section{Numerical examples}

Let us consider some examples of the described method.
By an error on the $k$-th iteration we will understand
the difference $I(z_k) - I(z^*)= I(z_k)$.

Consider the differential inequality
$$
\dot{x} \leq -x^2 + t, \quad t \in [0,1],
$$
with the initial condition
$x(0) = 1$.

Table \ref{table1} shows the steepest descent method results.
 Here we put $z(t) = 0$ as initial approximation, then $x(t) = 1$.
The results show that one of the solutions has been obtained on 
the 2-nd iteration.
\begin{table}[ht]
\caption{}\label{table1}
\begin{center}
\renewcommand{\arraystretch}{1.2}
\begin{tabular}{|c|c|c|c|c|} \hline
        k&$I(z_k)$& $z_k$ & $x_k$ & $\| \nabla I(z_k) \|$\\
  \hline 
     1 & 0.(3) & 0 & 1 & 2.0331  \\
     2 & 0  & $-1 + 1.5 t - 0.5 t^2$ & $1 - t + 0.75 t^2 -0.1(6) t^3$ & 0\\
\hline
\end{tabular}
\end{center}
\end{table}

Let us consider one more example \cite{belenk}.
It is required to find the vector-function $x$, which satisfies the constraints
$$
\ddot{x} \leq 1, \quad \dot{x} \geq 0, \quad t \in [0,10],
 $$
with the initial condition
$$
x(0) = 1, \quad \dot{x}(0) = 5,
$$
and the final state
$x(10) \geq 19$.

This mathematical formulation has the following physical interpretation:
if $x$ is the height of the aircraft, it is required from the initial state,
having given initial speed,
to fly up to a sufficient height under the assumption that the device acceleration
 does not exceed the set value.

Put
$$
x_1 = x, \quad x_2 = \dot{x}.
$$
Then the  constraints can be written as 
\begin{gather*}
\dot{x}_1 \leq x_2, \quad -\dot{x}_1 \leq -x_2, \\
\dot{x}_2 \leq 1, \quad x_2 \geq 0, \\
x_1(0) = 1, \quad x_2(0) = 5, \\
-x_{1}(10) \leq -19.
\end{gather*}

Table \ref{table2} shows results obtained by using the steepest descent method.
 Here we put $z(t) = [5, 0]$ as initial approximation,
 then $x(t) = [1 + 5t, 5]$.
The results show that on the $15$-th iteration the error does not exceed 
  $5 \times 10^{-3}$.

\begin{table}[ht]
\caption{} \label{table2}
\begin{center}
\renewcommand{\arraystretch}{1.2}
\begin{tabular}{|c|c|c|c|c|} \hline
        k&$J(z_k)$&$\| z_k - z_{k-1} \|$& $\| x_k - x_{k-1} \|$
 &$\| \nabla J(z_k) \|$\\
  \hline
     1 & 16 & & &25.2982 \\
     2 & 1.6 & 1.26491& 7.30297 &14.8234 \\
     5 & 0.1601 & 0.09786 & 0.40068 & 2.1766 \\
     10 & 0.0237 & 0.01227 & 0.07436 &0.409 \\
     15 & 0.0042 & 0.04468 & 0.06138 & \\
\hline
\end{tabular}
\end{center}
\end{table}

As a final example we consider the system
\begin{gather*}
 \dot{x}_{1} \leq ( 0.01 - 0.05 x_{2}) x_{1},  \\
 \dot{x}_{2} \leq ( -0.01 + 0.02 x_{1}) x_{2}
\end{gather*}
with the initial state
$$ 
x_{1}(0) = 40, \quad x_{2}(0) = 20 
$$
and the final condition
$$ 
x_{1}(1) \geq 15, \quad x_{2}(1) \leq 30.
$$

To this problem we can give the following biological interpretation.
If $x_{1}$ denotes the number of victims and $x_2$ denotes the number of predators,
 then it is required that the number of victims does not fall below
 a certain value, and the population of predators does not exceed
 a predetermined amount at the set moment of time.
   Herewith the speed of the victims' extinction and of the predators'
 increase are bounded above.

Table \ref{table3} shows the steepest descent method results.
Here we put $z(t) = [0, 0]$ as initial approximation, then $x(t) = [40, 20]$.
The results show that on the $4$-th iteration the error does not exceed
$5 \times 10^{-3}$.

\begin{table}[ht]
\caption{} \label{table3}
\begin{center}
\renewcommand{\arraystretch}{1.2}
\begin{tabular}{|c|c|c|c|c|}\hline
        k&$J(z_k)$&$\| z_k - z_{k-1} \|$& $\| x_k - x_{k-1} \|$ 
& $\| \nabla J(z_k) \|$\\
  \hline 
     1 & 1568.16 &  & & 151.3121 \\
     2 & 0.9498  & 32.38078 & 20.12449 & 1.8403 \\
     3 & 0.0054  & 2.39244 & 0.51905 & 0.09609 \\
     4 & 0.0047  & 0.0349 & 0.02015 &  \\
\hline 	
\end{tabular}
\end{center}
\end{table}


\subsection*{Conclusion}
Thus, in this article the problem of solving a
system of differential inequalities is reduced to a
variational problem of minimizing a functional on the whole space.
 For this functional the Gateaux gradient is found, and necessary and
 sufficient conditions for a minimum are obtained.
 On the basis of these conditions the method of steepest descent is described.
  Numerical examples of the method realization are presented.

\subsection*{Acknowledgments}
 This research was supported by the Russian Fund of Fundamental
Research, project no. 16-31-00056 mol-a.

\begin{thebibliography}{10}

\bibitem{ahmsad}  R. R. Ahmerov, B. N. Sadovskyi;
\emph{Basics of the Ordinary Differential Equations Theory}
 [Osnovy teorii obyknovennyh differencial'nyh uravnenij]. 
\url{http://w.ict.nsc.ru/books/textbooks/akhmerov/ode-unicode/index.html}

\bibitem{alexplat}  A. Y. Alexandrov, A. V. Platonov;
\emph{Differential inequalities and motion stability}
 [Differencial'nye neravenstva i ustojchivost' dvizhenija]. 
SPb, Solo, 2006, 107 p. (in Russ.)

\bibitem{bekbel}  E. Beckenbach, R. Bellman;
\emph{Inequalities}. Berlin, Springer Verlag, 1965, 208 p.

\bibitem{belenk} V. Z. Belen'kyi;
\emph{Sufficient Optimality Conditions for Linear Differential
Inequalities with Discontinuous Trajectories.}
Russian Academy of Sciences. Izvestiya Mathematics, 1993, vol. 41, 
issue 1, pp. 39--54.

 \bibitem{congfeili}  S. Cong, S.-M. Fei, T. Li;
\emph{On exponential stability of switched systems with time-delays: 
Differential inequality approach}. Control Theory and Applications, 
2008, vol.~25, issue~3, pp.~521--524.

\bibitem{dem} V. F. Demyanov;
\emph{Extremum conditions and variation calculus}
 [Uslovija jekstremuma i variacionnoe ischislenie].
  Moscow, Vysshaya shkola, 2005, 335 p. (in Russ.)

\bibitem{fom}  A. V. Fominykh;
\emph{Methods of subdifferential and hypodifferential descent in the 
problem of constructing an integrally constrained program control}.
Automation and Remote Control, 2017, vol.~78, issue~4, pp.~608--617. 
DOI 10.1134/S0005117917040038

\bibitem{friedman}  A. Friedman;
\emph{Uniqueness of solutions of ordinary differential inequalities in Hilbert space}.
 Archive for Rational Mechanics and Analysis, 1964, vol.~17, issue 5, pp.~353--357.

\bibitem{hart} F. Hartman;
\emph{Ordinary Differential Equations} [Obyknovennye differencial'nye uravnenija].
 Moscow, Mir Publ., 1970. 720~p. (in Russ.)

  \bibitem{hay}  J. Hay;
\emph{Necessary conditions for the existence of global solutions of 
higher-order nonlinear ordinary differential inequalities}. 
Differential Equations, 2002, vol.~38, issue~3, pp.~362--368.

\bibitem{jleli}  M. Jleli, M. Kirane, B. Samet;
\emph{Blow-up phenomena for second-order differential inequalities with 
shifted arguments},
 Electronic Journal of Differential Equations, Vol.~2016 (2016), No.~91, pp.~1--12.

\bibitem{kantakil} L. V. Kantorovich, G. P. Akilov;
\emph{Functional analysis} [Funkcional'nyj analiz]. 
Moscow, Nauka Publ., 1977, 741~p. (In Russ.)

\bibitem{koplat} R. G. Koplatadze;
\emph{On oscillatory solutions of second order delay differential inequalities}. 
Journal of Mathematical Analysis and Applications, 1973,
vol.~42, issue~1, pp.~148--157.

\bibitem{lili} F. Li, X. Li;
\emph{A priori estimates for nonlinear differential inequalities and applications}.
 Journal of Mathematical Analysis and Applications, 2011,  vol.~378, issue~2, 
pp.~723--733.

\bibitem{masuizu}  I. Masubuchi;
\emph{On numerical solution to parameter-dependent convex differential inequalities}. 
Proceedings of the 38th IEEE Conference on Decision and Control 1999, vol.~1, 
pp.~305--309.

\bibitem{sans} W. Walter;
\emph{Differential and Integral Inequalities}. 
Berlin, Springer Verlag, 1970, 352~p.

\bibitem{vain} V. Vainel't;
\emph{Difference Schemes for Solving Boundary Value Problems for
 Differential Inequalities.} USSR Computational Mathematics and 
Mathematical Physics, 1978, vol.~18, issue 3, pp. 113--125.	

\bibitem{vasnef} A. B. Vasilyeva, N. N. Nefyodov;
\emph{Comparison theorems. Chaplygin method of differential inequalities}
 [Teoremy sravnenija. Metod differencial'nyh neravenstv Chaplygina]. 
Moscow, Moscow State University Publ. House, 2007, 9~p. (In Russ.)

\end{thebibliography}

\end{document}

