\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2018 (2018), No. 115, pp. 1--12.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu}
\thanks{\copyright 2018 Texas State University.}
\vspace{8mm}}

\begin{document}
\title[\hfilneg EJDE-2018/115\hfil Averaging for differential inclusions with maxima]
{Averaging method for ordinary differential inclusions with maxima}

\author[B. Bar, M. Lakrib \hfil EJDE-2018/115\hfilneg]
{Bachir Bar, Mustapha Lakrib}

\address{Bachir Bar \newline
Universit\'e AbouBakr Belkaid,
LSDA, 13000 Tlemcen, Alg\'erie. \newline
Universit\'e Djillali Liab\`es,
LDM, 22000 Sidi Bel Abb\`es, Alg\'erie}
\email{bachir.bar@mail.univ-tlemcen.dz}

\address{Mustapha Lakrib\newline
 Universit\'e Djillali Liab\`es, 
LDM, 22000 Sidi Bel Abb\`es, Alg\'erie}
\email{m.lakrib@univ-sba.dz}

\thanks{Submitted October 17, 2017. Published May 14, 2018.}
\subjclass[2010]{34A60, 34C29}
\keywords{Differential inclusions with maxima; averaging method;
\hfill\break\indent one-sided Lipschitz condition}

\begin{abstract}
 We consider  ordinary  differential inclusions with maxima perturbed
 by a small parameter and give justification of the method of averaging
 for this type of inclusions.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{remark}[theorem]{Remark}
\allowdisplaybreaks


\section{Introduction} \label{sec:introduction}

It  is  known  that  equations and inclusions with maxima arise naturally when 
solving practical and phenomenon problems, in particular, in  the study of 
systems with automatic regulation and automatic control. 
Some works on these equations and inclusions  are 
 \cite{Bainov,cernea, Angelov, Pinto, Otrocol, Pinto2, Stepanov}.

Differential  equations and inclusions with maxima displaying nonlinear 
oscillations are ubiquitous in the  scientific literature. 
The method of averaging is one of the main tools used to analyze these
oscillatory equations and inclusions.
This method was used for ordinary and functional differential equations without 
maxima  in \cite{Kovalchuk,L-K-B,L-S,M-S-T}.
This method  was also  applied to   ordinary differential  equations
with maxima  in  \cite{Kichmarenko3,Kichmarenko2,Plotnikov,Shpakovich}
and in the monograph \cite[Chap. 7]{Bainov}.  
It  was  extended to fuzzy differential equations with maxima in \cite{Olga} 
and to set valued differential equations with Hukuhara derivative and 
maxima in \cite{Kichmarenko}, where both the right-hand sides and the
solutions are set valued.

For ordinary differential inclusions (without maxima), many authors 
have contributed to the development of
the  averaging method in
\cite{LAKRIB, Gama, plotn, Lakrib2, plotni, Sokolovskaya} 
and the references therein.
However, to our knowledge this method has not been extended to  ordinary  
differential inclusions with maxima.

In the present work, we consider ordinary differential inclusions with
maxima perturbed by a small parameter and establish an averaging result 
under weak regularity assumptions. More precisely, we consider the 
initial-value problem
\begin{equation}
 \begin{gathered}
  \dot{x}\in \varepsilon F\Big(t,x(t),\max_{s\in S(t)} x(s)\Big), \quad
 t\geq 0 \\
  x(0)=x_0
 \end{gathered} \label{1.10}
\end{equation}
where  $\varepsilon>0$ is a small parameter,
$F$ and $S$ are  multifunctions, with $S(t)\subset[0,t]$ for $t\geq 0$, and
$$
\max_{s\in S(t)} x(s):=\Big(\max_{s\in S(t)} x_1(s), \dots, 
\max_{s\in S(t)} x_n(s)\Big).
$$

The structure of this article is as follows.
In Section \ref{s2} we provide  an existence result and  a Filippov-Pli\'s
type result for ordinary differential inclusions with maxima.
In Section~\ref{s3}
we present our main result: Theorem \ref{theo2.3}. We state and prove some
preliminary results in Section~\ref{s4} and then give the
proof of Theorem \ref{theo2.3}.
The technical tools used in this article are standard, 
however their exposition in the framework of problem \eqref{1.10} is new.


We complete this section with some definitions and notation.
Throughout this paper we denote by $\mathbb{R}^n$ the real $n$-dimensional space. 
The set of nonnegative real numbers is denoted by $\mathbb{R}_+$.
For $X\subseteq \mathbb{R}$ and $Y=\mathbb{R}_+$ or $\mathbb{R}^n$, 
the set of (locally) Lebesgue integrable functions $\delta  :X\to Y$ is denoted 
by $\mathcal{L}_{\rm (loc)}^1 (X,Y)$. In $\mathbb{R}^n$ we use the notation
 $\langle \cdot,\cdot\rangle$ and
$| \cdot|$ for the usual inner product and Euclidean norm, respectively.
The set of all  nonempty compact (nonempty compact and convex, respectively) 
subsets of $\mathbb{R}^n$ is  denoted
$\operatorname{Comp}(\mathbb{R}^n)$ ($\operatorname{Conv}(\mathbb{R}^n)$, 
respectively). The distance from  $\alpha \in \mathbb{R}^n$ to 
$C\in \operatorname{Comp}(\mathbb{R}^n)$ is given by $d(\alpha,C)=\inf\big\{ | \alpha-c|, \ c\in C\big\}$ and the Hausdorff distance between
$A,B \in \operatorname{Comp}(\mathbb{R}^n)$ is defined as
\[
H(A,B)=\max\Big(\sup_{a\in A} d(a,B),\sup_{b\in B} d(b,A)   \Big).
\]
Endowed with the Hausdorff distance, $ \operatorname{Comp}(\mathbb{R}^n)$ 
is a complete separable metric space. The support function of  
$A\in \operatorname{Comp}(\mathbb{R}^n)$ is
$\sigma(b,A)=\sup\{\langle b,a\rangle,\ a\in A\}$ for  $b\in\mathbb{R}^n$.
Notice that for $A\in \operatorname{Conv}(\mathbb{R}^n)$, $\sigma(\cdot,A)$ 
uniquely determines $A$.

The definition of the one-sided Lipschitz condition for  multifunctions \cite{Dont91}, 
adapted to the multifunction $F$ in problem \eqref{1.10}, reads as follows.

\begin{definition}  \label{rghp} \rm 
A multifunction
 $F:\mathbb{R}_+\times \mathbb{R}^n\times \mathbb{R}^n
\to \operatorname{Conv}(\mathbb{R}^n)$ 
is said to be one-sided Lipschitz (OSL) (with respect to $(x,y)$), 
if there exists $\lambda \in \mathbb{R}$  such that
 for every \hbox{$t\in\mathbb{R}_+$, $x_1,y_1, x_2,y_2\in\mathbb{R}^n$}
 and all $z_1\in F(t,x_1,y_1)$, there exists  \hbox{$z_2\in F(t,x_2,y_2)$} such that
 $$
 \langle z_2-z_1,x_2-x_1\rangle\leq \lambda \left(| x_2-x_1|^2+| x_2-x_1|| y_2-y_1| 
\right).
 $$
 This is equivalently expressed by the support function 
 \[
\sigma(x_2-x_1,F(t,x_1,y_1) )-\sigma(x_2-x_1,F(t,x_2,y_2) )
\leq \lambda \left(| x_2-x_1|^2+| x_2-x_1|| y_2-y_1| \right)
\]
 for every $t\in\mathbb{R}_+$ and $x_1,y_1, x_2,y_2\in\mathbb{R}^n$.
\end{definition}

Note that the constant $\lambda$ in Definition \ref{rghp} can take negative 
values. As in the case of Lipschitz condition, $\lambda$  is called the OSL constant.
It is well known that the OSL condition generalizes the Lipschitz condition 
with respect to the Hausdorff metric.
Note however that it  does not imply continuity.

\section{Existence  and  Filippov-Pli\'s type results}
\label{s2}

First we recall that a function $x$ is called a solution of an ordinary differential 
equation (resp. inclusion) with a maximum if $x$ is absolutely continuous 
on some interval and satisfies the differential equation (resp. inclusion) 
almost everywhere on this interval.

By an application of Schauder's fixed point theorem \cite[Chap.2]{Zeidler}, 
one can easily prove  the following result on existence of solutions of 
ordinary differential equations with maxima.

\begin{lemma} \label{lemma2.1}
 Let  $f:\mathbb{R}_+\times\mathbb{R}^n\times\mathbb{R}^n\to \mathbb{R}^n$ 
be a continuous function. Suppose that
 $f$ is uniformly bounded by some locally Lebesgue integrable function.
 Let  $S:\mathbb{R}_+\to \operatorname{Comp}(\mathbb{R})$ be a continuous 
multifunction, with $S(t)\subset[0,t]$ for $t\geq 0$. 
Let  $x_0\in\mathbb{R}^n$ and  $L>0$. Then the initial-value problem associated with
 an ordinary differential equation  with a maximum
 \begin{equation}
  \begin{gathered}
   \dot{x}=  f\Big(t,x(t),\max_{s\in S(t)} x(s)\Big), \quad  t\in [0,L]\\
   x(0)=x_0
  \end{gathered}  \label{1.1540}
 \end{equation}
 admits  at least one solution defined on  $[0,L]$.
\end{lemma}

By use of Michael's selection theorem \cite[Chap.2]{Deimling} and 
Lemma~\ref{lemma2.1}, it is not hard to prove the following result on existence 
of solutions of ordinary differential inclusions with maxima.

\begin{lemma} \label{rtgm}
 Let $F:\mathbb{R}_+\times \mathbb{R}^n\times\mathbb{R}^n\to 
\operatorname{Conv}(\mathbb{R}^n)$ be a continuous multifunction.
Suppose that  $F$ is uniformly bounded by some locally Lebesgue integrable function.
 Let $S:\mathbb{R}_+\to \operatorname{Comp}(\mathbb{R})$ be a  continuous 
multifunction, with $S(t)\subset[0,t]$ for $t\geq 0$. 
Let  $x_0\in\mathbb{R}^n$ and  $L>0$. Then the initial-value problem associated with
 an ordinary differential inclusion with a maximum
 \begin{equation}
  \begin{gathered}
   \dot{x}(t)\in  F\Big(t,x(t),\max_{s\in S(t)} x(s)\Big), \quad  t\in[0,L]\\
   x(0)=x_0
  \end{gathered}  \label{rty}
 \end{equation}
 admits at least one solution defined on  $[0,L]$.
\end{lemma}

We need the following lemma which is a Filippov-Pli\'s type result for 
ordinary differential inclusions with maxima.
Its proof follows the same pattern as in \cite{Donchev}, where a similar
result is obtained in the case without maxima.

\begin{lemma} \label{rghbn}
 Let $F : \mathbb{R}_+ \times \mathbb{R}^n\times\mathbb{R}^n 
\to \operatorname{Conv}(\mathbb{R}^n)$ and 
$S:\mathbb{R}_+ \to \operatorname{Comp}(\mathbb{R})$ be  multifunctions that 
satisfy the following conditions:
 \begin{itemize}
  \item $F$ is  continuous.

  \item $F$ is uniformly bounded by some locally Lebesgue integrable function, 
i.e., there exists $m\in \mathcal{L}^1_{\rm loc}(\mathbb{R}_+,\mathbb{R}_+)$ such that
\[
H( F(t,x,y),0)\leq m(t), \quad \forall t\in\mathbb{R}_+,\;
 \forall x,y\in \mathbb{R}^n.
\]

\item $F$ is OSL  with  constant $\lambda\in \mathbb{R}$.

\item $S$ is continuous, with $S(t)\subset[0,t]$ for $t\geq 0$.
 
\end{itemize}
 Let $L>0$ and $\delta\in \mathcal{L}^1([0, L],\mathbb{R}_+)$.  
If  $x_1 : [0, L] \to \mathbb{R}^n$ is  an absolutely continuous function  satisfying
 \[
d\Big(\dot{x_{1}}(t), F(t,x_{1}(t), \max_{s\in S(t)}x_{1}(s))\Big)
\leq \delta(t), \quad \forall  t \in [0, L]
\]
then, for each $x_0\in\mathbb{R}^n$, there exists a solution $x$ of 
problem \eqref{rty} such that, for $t \in [0, L]$,
 \begin{equation}  \label{Plis}
  |x_1(t)-x(t)| \leq \Big(| x_1(0)-x_0 |+\int_{0}^{L}\delta(t)dt\Big)
\exp(2\lambda^+t),
 \end{equation}
 where $\lambda^+=\max\{\lambda,0\}$.
\end{lemma}

\begin{proof}
For $t\in [0,L]$ and $\alpha,\beta\in \mathbb{R}^n$,  we define the set
\begin{align*}
 G(t,\alpha,\beta)
= \Big\{&  x\in F(t,\alpha,\beta):
 \langle  \dot{x}_{1}(t)-x,  x_{1}(t)-\alpha\rangle\\
& \leq   \lambda| x_{1}(t)-\alpha|^2
 +| x_{1}(t)-\alpha|\big(\lambda
| \max_{s\in S(t)}x_{1}(s)-\beta|+\delta(t)\big)
 \Big\} .
\end{align*}
 We first prove that $G(t,\alpha,\beta)$ is nonempty for every 
$t\in [0,L]$ and all $\alpha,\beta\in \mathbb{R}^n$.

Let $w \in F(t,x_{1}(t), \max_{s\in S(t)}x_{1}(s))$ be such that
 $$
| \dot{x_{1}}(t)-w|=d\Big(\dot{x}_{1}(t), F(t,x_{1}(t), 
\max_{s\in S(t)}x_{1}(s))\Big)\leq \delta(t).
$$
 From the OSL condition on $F$ it follows that there exists $x\in F(t,\alpha,\beta)$ 
such that
 \[
\langle w-x,x_1(t)-\alpha\rangle
\leq \lambda \Big(| x_1(t)-\alpha|^2+| x_1(t)-\alpha|
\big| \max_{s\in S(t)}x_{1}(s)-\beta\big| \Big).
\]
 Therefore,
\begin{align*}
&\langle \dot{x}_{1}(t)-x,   x_1(t)-\alpha\rangle\\
& \leq \langle w-x,x_1(t)-\alpha\rangle+|\dot{x}_{1}(t)-w||x_1(t)-\alpha|\\
&\leq  \lambda| x_1(t)-\alpha|^2+| x_1(t)-\alpha| 
 \Big(\lambda\big|\max_{s\in S(t)}x_{1}(s)-\beta\big|+ \delta(t) \Big),
 \end{align*}
i.e., $G(t,\alpha,\beta)\not=\emptyset$. Obviously,  $G$ is compact and convex 
valued and is continuous.
 Furthermore $G(t,\alpha,\beta)\subset
 F(t,\alpha,\beta)$. Therefore, by Lemma \ref{rtgm}, there exists a solution $x$ 
of problem
 \begin{equation}
\begin{gathered}
   \dot{x}(t)\in  G\Big(t,x(t),\max_{s\in S(t)} x(s)\Big), \quad  t\in[0,L] \\
   x(0)=x_0
  \end{gathered}   \label{rtby}
 \end{equation}
 such that, for $t\in [0,L]$,
 \begin{equation}
  \begin{aligned}
&\langle \dot{x}_{1}(t)-\dot{x}(t),x_1(t)-x(t)\rangle\\
& \leq \lambda| x_1(t)-x(t)|^2
  +| x_1(t)-x(t)|  \Big(\lambda\big|\max_{s\in S(t)}x_{1}(s)
 -\max_{s\in S(t)}x(s)\big|+ \delta(t) \Big)\\
& \leq \lambda| x_1(t)-x(t)|^2+| x_1(t)-x(t)| 
  \Big(\lambda\max_{s\in S(t)}|x_{1}(s)-x(s)|+ \delta(t) \Big).
  \end{aligned}  \label{2.6}
 \end{equation}
Let $r(t)=| x_1(t)-x(t)|$,  $t\in [0,L]$.
 The function $r$ is absolutely continuous. At every $t\in[0,L]$ for which
$r$ is differentiable, by \eqref{2.6}, we have the inequality
 \begin{equation}
  \label{r(t)}
  r(t)\dot{r}(t)=\frac{1}{2}\frac{d}{dt}r^2(t)
\leq \lambda r(t) \Big(r(t)+ \max_{s\in S(t)} r(s)\Big)+r(t)\delta(t).
 \end{equation}

 Define the set $T = \{t\in [0,L] : r(t) = 0\}$ and let $T_0$ be the set of 
the points of density of $T$. It is known that 
$\operatorname{meas}(T_0) = \operatorname{meas}(T)$, where 
$\operatorname{meas}$ denotes the Lebesgue measure. If $t \notin T$, then, from \eqref{r(t)}
 we deduce
 \begin{equation}   \label{r}
  \dot{r}(t)\leq \lambda^+ \Big(r(t)+ \max_{s\in S(t)} r(s)\Big)+ \delta(t).
 \end{equation}
 If $t\in T_0$ and if $\dot{r}(t)$ exists, then $\dot{r}(t)=0$.
 Hence, \eqref{r} is satisfied for almost all $t\in [0,L]$.
 Therefore, one obtains that: $r(t)\leq \overline{r}(t)$, for $t\in [0,L]$, where
 $\overline{r}$ is the solution of
 \begin{gather*}
 \dot{\overline{r}}(t)
=\lambda^+ \Big(\overline{r}(t)+ \max_{s\in S(t)}\overline{r}(s)\Big)
+ \delta(t),\quad t\in[0,L]\\
 \overline{r}(0)=r(0).
 \end{gather*}
Taking into account that
 $$
 \overline{r}(t) \leq  r(0)+ \int_{0}^{t}\Big(2 \lambda^+
\overline{r}(\tau)+ \delta(\tau)\Big)d\tau,
 $$
 by the Gronwall lemma \cite[Chap.1]{G} we deduce the desired estimate 
\eqref{Plis}.
\end{proof}

\section{Averaging result} \label{s3}

Let $F : \mathbb{R}_+ \times \mathbb{R}^n\times\mathbb{R}^n \to 
\operatorname{Conv}(\mathbb{R}^n)$ and $S:\mathbb{R}_+ 
\to \operatorname{Comp}(\mathbb{R})$ be  multifunctions,
 with $S(t)\subset[0,t]$ for all $t\geq 0$. 
Let $\varepsilon>0$ be a small parameter. We are
interested in the limiting behavior of the trajectories of the
initial-value problem
\begin{equation}
 \begin{gathered}
  \dot{x}\in \varepsilon F\Big(t,x(t),\max_{s\in S(t)} x(s)\Big), \quad t\geq 0 \\
  x(0)=x_0
 \end{gathered} \label{1.1}
\end{equation}
on intervals of time $[0,L/\varepsilon]$, $L>0$, as the perturbation
parameter $\varepsilon$ tends to zero. For this purpose we make use of 
the averaging method.

First, let us formulate the assumptions on the multifunctions $F$ and $S$, 
 needed for proving our averaging result.
\begin{itemize}
 \item[(H1)]  $F=F(t,x,y)$ is  continuous and the continuity in $(x,y)$ 
is uniform with respect to $t$.

 \item[(H2)]  There exist $m\in \mathcal{L}^1_{\rm loc}
(\mathbb{R}_+,\mathbb{R}_+)$  and a constant $M>0$ such that
 \[
H( F(t,x,y),0)\leq m(t), \quad \forall t\in\mathbb{R}_+,\;
 \forall x,y\in\mathbb{R}^n
\]
 with
 $$
\int_{t_1}^{t_2}m(t)dt\leq M(t_2-t_1), \quad  \forall t_1,t_2\in\mathbb{R}_+,\;
 t_1\leq t_2.
$$

 \item[(H3)] $F$ is OSL  with   constant $\lambda\in \mathbb{R}$.

 \item[(H4)] $S$ is uniformly continuous.

 \item[(H5)]
 For all $x,y \in\mathbb{R}^n$, there exists a  limit
 \begin{equation}
  \overline{F}(x,y):=\lim_{T\to+\infty}\frac{1}{T}\int_{0}^{T}F(t,x,y)dt,
  \label{30}
 \end{equation}
 i.e.,
 $$
 \lim_{T\to+\infty} H\Big(\overline{F}(x,y),\frac{1}{T}\int_{0}^{T}F(t,x,y)dt\Big)=0.
 $$
\end{itemize}

Note that in \eqref{30} and in what follows  the integral of a multifunction 
$G$ is understood in the Lebesgue-Aumann   sense \cite{cas},  i.e.
$$
\int_{t_1}^{t_2}G(t)dt
=\Big\{   \int_{t_1}^{t_2}g(t)dt : g\in \mathcal{L}^1([t_1,t_2],\mathbb{R}^n), 
g(t)\in G(t) \Big\},\quad  \forall t_1,t_2\in\mathbb{R},\; t_1\leq t_2.
$$
Consider now problem \eqref{1.1}  with the initial-value averaged problem
\begin{equation}
 \begin{gathered}
  \dot{y}\in\varepsilon  \overline{F}\Big(y(t),\max_{s\in S(t)} y(s)\Big), \quad
 t\geq 0 \\
  y(0)=x_0.
 \end{gathered} \label{1.2}
\end{equation}
The main result of this article is contained in the following theorem.

\begin{theorem} \label{theo2.3} 
 Suppose that {\rm (H1)--(H5)} are fulfilled. Let  $x_0\in\mathbb{R}^n$. 
Then, for every $L>0$ and  $\eta> 0$, there exists 
$\varepsilon_0=\varepsilon_0(x_0,L,\eta) >0$ such that, for
 any $\varepsilon \in(0, \varepsilon_0]$, the following holds:
 \begin{itemize}
  \item[(i)]
  for any  solution $x$ of problem  \eqref{1.1},
  there exists  a  solution $y$ of problem \eqref{1.2} such that
  \begin{equation}    \label{equ2.3}
   | x(t)-y(t)|\leq\eta, \quad \forall t\in[0,L/\varepsilon];
  \end{equation}
  \item[(ii)]  for any  solution $y$ of problem \eqref{1.2}, 
 there exists a   solution $x$ of problem \eqref{1.1}   such that  
inequality \eqref{equ2.3} holds.
 \end{itemize}
\end{theorem}

Let  $x_0\in\mathbb{R}^n$. For $L>0$, denote by 
$\operatorname{Sol}(\varepsilon F,x_0,L)$ and 
$\operatorname{Sol}(\varepsilon\overline{F},x_0,L)$ the solutions sets on 
$[0,L/\varepsilon]$  of problems~\eqref{1.1} and \eqref{1.2}, respectively,
 and consider the associated reachable sets at time  $t\in [0,L/\varepsilon]$
 given  by
\begin{gather*}
R(\varepsilon F,x_0,t)=\{x(t):x\in \operatorname{Sol}(\varepsilon F,x_0,L) \},\\
R(\varepsilon\overline{F},x_0,t)=\{y(t):y\in \operatorname{Sol}
(\varepsilon\overline{F},x_0,L) \}.
\end{gather*}
From Theorem \ref{theo2.3} we obtain the following corollary.

\begin{corollary}  
Suppose that {\rm (H1)--(H5)} are fulfilled. 
Let  $x_0\in\mathbb{R}^n$. For any $L>0$,  we have
 \[
\lim_{\varepsilon\to 0}\sup\Big\{H\big (R(\varepsilon F,x_0,t),
R(\varepsilon \overline{F},x_0,t)\big): t\in[0,L/\varepsilon] \Big\}=0.
\]
\end{corollary}

\begin{remark} \rm 
 In Theorem  \ref{theo2.3}, solutions of problems  \eqref{1.1} and \eqref{1.2}
 are  defined globally in time. On any interval of time $[0,L/\varepsilon]$, $L>0$, 
they are contained in the compact ball in $\mathbb{R}^n$ of radius $ML$, 
centered at $x_0$.

  In problem \eqref{1.1}, $S$  is a general multifunction which is uniformly 
continuous. In~\cite{Plotnikov},  the authors
  considered problem \eqref{1.1} in the single-valued case (differential equations 
with maxima) with %as a multifunction
  $S$ an interval valued multifunction which is  uniformly continuous, that is, 
 $S(t)=[g(t),\gamma(t)]$, where $g,\gamma:\mathbb{R}_+\to\mathbb{R}_+$ 
are uniformly continuous functions such that $0\leq g(t)\leq \gamma(t)\leq t$, 
for all $t\in\mathbb{R}_+$.

  If a multifunction $F=F(t,x,y)$  is continuous in $t$ and satisfies 
a Lipschitz condition on $(x,y)$
  (as assumed in \cite{Plotnikov}), then assumptions  (H1) and (H3)
 are automatically fulfilled.
  
  In assumption  (H5), when the limit \eqref{30} is  uniform with 
respect to $(x,y)$, then $\varepsilon_0$ in the conclusion of Theorem \ref{theo2.3} 
does not depend on the initial condition $x_0$.
\end{remark}

\section{Proof of the main result} \label{s4}

To prove Theorem  \ref{theo2.3} we need to establish the following two 
lemmas. 

\begin{lemma}
 Let $F:\mathbb{R}_+\times \mathbb{R}^n\times\mathbb{R}^n
\to \operatorname{Conv}(\mathbb{R}^n)$ be a multifunction.
 \begin{itemize}
  \item[(i)] If $F$ satisfies assumptions {\rm (H1)} and {\rm (H2)}, then its average
  $\overline{F}$ in {\rm (H5)} is uniformly bounded by the constant $M$ 
in {\rm (H2)} and is continuous.

  \item[(ii)] If $F$ satisfies assumption {\rm (H3)} then its average 
$\overline{F}$ in {\rm (H5)} satisfies the OSL condition with constant 
$\lambda$ in {\rm (H3)}.
\end{itemize}
\end{lemma}

\begin{proof}
 For the proof of (i)  see \cite{LAKRIB}.

(ii) Note that, for $x\in\mathbb{R}^n$ and 
$A, B\in \operatorname{Conv}(\mathbb{R}^n)$, we have
 \begin{equation}  \label{equequ}
  |\sigma(x,A)-\sigma(x,B)| \leq| x| \Big|\sigma\Big(\frac{x}{| x|},A\Big)
-\sigma\Big(\frac{x}{| x|},B\Big)\Big| \leq| x| H(A,B).
 \end{equation}

 Now, let $x_1,x_2,y_1,y_2\in\mathbb{R}^n$. Using inequality \eqref{equequ}, 
by assumption (H5) we can easily deduce that, for any
 $\eta>0$ there exists $T_0=T_0(x_1,x_2,y_1,y_2,\eta)>0$ such that, for all
 $T\geq T_0$ we have
 \begin{align*}
& \sigma(x_2-x_1,\overline{F}(x_1,y_1) )-\sigma(x_2-x_1,\overline{F}(x_2,y_2))\\
&\leq \Big[ \sigma(x_2-x_1,\overline{F}(x_1,y_1) )
 -\sigma\Big(x_2-x_1,\frac{1}{T}\int_0^TF(t,x_1,y_1)dt\Big)\Big]\\
&\quad  +\Big[\sigma\Big(x_2-x_1,\frac{1}{T}\int_0^TF(t,x_1,y_1)dt\Big)
-\sigma\Big(x_2-x_1,\frac{1}{T}\int_0^TF(t,x_2,y_2)dt\Big)\Big] \\
&\quad  +\Big[\sigma\Big(x_2-x_1,\frac{1}{T}\int_0^TF(t,x_2,y_2)dt\Big)
 -\sigma(x_2-x_1,\overline{F}(x_2,y_2))\Big] \\
& \leq | x_2-x_1 | H\Big(\overline{F}(x_1,y_1),
 \frac{1}{T}\int_{0}^{T}F(t,x_1,y_1)dt\Big) \\
&\quad  +\frac{1}{T}\int_0^T\big[\sigma(x_2-x_1,F(t,x_1,y_1))
 -\sigma(x_2-x_1,F(t,x_2,y_2))\big]dt \\
&\quad +  | x_2-x_1 | H\Big(\frac{1}{T}\int_{0}^{T}F(t,x_2,y_2)dt,
\overline{F}(x_2,y_2)\Big) \\
&\leq  2| x_2-x_1 |\eta+\lambda \left(| x_2-x_1|^2+| x_2-x_1|| y_2-y_1| \right).
 \end{align*}
Since the value of $\eta$ is arbitrary, in the limit we obtain
 that 
$$
\sigma(x_2-x_1,\overline{F}(x_1,y_1) )-\sigma(x_2-x_1,\overline{F}(x_2,y_2))
 \leq \lambda \left(| x_2-x_1|^2+| x_2-x_1|| y_2-y_1| \right),
$$ 
which completes the proof that $\overline{F}$ is OSL with constant $\lambda$.
\end{proof}

\begin{lemma}\label{lema}
 Suppose that {\rm (H1)--(H4)}  are fulfilled.  Let  $x_0\in\mathbb{R}^n$. 
Then, for every  solution $x$ of \eqref{1.1} and  $L>0$ there exists  a 
solution $\overline{z}:[0,L/\varepsilon]\to\mathbb{R}^n$ of the discrete problem
 \begin{equation}
  \begin{gathered}
   \dot{\overline{z}}(t)\in \varepsilon
 F\Big(t,\overline{z}(t_i),\max_{s\in S(t_i)}\overline{z}(s)\Big), \quad
 t\in[t_i,t_{i+1}] \\ 
\overline{z}(0)=x_0
  \end{gathered}  \label{disc}
 \end{equation}
where $0=t_0<t_1<\dots<t_p=L/\varepsilon$
with $t_{i+1}=t_i+L/(\varepsilon p)$, $i=0,\dots,p-1$,
 such that, for $t\in[0,L/\varepsilon]$
 \[
|\overline{z}(t)-x(t)|\leq \big(L\exp(2\lambda^+ L)\big)  
\omega_{F}\Big(\frac{M}{p}(L+\omega_{S}(L))+\varepsilon M \omega_{S}(L)\Big), 
\]
where $\lambda^+=\max\{\lambda,0\}$ and
 $\omega_{G}$ is the  modulus of  continuity of multifunction $G$.
\end{lemma}

\begin{remark}\rm
 Note that on $[0,L/\varepsilon]$, $L>0$, solutions of \eqref{disc} are 
contained in the compact ball in $\mathbb{R}^n$ of radius $ML$, centered at $x_0$.
\end{remark}


\begin{proof}[Proof of Lemma \ref{lema}]
We present two steps.
\smallskip

\noindent\textbf{Step 1.}
 Let $\overline{z}(0)=x_0$ and suppose that $\overline{z}$ exists on  $[0,t_i]$. 
 We prove inductively that it exists on  $[t_i,t_{i+1}]$, $i=0,\dots, p-1$.
For a given $t\in [t_i,t_{i+1}]$ and $\alpha,\beta \in\mathbb{R}^n$ consider 
the map 
$$
G(t,\alpha,\beta)=E(t,\alpha,\beta)\cap \varepsilon 
F\Big(t,\overline{z}(t_i),\max_{s\in S(t_i)}\overline{z}(s)\Big)
$$
where
\begin{align*}
E(t,\alpha,\beta)
=\Big\{& z\in \mathbb{R}^n: \langle  \dot{x}(t)-z,  x(t)-\alpha\rangle 
\leq \varepsilon\Big[\lambda| x(t)-\alpha|^2 \\
&+| x(t)-\alpha|
 \Big(\lambda\big| \max_{s\in S(t)}x(s)-\beta\big|+\delta(t)\Big)\Big]\Big\}
\end{align*}
with
\[
\delta(t)= H\Big(F(t,\alpha,\beta),F\Big(t,\overline{z}(t_i),
\max_{s\in S(t_i)}\overline{z}(s)\Big)\Big).
\]
We obtain the existence of a solution of the initial-value problem
\begin{equation}
 \begin{gathered}
  \dot{\alpha}\in  G\Big(t,\alpha(t),\max_{s\in S(t)}\alpha(s)\Big), \quad
  t\in[t_i,t_{i+1}]\\
  \alpha(t_i)=\overline{z}(t_i).
\end{gathered}  \label{4}
\end{equation}
We first show that $G(t,\alpha,\beta)$ is nonempty for every $t$, $\alpha$ and $\beta$. 
Indeed, by assumption (H3) (OSL condition) there is  
$w\in \varepsilon F(t,\alpha,\beta)$ such that
\[
\langle \dot{x}(t)-w,x(t)-\alpha\rangle
\leq \varepsilon\lambda \Big(| x(t)-\alpha|^2+| x(t)-\alpha|
\big| \max_{s\in S(t)}x(s)-\beta\big| \Big).
\]
Further, for $w$ we find 
$z\in\varepsilon F\left(t,\overline{z}(t_i),\max_{s\in S(t_i)}\overline{z}(s)\right)$ 
such that
\[
| w-z| \leq \varepsilon 
H\Big(F(t,\alpha,\beta),F\Big(t,\overline{z}(t_i),
\max_{s\in S(t_i)}\overline{z}(s)\Big)\Big)=\varepsilon\delta(t).
\]
Then
\[
\langle \dot{x}(t)-z,x(t)-\alpha\rangle\leq \varepsilon 
\Big[\lambda| x(t)-\alpha|^2+| x(t)-\alpha|
 \Big(\lambda\big|\max_{s\in S(t)}x(s)-\beta\big|+ \delta(t)\Big) \Big];
\]
that is, $z\in G(t,\alpha,\beta)$.

Now, it is easy to see that $G$ is  compact and convex valued, and is continuous.
Hence, problem \eqref{4} has a solution  that we denote also  by $\overline{z}$.
This completes the induction step.
\smallskip

\noindent\textbf{Step 2}. 
For $t\in[0,L/\varepsilon]$, we have $t\in[t_i,t_{i+1}]$ for some 
$i=0,\dots,p-1$ and:

On the one hand,
\begin{gather*}
| \overline{z}(t)-\overline{z}(t_{i})| 
\leq  \int_{t_i}^{t_{i+1}}\varepsilon m(s)ds
\leq \varepsilon M(t_{i+1}-t_{i})
\leq \frac{LM}{p}, \\
\big|\max_{s\in S(t)}\overline{z}(s)-\max_{s\in S(t_i)}\overline{z}(s)\big|
\leq \varepsilon M \omega_{S}\Big(\frac{L}{\varepsilon p}\Big)
\leq M \Big(\frac{1}{p}+\varepsilon\Big)\omega_{S}(L) 
\end{gather*}
where $\omega_{S}$ is the  modulus of continuity of the multifunction $S$, and then
\begin{align*}
 \overline{\delta}(t)&:=H\Big(F\Big(t,\overline{z}(t),\max_{s\in S(t)}
 \overline{z}(s)\Big),F\Big(t,\overline{z}(t_i),\max_{s\in S(t_i)}\overline{z}(s)
 \Big)\Big)\\
& \leq \omega_{F}\Big(\frac{M}{p}(L+\omega_{S}(L))+\varepsilon M \omega_{S}(L)\Big),
 \end{align*}
where $\omega_{F}$ is the  modulus of  continuity of the multifunction
$F$  which is, by assumption (H1), independent of $t$.

On the other hand
\begin{align*}
&\langle  \dot{x}(t) -\dot{\overline{z}}(t),  x(t)-\overline{z}(t)\rangle  \\
&\leq \varepsilon\Big[\lambda| x(t)-\overline{z}(t)|^2+| x(t)-\overline{z}(t)|
\Big(\lambda\big| \max_{s\in S(t)}x(s)-\max_{s\in S(t_i)}\overline{z}(s)\big|
 + \overline{\delta}(t)\Big)\Big].
 \end{align*}

We repeat the arguments following  inequality \eqref{2.6}  to obtain that, 
for all $t\in [0,L/\varepsilon]$,
\begin{align*}
| \overline{z}(t) -x(t)|
&\leq \Big(\int_{0}^{L/\varepsilon}\varepsilon\overline{\delta}(t)dt\Big)
 \exp (2\varepsilon\lambda^+t) \\
&\leq \big(L\exp(2\lambda^+L)\big) \omega_{F}
 \Big(\frac{M}{p}(L+\omega_{S}(L))+\varepsilon M \omega_{S}(L)\Big),
\end{align*}
with $\lambda^+=\max\{\lambda,0\}$.
\end{proof}

\begin{proof}[Proof of Theorem \ref{theo2.3}] % \label{section4.2}
Let $x_0\in \mathbb{R}^n$ and  $x$ be a solution of \eqref{1.1}. Let  $L > 0$.
By Lemma \ref{lema} there exists  a solution
 $\overline{z}:[0,L/\varepsilon]\to\mathbb{R}^n$ of the discrete problem
\begin{gather*}
\dot{\overline{z}}(t)\in \varepsilon 
F\Big(t,\overline{z}(t_i),\max_{s\in S(t_i)}\overline{z}(s)\Big), \quad
 t\in[t_i,t_{i+1}] \\ 
\overline{z}(0)=x_0
\end{gather*}
where $0=t_0<t_1<\dots<t_p=L/\varepsilon$
with $t_{i+1}=t_i+L/(\varepsilon p)$, $i=0,\dots,p-1$,
such that, for $t\in[0,L/\varepsilon]$
\begin{equation} \label{1}
 |\overline{z}(t)-x(t)|\leq \big(L\exp(2\lambda^+ L)\big) \omega_{F}
\Big(\frac{M}{p}(L+\omega_{S}(L))+\varepsilon M \omega_{S}(L)\Big),
\end{equation}
where $\lambda^+=\max\{\lambda,0\}$.

Notice that by assumption  (H5), for any $\mu > 0$ there exists 
$\overline{\varepsilon}$ such that for every 
$\varepsilon \in (0,\overline{\varepsilon}]$ we have 
\begin{equation}
H\Big(\frac{\varepsilon p}{L}\int_{t_i}^{t_{i+1}} 
F\Big(t,\overline{z}(t_i),\max_{s\in S(t_i)}\overline{z}(s)\Big)dt,
 \overline{F}\Big(\overline{z}(t_i),\max_{s\in S(t_i)}\overline{z}(s)\Big)\Big)
\leq \mu. \label{virtue}
\end{equation}

For $i=0,\dots,p-1$, let $v_i:[t_i,t_{i+1}]\to\mathbb{R}^n$ be a
 continuous function satisfying: for $t\in[t_i,t_{i+1}]$, 
$v_i(t)\in F\Big(t,\overline{z}(t_i),\max_{s\in S(t_i)}\overline{z}(s)\Big)$ and
$\overline{z}(t)=\overline{z}(t_i)+\varepsilon \int_{t_i}^t v_i(s)ds$.
There exists  
$v^i\in \overline{F}\big(\overline{z}(t_i),\max_{s\in S(t_i)}\overline{z}(s)\big)$ 
such that, by \eqref{virtue},
$$
\Big| \frac{\varepsilon p}{L}\int_{t_i}^{t_{i+1}} v_i(t)dt-v^i\Big|
=\Big| \frac{\varepsilon p}{L}\int_{t_i}^{t_{i+1}} (v_i(t)-v^i)dt\Big|\leq \mu.
$$
Then we consider the function $z^1:[0,L/\varepsilon] \to \mathbb{R}^n$ given by
 $$ 
z^1(t)=z^1(t_i)+\varepsilon\int_{t_i}^{t}v^i\,ds,\quad t\in[t_i,t_{i+1}].
$$
For $t\in[t_i,t_{i+1}]$ we have
\[
| z^1(t)-z^1(t_i)|\leq \int_{t_i}^{t_{i+1}}M\varepsilon ds\leq \frac{ML}{p}. 
\]
By the definition of $z^1$ and $\overline{z}$, we have
\begin{align*}
 | z^1(t_{i+1})- \overline{z}(t_{i+1})|
&\leq| z^1(t_{i})- \overline{z}(t_{i}) |
 +\varepsilon\Big|\int_{t_i}^{t_{i+1}}(v_i(t)-v^i)dt\Big|\\
&\leq| z^1(t_{i})- \overline{z}(t_{i}) |+\frac{L\mu}{p}
\leq \dots \\
&\leq p\frac{L\mu}{p}=L\mu.
 \end{align*}
For $t\in[t_i,t_{i+1}]$ we obtain
\begin{equation} \label{2}
 \begin{split}
| z^1(t)- \overline{z}(t)|
&\leq| z^1(t)- z^1(t_i)|+| z^1(t_{i})- \overline{z}(t_{i})|
 +| \overline{z}(t_{i})- \overline{z}(t)| \\
&\leq L\mu+\frac{2ML}{p}
 \end{split}
\end{equation}
and
$$
\Big|\max_{s\in S(t_i)}z^1(s)-\max_{s\in S(t_i)}\overline{z}(s)\Big|
\leq \max_{s\in S(t_i)}\left|z^1(s)-\overline{z}(s)\right|
\leq L\mu+\frac{2ML}{p}
$$
so that
$$
 H\Big(\overline{F}\Big(\overline{z}(t_i),\max_{s\in S(t_i)}\overline{z}(s)
\Big),\overline{F}\Big(z^1(t),\max_{s\in S(t_i)}z^1(s)\Big)\Big)
\leq\omega_{\overline{F}}\Big(2L\mu+\frac{4ML}{p}\Big),
$$
where $\omega_{\overline{F}}$ is the  modulus of  continuity of the multifunction
$\overline{F}$.

Therefore, for $t\in[t_i,t_{i+1}]$, $i=0,\dots,p-1$,
\begin{align*}
 & d\Big(\dot{z^1}(t),\varepsilon\overline{F}\Big(z^1(t),
 \max_{s\in S(t_i)}z^1(s)\Big)\Big) \\
&\leq \varepsilon d\Big(v^i,\overline{F}\Big(z^1(t),\max_{s\in S(t_i)}z^1(s)
 \Big)\Big)\\
&\leq\varepsilon H\Big(\overline{F}\Big(\overline{z}(t_i),
 \max_{s\in S(t_i)}\overline{z}(s)\Big),
 \overline{F}\Big(z^1(t),\max_{s\in S(t_i)}z^1(s)\Big)\Big)\\
&\leq \varepsilon
  \omega_{\overline{F}}\Big(2L\mu+\frac{4ML}{p}\Big).
\end{align*}
Taking into account that  $\varepsilon\overline{F}$ is OSL with constant 
$\varepsilon\lambda$, by Lemma \ref{rghbn} there exists  a solution
 $y$ of \eqref{1.2}, such that, for $t\in[0,L/\varepsilon]$,
\begin{equation}  \label{3}
 \begin{split}
  | z^1(t)-y(t)| 
&\leq \big(\exp(2\lambda^+ L)\big)\int_{0}^{L/\varepsilon}
  \varepsilon  \omega_{\overline{F}}\Big(2L\mu+\frac{4ML}{p}\Big)ds\\
&\leq \big(L\exp(2\lambda^+ L)\big)\omega_{\overline{F}}
 \Big(2L\mu+\frac{4ML}{p}\Big).
 \end{split}
\end{equation}
By inequalities \eqref{1}, \eqref{2} and \eqref{3} it follows  that,
 for $t\in[0,L/\varepsilon]$,
\begin{align*}
  | x(t)-y(t)|
&\leq| x(t)-\overline{z}(t) | +| \overline{z}(t)-z^1(t)|+ | z^1(t)-y(t)|\\
&\leq   \big(L\exp(2\lambda^+ L)\big) 
\omega_{F}\Big(\frac{M}{p}(L+\omega_{S}(L))+\varepsilon M \omega_{S}(L)\Big)
  +L\mu+\frac{2ML}{p}\\
&\quad +  \big(L\exp(2\lambda^+ L)\big)\omega_{\overline{F}}
\Big(2L\mu+\frac{4ML}{p}\Big).
 \end{align*}
Therefore, for any $\eta>0$, by  appropriate choice of $\mu$, 
 sufficiently large  $p$  and sufficiently small $\varepsilon$, 
we get the inequality $|x(t)-y(t)|\leq \eta$ for $t\in[0,L/\varepsilon]$.
The proof of  assertion (i) is now complete.


By adapting the process presented above, we obtain assertion (ii).
This completes the proof.
\end{proof}

\begin{remark} \rm
 In all the results above, it is not necessary to consider the whole space
 $\mathbb{R}_+\times\mathbb{R}^n\times\mathbb{R}^n$. One can
 restrict the domains of definition of  function $f$ in \eqref{1.1540} 
and  multifunctions $F$ in \eqref{rty} and  \eqref{1.1} to $\mathbb{R}_+\times
 \mathbb{U}\times \mathbb{U}$ for any open subset
 $\mathbb{U}\subset\mathbb{R}^n$ with additional technical assumptions.
\end{remark}

\begin{thebibliography}{00}

 \bibitem{Bainov}  D. D. Bainov, S. G. Hristova;
 \emph{Differential Equations with Maxima.} CRC Press Taylor
 and Francis Group, 2011.

 \bibitem{G}  D. D.  Bainov, P. S. Simeonov; 
\emph{Integral Inequalities and Applications, Mathematics and Its Applications.}
 Kluwer  Academic Publishers, Dordrecht-Boston-London, 1992.

 \bibitem{LAKRIB}  A. Bourada, R. Guen, M. Lakrib, K. Yadi;
 Some averaging results for ordinary differential inclusions. 
\emph{ Discuss. Math. Differential Incl., Control Optim.}, \textbf{35}
(2015), No. 1, 47-63.

\bibitem{cas}  C. Castaing, M. Valadier; 
\emph{Convex analysis and measurable multifunctions.} 
In: Lect. Notes in Math., No. 580,  Springer-Verlag, Berlin, 1977.

 \bibitem{cernea}  A. Cernea; 
On the existence of solutions for differential inclusions with maxima.
 {\it Libertas Math.} (new series), \textbf{35}  (2015),  No. 1, 89-98.

 \bibitem{Deimling}  K. Deimling; 
\emph{Multivalued Differential Equations.}
 de Gruyter Series in Nonlinear Analysis and Applications. Vol. 1, Walter de 
Gruyter, Berlin, 1992.

 \bibitem{Dont91}  T. D. Donchev; 
Functional  differential  inclusions  with  monotone  right  hand  side. 
{\it Nonlinear Anal.}, \textbf{16} (1991), 543-552.

\bibitem{Donchev}  T. Donchev, E. Farkhi;  
Stability and Euler approximation of  one-sided Lipschitz differential inclusions. 
{\it SIAM J. Control Optim.}, \textbf{36}  (1998), 780-796.

 \bibitem{Gama}  R. Gama, G. Smirnov; 
 Stability and Optimality of Solutions to Differential
 Inclusions via Averaging Method, {\it Set-Valued Var. Anal.}, 
\textbf{22} (2014), No. 2, 349-374.


 \bibitem{Angelov}  L. Georgiev, V. G. Angelov;
 On the existence and uniqueness of solutions for maximum equations. 
{\it Glasnik Mat.}, \textbf{37} (2002), 275-281.

 \bibitem{Pinto}  P. Gonzalez, M. Pinto; 
Convergent solutions of certain nonlinear differential equations with maxima.
 {\it Math. Comput. Modelling}, \textbf{45} (2007),  1-10.

 \bibitem{Kichmarenko}  D. Kichmarenko; 
Averaging of differential equations with  Hukuhara derivative with maxima. {\it
  Int. J. Pure Appl. Math.}, \textbf{57} (2009), No. 3, 447-457.

 \bibitem{Kichmarenko3}  D. Kichmarenko, K. Yu. Sapozhnikova;
 Full averaging scheme for differential equations with maximum. 
{\it Contemp. Anal. Appl. Math.} \textbf{3} (2015), No. 1, 113-122.

 \bibitem{Olga}  O. Kichmarenko, N. Skripnik;
 Partial Averaging of Fuzzy  Differential Equations with Maxima.
 {\it Adv. Dyn. Syst. Appl.}, \textbf{6} (2011), No. 2,  199-207.

 \bibitem{plotn}  S. Klymchuk, A. Plotnikov, N. Skripnik; 
Overview of V. A. Plotnikov's research on averaging of differential inclusions. 
{\it Phys. D: Nonlinear Phenomena}, \textbf{241} (2012), No. 22, 1932-1947.

\bibitem{Kovalchuk}  T. V. Koval'chuk, V. I. Kravets, V. V.  Mohyl'ova, A. N. Stanzhitskii; 
Application of the method of averaging to the problems of optimal control 
over functional-differential equations. {\it Ukr. Mat. Zh.},
 \textbf{70} (2018), No. 2, 206-215.

 \bibitem{Lakrib2}  M. Lakrib; 
An Averaging Theorem for Ordinary Differential Inclusions,  
{\it Bull. Belg. Math. Soc. Simon Stevin} \textbf{16} (2009), 13-29.

\bibitem{L-K-B}   M. Lakrib, T. Kherraz, A. Bourada;
  Averaging for ordinary differential equations perturbed by a small parameter, {\it
  Math. Bohemica},   \textbf{141} (2016), No. 2, 143-151.

  \bibitem{L-S}    M. Lakrib, T. Sari; 
Time averaging for ordinary differential equations and retarded functional 
differential equations,  {\it Electron. J.    Diff. Equ.}, \textbf{2010} (2010), 
1-24.

 \bibitem{M-S-T}  V. Mogilova, O. Stanzhytskyi, A. Tkachuk; 
Application of the averaging method to some optimal control problems, 
\emph{Funct. Diff. Equ.}, \textbf{20} (2013), No 3-4, 227-237.

  \bibitem{Otrocol}  D. Otrocol; 
Systems of functional-differential equations with maxima, of mixed type,
 {\it Electron. J. Qual. Theory Differ. Equ.}, (2014), No. 5, 1-9.

\bibitem{Pinto2}  M. Pinto, S. Trofimchuk; 
Stability and existence of multiple periodic solutions for a quasilinear 
differential equation with maxima. {\it Proc. Roy.
  Soc. Edinburgh Sect. A}, \textbf{130} (2000), 1103-1118.

\bibitem{Kichmarenko2}  V. A. Plotnikov, O. D. Kichmarenko;
 A note on the averaging method for differential equations with maxima.  
{\it Iran. J. Optim.}, \textbf{1} (2009), 132-140.

\bibitem{Plotnikov}  V. A. Plotnikov, O. D. Kichmarenko; 
Averaging of differential equations with maxima. 
{\it Vestn. Chernovits. Univ.}, \textbf{150} (2002), 78-82.

\bibitem{plotni}  V. A. Plotnikov, A. Plotnikov, A. Vityuk; 
\emph{Differential equations with multivalued right-hand side: Asymptotic methods.} 
Astro Print, Odessa, 1999 (In Russian).

\bibitem{Shpakovich}  V. Shpakovich, V. Muntyan;
 The averaging method for differential equations with maxima. 
{\it Ukr. Mat. Zh.}, \textbf{39} (1987), No. 5, 662-665.

\bibitem{Sokolovskaya}   E. V. Sokolovskaya; 
Generalization of the Krylov-Bogolyubov averaging principle for
  differential inclusions with non-Lipschitz right-hand side,  {\it
   Bull. Samara State Univ., Ser. Nat. Sci.}, (2004), 36-51 (in Russian).

\bibitem{Stepanov}  E. Stepanov;
 On solvability of some boundary value problems for differential equations
 with maxima. {\it Topol. Meth. Nonlin. Anal.}, \textbf{8} (1996), 315-326.

 \bibitem{Zeidler} E. Zeidler; 
\emph{Functional Analysis and Its Applications I: Fixed Point Theorems.} 
Springer-Verlag, New York, 1986.

\end{thebibliography}

\end{document}
