\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2008(2008), No. 73, pp. 1--7.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu  (login: ftp)}
\thanks{\copyright 2008 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2008/73\hfil Cohen-Grossberg neural networks]
{Convergence of Cohen-Grossberg neural networks with
delays and time-varying coefficients}

\author[Q. Zhou,  J. Shao\hfil EJDE-2008/73\hfilneg]
{Qiyuan Zhou, Jianying Shao} 

\address{Qiyuan Zhou  \newline
Department of Mathematics, Hunan University of Arts and Science,
Changde, Hunan 415000,  China}
\email{zhouqiyuan65@yahoo.com.cn}

\address{Jianying Shao  \newline
College of Mathematics and Information Engineering,
Jiaxing University, Jiaxing, Zhejiang 314001, China}
\email{shaojianying2008@yahoo.cn}

\thanks{Submitted January 17, 2008. Published May 15, 2008.}
\thanks{Supported by grant 07JJ46001 from the Scientific Research
Fund of Hunan Provincial \hfill\break\indent
Natural Science Foundation of China, and
20070605 from by Scientific Research  Fund \hfill\break\indent
of Zhejiang Provincial Education Department}

\subjclass[2000]{34C25,  34K13, 34K25}
\keywords{Cohen-Grossberg neural networks;
exponential convergence;  \hfill\break\indent
delays;  time-varying coefficients}

\begin{abstract}
 This paper presents sufficient conditions for all solutions
 of the Cohen-Grossberg neural networks with
 delays and time-varying coefficients to converge exponentially
 to zero.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{remark}[theorem]{Remark}
\allowdisplaybreaks

\section{Introduction}

Consider the Cohen-Grossberg neural network (CGNN), with delay
and time-varying coefficients,
\begin{equation}
\begin{aligned}
 \dot x_i(t)&=-a_{i}(t,x_i(t))\Big[b_{i}(t,x_i(t))
 -\sum  _{j=1} ^n  c_{ij}(t)f_{j}(x_j(t-\tau_{ij}(t)))\\
 &\quad -\sum  _{j=1} ^n d_{ij}(t)g_{j}
 \Big(\int_{0}^{\infty}K_{ij}(u) x_{j}(t-u)du\Big) +I_i(t)\Big],\quad
 i=1,2,\dots,n,
\end{aligned} \label{e1.1}
\end{equation}
where $a_{i}  $ and  $ b_{i}  $ are continuous functions on
  $\mathbb{R}^{2}$, $f_{j}, g_{j},
  c_{ij},  d_{ij} $ and  $I_i $ are continuous functions on
  $\mathbb{R}$; $n$ corresponds to
  the number of units in a neural network; $x_i(t)$ denotes
  the potential (or voltage) of cell $i$ at time $t$; $a_{i}$
  represents an amplification function; $b_{i}$ is an appropriately
  behaved function; $c_{ij}(t)$ and $d_{ij}(t)$ denote the strengths
  of connectivity between cell $i$ and $j$ at time $t$ respectively;
  the activation functions $f_i(\cdot)$ and $g_i(\cdot)$ show  how the $i$th neuron
  reacts to the input, $\tau_{ij}\geq 0$ corresponds to the
    transmission delay of the $i$th unit
    along the axon of the $j$th unit at the time $t$, and $I_i(t)$ denotes the $i$th
  component of an external input source introduced from outside the
  network to cell $i$ at time $t$ for $i, j=1,2,\dots,n$.

  Since  the model of CGNNs was  introduced by
Cohen and  Grossberg \cite{c3}, the dynamical characteristics
 (including stable, unstable and periodic oscillatory) of CGNNs
 have been widely investigated for the sake of theoretical interest
 as well as application considerations. Many good results  on the
    problem of the existence and stability of  the equilibria   for
    system \eqref{e1.1} are given
    in the literature. We refer the reader to the references in this article
and the references cited
    therein. Suppose that the following conditions are satisfied.
\begin{itemize}
\item[(H0)]  $a_{i}(t,x_i ) =a_{i}(0,x_i )$ and
$b_{i}(t,x_i ) =b_{i}(0,x_i )$ for all $t$,  and
$ c_{ij}, d_{ij}, I_{j}:\mathbb{R}\to\mathbb{R}$
    are constants, where  $i,j=1,  2, \dots,  n$.

\item[(H0*)] For each $j\in\{1,  2,  \dots,  n\}$,   there
exist nonnegative constants  $\tilde{L}_{j}$ and $L_{j}$
 such that
$$
   |f_{j}(u )-f_{j}(v )|\leq \tilde{L}_{j}|u -v |,   \quad
   |g_{j}(u )-g_{j}(v )| \leq L_{j}|u -v |, \quad \forall
u,  v \in \mathbb{R}.
$$
\end{itemize}

  Most authors of the works listed above
showed that  all solutions of  system \eqref{e1.1} converge  to the
equilibrium point.   However, to the best of
our knowledge, no author has considered
the convergence  of all solutions without  assumptions (H0) and (H0*).
Thus, it is worthwhile to investigate the convergence  for
 \eqref{e1.1} in this case.
The main purpose of this paper is to give  new criteria   for
the convergence of all solutions of  \eqref{e1.1}. By
applying mathematical analysis techniques, without assuming
(H0) and (H0*),  we derive some sufficient conditions
ensuring that all solutions of \eqref{e1.1} converge exponentially
to  zero, which are new and complement previously known results.
Moreover, we provide an example that illustrates  our results.

  Throughout this paper,  for $i,  j=1,  2,  \dots,  n$,
it will be assumed that
      $K_{ij}:[0, +\infty) \to \mathbb{R}$ are continuous  functions,
      and there exists a   constant  $\tau $
    such that
\begin{equation}
 \tau=\max_{1\leq i,j\leq n}\big\{\sup_{t\in \mathbb{R}}\tau_{ij}(t)\big\}.
    \label{e1.2}
\end{equation}
We also assume that the following conditions hold.
\begin{itemize}
\item[(H1)]   For each $j\in\{1,  2,  \dots,  n \}$,
there exist nonnegative constants   $\tilde{L}_{j}$ and $L_{j}$
 such that
\begin{equation}
   |f_{j}(u ) |\leq \tilde{L}_{j}|u  |,   \quad
   |g_{j}(u ) |\leq L_{j}|u |,  \quad \forall  u   \in \mathbb{R}.\label{e1.3}
\end{equation}
\item[(H2)]  For $i=1, 2, \dots, n$, there exist  positive
 constants
 $\underline{a_i}$, $\overline{a_i}$   and $ T_{1}$ such that
 $$
\underline{a_i}\leq a_{i}(t,u) \leq  \overline{a_i}, \quad
\text{for all  } t>T_{1},\; u\in \mathbb{R}.
$$

\item[(H3)] For $i=1, 2, \dots, n$, there exist   positive
 constants
 $\underline{b_i}$  and $ T_{2}$ such that
$$
\underline{b}_i| u |\leq \mathop{\rm sign} (u) b_{i}(t,u)
,  \quad \text{for  all } t>T_{2}, \; u \in \mathbb{R}.
$$
\item[(H4)]  There exist constants $T_{3}>0, \eta>0,  \lambda>0$ and
$\xi_{i}>0$, $i=1,  2,  \dots,  n$, such that for all $t>T_{3}$,
$$
  -[\underline{a}_i\underline{b}_i -\lambda]\xi_{i}+
\sum_{j=1}^{n}|c  _{ij}(t)|\overline{a_i}e^{\lambda
\tau}\tilde{L}_{j}\xi_{j}+ \sum_{j=1}^{n}|d _{ij}(t)
|\overline{a_i}\int_{0}^{\infty}|K_{ij}(u) |e^{\lambda u}du
L_{j}\xi_{j}<-\eta<0,
$$
where $i=1,  2, \dots, n$.

\item[(H5)] $I_{i}(t)=O(e^{-\lambda t})$,  $i=1,  2, \dots,  n$.

\end{itemize}
The initial conditions associated with  \eqref{e1.1} are
\begin{equation}
x_{i}(s)=\varphi_{i}(s),\quad s\in (-\infty,  0], \quad i=1,2,\dots,n,
\label{e1.4}
\end{equation}
where $\varphi_{i}(\cdot)$  denotes a real-valued bounded
continuous function defined on $(-\infty, 0]$.
For $ Z(t)=(x_{1}(t), x_{2}(t),\dots,x_{n}(t))^{T} $, we define
the  norm
$$
\|Z(t)\|_{\xi}=\max_{i=1,2,\dots,n}|\xi^{-1}_{i}x_{i}(t)|.
$$
The remaining part of this paper is organized as follows. In
Section 2, we present  sufficient conditions to ensure
that all solutions of  \eqref{e1.1}  converge exponentially to
zero.   In Section 3, we shall give some examples and
remarks to illustrate the results obtained in the previous
sections.

\section{Main Results}

\begin{theorem} \label{thm2.1}
Assume that {\rm (H1)--(H5)} hold. Then
every solution
$$
Z(t)=(x_{1}(t),x_{2}(t),\dots,x_{n}(t))^{T}
$$
of  \eqref{e1.1}, corresponding to
any initial value $\varphi=(\varphi_{1}(t), \varphi_{2}(t),
\dots, \varphi_{n}(t))^{T}$,  satisfies
$$
 x_{ i}(t) =O(e^{-\lambda t}), \quad i= 1,  2, \dots,  n.
$$
\end{theorem}

 \begin{proof}  From (H5), we can choose
constants $F>0$ and $T> \max\{ T_{1},   T_{2},  T_{3}\}$ such
that
\begin{equation}
\overline{a_{i }}| I_{i}(t) |<\frac{1}{2}Fe^{-\lambda t}, \quad
\text{for  all  } t\geq T, \; i=1,  2,  \dots,  n.
 \label{e2.1}
\end{equation}
 Let $ Z(t)=(x_{1}(t), x_{2}(t),\dots,x_{n}(t))^{T} $ be a
solution of  \eqref{e1.1} with any initial value
$\varphi=(\varphi_{1}(t), \varphi_{2}(t), \dots,
\varphi_{n}(t))^{T}$, and let $i_{t}$ be an index such
that
\begin{equation}
\xi^{-1}_{i_{t}}|x_{i_{t}}(t)|=\|Z (t)\|_{\xi}. \label{e2.2}
\end{equation}
Calculating the upper right derivative of
 $ e^{\lambda s}|x_{i_{s}}(s)|$ along   \eqref{e1.1}, in view of
 \eqref{e2.1},
(H1), (H2) and (H3), we have
\begin{equation}
\begin{aligned}
&D^+(e^{\lambda s}|x_{i_{s}}(s)|)|_{s=t}\\
& =  \lambda e^{\lambda
t}|x_{i_{t}}(t)| +e^{\lambda t}\mathop{\rm sign}
(x_{i_{t}}(t))\{-a_{i_{t}}(t,x_{i_{t}}(t))[b_{i_{t}}(t,x_{i_{t}}(t))\\
&\quad -\sum _{j=1} ^n
 c_{i_{t}j}(t)f_{j}(x_j(t-\tau_{i_{t}j}(t)))\\
& \quad -\sum  _{j=1} ^n
 d_{i_{t}j}(t)g_{j}(\int_{0}^{\infty}K_{i_{t}j}(u) x_{j}(t-u)du)+I_{i_{t}}(t)]\} \\
& \leq   e^{\lambda t}\{-( \underline{a}_{i_{t}}
\underline{b}_{i_{t}}
-\lambda)|x_{i_{t}}(t)|\xi^{-1}_{i_{t}}\xi_{i_{t}} +
\sum_{j=1}^{n}|c_{i_{t}j}(t )|\overline{a_{i_{t}}}
 \tilde{L}_{j}|x_{j}(t-\tau_{i_{t}j}(t))|\xi^{-1}_{j
}\xi_{j}\\
& \quad +\sum_{j=1}^{n}| d_{i_{t}j}(t)|\overline{a_{i_{t}}}
L_{j}\int_{0}^{\infty}|K_{i_{t}j}(u)|
    |x_{j}(t-u) |\xi^{-1}_{j }du \xi_{j} \}+\frac{1}{2} Fe^{-\lambda
t}e^{\lambda t},
 \end{aligned} \label{e2.3}
\end{equation}
where $t>T$. Let
\begin{equation}
M(t)=\max_{s\leq t}\{e^{\lambda s}\|Z (s)\|_{\xi}\}.
\label{e2.4}
\end{equation}
It is obvious that $e^{\lambda t}\|Z (t)\|_{\xi}\leq M(t)$, and
$M(t)$ is non-decreasing.  Now, we  consider two
cases.

\noindent \textbf{Case (i).}  If
 \begin{equation}
M(t)>  e^{\lambda t}\|Z (t)\|_{\xi} \quad \text{for  all  } t\geq T.
\label{e2.5}
\end{equation}
  Then, we claim that
\begin{equation}
M(t)\equiv M(T) \quad \text{is  constant  for  all  }
   t\geq T. \label{e2.6}
\end{equation}
   Assume, by way of contradiction, that \eqref{e2.6} does not hold.
Then, there    exists
$t_{1}>T$ such that $M(t_{1})> M(T)$. Since
$$
e^{\lambda t}\|Z (t)\|_{\xi}\leq M(T) \quad    \text{for  all  } t\leq T,
$$
there must exist $\beta \in (T,  t_{1})$ such that
$$
e^{\lambda \beta}\|Z (\beta)\|_{\xi}= M(t_{1})\geq  M(\beta),
$$
which contradicts \eqref{e2.5}. This contradiction implies that
\eqref{e2.6} holds. It follows that
\begin{equation}
e^{ \lambda t}\|Z (t)\|_{\xi} <  M(t)=  M(T)
\text{ for  all  } t\geq T. \label{e2.7}
\end{equation}

\noindent\textbf{ Case (ii).}
 If there is  a point $t_{0}\geq T$ such that
$M(t_{0})=  e^{\lambda t_{0}}\|Z (t_{0})\|_{\xi}$. Then, in view
of \eqref{e2.3} and (H4), we obtain
\begin{equation}
\begin{aligned}
& D^+(e^{\lambda s}|x_{i_{s}}(s)|)|_{s=t_{0}}\\
&\leq   e^{\lambda t_{0}}\{-(
\underline{a}_{i_{t_{0}}} \underline{b}_{i_{t_{0}}}
-\lambda)|x_{i_{t_{0}}}(t_{0})|\xi^{-1}_{i_{t_{0}}}\xi_{i_{t_{0}}}\\
&\quad + \sum_{j=1}^{n}|c_{i_{t_{0}}j}(t _{0})|\overline{a_{i_{t_{0}}}}
 \tilde{L}_{j}|x_{j}(t_{0}-\tau_{i_{t_{0}}j}(t_{0}))|\xi^{-1}_{j
}\xi_{j}\\
&\quad  +\sum_{j=1}^{n}| d_{i_{t_{0}}j}(t_{0})|\overline{a_{i_{t_{0}}}}
L_{j}\int_{0}^{\infty}|K_{i_{t_{0}}j}(u)|
    |x_{j}(t_{0}-u) |\xi^{-1}_{j }du \xi_{j} \} + \frac{1}{2}F\\
& =   -( \underline{a}_{i_{t_{0}}} \underline{b}_{i_{t_{0}}}
-\lambda)  |x_{i_{t_{0}}}(t_{0})|e^{\lambda
t_{0}}\xi^{-1}_{i_{t_{0}}}\xi_{i_{t_{0}}} \\
&\quad +
\sum_{j=1}^{n}|c_{i_{t_{0}}j}(t _{0})|\overline{a_{i_{t_{0}}}}
 \tilde{L}_{j}|x_{j}(t_{0}-\tau_{i_{t_{0}}j}(t_{0}))|
 e^{\lambda (t_{0}-\tau_{i_{t_{0}}j}(t_{0}))}\xi^{-1}_{j} e^{ \lambda \tau_{i_{t_{0}}j}(t_{0})}
\xi_{j}\\
&\quad +\sum_{j=1}^{n}|
d_{i_{t_{0}}j}(t_{0})|\overline{a_{i_{t_{0}}}}
L_{j}\int_{0}^{\infty}|K_{i_{t_{0}}j}(u)|e^{\lambda u } |x_{j}(t_{0}-u) |
   e^{\lambda(t_{0}-u)}\xi^{-1}_{j }du \xi_{j} +\frac{1}{2}F\\
&\leq  \{-( \underline{a}_{i_{t_{0}}} \underline{b}_{i_{t_{0}}}
-\lambda)  \xi_{i_{t_{0}}} + \sum_{j=1}^{n}|c_{i_{t_{0}}j}(t
_{0})|\overline{a_{i_{t_{0}}}}
 \tilde{L}_{j}  e^{ \lambda \tau  }\xi_{j} \\
& \quad
 +\sum_{j=1}^{n}| d_{i_{t_{0}}j}(t_{0})|\overline{a_{i_{t_{0}}}}
L_{j}\int_{0}^{\infty}|K_{i_{t_{0}}j}(u)|e^{\lambda u } du \xi_{j}
\}M(t_{0})  +\frac{1}{2}F\\
& < -\frac{1}{2}\eta M(t_{0})+
 F.
\end{aligned}\label{e2.8}
\end{equation}
In addition, if $M(t_{0})\geq  2\frac{F}{\eta}$,
then $M(t )$ is strictly decreasing in a small neighborhood
$(t_{0}, t_{0}+\delta_{0})$.  This contradicts that $M(t)$
is non-decreasing.  Hence,
\begin{equation}
e^{\lambda t_{0}}\|Z (t_{0})\|_{\xi}=M(t_{0})<  2\frac{F}{\eta}.
 \label{e2.9}
\end{equation}

For $t>t_{0}$,   by the same approach as the one
 used in the  proof of \eqref{e2.9}, we have
\begin{equation}
e^{\lambda t }\|Z (t )\|_{\xi} <  2\frac{F}{\eta}, \quad  \text{if  }
   M(t )=  e^{\lambda t }\|Z (t )\|_{\xi}. \label{e2.10}
\end{equation}
 On the other hand, if $M(t )>  e^{\lambda t }\|Z (t )\|_{\xi}, t>t_{0}$,
we can choose $t_{0}\leq     t_{2}<t$ such that
$$
M(t_{2} )=  e^{\lambda t_{2}}\|Z (t_{2} )\|_{\xi}
<  2\frac{F}{\eta},\quad
M(s)>  e^{\lambda s }\|Z (s )\|_{\xi} \quad \text{for  all }
    s\in (t_{2},  t].
$$
Using  a similar argument as in the proof of  \textbf{Case (i)},
 we can show  that
\begin{equation}
M(s)\equiv M(t_{2})  \text{ is   constant  for  all  }
   s\in ( t_{2},  t], \label{e2.11}
\end{equation}
which implies
    $$
e^{ \lambda t }\|Z (t )\|_{\xi} <  M(t)=  M(t_{2})  < 2\frac{F}{\eta}.
$$
In summary, there must exist $N>0$ such that
$e^{ \lambda t }\|Z (t )\|_{\xi}<\max\{ M(T), 2\frac{F}{\eta}\}$
holds for all $t>N$. The proof is complete.
\end{proof}


\section{An Example}

In this section, we give an example to demonstrate the results
obtained in previous sections.
Consider the CGNN with delays and time-varying coefficients
\begin{equation}
\begin{gathered}
\begin{aligned}
 x_{1}'(t)
&=-(2+e^{\cos t}\frac{1}{10\pi}\arctan x_{1}(t))
[ (2-\frac{ (100+|t|)\sin t}{1+2|t|})(x_{1}(t) +5x^{3}_{1}(t))\\
&\quad + \frac{1}{8}\frac{ (101+|t|)\sin t}{1+4|t|}
    f_{1}(x_{1}(t -2\sin ^{2}t))
 + \frac{1}{8}\frac{ (102+|t|)\sin t}{1+36|t|}\\
&\quad \times  f_{2}(x_{2}(t-3\sin ^{2}t ))
  +  \frac{1}{8}\frac{ (103+|t|)\sin t}{1+4|t|}
\int_{0}^{\infty}e^{- u} g_{1}(x_{1}(t-u))du\\
&\quad + \frac{1}{8}\frac{ (100+|t|^{2})\sin t}{1+36|t|^{2}}
\int_{0}^{\infty}e^{- u} g_{2}(x_{2}(t-u))du+   e^{-3t}\sin t ],
\end{aligned}\\
\begin{aligned}
x_{2}'(t)
&=-(2+e^{\sin t}\frac{1}{10\pi}\arctan x_{2}(t))
[(4-\frac{ (200+|t|)\cos t}{1+2|t|} )(x_{2}(t) +15x^{3}_{2}(t))
 \\
&\quad + \frac{1}{8}\frac{ (200+|t|)\cos t}{1+8|t|}f_{1}(x_{1}
(t-2\sin ^{2}t )) \\
&\quad +\frac{1}{8}\frac{ (206+|t|)\cos t}{1+5|t|}f_{2}(x_{2}(t-5\sin ^{2}t ))\\
&\quad +\frac{1}{8}\frac{ (205+|t|)\cos t}{1+6|t|}\int_{0}^{\infty}e^{- u}
  g_{1}(x_{1}(t-u))du\\
&\quad + \frac{1}{8}\frac{ (204+|t|)\cos t}{1+7 |t|}
\int_{0}^{\infty}e^{- u} g_{2}(x_{2}(t-u))du+  e^{-t}\sin t ],
\end{aligned}
\end{gathered} \label{e3.1}
\end{equation}
where $f_{1}(x)=f_{2}(x) =g_{1}(x)=g_{2}(x)=x\sin x $.
Noting that
\begin{gather*}
a_{1}(t, x)=  (2+e^{\cos t}\frac{1}{10\pi}\arctan x_{1} ), \quad
a _{2}(t, x)= (2+e^{\sin t}\frac{1}{10\pi}\arctan x_{2} ),
\\
b_{1}(t, x)= (2-\frac{ (100+|t|)\sin t}{1+2|t|} )(x_{1}
 +5x^{3}_{1} ), \\
b _{2}(t, x)=  (4-\frac{ (200+|t|)\cos t}{1+2|t|} )(x_{2}  +15x^{3}_{2} ),
\\
 L _{1}=L _{2}=\tilde{L} _{1}=\tilde{L} _{2}=1,  \quad
    \tau=5, \quad K_{ij}(u)= e^{-u}, \quad i, j=1,2,
\\
 c _{11}(t) = \frac{1}{8}\frac{ (101+|t|)\sin t}{1+4|t|}, \quad
 d _{11} (t)= \frac{1}{8}\frac{ (103+|t|)\sin t}{1+4|t|},  \\
 c _{12}(t) =  \frac{1}{8}\frac{ (102+|t|)\sin t}{1+36|t|}, \quad
d _{12}(t) =  \frac{1}{8}\frac{ (100+|t|^{2})\sin t}{1+36|t|^{2}}, \\
c _{21}(t) = \frac{1}{8}\frac{ (200+|t|)\cos t}{1+8|t|}, \quad
d _{21}(t) = \frac{1}{8}\frac{ (205+|t|)\cos t}{1+6|t|},
\\
 c _{22} (t)= \frac{1}{8}\frac{ (206+|t|)\cos t}{1+5|t|},  \quad
 d _{22}(t)= \frac{1}{8}\frac{ (204+|t|)\cos t}{1+7 |t|}\,.
\end{gather*}
It follows that
\begin{gather*}
1=\underline{a_i}\leq a_{i}(t,u) \leq  \overline{a_i}=4,  \quad
\text{for all  } t,u\in \mathbb{R},  i=1, 2;\\
| u | =\underline{b}_i| u |\leq \mathop{\rm sign} (u) b_{i}(t,u)
,  \quad \text{for  all  } t,  u \in \mathbb{R}, \; i=1, 2.
\end{gather*}
Then, we can choose a sufficiently large
 constant $T_{0}>0$ and  positive constants
$\bar{\eta}=\frac{1}{2} $ and  $\xi_{i}=1$, $i=1,  2$, such that
for all $t>T_{0}$, there
    holds
\begin{align*}
&- \underline{a}_i\underline{b}_i \xi_{i}+
\sum_{j=1}^{2}|c  _{ij}(t)|\overline{a_i} \tilde{L}_{j}\xi_{j}+
\sum_{j=1}^{2}|d _{ij}(t)
|\overline{a_i}\int_{0}^{\infty}|K_{ij}(u) | du L_{j}\xi_{j}\\
&= - \underline{a}_i\underline{b}_i  +
\sum_{j=1}^{2}|c  _{ij}(t)|\overline{a_i} \tilde{L}_{j} +
\sum_{j=1}^{2}|d _{ij}(t) |\overline{a_i}  L_{j} \\
&= - 1 + 4\sum_{j=1}^{2}|c  _{ij}(t)|    + 4\sum_{j=1}^{2}|d _{ij}(t) |
<-\frac{1}{2}=-\bar{\eta}<0, \quad  i=1,  2.
\end{align*}
 Then, we can choose constants $\eta>0$ and  $\lambda>0 $
such that
\begin{align*}
&-[\underline{a}_i\underline{b}_i
-\lambda]\xi_{i}+ \sum_{j=1}^{2}|c
_{ij}(t)|\overline{a_i}e^{\lambda \tau}\tilde{L}_{j}\xi_{j}+
\sum_{j=1}^{2}|d _{ij}(t)
|\overline{a_i}\int_{0}^{\infty}|K_{ij}(u) |e^{\lambda u}du
L_{j}\xi_{j}\\
&<-\eta<0, \quad  i=1,  2,   t>T_{0},
\end{align*}
which implies that \eqref{e3.1}  satisfies
(H1)--(H5). Hence,  from   Theorem \ref{thm2.1},    all
solutions of system \eqref{e3.1} converge  exponentially to  the
    zero point $(0,  0, \dots, 0)^{T}$.

\begin{remark} \label{rmk3.1} \rm
  Note that $f_{1}(x)=f_{2}(x) =g_{1}(x)=g_{2}(x)=x\sin x $, and that
\eqref{e3.1} is a very simple form of delayed Cohen-Grossberg neural
 network with  time-varying coefficients.
It is clear that the   conditions  (H0) and (H0*) are not satisfied.
Therefore,  the results in  the references of this article
are not applicable for proving that the solutions to \eqref{e3.1} converge
exponentially  to  zero.
This implies that the results of this paper are essentially new.
\end{remark}

\begin{thebibliography}{00}

\bibitem{c1} J. Cao;
\emph{Global stability analysis in delayed cellular neural networks},
Phys. Rev. E, 59(1999) 5940-5944.

\bibitem{c2} J. Cao, J. Liang;
\emph{Boundedness and stability for Cohen-Grossberg
neural networks with time-varying delays}, J. Math. Anal. Appl.,
              296(2004) 665-685.

\bibitem{c3}  M. Cohen, S. Grossberg;
\emph{Absolute stability and global pattern formation and parallel
memory storage by competitive neural networks}, IEEE Trans. Man
 Cybernet., SMC, 13(1983) 815-826.

\bibitem{c5} T. Chen, L. Rong;
\emph{Delay-independent stability analysis of Cohen-Grossberg neural
networks}, Physics Letters A, 317(2003) 436-449.

\bibitem{c6} Chun-Hsien Li and Suh-Yuh Yang;
\emph{A further analysis on harmless delays in Cohen-Grossberg neural
networks}, Chaos, Solitons and Fractals, 34(2007) 646-653.


\bibitem{g1} K. Gopalsamy, X. Z. He;
\emph{Delay-independent stability in  bidirection associative memory networks},
 IEEE Trans. Neural Networks, 5(1994) 998-1002.

\bibitem{g2}  S. Guo, L. Huang;
\emph{Stability analysis of a delayed Hopfield neural network},
 Physical Review E, 67 (2003) 061902.

\bibitem{l1}  X. Li, L. Huang, H. Zhu;
\emph{Global stability of cellular neural networks with constant
and variable delays}, Nonlinear Anal., 53(2003) 319-333.

\bibitem{l2} X. Liao, C. Li and K. Wong;
\emph{Criteria for exponential stability of Cohen-Grossberg neural
       networks}, Neural Networks, 17(2004) 1401-1414.

\bibitem{l3} Bingwen Liu, Lihong Huang;
\emph{Existence and exponential stability of
periodic solutions for a class of Cohen-Grossberg neural networks
with time-varying delays}, Chaos, Solitons and Fractals 32 (2007)
617-627.

\bibitem{l4} Fei Long, Yixuan Wang, Shuzi Zhou;
\emph{Existence and exponential stability of periodic solutions for a class of
Cohen-Grossberg neural networks with bounded and unbounded delays},
Nonlinear Analysis: Real World Applications 8 (2007) 797- 810.

\bibitem{w1} L. Wang, X. Zou;
\emph{Harmless delays in Cohen-Grossberg neural networks},
  Physics D, 170 (2002) 162-173.

\bibitem{w2}  L. Wang;
\emph{Stability of Cohen-Grossberg neural networks with distributed delays},
Appl. Math. Comput., 160(2005) 93-110.

\bibitem{w3} L. Wang, X. Zou;
\emph{Exponential stability of Cohen-Grossberg neural networks},
Neural Networks, 15 (2002) 415-422.

\bibitem{y1} Zhaohui Yuan, Dewen Hua, Lihong Huang, Guohua Dong;
\emph{Existence and global exponential stability of periodic solution for
Cohen-Grossberg neural networks with delays}, Nonlinear Analysis:
Real World Applications 7 (2006) 572-590.

\bibitem{z1} Weirui Zhao;
\emph{Dynamics of Cohen-Grossberg neural network with variable
coefficients and time-varying delays}, Nonlinear Analysis: Real
World Applications, In Press, Corrected Proof, Available online 23
February 2007.

\bibitem{z2} Fuxing Zhang, Bingwen Liu, Lihong Huang;
\emph{Existence and exponential stability of periodic
solutions for a class of Cohen-Grossberg neural networks with
bounded and unbounded delays}, Computers and Mathematics with
Applications 53 (2007) 1325-1338.

\end{thebibliography}

\end{document}
