\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2008(2008), No. 133, pp. 1--8.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu  (login: ftp)}
\thanks{\copyright 2008 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2008/133\hfil Exponential convergence]
{Exponential convergence for BAM neural networks with distributed delays}

\author[Y. Li, Y. Ren \hfil EJDE-2008/133\hfilneg]
{Yongkun Li, Yaping Ren}  % in alphabetical order

\address{Yongkun Li \newline
Department of Mathematics,
Yunnan University\\
Kunming, Yunnan 650091, China}
\email{yklie@ynu.edu.cn}


\address{Yaping Ren \newline
School of Statistics and Mathematics,
Yunnan University of Finance and Economics,
Kunming  650221, China}
\email{ren\_yaping5@yahoo.com.cn}

\thanks{Submitted July 14, 2008. Published September 26, 2008.}
\thanks{Supported by grants 10361006 from the National Natural Science
Foundation of China, \hfill\break\indent
and 2003A0001M from the Natural Science
Foundation of Yunnan Province}
\subjclass[2000]{92B20, 34K20}
\keywords{Bidirectional associative memory networks;
 delay kernels; \hfill\break\indent exponential convergence}

\begin{abstract}
 This paper concerns the exponential convergence of
 bidirectional associative memory (BAM) neural networks
 with unbounded distributed delays. Sufficient
 conditions are derived by exploiting the exponentially fading
 memory property of delay kernel functions. The method is based on
 the comparison principle for delay differential equations and does not
 require the construction of any Lyapunov functions.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{remark}[theorem]{Remark}
\allowdisplaybreaks

\section{Introduction}

The bidirectional associative memory (BAM) model, known as an
extension of the unidirectional autoassociator of Hopfield
\cite{h1}, was first introduced by Kosko \cite{k1}.
It has been used in many fields
such as pattern recognition and automatic control. Sufficient
conditions have been obtained for the global asymptotic stability
of delayed BAM networks; see the references in this article. Only
a few results are available on the exponential stability of BAM
networks with distributed delays. As is well known, exponential
stability guarantees a fast response in a system and is therefore a
desirable property when evaluating and designing BAM networks.

Mathematically, the effect of distributed time delays on the
dynamics of BAM networks is often characterized through delay
kernel functions. It is hence natural to expect that certain
conditions on the delay kernels are required in order to attain
exponential convergence of the system.


The BAM networks with unbounded distributed delays under
consideration are described by the following integro-differential equations

\begin{equation} \label{e1.1}
\begin{aligned}
\dot{x}_i(t)&=-a_i(t)x_i(t)+\sum_{j=1}^{m}A_{ji}(t)
f_j(x_j(t-\tau_{j}))\\
&\quad +\sum_{j=1}^{m}C_{ji}(t)\int_{-\infty}^{t}
K_{ji}(t-s)g_j(x_j(s))\,{\,d} s+I_i(t),\quad
 i=1,\dots,n,\\
\dot{y}_j(t)&=-b_j(t)y_j(t)+\sum_{i=1}^{n}B_{ij}(t)
h_i(y_i(t-\tau_{i}))\\
&\quad +\sum_{i=1}^{n}D_{ij}(t)\int_{-\infty}^{t}
G_{ij}(t-s)l_i(y_i(s))\,{\,d} s+J_j(t),\quad j=1,\dots,m,
\end{aligned}
\end{equation}
where $x_i$ and $y_j$  are the activations of the $i$th neuron and
the $j$th neuron $(i=1,\dots,n,j=1,\dots,m)$; $a_i>0$ and $b_j>0$
are passive decay rates of neurons $i$ and $j$; $A_{ji}$,
$C_{ji}$, $B_{ij}$ and $D_{ij}$ are the connection weights; $f_j$,
$g_j$, $h_i$ and $l_i$ are the activation functions of the
neurons; $I_i(t)$ and $J_j(t)$
denote the inputs introduced from outside the networks to
cells $i$ and $j$, respectively; $K_{ji}$ and $G_{ij}$ are the
distributed delay kernels representing the past history effects on
the neuron state dynamics. It is usually assumed that $K_{ji}$ and
$G_{ij}$ are non-negative and continuous functions defined on
$[0,+\infty)$ and satisfy the normalization conditions
\[
\int_0^\infty K_{ji}(s)\,{\,d} s=1,\quad
\int_0^\infty G_{ij}(s)\,{\,d}s=1.
\]
We also assume that the delay kernels satisfy the conditions
\begin{equation} \label{e1.2}
\int_0^\infty K_{ji}(s)e^{\sigma_0s}\,{\,d} s<\infty,\quad
\int_0^\infty G_{ij}(s)e^{\sigma_0s}\,{\,d}s<\infty,\quad
i=1,\dots,n,\; j=1,\dots,m,
\end{equation}
for some scalar $\sigma_0>0$. That is, we assume that the neurons
are of exponentially fading memory.
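For instance, the pure exponential kernel
\[
K_{ji}(s)=G_{ij}(s)=re^{-rs},\quad s\geq 0,\; r>0,
\]
satisfies both the normalization conditions and \eqref{e1.2} for any
$0<\sigma_0<r$, since
$\int_0^\infty re^{-rs}e^{\sigma_0 s}\,{\,d}s=r/(r-\sigma_0)<\infty$.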

The initial conditions for system \eqref{e1.1} are specified as
continuous functions $\varphi_{x_i}$, $\varphi_{y_j}$:
$(-\infty,0]\rightarrow\mathbb{R}$; i.e.,
$x_i(s)=\varphi_{x_i}(s)$ and $y_j(s)=\varphi_{y_j}(s)$ for
$s\leq 0$. The existence and uniqueness of a solution to the
initial value problem for system \eqref{e1.1} follows from the Lipschitz
conditions on the activation functions:
\begin{gather} \label{e1.3}
| f_j(a)-f_j(b)|\leq \mu_{f_j}| a-b|,\quad
|g_j(a)-g_j(b)|\leq \mu_{g_j}| a-b|,\quad \forall a,b\in\mathbb{R},
\\
| h_i(a)-h_i(b)|\leq \mu_{h_i}| a-b|,\quad
| l_i(a)-l_i(b)|\leq \mu_{l_i}| a-b|,\quad \forall a,b\in\mathbb{R},
\label{e1.4}
\end{gather}
where $\mu_{f_j}>0$, $\mu_{g_j}>0$, $\mu_{h_i}>0$ and
$\mu_{l_i}>0$ are the Lipschitz constants, $i=1,\dots,n $,
$j=1,\dots,m$.
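For instance, the choice $f_j=g_j=h_i=l_i=\tanh$ satisfies
\eqref{e1.3} and \eqref{e1.4} with all Lipschitz constants equal to $1$,
since $|\tanh(a)-\tanh(b)|\leq|a-b|$ for all $a,b\in\mathbb{R}$; moreover
$\tanh(0)=0$, in accordance with the normalization assumed below.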

Without loss of generality we may assume $f_j(0)=0$, $g_j(0)=0$,
$h_i(0)=0$ and $l_i(0)=0$ for $i=1,\dots,n $, $j=1,\dots,m$. So
the origin is a fixed point of system \eqref{e1.1}. The aim of this paper
is to establish conditions for system \eqref{e1.1} to converge to the
origin in terms of
\begin{equation} \label{e1.5}
| x_i(t)|\leq \alpha_i e^{-\sigma t},\quad
|y_j(t)|\leq \beta_j e^{-\sigma t},\quad t\geq0,\;
i=1,\dots,n ,j=1,\dots,m,
\end{equation}
whenever $| x_i(s)|\leq \alpha_i$ and $| y_j(s)|\leq
\beta_j$ for $s\leq 0$, where $\alpha_i>0$, $\beta_j>0$ and
$\sigma >0$ are real constants. Estimate \eqref{e1.5} gives a componentwise
exponential convergence estimate for system \eqref{e1.1}. Clearly, $\sigma$
provides an estimate of the exponential decay rate of the system,
while $\alpha_i$ and $\beta_j$ give bounds on the states of the $i$th
neuron and the $j$th neuron.


It is not difficult to see that the componentwise exponential
convergence property defined above is stronger than the
conventional exponential stability in the Lyapunov sense. Indeed, if
the estimate \eqref{e1.5} holds, the origin of system \eqref{e1.1} must be
exponentially stable, but the converse is not true in general. An
obvious advantage of this type of convergence is that it allows an
individual monitoring of each neuron's state.
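Note also that, for initial data satisfying the bounds above, \eqref{e1.5}
immediately yields the norm estimate
\[
\max\Big\{\max_{1\leq i\leq n}|x_i(t)|,\;\max_{1\leq j\leq m}|y_j(t)|\Big\}
\leq \max\Big\{\max_{1\leq i\leq n}\alpha_i,\;\max_{1\leq j\leq m}\beta_j\Big\}
e^{-\sigma t},\quad t\geq 0,
\]
that is, exponential decay of the state in the maximum norm.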

The main purpose of this paper is to find conditions which ensure
the componentwise exponential convergence estimate. The organization
of this paper is as follows. In Section 2, we introduce some lemmas
cited from \cite{c1} which are useful in the proof of our main results.
In Section 3, we use the comparison principle to analyze
the exponential convergence of BAM networks. In Section 4, we give
an example illustrating the effectiveness of the obtained
results.


 \section{Preliminaries}

Let $F(t,\varphi)$ be an $n$-vector-valued continuous functional
with $t\geq 0$ and $\varphi$ a continuous function from
$(-\infty,0]$ into $\mathbb{R}^n$, and let $F_i$ denote the $i$th
component of $F$.

$F(t,\varphi)$ is said to be quasi-monotone non-decreasing in
$\varphi$ if, for $i=1,\dots,n$, $F_i(t,\varphi)\leq
F_i(t,\psi)$ whenever $\varphi_i(0)=\psi_i(0)$ and
$\varphi(s)\leq\psi(s)$
 (in the componentwise sense) for all $s\leq 0$.

 For a continuous function $z(t)$ from $\mathbb{R} $ into
 $\mathbb{R}^n$, define the truncation $z_t$ by $z_t(s)=z(t+s)$
 for $s\leq 0$.

 The following comparison principle is a direct extension of the one
 cited in \cite{c1} to the unbounded delay case.

 \begin{lemma} \label{lem2.1}
 Let $F(t,\varphi)$ be quasi-monotone non-decreasing in
$\varphi$. If $p(t)$ and $q(t)$ are vector-valued continuous
functions such that, for $i=1,\dots,n $, $s\leq 0$,
$t\geq 0$,
\begin{itemize}
  \item [(i)] $D^+p_i(t)\leq F_i(t,p_t)$,
  \item [(ii)] $\dot{q}_i(t)=F_i(t,q_t)$,
  \item [(iii)] $p_i(s)\leq q_i(s)$,
\end{itemize}
then $p_i(t)\leq q_i(t)$ for $t\geq 0$.
 \end{lemma}

Following the same line of proof as in \cite[Lemma 2]{c1}, we can derive
from Lemma \ref{lem2.1} the following comparison result.

\begin{lemma} \label{lem2.2}
 Let $F(t,\varphi)$ be quasi-monotone non-decreasing in
$\varphi$. If $e(t)$ and $q(t)$ are vector-valued continuous
functions such that, for $i=1,\dots,n $, $s\leq 0$,
$t\geq 0$,
\begin{itemize}
  \item [(i)] $\dot{e}_i(t)\geq F_i(t,e_t)$,
  \item [(ii)] $\dot{q}_i(t)=F_i(t,q_t)$,
  \item [(iii)]$q_i(s)\leq e_i(s)$,
\end{itemize}
then $q_i(t)\leq e_i(t)$ for $t\geq 0$.
 \end{lemma}


 \section{Exponential convergence}

In this section we discuss the componentwise exponential
convergence of system \eqref{e1.1}. To get the estimate \eqref{e1.5}, we
evaluate the upper right derivative $D^+| x_i(t)|$ for a solution
$x_i(t)$ of system \eqref{e1.1} to obtain
\begin{equation} \label{e3.1}
\begin{aligned}
D^+| x_i(t)|
&=\limsup_{r\rightarrow 0^+}\frac{1}{r}[| x_i(t+r)|-| x_i(t)|] \\
&\leq -| a_i(t)|| x_i(t)|+\sum_{j=1}^{m}| A_{ji}(t)|
| f_j(x_j(t-\tau_{j}))| \\
&\quad +\sum_{j=1}^{m}| C_{ji}(t)|\int_{-\infty}^{t}
K_{ji}(t-s)| g_j(x_j(s))|\,{\,d} s+| I_i(t)| \\
&\leq -| a_i(t)| | x_i(t)|+\sum_{j=1}^{m}| A_{ji}(t)|
\mu_{f_j}| x_j(t-\tau_{j})| \\
&\quad +\sum_{j=1}^{m}\mu_{g_j}| C_{ji}(t)|\int_{-\infty}^{t}
K_{ji}(t-s) | x_j(s)|\,{\,d} s+| I_i(t)|,
\end{aligned}
\end{equation}
for $i=1,\dots,n$.
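Here we have used $f_j(0)=0$ and $g_j(0)=0$ together with the Lipschitz
conditions \eqref{e1.3}, so that
\[
|f_j(u)|=|f_j(u)-f_j(0)|\leq\mu_{f_j}|u|,\quad
|g_j(u)|=|g_j(u)-g_j(0)|\leq\mu_{g_j}|u|,\quad u\in\mathbb{R}.
\]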

In the context of this paper, we may take $F$ in Lemma \ref{lem2.1} to be
\begin{align*}
F_i(t,\varphi)
&= -| a_i(t)| \varphi_i(0)+\sum_{j=1}^{m}|A_{ji}(t)|
  \mu_{f_j}\varphi_j(-\tau_{j})\\
&\quad +\sum_{j=1}^{m}\mu_{g_j}| C_{ji}(t)|\int_{-\infty}^{0}
K_{ji}(-s) \varphi_j(s)\,{\,d} s+| I_i(t)|,
\end{align*}
for $i=1,\dots,n $. It is clear that $F$ is quasi-monotone
non-decreasing in $\varphi$.
So the right-hand side of the last inequality in \eqref{e3.1} is
quasi-monotone non-decreasing in the values $| x_j(s)|$ for
$s\in(-\infty, t]$ with $t\geq 0$. This implies that $| x_i(t)|$ can be
dominated by the following comparison system:
\begin{equation} \label{e3.2}
\begin{aligned}
\dot{q}_i(t)&= -|a_i(t)|q_i(t)+\sum_{j=1}^{m}| A_{ji}(t)|
\mu_{f_j}q_j(t-\tau_{j}) \\
&\quad +\sum_{j=1}^{m}\mu_{g_j}| C_{ji}(t)|\int_{-\infty}^{t}
K_{ji}(t-s) q_j(s)\,{\,d} s+| I_i(t)|,
\end{aligned}
\end{equation}
in the sense that $|x_i(t)|\leq q_i(t)$ for $t\geq 0$
whenever  $|x_i(s)|\leq q_i(s)$ for $s\leq 0$,
$i=1,\dots,n $. This is a special case of a general
comparison principle for distributed delay systems; see Lemma \ref{lem2.1}.
It enables us to derive properties of the nonlinear system \eqref{e1.1} by
examining the linear comparison system \eqref{e3.2}. However, it should be
noted that even for a linear distributed delay system such as
\eqref{e3.2}, no general results are known that provide necessary
and sufficient conditions for exponential convergence of the
system. Therefore, in the sequel we first find
an appropriate exponential estimate for the comparison system
\eqref{e3.2}, from which the estimate \eqref{e1.5} for system
\eqref{e1.1} then follows by the above comparison principle. To do this,
we make use of the following comparison result.

Suppose that there are $n$ functions $e_i(t)$ such that
\begin{equation} \label{e3.3}
\begin{aligned}
\dot{e}_i(t)&\geq -|a_i(t)| e_i(t)+\sum_{j=1}^{m}| A_{ji}(t)|
\mu_{f_j}e_j(t-\tau_{j}) \\
&\quad +\sum_{j=1}^{m}\mu_{g_j}| C_{ji}(t)|\int_{-\infty}^{t}
K_{ji}(t-s) e_j(s)\,{\,d} s+|I_i(t)|,
\end{aligned}
\end{equation}
for $i=1,\dots,n $. Then  $q_i(t)\leq e_i(t)$ for $t\geq
0$ provided $q_i(s)\leq e_i(s)$ for $s\leq 0$. Actually,
this can be derived simply by using the above comparison principle
along with the reverse transformations $q_i\rightarrow -q_i$ and
$e_i\rightarrow -e_i$ in \eqref{e3.2} and \eqref{e3.3}, respectively.
See Lemma \ref{lem2.2}.

From this we have $|x_i(t)|\leq e_i(t)$ for $t\geq 0$
provided $|x_i(s)|\leq e_i(s)$ for $s\leq 0$,
$i=1,\dots,n$.
Now, taking $e_i(t)=\alpha_ie^{-\sigma t}$, to satisfy
inequalities \eqref{e3.3} it suffices to have
\begin{equation} \label{e3.4}
\begin{aligned}
&(\sigma-|a_i(t)|)\alpha_i+\sum_{j=1}^{m}\alpha_j| A_{ji}(t)|
\mu_{f_j}e^{\sigma\tau_j}\\
&+\sum_{j=1}^{m}\alpha_j\mu_{g_j}|
C_{ji}(t)|\int_0^{\infty} K_{ji}(s) e^{\sigma s}\,{\,d} s+|
I_i(t)|\leq 0,
\end{aligned}
\end{equation}
for $i=1,\dots,n $.
For the same reason we can get
\begin{equation} \label{e3.5}
\begin{aligned}
&(\sigma-|b_j(t)|)\beta_j+\sum_{i=1}^{n}\beta_i| B_{ij}(t)|
\mu_{h_i}e^{\sigma\tau_i}\\
&+\sum_{i=1}^{n}\beta_i\mu_{l_i}|
D_{ij}(t)|\int_0^{\infty} G_{ij}(s) e^{\sigma s}\,{\,d} s+|
J_j(t)|\leq 0,
\end{aligned}
\end{equation}
for $j=1,\dots,m $. These, together with condition \eqref{e1.2}, lead to
the following result.

\begin{theorem} \label{thm3.1}
System \eqref{e1.1} admits the exponential convergence estimate
\eqref{e1.5} with
$0<\sigma \leq \sigma_0$ if conditions \eqref{e3.4} and \eqref{e3.5} hold.
\end{theorem}

This result establishes an explicit relation between the exponential
convergence dynamics and the system parameters, including the weights, the
neuron gains, and the delay kernels.

\begin{remark} \label{rmk3.1} \rm
 It is obvious that criterion \eqref{e3.4} is preserved when all the
$\alpha_i$ $(i=1,\dots,n)$ are scaled up by a common factor.
Thus, if condition \eqref{e3.4} is satisfied by a set of $\alpha_i>
0$ $(i=1,\dots,n)$, it remains valid with $\lambda\alpha_i$
replacing each $\alpha_i$ for any scalar $\lambda\geq 1$. This is essential
because of the global Lipschitz conditions \eqref{e1.3} and \eqref{e1.4}
assumed previously on the
nonlinear functions. As a result, Theorem \ref{thm3.1}
actually provides a sufficient condition for global exponential
convergence of system \eqref{e1.1}. In fact, for any initial functions
$\varphi_{x_i}(s)$, one can always pick a scalar $\lambda\geq 1$
large enough so that $|\varphi_{x_i}(s)|\leq
\lambda\alpha_i$ for $s\leq 0$, $i=1,\dots,n$. Hence, by the
theorem it follows that
\begin{equation} \label{e3.6}
|x_i(t)|\leq \lambda\alpha_i e^{-\sigma t},\quad i=1,\dots,n.
\end{equation}
For the same reason, we get
\begin{equation} \label{e3.7}
|y_j(t)|\leq \lambda\beta_j e^{-\sigma t},\quad j=1,\dots,m.
\end{equation}
 So the system is globally exponentially
convergent to the origin in terms of the estimates \eqref{e3.6} and
\eqref{e3.7}.
\end{remark}

Conditions \eqref{e3.4} and \eqref{e3.5} are delay-dependent since they
involve the delay kernels $K_{ji}(s)$ and $G_{ij}(s)$ explicitly.
We can also derive the following delay-independent results. To do
this, let
\begin{align*}
z_x(\sigma)&= \max_{1\leq i\leq
n}\Big\{(\sigma-|
a_i(t)|)\alpha_i+\sum_{j=1}^{m}\alpha_j|A_{ji}(t)|
\mu_{f_j}e^{\sigma\tau_j}\\
&\quad +\sum_{j=1}^{m}\alpha_j\mu_{g_j}|
C_{ji}(t)|\int_0^{\infty} K_{ji}(s) e^{\sigma s}\,{\,d}
s+|I_i(t)|\Big\}
\end{align*}
and
\begin{align*}
z_y(\sigma)&= \max_{1\leq j\leq
m}\Big\{(\sigma-| b_j(t)|)\beta_j+\sum_{i=1}^{n}\beta_i|
B_{ij}(t)|
\mu_{h_i}e^{\sigma\tau_i}\\
&\quad +\sum_{i=1}^{n}\beta_i\mu_{l_i}|
D_{ij}(t)|\int_0^{\infty} G_{ij}(s) e^{\sigma s}\,{\,d} s+|
J_j(t)|\Big\}.
\end{align*}
 It is clear that $z_x(\sigma)$ and $z_y(\sigma)$ are continuous for
$\sigma\in[0,\sigma_0]$ by condition \eqref{e1.2}. Therefore, if
$z_x(0)<0$ and $z_y(0)<0$; i.e.,
\begin{gather}
-|a_i(t)|\alpha_i+\sum_{j=1}^{m}\alpha_j|A_{ji}(t)|
\mu_{f_j}+\sum_{j=1}^{m}\alpha_j\mu_{g_j}| C_{ji}(t)|+|
I_i(t)|<0, \label{e3.8} \\
-|b_j(t)|\beta_j+\sum_{i=1}^{n}\beta_i|B_{ij}(t)|
\mu_{h_i}+\sum_{i=1}^{n}\beta_i\mu_{l_i}| D_{ij}(t)|+|
J_j(t)|< 0, \label{e3.9}
\end{gather}
for $i=1,\dots,n$ and $j=1,\dots,m$, then by continuity, there
exists some $\sigma\in(0,\sigma_0]$ such that
$z_x(\sigma)\leq 0$ and $z_y(\sigma)\leq 0$; i.e.,
conditions \eqref{e3.4} and \eqref{e3.5} hold, and vice versa.
Thus, by also noting
Remark \ref{rmk3.1}, we conclude the following condition, equivalent to that of
Theorem \ref{thm3.1}.

\begin{theorem} \label{thm3.2}
System \eqref{e1.1} is globally exponentially convergent in terms of the
estimates \eqref{e3.6} and \eqref{e3.7} for some $\sigma\in(0,\sigma_0]$,
$\alpha_i>0$, $\beta_j>0$, $i=1,\dots,n$, $j=1,\dots,m$, and a scalar
$\lambda\geq 1$ depending on the initial functions, if
conditions \eqref{e3.8} and \eqref{e3.9} hold.
\end{theorem}
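For constant coefficients and zero external inputs, conditions \eqref{e3.8}
and \eqref{e3.9} are elementary linear inequalities in the $\alpha_i$ and
$\beta_j$ and can be checked numerically. The following is a minimal sketch
of such a check for a candidate pair of vectors, with $m=n$; the matrices,
gains and decay rates below are illustrative assumptions, not part of the
analysis above.
\begin{verbatim}
# Minimal sketch: test conditions (3.8)-(3.9) for constant weights,
# zero external inputs, and candidate vectors alpha, beta.
import numpy as np

n = 3
# doubly stochastic weight matrices (illustrative)
A = B = C = D = (np.ones((n, n)) - np.eye(n)) / (n - 1)
a = b = 1.0 * np.ones(n)         # passive decay rates
mu_f = mu_h = 0.3 * np.ones(n)   # Lipschitz gains of f_j, h_i
mu_g = mu_l = 0.3 * np.ones(n)   # Lipschitz gains of g_j, l_i
alpha = beta = np.ones(n)        # candidate vectors

# (3.8): -a_i*alpha_i + sum_j alpha_j*|A_ji|*mu_f_j
#        + sum_j alpha_j*mu_g_j*|C_ji| < 0   for every i
lhs_x = (-a * alpha + np.abs(A).T @ (alpha * mu_f)
         + np.abs(C).T @ (alpha * mu_g))
# (3.9): the analogous inequality for the y-variables
lhs_y = (-b * beta + np.abs(B).T @ (beta * mu_h)
         + np.abs(D).T @ (beta * mu_l))

print("condition (3.8) satisfied:", bool((lhs_x < 0).all()))
print("condition (3.9) satisfied:", bool((lhs_y < 0).all()))
\end{verbatim}
For the values shown, each component of the left-hand sides equals
$-1+0.3+0.3=-0.4<0$, so both conditions hold.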

 \section{Examples}

 To illustrate the above results, we now consider a simple example
 of system \eqref{e1.1} with $m=n$, comprising identical neurons coupled
 through weights $A_{ji}$, $B_{ij}$, $C_{ji}$ and $D_{ij}$ whose absolute
 values $|A_{ji}|$, $|B_{ij}|$, $|C_{ji}|$ and $|D_{ij}|$
 constitute doubly stochastic matrices. One example
 of such a matrix is
 \[
 A=B=C=D:=\frac{1}{n-1}
 \begin{pmatrix}
 0&1&1&\cdots&1\\
1&0&1&\cdots&1\\
\vdots&\vdots&\vdots&\ddots&\vdots\\
1&1&1&\cdots&0
\end{pmatrix}.
 \]
Assume that the neuronal passive decay rates satisfy $a_i=b_j=c>0$ and that
the neuron activations satisfy conditions \eqref{e1.3} and \eqref{e1.4} with
the gains $0<\mu_{f_j}=\mu_{h_i}<1$ and
$\mu_{g_j}=\mu_{l_i}:=\mu>0$. The delay kernels $K_{ji}$ and
$G_{ij}$ are taken as
\[
K_{ji}(t)=G_{ij}(t)=\frac{r^{p+1}}{p!}t^pe^{-rt},\quad
r\in(0,\infty),\; p=0,1,2,\dots.
\]
It can be calculated that, for $0\leq\sigma<r$,
\[
\int_0^\infty K_{ji}(s)e^{\sigma s}\,{\,d} s=\int_0^\infty
G_{ij}(s)e^{\sigma s}\,{\,d}
s=\big(\frac{r}{r-\sigma}\big)^{p+1}.
\]
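Indeed, using the Gamma-function identity
$\int_0^\infty u^pe^{-u}\,{\,d}u=p!$ and the substitution $u=(r-\sigma)s$,
\[
\int_0^\infty \frac{r^{p+1}}{p!}s^pe^{-rs}e^{\sigma s}\,{\,d}s
=\frac{r^{p+1}}{p!}\int_0^\infty s^pe^{-(r-\sigma)s}\,{\,d}s
=\frac{r^{p+1}}{p!}\cdot\frac{p!}{(r-\sigma)^{p+1}}
=\Big(\frac{r}{r-\sigma}\Big)^{p+1},
\]
so, in particular, condition \eqref{e1.2} holds for any $0<\sigma_0<r$.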
For simplicity, we take all $\alpha_i=1$ in condition \eqref{e3.4}. Then
by Theorem \ref{thm3.1} and Remark \ref{rmk3.1}, if the neuron gain $\mu$ satisfies
the bound
\[
\mu\leq(c-\sigma)\big(1-\frac{\sigma}{r}\big)^{p+1},
\]
the system will globally converge to the origin in terms of
\[
|x_i(t)|\leq k e^{-\sigma t},\quad
| y_j(t)|\leq ke^{-\sigma t},\quad t\geq 0,\; i=1,\dots,n ,j=1,\dots,m,
\]
whenever $|x_i(\theta)|\leq k $ and $| y_j(\theta)|\leq
k $ for $\theta\leq 0$, with $k>0$ a constant depending on
the initial conditions $x_i(\theta)$ and $y_j(\theta)$,
$i=1,\dots,n$, $j=1,\dots,m$, and $0<\sigma<\min\{c,r\}$. If
one is merely interested in qualitatively confirming global
exponential convergence of the system, it is convenient to use the
criterion
\[
\mu<c
\]
according to condition \eqref{e3.4} with all $\alpha_i=1$. That is, the
neuron activation gain should not exceed the value of the neuronal
passive decay rate.
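
As a concrete numerical illustration of the bound above (the values are
chosen purely for illustration), take $c=1$, $r=2$, $p=1$ and $\sigma=0.5$.
Then
\[
(c-\sigma)\Big(1-\frac{\sigma}{r}\Big)^{p+1}
=0.5\times\Big(\frac{3}{4}\Big)^{2}=0.28125,
\]
so any gain $\mu\leq 0.28$ guarantees the componentwise estimate with decay
rate $\sigma=0.5$, while the cruder criterion $\mu<c=1$ merely confirms
global exponential convergence at some unspecified rate.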

\begin{thebibliography}{00}

\bibitem{c1} T. Chu;
\emph{An exponential convergence estimate for analog
neural networks with delays}, Phys. Lett. A 283 (2001) 113-118.

\bibitem{g1} K. Gopalsamy, X. Z. He;
\emph{Delay-independent stability in bi-directional associative
memory networks}, IEEE Trans. Neural Networks. 5 (1994) 998-1002.

\bibitem{h1} J. J. Hopfield;
\emph{Neurons with graded response have collective computational properties
like those of two-state neurons}, Proc. Natl. Acad. Sci. USA 81
(1984) 3088-3092.

\bibitem{k1} B. Kosko;
\emph{Bi-directional associative memories}, IEEE Trans. Syst. Man Cybernet.
 18 (1988) 49-60.

\bibitem{l1}  X. F. Liao, J. B. Yu;
\emph{Qualitative analysis of bidirectional associative memory with
time delays}, Int. J. Circuit Theory Appl. 26 (1998) 210-229.

\bibitem{l2} Y. Liu,  Z. Wang,  X. Liu;
\emph{Global asymptotic stability of
generalized bi-directional associative memory networks with discrete
and distributed delays}, Chaos, Solitons and Fractals, 28 (3)
793-803.

\bibitem{l3} X. Lou,  B. Cui;
\emph{On the global robust asymptotic stability of BAM neural networks
with time-varying delays}, Neurocomputing, 70(2006)  273-279.

\bibitem{p1} J. H. Park;
\emph{A novel criterion for global asymptotic stability of BAM
neural networks with time delays}, Chaos, Solitons \& Fractals, 29
(2006) 446-453.

\bibitem{p2}  J. H. Park,  O. M. Kwon;
\emph{On improved delay-dependent criterion for global stability of
bidirectional associative memory neural networks with time-varying
delays}, Appl. Math. Comput.,  199(2008) 435-446.

\bibitem{r1} V. S. H. Rao, Bh. R. M. Phaneendra;
\emph{Global dynamics of bidirectional associative memory
neural networks involving transmission delays and dead zones}, Neural
Networks 12 (1999) 455-465.

\bibitem{s1}  Q. Song,  Z. Zhao,  Y.  Li;
\emph{Global exponential stability
of BAM neural networks with distributed delays and
reaction-diffusion terms}, Physics Letters A, 335 (2005) 213-225.

\bibitem{w1} B. Wang, J. Jian, C. Guo;
\emph{Global exponential stability of a class of
BAM networks with time-varying delays and continuously distributed
delays}, Neurocomputing, 71(2008)495-501.

\bibitem{x1} Y. Xia,  J. Cao, M.  Lin;
\emph{New results on the existence
and uniqueness of almost periodic solution for BAM neural networks
with continuously distributed delays}, Chaos, Solitons and Fractals,
31 (2007) 928-936.

\bibitem{z2}  H. Zhao;
\emph{Global stability of bidirectional associative
memory neural networks with distributed delays}, Physics Letters A,
297 (2002) 182-190.

\end{thebibliography}

\end{document}
