\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2011 (2011), No. 87, pp. 1--8.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu}
\thanks{\copyright 2011 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2011/87\hfil Singular equilibrium solutions]
{Singular equilibrium solutions for a replicator dynamics model}

\author[Ch. D. Kravvaritis, V. G. Papanicolaou \hfil EJDE-2011/87\hfilneg]
{Christos D. Kravvaritis, Vassilis G. Papanicolaou} % in alphabetical order

\address{Christos D. Kravvaritis \newline
Department of Informatics\\
Technical University of Munich\\
Boltzmannstr. 3\\
85748 Garching, Munich, Germany}
\email{kravvarc@in.tum.de}

\address{Vassilis G. Papanicolaou \newline
Department of Mathematics\\
National Technical University of Athens\\
Zografou Campus\\
157 80 Athens, Greece}
\email{papanico@math.ntua.gr}

\thanks{Submitted May 18, 2011. Published June 29, 2011.}
\subjclass[2000]{91A22, 91B52}
\keywords{Replicator dynamics; singular equilibrium solutions}

\begin{abstract}
 We evaluate explicitly certain classes of singular equilibrium
 solutions for a specific one-dimensional replicator dynamics
 equation. These solutions are linear combinations of Dirac
 delta functions. Equilibrium solutions are important in the
 study of equilibrium selection in non-cooperative games.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]

\section{Introduction--the replicator dynamics model}

The  replicator dynamics models are popular models in evolutionary
game theory. They have significant applications in economics,
population biology and other areas of science.

Let $A=(a_{ij})$ be an $m\times m$ matrix (the \emph{payoff matrix}).
Then a typical replicator dynamics equation is
\[
u_i'(t) = \Big[ \sum_{j=1}^{m} a_{ij} u_j(t) - \sum_{k=1}^{m}
\sum_{j=1}^{m} a_{kj} u_k(t) u_j(t) \Big] u_i(t), \quad i = 1,\dots ,m.
\]
The set $S = \{1,\dots ,m\}$ is the \emph{strategy space}.
The term in the square brackets is a measure of the success
of strategy $i$ and it is assumed
to be the difference of the payoff of the players playing strategy
$i$ from the average payoff of the population. It is then assumed
that the logarithmic derivative of $u_i(t)$, where $u_i$ is the
percentage of the population playing $i$, is equal to
this success measure; i.e., that agents update their strategies
proportionally to the success of the strategy $i$.

The vector
\[
u(t) = (u_1(t),\dots ,u_m(t) )^T,
\]
is a probability distribution on $S$, hence
\[
u_j(t) \geq 0, \quad j = 1,\dots ,m; \quad
\sum_{j=1}^{m} u_j(t) = 1.
\]
If these conditions on $u(t)$ are satisfied for $t = 0$, then it is easy
to see that they are satisfied for all $t \geq 0$.

The replicator dynamics equation can be written in a compact form
\begin{equation}
u_t = (Au) u - (u, Au) u = [ Au - (u, Au)] u, \label{A0}
\end{equation}
where $(Au) u$ denotes the vector whose $j$-th component is the
product of the $j$-th components of the vectors $(Au)$ and $u$.

This model was introduced by  Taylor and  Jonker \cite{T-J} and
Maynard Smith \cite{S}. See also  Imhof \cite{I}, where
a stochastic version of the model is discussed.

Infinite-dimensional versions of this evolutionary strategy model
have been proposed, e.g., Bomze \cite{B} and  Oechssler and
 Riedel \cite{O-R1,O-R2},
in connection to certain economic applications.
However, the abstract form of the proposed equations does not
provide any insight on the form of solutions.

In order to make some progress in this direction, in earlier papers
Papanicolaou et al. \cite{K-P-Y, P-S, K-P-X-Y} focused on
the case where $S$ is a ``continuum''
(i.e. a region of $\mathbb{R}^d$, $d \geq 1$) and $A$
a differential operator or an integral operator.
In this short note we are only interested in
equilibrium solutions to \eqref{A0} in a special infinite-dimensional
case. Smooth (or even continuous) equilibrium solutions do not exist
 in the considered case, but
there are infinitely many singular solutions. Inspired by
an example of  Krugman \cite{K} we take
$S$ to be the interval $[a, b]$, with $a < b$, of the real line,
and choose $A$ to be the (compact) self-adjoint, integral
(hence nonlocal) operator given by
\begin{equation}
(Av )(x) = \frac{1}{2r} \int_a^b e^{-r |x - \xi |} v(\xi) d\xi,
\label{A1}
\end{equation}
where $r$ is a strictly positive constant.

Let us first determine the inverse operator of $A$.
To do that we proceed as follows: By letting
\begin{equation}
(Av)(x) = w(x), \quad w(x) = \frac{1}{2r}\int_{a}^{b}e^{-r|x-\xi |}v(\xi )d\xi ,  \label{A1a}
\end{equation}
it is straightforward to show that
\begin{gather}
-w''(x) + r^2 w(x) = v(x), \label{A1b}\\
w'(a) = r w(a), \quad  w'(b) = -r w(b).\label{A1c}
\end{gather}
For the rest of this article, without loss of generality,
we  assume that $a = 0$.

Formulas \eqref{A1a}--\eqref{A1c} tell us that $A$ is the inverse
of the self-adjoint (local) differential operator $L$, defined as
\begin{equation}
L := -\frac{d^2}{dx^2} + r^2, \label{A2}
\end{equation}
and whose domain consists of sufficiently smooth functions $w = w(x)$ satisfying the
boundary conditions
\begin{equation}
w'(0) = r w(0), \quad  w'(b) = -r w(b).\label{A3}
\end{equation}
Now let $(\cdot , \cdot )$ be the standard inner product for the
Hilbert space $L_2(0, b)$, namely
\[
(f, g) = \int_{0}^{b}f(x)\overline{g(x)}dx.
\]
A simple integration by parts yields
\begin{align*}
(Lw, w) &= \int_{0}^{b}[ -w''(x)\overline{w(x)} + r^2w(x)
\overline{w(x)}] dx
\\
&= \int_{0}^{b}[ | w'(x)|
^2+r^2| w(x)| ^2] dx-w'(b)\overline{w(b)}
+ w'(0)\overline{w(0)}.
\end{align*}
The boundary conditions \eqref{A3} imply that
\[
(Lw,w)=\int_{0}^{b}
[| w^{\prime} (x) |^2 + r^2|w(x)| ^2] dx
+ r [ | w(b) |^2 + | w(0) |^2 ] \geq 0;
\]
i.e., $L$ is positive. Therefore, $A$, the operator in \eqref{A1},
which is $L^{-1}$, is also a positive operator
and the corresponding replicator dynamics equation \eqref{A0}
 has the form
\begin{equation}
u_t = [ Au- (u, Au)] u, \quad t>0,\; x\in [0,b],\label{DD1}
\end{equation}
with
\begin{equation}
\int_{0}^{b}u(0,x)dx = 1, \quad u(0,x)\geq 0, \quad x \in [0, b].
\label{DD2}
\end{equation}
Integration of both sides of \eqref{DD1} with respect to $x$ gives
\begin{equation}
\frac{\partial }{\partial t}\int_{0}^{b}u(t,x)dx = (u, Au)
\Big[1 - \int_0^b u(t, x) dx\Big]. \label{T1}
It follows from \eqref{T1} that the set of probability measures
on $S = [0, b]$ is invariant under
the flow \eqref{DD1} and this is, of course, a desirable feature
of the model. This ``conservation of probability'' is essential for the
applicability of \eqref{DD1}--\eqref{DD2} in the context
of evolutionary dynamics modelling.

\section{Singular equilibrium solutions}

The \emph{equilibrium solutions} to \eqref{DD1}--\eqref{DD2} are the
solutions $u$ which are independent of $t$. Equivalently, $u$ is an
equilibrium solution if $u(t, x) = v(x)$, where $v(x)$ satisfies
\begin{equation}
[ Av - (v, Av) ] v = 0,\label{E1}
\end{equation}
with
\begin{equation}
\int_0^b v(x) dx = 1, \quad v(x)\geq 0, \quad x \in [0, b].
\label{E2}
\end{equation}
Suppose $v(x)$ is an equilibrium solution such that $v(x) > 0$ on
an open interval $(c_1,c_2)$ with $c_1 < c_2$. Then, \eqref{E1} implies
\begin{equation}
(Av)(x) = \gamma, \quad x \in (c_1, c_2), \label{E3}
\end{equation}
where $\gamma := (v, Av)$.
We now apply $A^{-1}$ , namely $L$ of \eqref{A2}, to both sides
of \eqref{E1}. Since
$L$ is a local operator and we are interested in its effect only on
the interval $x \in (c_1 ,c_2)$, we do not need to know the value
of $(Av)(x)$ when $x \notin (c_1, c_2)$:
\[
v(x) = r^2 \gamma, \quad \text{for } x \in (c_1 ,c_2);
\]
i.e., $v(x)$ must be constant on $(c_1, c_2)$. In particular,
if $v(x)$ is continuous on $[0, b]$, then we must have
\begin{equation}
v(x) = \frac{1}{b},\quad x \in [0,b].\label{T2}
\end{equation}
However, with $v(x)$ given in \eqref{T2}, the quantity $(Av)(x)$
is not constant, and this contradicts \eqref{E3}.
Therefore, there are no continuous equilibrium solutions.

Motivated by the above observations we try to find equilibrium
solutions to \eqref{DD1}--\eqref{DD2},
namely solutions to \eqref{E1}--\eqref{E2}, in
the class of singular probability measures. More precisely,
we look for solutions  of the form
\begin{equation}
v(x) = \sum_{j=1}^n \alpha _{j} \delta_{c_j}(x) = \sum_{j=1}^{n}
\alpha_{j} \delta (x - c_j), \label{E4}
\end{equation}
where $\delta_c (x) := \delta (x - c)$ with $\delta$ denoting the Dirac delta function,
while the constants $c_1 < \dots < c_n$ lie in $(0, b)$,
and the positive constants
$\alpha _j$ satisfy
\begin{equation}
\sum_{j=1}^n \alpha_j = 1. \label{E5}
\end{equation}
Obviously any such $v(x)$ satisfies \eqref{E2} and we only need
to check \eqref{E1}; i.e., we need to find the $v(x)$'s for which
\begin{equation}
(Av)(x) = (v,Av),\quad x = c_1,\dots ,c_n.\label{E6}
\end{equation}
To satisfy \eqref{E6} we apply $A$ given in \eqref{A1} on \eqref{E4}.
 Using
\[
(A\delta _{c} ) (x) = \frac{1}{2r} \int_0^b e^{-r|x-\xi |}
\delta(\xi -c) d\xi = \frac{e^{-r|x - c|}}{2r},
\]
we obtain
\begin{equation}
(Av)(x) = \frac{1}{2r} \sum_{j=1}^{n} \alpha _{j} e^{-r|x-c_{j}|}.
\label{E7}
\end{equation}
For the right side of \eqref{E6} we observe that
\[
(v, Av)=\frac{1}{2r}\int_{0}^{b}\Big[ \sum_{j=1}^{n}\alpha _{j}\delta
(x-c_{j})\Big] \Big[ \sum_{k=1}^{n}\alpha _{k}e^{-r|x-c_{k}|}\Big] dx
= \frac{1}{2r}\sum_{j=1}^{n}\sum_{k=1}^{n}\alpha _{j}\alpha _{k}e^{-rc_{jk}},
\]
where
\begin{equation}
c_{jk} := |c_j - c_k|. \label{E8}
\end{equation}
Let us view $c_1,\dots ,c_n$ as given and treat
$\alpha _1,\dots ,\alpha_n$ as unknowns. Then,
we claim that all we need to do is to find (positive)
$\alpha_1,\dots ,\alpha _n$ and $\lambda$ satisfying \eqref{E5} and
\begin{equation}
\sum_{j=1}^{n}\alpha _{j}e^{-rc_{jk}} = \lambda, \quad k = 1,\dots ,n.
\label{E10}
\end{equation}
To justify this claim, we notice that, in view of \eqref{E7}, \eqref{E10}
can be written in the form
\begin{equation}
(Av)(x)=\frac{\lambda }{2r},\quad x = c_1,\dots ,c_n.
\label{T3}
\end{equation}
Then, \eqref{E5} and \eqref{T3} imply
\begin{equation}
(v, Av) = \int_{0}^{b}v(x)\frac{\lambda }{2r}dx=\frac{\lambda }{2r}
= (Av)(x),\quad x = c_1,\dots ,c_n.
\label{T4}
\end{equation}
Since the support of $v$ is in $\{c_1,\dots ,c_n\}$, we infer
from \eqref{T4} that $v(x)$ of \eqref{E4} is an equilibrium solution.
Notice that the special case with $n = 1$ yields the
equilibrium solution
\begin{equation}
v(x) = \delta (x - c_1), \label{E10a}
\end{equation}
where $c_1$ is any point in the interval $(0, b)$.

From now on we concentrate on the system \eqref{E5} and \eqref{E10}
with $n \geq 2$.
If we set
\begin{equation}
b_1 := e^{-r c_{12}}, \quad b_2 := e^{-r c_{23}},\quad \dots ,
\quad b_{n-1} := e^{-r c_{n-1,n}}, \label{E11}
\end{equation}
then \eqref{E10} takes the form
\begin{equation}
\begin{gathered}
\alpha_1 + b_1 \alpha_2 + (b_1 b_2) \alpha_3 + (b_1 b_2 b_3) \alpha_4
+ \dots + (b_1 \dots b_{n-1}) \alpha_n = \lambda
\\
b_1\alpha_1 + \alpha_2 + b_2 \alpha_3 + (b_2 b_3) \alpha_4
+ \dots +(b_2 \dots b_{n-1}) \alpha_n = \lambda \\
(b_1 b_2) \alpha_1 + b_2 \alpha_2 + \alpha_3 + b_3 \alpha_4
+ \dots + (b_3 \dots b_{n-1}) \alpha_n = \lambda \\
\dots \\
(b_1 \dots b_{n-1}) \alpha_1 + (b_2 \dots b_{n-1}) \alpha_2
+ (b_3 \dots b_{n-1}) \alpha_3
+ \dots + \alpha_n = \lambda
\end{gathered} \label{E12a}
\end{equation}
or, in matrix notation
\begin{equation}
B\alpha =\lambda \mathbf{1},  \label{E12}
\end{equation}
where $B$ is the $n\times n$ symmetric matrix
\begin{equation}
B := \begin{bmatrix}
1 & b_1 & (b_1 b_2) & \dots & (b_1 \dots b_{n-1}) \\
b_1 & 1 & b_2 & \dots & (b_2 \dots b_{n-1}) \\
(b_1 b_2) & b_2 & 1 & \dots & (b_3 \dots b_{n-1}) \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
(b_1 \dots b_{n-1}) & (b_2 \dots b_{n-1})
& (b_3 \dots b_{n-1}) & \dots & 1
\end{bmatrix}, \label{E13}
\end{equation}
while the $n$-vectors $\alpha$ and $\mathbf{1}$ are given by
\begin{equation}
\alpha := \begin{bmatrix}
\alpha _1 \\
\alpha _2 \\
\vdots \\
\alpha _{n}
\end{bmatrix},
\quad
\mathbf{1} :=\begin{bmatrix}
1 \\
1 \\
\vdots \\
1
\end{bmatrix}.
\label{E14}
\end{equation}
From \eqref{E12}, we have
\begin{equation}
\alpha_j = \frac{\det B_{j}}{\det B}\lambda ,\quad j=1,\dots ,n,  \label{E15}
\end{equation}
where $B_j$ is the matrix obtained from $B$ by replacing its $j$-th
column by $\mathbf{1}$.

We will now evaluate the determinants $\det B$ and
$\det B_{j}$, for $j=1,\dots ,n$.
To make our computations more transparent we introduce $\Delta$
and $\Delta_j$ defined as
\begin{equation}
\Delta ( b_1,\dots ,b_{n-1}) := \det B,
\quad
\Delta _j ( b_1,\dots ,b_{n-1}) := \det B_j.
\label{E16}
\end{equation}
Multiplying the second row of $B$ by $b_1$ and subtracting the
resulting row from the first, we obtain
\[
\Delta ( b_1,\dots ,b_{n-1}) = (1 - b_1^2)
\det \begin{bmatrix}
1 & b_2 & \dots & (b_2 \dots b_{n-1}) \\
b_2 & 1 & \dots & (b_3 \dots b_{n-1}) \\
\vdots & \vdots & \ddots & \vdots \\
(b_2 \dots b_{n-1}) & (b_3 \dots b_{n-1}) & \dots & 1
\end{bmatrix}.
\]
Hence
\[
\Delta ( b_1,\dots ,b_{n-1}) = ( 1-b_1^2) \Delta
( b_2,\dots ,b_{n-1}),
\]
which yields
\begin{equation}
\Delta ( b_1,\dots ,b_{n-1}) =\prod_{j=1}^{n-1}
(1-b_{j}^2) .  \label{E17}
\end{equation}

In a similar way, one computes $\det B_1$: Multiplying the second row of
$B_1$ by $b_1$ and subtracting the resulting row from the first, we obtain
\begin{equation}
\Delta _1( b_1,\dots ,b_{n-1}) =(1-b_1)
\prod_{j=2}^{n-1}(1-b_{j}^2) .  \label{E18}
\end{equation}
To compute $\det B_2$ we, again, multiply the second row of $B_2$ by
$b_1$ and subtract the resulting row from the first. This yields
\[
\Delta _2( b_1,\dots ,b_{n-1}) =(1-b_1^2) \Delta
_1(b_2,\dots ,b_{n-1}) -(1-b_1)b_1\Delta (
b_2,\dots ,b_{n-1}) .
\]
Thus, \eqref{E17} and \eqref{E18} give
\[
\Delta _2( b_1,\dots ,b_{n-1}) =(1-b_1^2) (
1-b_2) \prod_{j=3}^{n-1}(1-b_{j}^2)
-(1-b_1)b_1\prod_{j=2}^{n-1}(1-b_{j}^2) ,
\]
or
\begin{equation}
\Delta _2( b_1,\dots ,b_{n-1}) =(1-b_1b_2)
(1-b_1)(1-b_2) \prod_{j=3}^{n-1}(1-b_{j}^2) .
\label{E19}
\end{equation}
The computation of $\det B_3$ is simpler: Multiplying the second row of
$B_3$ by $b_1$ and subtracting the resulting row from the first, we obtain
\begin{equation}
\begin{aligned}
&\Delta _3( b_1,\dots ,b_{n-1})\\
& = (1-b_1^2) \Delta_2(b_2,\dots ,b_{n-1}) \\
&\quad + (1 - b_1)
\det \begin{bmatrix}
b_1 & 1 & \dots & (b_2 \dots b_{n-1}) \\
(b_1 b_2) & b_2 & \dots & (b_3 \dots b_{n-1}) \\
\vdots & \vdots & \ddots & \vdots \\
(b_1 \dots b_{n-1}) & (b_2 \dots b_{n-1}) & \dots & 1
\end{bmatrix}. \label{T5}
\end{aligned}
\end{equation}
The determinant of the matrix appearing in the second term
on the right side of \eqref{T5}
is zero because its first column is $b_1$ times the second column.
Hence, \eqref{T5} simplifies to
\[
\Delta _3( b_1,\dots ,b_{n-1}) =(1-b_1^2) \Delta_2(b_2,\dots ,b_{n-1}) .
\]
Then \eqref{E17} gives
\begin{equation}
\Delta _3( b_1,\dots ,b_{n-1}) =(1-b_1^2) (
1-b_2b_3) (1-b_2)(1-b_3) \prod_{j=4}^{n-1}(1-b_{j}^2).
\label{E20}
\end{equation}
We can compute $\det B_k$, for $k = 4,\dots ,n - 1$,
in a similar way we have computed
$\det B_3$. The result is
\begin{equation}
\Delta _k ( b_1,\dots ,b_{n-1}) =\Big[ \prod_{j=1}^{k-2}
(1-b_{j}^2) \Big] (1-b_{k-1}b_{k}) (1-b_{k-1})
(1-b_{k}) \prod_{j=k+1}^{n-1}(1-b_{j}^2), \label{E21}
\end{equation}
where $k = 4,\dots ,n - 1$ and the empty product that appears in
the case $k = n - 1$ is taken to be equal to $1$.

It remains to compute $\det B_n$. Following the same steps as the
ones for computing $\det B_3$, we arrive at the equation
\[
\Delta _{n}( b_1,\dots ,b_{n-1})
= (1-b_1^2) \Delta_{n-1}(b_2,\dots ,b_{n-1}).
\]
Repeating the procedure we get
\begin{align*}
\Delta _n ( b_1,\dots ,b_{n-1})
&= (1-b_1^2) \dots (1-b_{n-2}^2) \Delta _2(b_{n-1})
\\
&= (1-b_1^2) \dots (1-b_{n-2}^2)
\det \begin{bmatrix}
1 & 1 \\
b_{n-1} & 1
\end{bmatrix},
\end{align*}
hence
\begin{equation}
\Delta_n ( b_1,\dots ,b_{n-1})
= (1-b_{n-1})\prod_{j=1}^{n-2}(1-b_{j}^2). \label{E22}
\end{equation}
Next, by using \eqref{E17}--\eqref{E22} in \eqref{E15} we obtain
\begin{equation}
\alpha _{j}=\frac{1-b_{j-1}b_{j}}{(1+b_{j-1})(1+b_{j})}\lambda ,\quad
j=1,\dots ,n,  \label{E23}
\end{equation}
where we have set
$b_0 = 0$, $ b_n = 0$.

Finally, we need to find the value of $\lambda$ appearing
in \eqref{E10}. In view of \eqref{E23}, \eqref{E5} gives
\begin{equation}
\lambda \sum_{j=1}^n \frac{1-b_{j-1} b_j}{(1 + b_{j-1}) (1 + b_j)} = 1.
\label{T6}
\end{equation}
After some algebra, \eqref{T6} implies
\begin{equation}
\lambda =\frac{1}{2-n+2\sum_{j=1}^{n-1}(1+b_{j})^{-1}}.  \label{E24}
\end{equation}
We summarize our results in the following theorem.



\begin{theorem} \label{thm1}
Let $A$ be the operator
\[
(Av) (x)=\frac{1}{2r}\int_{0}^{b}e^{-r|x-\xi |}v(\xi )d\xi,
\quad x \in [0, b].
\]
If $v(x)$ is an equilibrium solution to \eqref{DD1}--\eqref{DD2}
of the form \eqref{E4} with $n\geq 2$, then \eqref{E23} holds, where
$b_0 = b_n = 0$ and $b_j$, for $j = 1,\dots ,(n-1)$,
are given by \eqref{E11}, while $\lambda $\ is given by \eqref{E24}.
\end{theorem}

We remark that Theorem \ref{thm1} is true even in the case where
$S$ is a semiaxis or the whole real line.

As an example we notice that in the case $n=2$, \eqref{E23} implies
that $\alpha _1=\alpha _2=1/2$, no matter what $c_1$ and $c_2$ are.

\subsection*{Acknowledgments}
This work was partially supported by a $\Pi$.E.B.E. grant
of the National Technical University of Athens.

\begin{thebibliography}{9}

\bibitem{B} {Bomze, I.}, Dynamical aspects of evolutionary stability,
\emph{Monatsh. Mathematik} \textbf{110} (1990), 189--206.

\bibitem{I} {Imhof, L. A.}, The long-run behavior of the stochastic
replicator dynamics, \emph{Ann. Appl. Probab.} \textbf{15} (2005), 1019--1045.

\bibitem{K-P-Y} {Kravvaritis, D., Papanicolaou, V. G., and Yannacopoulos, A. N.},
Similarity solutions for a replicator dynamics equation,
\emph{Indiana Univ. Math. J.} \textbf{57} (2008), 1929--1946.

\bibitem{K-P-X-Y} {Kravvaritis, D., Papanicolaou, V. G., Xepapadeas, A., and
Yannacopoulos, A. N.}, On a class of operator equations arising in
infinite dimensional replicator dynamics, \emph{Nonlinear Anal. Real World Appl.}
\textbf{11} (2010), 2537--2556.

\bibitem{K} {Krugman, P.}, \emph{The self-organizing economy}, Mitsui
Lectures in Economics, Wiley-Blackwell, Cambridge, Mass., USA, 1996.

\bibitem{O-R1} {Oechssler, J. and Riedel, F.}, Evolutionary dynamics on
infinite strategy spaces, \emph{Econom. Theory} \textbf{17} (2001), 141--162.

\bibitem{O-R2} {Oechssler, J. and Riedel, F.}, On the dynamic foundation
of evolutionary stability in continuous models, \emph{J. Econom.
Theory} \textbf{107} (2002), 223--252.

\bibitem{P-S} {Papanicolaou, V. G. and Smyrlis, G.}, Similarity solutions for a
multi-dimensional replicator dynamics equation,
\emph{Nonlinear Anal.} \textbf{71} (2009), 3185--3196.

\bibitem{S} {Smith, J. Maynard}, \emph{Evolution and the Theory of Games},
Cambridge University Press, Cambridge, 1982.

\bibitem{T-J} {Taylor, P. D. and Jonker, L. B.}, Evolutionary stable strategies
and game dynamics, \emph{Math. Biosci.} \textbf{40} (1978), 145--156.

\end{thebibliography}

\end{document}
