\documentclass[reqno]{amsart}
\usepackage{hyperref}
\usepackage{graphicx}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2019 (2019), No. 06, pp. 1--30.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu}
\thanks{\copyright 2019 Texas State University.}
\vspace{8mm}}

\begin{document}
\title[\hfilneg EJDE-2019/06\hfil Competitive exclusion in an SIS epidemic model]
{Competitive exclusion in a multi-strain SIS epidemic model on complex networks}

\author[J. Yang, T. Kuniya, X. Luo \hfil EJDE-2019/06\hfilneg]
{Junyuan Yang, Toshikazu Kuniya, Xiaofeng Luo}

\address{Junyuan Yang \newline
Complex Systems Research Center,
Shanxi University,
Taiyuan 030006, Shanxi, China}
\email{yangjunyuan00@126.com}

\address{Toshikazu Kuniya \newline
Graduate School of System Informatics,
Kobe University, 1-1 Rokkodai-cho,
Nada-ku, Kobe 657-8501, Japan}
\email{tkuniya@port.kobe-u.ac.jp}

\address{Xiaofeng Luo \newline
Complex Systems Research Center,
Shanxi University,
Taiyuan 030006,  Shanxi,  China}
\email{luo\_xf1988@163.com}

\thanks{Submitted May 17, 2018. Published January 14, 2019.}
\subjclass[2010]{35Q92, 35R02, 37N25, 92D30}
\keywords{Multi-strain SIS epidemic model; complex network;
\hfill\break\indent infection age; basic reproduction number; global stability;
competitive exclusion}

\begin{abstract}
 In this article, we propose an infection age-structured multi-strain
 SIS  epidemic model on complex networks. We obtain the reproduction
 numbers for each strain by using the classical theory of renewal equations,
 and we define the basic reproduction number $\mathcal{R}_0$ for the whole
 system by the maximum of them. We prove that if $\mathcal{R}_0 < 1$,
 then the disease-free equilibrium of the model is globally asymptotically
 stable, whereas if $\mathcal{R}_0 > 1$, then there exists an endemic
 equilibrium in which only one strain with the largest reproduction number
 survives. Moreover, under an additional assumption that the recovery rate
 is homogeneous, we prove that such an endemic equilibrium is globally
 asymptotically stable. Interestingly, our theoretical results imply that
 the competitive exclusion can occur in a sense that only one strain with
 the largest reproduction number survives.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{assumption}[theorem]{Assumption}
\allowdisplaybreaks


\section{Introduction}\label{sec:int}

Multi-strain epidemic models are systems of ordinary or partial differential
equations, in which the infected population is subdivided into several
homogeneous groups according to the type of strain of a pathogenic agent.
In multi-strain epidemic models, the competitive exclusion often occurs,
which means that only one strain dominates the other strains and persists
alone (see \cite[Section 8.1.3]{MM}).
Bremermann and Thieme \cite{BT} studied a multi-strain SIR epidemic model,
 which is a system of ordinary differential equations and regarded as a
modified Anderson and May model of host parasite dynamics.
They showed that the competitive exclusion can occur in their model in a
sense that only one strain with the largest reproduction number survives.
Since their work, various multi-strain epidemic models have been studied
from the viewpoint of the competitive exclusion (see, for instance,
 \cite{CLM,DLM,ML}).

To make epidemic models more realistic, taking into account the heterogeneity
of contact patterns of each individual is essential.
 For instance,  Dalziel \textit{et al.}\ \cite{BHG}
 clarified that the heterogeneity of contact patterns can limit the emergence
and spread of influenza. To take it into account in the modelling,
the complex network structure plays an important role. So far, one-strain
epidemic models have mainly been studied on complex networks
(see, for instance, \cite{KMS}). To our knowledge, there have been fewer
studies on multi-strain epidemic models on complex networks.
 Wu \textit{et al.}\ \cite{WFY,WSL} studied two-strain epidemic models
on complex networks from the viewpoint of the competitive exclusion,
and showed that the mutation and superinfection can lead to the coexistence
of strains. In this paper, we aim to clarify the dynamics of a more
general $m (\in \mathbb{N})$-strain epidemic model on complex networks.

In the epidemic modelling, infection age is also an important factor,
 which means the time elapsed since the infection. Most of the previous
epidemic models on complex networks were systems of ODEs with constant
coefficients, and infection age has not been considered. In these models,
it is implicitly assumed that both of the transmission and recovery
processes satisfy the Markov property, that is, the waiting time between
two events of these processes obeys the exponential distribution.
However, many empirical data have shown that non-Markovian distributions
such as the log-normal distribution (\cite{EK}) and the Gamma
distribution \cite{NE} are often more realistic.
Infection age enables us to consider such non-Markovian distributions.
In this article, we focus on an infection age-structured multi-compartment
system of PDEs, for which we need a more rigorous and advanced mathematical
analysis. Through the analysis, we aim to investigate the possibility of
the competitive exclusion in our general framework.

In mathematical epidemiology, the basic reproduction number $\mathcal{ R}_0$
is known as the expected value of secondary cases produced by a typical
infected individual during its entire period of infectiousness in a fully
susceptible population (see, for instance, \cite{DHM,HI2}).
$\mathcal{ R}_0$ is important as it is an indicator of the epidemic size.
Mathematically, $\mathcal{ R}_0$ is defined by the spectral radius of the
next generation operator (see, for instance, \cite{DW}).
However, it is often difficult to obtain the explicit formulation of
$\mathcal{ R}_0$ for epidemic models with general forms. In this paper,
we first define the reproduction numbers for each strain based on the
classical theory of renewal equations, and then define the basic reproduction
number $\mathcal{ R}_0$ for the whole system by the maximum of them.
In terms of $\mathcal{ R}_0$, we investigate the global dynamics of our model.
Specifically, we prove that if $\mathcal{ R}_0 < 1$, then the disease-free
equilibrium of the model is globally asymptotically stable, whereas if
$\mathcal{ R}_0 > 1$, then the model has an endemic equilibrium in which only
one strain with the largest reproduction number persists. Moreover, under an
additional assumption that the recovery rate is homogeneous, we prove that
such an endemic equilibrium is globally asymptotically stable if
$\mathcal{ R}_0 > 1$. Interestingly, our theoretical results imply that the
competitive exclusion can occur in a global sense in our model.

The rest of this paper is organized as follows.
In Section \ref{sec:model}, we propose our model and give some basic assumptions.
In Section \ref{sec:R0}, we define the basic reproduction number $\mathcal{ R}_0$
by the maximum of the reproduction numbers $\mathcal{ R}_{j0}$ for each strain
$j \in \{ 1,2,\dots, m \}$. In Section \ref{sec:as}, we prove the asymptotic
smoothness of the solution semiflow and the existence of a compact attractor,
which are needed for the global stability analysis in the subsequent sections.
In Section \ref{sec:dfe}, we prove the global asymptotic stability of the
disease-free equilibrium for $\mathcal{ R}_0 < 1$ by constructing a Lyapunov
function and applying the invariance principle. In Section \ref{sec:dom},
 we assume without loss of generality that $\mathcal{ R}_0 = \mathcal{ R}_{10}$,
 and prove that if $\mathcal{ R}_0 = \mathcal{ R}_{10} > 1$, then there
exists an endemic equilibrium in which only strain $1$ persists.
We further prove the uniform persistence of the system for
$\mathcal{ R}_0 = \mathcal{ R}_{10} > 1$, which is needed to construct a
Lyapunov function for the proof of the global asymptotic stability of
the endemic equilibrium. Under the additional assumption as stated above,
we prove the global asymptotic stability of it for
$\mathcal{ R}_0 = \mathcal{ R}_{10} > 1$. In Section \ref{sec:num},
we perform numerical simulation to illustrate our theoretical results,
which show that the competitive exclusion can occur in the cases of
two-strain and three-strain. Finally, Section \ref{sec:dis} is devoted to
the discussion.


\section{Model formulation}\label{sec:model}

Let $S(t)$ and $I(t)$ denote the number of susceptible and infected nodes at
time $t$, respectively. Following \cite{KMS}, a basic SIS epidemic model on an
arbitrary network is formulated as follows:
\begin{equation}\label{eqn2.1}
\begin{gathered}
 \frac{{\rm d}S(t)}{{\rm d}t}=-\beta[SI](t)+\gamma I(t),\\
\frac{{\rm d}I(t)}{{\rm d} t}=\beta[SI](t)-\gamma I(t), 
\end{gathered} 
\end{equation}
where $t \geq 0$, $[SI]$ denotes the average number of edges connecting susceptible
and infected nodes, and $\beta$ and $\gamma$ denote the transmission
and recovery rates, respectively. Let $N$ be the total number of nodes in
the network, which is constant as $\left[ S(t)+I(t) \right]' =0$
holds for solutions in \eqref{eqn2.1}. If the network is homogeneous,
that is, the degree of every node is equal to $n$, then the average number
of infected nodes connected to one susceptible node is given by $nI/N$.
Therefore, $[SI]$ is described as follows.
\begin{equation}\label{eqn2.2}
[SI](t)=nS(t)\frac{I(t)}{N}, \quad t \geq 0.
\end{equation}
Hence, model \eqref{eqn2.1} can be rewritten to the classical SIS epidemic
 model (see, for instance, \cite{AM}). By considering the heterogeneity of
degree $k \in \mathbb{N}_n = \{ 1,2,\dots, n \}$, $n \in \mathbb{N}$
of each node, system \eqref{eqn2.1}-\eqref{eqn2.2} can be generalized to
the following model on complex networks (see, for instance, \cite{PV1}).
\begin{equation}\label{eqnaa2.1}
\begin{gathered}
 \frac{{\rm d} S_k (t)}{{\rm d}t}=-\beta[S_kI](t)+\gamma I_k(t),\\
\frac{{\rm d} I_k (t)}{{\rm d} t}=\beta[S_kI](t)-\gamma I_k(t),
\end{gathered}
\end{equation}
where $t \geq 0$, $k \in \mathbb{N}_n$, $S_k(t)$ and $I_k(t)$ denote 
the number of susceptible and infected
nodes with degree $k \in \mathbb{N}_n$ at time $t \geq 0$, respectively.
$[S_kI]$ is given by
\begin{equation}\label{eqnaa2.2}
[S_k I](t)=k S_k (t)\frac{\sum_{l=1}^nl I_l (t)}{\sum_{l=1}^nlN_l}, \quad
t\geq 0, \; k \in \mathbb{N}_n,
\end{equation}
where $N_l$ denotes the total number of nodes with degree $l \in \mathbb{N}_n$,
which is constant as $[S_k(t)+I_k(t)]' = 0$ holds for all $k \in \mathbb{N}_n$
for solutions in \eqref{eqnaa2.1}. The authors of \cite{YCX} incorporated the
infection age into system \eqref{eqnaa2.1}-\eqref{eqnaa2.2}, and studied the following
one-strain SIS epidemic model on complex networks.
\begin{equation}\label{eqn}
\begin{gathered}
\frac{{\rm d}S_k(t)}{{\rm d}t}=-kS_k(t)\Theta(\mathbf{i}(t,\cdot))
+\int_0^\infty\gamma(a) i_{k}(t,a) {\rm d}a,\\
\frac{\partial i_{k}(t,a)}{\partial t}
+\frac{\partial i_{k}(t,a)}{\partial a}=-\gamma(a)i_{k}(t,a),\\
i_{k}(t,0)=kS_k(t)\Theta(\mathbf{i}(t,\cdot)),
\end{gathered}
\end{equation}
where $t \geq 0$, $a \geq 0$, $k \in \mathbb{N}_n$, and
\begin{gather*}
 \Theta (\mathbf{i}(t,\cdot))
=\frac{1}{\langle k\rangle}\sum_{l=1}^nlp(l)\int_0^\infty\beta(a)i_l(t,a) {\rm d}a,
\quad
\mathbf{i}(t,\cdot) = (i_1(t,\cdot), i_2(t,\cdot), \dots, i_n(t,\cdot)),  \\
 \langle k \rangle = \sum_{l=1}^n l p(l), \quad p(k)=\frac{N_k}{N}, \quad
 N_k=S_k(\cdot)+\int_0^\infty i_k(\cdot,a) {\rm d}a, \quad k \in \mathbb{N}_n,\\
 N = \sum_{k=1}^n N_k.
\end{gather*}
Here, $i_k(t,a)$ denotes the number of infected nodes with degree
$k \in \mathbb{N}_n$ and infection age $a \geq 0$ at time $t \geq 0$.
$\beta(a)$ and $\gamma(a)$ denote the age-specific transmission and recovery
rates, respectively. In this paper, we consider a generalization of
system \eqref{eqn} to the following multi-strain SIS epidemic model on
complex networks.
\begin{equation}\label{eqn2.3}
\begin{gathered}
\frac{{\rm d}S_k(t)}{{\rm d}t}=-kS_k(t)\sum_{j=1}^m\Theta_j
 ( \mathbf{i}_j(t,\cdot))+\sum_{j=1}^m\int_0^\infty\gamma_j(a)
 i_{jk}(t,a) {\rm d}a,\\
\frac{\partial i_{jk}(t,a)}{\partial t}
 +\frac{\partial i_{jk}(t,a)}{\partial a}
 =-\gamma_j(a)i_{jk}(t,a),\\
i_{jk}(t,0)=kS_k(t)\Theta_j(\mathbf{i}_j(t,\cdot)),
\end{gathered}
\end{equation}
for $t \geq 0$, $a \geq 0$, $j \in \mathbb{M}$, and
$k \in \mathbb{N}_n$.
Here $\mathbb{M} = \{ 1,2,\dots, m \}$ and
\begin{equation} \label{eqnmz4.1}
\begin{gathered}
 \mathbf{i}_j (t,\cdot) =(i_{j1}(t,\cdot), i_{j2}(t,\cdot), \dots,
 i_{jn}(t,\cdot) ), \quad t \geq 0, \; j \in \mathbb{M},  \\
\Theta_j(\psi)=\frac{1}{\langle k\rangle}\sum_{l=1}^nlp(l)
 \int_0^\infty\beta_j(a) \psi_{l}(a) {\rm d}a, \quad j \in \mathbb{M},  \\
\psi =(\psi_1, \psi_2, \dots, \psi_n) \in ( L^1 (\mathbb{R}_+) )^n, \quad
\langle k \rangle = \sum_{l=1}^n l p(l), \\
p(k) = \frac{N_k}{N}, \quad
 N_k = S_k(\cdot) + \sum_{j=1}^m \int_0^\infty i_{jk}(\cdot,a) {\rm d}a, \quad
 k \in \mathbb{N}_n, \; N=\sum_{k=1}^n N_k.
\end{gathered}
\end{equation}
Here, $i_{jk}(t,a)$ denotes the number of nodes infected by strain
$j \in \mathbb{M}$ with degree $k \in \mathbb{N}_n$ and infection age
$a \geq 0$ at time $t \geq 0$. $\beta_j(a)$ and $\gamma_j(a)$ denote the
age-specific transmission rate and recovery rate for infected nodes with
strain $j \in \mathbb{M}$. We make the following assumptions.

\begin{assumption}\label{assum2.1} \rm \quad
\begin{itemize}
\item[(i)] $\beta_j (\cdot) \in L^\infty_+ (\mathbb{R}_+)$ and
$\gamma_j (\cdot) \in L^\infty_+ (\mathbb{R}_+)$ for all $j \in \mathbb{M}$.

\item[(ii)] $\beta_j (\cdot)$ is Lipschitz continuous on $\mathbb{R}_+$
with Lipschitz constant $L_{\beta_j} > 0$ for all $j \in \mathbb{M}$.

\item[(iii)] There exists $\underline\gamma_j > 0$ such that
$\gamma_j (a)>\underline{\gamma}_j$ for all $a \geq 0$ and $j \in \mathbb{M}$.
\end{itemize}
\end{assumption}

Let $\bar{\beta}_j := \operatorname{ess\,sup}_{a \in [0, \infty)} \beta_j(a)
< \infty$ and $\bar{\gamma}_j := \operatorname{ess\,sup}_{a \in [0, \infty) }
\gamma_j(a) < \infty$ for all $j \in \mathbb{M}$.
By integrating the second equation in \eqref{eqn2.3}, we have
\[
\frac{{\rm d}}{{\rm d}t} \int_0^\infty i_{jk}(t,a) {\rm d}a
= kS_k(t) \Theta_j( \mathbf{i}_j(t,\cdot))
- \int_0^\infty \gamma_j(a) i_{jk}(t,a) {\rm d}a, \quad
j \in \mathbb{M}, \; k \in \mathbb{N}_n.
\]
Note that $i_{jk}(t,+\infty)=0$ for all $t>0$, $j \in \mathbb{M}$ and
$k \in \mathbb{N}_n$ by  Assumption \ref{assum2.1} (iii). Hence,
\[
\frac{{\rm d}}{{\rm d}t} \Big[ S_k(t) + \sum_{j=1}^m \int_0^\infty i_{jk}(t,a)
{\rm d}a \Big] = 0, \quad k \in \mathbb{N}_n,
\]
and thus, $N_k$ is constant for all $k \in \mathbb{N}_n$. In what follows,
without loss of generality, we assume that $N_k$ is normalized as $1$ for all
$k \in \mathbb{N}_n$. Then, $S_k(\cdot) = 1-\sum_{j=1}^m
\int_0^\infty i_{jk}(\cdot,a) {\rm d}a$ for all $k \in \mathbb{N}_n$ and hence,
\eqref{eqn2.3} can be rewritten as follows
\begin{equation}\label{eqn2.3'}
\begin{gathered}
 \frac{\partial i_{jk}(t,a)}{\partial t}
+\frac{\partial i_{jk}(t,a)}{\partial a}=-\gamma_j(a)i_{jk}(t,a),\\
 i_{jk}(t,0)=k \Big[ 1-\sum_{j'=1}^m \int_0^\infty i_{j'k}(t,a) {\rm d}a \Big]
 \Theta_j (\mathbf{i}_j(t,\cdot)),
\end{gathered}
\end{equation}
for $t \geq 0$, $a \geq 0$, $j \in \mathbb{M}$, and $k \in \mathbb{N}_n$.

We consider the following initial condition for \eqref{eqn2.3'},
\[
i_{jk}(0,\cdot)=i_{jk0}(\cdot)\in L_+^1(0,\infty), \quad
j \in \mathbb{M}, \; k\in \mathbb{N}_n.
\]
For the sake of simplicity, we use the following notation in the rest of this paper.
\begin{gather*}
 \mathbb{X} = ( L^1(\mathbb{R}_+) )^{mn}, \quad
\| \psi \|_{\mathbb{X}}= \sum_{j=1}^m \sum_{k=1}^n \int_0^\infty
 | \psi_{jk}(a) | {\rm d}a, \quad
 \psi =(\psi_{jk}(\cdot))_{(j,k) \in \mathbb{M} \times \mathbb{N}_n} \in \mathbb{X}, \\
 \mathbf{X}(t) = ( i_{11}(t,\cdot), i_{12}(t,\cdot), \dots,
 i_{1n}(t,\cdot), i_{21}(t,\cdot), \dots, i_{mn}(t,\cdot) ) \in \mathbb{X}, \quad
 t \geq 0,  \\
 K_{jk}(t) = \int_0^\infty \beta_j(a) i_{jk}(t,a) {\rm d}a, \quad
b_{jk}(t) = i_{jk}(t,0), \quad j \in \mathbb{M}, \; k \in \mathbb{N}_n, \; t \geq 0,\\
 \pi_j(a) = {\rm e}^{-\int_0^a \gamma_j (\sigma) {\rm d}\sigma}, \quad
j \in \mathbb{M}, \; a \geq 0.
\end{gather*}

\section{The basic reproduction number $\mathcal R_0$}\label{sec:R0}

In this section, we define the basic reproduction number $\mathcal{ R}_0$
for system \eqref{eqn2.3'}.
Obviously,  \eqref{eqn2.3'} always has the unique disease-free equilibrium
$E_0= (0,0, \dots, 0) \in \mathbb{X}$. Linearizing system \eqref{eqn2.3'}
around the disease-free equilibrium $E_0$, we obtain the following system in
the disease invasion phase.
\begin{equation}\label{eqnn3.1}
\begin{gathered}
  \frac{\partial i_{jk}(t,a)}{\partial t}+\frac{\partial i_{jk}(t,a)}{\partial a}
=-\gamma_j(a)i_{jk}(t,a),\\
 i_{jk}(t,0)=k\Theta_j (\mathbf{i}_j(t,\cdot)),
\end{gathered}
\end{equation}
for $t\geq 0$, $a \geq 0$, $j \in \mathbb{M}$,  and $k \in \mathbb{N}_n$.
Integrating the first equation in \eqref{eqn2.3'} or \eqref{eqnn3.1} along
the characteristic line $t-a=c$ (constant), we have
\begin{equation}\label{eqnmm2.6}
i_{jk}(t,a)=\begin{cases}
  b_{jk}(t-a)\pi_j(a), & t>a,\\
 i_{jk0}(a-t)\frac{\pi_j(a)}{\pi_j(a-t)}, & t\le a,
\end{cases}
\end{equation}
for $t\geq 0$, $a \geq 0$, $j \in \mathbb{M}$, and $k \in \mathbb{N}_n$.
By substituting \eqref{eqnmm2.6} into the second equation in \eqref{eqnn3.1},
we obtain the following Volterra integral equation (see, for instance, \cite{IM}).
\begin{equation}\label{eqnnn3.1}
b_{jk}(t)=\frac{k}{\langle k\rangle}\sum_{l=1}^nlp(l)
\int_0^t\beta_j(a)b_{jl}(t-a)\pi_j(a) {\rm d}a+g_{jk}(t),
\end{equation}
where $ t \geq 0$, $j \in \mathbb{M}$, $k \in \mathbb{N}_n$, and
\[
g_{jk}(t)=\frac{k}{\langle k\rangle}\sum_{l=1}^nlp(l)
\int_t^\infty\beta_j(a)i_{jl0}(a-t)\frac{\pi_j(a)}{\pi_j(a-t)} {\rm d}a, \quad
 t \geq 0, \; j \in \mathbb{M}, \; k \in \mathbb{N}_n.
\]
Multiplying both sides of \eqref{eqnnn3.1} by $kp(k)/\langle k\rangle$,  and
summing on $k$ from $1$ to $n$, we have that for all $t\geq 0$,
\begin{equation}\label{eqnn3.2}
\tilde{\Theta} (\mathbf{b}_j(t)) = \int_0^t\Psi_j(a) \tilde{\Theta}
(\mathbf{b}_j(t-a)) {\rm d}a + f_j(t), \quad j \in \mathbb{M},
\end{equation}
where
\begin{gather}
\tilde{\Theta} (\varphi) = \frac{1}{\langle k\rangle}\sum_{l=1}^n lp(l)\varphi_l,
\quad \varphi=(\varphi_1,\varphi_2,\dots, \varphi_n) \in \mathbb{R}^n, \label{Theta} \\
\mathbf{b}_j(t) = (b_{j1}(t), b_{j2}(t), \dots, b_{jn}(t)), \quad
f_j(t) = \frac{1}{\langle k\rangle}\sum_{l=1}^nlp(l)g_{jl}(t), \quad
 t \geq 0, \; j \in \mathbb{M},  \nonumber\\
\Psi_j(a)=\frac{\langle k^2 \rangle}{\langle k\rangle} \beta_j(a)\pi_j(a), \quad
 a \geq 0, \; j \in \mathbb{M}, \quad
\langle k^2 \rangle = \sum_{l=1}^n l^2 p(l). \nonumber
\end{gather}
Since \eqref{eqnn3.2} can be regarded as a renewal equation for strain
$j \in \mathbb{M}$, we obtain the reproduction number $\mathcal{ R}_{j0}$
for strain $j$ by using the classical theory of renewal equations as follows.
\[
\mathcal R_{j0}=\int_0^\infty\Psi_j(s) {\rm d}s
= \frac{\langle k^2\rangle}{\langle k\rangle} K_j, \quad
K_j = \int_0^\infty \beta_j(a) \pi_j(a) {\rm d}a, \quad j \in \mathbb{M}.
\]
Note that
\begin{equation}\label{eqna2.1}
\frac{\langle k^2\rangle}{\langle k\rangle}
=\frac{\sum_{l=1}^nl^2p(l)}{\sum_{l=1}^nlp(l)}
=\frac{\sum_{l=1}^nl^2\frac{N_l}{N}}{\sum_{l=1}^nl\frac{N_l}{N}}
=\sum_{l=1}^nl\frac{lN_l}{\sum_{l=1}^nlN_l}.
\end{equation}
Since $kN_k/\sum_{k=1}^nkN_k$, $k \in \mathbb{N}_n$ denotes the proportion
of edges attached to nodes with degree $k$ among all edges, \eqref{eqna2.1}
implies that $\langle k^2 \rangle / \langle k \rangle$ is equal to the average
degree of a node reached by following a randomly chosen edge in the network.
Besides, $\pi_j(a)$, $j \in \mathbb{M}$ denotes the
survival probability for one infected node with strain $j$ to age $a$.
Consequently, $\mathcal R_{j0} = ( \langle k^2 \rangle / \langle k \rangle ) K_j$
represents the average number of secondary infected nodes produced by one
typical infected node with strain $j$ during its infectious period in the network.
Using $\mathcal{ R}_{j0}$, $j \in \mathbb{M}$, the next generation operator
for system \eqref{eqn2.3} is defined by matrix
$\operatorname{diag}_{1\leq j \leq m} ( \mathcal{ R}_{j0} )$.
Hence, the basic reproduction number $\mathcal{ R}_0$ for system \eqref{eqn2.3},
which is the spectral radius of the next generation operator, is obtained as follows.
\begin{equation}\label{eqnzz2.1}
\mathcal R_0=\max \{ \mathcal R_{j0} \}_{j \in \mathbb{M}}
=\max \{ \mathcal{ R}_{10}, \mathcal{ R}_{20}, \dots, \mathcal{ R}_{m0} \}.
\end{equation}

\section{Asymptotic smoothness of the solution semiflow}\label{sec:as}

In this section, we show the asymptotic smoothness of the solution semiflow
and the existence of a compact attractor, which are needed for the global
stability analysis in Sections \ref{sec:dfe} and \ref{sec:dom}.
It is easy to see that system \eqref{eqn2.3'} generates a continuous
semiflow $\Phi : \mathbb{R}_+ \times \mathbb{X} \to \mathbb{X}$, defined by
\begin{equation} \label{semiflow}
\Phi(t, \mathbf{X}_0) = \mathbf{X}(t)
= ( i_{11}(t,\cdot),i_{12}(t,\cdot),\dots,i_{mn}(t,\cdot) ), \quad t \geq 0,
\end{equation}
where $\mathbf{X}_0 = ( i_{110}(\cdot), i_{120}(\cdot), \dots,
i_{mn0}(\cdot) ) \in \mathbb{X}$.
Let us define the following set.
\[
\Omega = \{ \mathbf{X}(\cdot)
 = ( i_{11}(\cdot,\cdot),\dots,i_{mn}(\cdot,\cdot) )
\in \mathbb{X}_+: \ \sum_{j=1}^m \int_0^\infty i_{jk}(\cdot,a)
{\rm d}a \leq 1 \quad \forall  k \in \mathbb{N}_n \},
\]
where $\mathbb{X}_+$ denotes the positive cone of $\mathbb{X}$.

\begin{lemma}\label{lemmm2.1}\quad
\begin{itemize}
\item[(i)] $\Omega$ is positively invariant for system \eqref{eqn2.3'},
that is, $\Phi(t,\Omega) \subset \Omega$ for all $t \geq 0$.

\item[(ii)] For any solution $\Phi(t,\mathbf{X}_0) = \mathbf{X}(t)$ with
$\mathbf{X}_0 \in \Omega$, inequalities $K_{jk}(t)\le\bar\beta_j$ and
$b_{jk}(t)\le \bar\beta_{j} k$ hold for all $t>0$, $j \in \mathbb{M}$ and
$k \in \mathbb{N}_n$.
\end{itemize}
\end{lemma}

\begin{proof}
(i) Let $\mathbf{X}_0 \in \Omega$. The nonnegativity of
$\Phi(t, \mathbf{X}_0) = \mathbf{X}(t)$ for all $t \geq 0$ is a simple matter
and we omit the proof. By integrating the first equation in \eqref{eqn2.3'},
 for all $j \in \mathbb{M}$ and $k \in \mathbb{N}_n$, we have
\[
\frac{{\rm d}}{{\rm d}t} \int_0^\infty i_{jk}(t,a) {\rm d}a
= k \Big[ 1-\sum_{j'=1}^m \int_0^\infty i_{j'k}(t,a) {\rm d}a \Big]
\Theta_j(\mathbf{i}_j(t,\cdot)) - \int_0^\infty \gamma_j(a) i_{jk}(t,a) {\rm d}a.
\]
Let $Y_k(\cdot):= \sum_{j=1}^m \int_0^\infty i_{jk}(\cdot,a){\rm d}a$ for all
$k \in \mathbb{N}_n$. From the above equality, we have
\begin{equation}\label{Y}
\frac{{\rm d}Y_k(t)}{{\rm d}t} = k [ 1-Y_k(t) ]
\sum_{j=1}^m \Theta_j (\mathbf{i}_j(t,\cdot))
- \sum_{j=1}^m \int_0^\infty \gamma_j(a) i_{jk}(t,a) {\rm d}a, \quad
k \in \mathbb{N}_n.
\end{equation}
By the way of contradiction, suppose that there exist a $t^* > 0$ and a
$k^* \in \mathbb{N}_n$ such that $Y_k (t) \leq 1$ for all
$t \in [0,t^*)$ and $k \in \mathbb{N}_n$, $Y_{k^*}(t^*)=1$ and
$Y_{k^*}'(t^*) > 0$. From \eqref{Y}, we have
\[
\frac{{\rm d}Y_{k^*}(t^*)}{{\rm d}t}
= - \sum_{j=1}^m \int_0^\infty \gamma_j(a) i_{jk^*}(t^*,a) {\rm d}a \leq 0,
\]
which is a contradiction. Thus,
$Y_k(t) = \sum_{j=1}^m \int_0^\infty i_{jk}(t,a){\rm d}a \leq 1$ for all
$t \geq 0$ and $k \in \mathbb{N}_n$, provided $\mathbf{X}_0 \in \Omega$.
This implies that $\Phi(t, \Omega) \subset \Omega$ for all $t \geq 0$, and thus, $\Omega$
is positively invariant.

(ii) From (i), we see that $\int_0^\infty i_{jk}(t,a) {\rm d}a \leq 1$ for all
$t \geq 0$, $j \in \mathbb{M}$ and $k \in \mathbb{N}_n$. Hence, by
Assumption \ref{assum2.1} (i), we have
\begin{gather*}
K_{jk} (t) \leq \bar{\beta}_j \int_0^\infty i_{jk}(t,a) {\rm d}a
 \leq \bar{\beta}_j,  \\
b_{jk} (t) \leq \bar{\beta}_j  \frac{k}{\langle k \rangle} \sum_{l=1}^n l p(l)
\int_0^\infty i_{jl}(t,a) {\rm d}a
\leq \bar{\beta}_j \frac{k}{\langle k \rangle} \sum_{l=1}^n l p(l)
= \bar{\beta}_j k,
\end{gather*}
for all $t \geq 0$, $j \in \mathbb{M}$ and $k \in \mathbb{N}_n$.
This completes the proof.
\end{proof}

Next, we prove the asymptotic smoothness of semiflow $\Phi$ with
$\mathbf{X}_0 \in \Omega$. Based on Proposition 3.13 in \cite{GFW},
we decompose $\Phi$ into two operators: $\Phi=\hat{\Phi}+\tilde{\Phi}$, where
\begin{equation}\label{eqnmz2.1}
\begin{gathered}
 \hat\Phi(t, \mathbf{X}_0)=\Big( \hat i_{11}(t,\cdot), \hat i_{12}(t,\cdot), \dots,
\hat i_{mn}(t,\cdot) \Big), \quad t \geq 0, \; \mathbf{X}_0 \in \Omega,\\
\tilde\Phi(t, \mathbf{X}_0)=\Big( \tilde i_{11}(t,\cdot), \tilde i_{12}(t,\cdot),
 \dots, \tilde i_{mn}(t,\cdot) \Big),
\quad t \geq 0, \; \mathbf{X}_0 \in \Omega,\end{gathered} 
\end{equation}
\[
 \hat i_{jk}(t,a)=\begin{cases}
0,&t>a,\\
i_{jk}(t,a),&t\le a,
\end{cases}\qquad
\tilde i_{jk}(t,a)=\begin{cases}
i_{jk}(t,a),&t>a,\\
0,&t\le a,
\end{cases}
\]
for $t\geq 0$, $a \geq 0$, $j \in \mathbb{M}$,  and $k \in \mathbb{N}_n$.
To prove the asymptotic smoothness of $\Phi$, we use the following lemma.

\begin{lemma}[{\cite[Proposition 3.13]{GFW}}] \label{lemmn2.1}
Let $\hat \Phi$ and $\tilde \Phi$ be defined as in \eqref{eqnmz2.1}.
Suppose that $\hat \Phi$ and $\tilde \Phi$ satisfy the following properties:
\begin{itemize}
\item[(i)] There exists a function $\theta:\mathbb R_+\times\mathbb R_+\to
\mathbb R_+$ such that for any $r>0$, $\lim_{t \to +\infty} \theta (t,r) = 0$ and
$\| \hat \Phi (t, \mathbf{X}_0) \|_{\mathbb{X}} \leq \theta (t, r)$ for all $t > 0$,
provided $\| \mathbf{X}_0 \|_{\mathbb{X}} \leq r$.

\item[(ii)] For all $t>0$, $\tilde\Phi(t, \cdot)$ maps any bounded set of
$\Omega$ into a set with compact closure in $\mathbb{X}$.
\end{itemize}
Then $\{ \Phi(t, \mathbf{X}_0): t \geq 0 \}$ has compact closure in $\mathbb{X}$.
\end{lemma}

We also use the following lemma.

\begin{lemma}\label{lemmm2.2}
$b_{jk}(\cdot)$ is Lipschitz continuous on $\mathbb{R}_+$ for all
 $j \in \mathbb{M}$ and $k \in \mathbb{N}_n$. That is, there exists
Lipschitz constant $L_{b_{jk}} > 0$ for all $j \in \mathbb{M}$ and
$k \in \mathbb{N}_n$ such that
\[
| b_{jk}(t+h) - b_{jk}(t) | \leq L_{b_{jk}} h, \quad t \geq 0, \; h \geq 0, \;
 j \in \mathbb{M}, \; k \in \mathbb{N}_n.
\]
\end{lemma}

\begin{proof}
Let $Y_k(\cdot)$, $k \in \mathbb{N}_n$ be defined as in the proof of Lemma
\ref{lemmm2.1}. From \eqref{Y}, we have
\begin{equation}
\begin{aligned}
 \big| \frac{{\rm d}Y_k(t)}{{\rm d}t} \big|
&\leq k [1-Y_k(t)] \sum_{j=1}^m \Theta_j (\mathbf{i}_j(t,\cdot))
+\sum_{j=1}^m \int_0^\infty \gamma_j(a) i_{jk}(t,a) {\rm d}a  \\
&\leq k \sum_{j=1}^m \bar{\beta}_j + \sum_{j=1}^m \bar{\gamma}_j
=: L_{Y_k}, \quad t \geq 0, \; k \in \mathbb{N}_n.
\end{aligned}
\end{equation}
This implies that $Y_k(\cdot)$ is Lipschitz continuous on $\mathbb{R}_+$
with Lipschitz constant $L_{Y_k} > 0$ for all $k \in \mathbb{N}_n$.
For any $t \geq 0$ and $h \geq 0$, we then have
\begin{align*}
& | b_{jk}(t+h)-b_{jk}(t) | \\
&=\Big|\frac{k}{\langle k\rangle}\{[1-Y_k(t+h)]\sum_{l=1}^nlp(l)
 \int_0^\infty\beta_j(a)i_{jl}(t+h,a){\rm d}a   \\
&\quad  -[1-Y_k(t)] \sum_{l=1}^nlp(l)\int_0^\infty\beta_j(a)i_{jl}(t,a){\rm d}a\}\Big|  \\
&\le \frac{k}{\langle k\rangle}\Big\{ | Y_k(t+h)-Y_k(t) |
 \sum_{l=1}^nlp(l)\int_0^\infty\beta_j(a)i_{jl}(t+h,a){\rm d}a  \\
&\quad  +[1+Y_k(t)] \sum_{l=1}^nlp(l)
 \Big| \int_0^\infty \beta_j(a) i_{jl}(t+h,a) {\rm d}a
  - \int_0^\infty \beta_j(a) i_{jl}(t,a) {\rm d}a \Big| \Big\}  \\
& \le \frac{k}{\langle k\rangle}\Big\{L_{Y_k}\bar\beta_j
\langle k\rangle h+2\sum_{l=1}^nlp(l)\int_0^h\beta_j(a)i_{jl}(t+h,a){\rm d}a  \\
&\quad  + 2 \sum_{l=1}^nlp(l)\Big| \int_h^\infty\beta_j(a)i_{jl}(t+h,a){\rm d}a
 -\int_0^\infty\beta_j(a)i_{jl}(t,a){\rm d}a \Big| \Big\}  \\
&  \le \frac{k}{\langle k\rangle}\Big\{L_{Y_k}\bar\beta_j
 \langle k\rangle h + 2 \bar\beta_j^2\langle k^2\rangle h
  + 2\sum_{l=1}^nlp(l)\Big|\int_0^\infty\beta_j(a+h)i_{jl}(t+h,a+h){\rm d}a \\
&\quad -\int_0^\infty\beta_j(a)i_{jl}(t,a){\rm d}a \Big| \Big\} \\
&=\frac{k}{\langle k\rangle} \Big\{L_{Y_k}\bar\beta_j \langle k\rangle h
  +2 \bar\beta_j^2\langle k^2\rangle h
  +2\sum_{l=1}^nlp(l)\Big| \int_0^\infty\beta_j(a+h)i_{jl}(t,a)
 \frac{\pi_j(a+h)}{\pi_j(a)}{\rm d}a \\
&\quad -\int_0^\infty\beta_j(a)i_{jl}(t,a){\rm d}a \Big| \Big\}  \\
& \le \frac{k}{\langle k\rangle}\Big\{L_{Y_k}\bar\beta_j
  \langle k\rangle h +2\bar\beta_j^2\langle k^2\rangle h \\
&\quad +2\sum_{l=1}^nlp(l) \int_0^\infty \beta_j(a+h)
 \Big| \frac{\pi_j(a+h)}{\pi_j(a)}-1 \Big| i_{jl}(t,a){\rm d}a  \\
&\quad  + 2\sum_{l=1}^n l p(l) \int_0^\infty|\beta_j(a+h)
 -\beta_j(a)|i_{jl}(t,a){\rm d}a \Big\} \\
&\le \frac{k}{\langle k\rangle} ( L_{Y_k}\bar\beta_j
 \langle k\rangle + 2 \bar\beta_j^2\langle k^2\rangle
 + 2 \bar\beta_j \bar \gamma_j \langle k\rangle
  + 2 L_{\beta_j} \langle k\rangle ) h \\
& =: L_{b_{jk}}h, \quad j \in \mathbb{M}, \; k \in \mathbb{N}_n,
\end{align*}
where we use Assumption \ref{assum2.1} (ii), Lemma \ref{lemmm2.1} (ii) and
the facts that
\begin{gather*}
 |{\rm e}^{-x}-{\rm e}^{-y}|\le |x-y|, \quad x,y \in \mathbb{R}_+, \quad
\langle k^2 \rangle = \sum_{l=1}^n l^2 p(l), \\
 i_{jk}(t+h,a+h)=i_{jk}(t,a) \frac{\pi_j(a+h)}{\pi_j(a)}, \quad
t,a,h \in \mathbb{R}_+, \; j \in \mathbb{M}, \; k \in \mathbb{N}_n, \\
 L_{b_{jk}}=\frac{k}{\langle k\rangle} ( L_{Y_k}\bar\beta_j \langle k\rangle
+ 2 \bar\beta_j^2\langle k^2\rangle + 2 \bar\beta_j \bar \gamma_j \langle k\rangle
+ 2 L_{\beta_j} \langle k\rangle ), \quad j \in \mathbb{M}, \; k \in \mathbb{N}_n.
\end{gather*}
This completes the proof.
\end{proof}

Using Lemmas \ref{lemmn2.1} and \ref{lemmm2.2}, we prove the following
proposition on the asymptotic smoothness of semiflow $\Phi$.

\begin{proposition}\label{prop:as}
Let $\Phi$ be the solution semiflow defined by \eqref{semiflow}.
$\Phi$ is asymptotically smooth. \label{prop2.1}
\end{proposition}

\begin{proof}
It is sufficient to show that the assumptions in Lemma \ref{lemmn2.1}
hold (see also \cite[Lemma 3.2.3]{JKH} and \cite[Theorem 5.1]{M}).
For any $r>0$, let us consider $\mathbf{X}_0 \in \Omega$ such that
$\| \mathbf{X}_0 \|_{\mathbb{X}} \leq r$. Using \eqref{eqnmm2.6}, we obtain
\begin{align*}
\| \hat{\Phi}(t,\mathbf{X}_0) \|_{\mathbb{X}}
&= \sum_{j=1}^m \sum_{k=1}^n \int_0^\infty \hat{i}_{jk}(t,a) {\rm d}a \\
&= \sum_{j=1}^m \sum_{k=1}^n \int_t^{\infty}i_{jk0}(a-t)
 \frac{\pi_j(a)}{\pi_j(a-t)} {\rm d}a \\
&\le \sum_{j=1}^m \sum_{k=1}^n {\rm e}^{-\underline{\gamma}_j t}
 \int_0^{\infty} i_{jk0}(a) {\rm d}a \\
&\le {\rm e}^{-\underline{\gamma} t} \| \mathbf{X}_0 \|_{\mathbb{X}}
 \leq {\rm e}^{-\underline{\gamma} t} r, \quad t > 0,
\end{align*}
where $\underline{\gamma} := \min_{j \in \mathbb{M}} \underline{\gamma}_j > 0$.
Hence, Lemma \ref{lemmn2.1} (i) holds for
$\theta(t,r) = {\rm e}^{-\underline{\gamma} t} r$.

Next, we show Lemma \ref{lemmn2.1} (ii).
Let $C \subset \Omega$ be a bounded set such that
$\| \mathbf{X}\|_{\mathbb{X}} \leq r$ for a fixed upper bound $r>0$
and for any $\mathbf{X} \in C$. It is sufficient to show that the
following four conditions hold for all $t>0$, $j \in \mathbb{M}$
and $k \in \mathbb{N}_n$ (see also \cite[Theorem 5.2]{M}).
 \begin{itemize}
\item[(a)] $\sup_{\mathbf{X}_0 \in C} \int_0^\infty \tilde i_{jk}(t,a){\rm d}a
 < \infty$;

\item[(b)] $\lim_{u\to +\infty}\int_u^\infty \tilde i_{jk}(t,a) {\rm d}a = 0$
 uniformly in $\mathbf{X}_0 \in C$;

\item[(c)] $\lim_{h\to 0^+}\int_0^\infty|\tilde i_{jk}(t,a+h)-\tilde i_{jk}(t,a)| {\rm d}a= 0$
 uniformly in $\mathbf{X}_0 \in C$;

\item[(d)] $\lim_{h\to 0^+}\int_0^h\tilde i_{jk}(t,a) {\rm d}a= 0$
  uniformly in $\mathbf{X}_0 \in C$.
 \end{itemize}
From \eqref{eqnmm2.6}, Assumption \ref{assum2.1} and Lemma \ref{lemmm2.1},
we have
\[
\tilde{i}_{jk} (t,a)
\leq \begin{cases}
\bar{\beta}_j k {\rm e}^{-\underline{\gamma}_j a}, & t > a, \\
0, & t \leq a,
\end{cases}
\]
for $t \geq 0$, $a \geq 0$, $j \in \mathbb{M}$, and $k \in \mathbb{N}_n$.
Then, we easily see that conditions (a), (b) and (d) hold. For any fixed $t>0$,
let $h \in (0,t)$. By Lemma \ref{lemmm2.2}, we have
\begin{align*}
&\int_0^{\infty} | \tilde{i}_{jk} (t,a+h)-\tilde{i}_{jk}(t,a) | {\rm d}a
 \\
&\leq \int_0^{t-h}| b_{jk}(t-a-h)\pi_j(a+h)-b_{jk}(t-a)\pi_j(a)| {\rm d}a
+\int_{t-h}^tb_{jk}(t-a)\pi_j(a) {\rm d}a
 \\
&\le  \int_0^{t-h}b_{jk}(t-a-h)|\pi_j(a+h)-\pi_j(a)| {\rm d}a  \\
&\quad +\int_0^{t-h}|b_{jk}(t-a-h)-b_{jk}(t-a)|\pi_j(a) {\rm d}a +\bar\beta_j kh
 \\
&\le \bar\beta_j k\int_0^{t-h}|\pi_j(a+h)-\pi_j(a)| {\rm d}a
+L_{b_{jk}}h\int_0^{t-h}\pi_j(a) {\rm d}a +\bar\beta_j kh
 \\
&\le ( \bar\beta_j \bar{\gamma}_j k(t-h)+L_{b_{jk}}(t-h)+\bar\beta_j k) h, \quad
 j \in \mathbb{M}, \; k \in \mathbb{N}_n.
\end{align*}
Since the right-hand side of this inequality is independent of
$\mathbf{X}_0 \in C$ and converges to zero as $h \to 0^+$, (c)
immediately holds. This completes the proof.
\end{proof}

From Lemma \ref{lemmm2.1} and Proposition \ref{prop2.1}, we see that $\Phi$
is point dissipative, eventually bounded and asymptotically smooth.
Hence, from \cite[Theorem 2.33]{ST}, we obtain the following proposition.

\begin{proposition}\label{prop2.5}
There exists a compact attractor $\mathcal{ A}$ of bounded sets in $\Omega$.
\end{proposition}

\section{Global stability of the disease-free equilibrium for $\mathcal{ R}_0 < 1$}
\label{sec:dfe}

In this section, we investigate the global asymptotic stability of the disease-free
equilibrium $E_0 = ( 0, 0, \dots, 0 ) \in \Omega$ of system \eqref{eqn2.3} for
$\mathcal{ R}_0 < 1$.
On the local asymptotic stability of $E_0$ for $\mathcal{ R}_0 < 1$,
we establish the following theorem.

\begin{theorem}\label{thm3.1}
If $\mathcal R_0<1$, then the disease-free equilibrium
$E_0 = ( 0, 0, \dots, 0 ) \in \Omega$ is locally asymptotically stable.
\end{theorem}

\begin{proof}
We consider the linearized system \eqref{eqnn3.1}.
Substituting $i_{jk}(t,a)=y_{jk}(a){\rm e}^{\lambda t}$, $j \in \mathbb{M}$,
$k \in \mathbb{N}_n$ into \eqref{eqnn3.1} and dividing both sides by
${\rm e}^{\lambda t}$, we have
\begin{equation}\label{eqnzz3.2}
\begin{gathered}
 \lambda y_{jk}(a)+\frac{{\rm d}y_{jk}(a)}{{\rm d}a}=-\gamma_j(a)y_{jk}(a), \\
y_{jk}(0)=k\Theta_j (\mathbf{y}_j(\cdot)),
\end{gathered}
\end{equation}
for $a \geq 0$, $j \in \mathbb{M}$, and $k \in \mathbb{N}_n$.
Here $\mathbf{y}_j(\cdot)=( y_{j1}(\cdot), y_{j2}(\cdot), \dots, y_{jn}(\cdot) )$
for all $j \in \mathbb{M}$. Integrating the first equation in \eqref{eqnzz3.2},
we have
\begin{equation}\label{eqnzz3.3}
y_{jk}(a)=y_{jk}(0)\pi_j(a){\rm e}^{-\lambda a}, \quad
a \geq 0, \; j \in \mathbb{M}, \; k \in \mathbb{N}_n.
\end{equation}
Substituting \eqref{eqnzz3.3} into the second equation in \eqref{eqnzz3.2}, we have
\begin{equation}\label{eqnzz3.4}
y_{jk}(0)=\frac{k}{\langle k\rangle}\sum_{l=1}^nlp(l) \widehat{K_j(\lambda)} y_{jl}(0),
\quad j \in \mathbb{M}, \; k \in \mathbb{N}_n,
\end{equation}
where $\widehat{K_j(\lambda)}$, $j \in \mathbb{M}$ denotes the Laplace transform
of $\beta_j(\cdot) \pi_j(\cdot)$; that is,
\[
\widehat{K_j(\lambda)}
= \int_0^\infty \beta_j(a) \pi_j(a) {\rm e}^{-\lambda a} {\rm d}a, \quad
j \in \mathbb{M}.
\]
Multiplying  both sides of \eqref{eqnzz3.4} by $kp(k)/\langle k\rangle$, and
summing on $k$ from $1$ to $n$, we have
\begin{equation}\label{eqnzz3.5}
\tilde{\Theta} (\mathbf{y}_j(0))=\frac{\langle k^2\rangle}{\langle k\rangle}
\widehat{K_j(\lambda)}\tilde{\Theta} (\mathbf{y}_j(0)), \quad j \in \mathbb{M}.
\end{equation}
If $\tilde{\Theta}(\mathbf{y}_j(0)) \neq0$ for some $j \in \mathbb{M}$,
 we cancel it from both sides of \eqref{eqnzz3.5} and obtain
\begin{equation}\label{eqnzz3.6}
1=\frac{\langle k^2\rangle}{\langle k\rangle}\widehat{K_j(\lambda)}.
\end{equation}
We claim that all characteristic roots of \eqref{eqnzz3.6} have negative
real parts. By way of contradiction, we assume that \eqref{eqnzz3.6}
has a root $\lambda_0$ with positive real part. Then, we have
\[
\big| \frac{\langle k^2 \rangle}{\langle k \rangle} \widehat{K_j(\lambda_0)} \big|
\leq \mathcal{ R}_{j0} < 1,
\]
which contradicts  \eqref{eqnzz3.6}. Therefore, the claim is true.

If $\tilde{\Theta}(\mathbf{y}_j(0))=0$ for all $j \in \mathbb{M}$, then it
follows from \eqref{eqnzz3.3} and \eqref{eqnzz3.4} that $y_{jk}(a) \equiv 0$
for all $j \in \mathbb{M}$ and $k \in \mathbb{N}_n$.
This case can be ruled out since $i_{jk}(t,a)=y_{jk}(a){\rm e}^{\lambda t} \equiv 0$
for all $j \in \mathbb{M}$ and $k \in \mathbb{N}_n$, and there is no perturbation
from $E_0$. This completes the proof.
\end{proof}

On the global asymptotic stability of the disease-free equilibrium $E_0$ for
$\mathcal{ R}_0 < 1$, we establish the following theorem.

\begin{theorem}\label{thmzz3.1}
If $\mathcal R_0<1$, then the disease-free equilibrium
$E_0 = (0,0,\dots, 0) \in \Omega$ is globally asymptotically stable in $\Omega$.
\end{theorem}

\begin{proof}
Let us define
\begin{equation}\label{eqnml3.1}
\alpha_j(a)=\int_a^\infty\beta_j(s)\frac{\pi_j(s)}{\pi_j(a)} {\rm d}s, \quad
a \geq 0, \ j\in\mathbb M.
\end{equation}
Note that under Assumption \ref{assum2.1}, $\alpha_j (\cdot)$ is finite on
$\mathbb{R}_+$ for all $j \in \mathbb{M}$, and
\[
\alpha_j(0)=K_j, \quad \alpha_j'(a)=\alpha_j(a) \gamma_j(a) -\beta_j(a), \quad
a\geq 0, \ j \in \mathbb{M}.
\]
Let us define a Lyapunov function as follows.
\[
V(t) = \sum_{j=1}^m\sum_{k=1}^n\frac{kp(k)}{K_j}
\int_0^\infty \alpha_j(a) i_{jk}(t,a) {\rm d}a, \quad t \geq 0.
\]
Differentiating $V(\cdot)$ along the solution trajectory of system \eqref{eqn2.3'},
we have
\begin{equation}\label{eqnzz3.10}
\begin{aligned}
 \frac{{\rm d}V(t)}{{\rm d}t}
&= \frac{{\rm d}}{{\rm d}t} \sum_{j=1}^m\sum_{k=1}^n\frac{kp(k)}{K_j}
 \int_0^\infty\alpha_j(a)i_{jk}(t,a){\rm d}a\\
& = \sum_{j=1}^m\sum_{k=1}^n\frac{kp(k)}{K_j}
 \int_0^\infty\alpha_j(a)\frac{\partial i_{jk}(t,a)}{\partial t}{\rm d}a\\
& = \sum_{j=1}^m\sum_{k=1}^n\frac{kp(k)}{K_j}\int_0^\infty\alpha_j(a)
\Big[-\frac{\partial i_{jk}(t,a)}{\partial a}-\gamma_j(a) i_{jk}(t,a) \Big] {\rm d}a,
 \quad t \geq 0.
\end{aligned}
\end{equation}
Calculating the integration by parts, we have
\begin{equation}\label{eqnzz3.11}
\begin{aligned}
&\int_0^\infty\alpha_j(a)\big[-\frac{\partial i_{jk}(t,a)}{\partial a}-\gamma_j(a)
 i_{jk}(t,a) \big]{\rm d}a \\
&=-\int_0^\infty\alpha_j(a)\frac{\partial i_{jk}(t,a)}{\partial a}{\rm d}a
 -\int_0^\infty \alpha_j(a) \gamma_j(a) i_{jk}(t,a) {\rm d}a\\
&=-\alpha_j(a)i_{jk}(t,a)\big|_0^\infty
 +\int_0^\infty\alpha'_j(a)i_{jk}(t,a) {\rm d}a
 -\int_0^\infty\alpha_j(a) \gamma_j(a) i_{jk}(t,a) {\rm d}a \\
&=\alpha_j(0)i_{jk}(t,0)-\int_0^\infty\beta_j(a)i_{jk}(t,a){\rm d}a \\
&=K_j k \Big[ 1-\sum_{j=1}^m \int_0^\infty i_{jk}(t,a) {\rm d}a \Big]
 \Theta_j( \mathbf{i}_j(t,\cdot))-\int_0^\infty\beta_j(a)i_{jk}(t,a) {\rm d}a \\
&\leq K_j k \Theta_j (\mathbf{i}_j(t,\cdot))
 -\int_0^\infty\beta_j(a)i_{jk}(t,a) {\rm d}a, \quad t \geq 0, \; j \in \mathbb{M},
 \; k \in \mathbb{N}_n.
\end{aligned}
\end{equation}
From \eqref{eqnzz3.10} and \eqref{eqnzz3.11}, we obtain
\begin{equation}\label{eqnzz3.12}
\begin{aligned}
  \frac{{\rm d}V(t)}{{\rm d}t}
&\leq \sum_{j=1}^m\Big[\langle k^2\rangle\Theta_j(\mathbf{i}_j(t,\cdot))
-\frac{\langle k\rangle}{K_j}\Theta_j(\mathbf{i}_j(t,\cdot))\Big] \\
& = \sum_{j=1}^m(\mathcal R_{j0}-1)\frac{\langle k\rangle}{K_j}
 \Theta_j (\mathbf{i}_j(t,\cdot)), \quad t \geq 0.
\end{aligned}
\end{equation}
If $\mathcal R_0<1$, it follows from \eqref{eqnzz3.12} that $V'(t) \le0$
with equality holding if and only if $i_{jk}(t,\cdot)=0$ for all
$j \in \mathbb{M}$ and $k\in\mathbb{N}_n$. This implies that the
largest positive invariant subset $\mathcal M$ of set
 $\{(i_{jk})_{(j,k) \in \mathbb{M} \times \mathbb{N}_n} \in \Omega  : \ V'=0\}$
 is a singleton $\{E_0\}$.
Since the positive orbit $\cup_{t \geq 0} \{ \Phi(t,\mathbf{X}_0) \}$
is precompact in $\Omega$ by Proposition \ref{prop2.1}, we can apply
the invariance principle stated in \cite[Theorem 4.2 in Chapter IV]{JAW}
to conclude that $E_0$ is globally asymptotically stable in $\Omega$.
This completes the proof.
\end{proof}

Theorem \ref{thmzz3.1} implies that all strains will die out if none of
their reproduction numbers is greater than or equal to $1$.

\section{Competitive exclusion for $\mathcal{ R}_0 > 1$}\label{sec:dom}

In this section, we study the occurrence of the competitive exclusion
for $\mathcal{ R}_0 > 1$.
Without loss of generality, we can assume that the reproduction number
$\mathcal{ R}_{10}$ for strain $1$ is the largest; that is,
\[
\mathcal{ R}_0 = \max_{j\in\mathbb M} \{ \mathcal{ R}_{j0} \}
= \max \{ \mathcal R_{10},\mathcal R_{20},\dots,\mathcal R_{m0} \} 
= \mathcal{ R}_{10}.
\]
We now investigate the existence of an endemic equilibrium of  \eqref{eqn2.3'} 
in which only strain $1$ persists. Let us consider the  system
\begin{equation}\label{eqnmz2.3}
\begin{gathered}
 \frac{{\rm d} i_{jk}(a)}{{\rm d}a}=-\gamma_j(a)i_{jk}(a),\\
 i_{jk}(0)=k\Big[ 1- \sum_{j=1}^m \int_0^\infty i_{jk}(a) {\rm d}a \Big]
 \Theta_j (\mathbf{i}_j(\cdot)),
\end{gathered}
\end{equation}
for $a \geq 0$, $j\in \mathbb{M}$, and $k \in \mathbb{N}_n$.
Here $\mathbf{i}_j(\cdot) = ( i_{j1}(\cdot), i_{j2}(\cdot),
\dots, i_{jn}(\cdot) )$ for all $j \in \mathbb{M}$. Let us denote 
\[
E_1^*=( i_{11}^*(\cdot), i_{12}^*(\cdot), \dots, i_{1n}^*(\cdot), 0, \dots, 0 )
 \in \Omega
\]
the endemic equilibrium in which only strain $1$ persists. 
In what follows, we call $E_1^*$ the strain $1$ dominant equilibrium. 
Since the entries of $E_1^*$ satisfy \eqref{eqnmz2.3}, we have
\begin{equation}\label{i1k}
\begin{gathered}
  i_{1k}^*(a) = i_{1k}^*(0) \pi_1(a), \\
 i_{1k}^*(0) = k\Big[ 1-\int_0^\infty i_{1k}^*(a) {\rm d}a \Big]
 \Theta_1(\mathbf{i}_1^*(\cdot)),
\end{gathered} 
\end{equation}
for $a \geq 0$, and $k \in \mathbb{N}_n$;
where $\mathbf{i}_1^*(\cdot)=( i_{11}^*(\cdot), i_{12}^*(\cdot),\dots, 
i_{1n}^*(\cdot) )$. On the existence of $E_1^*$, we have the following theorem.

\begin{theorem}\label{thmm2.2}
If $\mathcal{ R}_0 = \mathcal R_{10} > 1$, then system \eqref{eqn2.3'}
 has the unique strain $1$ dominant equilibrium 
$E_1^* = ( i_{11}^*(\cdot), i_{12}^*(\cdot), \dots, i_{1n}^*(\cdot), 0, 
\dots, 0 ) \in \Omega$.
\end{theorem}

\begin{proof}
Let us define
\begin{equation}\label{Pi}
\Pi_1 = \int_0^\infty \pi_1(a){\rm d}a \quad \text{and} \quad 
i_1^*(0) = \frac{1}{\langle k \rangle} \sum_{l=1}^n l p(l) i_{1l}^*(0).
\end{equation}
Substituting the first equation in \eqref{i1k} into the second equation 
in \eqref{i1k}, we have
\begin{equation}\label{i*}
i_{1k}^*(0) =k ( 1 - \Pi_1 i_{1k}^*(0) ) K_1 i_1^*(0), \quad k \in \mathbb{N}_n,
\end{equation}
and hence,
\begin{equation}\label{i1k0}
i_{1k}^*(0) = \frac{k K_1 i_1^*(0)}{1+k \Pi_1 K_1 i_1^*(0)}, \quad 
k \in \mathbb{N}_n.
\end{equation}
Substituting \eqref{i1k0} into the second equation in \eqref{Pi}, we have
\[
i_1^*(0) = \frac{1}{\langle k \rangle} \sum_{l=1}^n l p(l)
 \frac{l K_1 i_1^*(0)}{1+l \Pi_1 K_1 i_1^*(0)}.
\]
Dividing both sides of this equation by $i_1^*(0)$, we have
\[
1 = \frac{1}{\langle k \rangle} \sum_{l=1}^n 
\frac{l^2 p(l) K_1}{1+l \Pi_1 K_1 i_1^*(0)}.
\]
Let us define
\[
F(x) = \frac{1}{\langle k \rangle} 
\sum_{l=1}^n \frac{l^2 p(l) K_1}{1+l \Pi_1 K_1 x}, \quad x \in \mathbb{R}.
\]
If there exists a positive root $x^* > 0$ such that $F(x^*)=1$, then 
$x^* = i_1^*(0)$ and hence, it follows from \eqref{i1k0} and the first 
equation in \eqref{i1k} that the strain $1$ dominant equilibrium $E_1^*$ 
exists in $\Omega$. Since $F(x)$ is monotone decreasing in $x$ and converges 
to zero as $x \to +\infty$, the positive root $x^*$ uniquely exists 
if and only if $F(0)>1$. In fact, we have
\[
F(0) = \frac{1}{\langle k \rangle} \sum_{l=1}^n l^2 p(l) K_1 
= \frac{\langle k^2 \rangle}{\langle k \rangle} K_1 = \mathcal{ R}_{10} > 1.
\]
This completes the proof.
\end{proof}

Next, we prove the local asymptotic stability of the strain $1$ dominant 
equilibrium $E_1^*$ for $\mathcal{ R}_0 = \mathcal{ R}_{10} > 1$ and 
$\gamma_1(a) = \gamma_1 > 0$.

\begin{theorem}\label{thma3.1}
If $\mathcal{ R}_0 = \mathcal{ R}_{10} > 1$ and $\gamma_1(a) = \gamma_1 > 0$, 
then the strain $1$ dominant equilibrium 
$E^*_1 = ( i_{11}^*(\cdot), i_{12}^*(\cdot), \dots, i_{1n}^*(\cdot), 0, \dots, 0 ) 
\in \Omega$ is locally asymptotically stable.
\end{theorem}

\begin{proof}
Let us define the perturbation from the strain $1$ dominant equilibrium 
$E_1^*$ by
\begin{gather*}
 y_{1k}(t,a)= i_{1k}(t,a) - i_{1k}^*(a),  \\
 y_{jk}(t,a)=i_{jk}(t,a), \quad j \in \mathbb{M} \setminus \{ 1 \},
\end{gather*}
for $t \geq 0$, $a\geq 0$, and $k \in \mathbb{N}_n$.
By linearizing system \eqref{eqn2.3'} around $E^*_1$, we have, for all
 $t \geq 0$, $a \geq 0$ and $k \in \mathbb{N}_n$,
\begin{equation}\label{eqnaa2.3}
\begin{gathered}
  \frac{\partial y_{1k}(t,a)}{\partial t}+ \frac{\partial y_{1k}(t,a)}{\partial a}
=-\gamma_1(a)y_{1k}(t,a),\\
  y_{1k}(t,0)= k \Big[ 1 - \int_0^\infty i_{1k}^*(a) {\rm d}a \Big]
 \Theta_1(\mathbf{y}_1(t,\cdot)) - k \Theta_1(\mathbf{i}_1^*(\cdot))
 \sum_{j=1}^m \int_0^\infty y_{jk}(t,a) {\rm d}a, \\
  \frac{\partial y_{jk}(t,a)}{\partial t}
 + \frac{\partial y_{jk}(t,a)}{\partial a}=-\gamma_j(a)y_{jk}(t,a), \quad
j \in \mathbb{M} \setminus \{ 1 \}, \\
 y_{jk}(t,0)=k \Big[ 1 - \int_0^\infty i_{1k}^*(a) {\rm d}a \Big] 
 \Theta_j(\mathbf{y}_j(t,\cdot)), \quad  j \in \mathbb{M} \setminus \{ 1 \},
\end{gathered}
\end{equation}
where $\mathbf{y}_j(t,\cdot) = ( y_{j1}(t,\cdot), y_{j2}(t,\cdot), \dots,
 y_{jn}(t,\cdot) )$ for all $j \in \mathbb{M}$. 
Substituting $y_{jk}(t,a)$ $=y_{jk}(a){\rm e}^{\lambda t}$ for all 
$j \in \mathbb{M}$ and $k \in \mathbb{N}_n$ into all equations in \eqref{eqnaa2.3}
 and dividing both sides of each equation by ${\rm e}^{\lambda t}$, 
for all $a \geq 0$ and $k \in \mathbb{N}_n$, we have
\begin{equation}\label{eqnb2.3}
\begin{gathered}
  \frac{{\rm d} y_{1k}(a)}{{\rm d} a}=-( \lambda + \gamma_1(a) ) y_{1k}(a),\\
  y_{1k}(0)= k \Big[ 1 - \int_0^\infty i_{1k}^*(a) {\rm d}a \Big] 
\Theta_1 (\mathbf{y}_1(\cdot)) - k \Theta_1( \mathbf{i}_1^*(\cdot))
 \sum_{j=1}^m \int_0^\infty y_{jk}(a){\rm d}a, \\
  \frac{{\rm d} y_{jk}(a)}{{\rm d} a}=-( \lambda + \gamma_j(a) ) y_{jk}(a),
\quad  j \in \mathbb{M} \setminus \{ 1 \}, \\
 y_{jk}(0)=k \Big[ 1 - \int_0^\infty i_{1k}^*(a) {\rm d}a \Big] 
\Theta_j (\mathbf{y}_j(\cdot)), \quad j \in \mathbb{M} \setminus \{ 1 \},
\end{gathered}
\end{equation}
where $\mathbf{y}_j(\cdot) = ( y_{j1}(\cdot), y_{j2}(\cdot), \dots,
 y_{jn}(\cdot) )$ for all $j \in \mathbb{M}$. From the first and third 
equations in \eqref{eqnb2.3}, we obtain
\begin{equation}\label{eqnff2.1}
y_{jk}(a)=y_{jk}(0)\pi_j(a) {\rm e}^{-\lambda a}, \quad a \geq 0, \;
 j \in \mathbb{M}, \; k \in \mathbb{N}_n.
\end{equation}
Substituting \eqref{eqnff2.1} into the last equation in \eqref{eqnb2.3}, we obtain
\begin{equation}\label{eqnmn2.1}
y_{jk}(0)=k \Big[ 1 - \int_0^\infty i_{1k}^*(a) {\rm d}a \Big]
 \tilde{\Theta} (\mathbf{y}_j(0))\widehat{K_j(\lambda)}, \quad 
j \in \mathbb{M} \setminus \{ 1 \}, \; k \in \mathbb{N}_n.
\end{equation}
Multiplying both sides of \eqref{eqnmn2.1} by $kp(k)/\langle k\rangle$,  and 
summing on $k$ from $1$ to $n$, we obtain
\begin{equation}\label{eqnmn2.2}
\tilde{\Theta}(\mathbf{y}_j(0))=\frac{1}{\langle k\rangle}
\sum_{l=1}^nl^2p(l) \Big[ 1 - \int_0^\infty i_{1l}^*(a) {\rm d}a \Big] 
\tilde{\Theta}(\mathbf{y}_j(0))\widehat{K_j(\lambda)}, \quad
 j \in \mathbb{M} \setminus \{ 1 \}.
\end{equation}
If $\tilde{\Theta}(\mathbf{y}_j(0))\neq0$ for some
 $j \in \mathbb{M} \setminus \{ 1 \}$, we cancel it from both sides 
of \eqref{eqnmn2.2} and obtain
\begin{equation}\label{eqnmn2.3}
1=\frac{1}{\langle k\rangle}\sum_{l=1}^nl^2p(l) 
\Big[ 1 - \int_0^\infty i_{1l}^*(a) {\rm d}a \Big] \widehat{K_j(\lambda)}.
\end{equation}
Multiplying both sides of the second equation
in \eqref{i1k} by $kp(k)/\langle k \rangle$,  and dividing both sides by $i_1^*(0)$ after summing on $k$ 
from $1$ to $n$, we have
\begin{equation}\label{eqnmn2.4}
1=\frac{1}{\langle k\rangle}\sum_{l=1}^nl^2p(l) 
\Big[ 1 - \int_0^\infty i_{1l}^*(a) {\rm d}a \Big] K_1.
\end{equation}
We claim that all characteristic roots of \eqref{eqnmn2.3} have negative 
real parts. On the contrary, we assume that there exists a root $\lambda_0$ 
with positive real part. It follows from \eqref{eqnmn2.3} and \eqref{eqnmn2.4} that
\begin{align*}
 1 &= \Big| \frac{1}{\langle k\rangle}\sum_{l=1}^nl^2p(l) 
\Big[ 1 - \int_0^\infty i_{1l}^*(a) {\rm d}a \Big] \widehat{K_j(\lambda_0)} \Big|  \\
& < \frac{1}{\langle k\rangle}\sum_{l=1}^nl^2p(l) 
\Big[ 1 - \int_0^\infty i_{1l}^*(a) {\rm d}a \Big] K_j  \\
& \leq \frac{1}{\langle k\rangle}\sum_{l=1}^nl^2p(l)
 \Big[ 1 - \int_0^\infty i_{1l}^*(a) {\rm d}a \Big] K_1=1,
\end{align*}
which is a contradiction. Therefore, the claim is true.

If $\tilde{\Theta}(\mathbf{y}_j(0))=0$ for all $j \in \mathbb{M} \setminus \{ 1 \}$,
 then from \eqref{eqnmn2.1} we have that $y_{jk}(0)=0$ for all 
$j \in \mathbb{M} \setminus \{ 1 \}$ and $k \in \mathbb{N}_n$. 
From \eqref{eqnff2.1}, we then have that $y_{jk}(a)=0$ for all 
$a \geq 0$, $j \in \mathbb{M} \setminus \{ 1 \}$ and $k \in \mathbb{N}_n$. 
Hence, the second equation in \eqref{eqnb2.3} can be rewritten as
\[
y_{1k}(0) = k \Big[ 1 - \int_0^\infty i_{1k}^*(a) {\rm d}a \Big] 
\Theta_1(\mathbf{y}_1(\cdot)) - k \Theta_1(\mathbf{i}_1^*(\cdot))
 \int_0^\infty y_{1k}(a) {\rm d}a, \quad k \in \mathbb{N}_n.
\]
Substituting \eqref{eqnff2.1} into the right-hand side of this equation, we have
\[
y_{1k}(0) = k \Big[ 1 - \int_0^\infty i_{1k}^*(a) {\rm d}a \Big] 
\tilde{\Theta}(\mathbf{y}_1(0)) \widehat{K_1(\lambda)}
 - \frac{k \Theta_1(\mathbf{i}_1^*(\cdot))}{\lambda + \gamma_1} y_{1k}(0), \quad 
k \in \mathbb{N}_n.
\]
Hence, we have
\begin{equation}\label{y1k}
y_{1k}(0) = \frac{k \big[ 1 - \int_0^\infty i_{1k}^*(a) {\rm d}a \big]
 \tilde{\Theta}(\mathbf{y}_1(0)) \widehat{K_1(\lambda)}}
{ 1+\frac{k\Theta_1(\mathbf{i}_1^*(\cdot))}{\lambda + \gamma_1}}, \quad
 k \in \mathbb{N}_n.
\end{equation}
Multiplying both sides of \eqref{y1k} by $kp(k)/\langle k \rangle$, and summing 
on $k$ from $1$ to $n$, we have
\begin{equation}\label{Theta1}
\tilde{\Theta} (\mathbf{y}_1(0)) 
= \frac{1}{\langle k \rangle} \sum_{k=1}^n \frac{k^2 p(k) 
\big[ 1 - \int_0^\infty i_{1k}^*(a) {\rm d}a \big] 
\widehat{K_1(\lambda)}}{ 1+\frac{k\Theta_1(\mathbf{i}_1^*(\cdot))}
{\lambda + \gamma_1}} \tilde{\Theta} (\mathbf{y}_1(0)).
\end{equation}
If $\tilde{\Theta}(\mathbf{y}_1(0))=0$, then  from \eqref{y1k} we have
 $y_{1k}(0)=0$ for all $k \in \mathbb{N}_n$ and hence, from \eqref{eqnff2.1}, 
$y_{1k}(a)=0$ for all $a \geq 0$ and $k \in \mathbb{N}_n$. 
We can rule out this case since there is no perturbation from $E_1^*$. 
Thus, $\tilde{\Theta} (\mathbf{y}_1(0)) \neq 0$. Dividing both sides 
of \eqref{Theta1} by $\tilde{\Theta}(\mathbf{y}_1(0))$, we have
\begin{equation}\label{KL}
1= \frac{1}{\langle k \rangle} \sum_{k=1}^n 
\frac{k^2 p(k) \big[ 1 - \int_0^\infty i_{1k}^*(a) {\rm d}a \big]
 \widehat{K_1(\lambda)}}{ 1+\frac{k\Theta_1(\mathbf{i}_1^*(\cdot))}
{\lambda + \gamma_1}}.
\end{equation}
We claim that all characteristic roots of \eqref{KL} have negative real parts.
 On the contrary, suppose that there exists a root $\lambda_0$ of \eqref{KL} 
with positive real part. Using \eqref{eqnmn2.4}, we have
\begin{align*}
 1 &= \Big| \frac{1}{\langle k \rangle} \sum_{k=1}^n \frac{k^2 p(k) 
\big[ 1 - \int_0^\infty i_{1k}^*(a) {\rm d}a \big] 
\widehat{K_1(\lambda_0)}}{ 1+\frac{k\Theta_1(\mathbf{i}_1^*(\cdot))}
{\lambda_0 + \gamma_1}} \Big|  \\
& < \frac{1}{\langle k \rangle} \sum_{k=1}^n k^2 p(k) 
\big[ 1 - \int_0^\infty i_{1k}^*(a) {\rm d}a \big] K_1 = 1,
\end{align*}
which is a contradiction. Therefore, the claim is true. This completes the proof.
\end{proof}

To construct a Lyapunov function for the proof of the global asymptotic stability 
of the strain $1$ dominant equilibrium $E_1^*$, we need to show the uniform 
persistence of system \eqref{eqn2.3'} with respect to a function $\rho_1$ 
defined below in \eqref{rho_1}, which implies the force of infection by strain $1$. 
To this end, we first define the  function 
$\rho: \mathbb{X} \to \mathbb{R}_+$ by
\[
\rho ( \Phi(t, \mathbf{X}_0) ) 
= \frac{1}{\langle k \rangle} \sum_{j=1}^m \sum_{k=1}^n k^2 p(k) 
\int_0^\infty i_{jk}(t,a) {\rm d}a.
\]
We easily see that if $\mathcal{ R}_0 = \mathcal{ R}_{10} > 1$,
 then there exists a nonempty interval $(a_1, \tilde{a}_1) \subset \mathbb{R}_+$ 
such that $\beta_1(a) > 0$ for all $a \in (a_1,\tilde{a}_1)$. 
Thus, the following set cannot be empty if $\mathcal{ R}_0 = \mathcal{ R}_{10} > 1$,
\[
\Omega_1 = \{ \mathbf{X}(\cdot) = ( i_{11}(\cdot,\cdot), 
\dots,i_{mn}(\cdot,\cdot) ) \in \Omega:
 \int_0^\infty \beta_1(a) i_{1k}(\cdot, a) {\rm d}a > 0 \text{ for some } k \}.
\]
Following the definition in \cite{ST}, we call system \eqref{eqn2.3'} 
\textit{uniformly weakly $\rho$-persistent} in $\Omega_1$ if there exists an 
$\epsilon > 0$ such that
\begin{equation}\label{limsup}
\limsup_{t\to \infty}  \rho(\Phi(t, \mathbf{X}_0))
= \limsup_{t\to \infty} \frac{1}{\langle k \rangle} \sum_{j=1}^m 
\sum_{k=1}^n k^2 p(k) \int_0^\infty i_{jk}(t,a) {\rm d}a
> \epsilon,
\end{equation}
provided $\mathbf{X}_0 \in \Omega_1$. Moreover, we call
 system \eqref{eqn2.3'} \textit{uniformly strongly $\rho$-persistent} in
 $\Omega_1$ if there exists an $\epsilon > 0$ such that
\[
\liminf_{t\to \infty} \rho(\Phi(t,\mathbf{X}_0))
= \liminf_{t\to \infty} \frac{1}{\langle k \rangle} \sum_{j=1}^m 
\sum_{k=1}^n k^2 p(k) \int_0^\infty i_{jk}(t,a) {\rm d}a
> \epsilon,
\]
provided $\mathbf{X}_0 \in \Omega_1$. We next prove the uniform weak 
$\rho$-persistence of system \eqref{eqn2.3'} for
 $\mathcal{ R}_0 = \mathcal{ R}_{10} > 1$.

\begin{proposition}\label{prop3.4}
If  $\mathcal {R}_{0} = \mathcal{ R}_{10} >1$, then system \eqref{eqn2.3'}
 is uniformly weakly $\rho$-persistent in $\Omega_1$.
\end{proposition}

\begin{proof}
Since $\mathcal {R}_{0} = \mathcal{ R}_{10} >1$, there exist sufficiently 
small $\epsilon > 0$ and $\lambda >0$ such that
\begin{equation}\label{R10el}
\Big( \frac{\langle k^2 \rangle}{\langle k \rangle} - \epsilon \Big) 
\widehat{K_1(\lambda)} > 1.
\end{equation}
For such $\epsilon > 0$, we prove that inequality \eqref{limsup} holds. 
On the contrary, suppose that \eqref{limsup} does not hold. 
Then, there exists a sufficiently large $t_0 > 0$ such that
\[
\frac{1}{\langle k \rangle} \sum_{j=1}^m \sum_{k=1}^n k^2 p(k)
 \int_0^\infty i_{jk}(t,a) {\rm d}a \leq \epsilon,
\]
for all $t \geq t_0$. From the second equation in \eqref{eqn2.3'}, we have
\begin{equation}
\begin{aligned}
& b_{1k}(t) \\
&= i_{1k}(t,0) \\
&= \Big[ 1- \sum_{j=1}^m \int_0^\infty i_{jk}(t,a) {\rm d}a \Big] 
\frac{k}{\langle k \rangle} \sum_{l=1}^n l p(l) 
 \int_0^\infty \beta_1(a) i_{1l}(t,a) {\rm d}a  \\
&\geq \Big[ 1- \sum_{j=1}^m \int_0^\infty i_{jk}(t,a) {\rm d}a \Big]
 \frac{k}{\langle k \rangle} \sum_{l=1}^n l p(l)
  \int_0^t \beta_1(a) \pi_1(a) b_{1l}(t-a) {\rm d}a  \\
& = k \Big[ 1- \sum_{j=1}^m \int_0^\infty i_{jk}(t,a) {\rm d}a \Big]
 \int_0^t \beta_1(a) \pi_1(a) \tilde{\Theta}(\mathbf{b}_{1}(t-a)) {\rm d}a,
 \quad k \in \mathbb{N}_n, 
\end{aligned}\label{b1k}
\end{equation}
where $\mathbf{b}_1(\cdot) =(b_{11}(\cdot), b_{12}(\cdot), \dots, b_{1n}(\cdot))$. 
Multiplying both sides  by $k p(k)/\langle k \rangle$,  and 
summing on $k$ from $1$ to $n$, we have
\begin{align}
&\tilde{\Theta}(\mathbf{b}_1(t)) \nonumber \\
&\geq \sum_{k=1}^n \frac{k^2 p(k)}{\langle k \rangle } 
\Big[ 1- \sum_{j=1}^m \int_0^\infty i_{jk}(t,a) {\rm d}a \Big] 
 \int_0^t \beta_1(a) \pi_1(a) \tilde{\Theta}(\mathbf{b}_{1}(t-a)) {\rm d}a 
\label{tTheta} \\
&= \Big[ \frac{\langle k^2 \rangle}{\langle k \rangle} 
 - \frac{1}{\langle k \rangle} \sum_{k=1}^n \sum_{j=1}^m k^2 p(k) 
 \int_0^\infty i_{jk}(t,a) {\rm d}a \Big] 
 \int_0^t \beta_1(a) \pi_1(a) \tilde{\Theta}(\mathbf{b}_{1}(t-a)) {\rm d}a  \nonumber\\
&\geq \Big( \frac{\langle k^2 \rangle}{\langle k \rangle} - \epsilon \Big)
 \int_0^t \beta_1(a) \pi_1(a) \tilde{\Theta}(\mathbf{b}_{1}(t-a)) {\rm d}a, 
\quad \forall t \geq t_0.\nonumber 
\end{align} 
 Without loss of generality, we can assume that $t_0 = 0$
 by taking $\mathbf{X}(t_0) = \Phi(t_0, \mathbf{X}_0)$ as the new initial value
 $\mathbf{X}_0$. Then, since the boundedness of $\tilde{\Theta}(\mathbf{b}_{1}(t))$ 
follows from Lemma \ref{lemmm2.1}, we can take the Laplace transform of 
\eqref{tTheta} for $\lambda >0$, and obtain
\begin{equation}\label{b1kl}
\tilde{\Theta}(\widehat{\mathbf{b}_1(\lambda)})
\geq \Big( \frac{\langle k^2 \rangle}{\langle k \rangle} - \epsilon \Big) 
 \widehat{K_1(\lambda)} \tilde{\Theta}(\widehat{\mathbf{b}_1(\lambda)}),
\end{equation}
where
\begin{gather*}
 \widehat{\mathbf{b}_1}(\lambda)
=(\widehat{b_{11}}(\lambda), \widehat{b_{12}}(\lambda), \dots, 
\widehat{b_{1n}}(\lambda)), \quad 
\widehat{b_{1k}}(\lambda) = \int_0^\infty {\rm e}^{-\lambda t} b_{1k}(t) {\rm d}t, 
\quad k \in \mathbb{N}_n, \\
\tilde{\Theta}(\widehat{\mathbf{b}_1(\lambda)})
 = \frac{1}{\langle k \rangle} \sum_{l=1}^n lp(l)
 \int_0^\infty {\rm e}^{-\lambda t} b_{1l}(t) {\rm d}t.
\end{gather*}
It is easy to see from the positivity of the solution that 
$\tilde{\Theta} ( \widehat{\mathbf{b}_{1}(\lambda)} )  > 0$ for any 
$\mathbf{X}_0 \in \Omega_1$. Hence, dividing both sides of \eqref{b1kl} 
by $\tilde{\Theta} ( \widehat{\mathbf{b}_{1}(\lambda)} )$, we have
\[
1 \geq \Big( \frac{\langle k^2 \rangle}{\langle k \rangle} - \epsilon \Big)
 \widehat{K_1(\lambda)},
\]
which contradicts  \eqref{R10el}. This completes the proof.
\end{proof}

To prove the uniform strong $\rho$-persistence of system \eqref{eqn2.3'} 
for $\mathcal{ R}_0 = \mathcal{ R}_{10} > 1$, we will use \cite[Theorem 5.2]{ST} 
(see also \cite[Section 8]{M}). For this purpose, let us consider a total 
trajectory of semiflow $\Phi$, which is a function 
$\mathbf{Y}: \mathbb{R} \to \mathbb{X}$ such that 
$\mathbf{Y}(t+s)=\Phi(s,\mathbf{Y}(t))$ for all $t \in \mathbb{R}$ and 
$s \in \mathbb{R}_+$. In the total trajectory, we have
\begin{equation}\label{i_total}
i_{jk}(t,a) = i_{jk}(t-a, 0) \pi_j(a) = b_{jk}(t-a) \pi_j (a), \quad 
j \in \mathbb{M}, \; k \in \mathbb{N}_n,
\end{equation}
for all $t \in \mathbb{R}$ and $a \in \mathbb{R}_+$. 

\begin{lemma}\label{l:i}
For a total trajectory $\mathbf{Y}(\cdot)$, the following inequality holds.
\begin{equation}\label{e:i}
1-\sum_{j=1}^m \int_0^\infty i_{jk}(t,a){\rm d}a 
\geq \frac{\underline{\gamma}}{k \sum_{j=1}^m \bar{\beta}_j +\underline{\gamma}}, 
\quad t \in \mathbb{R}, \; k \in \mathbb{N}_n.
\end{equation}
\end{lemma}

\begin{proof}
For all $t \in \mathbb{R}$, we have
\begin{align*}
& \frac{{\rm d}}{{\rm d}t} 
 \Big[ 1- \sum_{j=1}^m \int_0^\infty i_{jk}(t,a) {\rm d}a\Big] \\
&\geq -k \sum_{j=1}^m\Theta_j (\mathbf{i}_j(t,\cdot)) 
 \Big[ 1- \sum_{j=1}^m \int_0^\infty i_{jk}(t,a) {\rm d}a\Big]  
 + \underline{\gamma} \sum_{j=1}^m \int_0^\infty i_{jk}(t,a) {\rm d}a  \\
& \geq \underline{\gamma} -\Big( k \sum_{j=1}^m \bar{\beta}_j + \underline{\gamma} \Big)
\Big[ 1- \sum_{j=1}^m \int_0^\infty i_{jk}(t,a) {\rm d}a\Big], \quad
 k \in \mathbb{N}_n.
\end{align*}
Hence, for arbitrary fixed $r \in \mathbb{R}$ and for all $t > r$, we have
\begin{align*}
 1- \sum_{j=1}^m \int_0^\infty i_{jk}(t,a) {\rm d}a
& \geq \Big[ 1- \sum_{j=1}^m \int_0^\infty i_{jk}(r,a) {\rm d}a \Big] 
{\rm e}^{-( k \sum_{j=1}^m \bar{\beta}_j + \underline{\gamma} )(t-r)}  \\
&\quad + \frac{\underline{\gamma}}{k \sum_{j=1}^m \bar{\beta}_j
  +\underline{\gamma}} [ 1-{\rm e}^{-( k \sum_{j=1}^m \bar{\beta}_j 
+ \underline{\gamma} )(t-r)} ], \quad k \in \mathbb{N}_n.
\end{align*}
Taking $r \to -\infty$, we obtain \eqref{e:i} for all $t \in \mathbb{R}$. 
This completes the proof.
\end{proof}

Now, we need the following additional assumption.

\begin{assumption}\label{assum5.1} \rm
For all $j \in \mathbb{M}$, there exists a nonempty interval 
$(a_j, \tilde{a}_j) \subset \mathbb{R}_+$ such that $\beta_j(a) > 0$ for all 
$a \in (a_j, \tilde{a}_j)$.
\end{assumption}

It is easy to see that the above assumption holds if $\mathcal{ R}_{j0} > 0$ for all
 $j \in \mathbb{M}$. Under Assumption \ref{assum5.1}, we next prove the 
following lemma (see also \cite[Lemma 9.12]{ST} and \cite[Proposition 9]{M}).

\begin{lemma}\label{l:positivity}
Suppose that Assumption \ref{assum5.1} holds. 
For each fixed $j \in \mathbb{M}$, $b_{jk}(\cdot)$ for total trajectory 
$\mathbf{Y}(\cdot)$ is identically zero on $\mathbb{R}$ for all
 $k \in \mathbb{N}_n$, or it is strictly positive on $\mathbb{R}$ for all 
$k \in \mathbb{N}_n$.
\end{lemma}

\begin{proof}
Let us fix $j \in \mathbb{M}$ and let 
$\mathbf{b}_j(t)=( b_{j1}(t),b_{j2}(t),\dots, b_{jn}(t) )$. 
Suppose that there exists a $t_1 \in \mathbb{R}$ such that 
$\tilde{\Theta}(\mathbf{b}_j(t))=0$ for all $t \leq t_1$. 
Then, from the second equation in \eqref{eqn2.3'}, we have
\begin{equation}
\begin{aligned}
 b_{jk}(t) &= k \Big[ 1-\sum_{j=1}^m \int_0^\infty i_{jk}(t,a) {\rm d}a \Big]
 \frac{1}{\langle k \rangle} \sum_{l=1}^n l p(l) 
 \int_0^{\infty} \beta_j(a) b_{jl}(t-a) \pi_j(a) {\rm d}a  \\
&= k \Big[ 1-\sum_{j=1}^m \int_0^\infty i_{jk}(t,a) {\rm d}a \Big] 
 \int_0^{\infty} \beta_j(a) \pi_j(a) \tilde{\Theta}(\mathbf{b}_j(t-a)) {\rm d}a  \\
&= k \Big[ 1-\sum_{j=1}^m \int_0^\infty i_{jk}(t,a) {\rm d}a \Big]
  \int_0^{t-t_1} \beta_j(a) \pi_j(a) \tilde{\Theta}(\mathbf{b}_j(t-a)) {\rm d}a  \\
&\leq \bar{\beta}_j k \int_0^{t} \tilde{\Theta}(\mathbf{b}_j(t-a)) {\rm d}a\\
& =  \bar{\beta}_j k \int_0^{t} \tilde{\Theta}(\mathbf{b}_j(a)) {\rm d}a, \quad
 k \in \mathbb{N}_n,
\end{aligned} \label{bjk}
\end{equation}
for all $t > t_1$. Multiplying both sides by $kp(k)/\langle k \rangle$,  and summing 
on $k$ from $1$ to $n$, we have
\[
\tilde{\Theta}(\mathbf{b}_j(t)) \leq \bar{\beta}_j 
\frac{\langle k^2 \rangle}{\langle k \rangle} 
\int_0^t \tilde{\Theta}(\mathbf{b}_j(a)) {\rm d}a,
\]
for all $t > t_1$. From the Gronwall inequality, we see that 
$\tilde{\Theta}(\mathbf{b}_j(t)) = 0$ for all $t > t_1$. 
Thus, $\tilde{\Theta}(\mathbf{b}_j(t))=0$ for all $t \in \mathbb{R}$, 
and this implies that $b_{jk}(\cdot)$ is identically zero on $\mathbb{R}$ 
for all $k \in \mathbb{N}_n$.

Suppose that there does not exist a $t_1 \in \mathbb{R}$ such that
 $\tilde{\Theta}(\mathbf{b}_j(t))=0$ for all $t \leq t_1$. 
Then, there exists a monotone decreasing sequence 
$\{ t_\ell \}_{\ell \in \mathbb{N}}$ towards $-\infty$ as
 $\ell \to +\infty$ such that $\tilde{\Theta}(\mathbf{b}_j(t_\ell))>0$ 
for all $\ell \in \mathbb{N}$. From the second equality in \eqref{bjk}
 and Lemma \ref{l:i}, we have
\begin{equation}\label{bjk_2}
b_{jk}(t) \geq \frac{k \underline{\gamma}}{k \sum_{j=1}^m \bar{\beta}_j 
+\underline{\gamma}} \int_0^{\infty} \beta_j(a)
 \tilde{\Theta}(\mathbf{i}_j(t,a)) {\rm d}a, \quad t \in \mathbb{R}, \;
 k \in \mathbb{N}_n.
\end{equation}
Multiplying both sides by $kp(k)/\langle k \rangle$,  and summing on $k$ 
from $1$ to $n$, we have
\begin{align*}
\tilde{\Theta}(\mathbf{b}_j(t)) 
&\geq \frac{\langle k^2 \rangle \underline{\gamma}}
 {\langle k \rangle(n\sum_{j=1}^m \bar{\beta}_j + \underline{\gamma} )} 
 \int_0^{\infty} \beta_j(a) \tilde{\Theta}(\mathbf{i}_j(t,a)) {\rm d}a  \\
&=\frac{\langle k^2 \rangle \underline{\gamma}}{\langle k \rangle
(n\sum_{j=1}^m \bar{\beta}_j + \underline{\gamma})} 
\int_0^t \beta_j(a) \pi_j(a) \tilde{\Theta}(\mathbf{b}_j(t-a)) {\rm d}a 
+ \hat{\Theta}(\mathbf{b}_j(t)), \quad t \in \mathbb{R},
\end{align*}
where
\[
\hat{\Theta}(\mathbf{b}_j(t)) = \frac{\langle k^2 \rangle
 \underline{\gamma}}{\langle k \rangle(n\sum_{j=1}^m \bar{\beta}_j 
+ \underline{\gamma})} \int_t^\infty \beta_j(a) \pi_j(a) 
\tilde{\Theta}(\mathbf{b}_j(t-a)) {\rm d}a, \quad t \in \mathbb{R}.
\]
Let $J_\ell (t) = \tilde{\Theta}(\mathbf{b}_j(t+t_\ell))$ and 
$\hat J_\ell (t) = \hat{\Theta}(\mathbf{b}_j(t+t_\ell))$ for 
$t \in \mathbb{R}$ and $\ell \in \mathbb{N}$. From the above inequality, we have
\[
J_\ell (t) \geq \frac{\langle k^2 \rangle \underline{\gamma}}{\langle k\rangle
 (n\sum_{j=1}^m \bar{\beta}_j + \underline{\gamma})}
 \int_0^t \beta_j(a) \pi_j(a) J_\ell (t-a) {\rm d}a + \hat J_\ell(t), \quad
 t \in \mathbb{R}, \; \ell \in \mathbb{N}.
\]
Since $\hat J_\ell(0)=\hat{\Theta}(\mathbf{b}_j(t_\ell))>0$ and 
$\hat J_\ell(\cdot)$ is continuous at $0$, it follows from 
\cite[Corollary B.6]{ST} that there exists a $c \geq 0$, which does not 
depend on $j$, such that $J_\ell(t) = \tilde{\Theta}(\mathbf{b}_j(t+t_\ell)) > 0$ 
for all $t > c$. That is, $\tilde{\Theta}(\mathbf{b}_j(t)) > 0$ for all 
$t > c + t_\ell$. Letting $\ell \to +\infty$, we have $t_\ell \to -\infty$ and hence, 
$\tilde{\Theta}(\mathbf{b}_j(t)) > 0$ for all $t \in \mathbb{R}$. 
This implies that $\tilde{\Theta}(\mathbf{b}_j(\cdot))$ is strictly positive 
on $\mathbb{R}$. From \eqref{bjk_2} and Assumption \ref{assum5.1},
 we see that $b_{jk}(\cdot)$ is strictly positive on $\mathbb{R}$ for all 
$k \in \mathbb{N}_n$. This completes the proof.
\end{proof}

From Propositions \ref{prop2.5} and \ref{prop3.4} and Lemma \ref{l:positivity}, 
we can apply \cite[Theorem 5.2]{ST} to prove the following proposition, which 
states the uniform strong $\rho$-persistence of system \eqref{eqn2.3'}.

\begin{proposition}\label{prop3.5}
If $\mathcal {R}_{0} = \mathcal{ R}_{10} >1$ and Assumption \ref{assum5.1} holds, 
then system \eqref{eqn2.3'} is uniformly strongly $\rho$-persistent in $\Omega_1$.
\end{proposition}

Let us define the function $\rho_1: \mathbb{X} \to \mathbb{R}_+$, 
which represents the force of infection by strain $1$:
\begin{equation}\label{rho_1}
\rho_1 (\Phi(t,\mathbf{X}_0)) = \Theta_1(\mathbf{i}_1(t,\cdot))
 = \frac{1}{\langle k \rangle} \sum_{l=1}^n l p(l) 
\int_0^\infty \beta_{1}(a) i_{1l}(t,a) {\rm d}a, \quad t \in \mathbb{R}.
\end{equation}
To construct a Lyapunov function below, we need the uniform strong 
$\rho_1$-persistence of system \eqref{eqn2.3'} in $\Omega_1$ for 
$\mathcal{ R}_0 = \mathcal{ R}_{10} > 1$. To this end, we apply 
\cite[Corollary 4.22]{ST} to prove the following proposition.

\begin{proposition}\label{prop5.7}
If $\mathcal {R}_{0} = \mathcal{ R}_{10} >1$ and Assumption \ref{assum5.1} holds, 
then system \eqref{eqn2.3'} is uniformly strongly $\rho_1$-persistent in $\Omega_1$.
\end{proposition}

\begin{proof}
Let $\mathbf{Y}(\cdot)$ be a total trajectory with precompact range such that
\[
\inf_{t\in \mathbb{R}} \rho (\mathbf{Y}(t)) > 0 \quad\text{and} \quad
\mathbf{Y}(0)=\mathbf{X}_0 \in \Omega_1.
\]
We then have
\[
\rho_1(\mathbf{Y}(0)) = \Theta_1(\mathbf{i}_1(0,\cdot))
 = \frac{1}{\langle k \rangle} \sum_{l=1}^n l p(l) 
\int_0^\infty \beta_{1}(a) i_{1l}(0,a) {\rm d}a > 0.
\]
Hence, from \cite[Corollary 4.22]{ST}, there exists an $\epsilon_0 > 0$ such that
\[
\liminf_{t \to +\infty} \rho_1 (\Phi(t,\mathbf{X}_0)) \geq \epsilon_0,
\]
provided $\mathbf{X}_0 \in \Omega_1$. This implies that system \eqref{eqn2.3'} 
is uniformly strongly $\rho_1$-persistent in $\Omega_1$. This completes the proof.
\end{proof}

From \cite[Theorem 5.7]{ST} (see also \cite[Theorem 8.3]{M}), it follows that 
if $\mathcal{ R}_0 = \mathcal{ R}_{10} > 1$ and Assumption \ref{assum5.1} hold, 
then the compact attractor $\mathcal{ A}$ (see Proposition \ref{prop2.5}) 
includes a stable persistent attractor $\mathcal{ A}_1$. 
From Lemma \ref{l:i} and Proposition \ref{prop5.7}, we see that for a total 
trajectory $\mathbf{Y}(\cdot)$ in $\mathcal{ A}_1$, there exists an 
$\epsilon_0 > 0$ such that
\[
i_{1k}(t,0) = b_{1k}(t) \geq \frac{k \underline{\gamma}}{k \sum_{j=1}^m \bar{\beta}_j
 + \underline{\gamma}} \epsilon_0 = \epsilon_1 > 0, \quad t \in \mathbb{R}, \;
 k \in \mathbb{N}_n.
\]
Hence, from Lemma \ref{lemmm2.1} and Theorem \ref{thmm2.2}, we have
\begin{equation} \label{i_ineq}
0 < \frac{\epsilon_1}{i_{1k}^*(0)} \leq \frac{i_{1k}(t,a)}{i^*_{1k}(a)}
 = \frac{i_{1k}(t-a,0)}{i_{1k}^*(0)} 
\leq \frac{\bar{\beta}_1 k}{i_{1k}^*(0)} < \infty,
\end{equation}
for $t \in \mathbb{R}$, $ a \geq 0$, and $k\in \mathbb{N}_n$;
provided $\mathcal{ R}_0=\mathcal{ R}_{10} > 1$ and Assumption \ref{assum5.1} hold.
 By  \eqref{i_ineq}, we can construct the Lyapunov function $V_{1k}(\cdot)$, 
$k \in \mathbb{N}_n$ defined below.

To prove the global asymptotic stability of the strain $1$ dominant 
equilibrium $E_1^*$, we make the following additional assumption,
 which implies that the recovery rate is homogeneous.

\begin{assumption}\label{assum5.2} \rm
$\gamma_j(a) = \gamma > 0$ for all $a \geq 0$ and $j \in \mathbb{M}$.
\end{assumption}

For the sake of simplicity, we now consider the equations of 
$S_k$, $k \in \mathbb{N}_n$ in \eqref{eqn2.3}. Under Assumption \ref{assum5.2}, 
the equations of $S_k$, $k \in \mathbb{N}_n$ can be simplified as follows.
\begin{equation}\label{S_sol}
\frac{{\rm d}S_k(t)}{{\rm d}t}
 = \gamma - \Big[ k \sum_{j=1}^m \Theta_j (\mathbf{i}_j(t,\cdot)) + \gamma 
\Big] S_k(t), \quad k \in \mathbb{N}_n.
\end{equation}
Let $S_k^* = 1-\int_0^\infty i_{1k}^*(a){\rm d}a$ for all $k \in \mathbb{N}_n$. 
Then, under Assumption \ref{assum5.2}, it satisfies the  equation
\begin{equation}\label{S*}
0 = \gamma - \left[ k \Theta_1 (\mathbf{i}_1^*(\cdot)) 
+ \gamma \right] S_k^*, \quad k \in \mathbb{N}_n.
\end{equation}
We finally establish the following theorem on the global asymptotic 
stability of the strain $1$ dominant equilibrium $E_1^*$ for 
$\mathcal{ R}_0 = \mathcal{ R}_{10} > 1$.

\begin{theorem}\label{thm2.2}
Suppose that Assumptions \ref{assum5.1} and \ref{assum5.2} hold. 
If $\mathcal{ R}_0 = \mathcal{ R}_{10} > 1$ and 
$\mathcal{ R}_{10} > \mathcal{ R}_{j0}$ for all $j \in \mathbb{M} \setminus \{ 1 \}$, 
then the strain $1$ dominant equilibrium 
$E_1^*=( i_{11}^*(\cdot), i_{12}^*(\cdot), \dots,  i_{1n}^*(\cdot), 0, \dots, 0 ) 
\in \Omega$ is globally asymptotically stable in $\Omega_1$.
\end{theorem}

\begin{proof}
Let $\mathbf{Y}(\cdot)$ be a total trajectory in $\mathcal{ A}_1$ and 
$S_k(\cdot) = 1- \sum_{j=1}^m \int_0^\infty i_{jk}(\cdot,a) {\rm d}a$,
 $k \in \mathbb{N}_n$, which satisfies \eqref{S_sol}. In what follows, 
for simplicity, we write $S_k(t)$ as $S_k$ and $i_{jk}(t,a)$ as $i_{jk}$ 
for all $j \in \mathbb{M}$ and $k \in \mathbb{N}_n$. 
We construct the  Lyapunov function
\[
V_{1k}(t)=S_{k}^*g( \frac{S_{k}}{S_{k}^*} )
+ k\frac{S_{k}^*}{\langle k \rangle} \sum_{l=1}^n lp(l) 
\int_0^\infty \alpha_{1l}(a) g\big( \frac{i_{1l}}{i_{1l}^*(a)} \big) {\rm d}a, \quad
 t \in \mathbb{R}, \; k \in \mathbb{N}_n,
\]
where $\alpha_{1l}(a) = \int_a^\infty \beta_1(\sigma) i_{1l}^*(\sigma) {\rm d}\sigma$, 
$a \geq 0$, $l \in \mathbb{N}_n$.
By Lemma \ref{l:i} and \eqref{i_ineq}, $V_{1k}(t)$ is bounded for all 
$t \in \mathbb{R}$ and $k \in \mathbb{N}_n$.
 Using \eqref{S_sol}-\eqref{S*}, the derivative of $V_{1k}(t)$ along 
the solution trajectory is calculated as
\begin{align}
 \frac{{\rm d}V_{1k}}{{\rm d}t} 
&=\Big( 1-\frac{S_{k}^*}{S_{k}}\Big) \frac{{\rm d} S_{k}}{{\rm d}t} 
 +k\frac{S_{k}^*}{\langle k \rangle} \sum_{l=1}^n lp(l) 
 \int_0^\infty\alpha_{1l}(a) \Big( \frac{1}{i_{1l}^*(a)}-\frac{1}{i_{1l}} \Big)
 \frac{\partial i_{1l}}{\partial t}{\rm d}a \nonumber \\
&= \Big( 1-\frac{S_{k}^*}{S_{k}}\Big) 
 \Big[ \gamma - ( k \sum_{j=1}^m \Theta_j (\mathbf{i}_j(t,\cdot)) + \gamma )
  S_k \Big]  \nonumber\\
&\quad + k\frac{S_{k}^*}{\langle k \rangle} \sum_{l=1}^n lp(l) 
 \int_0^\infty\alpha_{1l}(a) ( \frac{1}{i_{1l}^*(a)}-\frac{1}{i_{1l}} )
 \big[ -\frac{\partial i_{1l}}{\partial a} -\gamma i_{1l} \big] {\rm d}a  \nonumber\\
&= \Big( 1-\frac{S_{k}^*}{S_{k}}\Big) \gamma ( S_k^* - S_k ) 
 + ( 1-\frac{S_{k}^*}{S_{k}}) k \Big[ \Theta_1 (\mathbf{i}_1^*(\cdot)) S_k^* 
 - \sum_{j=1}^m \Theta_j (\mathbf{i}_j(t,\cdot)) S_k \Big]  \nonumber\\
&\quad + k\frac{S_{k}^*}{\langle k \rangle} \sum_{l=1}^n lp(l) 
 \int_0^\infty\alpha_{1l}(a) \big[ -\frac{\partial}{\partial a}
 g\big( \frac{i_{1l}}{i_{1l}^*(a)} \big) \big] {\rm d}a \nonumber \\
&= \gamma S_k^* \Big( 2 -\frac{S_{k}^*}{S_{k}} - \frac{S_k}{S_k^*}\Big) 
 + k (S_k^* - S_k) \sum_{j=2}^m \Theta_j (\mathbf{i}_j(t,\cdot))  \nonumber\\
& \quad + k \frac{S_k^* }{\langle k \rangle} \sum_{l=1}^n l p(l) 
 \int_0^\infty \beta_1(a) i_{1l}^* (a)
\big[ 1- \frac{S_k^*}{S_k} -\frac{S_k i_{1l}}{S_k^* i_{1l}^*(a)}
  + \frac{i_{1l}}{i_{1l}^*(a)} \big] {\rm d}a  \nonumber\\
&\quad + k\frac{S_{k}^*}{\langle k \rangle } \sum_{l=1}^n lp(l) 
 \int_0^\infty \beta_{1}(a) i_{1l}^*(a) 
 \big[ g\big( \frac{i_{1l}(t,0)}{i_{1l}^*(0)} \big) 
 - g\big( \frac{i_{1l}}{i_{1l}^*(a)} \big) \big] {\rm d}a  \nonumber\\
&= \gamma S_k^* \Big( 2 -\frac{S_{k}^*}{S_{k}} - \frac{S_k}{S_k^*}\Big) 
 + k (S_k^* - S_k) \sum_{j=2}^m \Theta_j (\mathbf{i}_j(t,\cdot)) \nonumber \\
&\quad + k \frac{S_k^* }{\langle k \rangle} \sum_{l=1}^n l p(l) 
 \int_0^\infty \beta_1(a) i_{1l}^* (a) \Big[ 1- \frac{S_k^*}{S_k} 
 -\frac{S_k i_{1l}}{S_k^* i_{1l}^*(a)} + \frac{i_{1l}(t,0)}{i_{1l}^*(0)}  \nonumber \\
& \quad  + \ln \frac{i_{1l} i_{1l}^*(0)}{i_{1l}(t,0) i_{1l}^*(a)}\Big] {\rm d}a, 
 \quad k \in \mathbb{N}_n. \label{eqn2.4}
\end{align}
Note that from \eqref{Pi} and \eqref{i*},
\begin{equation}\label{K}
\sum_{k=1}^n k^2 p(k) \frac{S_k^*}{\langle k \rangle} K_1 = 1
\end{equation}
and hence,
\begin{equation}
\begin{aligned}
& \sum_{k=1}^n k^2 p(k) \frac{S_k^*}{\langle k \rangle} 
 \sum_{l=1}^n l p(l) \int_0^\infty \beta_1(a) i_{1l}^* (a) 
 \frac{i_{1l}(t,0)}{i_{1l}^*(0)} {\rm d}a  \\
& =\sum_{k=1}^n k^2 p(k) \frac{S_k^*}{\langle k \rangle} 
 \sum_{l=1}^n l p(l) K_1 i_{1l}(t,0) \\
&= \sum_{l=1}^n l p(l) i_{1l}(t,0) = \sum_{k=1}^n k p(k) i_{1k}(t,0)  \\
&=\sum_{k=1}^n k^2 p(k) \frac{S_k^*}{\langle k \rangle} 
 \sum_{l=1}^n l p(l) \int_0^\infty \beta_1(a) i_{1l}^*(a) 
 \frac{S_k i_{1l}}{S_k^* i_{1l}^*(a)} {\rm d}a.
\end{aligned} \label{Si}
\end{equation}
Let
\[
V_1(t) = \sum_{k=1}^n k p(k) V_{1k}(t).
\]
Then, from \eqref{eqn2.4} and \eqref{Si}, we have
\begin{equation}\label{V}
\begin{aligned}
&\frac{{\rm d}V_{1}}{{\rm d}t}\\
&= \gamma \sum_{k=1}^n k p(k) S_k^* 
 \Big( 2 -\frac{S_{k}^*}{S_{k}} - \frac{S_k}{S_k^*}\Big) 
 +\sum_{k=1}^n k^2 p(k) (S_k^* - S_k) \sum_{j=2}^m \Theta_j (\mathbf{i}_j(t,\cdot)) \\
&\quad + \sum_{k=1}^n k^2 p(k) \frac{S_k^* }{\langle k \rangle} 
 \sum_{l=1}^n l p(l) \int_0^\infty \beta_1(a) i_{1l}^* (a)
\big[ 1- \frac{S_k^*}{S_k} + \ln \frac{i_{1l} i_{1l}^*(0)}{i_{1l}(t,0) 
 i_{1l}^* (a)}\big] {\rm d}a.
\end{aligned}
\end{equation}
For the last term in the right-hand side of \eqref{V}, we have again 
from \eqref{K} that
\begin{align}
& \sum_{k=1}^n k^2 p(k) \frac{S_k^* }{\langle k \rangle}
 \sum_{l=1}^n l p(l) \int_0^\infty \beta_1(a) i_{1l}^* (a)
\big[ 1- \frac{S_k^*}{S_k} + \ln \frac{i_{1l} i_{1l}^*(0)}{i_{1l}(t,0) i_{1l}^*(a)}
 \big] {\rm d}a   \nonumber\\
&=  \sum_{k=1}^n k^2 p(k) \frac{S_k^* }{\langle k \rangle} 
 \sum_{l=1}^n l p(l) \int_0^\infty \beta_1(a) i_{1l}^* (a)
 \big[2 - \frac{i_{1l}^*(0)}{i_{1l}^*(0)} - \frac{S_k^*}{S_k} 
 + \ln \frac{i_{1l} i_{1l}^*(0)}{i_{1l}(t,0) i_{1l}^* (a)}\big] {\rm d}a   \nonumber\\
& = \sum_{k=1}^n k^2 p(k) \frac{S_k^* }{\langle k \rangle} 
 \sum_{l=1}^n l p(l) \int_0^\infty \beta_1(a) i_{1l}^*(a)
 \big[ 2 - \frac{S_k^*}{S_k} + \ln \frac{i_{1l} i_{1l}^*(0)}{i_{1l}(t,0)
 i_{1l}^* (a)}\big] {\rm d}a  \nonumber \\
&\quad - \sum_{l=1}^n l p(l) i_{1l}^*(0)   \nonumber\\
&= \sum_{k=1}^n k^2 p(k) \frac{S_k^* }{\langle k \rangle} \sum_{l=1}^n l p(l) 
 \int_0^\infty \beta_1(a) i_{1l}^* (a)
\big[ 2 - \frac{S_k^*}{S_k} + \ln \frac{i_{1l} i_{1l}^*(0)}{i_{1l}(t,0) i_{1l}^* (a)}
 \big] {\rm d}a   \nonumber\\
&\quad - \sum_{k=1}^n k p(k) i_{1k}(t,0) \frac{i_{1k}^*(0)}{i_{1k}(t,0)}  \nonumber \\
& = \sum_{k=1}^n k^2 p(k) \frac{S_k^* }{\langle k \rangle} 
 \sum_{l=1}^n l p(l) \int_0^\infty \beta_1(a) i_{1l}^* (a)
 \Big[ 2 - \frac{S_k^*}{S_k} - \frac{S_k i_{1k}^*(0)i_{1l}}{S_k^* i_{1k}(t,0) 
 i_{1l}^*(a)}   \nonumber \\
&\quad  + \ln \frac{i_{1l} i_{1l}^*(0)}{i_{1l}(t,0) i_{1l}^* (a)}\Big] {\rm d}a
  \nonumber  \\
&= \sum_{k=1}^n k^2 p(k) \frac{S_k^* }{\langle k \rangle} 
 \sum_{l=1}^n l p(l) \int_0^\infty \beta_1(a) i_{1l}^* (a)
 \Big[ - g( \frac{S_k^*}{S_k} ) - g( \frac{S_k i_{1k}^*(0)i_{1l}}{S_k^* i_{1k}(t,0) 
 i_{1l}^*(a) } )   \nonumber \\
&\quad  + \ln \frac{i_{1k}(t,0)}{i_{1k}^*(0)}\frac{i_{1l}^*(0)}{i_{1l}(t,0)}\Big] 
 {\rm d}a  \nonumber \\
&= \sum_{k=1}^n k^2 p(k) \frac{S_k^* }{\langle k \rangle} 
 \sum_{l=1}^n l p(l) \int_0^\infty \beta_1(a) i_{1l}^* (a)
\big[ - g( \frac{S_k^*}{S_k} ) - g( \frac{S_k i_{1k}^*(0)i_{1l}}{S_k^*
  i_{1k}(t,0) i_{1l}^* (a)} ) \big] {\rm d}a  \nonumber \\
& \quad + \sum_{k=1}^n \sum_{l=1}^n k^2 p(k) 
 \frac{S_k^*}{\langle k \rangle} l p(l) K_1 i_{1l}^*(0) 
 \Big( \ln \frac{i_{1k}(t,0)}{i_{1k}^*(0)}
- \ln \frac{i_{1l}(t,0)}{i_{1l}^*(0)}\Big). \label{V2} 
\end{align}
For the last term in the right-hand side of \eqref{V2}, we have
\begin{align}
&\sum_{k=1}^n \sum_{l=1}^n k^2 p(k) 
 \frac{S_k^*}{\langle k \rangle} l p(l) K_1 i_{1l}^*(0) 
\Big( \ln \frac{i_{1k}(t,0)}{i_{1k}^*(0)}- \ln \frac{i_{1l}(t,0)}{i_{1l}^*(0)}\Big)
\nonumber  \\
& = \sum_{k=1}^n \sum_{l=1}^n k^2 p(k) \frac{S_k^*}{\langle k \rangle} l p(l) K_1
l \frac{S_l^*}{\langle k \rangle} \sum_{r=1}^n rp(r) K_1 i_{1r}^*(0)
\Big( \ln \frac{i_{1k}(t,0)}{i_{1k}^*(0)}- \ln \frac{i_{1l}(t,0)}{i_{1l}^*(0)}\Big)  
\nonumber\\
&= \sum_{k=1}^n \sum_{l=1}^n v_{kl} 
\Big(\ln \frac{i_{1k}(t,0)}{i_{1k}^*(0)}- \ln \frac{i_{1l}(t,0)}{i_{1l}^*(0)}\Big), 
\label{V3}
\end{align}
where
\[
v_{kl} = k^2 l^2 p(k) p(l) \frac{S_k^* S_l^*}{\langle k \rangle^2} K_1^2 
\sum_{r=1}^n r p(r) i_{1r}^*(0), \quad k,l \in \mathbb{N}_n.
\]
Since $v_{kl}$, $k,l \in \mathbb{N}_n$ is symmetric, the right-hand side 
of \eqref{V3} is equal to zero. Hence, combining \eqref{V} and \eqref{V2}, 
we obtain
\begin{equation}\label{V4}
\begin{aligned}
 \frac{{\rm d}V_{1}}{{\rm d}t}
&= \gamma \sum_{k=1}^n k p(k) S_k^* \Big( 2 -\frac{S_{k}^*}{S_{k}} 
 - \frac{S_k}{S_k^*}\Big) +\sum_{k=1}^n k^2 p(k) (S_k^* - S_k) \sum_{j=2}^m \Theta_j 
 (\mathbf{i}_j(t,\cdot)) \\
&\quad + \sum_{k=1}^n k^2 p(k) \frac{S_k^* }{\langle k \rangle} 
 \sum_{l=1}^n l p(l) \int_0^\infty \beta_1(a) i_{1l}^* (a)
\Big[ - g\Big( \frac{S_k^*}{S_k} \Big)  \\
&\quad - g\Big(\frac{S_k i_{1k}^*(0)i_{1l}}{S_k^* i_{1k}(t,0) i_{1l}^*(a)} \Big) 
 \Big] {\rm d}a.
\end{aligned}
\end{equation}
It is easy to see from the arithmetic-geometric mean and the positivity of 
$g(\cdot)$ that the first and third terms in the right-hand side of \eqref{V4} 
are nonpositive. To evaluate the second term, we define the following Lyapunov 
functions for strain $j \in \mathbb{M} \setminus \{ 1 \}$.
\[
V_{j}(t)=\frac{1}{K_j}\sum_{k=1}^n kp(k) 
\int_0^\infty\alpha_j(a)i_{jk}(t,a) {\rm d}a, \quad j \in \mathbb{M} 
\setminus \{ 1 \},
\]
where $\alpha_j(a)$ is defined as in \eqref{eqnml3.1}. 
Based on the proof of Theorem \ref{thmzz3.1}, we obtain
\begin{equation}\label{eqnml3.2}
\frac{{\rm d}V_j}{{\rm d}t}
= \sum_{k=1}^n k^2 p(k) S_k \Theta_j(\mathbf{i}_j(t,\cdot)) 
- \frac{\langle k \rangle}{K_j} \Theta_j(\mathbf{i}_j(t,\cdot)), \quad 
j \in \mathbb{M} \setminus \{ 1 \}.
\end{equation}
Hence, from \eqref{K} and \eqref{V4}, we obtain the derivative of the Lyapunov 
function $V(t)=\sum_{j=1}^m V_j(t)$ as follows,
\begin{align*}
&\frac{{\rm d}V}{{\rm d}t}\\
&= \sum_{j=1}^m\frac{{\rm d}V_j}{{\rm d}t}\\
&=\gamma \sum_{k=1}^n k p(k) S_k^* 
 \Big( 2 -\frac{S_{k}^*}{S_{k}} - \frac{S_k}{S_k^*}\Big) 
 - \sum_{j=2}^m \frac{\langle k \rangle}{K_j} \Theta_j(\mathbf{i}_j(t,\cdot))\\
&\quad +\sum_{k=1}^n k^2 p(k) (S_k^* - S_k) \sum_{j=2}^m 
 \Theta_j (\mathbf{i}_j(t,\cdot)) 
 + \sum_{j=2}^m \sum_{k=1}^n k^2 p(k) S_k \Theta_j(\mathbf{i}_j(t,\cdot))\\
&\quad + \sum_{k=1}^n k^2 p(k) \frac{S_k^* }{\langle k \rangle} 
 \sum_{l=1}^n l p(l) \int_0^\infty \beta_1(a) i_{1l}^* (a)
\Big[ - g\Big( \frac{S_k^*}{S_k} \Big) 
 - g\Big( \frac{S_k i_{1k}^*(0)i_{1l}}{S_k^* i_{1k}(t,0) i_{1l}^*(a)} \Big) \Big]
  {\rm d}a \\
&=\gamma \sum_{k=1}^n k p(k) S_k^* ( 2 -\frac{S_{k}^*}{S_{k}}
 - \frac{S_k}{S_k^*}) +\langle k \rangle \sum_{j=2}^m ( \frac{1}{K_1} 
 - \frac{1}{K_j} ) \Theta_j(\mathbf{i}_j(t,\cdot))\\
&\quad + \sum_{k=1}^n k^2 p(k) \frac{S_k^* }{\langle k \rangle} 
 \sum_{l=1}^n l p(l) \int_0^\infty \beta_1(a) i_{1l}^*(a)
\Big[ - g\Big( \frac{S_k^*}{S_k} \Big) 
 - g\Big( \frac{S_k i_{1k}^*(0)i_{1l}}{S_k^* i_{1k}(t,0) i_{1l}^*(a)} \Big) \Big]
  {\rm d}a.
\end{align*}
Since $\mathcal{ R}_{10} > \mathcal{ R}_{j0}$ for all 
$j \in \mathbb{M} \setminus \{ 1 \}$, we have $K_1 > K_j$ for all 
$j \in \mathbb{M} \setminus \{ 1 \}$. Hence, we see that $V' \leq 0$, 
and thus, the alpha limit set of $\mathbf{Y}(\cdot)$ must be contained in 
$\tilde{\mathcal{ M}}$, which is the largest invariant subset of
 $\{ (i_{jk})_{(j,k) \in \mathbb{M} \times \mathbb{N}_n} \in \Omega_1  : V' = 0 \}$. 
The equality $V' =0$ holds if and only if
\begin{equation}
\begin{gathered}
S_k(t)=S_{k}^* \quad \forall   k \in \mathbb{N}_n,  \\
\frac{i_{1l}(t,a)}{i^*_{1l}(a)}=\frac{i_{1l}(t-a,0)}{i^*_{1l}(0)}
=\frac{i_{1k}(t,0)}{i^*_{1k}(0)} \quad \forall k,l \in \mathbb{N}_n, \\
i_{jk}(t,a)=0 \quad \forall  j \in \mathbb{M} 
\setminus \{ 1 \},  k \in \mathbb{N}_n.
\end{gathered}
\end{equation}
Thus, we can conclude that $\tilde{\mathcal{ M}} = \{ E_1^* \}$. 
Since $V(\cdot)$ is non-increasing, $0 \leq V(\mathbf{Y}(t)) $ $\leq V(E_1^*) = 0$ 
for all $t \in \mathbb{R}$. This implies that $\mathcal{ A}_1 = \{ E_1^* \}$, 
and therefore, the strain 1 dominant equilibrium $E_1^*$ is globally 
asymptotically stable. This completes the proof.
\end{proof}

Note that the discussion in this section still holds even if we assume 
that the reproduction number $\mathcal{ R}_{j0}$ for any strain 
$j \in \mathbb{M} \setminus \{ 1 \}$ is the largest. In conclusion, 
Theorem \ref{thm2.2} implies that the competitive exclusion can occur 
in our general model on complex networks in a sense that only one strain 
with the largest reproduction number survives.

\section{Numerical simulation}\label{sec:num}

\subsection{Two-strain case}\label{ssec:two}
In this section, we perform numerical simulation to illustrate our theoretical
 results. We first consider the two-strain case, that is, 
$\mathbb{M}=\{ 1, 2 \}$. We assume that the maximum degree of the network 
is $15$, that is, $\mathbb{N}_n = \mathbb{N}_{15} = \{ 1,2,\dots, 15 \}$. 
In this case, $p(k)=N_k/N=1/15$. We fix the following parameters.
\begin{equation}\label{para}
\gamma_1(a)=\gamma_2(a)=\gamma=2, \quad
\beta_1(a) = \beta_1 ( 1+\sin a ), \quad
\beta_2(a) = \beta_2 {\rm e}^{-a}, \quad a \geq 0,
\end{equation}
where $\beta_1$ and $\beta_2$ are positive constants. 
Note that these parameters satisfy Assumptions \ref{assum2.1}, \ref{assum5.1} 
and \ref{assum5.2}. In the numerical simulation, we assume that there exists 
a maximum age $a_\dagger=10$. This choice seems reasonable as the survival
 probability at $a_\dagger$ is almost zero 
(${\rm e}^{-\gamma a_\dagger} = {\rm e}^{-20} \approx 2.0612 \times 10^{-9}$). 
Let us define the total number of nodes infected by strain $1$ and $2$ by
\begin{equation}\label{I}
I_{1k}(t) =\int_0^{a_\dagger} i_{1k}(t,a) {\rm d}a ,\quad
I_{2k}(t) =\int_0^{a_\dagger} i_{2k}(t,a) {\rm d}a, \quad 
t \geq 0, \; k \in \mathbb{N}_{15},
\end{equation}
respectively. The initial condition is chosen as, for $a \in [0,a_\dagger]$,
\[
I_{1k}(0) = \frac{X}{2}, \quad
I_{2k}(0)=\frac{X}{2}, \quad
i_{1k}(0,a)= \frac{I_{1k}(0)}{a_\dagger}, \quad
i_{2k}(0,a) = \frac{I_{2k}(0)}{a_\dagger}, \quad
 k \in \mathbb{N}_{15},
\]
where $X$ denotes a uniform random variable on $(0,1)$.

First, we set $\beta_1=0.13$ and $\beta_2=0.27$. 
In this case, we have $\mathcal{R}_{10}\approx 0.9403 < 1$ and 
$\mathcal{R}_{20} \approx 0.9299< 1$, and hence,
 $\mathcal R_0=\mathcal R_{10}<1$. We see from Theorem \ref{thmzz3.1}
 that the disease-free equilibrium $E_0$ is globally asymptotically stable. 
In fact, Figure \ref{fig1} (a) shows that both of the numbers of nodes
infected by strain $1$ ($I_{1k}(t)$, $k \in \mathbb{N}_{15}$) and strain 
$2$ ($I_{2k}(t)$, $k \in \mathbb{N}_{15}$) converge to zero as time evolves.

\begin{figure}[ht]
\begin{center}
\includegraphics[width=.4\textwidth]{fig1a} \\(a) \\
\includegraphics[width=.4\textwidth]{fig1b} \quad 
 \includegraphics[width=.4\textwidth]{fig1c} \\
(b) \hfil (c)
\end{center}
\caption{Time variation of $I_{1k}(t)$ (red) and $I_{2k}(t)$ (blue), 
$k \in \mathbb{N}_{15}$. (a) $\beta_1=0.13$ and $\beta_2=0.27$ 
($\mathcal{R}_0 = \mathcal{R}_{10} \approx 0.9403 < 1$). 
(b) $\beta_1=0.17$ and $\beta_2=0.34$ 
($\mathcal{R}_0=\mathcal{R}_{10} \approx 1.2296 > 
\mathcal{R}_{20} \approx 1.1709> 1$). (c) $\beta_1=0.16$ and 
$\beta_2=0.36$ ($\mathcal{R}_0=\mathcal{R}_{20} 
\approx 1.2398 > \mathcal{R}_{10} \approx 1.1573 > 1$).}
 \label{fig1}
\end{figure}

Second, we set $\beta_1=0.17$ and $\beta_2=0.34$. In this case, we have 
$\mathcal{R}_{10}\approx 1.2296 > 1$ and $\mathcal{R}_{20} \approx 1.1709 > 1$, 
and hence, $\mathcal R_0=\mathcal R_{10}>1$. We see from Theorem \ref{thm2.2} 
that the strain $1$ dominant equilibrium $E_1^*$ is globally asymptotically 
stable. In fact, Figure \ref{fig1} (b) shows that the numbers of nodes infected
by strain $1$ converge to positive values as time evolves, whereas the numbers 
of nodes infected by strain $2$ converge to zero as time evolves.

Finally, we set $\beta_1=0.16$ and $\beta_2=0.36$. In this case, we have 
$\mathcal{R}_{10}\approx 1.1573 > 1$ and $\mathcal{R}_{20} \approx 1.2398 > 1$, 
and hence, $\mathcal{R}_0 = \mathcal{R}_{20} > 1$. From Theorem \ref{thm2.2} 
and the last argument in Section \ref{sec:dom}, we see that the strain $2$ 
dominant equilibrium is globally asymptotically stable in this case. In fact,
 Figure \ref{fig1} (c) shows that the numbers of nodes infected by strain 
$1$ converge to zero as time evolves, whereas the numbers of nodes infected 
by strain $2$ converge to positive values as time evolves.

In conclusion, all examples in Figure \ref{fig1} illustrate our theoretical 
results, and the competitive exclusion occurs in Figure \ref{fig1} (b)-(c).

\subsection{Three-strain case}\label{ssec:three}

We next consider the three-strain case; that is, $\mathbb{M}=\{ 1,2,3 \}$. 
We assume that the maximum degree of the network is $10$, that is, 
$\mathbb{N}_n = \mathbb{N}_{10} = \{ 1,2,\dots, 10 \}$. 
In this case, $p(k)=N_k/N=1/10$. In addition to \eqref{para} and \eqref{I}, 
we fix the following parameters.
\begin{gather*}
\gamma_3(a)=\gamma=2, \quad \beta_3(a) = \beta_3 \frac{a}{1+a}, \quad a \geq 0,  \\
I_{3k}(t) := \int_0^{a_\dagger} i_{3k}(t,a) {\rm d}a, \quad t \geq 0, \;
 k \in \mathbb{N}_{10},
\end{gather*}
where $\beta_3$ is a positive constant, and $a_\dagger$ is fixed to be $10$ 
as in Section \ref{ssec:two}. Note that $\gamma_3(a)$ and $\beta_3(a)$ 
satisfy Assumptions \ref{assum2.1}, \ref{assum5.1} and \ref{assum5.2}. 
The initial condition is chosen as follows.
\begin{gather*}
 I_{1k}(0)=\frac{X}{3}, \quad I_{2k}(0)=\frac{X}{3}, \quad
I_{3k}(0)=\frac{X}{3}, \quad k \in \mathbb{N}_{10},  \\
i_{1k}(0,a)= \frac{I_{1k}(0)}{a_\dagger}, \quad 
i_{2k}(0,a)= \frac{I_{2k}(0)}{a_\dagger}, \quad
i_{3k}(0,a)= \frac{I_{3k}(0)}{a_\dagger}, \quad a \in [0,a_\dagger], \;
 k \in \mathbb{N}_{10}.
\end{gather*}

First, we set $\beta_1=0.2$, $\beta_2=0.41$ and $\beta_3=0.97$. 
In this case, we have $\mathcal{R}_{10} \approx 0.9799 < 1$, 
$\mathcal{R}_{20} \approx 0.9565 < 1$ and $\mathcal{R}_{30} \approx 0.9416 < 1$, 
and hence, $\mathcal{R}_0 = \mathcal{R}_{10} < 1$. From Theorem \ref{thmzz3.1}, 
we see that the disease-free equilibrium $E_0$ is globally asymptotically stable 
in this case. In fact, Figure \ref{fig2} (a) shows that all of the numbers of 
infected nodes converge to zero as time evolves.

\begin{figure}[ht]
\begin{center}
\includegraphics[width=.4\textwidth]{fig2a} \quad
\includegraphics[width=.4\textwidth]{fig2b} \\
(a) \hfil (b) \\
 \includegraphics[width=.4\textwidth]{fig2c} \quad
 \includegraphics[width=.4\textwidth]{fig2d} \\
(c) \hfil (d)
\end{center}
\caption{Time variation of $I_{1k}(t)$ (red), $I_{2k}(t)$ (blue) and 
$I_{3k}(t)$ (green), $k \in \mathbb{N}_{10}$. (a) $\beta_1=0.2$, 
$\beta_2=0.41$ and $\beta_3=0.97$ ($\mathcal{R}_0 
= \mathcal{R}_{10} \approx 0.9799 < 1$). (b) $\beta_1=0.25$, 
$\beta_2=0.5$ and $\beta_3=1.2$ ($\mathcal{R}_0=\mathcal{R}_{10} 
\approx 1.2249 > \mathcal{R}_{20} \approx 1.1665 > \mathcal{R}_{30} 
\approx 1.1648> 1$). (c) $\beta_1=0.24$, $\beta_2=0.52$ and 
$\beta_3=1.2$ ($\mathcal{R}_0=\mathcal{R}_{20} \approx 1.2132 
> \mathcal{R}_{10} \approx 1.1759> \mathcal{R}_{30} \approx 1.1648 > 1$).
 (d) $\beta_1=0.24$, $\beta_2=0.5$ and $\beta_3=1.3$ 
($\mathcal{R}_0=\mathcal{R}_{30} \approx 1.2619> \mathcal{R}_{10} \approx 1.1759> 
\mathcal{R}_{20} \approx 1.1165> 1$).}  \label{fig2}
\end{figure}

Second, we set $\beta_1=0.25$, $\beta_2=0.5$ and $\beta_3=1.2$. 
In this case, we have $\mathcal{R}_{10} \approx 1.2249 > 1$, 
$\mathcal{R}_{20} \approx 1.1665 > 1$ and $\mathcal{R}_{30} \approx 1.1648 > 1$, 
and hence, $\mathcal{R}_0 = \mathcal{R}_{10} > 1$. From Theorem \ref{thm2.2},
 we see that the strain $1$ dominant equilibrium $E_1^*$ is globally 
asymptotically stable in this case. In fact, Figure \ref{fig2} (b) shows 
that the numbers of nodes infected by strain $1$ converge to positive 
values as time evolves, whereas the numbers of nodes infected by other 
strains converge to zero as time evolves.

Third, we set $\beta_1=0.24$, $\beta_2=0.52$ and $\beta_3=1.2$. 
In this case, we have $\mathcal{R}_{10} \approx 1.1759 > 1$, 
$\mathcal{R}_{20} \approx 1.2132 > 1$ and $\mathcal{R}_{30} \approx 1.1648 > 1$, 
and hence, $\mathcal{R}_0=\mathcal{R}_{20} > 1$. From Theorem \ref{thm2.2} 
and the last argument in Section \ref{sec:dom}, we see that the strain $2$ 
dominant equilibrium is globally asymptotically stable in this case. 
In fact, Figure \ref{fig2} (c) shows that the numbers of nodes infected 
by strain $2$ converge to positive values as time evolves, whereas the numbers 
of nodes infected by other strains converge to zero as time evolves.

Finally, we set $\beta_1=0.24$, $\beta_2=0.5$ and $\beta_3=1.3$. 
In this case, we have $\mathcal{R}_{10} \approx 1.1759 > 1$, 
$\mathcal{R}_{20} \approx 1.1665 > 1$ and $\mathcal{R}_{30} \approx 1.2619 > 1$, 
and hence, $\mathcal{R}_0 = \mathcal{R}_{30} > 1$. From Theorem \ref{thm2.2} 
and the last argument in Section \ref{sec:dom}, we see that the strain $3$ 
dominant equilibrium is globally asymptotically stable in this case. 
In fact, Figure \ref{fig2} (d) shows that the numbers of nodes infected by 
strain $3$ converge to positive values as time evolves, whereas the numbers 
of nodes infected by other strains converge to zero as time evolves.
In conclusion, all examples in Figure \ref{fig2} illustrate our theoretical 
results, and the competitive exclusion occurs in Figure \ref{fig2} (b)-(d).

\section{Discussion}\label{sec:dis}

In this paper, we have constructed an infection age-structured multi-strain 
SIS epidemic model \eqref{eqn2.3} on complex networks. We have defined the 
reproduction numbers $\mathcal{ R}_{j0}$ for each strain $j \in \mathbb{M}$ by 
using the classical theory of renewal equations, and defined the basic 
reproduction number $\mathcal{ R}_0$ for the whole system by the maximum 
$\mathcal{ R}_0 = \max \{ \mathcal{ R}_{j0} \}_{j \in \mathbb{M}} 
= \max \{ \mathcal{ R}_{10}, \mathcal{ R}_{20}, \dots, \mathcal{ R}_{m0} \}$ 
of them. We have proved the asymptotic smoothness of solution semiflow $\Phi$ 
(see Proposition \ref{prop:as}) and the existence of a compact attractor 
$\mathcal{ A}$ (see Proposition \ref{prop2.5}), which are needed for the global 
stability analysis in Sections \ref{sec:dfe} and \ref{sec:dom}. 
We have proved that if $\mathcal{ R}_0 < 1$, then the disease-free equilibrium 
$E_0 = (0,0,\dots,0) \in \Omega$ of system \eqref{eqn2.3'} is globally 
asymptotically stable (see Theorem \ref{thmzz3.1}), whereas if 
$\mathcal{ R}_0 = \mathcal{ R}_{10} > 1$, then the strain $1$ dominant 
equilibrium $E_1^* = (i_{11}^*(\cdot),i_{12}^*(\cdot), \dots, i_{1n}^*(\cdot), 
0, \dots, 0) \in \Omega$ exists (see Theorem \ref{thmm2.2}). 
Moreover, under the additional assumption that the recovery rate is homogeneous 
(see Assumption \ref{assum5.2}), we have proved that if 
$\mathcal{ R}_0 = \mathcal{ R}_{10} > 1$, then the strain $1$ 
dominant equilibrium $E_1^*$ is globally asymptotically stable 
(see Theorem \ref{thm2.2}). For the proof, we have constructed the Lyapunov 
function $V_{1k}(\cdot)$, $k \in \mathbb{N}_n$, which is bounded by virtue 
of the uniform $\rho_1$-persistence of system \eqref{eqn2.3'} 
(see Proposition \ref{prop5.7}).

Since the discussion in Section \ref{sec:dom} still holds even if we assume 
that $\mathcal{R}_0 = \mathcal{ R}_{j0} > 1$ for any strain 
$j \in \mathbb{M} \setminus \{ 1\}$, our theoretical results imply that the 
competitive exclusion can occur in our model in the sense that only one strain 
with the largest reproduction number survives. Numerical examples in 
Section \ref{sec:num} have supported this statement for the two-strain case 
(see Figure~\ref{fig1}) and the three-strain case (see Figure~\ref{fig2}). 
From our theoretical results, we can conjecture that the complex network 
structure and the infection age structure may not essentially affect the 
occurrence of the competitive exclusion in multi-strain epidemic models. 
However, to prove Theorem \ref{thm2.2}, we have needed the additional assumption 
that the recovery rate is homogeneous, and it will be left as an open problem 
whether the competitive exclusion can still occur even when the recovery 
rate is given by a general function $\gamma_j(a)$ for all $a \geq 0$ and 
$j \in \mathbb{M}$.

From previous studies, we can conjecture that mechanisms such as mutation \cite{WF}, 
reinfection \cite{MP} and superinfection \cite{WSL,IML} can lead to the 
coexistence of multiple strains in our model. As they will make the model
 more difficult to analyze, these topics will also be left as open problems, 
which are important from both the mathematical and biological points of view.

\subsection*{Acknowledgments} 
The authors would like to thank the handling editor and the anonymous reviewer 
for their careful reading of our manuscript. 
J. Yang was supported by the National Natural Science Foundation 
(No. 61573016, No. 61203228) and by the Shanxi Scholarship Council of China 
(No. 2015094). 
T. Kuniya was supported by Grant-in-Aid for Young Scientists (B) of 
Japan Society for the Promotion of Science (No. 15K17585) and by the program 
of the Japan Initiative for Global Research Network on Infectious Diseases 
(J-GRID), from Japan Agency for Medical Research and Development, AMED.

\begin{thebibliography}{99}

\bibitem{AM} R. M. Anderson, R. M. May;
\emph{Infectious diseases of humans: dynamics and control},
Oxford University Press, Oxford, 1991.

\bibitem{BT} H. J. Bremermann, H. R. Thieme;
\emph{A competitive exclusion principle for pathogen virulence},
J. Math. Biol., 27 (1989), 179--190.

\bibitem{CLM} L. Cai, X. Z. Li, M. Martcheva;
\emph{Competitive exclusion in a vector-host epidemic model},
J. Biol. Dynam., 7 (2013), 47--67.

\bibitem{BHG} B. D. Dalziel, K. Huang, J. L. Geoghegan, N. Arinaminpathy, 
E. J. Dubovi, B. T. Grenfell, S. P. Ellner, E. C. Holmes, C. R. Parrish;
\emph{Contact heterogeneity, rather than transmission efficiency, 
limits the emergence and spread of canine influenza virus}, 
PLoS Pathog., 10 (2014), e1004455. \url{https://doi.org/10.1371/journal.ppat.1004455}

\bibitem{DLM} Y. X. Dang, X. Z. Li, M. Martcheva;
\emph{Competitive exclusion in a multi-strain immuno-epidemiological flu 
model with environment transmission}, J. Biol. Dynam., 10 (2016), 416--456.

\bibitem{DHM} O. Diekmann, J. A. P. Heesterbeek, J. A. J. Metz;
\emph{On the definition and the computation of the basic reproduction ratio 
$R_0$ in models for infectious diseases in heterogeneous populations},
J. Math. Biol., 28 (1990), 365--382.

\bibitem{DW} P. van den Driessche, J. Watmough;
\emph{Reproduction numbers and sub-threshold endemic equilibria for compartmental 
models of disease transmission}, Math. Biosci., 180 (2002), 29--48.

\bibitem{EK} M. Eichner, K. Dietz;
\emph{Transmission potential of smallpox: estimates based on detailed data from 
an outbreak}, Am. J. Epidemiol., 158 (2003), 110--117.

\bibitem{GFG} G. F. Gause;
\emph{The struggle for existence}, Williams and Wilkins, Baltimore, 1936.

\bibitem{JKH} J. K. Hale;
\emph{Asymptotic behavior of dissipative systems}, AMS, Providence, 1988.

\bibitem{IML} M. Iannelli, M. Martcheva, X. Z. Li;
\emph{Strain replacement in an epidemic model with super-infection and perfect 
vaccination}, Math. Biosci., 195 (2005), 23--46.

\bibitem{IM} M. Iannelli, F. Milner;
\emph{The basic approach to age-structured population dynamics: 
models, methods and numerics}, Springer, Dordrecht, The Netherlands, 2017.

\bibitem{HI2} H. Inaba;
\emph{Age-structured population dynamics in demography and epidemiology},
Springer, Singapore, 2017.

\bibitem{KMS} I. Z. Kiss, J. C. Miller, P. L. Simon;
\emph{Mathematics of epidemics on networks: from exact to approximate models},
Springer International Publishing AG, 2017.

\bibitem{MM} M. Martcheva;
\emph{An introduction to mathematical epidemiology}, Springer, New York, 2015.

\bibitem{ML} M. Martcheva, X. Z. Li;
\emph{Competitive exclusion in an infection-age structured model with environmental 
transmission}, J. Math. Anal. Appl., 408 (2013), 225--246.

\bibitem{MP} M. Martcheva, S. S. Pilyugin;
\emph{An epidemic model structured by host immunity},
J. Biol. Syst., 14 (2006), 185--203.

\bibitem{M} C. C. McCluskey;
\emph{Global stability for an SEI epidemiological model with continuous age-structure 
in the exposed and infectious classes},
Math. Biosci. Eng., 9 (2012), 819--841.

\bibitem{NE} H. Nishiura, M. Eichner;
\emph{Infectiousness of smallpox relative to disease age: estimates based on 
transmission network and incubation period},
Epidemiol. Infect., 135 (2007), 1145--1150.

\bibitem{PV1} R. Pastor-Satorras, A. Vespignani;
\emph{Epidemic spreading in scale-free networks},
Phys. Rev. Lett., 86 (2001), 3200--3203.

\bibitem{ST} H. L. Smith, H. R. Thieme;
\emph{Dynamical systems and population persistence},
AMS, Providence, 2011.

\bibitem{JAW} J. A. Walker;
\emph{Dynamical systems and evolution equations},
Plenum Press, New York and London, 1980.

\bibitem{GFW} G. F. Webb;
\emph{Theory of nonlinear age-dependent population dynamics},
Marcel Dekker, New York and Basel, 1985.

\bibitem{WF} Q. C. Wu, X. C. Fu;
\emph{Dynamics of competing strains with saturated infectivity and mutation 
on networks}, J. Biol. Syst., 24 (2016), 1--17.

\bibitem{WFY} Q. C. Wu, X. C. Fu, M. Yang;
\emph{Epidemic thresholds in a heterogenous population with competing strains},
Chin. Phys. B, 20 (2011), 046401.

\bibitem{WSL} Q. C. Wu, M. Small, H. Liu;
\emph{Superinfection behaviors on scale-free networks with competing strains},
J. Nonlinear Sci., 23 (2013), 113--127.

\bibitem{YCX} J. Y. Yang, Y. M. Chen, F. Xu;
\emph{Effect of infection age on an SIS epidemic model on complex networks},
J. Math. Biol., 73 (2016), 1227--1249.

\end{thebibliography}

\end{document}

