\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2011 (2011), No. 159, pp. 1--14.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu}
\thanks{\copyright 2011 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2011/159\hfil Second-order differential inclusions]
{Stability of second-order differential inclusions}

\author[H. Gonz\'alez\hfil EJDE-2011/159\hfilneg]
{Henry Gonz\'alez}

\address{Henry Gonz\'alez \newline
Faculty of light industry and environmental protection engineering, 
Obuda University,
1034 Budapest, B\'ecsi \'ut 96/B, Hungary}
\email{gonzalez.henry@rkk.uni-obuda.hu}

\thanks{Submitted January 31, 2011. Published November 28, 2011.}
\subjclass[2000]{93D09, 34A60}
\keywords{Robust stability; stability radius; differential inclusions}

\begin{abstract}
 For an arbitrary second-order stable matrix $A$, we calculate
 the maximum positive value $R$ for which the differential inclusion
 \[
 \dot{x}\in F_{R}(x):=\{(A+\Delta)x, \Delta \in \mathbb{R}^{2\times 2},
 \|\Delta \| \leq R \}
 \]
 is asymptotically stable.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\allowdisplaybreaks

\section{Introduction}\label{sec.int}

Let $A$ be a second-order stable matrix
(all the eigenvalues of $A$ have negative real part)
and $R$ be a positive real number. For each vector $x$ in the
plane we consider the set of vectors
\begin{equation}
F_{R}(x):=\{(A+\Delta)x: \Delta \in \mathbb{R}^{2\times 2},\;
\|\Delta \| \leq R \} ,  \label{set}
\end{equation}
where $\|\cdot \|$ denotes the operator norm of a matrix.
The objective of  this work is to study the global asymptotical
stability (g.a.s.) of the parameter-dependent differential
inclusion
\begin{equation}
\dot{x} \in F_{R}(x) . \label{inclusion}
\end{equation}
The main task is computing  the number
\begin{equation}
R_{i} (A)=\inf \{ R>0: \dot{x} \in F_{R}(x)
\text{ is not g.a.s.}\}. \label{incstabradius}
\end{equation}
This number  is closely related to the robustness
of stability of the linear system $\dot{x} =Ax$,
 under unstructured real time-varying and nonlinear perturbations.
As in \cite{Hin1} we consider the perturbed systems of the following
types:
\begin{equation}
\begin{gathered}
\Sigma_{\Delta}:\quad \dot{x}(t)=Ax(t)+\Delta x(t)\\
\Sigma_{N} :\quad \dot{x}(t)=Ax(t)+N(x(t))\\
\Sigma_{\Delta (t)} :\quad \dot{x}(t)=Ax(t)+\Delta (t) x(t)\\
\Sigma_{N(t)} :\quad \dot{x}(t)=Ax(t)+N(x(t),t),
\end{gathered} \label{perturbedsystems}
\end{equation}
where
\begin{itemize}
\item $\Delta \in \mathbb{R}^{2\times 2}$;

\item $N:\mathbb{R}^2 \to \mathbb{R}^2$, $N(0)=0$, $N$ is
differentiable at $0$, is locally Lipschitz and
there exists $\gamma \geq 0$ such that $\| N(x)\| \leq \gamma \| x \|$
for all $x\in \mathbb{R}^2$;

\item $\Delta (\cdot) \in L^{\infty}(\mathbb{R}_+ , \mathbb{R}^{2\times 2})$;

\item $N(\cdot,\cdot):\mathbb{R}^2 \times \mathbb{R}_{+}\to \mathbb{R}^2$,
$N(0,t)=0$ for all $t\in \mathbb{R}_{+}$, $N(x,t)$ is locally Lipschitz
in $x$, continuous in $t$, and there exists $\gamma \geq 0$ such that
$\| N(x,t)\| \leq \gamma \| x \|$ for all $x\in \mathbb{R}^2$, $t\in
\mathbb{R}_{+}$.
\end{itemize}
The corresponding sets of perturbations are denoted by
$\mathbb{R}^{2\times 2}, P_n (\mathbb{R}), P_t (\mathbb{R})$,
$P_{nt} (\mathbb{R})$ respectively. As perturbation norms we choose
\begin{itemize}
\item $\| \Delta \|$ is the operator norm of the matrix;

\item $\| N\|_n =\inf \{ \gamma > 0; \forall x\in \mathbb{R}^2:
\| N(x)\| \leq \gamma \| x \| \}$, $N\in P_n (\mathbb{R})$;

\item $\| \Delta \|_t =\operatorname{ess\,sup}_{t\in \mathbb{R}_+}
\| \Delta (t) \|$, $\Delta \in P_t (\mathbb{R})$;

\item $\| N\|_{nt} =\inf \{ \gamma > 0; \forall t\in \mathbb{R}_+ \;
\forall x\in \mathbb{R}^2:
\| N(x,t)\| \leq \gamma \| x \| \}$, $N\in P_{nt} (\mathbb{R})$.
\end{itemize}
Following \cite{Hin1} (also  \cite{Hin2,Hin3}), we define the
 radii of stability for $A$ with respect to the considered
perturbations classes:
\begin{equation}
\begin{gathered}
R(A)=\inf \{ \| \Delta \|; \Delta \in \mathbb{R}^{2\times 2} ,
\Sigma_{\Delta} \text{ is not  g.a.s.} \}\\
R_n (A)=\inf \{ \| N \|; N \in P_n (\mathbb{R}), \Sigma_{N}
\text{ is not g.a.s.} \}\\
R_t (A)=\inf \{ \| \Delta \|_t; \Delta \in P_t (\mathbb{R}),
\Sigma_{\Delta} \text{ is not  g.a.s.} \}\\
R_{nt} (A)=\inf \{ \| N \|; N \in P_{nt} (\mathbb{R}),
\Sigma_{N} \text{ is not  g.a.s.} \}
\end{gathered}
\end{equation}
For the defined stability radii in \cite{Hin1} it has been shown that
\begin{equation}
R(A)\geq R_{n} (A) \geq R_{t} (A) \geq R_{nt} (A) . \label{radineq}
\end{equation}
In \cite{HinMot} it is proved that
\begin{equation}
R(A)=\min \big\{ \underline{\sigma}(A),-\frac{1}{2} \operatorname{tr}(A) \big\},
\label{rtimeinvariant}
\end{equation}
where $\underline{\sigma}(A)$ is the smallest singular value
and $\operatorname{tr}(A)$ is the trace of the matrix $A$.

In section \ref{sec.FilAppl}, we show that $R_{nt} (A)\geq R_i(A)$,
so that based on this fact, \eqref{rtimeinvariant}
and \eqref{radineq} we can restrict the analysis of the asymptotical
stability of differential inclusion \eqref{set}-\eqref{inclusion}
for $R<R(A)=\min\{-\operatorname{tr}(A)/2, \underline{\sigma}(A)\}$.
In section \ref{sec.FilAppl},  we prove that differential inclusion
\eqref{set}-\eqref{inclusion} becomes unstable through
a minimum norm perturbation of the class $P_n (\mathbb{R})$,
from which it follows that
\begin{equation}
R(A)\geq R_{n} (A) = R_{t} (A) = R_{nt} (A) = R_{i}(A). \label{igualdadderadios}
\end{equation}

The organization of the paper is as follows.
In section \ref{sec.Fil} we state a theorem of Filippov \cite{F}
about the asymptotical stability of differential inclusions,
which will help us to establish our results.
In section \ref{sec.FilAppl} we apply this theorem and obtain
conditions for the stability of our differential inclusion
\eqref{set}-\eqref{inclusion} in terms of two elliptic integrals
and we prove the relations \eqref{igualdadderadios}.
In section \ref{calculo de integrales} we reduce the elliptic
integrals to elementary functions and the complete elliptic
integral of the third kind, and in section \ref{general}
we give a characterization of the equality
$R_i(A)=R(A)$ which simplifies the calculation of the number $R_i(A)$.
In the last section we give examples which show
the applicability of the main results to the computation of $R_i(A)$
for arbitrary stable matrix $A$.
The results of this work are a continuation of the paper \cite{U},
where the real time-varying stability radius
of second-order linear systems is calculated taking as the
perturbation norm the Frobenius norm of a matrix.

\section{A Filippov's theorem}\label{sec.Fil}

In this section we state a theorem of Filippov \cite{F},
which will be the fundamental tool in the analysis of the stability
of differential inclusion \eqref{inclusiontheta}.
Let
\begin{equation}
\dot{x} \in F(x) ,\quad  x \in \mathbb{R}^2 \label{inclusionF}
\end{equation}
be a differential inclusion which satisfies the following properties:
\begin{itemize}
\item[(i)] For all $x$ the set $F(x)$ is non empty, bounded,
 closed and convex;
\item[(ii)] $F(x)$ is upper semi-continuous with respect to the
 set's inclusion as function of $x$;

\item[(iii)] $F(c x)=c F(x)$ for all $x$ and $c \geq 0$.

\end{itemize}
Let $\rho, \varphi $ be the polar coordinates of the point
$x=(x_1 ,x_2)$, then we can write $F(x)= \rho \widetilde F (\varphi)$
and differential inclusion \eqref{inclusionF} takes the form
\begin{gather*}
\frac{\dot{\rho} (t)}{\rho}=y_1(t) \\
\dot{\varphi} (t)=y_2(t),
\end{gather*}
where $(y_1(t),y_2(t)) \in \widetilde {F} (\varphi (t))$.

We will use the notation
\begin{gather*}
\widetilde {F}^{+} (\varphi):=\{(y_1 ,y_2)
\in \widetilde {F} (\varphi): y_2 >0 \}, \\
\widetilde {F}^{-} (\varphi):=\{(y_1 ,y_2) \in \widetilde {F} (\varphi): y_2 <0 \}.
\end{gather*}
For $\varphi$ such that $\widetilde {F}^{+} (\varphi)\ne \emptyset$
(respectively $\widetilde {F}^{-} (\varphi)\ne \emptyset$),
we put
\begin{equation}
K^{+}(\varphi):=\sup_{(y_1 ,y_2)\in \widetilde {F}^{+}
(\varphi)} \frac{y_1}{\|y_2 \|},\quad
\Big(\text{respectively }
K^{-}(\varphi):=\sup_{(y_1 ,y_2)\in \widetilde {F}^{-} (\varphi)}
\frac{y_1}{\|y_2 \|} \Big). \label{sup}
\end{equation}
By Filippov's Theorem, differential inclusion \eqref{inclusionF}
satisfying the conditions (i)-(iii) is asymptotically
stable if and only if for all $x \ne 0$ the set $F(x)$ does not
have common points with the ray $c x, 0 \leq c< +\infty$
and when the set $\widetilde {F}^{+} (\varphi)$
(respect. $\widetilde {F}^{-} (\varphi)$) for almost all $\varphi$ is
not empty, the inequality
\[
\int_{0}^{2 \pi} K^{+}(\varphi) d \varphi <0 \quad
\Big(\text{respect. } \int_{0}^{2 \pi} K^{-}(\varphi) d \varphi <0
\Big)
\]
 holds.

\section{Application of the Filippov's theorem}\label{sec.FilAppl}

From Definition \eqref{set} we have that for all $R>0$,
 the set $F_R (x)$ for all $x\in \mathbb{R}^2$ is non empty, bounded,
closed and convex in the plane, and  $F_R (x)$ is linear with
respect to $x$. So differential inclusion
\eqref{set}-\eqref{inclusion} satisfies
properties (i)-(iii) and  Filippov's Theorem can be applied.

The following lemma allows us to write the set $F_R (x)$ in the
form we will use it in the application of the Filippov's
theorem.

\begin{lemma} \label{lem1}
For all $R>0$ and $x\in \mathbb{R}^2$ it holds that
\[
\big\{ \Delta x,\Delta \in \mathbb{R}^{2\times 2},
\| \Delta \| \leq R \big\}=
\big\{ r \| x \|  \begin{pmatrix}\cos \theta \\ \sin \theta
\end{pmatrix}  : 0 \leq r \leq R; 0 \leq \theta < 2 \pi \big\}.
\]
\end{lemma}

\begin{proof}
 Let $z=\Delta x$, $\Delta \in \mathbb{R}^{2\times 2}$, $\| \Delta \| \leq R$;
then
$\|z\|=\|\Delta x\| \leq R\|x\|$.
Thus there exist $r$ with $0\leq r\leq R$ and $\theta \in [0,2\pi)$ such that
$z=r\|x\| \begin{pmatrix} \cos\theta \\ \sin \theta
\end{pmatrix}$, so we obtain that
$z\in  \{ r \| x \| \begin{pmatrix} \cos\theta \\ \sin \theta
\end{pmatrix}: 0 \leq r \leq R; 0 \leq \theta < 2 \pi \}$.

Let now $z=r\|x\| \begin{pmatrix} \cos\theta \\ \sin \theta
\end{pmatrix}$, $0 \leq r \leq R; 0 \leq \theta < 2 \pi$
then there exists $\widetilde \Delta \in \mathbb{R}^{2\times 2}$
such that $\widetilde \Delta x=r\|x\|
\begin{pmatrix} \cos\theta \\ \sin \theta \end{pmatrix}$
so $\| \widetilde \Delta x\|\leq R\|x\|$ and from the well
known theorem of Hahn-Banach
$\widetilde \Delta \in \mathbb{R}^{2\times 2}$ may be chosen such that
$\| \widetilde \Delta \| \leq R$. So we have:
$z=r\|x\| \begin{pmatrix} \cos\theta \\ \sin \theta
\end{pmatrix} \in  \{ \Delta x,\Delta \in \mathbb{R}^{2\times 2},
\| \Delta \| \leq R \}$.
\end{proof}

As a direct consequence of this lemma,
 the inclusion \eqref{set}-\eqref{inclusion} can be written in the form
\begin{equation}
\dot{x} \in \big\{ Ax+r\|x\|
\begin{pmatrix} \cos\theta \\  \sin \theta
\end{pmatrix} : 0 \leq r \leq R; 0 \leq \theta < 2 \pi\big \}
=F_R (x). \label{inclusiontheta}
\end{equation}
Changing in \eqref{inclusiontheta} to polar coordinates,
\begin{gather*}
\frac{\dot{\rho} (t)}{\rho}=y_1(t)  \\
\dot{\varphi} (t)=y_2(t),
(y_1 (t),y_2 (t))\in \widetilde {F}_{R} (\varphi)
\end{gather*}
\begin{gather*}
\widetilde {F}_{R} (\varphi)
:=\big\{(y_1 (\varphi, \theta, r),
 y_2 (\varphi, \theta, r)), 0 \leq r \leq R; 0 \leq \theta < 2 \pi
\big\} \\
y_1 (\varphi, \theta, r):=f_1(\varphi)+r \cos(\theta -\varphi) \\
y_2 (\varphi, \theta, r):=f_2(\varphi)+r \sin(\theta -\varphi),
\end{gather*}
where
\begin{gather}
f_1(\varphi):=a_{11} \cos^2 (\varphi)+(a_{12} +a_{21}) \sin(\varphi)
 \cos(\varphi)+a_{22} \sin^2 (\varphi), \label{f1} \\
f_2(\varphi):=a_{21} \cos^2 (\varphi)+(a_{22} -a_{11}) \sin(\varphi)
\cos(\varphi)-a_{12} \sin^2 (\varphi). \label{f2}
\end{gather}
Using trigonometrical identities we have:
\begin{gather}
f_1(\varphi)=m_1 +n \sin 2(\varphi - \chi), \label{f12}\\
f_2(\varphi)=m_2 +n \cos 2(\varphi - \chi), \label{f22}
\end{gather}
where
\begin{equation}
m_1=\frac{a_{11}+a_{22}}{2}, \quad
m_2=\frac{a_{21}-a_{12}}{2}, \quad
n=\sqrt{\Big(\frac{a_{11}-a_{22}}{2}\Big)^2+
\Big(\frac{a_{12}+a_{21}}{2}\Big)^2} \label{m1m2n}
\end{equation}
and
\[
\cos 2(\chi )=\frac{a_{12}+a_{21}}{2n}, \quad
\sin 2(\chi )=-\frac{a_{11}-a_{22}}{2n}.
\]
From expressions \eqref{f12}, \eqref{f22} it follows that:
\[
\min \{ f_2 (\varphi ), \varphi \in [0, 2\pi ) \}= m_2-n,  \quad
\max \{ f_2 (\varphi ), \varphi \in [0, 2\pi ) \}= m_2+n
\]
For the corresponding sets $\widetilde {F}^{+}(\varphi)$ and
$\widetilde {F}^{-}(\varphi)$ that appear in  Filippov's
theorem, we have
\begin{gather*}
\widetilde {F}^{+}_{R}(\varphi)=\{(y_1 ,y_2)
\in \widetilde {F}_R (\varphi): y_2 >0 \}, \\
\widetilde {F}^{-}_{R}(\varphi)=\{(y_1 ,y_2)
\in \widetilde {F}_R (\varphi): y_2 <0 \}.
\end{gather*}
Denote
\begin{equation}
\begin{gathered}
R^+ (A):=-\min \{0,\min f_2 (\varphi) \}=\max \{ 0,n-m_2 \},\\
R^- (A):=\max \{0,\max f_2 (\varphi) \}=\max \{ 0,n+m_2 \}.
\end{gathered} \label{rplusminus}
\end{equation}

\begin{lemma}\label{boundofR}
Let $R<R(A)$. Then
\begin{itemize}
\item[(a)] The set $F_R(x)$ does not have common points with the
 ray $c x, 0 \leq c< +\infty$ for all $x \ne 0$.

\item[(b)] The set $\widetilde {F}^{+}_{R}(\varphi) \ne \emptyset$
 for all $\varphi \in [0,2 \pi)$ if and only if $R\in (R^+ (A),R(A))$.

\item[(c)] The set $\widetilde {F}^{-}_{R}(\varphi) \ne \emptyset$ for
all $\varphi \in [0,2 \pi)$ if and only if $R\in (R^- (A),R(A))$.
\end{itemize}
\end{lemma}

\begin{proof}
(a) The set $F_R(x):=\{(A+\Delta)x, \Delta \in
\mathbb{R}^{2\times 2}, \|\Delta \| \leq R \}$, with $R<R(A)$ does not
have common points with the ray $c x, 0 \leq c< +\infty$ for all
$x \ne 0$ because the matrix $A+\Delta$ is stable
for $\| \Delta \|<R(A)$.

(b) $\widetilde {F}^{+}_{R}(\varphi) \ne \emptyset$ for all
$\varphi \in [0,2 \pi) $ if and only if for all
$\varphi \in [0,2 \pi) $  there is $\theta \in [0,2 \pi)$ such that
$f_2(\varphi)+r \sin(\theta -\varphi)>0$, and this is true if and
only if $f_2 (\varphi)+r>0$ for all $\varphi \in [0,2 \pi)$,
and so if and only if either $f_2(\varphi)\geq 0$ for all
$\varphi \in [0,2 \pi)$ or
$r>-\min \{ f_2 (\varphi), \varphi \in [0,2\pi) \}$, a condition
equivalent to assertion (b) of this lemma.

(c) $\widetilde {F}^{-}_{R}(\varphi) \ne \emptyset$ for all
$\varphi \in [0,2 \pi) $ if and only if for all $\varphi \in [0,2 \pi)$
there is $\theta \in [0,2 \pi)$ such that
$f_2(\varphi)+r \sin(\theta -\varphi)<0$, and this is true if and
only if $f_2 (\varphi)-r<0$ for all $\varphi \in [0,2 \pi)$, and
so if and only if either $f_2(\varphi)\leq 0$ for all
$\varphi \in [0,2 \pi)$ or
$r>\max \{ f_2 (\varphi), \varphi \in [0,2\pi) \}$, a condition equivalent
to assertion (c) of this lemma.
\end{proof}


We denote
\begin{equation}
K(\theta,\varphi,r)
:=\frac{f_1(\varphi)+r \cos(\theta -\varphi)}
{f_2(\varphi)+r \sin(\theta -\varphi)},\label{K}
\end{equation}
then for $R\in (R^+ (A),R(A))$  the function $K^{+}(\varphi)$
that appears in  Filippov's theorem can be written as
\begin{equation}
K^{+}_{R}(\varphi)=\sup_{(r, \theta)\in [0,R]\times [0, 2 \pi)}
\{ K(\theta,\varphi,r):f_2(\varphi)+r \sin(\theta -\varphi) >0\}.
\label{krplus1}
\end{equation}
Similarly for $R\in (R^- (A),R(A))$ the function $K^{-}(\varphi)$
 can be written as
\begin{equation}
K^{-}_{R}(\varphi)=\sup_{(r, \theta)\in [0,R]\times [0, 2 \pi)}
\{ -K(\theta,\varphi,r):f_2(\varphi)+r \sin(\theta -\varphi) <0\}.\label{krminus1}
\end{equation}

\begin{lemma}\label{Kfunctions}
(a) For $R\in (R^+ (A),R(A))$ we have
\begin{equation}
K^{+}_{R}(\varphi)=\frac{f_1(\varphi)\sqrt{f_1 ^2 (\varphi)
+ f_2 ^2 (\varphi)-R^2}+R f_2(\varphi)}{f_2(\varphi)
\sqrt{f_1 ^2 (\varphi)+ f_2 ^2 (\varphi)-R^2}-R f_1(\varphi)}.
\label{Kplus}
\end{equation}
(b) For $R\in (R^- (A),R(A))$ we have:
\begin{equation}
K^{-}_{R}(\varphi)=\frac{f_1(\varphi)\sqrt{f_1 ^2
(\varphi)+ f_2 ^2 (\varphi)-R^2}-R f_2(\varphi)}{-f_2(\varphi)
\sqrt{f_1 ^2 (\varphi)+ f_2 ^2 (\varphi)-R^2}-R f_1(\varphi)}.
\label{Kmin}
\end{equation}
\end{lemma}

\begin{proof}
 First, for arbitrary $R\in (R^+ (A),R(A))$, we prove \eqref{Kplus}.
Let $\varphi \in [0,2\pi)$ and $r\in [0,R]$ be given,
and let $\theta_0 \in [0,2\pi)$ be such that
$y_2 (\theta_0,\varphi,r)=0$. Then
$y_1(\theta_0,\varphi,r)<0$  and so the limit of $K(\theta,\varphi,r)$
for $\theta \to \theta_0$ and
$y_2(\theta,\varphi,r)>0$ is $-\infty$ and therefore for the
calculation of the supremum in \eqref{krplus1} we can consider
only points in the interior of the set $y_2 (\theta,\varphi,r)>0$.
So the supremum is taken for a value $\theta$ for which the partial
derivative of $K(\theta,\varphi,r)$ with respect to $\theta$ is zero.
From this condition after simplifications we obtain
\begin{equation}
f_2(\varphi)\sin(\theta-\varphi)+f_1(\varphi)\cos(\theta-\varphi)+r=0,
\end{equation}
and solving this equation for  $\sin(\theta-\varphi)$ and
$\cos(\theta-\varphi)$,
\begin{gather}
\sin(\theta-\varphi)=\frac{-rf_2(\varphi)}{f_1 ^2 (\varphi)+ f_2 ^2 (\varphi)}
\mp \frac{f_1(\varphi)\sqrt{f_1 ^2 (\varphi)
 + f_2 ^2 (\varphi)-r^2}}{f_1 ^2 (\varphi)+ f_2 ^2 (\varphi)}, \label{sintheta}\\
\cos(\theta-\varphi)=\frac{-rf_1(\varphi)}{f_1 ^2 (\varphi)+ f_2 ^2 (\varphi)}
\pm \frac{f_2(\varphi)\sqrt{f_1 ^2 (\varphi)
 + f_2 ^2 (\varphi)-r^2}}{f_1 ^2 (\varphi)+ f_2 ^2 (\varphi)}. \label{costheta}
\end{gather}
Substituting in the expression \eqref{K} of $K(\theta,\varphi,r)$
we obtain
\begin{equation}
K(\varphi,r)=\frac{f_1(\varphi)\sqrt{f_1 ^2 (\varphi)
 + f_2 ^2 (\varphi)-r^2} \pm rf_2(\varphi)}
{f_2(\varphi)\sqrt{f_1 ^2 (\varphi)+ f_2 ^2 (\varphi)-r^2}
\mp rf_1(\varphi)}. \label{Kplusmin}
\end{equation}
When the following inequalities hold:
${f_2(\varphi)\sqrt{f_1 ^2 (\varphi)+ f_2 ^2
(\varphi)-r^2}+rf_1(\varphi)}>0$ and
${f_2(\varphi)\sqrt{f_1 ^2 (\varphi)+ f_2 ^2
(\varphi)-r^2}-rf_1(\varphi)}>0$, from the two possible signs
in \eqref{Kplusmin}
by direct comparison we have that the maximum value of $K(\varphi,r)$
is
\begin{equation}
K(\varphi,r)=\frac{f_1(\varphi)\sqrt{f_1 ^2 (\varphi)
 + f_2 ^2 (\varphi)-r^2}+rf_2(\varphi)}
{f_2(\varphi)\sqrt{f_1 ^2 (\varphi)
 + f_2 ^2 (\varphi)-r^2}-rf_1(\varphi)},   \label{K+}
\end{equation}
and so, taking into account that, according to \eqref{krplus1},
the function  \eqref{K+} is a monotone increasing
function of $r$, we have the assertion \eqref{Kplus} of the lemma.
When one of the numbers
$$
f_2(\varphi)\sqrt{f_1 ^2 (\varphi)+ f_2 ^2 (\varphi)-r^2}
+rf_1(\varphi),\quad
f_2(\varphi)\sqrt{f_1 ^2 (\varphi)
+ f_2 ^2 (\varphi)-r^2}-rf_1(\varphi)
$$
is positive and the other negative then we have for the maximum of $K(\varphi,r)$:
\begin{equation}
K(\varphi,r)=\frac{f_1(\varphi)\sqrt{f_1 ^2 (\varphi)
+ f_2 ^2 (\varphi)-r^2} -rf_2(\varphi) \operatorname{sign}(f_1(\varphi))}
{f_2(\varphi)\sqrt{f_1 ^2 (\varphi)+ f_2 ^2 (\varphi)-r^2}
+r |{f_1(\varphi)}|},  \label{Kplusother}
\end{equation}
but in this case we have
\begin{align*}
&\Big( f_2(\varphi)\sqrt{f_1 ^2 (\varphi)+ f_2 ^2 (\varphi)-r^2}
 +rf_1(\varphi)\Big)
\Big( f_2(\varphi)\sqrt{f_1 ^2 (\varphi)
+ f_2 ^2 (\varphi)-r^2}
-rf_1(\varphi)\Big)\\
&=( f_2 ^2 (\varphi)-r^2 ) ( f_1 ^2 (\varphi)+ f_2 ^2 (\varphi))<0,
\end{align*}
and so $(f_2 (\varphi)-r)(f_2 (\varphi)+r)<0$, from which it follows
that there exists ${\widetilde r}\in (0,R)$ such that
$(f_2 (\varphi)+{\widetilde r})=0$ or
$(f_2 (\varphi)-{\widetilde r})=0$. We consider only the first case,
because the second case
can be analyzed in the same way. Then for
$\theta=\varphi+\frac{\pi}{2}$ we have
$(f_1 (\varphi)+{\widetilde r}\cos(\theta-\varphi),f_2(\varphi)
+{\widetilde r}\sin(\theta-\varphi))
=(f_1(\varphi),f_2 (\varphi)+{\widetilde r})=(f_1(\varphi),0)
\in \widetilde {F}_{R}(\varphi)$
with $R<R(A)$ and so, according to assertion (a) of
Lemma \ref{boundofR}, we have that $f_1(\varphi)<0$. But then
the expression \eqref{Kplusother} coincides with \eqref{K+} and
again we have the validity of \eqref{Kplus}. So we have proved
assertion (a) of the lemma. Assertion (b) follows
from \eqref{krminus1}
and the results obtained in the proof of part (a).
\end{proof}

\begin{theorem}\label{aplicacionFilippov}
The differential inclusion \eqref{inclusiontheta} depending on
the parameter $R$ is asymptotically stable if and only if
$R\in [0,R(A))$ and when $R\in (R^+ (A),R(A))$ (respectively
$R\in (R^- (A),R(A))$) the following inequality holds:
\begin{equation}
I^+(R):=\int_0^{2\pi}{\frac{f_1(\varphi)\sqrt{f_1 ^2
(\varphi)+ f_2 ^2 (\varphi)-R^2}+R f_2(\varphi)}
{f_2(\varphi)\sqrt{f_1 ^2 (\varphi)+ f_2 ^2 (\varphi)-R^2}
-R f_1(\varphi)}} d\varphi <0.
\end{equation}
respectively,
\begin{equation}
I^-(R):=\int_0^{2\pi}{\frac{f_1(\varphi)\sqrt{f_1 ^2 (\varphi)+ f_2 ^2 (\varphi)-R^2}-R f_2(\varphi)}
{-f_2(\varphi)\sqrt{f_1 ^2 (\varphi)+ f_2 ^2
(\varphi)-R^2}-R f_1(\varphi)}} d\varphi <0.
\end{equation}
\end{theorem}

The assertion of the above theorem follows directly as a consequence
of  Filippov's Theorem and Lemmas \ref{boundofR} and
\ref{Kfunctions}.


\noindent\textbf{Remark.}
For $R \in (R^+ (A),R(A))$ and arbitrary vector $x$ in the plane,
using the expressions \eqref{sintheta}  \eqref{costheta} we denote
\begin{gather}
v_1^+ (x) :=\frac{-R f_2(\varphi(x))}{f_1 ^2 (\varphi(x))
+ f_2 ^2 (\varphi(x))}
- \frac{f_1(\varphi(x))\sqrt{f_1 ^2 (\varphi(x))
 + f_2 ^2 (\varphi(x))-R^2}}{f_1 ^2 (\varphi(x))
 + f_2 ^2 (\varphi(x))}, \label{v1+}\\
v_2^+ (x):=\frac{-R f_1(\varphi(x))}{f_1 ^2 (\varphi(x))
 + f_2 ^2 (\varphi(x))}+ \frac{f_2(\varphi(x))\sqrt{f_1 ^2 (\varphi(x))
 + f_2 ^2 (\varphi(x))-R^2}}{f_1 ^2 (\varphi(x))+ f_2 ^2 (\varphi(x))},
 \label{v2+}
\end{gather}
where $\varphi(x)$ is the angle between the vector $x$ and the first
axis of the original coordinate system.
Calculating $\sin \theta$ and $\cos \theta$ from the equalities
$\cos(\theta-\varphi(x))=v_1^+ (x)$,
$\sin(\theta-\varphi(x))=v_2^+ (x)$ and substituting them in the
expression \eqref{inclusiontheta}, we obtain a second-order
nonlinear but homogeneous system whose solutions are solutions of
differential inclusion \eqref{inclusiontheta}:
\begin{equation}
\dot{x}= Ax+R
\begin{pmatrix}
v_{1}^+(x)&-v_{2}^+(x)\\
v_{2}^+(x)&v_{1}^+(x)
\end{pmatrix} x.\label{extremalsystem+}
\end{equation}
This  system has as trajectories spirals which turn around the
origin in positive sense and the value of the
integral $I^+(R)$ is the Ljapunov exponent of the solutions of
this system (note that the homogeneity of the system and the
rotation of the solutions around the origin imply that all
solutions of the system have the same Ljapunov exponent).
So the condition $I^+(R)<0$ is true if and only if the
system \eqref{extremalsystem+} is asymptotically stable.
We will name the system \eqref{extremalsystem+} the positive
extremal system of differential inclusion \eqref{inclusiontheta}.
For every stable matrix $A\in \mathbb{R}^{2\times 2}$ the positive extremal
system is the perturbation of the nominal linear system $\dot{x}=Ax$
with the nonlinear perturbation
\[
N_R^+ (A,x):=R
\begin{pmatrix}
v_{1}^+(x)&-v_{2}^+(x)\\
v_{2}^+(x)&v_{1}^+(x)
\end{pmatrix} x.
\]
Note that the perturbation  $N_R^+ (A,x)$ is of the class
$P_n (\mathbb{R})$ defined in the introduction of this work,
and that according to \eqref{v1+}, \eqref{v2+} and
\eqref{sintheta}, \eqref{costheta} for all $x$ the matrix
$\begin{pmatrix}
v_{1}^+(x)&-v_{2}^+(x)\\
v_{2}^+(x)&v_{1}^+(x)
\end{pmatrix}$
is an orthogonal matrix, from which it follows that the perturbation
$N_R^+ (A,x)$ has norm equal to $R$.

Similarly, for $R \in (R^- (A),R(A))$ and an arbitrary vector $x$
in the plane, we denote
\begin{gather*}
v_1^- (x) :=\frac{R f_2(\varphi(x))}{f_1 ^2 (\varphi(x))
 + f_2 ^2 (\varphi(x))}
 - \frac{f_1(\varphi(x))\sqrt{f_1 ^2 (\varphi(x))
 + f_2 ^2 (\varphi(x))-R^2}}{f_1 ^2 (\varphi(x))
 + f_2 ^2 (\varphi(x))}, \\
v_2^- (x):=\frac{R f_1(\varphi(x))}{f_1 ^2 (\varphi(x))
 + f_2 ^2 (\varphi(x))}
 + \frac{f_2(\varphi(x))\sqrt{f_1 ^2 (\varphi(x))
 + f_2 ^2 (\varphi(x))-R^2}}{f_1 ^2 (\varphi(x))
 + f_2 ^2 (\varphi(x))},
\end{gather*}
where $\varphi(x)$ is defined as above.
Then we obtain a second-order nonlinear but homogeneous system
whose solutions are solutions of  differential inclusion
\eqref{inclusiontheta}:
\begin{equation}
\dot{x}= Ax+R
\begin{pmatrix}
v_{1}^-(x)&-v_{2}^-(x)\\
v_{2}^-(x)&v_{1}^-(x)
\end{pmatrix} x.\label{extremalsystem-}
\end{equation}
This system  has as trajectories spirals which turn around the origin
in negative sense and the value of the integral $I^-(R)$
is the Ljapunov exponent of the solutions of this system.
So the condition $I^-(R)<0$ is true if and only if the
system \eqref{extremalsystem-} is asymptotically stable.
We will name system \eqref{extremalsystem-} the negative extremal
system of differential inclusion \eqref{inclusiontheta}.
For every stable matrix $A\in \mathbb{R}^{2\times 2}$ the negative extremal
system is the perturbation of the nominal
linear system $\dot{x}=Ax$ with the nonlinear perturbation of
the class $P_n (\mathbb{R})$ whose norm is $R$,
\[
N_R^- (A,x):=R
\begin{pmatrix}
v_{1}^-(x)&-v_{2}^-(x)\\
v_{2}^-(x)&v_{1}^-(x)
\end{pmatrix} x.
\]

\begin{lemma}
For an arbitrary stable $A\in \mathbb{R}^{2\times 2}$ matrix we have
\begin{equation}
R(A)\geq R_{n} (A) = R_{t} (A) = R_{nt} (A) = R_{i}(A). \label{radiiresult}
\end{equation}
\end{lemma}

\begin{proof}
Let $N(x,t)\in P_{nt}(\mathbb{R})$, $\|N(x,t)\|_{nt}=R_0$.
Then for all $t\in \mathbb{R}_+$, $x\in \mathbb{R}^2$,
$N(x,t)=r(t)\|x\| \begin{pmatrix}\cos \theta (t)\\ \sin \theta (t)
\end{pmatrix}$ for suitable $0\leq r(t)\leq R_0$,
$0\leq \theta(t) <2\pi$, and so every solution of the perturbed system
$\dot{x}=Ax+N(x,t)$ is a solution of differential
inclusion \eqref{inclusiontheta} with $R=R_0$, from which it follows
that
\begin{equation}
R_{nt}(A)\geq R_i(A). \label{RARiA}
\end{equation}
In the case $R_i(A)=R(A)$, from the inequalities \eqref{radineq}
and \eqref{RARiA} it follows that all the considered stability
radii are equal and then the assertion of the lemma is true.

When $R_i(A)<R(A)$, from the remark to
 Theorem \ref{aplicacionFilippov}, there exists a nonlinear
perturbation $N_{R_i (A)} (A,x)$ of the class $P_n(\mathbb{R})$ with norm
$R_i(A)$ such that the perturbed system
$\dot x= Ax +N_{R_i (A)} (A,x)$ is not g.a.s.,
so $R_n(A)\leq R_i(A)$, and from that
and \eqref{radineq}, \eqref{RARiA} the
assertion of the lemma follows.
\end{proof}

\section{Calculation of the integrals $I^{+}(R)$ and $I^{-}(R)$}
\label{calculo de integrales}

First note that if $A\in \mathbb{R}^{2\times 2} $ is a stable matrix
such that $n=0$, then from expressions \eqref{f12}, \eqref{f22}
it follows that $f_1(\varphi)$ and $f_2(\varphi)$ are constant functions,
so the integrals $I^+ (R)$ and $I^- (R)$ are
immediate; but, as we show in the next section, it is not necessary
in this case to calculate these integrals,
because it can easily be proved that $R_i(A)=R(A)$.

In this section, for the case $n\ne 0$, we give the expressions of
the integrals $I^+ (R)$ and $I^- (R)$ that appear in
Theorem \ref{aplicacionFilippov} in terms of elementary
functions and the complete elliptic integral of the
third kind. For the reduction of the integrals to canonical elliptic
integrals we use the well known method proposed,
for example, in \cite{Byrd} and the following equality which appears
in the table of integrals of this book:
\begin{equation}
\int_0^{\infty} \frac{dt}{t^2 -p}
\sqrt{\frac{t^2+a^2}{t^2+b^2}}=\frac{1}{a}\Pi (\alpha^2,k), \quad
\text{if } a>b, \label{int3tipo}
\end{equation}
where $\Pi(\cdot,\cdot)$ denotes the complete elliptic integral
of the third kind and
\begin{equation}
\alpha^2 =1+\frac{p}{a^2}, \quad k^2=1-\frac{b^2}{a^2}.
\end{equation}
After rationalization of the denominators in
\eqref{Kplus}, \eqref{Kmin} we obtain
\begin{gather}
K_R^+ (\varphi)=\frac{f_1 (\varphi) f_2 (\varphi)
 +R\sqrt{f_1^2 (\varphi)+f_2^2 (\varphi)-R^2}}{f_2^2 (\varphi)-R^2},
  \label{kplusr}\\
K_R^- (\varphi)=\frac{-f_1 (\varphi) f_2 (\varphi)
 +R\sqrt{f_1^2 (\varphi)+f_2^2 (\varphi)-R^2}}{f_2^2 (\varphi)-R^2}.
 \label{kminr}
\end{gather}
The rationalization can introduce some singularities in the integrals,
but taking into account that the original integrals
exist as proper integrals for the considered values of $R$,
we can calculate these integrals in the sense of the Cauchy
principal value.
From  Theorem \ref{aplicacionFilippov} and
\eqref{kplusr}, \eqref{kminr} after decomposition in partial
fractions we have
\begin{align*}
I^+ (R)&=\frac {1}{2} \int_0^{2\pi }
 \Big( \frac{f_1 (\varphi)}{f_2 (\varphi)+R}
 +\frac{f_1 (\varphi)}{f_2 (\varphi)-R}  \\
&\quad - \frac{\sqrt{f_1^2 (\varphi)
 + f_2^2 (\varphi)-R^2}}{f_2 (\varphi)+R}
 +\frac{\sqrt{f_1^2 (\varphi)+ f_2^2 (\varphi)-R^2}}{f_2 (\varphi)-R}
\Big) d \varphi ,
\end{align*}
\begin{align*}
I^- (R)&=\frac {1}{2} \int_0^{2\pi }
 \Big( \frac{-f_1 (\varphi)}{f_2 (\varphi)+R}
 +\frac{-f_1 (\varphi)}{f_2 (\varphi)-R}  \\
&\quad -\frac{\sqrt{f_1^2 (\varphi)
 + f_2^2 (\varphi)-R^2}}{f_2 (\varphi)+R}+ \frac{\sqrt{f_1^2 (\varphi)
 + f_2^2 (\varphi)-R^2}}{f_2 (\varphi)-R}
\Big) d \varphi .
\end{align*}
So if we define
\begin{gather}
I_1(R):=\frac {1}{2} \int_0^{2\pi }
 \frac{f_1 (\varphi)}{f_2 (\varphi)+R} d \varphi
=\frac {1}{2} \int_0^{2\pi } \frac{m_1}{f_2 (\varphi)+R}
 d \varphi \label{I1}
\\
I_2(R):=\frac {1}{2} \int_0^{2\pi } \frac{f_1 (\varphi)}{f_2 (\varphi)-R} d \varphi
=\frac {1}{2} \int_0^{2\pi } \frac{m_1}{f_2 (\varphi)-R} d \varphi \label{I2}
\\
I_3(R):=\frac {1}{2} \int_0^{2\pi } \frac{-\sqrt{f_1^2 (\varphi)+ f_2^2 (\varphi)-R^2}}{f_2 (\varphi)+R}
 d \varphi \label{I3}
\\
I_4(R):=\frac {1}{2} \int_0^{2\pi } \frac{\sqrt{f_1^2 (\varphi)+ f_2^2 (\varphi)-R^2}}{f_2 (\varphi)-R}
 d \varphi \label{I4}
\end{gather}
we have
\begin{gather}
I^+ (R)=I_1 (R)+I_2 (R)+I_3 (R)+I_4 (R) , \label{Imas} \\
I^- (R)=-I_1 (R)-I_2 (R)+I_3 (R)+I_4 (R). \label{Imenos}
\end{gather}

\begin{lemma} \label{integracion}
If $A\in \mathbb{R}^{2\times 2} $ is a stable matrix such that
$n\ne 0$, then for the integrals $I_k (R) , k=1,2,3,4$ in the sense
of Cauchy Principal Value we have
\begin{gather}
I_1(R)=\begin{cases}
0 &\text{if } |m_2+R|<n\\
 \frac{m_1 \pi \operatorname{sgn}(m_2+R)}{\sqrt{(m_2+R)^2-n^2}}
&\text{if } |m_2+R|>n;
\end{cases} \label{I12}
\\
I_2(R)=\begin{cases}
0 &\text{if } |m_2-R|<n\\
 \frac{m_1 \pi \operatorname{sgn}(m_2-R)}{\sqrt{(m_2-R)^2-n^2}}
&\text{if } |m_2-R|>n ;
\end{cases} \label{I22}
\end{gather}
\begin{gather}
I_3(R)=\begin{cases}
0 \quad\text{if }|m_2+R|<n\\
\alpha_3 (R)\Re\Big[\beta_3 (R)  \Pi
\Big(1+\frac{\tau_{3}^2 (R)}{a^2(R)}, \sqrt{1-\frac{1}
{a^2(R)}} \Big) \Big]\\
\quad\text{if } |m_2+R|>n;
\end{cases} \label{I3final}
\\
I_4(R)=\begin{cases}
0 \quad \text{if } |m_2-R|<n,\\
\alpha_4 (R)\Re \Big[ \beta_4 (R) \Pi
\Big( 1+\frac{\tau_{4}^2 (R)}{a^2(R)}, \sqrt{1-\frac{1}
{a^2(R)}} \Big) \Big],\\
\quad\text{if } |m_2-R|>n,
\end{cases}
\end{gather}
where $\Pi (\cdot,\cdot)$ denotes the complete elliptic integral
of the third kind, ${\underline \sigma}(A), {\overline \sigma}(A)$
are the smallest and largest singular values of the matrix $A$,
and $m_1, m_2, n$ are the numbers given by
 \eqref{m1m2n}, and
\begin{gather}
a(R)=\sqrt{\frac{{\overline \sigma}^2 (A)-R^2}{{\underline \sigma}^2 (A)
-R^2}}
\\
\alpha_3 (R)=\frac{-2({\underline \sigma}^2 (A)-R^2)}{\sqrt{{\overline \sigma}^2(A)-R^2}
\big( m_2+R-\frac{n m_2}{\sqrt{m_1^2+m_2^2}}\big)} \label{alpha3}
\\
\beta_3 (R)=1-\frac{n m_1 i}{\sqrt{m_1^2+m_2^2} \sqrt{(m_2+R)^2-n^2}} \label{beta3}
\\
\tau_3 (R)=\frac{\frac{n m_1}{\sqrt{m_1^2+m_2^2}} +i \sqrt{(m_2+R)^2-n^2}
}{m_2+R-\frac{n m_2}{\sqrt{m_1^2+m_2^2}}
} \label{tau3}
\\
\alpha_4 (R)=\frac{2({\underline \sigma}^2 (A)-R^2)}
{\sqrt{{\overline \sigma}(A)^2-R^2}
\big( m_2-R-\frac{n m_2}{\sqrt{m_1^2+m_2^2}}\big)}
\\
\beta_4 (R)=1-\frac{n m_1 i}{\sqrt{m_1^2+m_2^2} \sqrt{(m_2-R)^2-n^2}}
\\
\tau_4 (R)=\frac{\frac{n m_1}{\sqrt{m_1^2+m_2^2}}
+i \sqrt{(m_2-R)^2-n^2}}{m_2-R-\frac{n m_2}{\sqrt{m_1^2+m_2^2}}}
\end{gather}
\end{lemma}

\begin{proof}
 The integrands in $I_1(R)$ and $I_2(R)$ are very simple rational
functions, whose primitive functions are
given in terms of logarithmic or arctangent functions; so,
evaluating the integrals in the sense of the Cauchy principal
value, we easily obtain the results of the lemma.

Now we explain how to compute the more complicated integral $I_3(R)$
(The computation of $I_4(R)$ is completely similar).

In the case $|m_2+R|<n$ using the methods proposed in \cite{Byrd},
the integral $I_3(R)$ can be easily reduced to the form 
$\int_{-\infty}^{\infty} 1/\big((t^2-p^2)\sqrt{P}\big)dt$,
where $P$ is a positive polynomial of fourth degree,
and the parameter $p$ is real and positive. 
It is well known \cite{Byrd} that the primitive function of this last
integral is an elliptic integral of the third kind,
which becomes logarithmically infinite, for $t=p$, as
$\pm \ln(t-p)/(2\sqrt{P(p)})$ and,
for $t=-p$, as $\mp \ln(t+p)/(2\sqrt{P(p)})$.
From this it follows that the integral $I_3(R)$, taken in the sense
of the Cauchy principal value, is equal to zero.


In the case $|m_2+R|>n$ from expressions \eqref{f12}, \eqref{f22}
we obtain
\begin{gather*}
f_1^2 (\varphi)+ f_2^2 (\varphi)-R^2=m_1^2+m_2^2
 +n^2-R^2+2n\sqrt{m_1^2+m_2^2}\cos2x ,\\
f_2(\varphi)+R=m_2+R+n \Big[ \frac{m_2}{\sqrt{m_1^2+m_2^2}}
\cos2x -  \frac{m_1}{\sqrt{m_1^2+m_2^2}}\sin2x\Big],
\end{gather*}
where $x=\varphi-\chi-\psi$ and
$\sin \psi=\frac{m_1}{\sqrt{m_1^2+m_2^2}}$,
$\cos \psi=\frac{m_2}{\sqrt{m_1^2+m_2^2}}$.
Using these expressions we write the integral in the form
\[
I_3(R)=-\frac{1}{4} \int_{0}^{4\pi} \frac{\sqrt{m_1^2+m_2^2
+n^2-R^2+2n\sqrt{m_1^2+m_2^2}\cos2x}}{m_2+R
+n \big[ \frac{m_2}{\sqrt{m_1^2+m_2^2}}\cos2x -
 \frac{m_1}{\sqrt{m_1^2+m_2^2}}\sin2x\big]} dx
\]
Now by the change of the variable of integration
$\tan (x/2)=t$ and using the expressions for the smallest and the
largest singular values of the matrix $A$:
\begin{gather*}
{\underline \sigma}^2(A)=m_1^2+m_2^2+n^2-2n\sqrt{m_1^2+m_2^2} ,  \\
{\overline \sigma}^2(A)=m_1^2+m_2^2+n^2+2n\sqrt{m_1^2+m_2^2} ,
\end{gather*}
we obtain
\[
 I_3(R)=- \int_{-\infty}^{\infty}
\frac{\sqrt{{\overline \sigma}^2 (A)-R^2
 +({\underline \sigma}^2 (A)-R^2)t^2}}
{\sqrt{1+t^2}\big[ (m_2+R-\frac{nm_2}{\sqrt{m_1^2+m_2^2}})t^2
 -\frac{2nm_1 t}{\sqrt{m_1^2+m_2^2}}+ m_2+R+
\frac{nm_2}{\sqrt{m_1^2+m_2^2}}\big]}   \;dt.
\]
Factoring the denominator,
\[
I_3(R)=- \frac{\sqrt{{\underline \sigma}^2(A)-R^2}}
 {m_2+R-\frac{nm_2}{\sqrt{m_1^2+m_2^2}}}
\int_{-\infty}^{\infty} \frac{\sqrt{\frac{{\overline \sigma}^2
(A)-R^2}{{\underline \sigma}^2 (A)-R^2}+t^2} }
{\sqrt{1+t^2}(t-\tau_3(R))(t-{\overline \tau_3(R)})}\;dt,
\]
where $\tau_3(R)$ is given by \eqref{tau3}.
Using  the identity
\begin{equation}
\frac{1}{(t-\tau)(t-\overline \tau)}=2\Re
\big[ \frac{1}{\tau-{\overline \tau}} \big(\frac{\tau}{t^2-\tau^2}
+\frac{t}{t^2-\tau^2}\big) \big] ,
\end{equation}
and taking into account that the integral of an odd function
in the real line is zero, we obtain
\begin{align*}
&I_3(R)\\
&=- \frac{\sqrt{{\underline \sigma}^2 (A)-R^2}}
{m_2+R-\frac{nm_2}{\sqrt{m_1^2+m_2^2}}}
2\Re \int_{-\infty}^{\infty} \frac{\sqrt{\frac{{\overline \sigma}^2
(A)-R^2}{{\underline \sigma}^2 (A)-R^2}+t^2}}
{\sqrt{1+t^2}}
\frac{\tau_3(R)}{\tau_3(R)-{\overline \tau}_3(R)}
\frac{1}{t^2-\tau_3^2(R)} \;dt .
\end{align*}
Now using  expressions \eqref{tau3} and  \eqref{beta3},
\[
\frac{\tau_3(R)}{\tau_3(R)-{\overline \tau}_3(R)}=\frac{1}{2}
\big[ 1- \frac{nm_1 i}{\sqrt{m_1^2+m_2^2}\sqrt{(m_2+R)^2-n^2}}\big]=
\frac{1}{2} \beta_3(R),
\]
\[
I_3(R)=- \frac{\sqrt{{\underline \sigma}^2 (A)-R^2}}{m_2+R
 -\frac{nm_2}{\sqrt{m_1^2+m_2^2}}}
\Re \big\{ \beta_3(R) \int_{0}^{\infty}
 \frac{\sqrt{\frac{{\overline \sigma}^2 (A)
 -R^2}{{\underline \sigma}^2 (A)-R^2}+t^2}}
{\sqrt{1+t^2}} \frac{1}{t^2-\tau_3^2(R)} \;dt \big\}.
\]
And finally from the formula \eqref{int3tipo} and expression
\eqref{alpha3} we obtain the expression \eqref{I3final}.
\end{proof}


\section{Calculation of the radius of stability for arbitrary matrices}
\label{general}

Let us now formulate some important results related to the integrals
$I^+ (R)$, $R\in (R^+ (A),R(A))$  and $I^- (R)$, $R\in (R^- (A),R(A))$,
which allow characterizing the stable matrices
$A\in \mathbb{R}^{2\times 2}$ such that $R_i (A)=R(A)$ and formulate
 the algorithm for the calculation of the number $R_i (A)$.

\begin{lemma} \label{m2n0}
 Let $A\in \mathbb{R}^{2\times 2} $ be a stable matrix such that
$n=0$ or $m_2=0$. Then $R_{i}(A)=R(A)$.
\end{lemma}

\begin{proof}
If $n=0$, then from \eqref{f12} and \eqref{f22} we have that
$f_1(\varphi)=m_1$, $f_2(\varphi)=m_2$
are constant functions. So if the differential inclusion
\eqref{inclusiontheta} changes to be unstable throughout
a nonlinear perturbation $N_{R}^+ (A,x)$ or $N_{R}^-(A,x)$,
then this perturbation will be in this case linear
constant perturbation and so from inequalities \eqref{radiiresult}
we have $R_{i}(A)=R(A)$.
If $m_2=0$, then $R^+(A)=n$, $R^-(A)=n$, thus for $R>n$
from \eqref{I12} and \eqref{I22} follows that $I_1(R)+I_2(R)=0$
and from \eqref{I3} and \eqref{I4} that  $I_3(R)<0$ and $I_4(R)<0$,
so using the expressions \eqref{Imas}, \eqref{Imenos}
we conclude that  $I^+(R)<0$, $I^-(R)<0$ and from
Theorem \ref{aplicacionFilippov} $R_{i}(A)=R(A)$.
\end{proof}

\begin{lemma} \label{solounaint}
 Let $A\in \mathbb{R}^{2\times 2} $ be a stable matrix such that
$\max \{ R^- (A), R^+ (A) \} <R(A)$
and $R\in (\max \{ R^- (A), R^+ (A) \},R(A))$. Then $I^- (R)<0$
in the case $m_2>0$, and
$I^+ (R)<0$ in the case $m_2<0$.
\end{lemma}

\begin{proof}
 Let $R\in (\max \{ R^- (A), R^+ (A) \},R(A))$ then $f_2(\varphi)+R>0$
and $f_2(\varphi)-R<0$ for all $\varphi \in [0, 2\pi)$ and from
expressions \eqref{I3} and \eqref{I4} we have
that $I_3(R)<0$ and $I_4(R)<0$. Now if $m_2>0$, then $m_2 +R>0, m_2-R<0, m_2 +R> |m_2-R|$ and so from the expressions
\eqref{I12} and  \eqref{I22} follows that $I_1(R)+I_2(R)>0$,
but now from  this and \eqref{Imenos} we conclude $I^- (R)<0$.
The proof in the case $m_2<0$ is completely similar.
\end{proof}

\begin{theorem} \label{thm2}
 Let $A\in \mathbb{R}^{2\times 2} $ be a stable matrix.
The equality $R_i (A)=R(A)$ is true if and only if,
whenever  $\max \{ R^- (A), R^+ (A) \} <R(A)$, we have
$I^+(R(A))\leq 0$ in the case $m_2>0$ and
$I^-(R(A))\leq 0$ in the case $m_2<0$.
\end{theorem}

\begin{proof}
 From lemma \ref{m2n0} the assertion of the theorem holds in the cases
$m_2=0$ or $n=0$. Thus from now on we assume $m_2\ne 0$ and $n\ne 0$.
In the case $R^- (A)\geq R(A), R^+ (A)\geq R(A)$ in theorem
\ref{aplicacionFilippov} the condition for the integrals
automatically follows, and so $R_i(A)=R(A)$.

Now if $R^+ (A)<R(A)$, but $R^- (A)\geq R(A)$, then we have to
check only the integral $I^+ (R)$. In this case from
the lemma \ref{boundofR} we have $m_2+R>n$, and $| m_2-R| <n$,
 so from lemma \ref{integracion} $I_1 (R)<0$, $I_2 (R)=0$, $I_3 (R)<0$,
$I_4 (R)=0$, from what we obtain: $I^+ (R)<0$, and from
theorem \ref{aplicacionFilippov} follows the equality $R_i(A)=R(A)$.
The case $R^- (A)<R(A)$, but $R^+ (A)\geq R(A)$ is completely similar.
Finally we analyze the case $m_2>0$ and
$\max \{ R^- (A), R^+ (A) \} <R(A)$.
In this case from the lemma \ref{solounaint} follows that $I^- (R)<0$
for all $R\in (\max \{ R^- (A), R^+ (A) \}, R(A))$ and
then from theorem \ref{aplicacionFilippov} and the fact that $I^+(R)$
is a monotone increasing function of $R$ the equality
$R_i (A)=R(A)$ is true if and only if $I^+(R(A))\leq 0$.
The proof in the case $m_2<0$ is  similar.
\end{proof}

\begin{lemma} \label{extremoizquierdo}
 Let $A\in \mathbb{R}^{2\times 2} $ be a stable matrix.
\begin{itemize}
\item[(i)] If $m_2>0$ and $R^+ (A)<R(A)$, then for $R>R^+ (A)$
 sufficiently near to $R^+ (A)$ we have $I^+ (R)<0$;
\item[(ii)] If $m_2<0$ and $R^- (A)<R(A)$, then for $R>R^- (A)$
 sufficiently near to $R^- (A)$ we have $I^- (R)<0$.
\end{itemize}
\end{lemma}

\begin{proof}
We prove only assertion (i); the proof of (ii) is similar.
For $R>R^+(A)$ sufficiently near to $R^+(A)$ we have from \eqref{I12}
that $I_1(R)<0$ and from \eqref{I3} that $I_3(R)<0$.
Furthermore, for $R$ sufficiently near to $R^+(A)$ we have $| m_2-R| <n$,
and so from lemma \ref{integracion} it follows
that $I_2(R)=I_4(R)=0$. Thus from \eqref{Imas} it follows that $I^+ (R)<0$.
\end{proof}

Finally, as a direct consequence of the results proved in this work
and the fact that the functions
$I^+ (R)$, $R\in (R^+ (A),R(A))$ and $I^- (R)$, $R\in (R^- (A),R(A))$
are monotonically increasing functions of the
variable $R$, which follows from \eqref{krplus1}, \eqref{krminus1}
we formulate the general algorithm for the calculation of
the number $R_i(A)$.

\subsection*{Algorithm}
\begin{itemize}
\item[1] For the given stable matrix $A$ calculate the numbers:
 $m_1, m_2$, $n$, ${\underline \sigma}(A)$, $R(A)$;

\item[2] If $m_2=0$ or $n=0$, then put $R_i(A)=R(A)$;

\item[3] If $m_2\ne 0$, $n\ne 0$, calculate $R^+(A)$ and $R^-(A)$.
If $R^+(A)\geq R(A)$ or $R^-(A)\geq R(A)$, then
put $R_i(A)=R(A)$;

\item[4] If $\max \{ R^- (A), R^+ (A) \} <R(A)$ and $m_2>0$
calculate $I^+ (R(A))$. If $I^+ (R(A))\leq 0$ then put
 $R_i(A)=R(A)$;

\item[5] If $\max \{ R^- (A), R^+ (A) \} <R(A)$ and $m_2<0$
calculate $I^- (R(A))$. If $I^- (R(A))\leq 0$ then put
 $R_i(A)=R(A)$;

\item[6] If $\max \{ R^- (A), R^+ (A) \} <R(A)$, $m_2>0$ and
$I^+ (R(A))>0$, search $R_0 \in (R^+ (A),R(A))$ such
that $I^+(R_0)<0$, and use bisection method in the interval
$(R_0,R(A))$ to determine the root $R$ of the equation
$I^+ (R)=0$ and put $R_i(A)=R$;
\item[7] If $\max \{ R^- (A), R^+ (A) \} <R(A)$, $m_2<0$ and
$I^- (R(A))>0$, search $R_0 \in (R^- (A),R(A))$ such
that $I^-(R_0)<0$, and use the bisection method in the interval
$(R_0,R(A))$ to determine the root $R$ of the equation $I^- (R)=0$
and put
$R_i(A)=R$;
\end{itemize}

\section{Examples}

In this section we give applications of the main results of this
work to the calculation of the stability radius $R_i (A)$.

Example 1. Let
\[
A=\begin{bmatrix}
-220&-99\\
181&-220
\end{bmatrix}\,.
\]
Then simple calculations give
$m_1=-220$, $m_2=140$, $n=41$, ${\underline \sigma}(A)=219.768$.
So,
$R(A)=\min \{{\underline \sigma}(A),
-\frac{1}{2} \operatorname{tr}(A) \}=219.768$,
$R^{+}(A)=\max \{ 0, n-m_2\}=0$,
$R^{-}(A)=\max \{ 0, n+m_2\}=181$,
$\max \{ R^{+}(A), R^{-}(A) \} <R(A)$ and
$I^+ (R(A))=I^+ (219.768)=0.37 >0$, so from
Theorem \ref{thm2} we have that $R_i (A)<R(A)$ and
$R_i(A)$ is the root of the equation $I^+ (R)=0$.
Using lemma \ref{integracion} we calculate the integral
$I^+ (200)= -0.711<0$ from what follows that
$R_i (A)\in (200, 219.768)$. Since $I^+ (R)$
is a monotonically increasing function we can apply
the bisection method to obtain an approximation for the number
$R_i (A)$. Finally we obtain
\[
I^+ (214.555)=-0.0001034<0, \quad I^+ (214.560)=0.000188>0
\]
and we can take $R_i (A)= 214.555$.

Example 2. Let
\[
A=\begin{bmatrix}
-220&-159\\
241&-220
\end{bmatrix}\,.
\]
Then $m_1=-220$, $m_2=200$, $n=41$, ${\underline \sigma}(A)=256.321$.
So, $R(A)=220$, $R^{+}(A)=0, R^{-}(A)=241$.
So $R^{-}(A)>R(A)$ and Theorem \ref{thm2} implies that
$R_i(A)=R(A)=220$.

Example 3. Let
\[
A=\begin{bmatrix}
-220&-9\\
91&-220
\end{bmatrix}
\]
Then from the calculations we obtain:
$m_1=-220$, $m_2=50$, $n=41$, ${\underline \sigma}(A)=184.610$. So,
$R(A)=\min \{ {\underline \sigma}(A), -\frac{1}{2} \operatorname{tr}(A)
 \}=184.610$, $R^{+}(A)=0$, $R^{-}(A)=9$,
$\max \{ R^{+}(A), R^{-}(A) \} <R(A)$ and
$I^+ (R(A))=I^+ (184.610)=-2.324<0$, so from Theorem \ref{thm2}
we have that $R_i (A)=R(A)=184.610 $.

\subsection*{Conclusion}
In this paper we have solved the problem of the computation of
the number $R_{i}(A)$. We have characterized
the stable matrices $A$ for which the equality $R_{i}(A)=R(A)$ holds.
In the case when these numbers are not equal, the results obtained
allow us to calculate $R_{i}(A)$ with arbitrary accuracy, using
the bisection method to search for the zero of the integral $I^+(R)$
or $I^-(R)$. We have also proved that
$R_{n} (A) = R_{t} (A) = R_{nt} (A) = R_{i}(A)$ for every stable matrix
$A$. These results, to our knowledge, are not reported in the
mathematical literature. It is of interest to note also that the
number $R_{i}(A)$ has close links
with the stability of switched linear systems. For an exposition
of recent advances in this important topic see
\cite{shorten}.

\begin{thebibliography}{9}

\bibitem{Hin1} D. Hinrichsen, A. J. Pritchard;
\emph{Destabilization by output feedback}.
Differential and Integral Equations, 5; pp. 357-386, 1992.

\bibitem{Hin2} D. Hinrichsen, A. J. Pritchard;
\emph{Stability radii of linear systems}. Systems \& Control Letters,
Vol. 7, pp. 1-10, 1986.

\bibitem{Hin3} D. Hinrichsen, A. J. Pritchard;
\emph{Stability radius for structured perturbations and the algebraic
 Riccati equation}. Systems \& Control Letters,
Vol. 8, pp. 105-113, 1986.

\bibitem{HinMot} D. Hinrichsen, M. Motscha;
\emph{Optimization Problems in the Robustness Analysis of Linear State
Space Systems}, Report No. 169,
Institut fur Dynmische Systeme, University of Bremen, 1987.

\bibitem{F} A. F. Filippov;
\emph{Stability conditions of homogeneous systems with arbitrary
switches of the operating modes},
Automation and Remote Control, Vol. 41, pp. 1078-1085, 1980.

\bibitem{U} R. U. Salgado, H. Gonz\'alez;
\emph{Radio de estabilidad real de sistemas bidimensionales para
perturbaciones lineales dependientes del tiempo},
Extracta Mathematicae, Vol. 15, N. 3, pp. 531-545, 2000.

\bibitem{Byrd} P. F. Byrd, M. D. Friedman;
\emph{Handbook of elliptic integrals for engineers and physicists}.
 Springer, 1954.

\bibitem{shorten} R. Shorten, F. Wirth, O. Mason, K. Wulff, Ch. King;
\emph{Stability criteria for switched and hybrid systems}.
SIAM Review, 49(4), pp. 545-582, 2007.

\end{thebibliography}
\end{document}
