\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2011 (2011), No. 19, pp. 1--21.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu}
\thanks{\copyright 2011 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2011/19\hfil Quadratic forms as Lyapunov functions]
{Quadratic forms as Lyapunov functions in the study of stability 
of solutions to difference equations}

\author[A. O. Ignatyev, O. Ignatyev\hfil EJDE-2011/19\hfilneg]
{Alexander O. Ignatyev, Oleksiy Ignatyev}  % in alphabetical order

\address{Alexander O. Ignatyev \newline
 Institute for Applied Mathematics and Mechanics, 
 R. Luxemburg Street,74, Donetsk-83114, Ukraine}
\email{aoignat@mail.ru, ignat@iamm.ac.donetsk.ua}

\address{Oleksiy  Ignatyev \newline
 Department of Statistics and Probability, 
 Michigan State University,
 A408 Wells Hall, East Lansing, MI 48824-1027, USA}
\email{ignatyev@stt.msu.edu,  aignatye@math.kent.edu}

\thanks{Submitted February 1, 2010. Published February 3, 2011.}
\subjclass[2000]{39A11, 34K20}
\keywords{Difference equations; Lyapunov function}

\begin{abstract}
 A system of linear autonomous  difference equations
 $x(n+1)=Ax(n)$ is considered, where $x\in \mathbb{R}^k$ and
 $A$ is a real nonsingular $k\times k$ matrix.
 In this paper it is proved that if $W(x)$ is any quadratic
 form and $m$ is any positive integer, then there exists a unique
 quadratic form $V(x)$ such that
 $\Delta_m V=V(A^mx)-V(x)=W(x)$ holds if and only if
 $\mu_i\mu_j\neq1$ ($i=1,2,\dots,k$; $j=1,2,\dots,k$), where
 $\mu_1,\mu_2,\dots,\mu_k$ are the roots of the equation
 $\det(A^m-\mu I)=0$.

 A number of theorems on the stability of difference systems have
 also been proved. Applying these theorems, the stability problem
 of the zero solution of the nonlinear system $x(n+1)=Ax(n)+X(x(n))$
 has been solved in the critical case when one eigenvalue of
 the matrix $A$ is equal to minus one, and the others lie inside the
 unit disk of the complex plane.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{definition}[theorem]{Definition}
\allowdisplaybreaks

\section{Introduction and preliminaries}

The theory of discrete dynamical systems has grown  tremendously
in the last decade. Difference equations can arise in a number of
ways. They may be the natural model of a discrete process (in
combinatorics, for example) or they may be a discrete approximation
of a continuous process. The growth of the theory of difference
systems has been strongly
 promoted  by the advanced technology in scientific computation
and the large number of applications to models in biology,
engineering, and other physical sciences. For example, in papers
\cite{AgORWo:07,BrCa:2001,Br:05,CaSiJu:06,Ch:06,FrYa:06}
 systems of difference equations
are applied as natural models of population dynamics; in
\cite{CoFeLiMe:06} difference equations are applied as a
mathematical model in genetics.

Many evolution processes are characterized by the fact that at
certain moments of time they experience a change of state
abruptly. These processes are subject to short-term perturbations
whose duration is negligible in comparison with the duration of
the process. Consequently, it is natural to assume that these
perturbations act instantaneously, that is, in the form of
impulses. It is known, for example, that many biological phenomena
involving thresholds, bursting rhythm models in medicine and
biology, optimal control models in economics, pharmacokinetics and
frequency modulated systems, do exhibit impulsive effects. Thus
impulsive differential equations, that is, differential
equations involving impulse effects, appear as a natural
description of observed evolution phenomena of several real world
problems
\cite{AnDi:00,AnDiNe:00,DO:02,GeCh:98,GuLaCh:00,Ne:99,SaPe:95,SmWa:04,SmWa:05,TaCh:02,TaCh:05,ZhLi:03,ZhShWa:03,ZhLiCh:03}.
The early work on differential equations with impulse effect was
summarized in the monograph \cite{SaPe:95} in which the foundations of
this theory were described. In recent years, the study of
impulsive systems has received an increasing interest
\cite{IgIg:08,IgIgSo:06,GlIg:03,GlIg:04,Ah:06,BaSi:89,CaLi:97,ChBhHa:03,HaChNe:06,HaChNe:02,Ig:03,Ig:08}.
In fact, an impulsive system consists of a continuous system which
is governed by ordinary differential equations and a discrete
system which is governed by difference equations. So the dynamics
of impulsive systems essentially depends on properties of the
corresponding difference systems, and this confirms the importance
of studying the qualitative properties of difference systems.


The stability and asymptotic behaviour of solutions of these
models are especially important to many investigators. The
stability of a discrete process is the ability of the process to
resist  {\it a priori} unknown small influences. A process is said
to be stable if such disturbances do not change it. This  property
turns out to be of utmost importance since, in general, an
individual predictable process can be physically realized only if
it is stable in the corresponding natural sense. One of the most
powerful methods, used in stability theory, is Lyapunov's direct
method. This method consists in the use of an auxiliary function
(the Lyapunov function).

Consider the system of difference equations
\begin{equation}\label{2d1.1}
x(n+1)=f(n,x(n)),\quad f(n,0)=0,
\end{equation}
where $n=0, 1, 2, \dots$ is discrete time, $x(n)=(x_1(n),\dots,
x_k(n))^T\in \mathbb{R}^k$, $f=(f_1,\dots,f_k)^T\in \mathbb{R}^k$. The
function $f$ is assumed to be continuous and to satisfy a Lipschitz
condition in $x$.
  System \eqref{2d1.1} admits the trivial solution
\begin{equation}\label{2d1.2}
x(n)=0.
\end{equation}
Denote by $x(n,n_0,x^0)$ the solution of  \eqref{2d1.1} coinciding with
$x^0=(x^0_1,x_2^0,\dots,x_k^0)^T$ for $n=n_0$.  We also denote by
$\mathbb{Z}_+$  the set of nonnegative integers,
$\mathbb{N}_{n_0}=\{n\in \mathbb{Z}_+: n\ge n_0\}$,
$\mathbb{N}=\{n\in \mathbb{Z}_+: n\ge 1\}$, $B_r=\{x\in \mathbb{R}^k: \| x\|\le r\}$.

By analogy with ordinary differential equations, let us introduce
the following definitions.

\begin{definition} \label{def1.1} \rm
 The trivial solution of system \eqref{2d1.1}  is
said to be stable if for any $\varepsilon>0$ and $n_0\in {\mathbb{Z}_+}$
there exists a
$\delta=\delta(\varepsilon,n_0)>0$ such that $\| x^0\|<\delta$
implies
 $\| x(n,n_0,x^0)\|<\varepsilon$ for $n\in \mathbb{N}_{n_0} $.
Otherwise the trivial solution
of system \eqref{2d1.1}  is called unstable. If in this definition
$\delta$ can be chosen independent of $n_0$ (i.e.
$\delta=\delta(\varepsilon)$), then the zero solution of system
\eqref{2d1.1}  is said to be uniformly stable.
\end{definition}

\begin{definition} \label{def1.2} \rm
 Solution \eqref{2d1.2}  of system \eqref{2d1.1}  is said
to be attracting if for any
 $n_0\in \mathbb{Z}_+$ there exists an $\eta=\eta(n_0)>0$  such that for any
$\varepsilon>0$ and $x^0\in B_{\eta}$ there exists an
$N=N(\varepsilon,n_0,x^0)\in \mathbb{N}$ such that
  $\| x(n,n_0,x^0)\|<\varepsilon$ for all
$n\in \mathbb{N}_{n_0+N}$.
\end{definition}

In other words, solution  \eqref{2d1.2}  of system \eqref{2d1.1}  is called
attracting if, for every $x^0\in B_{\eta(n_0)}$,
\begin{equation}\label{2d1.3}
\lim_{n\to\infty}\| x(n,n_0,x^0)\|=0.
\end{equation}

\begin{definition} \label{def1.3} \rm
The trivial solution of system \eqref{2d1.1}   is
said to be uniformly attracting if for some $\eta>0$ and for each
$\varepsilon>0$ there exists an $N=N(\varepsilon)\in \mathbb{N}$
such that
  $\| x(n,n_0,x^0)\|<\varepsilon$ for all $n_0\in \mathbb{Z}_+$,
$x^0\in B_{\eta}$, and
$n\ge n_0+N$.
\end{definition}

In other words, solution  \eqref{2d1.2}  of system \eqref{2d1.1}   is called
uniformly attracting if \eqref{2d1.3}  holds uniformly
in $n_0\in \mathbb{Z}_+$ and $ x^0\in B_{\eta}$.

\begin{definition} \label{def1.4} \rm
The zero solution of system \eqref{2d1.1}   is called:
\begin{itemize}
\item
asymptotically stable if it is both stable and attracting;
\item
uniformly asymptotically stable if it is both uniformly stable and
uniformly attracting.
\end{itemize}
\end{definition}

\begin{definition} \label{def1.5} \rm
 The trivial solution of system \eqref{2d1.1}   is
said to be exponentially stable if there exist $M>0$ and
$\eta\in(0,1)$ such that $\| x(n,n_0,x^0)\|<M\|
x^0\|\eta^{n-n_0}$ for  $n\in\mathbb{N}_{n_0}$.
\end{definition}

A great number of papers are devoted to the investigation of the
stability of solution \eqref{2d1.2}  of system \eqref{2d1.1}.
The general theory of
difference equations and the basics of stability theory are
presented in \cite{Ag:2000,El:05,KePe:2000,CuFlRo:05,LaTr:02}. It has
been proved in \cite{IgIg:06} that if system \eqref{2d1.1}
 is autonomous
(i.e., $f$ does not depend explicitly on $n$) or periodic (i.e.,
there exists $\omega\in\mathbb{N}$  such that $f(n,x)\equiv
f(n+\omega,x)$),  then the stability of solution \eqref{2d1.2}
implies its uniform stability, and its asymptotic stability
implies its uniform asymptotic stability. The papers
\cite{FrSe:03,Ig:04,SaGr:05} deal with the stability investigation
of the zero solution of system \eqref{2d1.1} when this system is
periodic or almost periodic.

Let us formulate the main theorems of Lyapunov's direct
method about the stability of the zero solution of the system of
autonomous difference equations
\begin{equation}\label{2d1.4}
x(n+1)=F(x(n))
\end{equation}
where $x,F\in\mathbb{R}^k$, $F$ is a continuous function; $F(0)=0$.
These statements have been mentioned in \cite[Theorems 4.20 and
4.27]{El:05}. They are connected with the existence of an
auxiliary function
 $V(x)$; the analog of its derivative is the variation of $V$ relative to \eqref{2d1.4}
which is defined as $\Delta V(x)=V(F(x))-V(x)$.
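For instance, for the linear system $x(n+1)=Ax(n)$ and $V(x)=x^Tx$,
one has
$$
\Delta V(x)=(Ax)^T(Ax)-x^Tx=x^T(A^TA-I)x,
$$
where $I$ is the identity matrix, so the sign properties of the
variation are read off from the matrix $A^TA-I$.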

\begin{theorem} \label{thmA}
If there exists a positive definite
continuous function $V(x)$ such that $\Delta V(x)$ relative to
\eqref{2d1.4} is a negative semi-definite function or identically equal to
zero, then the trivial solution of system \eqref{2d1.4} is stable.
\end{theorem}

\begin{theorem} \label{thmB}
 If there exists a positive definite
continuous function $V(x)$ such that $\Delta V(x)$ relative to
\eqref{2d1.4} is negative definite, then the trivial solution of system
\eqref{2d1.4} is asymptotically stable.
\end{theorem}


\begin{theorem} \label{thmC}
 If there exists a continuous function $V(x)$
such that $\Delta V(x)$ relative to \eqref{2d1.4} is negative definite,
and the function $V$ is not positive semi-definite, then the
trivial solution of system \eqref{2d1.4} is  unstable.
\end{theorem}


Consider the autonomous system
\begin{equation}\label{2d1.5}
x(n+1)=Ax(n)+X(x(n)),
\end{equation}
where $A$ is a $k\times k$ nonsingular matrix,   $X$ is a function
such that
\begin{equation}\label{2d1.6}
\lim_{\| x\|\to0}\frac{\| X(x)\|}{\|
x\|}=0.
\end{equation}
Recall that for a real $k\times k$ matrix $A=(a_{ij})$, an
eigenvalue of $A$ is a real or complex number $\lambda$ such that
\begin{equation}\label{2d1.7}
\det(A-\lambda I_k)=0
\end{equation}
where $I_k$ is the unit $k\times k$ matrix. Let
$\lambda_1,\lambda_2,\dots,\lambda_k$ be eigenvalues of $A$. According to
\cite[p.175]{El:05},  let us denote $\rho(A)=\max_{1\le i\le
k}|\lambda_i|$. In \cite{El:05}  the following theorems have been
proved.

\begin{theorem} \label{thm1.1}
If $\rho(A)<1$, then the zero solution of
system \eqref{2d1.5} is asymptotically stable (moreover, the exponential
stability holds in this case).
\end{theorem}

\begin{theorem} \label{thm1.2}
 Let  $\rho(A)\le1$ and let the moduli of some
eigenvalues of $A$ be equal to one. Then the function  $X(x)$ in
system \eqref{2d1.5} can be chosen such that the zero solution of
system \eqref{2d1.5} is either stable or unstable.
\end{theorem}

The goal of this paper is to extend Theorems \ref{thmA}, \ref{thmB},
\ref{thmC} and to apply
the obtained theorems to the study of the stability of the zero
solution of system \eqref{2d1.5}  in the critical case $\lambda=-1$.
The paper is organized as follows. In Section 2,
 Theorems \ref{thmA}, \ref{thmB}, and \ref{thmC} are
extended, and theorems on instability are proved. In
Section 3, the possibility of constructing a Lyapunov function
in the form of a quadratic polynomial is considered. In Section 4,
the problem of the stability of the zero solution of system
\eqref{2d1.5} is considered in the critical case when equation
 \eqref{2d1.7}  has a root
$\lambda=-1$ and other roots lie in the unit disk of the complex
plane.

\section{Some general theorems extending Theorems \ref{thmA},
\ref{thmB}, \ref{thmC}}

Consider system of difference equations \eqref{2d1.1} and a
function $V: \mathbb{Z}_+\times B_H\to\mathbb{R}$, continuous
in $B_H$ and satisfying the equality $V(n,0)=0$. Recall that
the function $f$ in
\eqref{2d1.1} is Lipschitzian in $x$, so there is a constant $L$ such that
$\| f(n,x)-f(n,y)\|\le L\| x-y\|$.
Define the $m$-th variation of $V$ at the moment $n$ as
\[
\Delta_mV(n,x(n))=V(n+m,x(n+m))-V(n,x(n))
\]
where $m\in\mathbb{N}$.


\begin{definition} \label{def2.1}
 A function $r: \mathbb{R}_+\to\mathbb{R}_+$ is called a Hahn
function if it is continuous, increasing,
and satisfies $r(0)=0$. The class of Hahn functions will be denoted by
$\mathcal{K}$.
\end{definition}

\begin{theorem} \label{thm2.1}
 If system \eqref{2d1.1} is such that there exist
$m\in \mathbb{N}$, a function $a\in \mathcal{K}$, and a function $V:
\mathbb{Z}_+\times B_H\to\mathbb{R}$ such that $V(n,0)=0$,
\begin{equation}\label{2d2.1}
V(n,x)\ge a(\| x\|),
\end{equation}
and
\begin{equation}\label{2d2.2}
\Delta_m V\le0,
\end{equation}
then the trivial solution of system \eqref{2d1.1} is stable.
\end{theorem}

\begin{proof}
 Let $n_0\in \mathbb{Z}_+$ and $\varepsilon\in(0,H)$. We shall
show that there exists a $\delta=\delta(\varepsilon,n_0)>0$ such that $x^0\in
B_{\delta}$ implies $\| x(n,n_0,x^0)\|<\varepsilon$ for $n\in\mathbb{N}_{n_0}$.
First we shall show that this inequality is true for
$n=n_0+sm$ where $s\in\mathbb{Z}_+$. Since $V$ is continuous and
$V(n_0,0)=0$, there is a $\delta=\delta(\varepsilon,n_0)>0$ such that
\begin{equation}\label{2d2.3}
V(n_0,x^0)<
a\Bigl(\frac\varepsilon{1+L+L^2+\dots+L^{m-1}}\Bigr)
\end{equation}
for all $x^0\in B_{\delta}$. From conditions \eqref{2d2.1}, \eqref{2d2.2},
and \eqref{2d2.3} it follows
\begin{align*}
a(\| x(n_0+sm,n_0,x^0)\|)&\le V(n_0+sm,x(n_0+sm,n_0,x^0))\\
&\le V(n_0,x^0) <a\Bigl(\frac\varepsilon{1+L+L^2+\dots+L^{m-1}}\Bigr);
\end{align*}
therefore,
$$
\| x(n_0+sm,n_0,x^0)\| <\frac\varepsilon{1+L+L^2+\dots+L^{m-1}}.
$$
Estimate the value of $\| x(n_0+sm+1,n_0,x^0)\|$:
\begin{align*}
\| x(n_0+sm+1,n_0,x^0)\|
&=\|f(n_0+sm,x(n_0+sm,n_0,x^0))\|\\
&\le L\| x(n_0+sm,n_0,x^0)\|\\
&<\frac{L\varepsilon}{1+L+L^2+\dots+L^{m-1}}<\varepsilon .
\end{align*}
Similarly we obtain
\begin{gather*}
\|x(n_0+sm+2,n_0,x^0)\|<\frac{L^2\varepsilon}{1+L+L^2+\dots+L^{m-1}}
 <\varepsilon,\dots, \\
\|x(n_0+sm+m-1,n_0,x^0)\|<\frac{L^{m-1}\varepsilon}{1+L+L^2+\dots+L^{m-1}}<\varepsilon.
\end{gather*}
Hence the zero solution of system \eqref{2d1.1} is stable.
\end{proof}


\begin{theorem} \label{thm2.2}
If the conditions of the previous theorem are satisfied, and
there exists $b\in \mathcal{K}$ such that
\begin{equation}\label{2d2.4}
V(n,x)\le b(\| x\|),
\end{equation}
then the zero solution of system \eqref{2d1.1} is uniformly stable.
\end{theorem}

\begin{proof}
Under condition \eqref{2d2.4}, the value $\delta$ can be chosen
independent of $n_0$. Set
$\delta=b^{-1}\bigl(a\bigl(\frac{\varepsilon}{1+L+L^2+\dots+L^{m-1}}\bigr)\bigr)$,
where $b^{-1}$ is the inverse function of $b$. In this case
\begin{align*}
a(\| x(n_0+sm,n_0,x^0)\|)
&\le V(n_0+sm,x(n_0+sm,n_0,x^0))\le V(n_0,x^0)\\
&\le b(\| x^0\|)<b\Bigl(b^{-1}\Bigl(a\bigl(
 \frac{\varepsilon}{1+L+L^2+\dots+L^{m-1}}\bigr)\Bigr)\Bigr)\\
&=a\Bigl(\frac{\varepsilon}{1+L+L^2+\dots+L^{m-1}}\Bigr),
\end{align*}
whence it follows $\| x(n,n_0,x^0)\|<\varepsilon$ for $n\in\mathbb{N}_{n_0}$.
This completes the proof.
\end{proof}

\begin{theorem} \label{thm2.3}
 If system \eqref{2d1.1} is such that there exist
$m\in \mathbb{N}$,  functions $a,b,c\in \mathcal{K}$, and a
continuous function $V: \mathbb{Z}_+\times B_H\to\mathbb{R}$ such
that inequalities \eqref{2d2.1},\eqref{2d2.4}, and
\begin{equation}\label{2d2.5}
\Delta_mV(n,x)\leq-c(\| x\|)
\end{equation}
hold, then the zero solution of system \eqref{2d1.1} is uniformly
asymptotically stable.
\end{theorem}

\begin{proof}
Let $h\in(0,H)$ and $\eta>0$ be such that $\|x(n,n_0,x^0)\|<h$
whenever $x^0\in B_{\eta}, n_0\in \mathbb{Z}_+,
n\in\mathbb{N}_{n_0}$. The existence of such $\eta$ follows from
the uniform stability of solution \eqref{2d1.2} of system \eqref{2d1.1}. Let
$\varepsilon\in(0,\eta)$ be small enough, and let
 $\delta=\delta(\varepsilon)$ be a number chosen according to the
definition of uniform stability: if
$\| x^0\|<\delta$, then $\| x(n,n_0,x^0)\|<\varepsilon$ for
$n_0\in\mathbb{Z}_+$,  $n\ge n_0$. Take an arbitrary $x^0\in B_{\eta}$
and $n_0\in\mathbb{Z}_+$. Let us estimate the interval of discrete
time during which the trajectory $x(n,n_0,x^0)$ may lie in the
set $B_h\setminus B_{\delta(\varepsilon)}$. According to \eqref{2d2.5},
for $x\in B_h\setminus B_{\delta(\varepsilon)}$
we have $\Delta_mV\le-c(\delta(\varepsilon))$, whence we obtain
$$
V(n_0+sm,x(n_0+sm,n_0,x^0))-V(n_0,x^0)\le -sc(\delta(\varepsilon)),
$$
whence
$$
s\le\frac{V(n_0,x^0)-V(n_0+sm,x(n_0+sm,n_0,x^0))}{c(\delta(\varepsilon))}<\frac{b(h)}{c(\delta(\varepsilon))}.
$$
So choosing $N=N(\varepsilon)=[\frac{b(h)}{c(\delta(\varepsilon))}]+1$, we
obtain that there exists $s_0$ such that $s_0m\le N(\varepsilon)$ and
$x(n_0+s_0m,n_0,x^0)\in B_{\delta(\varepsilon)}$; therefore, due to the uniform
stability of the zero solution, we have $x(n,n_0,x^0)\in B_{\varepsilon}$
for $n\ge n_0+N$. This completes the proof.
\end{proof}

\begin{theorem} \label{thm2.4}
If system \eqref{2d1.1} is such that there exist
$m\in \mathbb{N}$ and a continuous bounded function
$V: \mathbb{Z}_+\times B_H\to\mathbb{R}$ such that $\Delta_mV$
is positive definite and $V$ is not negative semidefinite,
then the zero solution of system \eqref{2d1.1} is unstable.
\end{theorem}

\begin{proof}
Since $\Delta_mV$ is positive definite, there exists
a $c\in\mathcal{K}$ such that
\begin{equation}\label{2d2.5*}
\Delta_mV(n,x)\ge c(\| x\|)
\end{equation}
 holds. Let $\varepsilon\in(0,H)$ be
an arbitrary number and $n_0\in
\mathbb{Z}_+$. We shall show that for each $\delta>0$  there
exist $x^0\in B_{\delta}$ and $n\ge n_0$ such that $\| x(n,n_0,x^0)\|\ge\varepsilon$. Let $\delta$ be
a positive number as small  as desired. As
an initial value,  we take $x^0$ such that $0<\| x^0\|<\delta$
and $V(n_0,x^0)=V_0>0$. Let us show that there exists an $n\in
\mathbb{N}_{n_0}$ such that inequality $\| x(n,n_0,x^0)\|\ge\varepsilon$ holds.
Suppose the contrary:
\begin{equation}\label{2d2.6}
\| x(n,n_0,x^0)\|<\varepsilon
\end{equation}
is valid for all $n\in\mathbb{N}_{n_0}$. From \eqref{2d2.5*}
it follows that $V(n_0+m,x(n_0+m,n_0,x^0))\ge
V_0+c(\| x^0\|)$, $V(n_0+2m,x(n_0+2m,n_0,x^0))\ge
V_0+2c(\| x^0\|)$, $\dots$,
\begin{equation}\label{2d2.7}
V(n_0+sm,x(n_0+sm,n_0,x^0))\ge V_0+sc(\| x^0\|).
\end{equation}
\end{equation}
Inequality \eqref{2d2.7} contradicts the boundedness of  $V$ in
$\mathbb{Z}_+\times B_H$. Thus the assumption that
\eqref{2d2.6} is valid leads to a contradiction, which
completes the proof.
\end{proof}

\begin{theorem} \label{thm2.5}
If system \eqref{2d1.1} is such that there exist
$m\in \mathbb{N}$, positive constants $\alpha_1, \alpha_2$, and
a function $V(n,x)$,  bounded in $\mathbb{Z}_+\times B_H$,
such that $\Delta_mV$ has the form
\begin{equation}\label{2d2.8}
\Delta_mV=\alpha_1V(n,x)+\alpha_2W(n,x)
\end{equation}
where $W$ is positive semidefinite and $V$ is not negative
semidefinite, then the zero solution of system \eqref{2d1.1}
is unstable.
\end{theorem}

\begin{proof}
 From \eqref{2d2.8} it follows
\begin{equation}\label{2d2.9}
\Delta_mV(n,x)\ge\alpha_1 V(n,x).
\end{equation}
Let $0<\varepsilon<H$ and $n_0\in \mathbb{Z}_+$. Choose the initial
value $x^0$ such that $\| x^0\|<\delta$ and $V(n_0,x^0)=v_0>0$,
where $\delta$ is a positive number, as small  as desired.
Let us show that there exists $n>n_0$ such that
$\| x(n,n_0,x^0)\|\ge\varepsilon$. Suppose the contrary:
\begin{equation}\label{2d2.10}
\| x(n,n_0,x^0)\|<\varepsilon
\end{equation}
holds for all $n\in \mathbb{N}_{n_0}$. Inequality \eqref{2d2.9}
is true for all $n\in \mathbb{N}_{n_0}$, and since $V(n_0,x^0)>0$,
the value $\Delta_mV$ is positive at each point $n=n_0+sm$, $s\in\mathbb{Z}_+$.
Therefore the sequence $\{V(n_0+sm,x(n_0+sm,n_0,x^0))\}_{s=0}^{\infty}$
 is increasing. From \eqref{2d2.9} we find that
$$
\Delta_mV(n_0+sm,x(n_0+sm,n_0,x^0))\ge\alpha_1
V(n_0+sm,x(n_0+sm,n_0,x^0))\ge \alpha_1v_0,
$$
 hence $V(n_0+sm,x(n_0+sm,n_0,x^0))\ge \alpha_1v_0s$. But this is
impossible because of the boundedness of the function $V$ in
$\mathbb{Z}_+\times B_{\varepsilon}$.
The obtained contradiction shows that assumption \eqref{2d2.10}
is false. This completes the proof.
\end{proof}

\section{Lyapunov functions for linear autonomous systems}

Side by side with system \eqref{2d1.5}, let us consider the
system of linear difference equations
\begin{equation}\label{2d3.1}
x(n+1)=Ax(n),
\end{equation}
whence we obtain
\begin{equation}\label{2d3.1*}
x(n+m)=A^mx(n).
\end{equation}
To study the stability properties of the zero solution of system
\eqref{2d3.1}, Elaydi \cite{El:05,El:08} suggested using
quadratic forms
\begin{equation}\label{2d3.2}
V(x)=\sum_{\substack{i_1+i_2+\dots+i_k=2, \\ i_j\ge 0 \,
(j=1,\dots,k)} } b_{i_1,i_2,\dots,i_k}x_1^{i_1}x_2^{i_2}\dots
x_k^{i_k}
\end{equation}
as Lyapunov functions.
Let
\begin{equation}\label{2d3.3}
W(x)=\sum_{\substack{i_1+i_2+\dots+i_k=2, \\ i_j\ge 0
(j=1,\dots,k)} } q_{i_1,i_2,\dots,i_k}x_1^{i_1}x_2^{i_2}\dots
x_k^{i_k}
\end{equation}
be an arbitrary real quadratic form. Let us clarify
the conditions under which there exists a quadratic
form \eqref{2d3.2} such that
\begin{equation}\label{2d3.4}
\Delta_mV(x)=V(A^mx)-V(x)=W(x).
\end{equation}

\begin{theorem} \label{thm3.1}
 If the roots $\mu_1,\mu_2,\dots,\mu_k$ of the equation
\begin{equation}\label{2d3.5}
\det(A^m-\mu I_k)=0
\end{equation}
are such that
\begin{equation}\label{2d3.6}
\mu_i\mu_j\neq1\quad (i=1,\dots,k;   j=1,\dots,k),
\end{equation}
then for any quadratic form \eqref{2d3.3}
there exists a unique quadratic form
\eqref{2d3.2} such that equality \eqref{2d3.4} holds.
\end{theorem}

\begin{proof}
 Denote by $N$ the number of terms of a quadratic form
 in $x_1,x_2,\dots,x_k$. It is
obvious that this number is equal to the number of different
systems of nonnegative integers  $i_1, i_2,\dots,i_k$ constrained
by the condition $i_1+i_2+\dots+i_k=2$. This number is equal to
$$
N=\frac{k(k+1)}{2}.
$$
Let us enumerate the coefficients of forms $V(x)$ and $W(x)$
and denote them by letters $b_1, b_2,\dots,b_N$ and $q_1,
q_2,\dots, q_N$ respectively:
\begin{gather*}
b_{2,0,\dots,0}=b_1,\quad b_{1,1,\dots,0}=b_2,\quad\dots,\quad
b_{1,0,\dots,1}=b_k, \\
b_{0,2,\dots,0}=b_{k+1},\quad b_{0,1,1,\dots,0}=b_{k+2},\dots,
b_{0,1,\dots,1}=b_{2k-1},\dots, \\
b_{0,0,\dots,2,0}=b_{N-2},\quad b_{0,0,\dots,1,1}=b_{N-1},\quad
b_{0,0,\dots,0,2}=b_N, \\
q_{2,0,\dots,0}=q_1,\quad q_{1,1,\dots,0}=q_2,\quad\dots,\quad
q_{1,0,\dots,1}=q_k, \\
q_{0,2,\dots,0}=q_{k+1},\quad q_{0,1,1,\dots,0}=q_{k+2},\dots,
q_{0,1,\dots,1}=q_{2k-1},\dots, \\
q_{0,0,\dots,2,0}=q_{N-2},\quad q_{0,0,\dots,1,1}=q_{N-1},\quad
q_{0,0,\dots,0,2}=q_N.
\end{gather*}
Denote  $b=(b_1,b_2,\dots,b_N)^T$, $q=(q_1,q_2,\dots,q_N)^T$. The
left-hand and the right-hand sides of equality \eqref{2d3.4}
represent quadratic forms  with respect to
$x_1,x_2,\dots,x_k$. Equating coefficients corresponding to
products
 $x_1^{i_1}x_2^{i_2}\dots x_k^{i_k}$, we obtain the system of
linear equations with respect to
 $b_1, b_2, \dots, b_N$. This system has the form
\begin{equation}\label{2d3.7}
Rb=q,
\end{equation}
where $R=(r_{ij})_{i,j=1}^N$; elements $r_{ij}$ of the matrix $R$ can
be expressed via the elements of the matrix $A$. System \eqref{2d3.7} has a
unique solution for any vector $q$ if and only if
\begin{equation}\label{2d3.8}
\det R\neq0.
\end{equation}
Let us show that condition \eqref{2d3.8} holds if inequalities \eqref{2d3.6} are
valid.  To do this, let us introduce new variables
$z=(z_1,\dots,z_k)^T$ by the linear transformation
 $x=Gz$ with a nonsingular matrix $G$ such that in the new variables system \eqref{2d3.1*} has the form
\begin{equation}\label{2d3.9}
z(n+m)=Pz(n),
\end{equation}
where  $P=(p_{ij})_{i,j=1}^k$; $p_{ii}$ are the eigenvalues of
the matrix $A^m$,  $p_{i,i+1}$ are equal to 0 or 1, and all
other elements of the matrix $P$ are equal to zero. According to
\cite[Theorem 3.23]{El:05},  such a transformation does exist. In
the general case, if the matrix  $A^m$ has complex eigenvalues, the
variables  $z_1,\dots,z_k$ and the elements of the matrix  $G$ are also
complex. Polynomials \eqref{2d3.2} and \eqref{2d3.3} have the
following forms in variables $z_1,z_2,\dots,z_k$:
\begin{equation}\label{2d3.10}
V(z)=\sum_{\substack{i_1+i_2+\dots+i_k=2, \\ i_j\ge 0
(j=1,\dots,k)}}c_{i_1,i_2,\dots,i_k}z_1^{i_1}z_2^{i_2}\dots
z_k^{i_k},
\end{equation}
\begin{equation}\label{2d3.11}
W(z)=\sum_{\substack{i_1+i_2+\dots+i_k=2, \\ i_j\ge 0
(j=1,\dots,k)}}d_{i_1,i_2,\dots,i_k}z_1^{i_1}z_2^{i_2}\dots
z_k^{i_k}.
\end{equation}
The quadratic form  $W(z)$ is real, hence in relation \eqref{2d3.11},
side by side with any nonreal summand
$d_{i_1,i_2,\dots,i_k}z_1^{i_1}z_2^{i_2}\dots z_k^{i_k}$ there is
the summand
  $d_{i_1^*,i_2^*,\dots,i_k^*}z_1^{i_1^*}z_2^{i_2^*}\dots z_k^{i_k^*}$
such that
$$
d_{i_1^*,i_2^*,\dots,i_k^*}z_1^{i_1^*}z_2^{i_2^*}\dots
z_k^{i_k^*}= \overline{d}_{i_1,i_2,\dots,i_k}
\overline{z}_1^{i_1}\overline{z}_2^{i_2}\dots \overline{z}_k^{i_k}
$$
 where the overline denotes complex conjugation.
Enumerating $d_{i_1,\dots, i_k}$ and $c_{i_1,\dots, i_k}$  as follows
\begin{gather*}
d_{2,0,\dots,0}=d_1,\quad d_{1,1,\dots,0}=d_2,\quad\dots,\quad
d_{1,0,\dots,1}=d_k,\\
d_{0,2,\dots,0}=d_{k+1},\quad d_{0,1,1,\dots,0}=d_{k+2},\dots,
d_{0,1,\dots,1}=d_{2k-1},\dots, \\
d_{0,0,\dots,2,0}=d_{N-2},\quad d_{0,0,\dots,1,1}=d_{N-1},\quad
d_{0,0,\dots,0,2}=d_N, \\
c_{2,0,\dots,0}=c_1,\quad c_{1,1,\dots,0}=c_2,\quad\dots,\quad
c_{1,0,\dots,1}=c_k, \\
c_{0,2,\dots,0}=c_{k+1},\quad c_{0,1,1,\dots,0}=c_{k+2},\dots,
c_{0,1,\dots,1}=c_{2k-1},\dots, \\
c_{0,0,\dots,2,0}=c_{N-2},\quad c_{0,0,\dots,1,1}=c_{N-1},\quad
c_{0,0,\dots,0,2}=c_N,
\end{gather*}
and denoting $c=(c_1,\dots,c_N)^T$, $d=(d_1,\dots, d_N)^T$, let us
rewrite equality \eqref{2d3.4} in variables $z_1,\dots,z_k$:
\begin{equation}\label{2d3.12}
V(Pz)-V(z)=W(z).
\end{equation}
The left-hand and right-hand sides of equality  \eqref{2d3.12}
represent quadratic forms with respect to $z_1,\dots,z_k$.
 Equating the coefficients corresponding
to the products $z_1^2, z_1z_2,\dots,z_1z_k$,
$z_2^2$,$\dots,z_{k-1}z_k, z_k^2$, we obtain the system of linear
algebraic equations  with respect to $c_1,\dots, c_N$, which
we write in the matrix form
\begin{equation}\label{2d3.13}
Uc=d,
\end{equation}
where $U=(u_{ij})_{i,j=1}^N$.  The matrix $U$ has the triangular form
$$ {\footnotesize
U=
\begin{pmatrix}
p^2_{11}-1 & 0 & \dots & 0 & 0 & \dots  & 0 & 0 \\
2p_{11}p_{12} & p_{11}p_{22}-1 & \dots & 0 & 0 & \dots  & 0 & 0 \\
\dots & \dots & \dots & \dots & \dots & \dots & \dots & \dots \\
0 & 0 & \dots & p_{11}p_{kk}-1 & 0 & \dots  & 0 & 0 \\
p_{12}^2 & p_{12}p_{22} & \dots & 0 & p_{22}^2-1 & \dots  & 0 & 0 \\
\dots & \dots & \dots & \dots & \dots & \dots & \dots & \dots \\
0 & 0 & \dots & 0 & 0 & \dots & p_{k-1,k-1}p_{kk}-1 & 0\\
0 & 0 & \dots & 0 & 0 & \dots & p_{k-1,k}p_{kk} & p_{kk}^2-1
\end{pmatrix}
}$$
System \eqref{2d3.13} has a unique solution if and only if
$\det U\neq0$.
Taking into account that $u_{ij}=0$  for $j>i$, we obtain that
$\det U$
is equal to the product of diagonal elements of the matrix $U$:
$$
\det U=\prod_{1\le i\le j\le k}(p_{ii}p_{jj}-1).
$$
Bearing in mind that  $p_{ii}=\mu_i$ and returning in \eqref{2d3.12}
from variables $z_1,\dots,z_k$ to variables $x_1,\dots,x_k$ by means
of the transformation $z=G^{-1}x$, we obtain that a quadratic form
$V$ satisfying  \eqref{2d3.4} exists
and is unique if and only if $\mu_i\mu_j\neq1$ ($i,j=1,\dots,k$).
The proof is complete.
\end{proof}

In the case $m=1$ we have the following corollary.

\begin{corollary} \label{coro3.1}
 If the eigenvalues $\lambda_1,\dots,\lambda_k$ of the matrix $A$
are such that
\begin{equation}\label{2d3.15*}
\lambda_i\lambda_j\neq1\quad(i=1,\dots,k;  j=1,\dots,k),
\end{equation}
then for any quadratic form \eqref{2d3.3} there exists a unique quadratic form
\eqref{2d3.2} such that
\begin{equation}\label{2d3.15**}
\Delta V=V(Ax)-V(x)=W(x)\,.
\end{equation}
\end{corollary}
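In matrix notation, writing $V(x)=x^TPx$ and $W(x)=x^TQx$ with
symmetric matrices $P$ and $Q$, equality \eqref{2d3.4} becomes the
Stein equation $(A^m)^TPA^m-P=Q$, which is a linear system in the
entries of $P$ equivalent to system \eqref{2d3.7}. The following
sketch (an illustration of ours, not part of the argument; it assumes
Python with NumPy, and the function name is our choice) solves this
system by vectorization:
\begin{verbatim}
import numpy as np

def solve_stein(A, Q, m=1):
    # Solve (A^m)^T P A^m - P = Q for symmetric P (Theorem 3.1).
    # A unique solution exists iff mu_i * mu_j != 1 for all
    # eigenvalues mu_i, mu_j of A^m; otherwise M below is singular.
    B = np.linalg.matrix_power(A, m)
    k = B.shape[0]
    M = np.kron(B.T, B.T) - np.eye(k * k)
    P = np.linalg.solve(M, Q.reshape(-1)).reshape(k, k)
    return (P + P.T) / 2   # symmetrize (rounding errors only)

A = np.array([[0.5, 0.2],
              [0.0, -0.8]])         # rho(A) < 1
P = solve_stein(A, np.eye(2))       # W(x) = x_1^2 + x_2^2
print(np.linalg.eigvalsh(P))        # both eigenvalues are negative
\end{verbatim}
In agreement with Theorem \ref{thm3.2} below, for $\rho(A)<1$ and a
positive definite $W$ the computed form $V(x)=x^TPx$ is negative
definite.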



\begin{theorem} \label{thm3.2}
If for some $m\in\mathbb{N}$, the roots $\mu_1,\dots,
\mu_k$ of characteristic equation \eqref{2d3.5} satisfy conditions
\begin{equation}\label{2d3.14}
|\mu_i|<1\quad(i=1,\dots,k),
\end{equation}
then for any positive definite quadratic form $W(x)$ there exists
 a unique negative definite quadratic form $V(x)$ such that
$$
\Delta_mV(x)=W(x).
$$
\end{theorem}

\begin{proof}
According to \cite{El:05}, the sets $\{\mu_1,\mu_2,\dots,\mu_k\}$ and
$\{\lambda_1^m,\lambda_2^m,\dots,\lambda_k^m\}$ are identical, hence
from \eqref{2d3.14} it follows
\begin{equation}\label{2d3.15}
|\lambda_i|<1\quad(i=1,\dots,k).
\end{equation}
Let $W(x)$ be an arbitrary positive definite quadratic form.
If \eqref{2d3.14} holds, then \eqref{2d3.6} is valid. Therefore,
there exists a unique quadratic form $V(x)$ such that \eqref{2d3.4}
holds.
Let us show that $V(x)$ is negative definite. Suppose the contrary:
there is a nonzero $x^0$ such that $V(x^0)\ge0$. In this case,
we have
$V(A^mx^0)=V(x^0)+W(x^0)>0$; since also $\Delta_mV(x)=W(x)$ is
positive definite, according to
Theorem \ref{thm2.4},
the zero solution of system
\eqref{2d3.1} is unstable. But on the other hand, \eqref{2d3.15}
and Theorem \ref{thm1.1} imply that the zero
solution of system \eqref{2d3.1} is asymptotically stable.
The obtained contradiction completes the proof.
\end{proof}

\begin{theorem} \label{thm3.3}
 If for some $m\in\mathbb{N}$, the roots $\mu_1,\dots,
\mu_k$ of the characteristic equation \eqref{2d3.5} are such that
\begin{equation}\label{2d3.16}
\rho(A)>1
\end{equation}
and conditions \eqref{2d3.6} hold, then for any positive definite
quadratic form $W(x)$ there
exists a unique quadratic form $V(x)$ satisfying \eqref{2d3.4},
 and this form is not negative semidefinite (in particular, not
 negative definite).
\end{theorem}

\begin{proof}
 Let $W(x)$ be a positive definite quadratic form. By virtue of
Theorem \ref{thm3.1}, there exists a unique quadratic form $V(x)$ which
satisfies  \eqref{2d3.4}. To complete the proof of Theorem \ref{thm3.3},
all we need is to show that
$V(x)$ can be neither negative definite nor negative semidefinite. If
$V(x)$ is negative definite, then by virtue of Theorem \ref{thm2.3}, the zero solution
of system \eqref{2d3.1} is asymptotically stable, and therefore $\rho(A)<1$, which
contradicts \eqref{2d3.16}. On the other hand, $V(x)$ cannot be negative semidefinite
regardless of the values of $|\mu_i|$. To verify this, consider any solution of system
\eqref{2d3.1} with an initial condition $x^0\neq0$ at which $V$ vanishes: $V(x^0)=0$.
Then $V(A^mx^0)=W(x^0)>0$, which contradicts
the negative semidefiniteness of $V$. The obtained contradiction
completes the proof.
\end{proof}

\begin{remark} \label{rmk3.1} \rm
 Conditions \eqref{2d3.6} (or \eqref{2d3.15*} for $m=1$)
 in Theorem \ref{thm3.3} are essential because if at least
one of these conditions is not valid, then, in general,
Theorem \ref{thm3.3} is not true.
\end{remark}

To show this, let us consider the system $x(n+1)=Ax(n)$, where
 $A=\begin{pmatrix}3&0\\0&1\end{pmatrix}$.
Here $\rho(A)=3>1$; for all $m\in\mathbb{N}$ we have
$\mu_1=3^m, \mu_2=1$. Conditions \eqref{2d3.6}
are not satisfied because $\mu_2\cdot\mu_2=1$.
For {\bf any} quadratic form $V=ax_1^2+bx_1x_2+cx_2^2$
we obtain
$$
V(A^mx)-V(x)=a\bigl(3^{2m}-1\bigr)x_1^2+b\bigl(3^m-1\bigr)x_1x_2.
$$
This form cannot be positive definite; so there is no quadratic
form $V$ such that \eqref{2d3.4} holds.
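In the numerical sketch given after Corollary \ref{coro3.1}, this
degeneracy shows up as an exactly singular linear system:
\begin{verbatim}
A = np.array([[3.0, 0.0],
              [0.0, 1.0]])
solve_stein(A, np.eye(2))   # raises LinAlgError: the matrix M is
                            # singular because mu_2 * mu_2 = 1
\end{verbatim}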


Consider now the case when at least one of conditions \eqref{2d3.6}
is not satisfied but $\rho(A)>1$. Let us show that in this case
the zero solution of system \eqref{2d3.1} is also
unstable.

\begin{theorem} \label{thm3.4}
 If the matrix $A$ in system \eqref{2d3.1} is such that $\rho(A)>1$
and at least one of conditions \eqref{2d3.6} is not satisfied,
then for any positive definite quadratic form $W(x)$ there exist
a quadratic form $V(x)$ and positive
numbers $\alpha_1, \alpha_2$ such that $\Delta_m V=\alpha_1V+\alpha_2W$ holds,
and $V(x)$ is not negative semidefinite.
\end{theorem}

\begin{proof}
 Side by side with system \eqref{2d3.1}, let us consider the system
\begin{equation}\label{2d3.17}
x(n+1)=\alpha Ax(n)
\end{equation}
where $\alpha>0$. From system \eqref{2d3.17} we obtain
\begin{equation}\label{2d3.17*}
x(n+m)=\alpha^m A^mx(n).
\end{equation}
 The roots $\sigma_1,  \sigma_2, \dots, \sigma_k$ of its characteristic equation
$$
\det(\alpha^m A^m-\sigma I_k)=0
$$
continuously depend on $\alpha$, and for $\alpha=1$ they coincide with
the roots $\mu_1, \mu_2, \dots, \mu_k$ of the characteristic
equation \eqref{2d3.5} of system \eqref{2d3.1*}.
Moreover, there exist values of $\alpha$ close to the value $\alpha=1$
such that the  $\sigma_i$ satisfy the inequalities
$$
\sigma_i\sigma_j\neq1\quad (i,j=1,\dots,k)
$$
and $\rho(\alpha^m A^m)>1$. Let $W(x)$ be an arbitrary positive
definite quadratic form. According to
Theorem \ref{thm3.3}, there exists a unique quadratic form $V(x)$ such that
\begin{equation}\label{2d3.18}
\Delta_m V(x)\big|_{\eqref{2d3.17}}=V(\alpha^m A^mx)-V(x)=W(x),
\end{equation}
and $V(x)$ is not negative semidefinite. On the other hand,
it is easy to check that
\begin{equation}\label{2d3.19}
\begin{aligned}
\Delta_m V(x)\big|_{\eqref{2d3.17}}
&=(V(\alpha^m A^mx)-V(\alpha^m x))+(V(\alpha^m x)-V(x))\\
&= \alpha^{2m}\Delta_m V(x)\big|_{\eqref{2d3.1}}
+(\alpha^{2m}-1)V(x).
\end{aligned}
\end{equation}
Comparing \eqref{2d3.18} and \eqref{2d3.19} we obtain
$$
\Delta_m V(x)\big|_{\eqref{2d3.1}}=\alpha_1 V(x)+\alpha_2 W(x),
\quad\text{where}\quad \alpha_1=\frac{1-\alpha^{2m}}{\alpha^{2m}},
\alpha_2=\frac1{\alpha^{2m}}.
$$
Choosing $0<\alpha<1$ we have  $\alpha_1>0$, $\alpha_2>0$.
This completes the proof.
\end{proof}

Now we can formulate the well-known criterion of instability
by linear approximation
(see for example  \cite{Ag:2000}) as the following corollary of
the above theorems.

\begin{corollary} \label{coro3.2}
 From Theorems \ref{thm2.5}, \ref{thm3.3}, and \ref{thm3.4}
 it follows that if $\rho(A)>1$,
then the trivial solution of system \eqref{2d3.1} is unstable.
\end{corollary}

\section{Critical case $\lambda=-1$}

In this section, we consider the critical case when one root of the
characteristic equation \eqref{2d1.7}  is equal to minus one; i.e.,
 we shall assume that \eqref{2d1.7} has one root
$\lambda_1=-1$, and the other roots satisfy the conditions
$|\lambda_i| <1$ ($i=2, 3, \dots, k$).
The function $X=(X_1,\dots,X_k)^T$ is supposed to be holomorphic,
and its expansion into a Maclaurin series begins with terms of
the second order of smallness. So system
\eqref{2d1.5} takes the form
\begin{equation} \label{2d4.1}
\begin{split}
x_j(n+1)&=a_{j1}x_1(n)+a_{j2}x_2(n)+\dots+a_{jk}x_k(n)\\
&\quad +X_j(x_1(n),\dots,x_k(n))\quad
(j=1,\dots,k).
\end{split}
\end{equation}
Henceforth we shall consider the critical case when the
characteristic equation of the system of the first approximation
\begin{equation}\label{2d4.2}
x_j(n+1)=a_{j1}x_1(n)+a_{j2}x_2(n)+\dots+a_{jk}x_k(n)\quad
(j=1,\dots,k)
\end{equation}
has one root equal to minus one, while the other $k-1$ roots
have moduli less than one.

 From \eqref{2d4.1} we obtain
\begin{equation} \label{2d4.1'}
\begin{split}
x_j(n+2)&=A_{j1}x_1(n)+A_{j2}x_2(n)+\dots+A_{jk}x_k(n)\\
&\quad +X^*_j(x_1(n),\dots,x_k(n))\quad
(j=1,\dots,k).
\end{split}
\end{equation}
Here $\mathcal A=(A_{ij})_{i,j=1}^k=A^2$ and
$X^*=(X^*_1,\dots,X^*_k)^T$ is a vector all of
whose components are power series in the components of $x$
lacking constant and first
degree terms and convergent for $\| x\|$ sufficiently small.
Let us introduce in system \eqref{2d4.2} the variable
$y$  instead of one of the variables $x_j$ by means of the substitution
\begin{equation}\label{2d4.3}
y=\beta_1x_1+\beta_2x_2+\dots+\beta_kx_k,
\end{equation}
where $\beta_j$ ($j=1,\dots,k$) are constants that we choose
so that
\begin{equation}\label{2d4.4}
y(n+1)=-y(n).
\end{equation}
 From \eqref{2d4.3} and  \eqref{2d4.4} we obtain
\begin{align*}
y(n+1)&= \beta_1 x_1(n+1)+\beta_2x_2(n+1)+\dots+\beta_kx_k(n+1)\\
&= \beta_1[a_{11}x_1(n)+a_{12}x_2(n)+\dots+a_{1k}x_k(n)]\\
&\quad +\beta_2[a_{21}x_1(n)+a_{22}x_2(n)+\dots+a_{2k}x_k(n)]+\dots\\
&\quad +\beta_k[a_{k1}x_1(n)+a_{k2}x_2(n)+\dots+a_{kk}x_k(n)]\\
&= -(\beta_1 x_1(n)+\beta_2x_2(n)+\dots+\beta_kx_k(n)).
\end{align*}
Equating the coefficients corresponding to $x_j(n)$ $(j=1,2,\dots,k)$,
we obtain the system of linear homogeneous algebraic equations
with respect to  $\beta_j$ ($j=1,\dots,k$):
\begin{equation}\label{2d4.5}
a_{1j}\beta_1+a_{2j}\beta_2+\dots+a_{kj}\beta_k=-\beta_j,
\end{equation}
or in the matrix form
$$
(A^T+I_k)\beta=0,
$$
where $\beta=(\beta_1,\dots,\beta_k)^T$. Since the equation
$\det(A^T-\lambda I_k)=0$ has the root
 $\lambda=-1$, the determinant of system  \eqref{2d4.5} is equal
to zero. Therefore
this system has a solution in which not all constants
$\beta_1,\dots,\beta_k$ are equal
to zero. For definiteness, let us assume that $\beta_k\neq0$.
Then we can use the variable
$y$ instead of the variable $x_k$; the other variables
$x_j$ $(j=1,\dots,k-1)$ remain unchanged. Denoting
$$
\nu_{ji}=a_{ji}-\frac{\beta_i}{\beta_k}a_{jk}, \quad
\nu_j=\frac{a_{jk}}{\beta_k}\quad (i,j=1,2,\dots,k-1),
$$
we transform equations  \eqref{2d4.2} to the form
\begin{gather}
\begin{aligned}
x_j(n+1)&= \nu_{j1}
x_1(n)+\nu_{j2}x_2(n)+\dots+\nu_{j,k-1}x_{k-1}(n)+\nu_jy(n)      \\
& \quad(j=1,\dots,k-1),
\end{aligned} \label{2d4.6}\\
y(n+1)= -y(n),                   \label{2d4.7}
\end{gather}
where $\nu_{ji}$ and  $\nu_j$ are constants.
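For illustration (a numerical sketch of ours, again assuming NumPy),
the vector $\beta$ may be computed as a null vector of $A^T+I_k$, after
which the coefficients $\nu_{ji}$ and $\nu_j$ follow directly from the
formulas above:
\begin{verbatim}
def reduce_critical(A):
    # beta: a null vector of A^T + I, which exists because
    # lambda = -1 is an eigenvalue of A (and hence of A^T).
    k = A.shape[0]
    _, _, Vt = np.linalg.svd(A.T + np.eye(k))
    beta = Vt[-1]                  # singular vector for sigma = 0
    assert abs(beta[-1]) > 1e-12   # beta_k != 0, as assumed above
    nu = np.array([[A[j, i] - beta[i] / beta[-1] * A[j, -1]
                    for i in range(k - 1)] for j in range(k - 1)])
    nu_y = A[:k - 1, -1] / beta[-1]
    return beta, nu, nu_y
\end{verbatim}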

The characteristic equation of system  \eqref{2d4.6} and
 \eqref{2d4.7} reduces to two equations:
$\lambda+1=0$
and
\begin{equation}\label{2d4.*}
\det(\Upsilon-\lambda I_{k-1})=0,
\end{equation}
where  $\Upsilon=(\nu_{ij})_{i,j=1}^{k-1}$.   Since the characteristic
equation is invariant with respect to linear transformations and
in this case has, besides the root $\lambda=-1$,
$k-1$ roots whose moduli are less than one, equation
 \eqref{2d4.*} has  $k-1$
roots, and the moduli of all these roots are less than one. Denote
\begin{equation}\label{2d4.9}
x_j=y_j+l_jy\quad(j=1,\dots,k-1),
\end{equation}
 where $l_j$ ($j=1,\dots,k-1$) are constants which we choose such
that the right-hand sides of system  \eqref{2d4.6} do not contain $y(n)$.
In this notation, taking into account
 \eqref{2d4.7}, system  \eqref{2d4.6} takes the form
\begin{align*}
y_j(n+1)&=\nu_{j1}
y_1(n)+\nu_{j2}y_2(n)+\dots+\nu_{j,k-1}y_{k-1}(n)\\
&\quad +[\nu_{j1} l_1+\nu_{j2}l_2 +\dots+(\nu_{jj}+1)l_j+\dots
 +\nu_{j,k-1}l_{k-1}+\nu_j]y(n),
\end{align*}
($j=1,\dots,k-1$).
We choose constants $l_j$ such that
\begin{equation}\label{2d4.10}
\nu_{j1}
l_1+\nu_{j2}l_2+\dots+(\nu_{jj}+1)l_j+\dots+\nu_{j,k-1}l_{k-1}
=-\nu_j\quad(j=1,\dots,k-1).
\end{equation}
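In matrix form, system \eqref{2d4.10} reads
$(\Upsilon+I_{k-1})l=-(\nu_1,\dots,\nu_{k-1})^T$, so in the sketch
above the constants $l_j$ are obtained by one more line:
\begin{verbatim}
# with nu, nu_y from reduce_critical(A) and k = A.shape[0]:
l = np.linalg.solve(nu + np.eye(k - 1), -nu_y)
\end{verbatim}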
Since minus one is not a root of the characteristic equation  \eqref{2d4.*},
the determinant
of system  \eqref{2d4.10} is not equal to zero; therefore this
system has a unique solution
$(l_1,\dots,l_{k-1})$. As a result of the change  \eqref{2d4.9},
system  \eqref{2d4.6} and \eqref{2d4.7} transforms to the form
\begin{equation} \label{2d4.13*}
\begin{gathered}
\begin{aligned}
y_j(n+1)&= \nu_{j1} y_1(n)+\nu_{j2}y_2(n)+\dots
+\nu_{j,k-1}y_{k-1}(n)\\
&\quad(j=1,\dots,k-1),
\end{aligned} \\
y(n+1)= -y(n),
\end{gathered}
\end{equation}
and the nonlinear system  \eqref{2d4.1} takes the form
\begin{equation} \label{2d4.11}
\begin{gathered}
\begin{aligned}
y_j(n+1)&= \nu_{j1} y_1(n)+\nu_{j2}y_2(n)+\dots+\nu_{j,k-1}y_{k-1}(n) \\
&\quad +  \Psi_j(y_1(n),\dots,y_{k-1}(n),y(n))
\quad(j=1,\dots,k-1),
\end{aligned} \\
y(n+1)= -y(n)+\Psi(y_1(n),\dots,y_{k-1}(n),y(n)),
\end{gathered}
\end{equation}
where $\Psi_j$ ($j=1,\dots,k-1$) and $\Psi$ are holomorphic
functions of  $y_1,\dots,y_{k-1},y$ whose expansions
in power series lack constant and first degree terms:
\begin{gather*}
\Psi_j(y_1,y_2,\dots,y_{k-1},y)
= \sum_{i_1+i_2+\dots+i_{k-1}+i_k=2}^\infty
\psi^{(j)}_{i_1,i_2,\dots,i_{k-1},i_k} y_1^{i_1}y_2^{i_2}\dots
y_{k-1}^{i_{k-1}}y^{i_k} \\
(j=1,\dots,k-1),\\
\Psi(y_1,y_2,\dots,y_{k-1},y)
= \sum_{i_1+i_2+\dots+i_{k-1}+i_k=2}^\infty
\psi_{i_1,i_2,\dots,i_{k-1},i_k}y_1^{i_1}y_2^{i_2}\dots
y_{k-1}^{i_{k-1}}y^{i_k}.
\end{gather*}
By  \eqref{2d4.9} it is clear that the problem of the stability of
the trivial solution of system   \eqref{2d4.1} is equivalent to the problem
of stability of the zero solution of system   \eqref{2d4.11}.
In what follows, form  \eqref{2d4.11} will be basic for the study of the stability
of the zero solution in the case when this problem can be solved by means of the terms
of the first and second degrees in the expansions of $\Psi_j$
($j=1,\dots,k-1$) and $\Psi$.

 From equations  \eqref{2d4.11} we find
\begin{gather}
\begin{aligned}
y_j(n+2)&= c_{j1}y_1(n)+c_{j2}y_2(n)+\dots+c_{j,k-1}y_{k-1}(n)\\
&\quad + Y_j(y_1(n),\dots,y_{k-1}(n),y(n)) \quad
 (j=1,\dots,k-1),
\end{aligned} \label{2d4.6'}\\
y(n+2)= y(n)+Y(y_1(n),\dots,y_{k-1}(n),y(n)), \label{2d4.7'}
\end{gather}
where $c_{ij}=\sum_{s=1}^{k-1}\nu_{is}\nu_{sj}$;
$Y_j$ ($j=1,\dots,k-1$) and $Y$ are holomorphic functions of
$y_1,\dots$,$y_{k-1},y$ whose expansions
in power series lack constant and first degree terms:
\begin{gather*}
Y_j(y_1,y_2,\dots,y_{k-1},y)
= \sum_{i_1+i_2+\dots+i_{k-1}+i_k=2}^\infty
v^{(j)}_{i_1,i_2,\dots,i_{k-1},i_k} y_1^{i_1}y_2^{i_2}\dots
y_{k-1}^{i_{k-1}}y^{i_k} \\
 (j=1,\dots,k-1),\\
Y(y_1,y_2,\dots,y_{k-1},y) = \sum_{i_1+i_2+\dots+i_{k-1}+i_k=2}^\infty
v_{i_1,i_2,\dots,i_{k-1},i_k}y_1^{i_1}y_2^{i_2}\dots
y_{k-1}^{i_{k-1}}y^{i_k}.
\end{gather*}



\begin{theorem} \label{thm4.1}
If the function $Y$ is such that
the coefficient $v_{0,0,\dots,0,2}$ is not equal to zero, then
the solution
$$
y_1=0,\quad y_2=0,\quad \dots,\quad y_{k-1}=0,\quad y=0
$$
of system  \eqref{2d4.11} is unstable.
\end{theorem}

\begin{proof}  Let
$$
V_1(y_1,\dots,y_{k-1})=\sum_{s_1+s_2+\dots+s_{k-1}=2}B_{s_1,s_2,
\dots,s_{k-1}} y_1^{s_1}y_2^{s_2}\dots y_{k-1}^{s_{k-1}}
$$
be a quadratic form such that
\begin{equation} \label{2d4.12}
\begin{aligned}
\Delta_2V_1\big|_{\eqref{2d4.13*}}
&= V_1(c_{11}y_1+\dots +c_{1,k-1}y_{k-1},\dots,c_{k-1,1}y_1+\dots\\
&\quad +c_{k-1,k-1}y_{k-1}) -V_1(y_1,\dots,y_{k-1})\\
&=y_1^2+y_2^2+\dots+y_{k-1}^2.
\end{aligned}
\end{equation}
Since the moduli of all eigenvalues of the matrix
$\mathcal C=(c_{ij})_{i,j=1}^{k-1}$
are less than one, according to  \cite[Theorem 4.30]{El:05}
such a quadratic form exists, is unique, and is negative definite.
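In terms of the numerical sketches of Section 3 (again an illustration
of ours), the matrix of such a form $V_1$ may be computed as
\begin{verbatim}
V1_matrix = solve_stein(nu, np.eye(k - 1), m=2)  # since (c_ij) = nu @ nu
\end{verbatim}
because the two-step linear dynamics of \eqref{2d4.13*} is given by the
matrix $\mathcal C=\Upsilon^2$.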
Consider the Lyapunov function
\begin{equation}\label{2d4.13}
V(y_1,\dots,y_{k-1},y)=V_1(y_1,\dots,y_{k-1})+\alpha y,
\end{equation}
where $\alpha$ is a constant. Let us find $\Delta_2 V$:
\begin{align*}
\Delta_2 V\big|_{\eqref{2d4.11}}
&= \sum_{s_1+\dots+s_{k-1}=2}B_{s_1,\dots,s_{k-1}}
\{[c_{11}y_1+\dots+c_{1,k-1}y_{k-1}\\
&\quad +Y_1(y_1,\dots,y_{k-1},y)]^{s_1}\times \dots\times
[c_{k-1,1}y_1+\dots+c_{k-1,k-1}y_{k-1}\\
&\quad +Y_{k-1}(y_1,\dots,y_{k-1},y)]^{s_{k-1}}
-y_1^{s_1}\dots y_{k-1}^{s_{k-1}}\}+\alpha Y(y_1,\dots,y_{k-1},y).
\end{align*}
Taking into account  \eqref{2d4.12},  $\Delta_2 V$ can be written
in the form
$$
\Delta_2 V\big|_{  \eqref{2d4.11}   }=W(y_1,\dots,y_{k-1},y)
+W_*(y_1,\dots,y_{k-1},y),
$$
where
\begin{align*}
W&=(y_1^2+y_2^2+\dots+y_{k-1}^2)+\alpha v_{0,0,\dots,0,2}y^2\\
&\quad +\alpha(v_{2,0,\dots,0}y_1^2+v_{1,1,\dots,0}y_1y_2
  +\dots+v_{1,0,\dots,1,0}y_1y_{k-1}\\
&\quad +v_{1,0,\dots,0,1}y_1y+v_{0,2,\dots,0}y_2^2+\dots
 +v_{0,0,\dots,1,1}y_{k-1}y),
\end{align*}
and $W_*$ is a holomorphic function whose Maclaurin-series expansion
begins with terms of the third power in
$y_1,\dots$, $y_{k-1}$, $y$. We choose the sign of $\alpha$ such that
$\alpha v_{0,\dots,0,2}>0$. Let us show that  $\vert\alpha\vert$
can be chosen
so small that the quadratic form $W$ is positive definite. To do this,
let us show that  $\alpha$ can be chosen such that the principal minors
of the matrix
{\footnotesize
$$
\begin{pmatrix}
1+\alpha v_{2,0,\dots,0} & \frac12\alpha v_{1,1,\dots,0} &
\frac12\alpha v_{1,0,1,\dots,0}
& \dots & \frac12\alpha v_{1,0,\dots,1,0} & \frac12\alpha v_{1,0,
 \dots,0,1}  \\
\frac12\alpha v_{1,1,\dots,0} & 1+\alpha v_{0,2,\dots,0} &
\frac12\alpha v_{0,1,1,\dots,0}
& \dots & \frac12\alpha v_{0,1,\dots,1,0} & \frac12\alpha v_{0,1,
 \dots,0,1}  \\
\frac12\alpha v_{1,0,1,\dots,0} & \frac12\alpha v_{0,1,1,\dots,0}
& 1+\alpha v_{0,0,2,\dots,0}
& \dots & \frac12\alpha v_{0,0,1,\dots,1,0} & \frac12\alpha v_{0,0,1,\dots,0,1}  \\
\dots & \dots & \dots & \dots & \dots & \dots  \\
\frac12\alpha v_{1,0,\dots,1,0} & \frac12\alpha v_{0,1,\dots,1,0}&
\frac12\alpha v_{0,0,1,\dots,1,0}&
 \dots & 1+\alpha v_{0,\dots,0,2,0} & \frac12\alpha v_{0,\dots,0,1,1}
\\
\frac12\alpha v_{1,0,\dots,0,1} & \frac12\alpha v_{0,1,\dots,0,1}
&\frac12\alpha v_{0,0,1,\dots,0,1} &\dots & \frac12\alpha
v_{0,0,\dots,1,1} & \frac12\alpha v_{0,0,\dots,0,2}
\end{pmatrix}
$$ }
are positive. In fact, any principal minor
$\Omega_s$  of this matrix is a continuous function of
$\alpha$: $\Omega_s=\Omega_s(\alpha)$. Note that
$\Omega_s(0)=1$
 for $s=1,2,\dots,k-1$. Thus there exists $\alpha_*>0$ such that
for $|\alpha|<\alpha_*$ we have
 $\Omega_s(\alpha)\ge\frac12\quad (s=1,2,\dots,k-1)$. Let us prove
that the inequality $\Omega_k>0$ holds for sufficiently small
$|\alpha|$.  To do this, let us expand $\Omega_k$ in terms of the
 elements of the last row. We obtain
$\Omega_k=\frac12\alpha
v_{0,0,\dots,0,2}\Omega_{k-1}+\alpha^2\Omega_*$ where  $\Omega_*$
is a polynomial with respect to  $\alpha$ and
$v_{i_1,i_2,\dots,i_k}$   $(i_1+i_2+\dots+i_k=2,   i_j\ge0)$.
Hence we have $\Omega_k>0$ for sufficiently small $|\alpha|$.
So for  $\alpha$ whose absolute value is small enough and whose sign
coincides with the sign of $v_{0,0,\dots,2}$, the quadratic
form $W$ is positive definite. Therefore the sum $W+W_*$  is also
positive definite in a sufficiently small neighbourhood of the origin.
At the same time, the function $V$ of  form  \eqref{2d4.13}
is not negative semidefinite (it takes values of both signs in any
neighbourhood of the origin).
Hence by virtue of Theorem \ref{thm2.4},  the zero solution of
system  \eqref{2d4.11} is unstable.
\end{proof}

\begin{remark} \label{rmk4.1} \rm
 It is impossible to construct a Lyapunov function $V$ such that
its first variation $\Delta_1V=\Delta V$ relative to system
\eqref{2d4.11}  is positive (or negative) definite, so we cannot
apply Theorem \ref{thmC} and have to apply Theorem \ref{thm2.4}
 for $m=2$.
\end{remark}

Thus in the case  $v_{0,0,\dots,2}\neq0$, the stability problem
has been solved
independently of the terms whose degrees are higher than two.
Consider now the case
$v_{0,0,\dots,2}=0$. We shall transform system  \eqref{2d4.11}
to a form in which  $v^{(j)}_{0,0,\dots,2}=0$
($j=1,2,\dots,k-1$). Denote
\begin{equation}\label{2d4.14}
y_j=\xi_j+m_j y^2\quad(j=1,2,\dots,k-1),
\end{equation}
where  $m_j$ are constants. In this notation,
system  \eqref{2d4.11} takes the form
\begin{equation}
\begin{aligned}
\xi_j(n+1)
&= \nu_{j1}\xi_1(n)+\nu_{j2}\xi_2(n)+\dots+\nu_{j,k-1}\xi_{k-1}(n)\\
&\quad +y^2(n)(\nu_{j1}m_1+\nu_{j2}m_2+\dots+\nu_{j,k-1}m_{k-1})\\
&\quad +\Psi_j(\xi_1(n)+m_1y^2(n),\dots,\xi_{k-1}(n)
 +m_{k-1}y^2(n),y(n))\\
&\quad -m_j\big[y^2(n)-2y(n)\Psi(\xi_1(n)+m_1y^2(n),\dots,\xi_{k-1}(n)\\
&\quad +m_{k-1}y^2(n),y(n))\\
&\quad +\Psi^2(\xi_1(n)+m_1y^2(n),\dots,\xi_{k-1}(n)
+m_{k-1}y^2(n),y(n))\big],
\end{aligned}\label{2d4.15}
\end{equation}
\begin{equation}\label{2d4.16}
y(n+1)=-y(n)+\Psi(\xi_1(n)+m_1y^2(n),\dots,\xi_{k-1}(n)
+m_{k-1}y^2(n),y(n)).
\end{equation}
Choose the constants  $m_1,\dots,m_{k-1}$ such that the coefficients
corresponding to $y^2(n)$ in the right-hand sides of system
 \eqref{2d4.15} are equal to zero.

Equating to zero the corresponding coefficients, we obtain the
system of linear algebraic
equations with respect to  $m_1,\dots,m_{k-1}$:
$$
\nu_{j1}m_1+\nu_{j2}m_2+\dots+\nu_{j,k-1}m_{k-1}
=m_j-\psi^{(j)}_{0,0,\dots,2}\quad(j=1,2,\dots,k-1).
$$
This system has a unique solution because one is not an eigenvalue
of the matrix $\Upsilon$. Substituting the obtained values
$m_1,\dots,m_{k-1}$ into  \eqref{2d4.15} and
\eqref{2d4.16}, we obtain the system
\begin{gather}
\begin{aligned}
\xi_j(n+1)&= \nu_{j1}\xi_1(n)+\nu_{j2}\xi_2(n)+\dots+\nu_{j,k-1}
 \xi_{k-1}(n)\\
&\quad +\Phi_j(\xi_1(n),\dots,\xi_{k-1}(n),y(n))\quad
(j=1,\dots,k-1),\end{aligned}
\label{2d4.17}\\
y(n+1)= -y(n)+\Phi(\xi_1(n),\dots,\xi_{k-1}(n),y(n)), \label{2d4.18}
\end{gather}
where
\begin{align*}
\Phi_j(\xi_1,\dots,\xi_{k-1},y)
&= \Psi_j(\xi_1+m_1y^2,\dots,\xi_{k-1}+m_{k-1}y^2,y)\\
&\quad +2m_jy\Psi(\xi_1+m_1y^2,\dots,\xi_{k-1}+m_{k-1}y^2,y)\\
&\quad -m_j\Psi^2(\xi_1+m_1y^2,\dots,\xi_{k-1}
 +m_{k-1}y^2,y)-\psi^{(j)}_{0,0,\dots,2}y^2,
\end{align*}
\[
 \Phi(\xi_1,\dots,\xi_{k-1},y)= \Psi(\xi_1+m_1y^2,\dots,\xi_{k-1}
+m_{k-1}y^2,y).
\]
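Similarly, in matrix form the constants $m_1,\dots,m_{k-1}$ satisfy
$(I_{k-1}-\Upsilon)m=\psi_0$, where $\psi_0$ denotes the vector with
components $\psi^{(j)}_{0,0,\dots,2}$; in the numerical sketch
(with \texttt{psi0} our name for this vector):
\begin{verbatim}
m = np.linalg.solve(np.eye(k - 1) - nu, psi0)
\end{verbatim}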

The expansions of $\Phi_j$ and $\Phi$ in power series begin with
terms of the second degree, and the coefficients corresponding to
$y^2$ in the expansions of $\Phi_j$ and $\Phi$ are equal to zero.
System  \eqref{2d4.17} and  \eqref{2d4.18} will be basic in our
further investigation
of the stability of the zero solution
\begin{equation}\label{2d4.19}
\xi_1=0, \quad  \xi_2=0, \quad  \dots,\quad   \xi_{k-1}=0,\quad   y=0.
\end{equation}

Side by side with system \eqref{2d4.17} and \eqref{2d4.18}, let
 us consider the system
\begin{gather}
\begin{aligned}
\xi_j(n+2)&= c_{j1}\xi_1(n)+c_{j2}\xi_2(n)+\dots+c_{j,k-1}\xi_{k-1}(n)\\
&\quad +\Xi_j(\xi_1(n),\dots,\xi_{k-1}(n),y(n))\quad
(j=1,\dots,k-1),
\end{aligned}\label{2d4.17*}\\
y(n+2)= y(n)+Y_*(\xi_1(n),\dots,\xi_{k-1}(n),y(n)), \label{2d4.18*}
\end{gather}
where expansions of $\Xi_j$ and $Y_*$ in power series begin
with terms of the second degree, and expansions of $\Xi_j$
do not include terms corresponding to $y^2(n)$.

Denote  by $\Xi_j^{(0)}(y)$  ($j=1,\dots,k-1$) and
$Y_*^{(0)}(y)$  the sum of all terms in functions
$\Xi_j$ and $Y_*$ respectively, which do not include
$\xi_1,\dots,\xi_{k-1}$, so
\begin{gather*}
\Xi_j^{(0)}(y)=\Xi_j(0,\dots,0,y)=h_jy^3
 +\sum_{s=4}^{\infty}h_j^{(s)}y^s, \\
Y_*^{(0)}(y)=Y_*(0,\dots,0,y)=hy^3+\sum_{s=4}^{\infty}h^{(s)}y^s,
\end{gather*}
where   $h, h_j, h^{(s)}, h_j^{(s)}$ ($j=1,\dots,k-1$;
$s=4,5,\dots$) are constants.

\begin{theorem} \label{thm4.2}
 The solution \eqref{2d4.19} of system  \eqref{2d4.17} and
 \eqref{2d4.18} is asymptotically stable if $h<0$ and unstable if
 $h>0$.
\end{theorem}
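As a simple illustration of this statement (a computation of ours),
consider the scalar case $k=1$, in which system \eqref{2d4.17},
\eqref{2d4.18} reduces to the single equation
$y(n+1)=-y(n)+by^3(n)+O(y^4(n))$ and $Y_*=Y_*^{(0)}$. A direct
computation of the second iterate gives
$$
y(n+2)=y(n)-2by^3(n)+O(y^4(n)),
$$
so that $h=-2b$: the zero solution is asymptotically stable for $b>0$
(each step decreases $|y|$) and unstable for $b<0$, in accordance with
the theorem.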

\begin{proof}
 We shall show that there exists a Lyapunov function $V$
depending on $\xi_1,\dots,\xi_{k-1},y$ such that    $\Delta_2 V$
is positive definite. Consider the system of
linear equations
\begin{equation}\label{2d4.20}
\xi_j(n+1)=\nu_{j1}\xi_1(n)+\nu_{j2}\xi_2(n)+\dots
+\nu_{j,k-1}\xi_{k-1}(n)\quad(j=1,\dots,k-1).
\end{equation}
Let
$W=\sum_{i_1+\dots+i_{k-1}=2}w_{i_1,\dots,i_{k-1}}\xi_1^{i_1}
\dots\xi_{k-1}^{i_{k-1}}$
be a quadratic form of variables $\xi_1, \dots, \xi_{k-1}$,
such that
\begin{equation}\label{2d4.21}
\Delta_2 W\big|_{ \eqref{2d4.20}}=\xi_1^2+\dots+\xi_{k-1}^2.
\end{equation}
Since all eigenvalues of the matrix $\Upsilon$ lie inside
the unit disk, the form $W$ satisfying  \eqref{2d4.21} exists,
is unique, and is negative definite
\cite[Theorem 4.30]{El:05}.

If the functions  $\Xi_j$ ($j=1,\dots,k-1$) do not depend on $y$,
then the second variation  $\Delta_2 $ of the function $W$
along system    \eqref{2d4.17},
i.e., the expression
\begin{equation} \label{2d4.22}
\begin{aligned}
&\sum_{i_1+\dots+i_{k-1}=2}w_{i_1,\dots,i_{k-1}}
\big\{[c_{11}\xi_1+c_{12}\xi_2+\dots+
c_{1,k-1}\xi_{k-1}+\Xi_1]^{i_1}\dots \\
&[c_{k-1,1}\xi_1+\dots+c_{k-1,k-1}\xi_{k-1}+\Xi_{k-1}]^{i_{k-1}}
-\xi_1^{i_1} \dots\xi_{k-1}^{i_{k-1}}\big\}
\end{aligned}
\end{equation}
is a positive definite function of the variables
$\xi_1,\dots,\xi_{k-1}$ for $\xi_1,\dots,\xi_{k-1}$ sufficiently
small.

On the other hand, if the function $Y_*$ does not depend on
$\xi_1,\dots,\xi_{k-1}$
(i.e. if $Y_*=Y_*^{(0)}$), then the second variation $\Delta_2$
of the function $\frac12hy^2$ is equal to
\begin{equation}\label{2d4.23}
\Delta_2\big(\frac12hy^2\big)=\frac12h\left[2yY_*^{(0)}+
{Y_*^{(0)}}^2\right]=h^2y^4+hh^{(4)}y^5+o(y^5),
\end{equation}
and this variation is a positive definite function with respect
to  $y$ for sufficiently small $|y|$. Therefore, under these
conditions, the variation  $\Delta_2$ of the function
$V_1=\frac12hy^2+W(\xi_1,\dots,\xi_{k-1})$ along the total system
 \eqref{2d4.17} and  \eqref{2d4.18} is a positive definite function
of all variables   $\xi_1,\dots,\xi_{k-1},y$ in some neighbourhood
of the origin. Taking into account   \eqref{2d4.21} and  \eqref{2d4.23},
 this variation can be represented in the form
\begin{equation}\label{2d4.24}
(h^2+g_1)y^4+\xi_1^2+\dots+\xi_{k-1}^2
+\sum_{i,j=1}^{k-1}g_{ij}^{(1)}\xi_i\xi_j,
\end{equation}
where  $g_1$ is a holomorphic function of the variable  $y$, vanishing
for   $y=0$, and   $g_{ij}^{(1)}$ are holomorphic functions of variables
 $\xi_1,\dots,\xi_{k-1}$, vanishing for $\xi_1=\dots=\xi_{k-1}=0$.

But since the functions $\Xi_j$ ($j=1,\dots,k-1$) include  $y$, and
the function   $Y_*$ includes
  $\xi_1,\dots,\xi_{k-1}$, the variation $\Delta_2$  of the function
 $V_1$  along system  \eqref{2d4.17} and  \eqref{2d4.18} is, in general,
not positive definite:
in this variation there appear terms breaking the positive
definiteness.

Note that expression \eqref{2d4.24} remains positive definite if
the function   $g_1$ includes not only the variable  $y$, but also
the variables $\xi_1,\dots,\xi_{k-1}$, and functions $g_{ij}^{(1)}$
include not only variables  $\xi_1,\dots,\xi_{k-1}$, but also the
variable   $y$. It is only important
that the functions $g_1$ and $g_{ij}^{(1)}$ vanish for
$\xi_1=\dots=\xi_{k-1}=y=0$. Taking into account this fact, let us
write the second variation of the function  $V_1$ along  \eqref{2d4.17}
and  \eqref{2d4.18} in the form
\begin{equation} \label{2d4.25}
\begin{aligned}
\Delta_2 V_1
&=\Delta_2\big(\frac12hy^2\big)+\Delta_2
W=hyY_*+\frac12hY_*^2\\
&\quad +\sum_{i_1+\dots+i_{k-1}=2}w_{i_1,\dots,i_{k-1}}\{[c_{11}\xi_1+c_{12}\xi_2+
\dots+c_{1,k-1}\xi_{k-1}+\Xi_1]^{i_1}\times\dots\\
&\quad \times
[c_{k-1,1}\xi_1+\dots+c_{k-1,k-1}\xi_{k-1}+\Xi_{k-1}]^{i_{k-1}}-\xi_1^{i_1}
\dots\xi_{k-1}^{i_{k-1}}\}\\
&=[h^2+g_1(\xi_1,\dots,\xi_{k-1},y)]y^4+\xi_1^2+\dots+\xi_{k-1}^2\\
&\quad +\sum_{i,j=1}^{k-1}
g_{ij}^{(1)}(\xi_1,\dots,\xi_{k-1},y)\xi_i\xi_j
+Q(\xi_1,\dots,\xi_{k-1},y),
\end{aligned}
\end{equation}
where the functions  $g_1$ and  $g_{ij}^{(1)}$ ($i,j=1,\dots,k-1$) vanish
for  $\xi_1=\dots=\xi_{k-1}=y=0$, and $Q$ is the sum of all
terms which can be included neither in the expression
\begin{equation}\label{2d4.26}
g_1(\xi_1,\dots,\xi_{k-1},y)y^4,
\end{equation}
nor in the expression
\begin{equation}\label{2d4.27}
\sum_{i,j=1}^{k-1}g_{ij}^{(1)}(\xi_1,\dots,\xi_{k-1},y)\xi_i\xi_j.
\end{equation}
All terms included in the expression  $Q$ can be divided into the following four
groups: the terms free of  $\xi_1,\dots,\xi_{k-1}$, the terms linear with respect to
$\xi_1,\dots,\xi_{k-1}$, the terms quadratic with respect to $\xi_1,\dots,\xi_{k-1}$,
and the terms having degree higher than two with respect to
$\xi_1,\dots,\xi_{k-1}$. It is evident that all terms of the last group can be
included in expression  \eqref{2d4.27}; therefore we shall
consider only the first three
groups of terms.

All terms free of   $\xi_1,\dots,\xi_{k-1}$ are obviously included
in expression   \eqref{2d4.23} (where they have been written explicitly) and in
$\sum_{i_1+\dots+i_{k-1}=2}w_{i_1,\dots,i_{k-1}}{\Xi_1^{(0)}}^{i_1}
\dots{\Xi_{k-1}^{(0)}}^{i_{k-1}}$ (where there are summands of the sixth and higher
degrees with respect to  $y$). All these summands can be included in expression
 \eqref{2d4.26}. Hence the function   $Q$ does not include terms free of
 $\xi_1,\dots,\xi_{k-1}$.

Terms linear with respect to $\xi_1,\dots,\xi_{k-1}$ enter
expression \eqref{2d4.25} both through summands of
$hyY_*+\frac12hY_*^2$ and through \eqref{2d4.22}. If such terms have
degree not less than four with respect to $y$, they can clearly
be included in expression \eqref{2d4.26}.
Thus the function $Q$ contains only those terms linear with respect
to $\xi_1,\dots,\xi_{k-1}$
that have degree two or three with respect to $y$.

Finally, consider the terms quadratic with respect to
$\xi_1,\dots,\xi_{k-1}$. If these terms have total degree
higher than two, they can be included in expression \eqref{2d4.27}
and are therefore not contained in the function $Q$. All terms
quadratic with respect to $\xi_1,\dots,\xi_{k-1}$ and of total degree two
(i.e., the terms with constant coefficients) are contained in the
expression
\begin{align*}
&\sum_{i_1+\dots+i_{k-1}=2}w_{i_1,\dots,i_{k-1}}\{[c_{11}\xi_1+c_{12}\xi_2+\dots+
c_{1,k-1}\xi_{k-1}]^{i_1}\times\dots\\
&\times [c_{k-1,1}\xi_1+\dots+c_{k-1,k-1}\xi_{k-1}]^{i_{k-1}}
 -\xi_1^{i_1}\dots\xi_{k-1}^{i_{k-1}}\}\\
&= \xi_1^2+\dots+\xi_{k-1}^2,
\end{align*}
and hence are not contained in the function $Q$.
Thus the function  $Q$ has the form
\begin{equation}\label{2d4.28}
Q=y^2Q_2(\xi_1,\dots,\xi_{k-1})+y^3Q_3(\xi_1,\dots,\xi_{k-1}),
\end{equation}
where $Q_2$ and $Q_3$ are linear forms with respect to
$\xi_1,\dots,\xi_{k-1}$:
$$
Q_2=q_1^{(2)}\xi_1+q_2^{(2)}\xi_2+\dots+q_{k-1}^{(2)}\xi_{k-1},\quad
Q_3=q_1^{(3)}\xi_1+q_2^{(3)}\xi_2+\dots+q_{k-1}^{(3)}\xi_{k-1}.
$$

The presence of the summand \eqref{2d4.28} in \eqref{2d4.25}
breaks the positive
definiteness of $\Delta_2 V_1$. To get rid of the summand
$y^2Q_2(\xi_1,\dots,\xi_{k-1})$, let us add the summand
$y^2P_2(\xi_1,\dots,\xi_{k-1})=y^2(p_1^{(2)}\xi_1+p_2^{(2)}\xi_2+\dots+
p_{k-1}^{(2)}\xi_{k-1})$ to the function $V_1$, where $p_j^{(2)}$
($j=1,\dots,k-1$) are constants. In other words, consider the function
\begin{equation}\label{2d4.29}
V_2=\frac12hy^2+W(\xi_1,\dots,\xi_{k-1})
+y^2P_2(\xi_1,\dots,\xi_{k-1})
\end{equation}
instead of the function $V_1$.
The term $y^2P_2(\xi_1,\dots,\xi_{k-1})$ contributes the following
summands to $\Delta_2 V_1$:
\begin{align*}
&\Delta_2 (y^2P_2(\xi_1,\dots,\xi_{k-1}))\\
&= [y^2+2yY_*(\xi_1,\dots,\xi_{k-1},y)+Y_*^2(\xi_1,\dots,\xi_{k-1},y)]\\
&\times\sum_{j=1}^{k-1}p_j^{(2)}[c_{j,1}\xi_1+c_{j,2}\xi_2
 +\dots+c_{j,k-1}\xi_{k-1}+\Xi_{j}
(\xi_1,\dots,\xi_{k-1},y)]\\
&\quad -y^2[p_1^{(2)}\xi_1+p_2^{(2)}\xi_2+\dots+p_{k-1}^{(2)}\xi_{k-1}]\\
&= y^2\Big[\sum_{j=1}^{k-1}p_j^{(2)}(c_{j1}\xi_1+c_{j2}\xi_2
+\dots+c_{j,k-1}\xi_{k-1}-\xi_j)\Big]+G(\xi_1,\dots,\xi_{k-1},y).
\end{align*}
Here the function $G$ is a sum of summands, each of which can be
included either in expression \eqref{2d4.26} or in \eqref{2d4.27}.
Let us choose constants   $p_1^{(2)},\dots,p_{k-1}^{(2)}$ such that
the equality
\begin{equation}\label{2d4.30}
\sum_{j=1}^{k-1}p_j^{(2)}(c_{j1}\xi_1+c_{j2}\xi_2+\dots+c_{j,k-1}
\xi_{k-1}-\xi_j)
=-\sum_{j=1}^{k-1} q_j^{(2)}\xi_j
\end{equation}
holds.
To do this, let us equate the coefficients of
$\xi_j$ ($j=1,\dots,k-1$)
on the left-hand and right-hand sides
of equality \eqref{2d4.30}. We obtain a system of linear
equations with respect to
$p_j^{(2)}$ ($j=1,\dots,k-1$):
\begin{equation}\label{2d4.31}
c_{1j}p_1^{(2)}+c_{2j}p_2^{(2)}+\dots+(c_{jj}-1)p_{j}^{(2)}
+\dots+c_{k-1,j}p_{k-1}^{(2)}=
-q_j^{(2)}\quad(j=1,\dots,k-1).
\end{equation}
In matrix form this system reads $(\mathcal C^{T}-I)p^{(2)}=-q^{(2)}$.
Since all eigenvalues of $\mathcal C$ lie inside the unit disk,
$\mu=1$ is not an eigenvalue of $\mathcal C$, so the determinant of
the system is nonzero, and system \eqref{2d4.31} has a unique solution.
Substituting the obtained values  $p_1^{(2)},\dots,p_{k-1}^{(2)}$
into the expression
$P_2(\xi_1,\dots,\xi_{k-1})$, we obtain
\begin{equation} \label{2d4.32}
\begin{aligned}
\Delta_2 V_2
&=[h^2+g_2(\xi_1,\dots,\xi_{k-1},y)]y^4
+(\xi_1^2+\dots+\xi_{k-1}^2)\\
&\quad +\sum_{i,j=1}^{k-1}
g_{ij}^{(2)}(\xi_1,\dots,\xi_{k-1},y)\xi_i\xi_j+y^3Q_3(\xi_1,
\dots,\xi_{k-1}),
\end{aligned}
\end{equation}
where $g_2$ and $g_{ij}^{(2)}$ are functions vanishing for
$\xi_1=\xi_2=\dots=\xi_{k-1}=y=0$.
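In numerical examples the coefficients $p_j^{(2)}$ can be computed
directly, since \eqref{2d4.31} is the linear system
$(\mathcal C^{T}-I)p^{(2)}=-q^{(2)}$. The following is a minimal sketch;
the matrix $\mathcal C$ and the vector $q^{(2)}$ below are hypothetical
placeholders, and any $\mathcal C$ with spectral radius less than one will do.
\begin{verbatim}
# Sketch: solve the linear system (C^T - I) p = -q for the
# coefficients p_j^(2).  C and q are hypothetical placeholders.
import numpy as np

C = np.array([[0.5, 0.1],
              [0.2, 0.3]])   # eigenvalues inside the unit disk
q = np.array([1.0, -2.0])    # coefficients q_j^(2) of the form Q_2

assert max(abs(np.linalg.eigvals(C))) < 1   # so det(C^T - I) != 0
p = np.linalg.solve(C.T - np.eye(len(q)), -q)
print(p)                     # coefficients p_j^(2) of the form P_2
\end{verbatim}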

Similarly, one can show that it is possible to get rid of the
summand $y^3Q_3(\xi_1$, $\dots,\xi_{k-1})$ in expression
\eqref{2d4.32}. To do this,
it suffices to add to the function $V_2$ the summand
$$
y^3P_3(\xi_1,\dots,\xi_{k-1})
=y^3(p_1^{(3)}\xi_1+p_2^{(3)}\xi_2+\dots+
p_{k-1}^{(3)}\xi_{k-1}),
$$
where $p_j^{(3)}$ ($j=1,\dots,k-1$) are constants.
In other words, consider the function
\begin{equation}\label{2d4.33}
V=\frac12hy^2+W(\xi_1,\dots,\xi_{k-1})
+y^2P_2(\xi_1,\dots,\xi_{k-1})+y^3P_3(\xi_1,\dots,\xi_{k-1})
\end{equation}
instead of the function $V_2$.
Its variation $\Delta_2$ along system \eqref{2d4.17} and
\eqref{2d4.18} is
\begin{equation} \label{2d4.34}
\begin{aligned}
\Delta_2 V&=[h^2+g(\xi_1,\dots,\xi_{k-1},y)]y^4+(\xi_1^2+\dots
+\xi_{k-1}^2)\\
&\quad +\sum_{i,j=1}^{k-1} g_{ij}(\xi_1,\dots,\xi_{k-1},y)\xi_i\xi_j,
\end{aligned}
\end{equation}
where  $g$ and  $g_{ij}$ are functions vanishing for
$\xi_1=\xi_2=\dots=\xi_{k-1}=y=0$.
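The construction above can be verified symbolically in a minimal toy
case. The following sketch treats a hypothetical example with $k-1=1$,
$c_{11}=1/2$, and hypothetical nonlinearities $\Xi_1=\xi_1 y+y^3$,
$Y_*=hy^3+d\xi_1 y$ (it is not the general system); it checks that,
after the corrections $y^2P_2$ and $y^3P_3$ are added, the variation
$\Delta_2 V$ contains no summands $\xi_1 y^2$ and $\xi_1 y^3$ with
constant coefficients.
\begin{verbatim}
# Symbolic check of the corrections y^2*P2 and y^3*P3 in a toy case
# with one variable xi (k-1 = 1); all nonlinearities are hypothetical.
import sympy as sp

xi, y, h, d = sp.symbols('xi y h d')
c = sp.Rational(1, 2)        # eigenvalue of C, inside the unit disk
w = 1/(c**2 - 1)             # W = w*xi^2 gives quadratic part xi^2
Xi = xi*y + y**3             # toy nonlinearity Xi_1(xi, y)
Ys = h*y**3 + d*xi*y         # toy nonlinearity Y_*(xi, y)

def delta2(V):               # variation along xi -> c*xi + Xi, y -> y + Y_*
    return sp.expand(V.subs({xi: c*xi + Xi, y: y + Ys},
                            simultaneous=True) - V)

V1 = h*y**2/2 + w*xi**2
p2 = delta2(V1).coeff(xi, 1).coeff(y, 2)/(1 - c)   # cancels y^2*Q_2
V2 = V1 + p2*y**2*xi
p3 = delta2(V2).coeff(xi, 1).coeff(y, 3)/(1 - c)   # cancels y^3*Q_3
V  = V2 + p3*y**3*xi
assert delta2(V).coeff(xi, 1).coeff(y, 2) == 0
assert delta2(V).coeff(xi, 1).coeff(y, 3) == 0
\end{verbatim}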

It follows from \eqref{2d4.34} that $\Delta_2 V$ is positive definite
in a sufficiently small neighbourhood of the origin, while the function $V$
of the form \eqref{2d4.33} is negative definite for $h<0$ and changes
sign for $h>0$. Hence, according
to Theorems \ref{thm2.3} and \ref{thm2.4}, we can conclude that
the solution \eqref{2d4.19} of system
\eqref{2d4.17} and \eqref{2d4.18} is asymptotically stable for
$h<0$ and unstable for $h>0$.
This completes the proof.
\end{proof}
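The sign criterion admits a simple numerical illustration. Consider the
hypothetical scalar caricature $y(n+1)=-y(n)-\frac{h}{2}y^3(n)$, for which
$y(n+2)=y(n)+hy^3(n)+O(y^5(n))$; it is not the general system considered
above, but it exhibits the same dichotomy: the iterates approach zero for
$h<0$ and leave every small neighbourhood of zero for $h>0$.
\begin{verbatim}
# Hypothetical scalar caricature: y -> -y - (h/2)*y^3, so that the
# second iterate satisfies y(n+2) = y(n) + h*y(n)^3 + O(y^5).
def iterate(h, y=0.1, steps=2000):
    for n in range(steps):
        y = -y - 0.5*h*y**3
        if abs(y) > 1.0:          # iterate has left the neighbourhood
            return "escaped after %d steps" % n
    return "|y| = %.2e after %d steps" % (abs(y), steps)

print(iterate(h=-1.0))   # h < 0: |y| decreases (asymptotic stability)
print(iterate(h=+1.0))   # h > 0: iterates escape (instability)
\end{verbatim}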


\begin{remark} \label{rmk4.2}  \rm
Obviously, the substitutions \eqref{2d4.3},
\eqref{2d4.9}, and \eqref{2d4.14} are such that the
investigation of the stability
of solution \eqref{2d4.19} of system \eqref{2d4.17} and
\eqref{2d4.18} is equivalent to the investigation of the stability
of the zero solution of system \eqref{2d4.1}.
\end{remark}


\begin{remark} \label{rmk4.3}  \rm
Theorems \ref{thm4.1} and \ref{thm4.2} give conditions under which the
problem of the stability of the zero solution of system
\eqref{2d4.1} can be solved in the critical case when one
eigenvalue of the linearized system is equal to minus one. The
obtained criteria do not depend on nonlinear terms of degree higher
than three. If $h=0$, then the stability
problem cannot be solved by the terms of the first, second, and third
degrees in the expansions of the right-hand sides of
the system of difference equations; to solve it, one must also take
into account terms of higher degrees.
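For instance, in a hypothetical scalar caricature with $h=0$, the
equation $y(n+1)=-y(n)+ay^5(n)$ yields
$$
y(n+2)=y(n)-2ay^5(n)+O(y^9(n)),
$$
so the stability of the zero solution is decided by the fifth-degree
term: it is asymptotically stable for $a>0$ and unstable for $a<0$.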
\end{remark}


\begin{thebibliography}{00}

\bibitem{Ag:2000}
 R. P. Agarwal.
\newblock {\em Difference Equations and Inequalities}.
\newblock Marcel Dekker, New York, 2nd edition, 2000.

\bibitem{AgORWo:07}
R. P. Agarwal, D. O'Regan, and P. J. Y. Wong.
\newblock Dynamics of epidemics in homogeneous/heterogeneous populations and
  the spreading of multiple inter-related infectious diseases: constant-sign
  periodic solutions for the discrete model.
\newblock {\em Nonlinear Anal. Real World Appl.}, 8(4):1040--1061, 2007.

\bibitem{Ah:06}
N. U. Ahmed.
\newblock {\em Dynamic Systems and Control with Applications}.
\newblock World Scientific, Singapore, 2006.

\bibitem{AnDi:00}
J. Angelova and A. Dishliev.
\newblock Optimization problems for one-impulsive models from population
  dynamics.
\newblock {\em Nonlinear Analysis}, 39:483--497, 2000.

\bibitem{AnDiNe:00}
J. Angelova, A. Dishliev, and S. Nenov.
\newblock Comparison of zero-solutions of systems {O}{D}{E} via asymptotical
  stability.
\newblock {\em Nonlinear Analysis}, 42:339--350, 2000.

\bibitem{BaSi:89}
D. D. Bainov and P. S. Simeonov.
\newblock {\em Systems with impulse effect: stability, theory and
  applications}.
\newblock Halsted Press, New York -- Chichester -- Brisbane -- Toronto, 1989.

\bibitem{BrCa:2001}
F. Brauer and C. Castillo-Chavez.
\newblock {\em Mathematical Models in Population Biology and Epide\-miology}.
\newblock Springer, New York, 2001.

\bibitem{Br:05}
E. Braverman.
\newblock On a discrete model of population dynamics with impulsive harvesting
  or recruitment.
\newblock {\em Nonlinear Analysis}, 63(5-7):e751--e759, 2005.

\bibitem{CaLi:97}
A. Cabada and E. Liz.
\newblock Discontinuous impulsive differential equations with nonlinear
  boundary conditions.
\newblock {\em Nonlinear Analysis}, 28:1491--1497, 1997.

\bibitem{CaSiJu:06}
M. L. Castro, J. A. L. Silva, and D. A. R. Justo.
\newblock Stability in an age-structured meta\-population model.
\newblock {\em J. Math. Biol.}, 52(2):183--208, 2006.

\bibitem{ChBhHa:03}
V. Chellaboina, S. P. Bhat, and W. M. Haddad.
\newblock An invariance principle for nonlinear hybrid and impulsive dynamical
  systems.
\newblock {\em Nonlinear Analysis}, 53:527--550, 2003.

\bibitem{Ch:06}
F. Chen.
\newblock Permanence and global attractivity of a discrete multispecies
  {L}otka-{V}olterra competition predator-prey systems.
\newblock {\em Applied Mathematics and Computation}, 182(1):3--12, 2006.

\bibitem{CoFeLiMe:06}
R. Coutinho, B. Fernandez, R. Lima, and A. Meyroneinc.
\newblock Discrete time piecewise affine models of genetic regulatory networks.
\newblock {\em J. Math. Biol.}, 52(4):524--570, 2006.

\bibitem{CuFlRo:05}
P. Cull, M. Flahive, and R. Robson.
\newblock {\em Difference Equations. From Rabbits to Chaos}.
\newblock Springer, New York, 2005.

\bibitem{DO:02}
A. D'Onofrio.
\newblock Pulse vaccination strategy in the {S}{I}{R} epidemic model: Global
  asymptotic stable eradica\-tion in presence of vaccine failures.
\newblock {\em Mathematical and Computer Modelling}, 36:473--489, 2002.

\bibitem{El:05}
S. Elaydi.
\newblock {\em An Introduction to Difference Equations}.
\newblock Springer, New York, 3rd edition, 2005.

\bibitem{El:08}
S. Elaydi.
\newblock {\em Discrete Chaos}.
\newblock Chapman \& Hall/CRC, Boca Raton, 2nd edition, 2008.

\bibitem{FrSe:03}
J. E. Franke and J. F. Selgrade.
\newblock Attractors for discrete periodic dynamical systems.
\newblock {\em J. Math. Anal. Appl.}, 286(1):64--79, 2003.

\bibitem{FrYa:06}
J. E. Franke and A. A. Yakubu.
\newblock Globally attracting attenuant versus resonant cycles in periodic
  compensatory {L}eslie models.
\newblock {\em Mathematical Biosciences}, 204(1):1--20, 2006.

\bibitem{GeCh:98}
A. K. Gelig and A. N. Churilov.
\newblock {\em Stability and Oscillations of Nonlinear Pulse-Modulated
  Systems}.
\newblock Birkhauser, Boston, 1998.

\bibitem{GlIg:03}
R. I. Gladilina and A. O. Ignatyev.
\newblock On necessary and sufficient conditions for the asymptotic stability
  of impulsive systems.
\newblock {\em Ukrainian Mathematical Journal}, 55(8):1254--1264, 2003.

\bibitem{GlIg:04}
R. I. Gladilina and A. O. Ignatyev.
\newblock On the stability of periodic impulsive systems.
\newblock {\em Mathematical Notes}, 76(1):41--47, 2004.

\bibitem{GuLaCh:00}
Z.-H. Guan, J. Lam, and G. Chen.
\newblock On impulsive autoassociative neural networks.
\newblock {\em Neural Networks}, 13:63--69, 2000.

\bibitem{HaChNe:02}
W. M. Haddad, V. Chellaboina, and S. G. Nersesov.
\newblock Hybrid nonnegative and compartmental dynamical systems.
\newblock {\em Mathematical Problems in Engineering}, 8(6):493--515, 2002.

\bibitem{HaChNe:06}
W. M. Haddad, V. Chellaboina, and S. G. Nersesov.
\newblock {\em Impulsive and hybrid dynamical systems: stability,
  dissipativity, and control}.
\newblock Princeton University Press, Princeton, 2006.

\bibitem{IgIgSo:06}
A. O. Ignat'ev, O. A. Ignat'ev, and A. A. Soliman.
\newblock Asymptotic stability and instability of the solutions of systems with
  impulse action.
\newblock {\em Mathematical Notes}, 80(4):491--499, 2006.

\bibitem{Ig:03}
A. O. Ignatyev.
\newblock Method of {L}yapunov functions in problems of stability of solutions
  of systems of differential equations with impulse action.
\newblock {\em Sbornik: Mathematics}, 194(10):117--132, 2003.

\bibitem{Ig:04}
A. O. Ignatyev.
\newblock Stability of the zero solution of an almost periodic system of
  finite-difference equations.
\newblock {\em Differential Equations}, 40(1):98--103, 2004.

\bibitem{Ig:08}
A. O. Ignatyev.
\newblock Asymptotic stability and instability with respect to part of
  variables for solutions to impulsive systems.
\newblock {\em Siberian Mathematical Journal}, 49(1):102--108, 2008.

\bibitem{IgIg:06}
A. O. Ignatyev and O. A. Ignatyev.
\newblock On the stability in periodic and almost periodic difference systems.
\newblock {\em Journal of Mathematical Analysis and Applica\-tions},
  313(2):678--688, 2006.

\bibitem{IgIg:08}
A. O. Ignatyev and O. A. Ignatyev.
\newblock Investigation of the asymptotic stability of solutions of systems
  with impulse effect.
\newblock {\em International Journal of Mathematics, Game Theory and Algebra},
  17(3):141--164, 2008.

\bibitem{KePe:2000}
W. G. Kelley and A. C. Peterson.
\newblock {\em Difference equations: an introduction with applications}.
\newblock Academic Press, New York, 2nd edition, 2000.

\bibitem{LaTr:02}
V. Lakshmikantham and D. Trigiante.
\newblock {\em Theory of Difference Equations: Nume\-rical Methods and
  Applications}.
\newblock Marcel Dekker, New York, 2nd edition, 2002.

\bibitem{Ne:99}
S. I. Nenov.
\newblock Impulsive controllability and optimization problems in population
  dynamics.
\newblock {\em Nonlinear Analysis}, 36:881--890, 1999.

\bibitem{SaGr:05}
M. Sadkane and L. Grammont.
\newblock A note on the {L}yapunov stability of periodic discrete-time systems.
\newblock {\em Journal of Computational and Applied Mathematics}, 176:463--466,
  2005.

\bibitem{SaPe:95}
A. M. Samoilenko and N. A. Perestyuk.
\newblock {\em Impulsive differential equations}.
\newblock World Scientific, Singapore--New Jersey--London, 1995.

\bibitem{SmWa:04}
R. J. Smith and L. M. Wahl.
\newblock Distinct effects of protease and reverse transcriptase inhibition in
  an immunological model of {H}{I}{V}-1 infection with impulsive drug effects.
\newblock {\em Bulletin of Mathematical Biology}, 66:1259--1283, 2004.

\bibitem{SmWa:05}
R. J. Smith and L. M. Wahl.
\newblock Drug resistance in an immunological model of {H}{I}{V}-1 infection
  with impulsive drug effects.
\newblock {\em Bulletin of Mathematical Biology}, 67:783--813, 2005.

\bibitem{TaCh:02}
S. Tang and R. A. Cheke.
\newblock Density-dependent birth rate, birth pulses and their population
  dynamic consequences.
\newblock {\em Journal of Mathematical Biology}, 44:185--199, 2002.

\bibitem{TaCh:05}
S. Tang and R. A. Cheke.
\newblock State-dependent impulsive models of integrated pest management
  ({I}{P}{M}) strategies and their consequences.
\newblock {\em Journal of Mathematical Biology}, 50:257--292, 2005.

\bibitem{ZhShWa:03}
X. Zhang, Z. Shuai, and K. Wang.
\newblock Optimal impulsive harvesting policy for single population.
\newblock {\em Nonlinear Analysis: Real World Applications}, 4:639--651, 2003.

\bibitem{ZhLiCh:03}
Y. Zhang, B. Liu, and L. Chen.
\newblock Extinction and permanence of a two-prey one-predator system with
  impulsive effect.
\newblock {\em Mathematical Medicine and Biology}, 20:309--325, 2003.

\bibitem{ZhLi:03}
Y. Zhou and H. Liu.
\newblock Stability of periodic solutions for an {SIS} model
with pulse  vaccination.
\newblock {\em Mathematical and Computer Modelling},
38:299--308, 2003.

\end{thebibliography}

\end{document}

