\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2013 (2013), No. 50, pp. 1--13.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu}
\thanks{\copyright 2013 Texas State University - San Marcos.}
\vspace{8mm}}

\begin{document}
\title[\hfilneg EJDE-2013/50\hfil Uniqueness of solutions]
{Uniqueness of solutions to matrix equations\\ on time scales}

\author[A. H. Zaidi \hfil EJDE-2013/50\hfilneg]
{Atiya H. Zaidi}  % in alphabetical order

\address{Atiya H. Zaidi \newline
School of Mathematics and Statistics,
The University of New South Wales,
Sydney NSW 2052, Australia}
\email{a.zaidi@unsw.edu.au}

\thanks{Submitted November 14, 2012. Published February 18, 2013.}
\subjclass[2000]{34N05,  26E70, 39B42, 39A12}
\keywords{Matrix equations;  matrix dynamic equations;  \hfill\break\indent
first order dynamic equations; time scales}

\begin{abstract}
 In this article we establish the uniqueness of solutions to
 first-order matrix dynamic equations on time scales. These results
 extend the results presented in \cite{thesis}, to more complex systems
 of $n \times n$ matrices. Following the ideas in \cite[Chap 5]{BP}, we
 identify Lipschitz  conditions that are suitable for generalizing
 $n \times n$ models on time scales.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{example}[theorem]{Example}
\allowdisplaybreaks

\section{Introduction}

The study of dynamic equations on time scales was initiated in 1988
by  Hilger when he introduced the concept and the calculus of unifying
mathematical analyses of continuous and discrete dynamics,
see \cite{Hilger,Hilger2}. Since then, several results have been developed
to complement his ideas to shape the linear and the nonlinear theory of
dynamic equations on time scales. These equations describe continuous,
discrete or both types of phenomena occurring simultaneously,
through a single model.

In \cite{TZ1} and \cite{thesis} we presented results regarding
non-multiplicity of solutions to nonlinear models of dimension $n$ on time scales.
In this work we use some of these notions to understand more
complex systems of dimension $n \times n$ for $n \ge 1$.
 Most physical processes that occur in nature, industry and society are
 nonlinear in structure and depend on several factors and their interactions.
Also, in real life problems, it may not be possible to change the initial
or prevailing states of a dynamic model as well as the natural
or circumstantial relationships of the variables involved.
 Knowing that a mathematical formulation of such a system with the given
initial conditions has either one solution or no solution would lead
to the guarantee that `existence' of a solution implies its uniqueness.

 This article considers two basic types of dynamic
initial-value problems (IVPs) of dimension $n \times n$.
These are:
\begin{equation}
X^\Delta = F(t, X);  \label{1}
\end{equation}
and
\begin{equation}
 X^\Delta = F(t, X^\sigma), \label{2}
\end{equation}
subject to the initial condition
\begin{equation}
X(a) = A. \label{1i}
\end{equation}

In the above systems, $X$ is an $n \times n$ matrix-valued function
on a time scale interval $[a,b]_\mathbb{T}:=[a,b] \cap \mathbb{T}$, where $b > a$
and $\mathbb{T}$ is a non-empty and closed subset of $\mathbb{R}$;
$F:[a,b]_\mathbb{T} \times \mathbb{R}^{n \times n} \to \mathbb{R}^{n \times n}$;
$X^\sigma = (x^\sigma_{ij})$ and $X^\Delta = (x^\Delta_{ij})$ for $1\le i,j \le n$;
and $A$ is a given constant $n \times n$ matrix.
A solution of \eqref{1}, \eqref{1i} (respectively \eqref{2}, \eqref{1i})
 will be a matrix-valued function $X$ which solves \eqref{1} and \eqref{1i}
(respectively \eqref{2}, \eqref{1i}) on $[a,b]_{\mathbb{T}}$.

Our main aim in this work is to derive conditions that would ensure that
there is either one or no solution to initial value problems
\eqref{1}, \eqref{1i} and \eqref{2}, \eqref{1i}. Our new results significantly
improve those in \cite{thesis} and present some novel ideas.

In the next section, we identify some basic concepts of the time scale
calculus associated with matrix-valued functions, used in this work.

\section{Preliminaries}\label{sec2}


The following definitions and descriptions explain how we use the time
scale notation within the set of $m \times n$ matrices on $\mathbb{T}$.
For more detail see
\cite{BS,BP,Hilger,KRP,thesis}.


\begin{definition} \label{def2.1}\rm
Let $\mathbb{T}$ be an arbitrary time scale and $t$ be a point in $\mathbb{T}$.
The forward jump operator, $\sigma: \mathbb{T} \to \mathbb{T}$, is defined
as $\sigma (t):= \inf \{ s \in \mathbb{T}: s > t \}$ for all $t \in \mathbb{T}$.
In a similar way, we define the backward jump operator,
$\rho: \mathbb{T} \to \mathbb{T}$, as $\rho (t):= \sup \{ s \in \mathbb{T}: s < t \}$ for all
$t \in \mathbb{T}$.
\end{definition}

In this way, the forward and backward (or right and left) jump operators
declare whether a point in a time scale is discrete and give the
direction of discreteness of the point. The results in this paper concern
the forward or rightward motion on $[a,b]_\mathbb{T}$. Hence, further notation
and definitions will be presented accordingly.

A point $t \in \mathbb{T}$ is said to be `right-dense'
when $t = \sigma(t)$, otherwise it is called right-scattered.
The `step size' at each point of a time scale is given by the graininess
function, $\mu(t)$, defined as $\mu(t):= \sigma(t) - t$ for all $t \in \mathbb{T}$.
 If $\mathbb{T}$ has a left-scattered maximum value $m$, then
we define $\mathbb{T}^{{\kappa}}:= \mathbb{T} \setminus \{m\}$, otherwise $\mathbb{T}^{{\kappa}} := \mathbb{T}$.

Analogous to left-Hilger-continuous functions \cite[p.3]{zaidi}
for any ordered $n$-pair $(t, \mathbf{x}) \in \mathbb{T} \times \mathbb{R}^n$,
we define a \emph{right-Hilger-continuous} function
$\mathbf{f}(t, \mathbf{x})$ \cite{Hilger}, \cite[Chap.2]{thesis}
as a function $\mathbf{f}: \mathbb{T}^\kappa \times \mathbb{R}^{n} \to \mathbb{R}^n$
having the property that $\mathbf{f}$ is continuous at each $(t,\mathbf{x})$
 where $t$ is right-dense; and the limits
$$
\lim_{(s,{\bf y}) \to (t^{-},\mathbf{x})} \mathbf{f}(s, {\bf y})
\quad \text{and} \quad \lim_{{\bf y} \to \mathbf{x}} \mathbf{f}(t,{\bf y})
$$
both exist and are finite at each $(t,\mathbf{x})$ where $t$ is left-dense.

It should be noted that $\mathbf{f}$ is \emph{rd-continuous}
 if $\mathbf{f}(t, \mathbf{x}) = {\bf g}(t)$ for all $t \in \mathbb{T}$ and
is \emph{continuous} if $\mathbf{f}(t,\mathbf{x}) = \mathbf{h}(\mathbf{x})$
for all $t \in \mathbb{T}$.

Continuity of a matrix-valued function at a point $t \in \mathbb{T}$ depends
on the continuity of its elements at $t$. Thus, for any $t \in \mathbb{T}$,
 a rd-continuous matrix-valued function is a function $X: \mathbb{T} \to \mathbb{R}^{m \times n}$
with entries $(x_{ij})$, where $x_{ij}:\mathbb{T} \to \mathbb{R}$;
$1\le i \le m$, $1 \le j \le n$; and each $x_{ij}$ is rd-continuous on $\mathbb{T}$.
Moreover, we say that $X \in C_{rd}=C_{rd}(\mathbb{T};\mathbb{R}^{m \times n})$ \cite[p.189]{BP}.

Thus, a right-Hilger-continuous matrix-valued function can be defined as follows.

\begin{definition}  \label{rhcont} \rm
Assume $F: \mathbb{T} \times \mathbb{R}^{m \times n} \to \mathbb{R}^{m \times n}$
be a matrix-valued function with entries $(f_{ij})$, where each
 $f_{ij}:\mathbb{T} \times \mathbb{R} \to \mathbb{R}$ for $1\le i \le m, 1 \le j \le n$.
We define $F$ to be right-Hilger-continuous if each
 $f_{ij}(t,x_{kl})$ is right-Hilger-continuous for all $t \in \mathbb{T}$
and $x_{kl}:\mathbb{T} \to \mathbb{R}$ for all $k,l$.
\end{definition}

For a fixed $t \in \mathbb{T}^{{\kappa}}$ and $x: \mathbb{T} \to \mathbb{R}$,
the delta-derivative of $x$ (if it exists) is $x^\Delta(t)$,
having the property that given $\epsilon > 0$ there is a neighbourhood
 $U$ of $t$, that is, $U = (t - \delta, t + \delta) \cap \mathbb{T}$
for some $\delta > 0$, such that
$$
|(x^\sigma(t) - x(s)) - x^\Delta(t) (\sigma(t) - s)| \le \epsilon |\sigma(t) - s|,
\quad \text{for all }  s \in U.
$$
Hence, the delta-derivative of a matrix-valued function on a time
scale is defined as follows.


\begin{definition} \label{def2.3}\rm
Consider a function $X:\mathbb{T} \to \mathbb{R}^{m \times n}$.
We define $X^{\Delta}:= (x_{ij}^\Delta)$  to be the delta-derivative of
 $X$ on $\mathbb{T}$ if $x_{ij}^\Delta(t)$ exists for all $t \in \mathbb{T}^{\kappa}$
for  $1 \le i \le m, 1 \le j \le n$ and say that
$X$ is delta-differentiable on $\mathbb{T}$.
\end{definition}

The set of delta-differentiable matrix-valued functions
$K: \mathbb{T} \to \mathbb{R}^{m \times n}$ satisfy the simple useful formula
\cite[Theorem 5.2]{BP}
\begin{equation}
K^{\sigma}(t) = K(t) + \mu(t)K^{\Delta}(t),  \quad \text{for all }
 t \in \mathbb{T}^{\kappa}. \label{suf}
\end{equation}

The next theorem describes some more identities related to
delta-differentiable matrix-valued functions that will be used
in this work \cite[Theorem 5.3]{BP}.

\begin{theorem} \label{main} %(2.4)
Let $X,Y: \mathbb{T} \to \mathbb{R}^{n \times n}$ be matrix-valued functions.
If $X, Y$ are delta-differentiable on $\mathbb{T}$ then for all
$t \in \mathbb{T}^{\kappa}$ we have
\begin{enumerate}
\item $(X+Y)^\Delta(t) = X^\Delta(t) + Y^\Delta(t)$;
\item for any constant $k \in \mathbb{R}$, $(kX)^\Delta(t) = k X^\Delta(t)$;
\item $(XY)^\Delta(t) = [X^\Delta Y + X^\sigma Y^\Delta](t) = [X Y^\Delta + X^\Delta Y^\sigma](t)$;
\item If $X(t)$ and $X^\sigma(t)$ are invertible for all $t \in \mathbb{T}^{\kappa}$ then $$(X^{-1})^\Delta(t) = [-X^{-1} X^\Delta (X^\sigma)^{-1}](t) = [-(X^\sigma)^{-1} X^\Delta X^{-1}](t);$$
\item If $Y(t)$ and $Y^\sigma(t)$ are invertible for all $t \in \mathbb{T}^{\kappa}$ then $$[XY^{-1}]^\Delta(t) = [X^\Delta - X Y^{-1} Y^\Delta](t) (Y^\sigma(t))^{-1} = [X^\Delta - (XY^{-1})^\sigma Y^\Delta](t) Y^{-1}(t);$$
\item $(X^{*})^{\Delta} = (X^{\Delta})^{*}$, where $*$ refers to the conjugate transpose.
\end{enumerate}
\end{theorem}

Since all rd-continuous functions are delta-integrable, the antiderivative
of a right-Hilger-continuous matrix-valued function can be defined as follows:

\begin{theorem} \label{intf1}
Let $F: \mathbb{T}^{\kappa} \times \mathbb{R}^{n \times n} \to \mathbb{R}^{n \times n}$ and
$a \in \mathbb{T}$. If $F$ is right-Hilger-continuous on
$\mathbb{T}^{\kappa} \times \mathbb{R}^{n \times n}$ then there exists a function
$\mathcal{F}:C(\mathbb{T};\mathbb{R}^{n \times n}) \to C(\mathbb{T};\mathbb{R}^{n \times n})$ called the
 delta integral of $F$ such that
\begin{equation} \label{Hintegral}
[\mathcal{F}X](t):= \int_{a}^{t} F(s, X(s)) \, \Delta s, \quad \text{for all } t \in \mathbb{T}.
\end{equation}
\end{theorem}

Next, we describe positive definite (respectively semi-definite)
$n \times n$ matrices and some of their properties
 \cite{bhatia,Horn, KRP} on a time scale $\mathbb{T}$.
This class of square matrices on $\mathbb{T}$ plays a vital role in establishing
the non-multiplicity of solutions in this work.

\begin{definition} \label{posdef} \rm
Let $X:[a,b]_\mathbb{T} \to \mathbb{R}^{n \times n}$ and
$\mathbf{z}:\mathbb{T} \to \mathbb{R}^n$. Assume $\mathbf{z} \neq \mathbf{0}$ for all
$t \in [a,b]_\mathbb{T}$. We say that $X$ is positive definite
(respectively semi-definite) on $[a,b]_\mathbb{T}$ if $\mathbf{z}^TX\mathbf{z} > 0$
(respectively $\mathbf{z}^TX\mathbf{z} \ge 0$) on $[a,b]_\mathbb{T}$ and write $X > 0$
(respectively $X \ge 0$) on $[a,b]_\mathbb{T}$.
\end{definition}

It is clear from the above definition that a negative definite
(respectively semi-definite) matrix $Y$ on $\mathbb{T}$ will satisfy
$\mathbf{z}^{T}Y\mathbf{z} < 0$ (respectively $\mathbf{z}^{T}Y\mathbf{z} \le 0$)
for all $\mathbf{z}:\mathbb{T} \to \mathbb{R}^n$ and we say that $Y < 0$
(respectively $Y \le 0$).

The class of positive definite matrices defined above has the following
 properties.

\begin{theorem} \label{propposdef}
Let $A, B:[a,b]_\mathbb{T} \to \mathbb{R}^{n \times n}$. If $A, B > 0$ on $[a,b]_\mathbb{T}$
then the following properties hold on $[a,b]_\mathbb{T}$:
\begin{enumerate}
\item $A$ is invertible and $A^{-1} > 0$;
\item if $\alpha \in \mathbb{R}$ such that $\alpha > 0$ then $\alpha A > 0$;
\item if $\lambda$ is an eigenvalue of $A$ then $\lambda > 0$;
\item $\det(A) > 0$ and $\operatorname{tr}(A)>0$.
\item $A+B > 0$, $ABA > 0$ and $BAB > 0$;
\item if $A$ and $B$ commute then $AB > 0$ and similarly, if there exists
 $\mathcal{C} \le 0$ such that $A$ and $\mathcal{C}$ commute then $A\mathcal{C} \le 0$;
\item if $A - B \ge 0$ then $A \ge B$ and $B^{-1} \ge A^{-1} > 0$;
\item there exists $\beta > 0$ such that $A > \beta I$.
\end{enumerate}
\end{theorem}

 From now onwards we will write `matrix-valued functions' simply
as `matrix functions'.

The regressiveness of $n \times n$ matrix functions and their properties
is described \cite{BP} in a similar manner as for regressive $n$-functions,
 as follows.

\begin{definition} \label{def2.8}\rm
Consider a function $K:\mathbb{T} \to \mathbb{R}^{n \times n}$. We call $K$ regressive
on $\mathbb{T}$ if the following conditions hold:
\begin{itemize}
\item $K$ is rd-continuous on $\mathbb{T}$; and
\item the matrix $I+\mu(t)K$ is invertible for all
$t \in \mathbb{T}^{\kappa}$, where $I$ is the identity matrix.
\end{itemize}
We denote by $ \mathcal{R}:= \mathcal{R}(\mathbb{T};\mathbb{R}^{n \times n})$ the set of all
 regressive $n \times n$ matrix functions on $\mathbb{T}$.
\end{definition}

It is clear from above that all positive and negative definite
 matrix functions on $\mathbb{T}$ are regressive.
The following theorem \cite[pp. 191-192]{BP} lists some important
properties of regressive $n \times n$ matrix functions on $\mathbb{T}$.

\begin{theorem} \label{propoplus}
Let $A,B:\mathbb{T} \to \mathbb{R}^{n \times n}$. If $A,B \in \mathcal{R}$ then the following
identities hold for all $t \in \mathbb{T}^{\kappa}$:
\begin{enumerate}
\item $(A \oplus B)(t) = A(t) + B(t) + \mu(t)A(t)B(t)$;
\item $(\ominus A)(t) = -[I+\mu(t)A(t)]^{-1} A(t) = -A(t)[I+\mu(t)A(t)]^{-1}$;
\item $A^{*} \in \mathcal{R}$ and $(\ominus A)^{*}=\ominus A^{*}$;
\item $I+\mu(t)(A(t)\oplus B(t)) = [I+\mu(t)A(t)][I+\mu(t)B(t)]$;
\item $I+\mu(t)(\ominus A(t))= [I+\mu(t)A(t)]^{-1}$;
\item $(A \ominus B)(t) = (A \oplus (\ominus B))(t) = A(t)-[I+\mu(t)A(t)][I+\mu(t)B(t)]^{-1} B(t)$;
\item $[A(t) \oplus B(t)]^{*} = A(t)^{*} \oplus B(t)^{*}$.
\end{enumerate}
\end{theorem}

An important implication of regressive matrices is the generalized matrix
exponential function on a time scale.

\begin{definition} \label{exp} \rm
Let $K:\mathbb{T} \to \mathbb{R}^{n \times n}$ be a matrix function.
Fix $a \in \mathbb{T}$ and assume $K \in \mathcal{R}$. The matrix exponential
function denoted by $e_{K}(\cdot, a)$ is defined as
\begin{equation} \label{e2.3}
e_{K}(t,a):= \begin{cases}
\exp \big(\int_{a}^{t} K(s) \, d s \big), & \text{for }  t \in \mathbb{T}, \; \mu = 0; \\
\exp \big(\int_{a}^{t} \frac{\log (I + \mu(s) K(s))}{\mu(s)} \,\Delta s \big),
 & \text{for }  t \in \mathbb{T},\; \mu > 0,
\end{cases}
\end{equation}
where $\log$ is the principal logarithm function.
\end{definition}

Further properties of the matrix exponential function \cite[Chap 5]{BP}
are shown in the following theorem and will be used in this work.

\begin{theorem} \label{propexp}
Let $K,L:\mathbb{T} \to \mathbb{R}^{n \times n}$. If $K,L \in \mathcal{R}$ then the
following properties hold for all $t,s,r \in \mathbb{T}$:
\begin{enumerate}
\item $e_{0}(t,s) = I = e_{K}(t,t)$, where $0$ is the $n \times n$ zero matrix;
\item $e_{K}^{\sigma}(t,s) = e_{K}(\sigma(t),s) = (I + \mu(t)K(t))e_{K}(t,s)$;
\item $e_{K}(s,t) = e_{K}^{-1}(t,s) = e_{\ominus K^*}^{*}(t,s)$;
\item $e_{K}(t,s) e_{K}(s,r) = e_{K}(t,r)$;
\item $e_{K}(t,s)e_{L}(t,s) = e_{K \oplus L}(t,s)$;
\item $e_{K}^{\Delta}(t,s) = K(t) e_{K}(t,s)$.
\end{enumerate}
\end{theorem}

\section{Lipschitz continuity of matrix functions on $\mathbb{T}$} \label{sec3}

In this section, we present Lipschitz conditions for matrix functions
defined on a subset of $\mathbb{T} \times \mathbb{R}^{n \times n}$ that allow
positive definite matrices as Lipschitz constants for these functions.
The ideas are obtained from \cite{AL,EC,Hart,KP,thesis}.

\begin{definition} \label{leftsided} \rm
Let $S \subset \mathbb{R}^{n \times n}$ and $F: [a,b]_{\mathbb{T}} \times S \to \mathbb{R}^{n \times n}$
be a right-Hilger-continuous function. If there exists a positive
definite matrix $B$ on $\mathbb{T}$ such that for all $P, Q \in S$ with $P > Q$,
the inequality
\begin{equation} \label{LCOS}
F(t,P) - F(t,Q) \le B(t) (P-Q), \quad \text{for all }
 (t,P), (t,Q) \in [a,b]^{\kappa}_{\mathbb{T}} \times S
\end{equation}
holds, then we say $F$ satisfies a left-handed-Lipschitz condition
(or is left-handed Lipschitz continuous) on $[a,b]_{\mathbb{T}} \times S$.
\end{definition}

\begin{definition} \label{rightsided} \rm
Let $S \subset \mathbb{R}^{n \times n}$ and
$F: [a,b]_{\mathbb{T}} \times S \to \mathbb{R}^{n \times n}$ be a right-Hilger-continuous
function. If there exists a positive definite matrix $\mathcal{C}$ on $\mathbb{T}$
such that for all $P, Q \in S$ with $P > Q$, the inequality
\begin{equation} \label{LCOSright}
F(t,P) - F(t,Q) \le (P-Q) \mathcal{C}(t), \quad \text{for all }
 (t,P), (t,Q) \in [a,b]^{\kappa}_{\mathbb{T}} \times S
\end{equation}
holds, then we say $F$ satisfies a right-handed-Lipschitz condition
(or is right-handed Lipschitz continuous) on $[a,b]_{\mathbb{T}} \times S$.
\end{definition}

Classically, any value of matrix $B$ or $\mathcal{C}$ satisfying \eqref{LCOS}
or \eqref{LCOSright} would depend only on $[a,b]_{\mathbb{T}} \times S$
\cite[p.6]{Hart}. For the sake of simplicity, we consider
$[a,b]^{\kappa}_\mathbb{T} \times S$ to be convex and $F$ smooth on
 $[a,b]^{\kappa}_\mathbb{T} \times S$. Then the following
theorem \cite[p.248]{EC}, \cite[Lemma 3.2.1]{AL} will be helpful to
identify a Lipschitz constant for $F$ on $[a,b]^{\kappa}_\mathbb{T} \times S$
and obtain a sufficient condition for $F$ to satisfy the left- or
right-handed Lipschitz condition on $[a,b]^{\kappa}_\mathbb{T} \times S$.

\begin{corollary} \label{exisLC}
Let $a, b \in \mathbb{T}$ with $b > a$ and $A \in \mathbb{R}^{n \times n}$.
Let $k>0$ be a real constant and consider a function $F$ defined
either on a rectangle
\begin{equation}
R^{\kappa}:= \{(t,P) \in [a,b]^{\kappa}_{\mathbb{T}} \times \mathbb{R}^{n \times n}:
\|P - A\| \le k \}
\end{equation}
or on an infinite strip
\begin{equation}
{\mathcal{S}^\kappa}:= \{(t,P) \in [a,b]^{{\kappa}}_{\mathbb{T}} \times \mathbb{R}^{n \times n}:
 \|P\| < \infty\}.
\end{equation}
If $\frac{\partial F(t, P)}{\partial p_{ij}}$ exists for
 $1 \le i,j \le n$ and is continuous on $R^{{\kappa}}$
(or $\mathcal{S}^{\kappa}$), and there is a positive definite matrix $L$
such that for all $(t, P) \in R^{{\kappa}}$ (or $\mathcal{S}^{\kappa}$), we have
\begin{equation} \label{parder}
\frac{\partial F(t, P)}{\partial p_{ij}} \le L, \quad \text{for all }
 i,j = 1,2,\dots,n,
\end{equation}
then $F$ satisfies \eqref{LCOS} with $B(t) = L$ or \eqref{LCOSright}
with $\mathcal{C}(t) = L$, on $R^{{\kappa}}$ (or $\mathcal{S}^{\kappa}$)
for all $t \in [a,b]_{\mathbb{T}}$.
\end{corollary}

\begin{proof}
 The proof is similar to that of \cite[Lemma 3.2.1]{AL} except
that $\frac{\partial F(t, P)}{\partial p_{ij}}$ is considered bounded
 above by $B(t) = L$ in the left-handed case or $\mathcal{C}(t) = L$ in
the right-handed case, for all $t \in [a,b]_{\mathbb{T}}$.
\end{proof}

\section{non-multiplicity results}

In this section, we present generalized results regarding non-multiplicity
 of solutions to the dynamic IVPs \eqref{1}, \eqref{1i} and
\eqref{2}, \eqref{1i} within a domain $S \subseteq \mathbb{R}^{n \times n}$.
 The results are based on ideas in \cite[Chap 5]{BP},
 methods from ordinary differential equations
\cite{Birk,EC,KP} and matrix theory \cite{bhatia,Horn,pipes}.

The following lemma establishes conditions for a function to be a solution
of \eqref{1}, \eqref{1i} and \eqref{2}, \eqref{1i}.


\begin{lemma} \label{soldeqX}
Consider the dynamic IVP \eqref{1}, \eqref{1i}.
Let $F: [a,b]^{\kappa}_{\mathbb{T}} \times \mathbb{R}^{n \times n} \to \mathbb{R}^{n \times n}$
be a right-Hilger-continuous matrix-valued function.
Then a function $X$ solves \eqref{1}, \eqref{1i} if and only if it satisfies
\begin{equation}  \label{deqsolX}
X(t) = \int_{a}^{t} F(s, X(s)) \,\Delta s + A, \quad  \text{for all }
 t \in [a,b]_{\mathbb{T}},
\end{equation}
where $A$ is the initial value defined by \eqref{1i}.
\end{lemma}

Similarly, a function can be defined as a solution of \eqref{2}, \eqref{1i}.

\begin{theorem} \label{exis1}
Let $S \subseteq \mathbb{R}^{n \times n}$ and let
$F:[a,b]_\mathbb{T} \times S \to \mathbb{R}^{n \times n}$ be a right-Hilger-continuous function.
If there exist $P, Q \in S$ with $P > Q$ and a positive definite matrix
 $B$ on ${\mathbb{T}}$ such that
\begin{itemize}
\item[(1)] $B \in C_{rd}([a,b]_{\mathbb{T}};\mathbb{R}^{n \times n})$;
\item[(2)] $e_{B}(t,a)$ commutes with $B(t)$ for all $t \in [a,b]_{\mathbb{T}}$
and with $P(t)$ for all $(t,P) \in [a,b]_{\mathbb{T}} \times S$;
\item[(3)] the left-handed Lipschitz condition, $F(t,P) - F(t,Q) \le B(t) (P-Q)$
holds for all $(t,P), (t,Q) \in [a,b]^{\kappa}_\mathbb{T} \times S$,
\end{itemize}
then  \eqref{1}, \eqref{1i} has, at most, one solution, $X$, with
$X(t) \in S$ for all $t \in [a,b]_{\mathbb{T}}$.
\end{theorem}

\begin{proof}
By contradiction, and without loss of generality, assume two solutions
$X, Y$ of \eqref{1}, \eqref{1i} in $S$ such that $X-Y \ge 0$
on $[a,b]_{\mathbb{T}}$, and show that $X \equiv Y$ on $[a,b]_{\mathbb{T}}$.

By Lemma \ref{soldeqX}, $X$ and $Y$ must satisfy \eqref{deqsolX}.
Define $U:= X-Y$ on $[a,b]_{\mathbb{T}}$. We show that $U \equiv 0$ on $[a,b]_{\mathbb{T}}$.

Since (3) holds, we have that for all $t \in [a,b]_{\mathbb{T}}^{\kappa}$,
\begin{equation} \label{LCOS2}
U^{\Delta}(t) - B(t) U(t) =  F(t, X(t)) - F(t, Y(t)) - B(t) (X(t) - Y(t)) \le 0.
\end{equation}
Note that $B$ being positive definite is regressive on $[a,b]_{\mathbb{T}}$.
Thus,  $e_{B}(t,a)$ and $e_{B}^{\sigma}(t,a)$ are positive definite with
positive definite inverses on $[a,b]_{\mathbb{T}}$, by Theorem \ref{propposdef}(1).
Hence, using Theorem \ref{main} and Theorem \ref{propexp} we obtain,
for all $t \in [a,b]_{\mathbb{T}}^{\kappa}$,
\begin{align*}
[e_{B}^{-1}(t,a) U(t)]^{\Delta}
&= [e_{B}^{-1}(t,a)]^{\sigma} U^{\Delta}(t) + [e_{B}^{-1}(t,a)]^{\Delta} U(t)\\
&= [e_{B}^{\sigma}(t,a)]^{-1} U^{\Delta}(t) - [e_{B}^{\sigma}(t,a)]^{-1}
  e_{B}^{\Delta}(t,a) e_{B}^{-1}(t,a) U(t)\\
&= [e_{B}^{\sigma}(t,a)]^{-1} [U^{\Delta}(t) - e_{B}^{\Delta}(t,a) e_{B}^{-1}(t,a) U(t)]\\
&= (e_{B}^{\sigma}(t,a))^{-1} [U^{\Delta}(t) - B(t) U(t)].
\end{align*}
By (2), $e_{B}^{-1}(t,a)$ also commutes with $B(t)$ for all
$t \in [a,b]_{\mathbb{T}}$ and with $P^{\Delta}(t)$ for all
$(t,P) \in [a,b]_{\mathbb{T}} \times S$. Thus,
$e_{B}^{-1}(t,a)$ commutes with $U^{\Delta}(t) - B(t)U(t)$ for all
$t \in [a,b]_{\mathbb{T}}$. Hence, by Theorem \ref{propposdef}(6)
and \eqref{LCOS2}, we obtain
$$
[e_{B}^{-1}(t,a) U(t)]^{\Delta} \le 0, \quad \text{for all }
 t \in [a,b]_{\mathbb{T}}^{\kappa}.
$$
This means that $e_{B}^{-1}(t,a) U(t)$ is non-increasing for all
$t \in [a,b]_{\mathbb{T}}$. But $U$ is positive semi-definite on $[a,b]_{\mathbb{T}}$
and $U(a)=0$. Hence, $U \equiv 0$ on $[a,b]_{\mathbb{T}}$.
This means that $X(t) = Y(t)$ for all $t \in [a,b]_{\mathbb{T}}$.

A similar argument holds for the case where $Y - X \ge 0$ on $[a,b]_\mathbb{T}$.
\end{proof}


\begin{corollary} \label{exis2}
The above theorem also holds if $F$ has continuous partial derivatives
with respect to the second argument and there exists a positive definite
matrix $L$ such that $\frac{\partial F(t,P)}{\partial p_{ij}} \le L$.
In that case, $F$ satisfies \eqref{LCOS} on $R^{\kappa}$ or $\mathcal{S}^{\kappa}$
 with $B:= L$ by Corollary \ref{exisLC}.
\end{corollary}

\begin{theorem} \label{exis-right}
Let $S \subseteq \mathbb{R}^{n \times n}$ and let $F:[a,b]_\mathbb{T} \times S \to \mathbb{R}^{n \times n}$ be a right-Hilger-continuous function. If there exist $P, Q \in S$ with $P > Q$ and a positive definite matrix $\mathcal{C}$ on ${\mathbb{T}}$ such that
\begin{itemize}
\item[(1)] $\mathcal{C} \in C_{rd}([a,b]_{\mathbb{T}};\mathbb{R}^{n \times n})$;
\item[(2)] $e_\mathcal{C}^{-1}(t,a)$ commutes with $\mathcal{C}(t)$ for all
 $t \in [a,b]_{\mathbb{T}}$ and with $P(t)$ for all $(t,P) \in [a,b]_{\mathbb{T}} \times S$;
\item[(3)] the right-handed Lipschitz condition,
 $F(t,P) - F(t,Q) \le (P-Q) \mathcal{C}(t)$ holds for all
 $(t,P), (t,Q) \in [a,b]^{\kappa}_\mathbb{T} \times S$,
\end{itemize}
then the IVP \eqref{1}, \eqref{1i} has, at most, one solution,
 $X$, with $X(t) \in S$ for all $t \in [a,b]_{\mathbb{T}}$.
\end{theorem}

The proof of the above theorem is similar to that of
Theorem \ref{exis1} and is omitted.

\begin{corollary} \label{exisright}
Theorem  \ref{exis-right} also holds if $F$ has continuous partial
derivatives with respect to the second argument and there exists a
positive definite matrix $H$ such that
$\frac{\partial F(t,P)}{\partial p_{ij}} \le H$.
In that case, $F$ satisfies \eqref{LCOSright} on $R^{\kappa}$
or $\mathcal{S}^{\kappa}$ with $\mathcal{C}:= H$ by Corollary \ref{exisLC}.
\end{corollary}

Our next two results are based on the, so called,
\textit{inverse Lipschitz condition}, in conjunction with
\eqref{LCOS} and \eqref{LCOSright} and determine the existence of
at most one solution for \eqref{1}, \eqref{1i} in the light of
Theorem \ref{propposdef}(7).

\begin{corollary} \label{exis4}
Let $S \subseteq \mathbb{R}^{n \times n}$ and
$F: [a,b]^{\kappa}_{\mathbb{T}} \times S \to \mathbb{R}^{n \times n}$
be right-Hilger-continuous. Assume there exists a positive definite
matrix $B$ on $\mathbb{T}$ such that conditions (1) and (2) of Theorem \ref{exis1} hold.
If $P(t)-Q(t)$ is positive definite and increasing for all
$(t,P),(t,Q) \in [a,b]_{\mathbb{T}} \times S$ and the inequality
\begin{equation} \label{LCOS4}
(P-Q)^{-1} \le (P^{\Delta} - Q^{\Delta})^{-1} B(t) , \quad \text{for all }
 (t,P), (t,Q) \in [a,b]^{\kappa}_{\mathbb{T}} \times S
\end{equation}
holds, then the IVP \eqref{1}, \eqref{1i} has, at most, one solution $X$
with $X(t) \in S$ for all $t \in [a,b]_{\mathbb{T}}$.
\end{corollary}

\begin{proof}
 If \eqref{LCOS4} holds then \eqref{LCOS} holds,
 by Theorem \ref{propposdef}(7). Hence, the IVP \eqref{1}, \eqref{1i} has,
at most, one solution by Theorem \ref{exis1}.
\end{proof}


\begin{corollary} \label{exis4right} %4.7
Let $S \subseteq \mathbb{R}^{n \times n}$ and
$F: [a,b]^{\kappa}_{\mathbb{T}} \times S \to \mathbb{R}^{n \times n}$
be right-Hilger-continuous. Assume there exists a positive definite
matrix $\mathcal{C}$ on $\mathbb{T}$ such that conditions (1) and (2)
of Theorem \ref{exis-right} hold.  If $P(t)-Q(t)$ is positive definite
and increasing for all $(t,P), (t,Q) \in [a,b]_{\mathbb{T}} \times S$ and the inequality
\begin{equation} \label{LCOS4right}
(P-Q)^{-1} \le \mathcal{C}(t) (P^{\Delta} - Q^{\Delta})^{-1} , \quad \text{for all }
(t,P), (t,Q) \in [a,b]^{\kappa}_{\mathbb{T}} \times S
\end{equation}
holds, then the IVP \eqref{1}, \eqref{1i} has, at most, one solution
$X$ with $X(t) \in S$ for all $t \in [a,b]_{\mathbb{T}}$.
\end{corollary}

\begin{proof} If \eqref{LCOS4right} holds then \eqref{LCOSright} holds,
by Theorem \ref{propposdef}(7). Hence, the IVP \eqref{1}, \eqref{1i}
has, at most, one solution by Theorem \ref{exis-right}.
\end{proof}

We now present examples that illustrate the results presented above.


\begin{example} \label{examp4.8} \rm
Let $S:= \{P \in \mathbb{R}^{2 \times 2}: \operatorname{tr}(P^{T}P) \le 2\}$, where
 $P = \begin{pmatrix} p_{1} & -p_{2} \\ -p_{2} & p_{1} \end{pmatrix}$.
Consider the initial-value problem
\begin{equation} \label{ex1}
\begin{gathered}
X^\Delta = F(t, X) = \begin{pmatrix}
 1+x_{1}^{2} & t^{2}-x_{2} \\ x_{2} + t & t-x_{1} \end{pmatrix}, \quad
\text{for all }  t \in [0, b]^{\kappa}_{\mathbb{T}};\\
X(0) = I.
\end{gathered}
\end{equation}
We shall show that the conditions of Theorem \ref{exis1}
are satisfied for all $(t,P) \in [0, b]^{\kappa}_{\mathbb{T}} \times S$.
Then there is at most one solution, $X$, such that
$\operatorname{tr}(X^{T}X)\le 2$ for all $t \in [0, b]_{\mathbb{T}}$.

Note that for $P \in S$, we have $\sum_{j=1}^{2} p_{j}^{2} \le 1$.
Thus, $|p_{j}| \le 1$ for $j=1,2$. Let
$L:=  \begin{pmatrix} k & 0 \\ 0 & k \end{pmatrix}$, where $k \ge 2$, and
let $\mathbf{z} \in \mathbb{R}^2$ such that
$\mathbf{z} = \begin{pmatrix} x \\ y \end{pmatrix}$, where $x \neq 0$,
$y\neq 0$. Then
\begin{equation} \label{Lposdef}
\mathbf{z}^{T} L \mathbf{z} = k(x^{2} + y^{2}) > 0.
\end{equation}
Hence, $L$ is positive definite. We note that $F$ is right-Hilger-continuous
on $[0,b]^{\kappa}_\mathbb{T} \times S$ as all of its components are rd-continuous
on $[0, b]_\mathbb{T}$. Moreover, since $L$ is a diagonal matrix,
it commutes with $e_{L}(t,a)$ for all $t \in [0,b]_{\mathbb{T}}$.
It can be easily verified that $e_{L}(t,a)$ also commutes with $P$
for all $(t,P) \in [0,b]_{\mathbb{T}} \times S$.

We show that $F$ satisfies \eqref{LCOS} on $[0,b]^{\kappa}_\mathbb{T} \times S$.
Note that for all $t \in [0, b]^{\kappa}_{\mathbb{T}}$ and $P \in S$, we have
\[
\frac{\partial F}{\partial p_{1}}
= \begin{pmatrix} 2p_{1} & 0 \\ 0 & -1 \end{pmatrix}
 \quad \text{and} \quad
\frac{\partial F}{\partial p_{2}}
= \begin{pmatrix} 0 & -1 \\ 1  & 0 \end{pmatrix}.
\]
Then, we have
\[
\mathbf{z}^{T}\Big(L - \frac{\partial F}{\partial p_{1}} \Big) \mathbf{z}
= (k-2p_{1})x^{2}+(k+1)y^{2}
\ge (k-2)x^{2}+(k+1)y^{2},
\]
and
\[
\mathbf{z}^{T} \Big( L - \frac{\partial F}{\partial p_{2}}\Big) \mathbf{z}
= k(x^{2} + y^2).
\]
Therefore, $L - \frac{\partial F}{\partial p_{j}} > 0$ for $j =1, 2$.
Hence, by Theorem \ref{propposdef}(7),
$\frac{\partial F}{\partial p_{j}} < L$ for $j =1, 2$.
Using Corollary \ref{exis2}, condition  \eqref{LCOS} holds for
$L = \begin{pmatrix} k & 0 \\
0 & k \end{pmatrix}$ and all $k \ge 2$.
In this way, all conditions of Theorem \ref{exis1} are satisfied
and we conclude that our example has at most one solution,
 $X(t) \in S$, for all $t \in [0, b]_{\mathbb{T}}$.
\end{example}


\begin{example} \label{examp4.9}\rm
Let $u,w$ be differentiable functions on $(0, \infty)_{\mathbb{T}}$ with $u$
increasing and $u(t)>1$ for all $t \in (0, \infty)_{\mathbb{T}}$.
Let $D$ be the set of all $2 \times 2$ positive definite symmetric matrices.
We shall show that, for any matrix
 $P= \begin{pmatrix} 2u+t^{2} & w-t \\ w-t & 2u+t^{2} \end{pmatrix}$ in $D$,
there exists a matrix
$Q :=  \begin{pmatrix} u+t^{2} & w-t \\ w-t & u+t^{2} \end{pmatrix}$
also in $D$,
such that the dynamic IVP \eqref{1}, \eqref{1i} has at most one solution,
$X$, on $(0, \infty)_{\mathbb{T}}$ such that $X \in D$.
To do this we show that \eqref{1} satisfies the conditions of
Corollary \ref{exis4} for all
$(t,P), (t,Q) \in (0, \infty)^{\kappa}_{\mathbb{T}} \times D$.

Note that since $u,w$ are differentiable on $(0, \infty)_{\mathbb{T}}$,
 we have $P^{\Delta} = F(t,P)$ and $Q^{\Delta} = F(t,Q)$ right-Hilger-continuous
on $(0, \infty)^{\kappa}_{\mathbb{T}}$. We also note that
$P-Q =  \begin{pmatrix} u & 0 \\ 0 & u \end{pmatrix}$, which is
positive definite and, hence, invertible by Theorem \ref{propposdef}(1).
 Moreover, since $u^{\Delta} > 0$ on $(0, \infty)^{\kappa}_{\mathbb{T}}$,
we have $P^{\Delta} - Q^{\Delta} > 0$ and thus, invertible on
$(0, \infty)^{\kappa}_{\mathbb{T}}$.
Define
$$
B:=\begin{pmatrix} a(t) & b(t) \\ b(t) & a(t) \end{pmatrix}
$$
with $a(t)>b(t)$ for all $t \in (0, \infty)_{\mathbb{T}}$.
Then $B$ and any real symmetric matrix of the form $Q$ will
commute with $e_{B}(t,0)$, as there exists an orthogonal matrix
$M = \begin{pmatrix} 1 & 1 \\ -1 & 1 \end{pmatrix}$ such that
$M^{-1}BM$, $M^{-1}e_{B}(t,0)M$ and $M^{-1}QM$ are diagonal matrices
of their respective eigenvalues. Thus, the principal axes
of the associated quadric surface of $e_{B}(t,0)$ coincide with
the principal axes of the associated quadric surfaces of $B$
and $Q$ (see \cite[p.7]{pipes}).

Therefore, taking $a=u^{\Delta}$ and $b=0$, we obtain that for all
$t \in (0, \infty)_{\mathbb{T}}$, 
\[
(P-Q)^{-1} - (P^{\Delta} - Q^{\Delta})^{-1} B(t)
=  \begin{pmatrix} 1/u - 1 & 0 \\ 0 & 1/u - 1 \end{pmatrix}\,.
\]
Thus, for any non-zero vector 
$\mathbf{z}=  \begin{pmatrix} x \\ y \end{pmatrix}$,
 and all $t \in (0, \infty)_{\mathbb{T}}$, we have
\[
\mathbf{z}^{T} [(P-Q)^{-1} - (P^{\Delta} - Q^{\Delta})^{-1} B(t)] \mathbf{z}
 = \frac{1-u}{u} x^2 + \frac{1-u}{u} y^{2} < 0.
\]
Therefore, $(P-Q)^{-1} < (P^{\Delta} - Q^{\Delta})^{-1} B(t)$ for all
$t \in (0, \infty)_{\mathbb{T}}$ by Theorem \ref{propposdef}(7).
This completes all conditions of Corollary \ref{exis4} and we
conclude that \eqref{1}, \eqref{1i} has at most one positive definite
symmetric solution, $X$, on $(0, \infty)_{\mathbb{T}}$.
\end{example}

Our next result concerns the non-multiplicity of solutions to
the dynamic IVPs \eqref{2}, \eqref{1i} for which Theorem \ref{exis1}
or Corollary \ref{exis2} do not apply directly.
 However, we employ the regressiveness of a positive definite
matrix $B$ to prove the non-multiplicity of solutions to
the IVP \eqref{2}, \eqref{1i}, within a domain
$S \subseteq \mathbb{R}^{n \times n}$ by constructing a modified Lipschitz condition.

\begin{theorem} \label{exis3}
Let $S \subseteq \mathbb{R}^{n \times n}$ and let
$F:[a,b]_\mathbb{T} \times S \to \mathbb{R}^{n \times n}$ be a right-Hilger-continuous function.
If there exist $P, Q \in S$ with $P>Q$ on $[a,b]_{\mathbb{T}}$ and a
positive definite matrix $B$ on $\mathbb{T}$ such that
\begin{itemize}
\item[(1)] $B \in C_{rd}([a,b]_{\mathbb{T}};\mathbb{R}^{n \times n})$;
\item [(2)] $e_{B}(t,a)$ commutes with $B(t)$ for all $t \in [a,b]_{\mathbb{T}}$
and with $P(t)$ for all $(t,P) \in [a,b]_{\mathbb{T}} \times S$;
\item[(3)] the inequality
\begin{equation} \label{LCOS3}
F(t,P) - F(t,Q) \le - \ominus B(t) ( P - Q )
\end{equation}
holds for all $(t,P), (t,Q) \in [a,b]^{\kappa}_\mathbb{T} \times S$,
\end{itemize}
then the IVP \eqref{2}, \eqref{1i} has at most one solution,
$X$, with $X(t) \in S$ for all $t \in [a,b]_{\mathbb{T}}$.
\end{theorem}

\begin{proof}
 As before, we consider $X, Y \in S$ as two solutions of \eqref{2}, \eqref{1i}
and assume $X-Y \ge 0$ on $[a,b]_\mathbb{T}$. Let $W:= X - Y$.
We show that $W \equiv 0$ on $[a,b]_{\mathbb{T}}$, and so $X(t) = Y(t)$
for all $t \in [a,b]_{\mathbb{T}}$.

Since \eqref{LCOS3} holds, we have that for all 
$t \in [a,b]_{\mathbb{T}}^{\kappa}$,
\begin{equation}
W^{\Delta}(t) + \ominus B(t) W^{\sigma}(t) =  F(t, X^{\sigma}(t)) - F(t, Y^{\sigma}(t))
 + \ominus B(t)\ (X^{\sigma}(t) - Y^{\sigma}(t)) \le 0.
\end{equation}
Note that $I+\mu(t)B(t)$ is invertible for all $t \in [a,b]_\mathbb{T}$.
Then, by Theorem \ref{propoplus}{(2)}, the above inequality reduces to
\begin{equation}
W^{\Delta}(t) - [I + \mu(t)B(t)]^{-1} B(t) W^{\sigma}(t) \le 0,
\quad \text{for all }  t \in [a,b]_{\mathbb{T}}. \label{ineq1}
\end{equation}
Also, $e_{B}(t,a)$ and $e_{B}^{\sigma}(t,a)$ are positive definite
and hence invertible on $[a,b]_\mathbb{T}$ and, thus, from Theorem \ref{propexp}{(2)},
\begin{equation}
W^{\Delta}(t) - e_{B}(t,a)(e_{B}^{\sigma}(t,a))^{-1} B(t)\ W^{\sigma}(t)
 \le 0, \quad \text{for all } t \in [a,b]_{\mathbb{T}}. \label{last}
\end{equation}
By (2), $e_{B}^{-1}(t,a)$ commutes with $B(t)$ and, so, $e_{B}(t,a)$
commutes with $e_{B}^{\sigma}(t,a)$, for all $t \in [a,b]_{\mathbb{T}}$.
Thus, $e_{B}^{-1}(t,a)$ commutes with $(e_{B}^{\sigma}(t,a))^{-1}$
for all $t \in [a,b]_{\mathbb{T}}$. We also see from (2) that
$e_{B}^{-1}(t,a)$ commutes with $P(t)$ for all $t \in [a,b]_{\mathbb{T}}$.
 Hence, $e_{B}^{-1}(t,a)$ commutes with $P^{\sigma}$ and $P^{\Delta}$ and, thus,
with $W^{\Delta}$ and $W^{\sigma}$ for all $t \in [a,b]_{\mathbb{T}}$.
Thus, rearranging inequality \eqref{last} and using
Theorem \ref{propposdef}(6) yields
\begin{equation} \label{ebw}
e_{B}^{-1}(t,a) W^{\Delta}(t) - (e_{B}^{\sigma}(t,a))^{-1} B(t)\ W^{\sigma}(t) \le 0,
 \quad \text{for all } t \in [a,b]^{\kappa}_{\mathbb{T}}.
\end{equation}

Hence, using properties of Theorem \ref{main},  Theorem \ref{propposdef}
and Theorem \ref{propexp} and  with \eqref{ebw},  we obtain that
for all $t \in [a,b]_\mathbb{T}$,
\begin{align*}
[e_{B}^{-1}(t,a) W(t)]^\Delta
&= e_{B}^{-1}(t,a) W^\Delta(t) + [e_{B}^{-1}(t,a)]^\Delta W^\sigma(t) \\
&\le e_{B}^{-1}(t,a) W^\Delta(t) - [e_{B}^{\sigma}(t,a)]^{-1} B(t) W^\sigma(t)
\le 0.
\end{align*}
Thus $e_{B}^{-1}(t,a) W(t)$ is non-increasing for all $t \in [a,b]_{\mathbb{T}}$.
Since $e_{B}^{-1}(t,a) > 0$ for all $t \in [a,b]_{\mathbb{T}}$ and $W(a) = 0$,
we have $W \equiv 0$ on $[a,b]_{\mathbb{T}}$. This means that $X(t) = Y(t)$
for all $t \in [a,b]_{\mathbb{T}}$.
\end{proof}


\begin{theorem} \label{exis3right}
Let $S \subseteq \mathbb{R}^{n \times n}$ and let
$F:[a,b]_\mathbb{T} \times S \to \mathbb{R}^{n \times n}$ be a right-Hilger-continuous
function. If there exist $P, Q \in S$ with $P > Q$ on $[a,b]_{\mathbb{T}}$
 and a positive definite matrix $\mathcal{C}$ on $\mathbb{T}$ such that
\begin{itemize}
\item[(1)] $\mathcal{C} \in C_{rd}([a,b]_{\mathbb{T}};\mathbb{R}^{n \times n})$;
\item [(2)] $e_{\mathcal{C}}(t,a)$ commutes with $\mathcal{C}(t)$ for all $t \in [a,b]_{\mathbb{T}}$ and with $P(t)$ for all $(t,P) \in [a,b]_{\mathbb{T}} \times S$;
\item[(3)]  the inequality
\begin{equation} \label{LCOS3right}
F(t,P) - F(t,Q) \le ( P - Q ) (-\ominus \mathcal{C}(t))
\end{equation}
holds for all $(t,P), (t,Q) \in [a,b]^{\kappa}_\mathbb{T} \times S$,
\end{itemize}
then the IVP \eqref{2}, \eqref{1i} has at most one solution, $X$,
with $X(t) \in S$ for all $t \in [a,b]_{\mathbb{T}}$.
\end{theorem}

 The proof of the above theorem is similar to that of Theorem \ref{exis3}
and is omitted.

\begin{corollary} \label{exis5}
Let $S \subseteq \mathbb{R}^{n \times n}$ and
$F: [a,b]^{\kappa}_{\mathbb{T}} \times S \to \mathbb{R}^{n \times n}$
be right-Hilger-continuous. Assume there exists a positive definite
matrix $B$ on $\mathbb{T}$ such that conditions (1) and (2) of
Theorem \ref{exis3} hold.  If $P-Q$ is positive definite and
increasing on $[a,b]_{\mathbb{T}}$ and the inequality
\begin{equation} \label{LCOS5}
(P-Q)^{-1} \le (P^{\Delta} - Q^{\Delta})^{-1} (- \ominus B) , \quad \text{for all }
  (t,P), (t,Q) \in [a,b]^{\kappa}_{\mathbb{T}} \times S
\end{equation}
holds, then the IVP \eqref{2}, \eqref{1i} has at most one solution
$X$ with $X(t) \in S$ for all $t \in [a,b]_{\mathbb{T}}$.
\end{corollary}

\begin{proof}
If \eqref{LCOS5} holds then \eqref{LCOS3} holds, by
Theorem \ref{propposdef}(7). Hence, the IVP \eqref{2}, \eqref{1i} has at most
 one solution by Theorem \ref{exis3}.
\end{proof}


\begin{corollary} \label{exis5right} %4.13
Let $S \subseteq \mathbb{R}^{n \times n}$ and
$F: [a,b]^{\kappa}_{\mathbb{T}} \times S \to \mathbb{R}^{n \times n}$
be right-Hilger-continuous. Assume there exists a positive definite
matrix $\mathcal{C}$ on $\mathbb{T}$ such that conditions (1) and (2) of
 Theorem \ref{exis3right} hold.  If $P-Q$ is positive definite and
increasing on $[a,b]_{\mathbb{T}}$ and the inequality
\begin{equation} \label{LCOS5right}
(P-Q)^{-1} \le  -\ominus \mathcal{C} (P^{\Delta} - Q^{\Delta})^{-1}  , \quad \text{for all }
 (t,P), (t,Q) \in [a,b]_{\mathbb{T}} \times S
\end{equation}
holds, then the IVP \eqref{2}, \eqref{1i} has at most one solution $X$
with $X(t) \in S$ for all $t \in [a,b]_{\mathbb{T}}$.
\end{corollary}

\begin{proof}
If \eqref{LCOS5right} holds then \eqref{LCOS3right} holds,
by Theorem \ref{propposdef}(7). Hence, the IVP \eqref{2}, \eqref{1i}
has at most one solution by Theorem \ref{exis3right}.
\end{proof}

We will present an example of a matrix dynamic equation that has a unique
solution. This is shown by
using Theorem \ref{exis3} and the following lemma \cite[Theorem 5.27]{BP}.

\begin{lemma} \label{linear}
Let $a,b \in \mathbb{T}$ with $b > a$ and $X:[a,b]_{\mathbb{T}} \to \mathbb{R}^{n \times n}$.
Consider the matrix initial value problem
\begin{equation}
\begin{gathered} \label{4.15}
X^{\Delta} = -V^{*}(t) X^{\sigma} + G(t), \quad 
 \text{for all }  t \in [a,b]_{\mathbb{T}}; \\
X(a) = A,
\end{gathered}
\end{equation}
where $G$ is a rd-continuous $n \times n$-matrix function
on $[a,b]_{\mathbb{T}}$. If $V:[a,b]_{\mathbb{T}} \to \mathbb{R}^{n \times n}$
is regressive then the above IVP has a unique solution
 $$
X(t) = e_{\ominus V^{*}}(t,a) A + \int_{a}^{t} e_{\ominus V^{*}}(t,s) G(s) \Delta s,
\quad \text{for all } t \in [a,b]_{\mathbb{T}}.
$$
\end{lemma}

\begin{example}  \label{examp4.15}\rm
Let $S$ be the set of all non-singular symmetric $n \times n$
matrices.
Let $K = a_{i}I$ for $1 \le i \le n$, where
 $a_{i} \in (0, \infty)_{\mathbb{T}}$.
 Consider the IVP
\begin{gather}
\begin{aligned}
X^\Delta &= F(t, X^{\sigma}) \\
&= -K(I+2\mu(t)K)^{-1} X^{\sigma}
+ e_{ \ominus K (I + 2\mu(t)K)^{-1}}(t,a), \quad
 \text{for all }  t \in [a, b]^{\kappa}_{\mathbb{T}};
\end{aligned} \label{ex2}\\
X(a) = I. \label{ex2i}
\end{gather}
We shall show that \eqref{ex2}, \eqref{ex2i} has at most one solution, $X$,
such that $X \in S$ for all $t \in [a, b]_{\mathbb{T}}$.

 We note that $K$ is a positive definite and diagonal matrix and hence
$I+2\mu K$ is invertible on $[a,b]_{\mathbb{T}}^{\kappa}$ and commutes with $K$.
 Moreover, $-K(I+2\mu K)^{-1}$ is also diagonal and, thus, commutes with $P$.
We also note that $F$ is right-Hilger-continuous on
$[a,b]_{\mathbb{T}} \times \mathbb{R}^{n \times n}$, as each of its components
is rd-continuous on $[a,b]_{\mathbb{T}}$. It follows from Theorem \ref{propoplus}(2)
that for all $t \in [a,b]_{\mathbb{T}}$,
\begin{align*}
&F(t,P) - F(t,Q) + \ominus 2K(P-Q) \\
&= [-K(I + 2\mu(t)K)^{-1}-2K(I + 2\mu(t)K)^{-1}] (P - Q) \\
&= -3K(I + 2\mu(t)K)^{-1}(P - Q) < 0,
\end{align*}
where we used Theorem \ref{propposdef}(6) in the last step.
Therefore, \eqref{LCOS3} holds for $B=2K$.
Hence,  \eqref{ex2}, \eqref{ex2i} has at most one solution
$X$ such that $X \in S$.
Moreover, by Lemma \ref{linear}, the non-singular
matrix function
$$
X(t) = e_{ \ominus K(I + 2\mu(t)K)^{-1}}(t,a) (1+ t-a)
$$
uniquely solves \eqref{ex2}, \eqref{ex2i} for all $t \in [a,b]_{\mathbb{T}}$.
\end{example}

\section*{Conclusions and future directions}

In this paper, we presented results identifying conditions
that guarantee that if the systems \eqref{1}, \eqref{1i} and
 \eqref{2}, \eqref{1i} have a solution then it is unique.
We did this by formulating suitable Lipschitz conditions
for matrix functions on time scales. The conditions will also
be helpful to determine the existence and uniqueness of solutions
to dynamic models of the form \eqref{1}, \eqref{1i}
and \eqref{2}, \eqref{1i} and of higher order.
 The results will also be helpful to establish properties
of solutions for matrix-valued boundary value problems on time scales.


\subsection*{Acknowledgements}
The author is grateful to Dr. Chris Tisdell for his useful questions
and comments that helped to develop the ideas in this work.

\begin{thebibliography}{12}

% Removed leftover running-head from an unrelated journal template:
% \markboth{Taylor \& Francis and I.T. Consultant}
% {Journal of Difference Equations and Applications}

\bibitem{AL} R. P. Agarwal,  V. Lakshmikantham;
 \emph{Uniqueness and nonuniqueness criteria for ordinary differential equations},
 World Scientific Publishing Co. Inc., River Edge, NJ, 1993.

\bibitem{BS} M. Berzig,  B. Samet;
\emph{Solving systems of nonlinear matrix equations involving Lipschitzian
 mappings}, Fixed Point Theory Appl. 89 (2011), 10, pp.1687-1812.

\bibitem{bhatia} R. Bhatia;
 \emph{Positive definite matrices}, Princeton Series in Applied Mathematics,
Princeton University Press, Princeton, NJ, 2007.

\bibitem{Birk} G. Birkhoff,  G. Rota;
\emph{Ordinary differential equations},
 Fourth Edition, John Wiley \& Sons Inc., New York, 1989.

\bibitem{BP} M. Bohner, A. Peterson;
\emph{Dynamic equations on time scales: An introduction with applications},
Birkh\"auser Boston Inc., Boston, MA, 2001.

\bibitem{EC} E. A. Coddington;
\emph{An introduction to ordinary differential equations},
 Prentice-Hall Mathematics Series, Prentice-Hall Inc.,
Englewood Cliffs, N.J., 1961.

\bibitem{Hart} P. Hartman;
\emph{Ordinary differential equations}, John Wiley \& Sons Inc.,
New York, 1964.

\bibitem{Hilger} S. Hilger;
\emph{Analysis on measure chains-a unified approach to continuous
and discrete calculus}, Results Math., Results in Mathematics,
18 (1990),  No. 1-2, pp. 18-56.

\bibitem{Hilger2} S. Hilger;
\emph{Differential and difference calculus--unified!},
 Proceedings of the {S}econd {W}orld {C}ongress of {N}onlinear
{A}nalysts, {P}art 5 ({A}thens, 1996), Nonlinear Analysis.
Theory, Methods \& Applications, 30 (1997), No. 5, pp 2683-2694.

\bibitem{Horn} R. A. Horn, C. R. Johnson;
\emph{Matrix analysis}, Cambridge University Press, Cambridge, 1990.

\bibitem{KP} W. Kelly, A. Peterson;
\emph{The theory of differential equations classical and qualitative},
Pearson Education, Inc., Upper Saddle River, NJ 07458, 2004.

\bibitem{pipes} L. A. Pipes;
\emph{Matrix methods for engineering}, Prentice-Hall Inc.,
Englewood Cliffs, NJ, 1963.

\bibitem{KRP} K. R. Prasad;
 \emph{Matrix Riccati differential equations on time scales},
Comm. Appl. Nonlinear Anal., 8 (2001), No 4, pp 63-75.

\bibitem{TZ1} C. C. Tisdell,  A. H. Zaidi;
\emph{Successive approximations to solutions of dynamic equations on time scales},
 Comm. Appl. Nonlinear Anal., 16 (2009), No. 1, pp. 61-87.

\bibitem{TZ} C. C. Tisdell,  A. Zaidi;
\emph{Basic qualitative and quantitative results for solutions to nonlinear,
 dynamic equations on time scales with an application to economic modelling},
 Nonlinear Analysis. Theory, Methods \& Applications, 68 (2008),
No. 11, pp. 3504-3524.

\bibitem{thesis} A. Zaidi;
 \emph{Existence and Uniqueness of solutions to nonlinear first order
dynamic equations on time scales}, Ph.D. thesis, University of
New South Wales, 2009.

\bibitem{zaidi} A. H. Zaidi;
\emph{Existence of solutions and convergence results for dynamic initial
 value problems using lower and upper solutions},
Electronic Journal of Differential Equations, 2009(2009), No. 161, pp. 1-13.

\end{thebibliography}


\end{document}

