\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2017 (2017), No. 263, pp. 1--13.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu}
\thanks{\copyright 2017 Texas State University.}
\vspace{8mm}}

\begin{document}
\title[\hfilneg EJDE-2017/263\hfil
 A simplified approach to Gronwall's inequality]
{A simplified approach to Gronwall's inequality on time scales with
applications to new bounds for solutions to linear dynamic equations}

\author[C. C. Tisdell, S. Meagher \hfil EJDE-2017/263\hfilneg]
{Christopher C. Tisdell, Stephen Meagher}

\address{Christopher C. Tisdell \newline
 School of Mathematics and Statistics,
The University of New South Wales, UNSW, 2052, Australia.\newline
YouTube:  www.youtube.com/DrChrisTisdell \newline
Facebook:  www.facebook.com/DrChrisTisdell.Edu \newline
Twitter: www.twitter.com/DrChrisTisdell
ORCiD orcid.org/0000-0002-3387-2505}
\email{cct@unsw.edu.au}

\address{Stephen Meagher \newline
 School of Mathematics and Statistics,
The University of New South Wales, UNSW, 2052, Australia. \newline
ORCiD orcid.org/0000-0003-3543-6392}
\email{s.meagher@unsw.edu.au}

\thanks{Submitted April 5, 2017. Published October 19, 2017.}
\subjclass[2010]{34N05, 26E70, 97I99, 97D99}
\keywords{Gronwall inequality; linear dynamic equations on time scales;
\hfill\break\indent  uniqueness of solutions;
 a priori bounds; taxicab distance}

\begin{abstract}
 The purpose of this work is to advance and simplify our understanding of
 some of the basic theory of linear dynamic equations and dynamic inequalities
 on time scales.

 Firstly, we revisit and simplify approaches to Gronwall's inequality on time scales.
 We provide new, simple and direct proofs that are accessible to those with only
 a basic understanding of calculus.

 Secondly, we apply the ideas to second and higher order linear dynamic equations
 on time scales.  Part of the novelty herein involves a strategic choice of metric,
 notably the taxicab metric, to produce {\em a priori} bounds on solutions.
 This choice of metric significantly simplifies usual approaches and extends ideas
 from the literature.

 Thirdly, we examine mathematical applications of the aforementioned bounds.
 We form results concerning the non-multiplicity of solutions to linear problems;
 and error estimates on solutions to initial value problems when the initial
 conditions are imprecisely known.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{example}[theorem]{Example}
\allowdisplaybreaks

\section{Introduction}

For hundreds of years, second and higher order differential equations of
linear type have gained attention from mathematicians, engineers,
scientists and educators due to their simplicity and accessibility \cite{TisdellLin}.
  These equations take the form of an initial value problem, namely
\begin{gather}
 x^{(n)} + a_{n-1}(t)x^{(n-1)} + \dots + a_1(t)x'+ a_0(t)x =f(t),
 \label{O1} \\
  x^{(i)}(0)  =  b_i, \quad \text{for } i \in \{ 0,  \dots, n-1 \}.  \label{O2}
\end{gather}

Agnew makes the significance of \eqref{O1}, \eqref{O2} clear via the now
classic statement  that they ``are so important that many persons with few
 mathematical interests know enough about them to be able to use them
in the solution of problems'' \cite[p.95]{Agnew}.

As mathematical modelling has developed and matured, we have seen the rise of
linear difference equations in the modelling of discrete phenomena and also
as approximations to differential equations through numerical methods.
These equations can take the classical form
 \begin{gather}
\Delta^{(n)}x(t) + a_{n-1}(t)\Delta^{(n-1)}x(t) + \dots
 + a_1(t) \Delta x(t)+ a_0(t)x(t) =f(t),  \label{D1}  \\
\Delta^{(i)}x(0)  =  b_i, \quad  \text{for } i \in \{ 0,  \dots, n-1 \}. \label{D2}
\end{gather}
In the case of $q$-difference equations \cite[p.1487]{AT}, the ``dynamic''
 equation with $n=2$ looks like
\begin{equation}
\begin{gathered}
 D_h(D_h x)(t)+a_1(t)D_h x(t)+a_0(t)x(t)
=f(t), \quad t\in h^\mathbb{Z}, \quad h>1,   \\
\text{where } D_h y(t):=\frac{y(ht)-y(t)}{ht-t}.
\end{gathered} \label{Q1}
\end{equation}

In the past 20 years, or so, we have seen the birth and evolution of
``dynamic equations on time scales'' \cite{BP,Tisdell-JIEA}.
The field of dynamic equations on time scales offers a mathematical
framework that encompasses differential equations and difference equations
simultaneously. Prototypical time scales are the set of real numbers
 (corresponding to differential equations) and the set of integers
(corresponding to difference equations).  This framework provides an opportunity
to simultaneously model continuous, discrete and hybrid processes.

Let $\mathbb{T}$ be a time scale (precise definitions will be presented in
Section \ref{Sec2}). The general problem of solving an $n$th order
linear ``dynamic" equation, with initial values $b_i \in \mathbb{R}$, is to find an
 $n$th order delta differentiable function $x : \mathbb{T} \to \mathbb{R}$ satisfying
\begin{gather}
 x^{\Delta^{(n)}} + a_{n-1}(t)x^{\Delta^{(n-1)}} + \dots + a_1(t)x^\Delta + a_0(t)x
 =f(t), \label{nde1} \\
 x^{\Delta^{(i)}}(0)  =  b_i, \quad \text{for } i \in \{ 0,  \dots, n-1 \},
 \label{ic1}
\end{gather}
on some suitable interval.  Above, the $a_i : \mathbb{T}^{\kappa^i} \to \mathbb{R}$ and
$f : \mathbb{T}^{\kappa^i} \to \mathbb{R}$ are arbitrary functions, and $0 \in \mathbb{T}$.

Equations \eqref{nde1} and \eqref{ic1} simultaneously encompass:
 \eqref{O1}, \eqref{O2};
and \eqref{D1}, \eqref{D2}; plus many more ``in-between'' and hybrid cases
such as \eqref{Q1}.

The purpose of this work is to advance and simplify our understanding of some
of the basic theory of linear dynamic equations and dynamic inequalities on
time scales, with Agnew's famous aforementioned quote taking on even more
important meaning for \eqref{nde1}, \eqref{ic1} given its wide-ranging and
flexible characteristics.

 Much work has been done generalising the basic inequalities found in
 Chapter 6 of Bohner and Peterson \cite{BP} (see \cite{ABP} and the introduction
of \cite{Saker} for a recent overview). There have also been various
generalisations to multi-variable situations
(see e.g. \cite{Anderson1,Anderson2}), and to situations involving delay
equations (see \cite{FZ} and the references therein for a recent overview).
However, unlike the present article, none of these works provide such a simple
and direct approach as we do herein; nor do they prove an inequality where
the bounds depend on the classical real analysis exponential function \emph{alone},
and are therefore independent of the time scale. The inequalities and methods
that we show are striking in their simplicity and independence from the time scale.

Our work is organised as follows:

Section \ref{Sec2} briefly recalls some of the basic notation and concepts
from the field of time scales to keep this work  reasonably self contained.

In Section \ref{Sec3}, we revisit and simplify approaches to Gronwall's
inequality on time scales.  This fundamental inequality has opened up many
new directions for scientific investigation and mathematical research into
nonlinear problems, and continues to be a fruitful resource within the area
of time scales.  Several of our results are important and novel, and complement
existing theorems and, in particular, provide new, simple and direct proofs
that are accessible to those with only a basic understanding of calculus.
Unlike more well-known approaches, the bounds that we obtain do not rely
on the exponential function on time scales; rather they involve the exponential
function from classical real analysis. This means the bounds are independent of
the time scale itself and thus are easily calculable.
Our results are also timely in view of the upcoming centenary of Gronwall's
original results from 1919 \cite{Gron} for differential inequalities.

In Section \ref{Main} we analyse second and higher order linear dynamic equations
on time scales.  The novelty herein involves a strategic choice of metric,
notably the taxicab metric \cite{TisdellLin}, to produce {\em a priori} bounds
on solutions. This choice of metric significantly simplifies usual approaches
and extends ideas from the literature in the second and higher order cases.
Once again, these bounds are in terms of the classical exponential function
and so are easily accessible and computable by a wide audience.

Finally, in Section \ref{NM}, we look at mathematical applications of the
aforementioned bounds.  We form results concerning the non-multiplicity of
solutions to second and higher order problems; and error estimates on solutions
to initial value problems when the initial conditions are imprecisely known.
Once again, the methods involved are direct and accessible, and differ from
the existing literature by not relying on an understanding of matrix theory.

The present article is motivated by the recent works \cite{TisdellFrac}
and \cite{TisdellLin}, where new Gronwall-type results were derived in the
fractional integral operator setting; and the taxicab metric was applied
to obtain {\em a priori} bounds on linear, ordinary differential equations.


\section{Review of time scales} \label{Sec2}

We briefly recall some of the basic notation and concepts from the field of
time scales so that this work is reasonably self contained.  For more details
we refer the reader to the seminal work of Bohner and Peterson \cite{BP}.

A time scale $\mathbb{T}$ is a closed (and nonempty) subset of $\mathbb{R}$.
For each $t \in \mathbb{T}$, the forward jump operator $\sigma : \mathbb{T} \to \mathbb{R}$ is defined by
\[
\sigma(t) :=
\begin{cases} \inf \{ s \in \mathbb{T} \mid s > t \}, & \text{if $t$ is not the maximum of
$\mathbb{T}$;} \\
t, & \text{ if $t$ is the maximum of $\mathbb{T}$.}
\end{cases}
\]
E.g. if $\mathbb{T} = \mathbb{R}$ then $\sigma(t) = t$, while if $\mathbb{T} = \mathbb{Z}$ then $\sigma(t) = t+1$.

We define the set $\mathbb{T}^\kappa$ to be $\mathbb{T}$ if $\mathbb{T}$ does not have a discrete
maximum,\footnote{In the time scale literature this is called a left-scattered
maximum, see below for a definition of left-scattered.}
otherwise $\mathbb{T}^\kappa$ is $\mathbb{T}$ with its discrete maximum removed.
Note that $\mathbb{T}^\kappa$ is itself a time scale.

A function $x : \mathbb{T} \to \mathbb{R}$ is delta differentiable if there is a function
$x^\Delta : \mathbb{T}^\kappa \to \mathbb{R}$ such that for each $t \in \mathbb{T}^\kappa$ and for each
$\epsilon > 0$ there exists a $\delta > 0$ such that for any $s \in \mathbb{T}$ satisfying
\[
| t - s | < \delta
\]
we have
\[
| x(\sigma(t)) - x(s) - x^\Delta(t) ( \sigma(t) - s) |
\leq \epsilon | \sigma(t) - s |.
\]
For example if $\mathbb{T} = \mathbb{R}$ then this is just the ordinary derivative of $x$.
If $\mathbb{T} = \mathbb{Z}$ then
\[
x^\Delta(t) = x(t+1) - x(t).
\]
Note that $\mathbb{T}^\kappa$ is needed to ensure uniqueness of $x^{\Delta}(t)$:
for if $t_1$ is a discrete maximum of $\mathbb{T}$, then for $\delta$ sufficiently small,
the only point $s \in \mathbb{T}$ with $|t_1 - s| < \delta$ is $s = t_1$, and then
$\sigma(t_1) = s$, which would mean $x^\Delta(t_1)$ could take any value.

The higher delta derivatives are defined recursively by
\[
x^{\Delta^{(n)}}(t) = (x^{\Delta^{(n-1)}})^\Delta(t)
\]
for $t \in \mathbb{T}^{\kappa^n}$ where $\mathbb{T}^{\kappa^n} = (\mathbb{T}^{\kappa^{n-1}})^\kappa$.

The anti-derivative $X$ of $x$ is a function such that $X^\Delta = x$,
and the delta integral is given by
\[
\int^t_{t_0} x(s)  \Delta s = X(t) - X(t_0).
\]
From this definition it is easy to see that delta integrals are linear
operators in $x$.

To state existence results for anti-derivatives, we call on the notion of
an rd-continuous function. It turns out that all rd-continuous functions
have anti-derivatives.
This necessitates defining the backward jump operator $\rho : \mathbb{T} \to \mathbb{R}$
\[
\rho(t) := \begin{cases}
\sup \{ s \in \mathbb{T} \mid s < t \} & \text{if $t$ is not the minimum of $\mathbb{T}$;} \\
t & \text{ if $t$ is the minimum of $\mathbb{T}$.}
\end{cases}
\]
A point $t$ is called right-dense if $\sigma(t) = t$ and left-dense if
$\rho(t) = t$, it is called right-scattered if $\sigma(t) > t$ and
left-scattered if $\rho(t) < t$.

A function $x : \mathbb{T} \to \mathbb{R}$ is called rd-continuous if it is continuous
at right-dense points and left-continuous at left-dense points.

If a function is delta differentiable it is rd-continuous, and if a
function is continuous it is rd-continuous.

We will use the fact that a function of the form
\[
|x(t)| +  |x^\Delta(t)| + \dots + |x^{\Delta^{(n-1)}}(t)|
\]
is rd-continuous if $x$ has $n$th order delta derivatives.

If $I \subset \mathbb{R}$ is an interval we denote $I \cap \mathbb{T}$ by $I_\mathbb{T}$.
If $I$ is compact and $x : {\mathbb{T}} \to {\mathbb{R}}$ is rd-continuous, then $x$
is bounded on $I_\mathbb{T}$, and $x$ attains its maximum on $I_\mathbb{T}$, i.e.
there exists a $t_1 \in I_\mathbb{T}$ such that
$x(t_1) = \sup \{ x(t) : t \in I_\mathbb{T} \}$ \cite[Theorems 1.60 and 1.65, pp 22-23]{BP}.


We will  use the following facts regarding delta integrals:

If $x(s) \leq y(s)$ for all $s \in [t_0,t_1]_\mathbb{T}$ then
\begin{equation} \label{intbnd1}
\int^t_{t_0} x(s) \, \Delta s \leq \int^t_{t_0} y(s) \, \Delta s, \quad
\text{for all }  t \in [t_0,t_1]_\mathbb{T}
\end{equation}
(see, e.g. \cite[Theorem 1.77, p29]{BP}).

In particular if $M > 0$ is a constant and $x \leq M$ then
\begin{equation} \label{intbnd2}
\int^t_{t_0} x(s) \,\Delta s \leq M(t-t_0),
\end{equation}
as the anti-derivative of a constant $M$ is $Ms$ (see \cite[Example 1.13(ii)]{BP}).

If $h : [t_0, t] \to \mathbb{R}$ is continuous and non-decreasing then
\begin{equation} \label{intcmp}
\int^t_{t_0} h(s) \, \Delta s \leq \int^t_{t_0} h(s) \, ds
\end{equation}
(see e.g. \cite[Theorem 2.3]{LX} or \cite[Lemma 2.1]{ACCK}).


\section{Gronwall-type results for dynamic equations on time scales} \label{Sec3}

In this section, we present some Gronwall--type results on time scales.
 Gronwall's original results \cite{Gron} are nearly 100 years old and
they have had a profound effect on the study of differential and integral equations.
  For example, for recent results in this area, see \cite{TisdellFrac}.

There are two important distinctions between our approach and the results
 already in the literature \cite[Chapter 6]{BP} regarding Gronwall's
 results on time scales. Firstly, we provide two methods of proof for
the result that simplify existing approaches.  Secondly, our bounds are
in terms of the classical exponential function from real analysis.
This means the bounds are independent of the time scale and are therefore
easier to calculate than traditional bounds that use the
time-scale exponential function.


\begin{theorem} \label{Gronwall}
Let $a>0$ be a constant and let $\rho:[0,a]_\mathbb{T} \to [0,\infty)$ be rd-continuous.
If there are non--negative constants $A$ and $B$ such that
\begin{equation} \label{I-4}
 \rho(t) \le B + \int_{0}^t A \rho(s) \, \Delta s, \quad \text{for all }
 t \in [0,a]_\mathbb{T}
\end{equation}
then
\begin{equation}  \label{Bp-6}
\rho(t) \le B  e^{A t} , \quad  \text{for all }  t \in [0,a]_\mathbb{T}.
\end{equation}
\end{theorem}

In the interest of diversity, we present two different styles of proof.
They offer very simple approaches and each only requires a basic
understanding of functions and time scales calculus.
The style of first proof is motivated by  \cite[p82-83]{Zeidler}
with appropriate modifications for time scales.

\begin{proof}[Proof 1]
The case $A=0$ is trivial, so let $A>0$.
Since $\rho$ is non--negative and rd-continuous on $[0,a]_\mathbb{T}$, there is a
constant $M>0$ such that
\begin{equation} \label{r1}
0 \le \rho (t) \le M, \quad \text{for all } t \in [0,a]_\mathbb{T}.
\end{equation}
Inserting \eqref{r1} into the right--hand side of \eqref{I-4} and
using \eqref{intbnd2} we obtain, for all $t\in [0,a]_\mathbb{T}$:
\begin{equation}
\rho(t) \le B +  \int_{0}^t A M\,\Delta s
= B + MA t. \label{r2}
\end{equation}
Now, in a similar fashion, inserting \eqref{r2} into  \eqref{I-4}
and then applying \eqref{intbnd1} and \eqref{intcmp} with $h(s) = B + MAs$,
 we obtain:
\begin{align*}
\rho(t) &\leq  B + \int_{0}^t A [ B + MAs ] \,\Delta s  \\
&\leq  B + \int_{0}^t A [ B + MAs ] \ d s \\
&=  B + BA t +  \dfrac{MA^2 t^{2}}{2!}.
\end{align*}
Continuing with this process, we see that the $n$-th iteration is
\begin{equation} \label{beaut}
\rho(t) \le B \sum_{k=0}^{n-1} \dfrac{(At)^k}{k!} + \dfrac{M(At)^n}{n!}.
\end{equation}
Taking limits as $n \to \infty$ in \eqref{beaut} we obtain \eqref{Bp-6}.
\end{proof}


\begin{proof}[Proof 2]
The case $A=0$ is trivial, so let $A>0$.
For $t \in [0,a]_\mathbb{T}$, define
\begin{equation} \label{g}
g(t) := \frac{\rho(t)}{e^{At}}.
\end{equation}
Since $g$ is rd-continuous on a compact interval, it must attain its
maximum value at some point $t_1 \in [0,a]_\mathbb{T}$. Let
$$
m := \max_{t \in [0,a]_\mathbb{T}} g(t) = g(t_1).
$$
Thus, from \eqref{g} we see that
\begin{equation} \label{r0}
\rho(t_1) = m e^{At_1}.
\end{equation}
Using \eqref{r0}, \eqref{g} and \eqref{I-4} we have
\begin{align*}
m  e^{At_1}
&=  \rho(t_1)  \\
&\leq   B +  \int_0^{t_1} A \rho(s) \,\Delta s \\
&=   B +  \int_0^{t_1} A e^{As} g(s) \,\Delta s \\
&\leq   B + \int_0^{t_1} A  e^{As} m  \,\Delta s \\
&\leq  B + \int_0^{t_1} A  e^{As} m  \,ds \\
&=   B  + m [ e^{At_1} - 1]
\end{align*}
where, in the second last line we applied the fundamental inequality \eqref{intcmp}.

Thus, we have
$$
m  e^{At_1} \le  B + m [ e^{At_1} - 1]
$$
from which we can eliminate the exponential function and simplify to
\begin{equation}
m \leq   B. \label{m}
\end{equation}
Thus, from \eqref{g} and \eqref{m}, for each $t \in [0,a]_\mathbb{T}$ we have
\begin{equation}
\rho(t) =  g(t)e^{At}
\leq    m e^{At}
\leq   B e^{At}.
\end{equation}
\end{proof}


\begin{remark} \label{rmk1} \rm
We make no claim that inequality \eqref{Bp-6} is ``sharp''
(i.e., the least upper bound) for all time scales.
Indeed, it can be considered as a rather ``rough'' estimate.
There is a natural trade-off between our simple methods of proof and
the degree of sharpness of the conclusion of Theorem \ref{Gronwall}.
 The significance, interest and distinction from existing literature
is in the method of proof.

While inequality \eqref{Bp-6} could be classed as a ``rough'' estimate,
this does not affect its applications in the remainder of this paper.
Indeed, the value and importance of rough inequalities like \eqref{Bp-6}
has been confirmed by well--known mathematicians such as Nirenberg
and Friedrichs, who ``often stressed the applicability of rough
inequalities to various problems!'' \cite[p483]{N}.
\end{remark}

The following generalisation of Theorem \ref{Gronwall} is now presented.

\begin{theorem} \label{GronwallGen}
Let $A$ be a non-negative constant; let $B:[0,a]_\mathbb{T} \to [0,\infty)$ be
 rd-continuous and nondecreasing; and let $\rho:[0,a]_\mathbb{T} \to [0,\infty)$
 be rd-continuous. If
\begin{equation} \label{I-2}
 \rho(t) \le B(t) + \int_0^t A \rho(s) \,\Delta s, \quad \text{for all }
 t \in [0,a]_\mathbb{T}
\end{equation}
then
\begin{equation}  \label{Bp-2}
\rho(t) \le B(t) e^{At}, \quad \text{for all }  t \in [0,a]_\mathbb{T}.
\end{equation}
\end{theorem}

\begin{proof}
If \eqref{I-2} holds then, for each $t_1 \in \mathbb{T}$ with $0 \le t \le t_1 \le a$
we have $B(t) \leq B(t_1)$. Therefore
\begin{equation*}
 \rho(t) \le B(t_1) + \int_0^t A \rho(s)  \,\Delta s, \quad  t \in [0,t_1]_\mathbb{T}
\end{equation*}
where $t_1$ is now regarded as a constant.
 The conditions of Theorem \ref{Gronwall} hold and the conclusion \eqref{Bp-6}
can then be applied, so that we have
\begin{equation}  \label{Bp-4}
\rho(t) \le  B(t_1)  e^{At}.
\end{equation}
Thus replacing $t$ with $t_1$ in \eqref{Bp-4} we obtain
\begin{equation*}
\rho(t_1) \le  B(t_1)  e^{At_1}, \quad \text{for all } t_1 \in [0,a]_\mathbb{T},
\end{equation*}
so that \eqref{Bp-2} holds.
\end{proof}

\section{{\em A priori} bounds via a taxicab approach} \label{Main}

In this section we present our results concerning {\em a priori} bounds
for the general homogeneous problem associated with \eqref{nde1}, \eqref{ic1},
namely
\begin{gather}
 x^{\Delta^{(n)}} + a_{n-1}(t)x^{\Delta^{(n-1)}} + \dots
+ a_1(t)x^\Delta + a_0(t)x = 0,  \label{hnde2}\\
 x^{\Delta^{(i)}}(0) = b_i, \quad \text{ for } i \in \{ 0,  \dots, n-1 \}.
\label{ic2}
\end{gather}

 Our methodology involves the taxicab size of a solution to homogeneous
problems combined with applications of our earlier Gronwall inequalities
from the previous section.

In \cite{AT} the {\em a priori} bounds on solutions to the basic second order
($n=2$) form of \eqref{nde1}, \eqref{ic1} with constant coefficients were
obtained via an approach that used the Euclidean size of a solution, namely
$$
d_1(t) := \sqrt{(x(t))^2 + (x'(t))^2}.
$$

While the Euclidean approach to {\em a priori} bounds on solutions is somewhat
manageable in the proofs concerning  second--order, linear problems with
constant coefficients, we believe it is not optimal. Moreover, the Euclidean
method becomes unwieldy in the proofs involving higher-order cases,
for example, when attempting to apply
$$
d_{n-1}(t) := \sqrt{(x(t))^2 + (x'(t))^2 + \dots  + (x^{(n-1)}(t))^2}
$$
to $n$th order problems.

The purpose of this section is to propose a simpler approach that establishes
{\em a priori} bounds on solutions by considering a different way of
measuring the size of a solution to linear dynamic equations.
 We shall refer to this as the taxicab (or Manhattan) size, namely
\begin{equation} \label{T}
\rho(t) := |x(t)| + |x^\Delta (t)|+  \dots  + |x^{\Delta^{(n-1)}}(t)|
\end{equation}
for each $t$ in an interval.

Taxicab geometry (in $\mathbb{R}^n$) dates back to mathematician Hermann Minkowski
in the 19th century where the distance between points is the sum of the
absolute difference of the Cartesian  coordinates, as opposed to the
straight line Euclidean distance.

The taxicab form \eqref{T} of the size of a solution to linear differential
equations enables a simplification and extension of the mathematical
literature such as \cite{AT}, to higher order equations. For instance,
there is no need to apply the AM--GM inequality ad nauseam in the proofs;
 and the product rule for delta differentiation is not required.
The ideas are widely accessible to those who have an understanding of
the Fundamental Theorem of Calculus and the classic exponential function.

\begin{theorem} \label{thm3}
Consider the homogeneous IVP \eqref{hnde2}, \eqref{ic2} where each function
$a_i:[0,a]_\mathbb{T}^{\kappa^i} \to \mathbb{R}$ and  $a_i$ is rd-continuous.
If $x=x(t)$ is a solution to \eqref{hnde2}, \eqref{ic2} on $[0,a]_\mathbb{T}$ then
\begin{equation} \label{conc2}
|x^{\Delta^{(i)}}(t)| \le B e^{At}, \quad  \text{for }
i = 0,1,\dots,n-1  \text{ for each }  t \in [0,a]_\mathbb{T}^{\kappa^{n-1}}
\end{equation}
where
\begin{gather*}
|a_i| \leq  A_i, \quad \text{on }   [0,a]_\mathbb{T}^{\kappa^{n-1}}, \; i=0,1\dots,n-1; \\
A:= \max\{A_0,  A_1, \ldots,A_{n-1}\} + (n-1); \\
B:=  |b_0| + |b_1| + \dots + |b_{n-1}|.
\end{gather*}
\end{theorem}

The proofs of Theorems \ref{thm3} and \ref{expBnd} are motivated by
 \cite[Theorem B, p284]{Driver} (which applies only to $[0,\infty)$),
except that we make the constants explicit.

\begin{proof}
The constants $A_i$ exist because each $a_i$ is rd-continuous on the compact
set $[0,a]_\mathbb{T}^{\kappa^i}$ and is therefore bounded on $[0,a]_\mathbb{T}^{\kappa^i}$.

Let $x = x(t)$ be a solution to \eqref{hnde2} on $[0,a]_\mathbb{T}$.  We have
 for each $t \in [0,a]_\mathbb{T}^{\kappa^i}$ and each $i=0,1,\ldots,n-2$
\begin{equation}
\begin{aligned}
|x^{\Delta^{(i)}}(t)|
&=  \big| b_i + \int_{0}^t x^{\Delta^{(i+1)}}(s) \,\Delta s \big|   \\
&\leq  |b_i| + \big| \int_{0}^t |x^{\Delta^{(i+1)}}(s)| \,\Delta s \big|  \\
&\leq  |b_i| + \big| \int_{0}^t |x(s)| +  |x^\Delta(s)| + \dots
+ |x^{\Delta^{(n-1)}}(s)| \,\Delta s \big|.
\end{aligned} \label{inty1}
\end{equation}

In addition, using the dynamic equation \eqref{hnde2} we have for each
$t \in [0,a]_\mathbb{T}^{\kappa^{n-1}}$
\begin{align}
&|x^{\Delta^{(n-1)}}(t)|   \nonumber \\
&\leq  |b_{n-1}| + \Big| \int_{0}^t |x^{\Delta^{(n)}}(s)| \,\Delta s \Big| 
\nonumber \\
&=  |b_{n-1}| + \Big| \int_{0}^t \big| - \big[ a_{n-1}(s)x^{\Delta^{(n-1)}}(s)
 + \dots + a_1(s)x^\Delta(s) + a_0(s)x(s)  \big] \big|  \,\Delta s \Big| 
\nonumber \\
&\leq   |b_{n-1}| + \Big| \int_{0}^t \big[ |a_{n-1}(s)| \ |x^{\Delta^{(n-1)}}(s)|
  + \dots + |a_1(s)| \ |x^\Delta(s)| + |a_0(s)| \ |x(s)|    \big] \,\Delta s \Big| 
 \nonumber \\
&\leq   |b_{n-1}| +  \Big| \int_{0}^t (A-(n-1))
\big[ |x^{\Delta^{(n-1)}}(s)| + \dots +  |x^\Delta(s)| +  |x(s)| \big] \,\Delta s
 \Big| \nonumber \\
& \le  |b_{n-1}| + (A - (n-1)) \Big| \int_{0}^t \big[ |x^{\Delta^{(n-1)}}(s)|
 + \dots +  |x^\Delta(s)| +  |x(s)|    \big] \,\Delta s \Big|. \label{inty2}
\end{align} 
Summing the inequalities in \eqref{inty1} with \eqref{inty2}, for all
$t \in [0,a]_\mathbb{T}^{\kappa^{n-1}}$, we obtain
\begin{equation}
\begin{aligned}
&|x(t)| + |x^\Delta(t)| \ + \dots + \ |x^{{\Delta^{(n-1)}}}(t)|  \\
&\leq  |b_0| + |b_1| + \dots + |b_{n-1}|   \\
&\quad  + (n-1) \Big| \int_{0}^t |x(s)| +  |x^\Delta(s)| + \dots
 + |x^{\Delta^{(n-1)}}(s)| \,\Delta s \Big|   \\
&\quad + \Big| \int_{0}^t (A- (n-1)) \big[ |x^{\Delta^{(n-1)}}(s)| + \dots
 +  |x^\Delta(s)| +  |x(s)|    \big] \,\Delta s \Big|   \\
&=  B + \Big| \int_{0}^t A \big[ |x^{\Delta^{(n-1)}}(s)| + \dots
+  |x^\Delta(s)| +  |x(s)|  \big] \,\Delta s \Big|.
\end{aligned}\label{inty3}
\end{equation}

For each $t \in [0,a]_\mathbb{T}^{\kappa^{n-1}}$, define $\rho$ via
$$
\rho(t):=  |x(t)| +  |x^\Delta(t)| + \dots + |x^{\Delta^{(n-1)}}(t)|
$$
so that \eqref{inty3} now simplifies to
\begin{equation*}
\rho(t) \le B +  \int_{0}^t A \rho(s) \,\Delta s , \quad  \text{for all }
 t \in [0,a]_\mathbb{T}^{\kappa^{n-1}}.
\end{equation*}
Note that $\rho$ is rd-continuous and non-negative. Thus, applying
Theorem \ref{Gronwall}, we obtain
\begin{equation*}
\rho(t) \le B e^{At}, \quad  \text{for all }  t \in [0,a]_\mathbb{T}^{\kappa^{n-1}}
\end{equation*}
which, in turn, implies \eqref{conc2}.
\end{proof}

We now examine the concept of exponential boundedness of solutions to the
 inhomogeneous problem \eqref{nde1}, \eqref{ic1}.  We say that a function
 $\rho:I_\mathbb{T} \to \mathbb{R}$ is exponentially bounded on $I_\mathbb{T}$ if there exist
non-negative constants $M$ and $L$ such that for each $t \in I_\mathbb{T}$ we have
\[
| \rho(t) | \leq M e^{Lt}, \quad \text{for all }  t \in I_\mathbb{T}.
\]


\begin{theorem} \label{expBnd}
Let each $a_i:[0,a]_\mathbb{T} \to \mathbb{R}$ be rd-continuous and let $f$ be exponentially
bounded on $[0,a]_\mathbb{T}$.
If $x$ is a solution of \eqref{nde1}, \eqref{ic1} on $[0,a]_\mathbb{T}$ then
 $x^{\Delta^{(i)}}$ is also exponentially bounded for $i =0, \dots, n$,
and the bound is independent of $i$.  In particular, for all
$t \in [0,a]_\mathbb{T}^{\kappa^{n-1}}$ we have
\[
| x^{\Delta^{(i)}}(t)| \leq \big(B + \frac{M}{L} \big)    e^{(L + A)t}
\]
where
\begin{gather*}
|a_i| \leq  A_i, \quad \text{on }   [0,a]_\mathbb{T}^{\kappa^{n-1}}, \quad i=0,1\dots,n-1; \\
A := \max\{A_0, \ A_1, \ldots,A_{n-1}\} + (n-1); \\
B :=  |b_0| + |b_1| + \dots + |b_{n-1}|;  \\
| f(t) | \leq M e^{Lt} \quad \text{for all } t \in [0,a]_\mathbb{T}^{\kappa^{n-1}},
\end{gather*}
where $M$ and $L$ are non-negative constants independent of $t$.
\end{theorem}

\begin{proof}
The argument is very similar to that of Theorem \ref{thm3} except that
the inequality \eqref{inty2} is modified as follows.
For all $t \in [0,a]_\mathbb{T}^{\kappa^{n-1}}$ we have
\begin{equation}
\begin{aligned}
|x^{\Delta^{(n-1)}}(t)|  
&\leq  |b_{n-1}| + \Big| \int_{0}^t |x^{\Delta^{(n)}}(s)| \,\Delta s \Big|  \\
&=  |b_{n-1}| + \Big| \int_{0}^t \big| f(s)- \big[ a_{n-1}(s)x^{\Delta^{(n-1)}}(s)
 + \dots + a_1(s)x^\Delta(s) \\
&\quad + a_0(s)x(s)  \big] \big|  \,\Delta s \Big|  \\
&\leq   |b_{n-1}| +  \Big| \int_{0}^t \big[ |f(s) | + |a_{n-1}(s)| \,
 |x^{\Delta^{(n-1)}}(s)| + \dots \\
&\quad + |a_1(s)| \, |x^\Delta(s)| + |a_0(s)| \, |x(s)|    \big] \,\Delta s \Big|  \\
&\leq   |b_{n-1}| +  \Big| \int_{0}^t Me^{Ls} + (A-(n-1))
\big[ |x^{\Delta^{(n-1)}}(s)| + \dots \\
&\quad +  |x^\Delta(s)|  +  |x(s)|    \big] \,\Delta s \Big|  \\
& \le  |b_{n-1}| +  \int_{0}^t Me^{Ls} \Delta s
 +  (A - (n-1)) \Big| \int_{0}^t \big[ |x^{\Delta^{(n-1)}}(s)| + \dots \\
&\quad +  |x^\Delta(s)| +  |x(s)|    \big] \,\Delta s \Big| .
\end{aligned}\label{Iinty2}
\end{equation}
Inequality \eqref{inty1} still holds and so putting
$$
\rho(t):=  |x(t)| +  |x^\Delta(t)| + \dots + |x^{\Delta^{(n-1)}}(t)|
$$
and using \eqref{inty1} and \eqref{Iinty2} we get
\begin{equation} \label{G3}
\rho(t) \le B +  \int_{0}^t Me^{Ls} \,\Delta s +  \int_{0}^t A \rho(s) \,\Delta s
\quad \text{for all }  t \in [0,a]_\mathbb{T}^{\kappa^{n-1}}.
\end{equation}

Now using  inequality \eqref{intcmp} and \eqref{G3} gives
\begin{equation}
\begin{aligned}
 \rho(t)
& \le  B +  \int_{0}^t Me^{Ls} \, ds +  \int_{0}^t A \rho(s) \,\Delta s    \\
& =  B + \frac{M}{L}( e^{Lt} - 1 ) + \int_{0}^t A \rho(s) \,\Delta s   \\
& \le  \big(B + \frac{M}{L}\big) e^{Lt}
 + \int_{0}^t A \rho(s) \,\Delta s.
\end{aligned}\label{Iinty4}
\end{equation}
Now we can apply Theorem \ref{GronwallGen} to \eqref{Iinty4} to obtain
\[
\rho(t) \leq \big(B + \frac{M}{L}\big) e^{Lt} e^{At}
= \big(B + \frac{M}{L}\big) e^{(L+A)t}
\]
for all $t \in [0,a]_\mathbb{T}^{\kappa^{n-1}}$ and the result follows.
\end{proof}

\begin{example} \label{example} \rm
Consider the dynamic equation
\[
 x^{\Delta^3}(t) + t x^{\Delta^2}{(t)} + t^2 x^{\Delta}(t) + t^3 x(t) = t,
\]
with initial conditions
\[
x(0) = 0, \quad x^\Delta(0) = 0, \quad x^{\Delta^2}(0) =  0.
\]
Within the context of Theorem \ref{expBnd} we have: $n=3$; each
$A_i = 1$; $A= 3$; and $B=0$.  Furthermore, we can choose $M=1$ and $L=1$.

By Theorem \ref{expBnd}, we see that solutions $x(t)$ on the interval
$[0,1]_{\mathbb{T}}$ satisfy
\[
| x(t) | \leq e^{4t}.
\]
\end{example}

\section{Mathematical applications} \label{NM}

In this section we apply the {\em a priori} bounds from earlier to obtain
results regarding the nonmultiplicity of solutions to the inhomogeneous
initial value problem \eqref{nde1}, \eqref{ic1}.  We also explore error
bounds on solutions  to \eqref{nde1}, \eqref{ic1}
when the initial conditions are imprecisely known.

As previously assumed, throughout this section $\mathbb{T}$ will be a time scale
 which is unbounded above with $0 \in \mathbb{T}$.

\begin{theorem} \label{thm7}
If each $a_i:[0,\infty)_\mathbb{T} \to \mathbb{R}$ is rd-continuous, then the inhomogeneous
initial value problem \eqref{nde1}, \eqref{ic1} has, at most, one
solution on $[0,\infty)_\mathbb{T}$.
\end{theorem}

\begin{proof}
Let $y=y(t)$ and $z= z(t)$ be two solutions to \eqref{nde1}, \eqref{ic1}
on $[0,\infty)_\mathbb{T}$.  Define $r = r(t)$ on $[0,\infty)_\mathbb{T}$ via
\[
r := y - z.
\]
We show that $r \equiv 0$ on $[0,\infty)_\mathbb{T}$ and thus $y\equiv z$.

Due to the linearity of \eqref{nde1} we see that $r$ satisfies the
homogeneous problem
\begin{equation}\label{r}
r^{\Delta^{(n)}} + a_{n-1}(t)r^{\Delta^{(n-1)}} + \dots + a_1(t)r^\Delta + a_0(t)r = 0
\end{equation}
subject to the homogeneous initial conditions
\begin{equation} \label{rIC}
r(0) = 0, \; r^\Delta(0) = 0, \;  \dots , \; r^{\Delta^{(n-1)}}(0) = 0.
\end{equation}

Let $t\neq 0$ be any point in $[0,\infty)_\mathbb{T}$. As $\mathbb{T}$ has no right maximum
there are points $t_1, \dots, t_{n-1} \in \mathbb{T}$ such that
$t < t_1 < t_2 < \dots < t_{n-1}$.  Let $J := [0, t_{n-1}]$.
Then $J_\mathbb{T} \subset [0,\infty)_\mathbb{T}$ and $J_\mathbb{T}$ contains both $0$ and $t$.
Since we have assumed each $a_i$ is rd-continuous, each $a_i$ must be
 bounded on $J_\mathbb{T}$ (with the bound possibly depending on $J$).
We can now apply Theorem \ref{thm3} to \eqref{r}, \eqref{rIC} on $J$.
By construction $J_\mathbb{T}^{\kappa^{n-1}}$ contains $[0,t]_\mathbb{T}$.

Since the initial conditions \eqref{rIC} give $B=0$, from Theorem \ref{thm3},
 we see that $r$ satisfies $|r|\le0$ on $J_\mathbb{T}$, which means $r \equiv 0$ on $J_\mathbb{T}$.
 Hence $y \equiv z$ on $J_\mathbb{T}$.  Now, since $t$ was chosen to be any point in
$[0,\infty)_\mathbb{T}$ with $t\neq 0$, we have in fact shown that $y(t) = z(t)$
for all $t \in [0,\infty)_\mathbb{T}$, that is, $y \equiv z$ on $[0,\infty)_\mathbb{T}$.

We conclude that the inhomogeneous initial value problem \eqref{nde1}, \eqref{ic1}
has, at most, one solution on $[0,\infty)_\mathbb{T}$.
\end{proof}

\begin{example} \label{examp5.2}\rm
Returning to Example \ref{example} we see that the initial value problem
\[
x^{\Delta^3}(t) + t x^{\Delta^2}(t) + t^2 x^{\Delta}(t) + t^3 x(t) = t,
\]
with initial conditions
\[
x(0) = 0, \quad  x^\Delta(0) = 0, \quad   x^{\Delta^2}(0)  = 0
\]
has, at most, one solution on $[0,\infty)_\mathbb{T}$.
\end{example}

Suppose we wish to solve \eqref{nde1}, \eqref{ic1} for a solution $x=x(t)$
but the initial conditions \eqref{ic1} are imprecisely known.
Let $y = y(t)$ be a solution to \eqref{nde1} subject to the initial conditions
\begin{equation} \label{ic3}
y(0) = c_0, \; y^\Delta(0) = c_1, \;  \dots , \; y^{\Delta^{(n-1)}}(0) = c_{n-1}
\end{equation}
where the $c_i$ are known constants (with each $c_i$ ideally close to each
$b_i$ in \eqref{ic1}).
The following result gives us an estimate on the error between $x$ and
 $y$ on $[0,a]_\mathbb{T}^{\kappa^{n-1}}$.

\begin{theorem} \label{error}
Let each $a_i:[0,a]_\mathbb{T}^{\kappa^i} \to \mathbb{R}$ be rd-continuous.
If $x = x(t)$ solves \eqref{nde1}, \eqref{ic1} on $[0,a]_\mathbb{T}$ and $y=y(t)$
solves \eqref{nde1}, \eqref{ic3} on $[0,a]_\mathbb{T}$, then for each
$t \in [0,a]_\mathbb{T}^{\kappa^{n-1}}$ we have
\begin{equation*}
|x^{\Delta^{(i)}}(t) - y^{\Delta^{(i)}}(t)| \le D e^{At}, \quad  \text{for }
 i = 0,1,\dots,n-1,
\end{equation*}
where
\begin{gather*}
A_i := \max \{ | a_i(t) | : t \in [0,a]_\mathbb{T}^{\kappa^{n-1}} \}; \\
A := \max\{A_0, A_1, \dots, A_{n-1}\} + (n-1); \\
D :=  |b_0- c_0| + |b_1 - c_1| + \dots + |b_{n-1} - c_{n-1}|.
\end{gather*}
\end{theorem}

\begin{proof}
In a similar way as in the proof of Theorem \ref{thm7} we define $r= x - y$
and see that $r$ satisfies \eqref{r} subject to the initial conditions
\begin{equation*}
r(0) = b_0 - c_0, \; r^\Delta(0) = b_1 - c_1, \;  \dots , \;
 r^{\Delta^{(n-1)}}(0) = b_{n-1} - c_{n-1}.
\end{equation*}
We can then apply Theorem \ref{thm3} to obtain the conclusion.
\end{proof}

\begin{thebibliography}{99}

\bibitem{ABP} Agarwal, R.; Bohner, M.; Peterson, A. C.;
\emph{Inequalities on time scales: a survey}. Mathematical Inequalities
\& Applications, 4 (4) (2001), 535--557. dx.doi.org/10.7153/mia-04-48

\bibitem{Agnew} Agnew, R. P.;
\emph{Differential Equations}. New York: McGraw-Hill, 1942.

\bibitem{Anderson1} Anderson, D. R.;
\emph{Nonlinear dynamic integral inequalities in two independent variables on
time scale pairs}. Advances in Dynamical Systems and Applications, 3 (1) (2008),
1--13.

\bibitem{Anderson2} Anderson, D. R.;
\emph{Dynamic double integral inequalities in two independent variables on
time scales}. Journal of Mathematical Inequalities, 2 (2) (2008), 163--184.
doi.org/10.7153/jmi-02-16

\bibitem{AT} Anderson, D. R.; Tisdell, C. C.;
\emph{Alternative solutions of inhomogeneous second-order linear dynamic
equations on time scales}. Journal of Difference Equations and Applications,
17 (10) (2011), 1487-1498. doi:10.1080/10236191003639483

\bibitem{ACCK} Atici, F.; Cabada, A.; Chyan, C.; Kaymakcalan, B.;
\emph{Nagumo type existence results for second-order nonlinear dynamic BVPS}.
 Nonlinear Analysis, 60 (2) (2005), 209-220. doi:10.1016/s0362-546x(04)00288-3

\bibitem{BP} Bohner, M.; Peterson, A. C.;
\emph{Dynamic equations on time scales: an introduction with applications}.
Boston, Mass.: Birkh\"auser, 2001.

\bibitem{Driver} Driver, R. D.;
\emph{Introduction to ordinary differential equations}. New York: Harper \& Row, 1978.

\bibitem{FZ} Feng, Q.; Zheng, B.;
\emph{Generalized Gronwall-Bellman type delay dynamic inequalities on time
scales and their applications}. Applied Mathematics and Computation,
 218 (2012), 7880-7892. doi:10.1016/j.amc.2012.02.006

\bibitem{Gron} Gronwall, T. H.;
\emph{Note on the Derivatives with Respect to a Parameter of the Solutions
 of a System of Differential Equations}. The Annals of Mathematics,
20 (4) (1919), 292. doi:10.2307/1967124

\bibitem{LX} Liu, H.; Xiang, X.;
\emph{A class of the first order impulsive dynamic equations on time scales}.
Nonlinear Analysis: Theory, Methods \& Applications, 69 (9) (2008), 2803-2811.
doi:10.1016/j.na.2007.08.052


\bibitem{N} Nirenberg, L.;
\emph{Partial differential equations in the first half of the century}.
Development of mathematics 1900--1950 (Luxembourg, 1992), 479--515,
Birkh\"auser, Basel, 1994.

\bibitem{Saker} Saker, S. H.;
\emph{Nonlinear dynamic inequalities of Gronwall-Bellman type on time scales}.
Electronic Journal of Qualitative Theory of Differential Equations, 86 (2011), 1--26.
doi:10.14232/ejqtde.2011.1.86

\bibitem{Tisdell-JIEA} Tisdell, C. C.;
\emph{On the application of sequential and fixed-point methods to fractional
differential equations of arbitrary order}, Journal of Integral Equations and
Applications, 24 (2012), 283-319, doi:10.1216/JIE-2012-24-2-283

\bibitem{TisdellFrac} Tisdell, C. C.;
\emph{Improved mathematical results and simplified pedagogical approaches
for Gronwall's inequality for fractional calculus}.
Fractional Differential Calculus. (to appear)

\bibitem{TisdellLin} Tisdell, C. C.;
\emph{Improved pedagogy for linear differential equations by reconsidering
how we measure the size of solutions}. International Journal of Mathematical
Education in Science and Technology, 48 (2017), 1087-1095.
doi:10.1080/0020739x.2017.1298856

\bibitem{Zeidler} Zeidler, E.; Wadsack, P. R.;
\emph{Nonlinear Functional Analysis and its Applications:
I Fixed-Point Theorems}. New York: Springer, 1986.

\end{thebibliography}

\end{document}

