\documentclass[reqno]{amsart}
\usepackage{hyperref}
\usepackage{mathrsfs}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2017 (2017), No. 210, pp. 1--18.\newline
ISSN: 1072-6691. URL: \url{http://ejde.math.txstate.edu} or \url{http://ejde.math.unt.edu}}
\thanks{\copyright 2017 Texas State University.}
\vspace{8mm}}

\begin{document}
\title[\hfilneg EJDE-2017/210\hfil Self-adjoint BVPs for proportional derivatives]
{Even-order self-adjoint boundary value problems for proportional derivatives}

\author[D. R. Anderson \hfil EJDE-2017/210\hfilneg]
{Douglas R. Anderson}

\address{Douglas R. Anderson \newline
Department of Mathematics,
Concordia College,
Moorhead, MN 56562, USA}
\email{andersod@cord.edu}

\dedicatory{Communicated by Mokhtar Kirane}

\thanks{Submitted July 7, 2017. Published September 11, 2017.}
\subjclass[2010]{26A24, 34A05, 49J15, 49K15}
\keywords{Proportional derivatives; PD controller; Green's function;
\hfill\break\indent  self-adjoint boundary value problem}

\begin{abstract}
 In this study, even order self-adjoint differential equations incorporating
 recently introduced proportional derivatives, and their associated 
 self-adjoint boundary conditions, are discussed. Using quasi derivatives, 
 a Lagrange bracket and bilinear functional are used to obtain a Lagrange
 identity and Green's formula; this also leads to the classification of
 self-adjoint boundary conditions. Next we connect the self-adjoint
 differential equations with the theory of Hamiltonian systems and 
 $(n,n)$-disconjugacy. Specific formulas of Green's functions for two 
 and four iterated proportional derivatives are also derived.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{example}[theorem]{Example}
\allowdisplaybreaks

\section{Introduction}

We study the $2n$th order differential expression
\begin{equation} \label{Ly}
\begin{aligned}
Ly(t) &=  \sum_{j=0}^n \left(-D^{\alpha}\right)^{j}
\big[p_{j}\left(D^{\alpha}\right)^{j}y\big](t) \\
  &=  \left(-D^{\alpha}\right)^{n}\left[p_n\left(D^{\alpha}\right)^{n}y\right](t) +
  \dots  - \left(D^{\alpha}\right)^{3}\big[p_{3}\left(D^{\alpha}\right)^{3}y\big](t) \\
  &\quad + \left(D^{\alpha}\right)^{2}\big[p_{2}\left(D^{\alpha}\right)^{2}y\big](t)
 - D^{\alpha}\left[p_{1}D^{\alpha}y\right](t)
      + p_0(t)y(t),
\end{aligned}
\end{equation}
for continuous functions $p_i$ with $p_n\ne 0$, and show that it is formally
self-adjoint with respect to the inner product
\[
\langle y,z \rangle = \int_a^b y(t)z(t)e^2_0(b,t)d_{\alpha}t,
\quad d_{\alpha}t:=\frac{dt}{\kappa_0(t)};
\]
that is, the identity
\[
\langle Ly, z\rangle = \langle y, Lz \rangle
\]
holds provided that $y$ and $z$ satisfy some appropriate self-adjoint
boundary conditions at $a$ and $b$. Here $D^{\alpha}$ is a proportional
derivative operator \cite{and,au,cobber} modeled after a proportional-derivative
controller (PD controller) \cite{ding}. This proportional derivative
 $D^\alpha$ of order $\alpha\in[0,1]$, where $D^0$ is the identity operator,
and $D^1$ is the classical differential operator, will be used to explore
corresponding higher-order linear self-adjoint equations of the form \eqref{Ly}.
 We will refer to an equation with $2n$ iterations of $D^{\alpha}$ as a
$2n$th-order equation.

\begin{remark}\cite{and,au}\rm
In control theory, a PD controller for controller output $u$ at time $t$ with
two tuning parameters has the algorithm
\[
u(t) =  \kappa_pE(t) + \kappa_d\frac{d}{dt}E(t) ,
\]
where $\kappa_p$ is the proportional gain, $\kappa_d$ is the derivative gain,
and $E$ is the input deviation, or the error between the state variable and
the process variable; see \cite{ding}, for example. This is the impetus for
the next definition.
\end{remark}

\begin{definition}[A Class of Proportional Derivatives \cite{and,au}] \rm
Let $\alpha\in[0,1]$, $\mathcal{I}\subseteq\mathbb{R}$, and let the functions
$\kappa_0,\kappa_1:[0,1]\times\mathcal{I}\to [0,\infty)$ be continuous such that
\begin{equation}
\begin{gathered}
 \lim_{\alpha\to  0^+} \kappa_1(\alpha,t) = 1, \quad
 \lim_{\alpha\to  0^+} \kappa_0(\alpha,t) = 0, \quad \forall\;t\in\mathcal{I},  \\
 \lim_{\alpha\to  1^-} \kappa_1(\alpha,t) = 0, \quad
 \lim_{\alpha\to  1^-} \kappa_0(\alpha,t) = 1, \quad
 \forall\;t\in\mathcal{I},  \\
 \kappa_1(\alpha,t)\ne 0, \alpha\in[0,1), \quad
 \kappa_0(\alpha,t)\ne 0, \alpha\in(0,1], \quad \forall\;t\in\mathcal{I}.
\end{gathered} \label{kappaconditions}
\end{equation}
Define the proportional differential operator $D^{\alpha}$ via
\begin{equation}\label{derivdef}
 D^{\alpha} f(t) = \kappa_1(\alpha,t) f(t) + \kappa_0(\alpha,t) f'(t), \quad
 t\in\mathcal{I}
\end{equation}
provided the right-hand side exists at $t$, where $f':=\frac{d}{dt}f$.
\end{definition}

\begin{remark}[\cite{and,au}] \rm
 For the operator given in \eqref{derivdef}, $\kappa_1$ is a type of proportional
gain $\kappa_p$, $\kappa_0$ is a type of derivative gain $\kappa_d$, $f$
is the error, and $u=D^{\alpha}f$ is the controller output.
To illustrate, one could take $\kappa_1\equiv \cos\left(\alpha\pi/2\right)$
and $\kappa_0\equiv \sin\left(\alpha\pi/2\right)$, or
$\kappa_1\equiv (1-\alpha)\omega^{\alpha}$ and
$\kappa_0\equiv \alpha\omega^{1-\alpha}$ for any
$\omega\in(0,\infty)$; or, $\kappa_1=(1-\alpha)|t|^{\alpha}$  and
$\kappa_0=\alpha |t|^{1-\alpha}$ on $\mathcal{I}=\mathbb{R}\backslash\{0\}$, so that
\[
D^{\alpha} f(t) = (1-\alpha) |t|^{\alpha} f(t) + \alpha |t|^{1-\alpha} f'(t).
 \]
If $\kappa_1$ and $\kappa_0$ are constant with respect to the independent variable,
then $D^\beta D^\alpha = D^\alpha D^\beta$, but
$D^\beta D^\alpha \ne D^\alpha D^\beta$ for $\alpha,\beta\in[0,1]$ in general;
see also \cite{zulf}. By \eqref{kappaconditions} and \eqref{derivdef},
\[
\lim_{\alpha\to  0^+}D^{\alpha}f = D^0f = f \quad\text{and}\quad
\lim_{\alpha\to  1^-}D^{\alpha}f = D^1f = f'.
\]
\end{remark}

Throughout the discussion to follow we will need a vital definition
 \cite[Definition 1.6]{au}, which establishes a type of exponential function
for derivative \eqref{derivdef}.

\begin{definition}[Proportional Exponential Function \cite{and,au}]\rm
Let $\alpha\in(0,1]$, the points $s,t\in\mathbb{R}$ with $s\le t$, and let the
 function $p:[s,t]\to \mathbb{R}$ be continuous. Let
$\kappa_0,\kappa_1:[0,1]\times\mathbb{R}\to [0,\infty)$ be continuous and satisfy
\eqref{kappaconditions}, with $p/\kappa_0$ and $\kappa_1/\kappa_0$ Riemann
integrable on $[s,t]$. Then the conformable exponential function with respect
to $D^{\alpha}$ in \eqref{derivdef} is defined to be
\begin{equation}\label{epts}
 e_p(t,s):= e^{\int_s^t \frac{p(\tau)-\kappa_1(\alpha,\tau)}{\kappa_0(\alpha,\tau)} d\tau}, \quad
 e_0(t,s) = e^{-\int_s^t \frac{\kappa_1(\alpha,\tau)}{\kappa_0(\alpha,\tau)} d\tau},
\end{equation}
and satisfies
\begin{equation}\label{expderiv}
 D^{\alpha} e_p(t,s) = p(t) e_p(t,s), \quad D^{\alpha} e_0(t,s) = 0.
\end{equation}
\end{definition}

The following fundamental theorem, given in \cite[Theorem 2.4]{and}
and \cite[Lemma 1.9 (ii)]{au}, relates the proportional derivative and
the proportional integral using the above proportional exponential function.


\begin{theorem}[Fundamental Theorem of Integral Calculus]\label{ftc}
Let $\alpha\in(0,1]$. Suppose $f:[a,b]\to \mathbb{R}$ is differentiable on $[a,b]$
and $f'$ is integrable on $[a,b]$. Then
\[
\int_a^b D^{\alpha}[f(t)]e_0(b,t)d_{\alpha}t = f(b) - f(a)e_0(b,a),
\]
where $d_{\alpha}t:=dt/\kappa_0(t)$.
\end{theorem}


\begin{remark}\rm
As in \cite{cobber}, consider \eqref{derivdef} with $\kappa_1=(1-\alpha)$
and $\kappa_0=\alpha$, so that
\[
D^{\alpha} f(t) = (1-\alpha)f(t) + \alpha f'(t).
\]
Then using the FTC, Theorem \ref{ftc}, as motivation and simplifying
$e_0(t,\tau)$ via \eqref{epts}, define this special case of the proportional
integral of $f$ as
\begin{equation}\label{propint}
  {}_{a}\mathcal{I}_t^{\alpha}f(t):=\frac{1}{\alpha}
\int_a^t f(\tau)e^{-\frac{1-\alpha}{\alpha}(t-\tau)}d\tau.
\end{equation}
In two recent papers \cite{caputo,caputo2}, Caputo and Fabrizio introduce a
new fractional time derivative of the form
\[
\mathscr{D}_t^{(\alpha)}f(t) = \frac{1}{1-\alpha}
\int_a^t f'(\tau)e^{-\frac{\alpha}{1-\alpha}(t-\tau)}d\tau,
\]
with related fractional time integral
\[
{}_{a}\mathscr{I}_{t}^{\alpha}f(t)
= \frac{1}{\alpha}\int_a^t f(\tau)e^{-\frac{1-\alpha}{\alpha}(t-\tau)}d\tau.
\]
Note that we then have the relationships
\[
\mathscr{D}_t^{(\alpha)}f(t) =  {}_{a}\mathcal{I}_t^{1-\alpha}f'(t)
 \quad\text{and}\quad
{}_{a}\mathscr{I}_{t}^{\alpha}f(t) ={}_{a}\mathcal{I}_t^{\alpha}f(t) \]
using \eqref{propint}; further research needs to be done on connecting
the results of \cite{caputo,caputo2} with those to follow.
\end{remark}

\section{Self-adjoint proportional equations}

For the theory of higher order differential equations refer
to \cite{cod, He, KelPet, Nai, Reid}.  Consider the $2n$th-order proportional
differential expression \eqref{Ly}, in which the coefficient functions
$p_j:\mathcal{I}\to\mathbb{R}$ are continuous for $0\le j\le n$ and $p_n(t)\neq  0 $
for all $t\in\mathcal{I}$.

\begin{definition} \rm
Let $\mathbb{D}$ be the linear set of all functions $y:\mathcal{I}\to\mathbb{R}$ such that
the function
\[
\left(D^{\alpha}\right)^{j}\big[p_{j}\left(D^{\alpha}\right)^{j}y\big]
 \]
is defined on $\mathcal{I}$ and is continuous for $0\le j\le n$.
\end{definition}

For each $y\in\mathbb{D}$ the expression $Ly$ is defined and presents a continuous 
function on $\mathcal{I}$.


\begin{definition}[Quasi-Derivatives] \rm
As in the traditional case when $\alpha=1$ (see \cite[p.~49]{Nai}), 
we introduce the functions $y^{[j]}$, $0\le j\le 2n$, as the 
quasi-derivatives of $y$ related to the expression $Ly$. Given $y\in\mathbb{D}$, set
\begin{gather}
  y^{[j]}   =  \left(D^{\alpha}\right)^{j}y, \quad 0\le j\le n-1, \quad 
 y^{[0]} =  \left(D^{\alpha}\right)^{0}y = y,  \label{yj} \\
  y^{[n]}   =  p_n \left(D^{\alpha}\right)^{n}y, \label{yn} \\
\begin{aligned}
  y^{[n+j]} &=  p_{n-j} \left(D^{\alpha}\right)^{n-j}y 
-  D^{\alpha}\big[y^{[n+j-1]}\big], \quad 1 \le j \le n-1  \\
            &=  \sum_{i=0}^j \left(-D^{\alpha}\right)^{j-i}
 \big[p_{n-i} \left(D^{\alpha}\right)^{n-i}y\big], \quad 0 \le j \le n-1, 
\end{aligned} \label{ynj}\\
\begin{aligned}
  y^{[2n]}  &=  p_0y -  D^{\alpha}\big[y^{[2n-1]}\big]  \\
            &=  \sum_{j=0}^n \left(-D^{\alpha}\right)^{j}
\big[p_{j}\left(D^{\alpha}\right)^{j}y\big] = Ly.
\end{aligned} \label{y2nLy}
\end{gather}
\end{definition}

\begin{definition}[Lagrange Bracket] \rm
Assume $y,z\in\mathbb{D}$ and $t\in\mathcal{I}$. The Lagrange bracket of $y$ and $z$ is given by
\begin{equation}\label{bracket}
  \{y,z\}(t) = \sum_{j=1}^n \left\{y^{[j-1]}z^{[2n-j]} - y^{[2n-j]}z^{[j-1]}\right\}(t).
\end{equation}
\end{definition}

\begin{definition}[Bilinear Functional]\rm
Assume $y,z\in\mathbb{D}$ and $t\in\mathcal{I}$. The bilinear (in $y$ and $z$) 
functional $F$ is given by
\begin{equation} \label{F1}
  F(y,z,t) = \sum_{j=1}^n \left(y^{[j-1]}z^{[2n-j]}\right)(t).
\end{equation}
\end{definition}

Note that by combining \eqref{bracket} and \eqref{F1}, we have the Lagrange 
bracket in terms of the bilinear functional, namely
\[ 
\{y,z\}(t) = F(y,z,t) - F(z,y,t). 
\]
Using \eqref{yj} and \eqref{ynj} we get that
\begin{equation} \label{F2}
 F(y,z,t) = \sum_{j=0}^{n-1} (-1)^j \left(D^{\alpha}\right)^{n-j-1}y(t)
 \sum_{i=0}^j (-1)^i \left(D^{\alpha}\right)^{j-i}
 \big[p_{n-i} \left(D^{\alpha}\right)^{n-i}z\big](t).
\end{equation}

\begin{lemma}\label{Fdelta}
The bilinear functional $F$ in \eqref{F1} satisfies
\[ 
e_0(t,a)D^{\alpha}\Big[\frac{F(y,z,\cdot)}{e_0(\cdot,a)}\Big](t)
    = \Big( -yLz + \sum_{j=0}^n p_{j}\left(D^{\alpha}\right)^{j}
y\left(D^{\alpha}\right)^{j}z\Big)(t) 
\]
for $t,a\in\mathcal{I}$.
\end{lemma}

\begin{proof}
Differentiating both sides of \eqref{F1}, employing the quotient rule 
for $\alpha$-deriva\-tives, and taking into account the formulas \eqref{yn} 
and \eqref{y2nLy}, we get
\begin{align*}
  e_0(t,a)D^{\alpha}\Big[\frac{F(y,z,\cdot)}{e_0(\cdot,a)}\Big](t)
  &=  D^{\alpha}F(y,z,t)+\kappa_1(t)F(y,z,t) \\
  &=  \sum_{j=1}^n \left(y^{[j-1]}D^{\alpha}\big[z^{[2n-j]}\big] 
+ z^{[2n-j]}D^{\alpha}\big[y^{[j-1]}\big]\right)(t) \\
  &=  \Big(y^{[0]}D^{\alpha}\big[z^{[2n-1]}\big]+\sum_{j=2}^n 
 y^{[j-1]}D^{\alpha}\big[z^{[2n-j]}\big] \\
  & \quad + z^{[n]}D^{\alpha}\big[y^{[n-1]}\big] 
 + \sum_{j=1}^{n-1} z^{[2n-j]}D^{\alpha}\big[y^{[j-1]}\big]\Big)(t) \\
  &=  \Big(y(p_0z-Lz) + \sum_{j=2}^n y^{[j-1]}D^{\alpha}\big[z^{[2n-j]}\big] \\
  &\quad + p_n\left(D^{\alpha}\right)^n y\left(D^{\alpha}\right)^nz 
 + \sum_{j=2}^n z^{[2n-j+1]}D^{\alpha}\big[y^{[j-2]}\big]\Big)(t).
\end{align*}
Further, by \eqref{yj} we have
\[ 
D^{\alpha}\big[y^{[j-2]}\big](t) = y^{[j-1]}(t) \quad\text{for }
 2\le j\le n, \; t\in\mathcal{I}, 
\]
and from \eqref{ynj} for $z$, replacing the $j$ by $n-j+1$, we find
\[ 
z^{[2n-j+1]} = p_{j-1} \left(D^{\alpha}\right)^{j-1}z 
- D^{\alpha}\big[z^{[2n-j]}\big] \quad\text{for } 2\le j\le n. 
\]
Consequently we obtain the desired result.
\end{proof}

\begin{theorem}[Lagrange Identity]
If $y,z\in\mathbb{D}$, then for $t,a\in\mathcal{I}$ we have
\begin{equation} \label{zLy}
  \left(zLy - yLz\right)(t) 
= e_0(t,a)D^{\alpha}\big[\frac{\{y,z\}}{e_0(\cdot,a)}\big](t),
\end{equation}
where $\{y,z\}$ is the Lagrange bracket of $y$ and $z$ defined by \eqref{bracket}.
\end{theorem}

\begin{proof}
By \eqref{bracket} and \eqref{F1} we have
\[ \{y,z\}(t) = F(y,z,t)-F(z,y,t); \]
dividing both sides by $e_0(t,a)$, taking the $\alpha$ derivative,
 multiplying the result by $e_0(t,a)$ on both sides, and applying 
Lemma \ref{Fdelta} we obtain \eqref{zLy}.
\end{proof}

\begin{remark}[Green's Formula]\label{greenformula}\rm
Let the numbers $a,b,t\in\mathcal{I}$ with $a<b$. If we multiply both sides of \eqref{zLy} by $e^2_0(b,t)d_{\alpha}t$ and integrate from $a$ to $b$, then we obtain Lagrange's identity in integral form, also called Green's formula,
\begin{align*}
  \langle Ly, z\rangle - \langle y, Lz \rangle
	&=  \int_a^b \left(zLy\right)(t)e^2_0(b,t)d_{\alpha}t 
- \int_a^b \left(yLz\right)(t)e^2_0(b,t)d_{\alpha}t \\
	&=  \{y,z\}(b)-e^2_0(b,a)\{y,z\}(a).
\end{align*}
\end{remark}

Let $g:\mathcal{I} \to \mathbb{R}$ be a continuous function, and consider the 
non-homogeneous equation
\begin{equation} \label{Lyg}
 Ly(t) = g(t) \quad\text{for } t \in \mathcal{I}.
\end{equation}
If $y \in \mathbb{D}$ and \eqref{Lyg} holds for $y$, we say that $y$ is a solution 
of \eqref{Lyg}. In order to obtain an existence and uniqueness theorem
 for initial value problems involving \eqref{Lyg}, it is necessary to 
rewrite \eqref{Lyg} in the form of an equivalent system of first order equations. 
From \eqref{yj}, \eqref{ynj}, and \eqref{y2nLy} we have the following system of 
equations
\begin{equation}
\begin{gathered}
  D^{\alpha}\big[y^{[j]}\big] = y^{[j+1]}, \quad 0 \le j \le n-2,  \\
  D^{\alpha}\big[y^{[n-1]}\big] = \left(D^{\alpha}\right)^ny 
 = \frac{y^{[n]}}{p_n},  \\
  D^{\alpha}\big[y^{[n+j-1]}\big] 
= p_{n-j}\left(D^{\alpha}\right)^{n-j}y-y^{[n+j]} = p_{n-j}y^{[n-j]}- y^{[n+j]},
 \quad 1\le j\le n-1,
     \\
 D^{\alpha}\big[y^{[2n-1]}\big] = p_0 y - Ly. 
\end{gathered}\label{quasiDs}
\end{equation}
Define the following column vectors via
\[ 
\vec{y} = \left(y^{[0]}, y^{[1]}, \dots , y^{[2n-1]}\right)^\top, \quad 
\vec{g} = \left(0, 0, \dots , 0, -g \right)^\top, \]
where $^\top$ indicates transpose.  In addition, define the $n\times n$ 
matrix functions
\[ A_1 = -A_4 = \begin{pmatrix} 0 & 1 & 0 & 0 & \cdots & 0 & 0 \\
     0 & 0 & 1 & 0 & \cdots & 0 & 0 \\
     0 & 0 & 0 & 1 & \cdots & 0 & 0  \\
     \vdots & \vdots & \vdots & \vdots & \ddots & \vdots & \vdots  \\
     0 & 0 & 0 & 0 & \cdots & 1 & 0  \\
     0 & 0 & 0 & 0 & \cdots & 0 & 1 \\
     0 & 0 & 0 & 0 & \cdots & 0 & 0 \\
     \end{pmatrix}, \]
\[ A_2 = \begin{pmatrix} 0 & 0 & 0 & \cdots & 0 & 0 & 0 \\
     0 & 0 & 0 & \cdots & 0 & 0 & 0 \\
     \vdots & \vdots & \vdots & \ddots & \vdots & \vdots & \vdots  \\
     0 & 0 & 0 & \cdots & 0 & 0 & 0 \\
     \frac{1}{p_n} & 0 & 0 & \cdots & 0 & 0 & 0
     \end{pmatrix}, \]
\[ A_3 = \begin{pmatrix}
  0 & 0 & 0 & 0 & \cdots & 0 & p_{n-1} \\
  0 & 0 & 0 & 0 & \cdots & p_{n-2} & 0 \\
  \vdots & \vdots & \vdots & \vdots & \vdots & \vdots & \vdots \\
  0 & 0 & p_2 & 0 & \cdots & 0 & 0 \\
  0 & p_1 & 0 & 0 & \cdots & 0 & 0 \\
  p_0 & 0 & 0 & 0 & \cdots & 0 & 0
  \end{pmatrix}, \]
so that
\[ 
A(t) = \begin{pmatrix} A_1(t) & A_2(t) \\ A_3(t) & A_4(t) \end{pmatrix}
 \]
is a $(2n) \times (2n)$ variable matrix function on $\mathcal{I}$. 
From this we see that the equation \eqref{Lyg} is equivalent to the 
first order system
\begin{equation} \label{first}
 D^{\alpha}\vec{y}(t) = A(t)\vec{y} + \vec{g}(t) \quad\text{for } t\in\mathcal{I}.
\end{equation}
We are now able to prove the following theorem.

\begin{theorem}[Existence and Uniqueness]
Fix $t_0\in\mathcal{I}$ and let $c_j\in\mathbb{R}$, $0 \le j\le 2n-1$, be given. 
Then for $\alpha\in(0,1]$, equation \eqref{Lyg} has a unique solution 
$y:\mathcal{I}\to\mathbb{R}$ such that
\[ 
y^{[j]}(t_0) = c_j, \quad 0\le j\le 2n-1. 
\]
\end{theorem}

\begin{proof}
Since equation \eqref{Lyg} is equivalent to the system \eqref{first}, 
and \eqref{first} is equivalent to
\[ 
\frac{d}{dt}\vec{y} = \frac{1}{\kappa_0}\left(A-\kappa_1 I\right)\vec{y}
+\frac{1}{\kappa_0}\vec{g}, 
\]
the result follows from classical ODE theory.
\end{proof}

Consider the homogeneous equation $Ly(t)=0$.

\begin{definition}[Wronskian]\rm
Let $y_j$, $1 \le j \le 2n$, be solutions of $Ly(t)=0$. The Wronskian of 
these solutions is defined to be
the determinant
\[ 
W_t(y_1, \dots, y_{2n})=\begin{vmatrix} y_1 & y_2 & \cdots & y_{2n}\\ 
y_1^{[1]} & y_2^{[1]} & \cdots & y_{2n}^{[1]} \\
   \vdots & \vdots & \ddots & \vdots \\ y_1^{[2n-1]} & y_2^{[2n-1]} 
& \cdots & y_{2n}^{[2n-1]} \end{vmatrix}.
 \]
\end{definition}

The proofs of the following two theorems follow in the same manner as the 
differential equations case; see \cite[pp. 57--58]{Nai}.

\begin{theorem}
If the solutions $y_i$, $1 \le i \le 2n$, of the homogeneous equation $Ly=0$ 
are linearly dependent, then their Wronskian vanishes identically on $\mathcal{I}$.  
Conversely, if the Wronskian vanishes at
at least one point in $\mathcal{I}$, then the solutions $y_i$, $1\le i\le 2n$, 
are linearly dependent.
\end{theorem}

We can easily construct a linearly independent system of solutions $y_i$, 
$1\le i\le 2n$, of a homogeneous system.
We need only choose a system of solutions which satisfy initial conditions 
of the form
\[ 
y_i^{[j-1]}(t_0) = a_{ij}, \quad 1 \le i,j \le 2n, 
\]
where the determinant of the matrix $[a_{ij}]$ is different from zero. 
A linearly independent system of solutions
$y_i$, $1\le i\le 2n$, is a fundamental system.

\begin{theorem}
Every solution of a homogeneous equation is a linear combination of a fixed, 
arbitrarily chosen, fundamental system.
\end{theorem}


\section{Self-adjoint boundary conditions and Green's functions}

Let $a,b\in\mathcal{I}$ with $a<b$. If $y$ and $z$ are real valued continuous 
functions and bounded on $[a,b]$,
define their inner product to be
\[ 
\langle y,z \rangle = \int_a^b y(t)z(t)e^2_0(b,t)d_{\alpha}t, 
\quad d_{\alpha}t:=\frac{dt}{\kappa_0(t)}. 
\]
Suppose for $0\le j\le n$ that $p_j:[a,b]\to\mathbb{R}$ is continuous with 
$p_n(t)\neq  0$ on $[a,b]$.

\begin{definition}\rm
Denote by $\mathbb{D}[a,b]$ the linear set of all continuous functions $y:[a,b]\to\mathbb{R}$ 
such that
\[ 
\left(D^{\alpha}\right)^{j}\big[p_{j}\left(D^{\alpha}\right)^{j}y\big] 
\]
is defined on $[a,b]$ and is continuous for $0\le j\le n$.
\end{definition}

For $y\in\mathbb{D}[a,b]$ let
\begin{equation} \label{Ly2}
  Ly(t) = \sum_{j=0}^n \left(-D^{\alpha}\right)^{j}
\big[p_{j}\left(D^{\alpha}\right)^{j}y\big](t), \quad t\in[a,b].
\end{equation}
Then $Ly$ is continuous and bounded on $[a,b]$.  
Together with the equation \eqref{Ly2}, define the boundary conditions
\begin{equation} \label{bnd}
  U_i(y):= e_0(b,a)\sum_{j=1}^{2n} \eta_{ij}y^{[j-1]}(a) 
+ e_0(a,b)\sum_{j=1}^{2n} \beta_{ij}y^{[j-1]}(b), \quad 1 \le i \le 2n,
\end{equation}
where $\eta_{ij}, \beta_{ij}$, $1 \le i,j \le 2n$ are given real numbers.

\begin{definition}\rm
The boundary conditions \eqref{bnd} are self-adjoint with respect to 
the equation \eqref{Ly2} if and only if
\begin{equation}\label{LyzyLz}
  \langle Ly, z \rangle = \langle y, Lz \rangle
\end{equation}
for all functions $y,z \in \mathbb{D}[a,b]$ satisfying the boundary conditions \eqref{bnd}.
\end{definition}

By Green's formula given in Remark \ref{greenformula} we have, for all 
$y,z\in\mathbb{D}[a,b]$,
\[ 
\langle Ly,z\rangle - \langle y, Lz \rangle = \{y,z\}(b)-e^2_0(b,a)\{y,z\}(a), 
\]
where the Lagrange bracket $\{y,z\}$ is as defined previously in \eqref{bracket}. 
Therefore the boundary conditions \eqref{bnd} are self-adjoint if and only if
\[ 
\{y,z\}(b)=e^2_0(b,a)\{y,z\}(a) 
\]
for all functions $y,z \in \mathbb{D}[a,b]$ satisfying \eqref{bnd}.  
For example the boundary conditions
\[ 
y^{[j]}(a) = 0 = y^{[j]}(b), \quad 0 \le j \le n-1, 
\]
and also the boundary conditions
\[ 
e_0(b,a)y^{[j]}(a) = e_0(a,b)y^{[j]}(b), \quad 0 \le j \le 2n-1, 
\]
are self-adjoint.  The boundary value problem $Ly(t)=0$, $U_i(y)=0$, 
$1 \le i \le 2n$ has Green's function $G(t,s)$ if for any continuous and 
bounded function $g:[a,b]\to\mathbb{R}$ the nonhomogeneous boundary value problem 
$Ly(t) = g(t)$, $U_i(y)=0$, $1\le i\le 2n$, has a unique solution
$y:[a,b] \to \mathbb{R}$ which is given by
\[
 y(t) = \int_a^b G(t,s)g(s) d_{\alpha}s. 
\]

\section{Self-adjoint equations as Hamiltonian systems}

One important type of differential system is a Hamiltonian system
 \cite{AlBoRi,Hilscher}. Let us show that the $2n$th order self-adjoint 
equation $Ly=0$, in which $Ly$ is of the form \eqref{Ly}, can be written 
as an equivalent complex linear Hamiltonian system given by
\begin{equation}\label{nabham}
 D^{\alpha}\vec{x}(t) = \mathcal{A}(t)\vec{x}(t) + \mathcal{B}(t)\vec{u}(t), \quad
 D^{\alpha}\vec{u}(t) = \mathcal{C}(t)\vec{x}(t) - \mathcal{A}^*(t)\vec{u}(t), 
\quad t\in\mathcal{I},
\end{equation}
where $\mathcal{A}$, $\mathcal{B}$, and $\mathcal{C}$ are $n\times n$ 
complex matrices with $\mathcal{B}$ and $\mathcal{C}$ Hermitian;
$\mathcal{A}^*$ denotes the conjugate transpose of $\mathcal{A}$; 
$\mathcal{I}\subseteq[a,\infty)$. In particular, we will show \eqref{Ly} 
can be written in the form of \eqref{nabham}, where
\begin{gather*}
  \mathcal{A}=(a_{ij})_{1\le i,j\le n} \quad\text{with}\quad
   a_{ij}=\begin{cases}1: & \text{if } j=i+1,\; 1\le i\le n-1, \\
                       0: & \text{otherwise,} \end{cases}\\
  \mathcal{B}=\operatorname{diag}\Big\{0,\dots,0,\frac{1}{p_n}\Big\},\quad
  \mathcal{C}=\operatorname{diag}\{p_0,p_1,p_2,\ldots,p_{n-1}\}.
\end{gather*}
Recall for any function $y\in\mathbb{D}$ the system of equations in \eqref{quasiDs}.
 Then using the substitution
\begin{equation}\label{xuvecdef}
 \vec{x} = \begin{pmatrix} y^{[0]} \\ y^{[1]} \\ \vdots \\ y^{[n-1]} \end{pmatrix}, \quad
 \vec{u} = \begin{pmatrix} y^{[2n-1]} \\ y^{[2n-2]} \\ \vdots \\ y^{[n]} 
\end{pmatrix},
\end{equation}
and the matrices $\mathcal{A}$, $\mathcal{B}$, and $\mathcal{C}$ above, 
we have that $Ly(t)=0$, $t \in \mathcal{I}$ is equivalent to the linear 
Hamiltonian system \eqref{nabham}.

Now let us present some properties of solutions to the homogeneous equation 
$Ly(t)=0$, $t \in \mathcal{I}$.
From the Lagrange identity \eqref{zLy} we immediately get the following theorem.

\begin{theorem}
If $y$ and $z$ are solutions of $Ly(t)=0$ for $t\in\mathcal{I}$, then the 
Lagrange bracket of $y$ and $z$ satisfies
\[ 
\{y,z\}(t) = ce^2_0(t,a), \quad t\in\mathcal{I}, 
\]
where $a\in\mathcal{I}$ and $c\in\mathbb{R}$.
\end{theorem}

Lemma \ref{Fdelta} yields the following result.

\begin{theorem}\label{Fincr}
Let $F(y,z,t)$ be defined as in \eqref{F1} (see also \eqref{F2}), and let
 $a\in\mathcal{I}$. If $y$ is a solution of $Ly(t)=0$,
$t\in\mathcal{I}$, then
\[ e_0(t,a)D^{\alpha}\big[\frac{F(y,y,\cdot)}{e_0(\cdot,a)}\big](t)
   = \sum_{j=0}^n p_{j}(t)\left[(D^{\alpha})^jy\right]^2(t), \quad t\in\mathcal{I}. 
\]
In particular, if $p_j(t) \ge 0$ for $0\le j\le n$ and $t\in\mathcal{I}$, then 
$F(y,y,t)$ satisfies
\[ 
e_0(a,t)F(y,y,t) \ge e_0(t,a)F(y,y,a) 
\]
along solutions of $Ly(t)=0$ for all $t\in\mathcal{I}$ with $t\ge a$.
\end{theorem}

\begin{proof}
If $y$ is a solution of $Ly(t)=0$, then by Lemma \ref{Fdelta} we know that 
$F(y,y,t)$ satisfies
\[ 
e_0(t,a)D^{\alpha}\big[\frac{F(y,y,\cdot)}{e_0(\cdot,a)}\big](t)
    = \sum_{j=0}^n p_{j}\left[(D^{\alpha})^{j}y\right]^2(t) 
\]
for $t,a\in\mathcal{I}$. Furthermore, if $p_j(t)\ge 0$ for $0\le j\le n$ and 
$t\in\mathcal{I}$, then
\[ 
D^{\alpha}\big[\frac{F(y,y,\cdot)}{e_0(\cdot,a)}\big](t) \ge 0, \quad 
t\in\mathcal{I}, 
\]
and the function $F(y,y,\cdot)/e_0(\cdot,a)$ is $\alpha$-increasing on $\mathcal{I}$. 
Thus,
\[ 
e_0(t_1,t_2)F(y,y,t_2)/e_0(t_2,a) \ge F(y,y,t_1)/e_0(t_1,a),
\] 
whenever $t_2 > t_1$, $t_1,t_2\in\mathcal{I}$. 
The result follows if we take $t_1=a$ and $t_2=t$.
\end{proof}


\begin{lemma} \label{etaeta}
Assume $\eta\in\mathbb{D}[a,b]$. Then
\begin{equation} \label{eta}
 F(\eta,\eta,b)-F(\eta,\eta,a)e^2_0(b,a)
= -\langle \eta, L\eta \rangle + \sum_{j=0}^n 
\langle p_{j},[(D^{\alpha})^j\eta]^2 \rangle.
\end{equation}
\end{lemma}

\begin{proof}
Setting $y=z=\eta$ in Lemma \ref{Fdelta} we have
\[ 
e_0(t,a)D^{\alpha}\big[\frac{F(\eta,\eta,\cdot)}{e_0(\cdot,a)}\big](t)
    = \Big(-\eta L\eta + \sum_{j=0}^n p_{j} \big[(D^{\alpha})^{j}\eta\big]^2\Big)(t) 
\]
for $t,a\in\mathcal{I}$. If we multiply both sides by $e^2_0(b,t)d_{\alpha}t$ 
and then integrate from $a$ to $b$ we get the desired result.
\end{proof}

\begin{definition} \rm
The set of admissible variations is given by
\[
 \mathcal{S} = \left\{\eta\in\mathbb{D}[a,b]: (D^{\alpha})^j\eta(a) 
= (D^{\alpha})^j\eta(b) =0, \quad 0\le j\le n-1 \right\}, 
\]
with corresponding functional
\begin{equation} \label{calF}
 \mathcal{F}(\eta) = \sum_{j=0}^n \langle p_{j},[(D^{\alpha})^j\eta]^2\rangle.
\end{equation}
\end{definition}

For an admissible variation $\eta \in \mathcal{S}$, Lemma \ref{etaeta} implies that
\[ 
\mathcal{F}(\eta) = \langle \eta, L\eta \rangle. 
\]
The functional $\mathcal{F}$ is positive definite on the set of admissible 
variations $\mathcal{S}$ if $\mathcal{F}(\eta) \ge 0$ for all 
$\eta\in\mathcal{S}$, and $\mathcal{F}(\eta)=0$ 
if and only if $\eta =0$.

Note that the bilinear functional $F$ in \eqref{F1} and the vector-valued 
functions $\vec{x}$ and $\vec{u}$ given above in \eqref{xuvecdef} satisfy 
the dot product equation
\[
 (\vec{x} \cdot \vec{u})(t) = F(y,y,t). 
\]
We will use this in the proof of the next theorem.

\begin{theorem}\label{posdefdisconj}
Assume $p_j(t)\ge 0$ for $0\le j\le n$ and $t\in\mathcal{I}$, and $p_n(t)>0$ 
for $t\in\mathcal{I}$. Then the functional $\mathcal{F}$ is positive 
definite on $\mathcal{S}$ 
and the linear Hamiltonian system \eqref{nabham} being considered for $t\in[a,b]$ 
is disconjugate on $[a,b]$. In particular the self-adjoint BVP
\begin{gather*}
   Ly(t) = 0, \quad t \in [a,b], \\
   (D^{\alpha})^jy(a) = 0 = (D^{\alpha})^jy(b), \quad j=0,1,\ldots,n-1,
\end{gather*}
has only the trivial solution.
\end{theorem}

\begin{proof}
Let $t\in\mathcal{I}$. From $p_j(t)\ge 0$ for $0\le j\le n$ and \eqref{calF}, 
it is clear that $\mathcal{F}(\eta)\ge 0$ for all $\eta\in\mathcal{S}$, and that
$\mathcal{F}(0)=0$. Now suppose $\eta\in\mathcal{S}$ and $\mathcal{F}(\eta)=0$. Then
\[ 
0 = \sum_{j=0}^n \langle p_{j},[(D^{\alpha})^j\eta]^2\rangle
       \ge \langle p_{n},[(D^{\alpha})^n\eta]^2\rangle, 
\]
and since $p_n(t)>0$, we have that $(D^{\alpha})^n\eta(t) = 0$ for 
$t\in[a,b]$. Because $\eta$ is admissible,
it solves the initial value problem
\begin{gather*}
  (D^{\alpha})^n\eta(t) = 0, \quad t\in[a,b] \\
  (D^{\alpha})^j\eta(a) = 0, \quad 0\le j\le n-1.
\end{gather*}
By uniqueness of solutions to initial value problems, $\eta$ is the trivial 
solution in the set of admissible functions, whence $\mathcal{F}$
 is positive definite on that set. By Lemma \ref{etaeta}, if $y$ is a solution
 of $Ly(t)=0$, $t\in[a,b]$, then
\begin{align*}
   (\vec{x}\cdot\vec{u})(b) - (\vec{x}\cdot\vec{u})(a)e^2_0(b,a) 
&=  F(y,y,b)-F(y,y,a)e^2_0(b,a) \\
   &=  \sum_{j=0}^n \langle p_{j},\left[(D^{\alpha})^jy\right]^2\rangle \\
   &=  \mathcal{F}(y).
\end{align*}
Note that the Hamiltonian system \eqref{nabham} is disconjugate on $[a,b]$ 
if and only if for a vector solution $\vec{x}$, $\vec{u}$ of
\eqref{nabham}, the following is positive definite:
\[ 
\int_a^b \left(\vec{x}^\top\mathcal{C}\vec{x} 
+ \vec{u}^\top\mathcal{B}\vec{u}\right)(t)e^2_0(b,t) d_{\alpha}t 
= \sum_{j=0}^{n-1} \langle p_{j},\big(y^{[j]}\big)^2\rangle 
+ \langle 1/p_n,\big(y^{[n]}\big)^2\rangle= \mathcal{F}(y). 
\]
This completes the proof.
\end{proof}

 The point $t=t_0$ is a zero of order (at least) $n$ of $y$ if
\[ (D^{\alpha})^jy(t_0)=0, \quad j=0,1,\ldots,n-1. \]
The equation $Ly=0$ is $(n,n)$ disconjugate on $[a,b]$ provided there 
is no nontrivial solution of $Ly=0$ with a zero of order (at least) $n$ 
in $(a,b]$ preceded by a zero of order (at least) $n$ in $[a,b]$.  
These ideas lead to the next conclusion.

\begin{theorem}
If $p_n(t)>0$ for $t\in[a,b]$, then $Ly(t)=0$ is $(n,n)$ disconjugate on $[a,b]$.
\end{theorem}

\begin{proof}
Suppose $y$ is a solution of $Ly=0$, and without loss of generality assume $y$ 
has a zero of order $n$ at $a$, namely $(D^{\alpha})^jy(a)=0$, $j=0,1,\ldots,n-1$. 
Then from \eqref{F2} we have $F(y,y,a)=0$, and $F(y,y,t)\ge 0$ for all $t\in[a,b]$ 
by Theorem \ref{Fincr}. If $y$ has a zero at $t_0\in(a,b]$ of order $n$, then
\[ 
(D^{\alpha})^jy(t_0)=0, \quad j=0,1,\ldots,n-1. 
\]
But then $y$ is a trivial solution of $Ly=0$ by the previous theorem.
\end{proof}


\section{Second-order proportional equations}

Analogous to the classic and time scales cases \cite{AtiGus}, in this section 
we find Green's function associated to second-order proportional equations. 
With this in mind, again consider \eqref{Ly}. Taking $n=1$, we find that
\[ 
Ly(t) = -D^{\alpha}\left[p_1 D^{\alpha}y\right](t) + p_0(t)y(t), \quad 
t\in\mathcal{I}, 
\]
and for each function $y\in\mathbb{D}$,
\[ 
y^{[0]} = y, \quad y^{[1]} = p_1 D^{\alpha}y, \quad y^{[2]} 
= p_0y - D^{\alpha}\big[y^{[1]}\big]. 
\]
Then
\[ 
Ly = y^{[2]} 
\]
as expected. In addition, the equation $Ly(t) = g(t)$ for $t\in\mathcal{I}$ 
is equivalent to the first order system
\[ 
D^{\alpha}\vec{y}(t) = A(t)\vec{y}(t) + \vec{g}(t), \quad t \in \mathcal{I}, 
\]
where
\[ 
\vec{y} = \begin{pmatrix} y^{[0]} \\ y^{[1]} \end{pmatrix}, \quad 
\vec{g} = \begin{pmatrix} 0 \\ -g \end{pmatrix},
   \quad A(t) = \begin{pmatrix} 0 & \frac{1}{p_1(t)} \\ p_0(t) & 0 \end{pmatrix}. 
\]
The Wronskian of two solutions $y,z$ is 
\[ 
W_t(y,z) = \begin{vmatrix} y^{[0]}(t) & z^{[0]}(t) \\ y^{[1]}(t) & z^{[1]}(t) 
\end{vmatrix}
    = p_1(t)\left(yD^{\alpha}z - zD^{\alpha}y\right)(t) = \{y,z\}(t), 
\]
the Lagrange bracket \eqref{bracket} of $y$ and $z$, giving rise to the 
following theorem.

\begin{theorem}\label{wrone2thm}
The Wronskian of any two solutions $y,z$ of $Ly(t) = 0$ satisfies
\[ 
W_t(y,z)=e^2_0(t,a)W_a(y,z). 
\]
\end{theorem}

The following theorem presents a variation of constants formula for the
 nonhomogeneous equation $Ly(t) = g(t)$.

\begin{theorem}[Variation of Constants]
Suppose that $y_1,y_2$ form a fundamental system of solutions of the homogeneous 
equation $Ly(t)=0$. Then the general solution of the nonhomogeneous equation 
$Ly(t) = g(t)$ is given by
\[
 y(t) = c_1y_1(t) + c_2y_2(t) + \int_{t_0}^t \frac{y_1(t)y_2(s) 
- y_1(s)y_2(t)}{W_s(y_1,y_2)} g(s) d_{\alpha}s, 
\]
where $t_0\in \mathcal{I}$ and $c_1, c_2$ are real constants.
\end{theorem}

\begin{proof}
It suffices to show that the function
\[ 
z(t) = \int_{t_0}^t \frac{y_1(t)y_2(s) - y_1(s)y_2(t)}{W_s(y_1,y_2)} 
g(s) d_{\alpha}s 
\]
is a particular solution of the nonhomogeneous equation $Ly(t) = g(t)$. 
Differentiating both sides yields
\[ 
D^{\alpha}z(t) = \int_{t_0}^t \frac{y_2(s)D^{\alpha}y_1(t) 
- y_1(s)D^{\alpha}y_2(t)}{W_s(y_1,y_2)} g(s) d_{\alpha}s. 
\]
Hence
\begin{align*}
  D^{\alpha}\left[p_1D^{\alpha}z\right](t) 
&=  \frac{y_2(t)p_1(t)D^{\alpha}y_1(t)-y_1(t)p_1(t)D^{\alpha}y_2(t)}{W_t(y_1,y_2)} 
g(t) \\
  &\quad +\int_{t_0}^t \frac{y_2(s)D^{\alpha}[p_1D^{\alpha}y_1](t) 
- y_1(s)D^{\alpha}[p_1D^{\alpha}y_2](t)}{W_s(y_1,y_2)} g(s) d_{\alpha}s \\
	&=  -g(t) + p_0(t)z(t),
\end{align*}
that is, $z$ satisfies $Ly(t) = g(t)$.
\end{proof}

For $y\in\mathbb{D}[a,b]$ let
\[ 
Ly(t) = -D^{\alpha}\left[p_1 D^{\alpha}y\right](t) + p_0(t)y(t), \quad t\in[a,b],
\]
together with the boundary conditions
\begin{equation} \label{bnd1}
\begin{gathered}
   \eta_{11}e_0(b,a)y(a) + \eta_{12}e_0(b,a)y^{[1]}(a) + \beta_{11}e_0(a,b)y(b) + \beta_{12}e_0(a,b)y^{[1]}(b) = 0, \\
   \eta_{21}e_0(b,a)y(a) + \eta_{22}e_0(b,a)y^{[1]}(a) + \beta_{21}e_0(a,b)y(b) + \beta_{22}e_0(a,b)y^{[1]}(b) = 0,
\end{gathered}
\end{equation}
where $\eta_{ij}, \beta_{ij}$ are given real numbers, $i,j=1,2$.  Set
\[ 
N = \begin{pmatrix} \eta_{11} & \eta_{12} & \beta_{11} & \beta_{12} \\
       \eta_{21} & \eta_{22} & \beta_{21} & \beta_{22} \end{pmatrix}.
 \]
We will assume that the matrix $N$ has rank 2. This means that the two boundary 
conditions \eqref{bnd1} are linearly independent. As before, we call the 
boundary conditions \eqref{bnd1} self adjoint with respect to the expression 
$Ly$ if
\[ 
\langle Ly, z\rangle = \langle y, Lz \rangle 
\]
for all functions $y,z\in\mathbb{D}[a,b]$ satisfying the boundary conditions \eqref{bnd1}.
Recall that by Green's formula, the boundary conditions \eqref{bnd1} are self 
adjoint if and only if
\[ 
e_0(a,b)\{y,z\}(b) = e_0(b,a)\{y,z\}(a). 
\]
Set
\[ 
N_1 = \begin{pmatrix} \eta_{11} & \eta_{12} \\ \eta_{21} & \eta_{22} \end{pmatrix},
          \quad 
N_2 = \begin{pmatrix} \beta_{11} & \beta_{12} \\ \beta_{21} & \beta_{22} 
\end{pmatrix}. 
\]

\begin{theorem}
If $\det N_1=\det N_2$, then the boundary conditions \eqref{bnd1} are self adjoint.
\end{theorem}

\begin{proof}
Let $y,z\in\mathbb{D}[a,b]$ be functions which satisfy the boundary conditions \eqref{bnd1}. 
Then we have
\[ 
e_0(b,a)N_1\begin{pmatrix} y(a) & z(a) \\ y^{[1]}(a) & z^{[1]}(a) \end{pmatrix}
   = e_0(a,b)N_2 \begin{pmatrix} -y(b) & -z(b) \\ -y^{[1]}(b) & -z^{[1]}(b) 
\end{pmatrix}. 
\]
Passing to determinants we have
\[ 
(\det N_1)e_0(b,a)\{y,z\}(a) = (\det N_2)e_0(a,b)\{y,z\}(b). 
\]
If $\det N_1=\det N_2\neq 0$, then
\[ 
e_0(b,a)\{y,z\}(a)=e_0(a,b)\{y,z\}(b). 
\]
Suppose $\det N_1=\det N_2 =0$. Since $N$ has rank 2, it is clear that the boundary
conditions \eqref{bnd1} are equivalent to separated boundary conditions of the form
\begin{equation} \label{sep}
  \begin{gathered}
  \eta_1y(a) + \eta_2y^{[1]}(a) = 0, \quad |\eta_1|+|\eta_2| \neq  0, \\
  \beta_1y(b) + \beta_2y^{[1]}(b) = 0, \quad |\beta_1|+|\beta_2| \neq  0,
\end{gathered}
\end{equation}
where $\eta_i, \beta_i$, $i=1,2$ are real numbers.  
It can easily be verified that for any functions $y,z\in\mathbb{D}[a,b]$ satisfying 
the separated boundary conditions \eqref{sep} we have
\[ 
\{y,z\}(a) = 0 = \{y,z\}(b), 
\]
completing the proof.
\end{proof}

\begin{remark} \rm
As was noted above, the separated boundary conditions \eqref{sep}, 
in particular the boundary conditions $y(a)=y(b)=0$ are self adjoint.
The ``periodic'' boundary conditions
\[ 
e_0(b,a)y(a)=e_0(a,b)y(b), \quad e_0(b,a)y^{[1]}(a) = e_0(a,b)y^{[1]}(b) 
\]
which are non-separated, are also self adjoint.
\end{remark}

We will now construct Green's function for the self-adjoint (separated) BVP
\begin{gather}
   -D^{\alpha}\left[p_1 D^{\alpha}y\right](t) + p_0(t)y(t) = g(t) \label{bvp1} \\
  \eta y(a) - \beta y^{[1]}(a) = 0, \quad \gamma y(b) +
  \delta y^{[1]}(b) =0, \label{cond}
\end{gather}
where $\eta, \beta, \gamma, \delta$ are real numbers such that 
$|\eta| + |\beta| \neq  0 $,
$|\gamma|+|\delta| \neq  0$.

\begin{remark}\rm
The minus sign on the left hand side of \eqref{bvp1}, as well as in the first 
boundary condition of \eqref{cond}, is taken so that the positivity of 
Green's function can be formulated in terms of $p_1(t)>0$, $p_0(t)\ge 0$, 
for $\eta, \beta, \gamma, \delta \ge 0$.
\end{remark}

Denote by $\phi$ and $\psi$ the solutions of the corresponding homogeneous 
equation
\begin{equation} \label{bvp0}
  -D^{\alpha}\left[p_1 D^{\alpha}y\right](t) + p_0(t)y(t) = 0, \quad t\in[a,b],
\end{equation}
under the initial conditions
\begin{gather}
  \phi(a) = \beta, \quad \phi^{[1]}(a)=\eta, \label{cond1} \\
  \psi(b) = \delta, \quad \psi^{[1]}(b) = -\gamma, \label{cond2}
\end{gather}
so that $\phi$ and $\psi$ satisfy the first and second boundary conditions 
in \eqref{cond}, respectively.  From Theorem \ref{wrone2thm} we have that 
the Wronskian of $\phi$ and $\psi$ satisfies
\[ 
W_t(\phi, \psi) = \phi(t)\psi^{[1]}(t) - \phi^{[1]}(t)\psi(t) 
= e^2_0(t,a) W_a(\phi, \psi); 
\]
evaluating this expression at $t=a$, $t=b$, and using the boundary 
conditions \eqref{cond1}, \eqref{cond2} yields
\[
 W_a(\phi, \psi) = \beta\psi^{[1]}(a) - \eta\psi(a) 
= \frac{-\gamma\phi(b) - \delta\phi^{[1]}(b)}{e^2_0(b,a)}. 
\]
Additionally, $W_a(\phi, \psi)\neq  0$ if and only if the homogeneous
equation \eqref{bvp0} has only the trivial solution satisfying the boundary 
conditions \eqref{cond}.

\begin{theorem}\label{2ptbvpthm}
If $W_a(\phi, \psi)\neq  0$, then the nonhomogeneous BVP \eqref{bvp1},
\eqref{cond}, has a unique solution $y$ for which the formula
\[
 y(t) = \int_a^b G(t,s)g(s)d_{\alpha}s, \quad t\in[a,b] 
\]
holds, where the function $G(t,s)$ is given by
\[ 
G(t,s) = \frac{-1}{W_s(\phi,\psi)}
\begin{cases} \phi(t)\psi(s): & a\le t\le s\le b, \\ 
\phi(s)\psi(t): & a\le s\le t\le b, 
\end{cases} 
\]
and this $G(t,s)$ is Green's function of the BVP \eqref{bvp1}, \eqref{cond}. 
Furthermore the Green function satisfies the property
$e_0(s,t)G(t,s) = e_0(t,s)G(s,t)$ for all $t,s\in[a,b]$.
\end{theorem}

\begin{proof}
Since $W_a(\phi, \psi)\neq  0$, the solutions $\phi$ and $\psi$ of the
 homogeneous equation \eqref{bvp0} are linearly
independent.  Thus the general solution of the nonhomogeneous equation 
\eqref{bvp1} has the variation of constants form
\begin{equation}\label{y1}
  y(t) = c_1\phi(t) + c_2\psi(t) 
+ \int_a^t \frac{\phi(t)\psi(s)-\phi(s)\psi(t)}{W_s(\phi, \psi)}g(s) d_{\alpha}s,
\end{equation}
where $c_1$ and $c_2$ are real constants. We now construct $c_1$ and $c_2$ 
so that the function $y$ satisfies the boundary conditions \eqref{cond}. 
Using \eqref{y1} we have
\begin{equation} \label{y2}
  y^{[1]}(t) = c_1\phi^{[1]}(t) + c_2\psi^{[1]}(t) 
+ \int_a^t \frac{\phi^{[1]}(t)\psi(s)-\phi(s)\psi^{[1]}(t)}{W_s(\phi, \psi)}g(s) 
d_{\alpha}s.
\end{equation}
Consequently,
\begin{gather*}
 y(a) = c_1\phi(a) + c_2\psi(a) = c_1\beta + c_2\psi(a), \\
 y^{[1]}(a) = c_1\phi^{[1]}(a) + c_2\psi^{[1]}(a) = c_1\eta + c_2\psi^{[1]}(a).
\end{gather*}
Substituting these values of $y(a)$ and $y^{[1]}(a)$ into the first condition 
of \eqref{cond} we have
\[ 
c_2\left(\eta\psi(a) - \beta\psi^{[1]}(a)\right) = 0. 
\]
On the other hand, using the definition of $W_a(\phi, \psi)$,
\[ 
\eta\psi(a)-\beta\psi^{[1]}(a) = -W_a(\phi, \psi) \neq  0.
\]
Consequently $c_2 = 0$, and \eqref{y1}, \eqref{y2}, take the form
\begin{gather*}
  y(t) = c_1\phi(t) + \int_a^t \frac{\phi(t)\psi(s)-\phi(s)\psi(t)}
 {W_s(\phi, \psi)}g(s) d_{\alpha}s, \\
  y^{[1]}(t) = c_1\phi^{[1]}(t) + \int_a^t 
 \frac{\phi^{[1]}(t)\psi(s)-\phi(s)\psi^{[1]}(t)}{W_s(\phi, \psi)}g(s) d_{\alpha}s,
\end{gather*}
respectively. Hence
\begin{gather*}
 y(b) = c_1\phi(b) + \int_a^b 
 \frac{\phi(b)\psi(s)-\phi(s)\psi(b)}{W_s(\phi, \psi)}g(s) d_{\alpha}s, \\
 y^{[1]}(b) = c_1\phi^{[1]}(b) + \int_a^b 
 \frac{\phi^{[1]}(b)\psi(s)-\phi(s)\psi^{[1]}(b)}{W_s(\phi, \psi)}g(s) d_{\alpha}s.
\end{gather*}
Substituting these values into the second condition of \eqref{cond} yields
\[ 
c_1\left(\gamma\phi(b) + \delta\phi^{[1]}(b)\right) 
+ \int_a^b \frac{\left(\gamma\phi(b) + \delta\phi^{[1]}(b)\right)}
{W_s(\phi, \psi)}\psi(s)g(s)d_{\alpha}s = 0. 
\]
Again using the definition of $W_a(\phi,\psi)$,
\[ 
\gamma\phi(b) + \delta\phi^{[1]}(b) = -e^2_0(b,a)W_a(\phi, \psi) \neq  0.
 \]
Hence
\[ 
c_1 = -\int_a^b \frac{\psi(s)}{W_s(\phi, \psi)}g(s)d_{\alpha}s. 
\]
Thus $y$ has the desired form, and $G(t,s)$ satisfies 
$e^2_0(s,a)G(t,s)=e^2_0(t,a)G(s,t)$; this is equivalent to 
$e_0(s,t)G(t,s)=e_0(t,s)G(s,t)$, completing the proof.
\end{proof}

\begin{corollary}[Green's Function for the Two-Point Problem]\label{oldcor6}
If
\[ 
d:=\beta\gamma+\eta\delta+\eta\gamma\int_a^b\frac{d_{\alpha}\tau}{p_1(\tau)}\neq  0,
\]
then the nonhomogeneous BVP \eqref{bvp1}, \eqref{cond} with $p_0\equiv 0$ 
has a unique solution $y$ for which the formula
\[ 
y(t) = \int_a^b G(t,s)g(s)d_{\alpha}s, \quad t\in[a,b] 
\]
holds, where the function $G(t,s)$ is given by
\[ 
G(t,s) = \frac{e_0(t,s)}{d}\begin{cases}
 \big[\beta+\eta\int_a^t\frac{d_{\alpha}\tau}{p_1(\tau)}\big]
\big[\delta+\gamma\int_s^b\frac{d_{\alpha}\tau}{p_1(\tau)}\big]:
      & a\le t\le s\le b, \\[4pt]
 \big[\beta+\eta\int_a^s\frac{d_{\alpha}\tau}{p_1(\tau)}\big]
\big[\delta+\gamma\int_t^b\frac{d_{\alpha}\tau}{p_1(\tau)}\big]:
      & a\le s\le t\le b. \end{cases} 
\]
This $G(t,s)$ is Green's function of the BVP \eqref{bvp1}, \eqref{cond} 
with $p_0\equiv 0$.
\end{corollary}

\begin{proof}
Assume
\[ 
d:=\beta\gamma+\eta\delta+\eta\gamma\int_a^b\frac{d_{\alpha}\tau}{p_1(\tau)}\neq  0.
\]
Note that
\[ 
\phi(t)=\eta  e_0(t,a)\int_a^t \frac{d_{\alpha}\tau}{p_1(\tau)}
+\beta  e_0(t,a), \quad \psi(t)
=\gamma  e_0(t,b) \int_t^b \frac{d_{\alpha}\tau}{p_1(\tau)}+\delta e_0(t,b) 
\]
satisfy \eqref{bvp0} with $p_0\equiv 0$, along with conditions \eqref{cond1} 
and \eqref{cond2}. The result then follows from Theorem \ref{2ptbvpthm}.
\end{proof}

\begin{corollary}[Green's Function for the Conjugate Problem]
 \index{Green's function!conjugate problem}
Green's function for the conjugate boundary value problem
\begin{equation}\label{cojubvp}
  -D^{\alpha}\left[p D^{\alpha}y\right](t)=0,\quad y(a)=y(b)=0
\end{equation}
is given by
\[ 
G(t,s) = \frac{e_0(t,s)}{\int_a^{b} \frac{1}{p(\tau)}d_{\alpha}\tau}
\begin{cases}
  \int_a^t \frac{1}{p(\tau)} d_{\alpha}\tau \int_s^b \frac{1}{p(\tau)} 
 d_{\alpha}\tau &: a\le t\le s\le b, \\[4pt]
  \int_a^s \frac{1}{p(\tau)} d_{\alpha}\tau \int_t^b \frac{1}{p(\tau)} 
d_{\alpha}\tau &: a\le s\le t\le b. 
\end{cases} 
\]
\end{corollary}

\begin{proof}
By Theorem \ref{posdefdisconj}, the BVP \eqref{cojubvp} has only the trivial 
solution. Due to the boundary conditions $y(a)=y(b)=0$, we see that 
$\eta=\gamma=1$ and $\beta=\delta=0$ in \eqref{cond1} and \eqref{cond2}. 
The result then follows from Corollary \ref{oldcor6}.
\end{proof}

\begin{corollary}[Green's Function for the Focal Problem]\label{ggff}
Green's function for the focal boundary value problem
\begin{equation}\label{fokalbvp}
  -D^{\alpha}\left[p D^{\alpha}y\right](t)=0, \quad y(a) = D^{\alpha}y(b)=0
\end{equation}
is given by
\[ 
G(t,s) = e_0(t,s)\begin{cases}
  \int_a^t\frac{1}{p(\tau)} d_{\alpha}\tau   &: a\le t\le s\le b, \\[4pt]
	\int_a^{s}\frac{1}{p(\tau)} d_{\alpha}\tau &: a\le s\le t\le b. 
\end{cases} 
\]
\end{corollary}

\begin{proof}
The boundary conditions imply $\eta=\delta=1$ and $\beta=\gamma=0$ 
in \eqref{cond1} and \eqref{cond2}. The result again follows from 
Corollary \ref{oldcor6}.
\end{proof}

\section{Fourth-order proportional equations}

In equation \eqref{Ly} let $n=2$, and consider the fourth order expression
\begin{equation} \label{Ly1}
  Ly(t) = (D^{\alpha})^2\left[p_2 (D^{\alpha})^2y\right](t)
 - D^{\alpha}\big[p_1D^{\alpha}y\big](t) + p_0(t)y(t).
\end{equation}
For $y \in \mathbb{D}$ we have by definition
\begin{gather*}
  y^{[0]} = y, \quad y^{[1]} = D^{\alpha}y, \quad  
 y^{[2]} = p_2(D^{\alpha})^2y, \\
  y^{[3]} = p_1D^{\alpha}y - D^{\alpha}\big[y^{[2]}\big], \quad 
 y^{[4]} = p_0y - D^{\alpha}\big[y^{[3]}\big].
\end{gather*}
It follows that
\[
 Ly = y^{[4]}.
\] 
In this case, for $y,z \in \mathbb{D}$ the Lagrange bracket of $y$ and $z$ is
\[ 
\{y,z\}(t) = y(t)z^{[3]}(t) - y^{[3]}(t)z(t) + y^{[1]}(t)z^{[2]}(t) 
- y^{[2]}(t)z^{[1]}(t), 
\]
and the Lagrange identity
\[ 
\left(zLy - yLz\right)(t) = e_0(t,a)D^{\alpha}
\big[\frac{\{y,z\}}{e_0(\cdot,a)}\big](t) 
\]
holds. Using the same techniques as in previous sections, for each 
function $y \in \mathbb{D}$ we have the following system of relations at 
$t \in \mathcal{I}$,
\begin{gather*}
  D^{\alpha}\big[y^{[0]}\big] = y^{[1]}, \quad
 D^{\alpha}\big[y^{[1]}\big] = \frac{y^{[2]}}{p_2}, \\
  D^{\alpha}\big[y^{[2]}\big] = p_1 y^{[1]} - y^{[3]}, \quad  
D^{\alpha}\big[y^{[3]}\big] = p_0 y - Ly.
\end{gather*}
Thus the equation $Ly(t) = g(t)$ for $t\in\mathcal{I}$ where 
$g:\mathcal{I}\to\mathbb{R}$ is a continuous function is equivalent to the 
first order system
\[ 
D^{\alpha}\vec{y}(t) = A(t)\vec{y}(t) + \vec{g}(t), \quad t\in\mathcal{I}, 
\]
where
\[ 
\vec{y} = \begin{pmatrix} y^{[0]} \\ y^{[1]} \\ y^{[2]} \\ y^{[3]} \end{pmatrix},
    \quad \vec{g} = \begin{pmatrix} 0 \\ 0 \\ 0 \\ -g \end{pmatrix},
    \quad A = \begin{pmatrix}  0 & 1 & 0 & 0 \\ 0 & 0 & \frac{1}{p_2} & 0 \\ 0 & p_1 & 0 & -1 \\
  p_0 & 0 & 0 & 0 \end{pmatrix}. 
\]
Together with the expression \eqref{Ly1}, take boundary conditions of the form
\begin{equation} \label{bnd4}
 e_0(b,a)\sum_{j=1}^4 \eta_{ij}y^{[j-1]}(a) 
+ e_0(a,b)\sum_{j=1}^4 \beta_{ij}y^{[j-1]}(b)=0, \quad 1 \le i \le 4.
\end{equation}
These boundary conditions are self adjoint if and only if
\begin{align*}
  0 &=  e_0(a,b)\Big\{y(b)z^{[3]}(b) - y^{[3]}(b)z(b) + y^{[1]}(b)z^{[2]}(b)
     - y^{[2]}(b)z^{[1]}(b)\Big\} \\
  &\quad -e_0(b,a)\Big\{y(a)z^{[3]}(a) - y^{[3]}(a)z(a) + y^{[1]}(a)z^{[2]}(a)
     - y^{[2]}(a)z^{[1]}(a)\Big\}
\end{align*}
for all $y,z \in \mathbb{D}[a,b]$. As is the case when $\alpha=1$, 
it follows that by joining any one of the four types of conditions
\begin{itemize}
   \item[(i)] $y(a) = y^{[1]}(a) = 0,$
   \item[(ii)] $y^{[1]}(a) = y^{[3]}(a) = 0$,
   \item[(iii)] $y(a) = y^{[2]}(a) = 0$,
   \item[(iv)] $y^{[2]}(a) = y^{[3]}(a) = 0$,
\end{itemize}
with any one of the four types of conditions
\begin{itemize}
   \item[(i)] $y(b) = y^{[1]}(b) = 0,$
   \item[(ii)] $y^{[1]}(b) = y^{[3]}(b) = 0$,
   \item[(iii)] $y(b) = y^{[2]}(b) = 0$,
   \item[(iv)] $y^{[2]}(b) = y^{[3]}(b) = 0$,
\end{itemize}
yields the sixteen types of self-adjoint boundary conditions.  
The ``periodic'' boundary conditions
\begin{gather*}
 e_0(b,a)y(a) = e_0(a,b)y(b), \quad e_0(b,a)y^{[1]}(a) = e_0(a,b)y^{[1]}(b), \\
 e_0(b,a)y^{[2]}(a) = e_0(a,b)y^{[2]}(b), \quad e_0(b,a)y^{[3]}(a) = e_0(a,b)y^{[3]}(b),
\end{gather*}
are also self adjoint.

\begin{example} \rm
The Green function $G(t,s)$ for
\[ 
(D^{\alpha})^2[p(D^{\alpha})^2y](t) = 0, \quad t\in[a,b], 
\]
with the boundary conditions
\[ 
y(a) = y^{[1]}(a) = y^{[2]}(b) = y^{[3]}(b) = 0 
\]
is given by
\[ 
G(t,s)=\begin{cases}
      e_0(t,s)\int_a^t \big(\int_a^\tau \frac{h_1(s,\xi)}{p(\xi)} d_{\alpha}\xi \big)
 d_{\alpha}\tau &: a\le t\le s\le b, \\[4pt]
      e_0(t,s)\int_a^s \big(\int_a^\tau \frac{h_1(t,\xi)}{p(\xi)} d_{\alpha}\xi \big) 
 d_{\alpha}\tau &: a\le s\le t\le b, 
\end{cases} 
\]
where $h_1(v,\xi):=\int_{\xi}^v 1 \, d_{\alpha}w$.
\end{example}

\begin{thebibliography}{00}

\bibitem{AlBoRi} C. Ahlbrandt, M. Bohner, J. Ridenhour;
Hamiltonian systems on time scales,
\emph{J. Math. Anal. Appl.}, 250 (2000), 561--578.

\bibitem{and} D. R. Anderson;
 Second-order self-adjoint differential equations using a proportional-derivative controller,
\emph{Communications Appl. Nonlinear Anal.}, Volume 24 (2017), Number 1, 17--48.

\bibitem{au} D. R. Anderson, D. J. Ulness;
Newly defined conformable derivatives,
{\em Adv. Dyn. Sys. Appl.} Vol. 10, No. 2 (2015), pp. 109--137.

\bibitem{AtiGus} F. M. Atici, G. Sh. Guseinov;
On Green's functions and positive solutions for boundary value problems 
on time scales, \emph{J. Comput. Appl. Math.,} 141 (2002), 75--99.

\bibitem{cobber} G. Bryan, L. LeGare;
 The calculus of proportional $\alpha$-derivatives, 
\emph{Rose-Hulman Undergraduate Math. J.}, Vol. 18: Iss. 1, Article 2 (2017).

\bibitem{caputo} M. Caputo, M. Fabrizio;
 A new definition of fractional derivative without singular kernel, 
\emph{Progr. Fract. Differ. Appl.} 1 (2015), No. 2, 73--85.

\bibitem{caputo2} M. Caputo, M. Fabrizio;
3D Memory Constitutive Equations for Plastic Media, 
\emph{J. Engineering Mechanics}, Vol. 143, Issue 5 (May 2017).

\bibitem{cod} E. A. Coddington, N. Levinson;
\emph{Theory of Ordinary Differential Equations}, McGraw-Hill, New York, 1955.

\bibitem{ding} Dawei Ding, Xiaoyun Zhang, Jinde Cao, Nian Wang, Dong Liang;
Bifurcation control of complex networks model via PD controller, 
\emph{Neurocomputing} 175 (2016), 1--9.

\bibitem{He} J. Henderson;
Multiple solutions for $2m$th order Sturm-Liouville boundary value problems 
on a measure chain,
\emph{J. Difference Equations and Appl.}, 6 (2000), 427--429.

\bibitem{Hilscher} R. Hilscher;
Linear Hamiltonian systems on time scales: positivity of quadratic functionals,
\emph{Math. Comput. Modelling}, 32 (2000), 507--527.

\bibitem{KelPet} W. Kelley, A. Peterson;
\emph{The Theory of Differential Equations: Classical and Qualitative},
 Prentice Hall, New Jersey, 2004.

\bibitem{Nai} M. A. Naimark;
\emph{Linear Differential Operators}, Part 2. Ungar, New York, 1968.

\bibitem{Reid} W. T. Reid;
\emph{Ordinary Differential Equations}, Wiley, New York, 1971.

\bibitem{zulf} F. Zulfeqarr, A. Ujlayan, P. Ahuja;
 A new fractional derivative and its fractional integral with some applications,
 arXiv preprint (2017) arXiv:1705.00962.

\end{thebibliography}

\end{document}
