\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2018 (2018), No. 08, pp. 1--13.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu}
\thanks{\copyright 2018 Texas State University.}
\vspace{8mm}}

\begin{document}
\title[\hfilneg EJDE-2018/08\hfil Sturm-Liouville problems]
{Bounded variation solutions to \\ Sturm-Liouville problems}

\author[J. Gulgowski \hfil EJDE-2018/08\hfilneg]
{Jacek Gulgowski}

\address{Jacek Gulgowski \newline
Institute of Mathematics,
Faculty of Mathematics,
Physics and Informatics,
University of Gda\'nsk, 80-308 Gda\'nsk, Poland}
\email{dzak@mat.ug.edu.pl}

\dedicatory{Communicated by Pavel Drabek}

\thanks{Submitted November 6, 2017. Published January 6, 2018.}
\subjclass[2010]{34B24, 26A45, 45A05}
\keywords{Boundary value problem; bounded variation; Green function;
\hfill\break\indent Jordan variation; Sturm-Liouville problem}

\begin{abstract}
 In this article we consider singular Sturm-Liouville problems whose
 right-hand side is a function of bounded Jordan variation.
 We present necessary and sufficient conditions for all solutions 
 to be of bounded Jordan variation.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{example}[theorem]{Example}
\allowdisplaybreaks

\section{Regular and singular Sturm-Liouville problems}

Let $I=[0,1]$ denote the closed unit interval.
As usual, $L^p(a,b)$ will denote the space of all equivalence classes
(of almost everywhere equality) of the real-valued functions whose $p$-th
power is Lebesgue-integrable on $(a,b)$. By $L^p_{loc}(a,b)$ we will denote
the space of functions belonging to $L^p(a',b')$ for all $[a',b']\subset (a,b)$.
The space of functions absolutely continuous in the closed interval $[a,b]$
will be denoted by $AC[a,b]$, while the set of all functions
$x\colon (a,b)\to\mathbb{R}$ whose restrictions belong to all spaces
$AC[a',b']$, for $[a',b']\subset (a,b)$, will be denoted by $AC_{loc}(a,b)$.
The Jordan variation of the function $x\colon[a,b]\to\mathbb{R}$ will be denoted by
$\bigvee_a^b x$ and the space of all functions of bounded Jordan variation
in the interval $[a,b]$ will be denoted by $BV([a,b])$. By $\chi_A$ we will
denote the characteristic function of the set $A\subset I$.

Let us now have a look at the classical linear Sturm-Liouville problem
\begin{equation}\label{eq:lin:sl}
\begin{gathered}
 -(p(t)x'(t))'+ q(t)x(t) = h(t) \quad \text{for a.e. } t\in(0,1) \\
 x(0)\sin\eta - p(0)x'(0)\cos\eta = 0, \\
 x(1)\sin\zeta + p(1)x'(1)\cos\zeta = 0,
\end{gathered}
\end{equation}
where $\eta,\zeta\in [0,\frac{\pi}{2}]$.
The solution to this problem is such a function $x:I\to\mathbb{R}$
that both $x'(t)$ and $(p(t)x'(t))'$ exist a.e. in $I$, $x, px'\in AC(I)$,
and the boundary conditions are satisfied.

Assume now that $p\in C^1(I)$, $q\in C(I)$ and $p(t)>0$ for $t\in I$.
In this case we may talk of the solutions to the problem \eqref{eq:lin:sl}
in a classical sense (i.e. belonging to $C^2(I)$ when $h\in C(I)$ and
to $C^1(I)$ with $x'\in AC(I)$ when $h\in L^1(I)$) -- see \cite{CL,H}.
Such solutions  are, of course, functions of bounded Jordan variation.
But the situation may change when the assumptions on coefficients $p$ and $q$
are released -- as we will further observe.

A more general approach to Sturm\--Liouville boundary value problems
allows for such $p, q$ that $1/p,q \in L^1(I)$, with the function $r = 1/p$
allowed to change sign and even to attain $0$ on a set of positive measure.
In this last case we should think of $x'(t) = 0$ whenever $r(t) = 0$.
The assumptions of this kind still keep us in the orbit of the so-called
\emph{regular Sturm\--Liouville problems} but we may also
release even these, leading to the so-called
\emph{singular Sturm\--Liouville problems} with Fokker-Planck,
Bessel's or Whittaker problems as an example (see \cite[Chapter 2.5]{Duffy}).
In this case it may happen that the solutions $x$ are not defined in the
endpoints $a\in\{0,1\}$, hence the boundary conditions should be understood
in a different way. This issue will be discussed in detail later.

In case of regular Sturm\--Liouville problems the question whether a solution
$x$ is of bounded Jordan variation seems to have a trivially positive answer
(since $x\in AC(I)$), but actually this may be stated only when $x'\in L^1(I)$,
which is not always the case! We should note that in case of singular problems we cannot assume that $x$ is absolutely continuous in the closed interval $I$. Hence there may exist a  solution $x$ of unbounded Jordan variation.

\begin{example} \label{examp1} \rm
Let $p:I\to\mathbb{R}$ be given by $p(t) = (-1)^{n+1}\frac{1}{n}$ for
$t\in [t_{n-1},t_n]$, where $t_ n = \frac{6}{\pi^2}\sum_{k=1}^n \frac{1}{k^2}$,
 $n=1,2,\dots $. Then $r = 1/p\not \in L^1(I)$ but $r\in L^1([0,a])$
for each $a\in[0,1)$. Let us look at the boundary value problem
\begin{equation}
\begin{gathered}
(p(t)x'(t))' = 1 \quad  \text{for a.e. } t\in I \\ x(0) = x(1) = 0\,.
\end{gathered}
\end{equation}
As it may be easily observed, we have
\[
x'(t) = r(t)(t - C),
\]
for some constant $C\in\mathbb{R}$ and $t\in [0,1)$.

Since $r(\tau)(\tau-C) \in L^1(0,t)$ for any $t\in[0,1)$ we can see that,
 having the boundary condition $x(0)=0$,
\[
x(t)  = \int_0^t \tau r(\tau)\mathrm{d}\tau - C \int_0^t  r(\tau)\mathrm{d}\tau.
\]
Let us denote $R(t) = \int_0^t  r(\tau)\mathrm{d}\tau$ and
$R_0(t) = \int_0^t \tau r(\tau)\mathrm{d}\tau$, then we are looking for
a function $x(t) = R_0(t) - C R(t)$.

Let us now assume that $t=t_{N}$. We will find the values $R(t_N)$ and $R_0(t_N)$.
\[
R(t_N) = \sum_{k=1}^N \int_{t_{k-1}}^{t_k} (-1)^{k+1}k\mathrm{d}\tau 
= \frac{6}{\pi^2}\sum_{k=1}^N  (-1)^{k+1}\frac{1}{k}.
\]
Since the series $\sum_{k=1}^{+\infty} (-1)^{k+1}\frac{1}{k}$ converges
 to $\ln 2$, it follows that $\lim_{N\to +\infty} R(t_N) = \frac{6}{\pi^2}\ln 2$.
Similarly,
\[
R_0(t_N) = \sum_{k=1}^N \int_{t_{k-1}}^{t_k} (-1)^{k+1}k\tau\mathrm{d}\tau 
= \frac{6}{\pi^2}\sum_{k=1}^N  (-1)^{k+1}\frac{1}{k}\frac{t_{k-1}+t_k}{2}.
\]
Since the sequence $\bigl( \frac{1}{2}(t_{k-1}+t_k)\bigr)_{k\in\mathbb{N}}$ 
is monotone and bounded we  conclude, by Abel's convergence test, that  
$\lim_{N\to +\infty} R_0(t_N) = r_0$ exists. We  also observe that 
$r_0 < \frac{6}{\pi^2}\ln 2$. This is a consequence of the estimate
\begin{align*}
r_0 &= \frac{6}{\pi^2}\sum_{k=1}^{+\infty} \bigl( \frac{1}{2k-1}\frac{t_{2k-2}
+t_{2k-1}}{2} - \frac{1}{2k}\frac{t_{2k-1}+t_{2k}}{2} \bigr) \\
&<
\frac{6}{\pi^2}\sum_{k=1}^{+\infty} \bigl( \frac{1}{2k-1}\frac{t_{2k-2}
 +t_{2k-1}}{2} - \frac{1}{2k}\frac{t_{2k-2}+t_{2k-1}}{2} \bigr) \\
&=\frac{6}{\pi^2}\sum_{k=1}^{+\infty} \frac{t_{2k-2}+t_{2k-1}}{2}
 \bigl( \frac{1}{2k-1} - \frac{1}{2k}\bigr)  \\
&< \frac{6}{\pi^2}\sum_{k=1}^{+\infty} \bigl( \frac{1}{2k-1} - \frac{1}{2k}\bigr) 
= \frac{6}{\pi^2}\ln 2.
\end{align*}
As we can see in case $t\in(t_{N-1},t_{N})$ the values of  $R(t)$ and $R_0(t)$ 
lay between $R(t_{N-1})$ and $R(t_N)$ or $R_0(t_{N-1})$ and $R_0(t_N)$ respectively, 
so
$\lim_{t\to 1^-} R(t) = \frac{6}{\pi^2}\ln 2$ and 
$\lim_{t\to 1^-} R_0(t) = r_0$. This means that
$\lim_{t\to 1^-} x(t) = r_0 - C\frac{6}{\pi^2}\ln 2$, and with the appropriate
value of the constant $C=C_0 = \frac{\pi^2 r_0}{6\ln 2}$ we may say that $x(1) = 0$.

Let us now estimate the difference
\[
|x(t_n) - x(t_{n-1})| = \frac{6}{\pi^2}\Bigl| 
(-1)^{n+1}\frac{1}{n}\frac{t_n + t_{n-1}}{2} -C_0  (-1)^{n+1}\frac{1}{n}\Bigr| =
\frac{6}{n\pi^2}\Bigl|\frac{t_n+t_{n-1}}{2}-C_0\Bigr|.
\]
Since $\lim_{n\to +\infty}\frac{t_n+t_{n-1}}{2} = 1 \neq C_0$,
it follows that $\sum_{n=1}^{+\infty} |x(t_n) - x(t_{n-1})|= +\infty$,
which shows that $x$ is not a function of bounded Jordan variation.
\end{example}


In case of the singular Sturm\--Liouville problems the boundary conditions 
as given in \eqref{eq:lin:sl} may turn out to be not reasonable any more. 
For a detailed review of different situations we suggest the monograph by 
Zettl (see \cite{Z}). Also the review paper \cite{Ev} would be of much help here.

The most general form of the Sturm\--Liouville equation is given as the spectral 
problem
\begin{equation}\label{sl:eq:spectral}
-(p(t)x'(t))' + q(t)x(t) = \lambda w(t) x(t).
\end{equation}
We will refer to this general theory in a special case of $w(t) \equiv 1$ 
and $\lambda = 0$. We will also limit ourselves to the real-valued functions 
belonging to the domain $D$, which is defined as the natural domain of the 
 differential operator
\begin{equation}\label{diffop:map}
x \mapsto -(p(t)x'(t))'+ q(t)x(t),
\end{equation}
i.e., it is given as the set $D$ of all such functions $x:I\to\mathbb{R}$ that both
$x, px'\in AC_{loc}(I)$. Actually, we will consider the so-called 
\emph{maximal domain} of the differential operator \eqref{diffop:map} 
$D_{\rm max} = D\cap L^2(I)$ (see \cite{Z}). This is the very natural 
assumption to take when we are going to look at the problem from the operator 
theory perspective.

Hence, we will look at the linear differential equation
\begin{equation}\label{sl:eq:zero}
-(p(t)x'(t))' + q(t)x(t) = 0
\end{equation}
in the search for solutions $x\in D_{\rm max}$.

The interesting issues are related to the behaviour of the differential 
operator near the endpoints of the interval $I$, i.e. in the neighbourhood of 
$0$ and $1$.  There is a standard classification of endpoints 
(see \cite[Definition 7.3.1]{Z}), which dates back to 1910 and the works of 
Weyl. Let us assume that the endpoint $a\in \{0,1\}$. Then we have the following:
\begin{itemize}
\item \emph{Regular endpoint} if there exists such an open interval
 $J\subset I$ with $a\in \overline{J}$ such that $q,r \in L^1(J)$;

\item \emph{Singular endpoint} if there exists such an open interval $J\subset I$ 
with $a\in \overline{J}$, $\overline{J}\neq I$ such that 
$\int_J |q(t)| + |r(t)|\mathrm{d}t = +\infty$;

\item the singular point is called \emph{Limit-point} (denoted later by LP) 
when there exists at least one solution $x_0$ to the problem \eqref{sl:eq:zero} 
satisfying $\int_J |x_0(t)|^2 \mathrm{d}t = +\infty$ for certain  open interval 
$J\subset I$, $\overline{J}\neq I$ satisfying $a\in \overline{J}$;

\item the singular point is called \emph{Limit-circle} (denoted later by LC) 
when all solutions to the problem  \eqref{sl:eq:zero} belong to $
L^2(J)$ for an open interval $J\subset I$, $a\in \overline{J}$, $\overline{J}\neq I$;
\end{itemize}

\begin{remark} \label{rmk1} \rm
Since we look for locally AC solutions the classification given above does 
not depend on the selection of the interval $J$, as long as $\overline{J}\neq I$.
\end{remark}

\begin{remark} \label{rmk2} \rm
The original definitions of LP and LC taken from \cite{Z} or \cite{Ev} 
refer to the more general spectral problem \eqref{sl:eq:spectral} and 
formally depend on $\lambda$. But it may actually be proved that it does 
not depend on $\lambda$ (cf. \cite[Remark 5.1]{Ev}) and the classification 
given above is the same as the one given in \cite{Z}.
\end{remark}

The most general setting of boundary conditions referring
to \eqref{sl:eq:spectral} requires the notion of the Lagrange
 sesquilinear form (cf. \cite[Remark 8.2.1]{Z}), which is given by
\[
[f,g](t) = f(t) (pg')(t) - g(t) (pf')(t) = p(t)(f(t)g'(t) - f'(t)g(t)),
\]
for all $f,g\in D_{\rm max}$. It may be shown (see \cite[Lemma 10.2.3]{Z}) 
that for any $f,g\in D_{\rm max}$ both limits
\[
\lim_{t\to 0^+} [f,g](t) \quad \text{and} \quad \lim_{t\to1^-} [f,g](t)
\]
exist and are finite. From now on, when writing $[f,g](0)$ and $[f,g](1)$ 
we will refer to the appropriate limit.

The appropriate selection of function $g\in D_{\rm max}$ gives rise to the 
boundary condition depending on $[g,x](1)$ or $[g,x](0)=0$. 
The details depend here on the endpoint classification.
One should, first of all, observe that in case of the LP endpoint $a$ 
there is no need to specify the  boundary condition at all, since for all 
functions $g,x\in D_{\rm max}$ there is
$[g,x](a) =0$ (see \cite[Lemma 10.4.1]{Z}).
 In case of one regular or LC and one LP endpoint the boundary value problem 
may be given as
\begin{equation}\label{eq:lin:LP:NLP}
\begin{gathered}
-(p(t)x'(t))' + q(t)x(t) = h(t) \quad \text{for a.e. } t\in(0,1) \\
 [g,x](a)  = 0,
\end{gathered}
\end{equation}
where $a\in\{0,1\}$ is not a LP endpoint and $g\in D_{\rm max}$.

When both endpoints are regular or LC,
\begin{equation}\label{eq:lin:NLP:NLP}
\begin{gathered}
-(p(t)x'(t))' + q(t)x(t) = h(t) \quad \text{ for a.e. } t\in(0,1) \\
  [g_1,x](0) - [g_2,x](0) = 0, \\
  [g_1,x](1) - [g_2,x](1) = 0,
\end{gathered}
\end{equation}
for certain selection of functions $g_1, g_2\in D_{\rm max}$.

From now on we  refer to the most general version of
 Sturm\--Liouville boundary value problem as the one given by
\begin{equation}\label{eq:SL:most:general}
\begin{gathered}
-(p(t)x'(t))' + q(t)x(t) = h(t) \quad \text{for a.e. } t\in(0,1) \\
[g_1,x](0) - [g_2,x](0) = 0  \quad  \text{if 0 is not an LP endpoint}, \\
[g_1,x](1) - [g_2,x](1) = 0  \quad \text{if 1 is not an LP endpoint},
\end{gathered}
\end{equation}
within the convention that if there is only one LP endpoint then we take $g_2=0$.

It may be shown (cf. \cite[Chapter 10, Section 4.1]{Z}) that, in case of the 
regular endpoint,  this general form of boundary conditions covers the case 
of classical boundary conditions \eqref{eq:lin:sl}.

\begin{remark} \label{rmk3} \rm
We should note that the boundary conditions in the form 
$ [g_1,x](a) - [g_2,x](a) = 0$, where $a\in\{0,1\}$ may also take the form 
$[g_i,x](a) = 0$, where $i\in\{1,2\}$. This is the case when the function 
$g_j$ for $j\neq i$ will satisfy  $g_j(t)=0$ in some open neighbourhood of $a$.
\end{remark}



\section{Green function}

The main tool used when solving the problem \eqref{eq:lin:sl} is the Green function, 
i.e. such $G:(0,1)\times (0,1)\to \mathbb{R}$ that $x$ is a solution to \eqref{eq:lin:sl}
 if and only if
\begin{equation}\label{eq:lin:op:sl}
x(t) = \int_0^1 G(s,t) h(s)\mathrm{d}s.
\end{equation}
This requires, of course, some care, at least in specifying the space which 
function $h$ belongs to, as well as the assumption that $0$ is not the 
eigenvalue of the problem \eqref{eq:lin:sl}. We are not going to be very 
strict about it at the moment and we will return to this issue later.

In the classical theory the Green function (see \cite[Chapter XI, Exercise 2.1]{H}) 
for the problem \eqref{eq:lin:sl} with $q\in C(I)$, $p\in C^1(I)$, $p> 0$, 
is given as
\begin{equation}\label{eq:green}
G(s,t) = \begin{cases} 
c^{-1}x_1(s)x_2(t) & 0\leq s \leq t \leq 1 \\
 c^{-1}x_1(t)x_2(s) & 0 \leq t \leq s \leq 1,
\end{cases}
\end{equation}
where $x_1,x_2:I\to \mathbb{R}$ are linearly independent solutions of \eqref{eq:lin:sl} 
satisfying the initial conditions
\[
x_1(0)\sin\eta - p(0)x_1'(0)\cos\eta = 0, \quad 
x_2(1)\sin\zeta + p(1)x_2'(1)\cos\zeta = 0
\]
and $c$ is the appropriate constant, $c\neq 0$.

Similar formula (see \cite{SLC}) may be given for the problem \eqref{eq:lin:sl}
with $r = 1/p \in L^1(I)$,  $p\in C^1((0,1),(0,+\infty))$ and $q=0$. 
Moreover, in the paper \cite[Theorem 3.1]{Duhoux_Tatra}
 the Green function $G:(0,1)\times(0,1)\to\mathbb{R}$ for the singular problem 
\eqref{eq:lin:sl} satisfying $r=1/p \in L^1(0,c)$ and $r=1/p\not\in L^1(c,1)$ 
for some $c\in(0,1)$, and such $q\in L^1_{loc}(0,1)$ that 
$q(t)\int_0^t r(s)\mathrm{d}s \in L^1(0,1)$ is given by \eqref{eq:green}, 
with the appropriate selection of the basic solutions $x_1$ and $x_2$.

General theorems on the existence of the Green function  for 
the problem \eqref{eq:SL:most:general}, with $r,q\in L^1_{loc}(I)$,  
are given for example in  \cite[Theorems 9.4.2, 10.10.1]{Z}. 
It is worth noting that there are, in general, no assumptions on the sign of 
$p$ and $q$.
The theorem given below is actually the reformulation of facts given in \cite{Z}, 
but we present it here, with a proof, for the sake of the clarity.

\begin{theorem}\label{Green:exist}
Assume the function $x=0$ is the only solution to \eqref{eq:SL:most:general}
 with $h=0$. Let $x_1,x_2 \in D_{\rm max}$ be two linearly independent 
solutions of the equation \eqref{sl:eq:zero} satisfying:
\begin{itemize}
\item[(a)] exactly two $LP$ endpoints: no further assumptions;

\item[(b)] exactly one non LP (i.e. regular or LC) endpoint $a$, the other
 endpoint being $LP$:
\begin{equation} [g_1,x_1](a)  = 0;
\end{equation}

\item[(c)]  both endpoints being regular or LC:
\begin{gather}\label{asmpt:bndr:0}
[g_1,x_1](0) -  [g_2,x_1](0)  = 0 , \\
\label{asmpt:bndr:1}
[g_1,x_2](1) - [g_2,x_2](1)  = 0,
\end{gather}
\end{itemize}
Then
\begin{itemize}
\item[(i)] the function $(0,1) \ni t\mapsto (p(t)x_1'(t))x_2(t) 
- (p(t)x_2'(t)) x_1(t)  = c$ is a nonzero constant;

\item[(ii)] the function $G:(0,1)\times(0,1)\to\mathbb{R}$ given by
\begin{equation}\label{Green:fun:def}
G(s,t) = \begin{cases} c^{-1}x_1(s)x_2(t) & 0< s \leq t < 1 \\
c^{-1}x_1(t)x_2(s) & 0 < t \leq s < 1
\end{cases}
\end{equation}
is the Green function for problem \eqref{eq:SL:most:general}. 
This means that for any $h \in L^2(I)$ there exists a unique solution 
$x\in D_{\rm max}$ to problem \eqref{eq:SL:most:general} given by
\begin{equation}\label{SL:gen:Green:form}
\begin{aligned}
x(t) &= \int_0^1 G(s,t)h(s)\mathrm{d}s  \\
&= c^{-1}x_2(t)\int_0^t x_1(s)h(s)\mathrm{d}s 
+   c^{-1}x_1(t)\int_t^1 x_2(s)h(s)\mathrm{d}s.
\end{aligned}
\end{equation}
\end{itemize}
\end{theorem}

\begin{proof}
Property (i) is a standard Wronskian-type argument, checked by a direct 
calculation. Since the function 
$t\mapsto (p(t)x_1'(t))x_2(t) - (p(t)x_2'(t)) x_1(t)$ 
is locally absolutely continuous it is enough to check that its derivative 
equals $0$ almost everywhere.

To prove property (ii) it is sufficient to observe that substitution of $x$ 
given by \eqref{SL:gen:Green:form} into our differential operator gives
$-(p(t)x'(t))' + q(t) x(t) = h(t)$.

We should now check that the function $x$ satisfies the boundary conditions. 
We will check if $[g_1,x](a) - [g_2,x](a)= 0$ for endpoint $a$ which is not 
LP. First, let us now observe that for $i=1,2$ we have
\begin{align*}
[g_i,x](t) 
&= p(t) g_i(t) x'(t) - p(t) g_i'(t) x(t) \\
&= c^{-1}p(t) g_i(t)\Bigl( x_2'(t) \int_0^t  x_1(s) h(s)\mathrm{d}s
  + x_1'(t)  \int_t^1  x_2(s) h(s)\mathrm{d}s \Bigr) \\
&\quad - c^{-1}p(t) g_i'(t) \Bigl( x_2(t) \int_0^t  x_1(s) h(s)\mathrm{d}s
  +  x_1(t)  \int_t^1  x_2(s) h(s)\mathrm{d}s \Bigr)\\
& =  c^{-1}[g_i, x_2](t)  \int_0^t  x_1(s) h(s)\mathrm{d}s 
 +  c^{-1}[g_i, x_1](t)  \int_t^1  x_2(s) h(s)\mathrm{d}s .
\end{align*}
We should note that, since the functions $x_1, x_2, h$ belong to $L^2(I)$, 
all integrals in the formula above are finite. We also know that the values 
$ [g_i, x_j](0)$ and $ [g_i, x_j](1)$ are well-defined and finite 
(for $i, j=1,2$) -- cf. \cite[Section 10.4]{Z}. Therefore,
\begin{gather*}
[g_i,x](1) = c^{-1}[g_i,x_2](1)  \int_0^1  x_1(s) h(s)\mathrm{d}s, \\
[g_i,x](0) = c^{-1}[g_i,x_1](0)  \int_0^1  x_2(s) h(s)\mathrm{d}s,\\
[g_1,x](0) - [g_2,x](0) = c^{-1} \int_0^1 x_2(s)h(s)\mathrm{d}s 
\bigl( [g_1,x_1](0) - [g_2,x_1](0) \bigr) = 0, \\
[g_1,x](1) - [g_2,x](1) = c^{-1} \int_0^1 x_1(s)h(s)\mathrm{d}s
 \bigl( [g_1,x_2](1) - [g_2,x_2](1) \bigr) = 0,
\end{gather*}
which follows from  assumptions \eqref{asmpt:bndr:0} and \eqref{asmpt:bndr:1}.

The uniqueness is the consequence of the assumption that guarantees that
 $0$ is the only solution to the problem \eqref{eq:SL:most:general} 
with the right-hand side equal to $0$.
\end{proof}

\begin{remark} \label{rmk4} \rm
In case both endpoints are regular we  take $g_1$ and $g_2$ as functions 
satisfying $g_1(0) = \cos\eta$, $p(0)g_1'(0) = \sin\eta$, $g_1(1) = 0$, 
$p(1)g_1'(1) = 0$ and  $g_2(1) = \cos\zeta$, $p(1)g_2'(1) = \sin\zeta$, 
$g_2(0) = 0$, $p(0)g_2'(0) = 0$
(see \cite[Lemma 10.4.4]{Z}  and further discussion there). 
This actually recreates the classical boundary conditions given for 
problem \eqref{eq:lin:sl}.
\end{remark}

\section{Bounded variation solutions}

As mentioned in the previous section, having the Green function, we may 
translate the problem \eqref{eq:SL:most:general} into the Hammerstein equation 
\eqref{eq:lin:op:sl}
with $G:(0,1)\times(0,1)\to\mathbb{R}$ given by \eqref{Green:fun:def} and 
$x_1, x_2$ satisfying assumptions of Theorem \ref{Green:exist}. 
Then the function $x$, the value of the integral operator $K:L^2(0,1)\to L^2(0,1)$ given by
\begin{equation}\label{def:op:K}
x(t) = (Kh)(t) = \int_0^1 G(s,t)h(s)\mathrm{d}s
\end{equation}
is the unique solution to the problem \eqref{eq:SL:most:general}
 with the right-hand side $h\in BV(I)$.

Now, the question whether the problem  \eqref{eq:SL:most:general} 
admits the solutions of unbounded variation may be translated into the 
properties of the kernel of the integral operator, for the Hammerstein equation 
(i.e. the Green function). We should refer to some of the recent results on 
the subject.

First of all, in \cite[Theorem 4]{BGK2} we may find the characterization of 
the kernels that induce the continuous linear map from $BV(I)$ to $BV(I)$, 
i.e. the assumptions that guarantee that for a $BV$ right-hand side $h$ 
of the problem \eqref{eq:SL:most:general} there exist only $BV$ solutions:


\begin{theorem}\label{ref:prop_suff}
Let $K$ be a linear integral operator generated by the kernel 
$k \colon I \times I \to \mathbb R$, that is, $K$ is given by the formula
\begin{equation}\label{eq:opK}
 Kx(t) = \int_0^1 k(t,s) x(s) \mathrm{d}s, \quad \text{$t \in I$}.
\end{equation}
The operator $K$ maps continuously the space $BV(I)$ into itself if and only
if the following conditions are satisfied:
\begin{itemize}
 \item[(H1)] for every $t \in I$, the function $s \mapsto k(t,s)$ 
is Lebesgue integrable on $I$;

 \item[(H2)]
there exists a positive constant $M$ such that 
\[
\sup_{\xi \in I} \bigvee_{0}^{1}\Big( \int_0^{\xi} k(\cdot,s)\mathrm{d}s\Big) \leq M.
\]
\end{itemize}
\end{theorem}

Moreover, for some kernels $k \colon I \times I \to \mathbb R$ the integral 
operator  $K$ given by \eqref{eq:opK} maps the space $L^\infty(I)$ of all 
essentially bounded functions into the space $BV(I)$ of functions of 
bounded variation (see \cite[Proposition 4]{BGK2}). 
That would mean that in such case, for any right-hand side $h$ belonging to
 $L^\infty(I)$, we have only $BV$ solutions to problem \eqref{eq:SL:most:general}.

This may actually be easily generalized to:

\begin{theorem}\label{kernel:Lp:BV} 
Assume $p\in [1,+\infty]$ is fixed and the kernel 
$k \colon I \times I \to \mathbb R$ is such that
   \begin{itemize}
 \item[(a)] %\label{ass:2ka} 
for every $t \in I$ the function $s \mapsto k(t,s)$ is Lebesgue measurable;

 \item[(b)]  %\label{ass:2kb} 
the function $s \mapsto k(0,s)$  belongs to $L^p(I)$;

 \item[(c)] %\label{ass:2kc} 
$\bigvee_0^1 k(\cdot, s) \leq m(s)$ for a.e. $s \in I$, where $m \in L^p(I)$.
 \end{itemize}
Then the operator $K$, generated by the kernel $k$, continuously maps the 
space $L^q(I)$ into the space $BV(I)$, where $1/p+1/q = 1$.
\end{theorem}

Based on the observation given above we  prove the following theorem.

\begin{theorem}\label{x1:x2:BV}
Assume there exist functions $x_1,x_2\colon I\to\mathbb{R}$ satisfying assumptions 
of Theorem \ref{Green:exist} and $x_1,x_2 \in BV(I)$. Then for any 
$h\in L^1(I)$ the solution $x$ to the problem \eqref{eq:SL:most:general} 
is of bounded variation.
\end{theorem}

\begin{proof}
We are going to show that the Green function $G(s,t)$ given by \eqref{Green:fun:def} 
satisfies the conditions (a)--(c) of Theorem \ref{kernel:Lp:BV} 
for a function $m\in L^\infty(I)$. 
Since functions belonging to $BV(I)$ are measurable and bounded, 
then conditions (a) and (b) are obvious. 
Now, we are going to check the condition (c).

Let us fix $s\in I$. Then 
\[
k(t,s) = G(s,t)=  c^{-1}x_1(t)x_2(s)\chi_{[0,s]}(t) 
+  c^{-1}x_1(s)x_2(t)\chi_{[s,1]}(t)
\]
 and
\begin{align*}
\bigvee_0^1 k(\cdot,s)
&\leq |c^{-1}x_2(s)|\Bigl( \bigvee_0^s x_1 + |x_1(s)| \Bigr)
+ |c^{-1}x_1(s)|\Bigl(\bigvee_s^1 x_2 + |x_2(s)| \Bigr) \\
&\leq |c^{-1}| \Bigl(|x_1(s)| \bigvee_0^1 x_2
 + |x_2(s)| \bigvee_0^1 x_1 + 2|x_1(s)|\,|x_2(s)|\Bigr).
\end{align*}
With $m(s) := |c^{-1}| \Bigl(|x_1(s)| \bigvee_0^1 x_2
+ |x_2(s)| \bigvee_0^1 x_1 + 2|x_1(s)||x_2(s)|\Bigr)$
there is, of course, $m\in L^\infty(I)$. This completes the proof.
\end{proof}

Now, the question appears what if $x_1$ or $x_2$ are not of bounded Jordan 
variation. Does it imply that there exists the unbounded variation solution 
to the problem \eqref{eq:SL:most:general}, with the right-hand side belonging 
to the space $BV(I)$? We are going to show that it is not that easy. 
The first observation is that, since both functions are locally $AC$, 
then the one with unbounded variation must have infinite variation in 
the neighbourhood of $0$ or $1$.

To characterize the Green functions $G$ that induce the integral operator 
transferring  functions of bounded Jordan variation into the functions 
of bounded Jordan variation, we will refer to the characterization given 
in Theorem \ref{ref:prop_suff}. First, the crucial role of the functions 
$\varphi_\xi: I\to\mathbb{R}$, given by
\begin{equation}\label{phi:xi:def}
\varphi_\xi(t) = \int_0^1 G(s,t)\chi_{[0,\xi]}(s)\mathrm{d}s
\end{equation}
 for fixed $\xi\in[0,1]$, should be noted. This is because the assumption 
(H2) of Theorem \ref{ref:prop_suff} requires the variations of all 
functions $\varphi_\xi$ to be uniformly bounded. Actually, as it may be 
easily observed, we may replace the functions $\varphi_\xi$ by functions 
$\psi_\xi\colon I\to \mathbb{R}$, given by
\begin{equation}\label{psi:xi:def}
\psi_\xi(t) = \int_0^1 G(s,t)\chi_{[\xi,1]}(s)\mathrm{d}s.
\end{equation}

In what follows we will also refer to the function $\alpha\colon I\to\mathbb{R}$ given by
\begin{equation}\label{alpha:def}
\alpha(t) = \int_0^1 G(s,t)\mathrm{d}s 
=  c^{-1}x_2(t)\int_0^t x_1(s)\mathrm{d}s 
+  c^{-1}x_1(t)\int_t^1 x_2(s) \mathrm{d}s.
\end{equation}

\begin{lemma}\label{lem:func:form}
Let $\xi\in[0,1]$, $\varphi_\xi, \psi_\xi, \alpha \colon I\to\mathbb{R}$ be given by 
\eqref{phi:xi:def},  \eqref{psi:xi:def} and \eqref{alpha:def} respectively.
Then
\[
\bigvee_0^1 \varphi_\xi = \Bigl| c^{-1}\int_0^\xi x_1(s)\mathrm{d}s\Bigr|
\bigvee_\xi^1  x_2
+ \bigvee_0^\xi \Bigl[ \alpha - c^{-1} \int_\xi^1 x_2(s)\mathrm{d}s \cdot x_1 \Bigr]
\]
and
\[
\bigvee_0^1 \psi_\xi = \Bigl| c^{-1}\int_\xi^1 x_2(s)\mathrm{d}s\Bigr| \bigvee_0^\xi  x_1
+ \bigvee_\xi^1 \Bigl[ \alpha - c^{-1} \int_0^\xi x_1(s)\mathrm{d}s \cdot x_2\Bigr].
\]
\end{lemma}

\begin{proof}
Let us fix $\xi\in[0,1]$. As we may see
\begin{equation}\label{eq:phi:chi}
\varphi_\xi(t) 
=   \begin{cases} c^{-1}x_2(t) \int_0^\xi x_1(s)\mathrm{d}s 
& \text{for } t\in (\xi,1] \\
c^{-1}x_2(t) \int_0^t x_1(s)\mathrm{d}s 
+ c^{-1}x_1(t) \int_t^\xi x_2(s)\mathrm{d}s & \text{for } t\in [0,\xi].
\end{cases}
\end{equation}

Similarly we have
\begin{equation}\label{eq:psi:chi}
\psi_\xi(t) =   \begin{cases} 
c^{-1}x_1(t) \int_\xi^1 x_2(s)\mathrm{d}s & \text{for } t\in [0,\xi] \\
c^{-1}x_2(t) \int_\xi^t x_1(s)\mathrm{d}s 
+ c^{-1}x_1(t) \int_t^1 x_2(s)\mathrm{d}s & \text{for } t\in (\xi,1].
\end{cases}
\end{equation}

Let us also observe that for $t\in [0,\xi]$,
\[
\varphi_\xi(t) = c^{-1}x_2(t) \int_0^t x_1(s)\mathrm{d}s
 + c^{-1}x_1(t) \int_t^\xi x_2(s)\mathrm{d}s
= \alpha(t) - c^{-1}x_1(t) \int_\xi^1 x_2(s)\mathrm{d}s
\]
and for $t\in (\xi,1]$ we have
\[
\psi_\xi(t) =  c^{-1}x_2(t) \int_\xi^t x_1(s)\mathrm{d}s 
+ c^{-1}x_1(t) \int_t^1 x_2(s)\mathrm{d}s
= \alpha(t) - c^{-1}x_2(t) \int_0^\xi x_1(s)\mathrm{d}s.
\]
This  completes the proof.
\end{proof}


\begin{lemma}\label{lem:notBV}
Let $\xi\in[0,1]$, $\varphi_\xi, \psi_\xi, \alpha \colon I\to \mathbb{R}$ be given 
by  \eqref{phi:xi:def}, \eqref{psi:xi:def} and \eqref{alpha:def} respectively. 
Then if there exists such $\xi\in(0,1)$ that
$\bigvee_0^\xi x_1 = +\infty$ then $\bigvee_0^1 \psi_\xi = +\infty$.
 Similarly, if $\bigvee_\xi^1 x_2 = +\infty$ for some $\xi\in(0,1)$,
then  $\bigvee_0^1 \varphi_\xi = +\infty$.
\end{lemma}

\begin{proof}
Assume now that $\bigvee_0^\xi x_1 = +\infty$ holds. If this is the case,
then it does not depend on the selection of $\xi\in(0,1)$. 
Then, since $x_2\neq 0$, we may pick  such $\xi\in(0,1)$ that
 $\int_\xi^1 x_2(s)\mathrm{d}s = c_0 \neq 0$. 
Then $\Bigl| c^{-1}\int_\xi^1 x_2(s)\mathrm{d}s\Bigr| \bigvee_0^\xi  x_1 = +\infty$,
meaning (by Lemma \ref{lem:func:form}) that $\bigvee_0^1 \psi_\xi = +\infty$.
Similarly we handle the case $\bigvee_\xi^1 x_2 = +\infty$, concluding that
 $\bigvee_0^1 \varphi_\xi =+\infty$.
\end{proof}

\begin{lemma}\label{lem:notBV:2}
Let $\xi\in[0,1]$, $\varphi_\xi, \psi_\xi \colon I\to \mathbb{R}$ be given by 
 \eqref{phi:xi:def} and \eqref{psi:xi:def} respectively. 
Let $y_1, y_2\colon I\to\mathbb{R}$ be given by $y_1(t) = x_1(t)\int_t^1 x_2(s)\mathrm{d}s$ 
and $y_2(t) = x_2(t)\int_0^t x_1(s)\mathrm{d}s$. 
Then, if there exists such $\xi\in(0,1)$ that $\bigvee_0^\xi x_1 <+\infty$ and
$\bigvee_0^\xi y_2 = +\infty$ then $\bigvee_0^1 \varphi_\xi = +\infty$.
Similarly, if $\bigvee_\xi^1 x_2<+\infty$ and  $\bigvee_\xi^1 y_1 = +\infty$ for
some $\xi\in(0,1)$, then  $\bigvee_0^1 \psi_\xi = +\infty$.
\end{lemma}

\begin{proof}
Assume now that $\bigvee_0^\xi x_1 <+\infty$ and
$\bigvee_0^\xi y_2 = +\infty$. We  show that
\[
\bigvee_0^\xi \varphi_\xi = \bigvee_0^\xi \bigl[ c^{-1}x_2(t)
\int_0^t x_1(s)\mathrm{d}s + c^{-1}x_1(t) \int_t^\xi x_2(s)\mathrm{d}s \bigr] 
= +\infty.
\]
As we may observe:
\[
\bigvee_0^\xi \varphi_\xi= \bigvee_0^\xi \bigl[ c^{-1}y_2(t) 
+ c^{-1}x_1(t) \int_t^\xi x_2(s)\mathrm{d}s \bigr]
\]
The function $[0,\xi]\ni t\mapsto x_1(t) \int_t^\xi x_2(s)\mathrm{d}s$ 
is the product of the bounded variation and absolutely continuous functions,
 hence is of bounded variation. But the sum of function from $BV(I)$ and 
the one with unbounded Jordan variation (i.e. function $y_2$) gives 
the function of unbounded Jordan variation. This completes the first 
part of the proof.

The second conclusion may be proved the same way.
\end{proof}


The following two propositions are direct consequences of Lemma \ref{lem:func:form}.

\begin{proposition}\label{prob:limsup:phi}
Assume that $\bigvee_\xi^1 x_2 < +\infty$ for each $\xi\in(0,1)$. Then if
\begin{equation}\label{sup:unbounded:phi}
\limsup_{\xi\to 0^+} \Bigl| \int_0^\xi x_1(s)\mathrm{d}s \Bigr|
\bigvee_\xi^1 x_2(t) = +\infty,
\end{equation}
then $\sup_{\xi\in I} \bigvee_0^1 \varphi_\xi = +\infty$.
\end{proposition}

\begin{proposition}\label{prob:limsup:psi}
Assume that $\bigvee_0^\xi x_1 < +\infty$ for each $\xi\in(0,1)$. Then if
\begin{equation}\label{sup:unbounded:psi}
\limsup_{\xi\to 1^-} \Bigl| \int_\xi^1 x_2(s)\mathrm{d}s \Bigr| 
 \bigvee_0^\xi x_1(t) =  +\infty,
\end{equation}
then $\sup_{\xi\in I} \bigvee_0^1 \psi_\xi = +\infty$.
\end{proposition}

Now we are ready to present conditions on the functions $x_1$ and $x_2$ 
under which the corresponding Green function is the kernel of the integral 
operator \eqref{def:op:K} which maps the space of functions of bounded variation 
into itself.

\begin{theorem} \label{thm5} 
Assume the map $G:(0,1)\times (0,1)\to\mathbb{R}$ is given as in Theorem \ref{Green:exist}. 
Then the integral operator $K:L^2(I)\to L^2(I)$ given by \eqref{def:op:K} maps 
the space $BV(I)$ into itself if and only if all of the following conditions 
are satisfied:
\begin{itemize}
 \item[(1)]  
there exists $\xi\in(0,1)$ such that $\bigvee_{\xi}^1 x_2<+\infty$;

 \item[(2)] 
 there exists $\xi\in(0,1)$ such that $\bigvee_0^\xi x_1 < +\infty$;

 \item[(3)] 
there exists $\xi\in(0,1)$ such that $\bigvee_{\xi}^1 y_1 < +\infty$, 
where $y_1(t) =  x_1(t)\int_t^1 x_2(s)\mathrm{d}s$;

 \item[(4)] 
there exists $\xi\in(0,1)$ such that $\bigvee_0^\xi y_2  < +\infty$,
 where $y_2(t) = x_2(t)\int_0^t x_1(s)\mathrm{d}s$;

\item[(5)] 
$\limsup_{\xi\to 0^+} \bigl| \int_0^\xi x_1(s)\mathrm{d}s \bigr| 
\bigvee_\xi^1 x_2 = M_0 < +\infty$;

\item[(6)] 
$\limsup_{\xi\to 1^-} \bigl| \int_\xi^1 x_2(s)\mathrm{d}s \bigr|
 \bigvee_0^\xi x_1  = M_1 < +\infty$.
\end{itemize}
\end{theorem}

\begin{remark}\label{rmk5} \rm
Because  $x_1, x_2$ are locally absolutely continuous,  
the quantifier $\exists \xi\in(0,1)$ appearing in conditions 
(1)--(4) can be equivalently replaced with $\forall \xi\in(0,1)$. 
In what follows we should also remember that $x_1,x_2 \in L^2(I)$, 
so also $x_1,x_2\in L^1(I)$ and the functions
$\int_0^t x_1(s)\mathrm{d}s$ and $\int_t^1 x_2(s)\mathrm{d}s$ are absolutely 
continuous in the entire closed interval $I$.
\end{remark}

\begin{proof}[Proof of Theorem \ref{thm5}]
We observe that if all of the conditions (1)--(6) are satisfied, then
\[
\sup_{\xi\in I} \bigvee_0^1 \varphi_\xi < +\infty.
\]
First, we  observe that $\bigvee_0^1 \alpha < +\infty$.
Let us estimate
\begin{align*}
\bigvee_0^1 \alpha 
&= \bigvee_0^1 \Bigl( c^{-1}x_2(t)\int_0^t x_1(s)\mathrm{d}s 
+  c^{-1}x_1(t)\int_t^1 x_2(s) \mathrm{d}s \Bigr) \\
& \leq \bigvee_0^1 \Bigl( c^{-1}x_2(t)\int_0^t x_1(s)\mathrm{d}s\Bigr) 
 +  \bigvee_0^1 \Bigl( c^{-1}x_1(t)\int_t^1 x_2(s) \mathrm{d}s \Bigr) \\
&\leq  \bigvee_0^{1/2} \Bigl( c^{-1}x_2(t)\int_0^t x_1(s)\mathrm{d}s\Bigr)
  + \bigvee_{1/2}^1 \Bigl( c^{-1}x_2(t)\int_0^t x_1(s)\mathrm{d}s\Bigr) \\
&\quad + \bigvee_0^{1/2} \Bigl( c^{-1}x_1(t)\int_t^1 x_2(s)
 \mathrm{d}s \Bigr) 
  + \bigvee_{1/2}^1 \Bigl( c^{-1}x_1(t)\int_t^1 x_2(s) \mathrm{d}s \Bigr) \\
&\leq |c^{-1}|\bigvee_0^{1/2} y_2 
  + |c^{-1}|\bigvee_{1/2}^1 x_2  \int_0^1 |x_1(s)|\mathrm{d}s \\
&\quad +|c^{-1}|\bigvee_0^{1/2}x_1\int_0^1 |x_2(s)|\mathrm{d}s
 + |c^{-1}|\bigvee_{1/2}^1 y_1.
\end{align*}


The first and fourth terms are finite by (4) and (3), respectively. 
The second and third terms are estimated by (1) and (2) because 
 $\bigvee_a^b xy \leq \|x\|_{BV}\|y\|_{BV}$ 
(see \cite[Proposition 1.10]{ABM}) and
 $\bigvee_a^b \int_c^t x(s)\mathrm{d}s = \int_a^b |x(s)|\mathrm{d}s$ 
for any $[a,b]\subset[0,1]$.

Let us now take a closer look at conditions (5) and (6). 
First, we note that from (5) it follows that there 
exists $\xi_0\in(0,1)$ such that for all $\xi\in(0,\xi_0)$
\[
\Bigl| \int_0^\xi x_1(s)\mathrm{d}s \Bigr| \bigvee_\xi^1 x_2 \leq M_0 +1.
\]
On the other hand, for all $\xi\in[\xi_0,1)$ we estimate
\[
\Bigl| \int_0^\xi x_1(s)\mathrm{d}s \Bigr|  \bigvee_\xi^1 x_2 
\leq \int_0^1 |x_1(s)|\mathrm{d}s  \bigvee_{\xi_0}^1 x_2.
\]
Let us denote $\tilde M :=  \int_0^1 |x_1(s)|\mathrm{d}s  \bigvee_{\xi_0}^1 x_2$.  
By (1)  $\tilde M$ is finite.

Hence we know that
\[
\sup_{\xi\in(0,1)} \Bigl| \int_0^\xi x_1(s)\mathrm{d}s 
\Bigr|  \bigvee_\xi^1 x_2 \leq M_0+\tilde M + 1 =:  K_0 < +\infty.
\]
Similarly, from (6) and (2), we may conclude that
\[
\sup_{\xi\in(0,1)}  \Bigl| \int_\xi^1 x_2(s)\mathrm{d}s \Bigr| 
 \bigvee_0^\xi x_1 =:  K_1< +\infty.
\]

From Lemma \ref{lem:func:form} we infer that
\[
\bigvee_0^1 \varphi_\xi = \Bigl| c^{-1}\int_0^\xi x_1(s)\mathrm{d}s\Bigr| 
\bigvee_\xi^1  x_2 + \bigvee_0^\xi \Bigl[ \alpha(t)
 - c^{-1}x_1(t) \int_\xi^1 x_2(s)\mathrm{d}s \Bigr]
\]
and we estimate
\begin{align*}
\bigvee_0^1 \varphi_\xi 
&\leq \bigvee_0^1 \alpha 
+ |c^{-1}|\Bigl( \Bigl| \int_0^\xi x_1(s)\mathrm{d}s\Bigr| \bigvee_\xi^1  x_2 
+ \Bigl| \int_\xi^1 x_2(s)\mathrm{d}s\Bigr|\bigvee_0^\xi x_1\Bigr) \\
&\leq \bigvee_0^1 \alpha + K_0 + K_1 <+\infty,
\end{align*}
and the estimate is valid for any $\xi\in(0,1)$. 
This proves that condition (H2) is satisfied and hence,
 by Theorem \ref{ref:prop_suff}, the integral operator $K$ maps $BV(I)$ to $BV(I)$.

It remains to note that each of the conditions (1)--(6) is necessary
 for all solutions to the problem \eqref{eq:SL:most:general} 
with right-hand side $h\in BV(I)$ to be of bounded variation. 
Conditions (1) and (2) are necessary by Lemma \ref{lem:notBV}. 
Conditions (3) and (4) are necessary by Lemma \ref{lem:notBV:2}. 
Conditions (5) and (6) are necessary by Propositions \ref{prob:limsup:phi} 
and \ref{prob:limsup:psi} and Lemma \ref{lem:notBV}.
\end{proof}



\begin{example}[{Legendre equation \cite[Section 19]{Ev}}] \label{examp2} \rm
Let us consider the equation
\begin{equation}\label{eq:legendre}
-((1-t^2)x'(t))'  = 0, \quad t\in(0,1),
\end{equation}
with the boundary conditions $x(0)=0$ and $[g,x](1)=0$, 
where $g(t)=1$. Here $0$ is the regular endpoint, while $1$ 
is the LC endpoint. The two basic solutions 
$x_1(t) = \frac{1}{2}\ln\frac{1+t}{1-t}$ and $x_2(t) = 1$ 
satisfy the assumptions of Theorem \ref{Green:exist}. 
It is not difficult to check that the functions $x_1,x_2$ 
satisfy all assumptions (1)--(6) of Theorem \ref{thm5}, 
even though $x_1$ is of unbounded Jordan variation. 
Hence there does not exist a solution to the Legendre equation which 
is a function of unbounded variation, when the right-hand side $h$ 
belongs to $BV(I)$.
\end{example}


\begin{example}[Legendre equation, again] \label{examp3} \rm
Let us again look at the Legendre equation \eqref{eq:legendre}, but now 
with the different boundary conditions $x'(0)=0$ and $[g,x](1)=0$, where  
$g(t)= \frac{1}{2}\ln\frac{1+t}{1-t}$. As we can easily check,  
the two basic solutions are $x_1(t) = 1$ and  $x_2(t) = \frac{1}{2}\ln\frac{1+t}{1-t}$. 
Since the function $x_2$ is unbounded in the neighbourhood of $1$ we have 
$\bigvee_{1/2}^1 x_2 = +\infty$. Since (1) is not satisfied, we
 know that there exists a right-hand side of \eqref{eq:legendre} of 
bounded variation for which the corresponding solution is a function of unbounded 
Jordan variation.
\end{example}

\begin{thebibliography}{0}

\bibitem{ABM} Appell, J.; Bana\'s, J.; Merentes, N.;
\emph{Bounded variation and around},
  De Gruyter Studies in Nonlinear Analysis and Applications, no. 17, De
  Gruyter, Berlin, 2014.

\bibitem{BGK2} Bugajewski, D.; Gulgowski, J.; Kasprzak, P.;
 \emph{On integral operators and
  nonlinear integral equations in the spaces of functions of bounded
  variation}, J. Math. Anal. Appl. \textbf{444} (2016), 230--250.

\bibitem{CL} Coddington, E. A.; Levinson, N.;
\emph{Theory of Ordinary Differential Equations},
McGraw-Hill, New York, 1955.

\bibitem{Duffy} D.~G. Duffy;
 \emph{Green's functions with applications}, Applied Mathematics,
  CRC Press, 2001.

\bibitem{Duhoux_Tatra} M. Duhoux;
 \emph{Green functions and the property of positivity for singular
  second order boundary value problems}, Tatra Mt. Math. Publ. \textbf{19}
  (2000), 1--20.

\bibitem{Ev} W.~N. Everitt;
 \emph{A catalogue of Sturm-Liouville differential equations},
  (2005), 271--331.

\bibitem{H} P.~Hartman;
 \emph{Ordinary differential equations}, Birkh\"auser,
  Boston--Basel--Stuttgart, 1982.

\bibitem{SLC} Sun, Yan; Liu, L.; Cho, Y. L.;
 \emph{Positive solutions of singular nonlinear
  Sturm-Liouville boundary value problems}, ANZIAM J. \textbf{45} (2004),
  557--571.

\bibitem{Z} A.~Zettl;
 \emph{Sturm-Liouville theory}, AMS, 2005.

\end{thebibliography}

\end{document}

