\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2016 (2016), No. 01, pp. 1--15.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu}
\thanks{\copyright 2016 Texas State University.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2016/01\hfil Fractional regularization of operator equations]
{Singular regularization of operator equations in $L_1$ spaces via fractional
differential equations}

\author[G. L. Karakostas, I. K. Purnaras \hfil EJDE-2016/01\hfilneg]
{George L. Karakostas, Ioannis K. Purnaras}

\address{George L. Karakostas \newline
 Department of Mathematics, University of Ioannina,
 451 10 Ioannina, Greece}
 \email{gkarako@uoi.gr}

\address{Ioannis K. Purnaras \newline
 Department of Mathematics, University of Ioannina,
 451 10 Ioannina, Greece}
\email{ipurnara@uoi.gr}

\thanks{Submitted June 8, 2015. Published January 4, 2016.}
\subjclass[2010]{34K35, 34A08, 47045, 65J20}
\keywords{Causal operator equations;
fractional differential equations;
\hfill\break\indent regularization; Banach space}

\begin{abstract}
 An abstract causal operator equation  $y=Ay$ defined on a space of the
 form $L_1([0,\tau],X)$, with $X$ a Banach space, is regularized
 by the fractional differential equation
 $$
 \varepsilon(D_0^{\alpha}y_{\varepsilon})(t)
 =-y_{\varepsilon}(t)+(Ay_{\varepsilon})(t), \quad t\in[0,\tau],
 $$
 where  $D_0^{\alpha}$ denotes the (left) Riemann-Liouville derivative
 of order $\alpha\in(0,1)$. The main procedure relies on properties of
 the Mittag-Leffler function combined with some facts from convolution theory.
 Our results complement related ones that have appeared in the literature;
 see, e.g., \cite{cord}, in which regularization via ordinary differential
 equations is used.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
\allowdisplaybreaks


\section{Introduction} 

Regularization employs several  techniques in order to approximate solutions 
of ill-posed problems such as 
\begin{equation}\label{ia}
My = f,
\end{equation}  
where $M$ is an operator acting on a space $X$ and taking values in another 
space $Y$. The problem is characterized as ill-posed 
if solutions do not exist for some $f$, or uniqueness of solutions 
is not guaranteed, or continuous dependence on the data does not hold; 
the last condition is equivalent to saying that $M$ has no continuous inverse. 
In order to solve an ill-posed problem (approximately), we regularize it, 
namely, we replace the problem by a suitable family of well-posed problems 
whose solutions approximate (in some sense) the solution of the ill-posed 
problem we look for.

 However, such a process does not produce an approximation 
of the solutions of the original equation in all situations. 
To see this, we borrow an example from the literature (e.g., \cite{lin, mar}), 
adapted to our situation, as follows:  Consider the $2\times 2$ 
matrix-operator $M$ and the function $f$ given by
\[
M:=\begin{bmatrix}
\frac{d}{dt}&
-1\\
1&
0
\end{bmatrix}\quad
\text{and}\quad f(t):=\begin{bmatrix}
0\\
p(t)
\end{bmatrix},
\]
 where $p$ is a differentiable function on  $[0,1]$, say. The exact solution 
of the operator equation \eqref{ia} in the space 
$C^{1}([0,1],\mathbb{R})\times C([0,1],\mathbb{R})$ is given by 
$$
x(t)=p(t),\quad y(t)=p'(t),\quad t\in[0,1].
$$ 
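Indeed, writing out the equation $M\begin{bmatrix}x\\ y\end{bmatrix}=f$ componentwise gives
\[
x'(t)-y(t)=0, \qquad x(t)=p(t), \quad t\in[0,1],
\]
from which $x=p$ and $y=x'=p'$.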
Take a small number $\varepsilon>0$ and let
\[ 
f_{\varepsilon}(t):=f(t)+\begin{bmatrix}
0\\
{\varepsilon}\sin(t/\varepsilon^2)
\end{bmatrix}
\]   
be a small perturbation of $f$. Then we obtain the exact solution
$$
x_{\varepsilon}(t)=p(t)+{\varepsilon}\sin(t/\varepsilon^2),\quad  
y_{\varepsilon}(t)=p'(t)+\frac{1}{\varepsilon}\cos(t/\varepsilon^2).
$$ 
Hence the quantity
\[ 
\begin{bmatrix}
x_{\varepsilon}(t)\\
y_{\varepsilon}(t)
\end{bmatrix}
-\begin{bmatrix}
x(t)\\
y(t)
\end{bmatrix}=
\begin{bmatrix}
{\varepsilon}\sin(t/\varepsilon^2)\\
\frac{1}{\varepsilon}\cos(t/\varepsilon^2)
\end{bmatrix}
\]
 becomes arbitrarily large as $\varepsilon$ tends to 0. 
This means that a small change in the right-hand side of the equation 
may produce a large change in the solution. 

In the case where $M$ is a compact linear operator between two Hilbert spaces, 
a standard regularizing scheme consists of the equation 
\begin{equation}\label{i3}
(M^*M+\varepsilon I)x_{\varepsilon}=M^*f,
\end{equation} 
where $M^*$ is the adjoint of $M$ and $I$ is the identity operator; see \cite{cwg}.
In \cite{he} the regularization \eqref{i3} has $M^*f_{\delta}$ on its right-hand side, 
where $f_{\delta}$ is a (noisy) approximation of $f$. 
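For the reader's convenience we recall the standard reason why \eqref{i3} is well posed 
for every $\varepsilon>0$: since $M^*M$ is self-adjoint and nonnegative,
\[
\langle (M^*M+\varepsilon I)x,x\rangle\geq\varepsilon\|x\|^2,
\]
so $M^*M+\varepsilon I$ is boundedly invertible with 
$\|(M^*M+\varepsilon I)^{-1}\|\leq 1/\varepsilon$; of course, this bound deteriorates 
as $\varepsilon\to 0$.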
The works \cite{neu, amw} refer to \emph{Tikhonov regularization},
i.e., regularization of minimization problems. In this setting, 
an equation of the form
\begin{equation}\label{i1} 
\int_a^bk(t,s)x(s)ds=f(t)
\end{equation} 
is replaced by the equation 
$$
\int_a^bk(t,s)x_{\varepsilon}(s)ds+\varepsilon x_{\varepsilon}(t)=f(t),
$$ 
or the equation
$$
\int_a^bk(t,s)x_{\varepsilon}(s)ds+\varepsilon x_{\varepsilon}(t)=f_{\delta}(t),
$$
and then one studies the convergence of the net $x_{\varepsilon}$. 
Here a noisy $f_{\delta}$ replaces $f$, for small $\delta$;
 see, e.g., the interesting survey presented in \cite{lamm}.
 Approximation of the kernel $k$ of \eqref{i1} is used by other authors,
 see, e.g., \cite{men}, while approximation of both the perturbation and the 
operator is applied elsewhere; see \cite{gg}.
Some authors (see, e.g., \cite{bll}), dealing with the Volterra equation
\begin{equation}\label{i2} 
\int_0^tk(t,s)x(s)ds=f(t)\,,
\end{equation}
apply the so-called method of \emph{simplified (or Lavrentiev)
regularization}, which consists of an approximation of the perturbation $f$,
and the method of \emph{local regularization}, realized by an approximate equation
of the form
$$
\int_t^{t+\varepsilon}k(t+\varepsilon,s)x(s)ds
+\int_0^tk(t+\varepsilon,s)x(s)ds=f(t+\varepsilon),
$$ 
where $\varepsilon$ is a  parameter tending to 0.

  In \cite{sav} another approach is applied  to \eqref{i1} by taking an 
approximation of both the kernel $k$ and the output $f$. 
For a more general setting see also \cite{sav1}.

 Regularization of abstract equations of the form \eqref{ia} can be realized 
by approximating the output $f$, as, e.g., in \cite{hls}, and, for Fredholm integral 
equations, as, e.g., in \cite{ww}.  
Regularization of the Hammerstein type equation
$x+BAx =f$ is achieved (see, e.g., \cite{pop}) by replacing 
it with the equation
$x_{\varepsilon}+(B+\varepsilon J)(A+\varepsilon J )x_{\varepsilon}=f_{\delta}$, 
where $\varepsilon, \delta$ are positive reals tending to 0 and the functions
 $f, f_{\delta}$ are such that
$\|f-f_{\delta}\|\leq\delta$.
Here $A$ and $B$ are operators, $x, f$ are elements of a given Banach space $X$, 
and $x$ is the unknown element of $X$. 

 In the case where the operator  
$M$ has the form $My=Ay-y+f$, the problem \eqref{ia} leads to the fixed 
point problem 
\begin{equation}\label{i4} 
y=Ay.
\end{equation}  

 It is known (see, e.g., \cite[p. 89]{cord}) that a continuous compact  
operator $A$ (in the sense of Krasnoselskii) defined on a locally convex 
Hausdorff space has a fixed point. Regularization theory for such 
an equation, especially when $A$ is a monotone or a non-expansive operator 
defined on a Hilbert or (even on a) Banach space, forms a large field, 
and most authors make use of variational techniques;
 see, e.g., \cite{a3, a2, a1, kar4, a4} and the references therein. 

 In the case where \eqref{i4} refers to a space of functions 
$y:[0,1]\to\mathbb{R}$, say, namely when we have
\begin{equation}\label{i5} 
y(t)=(Ay)(t), \quad t\in[0,1],
\end{equation}  
regularization is achieved by a differential equation of the form
\begin{equation}\label{i6}  
\varepsilon\frac{d}{dt}y(t)+y(t)-(Ay)(t)=0. 
\end{equation}  
This is done elsewhere (see, e.g., the book \cite[p. 140]{cord} and the 
references therein) when $y$ has to be a continuous function, say 
$y\in C([0,T],\mathbb{R})$. Similar ideas appear for the neutral differential 
equations discussed in \cite{gh}. An immediate consequence of this approach  
is that, in this case, a solution of \eqref{i5} is approximated by a sequence 
$(y_{\varepsilon_n})$ of real-valued functions having continuous first-order 
derivatives.  
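
For later comparison with the fractional case, note that, at least formally, by the 
variation of constants formula any solution of \eqref{i6} satisfies
\[
y(t)=e^{-t/\varepsilon}y(0)+\frac{1}{\varepsilon}\int_0^te^{-(t-s)/\varepsilon}(Ay)(s)ds;
\]
the kernel $\frac{1}{\varepsilon}e^{-s/\varepsilon}$, whose integral over $[0,t]$ equals 
$1-e^{-t/\varepsilon}$, acts as an approximate identity as $\varepsilon\to0$. The kernel 
$k(\cdot;\varepsilon)$ constructed in the auxiliary lemmas below plays the analogous 
role in the fractional setting.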

  For fractional differential equations only a few results analogous to the 
above are known. We mention the problem
$$
D_0^{\alpha}(x-x(0)-\varepsilon)=f(t,x)+\varepsilon, \quad x(0)=x_0+\varepsilon,
$$ 
discussed in \cite{lak}, where conditions are given so that, as $\varepsilon$ 
tends to 0, the maximal solution $\eta(t;\varepsilon)$ tends to the maximal 
solution $\eta(t)$ of the problem
$$
D_0^{\alpha}(x-x(0))=f(t,x), \quad x(0)=x_0,
$$ 
uniformly on any compact interval $[0,t_1]$ of the domain of $\eta$. 

In this work we assume that $A$ is defined on an $L_1$-space of $X$-valued 
functions, where $X$ is a Banach space, and we regularize \eqref{i5} 
by an equation involving  continuous functions with Lebesgue-integrable 
first order derivatives.  To succeed in such an approach we work in 
$L_1$-spaces and use the fractional equation
\begin{equation}\label{e01}
\varepsilon(D_0^{\alpha}y_{\varepsilon})(t)
=-y_{\varepsilon}(t)+(Ay_{\varepsilon})(t), \quad\text{a.a.}\quad 
 t\in[0,\tau]:=I_{\tau},
\end{equation} 
for $\varepsilon$ tending to 0. Here, $D_0^{\alpha}y_{\varepsilon}$ is the 
(left) Riemann-Liouville derivative of $y_{\varepsilon}$ of order $\alpha$. 

A central role in our approach is played by some facts from convolution theory, 
as well as by the Mittag-Leffler function. It is known that the relation of 
the latter to fractional calculus is analogous to that of the 
exponential function to classical calculus. See, for instance, 
\cite[Subsection 3.2]{jum}. 

 We investigate when, for some $\tau\in(0,T]$, there is a sequence of solutions  
of the fractional differential equation
\eqref{e01} converging, in the $L_1$-norm on $[0,\tau]$, to a solution 
of equation \eqref{i5} as the parameter $\varepsilon$ approaches 0.

\section{Preliminaries}

\subsection{Fractional calculus}
Throughout this paper we shall work on a real Banach space $X$ endowed 
with a norm $\|\cdot\|_X$, and on the space $L_1^T:=L_1([0,T],X)$, 
for some fixed $T>0$. For $\tau\in(0,T]$ we write
$$
\|y\|_1^{\tau}:=\int_0^{\tau}\|y(s)\|_Xds,
$$ 
so that $\|\cdot\|_1^{T}$ is the norm of $L_1^T$ and, more generally, 
$\|\cdot\|_1^{\tau}$ is the norm of the space $L_1^{\tau}:=L_1([0,\tau],X)$.
Several books in the literature present surveys on the classical fractional 
calculus.  Two exhaustive such books are the ones by  Podlubny \cite{po} 
and Miller and Ross \cite{miro}. We recall some basic definitions and results 
adapted to our purposes, namely we consider the meaning of the fractional 
derivative and integral of an $X$-valued function defined on the interval $[0,T]$. 

Let $\Gamma$ be the Euler Gamma function. It is well known (see, e.g., \cite{wiki}) 
that on the positive real axis the function $\Gamma$ attains a local minimum value
 $0.885603\dots$ at $x_{\rm min}=1.461632\dots$, and it is increasing 
for $x>x_{\rm min}$. Later on we shall use the monotonicity of $\Gamma$ 
on the interval $[2,+\infty)$. 

For $u\in L_1^T$ and $\alpha\in(0,1)$, the (left) fractional Riemann-Liouville 
derivative of $u$ of order $\alpha$ is defined by
$$
(D_0^{\alpha}u)(t)=\frac{1}{\Gamma(1-\alpha)}\frac{d}{dt}
\int_0^t(t-s)^{-\alpha}u(s)ds,
$$
 where the integral is understood in the Bochner sense. 

As in \cite[pp. 59--73, and relation (2.122)]{po}, we can see that the 
following composition formula with an integer order $n$ derivative 
holds\footnote{The relation holds even for $\alpha<0.$}:
\begin{equation}\label{10}
D_0^{\alpha}(u^{(n)})(t)=D_0^{\alpha+n}\Big(u
-\sum_{j=0}^{n-1}\frac{u^{(j)}(0)t^j}{\Gamma(j+1)}\Big)(t).
\end{equation} 
Now consider the problem
\begin{equation}\label{eq1} 
(D_0^{\alpha}u)(t)=f(t), \quad \text{a.a. }  t\in[0,T], \quad 
(D_0^{\alpha-1}u)(t)\Big|_{t=0}=b,
\end{equation}
where $b\in X$.

 Although the following result can be deduced from arguments in 
the literature (see, e.g., \cite[Theorem 3.1, p. 122, and relation (3.7), p. 123]{po}),
 we give a proof for two reasons: first, we want this work to be self-contained; 
second, the functions used here take values in the abstract Banach space $X$ 
and not in $\mathbb{R}$, as is the case elsewhere 
(and in \cite[Theorem 3.1]{po}). 

Let $B$ be the (real) Beta function, namely the function defined for 
$\rho, \sigma>0$ by
$$
B(\rho,\sigma)=\int_0^1(1-\theta)^{\rho-1}\theta^{\sigma-1}d\theta.
$$ 
It is connected with the Gamma function by the relation
$$
B(\rho,\sigma)=\frac{\Gamma(\rho)\Gamma(\sigma)}{\Gamma(\rho+\sigma)}.
$$
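For later use we record a typical instance of this relation: by the substitution 
$s=\theta t$, $\theta\in[0,1]$, for $\alpha\in(0,1)$ we have
\[
\int_0^t(t-s)^{-\alpha}s^{\alpha-1}ds
=\int_0^1(1-\theta)^{-\alpha}\theta^{\alpha-1}d\theta
=B(1-\alpha,\alpha)=\Gamma(1-\alpha)\Gamma(\alpha),
\]
a quantity independent of $t$; in particular $D_0^{\alpha}(t^{\alpha-1})=0$. 
This is precisely the computation behind the first term in the proof of 
Lemma \ref{l2} below.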

\begin{lemma}\label{l2} 
The function $y$  defined by
$$
y(t)=\frac{t^{\alpha-1}}{\Gamma(\alpha)}b
 +\frac{1}{\Gamma(\alpha)}\int_0^t(t-s)^{\alpha-1}f(s)ds,\quad\text{a.a. } t\in[0,T],
$$ is the only solution of the  problem \eqref{eq1}.
\end{lemma}

\begin{proof} 
We show that $y$ satisfies the problem \eqref{eq1}. We have
\begin{equation}\begin{aligned}
(D_0^{\alpha}y)(t)
&=\frac{1}{\Gamma(\alpha)\Gamma(1-\alpha)}\frac{d}{dt}
 \int_0^t{(t-s)^{-\alpha}}s^{\alpha-1}dsb\\
&\quad +\frac{1}{\Gamma(\alpha)\Gamma(1-\alpha)}
 \frac{d}{dt}\int_0^t{(t-s)^{-\alpha}}\int_0^s(s-r)^{\alpha-1}f(r)\,dr\,ds\\
&=\frac{1}{\Gamma(\alpha)\Gamma(1-\alpha)}\frac{d}{dt}B(1-\alpha,\alpha)b\\
&\quad +\frac{1}{\Gamma(\alpha)\Gamma(1-\alpha)}
 \frac{d}{dt}\int_0^t{(t-s)^{-\alpha}}\int_r^t(s-r)^{\alpha-1}f(r)dsdr\\
&=\frac{1}{\Gamma(\alpha)\Gamma(1-\alpha)}
 \frac{d}{dt}\int_0^tf(r)drB(1-\alpha,\alpha)\\
&=\frac{d}{dt}\int_0^tf(r)dr=f(t), \quad\text{a.e.},
\end{aligned}
\end{equation}
where, in the second integral, we used the substitution 
$s=(1-\theta)r+\theta t$, $\theta\in[0,1]$, and where the first term vanishes 
because $\int_0^t(t-s)^{-\alpha}s^{\alpha-1}ds=B(1-\alpha,\alpha)$ does not 
depend on $t$.
Similarly we obtain
$$
(D_0^{\alpha-1}y)(t)\Big|_{t=0}=\frac{d}{dt}(t)
\Big|_{t=0}b+\frac{d}{dt}\int_0^t(t-r)f(r)dr\Big|_{t=0}=b.
$$ 
The converse follows by an application of \cite[Theorem 3.1, p. 122]{po}.
\end{proof}

\subsection{The Mittag-Leffler function} \label{p1} 

The Mittag-Leffler function of order $\alpha>0$ is defined on the complex
 plane by 
$$
E_{\alpha}(z):=\sum_{j=0}^{\infty}\frac{z^j}{\Gamma(j\alpha+1)}\,.
$$
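In particular, for $\alpha=1$ we recover the exponential function,
\[
E_1(z)=\sum_{j=0}^{\infty}\frac{z^j}{\Gamma(j+1)}
=\sum_{j=0}^{\infty}\frac{z^j}{j!}=e^z,
\]
which is the sense in which $E_{\alpha}$ plays, in fractional calculus, the role 
played by the exponential function in classical calculus.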
From a result of Feller cited by Pollard \cite{poll}, we know that there 
is a nondecreasing and bounded function $F_{\alpha}$ such that
\begin{equation}\label{r2}
E_{\alpha}(-x)=\int_0^{+\infty}e^{-xs}dF_{\alpha}(s), \quad x\geq 0.
\end{equation} 
 It follows that the function $x\mapsto E_{\alpha}(-x)$ is positive and 
non-increasing on $[0,+\infty)$, that it tends to 0 as $x\to+\infty$ and, 
since $E_{\alpha}(0)=1$, that the quantity $E_{\alpha}(-x)$ is not greater than 1. 
More properties of this function and of some of its generalizations can be 
found in \cite{po}.

\section{Main results}   

Let $A: L_1^T\to L_1^T$ be a causal operator, namely one satisfying 
$(Ax)(t)=(Ay)(t)$ whenever $x(s)=y(s)$ for a.a. $s\in[0,t]$ 
(for the continuous case see, e.g., \cite{kar3}, \cite{new} and 
the references therein). This property guarantees that, 
for any $\tau\in(0,T]$, the operator $A$ maps the ball 
$$
B_{\tau}^r:=\{y\in L_1^{\tau}:  \|y\|_1^{\tau}<r\}
$$ 
into the space $L_1^{\tau}$. Suppose, also, that $A$ is continuous 
and compact, in the sense that it maps bounded sets into relatively compact sets.  
 Hence, if for some $\tau>0$ it holds that 
$$
A(\overline{B_{\tau}^r})\subseteq \overline{B_{\tau}^r},
$$  
then the following Schauder fixed point theorem applies and ensures 
the existence of a fixed point of $A$ in $\overline{B_{\tau}^r}$. 

\begin{theorem}[{\cite[p. 89]{cord}}] \label{t1}
 Let $E$ be a real Banach space and $K\subset E$ a closed, bounded  
and convex set. If $C:K\to K$ is a continuous compact operator, then $C$ 
has at least one fixed point.
\end{theorem}

Now, for any fixed and small enough $\varepsilon>0$, say $\varepsilon<1$, 
consider the fractional differential equation
\begin{equation}\label{e1}
\varepsilon(D_0^{\alpha}y)(t)=-y(t)+(Ay)(t), \quad \text{a.a. }\ t\in[0,T],
\end{equation} 
where the derivative $D_0^{\alpha}y$ is in the sense of Riemann-Liouville 
and $\alpha\in(0,1)$.  

Let $b$ be a (nonzero) element of $X$ and consider the initial value problem
$$
(D_0^{\alpha}y)(t)=-\frac{1}{\varepsilon}y(t)+\frac{1}{\varepsilon}(Ay)(t), 
\quad (D_0^{\alpha-1}y)(t)\Big|_{t=0}=b.
$$ 
According to Lemma \ref{l2}, a function $y$  is a solution of the problem, 
if and only if it satisfies the equation
\begin{equation}\label{a1}
y(t)=\frac{b}{\Gamma(\alpha)}t^{\alpha-1}
-\frac{1}{\varepsilon\Gamma(\alpha)}\int_0^t(t-s)^{\alpha-1}y(s)ds
+\frac{1}{\varepsilon\Gamma(\alpha)}\int_0^t(t-s)^{\alpha-1}(Ay)(s)ds.
\end{equation}

Our main result in this work is given in the following theorem: 

\begin{theorem}\label{t2}
If $A$ is a causal, compact and continuous operator on $L_1^T$, then 
there exists a certain $\tau\in(0,T]$ such that, for any sequence 
$(\varepsilon_n)$ converging to 0, there is a sequence of solutions 
$(y_{n})$ of equation \eqref{a1} (with $\varepsilon=\varepsilon_n$) converging 
in the $L_1^{\tau}$-sense to a solution $y$ of the equation 
$$
y(t)=(Ay)(t), \quad\text{a.a. } t\in [0,\tau].
$$ 
\end{theorem}

The proof of the above theorem will be given in the last section. 
It is noteworthy that the theorem has several interesting consequences,
 such as the following one.

 \begin{corollary} \label{coro3.3}
Let $k$ be a positive integer, let $W$ be a continuous and causal operator 
defined on the space $C^{k}([0,T],X)$, and let $\alpha\in(0,1)$. Then, 
there exists a certain $\tau\in(0,T]$ such that, for any sequence 
$(\varepsilon_n)$ converging to 0, there is a sequence of solutions 
$(x_{n})$ of the problem
 \begin{gather}\label{es1}
\varepsilon(D_0^{k+\alpha}x)(t)=-x^{(k)}(t)+(Wx)(t), \quad\text{a.a. }
 t\in[0,\tau], \\
x^{(j)}(0)=0,\quad  j=0, 1, \dots, k-1, \quad (D_0^{k+\alpha-1}x)(t)\Big|_{t=0}=b,
\nonumber
\end{gather} 
converging, in the sup-norm $\|\cdot\|^{\tau}_{\infty}$ sense, to a 
solution of the problem 
\begin{gather*}
x^{(k)}(t)=(Wx)(t)\\
x^{(j)}(0)=0,\quad  j=0, 1, \dots, k-1.
\end{gather*}
\end{corollary}

\begin{proof}
Set $y=x^{(k)}$. Then, due to \eqref{10}, we have  
$$
(D_{0}^{\alpha}y)(t)=(D_0^{k+\alpha}x)(t)\quad\text{and}\quad
 (D_{0}^{\alpha-1}y)(t)\big|_{t=0}=(D_0^{k+\alpha-1}x)(t)\Big|_{t=0}=b
$$ 
and, moreover,
$$
x(t)=  \int_0^t\frac{(t-s)^{k-1}}{(k-1)!}y(s)ds=:(Uy)(t).
$$ 
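Here $U$ is given by Cauchy's formula for repeated integration; indeed, 
differentiating under the integral sign,
\[
(Uy)^{(m)}(t)=\int_0^t\frac{(t-s)^{k-1-m}}{(k-1-m)!}y(s)ds,\quad m=0,1,\dots,k-1,
\]
so that $(Uy)^{(m)}(0)=0$ for $m=0,\dots,k-1$ and $(Uy)^{(k)}=y$ almost everywhere.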
Thus  problem \eqref{es1} is transformed into  problem \eqref{i5}, where
$Au:=W\circ U(u)$, with $A$  continuous, compact and, obviously, causal. 

 Take any sequence $(\varepsilon_n)$ converging to 0. Then applying the
 results above, we obtain the existence of a sequence of solutions $y_n$ 
of \eqref{e1} satisfying $(D_{0}^{\alpha-1}y_n)(t)\big|_{t=0}=b$ and 
converging in the $L_1^{\tau}$-sense to a solution of equation $y=Ay$. 
We set 
$$
x_n:=Uy_n \quad\text{and}\quad x:=Uy.
$$ 
Then, evidently,  $x_n$ satisfies the problem \eqref{es1} and  
$$
x^{(k)}(t)=y(t)=(Ay)(t)=W(Uy)(t)=Wx(t),
$$ 
for a.a. $t\in[0,\tau]$ and
$x^{(j)}(0)=0$, $j=0, 1,\dots, k-1$. Finally, we observe that
$$
\|x_n-x\|^{\tau}_{\infty}
=\sup_{t\in[0,\tau]}\big\|\int_0^t\frac{(t-s)^{k-1}}{(k-1)!}[y_n(s)-y(s)]ds
\big\|_X
\leq\frac{\tau^{k-1}}{(k-1)!}\|y_n-y\|_1^{\tau}.
$$ 
The right-hand side tends to zero. The proof is complete.
\end{proof}

\section{Auxiliary Lemmas}

Before giving the proof of Theorem \ref{t2}, we need some auxiliary facts 
concerning the series
\begin{equation}\label{s1}
\sum_{j=1}^{\infty}\frac{(-1)^{j-1}s^{j\alpha-1}}{\varepsilon^j\Gamma(j\alpha)}, 
\quad s>0.
\end{equation}
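Although it will not be needed in the sequel, we note that, in terms of the 
standard two-parameter Mittag-Leffler function 
$E_{\alpha,\beta}(z):=\sum_{j=0}^{\infty}z^j/\Gamma(j\alpha+\beta)$, 
the series \eqref{s1} can be written compactly as
\[
\sum_{j=1}^{\infty}\frac{(-1)^{j-1}s^{j\alpha-1}}{\varepsilon^j\Gamma(j\alpha)}
=\frac{s^{\alpha-1}}{\varepsilon}\,E_{\alpha,\alpha}\Big(-\frac{s^{\alpha}}{\varepsilon}\Big),
\quad s>0.
\]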

\begin{lemma}\label{lem1} 
The series \eqref{s1} converges absolutely and uniformly on compact subsets 
of $(0,+\infty)$ to a function $k(s;\varepsilon)$, $s>0$, which is 
continuous and positive.
\end{lemma}

\begin{proof}  Define  the sets
$$
Q_1:=\{j\in\mathbb{Z}: \alpha\leq j\alpha<2\},\quad 
Q_k:=\{j\in\mathbb{Z}: k\leq j\alpha<k+1\}, \quad k=2, 3, \dots
$$ 
Obviously, every integer $j\geq1$ belongs to exactly one of the sets $Q_k$ and, 
for $k\geq 2$, the set $Q_k$ has at most 
$\mu:=[\frac{1}{\alpha}]+1$ elements. In absolute value, the series can be written as
$$
\sum_{j=1}^{\infty}\frac{s^{j\alpha-1}}{\varepsilon^j\Gamma(j\alpha)}
=\Lambda(s)+\sum_{k=3}^{\infty}\sum_{j\in Q_k}
\frac{s^{j\alpha-1}}{\varepsilon^j\Gamma(j\alpha)},
$$
where 
$$
\Lambda(s):=\sum_{k=1}^{2}\sum_{j\in Q_k}
\frac{s^{j\alpha-1}}{\varepsilon^j\Gamma(j\alpha)}, \quad s>0
$$ 
is an $L_1^T$ function, for any $T>0$.

Now, by using the fact that $(s+1)^{\alpha}>1>\varepsilon$ and the 
monotonicity of the function $\Gamma$ on the interval $[2,+\infty)$,  we obtain
\begin{align*} 
\sum_{j=1}^{\infty}\frac{(s+1)^{j\alpha-1}}{\varepsilon^j\Gamma(j\alpha)}
& \leq \Lambda(s+1)+ \sum_{k=3}^{\infty}\sum_{j\in Q_k}
 \frac{1}{s+1}\frac{(\frac{(s+1)^{\alpha}}{\varepsilon})^j}{\Gamma(k)}\\
&\leq\Lambda(s+1)+\sum_{k=3}^{\infty}\sum_{j\in Q_k}\frac{1}{s+1}
 \frac{(\frac{(s+1)^{\alpha}}{\varepsilon})^{\frac{k+1}{\alpha}}}{\Gamma(k)}\\
&=\Lambda(s+1)+\sum_{k=3}^{\infty}\sum_{j\in Q_k}\varepsilon^{-\frac{1}{\alpha}}
 \frac{(\frac{s+1}{\varepsilon^{1/\alpha}})^k}{\Gamma(k)}\\
&\leq \Lambda(s+1)+\mu\,\varepsilon^{-\frac{1}{\alpha}}\sum_{k=3}^{\infty}
 \frac{(\frac{s+1}{\varepsilon^{1/\alpha}})^k}{(k-1)!}\\
&=\Lambda(s+1)+\mu\,\varepsilon^{-\frac{2}{\alpha}}(s+1)\sum_{k=3}^{\infty}
 \frac{(\frac{s+1}{\varepsilon^{1/\alpha}})^{k-1}}{(k-1)!}\\
&\leq\Lambda(s+1)+\mu\,\varepsilon^{-\frac{2}{\alpha}}(s+1)
 \exp\Big({\frac{s+1}{\varepsilon^{1/\alpha}}}\Big).
\end{align*}
The right-hand side defines an $L_1^T$ function, for any $T>0$. 
Obviously, this proves the first part of the lemma. 

 It remains to show that
the function $k(\cdot;\varepsilon)$ is positive. 
Indeed, by the previous arguments, we can apply the Lebesgue Dominated
 Convergence Theorem and get,  for fixed $\theta\in[0,t]$, that
\begin{equation}\label{e5}
\begin{aligned}
\int_{t-\theta}^tk(s;\varepsilon)ds
&=\int_0^{\theta}k(t-s;\varepsilon)ds
 =\int_0^{\theta}\sum_{j=1}^{\infty}
 \frac{(-1)^{j-1}(t-s)^{j\alpha-1}}{\varepsilon^j\Gamma(j\alpha)}ds\\
&=\sum_{j=1}^{\infty}\frac{(-1)^{j}(t-\theta)^{j\alpha}}
 {\varepsilon^j\Gamma(j\alpha+1)}-\sum_{j=1}^{\infty}
 \frac{(-1)^{j}t^{j\alpha}}{\varepsilon^j\Gamma(j\alpha+1)}\\
&=\sum_{j=0}^{\infty}\frac{(-1)^{j}(t-\theta)^{j\alpha}}
 {\varepsilon^j\Gamma(j\alpha+1)}-\sum_{j=0}^{\infty}
 \frac{(-1)^{j}t^{j\alpha}}{\varepsilon^j\Gamma(j\alpha+1)}\\
&=E_{\alpha}(\frac{-(t-\theta)^{\alpha}}{\varepsilon})-E_{\alpha}
 (\frac{-t^{\alpha}}{\varepsilon}).
\end{aligned}
\end{equation}

 By using \eqref{r2}, relation \eqref{e5} gives
$$ 
\int_0^{\theta}\sum_{j=1}^{\infty}
 \frac{(-1)^{j-1}(t-s)^{j\alpha-1}}{\varepsilon^j
 \Gamma(j\alpha)}ds=\int_0^{+\infty}(e^{-(t-\theta)s}
 -e^{-ts})dF_{\alpha}(s)\geq 0.
$$ 
From the properties of $E_{\alpha}$ which we mentioned in Subsection \ref{p1},  
it follows that the quantity $E_{\alpha}(\frac{-t^{\alpha}}{\varepsilon})$ 
is positive, not greater than 1, and that it tends to zero monotonically 
when $t$ tends to $+\infty$. In particular, recalling that
\begin{equation}\label{e15}
\lim_{x\to+\infty}E_{\alpha}(-x)=0,
\end{equation} 
we have
\begin{gather}\label{e7}
0<E_{\alpha}(\frac{-t^{\alpha}}{\varepsilon})\leq 1, \\
\label{e8}\lim_{t\to+\infty}E_{\alpha}(\frac{-t^{\alpha}}{\varepsilon})=0.
\end{gather}
 Obviously, \eqref{e7} implies that
$$
0\leq\int_0^tk(s;\varepsilon)ds<1.
$$
Finally, since the function
\begin{equation}\label{e16}
t\to \int_0^{t}k(s;\varepsilon)ds=1-E_{\alpha}(\frac{-t^{\alpha}}{\varepsilon}), 
\quad t\geq 0
\end{equation} 
is increasing,  its derivative, namely the function  $k(t;\varepsilon)$, 
is positive. 
 \end{proof}

\begin{lemma} 
The following properties\footnote{These properties are enough to characterize 
the function $k$ as an approximate identity for the convolution,
 resembling the well-known Dirac sequences of convolution theory.} 
hold:
  \begin{equation}\label{e10}
\lim_{\varepsilon\to 0}\int_0^tk(s;\varepsilon)ds=1,
\end{equation} 
uniformly for $t$ in intervals of the form $[r,T]$, for all $r\in(0,T]$, and
   \begin{equation}\label{e11}
\lim_{\varepsilon\to 0}\int_{\delta}^tk(s;\varepsilon)ds=0,
\end{equation}
   for all $t\in(0,T]$ and  $\delta\in(0,t)$.
   For each $u\in L_1^T$ it holds that
 \begin{equation}\label{e12} 
\lim_{\varepsilon\to 0}\int_0^tk(t-s;\varepsilon)u(s)ds=u(t)
\quad\text{in the } L_1^T \text{ sense}.
\end{equation}
\end{lemma}

   \begin{proof}
Property \eqref{e10} is easily implied from \eqref{e15} and \eqref{e5}, 
while \eqref{e11} follows from \eqref{e5} and  the fact that
 $\int_{\delta}^tk(s;\varepsilon)ds=E_{\alpha}
(\frac{-{\delta}^{\alpha}}{\varepsilon})-E_{\alpha}(\frac{-t^{\alpha}}{\varepsilon})$.

 Next, let $u\in L_1^T$ and $\eta>0$. Extend $u$ from $[0,T]$ to $\mathbb{R}$   
by setting $\bar{u}(s)=0$, if $s\notin[0,T]$, and $\bar{u}(s)=u(s)$, $s\in[0,T]$. 
Then $\bar{u}$ is an element of $L_1(\mathbb{R},X)$ and, so, it satisfies
$\lim_{s\to0}\|\bar{u}(\cdot-s)-\bar{u}(\cdot)\|_1^T=0$
 (see, e.g., \cite[Theorem 1.4.2, p. 298]{cc}). This means that there is an 
$s_0>0$ such that
 $$
\|\bar{u}(\cdot-s)-\bar{u}(\cdot)\|_1^T\leq \eta, \quad 0\leq s\leq s_0.
$$ 
Take any $\delta\in(0,s_0]$. By \eqref{e10}, there is some 
$\varepsilon_{\delta}>0$, such that for all 
$\varepsilon\in(0,\varepsilon_{\delta}]$ it holds
$$
\big|\int_0^tk(t-s;\varepsilon)ds-1\big|<\eta, \quad t\in[{\delta},T].
$$
Hence, we have
$$
\big\|\int_0^tk(t-s;\varepsilon)u(t)ds-u(t)\big\|_X\leq\eta\|u(t)\|_X, \quad 
t\in [{\delta},T],
$$ 
or
\begin{equation}\label{e13} 
\big\|\int_0^t\big[k(s;\varepsilon)u(t)-\frac{1}{t}u(t)\big]ds\big\|_X
\leq\eta\|u(t)\|_X, \quad t\in  [{\delta},T].
\end{equation}

Taking into account Lemma \ref{lem1} (i.e. that $k$ is positive),   we observe that
\begin{equation}\label{16}
\begin{aligned}
&\int_{\delta}^T\big\|\int_0^tk(t-s;\varepsilon)u(s)ds-u(t)\big\|_Xdt\\
&=\int_{\delta}^T\big\|\int_0^t\big[k(s;\varepsilon)\bar{u}(t-s)
 -\frac{1}{t}\bar{u}(t)\big]ds\big\|_Xdt\\
&\leq \int_{\delta}^T\big\|\int_0^tk(s;\varepsilon)\bar{u}(t-s)ds
 -\int_0^tk(s;\varepsilon)\bar{u}(t)ds\big\|_Xdt\\
&\quad +\int_{\delta}^T\big\|\int_0^t\Big(k(s;\varepsilon)\bar{u}(t)
 -\frac{1}{t}\bar{u}(t)\Big)ds\big\|_Xdt\\
&\leq \int_{\delta}^T\big\|\int_0^tk(s;\varepsilon)
 [\bar{u}(t-s)-\bar{u}(t)]ds\big\|_Xdt+\eta\int_{\delta}^T\|\bar{u}(t)\|_Xdt\\
&\leq \int_{\delta}^T\int_0^{\delta}k(s;\varepsilon)
 \|\bar{u}(t-s)-\bar{u}(t)\|_X\,ds\,dt\\
&\quad +\int_{\delta}^T\int_{\delta}^tk(s;\varepsilon)\|\bar{u}(t-s)
 -\bar{u}(t)\|_X\,ds\,dt+\eta\|u\|_1^T.
\end{aligned}
\end{equation}

 We estimate the right-hand side of relation \eqref{16}. We have
\begin{align*}
&\int_{\delta}^T\int_0^{\delta}k(s;\varepsilon)\|\bar{u}(t-s)
 -\bar{u}(t)\|_X\,ds\,dt\\
&=\int_0^{\delta}k(s;\varepsilon)\int_{\delta}^T\|\bar{u}(t-s)-\bar{u}(t)\|_X\,dt\,ds\\
&\leq\int_0^{\delta}k(s;\varepsilon)\|\bar{u}(\cdot-s)-\bar{u}(\cdot)\|_1^Tds \\
&\leq \eta\int_0^{\delta}k(s;\varepsilon)ds.
\end{align*}
 Also
\begin{align*}
&\int_{\delta}^T\int_{\delta}^tk(s;\varepsilon)\|\bar{u}(t-s)
 -\bar{u}(t)\|_X\,ds\,dt\\
&= \int_{\delta}^Tk(s;\varepsilon)\int_{s}^T\|\bar{u}(t-s)-\bar{u}(t)\|_X\,dt\,ds\\
&\leq \int_{\delta}^T\int_0^Tk(s;\varepsilon)\big(\|\bar{u}(t-s)\|_X+\|\bar{u}(t)\|_X\big)\,dt\,ds\\
&\leq 2\|u\|_1^T\int_{\delta}^Tk(s;\varepsilon)ds.
\end{align*}
 Hence, \eqref{16} becomes
\begin{align*}
&\int_{\delta}^T\big\|\int_0^tk(t-s;\varepsilon)u(s)ds
 -u(t)\big\|_Xdt\\
&\leq \eta\int_0^{\delta}k(s;\varepsilon)ds+2\|u\|_1^T\int_{\delta}^T
k(s;\varepsilon)ds+\eta\|u\|_1^T.
\end{align*}
Now, in view of \eqref{e10} and \eqref{e11}, as $\varepsilon$ tends to 0 
the right-hand side tends to $\eta(1+\|u\|_1^T)$. 
Moreover, since $0\leq\int_0^tk(s;\varepsilon)ds<1$, we clearly have
$$
\int_0^{\delta}\big\|\int_0^tk(t-s;\varepsilon)u(s)ds
-u(t)\big\|_Xdt\leq 2\int_0^{\delta}\|u(s)\|_Xds.
$$
Since $\delta\in(0,s_0]$ is arbitrary and small, we obtain
 $$
\limsup_{\varepsilon\to 0}\int_0^T\big\|\int_0^tk(t-s;\varepsilon)u(s)ds
-u(t)\big\|_Xdt\leq\eta(1+\|u\|_1^T).
$$
 The fact that $\eta$ is arbitrary completes the proof of relation \eqref{e12}. 
\end{proof}

\section{Proof of Theorem \ref{t2}}

To simplify notation,  we set 
$$
\phi(t):=\frac{t^{\alpha-1}}{\Gamma(\alpha)}b, \quad t\in(0,T]
$$ 
 and observe that $\phi$ is an element of $L_1^T$, for all $T>0$. 
Also, consider the operator 
$$
(L_{\varepsilon}u)(t):=\frac{1}{\varepsilon\Gamma(\alpha)}
\int_0^t(t-s)^{\alpha-1}u(s)ds, \quad u\in L_1^T.
$$
Then relation \eqref{a1} takes the form
$$
y(t)=\phi(t)-(L_{\varepsilon}y)(t)+(L_{\varepsilon}Ay)(t)
$$ 
which, by iteration, for each $n=1, 2, \dots$, gives
\begin{equation}\label{e3}
y(t)=\sum_{j=0}^{n-1}(-1)^{j}(L_{\varepsilon}^{(j)}\phi)(t)
+(-1)^{n}(L_{\varepsilon}^{(n)}y)(t)
+\sum_{j=1}^n(-1)^{j-1}(L_{\varepsilon}^{(j)}Ay)(t).
\end{equation}
 Let $u\in L_1^T$. We observe that
$$
(L_{\varepsilon}^{(2)}u)(t)=\frac{1}{\varepsilon^2\Gamma(2\alpha)}
\int_0^t(t-s)^{2\alpha-1}u(s)ds.
$$
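This identity follows from Fubini's theorem together with the Beta integral: 
with the substitution $s=(1-\theta)r+\theta t$, $\theta\in[0,1]$,
\[
\int_r^t(t-s)^{\alpha-1}(s-r)^{\alpha-1}ds
=(t-r)^{2\alpha-1}B(\alpha,\alpha)
=\frac{\Gamma(\alpha)^2}{\Gamma(2\alpha)}(t-r)^{2\alpha-1},
\]
and the factor $\Gamma(\alpha)^2$ cancels against the two factors $\Gamma(\alpha)$ 
produced by the two applications of $L_{\varepsilon}$.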
By induction we obtain
$$
(L_{\varepsilon}^{(j)}u)(t)
=\frac{1}{\varepsilon^j\Gamma(j\alpha)}
\int_0^t(t-s)^{j\alpha-1}u(s)ds, \quad j=1, 2, \dots.
$$
Then we have
\begin{align*}
\|L_{\varepsilon}^{(j)}u\|_1^T
&=\int_0^T\big\|\frac{1}{\varepsilon^j\Gamma(j\alpha)}
 \int_0^t(t-s)^{j\alpha-1}u(s)ds\big\|_Xdt\\
&\leq\int_0^T\frac{1}{\varepsilon^j\Gamma(j\alpha)}
 \int_s^t(t-s)^{j\alpha-1}\|u(s)\|_X\,dt\,ds\\
&\leq \frac{T^{j\alpha}}{\varepsilon^j\Gamma(j\alpha+1)}\|u\|_1^T.
\end{align*}

 Since by definition 
$$
\sum_{j=0}^{+\infty}\frac{T^{j\alpha}}{\varepsilon^j\Gamma(j\alpha+1)}
=E_{\alpha}\Big(\frac{T^{\alpha}}{\varepsilon}\Big),
$$
where $E_{\alpha}$ is the Mittag-Leffler function, it follows that both 
series in \eqref{e3} converge in $L_1^T$ and, moreover, that
$$
\lim_j L_{\varepsilon}^{(j)}u=0.
$$ 
So the right side of \eqref{e3} (with $y$ replaced by $u$) converges to
$$
\sum_{j=0}^{\infty}(-1)^{j}(L_{\varepsilon}^{(j)}\phi)(t)
+\sum_{j=1}^{\infty}(-1)^{j-1}(L_{\varepsilon}^{(j)}Au)(t)=:Su(t)
$$
and, therefore, we obtain
\begin{equation}\label{ea8}
\begin{aligned}
Su(t)-\phi(t)&=\sum_{j=1}^{\infty}(-1)^{j-1}\big(L_{\varepsilon}^{(j)}
 (Au-\phi)\big)(t)\\
&=\sum_{j=1}^{\infty}(-1)^{j-1}\frac{1}{\varepsilon^j\Gamma(j\alpha)}
 \int_0^t(t-s)^{j\alpha-1}(Au(s)-\phi(s))ds\\
&=\int_0^t\sum_{j=1}^{\infty}\frac{(-1)^{j-1}(t-s)^{j\alpha-1}}
 {\varepsilon^j\Gamma(j\alpha)}(Au(s)-\phi(s))ds\\
&=\int_0^tk(t-s;\varepsilon)(Au(s)-\phi(s))ds,
\end{aligned}
\end{equation}
where
$$
k(s;\varepsilon):=\sum_{j=1}^{\infty}\frac{(-1)^{j-1}s^{j\alpha-1}}
{\varepsilon^j\Gamma(j\alpha)}. 
$$
The interchange of  integration and summation is permitted because of 
Lemma \ref{lem1}.
 From \eqref{ea8} and the fact that $k$ is positive, we obtain
\begin{equation} \label{e9}
\begin{aligned}
\|Su-\phi\|_1^T&=\int_0^T\|Su(t)-\phi(t)\|_Xdt\\
&\leq \int_0^T\int_0^tk(t-s;\varepsilon)\|Au(s)-\phi(s)\|_X\,ds\,dt\\
&=\int_0^T\int_s^Tk(t-s;\varepsilon)\|Au(s)-\phi(s)\|_X\,dt\,ds\\
&=\int_0^T\Big[1-E_{\alpha}\Big(\frac{-(T-s)^{\alpha}}{\varepsilon}\Big)
\Big]\|Au(s)-\phi(s)\|_Xds\\
&\leq \|Au-\phi\|_1^T.
\end{aligned}
\end{equation}

We claim that, for any  $R>0$, there exists  $\tau\in(0,T]$, such that in the 
space $L_1^{\tau}$, it holds
$$
S(\overline{B(\phi, R)})\subseteq \overline{B(\phi,R)}.
$$ 
By \eqref{e9}, to show this fact, it is sufficient to prove that there is 
 a $\tau\in(0,T]$, such that in the space $L_1^{\tau}$, it holds
\begin{equation}\label{e17}
A(\overline{B(\phi, R)})\subseteq \overline{B(\phi,R)}.
\end{equation}
 Here $\overline{B(\phi,R)}$ denotes the closed ball 
$\{u\in L_1^{\tau}: \|u-\phi\|_1^{\tau}\leq R\}$ in $L_1^{\tau}$. 
 Fix any $\zeta\in(0,\frac{R}{2}]$.
Since the set $A(\overline{B(\phi,R)})$ has compact closure, there is a 
finite $\zeta$-dense subset of it, say, 
 $Au_1, Au_2, \dots, Au_k\in A(\overline{B(\phi,R)})$.
Also, we can find $\tau\in(0,T]$ such that
$$
\|Au_j-\phi\|_1^{\tau}=\int_0^{\tau}\|(Au_j)(t)-\phi(t)\|_Xdt\leq\zeta, \quad 
j=1, 2, \dots, k.
$$
Take any $u\in \overline{B(\phi,R)}$. Then $Au\in A(\overline{B(\phi,R)})$ 
and, thus,  $\|Au-Au_j\|_1^{\tau}\leq \zeta$, for some $j$. Hence,
$$
\|Au-\phi\|_1^{\tau}\leq \|Au-Au_j\|_1^{\tau}
+\|Au_j-\phi\|_1^{\tau}\leq  2\zeta\leq R.
$$
Therefore \eqref{e17} is true.

 Because of the previous facts, the fixed point Theorem \ref{t1} applies  
and we conclude that there is $y_{\varepsilon}\in \overline{B(\phi,R)}$, 
such that
$$
y_{\varepsilon}(t)=(Sy_{\varepsilon})(t)
=\sum_{j=0}^{\infty}(-1)^{j}(L_{\varepsilon}^{(j)}\phi)(t)
+\sum_{j=1}^{\infty}(-1)^{j-1}(L_{\varepsilon}^{(j)}Ay_{\varepsilon})(t), 
\quad t\in[0,\tau],
$$ 
or, by \eqref{ea8},
$$
y_{\varepsilon}(t)-\phi(t)=\int_0^tk(t-s;\varepsilon)(Ay_{\varepsilon}(s)
-\phi(s))ds, \quad t\in[0, \tau].
$$
 Next, we take any sequence $\varepsilon_n$ tending to 0, and denote by 
$y_n$ the solution $ y_{\varepsilon_n}$. 
Hence we have
\begin{equation}\label{e19}
y_{n}(t)-\phi(t)=\int_0^tk(t-s;\varepsilon_n)(Ay_{n}(s)-\phi(s))ds, \quad 
t\in[0, \tau].
\end{equation}
 By the relative compactness of the set $A(\overline{B(\phi,R)})$, 
we may assume (passing to a subsequence, if necessary) that the sequence 
$(Ay_{n})$ converges to some $y\in L_1^{\tau}$.
Then, for almost all $t\in[0,\tau]$,  from \eqref{e19} we obtain
$$
y_n(t)-y(t)=\int_0^tk(t-s;\varepsilon_n)(Ay_n(s)-\phi(s))ds-(y(t)-\phi(t))
$$ 
and, therefore, it follows that
\begin{align*}
\|y_n-y\|_1^{\tau}
&=\int_0^{\tau}\big\|\Big(\int_0^tk(t-s;\varepsilon_n)
 \big[Ay_n(s)-\phi(s)\big]ds\Big)-(y(t)-\phi(t))\big\|_Xdt\\
&\leq \int_0^{\tau}\int_0^tk(t-s;\varepsilon_n)\|Ay_n(s)-y(s)\|_X\,ds\,dt\\
&\quad +\int_0^{\tau}\big\|\int_0^tk(t-s;\varepsilon_n)(y(s)-\phi(s))ds
 -(y(t)-\phi(t))\big\|_Xdt.
\end{align*}
For the first integral on the right side we have
\begin{align*}
&\int_0^{\tau}\int_0^tk(s;\varepsilon_n)\big\|(Ay_n)(t-s)
 -y(t-s)\big\|_X \,ds\,dt \\
&= \int_0^{\tau}\int_s^{\tau}k(s;\varepsilon_n)
\big\|(Ay_n)(t-s)-y(t-s)\big\|_X\,dt\,ds
 \\
&\leq\int_0^{\tau}k(s;\varepsilon_n)\int_s^{\tau}\big\|(Ay_n)(t-s)-y(t-s)
 \big\|_X\,dt\,ds\\
&=\int_0^{\tau}k(s;\varepsilon_n)\int_0^{\tau-s}\big\|(Ay_n)(\xi)-y(\xi)
 \big\|_Xd\xi ds\\
&\leq \int_0^{\tau}k(s;\varepsilon_n)ds\,\|Ay_n-y\|_1^{\tau},
\end{align*}
 which tends to $0$, since $\int_0^{\tau}k(s;\varepsilon_n)ds\leq 1$ and 
$\|Ay_n-y\|_1^{\tau}\to 0$.
Also, the sequence 
$$
\int_0^{\tau}\big\|\int_0^tk(t-s;\varepsilon_n)(y(s)
-\phi(s))ds-(y(t)-\phi(t))\big\|_Xdt
$$
 tends to $0$, because of \eqref{e12}.
 Hence,  we have $\lim y_n=y$ and, by the continuity of $A$, it follows that  
$y=\lim Ay_n=Ay$. The proof is complete.

\begin{thebibliography}{00}

\bibitem{a3} Ya. I. Alber;
 Recurrence relations and variational inequalities, \emph{Soviet Math. Dokl.},
\textbf{27} (1983), 511-517.

\bibitem{a2} Yakov Alber,  Simeon Reich, David Shoikhet;
  Iterative approximations of null points of uniformly accretive operators 
with estimates of the convergence rate,
\emph{Commun. Appl. Nonlinear Anal.} \textbf{3} (8-9) (2002), 1107--1124.

\bibitem{bll} Cara D. Brooks, Patricia K. Lamm, Xiaoyue Luo;
 Local regularization of nonlinear Volterra equations of Hammerstein type,
 \emph{Integral Equations Appl.}, 09/2010; 22(2010). DOI: 10.1216/JIE-2010-22-3-393.

\bibitem{a1} F. E.  Browder, W. V. Petryshyn;
 Construction of fixed points of nonlinear mappings in Hilbert space, 
\emph{J. Math. Anal. Appl.}, \textbf{20} (1967), 197-228.

\bibitem{cord} C. Corduneanu;
 \emph{Integral Equations and Applications}, Cambridge Univ. Press, 
 New York, 1991.

\bibitem{cc} Misha Cotlar, Roberto Cignoli;
 \emph{An Introduction to Functional Analysis}, American Elsevier Publ. Co. 
New York, 1974.

\bibitem{he} Heinz W. Engl;
 On the choice of the regularization parameter for iterated Tikhonov 
regularization of ill-posed problems, \emph{Journal of Approx. Theory} 
 49 (1987), 55-63.

\bibitem{hls} Markus Haltmeier, Antonio Leit\~ao, Otmar Scherzer;
 Kaczmarz methods for regularizing nonlinear ill-posed equations I: 
Convergence analysis, \emph{Inverse Problems and Imaging}	1(2007), 289-298.

\bibitem{gg} A. L. Gaponenko, Yu L. Gaponenko;
 A method of regularization for operator equations of the first kind, 
\emph{Zh. Vychisl. Mat. Mat. Fiz.}, 16 (1976), 577-584.

\bibitem{cwg} C. W. Groetsch;
 Integral equations of the first kind, inverse problems and regularization: 
a crash course, \emph{Journal of Physics: Conference Series} 73 (2007) 1-32.

\bibitem{gh} Nicola Guglielmi, Ernst Hairer;
 Regularization of neutral delay differential equations with several delays, 
\emph{J. Dynam. Differential Equations}  7, (2012),  1-26.

\bibitem{jum} G. Jumarie;
 Modified Riemann-Liouville Derivative and Fractional Taylor Series 
of Nondifferentiable Functions Further Results,
\emph{Comput. Math. Appl.} 51 (2006) 1367-1376.

\bibitem{kar3} George L. Karakostas;
 Causal operators and Topological Dynamics, \emph{Ann. Matematica Pura ed Appl.}  
Vol. CXXXI, 1982, 1-27.

\bibitem{kar4} George L. Karakostas;
 Strong approximation of the solutions of a system of operator equations in
 Hilbert spaces, \emph{J. Difference Equ. Appl.}  12 (2006), 619-632.

\bibitem{lak} V. Lakshmikantham, A. S. Vatsala;
 General uniqueness and monotone iterative technique for fractional differential 
equations, \emph{Appl. Math. Lett.} 21 (2008), 828-834.

\bibitem{lamm} Patricia K. Lamm;
 A Survey of Regularization Methods for First-Kind Volterra Equations,
Mathematics Dept., Michigan State University, E. Lansing, MI 48824-1027 USA, 
http://www.mth.msu.edu/$\sim$lamm (May 19, 2015).

\bibitem{lin} Ping Lin;
 Regularization methods for differential equations and their numerical solution, 
Ph. D. Thesis, The University of British Columbia, 1995.

\bibitem{mar} R. M\"arz;
 \emph{Numerical methods for differential-algebraic equations, Part I: 
Characterizing DAEs}, Preprint No. 91-32/I, Humboldt Universit\"at zu Berlin, 1991.

\bibitem{men} Abdelaziz Mennouni;
 A regularization procedure for solving some singular integral equations 
of the second kind, \emph{Internat. J. Difference Equations}  8 (2013), 71-76.

\bibitem{miro} Kenneth S. Miller, Bertram Ross;
 \emph{An Introduction to the Fractional Calculus and Fractional Differential 
Equations}, John Wiley and Sons, Inc. New York,  1993.

\bibitem{neu} A. Neubauer;
 Tikhonov-Regularization of ill-Posed Linear Operator Equations on Closed 
Convex Sets, \emph{J. Approx. Theory}  53(1988), 304-320.

\bibitem{amw} Abdul-Majid Wazwaz;
 Solving Schl\"omilch's integral equation by the regularization-Adomian method, 
\emph{Rom. Journ. Phys.}, 60 (2015), 56-72.

\bibitem{new} L. W. Neustadt;
 On the solutions of certain integral like operator equations. 
Existence, uniqueness and dependence theorem, \emph{Arch. Rat. Mech. Anal.}, 
38 (1970), 131-160.

\bibitem{po} Igor Podlubny;
 \emph{Fractional Differential Equations}, Mathematics in Science and Engineering, 
 Vol. 118, Acad. Press,  1999.

\bibitem{poll}  Harry Pollard;
 The completely monotonic character of the Mittag-Leffler function 
$E_{\alpha}(x)$, \emph{Bull. Amer. Math. Soc.}
Vol. 54, (12), (1948), 1115-1116.

\bibitem{pop} E. Prempeh, I. Owusu-Mensay, K. Piesie-Frimbong;
 On the regularization of Hammerstein's type operator equations, 
\emph{Aust. J. Math. Anal. Appl.}, 11 (2014), 1-10.

\bibitem{sav} T. I. Savelova;
Optimal regularization of equations of the convolution type with random noise 
in the kernel and right-hand side, \emph{U.S.S.R. Comput. Math. Math. Phys.} 
18(1978), 1-7.

\bibitem{sav1} T. I. Savelova;
 Regularization of non-linear integral equations of the convolution type, 
\emph{U.S.S.R. Comput. Math. Math. Phys.} 19(1979), 20-27.

\bibitem{a4} Shiro Ishikawa;
 Fixed points by a new iteration method, \emph{Proc. Amer. Math. Soc.}, 
\textbf{44}(1) (1974), 147-150.

\bibitem{ww} Jin Wen, Ting Wei;
 Regularized solution to the Fredholm integral equation of the 
first kind with noisy data, \emph{J. Appl. Math. and Informatics} 29(2011),  23-37.

\bibitem{wiki} Wikipedia, 
http://en.wikipedia.org/wiki/Particular\_values\_of\_the\_Gamma\_function
\#Other\_constants (May 26, 2015).

\end{thebibliography}

\end{document}
