\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2014 (2014), No. 21, pp. 1--14.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu}
\thanks{\copyright 2014 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2014/21\hfil Identification of the density dependent coefficient]
{Identification of the density dependent coefficient in an
inverse reaction-diffusion problem from a single boundary data}

\author[R. Tinaztepe, S. Tatar, S. Ulusoy \hfil EJDE-2014/21\hfilneg]
{Ramazan Tinaztepe, Salih Tatar, S\"uleyman Ulusoy}  % in alphabetical order

\address{Ramazan Tinaztepe \newline
Department of Mathematics, Faculty of Education,
Zirve University, Sahinbey \newline Gaziantep 27270, Turkey}
\email{ramazan.tinaztepe@zirve.edu.tr}
\urladdr{http://person.zirve.edu.tr/tinaztepe/}

\address{Salih Tatar \newline
 Department of Mathematics, Faculty of Education,
Zirve University,  Sahinbey \newline Gaziantep 27270, Turkey}
\email{salih.tatar@zirve.edu.tr}
\urladdr{http://person.zirve.edu.tr/statar/}

\address{S\"uleyman Ulusoy \newline
 Department of Mathematics,  Faculty of Education,
Zirve University,  Sahinbey \newline Gaziantep  27270, Turkey}
\email{suleyman.ulusoy@zirve.edu.tr}
\urladdr{http://person.zirve.edu.tr/ulusoy/}

\thanks{Submitted November 8, 2013. Published January 10, 2014.}
\subjclass[2000]{45K05, 35R30, 65M32}
\keywords{Fractional derivative; fractional Laplacian; weak solution;
\hfill\break\indent inverse problem; Mittag-Leffler function; Cauchy problem}

\begin{abstract}
 This study is devoted to the numerical solution of an inverse
 coefficient problem for a density dependent nonlinear reaction-diffusion
 equation. The method is based on  approximating the unknown coefficient
 by polynomials. An optimal idea for solving the inverse problem
 is to minimize an error functional between the output data and the
 additional data. For this purpose, we find a polynomial of degree
 $n$ that minimizes the error functional; i.e., the $n$th degree polynomial
 approximation of the unknown coefficient for the desired $n$.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{example}[theorem]{Example}
\allowdisplaybreaks

\section{Introduction} \label{sec:intro}

Problems involving the determination of  unknown coefficients in ordinary 
and partial differential equations by some additional conditions are 
well known in the mathematical literature as the inverse coefficient problems. 
These additional conditions may be given on the whole domain, on the boundary 
of the domain, or at the final time. As it is known, a direct problem 
aims to find a solution that satisfies given ordinary or partial differential 
equation with initial and boundary conditions. In some problems 
the main ordinary or partial differential equation and the initial and 
boundary conditions are not sufficient to obtain the solution, but, instead 
some additional conditions are required. Such problems are called the 
inverse problems. A problem is said to be well-posed or properly posed in 
the sense of Hadamard \cite{J} if it satisfies the following three conditions:
 there exists a solution of the problem (existence), there is at most one 
solution of the problem (uniqueness), and the solution depends continuously 
on the data (stability). If at least one of these properties does not hold, 
then the equation is called ill-posed. In this context, another definition 
of the inverse problems can be given as follows: If one of two problems 
which are inverse to each other is ill-posed, we call it the inverse problem 
and the other one the direct problem. It is well-known that inverse problems 
are often ill-posed.

In this paper, we consider the one dimensional nonlinear inverse 
reaction-diffusion problem
\begin{equation}\label{inverseprob}
 \begin{gathered}
u_t=(a(u) u_x)_x+| u | ^p, \quad (x,t)\in\Omega_T, p \geq 2, \\
u(x,0)=0, \quad x \in \overline \Omega,\\
-a(u(0,t))u_x(0,t)=g(t),  \quad   t \in [0,T],\\
u_x(1,t)=0,  \quad   t \in [0,T],\\
u(0,t)=f(t),  \quad  t \in [0,T]
\end{gathered} 
\end{equation}
where $\Omega$ is an open interval in $\mathbb{R}$, 
$\Omega_T:=\Omega\times (0,T)$ a domain in $\mathbb{R}\times \mathbb{R}^{+}$. 
The inverse problem here consists of determining the unknown coefficient 
$a(u)$ in the inverse problem \eqref{inverseprob}. The last condition,
 i.e., $u(0,t)=f(t)$ is taken to be an additional condition.  
In this context, for given inputs $a(u)$, $g(t)$ and $p$ the nonlinear 
problem \eqref{inverseprob}, without the additional condition, 
is defined as a direct (forward) problem. Henceforth, the expression 
{\it direct problem} will mean the studied problem without the additional 
condition. In the problem \eqref{inverseprob}, the compatibility 
condition $f(0)=0$ is satisfied.

The density dependent nonlinear reaction-diffusion equation 
$u_t=(a(u) u_x)_x+R(u)$ models many transport phenomena where 
$a(u)$ and $R(u)$ are called the diffusion coefficient and the reaction 
term respectively. The applications of this equation cover a wide 
variety of areas, including transport in porous media, population dynamics, 
plasma physics and combustion theory. The  density dependent nonlinear 
reaction-diffusion equation becomes $u_t=(a(u) u_x)_x+| u | ^p$
for the reaction term $R(u)=| u | ^p$ and has been used to model
many different applications. For instance, it is used to model the flow of 
groundwater in a homogeneous, isotropic, rigid and unsaturated porous 
medium  \cite{B}. If we choose the coordinate $x$ to measure the vertical 
height from ground level and pointing upward, the soil is represented by 
the vertical column $(-L, 0)$ \cite{BA}, however as noted in \cite{BA}, 
they assumed that absorption and chemical osmotic and thermal effects are 
negligible and  there is no source inside the material. But in our work, 
we consider the effect of the nonlinear source term as well. 
This equation can be obtained easily by  combining  Darcy's  law  and  
the continuity  equation.  First initial condition and third boundary 
conditions in \eqref{inverseprob} represent  the  initial  moisture  
content  and  moisture  content  at  $x  =  1$ respectively.  The  flux  
of  moisture  and  moisture  content  at $x  =  0 $ are  identified  by 
the second and last conditions in problem \eqref{inverseprob} respectively.
 In this context, source term can be interpreted as material source.

Some numerical methods have been introduced for nonlinear diffusion equations. 
In  \cite{C1, D1} for example, the authors considered an inverse problem for 
the  nonlinear diffusion equation
\[
u_t=(a(u)u_x)_x, \quad (x,t)\in\Omega_T.
\]
The inverse problem is reformulated as an auxiliary inverse problem.
 Also it is proved that this auxiliary inverse problem has at least one 
solution in a specific admissible class. Finally, the auxiliary inverse 
problem is approximated by an associated identification problem. 
In addition to these, the authors presented a numerical method to solve 
the inverse problem for a special class of admissible coefficients. 
In the method, the partial differential equation is solved directly employing 
finite difference approach  and   the optimization part is solved using the 
program ZXMIN of the IMSL package. Furthermore, the intersecting graph 
technique is defined as a second numerical method \cite{C1,D1}. 
We note that there are some other numerical methods introduced for numerical 
solution of the nonlinear inverse diffusion problem. A numerical algorithm 
based on the finite difference method and the least-squares scheme was 
given in  \cite{N1}. According to this algorithm, Taylor's expansion  
is employed to linearize nonlinear terms and then finite difference method 
is applied to discretize the problem domain. Also this approach rearranges 
the matrix form of the governing differential  equations and estimate the 
unknown coefficient. In \cite{N2}, the given algorithm is based on  
linearizing nonlinear terms by Taylor's series expansion and  removing the 
time-dependent terms by Laplace transform. Finite difference technique is 
used to  discretize the problem.

In this paper, we develop a numerical algorithm to solve  the inverse 
coefficient problem \eqref{inverseprob}. The algorithm is based on 
the optimization of an error functional between the output data and 
the additional data. The algorithm attempts to minimize the error functional 
by using polynomials of a predetermined degree $n$. In doing so, it is
assumed that the error functional is differentiable with respect to the 
coefficients of the polynomial which enables us to use the gradient descent 
method. The numerical experiments show  that the algorithm is effective in 
practical use. A detailed analysis of the factors affecting the algorithm 
is also given.

The remainder of this paper comprises five sections: In the next section, 
some theoretical background is recalled for the inverse problem including the 
existence and uniqueness of the solution. Our numerical method is given in 
section 3. Some numerical examples are presented to show the efficiency of 
the method in section 4. In section 5, analysis of the results is given. 
The final section of the paper contains discussions and comments on planned 
studies.

\section{Existence and uniqueness for the inverse coefficient problem}

The theoretical aspect of an inverse problem, similar to \eqref{inverseprob} 
is studied in \cite{FSS1}. The inverse problem \eqref{inverseprob} differs 
from the inverse problem in \cite{FSS1}  in that it imposes the condition 
$-a(u(0,t))u_x(0,t)=g(t)$, (see \cite{C1}); whereas in \cite{FSS1}, 
the condition $-a(u(0,t))u_t(0,t)=g(t)$,  (see \cite{APS}) is used instead.  
In \cite{FSS1}, the authors proved that the inverse problem has a unique 
solution under certain conditions. The existence and uniqueness theorem 
also holds for the inverse problem \eqref{inverseprob}.  
In this section, we present the existence and uniqueness theorem for the 
self-containment of the paper, but first we give some preliminaries.

 We define the following norms and function spaces:
\begin{gather*}
| u |_D=\sup \{u(s), s \in D  \},\\
H_{\alpha}(u)=\sup \big\{\frac{u(p)-u(q)}{d(p,q)^{\alpha}} 
: p,q \in D, p \ne q \big\},\\
| u |_\alpha=| u |_D+H_{\alpha}(u),\quad 
| u |_{1+\alpha}=| u |_\alpha+\big| \frac{\partial u}{\partial x} \big|_\alpha,\\
| u |_{2+\alpha}=| u |_\alpha+\big| \frac{\partial u}{\partial x}  \big|_\alpha
+\big| \frac{\partial^2 u}{\partial x^2 } \big|_\alpha
+\big| \frac{\partial u}{\partial t}  \big|_\alpha,
\end{gather*}
 where $D=\Omega_T$, $d(p,q)$ is the usual Euclidean metric for the points 
$p$ and $q$ in $D$ and $\alpha>0$ is a constant. The space of all functions 
$u$ for which $| u |_{2+\alpha}<\infty$ is denoted by $C_{2+\alpha}(D)$. 
In  \cite{FRD}, it is proved that the space $C_{2+\alpha}(D)$ is a Banach 
space with the corresponding norm.

 \begin{definition} \label{def1}\rm
A set $\mathbb{A}$ satisfying the following conditions is called the 
{\it class of admissible coefficients}  for the inverse coefficient 
problem \eqref{inverseprob}
\begin{itemize}
\item[(C1)] $a \in  C_{2+\alpha}(I)$ with $| a |_{2+\alpha} \leq c_1$, 
\item[(C2)] $\nu \leq a \leq \mu$ and $a'(s)>0$,  for $s \in I$,
\item[(C3)] $| a' | \leq \delta$ and $| a'' | \leq \delta$ for $s \in I$,
\end{itemize}
where $\alpha \in (0,1)$, $I$ is a closed interval, $a:I \to \mathbb{R}$ 
and $c_1, \nu, \mu, \delta$ are positive constants.
\end{definition}

Inspired by \cite{C1,D1},  the authors in \cite{FSS1} use 
the transformation $v(x,t)=T_a(u(x,t))=\int_0^{u(x,t)}a(s)ds$ to transform 
the inverse problem \eqref{inverseprob} into the problem
\begin{equation} \label{newinverseprob}
\begin{gathered}
v_t=a(T_a^{-1}(v))v_{xx}+a(T_a^{-1}(v))| (T_a^{-1}(v)) |^p,\quad
(x,t)\in\Omega_T,~p\geq 2, \\
v(x,0)=0,\quad x \in \overline \Omega,\\
-v_x(0,t)=g(t),\quad t \in [0,T],\\
v_x(1,t)=0,\quad t \in [0,T],\\
v(0,t)=F(t),\quad t \in [0,T]
\end{gathered}
\end{equation}
where  $F(t)= \int_0^{f(t)}a(s)ds$. In problem \eqref{newinverseprob},
the compatibility condition $F(0)=0$ also holds. We note that
$\frac {d}{du}T_a(u)\geq\nu>0$ implies that $T_a(u(x,t))$ is invertible.
So the term $a(T_a^{-1}(v))$ in \eqref{newinverseprob} makes sense.
It is clear that the unknown coefficient is not in divergence form
in \eqref{newinverseprob}, that is why the inverse problem  \eqref{inverseprob}
 is needed to be transformed into a new one.  Moreover, determination of
the unknown coefficient $a(u)$ in the problem \eqref{newinverseprob}
is equivalent to determination of the unknown coefficient $A(v):=a(T_a^{-1}(v))$
in the problem \eqref{newinverseprob}. Therefore the authors study
the inverse problem \eqref{newinverseprob} instead of \eqref{inverseprob}.
 Before we state the existence and uniqueness theorem, we need the following
lemmas for the functions that belong to the class of admissible coefficients
$\mathbb{A}$.

 \begin{lemma}[\cite{C1}] \label{lem1}  
For each $a \in \mathbb{A}$, there exists a unique function $p_a(u,v)$ 
defined on $I \times I$ such that $p_a(u,v)$ is a number between $u$ and $v$. 
Moreover, the following equality holds
\[
a(u)-a(v)=a'(p_a(u,v))(u-v).
\]
\end{lemma}

It is important to emphasize that the above lemma can be applied to 
$T_a^{-1}(v)$. Because the following equalities imply that the inverse 
function $T_a^{-1}(v)$ also belongs to the set $\mathbb{A}$,
\begin{gather*}
\frac{\partial}{\partial v}  (T_a^{-1}(v))=\frac{1}{T_a'(v)}=\frac{1}{a(v)},\\
\frac{\partial^2}{\partial v^2} (T_a^{-1}(v))
= \frac{\partial}{\partial v}  \frac{1}{a(v)} =-\frac{a'(v)}{a(v)^2}.
\end{gather*}

Hence we have the following lemma.

 \begin{lemma}[\cite{C1}] \label{lem2}  
There exists a unique function $q_a(\cdot,\cdot)$ such that $q_a(u,v)$ 
is a number between $u$ and $v$. Moreover, the following equality holds,
\[
T_a^{-1}(u)-T_a^{-1}(v)= (T_a^{-1})'(q_a(u,v))(u-v).
\]
\end{lemma}

 \begin{lemma}[\cite{APS}] \label{lem3}  
 Suppose that $\{w_n\}$ is a bounded and monotone increasing sequence of 
functions in $C_{2+\alpha}(\Omega_T)$. Then, there exists a function
 $w \in C_{2+\alpha}(\Omega_T) $ such that 
$D^{\beta}D_t^j w_n \to D^{\beta}D_t^j w, |\beta| \leq 2$, $0 \leq j \leq 1$ 
uniformly (on compact subsets of $D$), where 
$D^{\beta} u(x,t)=\frac{\partial^  {| \beta |} }{\partial x_1^{m_1}
\dots\partial x_n^{m_n}}u(x,t), \beta=(m_1 ,m_2,\dots, m_n), | \beta |
=m_1+m_2+\dots+m_n $ and $D_t^{m} u(x,t)=\frac{\partial ^m}{\partial t^m}u(x,t)$.
\end{lemma}

Using the above lemmas, following \cite{FSS1} closely, it can be proved 
that the inverse coefficient problem \eqref{newinverseprob} has 
a unique solution under certain conditions. This is stated in the following 
theorem. For the sake of completeness we provide a sketch of the proof.

\begin{theorem} \label{thm1}  
Assume that $\frac{dF}{dt}$ and $g(t)$ are positive continuous functions 
on $[0,T]$ and $C^1([0,T])$ respectively. Then the inverse problem 
\eqref{newinverseprob} (equivalently, \eqref{inverseprob} ) 
has a unique solution.
\end{theorem}

\begin{proof}[Sketch of the proof]
{\it Step 1 (Existence).} Let ${\hat{v}_0}=0$ and ${\hat{v}_n}$,
 $n=1,2,\dots , $ be solution of the  problem
\begin{gather*}
 (\hat {v}_n)_t=a(T_a^{-1}((\hat {v}_{n-1})))(\hat {v}_n)_{xx}
+a(T_a^{-1}((\hat {v}_{n-1})))| (T_a^{-1}((\hat {v}_{n-1}))) |^p,
\; (x,t)\in\Omega_T,\, p\geq 2, \\
\hat {v}_n(x,0)=0,\quad x \in \overline \Omega,\\
-(\hat {v}_n)_x(0,t)=g(t),\quad t \in [0,T],\\
(\hat {v}_n)_x(1,t)=0,\quad t \in [0,T],\\
\hat {v}_n(0,t)=F(t),\quad t \in [0,T].
\end{gather*}
First it is not difficult to show that the sequence $\{\hat {v}_n\}$ 
is monotone increasing. Also, by applying Lemma \ref{lem3} for $\beta=1$ 
and $0 \leq j \leq 1$, we deduce
\[
D^{\beta}D_t^j {\hat{v}_{n}} \to D^{\beta}D_t^j{\hat{v}}.
\]
Since ${\hat{v}_{n}} $ is a solution of the problem in Step 1, we have
\begin{equation} \label{e2.2}
(\hat {v}_n)_t=a(T_a^{-1}((\hat {v}_{n-1})))(\hat {v}_n)_{xx}+a(T_a^{-1}
((\hat {v}_{n-1})))| (T_a^{-1}((\hat {v}_{n-1}))) |^p.
\end{equation}
Letting $n \to \infty$, we deduce that $\hat {v}$ is a solution
of \eqref{newinverseprob}.

 {\it Step 2  (Uniqueness).} Suppose $v(x,t)$ and $u(x,t)$ are two solutions 
of \eqref{newinverseprob}. Let $z(x,t)=v(x,t)-u(x,t)$. Then $z(x,t)$ must 
satisfy the  problem
\begin{equation} \label{e2.3}
\begin{gathered}
z_t=a(T_a^{-1}(v))z_{xx}+C_*(x,t)z,\\
z(x,0)=0,\quad x \in \overline \Omega,\\
z_x(0,t)=z_x(1,t)=0,\quad t \in [0,T],\\
z(0,t)=0,\quad t \in [0,T],
\end{gathered}
\end{equation}
where
\begin{gather*}
C_*(x,t)=C(x,t)+  \frac{h'(T_a^{-1}(\bar u))}{a(q_a(v(x,t),u(x,t)))},  \\
C(x,t)= \frac{a'\Big(p_a\Big(T_a^{-1}(v(x,t)),T_a^{-1}(u(x,t))\Big)\Big)}
{a\Big(q_a\Big(v(x,t),u(x,t)\Big)\Big)}.
\end{gather*}
By using the maximum principle we conclude that $z(x,t) \equiv 0$.
Therefore the solution of problem \eqref{newinverseprob} must be unique.
\end{proof}

\section{Overview of the method} \label{sec:over}

In this section, we present our numerical method. The essence of the method 
is to approximate the unknown diffusion coefficient $a(u)$ by polynomials.
 Since the unknown diffusion coefficient $a(u)$ is continuous on a compact 
domain $\Omega_{T}$ in the problem \eqref{inverseprob}, there exists a sequence of polynomials 
converging to $a(u)$.
However, finding such a sequence which guarantees the solution of
the inverse problem is difficult. It is known that the direct problem has 
a unique solution if $a(u)$ satisfies certain conditions \cite{FRD}. 

Our starting point is that the correct $a(u)$ will yield the solution
satisfying the condition $u(0,t)=f(t)$, hence $a(u)$ will minimize
the functional
\[
F(c)=\| u(c,0,t)-f(t)\| _2^2,
\]
where $u(c,x,t)$ is the solution of the direct problem
with the diffusion coefficient $c(u)$ and $\| \cdot\| _2$
is the $L^2$ norm on $\Omega$. Hence, our strategy is to find a polynomial
of degree $n$ that minimizes $F(c)$, i.e., the $n$th degree polynomial
approximation of $a(u)$ for the desired $n$. From now on we take
$c(u)=c_{0}+c_{1}u+\dots+c_{n}u^{n}$ as $c=(c_{0},\dots,c_{n})$,
hence $F(c)$ is a function of $n$ variables. To overcome the ill-posedness
of the inverse problem, Tikhonov regularization is applied. 
A regularization term with a regularization parameter $\lambda$ 
is added to $F(c)$
\[
G(c)=\| u(c,0,t)-f(t)\| _2^2+\lambda\| c\| ^2,
\]
 where $\| c\| $ denotes the Euclidean
norm of $c$. From now on, we fix $n$ and $\lambda$ and we leave
the discussions about the regularization parameter to the next section. 

The method for minimizing $G(c)$ depends on the properties of $F(c)$,
e.g., convexity, differentiability  etc. In our case, the convexity
or differentiability of $F(c)$ is not clear due to the term $u(c,x,t)$.
However, we do not envision a major drawback in assuming the differentiability
of $F(c)$ in numerical implementations. For this reason, we proceed
the minimization of $G(c)$ by the steepest descent method which will
utilize the gradient of $F$. 

In this method, the algorithm starts with an initial point $b_{0}$,
then the point providing the minimum is approximated by the points
\[
b_{i+1}=b_{i}+\Delta b_{i},
\]
 where $\Delta b_{i}$ is the feasible direction which minimizes
\[
E(\Delta b)=G(b_{i}+\Delta b).
\]
This procedure is repeated until a stop criterion is satisfied; i.e.,
$\| \Delta b_{i}\| <\epsilon$ or $|G(b_{i+1})-G(b_{i})|<\epsilon$
or a certain number of iterations. In the minimization of $E(\Delta b)$,
we use the following estimate on $u(b_{i}+\Delta b,0,t)$;
\[
u(b_{i}+\Delta b,0,t)\simeq u(b_{i},0,t)+\nabla u(b_{i},0,t)\cdot\Delta b,
\]
 where $\nabla$ denotes the gradient of $u(b,0,t)$ with respect to
$b$. Hence $E(\Delta b)$ turns out to be
\[
E(\Delta b)=\| \nabla u(b_{i},0,t)\cdot\Delta b+u(b_{i},0,t)-f(t)\| _2^2
+\lambda\| \Delta b\| _2^2.
\]

In numerical calculations, we note that $\| \cdot\| _2$
can be discretized by using a finite number of points in $[0,T]$,
i.e., for $t_{1}=0<t_2<\dots<t_{q}=T$, hence $E(\Delta b)$
has its new form as
\begin{equation}
E(\Delta b)\simeq\sum_{k=1}^{q}(u(b_{i},0,t_{k})
+\nabla u(b_{i},0,t_{k})\cdot\Delta b-f(t_{k}))^2
+\lambda\| \Delta b\| _2^2. \label{discretization}
\end{equation}

Now the minimization of this problem is a least squares problem whose
solution leads to the following normal equation (see \cite{Kirsch})
\[
(\lambda I+A^{T}A)\Delta b=A^{T}K,
\]
 where
\begin{gather*}
A=[\nabla u(b_{i},0,t_{1})^{T}\dots\nabla u(b_{i},0,t_{q})^{T}], \\
K=\left[u(b_{i},0,t_{1})-f(t_{1})\dots u(b_{i},0,t_{q})-f(t_{q})\right]^{T}.
\end{gather*}
 Now the optimal direction is found by
\begin{equation}
\Delta b=(\lambda I+A^{T}A)^{-1}A^{T}K.\label{normal eq}
\end{equation}
 In forming $A$, the computation (or estimation) of the $s$th component
of the vector $\nabla u(b_{i},0,t_{k})$ can be achieved by
\begin{equation}
\frac{u(b_{i}+he_{s},0,t_{k})-u(b_{i},0,t_{k})}{h}\label{diffstep},
\end{equation}
 where $e_{s}$ is the standard unit vector whose $s$th component
is 1 and $h$ is the differential step. Now we give the algorithm.
\begin{description}
\item[Step 1]  Set $b_{0}$, $n$, $\lambda$ and a stopping criterion
$k$ or $\epsilon$ (iteration number less than $k$ or size of 
$\|\Delta b_i\|\leq \epsilon$).

\item[Step 2]  Calculate $\Delta b_{i}$ using \eqref{normal eq}
and set $b_{i+1}=b_{i}+\Delta b_{i}$.

\item[Step 3]  Stop when the criterion is achieved.
\end{description}


\section{Numerical examples with noisy and noise-free data}
\label{sec:numer}

In this section we examine the algorithm with three inverse problems. 
The computations have been carried out in MATLAB. In solving the direct 
problem for each value of $c$, MATLAB PDE solver is used.
All examples are considered in the following form where $a(u)$ is
to be found
\begin{equation} \label{e4.1}
 \begin{gathered}
u_t=(a(u)u_x)_x+|u|^{p}+H(x,t),\quad (x,t)\in(0,1)\times(0,1),\\
u(x,0)=0,\quad x\in[0,1],\\
-a(u(0,t))u_x(0,t)=g(t),\quad t\in[0,1],\\
u_x(1,t)=0,\quad t\in[0,1],\\
u(0,t)=f(t),\quad t\in[0,1].
\end{gathered}
\end{equation}

Note that in the problem statement above, the term $H(x,t)$ has been
added to the main equation. This  is due to the difficulty in  finding 
an analytical solution of $u_t=(a(u)u_x)_x+|u|^{p}$.
In particular, it is done to check  the algorithm for $a(u)=e^{u}$ which 
brings a nonzero $H(x,t)$. The algorithm has also been tested with the
 analytic solution in the second example where $a(u)=e^{u}$. However, the
numerical solution of the direct problem is used in all examples.

Due to the discretization of the problem, many variables appear in computations.
These factors and their values in our computations are listed below:
\begin{enumerate}
\item The degree of the polynomial $c(u)$ to approximate $a(u)$: $n=2,3,4,5$ 
and 6 are taken in the examples.

\item Initial guess for the coefficients of $c(u)$:  All initial guesses
for the coefficients are taken to be vectors composed of $1$'s to get an 
objective observation.

\item Differential step $h$ in \eqref{diffstep}: $h=0.1$, $h=0.01$
are taken in the examples.

\item Number of $t$ points; $q$ in \eqref{discretization}: $q=10$ and $q=100$
are taken in the examples.

\item Number of $(x, t)$ points in mesh grid used in Matlab PDE solver:  
taken to be $q \times q$ where $q$ is already determined in (4).

\item Stopping criterion: $\| \Delta b_{i}\| <\epsilon=0.01$
or maximum iteration number $M=100$.

\item Regularization parameter $\lambda$ in \eqref{discretization}:  
$\lambda$ is taken to be zero in the noise-free examples, but an optimal 
$\lambda $ is searched to deal with noisy data.
\end{enumerate}

In our examples, the correct $a(u)$ is already known and  $u(0,t)$ is extracted 
from the numerical  solution of the direct problem for the correct $a(u)$. 
In the second example $u(x,t)$ is given analytically, the results for the 
analytical solution is also provided. The expected solution for $a(u)$ is 
of $n$th degree Taylor polynomial for given $n$.

\begin{example} \label{examp1} \rm
$p=2.5$, $g(t)=\sin t$,  $H(x,t)=0$, and $u(0,t)=f(t)$ is found numerically.  
The correct solution is  $a(u)=1+2u+3u^2+u^{3}$. See Table \ref{table1}.
\end{example}

\begin{table}[ht]
\caption{Initial guesses and results for $n=2,3,4,5$ and $6$}
\label{table1}
\begin{center}
\begin{tabular}{|c|c|}
\hline
Initial guess  & $h=0.1,q=10$  \\
\hline
(1,1)  & (0.8329, 3.7928 )   \\
\hline
(1,1,1)  & (1.0094, 1.8164, 3.8355)   \\
\hline
(1,1,1,1)  & (1.0000, 2.0000, 3.0001, 0.9999)   \\
\hline
(1,1,1,1,1)  & (1.0000, 2.0000, 2.9999, 1.0004, -0.0005 )   \\
\hline
(1,1,1,1,1,1)  & (1.0000, 2.0000, 3.0000, 1.0000, -0.0001, 0.0001)  \\
\hline
%
   & $h=0.01, q=10$ \\
\hline
(1,1)    & (0.8315, 3.7928) \\
\hline
(1,1,1)    & (1.0093 1.8179 3.8332) \\
\hline
(1,1,1,1)  & (1.0000, 2.0000, 3.0000, 1.0000) \\
\hline
(1,1,1,1,1)   & (1.0000, 2.0000, 3.0000, 1.0000, -0.0000) \\
\hline
(1,1,1,1,1,1)    & (1.0000, 2.0000, 3.0000, 1.0000, -0.0000, 0.0000) \\
\hline
%

 & $h=0.1,q=100$  \\
\hline
(1,1)  & (0.8486, 3.6889)  \\
\hline
(1,1,1)  & (1.0077, 1.8393, 3.7754)  \\
\hline
(1,1,1,1)  & (1.0000,    2.0000,    3.0001,    0.9999)  \\
\hline
(1,1,1,1,1)  & (1.0000, 2.0000, 3.0000, 1.0002, -0.0002) \\
\hline
(1,1,1,1,1,1)  & (1.0000, 2.0000, 3.0000, 1.0001, -0.0001, 0.0000)  \\
\hline
%
   & $h=0.01,q=100$\\
\hline
(1,1)   & (0.8486, 3.6894)\\
\hline
(1,1,1)    & (1.0087,    1.8293,    3.7937)\\
\hline
(1,1,1,1)   & (1.0000, 2.0000, 3.0000, 1.0000)\\
\hline
(1,1,1,1,1)    & (1.0000, 2.0000, 3.0000, 1.0000, 0.0000)\\
\hline
(1,1,1,1,1,1)    & (1.0000, 2.0000, 3.0000, 1.0002, -0.0003, 0.0002)\\
\hline
\end{tabular}
\end{center}
\end{table}

\begin{example} \label{examp2} \rm
 $p=2$, $g(t)=t^2$, $H(x,t)$ is found accordingly and 
$u(x,t)=t^2(\frac{x^2}{2}-x)$, hence $u(0,t)=f(t)=0$.
The correct solution is  $a(u)=e^{u}$. The expected coefficients are 
the Taylor coefficients of $e^{u}$ which is 
$(1,1,0.5,\frac{1}{6},\frac{1}{24},\frac{1}{120} )$.  
Both results obtained from the analytical solution $u(0,t)=0$  
(See Table \ref{table2}) and the numerical solution for $u(0,t)$ 
(See Table \ref{table3}) are given. 
\end{example}

\begin{table}[ht]
\caption{Initial guesses and results for $n=2,3,4,5$}
\label{table2}
\begin{center}
\begin{tabular}{|c|c|}
\hline
Initial guess  & $h=0.1,q=10$   \\
\hline
(1,1)  & (0.9777, 0.7918)   \\
\hline
(1,1,1)  & (0.9791, 0.8209, 0.0966)   \\
\hline
(1,1,1,1)  & (0.9746, 0.6361, -1.4027, -3.0406)  \\
\hline
(1,1,1,1,1)  & (0.9706 0.3645 -5.4992 -23.2347 -30.5746)   \\
\hline
    & $h=0.01,q=10$ \\
\hline
(1,1)    & (0.9778 0.7919) \\
\hline
(1,1,1)  & (0.9792, 0.8216, 0.0988) \\
\hline
(1,1,1,1)  & (0.9997, 0.9940, 0.4622, 0.0832) \\
\hline
(1,1,1,1,1)    & (0.9702 0.3443 -5.7337 -24.0679 -31.3722) \\
\hline

 & $h=0.1,q=100$ \\
\hline
(1,1)  & (0.9944, 0.8693) \\
\hline
(1,1,1)  & (0.9996,    0.9894,    0.4232)  \\
\hline
(1,1,1,1)  &  (0.9997,    0.9940,    0.4622,    0.0832) \\
\hline
(1,1,1,1,1)  & (0.9996,    0.9904,    0.4082,   -0.1814,   -0.3939 )\\
\hline

 & $h=0.01,q=100$\\
\hline
(1,1)   & (0.9943, 0.8693)\\
\hline
(1,1,1)   & (0.9996, 0.9894, 0.4231) \\
\hline
(1,1,1,1)   & (0.9997, 0.9934, 0.4572, 0.0729)\\
\hline
(1,1,1,1,1)   & (0.9996, 0.9896, 0.3971, -0.2315, -0.4616)\\
\hline
\end{tabular}
\end{center}
\end{table}

\begin{table}[ht]
\caption{Initial guesses and results for $n=2,3,4,5$ and $6$.}
\label{table3}
\begin{center}
\begin{tabular}{|c|c|}
\hline
Initial guess & $h=0.1,q=10$ \\
\hline
(1,1) & (0.9937, 0.8628) \\
\hline
(1,1,1)  & (0.9998, 0.9911, 0.4253) \\
\hline
(1,1,1,1)  & (1, 0.997, 0.4948, 0.1408)  \\
\hline
(1,1,1,1,1)  & (1.0000, 1.0000, 0.5001, 0.1669, 0.0384) \\
\hline
(1,1,1,1,1,1)  & (1,1.0004,0.5128,0.3247,0.7995,1.1887 ) \\
\hline
   & $h=0.01, q=10$\\
\hline
(1,1)  & (0.9937, 0.8629)\\
\hline
(1,1,1)   & (0.9998, 0.9912, 0.4254)\\
\hline
(1,1,1,1)    & (1, 0.997, 0.4948, 0.1409)\\
\hline
(1,1,1,1,1)    & (1.0000, 1.0000, 0.4998, 0.1648, 0.0351)\\
\hline
(1,1,1,1,1,1)   & (1,1,0.5,0.1666,0.0412,0.007)\\
\hline

 & $h=0.1, q=100$ \\
\hline
(1,1) & (0.9945, 0.8702) \\
\hline
(1,1,1)  & (0.9998, 0.9919, 0.4286) \\
\hline
(1,1,1,1)  & (1, 0.9997, 0.4953, 0.1417) \\
\hline
(1,1,1,1,1)  & (1.0000, 1.0003, 0.5045, 0.1856, 0.0635) \\
\hline
(1,1,1,1,1,1)  & (1,1.0031,0.5721,0.7732,2.0824,2.3360) \\
\hline
  & $h=0.01, q=100$\\
\hline
(1,1)  & (0.9945, 0.8702)\\
\hline
(1,1,1)   & (0.9998, 0.9918, 0.4288)\\
\hline
(1,1,1,1)   & (1, 0.9996, 0.4947, 0.1407)\\
\hline
(1,1,1,1,1)   & (1.0000, 1.0013, 0.5207, 0.2704, 0.1955)\\
\hline
(1,1,1,1,1,1)   & (1,1.0022,0.5689,0.8506,2.5924,3.1306)\\
\hline
\end{tabular}
\end{center}
\end{table}

\begin{example} \label{examp3}\rm
 $p=3$, $g(t)=t^2$, $H(x,t)=0$, $u(0,t)=f(t)$ is found numerically. 
The correct solution is  $a(u)=2+\sin(u)$.
The expected coefficients are  the Taylor coefficients of $2+\sin(u)$ 
which is $(2,1,0,-\frac{1}{6},0,\frac{1}{120})$. (See Table \ref{table4}).
\end{example}

\begin{table}[ht]
\caption{Initial guesses and results for $n=2,3,4,5$ and $6$.}
\label{table4}
\begin{center}
\begin{tabular}{|c|c|}
\hline
Initial guesses  &  $h=0.1,q=10$   \\
\hline
(1,1)  & (2.0039, 0.9665)   \\
\hline
(1,1,1)  & (1.9991, 1.0210, -0.1131)   \\
\hline
(1,1,1,1)  & (2.0000, 1.0002, -0.0017, -0.1621)  \\
\hline
(1,1,1,1,1)  & (2.0000, 1.0000, 0.0005, -0.1701, 0.0090)  \\
\hline
(1,1,1,1,1,1)  & (2.0000, 1.0000, -0.0000, -0.1664, -0.0007, 0.0089 )   \\
\hline

\hline
  & $h=0.01,q=10$ \\
\hline
(1,1)    & (2.0040, 0.9665) \\
\hline
(1,1,1)    & (1.9991, 1.0211, -0.1132) \\
\hline
(1,1,1,1)  & (2.0000, 1.0002, -0.0017, -0.1621) \\
\hline
(1,1,1,1,1)    & (2.0000, 1.0000, 0.0005, -0.1701, 0.0089) \\
\hline
(1,1,1,1,1,1)    & (2.0000, 1.0000, 0.0000, -0.1667, 0.0001, 0.0081) \\
\hline

 & $h=0.1,q=100$  \\
\hline
(1,1)  & (2.0032, 0.9710) \\
\hline
(1,1,1)  & (1.9992 ,1.0191, -0.1077)  \\
\hline
(1,1,1,1)  & (2.0000, 1.0015, -0.0096, -0.1510)  \\
\hline
(1,1,1,1,1)  & (2.0001, 0.9970, 0.1128, -0.9067 ,1.1098)  \\
\hline
(1,1,1,1,1,1)  & (2.0000, 1.0012, -0.0169, -0.0727, -0.2284, 0.2087)  \\
\hline

   & $h=0.01,q=100$\\
\hline
(1,1)    & (2.0033, 0.9700)\\
\hline
(1,1,1)    & (1.9992 ,1.0191, -0.1081 )\\
\hline
(1,1,1,1)   & (2.0000, 1.0007, -0.0047, -0.1579)\\
\hline
(1,1,1,1,1)    & (2.0001, 0.9969, 0.1138, -0.9096, 1.1129)\\
\hline
(1,1,1,1,1,1)    & (2.0000 ,1.0007 ,-0.0094 ,-0.1157 ,-0.1197, 0.1096)\\
\hline
\end{tabular}
\end{center}
\end{table}

In the examples above $u(0,t)=f(t)$  for the correct $a(u)$ is obtained 
analytically or numerically without any noise on $u(0,t)$ except the error 
resulting from the computation of $u(0,t)$ with PDE Solver. However, 
in applications the additional data $u(0,t)$ is generally given
with noise; i.e., $u(0,t)+\gamma u(0,t)$, where $\gamma$ is called
the noise level and is generally less than $0.1$. The examples above are
now tested with $u(0,t)$ plus some noise. The algorithm is run for the 
best choices of $h$, $q$ and the initial guesses in the previous calculations;
i.e.,  $h=0.1,q=100$ for all examples. The noise levels  will be taken as 
$\gamma=+0.03,-0.05$. Table \ref{table5} shows the results. 
In these tables we also  give the relative error for each example and
 perturbation which is defined  as  
\[
\frac{\|u-u_{a}\|_\infty}{\|u\|_\infty},
\]
where $\|\cdot\|_\infty$ denotes the maximum norm, and $u$ and $u_{a}$ are
the solutions corresponding to the correct $a(u)$ and the observed
$a(u)$ (i.e., the coefficients obtained in Tables \ref{table5} and
\ref{table6}), respectively.
Defining the relative error provides a gauge to compare the results for 
the noisy data for different regularization parameters.
The results are given  for each example in Table \ref{table5}. 

\begin{table}[ht]
\caption{The results for given $\gamma$ values}
\label{table5}
\begin{center}
\begin{tabular}{|c|c|}
\hline
Example \ref{examp1}  & $\gamma=+0.03$   \\
\hline
(1,1,1,1)  & (0.9439, 1.7790, 2.8595, -0.8672)    \\
\hline
Relative error & 0.0297 
 \\
\hline
 Example \ref{examp1} & $\gamma=-0.05$ \\
\hline
(1,1,1,1)  & (1.0895,    2.9046,   0.2247,   12.1565 )  \\
\hline
Relative error  & 0.0499
 \\
\hline

Example \ref{examp2} &   $\gamma=+0.03$  \\
\hline
(1,1,1,1,1,1)  & (1.0011,1.0441,1.2929,6.3858,21.0167,24.7614) 
\\
\hline
Relative Error & 0.0024   \\
\hline

Example \ref{examp2} &  $\gamma=-0.05$ \\
\hline
(1,1,1,1,1,1)  & (0.9981,0.9300,-0.7489,-9.5259,-32.1563,-37.5388)
\\
\hline
Relative Error   & 0.0034  \\
\hline

Example \ref{examp3}  &   $\gamma=+0.03$ \\
\hline
(1,1,1,1,1,1)  & (1.8833,0.3501,2.8646,-11.5998,22.3717, -16.4603)  \\
\hline
Relative error & 0.0299   \\
\hline

Example \ref{examp3}  & $\gamma=-0.05$  \\
\hline
(1,1,1,1,1,1)    & (2.2204, 2.6791, -8.6243, 40.0126, -90.2433, 76.5890)  \\
\hline
Relative error & 0.05  \\
\hline
\end{tabular}
\end{center}
\end{table}

Note that the coefficients in Table \ref{table5}  are far from the 
coefficients of the correct $a(u)$. This is due to the ill-posedness 
of the problem. A small perturbation in $u(0,t)$ causes the algorithm 
to deviate much from the correct $a(u)$. In order to overcome this 
problem, regularization parameter is used. The regularization parameter 
is now added to the algorithm, i.e., $\lambda$ is nonzero. In our experiments 
below, the best $\lambda$ value is sought. Since the problem is highly nonlinear,
 we seek the best regularization parameter empirically. We present the 
optimal regularization parameters and the corresponding relative errors 
in Table \ref{table6}.

\begin{table}
\caption{Regularization parameters and relative errors for 
different noise levels}
\label{table6}
\begin{center}
\begin{tabular}{|c|c|}
\hline
Example \ref{examp1}  & $\gamma=+0.03$   \\
\hline
(1,1,1,1)  &(0.9418, 1.9809, 1.4719, 1.2065)  \\
\hline
$\lambda$ & 0.0004 
 \\
\hline
Relative error &0.0283  
\\
\hline
Example \ref{examp1}    & $\gamma=-0.05$ \\
\hline
(1,1,1,1)    &(1.4532, 1.1701, 1.0707, 1.0319)\\
\hline
$\lambda$  & 0.58
 \\
\hline
Relative error   &0.0287
\\
\hline

Example \ref{examp2} & $\gamma=+0.03$  \\
\hline
(1,1,1,1,1,1)  & (1.0027, 1.0661, 0.9099, 1.0385, 0.9855, 1.0052)  \\
\hline
$\lambda$ & 0.000007  \\
\hline
Relative Error & 0.000286  \\
\hline
Example \ref{examp2} & $\gamma=-0.05$ \\
\hline
(1,1,1,1,1,1)   & (1.0011, 1.0539, 0.8900, 1.0448, 0.9826, 1.0060)\\
\hline
$\lambda$  & 0.0000071  \\
\hline
Relative Error  & 0.00031  \\
\hline

Example \ref{examp3}  &  $\gamma=+0.03$   \\
\hline
(1,1,1,1,1,1)  & (1.7049, 0.9091, 0.9263, 0.9644, 0.9845, 0.9934) \\
\hline
$\lambda$ & 0.012  \\
\hline
Relative error & 0.023 \\
\hline
Example \ref{examp3}  & $\gamma=-0.05$  \\
\hline
(1,1,1,1,1,1)   & (1.7880, 1.1874, 1.0551, 1.0186, 1.0068, 1.0026)\\
\hline
$\lambda$  & 0.37 \\
\hline
Relative error  & 0.0106
\\
\hline
\end{tabular}
\end{center}
\end{table}

\section{Analysis of results}\label{sec:analy}

The experiments have clearly indicated that the initial guess, $q$,
and $n$ are the main factors affecting the accuracy of the solutions.
The initial guesses have to be chosen close enough to the coefficients 
of the correct solution. However, it is hard to give a radius of the 
trust region around the expected coefficients. One way to overcome this
 problem is to start with $n=1$ with several initial guesses then choose 
the best one for it (call it $x_{0}$) then make it $n=2$, use the solution 
$(x_{0},1)$ as an initial guess and repeat it for the other dimensions. 
Although the initial guesses in the above experiments have not been 
determined with this procedure, that approach also has been observed to 
work well in all examples. It should also be noted that the initial
guesses for different dimensions are intended to be the same in all
examples and they are not very far from the expected coefficients.
However, using the same initial guesses aims to give an insight into
the behavior of the inversion algorithm on different types of $a(u)$;
i.e., a polynomial, an exponential function and a uniformly bounded function.

It is observed that $q$ has a significant impact on the solutions. 
However, the way it affects the algorithm is not very clear. It appears
that in all examples $q=10$ works better
(See Tables \ref{table1}, \ref{table3}, \ref{table4}).
 When the analytical solution is used in the inversion algorithm in the 
second example, $q=100$ turns out to be better than $q=10$ 
(See Table \ref{table2}). It seems that using an additional data 
$u(0,t)=f(t)$ that is numerically found by PDE Solver from the correct 
$a(u)$ is more preferable. Indeed, in applications an analytical
 $u(0,t)=f(t)$  is often not given. It should be noted that when the 
solutions $u(c,x,t)$ and $u(0,t)$ in $F(c)$ are obtained numerically by 
PDE solver, they bear an error caused by $q$. These errors seem to cancel 
each other or add to the error of the main algorithm because $F(c)$ measures 
the difference between $u(c,x,t)$ and $u(0,t)$. This might be the fact behind
the result $q=10$  works better than $q=100$ using the same initial guesses 
in all examples 
(See Tables \ref{table1}, \ref{table3}, \ref{table4}). 
Additionally, these errors bring some perturbation to the additional data
$u(0,t)$ (see Example \ref{examp2}). In the second example, when the
analytical solution is used,
the analytical solution $u(0,t)$ is perceived as a perturbation of the numerical 
$u(0,t)$ and that  leads to the results in Table \ref{table3}. 
This is due to  the ill-posedness of the problem which is made clear in the 
examples with noisy data (Compare the fourth row for $q=10$ with the findings 
in Table \ref{table5}). Despite the ambiguous but significant  effect of $q$, 
for practical uses $q=10$ appears to be efficient for non-polynomial
$a(u)$'s. Using a higher dimension $n$ is more accurate in estimating
$a(u)$ provided that the algorithm is started with a good initial guess.
The ill-posedness of the inverse problems is observed when noisy data is
used in the examples. Table \ref{table5} shows clearly that a noise level
even as small as $0.03$ causes a big shift in the coefficients of $a(u)$.

The effect of regularization parameter becomes apparent in noisy examples.  
Since the problem is highly nonlinear, we seek the best regularization parameter 
empirically. We present the best regularization parameter with their relative 
errors.  When Tables \ref{table5} and \ref{table6} are compared, we see a 
significant enhancement in  results in terms of relative errors. 
When the optimal  regularization parameter is used, the algorithm  ends at 
relatively better coefficients.

The changes in the differential step $h$ are observed to have a negligible
effect in finding feasible directions. In our experiments $h=0.1$
appears to be good enough for a satisfactory solution.

\subsection*{Concluding remarks}
The presented numerical method has been successfully applied
to a nonlinear inverse reaction-diffusion problem. The demonstrated numerical
examples show that the method allows one to reconstruct the unknown
coefficient with high accuracy, even for acceptable noise levels.
The assumption on the smoothness of $F(c)$ appears to be true at least 
in the space of polynomials. The authors of this paper plan to consider
the simultaneous determination of the coefficient $a(u)$ and the number $p$
in the considered inverse problem. In this context, an existence and uniqueness 
theorem for the solution will be provided. Later we will study the same kind 
of inverse problems involving determination of the coefficient $a(u_x^2)$ 
and/or the number $p$ related to the nonlinear equation 
$u_t=\big (a(u_x^2) u_x \big)_x+| u | ^p$. Also we generalize the nonlinear 
source $| u | ^p$  to be $f(u)$ and then study some inverse problems for 
these two equations; i.e., $u_t=\big (a(u) u_x \big)_x+f(u)$ and  
$u_t=\big (a(u_x^2) u_x \big)_x+f(u)$.  These are subjects of the planned 
studies by the authors of this article.

\subsection*{Acknowledgments}
This research was partially supported by the Scientific
and Technological Research Council of Turkey (TUBITAK), also by
the  Zirve  University Research Fund.


\begin{thebibliography}{00}

\bibitem{B} J. Bear.
\newblock  Dynamics of Fluids in Porous Media.
\newblock 2nd edition, Elsevier, New York, (1972).


\bibitem{BA} M. Badii.
\newblock Existence and Uniqueness of solutions for a degenerate Quasilinear Parabolic Problem.
\newblock  Publicacions Matem\`atiques, 38, 327-352, (1994).


\bibitem{C1} J. R. Cannon, P. Duchateau.
\newblock An inverse problem for a nonlinear diffusion equation.
\newblock SIAM J. Appl. Math. 39(2), 272-289, (1980).


\bibitem{D1} P. Duchateau.
\newblock Monotonicity and uniqueness results in identifying an unknown coefficient in a nonlinear diffusion equation.
\newblock SIAM J. Appl. Math. 41(2), 310-323, (1981).


\bibitem{N1}  Shidfar, A., Pourgholi, R., Ebrahimi, M.
\newblock A numerical method for solving of a nonlinear inverse diffusion problem.
\newblock An Int. Journal Comput. \& Math. with Appl. 52, 1021-1030, (2006).

\bibitem{N2} Ahmedizadeh, Y., Soti, V., Pourgholi, R.
\newblock Numerical solution of an inverse diffusion problem.
\newblock  Appl. Math. Sci. 1, 863-868, (2007).

\bibitem{FSS1} Akyildiz, F. T., Tatar, S. and Ulusoy, S.
\newblock Existence and uniqueness for a nonlinear inverse reaction-diffusion 
problem with a nonlinear source in higher dimensions.
\newblock   Math. Meth. Appl. Sci., doi: 10.1002/mma.2765, (2013).

\bibitem{FRD} Friedman, A.
\newblock  Partial Differential Equations of Parabolic Type.
\newblock Prentice-Hall: Englewood Cliffs, NJ, (1964).

\bibitem{APS} Abtahi, M., Pourgholi, R., Shidfar, A.
\newblock   Existence and uniqueness of a solution for a two dimensional nonlinear inverse diffusion problem.
\newblock  Nonl. Analy.: Theory, Methods \& Appl. 74, 2462-2467, (2011).

\bibitem{J} Hadamard, J.
\newblock Sur les probl\`emes aux d\'eriv\'ees partielles et leur signification physique.
\newblock   Princeton University Bulletin, 49--52, (1902).


\bibitem{Kirsch} Kirsch, A.
\newblock An Introduction to the Mathematical Theory of Inverse Problems.
\newblock  Springer, NY, (1996).
\end{thebibliography}

\end{document}
