\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2015 (2015), No. 245, pp. 1--10.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu}
\thanks{\copyright 2015 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2015/245\hfil Inverse coefficient problems]
{An inverse coefficient problem for a nonlinear reaction diffusion 
equation with a nonlinear source}

\author[S. Tatar, S. Ulusoy \hfil EJDE-2015/245\hfilneg]
{Sal\.ih Tatar, S\"uleyman Ulusoy}

\address{Sal\.ih Tatar \newline
Department of Mathematics,
Faculty of Education,
Zirve University, \newline
Sahinbey, Gaziantep 27260, Turkey}
\email{salih.tatar@zirve.edu.tr}
\urladdr{http://person.zirve.edu.tr/statar/}

\address{S\"uleyman Ulusoy \newline
Department of Mathematics,
Faculty of Education,
Zirve University, \newline
Sahinbey,  Gaziantep 27260, Turkey }
\email{suleyman.ulusoy@zirve.edu.tr}
\urladdr{http://person.zirve.edu.tr/ulusoy/}

\thanks{Submitted August 4, 2015. Published September 22, 2015.}
\subjclass[2010]{35R30, 65M32, 65N20}
\keywords{Inverse problem; class of admissible coefficients;
maximum principle; 
\hfill\break\indent steepest descent method; least squares approach}

\begin{abstract}
 In this article, we consider the problem of identifying an unknown coefficient
 in a  nonlinear diffusion equation. Under appropriate conditions, we prove
 the existence and the uniqueness of solution for the inverse problem.
 For the numerical solution of the inverse problem, a numerical method based
 on discretization of the minimization problem, steepest descent method and
 least squares approach is proposed.  A numerical example is given to
 illustrate applicability and high accuracy of the proposed method.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{definition}[theorem]{Definition}
\allowdisplaybreaks

\section{Introduction}\label{sec:intro}

We consider the following  $n$-dimensional nonlinear inverse reaction-diffusion problem
\begin{equation} \label{1-1}
\begin{gathered}
u_t=\nabla\cdot (a(u)\nabla u)+ f(u) ,\quad (x,t)\in\Omega_T, \\
u(x,0)=0, \quad x \in \overline \Omega,\\
-a(u(x,t))\nabla u(x,t)= \vec{g}(x,t),\quad x \in B_{0}^1,\; t \in [0,T], \\
u_{x_i}(x,t)=0,\quad x \in B_{0}^i,\; t \in [0,T],\;  i= 2,\dots, n,\\
u_{x_i}(x,t)=0, \quad x \in B_{1}^i,\;  t \in [0,T],\;  i= 1,\dots, n,\\
u(x,t)= f_1(x,t),\quad x \in B_{0}^1,\; t \in [0,T],\\
\end{gathered}
\end{equation}
where $\Omega:= [0,1]^n$ and $\Omega_T :=\Omega\times (0,T)$ are two domains
in $\mathbb{R}^n$ and $\mathbb{R}^{n+1}$ respectively,
$x=(x_1,x_2,\dots,x_n) \in \Omega$, $T > 0$ is a final time,
$B_{0}^i = \{ (x_1,x_2,\dots, x_i = 0, x_{i+1}, \dots, x_n)\}$ and
$B_{1}^i = \{ (x_1,x_2,\dots, x_i = 1, x_{i+1}, \dots, x_n)\}$.
In this problem, we assume that the compatibility condition $f_1(0,0)=0$
is satisfied. The last Dirichlet condition in \eqref{1-1}  is used as an
additional condition.

The parabolic equation in  \eqref{1-1} has many applications.
For instance, it is  used to describe the spread of populations in space
\cite{Kot, Murray}. It is also used in modeling chemical and bio-chemical
reactions \cite{Hun,dev}. In general, the nonlinear source term $f(u)$
is a smooth function and it describes processes that actually change the
quantity $u$, i.e., something happens to it (birth, death, chemical reactions,
etc.), rather than just diffusing in space. Also, in the context of heat
conduction and diffusion, when $u$ represents temperature and concentration,
$f(u)$ is interpreted as a heat source and a material source, respectively.

It is known that the direct problem, i.e., the problem \eqref{1-1} without
 the additional condition, has a unique solution if $a(u)$ satisfies certain
conditions \cite{FRD}. The inverse problem here consists of determining
the unknown coefficient $a(u)$  in the problem \eqref{1-1}.
Nonlinear  inverse  problems  similar to  \eqref{1-1}  have  been  previously
treated by many authors \cite{APS,ATU,N2,C1,D1,N1,TTU}.
In  this  article, we consider the existence and uniqueness of the solution
of a higher dimensional inverse reaction-diffusion problem with a general
nonlinear source. We prove that the inverse problem has a unique solution
in the class of admissible coefficients.

Now we provide some preliminary material. First we define the following
norms and seminorms:
\begin{gather*}
| u |_D=\sup \big \{u(s), s \in D \big \},\\
H_{\alpha}(u)=\sup \Big \{\frac{u(p)-u(q)}{d(p,q)^{\alpha}}
: p,q \in D, p \ne q \Big \},\\
| u |_\alpha=| u |_D+H_{\alpha}(u), \\
| u |_{1+\alpha}=| u |_\alpha+\sum_{i=1}^{n}
\big | \frac{\partial u}{\partial x_i} \big |_\alpha, \\
| u |_{2+\alpha}=| u |_\alpha+\sum_{i=1}^{n}
\big | \frac{\partial u}{\partial x_i}  \big  |_\alpha
+\sum_{i,j=1}^{n} \big  | \frac{\partial^2 u}{\partial x_i  \partial x_j }
\big  |_\alpha+\big  | \frac{\partial u}{\partial t}  \big |_\alpha,
\end{gather*}
 where $D=\Omega_T$, $d(p,q)$ is the usual Euclidean metric for the points $p$ and
 $q$ in $D$ and $\alpha>0$ is a constant. The space of all functions $u$
for which $| u |_{2+\alpha}<\infty$ is denoted by $C_{2+\alpha}(D)$.
In \cite{FRD}, it is proved that the space $C_{2+\alpha}(D)$ is a Banach
space with the corresponding norm.

\begin{definition} \rm
A set $\mathcal{A}$ satisfying the following conditions is called the
class of admissible coefficients in optimal control and inverse problems:
\begin{enumerate}
\item $a \in  C_{2+\alpha}(I)$ with $| a |_{2+\alpha} \leq c$;
\item $\nu \leq a \leq \mu$ and $a'(s)>0$,  for $s \in I$;
\item $| a' | \leq \delta$ and $| a'' | \leq \delta$ for $s \in I$;
\end{enumerate}
where $\alpha \in (0,1)$, $I$ is a closed interval, $a\colon I \to \mathbb{R}$ and
$c, \nu, \mu, \delta$ are positive constants.
\end{definition}

This article is organized as follows.
In section \ref{sec:ex-uniq} the inverse problem \eqref{1-1} is reduced
to an equivalent auxiliary problem and existence and uniqueness
of the inverse problem is proved. We present our numerical method for
the numerical solution of the inverse problem in Section \ref{numer}.
A numerical example is also given to show efficiency of the method.

\section{Existence and uniqueness for the inverse problem}\label{sec:ex-uniq}

In this section we prove that the inverse problem \eqref{1-1} has a unique
solution. We use the well-known Kirchhoff transformation
\begin{equation*}
 T_a(u)  = \int_0^u a(s) \, ds,
\end{equation*}
where $a \in \mathcal{A}$ and $u>0$. Let $u = u(x, t)$ be a solution of
\eqref{1-1}. Then define $v(x,t)$ as
\begin{equation}\label{sol-v}
v(x, t) = T_a(u(x,t)) = \int_0^{u(x, t)} a(s) \, ds.
\end{equation}
From \eqref{sol-v}, we reduce the inverse problem \eqref{1-1}
to the  auxiliary problem
\begin{equation} \label{1-2}
\begin{gathered}
v_t= a(T_a^{-1}(v)) \Delta v + a(T_a^{-1}(v))  f(T_a^{-1}(v)) ,\quad
(x,t)\in\Omega_T, \\
v(x,0) = 0,\quad x \in \overline \Omega,\\
-\nabla v(x, t) = \vec{g}(x,t), \quad x \in B_{0}^1,\; t \in [0,T], \\
v_{x_i}(x,t)=0,\quad x \in B_{0}^i,\; t \in [0,T],\;  i= 2,\dots, n,\\
v_{x_i}(x,t)=0,\quad x \in B_{1}^i,\; t \in [0,T],\;  i= 1,\dots, n,\\
v(x,t)= F(x,t),\quad x \in B_{0}^1,\; t \in [0,T],
\end{gathered}
\end{equation}
where $F(x, t) := \int_0^{f_1(x,t)} a(s) \, ds$.
 We note that $\frac{d}{du} T_a(u) \geq \nu > 0$ implies that $T_a(u)$
is invertible.
Now, we  prove the following comparison theorem.

\begin{theorem}\label{thm:comp}
Let $f \in C^1(\Omega_T)$, $\vec{g}(x,t)$ and $F$ be continuous functions.
In addition assume that  $\vec{g}_t(x,t)$ and
$\frac{\partial F}{\partial t}$ are positive and continuous functions. Then,
\begin{equation}\label{1-3}
w_{\nu} \leq v \leq w_{\mu},
\end{equation}
where $v$ is the solution of  \eqref{1-2}, $w_{\nu}$ and $ w_{\mu}$ are
solutions of the following problem for $\lambda = \nu$ and
 $\lambda = \mu$ respectively:
\begin{equation} \label{1-4}
\begin{gathered}
L_{\lambda}w := \lambda \Delta w + \lambda  f(T_a^{-1}(w)) - w_t = 0,
\quad (x,t)\in\Omega_T, \\
w(x,0) = 0,\quad x \in \overline \Omega,\\
-\nabla w(x, t) = \vec{g}(x,t), \quad x \in B_{0}^1,\; t \in [0,T], \\
w_{x_i}(x,t)=0,\quad x \in B_{0}^i, \;t \in [0,T], \; i= 2,\dots, n,\\
w_{x_i}(x,t)=0,\quad x \in B_{1}^i,\; t \in [0,T],\;  i= 1,\dots, n,\\
w(x,t)= F(x,t),\quad x \in B_{0}^1,\;  t \in [0,T].
\end{gathered}
\end{equation}
\end{theorem}

\begin{proof}
Let $\tilde{a}  = a(T_a^{-1}(v))$. Now, we estimate
$L_{\tilde{a}}(w_{\mu}) - L_{\tilde{a}}(v)$. Since
${w_{\mu}}_t = \mu [ \Delta w_{\mu} +  f(T_a^{-1}(w_{\mu})) ]$
and
$  v_t = \tilde{a} [ \Delta v +  f(T_a^{-1}(v)) ]$,
we obtain
\begin{equation}\label{1-5}
L_{\tilde{a}}(w_{\mu}) - L_{\tilde{a}}(v)
 = \left(\tilde{a}- \mu \right)[ \Delta w_{\mu} + f(T_a^{-1}(w_{\mu}))].
\end{equation}
To use the maximum principle on \cite[page 177]{Wein}, we need to show that
 $[ \Delta w_{\mu} + f(T_a^{-1}(w_{\mu}))] \geq 0$. For this purpose
let $r = \frac{\partial w_{\mu}}{\partial t}$. Then  $r(x, t)$ satisfies
\begin{equation} \label{1-6}
\begin{gathered}
r_t =  \big[ \Delta r +   f'(T_a^{-1}(w_{\mu}))\frac{1}{a'(w_{\mu})}r \big], \quad
(x,t)\in\Omega_T, \\
r(x,0) = 0, \quad x \in \overline \Omega,\\
-\nabla r(x, t) = \vec{g}_t(x,t),\quad x \in B_{0}^1,\; t \in [0,T], \\
r_{x_i}(x,t)=0, \quad x \in B_{0}^i,\; t \in [0,T], \; i= 2,\dots, n,\\
r_{x_i}(x,t)=0,\quad x \in B_{1}^i, \; t \in [0,T], \; i= 1,\dots, n,\\
r(x,t)=\frac{\partial }{\partial t}F(x,t),\quad x \in B_{0}^1,\; t \in [0,T].
\end{gathered}
\end{equation}
Employing the maximum principle on \cite[page 177]{Wein}, we conclude that
 $r \geq 0$, which implies that
$[ \Delta w_{\mu} + f(T_a^{-1}(w_{\mu}))] \geq 0$. Thus,
$L_{\tilde{a}}(w_{\mu}) - L_{\tilde{a}}(v) \leq 0$.
By the maximum principle \cite[page 172]{Wein}, we conclude that
 $w_{\mu} \geq v$. The proof for the other side of the  inequality \eqref{1-3}
is similar.
\end{proof}

Now, we state and prove an existence theorem.

\begin{theorem}\label{thm:exist}
Under the conditions of Theorem \ref{thm:comp}, the inverse problem \eqref{1-1}
has a solution for each $a \in \mathcal{A}$.
\end{theorem}

\begin{proof}
 Let $z_0 =0$ and $z_n$, $n=1,2,\dots$, be the solution of the problem
\begin{equation} \label{e1}
 \begin{gathered}
( {z_n})_t=a(T_a^{-1}( {{z}_{n-1}})) [\Delta {z_n}
 +  f(T_a^{-1}( {z_{n-1}}))], \quad (x,t)\in\Omega_T, \\
z_n(x,0) = 0, \quad x \in \overline \Omega,\\
-\nabla {z_n} (x,t) = \vec{g}(x,t),\quad x \in B_{0}^1,\; t \in [0,T],\\
( {z_n})_{x_i}(x,t)=0,\quad x \in B_{0}^i,\; t \in [0,T], \; i= 2,\dots, n,\\
( {z_n})_{x_i}(x,t)=0,\quad x \in B_{1}^i,\; t \in [0,T],\;  i= 1,\dots, n,\\
z_n(x,t) = F(x,t),\quad x \in B_{0}^1, \;t \in [0,T].
\end{gathered}
\end{equation}
Then $z_n$ is a bounded sequence in $C_{2+\alpha}(\Omega_T)$ \cite{FRD}.
Now we show that  $z_n$ is monotone increasing. For this we employ induction.
If we put $n=1$ in \eqref{e1} and note that $z_0 = 0$ we obtain
\begin{equation}
(z_1)_t = a(T_a^{-1}(0)) [\Delta z_1 + f(T_a^{-1}(0))] = a(0)[\Delta z_1 + f(0)].
\end{equation}
This says that $z_1$ is a solution of \eqref{1-4} for $\lambda = a(0)$.
Using Theorem \ref{thm:comp} we deduce that $z_1 \geq z_0$. Now suppose that
$z_{n-1} \leq z_n$. Applying the same method in Theorem \ref{thm:comp}
for $z_{n+1}$ and $z_n$ we find that $z_n \leq z_{n+1}$ which shows that
$\{ z_n\}$ is a monotone increasing sequence.
 Applying a simple version of Lemma 1 in \cite{APS} we deduce that
there is a $z \in C_{2+\alpha}(\Omega_T)$ such that
\begin{gather*}
\Delta z_n \to \Delta z, \quad \text{as }  n \to \infty, \\
z_n \to  z,\quad  \text{as }  n \to \infty.
\end{gather*}
Passing to the limit in the first equation of \eqref{e1}  as
$n \to \infty$ and observing that $z$ satisfies all conditions in \eqref{1-2}
we find that $z$ satisfies the problem \eqref{1-2}.
\end{proof}

As $z$ is a solution of  \eqref{1-2} and  the operator $T_a$ is invertible,
$u = T_a^{-1}z$ is a solution of the problem \eqref{1-1}.

\begin{theorem} \label{thm2.3}
Under the assumptions of Theorems \ref{thm:comp} and \ref{thm:exist},
the problem \eqref{1-1} has  a unique solution.
\end{theorem}

\begin{proof}
Let $u(x, t)$  and $v(x, t)$ be two solutions  of \eqref{1-2} and let
 $z(x, t) = v(x, t) - u(x, t)$. Then
\begin{equation}\label{zeqn}
\begin{split}
z_t
&= v_t - u_t
= [ a(T_a^{-1}( v)) \Delta v -  a(T_a^{-1}( u)) \Delta u ] \\
&\quad + [ a(T_a^{-1}( v))  f(T_a^{-1}( v))  -  a(T_a^{-1}( u))  f(T_a^{-1}( u)) ].
\end{split}
\end{equation}
Now, we estimate the term in the first bracket on the right hand side
of \eqref{zeqn}. For this, add and subtract the term  $a(T_a^{-1}( v)) \Delta u$.
Then, we have
\[
a(T_a^{-1}( v)) \Delta v -  a(T_a^{-1}( u)) \Delta u
= a(T_a^{-1}( v)) \Delta z
 + [a(T_a^{-1}( v)) -a(T_a^{-1}( u))  ]\Delta u.
\]
Using smoothness of the functions $a$ and $T_a^{-1}$, we conclude that
\begin{equation}\label{conc1}
a(T_a^{-1}( v))  -  a(T_a^{-1}( u)) = C(x, t)\, z,
\end{equation}
where
\[
C(x, t) = \frac{a'\big(p_a( T_a^{-1}(v(x, t)),  T_a^{-1}(u(x, t)))\big)}
{ a\big(q_a(v(x, t), u(x, t))\big)}
\]
 and $p_a(y_1, y_2)$, $q_a(y_1, y_2)$ are two numbers between $y_1$ and $y_2$.

Next, we estimate the term in the second bracket on the right hand side
of \eqref{zeqn}. Let $h(s) = a(s) f(s)$. Then
\begin{equation}\label{conc2}
\begin{split}
&a(T_a^{-1}( v))  f(T_a^{-1}( v))  -  a(T_a^{-1}( u))  f(T_a^{-1}( u)) \\
&=  h(T_a^{-1}( v)) - h(T_a^{-1}( u))
 = \frac{h'(T_a^{-1}(\tilde{u}))}{a(q_a(v(x, t), u(x, t)))} z,
\end{split}
\end{equation}
where $\tilde{u}$ is a number between  $T_a^{-1}( v)$ and $T_a^{-1}( u)$.

Combining \eqref{conc1}, \eqref{conc2}  we  conclude that $z(x, t)$
satisfies the  equation
\begin{equation*}
z_t = a(T_a^{-1}(v)) \Delta z + C_{*}(x, t) z,
\end{equation*}
where
$$
C_{*}(x, t) = C(x, t) \Delta u
 +\frac{h'(T_a^{-1}(\tilde{u}))}{a(q_a(v(x, t), u(x, t)))}.
$$
Moreover, $z(x, t)$ satisfies the  initial and boundary conditions
\begin{gather*}
z(x,0) = 0,\quad x \in \overline \Omega,\\
-\nabla z(x, t) = \vec{0},\quad x \in B_{0}^1,\; t \in [0,T], \\
z_{x_i}(x,t) = 0,\quad x \in B_{0}^i,\; t \in [0,T],\;  i= 2,\dots, n,\\
z_{x_i}(x,t) = 0,\quad x \in B_{1}^i,\; t \in [0,T],\;  i= 1,\dots, n,\\
z(x,t) = 0,\quad x \in B_{0}^1, \; t \in [0,T].
\end{gather*}
Employing the maximum principle  \cite[page 177]{Wein} for $z(x, t)$,
we conclude that $z(x, t) \equiv 0$, which concludes the proof.
\end{proof}

\section{Numerical solution of the inverse problem}\label{numer}

In this section, we present our numerical method for the solution of
the inverse problem. For simplicity, we consider only one dimensional
case in space. In this case, the inverse problem \eqref{1-1} becomes
\begin{equation} \label{3-1}
\begin{gathered} 
u_t=(a(u) u_x)_x+ f(u) ,\quad (x,t)\in\Omega_T, \\
u(x,0)=0,\quad x \in \overline \Omega,\\
-a(u(0,t))u_x(0,t)= g(t),\quad  t \in [0,T], \\
u_{x}(1,t)=0,\quad t \in [0,T],\\
u(0,t)= f_1(t),\quad t \in [0,T],
\end{gathered}
\end{equation}
where $\Omega:= [0,1]$ and $\Omega_T :=\Omega\times (0,T)$.

We note that the same method is used in \cite{TTU}. For the completeness 
of the content, we explain the main steps of the method. The essence of 
the method is to approximate the unknown coefficient $a(u)$ by polynomials. 
Since the unknown diffusion coefficient $a(u)$ is continuous on a compact 
domain $\Omega_{T}$, there exists a sequence of polynomials converging to $a(u)$. 
Our starting point is that the correct $a(u)$ will yield the solution 
satisfying the condition $u(0,t)=f_1(t)$, hence $a(u)$ will minimize the 
functional
\begin{equation*}
F(c)=\| u(c,0,t)-f_1(t)\| _{2}^{2},
\end{equation*}
where $u(c,x,t)$ is the solution of the direct problem with the diffusion 
coefficient $c(u)$ and $\| \cdot\| _{2}$ is the $L_{2}$ norm on $\Omega$. 
Hence, our strategy is to find a polynomial of degree $n$ that minimizes 
$F(c)$, i.e., the $n$th degree polynomial approximation of $a(u)$ for the
desired $n$. From now on we take $c(u)=c_{0}+c_{1}u+\dots+c_{n}u^{n}$
and write $c=(c_{0},\dots,c_{n})$, hence $F(c)$ is a function of $n+1$ variables.
To overcome the ill-posedness of the inverse problem, Tikhonov 
regularization is applied. A regularization term with a regularization 
parameter $\lambda$ is added to $F(c)$
\begin{equation*}
G(c)=\| u(c,0,t)-f_1(t)\| _{2}^{2}+\lambda\| c\| ^{2},
\end{equation*}
 where $\| c\| $ denotes the Euclidean norm of $c$. From now on, we 
fix $n$ and $\lambda$.

The method for minimizing $G(c)$ depends on the properties of $F(c)$, e.g., 
convexity, differentiability  etc. In our case, the convexity or 
differentiability of $F(c)$ is not clear due to the term $u(c,x,t)$. 
However, we do not envision a major drawback in assuming the differentiability
of $F(c)$ in numerical implementations. 
For this reason, we proceed the minimization of $G(c)$ by the steepest 
descent method which will utilize the gradient of $F$. In this method, 
the algorithm starts with an initial point $b_{0}$, then the point 
providing the minimum is approximated by the points
\begin{equation*}
b_{i+1}=b_{i}+\triangle b_{i},
\end{equation*}
where $\triangle b_{i}$ is the feasible direction which minimizes
\begin{equation*}
E(\triangle b)=G(b_{i}+\triangle b).
\end{equation*}
This procedure is repeated until a stopping criterion is satisfied, i.e.,
$\| \triangle b_{i}\| <\epsilon$ or $|G(b_{i+1})-G(b_{i})|<\epsilon$
or a certain number of iterations. In the minimization of $E(\triangle b)$,
we use the following estimate on $u(b_{i}+\triangle b,0,t)$:
\begin{equation*}
u(b_{i}+\triangle b,0,t)\simeq u(b_{i},0,t)+\nabla u(b_{i},0,t)\cdot\triangle b,
\end{equation*}
 where $\nabla$ denotes the gradient of $u(b,0,t)$ with respect to
$b$. Hence $E(\triangle b)$ turns out to be
\begin{equation*}
E(\triangle b)=\| \nabla u(b_{i},0,t)\cdot\triangle b
+u(b_{i},0,t)-f_1(t)\| _{2}^{2}+\lambda\| \triangle b\| _{2}^{2}.
\end{equation*}
In numerical calculations, we note that $\| \cdot\| _{2}$
can be discretized by using a finite number of points in $[0,T]$,
i.e., for $t_{1}=0<t_{2}<\dots<t_{q}=T$, hence $E(\triangle b)$
has its new form as
\begin{equation}
E(\triangle b)\simeq\sum_{k=1}^{q}(u(b_{i},0,t_{k})
+\nabla u(b_{i},0,t_{k})\cdot\triangle b-f_1(t_{k}))^{2}
+\lambda\| \triangle b\| _{2}^{2}.\label{discretization}
\end{equation}

Now the minimization of this problem is a least squares problem whose
solution leads to the  normal equation (see \cite{Kirsch})
\begin{equation*}
(\lambda I+A^{T}A)\triangle b=A^{T}K,
\end{equation*}
where
\begin{gather*}
A=[\nabla u(b_{i},0,t_{1})^{T}\dots\nabla u(b_{i},0,t_{q})^{T}], \\
K=[u(b_{i},0,t_{1})-f_1(t_{1})\dots u(b_{i},0,t_{q})-f_1(t_{q})]^{T}.
\end{gather*}
 Now the optimal direction is found by
\begin{equation}
\triangle b=(\lambda I+A^{T}A)^{-1}A^{T}K.\label{normal eq}
\end{equation}
In forming $A$, the computation (or estimation) of the $s$th
component of the vector $\nabla u(b_{i},0,t_{k})$ can be achieved by
\begin{equation}
\frac{u(b_{i}+he_{s},0,t_{k})-u(b_{i},0,t_{k})}{h}\label{diffstep},
\end{equation}
where $e_{s}$ is the standard unit vector whose $s$th component
is 1 and $h$ is the differential step.

The algorithm can be summarized by following steps:
\smallskip

\noindent\textbf{Step 1.} Set $b_{0}$, $n$, $\lambda$ and a stopping criterion
$k$ or $\epsilon$ (iteration number less than $k$ or size of 
$\|\triangle b_{i}\|\leq \epsilon$). 
\smallskip

\noindent\textbf{Step  2.}  Calculate $\triangle b_{i}$ using \eqref{normal eq}
and set $b_{i+1}=b_{i}+\triangle b_{i}$.
\smallskip

\noindent\textbf{Step  3.}  Stop when the criterion is achieved. 
\medskip

\noindent \textbf{Example.} In this example, we solve the inverse
 problem \eqref{3-1} for $f(u)=Du \big(1-\frac{u}{K}\big)$, where
$D$ is the constant growth rate and $K$ is the carrying capacity that limits
the growth in the population dynamics model. For simplicity, we take
$D=K=1$, hence $f(u)=u (1-u)$. We also take $g(t)=\sin(t)$. 
The additional data $u(0,t)=f_1(t)$ is found numerically. 
The correct solution is $a(u)=1+2u+3u^2+u^3$. 
See Table \ref{table1}. We note that all computations have been
 carried out in MATLAB. In solving the direct problem for each value of 
$c$, MATLAB PDE solver is used.

Because of the discretization of the problem, many variables appear in computations. 
These variables and their values in our computations are listed below:
\begin{enumerate}
\item The degree of the polynomial $c(u)$: $n=2,3,4,5$ are taken.

\item Initial guess for the coefficients of $c(u)$:  All initial guesses
for the coefficients are taken to be vectors composed of $1$'s in order 
to get an objective observation.

\item Differential step $h$: $h=0.1,h=0.01$ are taken.

\item Number of $t$ points: $q=10$ and $q=100$ are taken.

\item Number of $(x, t)$ points in mesh grid used in Matlab PDE solver:  
taken to be $q \times q$ where $q$ is already determined in (4).

\item Stopping criterion: $\epsilon=0.01$ or maximum iteration number: $k=100$.

\item Regularization parameter: $\lambda$ is taken to be zero in the
 noise-free examples, but an optimal $\lambda $ is searched to deal 
with noisy data. Since the problem is highly nonlinear, we seek 
the best regularization parameter empirically.
\end{enumerate}

In the applications, the additional data $u(0,t)$ is generally given with noise,
i.e., $u(0,t)+\gamma \, u(0,t)$ where $\gamma$ is called noise level and 
is generally less than $0.1$. The example is now tested with $u(0,t)$ 
plus some noise. The algorithm is run for the best choices of $h$, $q$ 
and the initial guesses in the previous calculations, i.e.,  $h=0.1,q=100$.  
The noise levels are taken as $\gamma=+0.02,-0.04$. 
Table \ref{table2} and Table \ref{table3} show the results.
In these tables, we also give the relative error, which is defined as
\[
\frac{\|u-u_a\|_\infty}{\|u\|_\infty},
\]
where $\|\cdot\|_\infty$ denotes maximum norm, $u$ and $u_a$ 
are the solutions corresponding to the correct $a(u)$ and observed 
$a(u)$ respectively. The relative error provides a gauge to compare 
the results for noisy data for different regularization parameters. 
See Table \ref{table4}. 

\begin{table}[htb]
\caption{Initial guesses and results for $n=2,3,4,5$.}
\label{table1}
\begin{center}
\begin{tabular}{|c|c|}
\hline
Initial guess  & $h=0.1,q=10$  \\
\hline
(1,1)  & (0.9486, 2.5990  )   \\
\hline
(1,1,1)  & (0.9862, 2.1632, 1.6133)   \\
\hline
(1,1,1,1)  & (1.0176, 1.7199, 2.9978, -0.5290)   \\
\hline
(1,1,1,1,1)  & (1.0194, 1.5602, 4.7561, -6.1976, 6.1034 )   \\
\hline
  & $h=0.01, q=10$ \\
\hline
(1,1)    & (0.9521, 2.5653) \\
\hline
(1,1,1)    & (0.9996, 2.0289, 1.7452) \\
\hline
(1,1,1,1)  & (0.9922, 2.1623, 1.0803, 1.6373) \\
\hline
(1,1,1,1,1)   & (1.0101, 1.7365, 3.6651, -3.9461, 4.5934) \\
\hline
& $h=0.1,q=100$  \\
\hline
(1,1)  & (0.9506, 2.5998)  \\
\hline
(1,1,1)  & (1.0008, 2.0268, 1.8113)  \\
\hline
(1,1,1,1)  & (1.0095, 1.8579,  2.4673, 0.0554)  \\
\hline
(1,1,1,1,1)  & (1.0028, 1.9976, 1.6339, 1.8159, -0.4184) \\
\hline
 & $h=0.01,q=100$\\
\hline
(1,1)   & (0.9611, 2.5334)\\
\hline
(1,1,1)    & (0.9941, 2.0922, 1.6526)\\
\hline
(1,1,1,1)   & (1.0006, 2.0090, 1.7427, 0.8832)\\
\hline
(1,1,1,1,1)    & (0.9961, 2.1296, 0.8972, 2.9274, -0.8337)\\
\hline
\end{tabular}
\end{center}
\end{table}

\begin{table}[htb]
\caption{The results for given $\gamma$ values.}
\label{table2}
\begin{tabular}{|c|c|c|}
\hline
Initial guess  & $\gamma=+0.02$  & Relative error \tabularnewline
\hline
(1,1)  & (0.9247, 2.4262)   & 0.0531  \tabularnewline
\hline
(1,1,1)&(0.9541, 2.0427, 1.3576)   & 0.0350
 \tabularnewline
\hline
(1,1,1,1) &(0.9694, 1.7606, 2.5081, -0.6971)   &0.0290
 \tabularnewline
\hline
(1,1,1,1,1)  &(0.9598, 1.9843, 1.0394, 2.8274, -2.2120)    &0.0256
\tabularnewline
\hline
\end{tabular}
\end{table}

\begin{table}[htb]
\caption{The results for given $\gamma$ values.}
\label{table3}
\begin{tabular}{|c|c|c|}
\hline
Initial guess  & $\gamma=-0.04$  & Relative error \tabularnewline
\hline
(1,1)  & (1.0024, 2.9468)   & 0.0298  \tabularnewline
\hline
(1,1,1)&(1.0941, 1.9951, 2.7186)   & 0.0076
 \tabularnewline
\hline
(1,1,1,1) &(1.0897, 2.0524, 2.3855, 1.5605)   &0.0078
 \tabularnewline
\hline
(1,1,1,1,1)  &(1.0889, 2.0244, 2.8228, -0.2071, 3.1688)    &0.0078
\tabularnewline
\hline
\end{tabular}
\end{table}

\begin{table}[htb]
\caption{Regularization parameters and relative errors for different noise levels.}
\label{table4}
\begin{center}
\tiny
\begin{tabular}{|c|c|c|}
\hline
& $\gamma=+0.02$  & $\gamma=-0.04$ \tabularnewline
\hline
(1,1)  &(0.9250, 2.4252)  &(1.0029, 2.9955)
\tabularnewline
\hline
$\lambda$ & $10^{-5}$ & $10^{-4}$
 \tabularnewline
\hline
Relative error &0.0530  &0.0299
\tabularnewline
\hline
(1,1,1)  &(0.9635, 1.9784, 1.4416)  &(1.0910, 2.0231, 2.6732)
\tabularnewline
\hline
$\lambda$ & $10^{-4}$ & $10^{-5}$
 \tabularnewline
\hline
Relative error &0.0350  &0.0077
\tabularnewline
\hline
(1,1,1,1)  &(1.0105, 1.7411, 1.3630, 1.1535)  &(1.0845, 2.1218, 2.1640, 1.7523)
\tabularnewline
\hline
$\lambda$ & $10^{-3}$ & $10^{-4}$
 \tabularnewline
\hline
Relative error &0.0281  &0.0078
\tabularnewline
\hline
(1,1,1,1,1)  &(0.9661, 1.9976, 1.1769, 0.8555, 0.8037)  &(1.0782, 2.1933, 2.0096, 1.6764, 1.4226)
\tabularnewline
\hline
$\lambda$ & $10^{-4}$ & $10^{-4}$
 \tabularnewline
\hline
Relative error &0.0243  &0.0078
\tabularnewline
\hline
\end{tabular}
\end{center}
\end{table}

The above experiment clearly indicates that the initial guess,  
$q$ (the number of $t$ points) and $n$ (the degree of the polynomial $c(u)$)  
are the main factors affecting the accuracy of the solutions. 
Changes in the differential step $h$ are observed to have a negligible
effect in finding feasible directions. In our experiments $h = 0.1$ appears
to be good enough for a satisfactory solution. 
The initial guesses have to be chosen close enough to the coefficients 
of the correct solution. However, it is hard to give a radius of the trust 
region around the expected coefficients. One way to overcome this problem 
is to start with $n = 1$ with several initial guesses then choose the best 
one for it (call it $x_0$) then make it $n = 2$, use the solution ($x_0$, 1) 
as an initial guess and repeat it for the other dimensions. 
Although the initial guesses in the above experiment have not been determined 
with this procedure, that approach also has been observed to work well 
in the example. It is observed that $q$ has a significant impact on the solution. 
However, the way it affects the algorithm is not very clear.
It appears that $q=100$ works better. As we mentioned above, we solve the
direct problem by MATLAB PDE solver which uses Finite Element Method (FEM). 
In general, increase of mesh points will also increase the accuracy of the solution. 
This might be the fact behind the result $q = 100$ works better than $q=10$ 
using the same initial guesses in the example.

The effect of regularization parameter becomes apparent in the noisy case. 
Since the problem is highly nonlinear, we seek the best regularization 
parameter empirically. We present the best regularization parameter with 
their relative errors, see Table \ref{table4}. When the optimal regularization 
parameter is used, the algorithm ends at relatively better coefficients.

\subsection*{Acknowledgments} %\label{ack}
This research was supported by the Scientific and Technological Research 
Council of Turkey (T\"UB\.{I}TAK) through the project Nr 113F373, also by 
the Zirve University Research Fund. 
The authors would like to thank Dr. R. Tinaztepe for the assistance in 
providing the computational experiment.

\begin{thebibliography}{10}

\bibitem{APS} Abtahi, M.; Pourgholi, R.; Shidfar, A.;
\newblock   Existence and uniqueness of a solution for a two dimensional nonlinear
inverse diffusion problem.
\newblock  Nonl. Analy.: Theory, Method $\&$ Appl.  74, 2462-2467, (2011).

\bibitem{ATU} Akyildiz, F. T.; Tatar, S.; Ulusoy, S.;
\newblock Existence and uniqueness for a nonlinear inverse reaction-diffusion
problem with a nonlinear source in higher dimensions.
\newblock   Math. Meth. Appl. Sci., 36, 2397-
2402, (2013).

\bibitem{N2} Ahmedizadeh, Y.; Soti, V.; Pourgholi, R.;
\newblock Numerical solution of an inverse diffusion problem.
\newblock  Appl. Math. Sci. 1, 863-868, (2007).


\bibitem{C1} Cannon, J. R.;  Duchateau, P.;
\newblock An inverse problem for a nonlinear diffusion equation.
\newblock SIAM J. Appl. Math. 39(2), 272-289, (1980).


\bibitem{D1} Duchateau, P.;
\newblock Monotonicity and uniqueness results in identifying an unknown coefficient in a nonlinear diffusion equation.
\newblock SIAM J. Appl. Math. 41(2), 310-323, (1981).


\bibitem{FRD} Friedman, A.;
\newblock  Partial Differential Equations of Parabolic Type.
\newblock Prentice-Hall: Englewood Cliffs, NJ, (1964).

\bibitem{Hun} Hundsdorfer, W. and Verwer, J.
\newblock Numerical Solution of Time-Dependent Advection-Diffusion-Reaction
Equations.
\newblock   Springer, (2003).

\bibitem{Kirsch} Kirsch, A.;
\newblock An Introduction to Mathematical Theory of Inverse Problems
\newblock  Springer, NY, (1996).

\bibitem{Kot} Kot, M.;
\newblock Elements of Mathematical Ecology
\newblock  Cambridge University Press, (2001).

\bibitem{Murray} Murray, J.;
\newblock Mathematical Biology, I: An Introduction
\newblock  Springer, (2002).

\bibitem{Wein} Protter, M. H.;  Weinberger, H. F.;
\newblock Maximum Principles in Differential Equations
\newblock  Springer-Verlag, NY, (1984).

\bibitem{N1}  Shidfar, A.; Pourgholi, R.; Ebrahimi, M.;
\newblock A numerical method for solving of a nonlinear inverse diffusion problem.
\newblock An Int. Journal Comput.$\&$Math. with Appl. 52, 1021-1030, (2006).

\bibitem{TTU} Tinaztepe, R.; Tatar, S.; Ulusoy, S.;
\newblock Identification of the density dependent coefficient in an
inverse reaction-diffusion problem from a single boundary data.
\newblock  Electron. J. Differential Equations, Vol. 2014,  No. 21, 1--14, (2014).

\bibitem{dev} de Vries, G.; Hillen, T.; Lewis, M.; M\"{u}ller J;
 Sch\"{o}nfisch, B.;
\newblock A Course in Mathematical Biology.
\newblock SIAM, Philadelphia, (2006).

\end{thebibliography}

\end{document}
