\documentclass[reqno]{amsart}
\usepackage{hyperref}
\usepackage{graphicx}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2017 (2017), No. 200, pp. 1--15.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu}
\thanks{\copyright 2017 Texas State University.}
\vspace{8mm}}

\begin{document}
\title[\hfilneg EJDE-2017/200\hfil Optimization method for an inverse problem]
{Optimization method for identifying the source term in an inverse wave equation}

\author[A. Deiveegan, P. Prakash, J. J. Nieto \hfil EJDE-2017/200\hfilneg]
{Arumugam Deiveegan, Periasamy Prakash, Juan Jose Nieto}

\address{Arumugam Deiveegan \newline
Department of Mathematics,
Periyar University,
Salem 636 011, India}
\email{deiveegan.a@gmail.com}

\address{Periasamy Prakash (corresponding author) \newline
Department of Mathematics,
Periyar University,
Salem 636 011, India}
\email{pprakashmaths@gmail.com}

\address{Juan Jose Nieto \newline
Departamento de An\'alisis Matem\'atico,
Facultad de Matem\'aticas,
Universidad de Santiago de Compostela,
Santiago de Compostela 15782, Spain.\newline
Department of Mathematics,
King Abdulaziz University,
Jeddah 21589, Saudi Arabia}
\email{juanjose.nieto.roig@usc.es}

\dedicatory{Communicated by Suzanne Lenhart}

\thanks{Submitted September 1, 2015. Published August 30, 2017.}
\subjclass[2010]{35L05, 35R30, 49J20}
\keywords{Inverse problem; source term; optimal control; model function}

\begin{abstract}
 In this work, we investigate the inverse problem of identifying a
 space-wise dependent source term of wave equation from the measurement
 on the boundary. On the basis of the optimal control framework,
 the inverse problem is transformed into an optimization problem.
 The existence and necessary condition of the minimizer for the cost
 functional are obtained. The projected gradient method and two-parameter
 model function method are applied to the minimization problem and numerical
 results are illustrated.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{example}[theorem]{Example}
\allowdisplaybreaks

\section{Introduction}

We consider an initial-boundary value problem for wave equation in the form
\begin{equation}\label{a1}
\begin{gathered}
u_{tt}(x, t)= \Delta u(x, t)+\sigma(t)f(x),\quad x\in \Omega, \; 0 < t < T, \\
	u(x, 0)  = u_t(x,0)= 0,\quad x\in \Omega, \\	
	u(x, t) = 0, \quad x\in \partial \Omega,\;  0 < t < T,
\end{gathered}
\end{equation}
where $\Omega \subset R^N (N\geq 1)$ is a bounded domain with smooth boundary
$\partial \Omega$, $T>0$, $\sigma$ is a known non-zero function and independent
of the space variable $x$, $f\in L^2(\Omega)$ is unknown and $\Delta$ is
the Laplacian operator.
An additional condition is assumed in the form
\begin{equation}\label{a3}
\frac{\partial u}{\partial n}(x, t) = g(x, t),\quad  x\in\partial\Omega, \; 0<t<T,
\end{equation}
where $g$ is a known function and
\[
\frac{\partial u}{\partial n}(x, t)
= \sum_{i=1}^{N} \gamma_i(x) \frac{\partial u}{\partial x_i}(x, t),\quad
x\in\partial\Omega,\; 0 < t < T,
\]
where $\gamma(x)=(\gamma_{1}(x),\dots ,\gamma_{N}(x))$ is the outward unit
 normal to $\partial\Omega$ at $x$.

 We set $d=\sup \{|x_1-x_2|:x_1,x_2\in \Omega\}$, the diameter of
$\Omega$. Henceforth we assume
\begin{gather}\label{assum1}
T> d,\\
\label{assum2}
\sigma(0) \neq 0,\quad \sigma\in C^1[0,T].
\end{gather}

 For uniqueness and stability of our governing equation we have to choose a
large observation time $T$. The external forces $\sigma(t)f(x)$ in the form of
separation of variables are important in modelling vibrations. For example, if
we set $\sigma(t)=\cos \omega t$ $(\omega \in R)$, then it describes a spatial
force which varies harmonically. Moreover the system \eqref{a1} is regarded as
an approximation to a model for elastic waves from a point dislocation source.
For instance, this kind of point source can be related to models in reflection
seismology, oil and gas exploration, ground-penetrating radar and many other
physical problems \cite{aki}.
According to the Hadamard requirements (existence, uniqueness and stability of
the solution), the inverse problem is ill-posed mathematically \cite{isa,kir}.
\par For an inverse problem with a single measurement, the main methodology is
based on an $L^2$-weighted inequality called a Carleman estimate.
Bellassoued \cite{bellu}, Imanuvilov and Yamamoto \cite{iman},
Klibanov and Timonov \cite{klic} discussed the applications of Carleman estimates
 to inverse problems.

Yamamoto \cite{yam1} studied  the uniqueness and stability result for
reconstruction algorithm using exact controllability for an inverse problem
described by the wave equation. Nicaise and Zair \cite{nic} identified the
source term from interior measurements by using some observability estimates
and controllability results by using  multiplier and Hilbert uniqueness method.

 Bellassoued et al.\ \cite{bells}, Cipolatti and Lopez \cite{cip} and
 Rakesh \cite{rak} obtained uniqueness and stability of inverse problem for the
wave equation by using Dirichlet to Neumann map. Stability estimate was
established for inverse problem for the wave equation by using Neumann
to Dirichlet map in \cite{bao}.

 Mordukhovich and Raymond \cite{mord}, Lagnese et al.\ \cite{lagnese} proved
the optimal control problems for hyperbolic equations with boundary control.
In \cite{bar}, Barbu and Pavel had considered coefficient optimal control
problem for 1-D wave equation with nonhomogeneous boundary periodic inputs.
Liang \cite{liang} studied the bilinear optimal control problem of the
wave equation. Ton \cite{ton} used optimal techniques and established
feedback laws to identify the surface of the unknown source and its intensity
from the observed values of the solution of the wave equation on a portion of
fixed closed surface.

 For stable reconstruction, we have some regularization techniques \cite{engl}.
Engl et al.\ \cite{engl2} established the uniqueness of inverse source problem
of parabolic and hyperbolic equations and analyzed the convergence rate of
the regularized solution. In \cite{yam2}, Yamamoto derived the convergence
rate of Tikhonov regularization scheme for multidimensional inverse
hyperbolic problem.
Cheng et al.\ \cite{cheng} employed a new strategy for a priori choice of
regularizing parameter in Tikhonov's regularization.
Feng et al.\ \cite{feng} solved the identification problem of the wave equation
by using optimal control method. In \cite{yang}, Yang obtained the idea to use
the techniques of optimal control framework to the inverse problem of recovering
the source term in a parabolic equation. Gnanavel et al. \cite{gnana} studied
an inverse problem of reconstructing two time independent coefficients and
the initial data in the linear reaction diffusion system from the arbitrary
sub-domain measurement and final measurement. Tr\"oltzsch \cite{tro}
analyzed the existence of optimal solutions, necessary optimality
conditions on optimal control problems of partial differential equation and
main principles of selected numerical techniques. Hasanov \cite{hasanov}
applied conjugate gradient method to identify the unknown spacewise and time
dependent heat sources of the variable coefficient heat conduction equation.
In \cite{hasanov1}, Hasanov et al.\ established the direct relationship between
two widely used methods, least square method and singular value expansion,
in inverse source and backward problems with final overdetermination for
parabolic and hyperbolic equations. Kabanikhin et al. \cite{kab,kab1} obtained
iterative methods for solving a parameter identification problem in one-
and two-dimensional hyperbolic equations of second order, respectively.
Kabanikhin et al.\ \cite{kab2} analyzed a numerical method for inverse
problem in hyperbolic equation.

 From Theorem \ref{unithm} in Section 2, we have to observe the time
derivative of $\frac{\partial u}{\partial n}$ as well as
$\frac{\partial u}{\partial n}$ itself for the stable reconstruction of
$f\in L^2(\Omega)$. However, from a practical point of view, the observation of
the time derivative is not desirable and frequently we are obliged to
construct $f\in L^2(\Omega) $ only on the basis of $\frac{\partial u}{\partial n} $
itself which is polluted with $L^2$-errors.
Thus the problem of determining $f\in L^2(\Omega)$ from
$\frac{\partial u}{\partial n}\in L^2\left( 0,T;L^2(\partial \Omega)\right)$
is ill-posed in the sense of Hadamard. For stable construction of $f$ we
apply Tikhonov regularization.
To solve the inverse problem, we consider the following optimal control
problem for $\beta > 0$
\begin{equation} \label{optimal}
\min_{f\in A}J_\beta(f),
\end{equation}
where
\begin{gather*}
J_\beta(f)=\frac{1}{2}\int_0^T\int_{\partial\Omega}
\big|\frac{\partial u}{\partial n}(x, t, f)-g(x, t)\big|^2\,dx\,dt
+\frac{\beta}{2}\int_\Omega | f|^2\,dx,\\
  A=\{ f \in L^{2} (\Omega):|f| \leq a\},
\end{gather*}
$J_\beta:A\subseteq L^2(\Omega)\to R^+$, $J_\beta$ depends on $a>0$,
$u$ and $\beta$ is a regularization parameter. For each $\beta >0$,
the source term $f$ is viewed as a control and is adjusted to get the
corresponding $\frac{\partial u}{\partial n}$, close to the observations $g$.
In the optimal control problem, the second integration in $J_\beta(f)$
is called the penalty term, which is used to stabilize the minimizer.

This article is organized as follows: In Section 2, we give some preliminaries.
In Section 3, we consider the given inverse problem as an optimal control
problem and prove the existence of the minimizer, the necessary optimality
condition which has to be satisfied by each optimal control is deduced.
The projected gradient method and two-parameter model function method are
applied to the inverse problem and numerical examples are given in Section 4.

\section{Preliminaries}

\textbf{Weak solution:} Given $\sigma f \in L^1(0,T;L^2(\Omega))$, we say that
a function $u \in C([0, T];H^1_0(\Omega))$ with
$u_t \in C([0, T];L^2(\Omega)), u_{tt} \in C([0, T];H^{-1}(\Omega))$
is a weak solution of the problem \eqref{a1} and \eqref{a3} provided
\begin{enumerate}
	\item $\langle u_{tt},\phi\rangle + B[u,\phi;t]
=\sigma\int_{\Omega}f\phi\,dx $, for any $\phi \in H^1_0(\Omega)$ and a.e.\
 $0\leq t\leq T$;
	\item $u(\cdot,0)=0$;
	\item $u_t(\cdot,0)=0$
\end{enumerate}
where $\langle \cdot,\cdot\rangle$ denotes the duality pairing of
$H^{-1}(\Omega)$ and $H^1_0(\Omega)$ and
$B[u,\phi;t]=\int_{\Omega}\nabla u\nabla \phi\,dx$.

\begin{lemma}[\cite{pedersen}]
If $\sigma f \in L^1(0,T;L^2(\Omega))$, then there exists a unique solution
 $u$ to \eqref{a1} such that
$u\in C\left([0,T];H^1_0(\Omega)\right) \cap C^1\left([0,T];L^2(\Omega)\right)$
and
\begin{equation}\label{a2}
\frac{\partial u}{\partial n} \in L^2\left( \partial \Omega\times(0,T)\right).
\end{equation}
\end{lemma}

\begin{lemma}[\cite{pedersen}] \label{lem3}
If $\sigma f \in L^1(0,T;L^2(\Omega))$ and $\partial\Omega$ is $C^2$, then
the weak solution $u=u(f)$ satisfies
\begin{gather}
\sup_{0\leq t\leq T}\left(\|u\|_{H^1_0(\Omega)}+\|u_t\|_{L^2(\Omega)}\right)
+\|u_{tt}\|_{L^2(0, T;H^{-1}(\Omega))}\leq C\|f\|_{L^2(\Omega)},\\
\label{a5}
\|\frac{\partial u}{\partial n}\|_{L^2\left( \partial \Omega\times(0,T)\right)}
\leq C_0\|f\|_{L^2(\Omega)},
\end{gather}
where $C$ and $C_0$ are constants depending only on $\Omega$, $T$ and $\sigma$.
\end{lemma}

\begin{theorem}[\cite{yam}] \label{unithm}
Under assumptions \eqref{assum1} and \eqref{assum2} we have:

(1) (Uniqueness) If the solution $u(f)$ to \eqref{a1} satisfies
\[
\frac{\partial u}{\partial n}(x, t) = 0,\quad  x\in\partial\Omega, \; 0<t<T,
\]
then $f(x)=0$ for almost all $x\in \Omega$.

(2) (Continuity) There exists a constant $C=C(\Omega, T)$ such that
\begin{equation}
C^{-1}\|\frac{\partial u}{\partial n}(f)\|_{H^1(0,T;L^2(\partial \Omega))}
\leq\|f\|_{L^2(\Omega)}\leq C\|\frac{\partial u}{\partial n}(f)
\|_{H^1(0,T;L^2(\partial \Omega))}
\end{equation}
for any $f\in L^2(\Omega)$.
\end{theorem}

\section{Optimal control problem}

For a fixed $\beta$, we consider the functional $J_\beta(f)$ as $J(f)$ and
\begin{equation}\label{b1}
J(\bar{f})=\min_{f\in A}J(f).
\end{equation}

\subsection{Existence of minimizer}

\begin{theorem}\label{thm1}
There exists a unique minimizer $\bar{f} \in A$ of $J$,
that is, $J(\bar{f})= \min_{f\in A}J(f)$.
\end{theorem}

\begin{proof}
It can be easily seen that $J(f)$ is nonnegative and thus $J(f)$ has greatest
lower bound $ \inf_{f\in A}J(f)$. Let $\{f_k\}$ be a minimizing
sequence, for example,
\[
 \inf_{f\in A}J(f)\leq J(f_k)\leq \inf_{f\in A}J(f)+\frac{1}{k}\,,\quad
 k= 1, 2,\dots.
 \]
Since $J(f_k)\leq C_1$ and from the structure of $J$ we easily deduce that
$\|f_k\|_{L^2(\Omega)} \leq C_1$, where $C_1$ is independent of $k$.
Let $\{u_k\}$ be the solution of \eqref{a1} corresponding to $\{f_k\}$.
 By Lemma \ref{lem3}, we have
\[
\sup_{0\leq t\leq T}\Big(\|u_k\|_{H^1_0(\Omega)}+\|(u_k)_t\|_{L^2(\Omega)}\Big)
+\|(u_k)_{tt}\|_{L^2(0, T;H^{-1}(\Omega))}
\leq C\|f_k\|_{L^2(\Omega)}.
\]
This means that we have uniform bounds for $u_k\in L^\infty(0,T;H^1_0(\Omega))$
and $(u_k)_t\in L^\infty(0,T;L^2(\Omega))$. On a subsequence of $f_k$ and $u_k$,
by weak compactness, there exists $\bar{u}$ in $C([0, T]; H^1_0(\Omega))$ such that
\begin{gather*}
f_k \rightharpoonup \bar{f} \quad  \text{weakly in } L^2(\Omega),\\
u_k \rightharpoonup \bar{u}  \quad \text{weak* in } L^\infty(0,T;H^1_0(\Omega)),\\
(u_k)_t \rightharpoonup \bar{u}_t  \quad \text{weak* in }
L^\infty(0,T;L^2(\Omega)),\\
(u_k)_{tt} \rightharpoonup \bar{u}_{tt}  \quad \text{weakly in }
 L^2(0,T;H^{-1}(\Omega)),\\
\frac{\partial u_k}{\partial n} \rightharpoonup
 \frac{\partial \bar{u}}{\partial n} \quad \text{weakly in }
L^2\left( \partial \Omega\times(0,T)\right).
\end{gather*}
Using a compactness result from \cite{simon}, we have $u_k\to \bar{u}$
strongly in $L^\infty (0,T;L^2(\Omega))$. By the definition of weak solution,
we have
\[
\langle (u_k)_{tt},\phi\rangle
=-\int_{\Omega}[\nabla u_k\nabla \phi -\sigma f_k\phi]\,dx
\]
for any $\phi \in H^1_0(\Omega)$ and a.e.\ $0\leq t\leq T$.
If we pass to the limit as $k\to \infty$ in the weak formulation of $u_k$, we obtain
\[
\langle \bar{u}_{tt},\phi\rangle
=-\int_{\Omega}[\nabla \bar{u}\nabla \phi -\sigma\bar{f}\phi]\,dx.
\]
Thus $\left(\bar{f}(x), \bar{u}(x, t)\right)$ satisfies \eqref{a1}.
Moreover, using \eqref{a5} and the lower-semicontinuity of the $L^2$ norm
 with respect to weak convergence, we obtain
\[
J(\bar{f})\leq  \liminf_{k\to \infty}J(f_k)=\min_{f\in A}J(f).
\]
Hence
$J(\bar{f})= \min_{f\in A}J(f)$.
It is easy to see that $\frac{\partial u}{\partial n}(x, t, f)$ is linear,
and hence convex, with respect to $f$; that is,
\[
\frac{\partial u}{\partial n}(x,t,\epsilon f_1+ (1-\epsilon)f_2)
=\epsilon \frac{\partial u}{\partial n}(x,t,f_1)+(1-\epsilon)
\frac{\partial u}{\partial n}(x,t,f_2),\quad \forall \epsilon \in [0,1].
\]
Then the strict convexity of $L^2$-norm naturally leads to the strict
convexity of $J(f)$ which implies that the minimizer $\bar{f}$ is unique.
 This completes the proof.
\end{proof}

\subsection{Necessary condition}

We are now in a position to state the necessary (and, owing to the convexity,
also sufficient) optimality conditions.

\begin{theorem}\label{thm2}
Let $f$ be the solution of the optimal control problem \eqref{b1}.
Then there exists a triple of functions $(u, v, f)$ satisfying the system
\begin{equation}
\begin{gathered}\label{c1}
	u_{tt}(x, t)= \Delta u(x, t)+\sigma(t)f(x),\quad x\in \Omega, \; 0 < t < T, \\
	u(x, 0) = u_t(x,0)= 0,\quad x\in \Omega, \\	
	u(x, t) = 0, \quad x\in \partial \Omega, \; 0 < t < T.
\end{gathered}
\end{equation}
and
\begin{equation}
\label{c2}
\begin{gathered}
	 v_{tt}(x, t)=\Delta v(x, t),\quad x\in \Omega,\ \ 0 < t < T, \\
 v(x,0)=v_t(x,0)=0,\quad x\in\Omega,\\
 v(x,t)=\frac{\partial u}{\partial n} - g(x, t),\quad x\in\partial \Omega, \;
 0 < t < T.
\end{gathered}
\end{equation}
Moreover
\begin{equation}\label{c3}
 \beta\int_\Omega f(h -f)\,dx -\int^T_0 \int_{\Omega}(v\sigma(t)(h-f))\,dx\,dt \geq 0
\end{equation}
for any $h\in A$.
\end{theorem}

\begin{proof}
For any $h\in A, 0\leq \delta \leq 1$, we have
$f_\delta = (1-\delta ) f + \delta h \in A$. Then
\begin{equation}\label{c4}
J_\delta = J(f_\delta )=\frac{1}{2}\int_0^T\int_{\partial\Omega}
\big|\frac{\partial u}{\partial n}(x,t,f_\delta)-g(x,t)\big|^2\,dx\,dt
+\frac{\beta}{2}\int_\Omega|f_\delta|^2d x.
\end{equation}
Let $u_\delta $ be the solution of \eqref{c1} with given
$f = f_{\delta} $. Since $f$ is an optimal solution,
\begin{equation} \label{c5}
\begin{aligned}
 \frac{dJ_\delta}{d\delta}\big|_{\delta=0}
&=\int_0^T\int_{\partial\Omega} [\frac{\partial u}{\partial n}(x, t, f_\delta)
-g(x, t)]
\frac{\partial}{\partial n}
\big(\frac{\partial u_{\delta}}{\partial \delta}\big)\big|_{\delta=0}\,dx\,dt\\
&\quad +\beta\int_\Omega f(h-f) d x \geq 0.
\end{aligned}
\end{equation}
Let $\tilde{u}_\delta = (\frac{\partial u_\delta}{\partial \delta})$,
direct calculations lead to the  equation
\begin{equation}\label{c6}
\begin{gathered}
 \frac{\partial^2\tilde{u}_\delta}{\partial t^2}
= \Delta\tilde{u}_\delta + \sigma(h-f),\quad x\in \Omega, \quad 0 < t < T, \\
\tilde{u}_\delta(x, 0) = \frac{\partial\tilde{u}_\delta}{\partial t}(x, 0) = 0,
\quad x\in \Omega,\\
  \tilde{u}_\delta(x, t)=0,\quad  x\in \partial \Omega, \quad 0 < t < T.
\end{gathered}
\end{equation}
Let $\xi = \tilde{u}_\delta$ at $\delta = 0$. Then $\xi$ satisfies the
following equation
\begin{equation}\label{c7}
\begin{gathered}
  \xi_{tt}=\Delta\xi+\sigma(h-f),\quad x\in \Omega, \; 0 < t < T, \\
\xi(x,0)=\xi_t(x,0)=0,\quad x\in\Omega,\\
  \xi(x,t)=0,\quad x\in\partial \Omega, \; 0 < t < T.
\end{gathered}
\end{equation}
From \eqref{c5}, we have
\begin{equation}\label{c8}
\int^T_0 \int_{\partial\Omega}(\frac{\partial u}{\partial n} - g(x, t))
\frac{\partial \xi}{\partial n} (x, t)\,dx\,dt + \beta\int_\Omega  f (h -f)\,dx \geq 0.
\end{equation}
Let $L\xi=\xi_{tt}-\Delta\xi $ and $v$ be the solution of the following problem
\begin{gather*}
L^*v = v_{tt}-\Delta v=0,\\
v(x,T) = v_t(x,T)=0, \\
v(x,t) = \frac{\partial u}{\partial n} - g(x, t),
\end{gather*}
where $L^*$ is the adjoint operator of the operator $L$.
From the above equation we have
\begin{equation}\label{c9}
\begin{aligned}
0&=\int^T_0\int_{\Omega}(\xi L^*v)d x\,dt \\
&=\int^T_0 \int_{\Omega} v(\xi_{tt}-\Delta\xi)d x\,dt
+\int^T_0\int_{\partial\Omega}\frac{\partial \xi}{\partial n}
(\frac{\partial u}{\partial n}-g)d x\,dt .
\end{aligned}
\end{equation}
Combining \eqref{c8} with  \eqref{c9} we have
\begin{equation}
 \beta\int_\Omega f(h -f)\,dx -\int^T_0 \int_{\Omega}(v\sigma(t)(h-f))\,dx\,dt \geq 0.
\end{equation}
This completes the proof.
\end{proof}

\section{Numerical examples}

After obtaining the theoretical results, we propose the numerical schemes
for the inverse problem. We solve the control problem \eqref{b1}
 directly from the cost functional; but the regularization parameter plays
 a major role in the numerical simulation. In fact, the effectiveness of
a regularization method depends strongly on the choice of the regularization
parameter.  Kunisch and Zou \cite{kunisch} proposed a two parameter algorithm
to choose some reasonable regularization parameters in an efficient manner.
The basic tool is to use the well known Morozov discrepancy principle
\cite{engl, kir} and the damped Morozov discrepancy principle \cite{kunisch}.

 We consider the inverse problem of the form
\begin{gather*}
P:L^2(\Omega)\to L^2(\partial\Omega\times(0,T)),\\
Pf=\frac{\partial u}{\partial n}=g(x,t),
\end{gather*}
where $P$ is a linear bounded operator, $g$ is the observation data and
$\frac{\partial u}{\partial n}$ satisfies the equation \eqref{a1}.
In applications, $g$ is often corrupted by some error and the noise data of
$g$ with noise level $\delta$ are denoted by $g^\delta$.

 We rewrite the Tikhonov functional
\begin{equation}\label{t1}
\begin{gathered}
\min_{f^\delta\in A} J_\beta (f^\delta)
= \frac{1}{2}\int_0^T\int_{\partial\Omega} \left|Pf^\delta-g^\delta\right|^2\,dx\,dt
+\frac{\beta}{2}\int_\Omega \left| f^\delta\right|^2\,dx,\\
  A= \{ f^\delta(x):|f^\delta| \leq a, f^\delta \in L^{2} (\Omega)\},
\end{gathered}
\end{equation}
where $f^\delta$ is the corresponding regularization solution for $g^\delta$.
For fixed $\beta$, the problem \eqref{t1} is solved by projected gradient
 method \cite{tro}. For this method, the derivative of $J_\beta$ at an
iterate $f^\delta_n$ is given by
\[
J'_\beta (f^\delta_n)(h-f)
= \int_\Omega \Big(-\int_0^T z_n \sigma(t) dt + \beta f^\delta_n\Big)(h-f)dx,
\]
where $z_n$ is the solution of the  adjoint equation
\begin{equation}\label{exm3}
\begin{gathered}
	z_{tt}(x, t) =  \Delta z(x, t),\quad x\in \Omega, \; 0 < t < T,\\
 	z(x, T) =  z_t(x, T)= 0, \quad x\in \Omega, \\	
 	z(x, t) =  \frac{\partial u}{\partial n} - g(x, t), \quad x\in \partial \Omega,
\; 0 < t < T.
\end{gathered}
\end{equation}
By the Riesz representation theorem, we obtain the usual representation of the
reduced gradient
\[
w_n=J'_\beta (f^\delta_n) = -\int_0^T z_n (x,t) \sigma (t) dt + \beta f^\delta_n.
\]
Set $f^\delta_{n+1}=\mathbb{P}_{[A]}\{f^\delta_n-s w_n\}$ for the iteration,
where $\mathbb{P}$ denotes the projection onto $A$ and $s$ is the optimal step size.
The stopping criterion for the iteration is chosen as
$\|f^\delta_{n+1}-f^\delta_n\|_{L^2(\Omega)}\leq tol$.

The two equations \eqref{a1} and \eqref{exm3} are solved by the implicit
finite difference method \cite{rich}. They are discretized based on the
difference approximation
\begin{gather*}%\label{ndif1}
u(x_i, y_j, t_k)_{tt}
=\frac{u(x_i, y_j, t_{k+1})-2u(x_i, y_j, t_k)+u(x_i, y_j, t_{k-1})}{(\Delta t)^2}, \\
u(x_i, y_j, t_k)_{xx}
=\frac{u(x_{i+1}, y_j, t_k)^{1/4}-2u(x_i, y_j, t_k)^{1/4}+u(x_{i-1}, y_j, t_k)^{1/4}}{(\Delta x)^2}, \\
u(x_i, y_j, t_k)_{yy}
=\frac{u(x_i, y_{j+1}, t_k)^{1/4}-2u(x_i, y_j, t_k)^{1/4}+u(x_i, y_{j-1}, t_k)^{1/4}}{(\Delta y)^2}, \\
u(x_i, y_j, t_k)^{1/4}
=\frac{1}{4}u(x_i, y_j, t_{k+1})+\frac{1}{2}u(x_i, y_j, t_k)+\frac{1}{4}u(x_i, y_j, t_{k-1}).
\end{gather*}

 It is easy to check that all above approximation formulas are of second-order
accuracy. The implicit schemes for \eqref{a1} and \eqref{exm3} are
obtained by approximating the derivatives using the above formulas.

 The popular Morozov principle has received a considerable amount of
 attention in linear inverse problems and turns out to be very effective
for many inverse problems. This principle suggests choosing the
regularization parameter $\beta$ in such a way that the error due to
the regularization is equal to the error due to the observation data, that is,
$\beta$ is chosen according to
\begin{equation}\label{t4}
\int_0^T\int_{\partial\Omega} \left|Pf^\delta(\beta)-g^\delta\right|^2\,dx\,dt
+\beta^\gamma\int_\Omega \left| f^\delta(\beta)\right|^2\,dx=\delta^2,
\end{equation}
where $\gamma \in [1,\infty]$ and $\delta$ is the noise level defined
by $\delta^2=\int_0^T\int_{\partial\Omega}|g-g^\delta|^2\,dx\,dt$.

 From \cite{kunisch}, \eqref{t1} has a unique minimizer for any fixed $\beta$,
denoted as $f^\delta(\beta)$ and it can be characterized as the solution to
the system
\[
P^*Pf^\delta+\beta f^\delta =P^*g^\delta
\]
or in variational form
\[
(Pf^\delta,Pq)_{L^2(\partial\Omega\times(0,T))}
+\beta(f^\delta,q)_{L^2(\Omega)}=(g^\delta,Pq)_{L^2(\partial\Omega\times(0,T))}
\quad  \text{for all }  q\in L^2(\Omega).
\]

 It is obvious that the convergence rate of the damped Morozov discrepancy
principle is quite important for the application of this strategy.
For the linear operator $P$, this result can be stated as follows.

\begin{lemma}[\cite{wang}]
Let $Pf=g$ with noisy data $g^\delta$ such that
$\|g-g^\delta\|\leq\delta<\|g^\delta\|$. Let the Tikhonov solution
$f^\delta$ satisfy the damped Morozov discrepancy principle \eqref{t4}.
Assume that there exists $w\in L^2(\partial\Omega\times(0,T))$ such that
$f=P^*w\in P^*(L^2(\partial\Omega\times(0,T)))$. Then
\[
\|f^\delta-f\|_{L^2(\Omega)}=O(\delta^{\min\{1/2,2(\gamma-1)/\gamma\}}).
\]
\end{lemma}

We frequently use the minimal cost functional of \eqref{t1}
\begin{equation}\label{t2}
F(\beta)= \frac{1}{2}\int_0^T\int_{\partial\Omega}
|Pf^\delta(\beta)-g^\delta|^2\,dx\,dt
+\frac{\beta}{2}\int_\Omega|f^\delta(\beta)|^2\,dx.
\end{equation}
It is known that both $f^\delta(\beta)$ and $F(\beta)$ are infinitely
 differentiable with respect to $\beta$.
Moreover we have
\begin{equation}\label{t3}
F'(\beta)=\frac{1}{2}\int_\Omega|f^\delta(\beta)|^2\,dx.
\end{equation}
In terms of $F(\beta)$, the Morozov equation \eqref{t4} can be written as
\begin{equation}\label{t5}
F(\beta)+(\beta^\gamma-\beta )F'(\beta)=\frac{1}{2}\delta^2.
\end{equation}
Then the entire difficulty of choosing the regularization parameter $\beta$
lies in solving the highly nonlinear equation \eqref{t5} for $\beta$ effectively.

\begin{lemma}[\cite{kunisch}]
If $F(0) < \frac{1}{2}\delta^2\leq F(1)$, then there exists a unique solution
$\beta^*\in (0, 1]$ to the Morozov equation \eqref{t5}.
\end{lemma}

To solve \eqref{t5}, we use model function approach. By a model function we mean
 a parametrized function which preserves the major properties of the non-negative
function $F(\beta)$ and which approximates $F(\beta)$ in a manner to be specified
 below.

From \cite{kunisch} the two-parameter model function algorithm is based on the
important identity
\begin{equation}\label{t6}
2F(\beta)+2\beta F'(\beta)
+\int_0^T\int_{\partial\Omega}|Pf^\delta(\beta)|^2\,dx\,dt=2\hat{C},
\end{equation}
where $\hat{C}$ is an integration constant. To derive the model function,
we make the following approximation in the equation \eqref{t6}.
\begin{equation}\label{t7}
(Pf^\delta(\beta),Pq(\beta))_{L^2(\partial\Omega\times(0,T))}
\approx \tilde{P}(f^\delta(\beta),q(\beta))_{L^2(\Omega)}
\end{equation}
where $\tilde{P}$ is a positive constant. Then equation \eqref{t6} reduces to
\begin{equation}\label{tt1}
\beta m' (\beta)+m(\beta)+\tilde{P}m' (\beta)=\hat{C}.
\end{equation}
Solving the ordinary differential equation \eqref{tt1}, we obtain
\[
m(\beta)=\hat{C}+\frac{\tilde{C}}{\tilde{P}+\beta},
\]
where $\tilde{C}$ is an integration constant. Then, by assuming
$F(0)=0$ or $m(0)=0$, one can remove the constant $\hat{C}$ and arrive
at the two-parameter model function
\begin{equation}\label{t8}
m(\beta)=\bar{C}\Big(1-\frac{\bar{P}}{\bar{P}+\beta}\Big).
\end{equation}
With this model function, the two-parameter algorithm is used to solve
the Morozov equation \eqref{t5}.

Based on the analysis above, the procedure of the iteration can be stated
as follows:
Given $\beta_0>0$ and $\epsilon>0$, set $k=0$.
\smallskip

\noindent\textbf{Step 1:} Choose an initial value of iteration
$f^\delta=f^\delta_0(x)$.
\smallskip

\noindent\textbf{Step 2:} Solve the optimal control problem \eqref{t1} to
obtain $f^\delta(\beta_k)$ and compute $F(\beta_k)$ and $F'(\beta_k)$.
Then update $\bar{C}_k$ and $\bar{P}_k$ from
 \begin{gather}\label{t9}
m(\beta_k) = \bar{C}_k\Big(1-\frac{\bar{P}_k}{\bar{P}_k+\beta_k}\Big)
 =F(\beta_k),\\
m'(\beta_k)= \frac{\bar{C}_k\bar{P}_k}{(\bar{P}_k+\beta_k)^2}=F'(\beta_k).
\end{gather}
\smallskip

\noindent\textbf{Step 3:} Set the $k$th model function
\[
m(\beta)= \bar{C}_k\Big(1-\frac{\bar{P}_k}{\bar{P}_k+\beta}\Big)
\]
and solve for $\beta_{k+1}$ the approximate Morozov's equation
\begin{equation}\label{t10}
m(\beta)+(\beta^\gamma-\beta )m'(\beta)=\frac{1}{2}\delta^2.
\end{equation}
\smallskip

\noindent\textbf{Step 4:}
If $|\beta_{k+1}-\beta_k|<\epsilon$,
then stop the iteration; otherwise set $k=k+1$ and go to Step 2.

 We have performed two numerical experiments to test the stability of
our algorithm for different noise levels and initial data. The stopping
criterion for the two-parameter iteration is chosen as
$|\beta_{k+1}-\beta_k|/\beta_{k+1}\leq 10^{-2}$. In all experiments,
 some basic parameters are $T=1$, $\sigma(t)=\cos t$, $s=1$ and $\gamma=1.4$.
We apply the noise data generated in the form
\[
g^\delta=g(1+ \hat{\delta}\times \operatorname{random}(0,1)),
\]
where $\hat{\delta}$ is a noise level.


In the first numerical experiment, we consider one dimensional problem
($N=1$).

\begin{example}\label{t13}  \rm
 Let  $f(x)=\sin \pi x, x\in (0,1) $. The exact solution of the forward problem
for this $f(x)$ is
  \begin{gather*}
  u(x,t)= \frac{1}{1-\pi^2}\sin\pi x\left(\cos\pi t-\cos t\right),\quad
 (x,t) \in [0,1]\times[0,1],\\
   \frac{\partial u}{\partial n}
= -\frac{\pi}{1-\pi^2}\left(\cos\pi t-\cos t\right),\quad\text{at } x=0,1.
   \end{gather*}
    \end{example}
 The source term $f(x)$ is to be recovered from the noise observation data
$g^\delta$. In our implementations, the mesh size and time step size are
$\Delta x=\Delta t=1/50$. The tolerance of the optimal control problem is
taken as $tol=10^{-4}$.

\begin{table}[ht]
\caption{$\beta$ value, the errors in observation and source for the
initial value $f^\delta_0=0$.} \label{table1}
\begin{center}
\begin{tabular}{*5{|c}|}\hline
			$\hat{\delta}$ & $\beta$  & $\|g-g^{\delta}\|_{L^2(\partial \Omega  \times  (0,T))}$ & $\|f-f^{\delta }\|_{L^2(\Omega )}$ & \text{iter($\beta$)}\\ \hline
			0.01 &0.000106318 &0.00145371 &0.0156492& 6 \\
   0.05 &0.00121214 &0.00756438&0.0271989& 5 \\
  0.1&0.00147077&0.0112825&0.0486434&8\\ \hline
		\end{tabular}
\end{center}
\end{table}

\begin{table}[ht]
\caption{$\beta$ value, the errors in observation and source for the initial value
 $f^\delta_0=-1$.} \label{table2}
\begin{center}
\begin{tabular}{*5{|c}|}\hline
			$\hat{\delta}$ & $\beta$  & $\|g-g^{\delta}\|_{L^2(\partial \Omega  \times  (0,T))}$ & $\|f-f^{\delta }\|_{L^2(\Omega )}$ & \text{iter($\beta$)}\\ \hline
			0.01 &0.000110693& 0.00147976&0.0113912& 6 \\
   0.05 &0.000819191&0.00597265&0.0234172&6 \\
  0.1&0.00308773&0.014366&0.0402498&5\\ \hline
		\end{tabular}
\end{center}
\end{table}

\begin{table}[ht]
\caption{$\beta$ value, the errors in observation and source for the initial
value $f^\delta_0=x(1-x)$.} \label{table3}
\begin{center}
		\begin{tabular}{*5{|c}|}\hline
			$\hat{\delta}$ & $\beta$  & $\|g-g^{\delta}\|_{L^2(\partial \Omega  \times  (0,T))}$ & $\|f-f^{\delta }\|_{L^2(\Omega )}$ & \text{iter($\beta$)}\\ \hline
			0.01 &0.000112646 &0.00148598 &0.0126763 & 6 \\
   0.05 &0.00140652&0.00827753&0.0213082&5 \\
  0.1&0.00202417& 0.0123668&0.0377899&6\\ \hline
		\end{tabular}
\end{center}
\end{table}

In Tables \ref{table1}--\ref{table3}, we present some numerical results of
Example \ref{t13} with different noise levels $\hat{\delta}$, different
initial value of $f^\delta=f^\delta_0$ and $\beta_0=0.1$.
The regularization parameter $\beta$ obtained by two-parameter algorithm
is given in the second column. The third and fourth columns of the tables
give the errors in observation data $g$ and errors in computed source term
respectively. The last column shows the number of iterations of the two-parameter
algorithm.

\begin{figure}[ht]
\begin{center}
 \includegraphics[width=0.5\textwidth]{fig1}
\end{center}
	\caption{Exact and computed source term for different $\hat{\delta}$ and
$f^\delta_0=0$ in 1-D wave equation.}
	\label{fig:1_D_0}
\end{figure}


\begin{figure}[ht]
\begin{center}
\includegraphics[width=0.5\textwidth]{fig2}
\end{center}
	\caption{Exact and computed source term for different $\hat{\delta}$
and $f^\delta_0=-1$ in 1-D wave equation}
	\label{fig:1_D_-1}
\end{figure}

 Figure \ref{fig:1_D_0} shows the plot of the approximation of the unknown
source function $f(x)$ for different noise levels $\hat{\delta}$ and the initial
guess $f^\delta_0=0$. From this, we can see that the efficiency of the
reconstruction of the source term depends on the noise level.
Figure \ref{fig:1_D_-1} shows the plot of the approximation of the unknown
source function $f(x)$ for different noise levels $\hat{\delta}$ and the initial
guess $f^\delta_0=-1$. From this, we can see that the approximation of $f(x)$
converges even when the initial guess is negative.

\begin{figure}[ht]
\begin{center}
\includegraphics[width=0.5\textwidth]{fig3}  % fig3_1.eps
\end{center}
	\caption{Exact and computed source term for different $\hat{\delta}$ and
$f^\delta_0=x(1-x)$ in 1-D wave equation}
	\label{fig:1_D_x(1-x)}
\end{figure}

  Figure \ref{fig:1_D_x(1-x)} shows the plot of the approximation of the unknown
source function $f(x)$ for different noise levels $\hat{\delta}$ and the
initial guess $f^\delta_0=x(1-x)$. This initial guess is similar in
characteristics to the known source.
In the second numerical experiment, we consider a two dimensional problem
($N=2$).

\begin{example}\label{t14} \rm
Let $f(x,y)=\sin \pi x \sin\pi y$, $(x,y)\in (0,1)\times(0,1)$. The exact solution
is
\begin{gather*}
u(x, y, t)=\frac{1}{1-2\pi^2}\sin\pi x\sin\pi y\big(\cos\pi\sqrt{2}t
-\cos t\big), \\
\frac{\partial u}{\partial n}=-\frac{\pi}{1-2\pi^2}\sin\pi y
\big(\cos\sqrt{2}\pi t-\cos t\big), \quad\text{ on } x=0,1,\\
\frac{\partial u}{\partial n}=-\frac{\pi}{1-2\pi^2}\sin\pi x
\big(\cos\sqrt{2}\pi t-\cos t\big),\quad\text{ on } y=0,1.
\end{gather*}
   \end{example}

 The source term $f(x,y)$ is to be recovered from the noisy observation data
$g^\delta$. In the two dimensional case, the mesh sizes and time step size are
$\Delta x=\Delta y=\Delta t=\frac{1}{10}$. The tolerance of the optimal control
problem is taken as $\mathit{tol}=10^{-4}$.

\begin{table}[ht]
\caption{$\beta$ value, the errors in observation and source for the initial value
$f^\delta_0=0$.} \label{table4}
\begin{center}
\begin{tabular}{*5{|c}|}\hline
			$\hat{\delta}$ & $\beta$  & $\|g-g^{\delta}\|_{L^2(\partial \Omega  \times  (0,T))}$ & $\|f-f^{\delta }\|_{L^2(\Omega )}$ & \text{iter($\beta$)}\\ \hline
			0.05 &0.00120084& 0.00846277 &0.020988 & 10 \\
   0.07 &0.00260834&0.012032 &0.0224599&7 \\
  0.1&0.0051492&0.017512&0.026386&5\\ \hline
		\end{tabular}
\end{center}
\end{table}

\begin{table}[ht]
\caption{$\beta$ value, the errors in observation and source for the initial
value $f^\delta_0=-1$.} \label{table5}
\begin{center}
		\begin{tabular}{*5{|c}|}\hline
			$\hat{\delta}$ & $\beta$  & $\|g-g^{\delta}\|_{L^2(\partial \Omega  \times  (0,T))}$ & $\|f-f^{\delta }\|_{L^2(\Omega )}$ & \text{iter($\beta$)}\\ \hline
			0.05 &0.00118572 &0.00816773 &0.0235247& 10 \\
   0.07 &0.00272184&0.0120211&0.0245797&7 \\
  0.1&0.00505242& 0.0172179&0.0251407&5\\ \hline
		\end{tabular}
\end{center}
\end{table}

\begin{table}[ht]
\caption{$\beta$ value, the errors in observation and source for the initial
value $f^\delta_0=xy(1-x)(1-y)$.} \label{table6}
\begin{center}
		\begin{tabular}{*5{|c}|}\hline
			$\hat{\delta}$ & $\beta$  & $\|g-g^{\delta}\|_{L^2(\partial \Omega  \times  (0,T))}$ & $\|f-f^{\delta }\|_{L^2(\Omega )}$ & \text{iter($\beta$)}\\ \hline
			0.05 &0.00103108 &0.00820616 &0.0199434 & 12 \\
   0.07 &0.00236611&0.0114796&0.0235887&7 \\
  0.1&0.00490916& 0.0168844&0.0274358&5\\ \hline
		\end{tabular}
\end{center}
\end{table}

In Tables \ref{table4}--\ref{table6}, we present some numerical results for the
two dimensional equation as in Example \ref{t14} with different noise levels
$\hat{\delta}$, different initial values of $f^\delta=f^\delta_0$ and
$\beta_0=0.1$. From the results, we see that the source term $f$ is recovered
stably from the noisy observation data $g^\delta$ for the different initial values.

\begin{figure}[ht]
\begin{center}
\includegraphics[width=0.5\textwidth]{fig4}
\end{center}
	\caption{Computed source term in 2-D wave equation for $f^\delta_0=0$ and
$\hat{\delta}=0.1$}
	\label{fig:2_D_1}
\end{figure}

\begin{figure}[ht]
\begin{center}
\includegraphics[width=0.5\textwidth]{fig5}
\end{center}
	\caption{Computed source term in 2-D wave equation for $f^\delta_0=-1$
and $\hat{\delta}=0.1$}
	\label{fig:2_D_2}
\end{figure}

\begin{figure}[ht]
\begin{center}
\includegraphics[width=0.5\textwidth]{fig6}
\end{center}
	\caption{Computed source term in 2-D wave equation for $f^\delta_0=xy(1-x)(1-y)$
and $\hat{\delta}=0.1$}
	\label{fig:2_D_3}
\end{figure}

In Figures \ref{fig:2_D_1}--\ref{fig:2_D_3}, we draw the computed source term
$f(x,y)$ for the noise level $\hat{\delta}=0.1$ and different initial
values of $f^\delta$.

\subsection*{Acknowledgments}
This work was supported by University Grants Commission, New Delhi, India,
Major Research Project 41-798/2012 (SR).
This work has been completed during the visits of P. Prakash to the USC and has
been partially supported by Ministerio de Econom\'ia y Competitividad (Spain),
Project MTM2010-15314, and cofinanced by the European Community fund FEDER.
 The second author was supported by the University Grants Commission, New Delhi,
India, under Special Assistance Programme F.510/7/DRS-1/2016(SAP-I).

The authors would like to thank the referees for the valuable suggestions
 to improve the paper.

\begin{thebibliography}{99}

\bibitem{aki} K. Aki, P. G. Richards;
\emph{Quantitative Seismology Theory and Methods,} Freeman, New York, 1980.

\bibitem{bao} G. Bao, K. Yun;
\emph{On the stability of an inverse problem for the wave equation,}
Inverse Problems, 25 (2009), 045003.

\bibitem{bar} V. Barbu, N. H. Pavel;
\emph{Determining the acoustic impedance in the 1-D wave equation via an optimal
control problem,} SIAM. J. Control. Optim., 35 (1997), 1544--1556.

\bibitem{bellu} M. Bellassoued;
\emph{Uniqueness and stability in determining the speed of propagation of
second-order hyperbolic equation with variable coefficients,}
Appl. Anal., 83 (2004), 983--1014.

\bibitem{bells} M. Bellassoued, D. Jellali, M. Yamamoto;
\emph{Stability estimate for the hyperbolic inverse boundary value problem
by local Dirichlet - to - Neumann map,} J. Math. Anal. Appl., 343 (2008), 1036--1046.

\bibitem{cheng} J. Cheng, M. Yamamoto;
\emph{One new strategy for a priori choice of regularizing parameters in
Tikhonov's regularization,} Inverse Problems, 16 (2000), L31--L38.

\bibitem{cip} R. Cipolatti, I. F. Lopez;
\emph{Determination of coefficients for a dissipative wave equation via boundary
measurements,} J. Math. Anal. Appl., 306 (2005), 317--329.

\bibitem{engl} H. W. Engl, M. Hanke, A. Neubauer;
\emph{Regularization of Inverse Problems,} Kluwer Academic Publishers,
Dordrecht, 1996.

\bibitem{engl2} H. W. Engl, O. Scherzer, M. Yamamoto;
\emph{Uniqueness and stable determination of forcing terms in linear partial
differential equations with overspecified boundary data,} Inverse Problems,
10 (1994), 1253--1276.

\bibitem{feng} X. Feng, S. Lenhart, V. Protopopescu, L. Rachele, B. Sutton;
\emph{Identification problem for the wave equation with Neumann data input
and Dirichlet data observations,} Nonlinear Anal., 52 (2003), 1777--1795.

\bibitem{gnana} S. Gnanavel, N. Barani Balan, K. Balachandran;
\emph{Simultaneous identification of parameters and initial datum of
reaction diffusion system by optimization method,}
Appl. Math. Model., 37 (2013), 8251--8263.

\bibitem{hasanov} A. Hasanov;
\emph{Identification of spacewise and time dependent source terms in $1D$ heat
conduction equation from temperature measurement at a final time,}
Int. J. Heat Mass. Tran., 55 (2012), 2069--2080.

\bibitem{hasanov1} A. Hasanov, B. Mukanova;
\emph{Relationship between representation formulas for unique regularized
solutions of inverse source problems with final overdetermination and singular
value decomposition of input-output operators,}
IMA J. Appl. Math., 80 (2015), 676--696.

\bibitem{iman} O. Y. Imanuvilov, M. Yamamoto;
\emph{Global uniqueness and stability in determining coefficients of wave equations,}
Comm. Partial Differential Equations, 26 (2001), 1409--1425.

\bibitem{isa} V. Isakov;
\emph{Inverse Problems for Partial Differential Equations,} Springer, New York, 1998.

\bibitem{kab} S. I. Kabanikhin, R. Kowar, O. Scherzer;
\emph{On the Landweber iteration for the solution of a parameter identification
 problem in a hyperbolic partial differential equation of second order,}
J. Inv. Ill-Posed Problems, 6 (1998), 403--430.

\bibitem{kab1} S. I. Kabanikhin, O. Scherzer, M. A. Shishlenin;
\emph{Iteration methods for solving a two dimensional inverse problem for a
hyperbolic equation,} J. Inv. Ill-Posed Problems, 11 (2003), 87--109.

\bibitem{kab2} S. I. Kabanikhin, A. D. Satybaev, M. A. Shishlenin;
\emph{Direct Methods of Solving Multidimensional Inverse Hyperbolic Problems,}
VSP Science Press, Utrecht, 2005.

\bibitem{kir} A. Kirsch;
\emph{An Introduction to the Mathematical Theory of Inverse Problem,}
Springer, New York, 1999.

\bibitem{klic} M. V. Klibanov, A. A. Timonov;
\emph{Carleman Estimates for Coefficient Inverse Problems and Numerical Applications,}
 VSP Science Press, Utrecht, 2004.

\bibitem{kunisch} K. Kunisch, J. Zou;
 \emph{Iterative choices of regularization parameters in linear inverse problems,}
Inverse Problems, 14 (1998), 1247--1264.

\bibitem{lagnese} J. E. Lagnese, G. Leugering;
\emph{Time-domain decomposition of optimal control problems for the wave equation,}
 Systems Control Lett., 48 (2003), 229--242.

\bibitem{liang} M. Liang;
 \emph{Bilinear optimal control for a wave equation,}
 Math. Models Methods Appl. Sci., 9 (1999), 45--68.

\bibitem{mord} B. S. Mordukhovich, J. P. Raymond;
\emph{Dirichlet boundary control of hyperbolic equations in the presence of
state constraints,} Appl. Math. Optim., 49 (2004), 145--157.

\bibitem{nic} S. Nicaise, O. Zair;
\emph{Identifiability, stability and reconstruction results of sources by
interior measurements,} Port. Math. (N.S.), 60 (2003), 455--471.

\bibitem{pedersen} M. Pedersen;
\emph{Functional Analysis in Applied Mathematics and Engineering,}
 CRC Press, 2000.

\bibitem{rak} Rakesh;
\emph{Reconstruction for an inverse problem for the wave equation with
constant velocity,} Inverse Problems, 6 (1990), 91--98.

\bibitem{rich} R. D. Richtmyer, K. W. Morton;
\emph{Difference Methods for Initial-Value Problems,}
Interscience Publishers, New York, 1967.

\bibitem{simon} J. Simon;
\emph{Compact sets in the space $L^p(0,T;B)$,} Ann. Mat. Pura. Appl. (4),
 CXLVI (1987), 65--96.

\bibitem{ton} B. A. Ton;
\emph{An inverse source problem for the wave equation,} Nonlinear Anal.,
55 (2003), 269--284.

\bibitem{tro} F. Tr\"oltzsch;
\emph{Optimal Control of Partial Differential Equations: Theory, Methods
and Applications,} AMS, Providence, 2010.

\bibitem{wang} Z. Wang, J. Liu;
\emph{New model function methods for determining regularization parameters in
linear inverse problems,} Appl. Numer. Math., 59 (2009), 2489--2506.

\bibitem{yam} M. Yamamoto;
\emph{Well-posedness of some inverse hyperbolic problem by the Hilbert
uniqueness method,} J. Inv. Ill-Posed Problems, 2 (1994), 349--368.

\bibitem{yam1} M. Yamamoto;
\emph{Stability, reconstruction formula and regularization for an inverse
source hyperbolic problem by a control method,} Inverse Problems,
11 (1995), 481--496.

\bibitem{yam2} M. Yamamoto;
\emph{On ill-posedness and a Tikhonov regularization for a multidimensional
inverse hyperbolic problem,} J. Math. Kyoto. Univ., 36 (1996), 825--856.

\bibitem{yang} L. Yang, Z. C. Deng, J. N. Yu, G. W. Luo;
 \emph{Optimization method for the inverse problem of reconstructing the
source term in a parabolic equation,}
Math. Comput. Simulation, 80 (2009), 314--326.

\end{thebibliography}

\end{document}
