\documentclass[reqno]{amsart}
\usepackage{hyperref}
\usepackage{graphicx}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2017 (2017), No. 119, pp. 1--19.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu}
\thanks{\copyright 2017 Texas State University.}
\vspace{8mm}}

\begin{document}
\title[\hfilneg EJDE-2017/119\hfil An inverse source problem]
{An inverse source problem of the Poisson equation with Cauchy data}

\author[J.-C. Liu \hfil EJDE-2017/119\hfilneg]
{Ji-Chuan Liu}

\address{Ji-Chuan Liu \newline
School of Mathematics,
China University of Mining and Technology,
Xuzhou, Jiangsu 221116, China}
\email{liujichuan2003@126.com}

\dedicatory{Communicated by Goong Chen}

\thanks{Submitted October 26, 2016. Published May 4, 2017.}
\subjclass[2010]{65N20, 65N21}
\keywords{Inverse source problem; gradient descent method;
\hfill\break\indent trust-region-reflective algorithm; Poisson equation; ill-posed problem}

\begin{abstract}
 In this article, we study an inverse source problem of the
 Poisson equation with Cauchy data. We want to find iterative algorithms
 to detect the hidden source within a body from measurements on the boundary.
 Our goal is to reconstruct the location, the size and the shape of the hidden
 source. This problem is ill-posed, so regularization techniques should be employed
 to obtain the regularized solution. Numerical examples show that our proposed
 algorithms are valid and effective.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{example}[theorem]{Example}
\allowdisplaybreaks

\section{Introduction}\label{sec1}

Inverse source problems are very important for applications in science,
engineering and bioengineering which have attracted great attention of many
researchers in recent years, refer to \cite{cheney1999electrical(1),RevModPhys(2)}.
In this paper, we consider the problem of determining a
source term of the Poisson equation. The inverse source problem consists of
determining the location, the size and the shape of the hidden source from
available measured data on the boundary. This inverse source problem is
nonlinear and ill-posed in the sense that the solution, even if it exists,
does not depend continuously on the measured data. Any small errors in the
given data might induce large errors in the solution.
Thus regularization techniques should be employed in our proposed algorithms.

Inverse source problems of the Poisson equation have been researched
extensively
\cite{badia2000(20),baratchart2005(12),bubnov1997(8),hanke2011(18),
he1987(13),hettlich1999(19),hon2010(17),isakov1990(9),kavanagk1978(14),ling2005(23)}.
Bubnov, Erokhin and Isakov \cite{bubnov1997(8), isakov1990(9)}
presented some theoretical results to reconstruct the unknown source or obstacles
from overdetermined boundary measurements of solutions of the Poisson equation.
Baratchart et al.\ \cite{baratchart2005(12)} solved the inverse problem of
locating pointwise or small size conductivity defaults in a plane domain
from overdetermined boundary measurements of solutions to the Laplace equation.
Hon et al. \cite{hon2010(17),ling2005(23)} proposed some effective numerical
algorithms to solve inverse source problems of the Poisson equation.
Hanke and Rundell \cite{hanke2011(18)} used the rational approximation method
 to solve inverse source problems for determining hidden obstacles.
There are some iterative algorithms for obtaining source parameters from
measurement data on the boundary
\cite{he1987(13),hettlich1999(19),kavanagk1978(14),
nenonen1991(15),Kohzaburo1989(16)}. Hettlich and Rundell
\cite{hettlich1999(19)} applied iterative algorithms to solve an inverse
potential problem for reconstructing the shape of an obstacle.

In this paper, we propose two reconstruction algorithms to solve
an inverse source problem of the Poisson equation from measurements on the boundary.
 According to the fundamental solution of Laplace equation, we can obtain the
expression of solution for inverse boundary value problem with boundary integral
equation. Based on the shape derivative, we apply gradient descent algorithm (GDA)
and trust-region-reflective algorithm (TRA) to detect the location, the size
and the shape of the hidden source within a body. From numerical experiments,
we can see that the proposed iterative algorithms are feasible and stable.

The outline of the paper is as follows. In Section \ref{sec2}, we
introduce an inverse source problem of the Poisson equation. We
introduce the shape derivative and parameterization of boundary in
Section \ref{sec3}. We propose both reconstruction algorithms to detect the
hidden source within a body in Section \ref{sec4}. In Section \ref{sec5},
we give some examples to illustrate the efficiency of the proposed
reconstruction algorithms. 


\section{Formulation of an inverse source problem}\label{sec2}

The inverse source problem we consider consists in detecting
the location, the size and the shape of the hidden source of the Poisson
equation from a single measurement pair of Cauchy data on the boundary.
Assume that $\Omega$ is a simply connected bounded domain of $\mathbb{R}^2$
 with a smooth boundary $\partial \Omega$, and $\Omega^*$ is a subdomain of
$\Omega$ whose boundary $\Gamma$ is piecewise differentiable and star-like.
We consider the inverse source problem in the following
\begin{gather}
\Delta u=f, \quad \text{in } \Omega, \label{2.1}\\
\label{2.2}
u=0, \quad \text{on } \partial \Omega,\\
\label{2.3}
\frac{\partial u}{\partial \nu}=g \quad\text{on }\partial \Omega,
\end{gather}
where $\nu$ denotes the outward unit normal to $\partial \Omega$.
In this paper, we assume the source term $f\in L^2(\Omega)$ is piecewise
constant which has compact support and satisfies
$\operatorname{supp} f\subset\Omega^*$.

Note that we have assumed homogeneous Dirichlet values and this can be done
without loss of generality, refer to \cite{hanke2011(18)}.
The inverse source problem is that of recovering $f$ given $g$. According to
the Green's function, we can obtain the expression of the solution of problem
\eqref{2.1} and \eqref{2.2} for $f=\chi(\Omega^*)$ as follows
\begin{equation}\label{2.4}
u(x,\chi(\Omega^*))=\int_{\Omega}G(x,y)f(y)dy=\int_{\Omega^*}G(x,y)dy,
\quad x\in \Omega,
\end{equation}
where the Green's function $G: \Omega \times \Omega
\to\mathbb{R}$, is
\[
G(x,y)= \begin{cases}
\frac{1}{2\pi}\Big(\log|x-y|-\log\Big|\frac{x}{|x|}-|x|y\Big|\Big),
& y\neq0,\\
\frac{1}{2\pi}\log|x|, &y=0,
\end{cases}
\]
where $x,y\in \Omega$.

The research of uniqueness of the
inverse problem \eqref{2.1}-\eqref{2.3} has attracted a good deal of
attention \cite{isakov1990(9),hettlich1999(19),isakov2005(25)}.
From \cite{isakov1990(9)}, we know that the subdomain $\Omega^*$
is $x_1$-convex. Therefore we have the uniqueness theorem of the inverse source
problem \eqref{2.1}-\eqref{2.3} as follows

\begin{theorem}[\cite{isakov1990(9)}] \label{th1}
 Suppose that either (1) $\Omega_1^{*}$ and $\Omega_2^{*}$ are
star-shaped with respect to their centers of gravity, or
(2) $\Omega_1^{*}$ and $\Omega_2^{*}$ are convex in $x_1$.
If $u(\cdot,\chi({\Omega_1^{*}}))=u(\cdot,\chi({\Omega_2^{*}}))$ on
$\Omega\setminus \Omega^*$, then $\Omega_1^{*}=\Omega_2^*$.
\end{theorem}

For the inverse problem \eqref{2.1}-\eqref{2.3}, we want to recover a star-shaped
domain $\Omega^*=\{x:|x-a|<w((x-a)/|x-a|)\}$ from the modulo of its potential
gradient $|\nabla u(\cdot;\chi_{\Omega^*})|$ given on the hypersurface
 $\Gamma(h)=\{y:|y|=1,1-h<y_1\}$ where $h$ is a number from $(0,1)$.
We may assume that these centers of gravity are the origin, then the star-shaped
domain $\Omega_j^{*}=\{x:|x|<w_j(x/|x|)\}=\{r<w_j(\sigma)\}$. For a number
$p$, $0<p<1$, $|w_j|_{2+p}(\Sigma)$ is the usual H\"{o}lder norm of the function
$w_j$ on the unit sphere $\Sigma$. For estimating a solution $\Omega^*$ of the
inverse domain problem, we have the following stability theorem from
\cite{isakov1990(9)}.

\begin{theorem}[{\cite[Chap.2]{isakov1990(9)}}] \label{th2}
 Suppose $b<w_j<1-b$ on $\Sigma$ where $0<b<1$. There is a constant $C$
depending only on $|w_j|_{2+p}(\Sigma)$, $b$ and $h$ such that if
$$
\|\nabla u(\cdot;\chi_{\Omega_1^{*}})
-\nabla u(\cdot;\chi_{\Omega_2^{*}})\|_{L^2(\Gamma(h))}<\varepsilon,
$$
then
$$
|w_1-w_2|<C|\ln\varepsilon|^{-1/C} \quad \text{on } \Sigma.
$$
\end{theorem}

Tikhonov first applied this result to show a stability in the inverse problem
of potential theory, refer to \cite{Tikhonov1943(34)} for details.
From the above Theorems \ref{th1} and \ref{th2}, we know that the solution
of inverse problem \eqref{2.1}-\eqref{2.3} is unique and stable.

From \eqref{2.4}, we can get
\begin{equation}\label{2.6}
\frac{\partial u(x,\chi(\Omega^*))}{\partial \nu(x)}
=\int_{\Omega}\frac{\partial G(x,y)}{\partial \nu(x)}f(y)dy,
\quad x\in \partial\Omega.
\end{equation}
Combined \eqref{2.3} with \eqref{2.6}, we can define an operator
${ \mathcal A}:L^2(\Omega)\to L^2(\partial \Omega)$, satisfying
\begin{equation}\label{2.06}
{ \mathcal A}f=g,
\end{equation}
where
\begin{equation}\label{2.026}
{ \mathcal A}f=\int_{\Omega}\frac{\partial G(x,y)}{\partial \nu(x)}f(y)dy,
\quad x\in \partial\Omega.
\end{equation}
According to \eqref{2.026}, we know that ${ \mathcal A}$ is a compact linear
operator.
The problem of solving \eqref{2.06} is ill-posed, refer to
\cite{Tikhonov1995(33)} for details; that is, the solution, even if it exists,
does not depend continuously on the data $g$. Therefore, it is impossible
to solve this inverse source problem using classical numerical methods.

In this article, we want to determine the location, the size and the shape
 of the hidden source within a body. However, we can obtain the measurements
$g^{\delta}$ in the real application, i.e.,
\begin{equation}\label{2.5}
\| g^{\delta}-g \|_{L^{2}(\partial \Omega)} \leq \delta,
\end{equation}
where $\| \cdot\|_{L^{2}(\partial \Omega)}$ is the $L^2$-norm on the boundary
and $\delta $ is a noisy level.

Instead of \eqref{2.06}, one solves the regularized equation
\begin{equation}\label{2.006}
({ \mathcal A}^*{ \mathcal A}+\lambda)f={ \mathcal A}^*g^{\delta}.
\end{equation}
The equation \eqref{2.006} is the Euler equation for the problem of minimization
of the functional
\begin{equation}\label{2.0006}
\frac{1}{2}\|{ \mathcal A}f-g^{\delta}\|_{L^2(\partial \Omega)}^2
+\frac{\lambda}{2}\| f\|^2_{L^{2}(\Omega)},
\end{equation}
which is the famous Tikhonov regularization method \cite{Tikhonov1995(33)}.

Thus we can obtain the convergence theorem as follows, refer
to \cite{isakov1990(9)},

\begin{theorem}\label{th3}
 For each $\lambda>0$ there is a solution $f^{\lambda}$ to the minimization
\eqref{2.0006} and any such solution satisfies the estimate
$$
\| f^{\lambda}-f\|_{L^{2}(\Omega)}
\leq \omega^{\lambda} (2\| g^{\delta}-g\|_{L^2(\partial \Omega)}+\sqrt{\lambda})
$$
provided ${\mathcal A}$ is one-to-one, where $\omega^{\lambda}$ is a function
such that $\omega^{\lambda}\to0$ when $\lambda\to0$.
\end{theorem}


\section{Shape derivative and parameterization}\label{sec3}

\subsection{Eulerian derivative of a shape functional}

We want to study the geometric change of a bounded domain $\Omega^*$ which
is thought to be a collection of material particles changing their position in time.
The space occupied by them at time $\sigma$ will determine a new configuration
$\Omega_\sigma^*$. The change in the geometry of $\Omega^*$ will be given by
a process which deforms the initial configuration $\Omega^*$.
To formalize this mathematically, let domain
$\Omega^*\subset\mathbb{R}^2$ be bounded with
Lipschitz boundary $\Gamma$, and transformations
$\Lambda_{\sigma}:\Omega^*\to\mathbb{R}^2, \sigma\in[0,\varepsilon)$, i.e.,
\begin{equation}\label{2.701}
y^*\in\Omega^*\mapsto y=\Lambda_{\sigma}(y^*)\equiv y(\sigma,y^*),
\end{equation}
where $\Lambda_{\sigma}$ is a bijection and $\Lambda_{\sigma}\in C^1(\Omega^*)$.
Then we can get the transformed geometry as follows
\begin{equation}\label{2.7}
\Omega_\sigma^*=\Lambda_{\sigma}(\Omega^*),
\end{equation}
i.e., $\Omega_\sigma^*$ is the image of $\Omega^*$ with respect to
$\Lambda_\sigma$.

\begin{definition}\label{de2.1} \rm
\cite{keplershape(32)} For the point $y(\sigma)$, the Eulerian velocity field
$\vec{h}(\sigma,y)$ is as follows
\begin{equation}\label{2.702}
\vec{h}(\sigma,y)=\frac{\partial y}{\partial \sigma}(\sigma,\Lambda_\sigma^{-1}(y)).
\end{equation}
\end{definition}

From the above definition \ref{de2.1}, it can be seen that
$y(\sigma,y^*)$ satisfies an initial value problem
\begin{equation}\label{2.703}
\begin{gathered}
\frac{d}{d \sigma}y(\sigma,y^*)=\vec{h}(\sigma,y(\sigma,y^*)),\\
y(0,y^*)=y^*,
\end{gathered}
\end{equation}
conversely, according to transformations $\Lambda_\sigma(y;\vec{h})$,
 we can obtain the solution of problem \eqref{2.703} for $\vec{h}(\sigma,y)$.

Based on the Eulerian velocity field $\vec{h}$, we introduce a directional
derivative for a shape functional.

\begin{definition}[\cite{keplershape(32)}] \label{de2.2} \rm
 Let $J$ be a functional with $\Omega^* \mapsto J(\Omega^*)$,
$\Omega^*\subset\mathbb{R}^2$. Then the
Eulerian derivative of the functional $J$ at $\Omega^*$ in the
direction of a vector field $\vec{h}$ is given by
\begin{equation}\label{2.8}
dJ(\Omega^*;\vec{h})=\frac{d}{d\sigma}J(\Omega_\sigma)
\big|_{\sigma=0}=\lim_{\sigma\to0}\frac{1}{\sigma}(J(\Omega^*_\sigma)-J(\Omega^*)),
\end{equation}
where $\Omega^*_\sigma=\Lambda_{\sigma}(\Omega^*;\vec{h})$.
\end{definition}

From definition \ref{de2.2}, we know that if $dJ(\Omega^*;\vec{h})$ exists
for all $\vec{h}$, then the Eulerian derivative is said to be a shape derivative.

\subsection{Shape derivatives of a volume integral}

Now we consider the shape derivative of a volume integral.
The domain function is given by the volume integral of a function
$\varphi \in W^{1,1}_{\rm loc}(\mathbb{R}^{2})$
\begin{equation}\label{2.08}
J(\Omega_\sigma^*)=\int_{\Omega_\sigma^*} \varphi dy.
\end{equation}
We recall from \cite{Sokolowski1992(22),allaire2002(24)} the following
transformation Lemmas.

\begin{lemma}\label{la01}
Let $\varphi \in W^{1,1}_{\rm loc}(\mathbb{R}^{2})$, then
$\varphi\circ \Lambda_{\sigma} \in W^{1,1}_{\rm loc}(\mathbb{R}^{2})$ and
\begin{equation}\label{2.008}
J(\Omega_\sigma^*)=\int_{\Omega_\sigma^*} \varphi dy=\int_{\Omega^*}
\varphi \circ \Lambda_{\sigma}J_\sigma dy,
\end{equation}
where $J_\sigma=\det \mathfrak{D}\Lambda_\sigma$ is the volume Jacobian.
\end{lemma}


\begin{lemma}\label{la02}
(1) For $\sigma$ small enough, the map $W^{1,1}_{\rm loc}(\mathbb{R}^2)
\to W^{1,1}_{\rm loc}(\mathbb{R}^2);
\varphi\mapsto\varphi\circ \Lambda_\sigma$ is locally Lipschitz and
\begin{equation}\label{2.0018}
\nabla (\varphi\circ \Lambda_\sigma)=(\mathfrak{D}
\Lambda_\sigma)^{T}(\nabla\varphi)\circ \Lambda_\sigma,
\end{equation}

(2) If $\Lambda_{\sigma}$ is the flow of a vector field $\vec{h}\in C^0([0,\varepsilon),C^1(\mathbb{R}^{2},\mathbb{R}^{2}))$, then the map $[0,\varepsilon)\to W^{1,1}_{\rm loc}(\mathbb{R}^{2})$; $\sigma\mapsto\varphi\circ \Lambda_\sigma$ is well defined and
\begin{equation}\label{2.018}
\frac{d}{d\sigma}(\varphi\circ \Lambda_\sigma)=(\nabla \varphi\cdot
\vec{h}(\sigma))\circ \Lambda_\sigma \in L^{1}_{\rm loc}(\mathbb{R}^{2});
\end{equation}
the map $[0,\varepsilon)\to C^{0}_{\rm loc}(\mathbb{R}^{2})$;
 $\sigma\mapsto J_\sigma$ is differentiable with
\begin{equation}\label{2.028}
\frac{d}{d\sigma}J_\sigma=(\operatorname{div} \vec{h}(\sigma))
\circ \Lambda_\sigma J_\sigma\in C^{0}_{\rm loc}(\mathbb{R}^{2}).
\end{equation}
\end{lemma}

In terms of definition \ref{de2.2} and Lemma \ref{la01}, we can get
\begin{equation}\label{2.9}
\begin{aligned}
dJ(\Omega^*;\vec{h})
&=\lim_{\sigma\to0}\frac{1}{\sigma}\int_{\Omega^*}
((\varphi \circ \Lambda_{\sigma})J_\sigma-(\varphi \circ \Lambda_{0})J_0) dy \\
&= \int_{\Omega^*} \lim_{\sigma\to0}\frac{1}{\sigma}
((\varphi \circ \Lambda_{\sigma})J_\sigma-(\varphi \circ \Lambda_{0})J_0)dy,
\end{aligned}
\end{equation}
where $\Lambda_{0}(y)=y$ and $J_0(y)=1$.

From Lemma \ref{la02}, we apply product rule, chain rule and Gauss theorem to have
\begin{equation}\label{2.901}
\begin{aligned}
dJ(\Omega^*;\vec{h})
&= \int_{\Omega^*}((\nabla \varphi\cdot \vec{h}(0))
 \circ \Lambda_0J_0+(\varphi\circ \Lambda_0)(\operatorname{div}
 \vec{h}(0))\circ \Lambda_0J_0)dy \\
&= \int_{\Omega^*}(\nabla (\varphi\circ \Lambda_0)\cdot
 \vec{h}(0)+(\varphi\circ \Lambda_0)(\operatorname{div}
 \vec{h}(0)\circ \Lambda_0))dy \\
&= \int_{\Omega^*}(\nabla \varphi\cdot \vec{h}(0)+\varphi\operatorname{div}
 \vec{h}(0))dy \\
&= \int_{\Omega^*}\operatorname{div} (\varphi\vec{h}(0))dy \\
&= \int_{\Omega^*} \varphi \vec{h}(0)\cdot \nu d\Gamma,
\end{aligned}
\end{equation}
where $\vec{h}(0)\cdot \nu$ is the Euclidean inner product in $\mathbb{R}^{2}$,
$\Gamma$ is the boundary of $\Omega^*$, $\nu$ is the outward unit normal of
the boundary $\Gamma$.


\begin{theorem}\label{thm0001}
Let $\varphi\in W^{1,1}_{\rm loc}(\mathbb{R}^{2})$, $\Lambda_{\sigma}$
be the flow of a vector field
$\vec{h}$ in the space $C^0([0,\varepsilon),C^1(\mathbb{R}^{2},\mathbb{R}^{2}))$,
the open subset $\Omega^*$ has a Lipschitz boundary $\Gamma$, $\nu$ is
the outward unit normal of the boundary
$\Gamma$, then the shape derivatives of a volume integral is as follows
\begin{equation}\label{2.9001}
dJ(\Omega^*;\vec{h})=\int_{\Gamma} \varphi \vec{h}(0)\cdot \nu d\Gamma.
\end{equation}
\end{theorem}

In terms of Theorem \ref{thm0001}, we can get the shape derivative of solution
for problem \eqref{2.1} and \eqref{2.2} from \eqref{2.4}
\begin{equation}\label{2.10}
du(x,\Omega^*;\vec{h})=\int_{\Gamma} G(x,y)\vec{h}(0,y)\cdot \nu(y) d\Gamma(y),
 \quad x\in \Omega^*.
\end{equation}

\subsection{Parameterization}

To compute easily, we should parameterize the boundary
$\Gamma$ of $\Omega^*$. We want to detect the location and the size of the
hidden source, and then reconstruct the shape of the source. Thus we employ
two methods to parameterize the boundary $\Gamma$.

Firstly, we apply the polar coordinates to parameterize the boundary
$\Gamma$ given by
$$
\Gamma: O+r(\cos t,\sin t), 0\leq t\leq 2\pi,
$$
for determining the location and the size of the hidden source within a body,
where $O=(O_1,O_2)$ is the centroid of the domain $\Omega^*$ and
$r$ is radius. We take every point $y$ on the boundary $\Gamma$ of the domain
$\Omega^*$.
Let
\[
y=\begin{pmatrix}
O_1\\
O_2
\end{pmatrix}
+r \begin{pmatrix}
\cos t\\
\sin t
\end{pmatrix}, \quad 0\leq t\leq 2\pi.
\]

We take three transformations $\Lambda_{\sigma_1}$, $\Lambda_{\sigma_2}$,
 $\Lambda_{\sigma_3}$ for $O_1$, $O_2$ and $r$, respectively, then we can
 get the transformed geometry $\Omega_\sigma^*$. Three transformations
$\Lambda_{\sigma_1}$, $\Lambda_{\sigma_2}$ and $\Lambda_{\sigma_3}$ are given by
\[
\Lambda_{\sigma_1}(y)=y+\sigma_1
\begin{pmatrix}
1\\
0
\end{pmatrix}, \quad
\Lambda_{\sigma_2}(y)=y+\sigma_2
\begin{pmatrix}
0\\
1
\end{pmatrix},
\Lambda_{\sigma_3}(y)=y+\sigma_3
\begin{pmatrix}
\cos t\\
\sin t
\end{pmatrix}.
\]
Thus the vector field $\vec{h}(0,y)=(h_1,h_2,h_3)$ is as follows
\begin{equation}\label{2.11}
h_1=\begin{pmatrix}
1\\
0
\end{pmatrix}, \quad
h_2= \begin{pmatrix}
0\\
1
\end{pmatrix}, \quad
h_3=\begin{pmatrix}
\cos t\\
\sin t
\end{pmatrix}.
\end{equation}
By using a simple calculation, we obtain
\begin{gather*}
\frac{d\Lambda_{\sigma_1}(y)}{d\sigma_1}\big|_{\sigma_1=0}
=\frac{d\Lambda_{\sigma_1}(y)}{dO_1}=h_1, \quad
\frac{d\Lambda_{\sigma_2}(y)}{d\sigma_2}\big|_{\sigma_2=0}
=\frac{d\Lambda_{\sigma_2}(y)}{dO_2}=h_2, \\
\frac{d\Lambda_{\sigma_3}(y)}{d\sigma_3}\big|_{\sigma_3=0}
=\frac{d\Lambda_{\sigma_3}(y)}{dr}=h_3.
\end{gather*}
Let $y=(y_1,y_2)$, we know that
$$
y_1'=\frac{d y_1}{d t}=-r\sin t,\quad
y_2'=\frac{d y_2}{d t}=r\cos t.
$$
So the outward unit normal is
\begin{equation}\label{2.1101}
\nu=\frac{(y_2', -y_1')}{\sqrt{(y_1')^2+(y_2')^2}}=(\cos t,\sin t).
\end{equation}
We can use $\beta=(O_1,O_2,r)$ to describe the location and the size of
 the hidden source within a body.

Secondly, in order to reconstruct the shape of the hidden source, we
parameterize the boundary $\Gamma$ of $\Omega^*$ as
$$
\Gamma: O+r(t)(\cos t,\sin t), 0\leq t\leq2\pi,
$$
where $O$ is the centroid of the domain $\Omega^*$ which is fixed, and
$r(t)$ is a real-valued function of $0\leq t\leq 2\pi$ which is given by
\begin{equation}\label{2.12}
r(t)=c_0+\sum_{j=1}^{l}(c_j\cos(j t)+c_{j+l}\sin(j t)),
\end{equation}
where $0\leq t\leq2\pi$, $c_j\in\mathbb{R}$, $l\in\mathbb{N}$.

We take every point $y$ on the boundary $\Gamma$ of the domain $\Omega^*$.
Let
\[
y= O+r(t)\begin{pmatrix}
\cos t\\
\sin t
\end{pmatrix}.
\]
We take transformations $\Lambda_{\sigma_0},\dots,\Lambda_{\sigma_{2l}}$
for $c_0,\ldots,c_{2l}$, respectively, then we can get the transformed geometry
$\Omega_\sigma^*$. Transformations $\Lambda_{\sigma_0},\dots,\Lambda_{\sigma_{2l}}$
are given by
\[
\Lambda_{\sigma_{j}}(y)=y+r_{\sigma_{j}}(t)
\begin{pmatrix}
\cos t\\
\sin t
\end{pmatrix}, \quad j=0,1,\dots,2l,
\]
where
\[
r_{\sigma_{j}}(t)= (c_0,\ldots,c_{j}+\sigma_{j},\ldots,c_{2l})(1, \cos t, \ldots,
\cos(l t), \sin t, \ldots, \sin(lt) )^{T},
\]
$0\leq t\leq2\pi$. The vector field $\vec{h}=(h_0,h_1,\ldots,h_{2l})$
is given by
\begin{equation}\label{2.13}
\begin{gathered}
h_j= \cos(jt)
\begin{pmatrix}
\cos t\\
\sin t
\end{pmatrix},\quad j=0,\ldots,l, \\
h_j= \sin((j-l)t)
\begin{pmatrix}
\cos t\\
\sin t
\end{pmatrix},\quad j=l+1,\ldots,2l,
\end{gathered}
\end{equation}
and we have
\[
\frac{d\Lambda_{\sigma_j}(y)}{d\sigma_j}\Big|_{\sigma_j=0}
=\frac{d\Lambda_{\sigma_j}(y)}{dc_j}=h_j, \quad j=0,\ldots,2l.
\]
Let $y=(y_1,y_2)=O+r(t)(\cos t, \sin t)$,
we obtain
\begin{gather*}
y_1'=\frac{d y_1}{d t}=r'(t)\cos t-r(t)\sin t,\\
y_2'=\frac{d y_2}{d t}=r'(t)\sin t+r(t)\cos t.
\end{gather*}
Thus the outward unit normal vector is given by
\begin{equation}\label{2.14}
\nu=\frac{(y_2', -y_1')}{\sqrt{(y_1')^2+(y_2')^2}}.
\end{equation}
We can use $\beta=(c_0,c_1,\ldots,c_{2l})$ to describe the shape of
the hidden source within a body.

According to parameters $\beta$ and the vector field $\vec{h}$,
Equation \eqref{2.10} can be changed as
\begin{equation}\label{2.15}
\begin{aligned}
\nabla_{\beta}u(x,\beta;\vec{h})
&= \int_{0}^{2\pi} G(x,O_1+r\cos t,O_2+r\sin t)\vec{h}(0,O_1+r\cos t
,O_2+r\sin t) \\
&\quad\cdot \nu(O_1+r\cos t,O_2+r\sin t)
\sqrt{(y_1')^2+(y_2')^2}d t, \quad x\in \Omega,
\end{aligned}
\end{equation}
where $\vec{h}=(h_1,h_2,h_3)^{T}$ or
$\vec{h}=(h_0,\ldots,h_{2l})^{T}$.

Denote $u(x,\chi(\Omega^*))$ and $\nabla_{\beta}u(x,\beta;\vec{h})$ as
 $u(x,\beta)$ and $\nabla_{\beta}u(x,\beta)$, respectively. Using polar
 coordinate to \eqref{2.4}, we have
\begin{equation}\label{2.04}
u(x,\beta)=\int_{0}^{2\pi}dt \int_{0}^{r(t)}
G(x,y(O_1+r\cos t,O_2+r\sin t))rdr, \quad x\in \Omega,
\end{equation}
and
\begin{equation}\label{2.015}
\begin{aligned}
&\nabla_{\beta}u(x,\beta) \\
&= \int_{0}^{2\pi}G(x,O_1+r(t)\cos t,O_2+r(t)\sin t)\vec{h}(0,O_1+r(t)\cos t
,O_2\\
&\quad + r(t)\sin t)\cdot \nu(O_1+r(t)\cos t,O_2+r(t)\sin t)
\sqrt{(y_1')^2+(y_2')^2}d t, \quad x\in \Omega,
\end{aligned}
\end{equation}
then we can get
\begin{equation}\label{2.0014}
\begin{aligned}
&\frac{\partial u(x,\beta)}{\partial \nu(x)} \\
&= \int_{0}^{2\pi}d t \int_{0}^{r(t)}
 \frac{\partial G(x,y(O_1+r\cos t,O_2+r\sin t))}{\partial \nu(x)}rdr \\
&= \int_{0}^{2\pi}d t \int_{0}^{1}
 \frac{\partial G(x,y(O_1+vr(t)\cos t,O_2+vr(t)\sin t))}{\partial \nu(x)}
 vr^{2}(t)dv,\quad x\in \Omega,
\end{aligned}
\end{equation}
\begin{equation}\label{2.0015}
\begin{aligned}
&\frac{\partial \nabla_{\beta}u(x,\beta)}{\partial \nu(x)} \\
&= \int_{0}^{2\pi} \frac{\partial G(x,O_1+r(t)\cos t,O_2+r(t)\sin t)}
 {\partial \nu(x)}\vec{h}(0,O_1+r(t)\cos t,O_2\\
&\quad + r(t)\sin t)\cdot \nu(O_1+r(t)\cos t,O_2+r(t)\sin t)
\sqrt{(y_1')^2+(y_2')^2}dt,\; x\in \Omega.
\end{aligned}
\end{equation}

\section{Reconstruction algorithms for a hidden source}\label{sec4}

In this section, we want to seek reconstruction algorithms to determine the location,
the size and the shape of hidden source. In practical applications, we can only get
measured data with errors on the boundary. The inverse source problem is nonlinear
and ill-posed. Therefore, we
should employ the regularization technique to solve this inverse source problem.

We consider the objective function as follows
\begin{equation}\label{3.1}
F(\beta)=\frac{1}{2}\big\| g^{\delta}-\frac{\partial
_{}u(\cdot,\beta)}{\partial \nu}\big\|_{L^2{(\partial \Omega)}}^2,
\end{equation}
where $\| \cdot\|_{L^2{(\partial \Omega)}}$ denotes the $L^2$-norm,
$g^{\delta}$ are the measured data on the boundary of the domain
$\Omega$ and
$$
\beta=(O_{1},O_{2},r)\in\mathbb{R}^{3}, \quad \text{or} \quad
\beta=(c_{0},\ldots,c_{2l})\in\mathbb{R}^{(2l+1)}.
$$

This problem is a nonlinear least squares optimization problem,
so we propose reconstruction algorithms to find the minimum of the objective
function in \eqref{3.1} by updating $\beta$. Starting with an initial guess
$\beta^{0}$, these algorithms proceed by the iterations
\begin{equation}\label{3.2}
\beta^{k+1}=\beta^{k}+\triangle, \quad k=0,1,2,\ldots,
\end{equation}
where $\triangle$ is the increment vector.

From \eqref{3.1}, we can obtain the gradient of the objective function
\begin{equation}\label{3.04}
F'(\beta)=-\big\langle g^{\delta}-\frac{\partial
u(\cdot,\beta)}{\partial \nu}, \frac{\partial
\nabla_{\beta}u(\cdot,\beta)}{\partial \nu}\big\rangle,
\end{equation}
where $\langle\cdot,\cdot\rangle$ denotes the ${L^2(\partial D)}$ inner product.

We know that the measured data $g^{\delta}$ are discrete data at discrete points
$\{x_s\}$ on the boundary of the domain $\Omega$. In order to compute numerically
the inner product \eqref{3.04}, we apply the collocation method to compute
$\frac{\partial u(x,\beta)}{\partial \nu(x)}$ and
$\frac{\partial \nabla_{\beta}u(x,\beta)}{\partial \nu(x)}$ on the collocation
points $\{x_s\}$ of the boundary $\partial \Omega$. For each collocation point $x_s$,
we should estimate the integral equations \eqref{2.0014} and \eqref{2.0015}.
We note that the kernels are smooth in \eqref{2.0014} and \eqref{2.0015}
so that well-established quadrature rules can be employed for numerical
approximation.


The interval $[0,2\pi]$ is partitioned as $0=t_0<t_1<\dots<t_{n_1}<2\pi$, where
$t_i=(i-1)h_{t}$ $(i=1,\dots,n_1)$ and $h_{t}=\frac{2\pi}{n_1}$.
The interval $[0,1]$ is partitioned as $0=v_0<v_1<\dots<v_{n_2}<1$,
where $v_j=(j-1)h_{v}$ $(j=1,\dots,n_2)$ and $h_{v}=\frac{1}{n_2}$.
In terms of integral equations \eqref{2.0014} and \eqref{2.0015}, we can obtain
the approximate value of collocation point $x_s$ as follows
\begin{equation}\label{2.0024}
\begin{aligned}
&\frac{\partial u(x_s,\beta)}{\partial \nu(x_s)} \\
&=\sum_{j=1}^{n_2}\Big(\sum_{i=1}^{n_1}
 \frac{\partial G(x_s,y(O_1+v_jr(t_i)\cos t_i,O_2+v_{j}r(t_i)\sin t_i))}
 {\partial \nu(x_s)}v_{j}r^{2}( t_i)h_v\Big)h_t,
\end{aligned}
\end{equation}
\begin{equation}\label{2.0025}
\begin{aligned}
\frac{\partial \nabla_{\beta}u(x_s,\beta)}{\partial \nu(x_s)}
&= \sum_{i=1}^{n_1}
\frac{\partial G(x_s,O_1+r( t_i)\cos t_i,O_2+r( t_i)\sin t_i)}
 {\partial \nu(x_s)}\vec{h}(0,O_1 \\
&\quad +r( t_i) \cos t_i,O_2+r( t_i)\sin t_i)\cdot \nu(O_1
+r( t_i)\cos t_i,O_2 \\
&\quad +r( t_i)\sin t_i)\sqrt{(y_1')_i^2+(y_2')_i^2}h_t.
\end{aligned}
\end{equation}
According to the numerical approximation \eqref{2.0024} and \eqref{2.0025},
 we can compute $\frac{\partial u(x,\beta)}{\partial \nu(x)}$ and $
\frac{\partial \nabla_{\beta}u(x,\beta)}{\partial \nu(x)}$ at the collocation
points $\{x_s\}$ on the boundary $\partial \Omega$. Then, the inner product
\eqref{3.04} can be computed well along with measurement data $g^{\delta}$.

\subsection{Gradient descent algorithm (GDA)}

GDA is a way to find a local minimum of an objective function.
The way it works is we start with an initial guess of the solution and
we take the gradient of the function at that point. We step the solution in
the negative direction of the gradient and we repeat the process.
GDA will eventually converge where the gradient is zero. GDA is a
first-order optimization algorithm which is recognized as a highly
convergent algorithm for finding the minimum of the objective function.
We know that this inverse source problem is ill-posed, we employ the
regularization technique for GDA because of the measurement data $g^{\delta}(x)$.
The modified objective function is as follows
\begin{equation}\label{3.3}
\mathcal
{\widetilde{F}}(\beta)=F(\beta)+\frac{\lambda}{2}|\beta|^2,
\end{equation}
where $\lambda$ is the regularization parameter.
According to Theorem \ref{th3}, we can obtain the convergence theorem for
parameters $\beta$ by minimizing objective function $\mathcal
{\widetilde{F}}(\beta)$.

For GDA, a key issue is to choose the regularization parameter $\lambda$.
A wise choice of regularization parameter is obviously crucial to obtaining
useful approximate solutions to ill-posed problems, there are well-studied
techniques for computing a good regularization parameter, such as the
discrepancy principle \cite{morozov1984(27)}, the generalized cross-validation
(GCV) \cite{Gene1979(28)}, the L-curve \cite{Hansen(21)} and so on.
In this paper, we are interested in a-posteriori rules for choosing
the regularization parameter $\lambda$ when minimizing ${\widetilde{F}}(\beta)$.
Based on discrepancy principle, we apply sequential discrepancy principle
\cite{anzengruber2012(31)} to choose the regularization parameter.
For prescribed $0<q<1$ and $\lambda_0>0$, let
\begin{equation}\label{3.03}
\Lambda_q:=\{\lambda_j|\lambda_j=q^j\lambda_0,j\in \mathbb{Z}\}.
\end{equation}
Given any $\delta>0$, measured data $g^{\delta}$ and $\tau>1$, we say that
an element $\lambda\in\Lambda_q$ is chosen according to the sequential
discrepancy principle, if
\begin{equation}\label{3.003}
F(\beta_\lambda^{\delta})<\tau\delta<F(\beta_{\lambda/q}^{\delta}).
\end{equation}


The gradient of $\mathcal{\widetilde{F}}(\beta)$ is
\begin{equation}\label{3.4}
\mathcal{\widetilde{F}}'(\beta)=F'(\beta)+\lambda \beta.
\end{equation}
The increment
vector $\triangle$ of \eqref{3.1} is given by
\begin{equation}\label{3.5}
\triangle=-\alpha \mathcal {\widetilde{F}}'(\beta),
\end{equation}
where $\alpha$ is the step size of iteration.

The final iteration relationship is as follows
\begin{equation}\label{3.501}
\beta^{k+1}=\beta^{k}-\alpha \mathcal {\widetilde{F}}'(\beta^{k}),
\quad k=0,1,2,\ldots.
\end{equation}

\subsection{Trust-region-reflective optimization algorithm (TRA)}

TRA is a subspace trust-region
method and is based on the interior-reflective Newton method
described in \cite{coleman1994(3),coleman1996(4)} for the detail.
Each iteration involves the approximate solution of a large linear
system using method of preconditioned conjugate gradient.
TRA is used to minimize a nonlinear function subject to simple bound.
TRA exhibits strong convergence properties and global and second-order convergence.

According to the objective function $F(\beta)$, the shape derivative
to $\beta$ is as follows
\begin{equation}\label{3.8}
\widetilde{\mathcal{F}}'(\beta)=\frac{\partial \nabla_{\beta}u(\cdot,\beta)}
{\partial \nu}.
\end{equation}
Assume the increment $\triangle_*$ is a solution of a subproblem as follows
\begin{equation}\label{3.9}
\min_{\triangle_*\in\mathbb{R}^n}\{\psi(\triangle_*)
=\widetilde{\mathcal{F}}'(\beta)^T\triangle_*
+\frac{1}{2}\triangle_*^TM\triangle_*:|B\triangle_*|\leq w\},
\end{equation}
where $B$ is a positive diagonal scaling matrix refer to
\cite{coleman1994(3),coleman1996(4)}, and $w>0$ is the trust region size, and
$$
M(\beta)=\mathcal {\widetilde{F}}'(\beta)^T\mathcal
{\widetilde{F}}'(\beta)+B\operatorname{diag}
(\widetilde{\mathcal{F}}'(\beta))\operatorname{diag}(\operatorname{sign}
(\widetilde{\mathcal{F}}'(\beta)))B.
$$
We take the initial descent direction $\triangle_*$ as a new starting guess,
and then determine the piecewise linear reflective path $p(\alpha)$. Moreover,
we obtain an acceptable stepsize $\alpha$ by an approximate
piecewise line minimization $F(\beta^{k}+p(\alpha_k))$, refer
to \cite{coleman1994(3)} for details.

The final iteration relationship is as follows
\begin{equation}\label{3.901}
\beta^{k+1}=\beta^{k}+p(\alpha_k), k=0,1,2\ldots.
\end{equation}

\section{Numerical experiments}\label{sec5}

In this section, we show the results of some numerical experiments
that illustrate the reconstruction algorithms of the previous section.
The measured data are given by
\begin{equation*}
g^{\delta}=g(1+\delta \cdot \text{rand(size}(g))),
\end{equation*}
where $g$ is the exact data, $\text{rand(size}(g))$ is a random
number uniformly distributed in $[-1,1]$ and $\delta$
is a relative noise level. In order to calculate conveniently, we take
a unit circle as the boundary $\partial \Omega$ of the solution domain $\Omega$.

\subsection{Sensitivity analysis on the chosen parameter}
As we know, the parameter has an important role in our numerical computation.
To analyze the sensitivity of the chosen parameter, we suppose the source is
a circle, the center is located at the origin and the radius is 0.3.
 We parameterize the boundary $\Gamma$ of
$\Omega$ as $\Gamma: O+r(\cos t,\sin t),
0\leq t\leq2\pi,$ along with $\beta=(O_{1},O_{2},r)$, choose $(0.71,0.23,0.02)$
as a test for starting guess and the error is given by
$\text{error}=\sqrt{O_1^2+O_2^2+(r-0.3)^2}$.


\begin{figure}[ht]
\begin{center}
\includegraphics[width=0.6\textwidth]{fig1} 
 \end{center}
\caption{Sensitivity of the regularization parameter $\lambda$ for GDA}
 \label{fig0}
\end{figure}

\begin{figure}[ht]
\begin{center}
\includegraphics[width=0.49\textwidth]{fig2a}
\includegraphics[width=0.49\textwidth]{fig2b}
 \end{center}
\caption{Sensitivity of the step size parameter $\alpha$ for GDA}
 \label{fig00}
\end{figure}

According to the parameter of GDA, the regularization parameter and the
step size should be analyzed, the results are shown in Figures \ref{fig0}
and \ref{fig00}. Figure \ref{fig0} is the sensitivity of the regularization
parameter. In this case, we choose $\alpha=0.25$, $\lambda_0=2$, $q=0.02$,
$\tau=2.46$ and take $|\widetilde{\mathcal{F}}(\beta^{k+1})
-\widetilde{\mathcal {F}}(\beta^{k})|<10^{-6}$
as a stopping criterion. We apply the sequential discrepancy principle
\cite{anzengruber2012(31)} to obtain the regularization parameter
$\lambda=1.6\times10^{-5}$, as shown in Figure \ref{fig0}.

Figure \ref{fig00} shows the sensitivity of the step size. The step size has
a consistent increasing or decreasing effect on the error and the elapsed
CPU time in general. That means, as the step size increases, the error and
the elapsed time either both decrease or both increase.
In our computation, we want to find the
balance between the error and the elapsed time; that is, for a fixed
step size $\alpha$, the error should be small and the elapsed CPU time short.
Therefore, we choose the step size in the stability interval $[0.2,0.6]$ from
 Figures \ref{fig00}(a) and \ref{fig00}(b).

\subsection{Numerical stability and convergence of the proposed algorithms}

To show the numerical stability and convergence of GDA, we suppose the hidden
source is a circle which is located at the origin $O(0,0)$ and the radius is 0.3.
We parameterize the boundary $\Gamma$ of
$\Omega$ as $\Gamma: O+r(\cos t,\sin t),
0\leq t\leq2\pi,$ along with $\beta=(O_{1},O_{2},r)$, choose $(0.71,0.23,0.02)$
as a test for starting guess and the error is given by
$$
\text{error}=\sqrt{O_1^2+O_2^2+(r-0.3)^2}.
$$
In this case, we choose $\alpha=0.25$, $\lambda_0=2$, $q=0.02$, $\tau=2.46$
and take $|\widetilde{\mathcal{F}}(\beta^{k+1})
-\widetilde{\mathcal {F}}(\beta^{k})|<10^{-6}$
as a stopping criterion.

\begin{figure}[ht]
\begin{center}
\includegraphics[width=0.49\textwidth]{fig3a}
\includegraphics[width=0.49\textwidth]{fig3b}
 \end{center}
\caption{The numerical stability of GDA for different noise levels}
 \label{fig000}
\end{figure}

\begin{figure}[ht]
\begin{center}
\includegraphics[width=0.49\textwidth]{fig4a}
\includegraphics[width=0.49\textwidth]{fig4b}
 \end{center}
\caption{The numerical convergence of GDA for different values of the number
 of collocation points}
 \label{fig0000}
\end{figure}

In Figures \ref{fig000} and \ref{fig0000}, we use GDA to reconstruct the
approximation location and the size of hidden source within a body.
In Figure \ref{fig000}(a), we investigate the numerical stability of GDA
 with a fixed number of collocation points and four different levels of
noise added to the data, e.g. $3\%, 9\% , 15\%$ and $21\%$.
Figure \ref{fig000}(b) shows the error of the location and the size between
the exact source and the reconstructed source for different noise levels
$\delta$. In Figure \ref{fig0000}(a), we investigate the numerical
convergence of GDA with a fixed level of noise added to the data and four
different values of the number of collocation points, e.g. $20,25,30,35,40$.
Figure \ref{fig0000}(b) shows the error of the location and the size between
the exact source and the reconstructed source for the different number of
collocation points. From Figures \ref{fig000} and \ref{fig0000}, we can see
that our proposed method is stable and effective to detect the hidden source.


\subsection{Estimation of the location and the size of the hidden source}
To estimate the location and the size of the hidden
source within a body, we parameterize the boundary $\Gamma$ of
$\Omega$ as $ O+\rho(\cos t,\sin t),
0\leq t\leq2\pi,$ along with $\beta=(O_{1},O_{2},r)$, that is,
we use the circle to approximate the source for every iteration.

For simplification, we assume the source is located in origin
$(0,0)$.

\textbf{Algorithm 1 for GDA}
Estimate the location and the size of the hidden source.
Let $\varepsilon$, $\delta$, $\tau$, $q$, $\lambda_0$, $\alpha$ and
$g^{\delta}$ be given.\\
(1) Input $\beta^{0}=(O_{1},O_{2},r)$: the location and the size of starting guess\\
(2) Compute the values of collocation points
 $\frac{\partial u(\cdot,\beta^{k})}{\partial \nu}$ and
 $\frac{\partial \nabla_{\beta^{k}} u(\cdot,\beta^{k})}{\partial \nu}$
 from \eqref{2.0024} and \eqref{2.0025}\\
(3) Choose $\lambda$ by the sequential discrepancy principle from \eqref{3.003}\\
(4) Compute $F'(\beta^{k})$ and $\mathcal{\widetilde{F}}'(\beta^{k})$
 from \eqref{3.04} and \eqref{3.4}\\
(5) Update $\beta^{k+1}$ from \eqref{3.501}\\
(6) If $\|\widetilde{\mathcal {F}}(\beta^{k+1})-\widetilde{\mathcal
{F}}(\beta^{k})\|\leq\varepsilon$, stop, $\beta=\beta^{k+1}$; otherwise,
set $\beta^{k}=\beta^{k+1}$, return to (4)



\textbf{Algorithm 2 for TRA}
Estimate the location and the size of the hidden source.
Let $g^{\delta}$ be given.\\
(1) Input $\beta^{0}=(O_{1},O_{2},r)$: the location and the size of starting guess\\
(2) Compute the values of collocation points
 $\frac{\partial u(\cdot,\beta^{k})}{\partial \nu}$ and
 $\frac{\partial \nabla_{\beta^{k}} u(\cdot,\beta^{k})}{\partial \nu}$
 from \eqref{2.0024} and \eqref{2.0025}\\
(3) Compute $R=\| g^{\delta}-\frac{\partial u(\cdot,\beta^{k})}{\partial \nu}\|$
 and $J=\mathcal {\widetilde{F}}'(\beta^k)$ from \eqref{3.8}\\
(4) In terms of $R$ and $J$, call the Matlab program \texttt{lsqnonlin} to update
 $\beta^{k+1}$\\
(5) From the updated $\beta^{k+1}$, obtain the approximation location
 $(O^{k+1}_{1},O^{k+1}_{2})$ and the size $r^{k+1}$ of the hidden source


\begin{example} \label{examp5.3.1} \rm
 In this case, we suppose the
source is a peanut or a peach or a pear or an ``L'' type. Polar
radius of the peanut is given by
$$
r_{pt}=0.4\sqrt{(\cos t)^2+0.25(\sin t)^2}, \quad
0\leq t\leq2\pi,
$$
polar radius of the peach is given by
$$
r_{ph}=3/10-1/12\sin t-1/28\sin(3t), \quad 0\leq t\leq2\pi,
$$
polar radius of the pear is given by
$$
r_{pr}=3/10+1/16\cos(3t), \quad 0\leq t\leq2\pi,
$$
and the longest length of the ``L'' type is $0.25$.
\end{example}

\begin{figure}[ht]
\begin{center}
\includegraphics[width=0.49\textwidth]{fig5a} % peanut.eps
\includegraphics[width=0.49\textwidth]{fig5b} \\ % peach.eps
(a) $x(-0.0069,0.0095)$, $r=0.3170$\hfil 
(b) $x(0.0079,-0.0819)$, $r=0.3086$ \\
\includegraphics[width=0.49\textwidth]{fig5c} % pear.eps
\includegraphics[width=0.49\textwidth]{fig5d}  \\%L.eps
(c) $x(0.0029,-0.0073)$,  $r=0.3033$\hfil
(d) $x (-0.0454,0.0362)$, $r=0.2439$
 \end{center}
\caption{(a) Peanut; (b) peach; (c) pear; (d) ``L'' shape. Estimate
the location and the size of the source with 10\% noise data along
with exact solution (red), initial guess (black) and
recovered solution (blue), respectively for Example \ref{examp5.3.1}. }
 \label{fig1}
\end{figure}

\begin{table}
\caption{The approximate location and the size of the hidden source for four
different cases using GDA along with $10\%$ noise data for Example \ref{examp5.3.1}}
\label{table1}
\renewcommand{\arraystretch}{1.3}
\begin{center}
\begin{tabular}{|c|c|c|}
 \hline
 & Location &Size\\ \hline
Exact & (0,0)& $r$ \\
Guess & (0.71,0.23)&0.02\\
peanut & (-0.0069,0.0095)&0.3170\\
peach & (0.0079,-0.0819)&0.3086\\
pear & (0.0029,-0.0073)&0.3033\\
``L'' shape & (-0.0454,0.0362) &0.2439  \\ \hline
\end{tabular}
\end{center}
\end{table}

In Figure \ref{fig1}, we can get the approximate centroid location
and the size of the source using GDA along with $30$ measured data.
We choose $\varepsilon=10^{-7}$, $\delta=0.1$, $\alpha=0.25$,
$\lambda_0=1.5$, $q=0.02$ and $\tau=2.71$. In fact, we can use any
point and any radius as a starting guess in the domain of the solution for these
four cases. In Figure \ref{fig1}, we choose $(0.71,0.23,0.02)$
as a test for starting guess. From Figure \ref{fig1} and Table \ref{table1},
it can be seen that we obtain a more accurate approximation of the location
and the size for the four different cases by GDA.
We can get the same result with TRA.

\subsection{Estimation of the shape of the hidden source}

From the previous sub-section, we know that the location and the
size of the hidden source can be determined. In this sub-section, we
try to reconstruct the shape of the hidden source along with the
location and the size of the source given a priori. Therefore, we
can parameterize the boundary $\Gamma$ of $\Omega$ as
$O+r(t)(\cos t, \sin t)$ with
$r(t)$ a real-valued function of $0\leq t\leq2\pi$,
and $O$ is fixed which is the center of the sub-domain $\Omega$. We
apply GDA and TRA to reconstruct the shape of the source.

\textbf{Algorithm 3 for GDA} Reconstruct the shape of the hidden source.
Let $\varepsilon$, $\delta$, $\tau$, $q$, $\lambda_0$, $\alpha$,
$O$, $c^0_{0}$ and $g^{\delta}$ be given.\\
(1) Input $\beta^{0}=(c^0_{0},0,\ldots,0)$: the shape of starting guess\\
(2) Compute the values of collocation points
 $\frac{\partial u(\cdot,\beta^{k})}{\partial \nu}$ and
 $\frac{\partial \nabla_{\beta^{k}} u(\cdot,\beta^{k})}{\partial \nu}$
 from \eqref{2.0024} and \eqref{2.0025}\\
(3) Choose $\lambda$ by the sequential discrepancy principle from \eqref{3.003}\\
(4) Compute $F'(\beta^{k})$ and $\mathcal{\widetilde{F}}'(\beta^{k})$
 from \eqref{3.04} and \eqref{3.4}\\
(5) Update $\beta^{k+1}$ from \eqref{3.501}\\
(6) If $\|\widetilde{\mathcal {F}}(\beta^{k+1})-\widetilde{\mathcal
{F}}(\beta^{k})\|\leq\varepsilon$, stop, $\beta=\beta^{k+1}$; otherwise, set
$\beta^{k}=\beta^{k+1}$, return to (4)



\textbf{Algorithm 4 for TRA} Reconstruct the shape of the hidden source.
Let $O$, $\rho$ and $g^{\delta}$ be given.\\
(1) Input $\beta^{0}=(c^0_{0},0,\ldots,0)$: the shape of starting guess\\
(2) Compute the values of collocation points
 $\frac{\partial u(\cdot,\beta^{k})}{\partial \nu}$ and
 $\frac{\partial \nabla_{\beta^{k}} u(\cdot,\beta^{k})}{\partial \nu}$
 from \eqref{2.0024} and \eqref{2.0025}\\
(3) Compute $R=\| g^{\delta}-\frac{\partial u(\cdot,\beta^{k})}{\partial \nu}\|$
 and $J=\mathcal {\widetilde{F}}'(\beta^k)$ from \eqref{3.8}\\
(4) In terms of $R$ and $J$, call the Matlab program \texttt{lsqnonlin} to update
 $\beta^{k+1}$\\
(5) From \eqref{2.12}, obtain the shape of the hidden source based on the updated
$\beta^{k+1}$


\begin{example} \label{examp5.4.1} \rm
In this case, we also assume the hidden source is a peanut or a peach or a pear
 or an ``L'' type with the
approximate location and the size given by Table \ref{table1} in
Example \ref{examp5.3.1}.
\end{example}

\begin{figure}[ht]
\begin{center}
\includegraphics[width=0.49\textwidth]{fig6a} % Gpeanut1.eps
\includegraphics[width=0.49\textwidth]{fig6b} \\ %Gpeach.eps
(a) $l=2$; $c_0^{0}=0.3170$ \hfil
(b) $l=3$, $c_0^{0}=0.3086$ \\
\includegraphics[width=0.49\textwidth]{fig6c} % Gpear.eps
\includegraphics[width=0.49\textwidth]{fig6d} \\ % GL1.eps
(c) $l=3$, $c_0^{0}=0.3033$ \hfil
(d) $l=3$, $c_0^{0}=0.2439$
 \end{center}
\caption{(a) Peanut; (b) peach; (c) pear; (d) ``L'' shape.
Reconstruction of the shape of the hidden source with 10\% noise data using
GDA along with exact shape (red) and
recovered shape (blue), respectively for Example \ref{examp5.4.1}.}
 \label{fig2}
\end{figure}

\begin{figure}[ht]
\begin{center}
\includegraphics[width=0.49\textwidth]{fig7a} % Tpeanut.eps
\includegraphics[width=0.49\textwidth]{fig7b} \\ % Tpeach.eps
(a) $l=2$, $c_0^{0}=0.02$ \hfil
(b) $l=3$, $c_0^{0}=0.02$ \\
\includegraphics[width=0.49\textwidth]{fig7c} % Tpear.eps
\includegraphics[width=0.49\textwidth]{fig7d} \\ % TL.eps
(c) $l=3$, $c_0^{0}=0.02$ \hfil
(d) $l=3$, $c_0^{0}=0.03$
\end{center}
\caption{(a) Peanut; (b) peach; (c) pear; (d) ``L'' shape.
Reconstruction of the shape of the hidden source with 10\% noise data using
TRA along with exact
shape (red) and recovered shape (blue), respectively for
Example \ref{examp5.4.1}.}
 \label{fig3}
\end{figure}

In Figures \ref{fig2} and \ref{fig3}, we apply two iterative
algorithms to recover the shape of the hidden source for four
different cases within a body. We choose $\varepsilon=10^{-7}$,
$\delta=0.1$, $\tau=2.71$, $q=0.02$, $\lambda_0=1.5$, $\alpha=0.25$ and
$c_0^0=\rho$ for GDA. We
take a circle as a starting guess for the source, thus we can use
the size of the source as the radius of a circle from Table \ref{table1},
or reset it.

In this example, we use the approximate location given by Table \ref{table1}
in Example \ref{examp5.3.1} as the fix center of the sub-domain $\Omega$
and reset the initial value $\beta^{0}=(c_0^{0},0,\dots,0)$ as a starting
guess. Compared with these two algorithms, the convergence speed of
TRA is much faster than that of
GDA. The CPU run time for
TRA (about 0.5 sec) is far
less than that for GDA (about 100 sec). From Figures \ref{fig2} and \ref{fig3},
we can see that these two algorithms work well with noisy data to
reconstruct the shape of the source within a body.

\begin{example} \label{examp5.4.2} \rm
In this case, we consider the hidden source is a kite or a hypocycloid.
Polar radius of the kite is given by
\begin{equation}
r_{kt}=0.3(1+0.9\cos t+0.15\sin(2t))/(1+0.7\cos t),\quad 0\leq t\leq2\pi,
\end{equation}
and polar radius of the hypocycloid is given by
\begin{equation}
r_{hy}=0.3\sqrt{10/9+2/3\cos(4t)},\quad 0\leq t\leq2\pi.
\end{equation}
\end{example}

\begin{figure}[ht]
\begin{center}
\includegraphics[width=0.49\textwidth]{fig8a} % Gk1.eps
\includegraphics[width=0.49\textwidth]{fig8b} \\ % Tk.eps
(a) $l=3$, $c_0^{0}=0.01$ \hfil
(b) $l=2$, $c_0^{0}=0.02$ \\
\includegraphics[width=0.49\textwidth]{fig8c} % Gm2.eps
\includegraphics[width=0.49\textwidth]{fig8d} \\ % Tm.eps
(c) $l=10$, $c_0^{0}=0.01$ \hfil
(d) $l=4$, $c_0^{0}=0.01$
 \end{center}
\caption{
Reconstruction of the shape of the hidden source with 10\% noise data using
GDA and TRA along with exact
shape (red) and recovered shape (blue), respectively for
Example \ref{examp5.4.2}.}
 \label{fig4}
\end{figure}

In Figure \ref{fig4}, we apply GDA and TRA to recover the shape of the hidden
 source along with $10\%$ noise. The centroid of the source is origin.
We choose $\varepsilon=10^{-7}$, $\delta=0.1$, $\tau=2.12$, $q=0.02$,
$\lambda_0=1.2$ and $\alpha=0.25$ for GDA. GDA is employed to recover
the shape of the source in Figures \ref{fig4}(a) and \ref{fig4}(c).
The shape of the source in Figures \ref{fig4}(b) and \ref{fig4}(d)
is reconstructed by TRA. From Figure \ref{fig4} we know that TRA is
much less sensitive to the noise level, it works with much less prior information.
However, the shape of the recovered hidden
source agrees well with that of the true one for both iterative algorithms.

\subsection*{Conclusions}\label{sec6}
In this paper, we consider the inverse source problem
within a body from the measured data. We want to
detect the salient features of the hidden source, such as the location,
the size and the shape. We transform this problem into an optimization
problem for finding a minimum of an objective function.
This inverse source problem is nonlinear and ill-posed, thus regularization
technique of our proposed algorithms should be considered.
We apply GDA and TRA to solve this inverse source problem.
Our proposed algorithms are robust in the presence of noise, and less
sensitive to the noise level and an initial guess. Another nice feature
 of TRA is that it is self-adaptive, that is, at each iteration it can
remedy the possible errors from the previous iterations. Numerical results show
that our proposed algorithms are feasible and stable.

\subsection*{Acknowledgments}
The research of Ji-Chuan Liu was supported by the Fundamental Research Funds
for the Central Universities (2014QNA57) and the NSF of China (11601512).

\begin{thebibliography}{10}

\bibitem{allaire2002(24)}
G.~Allaire; \emph{Shape optimization by the homogenization method}, vol. 146,
 Springer, 2002.

\bibitem{anzengruber2012(31)}
S. W. Anzengruber, B.~Hofmann,  P.~Math{\'e}; \emph{Regularization properties
 of the discrepancy principle for {T}ikhonov regularization in {B}anach spaces},
 Techn. Univ., Fak. Mathematik, 2012.

\bibitem{badia2000(20)}
A. E. Badia, T.~Ha-Duong; \emph{An inverse source problem in potential
 analysis}, Inverse Problems \textbf{16} (2000), 651.

\bibitem{baratchart2005(12)}
L.~Baratchart, A. B. Abda, F. B. Hassen,  J.~Leblond;
 \emph{Recovery of  pointwise sources or small inclusions in {2D} domains
and rational  approximation}, Inverse problems \textbf{21} (2005), no.~1, 51.

\bibitem{bubnov1997(8)}
B. A. Bubnov, G. N. Erokhin;
 \emph{Inverse and ill-posed sources problems},  vol.~9, Vsp, 1997.

\bibitem{cheney1999electrical(1)}
M. Cheney, D.~Isaacson,  J. C. Newell;
 \emph{Electrical impedance tomography}, SIAM review (1999), 85--101.

\bibitem{coleman1994(3)}
T. F. Coleman, Y.~Li;
 \emph{On the convergence of interior-reflective newton
 methods for nonlinear minimization subject to bounds}, Mathematical
 programming \textbf{67} (1994), no.~1, 189--224.

\bibitem{coleman1996(4)}
T. F. Coleman, Y.~Li;
 \emph{An interior, trust region approach for nonlinear minimization
 subject to bounds}, SIAM Journal on Optimization, \textbf{6} (1996),
 418--445.

\bibitem{Gene1979(28)}
H. G. Gene, M. T. Heath, G.~Wahba;
 \emph{{Generalized Cross-Validation as a
 Method for Choosing a Good Ridge Parameter}}, Technometrics \textbf{21}
 (1979), 215--223.

\bibitem{RevModPhys(2)}
M. H\"am\"al\"ainen, R. Hari, R. J. Ilmoniemi, J. Knuutila,  O. V. Lounasmaa;
 \emph{Magnetoencephalography theory, instrumentation, and applications to
 noninvasive studies of the working human brain}, Reviews of Modern Physics
 \textbf{65} (1993), 413--497.

\bibitem{hanke2011(18)}
M.~Hanke, W.~Rundell; \emph{On rational approximation methods for inverse
 source problems}, Inverse Problems and Imaging \textbf{5} (2011), no.~1,
 185--202.

\bibitem{Hansen(21)}
P. C. Hansen; \emph{Rank-deficient and discrete ill-posed problems: Numerical
 aspects of linear inversion}, Society for Industrial and Applied Mathematics,
 Philadelphia, PA, USA, 1998.

\bibitem{he1987(13)}
B.~He, T.~Musha, Y.~Okamoto, S.~Homma, Y.~Nakajima,  T.~Sato; \emph{Electric
 dipole tracing in the brain by means of the boundary element method and its
 accuracy}, Biomedical Engineering, IEEE Transactions on \textbf{BME-34}
 (1987), no.~6, 406--414.

\bibitem{hettlich1999(19)}
F.~Hettlich, W.~Rundell;
 \emph{Iterative methods for the reconstruction of
 an inverse potential problem}, Inverse problems \textbf{12} (1999), no.~3,
 251.

\bibitem{hon2010(17)}
Y. C. Hon, M.~Li, Y.A. Melnikov;
 \emph{Inverse source identification by
 {G}reen's function}, Engineering Analysis with Boundary Elements \textbf{34}
 (2010), no.~4, 352--358.

\bibitem{isakov1990(9)}
V.~Isakov; \emph{Inverse source problems}, no.~34, American Mathematical Soc.,
 1990.

\bibitem{isakov2005(25)}
V.~Isakov;
 \emph{Inverse problems for partial differential equations}, Applied
 Mathematical Sciences, vol. 127, Springer-Verlag, New York, second edition,
 2006.

\bibitem{kavanagk1978(14)}
R. N. Kavanagk, T. M. Darcey, D.~Lehmann, D. H. Fender;
\emph{Evaluation of  methods for three-dimensional localization of electrical
sources in the human  brain}, Biomedical Engineering, IEEE Transactions on
\textbf{BME-25} (1978),  no.~5, 421--429.

\bibitem{keplershape(32)}
Johannes Kepler;
 \emph{Shape optimization with shape derivatives [{J}]}.

\bibitem{ling2005(23)}
L.~Ling, Y.C. Hon,  M.~Yamamoto;
 \emph{Inverse source identification for {P}oisson equation},
Inverse problems in science and engineering \textbf{13}
 (2005), no.~4, 433--447.

\bibitem{morozov1984(27)}
V.~Morozov; \emph{Methods of solving incorrectly posed problems},
 Springer-Verlag, New York, 1984.

\bibitem{nenonen1991(15)}
J.~Nenonen, T.~Katila, M.~Leinio, J.~Montonen, M.~Makijarvi, P.~Siltanen;
 \emph{Magnetocardiographic functional localization using current multipole
 models}, Biomedical Engineering, IEEE Transactions on \textbf{38} (1991),
 no.~7, 648--657.

\bibitem{Kohzaburo1989(16)}
K.~Ohnaka, K.~Uosaki;
\emph{Boundary element approach for identification of
 point forces of distributed parameter systems}, International Journal of
 Control \textbf{49} (1989), no.~1, 119--127.

\bibitem{Sokolowski1992(22)}
J.~Sokolowski, J.P. Zolesio; \emph{Introduction to shape optimization},
 Introduction to Shape Optimization, Springer Series in Computational
 Mathematics, vol.~16, Springer Berlin Heidelberg, 1992, pp.~5--12 (English).

\bibitem{Tikhonov1943(34)}
A.~N. Tikhonov; \emph{On the stability of inverse problems}, Dolk.akad.nauk Sssr
 \textbf{39} (1943), no.~5, 176--179.

\bibitem{Tikhonov1995(33)}
A.~N. Tikhonov, A.~V. Goncharsky, V.~V. Stepanov, A.~G. Yagola;
 \emph{Numerical methods for the solution of ill-posed problems}, Kluwer
 Academic Publishers, 1995.

\end{thebibliography}

\end{document}
