\documentclass[reqno]{amsart}
\usepackage{hyperref}
\usepackage{mathrsfs}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2015 (2015), No. 56, pp. 1--6.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu}
\thanks{\copyright 2015 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2015/56\hfil  Borel's lemma in two dimensions]
{A new proof of Borel's lemma in two dimensions}

\author[D. Li \hfil EJDE-2015/56\hfilneg]
{Dening Li}

\address{Dening Li \newline
Department of Mathematics,  West Virginia University,
 Morgantown, WV 26506, USA}
\email{li@math.wvu.edu}

\thanks{Submitted March 18, 2014. Published February 27, 2015.}
\subjclass[2000]{35L50, 40C05}
\keywords{Borel's Lemma; infinite order approximate solution}

\begin{abstract}
 Existence of solutions for nonlinear problems can often be established by
 a Newton's scheme, near an approximate solution, combined with a regularization
 procedure. This article provides a new method of constructing an 
 infinite order $C^\infty$ approximate solution for proving Borel's Lemma,
 without using the usual $C^\infty$ cut-off functions.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{remark}[theorem]{Remark}
\allowdisplaybreaks

\section{Approximate solutions of infinite order}

In the study of nonlinear problems, the linear iteration method is widely 
used to obtain the existence of solutions. Depending upon the nature of the
energy estimate for the linearized problem, various iteration methods are 
carried out, usually near an approximate solution for the nonlinear problem.
The existence of such an approximate solution depends upon the compatibility 
of the initial data and boundary or free boundary conditions. 
This compatibility is a necessary condition for the existence of the solution 
with some given regularity.

Consider the $n\times n$ system of nonlinear partial differential equations 
(the 3-dimensional Euler system is a special example with $n=5$):
\begin{equation}
\mathscr{L}(u) u =A_0(u) \partial_t u + A_1(u) \partial_x u +A_2(u) \partial_y u
+A_3(u) \partial_z u + C(u) u=f.
\label{e1.1}
\end{equation}
Here, the matrix $A_0(u)$ is assumed to be positive definite in the range of $u$.

The piece-wise smooth solutions (such as shock waves, rarefaction waves, 
or contact discontinuity) for the system \eqref{e1.1} are usually formulated
as initial-free-boundary problems. After a change of coordinates 
(depending upon the free boundary), the problem can be further transformed 
into an initial-boundary value problem. The approximate solutions for the 
unknown functions describing free boundaries can be constructed separately. 
Hence for simplicity, we will omit that part and consider only the following 
initial-boundary conditions
\begin{gather}
u(0,x,y,z) = u_0(x,y,z),\quad x\geq \phi(y,z),\; (y,z) \in \mathbb{R}^2,
\label{e1.2}\\
B(u) u(t,x,y,z) = g(t,y,z), \quad  t\geq 0, \;
 x=\psi(t,y,z), \; (y,z) \in \mathbb{R}^2.
\label{e1.3}
\end{gather}
Here $B(u)$ is in general an $m\times n$ matrix of nonlinear
zero-order operators, and $\psi(0,y,z)= \phi(y,z)$.

For the nonlinear problem \eqref{e1.1}-\eqref{e1.3} to be solvable, at least
locally in time, the compatibility is a standard requirement.
Such a requirement is necessary so that the value $u_0(x,y,z)$ in \eqref{e1.2}
 and the value $u(t,x,y,z)$ required by the boundary condition \eqref{e1.3} 
do not conflict with the value determined from the  partial differential 
equations \eqref{e1.1} at the intersection curve $x=\phi(y,z)$, $t=0$.

The $0$-order compatibility comes from the fact that the solution is continuous 
at the intersection curve. The values of the $u$-components obtained 
from \eqref{e1.3} must be identical to the values prescribed in \eqref{e1.2}. 
 From this 0-order compatibility of $u_0$ and $g$, one obtains that all the 
derivatives $\partial_y^{j_2}\partial_z^{j_3}u$ can be uniquely determined 
at $x=\phi(y,z)$ and $t=0$ by \eqref{e1.2} and \eqref{e1.3}.

The first-order compatibility condition is derived from the fact that the 
solution is continuously differentiable at the intersection curve 
$x=\phi(y,z)$, $t=0$. From \eqref{e1.1} and \eqref{e1.2}, the values of $u_t$ 
at $t=0$ for a classical solution $u(t,x,y,z)$ can be uniquely determined. 
On the other hand, for $m$ components of $u$, the derivative $u_t$ can also
 be determined by \eqref{e1.3} at $x=\phi(y,z)$. Therefore, in order that 
problem \eqref{e1.1}-\eqref{e1.3} have a classical solution $u(t,x,y,z)$, 
these two values must coincide at the intersection of $x=\phi(y,z)$ and $t=0$. 
This implies that the values $u_0(x,y,z)$ and $g(t,y,z)$ must satisfy 
certain constraints at the intersection of $x=\phi(y,z)$ and $t=0$. 
These constraints consist of the first order compatibility for the 
initial and boundary data $(u_0,g)$. In other words, the data  $(u_0,g)$ 
are first-order compatible if and only if one can uniquely determine the 
values of $u_t, u_x$ at the intersections of $x=\phi(y,z)$ and $t=0$.

Once the values of $u_t$ and $u_x$ are obtained at the intersection curve 
$x=\phi(y,z)$ and $t=0$, all the derivatives $\partial_y^{j_2}\partial_z^{j_3}u_t$ 
and $\partial_y^{k_2}\partial_z^{k_3}u_x$ are also known at $x=\phi(y,z)$ and $t=0$.

In general, the $k$-th order compatibility of the data $(u_0, g)$ can be 
defined similarly from the continuity of $k$-th order derivatives of the solution. 
With $k$-th order compatible data $(u_0,g)$, all the derivatives 
$\partial^\alpha u$, $(|\alpha| \leq k)$ at the intersection of 
$x=\phi(y,z)$ and $t=0$ are uniquely determined. Here, we use the
 multi-index convention that
\begin{equation}
\partial^\alpha = \partial_t^{\alpha_0}\partial_x^{\alpha_1}
\partial_y^{\alpha_2}\partial_z^{\alpha_3}, \quad
|\alpha| = \alpha_0+\alpha_1+\alpha_2+\alpha_3.
\label{e1.4}
\end{equation}

A $k$-th order approximate solution is closely related to the $k$-th order 
compatibility of the data. In particular, let $u \in C^k$ be a solution 
for \eqref{e1.1}-\eqref{e1.3} (which implies $k$-order compatibility of 
the data $(u_0,g)$), a $k$-th order approximate solution $w$ for the
problem \eqref{e1.1}-\eqref{e1.3} is a function $w(t,x,y,z) \in C^k$ near 
the intersections of $x=\phi(y,z)$ and $t=0$ such that
\begin{equation}
\partial^\alpha w(0,\phi(y,z),y,z) =\partial^\alpha u(0,\phi(y,z),y,z), \quad
 \forall |\alpha|\leq k. \label{e1.5}
\end{equation}

Equivalently, a function $w(t,x,y,z) \in C^k$ is a $k$-th order approximate solution 
if $w(0,x,y,z)=u_0(x,y,z)$, and both the interior equation \eqref{e1.1} 
and the boundary conditions \eqref{e1.3} are satisfied up to the order 
of $O(t^k)$; i.e.,
\begin{equation}
\mathscr{L}(u)u -f =O(t^k), \quad  B(u)u-g =O(t^k).\label{e1.6}
\end{equation}

To find a solution by a linear iteration for a nonlinear initial-boundary 
value problem such as \eqref{e1.1}-\eqref{e1.3}, the iteration is carried out 
near an approximate solution; see, e.g., \cite{poppenberg}.
For various free boundary problems (essentially nonlinear),
see, e.g., \cite{alinhac,coulombel,li}, the linear iteration also needs to
be carried out around an approximate solution.

The order requirement of the approximate solution varies, depending upon 
the nature of different iteration schemes.  When an appropriate a priori 
estimate is available for the solution of the linearized problem which would 
support the iteration indefinitely, the standard Picard's linear iteration 
is used, and the order of the approximate solution is usually the same 
as the smoothness order of the required solution.
However, when the linearized problem admits only a weaker estimate, 
the Nash-Moser iteration is a powerful tool which requires only a 
family of so-called tame estimates \cite{hamilton} for the linearized problems. 
In such cases, the order for the approximate solution could be much higher 
than the smoothness order of the required solution.

Given the initial-boundary data which are compatible up to any order, 
then an approximate solution of infinite order can be obtained by Borel's Lemma. 
In the following section, a new construction of such an approximate solution 
will be presented.

\section{A new proof of Borel's lemma}

From the $k$-th order compatibility condition, all the derivatives 
$u^{(\alpha)} (|\alpha| \leq k)$ of the solution for \eqref{e1.1}-\eqref{e1.3} 
are uniquely determined. Then the $k$-th order approximate solution can be 
constructed immediately by using the Taylor polynomials.

Specifically, let $ y \in \mathbb{R}^m$, given a family of $C^k$ functions 
$\{c_\alpha(y) \in C^k, \; \alpha =(\alpha_0,\alpha_1),\;|\alpha|\leq k\}$, 
the corresponding $k$-th order approximate solution $w(t,x,y)$ can be 
obtained by 
\begin{equation}
w(t,x,y)= \sum_{|\alpha|=0}^k \dfrac{c_\alpha(y)}{\alpha!}t^{\alpha_0}x^{\alpha_1}.
\label{e2.1}
\end{equation}

However, the construction in \eqref{e2.1} cannot be directly generalized 
to the case of infinite order approximate solution, because the corresponding 
Taylor series may not have a non-zero radius of convergence. 
This difficulty is usually overcome by using Borel's technique;
 i.e., introducing a sequence of $C_0^\infty$ cut-off functions in the 
coefficients of \eqref{e2.1}.
Indeed, we have the following result.

\begin{theorem}[Borel's Lemma] \label{thm2.1}
Let $\{c_\alpha(y) \in C^\infty, \alpha =(\alpha_0,\alpha_1),|\alpha|\geq 0\}$ 
be a given sequence of smooth functions, and $x = \phi(y)$ with $\phi(0)=0$ 
be a $C^\infty$ surface in $(x,y)$ space near $(0,0)$.
Then there is a $C^\infty$ function $w(t,x,y)$ near $(t,x,y) = (0,0,0)$ satisfying
\begin{equation}
 w^{(\alpha)}(0,\phi(y),y) = c_\alpha(y),\; \;|\alpha|=0,1,2,\cdots.
\label{e2.2}
\end{equation}
Here $w^{(\alpha)}(t,x,y) = \partial_t^{\alpha_0}\partial_x^{\alpha_1}w(t,x,y)$.
 \end{theorem}

The one-dimensional result of Theorem \ref{thm2.1} was first proved by Borel 
in \cite{borel}. More generalized versions are also available,
see, e.g., \cite{poppenberg}. The two-dimensional version of Theorem \ref{thm2.1} is 
proved in \cite{chen-li} in a somewhat simplified form, using a modified 
Taylor series of \eqref{e2.1}, with added $C_0^\infty$ coefficients
 $\phi_\alpha$ with rapidly shrinking support as $|\alpha| \to \infty$. 
In the following, we present a completely different proof without introducing 
any $C^\infty_0$ functions. Instead, we will use a more elementary construction 
for the infinite order approximate solution in Theorem \ref{thm2.1}. 
The method  might be of interest because of its explicit expression.

\begin{proof} 
First we define a sequence of functions $\gamma_\alpha(r)$ ($|\alpha|\geq 1$) 
as follows.

For $|\alpha|=1$,
$$
\gamma_\alpha(r,y) \equiv \sin (b_\alpha(y)r);
$$
for $|\alpha|=2$,
$$
\gamma_\alpha(r,y) \equiv \int_0^r \sin (b_\alpha(y)s)ds;
$$
and for $|\alpha|\geq 3$,
\begin{equation}
\gamma_\alpha(r)\equiv \int_0^{r}\int_0^{s_{|\alpha|-1}}
\cdots\int_0^{s_3}\int_0^{s_2}\sin(b_\alpha (y)s_1) ds_1ds_2
\cdots ds_{|\alpha|-2}ds_{|\alpha|-1},
\label{e2.3}
\end{equation}
where $b_\alpha = b_\alpha(y)$ depends only upon the parameter
 $y \in \mathbb{R}^m$ and will be chosen later.

The function $\gamma_\alpha(r)$ is a scalar function of the variable $r$, 
depending upon the parameter $y \in \mathbb{R}^m $ through $b_\alpha(y)$.
Let $\gamma_\alpha^{(j)}$ denote the $j$-th order derivative with respect to $r$. 
It is readily checked that we have the following statement.

\begin{lemma} \label{lem2.1}
The functions $\gamma_\alpha(r)$ defined in \eqref{e2.3} have the following 
properties:
\begin{enumerate}
\item $\gamma_\alpha \in C^\infty(\mathbb{R})$;

\item $|\gamma^{(j)}_\alpha(r)| \leq 1$ for all $j < |\alpha|$, $r \in (-1,1)$;

\item $\gamma^{(j)}_\alpha(0)=0$ for all $j < |\alpha|$;

\item $\gamma^{(|\alpha|)}_\alpha(0)=b_\alpha$.

\end{enumerate}
\end{lemma}

Now we define the function 
\begin{equation}
w(t,x,y) = c_0(y)+
\sum_{|\alpha|\geq 1}
\frac{1}{|\alpha|!}
\gamma_\alpha \Big( t +\frac{ x-\phi(y)}{ \alpha_1 +1}\Big). \label{e2.4}
\end{equation}

\begin{remark} \rm
The choice of the factor $(\alpha_1 + 1)^{-1}$ in \eqref{e2.4} serves 
to distinguish the different $\alpha$ with the same $|\alpha|$. 
Its specific form is only for convenience and can obviously be made differently,
 e.g., $(\alpha_1 + 1)$ or $2^{\alpha_1}$, etc. However, $\alpha_1$ cannot be
 replaced by, say $|\alpha|$ or $|\alpha|!$, as it will be seen later 
in \eqref{e2.7} and \eqref{e2.8}.
\end{remark}

From the property 2 in Lemma \ref{lem2.1}, the function $w(t,x,y)$ in \eqref{e2.4} 
is well-defined and $C^\infty$ in the region: $\{(t,x,y): |t|+|x-\phi(y)| < 1\}$. 
From the property 3 in Lemma \ref{lem2.1}, it is obvious that 
$w^{(0)}(0,\phi(y), y) = c_0(y)$. To show that it is the required function 
in Theorem \ref{thm2.1}, it remains to choose $b_\alpha(y)$ such that 
$w^{(\alpha)}(0,\phi(y), y) = c_\alpha(y)$  for all $\alpha$. 
This is achieved by induction on $k=|\alpha|$ as follows.
\begin{itemize}

\item For $|\alpha|=k=1$, let $b_\alpha(y) = c_\alpha(y)$.

\item Assume that $b_\alpha(y)$ be already chosen for all $|\alpha|<k$. 
 This means that all the functions $\gamma_\alpha$, together with all the
 derivatives $\gamma_\alpha^{(j)}$ are known for $|\alpha| <k$.
\end{itemize}
We proceed to choose the vector $b_\alpha(y)$ for all $|\alpha|=k$ 
simultaneously such that for any $\beta =(\beta_0,\beta_1)$ with $|\beta|=k$,
\begin{equation}
\begin{aligned}
w^{(\beta)}(t,x,y)\big|_{t=0, x=\phi(y)}
&=\sum_{|\alpha|\geq 1} \frac{1}{|\alpha|!}
\partial^{\beta}\gamma_\alpha
\Big( t +\frac{ x-\phi(y)}{\alpha_1 +1}\Big)\big|_{t=0, x=\phi(y)}\\
&=c_\beta(y).
\end{aligned} \label{e2.5}
\end{equation}
Since
$$
\partial^{\beta}\gamma_\alpha
\Big( t +\frac{ x-\phi(y)}{\alpha_1 +1}\Big)
=\gamma^{(|\beta|)}_\alpha \Big( t +\frac{x-\phi(y)}{\alpha_1 +1}\Big)
\Big( \frac{1}{\alpha_1 +1}\Big)^{\beta_1},
$$
by Property 3 in Lemma \ref{lem2.1}, all the terms in the summation of
\eqref{e2.5} with $k<|\alpha|$ vanish, i.e.,
for all $|\alpha|> |\beta|=k$,
$$
\partial^{\beta}\gamma_\alpha
\Big( t +\frac{x-\phi(y)}{\alpha_1 +1}\Big)\Big|_{t=0, x=\phi(y)}=0.
$$
Then \eqref{e2.5} becomes
$$
w^{(\beta)}(t,x,y)\big|_{t=0, x=\phi(y)}
=\sum_{ |\alpha| =1}^k\frac{1}{|\alpha|!}
\Big( \frac{1}{\alpha_1 +1}\Big)^{\beta_1}\gamma^{(|\beta|)}_\alpha(0)
	=c_\beta(y),
$$
or equivalently
\begin{equation}
\frac{1}{k!}
\sum_{|\alpha|=k}\Big( \frac{1}{\alpha_1 +1}\Big)^{\beta_1}  b_\alpha(y)\\
= c_\beta(y) -  \sum_{|\alpha|=1}^{k-1} \frac{1}{|\alpha|!}
\Big( \frac{1}{\alpha_1 +1}\Big)^{\beta_1}\gamma^{(|\beta|)}_\alpha(0).
 \label{e2.6}
\end{equation}

For all multi-indices $\beta$ with $|\beta|=k$, \eqref{e2.6} consists of $k+1$ 
linear equations for the $k+1$ variables $b_\alpha(y)$ with $|\alpha|=k$. 
Equation \eqref{e2.6} admits a unique vector solution $b_\alpha(y)$ if 
(omitting the non-zero factor ${1/k!}$) the following coefficient  
$(k+1) \times (k+1)$ matrix is nonsingular:
\begin{equation}
\mathscr{A} =\big\{ \big( \frac{1}{\alpha_1 +1}\big)^{\beta_1},\,
 \alpha_1, \beta_1 = 0,1,\dots,k.\big\}
\label{e2.7}
\end{equation}
Computed explicitly, \eqref{e2.7} becomes
\begin{equation}
\mathscr{A}
=\begin{pmatrix}
1&1&\cdots&1\\
1&1/2 & \cdots & 1/(k+1)\\
\cdots & \cdots & \cdots & \cdots \\
1&1/2^k & \cdots &1/(k+1)^k
\end{pmatrix}
\label{e2.8}
\end{equation}
This matrix is the well-known Vandermonde matrix with the following non-zero
determinant
\begin{equation}
\det \mathscr{A}
=\prod_{1 \leq i < j \leq k+1}(1/i - 1/j) \neq 0.
\label{e2.9}
\end{equation}
This completes the proof.
\end{proof}

\begin{remark} \rm
As mentioned in section 1, the existence of an approximate solution of 
infinite order with explicit structure in \eqref{e2.4} can be a useful 
tool in the study of some nonlinear problems, especially when Nash-Moser iteration 
is required to obtain the existence of the solution.
This was first successfully used in the context of multi-dimensional 
rarefaction waves \cite{alinhac} to establish the existence of solution, 
see also \cite{li}. Later on, it was also used in studying the general 
initial-boundary value problems in \cite{poppenberg}, and the 2-dimensional 
contact discontinuity problems \cite{coulombel} for the Euler system in
 gas-dynamics, etc.
\end{remark}

\begin{thebibliography}{99}

\bibitem{alinhac} S. Alinhac;
 \emph{Existence d'ondes de rar\'efaction pour des
syst\`{e}mes quasi-lin\'{e}aires hyperboliques multidimensionnels},
Comm. Partial Differential Equations, 14 (1989), 173--230.

\bibitem{borel} E. Borel;
\emph{Sur quelques points de la th\'eorie des fonctions}, Ann. Sci. \'Ecole Norm. Sup.
12 (1895), 9--55.

\bibitem{chen-li} S. Chen, D. Li;
\emph{Cauchy problem with general discontinuous initial data along a smooth
 curve for 2-D Euler system},  J. Differential Equations 257 (2014), 
no. 6, 1939--1988.

\bibitem{coulombel} J.-F. Coulombel, P. Secchi;
\emph{Nonlinear compressible vortex sheets in two space dimensions},
Ann. Sci. \'Ec. Norm. Sup\'er., 41 (2008), 85--139.

\bibitem{hamilton} R. S. Hamilton;
\emph{The inverse function theorem of Nash and Moser},
Bull. Amer. Math. Soc., 7(1982), 65-222.

\bibitem{li} D. Li;
 \emph{Rarefaction and shock waves for multi-dimensional
hyperbolic conservation laws}, Comm. in PDEs, 16(1991), 425-450.

\bibitem{majda} A. Majda;
 \emph{The stability of multi-dimensional shock front,
The existence of multi-dimensional shock front}, Memoirs Amer. Math.
Soc., 275, 281 (1983).

\bibitem{poppenberg} M. Poppenberg;
\emph{Nash-Moser techniques for nonlinear boundary-value problems}, 
Electronic Journal of Differential Equations, 2003 (2003), no. 54, pp. 1--33.

\end{thebibliography}

\end{document}
