\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2016 (2016), No. 157, pp. 1--13.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu}
\thanks{\copyright 2016 Texas State University.}
\vspace{8mm}}

\begin{document}
\title[\hfilneg EJDE-2016/157\hfil Existence of solutions to Burgers equations]
{Existence of solutions to Burgers equations in domains
that can be transformed into rectangles}

\author[Y. Benia, B.-K. Sadallah \hfil EJDE-2016/157\hfilneg]
{Yassine Benia, Boubaker-Khaled Sadallah}

\address{Yassine Benia \newline
Department of Mathematics,
University of Tiaret, B.P. 78,
14000, Tiaret, Algeria}
\email{benia.yacine@yahoo.fr}

\address{Boubaker-Khaled Sadallah \newline
Lab. PDE \& Hist Maths; Dept of Mathematics, E.N.S.,
16050, Kouba, Algiers, Algeria}
\email{sadallah@ens-kouba.dz}

\thanks{Submitted April 15, 2016. Published June 21, 2016.}
\subjclass[2010]{35K58, 35Q35}
\keywords{Semilinear parabolic problem; Burgers equation; existence;
\hfill\break\indent uniqueness; anisotropic Sobolev space}

\begin{abstract}
 This work is concerned with Burgers equation
 $\partial _{t}u+u\partial_x u-\partial _x^2u=f$
 (with Dirichlet boundary conditions) in the
 non-rectangular domain
 $\Omega =\{(t,x)\in \mathbb{R}^2;\ 0<t<T,\;  \varphi_1(t)<x<\varphi _2(t)\}$
 (where $\varphi _1(t)<\varphi _2(t)$ for all $t\in [0,T]$).
 This domain will be transformed into a rectangle by a regular change of
 variables. The right-hand side lies in the Lebesgue space $L^2(\Omega )$,
 and the initial condition is in the usual Sobolev space $H_0^{1}$.
 Our goal is to establish the existence, uniqueness and the optimal regularity
 of the solution in the anisotropic Sobolev space.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{remark}[theorem]{Remark}
\allowdisplaybreaks

\section{Introduction}

One of the most important partial differential equations of the theory of
nonlinear conservation laws, is the semilinear diffusion equation, called
Burgers equation:
\begin{equation}
\partial _{t}u+u\partial _xu-\nu \partial _x^2u=f,  \label{44}
\end{equation}
where $u$ stands, generally, for a velocity, $t$  the time variable, $x$
the space variable and $\nu $ the constant of viscosity (or the diffusion
coefficient). Homogeneous Burgers equation (equation \eqref{44} with $f=0$),
is one of the simplest models of nonlinear equations which have been studied.

The mathematical structure of this equation includes a nonlinear convection
term $u\partial _xu$ which makes the equation more interesting, and a
viscosity term of higher order $\partial _x^2u$ which regularizes the
equation and produces a dissipation effect of the solution near a shock.
When the viscosity coefficient vanishes, $\nu =0$, the Burgers equation
reduces to the transport equation, which represents the inviscid Burgers
equation $\partial _{t}u+u\partial _xu=f$.

The study of the equation \eqref{44} has a long history: In 1906, Forsyth
treated an equation which converts by some variable changes to the Burgers
equation. In 1915, Bateman \cite{Bateman} introduced the equation \eqref{44}:
He was interested in the case when $\nu \to 0$, and in studying
the movement behavior of a viscous fluid when the viscosity tends to zero.
Burgers (1948) published a study on the equation \eqref{44} (to which it
owes his name), in his document \cite{Burgers1} about modeling the
turbulence phenomena. Using the transformation discovered later by
Cole \cite{Cole} in 1951 and, about the same time, independently by Hopf
\cite{Hopf} (called the Hopf--Cole transformation), Burgers continued his
study of what he called the ``nonlinear diffusion equation''.
This study treated mainly the static aspects of the equation. The results
of these works can be found in the book \cite{Burgers2}.

The objective of Burgers was to consider a simplified version of the
incompressible Navier--Stokes equation $\partial_tu+(u\cdot \nabla)u=\nu
\Delta u-\nabla p$ by neglecting the pressure term.

Among the most interesting applications of the one-dimensional Burgers
equation, we mention traffic flow, growth of interfaces, and financial
mathematics (see for example \cite{Kevorkian,Whitham}).

The nonlinear Burgers equation \eqref{44}, with $f=0$, can be converted to
the linear heat equation and then explicitly solved by the Hopf-Cole
transformation. We usually look for explicit solutions for the forced
Burgers equation $\partial _{t}u+u\partial _xu-\nu \partial _x^2u=f$,
where $f(x,t)$ is the forcing term in a rectangular domain. In this work we
are interested in proving a result of existence, uniqueness and regularity
for the inhomogeneous Burgers problem.

For $f(x,t)=-\lambda \partial _x\eta (x,t)$, Burgers equation becomes
\begin{equation}
\partial _{t}u+u\partial _xu-\nu \partial _x^2u=-\lambda \partial
_x\eta (x,t),  \label{200}
\end{equation}
which is Burgers stochastic equation, where $\eta (x,t)$ stands for the
white noise. Using the transformation $u(x,t)=-\lambda \partial_xh(x,t)$,
we find that \eqref{200} is equivalent to the equation of KPZ
\begin{equation*}
\partial _{t}h(x,t)-\frac{\lambda }{2}(\partial _xh(x,t))^2-\nu
\partial _x^2h(x,t)=\eta (x,t).
\end{equation*}
This equation has been introduced by Kardar, Parisi and Zhang in 1986, and
quickly became the default model for random interface growth in physics.

In a paper by Morandi Cecchi \emph{et al.} \cite{Morandi}, the main result
was the existence and uniqueness of a solution to the Burgers problem (with
constant coefficients) in the anisotropic Sobolev space
\begin{equation*}
H^{1,2}(R)=\left \{ u\in L^2(R):\partial _{t}u\in L^2(R),~\partial
_xu\in L^2(R),~\partial _x^2u\in L^2(R)\right \}
\end{equation*}
where $R$ is a rectangle. The authors used a wrong inequality
 (namely  $\int _{\Omega }M(u-M)^{+}(t)dx\leq M\| (u-M)^{+}(t)\| ^2$) at
the end of the proof of Theorem 2 (maximum principle); the inequality
appears in the line 14, page 165 (and line 15 page 167). To rectify this
part of the proof it suffices to show that $u\in {L^{\infty }(Q)}$. The
proof given by the authors remains true only when $f=0$ (but this was not
the objective of their paper), this case being treated by Bressan
in \cite{Bressan}. However, in our work, using another method, we prove a
more general result concerning the existence, uniqueness and regularity of a
solution to the Burgers problem with variable coefficients in a rectangle.
Then, we deduce the existence, uniqueness and regularity of a solution to
the Burgers problem in a domain that can be transformed into a rectangle.

\subsection*{Setting of the problem}
 Recall that $L^{p}(0,a)$ and $H^{m}(0,a)$ are the usual spaces of
Lebesgue and Sobolev, respectively, for $1\leq p\leq \infty $ and $m\in
\mathbb{Z}$. For any Banach space $X$, we define $L^{p}(0,T;X)$ to be the
space of measurable functions $u:(0,T)\to X$ such that
\[
\| u\|_{L^{p}(0,T;X)}=\Big(  \int _0^{T}\|
u\| _{X}^{p}\, \mathrm{d}t\Big) ^{1/p}<\infty
\]
for $1\leq p<\infty $ and $\| u\| _{L^{\infty }(0,T;X)}
=\operatorname{ess\,sup}_{0<t<T} \| u\| _{X}<\infty $ if
$p=\infty $. $L^{p}(0,T;X)$ is a
Banach space. Of course, we have
$L^{p}(R)=L^{p}(0,T;L^{p}(0,a))$.

This article is concerned with two questions regarding the Burgers equation.
The first one is to study the existence, uniqueness and regularity of the
solution of the semilinear parabolic problem:
\begin{equation}
\begin{gathered}
\begin{aligned}
&\partial _{t}u(t,x)+\alpha (t)u(t,x)\partial _xu(t,x)-\beta (t)\partial
_x^2u(t,x) +\gamma (t,x)\partial _xu(t,x)\\
&=f(t,x)\quad  (t,x)\in R,
\end{aligned}\\
u(0,x)=u_0(x)\quad  x\in I, \\
u(t,0)=u(t,a)=0\quad  t\in (0,T), \\
\end{gathered} \label{1}
\end{equation}
in the rectangle $R=(0,T)\times I$ where $I=(0,a)$, $a\in \mathbb{R}^{+}$
($T$ is finite); $f\in L^2(R)$ and $u_0\in H_0^{1}(I)$ are given
functions. We assume that the functions $\alpha $, $\beta $ depend only on $
t $ and the function $\gamma $ depends on $t$ and $x$. We also suppose that
there exist positive constants $(\alpha _i)_{i=1,2}$,
$(\beta_i)_{i=1,2} $ and $\gamma _1$, such that
\begin{equation}
\begin{gathered}
\alpha_1\leq \alpha(t)\leq \alpha_2, \quad
\beta_1\leq \beta(t)\leq \beta_2,\quad \forall t\in[0,T]\\
\text{and }  \vert \partial_x\gamma(t,x)\vert \leq \gamma_1
\text{ or }   \vert \gamma(t,x)\vert \leq \gamma_1 \quad
\forall(t,x)\in R.
\end{gathered} \label{47}
\end{equation}

The second question concerns the semilinear parabolic Burgers problem:
\begin{equation}
\begin{gathered}
\partial _{t}u(t,x)+u(t,x)\partial _xu(t,x)-\nu \partial
_x^2u(t,x)=f(t,x)\quad  \text{in }  \Omega ,   \\
u|_{t=0}=u_0(x)\quad  x\in J, \\
u|_{x=\varphi _i(t)}=0\quad i=1,2
\end{gathered} \label{7}
\end{equation}
in $\Omega \subset \mathbb{R}^2$, such that
\begin{equation*}
\Omega =\{(t,x)\in \mathbb{R}^2;\ 0<t<T,\,  \varphi_1(t)<x<\varphi _2(t)\}
\end{equation*}
where $J=[\varphi _1(0),\varphi _2(0)]$ and $\nu $ is a positive
constant, $\varphi _1$, $\varphi _2$ are functions defined on
$[ 0,T] $ belonging to $C^{1}(]0,T[)$. We assume that
$\varphi_1(t)<\varphi _2(t)$ for $t\in [ 0,T]$.

Using the results obtained in the first part of this work, we look for
conditions on the functions $(\varphi _i)_{i=1,2}$ which guarantee that
problem \eqref{7} admits a unique solution $u\in H^{1,2}(\Omega )$. In
order to solve problem \eqref{7}, we will follow the method which was used,
for example, in Sadallah~\cite{Sadallah} and Clark \emph{et al.}
\cite{Clark}. This method consists in proving that this problem admits a unique
solution when $\Omega $ is transformed into a rectangle, using a change of
variables preserving the anisotropic Sobolev space $H^{1,2}(\Omega )$.

To establish the existence (and uniqueness) of the solution to
\eqref{7}, we impose the assumption
\begin{equation}
|\varphi '(t)|\leq c\quad  \text{for all } t\in [ 0,T]
\label{46}
\end{equation}
where $c$ is a positive constant, and
$\varphi (t)=\varphi _2(t)-\varphi_1(t)$ for all $t\in [ 0,T]$.

The result related to the existence of the solution $u$ of  \eqref{1}
in a rectangle is obtained thanks to a personal (and detailed) communication
of professor  Luc Tartar about the Burgers equation with constant coefficients
in a rectangle. The authors would like to thank him for his
valuable comments and hints.
Our main result is as follows:

\begin{theorem}\label{thm1}
If $u_0\in H_0^{1}(I)$, $f\in L^2(R)$ and $\alpha$, $\beta$,
$\gamma $ satisfy the assumption \eqref{47}, then problem \eqref{1} admits
a (unique) solution $u\in H^{1,2}(R)$.
\end{theorem}

\begin{theorem} \label{thm2}
If $u_0\in H_0^{1}(J)$, $f\in L^2(\Omega )$ and $\varphi_1$, $\varphi _2$
satisfy the assumption \eqref{46}, then problem \eqref{7}
 admits a (unique) solution $u\in H^{1,2}(\Omega )$.
\end{theorem}

The proof of Theorem \ref{thm1} is based on the Faedo-Galerkin method. We
introduce approximate solution by reduction to the finite dimension. By the
Faedo-Galerkin method, we obtain the existence of an approximate solution
using an existence theorem of solutions for a system of ordinary
differential equations. We approximate the equation of problem \eqref{1} by
a simple equation. Then we make the passage to the limit using a compactness
argument. The proof of Theorem \ref{thm2} needs an appropriate change of
variables which allows us to use Theorem \ref{thm1}.


\section{Proof of Theorem \ref{thm1}}

Multiplying the equation of problem \eqref{1} by a test function
$w\in H_0^{1}(I)$, and integrating by parts from $0$ to $a$, we obtain
\begin{equation}
\begin{aligned}
& \int _0^{a}\partial_t u w\,\mathrm{d}x
+ \alpha(t) \int _0^{a}u\partial_xu w\, \mathrm{d}x\\
&+ \beta(t) \int _0^{a}\partial_x u\partial_xw\, \mathrm{d}x
+  \int_0^{a}\gamma(t,x)\partial_xu w\, \mathrm{d}x \\
&= \int _0^{a}fw\, \mathrm{d}x, \quad  \forall w\in H^1_0(I),\; t\in (0,T).
\end{aligned}  \label{25}
\end{equation}
This is the weak formulation of problem \eqref{1}. The solution of \eqref{25}
 satisfying the conditions of problem \eqref{1} is called
\emph{weak solution}.

To prove the existence of a weak solution to  \eqref{1}, we choose
the basis $(e_j)_{j\in \mathbb{N^{\star }}}$ of $L^2(I)$ defined as a
subset of the eigenfunctions of $-\partial _x^2$ for the Dirichlet
problem
\begin{gather*}
-\partial _x^2e_j =\lambda _je_j,\quad j\in\mathbb{N}^{\ast},  \\
e_j =0\quad   \text{on }  \Gamma =\{0,a\}.
\end{gather*}
More precisely,
\[
e_j(x)=\frac{\sqrt{2}}{\sqrt{a}}\sin \frac{j\pi x}{a}, \quad
\lambda _j=(\frac{j\pi }{a}) ^2, \quad \text{for }
j\in\mathbb{N}^{\ast}.
\]
As the family $(e_j)_{j\in \mathbb{N^{\star }}}$ is an
orthonormal basis of $L^2(I)$, then it is an orthogonal basis of
$H_0^{1}(I)$. In particular, for $v\in L^2(R)$, we can write
\begin{equation*}
v=\sum _{k=1}^{\infty }b_{k}(t)e_{k},
\end{equation*}
where $b_{k}=(v,e_{k})_{L^2(I)}$ and the series converges in $L^2(I)$.
Then, we introduce the approximate solution $u_n$ by
\begin{gather*}
u_n(t) =\sum _{j=1}^{n}c_j(t)e_j,  \\
\ u_n(0) = u_{0n}=\sum _{j=1}^{n}c_j(0)e_j,
\end{gather*}
which has to satisfy the approximate problem
\begin{equation}
\begin{gathered}
\begin{aligned}
&\int _0^{a}\partial _{t}u_ne_j\, \mathrm{d}x
 +\alpha (t) \int _0^{a}u_n\partial _xu_ne_j\,\mathrm{d}x \\
&+\beta (t) \int _0^{a}\partial_xu_n\partial _xe_j\, \mathrm{d}x
 + \int _0^{a}\gamma (t,x)\partial _xu_ne_j\, \mathrm{d}x\\
&= \int _0^{a}fe_j\,\mathrm{d}x,
\end{aligned} \\
u_n(0)=u_{0n}.
\end{gathered} \label{2}
\end{equation}
for all $j=1,\dots ,n$, and $0\leq t\leq T$.

\begin{remark}\label{rmq1}\rm
 The coefficients $c_j(0)$ (which depend on $j$ and $n$) will
be chosen such that the sequence $(u_{0n}) $ converges in
$H_0^{1}(I)$ to $u_0$. Then we suppose in the sequel that
$\lim u_{0n}=u_0$ in $H_0^{1}(I)$.
\end{remark}

\subsection{Solution of the approximate problem}

\begin{lemma}
For all $n$, there exists a unique solution $u_n$ of Problem \eqref{2}.
\end{lemma}

\begin{proof}
As $e_1,\dots ,e_n$ are orthonormal in $L^2(I)$, then
\begin{equation*}
 \int _0^{a}\partial _{t}u_ne_j\, \mathrm{d}
x=\sum _{i=1}^{n}c_i'(t) \int
_0^{a}e_ie_j\, \mathrm{d}x=c_j'(t).
\end{equation*}
On the other hand, $-\partial _x^2e_i=\lambda _ie_i$, then
 $\partial _x^2u_n(t)=-\sum _{i=1}^{n}c_i(t)\lambda _ie_i$.
Therefore, for all $t\in [ 0,T]$,
\begin{equation*}
-\beta (t) \int _0^{a}\partial _x^2u_ne_j\,
\mathrm{d}x=\beta (t)\sum _{i=1}^{n}c_i(t)\lambda _i
\int _0^{a}e_ie_j\, \mathrm{d}x=\beta (t)\lambda _jc_j(t).
\end{equation*}
Now, if we introduce
\begin{gather*}
f_j(t)= \int _0^{a}fe_j\, \mathrm{d}x, \quad
k_j(t)=-\alpha (t) \int _0^{a}u_n\partial
_xu_ne_j\, \mathrm{d}x,\\
 h_j(t)=-\int _0^{a}\gamma (t,x)\partial _xu_ne_j\, \mathrm{d}x,
\end{gather*}
for $j\in \{1,\dots ,n\}$, then \eqref{2} is equivalent to the
following system of $n$ uncoupled linear ordinary differential equations:
\begin{equation}
c_j'(t)=-\beta (t)\lambda _jc_j(t)+k_j(t)+h_j(t)+f_j(t),
\quad j=1,\dots ,n.  \label{380}
\end{equation}
Observe that the terms $k_j(t),h_j(t)$ are well defined
(because $e_j$ and $\gamma (t,x)$ are regular) and $f_j$ is
integrable (because $f\in L^2(R)$). Taking into account the initial
condition $c_j(0)$, for each fixed $j\in \{1,\dots ,n\}$, \eqref{380}
has a unique regular solution $c_j$
in some interval $(0,T')$ with $T'\leq T$. In fact, we can
prove here that $T'=T$.
\end{proof}

\subsection{A priori estimate}

\begin{lemma}\label{102}
There exists a positive constant $K_1$ independent of $n$,
such that for all $t\in [ 0,T]$
\begin{equation*}
\| u_n\| _{L^2(I)}^2+\beta _1 \int_0^{t}\| \partial _xu_n(s)\| _{L^2(I)}^2\, \mathrm{d}
s\leq K_1.
\end{equation*}
\end{lemma}

\begin{proof}
Multiplying  \eqref{2} by $c_j$ and summing for $j=1,\dots,n$, we
obtain
\begin{equation*}
\frac{1}{2}\frac{d}{dt} \int _0^{a}u_n^2\, \mathrm{d}x
+\beta (t) \int _0^{a}(\partial _xu_n)^2\, \mathrm{d}x
-\frac{1}{2} \int _0^{a}\partial _x\gamma (t,x)u_n^2\, \mathrm{d}x
= \int _0^{a}fu_n\, \mathrm{d}x.
\end{equation*}
Indeed, because of the boundary conditions, we have
\begin{equation*}
\alpha (t) \int _0^{a}u_n^2\partial _xu_n\,\mathrm{d}x
=\frac{\alpha (t)}{3} \int _0^{a}\partial_x(u_n)^{3}\, \mathrm{d}x=0,
\end{equation*}
and an integration by parts gives
\begin{equation*}
-\frac{1}{2} \int _0^{a}\partial _x\gamma (t,x)u_n^2\, \mathrm{d}x
= \int _0^{a}\gamma (t,x)u_n\partial _xu_n\, \mathrm{d}x.
\end{equation*}
Then, by integrating with respect to $t$ ($t\in (0,T)$), and according to
\eqref{47}, we find that
\begin{align*}
&\frac{1}{2}\| u_n \|^2_{L^2(I)}+\beta_1
\int _0^{t}\| \partial_x u_n(s) \|^2_{L^2(I)}\,\mathrm{d}s\\
&\leq \frac{1}{2}\| u_{0n} \|^2_{L^2(I)}+
\int _0^{t}\| f(s)\|_{L^2(I)}\| u_n(s) \|_{L^2(I)}\,
\mathrm{d}s +\frac{\gamma_1}{2} \int _0^{t}\|
u_n(s) \|^2_{L^2(I)}\, \mathrm{d}s.
\end{align*}
Using Poincar\'{e}'s inequality
\begin{equation*}
\| u_n\| _{L^2(I)}^2\leq \frac{a^2}{2}\| \partial _xu_n\| _{L^2(I)}^2,
\end{equation*}
both with the elementary inequality
\begin{equation}
|rs|\leq \frac{\varepsilon }{2}r^2+\frac{s^2}{2\varepsilon },\quad
\forall r,s\in \mathbb{R},\;  \forall \varepsilon >0,  \label{38}
\end{equation}
with $\varepsilon =\frac{2\beta _1}{a^2}$, we obtain
\begin{align*}
&\| u_n \|^2_{L^2(I)}+\beta_1 \int_0^{t}\| \partial_x u_n(s) \|^2_{L^2(I)}\,
\mathrm{d}s \\
&\leq \| u_{0n} \|^2_{L^2(I)} + \frac{a^2}{2\beta_1} \int
_0^{t}\| f(s)\|^2_{L^2(I)}\, \mathrm{d}s
+\gamma_1 \int _0^{t}\| u_n(s) \|^2_{L^2(I)}\,
\mathrm{d}s,
\end{align*}
so
\begin{align*}
&\| u_n \|^2_{L^2(I)}+\beta_1 \int_0^{t}\| \partial_x u_n(s) \|^2_{L^2(I)}\,
 \mathrm{d}s \\
&\leq
\| u_{0n} \|^2_{L^2(I)} + \frac{a^2}{2\beta_1} \int
_0^{t}\| f(s)\|^2_{L^2(I)}\, \mathrm{d}s\\
&\quad+\gamma_1 \int _0^{t} \Big(\| u_n(s)
\|^2_{L^2(I)}+\beta_1 \int _0^{s} \| \partial_x
u_n(\tau) \|^2_{L^2(I)} \, \mathrm{d}\tau \Big)\, \mathrm{d}s.
\end{align*}
As the sequence $(u_{0n}) $ converges in $H_0^{1}(I)$ to
$u_0 $ (see Remark \ref{rmq1}) and $f\in L^2(R)$, there exists a positive
constant $C_1$ independent of $n$ such that
\[
\|u_{0n}\|_{L^2(I)}^2+\frac{a^2}{2\beta_1}\| f\|_{L^2(R)}^2\leq C_1
\]
and
\begin{align*}
&\| u_n \|^2_{L^2(I)}+\beta_1 \int
_0^{t}\| \partial_x u_n(s) \|^2_{L^2(I)}\, \mathrm{d}s \\
&\leq C_1+ \gamma_1 \int _0^{t} \Big(\| u_n(s)\|^2_{L^2(I)}
+ \beta_1 \int _0^{s}
\| \partial_x u_n(\tau) \|^2_{L^2(I)} \, \mathrm{d}\tau \Big)\,
\mathrm{d}s,
\end{align*}
then by Gronwall's inequality,
\begin{equation*}
 \| u_n \|^2_{L^2(I)}+\beta_1 \int
_0^{t}\| \partial_x u_n(s) \|^2_{L^2(I)}\, \mathrm{d}s\leq
C_1\exp(\gamma_1 t).
\end{equation*}
Taking $K_1=C_1\exp (\gamma _1T)$, we obtain
\begin{equation*}
\| u_n\| _{L^2(I)}^2+\beta _1 \int_0^{t}\| \partial _xu_n(s)\| _{L^2(I)}^2\,
 \mathrm{d}s
\leq K_1.
\end{equation*}
\end{proof}

\begin{lemma}\label{103}
There exists a positive constant $K_2$ independent of $n$,
such that for all $t\in [ 0,T]$
\begin{equation*}
\| \partial _xu_n\| _{L^2(I)}^2+\beta _1 \int
_0^{t}\| \partial _x^2u_n(s)\| _{L^2(I)}^2\,\mathrm{d}s\leq K_2.
\end{equation*}
\end{lemma}

\begin{proof}
As $-\partial _x^2e_j=\lambda _je_j$, we deduce that
\begin{equation*}
\sum _{j=1}^{n}c_j(t)\lambda_je_j=-\sum
_{j=1}^{n}c_j(t)\partial _x^2e_j=-\partial_x^2u_n(t),
\end{equation*}
then, multiplying both sides of \eqref{2} by $c_j\lambda _j$
and summing for $j=1,\dots ,n$, we obtain
\begin{equation}
\begin{aligned}
& \frac{1}{2}\frac{d}{dt} \int
_0^{a}(\partial_x u_n)^2\mathrm{d}x+ \beta(t) \int
_0^{a}(\partial^2_x u_n)^2\, \mathrm{d}x\\
& = -\int _0^{a}f\partial^2_x u_n\, \mathrm{d}x
 +\int _0^{a}\gamma(t,x)\partial_xu_n\partial^2_x u_n\, \mathrm{d}x
 +\alpha(t) \int _0^{a}u_n\partial_x u_n\partial^2_x
u_n\, \mathrm{d}x.
\end{aligned}  \label{4}
\end{equation}
Using the Cauchy--Schwarz inequality, \eqref{38} with
$\varepsilon =\beta _1/2$ leads to
\begin{equation}
\begin{aligned}
|  \int _0^{a}f\partial _x^2u_n\, \mathrm{d}x|
& \leq  \Big(\int
_0^{a}| \partial _x^2u_n| ^2\, \mathrm{d}x\Big) ^{1/2}
\Big(\int _0^{a}| f| ^2\, \mathrm{d}x\Big) ^{1/2} \\
& \leq  \frac{\beta _1}{4} \int _0^{a}|
\partial _x^2u_n| ^2\, \mathrm{d}x+\frac{1}{\beta _1}
 \int _0^{a}| f| ^2\, \mathrm{d} x,
\end{aligned} \label{39}
\end{equation}
and
\begin{equation}
|  \int _0^{a}\gamma (t,x)\partial
_xu_n\partial _x^2u_n\, \mathrm{d}x|
=\frac{1}{2}\big|  \int _0^{a}\partial _x\gamma (t,x)(\partial
_xu_n)^2\, \mathrm{d}x\big|
\leq \frac{\gamma _1}{2}
 \int _0^{a}| \partial _xu_n|^2\, \mathrm{d}x.  \label{40}
\end{equation}

Now, we have to estimate the last term of \eqref{4}. An integration by parts
gives
\begin{equation*}
\int _0^{a}u_n\partial _xu_n\partial
_x^2u_n\, \mathrm{d}x
= \int _0^{a}u_n\partial _x(\frac{1}{2}(\partial _xu_n)^2) \, \mathrm{d}x
=- \frac{1}{2} \int _0^{a}(\partial_xu_n) ^{3}\, \mathrm{d}x.
\end{equation*}

Since $\partial _xu_n$ satisfies $\int _0^{a}\partial _xu_n\,
\mathrm{d}x=0$ we deduce that the continuous function $\partial _xu_n$
is zero at some point $y_{0n}\in (0,a)$, and  by integrating
$2\partial_xu_n\partial _x^2u_n$ between $y_{0n}$ and $y$, we obtain
\begin{equation*}
|\partial _xu_n|^2=|  \int _{y_{0n}}^{y}
\partial _x(\partial _xu_n)^2\, \mathrm{d}x|
=2| \int _{y_{0n}}^{y}u_n\partial _x^2u_n\, \mathrm{d}x| ,
\end{equation*}
the Cauchy--Schwarz inequality gives
\begin{equation*}
\| \partial _xu_n\| _{L^{\infty }(I)}^2
\leq 2\| \partial _xu_n\| _{L^2(I)}\| \partial _x^2u_n\| _{L^2(I)}.
\end{equation*}
But
\begin{equation*}
\| \partial _xu_n\| _{L^{3}(I)}^{3}\leq \| \partial
_xu_n\| _{L^2(I)}^2\| \partial _xu_n\| _{L^{\infty}(I)}.
\end{equation*}
So, \eqref{47} yields
\begin{equation*}
|  \int _0^{a}\alpha (t)u_n\partial
_xu_n\partial _x^2u_n\, \mathrm{d}x|
\leq \Big( \int _0^{a}| \partial _x^2u_n|
^2\, \mathrm{d}x\Big) ^{1/4}\Big(\alpha _2^{4/5}
\int _0^{a}| \partial _xu_n| ^2\, \mathrm{d}x\Big) ^{5/4}.
\end{equation*}

Finally, thanks to Young's inequality
$|AB|\leq \frac{|A|^{p}}{p}+\frac{|B|^{p'}}{p'}$, with $1<p<\infty $ and
$p'=\frac{p}{p-1}$, we have
\begin{equation*}
|AB|=| (\beta _1^{1/p}A) (\beta_1^{1/p'}\frac{B}{\beta _1}) |
\leq \frac{ \beta _1}{p}|A|^{p}+\frac{\beta _1}{p'\beta _1^{p'}}|B|^{p'}.
\end{equation*}
Choosing $p=4$  (then $p'=\frac{4}{3}$) in the previous formula,
\[
A=(\int _0^{a}| \partial _x^2u_n| ^2\, \mathrm{d}x) ^{1/4},\quad
 B=\Big(\alpha _2^{4/5} \int _0^{a}| \partial
_xu_n| ^2\, \mathrm{d}x\Big) ^{5/4},
\]
the estimate of the last term of \eqref{4} becomes
\begin{equation}
|  \int _0^{a}\alpha (t)u_n\partial
_xu_n\partial _x^2u_n\, \mathrm{d}x|
\leq \frac{\beta _1}{4} \int _0^{a}| \partial _x^2u_n| ^2\, \mathrm{d}x
+\frac{3}{4}\frac{\alpha_2^{4/3}}{\beta _1^{1/3}}
\Big(\int _0^{a}| \partial _xu_n| ^2\, \mathrm{d}x\Big) ^{5/3}.
\label{41}
\end{equation}
Let us return to inequality \eqref{4}:
 By integrating between $0$ and $t$,
 from the estimates \eqref{39}, \eqref{40}, and \eqref{41} we obtain
\begin{align*}
&\| \partial_xu_n\|^2_{L^2(I)}+ \beta_1
\int _0^{t}\| \partial^2_xu_n(s)\|^2_{L^2(I)}\, \mathrm{d}s \\
&\leq \| \partial_xu_{0n}\|^2_{L^2(I)}+ \frac{2}{\beta_1}
\int _0^{t} \| f(s)\|^2_{L^2(I)}\, \mathrm{d}s\\
&\quad +C_2 \int _0^{t}\Big(\|
\partial_xu_n(s)\|^2_{L^2(I)}\Big)^{5/3}\, \mathrm{d}s
+\gamma_1 \int _0^{t}\|
\partial_xu_n(s)\|^2_{L^2(I)}\, \mathrm{d}s, 
\end{align*}
where $C_2=\frac{3}{2}\frac{\alpha _2^{4/3}}{\beta _1^{1/3}}$.
Observe that $f\in L^2(R)$, and
$\| \partial _xu_{0n}\|_{L^2(I)}^2$ is bounded (see Remark \ref{rmq1}).
Then, there exists a constant $C_3$ such that
\begin{align*}
&\| \partial_xu_n\|^2_{L^2(I)}+ \beta_1
\int _0^{t}\| \partial^2_xu_n(s)\|^2_{L^2(I)}\,\mathrm{d}s\\
&\leq C_3+ C_2 \int _0^{t}\Big(\|
\partial_xu_n(s)\|^2_{L^2(I)}\Big)^{2/3}
\| \partial_xu_n(s)\|^2_{L^2(I)}\, \mathrm{d}s
+\gamma_1 \int _0^{t}\| \partial_xu_n(s)\|^2_{L^2(I)}\, \mathrm{d}s.
\end{align*}
Consequently, the function
\[
\varphi (t)=\| \partial _xu_n\|
_{L^2(I)}^2+\beta _1 \int _0^{t}\| \partial
_x^2u_n(s)\| _{L^2(I)}^2\, \mathrm{d}s
\]
 satisfies the inequality
\begin{equation*}
\varphi (t)\leq C_3+\int _0^{t}(C_2\| \partial
_xu_n(s)\| _{L^2(I)}^{4/3}+\gamma _1)\varphi (s)\mathrm{d}s.
\end{equation*}
Gronwall's inequality shows that
\begin{equation*}
\varphi (t)\leq C_3\exp \Big(\int _0^{t}(C_2\| \partial
_xu_n(s)\| _{L^2(I)}^{4/3}+\gamma _1)\mathrm{d}s\Big) .
\end{equation*}
According to Lemma \ref{102} the integral
$\int _0^{t}\| \partial _xu_n\| _{L^2(I)}^{4/3}\mathrm{d}s$ is bounded by a constant
independent of $n$ (and $t$). So there exists a positive constant $K_2$
such that
\begin{equation*}
\| \partial _xu_n\| _{L^2(I)}^2+\beta _1
\int _0^{t}\| \partial _x^2u_n(s)\| _{L^2(I)}^2\,\mathrm{d}s\leq K_2.
\end{equation*}
\end{proof}

\begin{lemma} \label{104} 
There exists a positive constant $K_3$ independent of $n$,
such that for all $t\in [ 0,T]$
\begin{equation*}
\| \partial _{t}u_n\| _{L^2(R)}^2\leq K_3.
\end{equation*}
\end{lemma}

\begin{proof}
Let 
\begin{equation*}
g_n=f-\alpha (t)u_n\partial _xu_n+\beta (t)\partial
_x^2u_n-\gamma (t,x)\partial _xu_n.
\end{equation*}
To show that $\partial _{t}u_n$ is bounded in $L^2(R)$, we will first
show that $g_n$ is bounded in $L^2(R)$. According to Lemmas \ref{102}
and \ref{103}, \ the terms $\gamma (t,x)\partial _xu_n$ and $\beta
(t)\partial _x^2u_n$ are bounded in $L^2(R)$. On the other hand, by
the hypothesis $f\in L^2(R)$. It remains only to show that 
$\alpha(t)u_n\partial _xu_n\in L^2(R)$.

Lemma \ref{102} proves that $\| u_n\| _{L^{\infty}(0,T;H_0^{1}(I))}^2$ 
is bounded. Then, using the injection of 
$H_0^{1}(I)$ in $L^{\infty }(I)$, we obtain
\begin{align*}
|  \int _0^{T} \int
_0^{a}(\alpha (t)u_n\partial _xu_n)^2\, \mathrm{d}x\,\mathrm{d}t|
& \leq  \alpha _2^2 \int
_0^{T}\Big(\| u_n\| _{L^{\infty }(I)}^2
\int _0^{a}|\partial _xu_n|^2\, \mathrm{d}x\Big) \, \mathrm{d}t \\
& \leq  \alpha _2^2C_{I} \int _0^{T}\|
u_n\| _{H_0^{1}(I)}^2\| \partial _xu_n\| _{L^2(I)}^2\,
\mathrm{d}t \\
& \leq  \alpha _2^2C_{I}\| u_n\| _{L^{\infty
}(0,T;H_0^{1}(I))}^2\| \partial _xu_n\| _{L^2(R)}^2, 
\end{align*}
where $C_{I}$ is a constant independent of $n$. Hence $g_n$ is bounded in 
$L^2(R)$. So, $\partial _{t}u_n$ is also bounded in $ L^2(R)$.
Indeed, from \eqref{2} for $j=1,\dots ,n$, we have
\begin{align*}
\int _0^{a}\partial _{t}u_ne_j\, \mathrm{d}x 
& =  \int _0^{a}(f-\alpha (t)u_n\partial
_xu_n+\beta (t)\partial _x^2u_n-\gamma (t,x)\partial
_xu_n) e_j\, \mathrm{d}x, \\
& =   \int _0^{a}g_ne_j\, \mathrm{d}x, 
\end{align*}
multiplying both sides by $c_j'$ and summing for $j=1,\dots ,n$,
\begin{equation*}
\| \partial _{t}u_n\| _{L^2(I)}^2
= \int_0^{a}g_n\partial _{t}u_n\, \mathrm{d}x,
\end{equation*}
we deduce that
$\| \partial _{t}u_n\| _{L^2(R)}\leq \| g_n\| _{L^2(R)}$.
\end{proof}

\subsection{Existence and uniqueness}

Lemmas \ref{102}, \ref{103} and \ref{104} show that the Galerkin
approximation $u_n$ is bounded in $L^{\infty }(0,T,L^2(I))$, and in 
$L^2(0,T,H^2(I))$, and $\partial _{t}u_n$ is bounded in $L^2(R)$. So,
it is possible to extract a subsequence from $u_n$ (that we continue to
denote $u_n$) such that
\begin{gather}
u_n\to u\quad \text{weakly in }L^2(0,T,H_0^{1}(I)),  \label{31} \\
u_n\to u\quad \text{strongly in }L^2(0,T,L^2(I)) \text{ and a.e. in } R,  \label{33}\\
\partial _{t}u_n\to \partial _{t}u\quad \text{weakly in }
L^2(R).  \label{34}
\end{gather}

\begin{lemma} \label{lem2.5}
Under the assumptions of Theorem \ref{thm1}, problem \eqref{1} admits a weak
solution $u\in H^{1,2}(R)$.
\end{lemma}

\begin{proof}
Note that \eqref{34} implies
\begin{equation*}
 \int _0^{T} \int _0^{a}\partial_{t}u_nw\, \mathrm{d}x\, \mathrm{d}t
\to  \int _0^{T} \int _0^{a}\partial _{t}uw\, \mathrm{d}
x\, \mathrm{d}t,\quad \forall w\in L^2(R).
\end{equation*}
From \eqref{31} and \eqref{33},
\begin{equation*}
u_n\partial _xu_n\to u\partial _xu\quad \text{weakly in } L^2(R),
\end{equation*}
then
\begin{equation*}
 \int _0^{T} \int _0^{a}\alpha
(t)u_n\partial _xu_nw\, \mathrm{d}x\, \mathrm{d}t\to
 \int _0^{T} \int _0^{a}\alpha
(t)u\partial _xuw\, \mathrm{d}x\, \mathrm{d}t,\quad \forall w\in L^2(R),
\end{equation*}
and
\begin{equation*}
 \int _0^{T} \int _0^{a}\gamma
(t,x)\partial _xu_nw\, \mathrm{d}x\, \mathrm{d}t\to
 \int _0^{T} \int _0^{a}\gamma
(t,x)\partial _xuw\, \mathrm{d}x\, \mathrm{d}t,\quad \forall w\in
L^2(R).
\end{equation*}

Our goal is to use these properties to pass to the limit.
 In problem \eqref{2}, when $n\to +\infty $, for each fixed index $j$, we have
\begin{equation}
\begin{aligned}  
&\int _0^{a}\big(\partial_t u+
\alpha(t)u\partial_xu\big)e_j\, \mathrm{d}x+ \beta(t) \int
_0^{a}\partial_x u\partial_xe_j\, \mathrm{d}x+  \int
_0^{a}\gamma(t,x)\partial_xu e_j\, \mathrm{d}x\\
&= \int_0^{a}fe_j\, \mathrm{d}x.
\end{aligned}  \label{35}
\end{equation}
Since $(e_j)_{j\in \mathbb{N}}$ is a basis of $H_0^{1}(I)$, for all
 $w\in H_0^{1}(I)$, we can write
\begin{equation*}
w(t)=\sum _{k=1}^{\infty }b_{k}(t)e_{k},
\end{equation*}
that is to say $w_{N}(t)=\sum _{k=1}^{N}b_{k}(t)e_{k}\to
w(t)$ in $H_0^{1}(I)$ when $N\to +\infty $.

Multiplying  \eqref{35} by $b_{k}$ and summing for $k=1,\dots ,N$, then
\begin{align*}  
&\int _0^{a}\big(\partial_t u+
\alpha(t)u\partial_xu\big)w_N\, \mathrm{d}x+ \beta(t) \int
_0^{a}\partial_x u\partial_xw_N\, \mathrm{d}x+  \int
_0^{a}\gamma(t,x)\partial_xu w_N\, \mathrm{d}x\\
& =  \int _0^{a}fw_N\, \mathrm{d}x.
 \end{align*}
Letting $N\to +\infty $, we deduce that
\[
  \int _0^{a}\big(\partial_t u+
\alpha(t)u\partial_xu\big)w\, \mathrm{d}x+ \beta(t) \int
_0^{a}\partial_x u\partial_xw\, \mathrm{d}x+  \int
_0^{a}\gamma(t,x)\partial_xu w\, \mathrm{d}x =  \int
_0^{a}fw\, \mathrm{d}x,
\]
so, $u$ satisfies the weak formulation \eqref{25} for all $w\in H_0^{1}(I)$
and $t\in [ 0;T]$.

Finally, we recall that, by hypothesis,
$\lim_{n\to +\infty } u_n(0)=u_0$ in $H_0^{1}(I)$.
This completes the proof of the ``existence''  part of Theorem \ref{thm1}.
\end{proof}

\begin{lemma}
Under the assumptions of Theorem \ref{thm1}, the solution of problem \eqref{1}
 is unique.
\end{lemma}

\begin{proof}
Let us observe that any solution $u\in H^{1,2}(R)$ of problem \eqref{1}
belongs to $L^{\infty }(0,T,L^2(I))$. Indeed, it is not difficult to see that
such a solution satisfies
\begin{equation*}
\frac{1}{2}\frac{d}{dt} \int _0^{a}u^2\, \mathrm{d}
x+\beta (t) \int _0^{a}(\partial _xu)^2\, \mathrm{d
}x-\frac{1}{2} \int _0^{a}\partial _x\gamma
(t,x)u^2\, \mathrm{d}x= \int _0^{a}fu\, \mathrm{d}x,
\end{equation*}
because
\begin{equation*}
\alpha (t) \int _0^{a}u^2\partial _xu\, \mathrm{d}
x=\frac{\alpha (t)}{3} \int _0^{a}\partial
_x(u)^{3}\, \mathrm{d}x=0,
\end{equation*}
and
\begin{equation*}
 \int _0^{a}\gamma (t,x)\partial _xuu\, \mathrm{d}x=
 \int _0^{a}\gamma (t,x)\partial _x(\frac{
u^2}{2}) \, \mathrm{d}x=-\frac{1}{2} \int
_0^{a}\partial _x\gamma (t,x)u^2\, \mathrm{d}x.
\end{equation*}
Consequently (see the proof of Lemma \ref{102})
\begin{align*}
&\| u \|^2_{L^2(I)}+\beta_1 \int
_0^{t}\| \partial_x u(s) \|^2_{L^2(I)}\, \mathrm{d}s\\
&\leq \| u_0 \|^2_{L^2(I)} + \frac{a^2}{2\beta_1} \int
_0^{t}\| f(s)\|^2_{L^2(I)}\, \mathrm{d}s
+\gamma_1 \int _0^{t}\| u(s) \|^2_{L^2(I)}\,
\mathrm{d}s, 
\end{align*}
so,
\begin{align*} 
&\| u \|^2_{L^2(I)}+\beta_1 \int
_0^{t}\| \partial_x u(s) \|^2_{L^2(I)}\, \mathrm{d}s \\
&\leq \| u_0 \|^2_{L^2(I)} + \frac{a^2}{2\beta_1} \int
_0^{t}\| f(s)\|^2_{L^2(I)}\, \mathrm{d}s\\
&\quad +\gamma_1 \int _0^{t} \Big(\| u(s)
\|^2_{L^2(I)}+\beta_1 \int _0^{s} \| \partial_x
u(\tau) \|^2_{L^2(I)} \, \mathrm{d}\tau \Big)\, \mathrm{d}s.
\end{align*}
Then there exists a positive constant $C$ such that
\begin{align*} 
&\| u \|^2_{L^2(I)}+\beta_1 \int _0^{t}\| \partial_x
u(s) \|^2_{L^2(I)}\, \mathrm{d}s \\
&\leq C+ \gamma_1 \int _0^{t} \Big(\| u(s)
\|^2_{L^2(I)} +\beta_1 \int _0^{s}
\| \partial_x u(\tau) \|^2_{L^2(I)}
\, \mathrm{d}\tau \Big)\, \mathrm{d}s.
 \end{align*}
Hence, Gronwall's lemma gives
\begin{equation*}
\| u\| _{L^2(I)}^2+\beta _1 \int_0^{t}\| \partial _xu(s)\| _{L^2(I)}^2\, \mathrm{d}
s\leq K,
\end{equation*}
where $K=C\exp (\gamma _1T)$. This shows that 
$u\in L^{\infty}(0,T,L^2(I))$ for all $f\in L^2(R)$.

Now, let $u_1,u_2\in H^{1,2}(R)$ be two solutions of \eqref{1}.
 We put $u=u_1-u_2$. It is clear that $u\in L^{\infty }(0,T,L^2(I))$.
The equations satisfied by $u_1$ and $u_2$ lead to
\begin{equation*}
 \int _0^{a}[ \partial _{t}uw+\alpha
(t)uw\partial _xu_1+\alpha (t)u_2w\partial _xu+\beta (t)\partial
_xu\partial _xw+\gamma (t,x)w\partial _xu] \, \mathrm{d}x=0.
\end{equation*}
Taking, for $t\in [ 0,T]$, $w=u$ as a test function, we deduce that
\begin{equation}  \label{122}
\begin{aligned} 
&\frac{1}{2}\frac{d}{dt}\| u\| _{L^2(I)}^2+\beta
(t)\| \partial _xu\| _{L^2(I)}^2\\
&=- \int_0^{a}\gamma(t,x)u\partial _xu\,
\mathrm{d}x-\alpha(t) \int _0^{a}u^2\partial
_xu_1\, \mathrm{d}x -\alpha(t) \int
_0^{a}u_2u\partial _xu\, \mathrm{d}x. 
\end{aligned}
\end{equation}
An integration by parts gives
\begin{equation*}
\alpha (t) \int _0^{a}u^2\partial _xu_1\,
\mathrm{d}x=-2\alpha (t) \int _0^{a}u\partial_xuu_1\, \mathrm{d}x,
\end{equation*}
then \eqref{122} becomes
\begin{equation*}
\frac{1}{2}\frac{d}{dt}\| u\| _{L^2(I)}^2+\beta (t)\|
\partial _xu\| _{L^2(I)}^2= \frac{1}{2} \int
_0^{a}\partial _x\gamma (t,x)u^2\, \mathrm{d}x+
\int _0^{a}\alpha (t)(2u_1-u_2) u\partial _xu\,
\mathrm{d}x.
\end{equation*}
By \eqref{47} and inequality \eqref{38} with 
$\varepsilon =2\beta _1$, we obtain
\begin{align*} 
&\Big|  \int _0^{a}\alpha (t)(2u_1-u_2) u\partial _xu\, \mathrm{d}x\Big| \\
&\leq \frac{1}{4\beta _1}\alpha _2^2(2\| u_1\| _{L^{\infty
}(0,T,L^2(I))}+\| u_2\| _{L^{\infty }(0,T,L^2(I))}) ^2
\| u\| _{L^2(I)}^2 +\beta _1\| \partial _xu\|_{L^2(I)}^2. 
\end{align*}
Furthermore,
\begin{equation*}
\frac{1}{2} \int _0^{a}\partial _x\gamma
(t,x)u^2\, \mathrm{d}x\leq \frac{\gamma _1}{2}\| u\|
_{L^2(I)}^2.
\end{equation*}
So, we deduce that there exists a non-negative constant $D$ such that
\begin{equation*}
\frac{1}{2}\frac{d}{dt}\| u\| _{L^2(I)}^2\leq D\| u\|
_{L^2(I)}^2,
\end{equation*}
and Gronwall's lemma leads to $u=0$. This completes the proof.
\end{proof}

\section{Proof of Theorem \ref{thm2}}

Let
\begin{equation*}
\Omega =\{(t,x)\in R^2;\ 0<t<T;\  \varphi _1(t)<x<\varphi _2(t)\},
\end{equation*}
where $T$ is a positive finite number.
The change of variables: $ \Omega  \to  R$,
\[
(t,x)  \mapsto  (t,y)=(t,\frac{x-\varphi _1(t)}{\varphi
_2(t)-\varphi _1(t)})
\]
transforms $\Omega $ into the rectangle $R=]0,T[\times ]0,1[$. 
Putting $u(t,x)=v(t,y)$ and $f(t,x)=g(t,y)$, then problem \eqref{7} becomes
\begin{equation}
\begin{gathered}
\begin{aligned}
&\partial _{t}v(t,y)+\frac{1}{\varphi (t)}v(t,y)\partial _{y}v(t,y)\\
&-\frac{\nu }{\varphi ^2(t)}\partial _{y}^2v(t,y)
 + \gamma (t,y)\partial_{y}v(t,y)=g(t,y)\quad \text{in } R, 
\end{aligned}\\
v(0,y)=v_0(y)=u_0(\varphi _1(0)+\varphi (0)y),\quad  y\in (0,1), \\
v(t,0)=v(t,1)=0\quad t\in (0,T), 
\end{gathered} \label{11}
\end{equation}
where
\begin{gather*}
\varphi (t)=\varphi _2(t)-\varphi _1(t), \\
\gamma (t,y)=-\frac{y\varphi '(t)+\varphi _1'(t)}{\varphi (t)}.
\end{gather*}
Now, we take $I=(0,1)$, $\alpha (t)=\frac{1}{\varphi (t)}$, 
$\beta (t)=\frac{\nu }{\varphi ^2(t)}$, then problem \eqref{11} 
can be written as
\begin{gather*}
\partial _{t}v(t,y)+\alpha (t)v(t,y)\partial _{y}v(t,y)-\beta (t)\partial
_{y}^2v(t,y)+\gamma (t,y)\partial _{y}v(t,y)=g(t,y)\\
 (t,y)\in R,
\\
v(0,y)=v_0(y)\quad y\in I,   \\
v(t,1)=v(t,0)=0\quad t\in (0,T).
\end{gather*}
It is easy to see that this change of variables preserves the spaces 
$H_0^{1}$, $H^{1,2}$ and $L^2$. In other words
\begin{equation}
\begin{gathered}
f\in L^2(\Omega )  \;\Leftrightarrow\;  g\in L^2(R) \\
u\in H^{1,2}(\Omega )  \;\Leftrightarrow\;  v\in H^{1,2}(R) \\
u_0\in H_0^{1}(J)  \;\Leftrightarrow\;  v_0\in H_0^{1}(I)
\end{gathered}\label{120}
\end{equation}

\begin{remark} \label{121}\rm
 Observe that the hypotheses \eqref{47} are fulfilled. This means
that the functions $\alpha ,\beta $ and $\gamma $ satisfy the following
conditions
\begin{gather*}
\alpha _1<\alpha (t)<\alpha _2,\quad \forall t\in [ 0,T], \\
\beta _1<\beta (t)<\beta _2,\quad \forall t\in [ 0,T], \\
|\partial _{y}\gamma (t,y)|\leq \gamma _1,\quad \forall (t,y)\in R.
\end{gather*}
\end{remark}

So, Burgers problem \eqref{7} is equivalent to problem \eqref{11}, and by
Theorem \ref{thm1}, there exists a unique solution $v\in H^{1,2}(R)$ of
problem \eqref{11}. Then \eqref{120} implies that the nonhomogeneous Burgers
problem \eqref{7} in the domain $\Omega $ admits a unique solution $u\in
H^{1,2}(\Omega )$. 


This work can be generalized to the case when $\varphi _1$, $\varphi _2$
are Lipschitz continuous functions on $[ 0,T] $ instead of 
$C^{1}(]0,T[)$. On the other hand, this is an interesting question:
 What happens if $\varphi _1(0)=\varphi _2(0)$? This is a singular case which
needs some hypotheses on $\varphi _1$, $\varphi _2$ near $t=0$. In a
forthcoming work, we will answer this question.


\begin{thebibliography}{99}

\bibitem{Adams} R. A. Adams;
 \emph{Sobolev spaces}, Academic Press, New York, 1975. %2

\bibitem{Bateman} H. Bateman;
 \emph{Some recent researches on the motion
of fluids}, Mon. Weather Rev. 43 (1915), 163-170. %3

\bibitem{Bressan} N. Bressan, A. Quarteroni;
 \emph{An implicit/explicit spectral method for Burgers equation}, 
Calcolo, 23 (1987), 265-284. %4

\bibitem{Brezis} H. Brezis;
\emph{Functional analysis, Sobolev spaces and partial
differential equations}, Springer, New York, 2011. 

\bibitem{Burgers2} J. M. Burgers;
\emph{The nonlinear diffusion equation},
Reidel, Dordrecht, 1974. 

\bibitem{Burgers1} J. M. Burgers;
\emph{A mathematical model illustrating
the theory of turbulence}, Adv. Appl. Mech. 1 (1948) 171-199. 

\bibitem{Clark} H. R. Clark, M. A. Rincon, A. Silva;
\emph{Analysis and numerical simulation of viscous Burgers equation}, 
Numerical Functional Analysis and Optimization, 32:7 (2011) 695-716. 

\bibitem{Cole} J. D. Cole;
\emph{On a quasi-linear parabolic equation
occurring in aerodynamics}, Quart. Appl. Math. 9 (1951) 225-236. 

\bibitem{Forsyth} A. R. Forsyth;
\emph{Theory of differential equations},
Part IV - Partial Differential Equations, Cambridge University Press,
Cambridge, 1906. Republished by Dover, New York, 1959. %11

\bibitem{Hopf} E. Hopf;
\emph{The partial differential equation $u_t +uu_x =\mu u_{xx}$}, 
Comm. Pure Appl. Math. 3 (1950) 201-230.

\bibitem{Kevorkian} J. Kevorkian;
\emph{Partial differential equations: Analytical solution techniques}, 
Brooks/Cole Pub. Pacific Grove, California, 1990. 

\bibitem{Morandi} M. Morandi Cecchi, R. Nociforo, P. Patuzzo;
\emph{ Burgers problems - Theoretical results}, Rivista di Matematica Pura ed
Applicata (1997), 159-174. 

\bibitem{Sadallah} B.-K. Sadallah; 
\emph{\'{E}tude d'un probl\`{e}me $2m$
-parabolique dans des domaines plan non rectangulaires}, Boll. U. M. I.,
(6), 2-B (1983), 51-112. %17

\bibitem{Temam} R. Temam;
\emph{Infinite-dimensional dynamical systems in
mechanics and physics}, Springer, New York, 1997.

\bibitem{Whitham} G. B. Whitham;
\emph{Lectures on wave propagation},
Narosa Pub. House, New Delhi, 1979. 

\end{thebibliography}

\end{document}
