\documentclass[reqno]{amsart}
\usepackage{hyperref}
\usepackage{mathrsfs,amssymb}
\usepackage{epic}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2011 (2011), No. 76, pp. 1--25.\newline
ISSN: 1072-6691. URL: \url{http://ejde.math.txstate.edu} or \url{http://ejde.math.unt.edu}
\newline ftp ejde.math.txstate.edu}
\thanks{\copyright 2011 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2011/76\hfil
A linear first-order hyperbolic equation]
{A linear first-order hyperbolic equation
with a discontinuous coefficient: distributional shadows
and propagation of singularities}

\author[H. Deguchi\hfil EJDE-2011/76\hfilneg]
{Hideo Deguchi}

\address{Hideo Deguchi \newline
Department of Mathematics, University of Toyama \\
Toyama 930-8555, Japan}
\email{hdegu@sci.u-toyama.ac.jp}

\thanks{Submitted January 19, 2011. Published June 16, 2011.}
\thanks{Supported by  the Austrian Science
Fund (FWF), Lise Meitner project M1155-N13}
\subjclass[2000]{46F30, 35L03, 35A21}
\keywords{First-order hyperbolic equation;
 discontinuous coefficient; \hfill\break\indent generalized solutions}

\begin{abstract}
 It is well-known that distributional solutions to the Cauchy
 problem for $u_t + (b(t,x)u)_{x} = 0$ with $b(t,x) = 2H(x-t)$,
 where $H$ is the Heaviside function, are non-unique.
 However, the problem has a unique generalized solution in the sense
 of Colombeau. The relationship between this generalized solution
 and the distributional solutions is established.
 Moreover, the propagation of singularities is studied.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{definition}[theorem]{Definition}
%\newtheorem{example}[theorem]{Example}

\section{Introduction}\label{intro}

In this paper we study generalized solutions of the Cauchy
problem for a first-order hyperbolic equation
\begin{equation}\label{eqn:hyperbolic1}
     \begin{gathered}
 u_t + (b(t,x)u)_{x} = 0,  \quad (t,x) \in \mathbb{R}^2, \\
 u|_{t = 0} = u_0,  \quad x \in \mathbb{R}
\end{gathered}
\end{equation}
with $b(t,x) = 2H(x-t)$, where $H$ is the Heaviside function,  in
the framework of generalized functions introduced by Colombeau
\cite{colombeau1,colombeau2}. We will seek solutions in an algebra
$\mathscr{G}(\mathbb{R}^2)$ of generalized functions, which will
be defined in Section \ref{sec:2} below. We mention that
$\mathscr{G}(\mathbb{R})$ contains the space
$\mathscr{D}'(\mathbb{R})$ of distributions so that initial data
with strong singularities can be considered in our setup. The
formulation of problem \eqref{eqn:hyperbolic1} in $\mathscr{G}$
will be given in Section \ref{sec:3}.

Until now, the following three questions for a variety of partial
differential equations in Colombeau's algebras have been addressed:
(a) existence and uniqueness of generalized solutions;
(b) behavior of generalized solutions in the framework of distribution
theory (distributional shadow);
(c) regularity of generalized solutions.

For linear first-order hyperbolic systems with discontinuous
coefficients, the existence and uniqueness were established
in one space-dimensional case by Oberguggenberger
\cite{oberguggenberger1989},
for symmetric hyperbolic systems in higher space-dimensional
case by Lafon and Oberguggenberger \cite{lafon} and for
hyperbolic pseudodifferential systems with generalized symbols
by H\"{o}rmann \cite{hoermann2}.

Almost all previous results on question (b) for differential
equations have been obtained under the hypothesis of unique
distributional solutions. However, it is well-known \cite{hurd}
that distributional solutions of linear first-order hyperbolic
equations with discontinuous coefficients may fail to exist or may
be non-unique. Question (b) for the linear hyperbolic equation
having no distributional solution given by Hurd and Sattinger
\cite{hurd}, namely, for problem \eqref{eqn:hyperbolic1} with
$b=-H$ and $u_0 \equiv 1$, where $H$ is the Heaviside function,
was answered by Oberguggenberger \cite{oberguggenberger1988}.
Similar equations have been studied by H\"{o}rmann and de Hoop
\cite{hoermann1}. In this paper we are concerned with question (b)
for linear hyperbolic equations having non-unique distributional
solutions. According to Hurd and Sattinger \cite{hurd}, for $t >
0$, problem \eqref{eqn:hyperbolic1} with $u_0 \equiv 0$ has
infinitely many distributional solutions
\begin{equation}\label{eqn:u_c}
    u_c(t,x):=
        \begin{cases}
            0, &  \text{if } x < 0 \text{ or } x > 2t, \\
            c, &  \text{if } 0 < x < t, \\
            -c, &  \text{if } t < x < 2t
        \end{cases}
\end{equation}
with $c \in \mathbb{R}$. On the other hand, as stated above, it
has been proved in \cite{oberguggenberger1989} that problem
\eqref{eqn:hyperbolic1} has a unique generalized solution $U \in
\mathscr{G}(\mathbb{R}^2)$ for any initial data. First, we will
investigate how the generalized solutions are related to the
distributional solutions $u_c$. Several results on behavior of
generalized solutions in the framework of distribution theory of
differential equations  having non-unique distributional solutions
have been obtained. For ordinary differential equations, see
\cite{deguchi1}, and for parabolic equations, see \cite{deguchi2}.

Concerning the regularity of generalized solutions of  problem
\eqref{eqn:hyperbolic1}, we focus on the case of initial data
given by the delta function at $s \in \mathbb{R}$. As can be seen
in Section \ref{sec:2}, there exists an abundant variety of
elements of $\mathscr{G}(\mathbb{R})$ having the property of the
delta function at $s$, which are called Dirac generalized
functions at $s$. In particular, there exist Dirac generalized
functions at $s$ which can be interpreted to have different
strengths of singularity at $s$. Thus we have the following
question: how does the strength of the singularity of a Dirac
generalized function taken as initial data affect the regularity
of the generalized solution of problem \eqref{eqn:hyperbolic1}?
Secondly, we will give an answer to this question. The propagation
of singularities for linear first-order hyperbolic equations with
other particular discontinuous coefficients has been studied by
H\"{o}rmann and de Hoop \cite{hoermann1}, Garetto and H\"{o}rmann
\cite{garetto} and Oberguggenberger \cite{oberguggenberger2008}.

The rest of this paper is organized as follows: we recall  the
definition and basic properties of the Colombeau algebra
$\mathscr{G}$ in Section \ref{sec:2}. In Section \ref{sec:3}, we
first give our formulation of problem \eqref{eqn:hyperbolic1} and
describe a result on existence and uniqueness of its generalized
solution $U \in \mathscr{G}(\mathbb{R}^2)$ for any initial data
$U_0 \in \mathscr{G}(\mathbb{R})$ which has been obtained by
Oberguggenberger \cite{oberguggenberger1989}. In Section
\ref{sec:4}, we discuss how the generalized solutions are related
to the distributional solutions $u_c$ given by formula
\eqref{eqn:u_c} (Theorem \ref{thm:1}). In Section \ref{sec:5}, we
look at problem \eqref{eqn:hyperbolic1} with various Dirac
generalized functions as initial data. We investigate the behavior
of the generalized solutions in the framework of distribution
theory, and further the regularity of the generalized solutions
(Theorems \ref{thm:2}, \ref{thm:2-1}, \ref{thm:3}, \ref{thm:3-1},
\ref{thm:4} and \ref{thm:4-1}).

\section{Colombeau's theory of generalized functions}\label{sec:2}

We will employ the \emph{special Colombeau algebra} denoted by
$\mathscr{G}^{s}$ in Grosser et al. \cite{grosser}, which was
called the \emph{simplified Colombeau algebra} in Biagioni
\cite{biagioni}. However, here we will simply use the letter
$\mathscr{G}$ instead. Let us briefly recall the definition and
basic properties of the algebra $\mathscr{G}$ of generalized
functions. For more details, see Grosser et al. \cite{grosser}.

Let $\Omega$ be a non-empty open subset of $\mathbb{R}^d$.
Let $\mathscr{E}(\Omega)$ be the differential algebra of all
maps from the interval $(0,1]$ into $C^{\infty}(\Omega)$.
Thus each element of $\mathscr{E}(\Omega)$ is a family
$(u^{\varepsilon})_{\varepsilon \in (0,1]}$ of real valued
smooth functions on $\Omega$.
The subalgebra ${\mathscr{E}}_{M}(\Omega)$ is defined by
all elements $(u^{\varepsilon})_{\varepsilon \in (0,1]}$
of $\mathscr{E}(\Omega)$ with the property that, for all
$K \Subset \Omega$ and $\alpha \in \mathbb{N}_0^d$, there
exists $p \ge 0$ such that
\[
    \sup_{x \in K} |\partial_{x}^{\alpha} u^{\varepsilon}(x)|
 = O(\varepsilon^{-p}) \quad \text{as }\varepsilon \downarrow 0.
\]
The ideal $\mathscr{N}(\Omega)$ is defined by all elements
$(u^{\varepsilon})_{\varepsilon \in (0,1]}$ of $\mathscr{E}(\Omega)$
with the property that, for all $K \Subset \Omega$,
$\alpha \in \mathbb{N}_0^d$ and $q \ge 0$,
\[
    \sup_{x \in K} |\partial_{x}^{\alpha} u^{\varepsilon}(x)|
 = O(\varepsilon^q) \quad \text{as }\varepsilon \downarrow 0.
\]
\emph{The algebra} $\mathscr{G}(\Omega)$ \emph{of generalized
functions} is defined by the quotient space
\[
    \mathscr{G}(\Omega) = {\mathscr{E}}_{M}(\Omega) / \mathscr{N}(\Omega).
\]
We use capital letters for elements of $\mathscr{G}(\Omega)$
to distinguish generalized functions from distributions and
denote by $(u^{\varepsilon})_{\varepsilon \in (0,1]}$
a representative of $U \in \mathscr{G}(\Omega)$.
Then for any $U$, $V \in \mathscr{G}(\Omega)$ and
$\alpha \in \mathbb{N}_0^d$, we can define the partial
derivative $\partial^{\alpha} U$ to be the class of
$(\partial^{\alpha}u^{\varepsilon})_{\varepsilon \in (0,1]}$
and the product $UV$ to be the class of
$(u^{\varepsilon}v^{\varepsilon})_{\varepsilon \in (0,1]}$.
Also, for any $U =$\ class of
$(u^{\varepsilon}(t,x))_{\varepsilon \in (0,1]}
\in \mathscr{G}(\mathbb{R}^2)$, we can define its
restriction $U|_{t = 0} \in \mathscr{G}(\mathbb{R})$
to the line $\{t = 0\}$ to be the class of
$(u^{\varepsilon}(0,x))_{\varepsilon \in (0,1]}$.

\begin{remark}\label{rem:imbedding} \rm
The algebra $\mathscr{G}(\Omega)$ contains the space
$\mathscr{E}'(\Omega)$ of compactly supported distributions. In
fact, the map
\[
    f \mapsto \text{class of }
\big((f \ast \rho_{\varepsilon})|_{\Omega}\big)_{\varepsilon \in (0,1]}
\]
defines an imbedding of $\mathscr{E}'(\Omega)$ into
$\mathscr{G}(\Omega)$, where
\[
    \rho_{\varepsilon}(x) = \frac{1}{\varepsilon^d} \rho
\left(\frac{x}{\varepsilon}\right)
\]
and $\rho$ is a fixed element of $\mathscr{S}(\mathbb{R}^d)$ such
that $\int \rho(x)\,dx = 1$ and $\int x^{\alpha}\rho(x)\,dx = 0$
for any $\alpha \in \mathbb{N}_0^d$, $|\alpha| \ge 1$. In this
sense, we obtain an inclusion relation $\mathscr{E}'(\Omega)
\subset \mathscr{G}(\Omega)$. This can be extended in a unique way
to an imbedding of the space $\mathscr{D}'(\Omega)$ of
distributions. Moreover, this imbedding turns $C^{\infty}(\Omega)$
into a subalgebra of $\mathscr{G}(\Omega)$.
\end{remark}

\begin{definition} \rm
A generalized function $U \in \mathscr{G}(\Omega)$ is said to be
\emph{associated with a distribution} $w \in \mathscr{D}'(\Omega)$
if it has a representative $(u^{\varepsilon})_{\varepsilon \in
(0,1]} \in {\mathscr{E}}_{M}(\Omega)$ such that
\[
    u^{\varepsilon} \to w \quad \text{in } \mathscr{D}'(\Omega) \quad \text{as } \varepsilon \downarrow 0.
\]
We write $U \approx w$ and call $w$ the \emph{distributional
shadow} of $U$ if $U$ is associated with $w$.
\end{definition}

\begin{remark} \rm
A subalgebra $\mathscr{G}_{\rm log}(\Omega)$ of $\mathscr{G}(\Omega)$
is defined similarly to $\mathscr{G}(\Omega)$ by replacing the
bound $\sup_{x \in K}|\partial_{x}^{\alpha} u^{\varepsilon}(x)| =
O(\varepsilon^{-p})$ in ${\mathscr{E}}_{M}(\Omega)$ by the
stronger bound $\sup_{x \in K}|\partial_{x}^{\alpha}
u^{\varepsilon}(x)| = O((\log(1/\varepsilon))^{p})$. For any
distribution $f \in \mathscr{D}'(\Omega)$, there exists a
generalized function $U \in \mathscr{G}_{\rm log}(\Omega)$ which is
associated with $f$, see Colombeau and Heibig \cite{colombeau and
heibig}. Therefore, any distribution on $\Omega$ can be
interpreted as an element of $\mathscr{G}_{\rm log}(\Omega)$ in the
sense of association.
\end{remark}

We next define the notion of generalized functions of Dirac type.

\begin{definition}\label{defn:dirac}
We say that $U \in \mathscr{G}(\mathbb{R})$ is a \emph{Dirac
generalized function} at $s \in \mathbb{R}$ if it has a
representative $(u^{\varepsilon})_{\varepsilon \in (0,1]}$
satisfying
\begin{itemize}
\item[(1)] there exists $a(\varepsilon) > 0$, $a(\varepsilon) \to 0$
 as $\varepsilon \downarrow 0$, such that $u^{\varepsilon}(x) = 0$
 if $|x-s| \ge a(\varepsilon)$;
\item[(2)] $\int_{\mathbb{R}} u^{\varepsilon}(x)\,dx = 1$ for all
 $\varepsilon \in (0,1]$;
\item[(3)] $\sup_{\varepsilon \in (0,1]} \int_{\mathbb{R}}
 |u^{\varepsilon}(x)|\,dx < \infty$.
\end{itemize}
Then $U$ admits the delta function $\delta_s$ at $s$ as
distributional shadow.
\end{definition}

Regularity theory for linear equations has been based on the
subalgebra $\mathscr{G}^{\infty}(\Omega)$ of \emph{regular
generalized functions} in $\mathscr{G}(\Omega)$ introduced by
Oberguggenberger \cite{oberguggenberger1992}. It is defined by all
elements which have a representative
$(u^{\varepsilon})_{\varepsilon \in (0,1]}$ with the property
that, for all $K \Subset \Omega$, there exists $p \ge 0$ such
that, for all $\alpha \in \mathbb{N}_0^d$,
\[
    \sup_{x \in K} |\partial_{x}^{\alpha} u^{\varepsilon}(x)|
= O(\varepsilon^{-p}) \quad \text{as }\varepsilon \downarrow 0.
\]
We observe that all derivatives of $u^{\varepsilon}$ have locally
the same order of growth in $\varepsilon > 0$, unlike elements of
${\mathscr{E}}_{M}(\Omega)$. This subalgebra
$\mathscr{G}^{\infty}(\Omega)$ has the property
$\mathscr{G}^{\infty}(\Omega) \cap \mathscr{D}'(\Omega) =
C^{\infty}(\Omega)$, see
\cite[Theorem 25.2, p. 275]{oberguggenberger1992}.
Hence, for the purpose of describing the regularity
of generalized functions, $\mathscr{G}^{\infty}(\Omega)$ plays the
same role for $\mathscr{G}(\Omega)$ as $C^{\infty}(\Omega)$ does
in the setting of distributions. The
$\mathscr{G}^{\infty}$-singular support
(denoted by $\operatorname{sing\,supp}_{\mathscr{G}^{\infty}}$) of a generalized function is
defined as the complement of the largest open set on which the
generalized function is regular in the above sense. A subalgebra
$\mathscr{G}_{\rm log}^{\infty}(\Omega)$ of
$\mathscr{G}_{\rm log}(\Omega)$ is defined similarly to
$\mathscr{G}^{\infty}(\Omega)$ by replacing the bound $\sup_{x \in
K}|\partial_{x}^{\alpha} u^{\varepsilon}(x)| =
O(\varepsilon^{-p})$ by the stronger bound $\sup_{x \in
K}|\partial_{x}^{\alpha} u^{\varepsilon}(x)| =
O((\log(1/\varepsilon))^{p})$. The
$\mathscr{G}_{\rm log}^{\infty}$-singular support (denoted by $\operatorname{sing\,supp}_{\mathscr{G}_{\rm log}^{\infty}}$) can also be introduced.

\begin{remark}\label{rem:strength} \rm
Let $s \in \mathbb{R}$ and let $\chi$ be a fixed element of
$\mathscr{D}(\mathbb{R})$ such that $\chi$ is symmetric,
non-negative, with $\operatorname{supp}\chi \subset [-1,1]$, $\chi(0) > 0$
 and $\int_{\mathbb{R}}\chi(x)\,dx = 1$.
Put $\chi_{\varepsilon}(x) = \chi(x/\varepsilon)/\varepsilon$.
Then $U \in \mathscr{G}(\mathbb{R})$ defined by the class of
$(\chi_{\varepsilon}(\cdot - s))_{\varepsilon \in (0,1]}$ is a
Dirac generalized function at $s$ and $\operatorname{sing\,supp}_{\mathscr{G}^{\infty}} U = \{s\}$. However, if $U \in
\mathscr{G}(\mathbb{R})$ is defined as the class of
$(\chi_{h(\varepsilon)}(\cdot - s))_{\varepsilon \in (0,1]}$ with
$h(\varepsilon) = 1/\log(1/\varepsilon)$, then it is a Dirac
generalized function at $s$ again, but $\operatorname{sing\,supp}_{\mathscr{G}^{\infty}} U = \emptyset$. Hence, the speed
of convergence of a representative of $U$ to the delta function at
$s$ can be interpreted as the strength of  the singularity of $U$
at $s$. Thus, there exist infinitely many Dirac generalized
functions with different strengths of singularity at $s$ in
$\mathscr{G}(\mathbb{R})$.
\end{remark}

\section{Existence and uniqueness of generalized solutions}\label{sec:3}

We formulate problem \eqref{eqn:hyperbolic1} in Colombeau's
algebra $\mathscr{G}$ in the form
\begin{equation}\label{eqn:generalized hyperbolic1}
  \begin{gathered}
            U_t + (BU)_{x} = 0 \quad \text{in } \mathscr{G}(\mathbb{R}^2), \\
            U|_{t = 0} = U_0 \quad  \text{in } \mathscr{G}(\mathbb{R})
        \end{gathered}
\end{equation}
with the generalized function $B \in \mathscr{G}(\mathbb{R}^2)$
having the representative $(b^{\varepsilon})_{\varepsilon \in (0,1]}$
given by
\[
    b^{\varepsilon}(t,x)
    := (b \ast \varphi_{h(\varepsilon)})(t,x)
    = 2\iint_{\mathbb{R}^2} H(x-t-h(\varepsilon)y+h(\varepsilon)s)
\varphi(s,y)\,dy\,ds,
\]
where $h(\varepsilon):= 1/\log (1/\varepsilon)$ and $\varphi$ is a
fixed element of $\mathscr{D}(\mathbb{R}^2)$ such that $\varphi$
is symmetric, non-negative, with $\operatorname{supp}\varphi
\subset [-1,1] \times [-1,1]$, $\varphi(0,0) > 0$ and
$\iint\varphi(t,x)\,dx\,dt = 1$. We note that $B$ belongs to
$\mathscr{G}_{\rm log}(\mathbb{R}^2)$ and further admits $b(t,x) =
2H(x-t)$ as distributional shadow.

\begin{definition} \rm
We say that $U \in \mathscr{G}(\mathbb{R}^2)$ is a
(\emph{generalized}) \emph{solution} of problem
\eqref{eqn:generalized hyperbolic1} if it has a representative
$(u^{\varepsilon})_{\varepsilon \in (0,1]} \in
{\mathscr{E}}_{M}(\mathbb{R}^2)$ such that
 \begin{gather*}
  u^{\varepsilon}_t + (b^{\varepsilon}u^{\varepsilon})_{x}
 = N^{\varepsilon},  \quad (t,x) \in \mathbb{R}^2,\\
  u^{\varepsilon}|_{t = 0} = u_{0}^{\varepsilon} + n^{\varepsilon},
\quad x \in \mathbb{R}
 \end{gather*}
for some $(N^{\varepsilon})_{\varepsilon \in (0,1]} \in
\mathscr{N}(\mathbb{R}^2)$ and $(n^{\varepsilon})_{\varepsilon \in
(0,1]} \in \mathscr{N}(\mathbb{R})$, where
$(u_{0}^{\varepsilon})_{\varepsilon \in (0,1]}$ and
$(b^{\varepsilon})_{\varepsilon \in (0,1]}$ are representatives of
$U_0$ and $B$, respectively.
\end{definition}

For any $U_0 \in \mathscr{G}(\mathbb{R})$, problem
\eqref{eqn:generalized hyperbolic1} has a unique solution $U \in
\mathscr{G}(\mathbb{R}^2)$, see \cite{oberguggenberger1989}, in
which a more general existence and uniqueness result has been
obtained.

\section{Relationship to non-unique distributional solutions}\label{sec:4}

In this section we establish the relationship between the
generalized solutions of problem
\eqref{eqn:generalized hyperbolic1} and the distributional solutions
$u_c$ of problem \eqref{eqn:hyperbolic1}.
For this purpose, we first prepare the following lemma.

\begin{lemma}\label{lemma:1}
For $\varepsilon \in (0,1)$, let
\[
    G^{\varepsilon}(x):= \int_{-x/h(\varepsilon)}^{2}
\frac{dz}{1-\widetilde{b}^{\varepsilon}(-h(\varepsilon)z)}
\quad \text{on } [-2h(\varepsilon),0),
\]
where $\widetilde{b}^{\varepsilon}(z):= 2\iint_{\mathbb{R}^2}
H(z-h(\varepsilon)y+h(\varepsilon)s)\varphi(s,y)\,dy\,ds$.
Then $G^{\varepsilon}$ has the following four properties:
\begin{itemize}
\item[(i)] $G^{\varepsilon}$ is a strictly increasing continuous
 function on $[-2h(\varepsilon),0)$ such that
 $G^{\varepsilon}(-2h(\varepsilon)) = 0$ and
 $\lim_{x \uparrow 0}G^{\varepsilon}(x) = \infty$;
\item[(ii)] there exist two constants $C_1$, $C_2 > 0$ such that,
 for any $x \in [-2h(\varepsilon),0)$ and $\varepsilon \in (0,1)$,
\begin{equation}\label{eqn:inequality for G^{varepsilon}}
    C_1\log\Big(\frac{2h(\varepsilon)}{-x}\Big)
 \le G^{\varepsilon}(x) \le C_2\log\Big(\frac{2h(\varepsilon)}{-x}\Big);
\end{equation}
\item[(iii)] there exists the inverse function $(G^{\varepsilon})^{-1}$ on $[0,\infty)$, which satisfies
\begin{equation}\label{eqn:differentiation of (G^{varepsilon})^{-1}}
    \frac{d(G^{\varepsilon})^{-1}(x)}{dx}
= h(\varepsilon)[1-\widetilde{b}^{\varepsilon}((G^{\varepsilon})^{-1}(x))];
\end{equation}
\item[(iv)] for any $a$, $x \in [0,\infty)$,
\begin{equation}\label{eqn:inequality for (G^{varepsilon})^{-1}}
    2 \exp \Big(-\frac{a}{C_1}\Big) h(\varepsilon)\varepsilon^{x/C_1}
    \le \big|(G^{\varepsilon})^{-1}\Big(\frac{x}{h(\varepsilon)}
+a\Big)\big| \le 2 \exp \Big(-\frac{a}{C_2}\Big) h(\varepsilon)
 \varepsilon^{x/C_2},
\end{equation}
where $C_1$, $C_2 > 0$ are the constants given in (ii).
\end{itemize}
\end{lemma}

\begin{proof}
Property (i) is clear. To show property (ii), rewrite
$1 = 2\iint_{s > y} \varphi(s,y)\,dy\,ds$.
Then we find that
\begin{align*}
    1 - \widetilde{b}^{\varepsilon}(-h(\varepsilon)z)
    & = 1 - 2\iint_{s > y + z} \varphi(s,y)\,dy\,ds
     = 2\iint_{y < s < y + z} \varphi(s,y)\,ds\,dy.
\end{align*}
Hence, there exist two constants $c_1$, $c_2 > 0$ such that $c_1 z \le 1 - \widetilde{b}^{\varepsilon}(-h(\varepsilon)z) \le c_2 z$ for $0 \le z \le 2$.
Putting $C_1=1/c_2$ and $C_2=1/c_1$, the reciprocal of $1 - \widetilde{b}^{\varepsilon}(-h(\varepsilon)z)$ satisfies the inequality
\[
    \frac{C_1}{z} \le \frac{1}{1 - \widetilde{b}^{\varepsilon}
(-h(\varepsilon)z)} \le \frac{C_2}{z}.
\]
Integrating this over $[-x/h(\varepsilon),2)$ gives inequality
\eqref{eqn:inequality for G^{varepsilon}}.

Next, we prove property (iii). By property (i), there exists
$(G^{\varepsilon})^{-1}$ on $[0,\infty)$. We differentiate
$(G^{\varepsilon})^{-1}$ to get $d(G^{\varepsilon})^{-1}(x)/dx =
1/(G^{\varepsilon})'((G^{\varepsilon})^{-1}(x))$. We have
$(G^{\varepsilon})'(x) =
1/h(\varepsilon)(1-\widetilde{b}^{\varepsilon}(x))$ and so get
formula \eqref{eqn:differentiation of (G^{varepsilon})^{-1}}.

Finally, we prove property (iv).
Put $y = (G^{\varepsilon})^{-1}(x/h(\varepsilon)+a) < 0$.
By property (ii), there exist two constants $C_1$, $C_2 > 0$ such that
\[
    C_1\log\Big(\frac{2h(\varepsilon)}{-y}\Big)
\le G^{\varepsilon}(y) \le C_2\log\Big(\frac{2h(\varepsilon)}{-y}\Big).
\]
Noting that $G^{\varepsilon}(y) = x/h(\varepsilon) + a$, we have
\[
    C_1\left[\log 2h(\varepsilon) - \log(-y)\right] \le \frac{x}{h(\varepsilon)} + a \le C_2\left[\log 2h(\varepsilon) - \log(-y)\right].
\]
Therefore, we see that
\[
    \log 2h(\varepsilon) - \frac{a}{C_1} - \frac{x}{C_1h(\varepsilon)} \le \log(-y) \le \log 2h(\varepsilon) - \frac{a}{C_2} - \frac{x}{C_2h(\varepsilon)}.
\]
Since $h(\varepsilon) = 1/\log (1/\varepsilon)$, it follows that
\[
    2 \exp \Big(-\frac{a}{C_1}\Big) h(\varepsilon)
\varepsilon^{x/C_1} \le -y \le 2 \exp \Big(-\frac{a}{C_2}\Big)
h(\varepsilon) \varepsilon^{x/C_2}.
\]
Thus inequality \eqref{eqn:inequality for (G^{varepsilon})^{-1}}
 follows.
\end{proof}

We now turn to a comparison between generalized solutions of
problem \eqref{eqn:generalized hyperbolic1} and the distributional
solutions $u_c$ given by \eqref{eqn:u_c} of
problem \eqref{eqn:hyperbolic1}.

\begin{theorem}\label{thm:1}
For any $c \in \mathbb{R}$ and $T > 0$, there exists initial
data $U_0 \approx 0$ such that the solution
$U \in \mathscr{G}(\mathbb{R}^2)$ of problem
\eqref{eqn:generalized hyperbolic1} admits a distributional
shadow on $(-T,T) \times \mathbb{R}$, which is given by
\[
    u(t,x) =   \begin{cases}
            u_c(t,x), & \text{if } 0 < t < T,\; x \in \mathbb{R}, \\
            0, &\text{if } -T < t \le 0,\; x \in \mathbb{R},
        \end{cases}
\]
where $u_c$ is the function given by \eqref{eqn:u_c}.
\end{theorem}

\begin{proof}
We consider the Cauchy problem
\begin{equation}\label{eqn:generalized hyperbolic2}
  \begin{gathered}
            V_t + BV_{x} = 0 \quad \text{in } \mathscr{G}(\mathbb{R}^2), \\
            V|_{t = 0} = V_0 \quad \text{in } \mathscr{G}(\mathbb{R}).
        \end{gathered}
\end{equation}
The existence and uniqueness of solutions $V \in
\mathscr{G}(\mathbb{R}^2)$ of problem
\eqref{eqn:generalized hyperbolic2} are guaranteed for all
initial data $V_0 \in \mathscr{G}(\mathbb{R})$ by Oberguggenberger
\cite{oberguggenberger1989}. Clearly, $V_x$ satisfies problem
\eqref{eqn:generalized hyperbolic1}. Define the function
$v(t,x):= \int_0^x u(t,y)\,dy$. In order to prove the
assertion, it suffices to show that, for any $c \in \mathbb{R}$
and $T>0$, there exists initial data $V_0$ such that $V_0' \approx 0$ on
$\mathbb{R}$ and $V \approx v$ on $(-T,T) \times \mathbb{R}$. We
will only prove this for the case $c > 0$. The case $c \le 0$ can
be argued similarly. The proof is divided into three steps.

{\bf Step 1}. Fix $c > 0$ and $\varepsilon \in (0,1)$ arbitrarily.
Let $G^{\varepsilon}$ be as in Lemma \ref{lemma:1}. Recall that
$G^{\varepsilon}$ is a strictly increasing continuous function on
$[-2h(\varepsilon),0)$ with $G^{\varepsilon}(-2h(\varepsilon)) =
0$ and $\lim_{x \uparrow 0}G^{\varepsilon}(x) = \infty$. Hence,
there exists a point $0 < \eta(\varepsilon) < 2h(\varepsilon)$
such that $G^{\varepsilon}(-\eta(\varepsilon)) = 2$. Define
\begin{equation}\label{eqn:w_0^{varepsilon}}
    w_{0}^{\varepsilon}(x):=
        \begin{cases}
            ch(\varepsilon)(G^{\varepsilon}(x)-2), &  \text{if } -\eta(\varepsilon) \le x < 0,\\
            ch(\varepsilon)(G^{\varepsilon}(-x)-2), &  \text{if } 0 < x \le \eta(\varepsilon),\\
            0, &  \text{if } |x| > \eta(\varepsilon),
        \end{cases}
\end{equation}
and let $w^{\varepsilon}$ be a solution of the problem
  \begin{gather*}
   w^{\varepsilon}_t + b^{\varepsilon}w^{\varepsilon}_{x} = 0,
 \quad (t,x) \in \mathbb{R}^2,\; t \ne x \\
            w^{\varepsilon}|_{t = 0} = w_0^{\varepsilon},
 \quad x \in \mathbb{R},\; x \ne 0.
\end{gather*}
We will show below that, for $t \ge 0$,
\[
    w^{\varepsilon}(t,x) =
        \begin{cases}
            0, &  \text{if } x \le -2h(\varepsilon) \text{ or }
 x \ge 2t + 2h(\varepsilon),\\
            cx, &  \text{if } 0 \le x \le t - 2h(\varepsilon),\\
            -cx + 2ct, &  \text{if }t + 2h(\varepsilon) \le x \le 2t.
        \end{cases}
\]
Similarly, we can obtain that, for $t < 0$,
\[
    w^{\varepsilon}(t,x) = 0\quad \text{if }x \le t-2h(\varepsilon)
 \text{ or } x \ge t + 2h(\varepsilon).
\]

The characteristic curve $\gamma^{\varepsilon}(t,x,\tau)$
passing through $(t,x)$ at time $\tau = t$ is the solution
of the problem
\begin{equation}\label{eqn:characteristics}
  \begin{gathered}
            \gamma^{\varepsilon}_{\tau}(t,x,\tau) = b^{\varepsilon}(\tau,\gamma^{\varepsilon}(t,x,\tau)), \\
            \gamma^{\varepsilon}|_{\tau = t} = x.
        \end{gathered}
 \end{equation}
Along the characteristic curves, $w^{\varepsilon}$ is easily
calculated as
\begin{equation}\label{eqn:w^{varepsilon}}
    w^{\varepsilon}(t,x) = w_0^{\varepsilon}(\gamma^{\varepsilon}
(t,x,0)).
\end{equation}
If $x \le -2h(\varepsilon)$ and $t > 0$, then
$\gamma^{\varepsilon}(t,x,0) = x$ and $w_{0}^{\varepsilon}(x) = 0$.
Hence, by \eqref{eqn:w^{varepsilon}}, we have
$w^{\varepsilon}(t,x) = 0$.
If $x \ge 2t + 2h(\varepsilon)$ and $t > 0$, then
$\gamma^{\varepsilon}(t,x,0) = x - 2t$ and
$w_{0}^{\varepsilon}(x-2t) = 0$.
Hence, by \eqref{eqn:w^{varepsilon}}, we get
$w^{\varepsilon}(t,x) = 0$.

We next prove that $w^{\varepsilon}(t,x) = cx$ if $0 \le x \le t -
2h(\varepsilon)$. Fix $(t,x)$ arbitrarily so that $0 \le x \le t -
2h(\varepsilon)$. Let $\widetilde{b}^{\varepsilon}$ be as in Lemma
\ref{lemma:1}. Then, from \eqref{eqn:characteristics}, we see
that $\gamma^{\varepsilon}(t,x,\tau)$ satisfies the equation
$(\gamma^{\varepsilon}(t,x,\tau) - \tau)_{\tau} =
\widetilde{b}^{\varepsilon}(\gamma^{\varepsilon}(t,x,\tau) - \tau)
- 1$. We divide both sides by
$\widetilde{b}^{\varepsilon}(\gamma^{\varepsilon}(t,x,\tau) -
\tau) - 1$ and integrate it over $[0,t]$ to get
\[
    \int_{0}^{t} \frac{(\gamma^{\varepsilon}(t,x,\tau)
- \tau)_{\tau}}{\widetilde{b}^{\varepsilon}(\gamma^{\varepsilon}
(t,x,\tau) - \tau) - 1}\,d\tau = t.
\]
Putting $\gamma = \gamma^{\varepsilon}(t,x,\tau) - \tau$ and
noting that $\gamma^{\varepsilon}(t,x,t) = x$, we have
\begin{equation}\label{eqn:integral}
    \int_{\gamma^{\varepsilon}(t,x,0)}^{x - t}
\frac{d\gamma}{\widetilde{b}^{\varepsilon}(\gamma) - 1} = t.
\end{equation}
Since $x - t \le - 2h(\varepsilon)$ and
$\widetilde{b}^{\varepsilon}(\gamma) = 0$ for
$\gamma \le -2h(\varepsilon)$, it follows that
\[
    \int_{\gamma^{\varepsilon}(t,x,0)}^{-2h(\varepsilon)}
\frac{d\gamma}{\widetilde{b}^{\varepsilon}(\gamma) - 1}
= x + 2h(\varepsilon).
\]
Put $z = -\gamma/h(\varepsilon)$.
Then
\[
    \int_{-\gamma^{\varepsilon}(t,x,0)/h(\varepsilon)}^{2}
 \frac{dz}{1 - \widetilde{b}^{\varepsilon}(-h(\varepsilon)z)}
= \frac{x}{h(\varepsilon)} + 2.
\]
The left-hand side is rewritten as
$G^{\varepsilon}(\gamma^{\varepsilon}(t,x,0))$ and so
$\gamma^{\varepsilon}(t,x,0) = (G^{\varepsilon})^{-1}(x/h(\varepsilon)
 + 2)$.
Therefore, by \eqref{eqn:w_0^{varepsilon}} and
\eqref{eqn:w^{varepsilon}}, we get $w^{\varepsilon}(t,x) = cx$.

We next prove that $w^{\varepsilon}(t,x) = -cx + 2ct$ if
$t + 2h(\varepsilon) \le x \le 2t$.
Fix $(t,x)$ arbitrarily so that $t + 2h(\varepsilon) \le x \le 2t$.
The same argument as above gives \eqref{eqn:integral}.
Since $x - t \ge 2h(\varepsilon)$ and
$\widetilde{b}^{\varepsilon}(\gamma) = 2$ for
$\gamma \ge 2h(\varepsilon)$, we have
\[
    \int_{\gamma^{\varepsilon}(t,x,0)}^{2h(\varepsilon)}
\frac{d\gamma}{\widetilde{b}^{\varepsilon}(\gamma) - 1}
=-x+2t+2h(\varepsilon).
\]
Put $z = \gamma/h(\varepsilon)$.
Then
\[
    \int_{\gamma^{\varepsilon}(t,x,0)/h(\varepsilon)}^{2}
\frac{dz}{\widetilde{b}^{\varepsilon}(h(\varepsilon)z)-1}
= \frac{-x+2t}{h(\varepsilon)} + 2.
\]
The left-hand side is equal to
$G^{\varepsilon}(-\gamma^{\varepsilon}(t,x,0))$,
since $\widetilde{b}^{\varepsilon}(h(\varepsilon)z)-1
= 1-\widetilde{b}^{\varepsilon}(-h(\varepsilon)z)$ for
$z \in \mathbb{R}$.
Hence, $\gamma^{\varepsilon}(t,x,0) = -(G^{\varepsilon})^{-1}((-x+2t)/h(\varepsilon) + 2)$.
Therefore, by \eqref{eqn:w_0^{varepsilon}} and
\eqref{eqn:w^{varepsilon}}, we obtain that
$w^{\varepsilon}(t,x) = -cx+2ct$.

{\bf Step 2}. Fix $T > 0$ arbitrarily.
Then $T/h(\varepsilon) > 2$ for $\varepsilon > 0$ small enough.
For such $\varepsilon > 0$, we choose
$0 < \lambda(\varepsilon) < \eta(\varepsilon)$ such that
$G^{\varepsilon}(-\lambda(\varepsilon)) = T/h(\varepsilon)$, and put
\[
    \overline{w_0^{\varepsilon}}(x):=
        \begin{cases}
            w_0^{\varepsilon}(x), &  \text{if }|x| > \lambda(\varepsilon),\\
            w_0^{\varepsilon}(\lambda(\varepsilon)), &  \text{if }|x| \le \lambda(\varepsilon).
        \end{cases}
\]
Let $\overline{w^{\varepsilon}}$ be a solution of the problem
 \begin{gather*}
            (\overline{w^{\varepsilon}})_t
+ b^{\varepsilon}(\overline{w^{\varepsilon}})_{x} = 0,
 \quad (t,x) \in \mathbb{R}^2, \\
            \overline{w^{\varepsilon}}|_{t = 0}
= \overline{w_0^{\varepsilon}},  \quad x \in \mathbb{R}.
\end{gather*}
Then it is easy to check that, for $t \ge 0$,
\[
    \overline{w^{\varepsilon}}(t,x) =
        \begin{cases}
            0, &  \text{if }x \le -2h(\varepsilon) \text{ or } x \ge 2t + 2h(\varepsilon), \\
            cx, & \text{if }0 \le x \le \min\{t - 2h(\varepsilon),
\gamma^{\varepsilon}(0,-\lambda(\varepsilon),t)\},\\
            -cx + 2ct, &  \text{if }\max\{t + 2h(\varepsilon),
\gamma^{\varepsilon}(0,\lambda(\varepsilon),t)\} \le x \le 2t,
        \end{cases}
\]
and further that, for $t < 0$,
\[
    \overline{w^{\varepsilon}}(t,x) = 0\quad
 \text{if }x \le t-2h(\varepsilon) \text{ or }
  x \ge t + 2h(\varepsilon).
\]

We now prove that $\overline{w^{\varepsilon}}$ converges to $v$
in $\mathscr{D}'((-T,T) \times \mathbb{R})$ as $\varepsilon
\downarrow 0$. Consider the characteristic curve
$\gamma^{\varepsilon}(0,-\lambda(\varepsilon),\tau)$ passing
through $(0,-\lambda(\varepsilon))$ at $\tau = 0$. There exists
$t_1^{\varepsilon} > 0$ such that $t_1^{\varepsilon} =
\gamma^{\varepsilon}(0,-\lambda(\varepsilon),t_1^{\varepsilon}) +
2h(\varepsilon)$. As in Step $1$, we can show that
\[
    t_1^{\varepsilon} = h(\varepsilon)
\int_{\lambda(\varepsilon)/h(\varepsilon)}^{2}
\frac{dz}{1 - \widetilde{b}^{\varepsilon}(-h(\varepsilon)z)}
= h(\varepsilon) G^{\varepsilon}(-\lambda(\varepsilon))
    = T.
\]
Similarly, for the characteristic curve
$\gamma^{\varepsilon}(0,\lambda(\varepsilon),\tau)$
passing through $(0,\lambda(\varepsilon))$ at $\tau = 0$,
there exists $t_2^{\varepsilon} > 0$ such that
$t_2^{\varepsilon} = \gamma^{\varepsilon}(0,\lambda(\varepsilon),
t_2^{\varepsilon}) - 2h(\varepsilon)$.
Moreover, $t_2^{\varepsilon} = T$.
Hence, for any $\psi \in \mathscr{D}((-T,T)\times \mathbb{R})$,
we see that
\begin{equation}
\begin{split}
    & \int_{-T}^{T}\int_{-\infty}^{\infty}
(\overline{w^{\varepsilon}}(t,x) - v(t,x))\psi(t,x)\,dx\,dt  \\
    &  = \int \int_{-2h(\varepsilon) < x < \min\{0,t-2h
 (\varepsilon)\},\, 0 < t < T} (\overline{w^{\varepsilon}}(t,x)
 - v(t,x))\psi(t,x)\,dx\,dt  \\
    & \quad  + \int \int_{t-2h(\varepsilon) < x < t+2h
(\varepsilon),\, -T < t < T} (\overline{w^{\varepsilon}}(t,x)
 - v(t,x))\psi(t,x)\,dx\,dt   \\
    & \quad  + \int \int_{\max\{2t,t+2h(\varepsilon)\} < x
< 2t+2h(\varepsilon),\, 0 < t < T} (\overline{w^{\varepsilon}}(t,x)
- v(t,x))\psi(t,x)\,dx\,dt.
\end{split} \label{eqn:integral2}
\end{equation}
The area of $\{(t,x) \in \mathbb{R}^2 \mid -2h(\varepsilon) < x <
\min\{0,t-2h(\varepsilon)\}\} \cap \operatorname{supp}\psi$ converges to
$0$ as $\varepsilon \downarrow 0$. Moreover,
$(\overline{w^{\varepsilon}} - v)_{\varepsilon \in (0,1]}$ is
uniformly bounded on this intersection. Hence, the first integral
on the right-hand side of \eqref{eqn:integral2} converges to $0$
as $\varepsilon \downarrow 0$. We can similarly show that the
second and third integrals converge to $0$ as $\varepsilon
\downarrow 0$. Thus, $\overline{w^{\varepsilon}}$ converges to
$v$ in $\mathscr{D}'((-T,T) \times \mathbb{R})$ as $\varepsilon
\downarrow 0$.

{\bf Step 3}. Finally, we construct
$(v_0^{\varepsilon})_{\varepsilon \in (0,1]} \in
{\mathscr{E}}_{M}(\mathbb{R})$ such that $(v_0^{\varepsilon})'$
converges to $0$ in $\mathscr{D}'(\mathbb{R})$ as $\varepsilon
\downarrow 0$, and further that the solution $v^{\varepsilon}$ of
the problem
\begin{equation}\label{eqn:equation for v^{varepsilon}0}
    \begin{gathered}
            v^{\varepsilon}_t + b^{\varepsilon}v^{\varepsilon}_{x} = 0,
 \quad (t,x) \in \mathbb{R}^2, \\
            v^{\varepsilon}|_{t = 0} = v_0^{\varepsilon},
 \quad x \in \mathbb{R}
        \end{gathered}
 \end{equation}
converges to $v$ in $\mathscr{D}'((-T,T)\times\mathbb{R})$ as
$\varepsilon \downarrow 0$. The existence of such
$(v_0^{\varepsilon})_{\varepsilon \in (0,1]}$ implies the
existence of initial data $V_0 \in \mathscr{G}(\mathbb{R})$
satisfying the desired properties that $V_0' \approx 0$ on
$\mathbb{R}$ and $V \approx v$ on $(-T,T) \times \mathbb{R}$.

Let $\chi \in \mathscr{D}(\mathbb{R})$ be as in Remark
\ref{rem:strength}. Define the function $v_0^{\varepsilon}(x):=
(\overline{w_0^{\varepsilon}} \ast
\chi_{\lambda(\varepsilon)})(x)$. We have $\sup_{x \in
\mathbb{R}}|\overline{w_0^{\varepsilon}}(x)| =
|w_0^{\varepsilon}(\lambda(\varepsilon))| = cT -
2ch(\varepsilon)$. Furthermore, by inequality
\eqref{eqn:inequality for (G^{varepsilon})^{-1}}, there exist
two constants $C_1$, $C_2 > 0$ such that
$2h(\varepsilon)\varepsilon^{T/C_1} \le \lambda(\varepsilon) \le
2h(\varepsilon)\varepsilon^{T/C_2}$. Hence, we see that the family
of $v_0^{\varepsilon}$ defines an element of
${\mathscr{E}}_{M}(\mathbb{R})$, and further that
$(v_0^{\varepsilon})'$ converges to $0$ in
$\mathscr{D}'(\mathbb{R})$ as $\varepsilon \downarrow 0$.

To show that the solution $v^{\varepsilon}$ of problem
\eqref{eqn:equation for v^{varepsilon}0} converges to $v$ in
$\mathscr{D}'((-T,T)\times\mathbb{R})$ as $\varepsilon \downarrow
0$, it suffices to prove that $v_0^{\varepsilon} -
\overline{w_0^{\varepsilon}}$ converges uniformly to $0$ on any
compact subset of $\mathbb{R}$ as $\varepsilon \downarrow 0$. The
difference $v_0^{\varepsilon}(x) -
\overline{w_0^{\varepsilon}}(x)$ satisfies the inequality
\[
    \big|v_0^{\varepsilon}(x) - \overline{w_0^{\varepsilon}}(x)\big|
    \le \int_{-\infty}^{\infty}|\overline{w_0^{\varepsilon}}
(x-\lambda(\varepsilon) y) - \overline{w_0^{\varepsilon}}(x)|\chi(y)\,dy.
\]
Moreover,
\[
    |\overline{w_0^{\varepsilon}}(x-\lambda(\varepsilon) y) - \overline{w_0^{\varepsilon}}(x)|
    \le \sup_{-\eta(\varepsilon) \le \xi \le -\lambda(\varepsilon)}|(\overline{w_0^{\varepsilon}})'(\xi)| \lambda(\varepsilon) |y|
    = \frac{c}{1-\widetilde{b}^{\varepsilon}(-\lambda(\varepsilon))} \lambda(\varepsilon)|y|.
\]
As in the proof of Lemma \ref{lemma:1}, we have
$1/(1-\widetilde{b}^{\varepsilon}(-\lambda(\varepsilon)))
\le C_2h(\varepsilon)/\lambda(\varepsilon)$ for some
constant $C_2 > 0$.
Thus, we get
\[
    \left|v_0^{\varepsilon}(x) - \overline{w_0^{\varepsilon}}(x)\right|
    \le cC_2\int_{-\infty}^{\infty} |y|\chi(y)\,dy \cdot h(\varepsilon) \to 0 \quad \text{as }\varepsilon \downarrow 0.
\]
The proof of Theorem \ref{thm:1} is now complete.
\end{proof}

\begin{remark} \rm
In Theorem \ref{thm:1}, for $t < 0$, the solution $U \in
\mathscr{G}(\mathbb{R}^2)$ admits $0$ as distributional shadow,
which is the unique distributional solution of problem
\eqref{eqn:hyperbolic1} for negative time with $0$ initial data,
see Hurd and Sattinger \cite{hurd}.
\end{remark}

\begin{remark} \rm
Theorem \ref{thm:1} means that, in the setting of Colombeau's
theory, all distributional solutions $u_c$ with initial data $0$
can be regarded as generalized solutions with different initial
data.
\end{remark}

\begin{remark} \rm
A similar result to Theorem \ref{thm:1} does not necessarily hold
for other differential equations having non-unique distributional
solutions. In fact, there exists an ordinary differential equation
having a classical solution with which none of its generalized
solutions is associated. For details, see \cite{deguchi1}.
\end{remark}


\section{Propagation of singularities}\label{sec:5}

In this section we study the propagation of singularities for
problem \eqref{eqn:generalized hyperbolic1}. The coefficient $B$
in problem \eqref{eqn:generalized hyperbolic1}  is
$\mathscr{G}^{\infty}$-regular, since $B$ is an element of
$\mathscr{G}_{\rm log}(\mathbb{R}^2) \subset
\mathscr{G}^{\infty}(\mathbb{R}^2)$. Hence, the subalgebra
$\mathscr{G}_{\rm log}^{\infty} \subset \mathscr{G}_{\rm log}$ is
suitable to study the propagation of singularities for problem
\eqref{eqn:generalized hyperbolic1}. However, we are also
interested in the propagation of singularities in
$\mathscr{G}^{\infty}$, since $U_0 \in
\mathscr{G}^{\infty}(\mathbb{R})$ does not necessarily imply that
$U \in \mathscr{G}^{\infty}(\mathbb{R}^2)$. Thus, we discuss the
propagation of singularities in both $\mathscr{G}_{\rm
log}^{\infty}$ and $\mathscr{G}^{\infty}$ for problem
\eqref{eqn:generalized hyperbolic1}.

Let $\chi \in \mathscr{D}(\mathbb{R})$ be as in Remark
\ref{rem:strength}. Assume that $U_0 \in \mathscr{G}(\mathbb{R})$
is given by the class of $(\chi_{h(\varepsilon)})_{\varepsilon \in
(0,1]}$. Then $U_0$ is a Dirac generalized function at $0$ and
belongs to $\mathscr{G}^{\infty}(\mathbb{R}) \setminus
\mathscr{G}_{\rm log}^{\infty}(\mathbb{R})$. As may be seen in the
following theorem, the singularity in
$\mathscr{G}_{\rm log}^{\infty}$ of the initial data $U_0$ splits in
two directions at the origin due to the discontinuity of the
coefficient.

\begin{figure}[ht] 
\begin{center}
\setlength{\unitlength}{1mm}
\begin{picture}(70,60)(0,0)
\put(0,30){\line(1,0){60}}
\put(59.8,29.2){$\rightarrow$}
\put(28.1,26){$0$}
\put(61,26){$x$}

\put(30,0){\line(0,1){60}}
\put(29.2,60){$\uparrow$}
\put(26,60){$t$}
\put(15,40){$\delta(x)/2$}
\put(24,45){$\nearrow$}

\put(10,50){$0$}
\put(42,50){$0$}
\put(10,17){$0$}
\put(42,17){$0$}

\put(0,0){\line(1,1){30}}
\put(13,5){$\delta(x-t)$}
\put(14,9){$\nwarrow$}

\dottedline{1}(30,30)(60,60)
\put(60,55){$t=x$}

\put(30,30){\line(2,1){30}}
\put(60,41){$t=x/2$}
\put(50,33){$\delta(x-2t)/2$}
\put(52,37){$\nwarrow$}
\end{picture}
\end{center}
\caption{Distributional shadow}\label{fig1}
\end{figure}

\begin{theorem}\label{thm:2}
Let $U_0 \in \mathscr{G}(\mathbb{R})$ be as above.
Then the solution $U \in \mathscr{G}(\mathbb{R}^2)$ of
problem \eqref{eqn:generalized hyperbolic1}
 admits a distributional shadow, which is given by
\[
    u(t,x) =
        \begin{cases}
            \frac{\delta(x) + \delta(x-2t)}{2}, &  \text{if }t \ge 0,\; x \in \mathbb{R},\\
            \delta(x-t), &  \text{if }t < 0,\; x \in \mathbb{R}.
        \end{cases}
\]
Furthermore,
\[
    \operatorname{sing\,supp}_{\mathscr{G}_{\rm log}^{\infty}} U
= \{(t,0) \mid t \ge 0\} \cup \{(t,2t) \mid t \ge 0\}
\cup \{(t,t) \mid t \le 0\} \ (= \operatorname{sing\,supp}\, u).
\]
\end{theorem}

\begin{proof}
Let $v_0^{\varepsilon} = H \ast \chi_{h(\varepsilon)}$ and
let $V_0 \in \mathscr{G}(\mathbb{R})$ be given by the class
of $(v_0^{\varepsilon})_{\varepsilon \in (0,1]}$.
In order to prove the first assertion, it suffices to show that
the solution $V \in \mathscr{G}(\mathbb{R}^2)$ of
problem \eqref{eqn:generalized hyperbolic2} admits a distributional
shadow, which is given by
\[
    v(t,x) =
        \begin{cases}
            \frac{H(x) + H(x-2t)}{2}, &  \text{if }t \ge 0,\;
 x \in \mathbb{R}, \\
            H(x-t), &  \text{if }t < 0,\; x \in \mathbb{R}.
        \end{cases}
\]

Let $(v^{\varepsilon})_{\varepsilon \in (0,1]}$ be a representative
of $V \in \mathscr{G}(\mathbb{R}^2)$ satisfying
\begin{equation}\label{eqn:equation for v^{varepsilon}}
        \begin{gathered}
            v^{\varepsilon}_t + b^{\varepsilon}v^{\varepsilon}_{x} = 0,
 \quad (t,x) \in \mathbb{R}^2, \\
            v^{\varepsilon}|_{t = 0} = v_0^{\varepsilon},
 \quad x \in \mathbb{R}.
        \end{gathered}
 \end{equation}
Consider the characteristic curve $\gamma^{\varepsilon}(0,x,t)$
passing through $(0,x)$ at time $t = 0$.
Along the characteristic curves, we have
$v^{\varepsilon}(t,\gamma^{\varepsilon}(0,x,t)) = v_0^{\varepsilon}(x)$.
We can easily check that $v^{\varepsilon}$ converges to $H(x-t)$
a.e. in $(-\infty,0) \times \mathbb{R}$ as $\varepsilon \downarrow 0$.

We now fix $0 < a \le 1$ arbitrarily, and put $t_1^{\varepsilon}:=
\gamma^{\varepsilon}(0,-ah(\varepsilon),t_1^{\varepsilon}) +
2h(\varepsilon)$. As in Step $1$ of the proof of Theorem
\ref{thm:1}, we have
\[
    t_1^{\varepsilon}
    = h(\varepsilon)\int_{a}^{2}
\frac{dz}{1 - \widetilde{b}^{\varepsilon}(-h(\varepsilon)z)}
    = h(\varepsilon)\int_{a}^{2}
\frac{dz}{1 - 2\int\int_{s > y + z}\varphi(s,y)\,dy\,ds}
\to 0 \quad \text{as}\ \varepsilon \downarrow 0.
\]
Note that, for any $t \ge t_1^{\varepsilon}$,
$\gamma^{\varepsilon}(0,-ah(\varepsilon),t)
 = \gamma^{\varepsilon}(0,-ah(\varepsilon),t_1^{\varepsilon})$.
Hence, for any $t \ge 0$, we have
$\gamma^{\varepsilon}(0,-ah(\varepsilon),t) \to 0$ as
$\varepsilon \downarrow 0$.
Moreover,
\[
    v^{\varepsilon}(t,\gamma^{\varepsilon}(0,-ah(\varepsilon),t))
= v_0^{\varepsilon}(-ah(\varepsilon)) = \int_{-\infty}^{-a}\chi(y)\,dy,
\]
so that $v^{\varepsilon}(t,\gamma^{\varepsilon}(0,-h(\varepsilon),t))
= 0$ and $v^{\varepsilon}(t,\gamma^{\varepsilon}(0,-ah(\varepsilon),t))
 \uparrow 1/2$ as $a \downarrow 0$.
Similarly, we take
$t_2^{\varepsilon}:= \gamma^{\varepsilon}(0,ah(\varepsilon),
t_2^{\varepsilon}) - 2h(\varepsilon)$ to get
\[
    t_2^{\varepsilon}
    = h(\varepsilon)\int_{a}^{2} \frac{dz}{\widetilde{b}^{\varepsilon}(h(\varepsilon)z) - 1}
    = h(\varepsilon)\int_{a}^{2} \frac{dz}{2\int\int_{s > y - z}\varphi(s,y)\,dy\,ds - 1} \to 0 \quad \text{as}\ \varepsilon \downarrow 0.
\]
Note that, for any $t \ge t_2^{\varepsilon}$,
$\gamma^{\varepsilon}(0,ah(\varepsilon),t)
= 2t - \gamma^{\varepsilon}(0,ah(\varepsilon),t_2^{\varepsilon})
+4h(\varepsilon)$.
Therefore, for any $t \ge 0$,
$\gamma^{\varepsilon}(0,ah(\varepsilon),t) \to 2t$ as
$\varepsilon \downarrow 0$.
Moreover,
\begin{align*}
    v^{\varepsilon}(t,\gamma^{\varepsilon}(0,ah(\varepsilon),t))
 = v_0^{\varepsilon}(ah(\varepsilon)) = \int_{-\infty}^{a}\chi(y)\,dy,
\end{align*}
so that $v^{\varepsilon}(t,\gamma^{\varepsilon}(0,h(\varepsilon),t))
= 1$ and $v^{\varepsilon}(t,\gamma^{\varepsilon}(0,ah(\varepsilon),t)) \downarrow 1/2$ as $a \downarrow 0$.
Hence, taking into account the fact that $v^{\varepsilon}(t,x)$
is non-decreasing in $x$, we obtain that $v^{\varepsilon}$
converges to $(H(x) + H(x-2t))/2$ a.e. in
$(0,\infty) \times \mathbb{R}$ as $\varepsilon \downarrow 0$.
Thus, the first assertion follows.

Next, we prove the second assertion.
The proof is divided into four steps.

{\bf Step 1}. First, we prove that $U \in \mathscr{G}(\mathbb{R}^2)$
is $\mathscr{G}_{\rm log}^{\infty}$-regular on
$\{(t,x) \in \mathbb{R}^2 \mid x < \min\{0,t\}\} \cup \{(t,x)
\in \mathbb{R}^2 \mid x > \max\{t,2t\}\}$.

It is easy to check that $V \in \mathscr{G}(\mathbb{R}^2)$ equals
$0$ on $\{(t,x) \in \mathbb{R}^2 \mid x < \min\{0,t\}\}$ and further
equals $1$ on $\{(t,x) \in \mathbb{R}^2 \mid x > \max\{t,2t\}\}$.
Hence, $U = V_x \in \mathscr{G}(\mathbb{R}^2)$ is
$\mathscr{G}_{\rm log}^{\infty}$-regular on the union of these
two sets.

{\bf Step 2}. Secondly, we prove that $\{(t,t) \mid t \le 0\}$
is contained in $\operatorname{sing\,supp}_{\mathscr{G}_{\rm log}^{\infty}} U$.

Fix $t < 0$ arbitrarily.
We see that $v^{\varepsilon}(t,x) = v_0^{\varepsilon}(\gamma^{\varepsilon}(t,x,0))$.
Since $v_0^{\varepsilon} = H \ast \chi_{h(\varepsilon)}$, we get
\begin{equation}\label{eqn:step 2-1}
    v_x^{\varepsilon}(t,x)
    = (v_0^{\varepsilon})'(\gamma^{\varepsilon}(t,x,0)) \gamma^{\varepsilon}_x(t,x,0)
    = \frac{1}{h(\varepsilon)} \chi\left(\frac{\gamma^{\varepsilon}(t,x,0)}{h(\varepsilon)}\right) \gamma^{\varepsilon}_x(t,x,0).
\end{equation}
From problem \eqref{eqn:characteristics} and the definition of
$\widetilde{b}^{\varepsilon}$, we find that
$\gamma^{\varepsilon}(t,x,\tau)$ satisfies the problem
\begin{equation}\label{eqn:characteristics'}
        \begin{gathered}
            \gamma_{\tau}^{\varepsilon}(t,x,\tau) = \widetilde{b}^{\varepsilon}(\gamma^{\varepsilon}(t,x,\tau)-\tau), \\
            \gamma^{\varepsilon}|_{\tau = t} = x.
        \end{gathered}
\end{equation}
We differentiate these equations in $x$ to get
 \begin{gather*}
            \gamma_{\tau x}^{\varepsilon}(t,x,\tau) = (\widetilde{b}^{\varepsilon})'(\gamma^{\varepsilon}(t,x,\tau)-\tau) \gamma_{x}^{\varepsilon}(t,x,\tau), \\
            \gamma_x^{\varepsilon}|_{\tau = t} = 1.
\end{gather*}
We divide the first equation by $\gamma_x^{\varepsilon}(t,x,\tau)$
and integrate it over $[t,0]$ to see that
\[
    \int_t^0 \frac{\gamma_{\tau x}^{\varepsilon}(t,x,\tau)}{\gamma_{x}^{\varepsilon}(t,x,\tau)}\,d\tau = \int_t^0 (\widetilde{b}^{\varepsilon})'(\gamma^{\varepsilon}(t,x,\tau)-\tau)\,d\tau.
\]
A simple calculation shows that
\[
    \gamma_x^{\varepsilon}(t,x,0)
= \exp\Big(\int_t^0 (\widetilde{b}^{\varepsilon})'
(\gamma^{\varepsilon}(t,x,\tau)-\tau)\,d\tau\Big).
\]
Since $\gamma^{\varepsilon}(t,t,\tau) = \tau$, we see that
\[
    \gamma_x^{\varepsilon}(t,t,0)
= \exp\Big(\int_t^0 (\widetilde{b}^{\varepsilon})'(0)\,d\tau\Big)
= \exp\Big(-(\widetilde{b}^{\varepsilon})'(0)t\Big).
\]
By the definition of $\widetilde{b}^{\varepsilon}$, we have
$(\widetilde{b}^{\varepsilon})'(0) = 2\int_{-1}^{1}
\varphi(s,s)\,ds/h(\varepsilon)$. Hence, noting that
$h(\varepsilon) = 1/\log (1/\varepsilon)$, we see that
\begin{equation}\label{eqn:step 2-2}
    \gamma_x^{\varepsilon}(t,t,0)
= \exp\Big(\frac{2}{h(\varepsilon)}\int_{-1}^{1} \varphi(s,s)\,ds
 \cdot (-t)\Big) = \big(\frac{1}{\varepsilon}\big)^{2\int_{-1}^{1}
\varphi(s,s)\,ds \cdot (-t)}.
\end{equation}
Combining equations \eqref{eqn:step 2-1} and \eqref{eqn:step 2-2},
we obtain that, for $\varepsilon > 0$ small enough,
\[
    v_x^{\varepsilon}(t,t)
    = \frac{1}{h(\varepsilon)} \chi(0) \big(\frac{1}{\varepsilon}\big)^{2\int_{-1}^{1} \varphi(s,s)\,ds \cdot (-t)}
    \ge \chi(0) \big(\frac{1}{\varepsilon}\big)^{2\int_{-1}^{1} \varphi(s,s)\,ds \cdot (-t)}.
\]
Since $U = V_x \in \mathscr{G}(\mathbb{R}^2)$, this shows that
$\{(t,t) \mid t \le 0\} \subset
\operatorname{sing\,supp}_{\mathscr{G}_{\rm log}^{\infty}} U$.

{\bf Step 3}. Thirdly, we prove that $\{(t,0) \mid t \ge 0\}$ and
$\{(t,2t) \mid t \ge 0\}$ are contained in
$\operatorname{sing\,supp}_{\mathscr{G}_{\rm log}^{\infty}} U$.

Put $t_1^{\varepsilon} = \gamma^{\varepsilon}
(0,-ah(\varepsilon),t_1^{\varepsilon}) + 2h(\varepsilon)$.
Then as shown above, $t_1^{\varepsilon} \downarrow 0$ as
$\varepsilon \downarrow 0$.
For $t \ge t_1^{\varepsilon}$, consider
\[
    \frac{v^{\varepsilon}(t,\gamma^{\varepsilon}(0,-ah(\varepsilon),t)) - v^{\varepsilon}(t,\gamma^{\varepsilon}(0,-2h(\varepsilon),t))}{\gamma^{\varepsilon}(0,-ah(\varepsilon),t) - \gamma^{\varepsilon}(0,-2h(\varepsilon),t)},
\]
where $0 < a < 1$ is a constant such that
$\int_{-\infty}^{-a} \chi(y)\,dy > 0$.
As shown above, we have
\[
    \gamma^{\varepsilon}(0,-ah(\varepsilon),t)
    = \gamma^{\varepsilon}(0,-ah(\varepsilon),t_1^{\varepsilon})
= h(\varepsilon)\int_{a}^{2} \frac{dz}{1 - \widetilde{b}^{\varepsilon}
(-h(\varepsilon)z)} - 2h(\varepsilon).
\]
Since $\gamma^{\varepsilon}(0,-2h(\varepsilon),t) = -2h(\varepsilon)$,
we get
\[
    0 < \gamma^{\varepsilon}(0,-ah(\varepsilon),t)
- \gamma^{\varepsilon}(0,-2h(\varepsilon),t)
    = h(\varepsilon)\int_{a}^{2}
\frac{dz}{1 - \widetilde{b}^{\varepsilon}(-h(\varepsilon)z)}.
\]
Furthermore,
\begin{gather*}
     v^{\varepsilon}(t,\gamma^{\varepsilon}(0,-ah(\varepsilon),t)) = \int_{-\infty}^{-a} \chi(y)\,dy > 0,\\
     v^{\varepsilon}(t,\gamma^{\varepsilon}(0,-2h(\varepsilon),t)) = 0.
\end{gather*}
Therefore,
\[
    \frac{v^{\varepsilon}(t,\gamma^{\varepsilon}(0,-ah(\varepsilon),t)) - v^{\varepsilon}(t,\gamma^{\varepsilon}(0,-2h(\varepsilon),t))}
{\gamma^{\varepsilon}(0,-ah(\varepsilon),t)
- \gamma^{\varepsilon}(0,-2h(\varepsilon),t)}
    = \frac{\int_{-\infty}^{-a} \chi(y)\,dy}{\int_{a}^{2}
\frac{dz}{1 - \widetilde{b}^{\varepsilon}(-h(\varepsilon)z)}}
\cdot \frac{1}{h(\varepsilon)}.
\]
By the mean value theorem, there exists $x_1^{\varepsilon}
\in (\gamma^{\varepsilon}(0,-2h(\varepsilon),t), \gamma^{\varepsilon}(0,-ah(\varepsilon),t))$ such that
\[
    v^{\varepsilon}_x(t,x_1^{\varepsilon})
    = \frac{\int_{-\infty}^{-a} \chi(y)\,dy}{\int_{a}^{2}
\frac{dz}{1 - \widetilde{b}^{\varepsilon}(-h(\varepsilon)z)}}
\cdot \frac{1}{h(\varepsilon)}.
\]
Note that $\partial_x^{\alpha} v^{\varepsilon}(t,\gamma^{\varepsilon}
(0,-2h(\varepsilon),t)) = 0$ for $\alpha \in \mathbb{N}$.
Then, repeating the above process gives us
$(x^{\varepsilon}_{\alpha})_{\alpha \ge 2}$ such that
$x^{\varepsilon}_{\alpha} \in (\gamma^{\varepsilon}(0,-2h
(\varepsilon),t), x^{\varepsilon}_{\alpha-1})$ and
\begin{align*}
    \partial^{\alpha}_x v^{\varepsilon}(t,x_{\alpha}^{\varepsilon})
    & = \frac{\partial^{\alpha-1}_x v^{\varepsilon}
(t,x^{\varepsilon}_{\alpha-1}) - \partial^{\alpha-1}_x
 v^{\varepsilon}(t,\gamma^{\varepsilon}(0,
-2h(\varepsilon),t))}{x_{\alpha - 1}^{\varepsilon}
- \gamma^{\varepsilon}(0,-2h(\varepsilon),t)} \\
    & \ge \frac{\int_{-\infty}^{-a} \chi(y)\,dy}
{\big(\int_{a}^{2} \frac{dz}{1 - \widetilde{b}^{\varepsilon}
(-h(\varepsilon)z)}\big)^{\alpha}}
\cdot \frac{1}{h(\varepsilon)^{\alpha}}.
\end{align*}
Since $U = V_x \in \mathscr{G}(\mathbb{R}^2)$, this shows that
$\{(t,0) \mid t \ge 0\} \subset
\operatorname{sing\,supp}_{\mathscr{G}_{\rm log}^{\infty}} U$.
In a similar way, we can show that
$\{(t,2t) \mid t \ge 0\} \subset
\operatorname{sing\,supp}_{\mathscr{G}_{\rm log}^{\infty}} U$.

{\bf Step 4}. Fourthly, we prove that
$U \in \mathscr{G}(\mathbb{R}^2)$ is
$\mathscr{G}_{\rm log}^{\infty}$-regular on $\{(t,x)
\in \mathbb{R}^2 \mid 0 < x < 2t\}$.

{\bf Step 4-1}. To do so, we first estimate
$\gamma^{\varepsilon}(t,x,0)$ for all $(t,x)$ such
that $0 \le x \le 2t$.

When $0 \le x \le t-2h(\varepsilon)$, as seen in Step 1 of the
proof of Theorem \ref{thm:1}, $\gamma^{\varepsilon}(t,x,0) =
(G^{\varepsilon})^{-1}(x/h(\varepsilon) + 2)$. By inequality
\eqref{eqn:inequality for (G^{varepsilon})^{-1}}, there exists a
constant $C_2 > 0$ such that
\begin{equation}\label{eqn:step 4-1}
    0 < -\gamma^{\varepsilon}(t,x,0)
\le 2\exp\big(-\frac{2}{C_2}\big)h(\varepsilon) \varepsilon^{x/C_2}.
\end{equation}

When $t + 2h(\varepsilon) \le x \le 2t$, we have
$\gamma^{\varepsilon}(t,x,0) = -(G^{\varepsilon})^{-1}((2t-x)/h(\varepsilon) + 2)$.
Hence, by \eqref{eqn:inequality for (G^{varepsilon})^{-1}}, we have
\[
    0 < \gamma^{\varepsilon}(t,x,0) \le 2\exp\big(-\frac{2}{C_2}\big)h(\varepsilon) \varepsilon^{(2t-x)/C_2}.
\]

When $t-2h(\varepsilon) \le x < t$, we get
\[
    \int_{-\gamma^{\varepsilon}(t,x,0)/h(\varepsilon)}^{(t-x)
/h(\varepsilon)} \frac{dz}{1-\widetilde{b}^{\varepsilon}
(-h(\varepsilon)z)} = \frac{t}{h(\varepsilon)}.
\]
As seen from the proof of Lemma \ref{lemma:1}, we have
$1/(1-\widetilde{b}^{\varepsilon}(-h(\varepsilon)z)) \le C_2/z$
for $0 \le z \le 2$ and so
\[
    \frac{t}{h(\varepsilon)}
    \le \int_{-\gamma^{\varepsilon}(t,x,0)/h(\varepsilon)}^{(t-x)
/h(\varepsilon)} \frac{C_2}{z}\,dz
= C_2 \log \frac{t-x}{-\gamma^{\varepsilon}(t,x,0)}.
\]
Since $h(\varepsilon)=1/\log(1/\varepsilon)$, it follows that
\begin{equation}\label{eqn:step 4-3}
    0 < -\gamma^{\varepsilon}(t,x,0) \le (t-x)\varepsilon^{t/C_2}.
\end{equation}

When $t < x \le t + 2h(\varepsilon)$, we get
\[
    \int_{\gamma^{\varepsilon}(t,x,0)/h(\varepsilon)}^{(x-t)/h(\varepsilon)} \frac{dz}{1-\widetilde{b}^{\varepsilon}(-h(\varepsilon)z)} = \frac{t}{h(\varepsilon)},
\]
and so
\[
    0 < \gamma^{\varepsilon}(t,x,0) \le (x-t)\varepsilon^{t/C_2}.
\]

When $t = x$, we have
$\gamma^{\varepsilon}(t,t,0) = 0$.

{\bf Step 4-2}. We next estimate $\gamma^{\varepsilon}_x(t,x,0)$.
When $0 \le x \le t-2h(\varepsilon)$, we have
$\gamma^{\varepsilon}(t,x,0) = (G^{\varepsilon})^{-1}(x/h(\varepsilon) + 2)$.
Hence, as in the proof of Lemma \ref{lemma:1}, we get, for
some constant $c_2 > 0$,
\[
    \gamma^{\varepsilon}_x(t,x,0)
    = 1 - \widetilde{b}^{\varepsilon}(\gamma^{\varepsilon}(t,x,0))
    \le c_2 \frac{|\gamma^{\varepsilon}(t,x,0)|}{h(\varepsilon)} \le 2c_2\exp\big(-\frac{2}{C_2}\big) \varepsilon^{x/C_2},
\]
where we used formula
\eqref{eqn:differentiation of (G^{varepsilon})^{-1}}
 in the first step and inequality \eqref{eqn:step 4-1}
 in the last step.

When $t + 2h(\varepsilon) \le x \le 2t$, $\gamma^{\varepsilon}(t,x,0)
= -(G^{\varepsilon})^{-1}((2t-x)/h(\varepsilon) + 2)$.
Similarly, we get
\[
    \gamma^{\varepsilon}_x(t,x,0)
    = 1 - \widetilde{b}^{\varepsilon}(-\gamma^{\varepsilon}(t,x,0))
\le 2c_2\exp\big(-\frac{2}{C_2}\big) \varepsilon^{(2t-x)/C_2}.
\]

When $t-2h(\varepsilon) \le x < t$, we have
$\gamma^{\varepsilon}(t,x,0) = (G^{\varepsilon})^{-1}(t/h(\varepsilon)
+ G^{\varepsilon}(x-t))$.
Differentiating this in $x$ gives
\begin{equation}\label{eqn:differentiation}
    \gamma_x^{\varepsilon}(t,x,0)
    = \frac{1 - \widetilde{b}^{\varepsilon}
(\gamma^{\varepsilon}(t,x,0))}{1 - \widetilde{b}^{\varepsilon}(x-t)}.
\end{equation}
The numerator of \eqref{eqn:differentiation}
 can be estimated as follows:
\[
    1 - \widetilde{b}^{\varepsilon}(\gamma^{\varepsilon}(t,x,0))
    \le c_2 \frac{|\gamma^{\varepsilon}(t,x,0)|}{h(\varepsilon)}
    \le c_2 \frac{(t-x) \varepsilon^{t/C_2}}{h(\varepsilon)},
\]
where we used inequality \eqref{eqn:step 4-3} in the last step.
Similarly, the denominator of \eqref{eqn:differentiation} is estimated as follows: for some constant $c_1 > 0$, we have $1 - \widetilde{b}^{\varepsilon}(x-t) \ge c_1(t-x)/h(\varepsilon)$.
Hence,
\[
    0 < \gamma_x^{\varepsilon}(t,x,0) \le \frac{c_2}{c_1}\varepsilon^{t/C_2}.
\]

When $t < x \le t + 2h(\varepsilon)$, we have $\gamma^{\varepsilon}(t,x,0) = -(G^{\varepsilon})^{-1}(t/h(\varepsilon) + G^{\varepsilon}(t-x))$ and so get
\[
    0 < \gamma_x^{\varepsilon}(t,x,0) \le \frac{c_2}{c_1}\varepsilon^{t/C_2}.
\]

To estimate $\gamma_x^{\varepsilon}(t,t,0)$, we consider
problem \eqref{eqn:characteristics'}.
As in Step 2, we can derive that
\begin{equation}\label{eqn:x-derivative of gamma}
    \gamma_x^{\varepsilon}(t,x,s)
= \exp\Big(-\int_s^t (\widetilde{b}^{\varepsilon})'
(\gamma^{\varepsilon}(t,x,\tau)-\tau)\,d\tau\Big).
\end{equation}
Note that $\gamma^{\varepsilon}(t,t,\tau) = \tau$ and
$(\widetilde{b}^{\varepsilon})'(0) = 2\int_{-1}^{1}
\varphi(s,s)\,ds/h(\varepsilon)$. Hence,
\[
    \gamma_x^{\varepsilon}(t,t,0)
= \varepsilon^{(2\int_{-1}^{1} \varphi(s,s)\,ds)t}.
\]

{\bf Step 4-3}. Finally, we prove that, for all
$K \Subset \{(t,x) \in \mathbb{R}^2 \mid 0 < x < 2t\}$
and $\alpha \in \mathbb{N}_0^2$,
\begin{equation}\label{eqn:estimate for v}
    \|\partial^{\alpha}v_x^{\varepsilon}(t,x)\|_{L^{\infty}(K)}
\to 0 \quad \text{as }\varepsilon \downarrow 0.
\end{equation}
This implies that $U = V_x \in \mathscr{G}(\mathbb{R}^2)$ is
$\mathscr{G}_{\rm log}^{\infty}$-regular on $\{(t,x)
\in \mathbb{R}^2 \mid 0 < x < 2t\}$.

Note that
\[
    v_x^{\varepsilon}(t,x)
    = \chi\Big(\frac{\gamma^{\varepsilon}(t,x,0)}{h(\varepsilon)}\Big) \frac{\gamma_x^{\varepsilon}(t,x,0)}{h(\varepsilon)}.
\]
Hence, to prove \eqref{eqn:estimate for v}, it suffices to show that,
 for all $K \Subset \{(t,x) \in \mathbb{R}^2 \mid 0 < x < 2t\}$
and $\alpha \in \mathbb{N}^2$,
\begin{equation}\label{eqn:estimate for gamma}
    \frac{\|\partial^{\alpha}\gamma^{\varepsilon}(t,x,0)
\|_{L^{\infty}(K)}}{h(\varepsilon)} \to 0 \quad \text{as }
\varepsilon \downarrow 0.
\end{equation}
Since $(\widetilde{b}^{\varepsilon})'(z) \ge 0$ for $z \in
\mathbb{R}$, we see from \eqref{eqn:x-derivative of gamma} that,
for $0 \le s \le t$,
\begin{equation}\label{eqn:x-derivative of gamma1}
    0 < \gamma_x^{\varepsilon}(t,x,s) \le 1.
\end{equation}
From \eqref{eqn:x-derivative of gamma} again, we find that
\begin{align*}
    \gamma_{xx}^{\varepsilon}(t,x,s)
    & = -\gamma_x^{\varepsilon}(t,x,s) \int_s^t (\widetilde{b}^{\varepsilon})''(\gamma^{\varepsilon}(t,x,\tau)-\tau) \gamma_x^{\varepsilon}(t,x,\tau)\,d\tau.
\end{align*}
We see that $\|(\widetilde{b}^{\varepsilon})''\|_{L^{\infty}(\mathbb{R})}
 \le C''/h(\varepsilon)^2$ for some constant $C'' > 0$.
In view of this inequality and \eqref{eqn:x-derivative of gamma1},
we get $|\gamma_{xx}^{\varepsilon}(t,x,s)| \le C''(t-s)/h(\varepsilon)^2$ for $0 \le s \le t$.
Furthermore, if $s = 0$, then from Step 4-2, we see that
\[
    \frac{|\gamma_{xx}^{\varepsilon}(t,x,0)|}{h(\varepsilon)}
\le C''\frac{|\gamma_x^{\varepsilon}(t,x,0)|t}{h(\varepsilon)^{3}}
\to 0 \quad \text{on } K \quad \text{as }\varepsilon \downarrow 0.
\]
By repeating this process, we obtain that a similar estimate holds
for any derivative of $\gamma^{\varepsilon}(t,x,0)$ in $x$.
We also find from problem \eqref{eqn:characteristics'} that
\begin{equation}\label{eqn:t-derivative of gamma}
    \gamma_t^{\varepsilon}(t,x,s) = -\widetilde{b}^{\varepsilon}(x-t)
\exp\Big(-\int_s^t (\widetilde{b}^{\varepsilon})'
(\gamma^{\varepsilon}(t,x,\tau)-\tau)\,d\tau\Big).
\end{equation}
In view of \eqref{eqn:t-derivative of gamma}, we can similarly show
inequality \eqref{eqn:estimate for gamma}.
The proof of Theorem \ref{thm:2} is now complete.
\end{proof}

\begin{remark} \rm
We assumed that $\chi$ is symmetric.
Hence, $\int_{-\infty}^{0} \chi(y)\,dy = 1/2$.
If, instead of the symmetry of $\chi$, we assume that
$\int_{-\infty}^{0} \chi(y)\,dy = a$ for $0 \le a \le 1$,
then the solution $U \in \mathscr{G}(\mathbb{R}^2)$ of
 problem \eqref{eqn:generalized hyperbolic1} with the initial
data $U_0$ given by the class of
$(\chi_{h(\varepsilon)})_{\varepsilon \in (0,1]}$ possesses
the distributional shadow
\[
    u(t,x) =
        \begin{cases}
            a\delta(x) + (1-a)\delta(x-2t), &  \text{if }t \ge 0,\; x \in \mathbb{R}, \\
            \delta(x-t), &  \text{if }t < 0,\; x \in \mathbb{R}.
        \end{cases}
\]
\end{remark}

Next, we calculate the $\mathscr{G}^{\infty}$-singular support of
the solution $U \in \mathscr{G}(\mathbb{R}^2)$ with the same
initial data $U_0$ as in Theorem \ref{thm:2}. The following
theorem shows that the splitting of the singularity at the origin
does not occur in the sense of $\mathscr{G}^{\infty}$.

\begin{theorem}\label{thm:2-1}
Under the same assumption as in Theorem \ref{thm:2}, it holds
that
\[
    \operatorname{sing\,supp}_{\mathscr{G}^{\infty}} U
= \{(t,t) \mid t \le 0\}.
\]
\end{theorem}

\begin{proof}
The proof is divided into two steps.

{\bf Step 1}. First, we prove that $U \in \mathscr{G}(\mathbb{R}^2)$
is $\mathscr{G}^{\infty}$-regular on $\mathbb{R}^2 \setminus \{(t,t)
\mid t \le 0\}$.

As can be seen in Step 1 of the proof of Theorem \ref{thm:2}, the
solution $U \in \mathscr{G}(\mathbb{R}^2)$ is
$\mathscr{G}^{\infty}$-regular on $\{(t,x) \in \mathbb{R}^2 \mid x
< \min\{0,t\}\} \cup \{(t,x) \in \mathbb{R}^2 \mid x >
\max\{t,2t\}\}$. Hence, it suffices to prove that $U \in
\mathscr{G}(\mathbb{R}^2)$ is $\mathscr{G}^{\infty}$-regular on
$(0,\infty) \times \mathbb{R}$.

Let $(t,x) \in (0,\infty) \times \mathbb{R}$ and $0 \le s \le t$.
As in Step 4-3 of the proof of Theorem \ref{thm:2}, we get $0 <
\gamma_x^{\varepsilon}(t,x,s) \le 1$ and
$|\gamma_{xx}^{\varepsilon}(t,x,s)| \le
C''(t-s)/h(\varepsilon)^2$. Similarly, we can prove
that all derivatives of $\gamma^{\varepsilon}(t,x,s)$ in $x$ are
dominated by a finite sum of terms in the form of
$\kappa_i(t-s)^j/h(\varepsilon)^k$ with a constant $\kappa_i > 0$.
Note by \eqref{eqn:t-derivative of gamma} that
$\gamma_t^{\varepsilon}(t,x,s) = -\widetilde{b}^{\varepsilon}(x-t)
\gamma_x^{\varepsilon}(t,x,s)$. Hence, we see that all
derivatives of $\gamma^{\varepsilon}(t,x,s)$ in $t$ and $x$ are
also dominated by a finite sum of terms in the form of
$\kappa_i(t-s)^j/h(\varepsilon)^k$. Let us recall that the
solution $v^{\varepsilon}$ of problem
\eqref{eqn:equation for v^{varepsilon}} satisfies
\eqref{eqn:step 2-1}. Then, we see
that, for all $K \Subset (0,\infty) \times \mathbb{R}$ and $\alpha
\in \mathbb{N}_0^2$,
\[
    \|\partial^{\alpha} v_x^{\varepsilon}(t,x)\|_{L^{\infty}(K)}
 = O(\varepsilon^{-1}) \quad \text{as }\varepsilon \downarrow 0.
\]
Since $U = V_x \in \mathscr{G}(\mathbb{R}^2)$, this shows
that $U$ is $\mathscr{G}^{\infty}$-regular on
$(0,\infty) \times \mathbb{R}$.

{\bf Step 2}. Secondly, we prove that $\{(t,t) \mid t \le 0\}$
is contained in $\operatorname{sing\,supp}_{\mathscr{G}^{\infty}}U$.

For $t < 0$, consider
\[
    \frac{v^{\varepsilon}(t,\gamma^{\varepsilon}(0,0,t))
 - v^{\varepsilon}(t,\gamma^{\varepsilon}(0,-2h(\varepsilon),t))}{\gamma^{\varepsilon}(0,0,t) - \gamma^{\varepsilon}(0,-2h(\varepsilon),t)}.
\]
Clearly, $\gamma^{\varepsilon}(0,0,t) = t$. As in Step 1 of the
proof of Theorem \ref{thm:1}, we see that
$t - \gamma^{\varepsilon}(0,-2h(\varepsilon),t) =
-(G^{\varepsilon})^{-1}(-t/h(\varepsilon))$. Hence, by inequality
\eqref{eqn:inequality for (G^{varepsilon})^{-1}}, we get, for
some constant $C_2 > 0$,
\[
    0 < t - \gamma^{\varepsilon}(0,-2h(\varepsilon),t)
\le 2h(\varepsilon)\varepsilon^{-t/C_2}.
\]
Furthermore,
\begin{gather*}
     v^{\varepsilon}(t,\gamma^{\varepsilon}(0,0,t)) = \frac{1}{2}, \\
     v^{\varepsilon}(t,\gamma^{\varepsilon}(0,-2h(\varepsilon),t)) = 0.
\end{gather*}
Therefore,
\[
    \frac{v^{\varepsilon}(t,\gamma^{\varepsilon}(0,0,t))
- v^{\varepsilon}(t,\gamma^{\varepsilon}(0,-2h
(\varepsilon),t))}{\gamma^{\varepsilon}(0,0,t)
- \gamma^{\varepsilon}(0,-2h(\varepsilon),t)}
 \ge \frac{1}{4h(\varepsilon)} \cdot
\frac{1}{\varepsilon^{-t/C_2}}.
\]
By the mean value theorem, there exists
$x_1^{\varepsilon} \in (\gamma^{\varepsilon}(0,-2h(\varepsilon),t),t)$
such that
\[
    v_x^{\varepsilon}(t,x_1^{\varepsilon})
\ge \frac{1}{4h(\varepsilon)} \cdot \frac{1}{\varepsilon^{-t/C_2}}.
\]
Note that $\partial_x^{\alpha} v^{\varepsilon}
(t,\gamma^{\varepsilon}(0,-2h(\varepsilon),t)) = 0$
for $\alpha \in \mathbb{N}$.
Then we repeat this process to find
$(x_{\alpha}^{\varepsilon})_{\alpha \ge 2}$ such that
$x_{\alpha}^{\varepsilon} \in (\gamma^{\varepsilon}
(0,-2h(\varepsilon),t),x_{\alpha-1}^{\varepsilon})$ and
\[
    \partial_x^{\alpha} v^{\varepsilon}(t,x_\alpha^{\varepsilon})
    = \frac{\partial_x^{\alpha-1}v^{\varepsilon}(t,x_{\alpha-1}^{\varepsilon}) - \partial_x^{\alpha-1} v^{\varepsilon}(t,\gamma^{\varepsilon}(0,-2h(\varepsilon),t))}{x_{\alpha-1}^{\varepsilon} - \gamma^{\varepsilon}(0,-2h(\varepsilon),t)}
    \ge \frac{1}{2^{\alpha+1}h(\varepsilon)^{\alpha}} \cdot \frac{1}{\varepsilon^{-\alpha t/C_2}}.
\]
Since $U = V_x \in \mathscr{G}(\mathbb{R}^2)$, this shows
that $\{(t,t) \mid t \le 0\} \subset
\operatorname{sing\,supp}_{\mathscr{G}^{\infty}} U$.
The proof of Theorem \ref{thm:2-1} is now complete.
\end{proof}

Next, we discuss the case of initial data $U_0$ given by other
Dirac generalized functions at $0$. As stated in Remark
\ref{rem:strength}, there exist infinitely many Dirac generalized
functions with different strengths of singularity at $0$. For any
constant $t_0 > 0$, we define $c(\varepsilon):=
-(G^{\varepsilon})^{-1}(t_0/h(\varepsilon))/(2h(\varepsilon))$. We
find from inequality \eqref{eqn:inequality for (G^{varepsilon})^{-1}}
 that there exist two constants $C_1$,
$C_2 > 0$ independent of $t_0$ such that $\varepsilon^{t_0/C_1}
\le c(\varepsilon) \le \varepsilon^{t_0/C_2}$ for $\varepsilon \in
(0,1]$. Hence, we have $(\chi_{c(\varepsilon)})_{\varepsilon \in
(0,1]} \in {\mathscr{E}}_{M}(\mathbb{R})$, which allows us to
define $U_0 \in \mathscr{G}(\mathbb{R})$ as the class of
$(\chi_{c(\varepsilon)})_{\varepsilon \in (0,1]}$. Then $U_0$ is a
Dirac generalized function at $0$ and does not belong to
$\mathscr{G}^{\infty}(\mathbb{R})$. Furthermore, the singularity
of $U_0$ at $0$ can be interpreted to become stronger as $t_0$
becomes large. As may be seen in the following theorem, the
stronger the singularity of the initial data $U_0$ at $0$ becomes,
the longer the singularity in $\mathscr{G}_{\rm log}^{\infty}$
propagates along the line $\{t = x\}$, and it splits at time
$t_0$.

\begin{figure}[ht] 
\begin{center}
\setlength{\unitlength}{1mm}
\begin{picture}(70,60)(0,0)
\put(0,30){\line(1,0){60}}
\put(59.8,29.2){$\rightarrow$}
\put(28.1,26){$0$}
\put(61,26){$x$}

\put(30,0){\line(0,1){60}}
\put(29.2,60){$\uparrow$}
\put(26,60){$t$}
%\put(15,40){$\delta(x)/2$}
%\put(24,45){$\nearrow$}

\put(10,50){$0$}
\put(44,54){$0$}
\put(10,17){$0$}
\put(44,17){$0$}

\put(0,0){\line(1,1){40}}
\put(13,5){$\delta(x-t)$}
\put(14,9){$\nwarrow$}

\dottedline{1}(40,40)(63,63)
\put(60,56){$t=x$}

\put(40,40){\line(5,2){23}}
\put(60,45){$t=x/2+t_0/2$}
\put(51,37){$\delta(x-2t+t_0)/2$}
\put(51,41){$\nwarrow$}

\put(40,40){\line(0,1){20}}
\put(18,47){$\delta(x-t_0)/2$}
\put(35,51){$\nearrow$}

\dottedline{1}(30,40)(40,40)
\put(25,39){$t_0$}
\end{picture}
\end{center}
\caption{Distributional shadow}\label{fig2}
\end{figure}

\begin{theorem}\label{thm:3}
Let $t_0$ and $U_0 \in \mathscr{G}(\mathbb{R})$ be as above.
Then the solution $U \in \mathscr{G}(\mathbb{R}^2)$ of
problem \eqref{eqn:generalized hyperbolic1}
 admits a distributional shadow, which is given by
\[
    u(t,x) =
        \begin{cases}
            \frac{\delta(x-t_0) + \delta(x-2t+t_0)}{2},
&  \text{if }t \ge t_0,\; x \in \mathbb{R}, \\
            \delta(x-t),
&  \text{if }t < t_0,\; x \in \mathbb{R}.
        \end{cases}
\]
Furthermore,
\begin{align*}
    & \{(t,t_0) \mid t \ge t_0\} \cup \{(t,2t-t_0) \mid t \ge t_0\} \cup \{(t,t) \mid t \le t_0\} \\
    & \subset \operatorname{sing\,supp}_{\mathscr{G}_{\rm log}^{\infty}} U
     \subset \{(t,t_0) \mid t \ge t_0\} \cup \{(t,2t-t_0) \mid t \ge t_0\} \cup \{(t,t) \mid t \in \mathbb{R}\}.
\end{align*}
\end{theorem}

\begin{proof}
Let $v_0^{\varepsilon} = H \ast \chi_{c(\varepsilon)}$ and
let $V_0 \in \mathscr{G}(\mathbb{R})$ be given by the class
of $(v_0^{\varepsilon})_{\varepsilon \in (0,1]}$.
In order to prove the first assertion, it suffices to show
that the solution $V \in \mathscr{G}(\mathbb{R}^2)$ of
problem \eqref{eqn:generalized hyperbolic2} admits a
distributional shadow, which is given by
\[
    v(t,x) =
        \begin{cases}
            \frac{H(x-t_0) + H(x-2t+t_0)}{2},
&  \text{if }t \ge t_0,\; x \in \mathbb{R}, \\
            H(x-t),
& \text{if }t < t_0, \; x \in \mathbb{R}.
        \end{cases}
\]

Let $(v^{\varepsilon})_{\varepsilon \in (0,1]}$ be a representative
of $V \in \mathscr{G}(\mathbb{R}^2)$ satisfying problem
\eqref{eqn:equation for v^{varepsilon}}.
Then we have $v^{\varepsilon}(t,\gamma^{\varepsilon}(0,x,t))
 = v_0^{\varepsilon}(x)$ and see that $v^{\varepsilon}$
converges to $H(x-t)$ a.e. in $(-\infty,0) \times \mathbb{R}$
 as $\varepsilon \downarrow 0$.

We now fix $0 < a \le 1$ arbitrarily, and put $t_1^{\varepsilon}:=
\gamma^{\varepsilon}(0,-ac(\varepsilon),t_1^{\varepsilon}) +
2h(\varepsilon)$. As in Step 1 of the proof of Theorem
\ref{thm:1}, we have
\[
    t_1^{\varepsilon}
    = h(\varepsilon)\int_{ac(\varepsilon)/h(\varepsilon)}^{2} \frac{dz}{1 - \widetilde{b}^{\varepsilon}(-h(\varepsilon)z)}.
\]
By the definition of $c(\varepsilon)$, we get
\[
    t_1^{\varepsilon} = t_0 - h(\varepsilon)
\int^{ac(\varepsilon)/h(\varepsilon)}_{2c(\varepsilon)}
\frac{dz}{1 - \widetilde{b}^{\varepsilon}(-h(\varepsilon)z)}.
\]
As in the proof of Lemma \ref{lemma:1}, we get, for some
constant $C_2 > 0$ and $\varepsilon > 0$ small enough,
\[
    h(\varepsilon)\int^{ac(\varepsilon)
/h(\varepsilon)}_{2c(\varepsilon)} \frac{dz}
{1 - \widetilde{b}^{\varepsilon}(-h(\varepsilon)z)}
\le h(\varepsilon)\int^{ac(\varepsilon)
/h(\varepsilon)}_{2c(\varepsilon)} \frac{C_2}{z}\,dz.
\]
We have
\[
    h(\varepsilon)\int^{ac(\varepsilon)/h(\varepsilon)}_{2c(\varepsilon)} \frac{C_2}{z}\,dz
    = -C_2h(\varepsilon) \log \frac{2h(\varepsilon)}{a} \to 0
\quad \text{as } \varepsilon \downarrow 0,
\]
so that $t_1^{\varepsilon} \to t_0$ as $\varepsilon \downarrow 0$.
Note that for any $t \ge t_1^{\varepsilon}$,
$\gamma^{\varepsilon}(0,-ac(\varepsilon),t)
= \gamma^{\varepsilon}(0,-ac(\varepsilon),t_1^{\varepsilon})$.
Therefore, for any $0 \le t \le t_0$,
$\gamma^{\varepsilon}(0,-ac(\varepsilon),t) \to t$ as
$\varepsilon \downarrow 0$, and for any $t \ge t_0$,
$\gamma^{\varepsilon}(0,-ac(\varepsilon),t) \to t_0$ as
 $\varepsilon \downarrow 0$.
Furthermore,
\[
    v^{\varepsilon}(t,\gamma^{\varepsilon}(0,-ac(\varepsilon),t))
 = v_0^{\varepsilon}(-ac(\varepsilon))
 = \int_{-\infty}^{-a}\chi(y)\,dy,
\]
and so $v^{\varepsilon}(t,\gamma^{\varepsilon}(0,-c(\varepsilon),t))
= 0$ and $v^{\varepsilon}(t,\gamma^{\varepsilon}(0,-ac(\varepsilon),t))
 \uparrow 1/2$ as $a \downarrow 0$.
Similarly, we take $t_2^{\varepsilon}:= \gamma^{\varepsilon}
(0,ac(\varepsilon),t_2^{\varepsilon}) - 2h(\varepsilon)$ to get
\[
    t_2^{\varepsilon}
    = h(\varepsilon)\int_{ac(\varepsilon)/h(\varepsilon)}^{2}
\frac{dz}{\widetilde{b}^{\varepsilon}(h(\varepsilon)z) - 1}
    \to t_0 \quad \text{as }\varepsilon \downarrow 0.
\]
Note that, for any $t \ge t_2^{\varepsilon}$,
$\gamma^{\varepsilon}(0,ac(\varepsilon),t)
= 2t - \gamma^{\varepsilon}(0,ac(\varepsilon),t_2^{\varepsilon})
 + 4h(\varepsilon)$.
Hence, for any $0 \le t \le t_0$,
$\gamma^{\varepsilon}(0,ac(\varepsilon),t) \to t$ as
$\varepsilon \downarrow 0$, and for any $t \ge t_0$,
$\gamma^{\varepsilon}(0,ac(\varepsilon),t) \to 2t - t_0$ as
$\varepsilon \downarrow 0$.
Furthermore,
\[
    v^{\varepsilon}(t,\gamma^{\varepsilon}(0,ac(\varepsilon),t))
= v_0^{\varepsilon}(ac(\varepsilon)) = \int_{-\infty}^{a}\chi(y)\,dy,
\]
and so $v^{\varepsilon}(t,\gamma^{\varepsilon}(0,c(\varepsilon),t)) = 1$
and $v^{\varepsilon}(t,\gamma^{\varepsilon}(0,ac(\varepsilon),t))
\downarrow 1/2$ as $a \downarrow 0$.
Therefore, in view of the fact that $v^{\varepsilon}(t,x)$
is non-decreasing in $x$, we obtain that $v^{\varepsilon}$
converges to $H(x-t)$ a.e. in $(0,t_0) \times \mathbb{R}$ and
to $(H(x-t_0) + H(x-2t+t_0))/2$ a.e.
in $(t_0,\infty) \times \mathbb{R}$  as $\varepsilon \downarrow 0$.
Thus, the first assertion follows.

Next, we prove the second assertion.
We will do so in four steps.

{\bf Step 1}. First, we prove that $U \in \mathscr{G}(\mathbb{R}^2)$
 is $\mathscr{G}_{\rm log}^{\infty}$-regular on
$\{(t,x) \in \mathbb{R}^2 \mid x < \min\{t_0,t\}\} \cup \{(t,x)
\in \mathbb{R}^2 \mid x > \max\{t,2t-t_0\}\}$.

It is easy to check that $V \in \mathscr{G}(\mathbb{R}^2)$ equals $0$
on $\{(t,x) \in \mathbb{R}^2 \mid x < \min\{t_0,t\}\}$ and further
equals $1$ on $\{(t,x) \in \mathbb{R}^2 \mid x > \max\{t,2t-t_0\}\}$.
Hence, $U = V_x \in \mathscr{G}(\mathbb{R}^2)$ is
$\mathscr{G}_{\rm log}^{\infty}$-regular on the union of these
two sets.

{\bf Step 2}. Secondly, we prove that $\{(t,t) \mid t \le t_0\}$
is contained in $\operatorname{sing\,supp}_{\mathscr{G}
_{\rm log}^{\infty}} U$.

Put $t_1^{\varepsilon} = \gamma^{\varepsilon}(0,-c(\varepsilon),
t_1^{\varepsilon}) + 2h(\varepsilon)$.
Then, as shown above, $t_1^{\varepsilon} \uparrow t_0$ as
$\varepsilon \downarrow 0$.
For  $t < t_1^{\varepsilon}$, consider
\[
    \frac{v^{\varepsilon}(t,\gamma^{\varepsilon}(0,0,t))
- v^{\varepsilon}(t,\gamma^{\varepsilon}(0,
-c(\varepsilon),t))}{\gamma^{\varepsilon}(0,0,t)
-\gamma^{\varepsilon}(0,-c(\varepsilon),t)}.
\]
As in Step 1 of the proof of Theorem \ref{thm:1}, we get
\[
    G^{\varepsilon}(\gamma^{\varepsilon}(0,-c(\varepsilon),t)-t)
= \frac{t_1^{\varepsilon}-t}{h(\varepsilon)}.
\]
Using inequality \eqref{eqn:inequality for (G^{varepsilon})^{-1}},
we have, for some constant $C_2 > 0$,
\[
    0 < t - \gamma^{\varepsilon}(0,-c(\varepsilon),t)
\le 2h(\varepsilon)\varepsilon^{(t_1^{\varepsilon}-t)/C_2}.
\]
Since $\gamma^{\varepsilon}(0,0,t) = t$, it follows that
\begin{equation}\label{eqn:difference}
    0 < \gamma^{\varepsilon}(0,0,t)
- \gamma^{\varepsilon}(0,-c(\varepsilon),t)
 \le 2h(\varepsilon)\varepsilon^{(t_1^{\varepsilon}-t)/C_2}.
\end{equation}
We use $v^{\varepsilon}(t,\gamma^{\varepsilon}(0,0,t)) = 1/2$,
$v^{\varepsilon}(t,\gamma^{\varepsilon}(0,-c(\varepsilon),t)) = 0$
and \eqref{eqn:difference} to see that
\[
    \frac{v^{\varepsilon}(t,\gamma^{\varepsilon}(0,0,t))
- v^{\varepsilon}(t,\gamma^{\varepsilon}
(0,-c(\varepsilon),t))}{\gamma^{\varepsilon}(0,0,t)
-\gamma^{\varepsilon}(0,-c(\varepsilon),t)}
    \ge \frac{1}{4h(\varepsilon)} \cdot
\frac{1}{\varepsilon^{(t_1^{\varepsilon}-t)/C_2}}.
\]
By the mean value theorem, there exists
$x^{\varepsilon} \in (\gamma^{\varepsilon}(0,-c(\varepsilon),t),
\gamma^{\varepsilon}(0,0,t))$ such that
\[
    v_x^{\varepsilon}(t,x^{\varepsilon})
\ge \frac{1}{4h(\varepsilon)} \cdot
\frac{1}{\varepsilon^{(t_1^{\varepsilon}-t)/C_2}}.
\]
Since $U = V_x \in \mathscr{G}(\mathbb{R}^2)$, this means that
$\{(t,t) \mid t \le t_0\} \subset
\operatorname{sing\,supp}_{\mathscr{G}_{\rm log}^{\infty}} U$.

{\bf Step 3}. Thirdly, we prove that $\{(t,t_0) \mid t \ge t_0\}$
and $\{(t,2t-t_0) \mid t \ge t_0\}$ are contained in
$\operatorname{sing\,supp}_{\mathscr{G}_{\rm log}^{\infty}} U$.

For $t > t_0$, consider
\[
    \frac{v^{\varepsilon}(t,\gamma^{\varepsilon}
(0,-ac(\varepsilon),t)) - v^{\varepsilon}
(t,\gamma^{\varepsilon}(0,-c(\varepsilon),t))}{\gamma^{\varepsilon}
(0,-ac(\varepsilon),t) - \gamma^{\varepsilon}(0,-c(\varepsilon),t)}
\]
for $0 < a \le 1$.
As shown above, if we put $t_1^{\varepsilon}
= \gamma^{\varepsilon}(0,-ac(\varepsilon),t_1^{\varepsilon})
 + 2h(\varepsilon)$, then we have $t_1^{\varepsilon} \uparrow t_0$
as $\varepsilon \downarrow 0$ and
\[
    \gamma^{\varepsilon}(0,-ac(\varepsilon),t)
    = \gamma^{\varepsilon}(0,-ac(\varepsilon),t_1^{\varepsilon})
    = h(\varepsilon)\int_{ac(\varepsilon)/h(\varepsilon)}^{2}
\frac{dz}{1 - \widetilde{b}^{\varepsilon}(-h(\varepsilon)z)}
- 2h(\varepsilon).
\]
Hence,
\[
    \gamma^{\varepsilon}(0,-ac(\varepsilon),t)
- \gamma^{\varepsilon}(0,-c(\varepsilon),t)
    = h(\varepsilon)\int^{c(\varepsilon)/h(\varepsilon)}_{ac
(\varepsilon)/h(\varepsilon)} \frac{dz}{1 - \widetilde{b}^{\varepsilon}
(-h(\varepsilon)z)}.
 \]
 We now take $0 < a < 1$ so that $\int_{-\infty}^{-a} \chi(y)\,dy > 0$.
Then, as in the proof of Lemma \ref{lemma:1}, we get, for some
constant $C_2 > 0$,
\begin{align*}
    0 < \gamma^{\varepsilon}(0,-ac(\varepsilon),t)
- \gamma^{\varepsilon}(0,-c(\varepsilon),t)
    \le h(\varepsilon)\int^{c(\varepsilon)/h(\varepsilon)}_{ac
(\varepsilon)/h(\varepsilon)} \frac{C_2}{z}\,dz
= C_2 h(\varepsilon) \log \frac{1}{a}.
 \end{align*}
Furthermore,
\begin{gather*}
     v^{\varepsilon}(t,\gamma^{\varepsilon}(0,-ac(\varepsilon),t)) = \int_{-\infty}^{-a}\chi(y)\,dy > 0, \\
     v^{\varepsilon}(t,\gamma^{\varepsilon}(0,-c(\varepsilon),t)) = 0.
\end{gather*}
Hence,
\[
    \frac{v^{\varepsilon}(t,\gamma^{\varepsilon}(0,-ac(\varepsilon),t))
- v^{\varepsilon}(t,\gamma^{\varepsilon}
(0,-c(\varepsilon),t))}{\gamma^{\varepsilon}(0,-ac(\varepsilon),t)
- \gamma^{\varepsilon}(0,-c(\varepsilon),t)}
    \ge \frac{\int_{-\infty}^{-a} \chi(y)\,dy}{C_2 \log 1/a}
\cdot \frac{1}{h(\varepsilon)}.
\]
By the mean value theorem, there exists
$x_1^{\varepsilon} \in (\gamma^{\varepsilon}(0,-c(\varepsilon),t),
\gamma^{\varepsilon}(0,-ac(\varepsilon),t))$ such that
\[
    v^{\varepsilon}_x(t,x_1^{\varepsilon})
    \ge \frac{\int_{-\infty}^{-a} \chi(y)\,dy}{C_2 \log 1/a}
\cdot \frac{1}{h(\varepsilon)}.
\]
Note that $\partial_x^{\alpha} v^{\varepsilon}(t,
\gamma^{\varepsilon}(0,-c(\varepsilon),t)) = 0$ for
$\alpha \in \mathbb{N}$.
Hence, we repeat this process to get
$(x^{\varepsilon}_{\alpha})_{\alpha \ge 2}$ such that
$x^{\varepsilon}_{\alpha} \in
(\gamma^{\varepsilon}(0,-c(\varepsilon),t),
 x^{\varepsilon}_{\alpha-1})$ and
\[
    \partial^{\alpha}_x v^{\varepsilon}(t,x_{\alpha}^{\varepsilon})
    = \frac{\partial_x^{\alpha-1} v^{\varepsilon}(t,x_{\alpha-1}^{\varepsilon}) - \partial_x^{\alpha-1} v^{\varepsilon}(t,\gamma^{\varepsilon}(0,-c(\varepsilon),t))}{x_{\alpha-1}^{\varepsilon} - \gamma^{\varepsilon}(0,-c(\varepsilon),t)}
    \ge \frac{\int_{-\infty}^{-a} \chi(y)\,dy}{\left(C_2 \log 1/a\right)^{\alpha}} \cdot \frac{1}{h(\varepsilon)^{\alpha}}.
\]
Since $U = V_x \in \mathscr{G}(\mathbb{R}^2)$, this shows that
$\{(t,t_0) \mid t \ge t_0\} \subset
\operatorname{sing\,supp}_{\mathscr{G}_{\rm log}^{\infty}} U$.
In a similar way, we can show that $\{(t,2t-t_0) \mid t \ge t_0\}
\subset \operatorname{sing\,supp}_{\mathscr{G}_{\rm log}^{\infty}} U$.

{\bf Step 4}. Fourthly, we prove that $U \in \mathscr{G}(\mathbb{R}^2)$
is $\mathscr{G}_{\rm log}^{\infty}$-regular on
$\{(t,x) \in \mathbb{R}^2 \mid t_0 < x < t\}$ and
$\{(t,x) \in \mathbb{R}^2 \mid t < x < 2t-t_0\}$.

{\bf Step 4-1}. To do so, we first estimate
$\gamma^{\varepsilon}(t,x,0)$ for all $(t,x)$ such that
$t_0 \le x \le t-2h(\varepsilon)$ or $t+2h(\varepsilon)
 \le x \le 2t-t_0$.

When $t_0 \le x \le t-2h(\varepsilon)$, as seen in Step 1 of the
proof of Theorem \ref{thm:1}, $\gamma^{\varepsilon}(t,x,0) =
(G^{\varepsilon})^{-1}(x/h(\varepsilon) + 2)$. Hence,
$G^{\varepsilon}(\gamma^{\varepsilon}(t,x,0)) = x/h(\varepsilon) +
2$. By the definition of $c(\varepsilon)$, we have
$G^{\varepsilon}(-2h(\varepsilon)c(\varepsilon)) =
t_0/h(\varepsilon)$. Taking the difference gives
\[
    G^{\varepsilon}(\gamma^{\varepsilon}(t,x,0))
- G^{\varepsilon}(-2h(\varepsilon)c(\varepsilon))
= \frac{x - t_0}{h(\varepsilon)} + 2.
\]
By the definition of $G^{\varepsilon}$, we have
\[
    \int_{-\gamma^{\varepsilon}(t,x,0)
/h(\varepsilon)}^{2c(\varepsilon)}
\frac{dz}{1-\widetilde{b}^{\varepsilon}(-h(\varepsilon)z)}
= \frac{x - t_0}{h(\varepsilon)} + 2.
\]
As seen from the proof of Lemma \ref{lemma:1}, we have
 $1/(1-\widetilde{b}^{\varepsilon}(-h(\varepsilon)z)) \le C_2/z$
for $0 \le z \le 2$ and so
\[
    \frac{x-t_0}{h(\varepsilon)} + 2
    \le \int_{-\gamma^{\varepsilon}(t,x,0)
/h(\varepsilon)}^{2c(\varepsilon)} \frac{C_2}{z}\,dz
= C_2 \log \frac{2h(\varepsilon)c(\varepsilon)}{-\gamma^{\varepsilon}
(t,x,0)}.
\]
Since $h(\varepsilon)=1/\log(1/\varepsilon)$, we have
\[
    \big(\frac{1}{\varepsilon}\big)^{x-t_0}e^2
    \le \Big( \frac{2h(\varepsilon)c(\varepsilon)}
{-\gamma^{\varepsilon}(t,x,0)}\Big)^{C_2}.
\]
Hence,
\begin{equation}\label{eqn:step 4-1'}
    0 < -\gamma^{\varepsilon}(t,x,0)
\le 2\exp\big(-\frac{2}{C_2}\big)h(\varepsilon)c(\varepsilon)
\varepsilon^{(x-t_0)/C_2}.
\end{equation}

When $t + 2h(\varepsilon) \le x \le 2t-t_0$, we have
$\gamma^{\varepsilon}(t,x,0)
= -(G^{\varepsilon})^{-1}((2t-x)/h(\varepsilon) + 2)$.
A similar argument to the one above gives
\[
    0 < \gamma^{\varepsilon}(t,x,0)
\le 2\exp\big(-\frac{2}{C_2}\big)h(\varepsilon)c(\varepsilon)
 \varepsilon^{(2t-x-t_0)/C_2}.
\]

{\bf Step 4-2}. We next estimate $\gamma^{\varepsilon}_x(t,x,0)$.
When $t_0 \le x \le t-2h(\varepsilon)$, we have
$\gamma^{\varepsilon}(t,x,0) = (G^{\varepsilon})^{-1}(x/h(\varepsilon)
 + 2)$.
Hence, as in the proof of Lemma \ref{lemma:1}, we get,
for some constant $c_2 > 0$,
\[
    \gamma^{\varepsilon}_x(t,x,0)
    = 1 - \widetilde{b}^{\varepsilon}(\gamma^{\varepsilon}(t,x,0))
    \le c_2 \frac{|\gamma^{\varepsilon}(t,x,0)|}{h(\varepsilon)}
    \le 2c_2\exp\big(-\frac{2}{C_2}\big)c(\varepsilon)
\varepsilon^{(x-t_0)/C_2},
\]
where we used formula
\eqref{eqn:differentiation of (G^{varepsilon})^{-1}}
 in the first step and inequality \eqref{eqn:step 4-1'}
 in the last step.

When $t + 2h(\varepsilon) \le x \le 2t-t_0$,
$\gamma^{\varepsilon}(t,x,0)
= -(G^{\varepsilon})^{-1}((2t-x)/h(\varepsilon) + 2)$.
Similarly, we get
\[
    \gamma^{\varepsilon}_x(t,x,0)
    = 1 - \widetilde{b}^{\varepsilon}(-\gamma^{\varepsilon}(t,x,0))
 \le 2c_2\exp\big(-\frac{2}{C_2}\big)c(\varepsilon)
 \varepsilon^{(2t-x-t_0)/C_2}.
\]

{\bf Step 4-3}. Finally, we prove that, for all
$K \Subset \{(t,x) \in \mathbb{R}^2 \mid t_0 < x < t\} \cup \{(t,x)
\in \mathbb{R}^2 \mid t < x < 2t-t_0\}$ and
$\alpha \in \mathbb{N}_0^2$,
\begin{equation}\label{eqn:estimate for v'}
    \|\partial^{\alpha}v_x^{\varepsilon}(t,x)\|_{L^{\infty}(K)}
\to 0 \quad \text{as }\varepsilon \downarrow 0.
\end{equation}
This implies that $U = V_x \in \mathscr{G}(\mathbb{R}^2)$ is
$\mathscr{G}_{\rm log}^{\infty}$-regular on
$\{(t,x) \in \mathbb{R}^2 \mid t_0 < x < t\} \cup
\{(t,x) \in \mathbb{R}^2 \mid t < x < 2t-t_0\}$.

Note that
\[
    v_x^{\varepsilon}(t,x)
    = \chi\Big(\frac{\gamma^{\varepsilon}(t,x,0)}{c(\varepsilon)}\Big)
\frac{\gamma_x^{\varepsilon}(t,x,0)}{c(\varepsilon)}.
\]
Hence, to prove \eqref{eqn:estimate for v'},
it suffices to show that, for all $K \Subset \{(t,x)
\in \mathbb{R}^2 \mid t_0 < x < t\} \cup \{(t,x)
\in \mathbb{R}^2 \mid t < x < 2t-t_0\}$ and
$\alpha \in \mathbb{N}^2$,
\[
    \frac{\|\partial^{\alpha}\gamma^{\varepsilon}
(t,x,0)\|_{L^{\infty}(K)}}{c(\varepsilon)} \to 0 \quad
\text{as }\varepsilon \downarrow 0.
\]
This can be done similarly to Step 4-3 of the proof of Theorem
\ref{thm:2}.
The proof of Theorem \ref{thm:3} is now complete.
\end{proof}

\begin{remark} \rm
It can be conjectured in Theorem \ref{thm:3} that
\begin{align*}
 &\operatorname{sing\,supp}_{\mathscr{G}_{\rm log}^{\infty}} U \\
  &= \{(t,t_0) \mid t \ge t_0\} \cup \{(t,2t-t_0) \mid t \ge t_0\}
\cup \{(t,t) \mid t \le t_0\} \quad
 (= \operatorname{sing\,supp}\, u),
\end{align*}
but this is open.
\end{remark}

As in Step 2 of the proof of Theorem \ref{thm:2-1}, we can apply
the mean value theorem repeatedly in Step 2 of the proof of
Theorem \ref{thm:3} to show the following inclusion relation on
the $\mathscr{G}^{\infty}$-singular support of the solution $U$.
However, it is open whether equality holds.

\begin{theorem}\label{thm:3-1}
Under the same assumption as in Theorem \ref{thm:3}, it holds
that
\[
    \operatorname{sing\,supp}_{\mathscr{G}^{\infty}} U
\supset \{(t,t) \mid t \le t_0\}.
\]
\end{theorem}

Finally, we discuss the case that $U_0 \in \mathscr{G}(\mathbb{R})$
is defined as the class of $(\kappa_1\chi_{a_1(\varepsilon)}
(\cdot+s_1) + \kappa_2\chi_{a_2(\varepsilon)}(\cdot-s_2))_{\varepsilon
\in (0,1]}$, where $\kappa_1$, $\kappa_2 \in \mathbb{R}$, $s_1$,
 $s_2 > 0$, $a_1(\varepsilon)$, $a_2(\varepsilon) \le h(\varepsilon)$.
Then $U_0 \approx \kappa_1\delta_{-s_1} + \kappa_2\delta_{s_2}$,
where $\delta_{-s_1}$ and $\delta_{s_2}$ are the delta functions
at $-s_1$ and $s_2$, respectively.
As may be seen in the following theorem, the
$\mathscr{G}_{\rm log}^{\infty}$-singular support of the corresponding
solution $U \in \mathscr{G}(\mathbb{R}^2)$ and the singular support
of its distributional shadow do not necessarily coincide.

\begin{figure}[ht] 
\begin{center}
\setlength{\unitlength}{1mm}
\begin{picture}(70,60)(0,0)
\put(0,30){\line(1,0){60}}
\put(59.8,29.2){$\rightarrow$}
\put(28.1,26){$0$}
\put(61,31.5){$x$}

\put(30,0){\line(0,1){60}}
\put(29.2,60){$\uparrow$}
\put(26,60){$t$}
\put(8,27){$-s_1$}
\put(39,27){$s_2$}

\put(8,50){$0$}
\put(40,50){$0$}
\put(8,18){$0$}
\put(40,18){$0$}

\put(0,0){\line(1,1){20}}
\put(14,11){$\kappa_2\delta(x-t)$}
\put(18.5,15){$\nwarrow$}

\put(1,-2){$(\kappa_1+\kappa_2)\delta(x-t)$}
\put(5.5,1.5){$\nwarrow$}

\put(15,15){\line(0,1){45}}
\put(-4,40){$\kappa_1\delta(x+s_1)$}
\put(10.5,44){$\nearrow$}

\dottedline{1}(20,20)(63,63)
\put(60,55){$t=x$}

\put(20,20){\line(2,1){43}}
\put(60,37){$t=x/2-s_2/2$}
\put(47,25){$\kappa_2\delta(x-2t-s_2)$}
\put(49,29.2){$\nwarrow$}
\end{picture}
\end{center}
\caption{Distributional shadow for the case $s_1 > s_2$}
\label{fig3}
\end{figure}

\begin{theorem}\label{thm:4}
Let $U_0 \in \mathscr{G}(\mathbb{R})$ be as above.
Then the solution $U \in \mathscr{G}(\mathbb{R}^2)$
of problem \eqref{eqn:generalized hyperbolic1}
 admits a distributional shadow, which is given by
\[
    u(t,x) =
        \begin{cases}
            \kappa_1\delta(x+s_1) + \kappa_2\delta(x-2t-s_2), \\
            \quad \text{if }t \ge \max\{-s_1,-s_2\},\\[3pt]
            \kappa_1\delta(x+s_1) + \kappa_2\delta(x-t), \\
            \quad \text{if }\min\{-s_1,-s_2\} < t < \max\{-s_1,-s_2\}
            \text{ and }s_1 > s_2,\\[3pt]
            \kappa_1\delta(x-t) + \kappa_2\delta(x-2t-s_2), \\
            \quad \text{if }\min\{-s_1,-s_2\} < t < \max\{-s_1,-s_2\}
            \text{ and }s_1 < s_2,\\[3pt]
            (\kappa_1+\kappa_2)\delta(x-t), \\
            \quad \text{if }t \le \min\{-s_1,-s_2\}.
        \end{cases}
\]
Furthermore,
\begin{equation}
\begin{aligned}
&\operatorname{sing\,supp}_{\mathscr{G}_{\rm log}^{\infty}} U\\
 & = \{(t,-s_1) \mid t \ge -s_1\} \cup \{(t,2t+s_2) \mid t \ge -s_2\}
  \cup \{(t,t) \mid t \le \max\{-s_1,-s_2\}\}.
\end{aligned}\label{eqn:singsupp3}
\end{equation}
Thus, if $\kappa_1 = -\kappa_2 \ne 0$, then
\[
    \operatorname{sing\,supp}_{\mathscr{G}_{\rm log}^{\infty}}U
\ne \operatorname{sing\,supp} u.
\]
\end{theorem}

\begin{proof}
The first assertion can be proved similarly to the proof of
Theorem \ref{thm:2}. We will only prove the second assertion for
the case $\kappa_1\kappa_2 \ne 0$. The case $\kappa_1\kappa_2 = 0$
can be argued similarly.

Let $v_0^{\varepsilon} = H \ast (\kappa_1\chi_{a_1(\varepsilon)}
(\cdot+s_1) + \kappa_2\chi_{a_2(\varepsilon)}(\cdot-s_2))$
and let $V_0 \in \mathscr{G}(\mathbb{R})$ be given by the class
of $(v_0^{\varepsilon})_{\varepsilon \in (0,1]}$.
In order to prove the second assertion, it suffices to investigate
the behavior of the solution $V \in \mathscr{G}(\mathbb{R}^2)$
of problem \eqref{eqn:generalized hyperbolic2}.

Let $(v^{\varepsilon})_{\varepsilon \in (0,1]}$ be a
representative of $V \in \mathscr{G}(\mathbb{R}^2)$ satisfying
problem \eqref{eqn:equation for v^{varepsilon}}. As in the proof
of Theorems \ref{thm:2} and \ref{thm:3}, we can apply the method
of characteristic curves to see that $V_x$ identically equals $0$
on the complement of the set given by \eqref{eqn:singsupp3}.
Since $U = V_x \in \mathscr{G}(\mathbb{R}^2)$, it follows that $U$
is $\mathscr{G}_{\rm log}^{\infty}$-regular on the complement of the
set given by \eqref{eqn:singsupp3}.

Now, we show that $\{(t,-s_1) \mid t \ge -s_1\}
\cup \{(t,2t+s_2) \mid t \ge -s_2\} \subset
\operatorname{sing\,supp}_{\mathscr{G}_{\rm log}^{\infty}} U$.
We have $v^{\varepsilon}(t,x)
= v_0^{\varepsilon}(\gamma^{\varepsilon}(t,x,0))$.
If $-s_1-a_1(\varepsilon) \le x \le -s_1 + a_1(\varepsilon)$ and
$t \ge -s_1 + a_1(\varepsilon) + 2h(\varepsilon)$, then
$\gamma^{\varepsilon}(t,x,0) = x$, so that
$v^{\varepsilon}(t,x) = v_0^{\varepsilon}(x)$.
We see that $\partial_x^{\alpha}v^{\varepsilon}(t,-s_1)
= \kappa_1 \chi^{(\alpha-1)}(0)/a_1(\varepsilon)^{\alpha}$ for
$\alpha \in \mathbb{N}$.
Hence, $\{(t,-s_1) \mid t \ge -s_1\} \subset
\operatorname{sing\,supp}_{\mathscr{G}_{\rm log}^{\infty}} U$.
Similarly, if $2t+s_2-a_2(\varepsilon) \le x \le 2t+s_2
+ a_2(\varepsilon)$ and $t \ge -s_2 + a_2(\varepsilon)
+ 2h(\varepsilon)$, then $\gamma^{\varepsilon}(t,x,0) = x - 2t$,
so that $v^{\varepsilon}(t,x) = v_0^{\varepsilon}(x-2t)$.
We see that $\partial_x^{\alpha}v^{\varepsilon}(t,2t+s_2)
 = \kappa_2 \chi^{(\alpha-1)}(0)/a_2(\varepsilon)^{\alpha}$
for $\alpha \in \mathbb{N}$.
Hence, $\{(t,2t+s_2) \mid t \ge -s_2\} \subset
\operatorname{sing\,supp}_{\mathscr{G}_{\rm log}^{\infty}} U$.

Finally, we prove that $\{(t,t) \mid t \le \max\{-s_1,-s_2\}\}
\subset \operatorname{sing\,supp}_{\mathscr{G}_{\rm log}^{\infty}} U$.
For $t < -s_1$, consider
\[
    \frac{v^{\varepsilon}(t,\gamma^{\varepsilon}(0,-s_1,t))
- v^{\varepsilon}(t,\gamma^{\varepsilon}(0,-s_1
-a_1(\varepsilon),t))}{\gamma^{\varepsilon}(0,-s_1,t)
-\gamma^{\varepsilon}(0,-s_1-a_1(\varepsilon),t)}.
\]
As in Step 1 of the proof of Theorem \ref{thm:1}, we get, for
$\varepsilon > 0$ small enough,
\begin{gather}
    G^{\varepsilon}(\gamma^{\varepsilon}(0,-s_1,t)-t)
= -\frac{t+s_1}{h(\varepsilon)} + 2, \label{eqn:G^{varepsilon}1} \\
    G^{\varepsilon}(\gamma^{\varepsilon}(0,-s_1-a_1(\varepsilon),t)-t)
 = -\frac{t+s_1+a_1(\varepsilon)}{h(\varepsilon)} + 2. \label{eqn:G^{varepsilon}2}
\end{gather}
By \eqref{eqn:inequality for (G^{varepsilon})^{-1}}
 and \eqref{eqn:G^{varepsilon}2}, we have, for some constant $C_2 > 0$,
\begin{equation}\label{eqn:inequality for gamma2}
    t - \gamma^{\varepsilon}(0,-s_1-a_1(\varepsilon),t)
\le 2 \exp\big(-\frac{2}{C_2}\big)h(\varepsilon)\varepsilon^{-(t
+s_1+a_1(\varepsilon))/C_2}.
\end{equation}
Subtracting \eqref{eqn:G^{varepsilon}2} from
\eqref{eqn:G^{varepsilon}1} gives
\begin{equation}\label{eqn:difference of G^{varepsilon}}
    G^{\varepsilon}(\gamma^{\varepsilon}(0,-s_1,t)-t)
- G^{\varepsilon}(\gamma^{\varepsilon}(0,-s_1-a_1(\varepsilon),t)-t)
= \frac{a_1(\varepsilon)}{h(\varepsilon)}.
\end{equation}
On the other hand, by the definition of $G^{\varepsilon}$, we get
\begin{align*}
  0 &< G^{\varepsilon}(\gamma^{\varepsilon}(0,-s_1,t)-t)
 - G^{\varepsilon}(\gamma^{\varepsilon}(0,-s_1-a_1(\varepsilon),t)-t) \\
    & \hspace{0.5cm} = \int_{(t - \gamma^{\varepsilon}(0,-s_1,t))
 /h(\varepsilon)}^{(t - \gamma^{\varepsilon}
 (0,-s_1-a_1(\varepsilon),t))/h(\varepsilon)}
\frac{dz}{1 - \widetilde{b}^{\varepsilon}(-h(\varepsilon)z)}.
\end{align*}
As in the proof of Lemma \ref{lemma:1}, we have
$1/(1 - \widetilde{b}^{\varepsilon}(-h(\varepsilon)z)) \ge C_1/z$
for some constant $C_1 > 0$ and so
\begin{align*}
    & G^{\varepsilon}(\gamma^{\varepsilon}(0,-s_1,t)-t) - G^{\varepsilon}(\gamma^{\varepsilon}(0,-s_1-a_1(\varepsilon),t)-t)\\
    &  \ge \int_{(t - \gamma^{\varepsilon}(0,-s_1,t))/h(\varepsilon)}^{(t - \gamma^{\varepsilon}(0,-s_1-a_1(\varepsilon),t))/h(\varepsilon)} \frac{C_1}{z}\,dz\\
    &  \ge C_1\big[\frac{t - \gamma^{\varepsilon}(0,-s_1-a_1
(\varepsilon),t)}{h(\varepsilon)}
- \frac{t - \gamma^{\varepsilon}(0,-s_1,t)}{h(\varepsilon)}\big]
\big[\frac{h(\varepsilon)}{t - \gamma^{\varepsilon}
(0,-s_1-a_1(\varepsilon),t)}\big]\\
    &  = \frac{C_1[\gamma^{\varepsilon}(0,-s_1,t)
 - \gamma^{\varepsilon}(0,-s_1-a_1(\varepsilon),t)]}{t
- \gamma^{\varepsilon}(0,-s_1-a_1(\varepsilon),t)}.
\end{align*}
Hence, by \eqref{eqn:difference of G^{varepsilon}}, we have
\begin{equation}\label{eqn:difference of G^{varepsilon}2}
    \gamma^{\varepsilon}(0,-s_1,t)-\gamma^{\varepsilon}
(0,-s_1-a_1(\varepsilon),t)
    \le \frac{t - \gamma^{\varepsilon}(0,-s_1-a_1(\varepsilon),t)}{C_1}
 \cdot \frac{a_1(\varepsilon)}{h(\varepsilon)}.
\end{equation}
We combine \eqref{eqn:inequality for gamma2}
 and \eqref{eqn:difference of G^{varepsilon}2} to see that
\begin{equation}\label{eqn:difference of G^{varepsilon}3}
    \gamma^{\varepsilon}(0,-s_1,t)-\gamma^{\varepsilon}
(0,-s_1-a_1(\varepsilon),t)
    \le \frac{2}{C_1} \exp\big(-\frac{2}{C_2}\big)
a_1(\varepsilon) \varepsilon^{-(t+s_1+a_1(\varepsilon))/C_2}.
\end{equation}
We use $v^{\varepsilon}(t,\gamma^{\varepsilon}(0,-s_1,t)) = \kappa_1/2$,
 $v^{\varepsilon}(t,\gamma^{\varepsilon}(0,-s_1-a_1(\varepsilon),t))
= 0$ and \eqref{eqn:difference of G^{varepsilon}3} to get
\begin{align*}
    & \frac{v^{\varepsilon}(t,\gamma^{\varepsilon}(0,-s_1,t)) - v^{\varepsilon}(t,\gamma^{\varepsilon}(0,-s_1-a_1(\varepsilon),t))}{\gamma^{\varepsilon}(0,-s_1,t)-\gamma^{\varepsilon}(0,-s_1-a_1(\varepsilon),t)} \\
    &  \ge \frac{\kappa_1C_1}{4 \exp(-2/C_2) a_1(\varepsilon)} \cdot \frac{1}{\varepsilon^{-(t+s_1+a_1(\varepsilon))/C_2}}.
\end{align*}
Then by the mean value theorem, we find
$x^{\varepsilon} \in (\gamma^{\varepsilon}(0,-s_1-a_1(\varepsilon),t),
\gamma^{\varepsilon}(0,-s_1,t))$ such that
\[
    v_x^{\varepsilon}(t,x^{\varepsilon})
\ge \frac{\kappa_1C_1}{4 \exp(-2/C_2) a_1(\varepsilon)}
\cdot \frac{1}{\varepsilon^{-(t+s_1+a_1(\varepsilon))/C_2}}.
\]
Since $U = V_x \in \mathscr{G}(\mathbb{R}^2)$, this means that
$\{(t,t) \mid t \le -s_1\} \subset
\operatorname{sing\,supp}_{\mathscr{G}_{\rm log}^{\infty}} U$.
Similarly, we can see that $\{(t,t) \mid t \le -s_2\} \subset
 \operatorname{sing\,supp}_{\mathscr{G}_{\rm log}^{\infty}} U$.
Therefore, $\{(t,t) \mid t \le \max\{-s_1,-s_2\}\} \subset
 \operatorname{sing\,supp}_{\mathscr{G}_{\rm log}^{\infty}} U$.
Thus, \eqref{eqn:singsupp3} follows.
\end{proof}

\begin{remark} \rm
In Theorem \ref{thm:4}, when $s_1 = s_2$ and $\kappa_1 = -\kappa_2
\ne 0$, the solution $U \in \mathscr{G}(\mathbb{R}^2)$ of problem
\eqref{eqn:generalized hyperbolic1} admits a distributional
shadow on $(-s_1,\infty) \times \mathbb{R}$, which is given by
$u(t,x) = \kappa_1\delta(x+s_1) + \kappa_2\delta(x-2t-s_1)$.
Since, for any $\kappa_1$, $\kappa_2$ such that $\kappa_1 =
-\kappa_2 \ne 0$, this distribution $u$ satisfies problem
\eqref{eqn:hyperbolic1} with initial data $0$ at $t = -s_1$, it
follows that there exist infinitely many different distributional
solutions with initial data $0$ at $t = -s_1$. Thus, Theorem
\ref{thm:4} means that, in the setting of Colombeau's theory,
these distributional solutions with initial data $0$ at $t=-s_1$
can be regarded as generalized solutions with different initial
data, as in Theorem \ref{thm:1}.
\end{remark}

As in Step 2 of the proof of Theorem \ref{thm:2-1}, we can use the
mean value theorem repeatedly in the last part of the proof of
Theorem \ref{thm:4} to get the following equality on the
$\mathscr{G}^{\infty}$-singular support of the solution $U$.
Hence, we see that, even if $U_0 \in
\mathscr{G}^{\infty}(\mathbb{R})$, the singularity in
$\mathscr{G}^{\infty}$ occurs suddenly when the propagation of
singularities is observed backward in time.

\begin{theorem}\label{thm:4-1}
Under the same assumption as in Theorem \ref{thm:4},  if $U_0 \in
\mathscr{G}^{\infty}(\mathbb{R})$, then
\[
    \operatorname{sing\,supp}_{\mathscr{G}^{\infty}}
 U = \{(t,t) \mid t \le \max\{-s_1,-s_2\}\}.
\]
\end{theorem}

\subsection*{Acknowledgments}
The author expresses his most heartfelt thanks
to G\"{u}nther H\"{o}rmann for the warm hospitality and
valuable discussions during his visit to the
 Fakult\"{a}t f\"{u}r Mathematik, Universit\"{a}t Wien from
October 2, 2009, to March 31, 2011.

\begin{thebibliography}{00}

\bibitem{biagioni} H. A. Biagioni.
\emph{A nonlinear theory of generalized functions}.
Lect. Notes Math. 1421. Springer-Verlag, Berlin, 1990.

\bibitem{colombeau1} J. F. Colombeau.
\emph{New generalized functions and multiplication of distributions}.
 North-Holland Math. Stud. 84. North-Holland, Amsterdam, 1984.

\bibitem{colombeau2} J. F. Colombeau.
\emph{Elementary introduction to new generalized functions}.
North-Holland Math. Stud. 113. North-Holland, Amsterdam, 1985.

\bibitem{colombeau and heibig} J. F. Colombeau and A. Heibig.
Generalized solutions to Cauchy problems.
\emph{Monatsh. Math.}, 117:33--49, 1994.

\bibitem{deguchi1} H. Deguchi.
Generalized solutions of semilinear parabolic equations.
 \emph{Monatsh. Math.}, 146:279--294, 2005.

\bibitem{deguchi2} H. Deguchi.
Generalized solutions of parabolic initial value problems with
discontinuous nonlinearities.
\emph{J. Math. Anal. Appl.}, 323:583--596, 2006.

\bibitem{garetto} C. Garetto and G. H\"{o}rmann.
On duality theory and pseudodifferential techniques for
Colombeau algebras: generalized delta functionals,
kernels and wave front sets.
\emph{Bull. Cl. Sci. Math. Nat. Sci. Math.}, 31:115--136, 2006.

\bibitem{grosser} M. Grosser, M. Kunzinger, M. Oberguggenberger,
and R. Steinbauer.
\emph{Geometric theory of generalized functions with applications
to general relativity}. Mathematics and its Applications 537.
Kluwer Acad. Publ., Dordrecht, 2001.

\bibitem{hoermann1} G. H\"{o}rmann and M. V. de Hoop.
Microlocal analysis and global solutions of some hyperbolic
equations with discontinuous coefficients.
\emph{Acta Appl. Math.}, 67:173--224, 2001.

\bibitem{hoermann2} G. H\"{o}rmann.
First-order hyperbolic pseudodifferential equations with
generalized symbols. \emph{J. Math. Anal. Appl.}, 293:40--56, 2004.

\bibitem{hurd} A. E. Hurd and D. H. Sattinger.
Questions of existence and uniqueness for hyperbolic equations
with discontinuous coefficients.
\emph{Trans. Amer. Math. Soc.}, 132:159--174, 1968.

\bibitem{lafon} F. Lafon and M. Oberguggenberger.
Generalized solutions to symmetric hyperbolic systems
with discontinuous coefficients: the multidimensional case.
\emph{J. Math. Anal. Appl.}, 160:93--106, 1991.

\bibitem{oberguggenberger1988} M. Oberguggenberger.
Hyperbolic systems with discontinuous coefficients:
examples. In: B. Stankovi\'{c}, E. Pap, S. Pilipovi\'{c},
and V. S. Vladimirov (Eds.), Generalized functions,
convergence structures, and their applications, pp. 257--266,
Plenum, New York, 1988.

\bibitem{oberguggenberger1989} M. Oberguggenberger.
Hyperbolic systems with discontinuous coefficients:
generalized solutions and a transmission problem in acoustics.
\emph{J. Math. Anal. Appl.}, 142:452--467, 1989.

\bibitem{oberguggenberger1992} M. Oberguggenberger.
\emph{Multiplication of distributions and applications to partial
differential equations}. Pitman Research Notes Math. 259.
Longman Scientific \& Technical, Harlow, 1992.

\bibitem{oberguggenberger2008} M. Oberguggenberger.
Hyperbolic systems with discontinuous coefficients:
 generalized wavefront set. In: L. Rodino and M. W. Wong (Eds.),
 New developments in pseudo-differential operators, pp. 117--136,
Birkh\"{a}user, Basel, 2009.

\end{thebibliography}

\end{document}
