\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2017 (2017), No. 248, pp. 1--9.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu}
\thanks{\copyright 2017 Texas State University.}
\vspace{8mm}}

\begin{document}
\title[\hfilneg EJDE-2017/248\hfil Quadratic systems in algebras]
{Remarks on second-order quadratic systems in algebras}

\author[A. Sagle, K. Schmitt \hfil EJDE-2017/248\hfilneg]
{Art Sagle, Klaus Schmitt}

\address{Art Sagle \newline
Department of Mathematics,
University of Hawaii-Hilo, Hilo, HI 96720, USA}
\email{asagle@msn.com}

\address{Klaus Schmitt \newline
Department of Mathematics,
University of Utah,
155 South 1400 East,
Salt Lake City, UT 84112, USA}
\email{schmitt@math.utah.edu}

\thanks{Submitted  April 14, 2017. Published October 6, 2017.}
\subjclass[2010]{34B45, 34J60, 34J65}
\keywords{Quadratic systems; ordinary differential equations;  algebras; 
\hfill\break\indent derivations; periodic motions}

\begin{abstract}
 This paper is an addendum to our earlier paper \cite{sagle2},
 where a systematic study of quadratic systems of second order ordinary
 differential equations defined in commutative algebras was presented.
 Here we concentrate on special solutions and energy considerations of
 some quadratic systems defined in algebras which need not be commutative,
 however, we shall throughout assume the algebra to be associative.
 We here also give a positive answer to an open question, concerning periodic
 motions of such systems, posed in our earlier paper.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{corollary}[theorem]{Corollary}
\allowdisplaybreaks


\section{Introduction}

Let $\mathbb A$ be a  finite dimensional normed vector space over the field 
of real or complex numbers. For
\[
X: (a,b) \subseteq (-\infty, \infty )\to \mathbb A,
\]
we write, as usual,
\[
\dot X:=\frac{dX}{dt},~\ddot X:=\frac{d\dot X}{dt}.
\]
Let us assume that $\mathbb A$ is an algebra, i.e. there is a multiplication 
defined in $\mathbb A$, denoted by  juxtaposition
\begin{gather*}
\mathbb A\times \mathbb A\mapsto \mathbb A, \\
(X,Y)\mapsto XY, \quad \forall X,Y\in \mathbb A,
\end{gather*}
which is bilinear and continuous, making it right and left distributive 
with respect to addition, i.e.
\[
(X+Y)Z=XZ+YZ,~X(Y+Z)=XY+XZ, \quad \forall X,Y,Z\in \mathbb A,
\]
and homogeneous of degree 1 in each variable, i.e., for all scalars
 $\lambda$,
\[
(\lambda X)(Y)=\lambda (XY),~ X(\lambda Y)=\lambda (XY), \quad
\forall X,Y\in \mathbb A.
\]
We shall also assume that $\mathbb A$ is an associative algebra, i.e.
\[
(XY)Z=X(YZ), \quad \forall X,Y,Z\in \mathbb A.
\]
A second-order quadratic differential equation on $\mathbb A$ is of the form
\[
\ddot X(t)\pm XX=:\ddot X(t)\pm X^2=0.
\]
It follows from standard existence proofs in the theory of ordinary 
differential equations, that initial value problems of the form
\[
\ddot X(t)\pm X^2=0, \quad X(0)=A,\quad \dot X(0)=B,
\]
are uniquely solvable for all $A,B\in \mathbb A$, and solutions are extendable
 to maximal intervals of existence (cf. \cite{hartman,hirsch}).

As pointed out in \cite{sagle2},
 differential equations in algebras have been studied extensively 
in recent years (see e.g. \cite{kinyon,markus,sagle1,walcher}).

Motivating examples are given by the H\'enon--Heiles system 
\cite{hale,tabor} and by elementary problems such as  given by the equation
\begin{equation} \label{a}
\ddot X+aX^2=0
\end{equation}
in the algebra $\mathbb R$ of real numbers.  By a change in time scale,
 this equation is no more general than
\begin{equation} \label{1}
\ddot X\pm X^2=0,
\end{equation}
where $+$ is chosen in case $a$ is a positive constant and $-$ in case $a$ 
is negative.
This equation has first integrals given by
\[
3\dot X^2\pm 2X^3=k,
\]
where $k$ is a constant, the solution of which may be analyzed using phase 
plane methods (or direct integration)
\cite{hartman,hirsch,tabor}.  For a commutative algebra (i.e. multiplication 
is commutative) $\mathbb A$, a similar calculation leads to first order
nonlinear equations (see \cite{sagle2}).
It is the purpose of this paper to supplement the results of \cite{sagle2}
 by several observations concerning special solutions and {\it energies} 
associated with equation \eqref{1}, where the equations live in the  given 
algebra $\mathbb A$. 

Also, as follows from the considerations to come, the abstract treatment 
of the two equations is similar in both cases, and hence we shall restrict 
our attention to the case of the equation
\begin{equation} \label{+}
\ddot X+X^2=0,
\end{equation}
and obtain results, mutatis mutandis, for the other.

\section{Some observations}

Let $D:\mathbb A\to \mathbb A$
be a bounded derivation (see \cite{sagle}),  then, by definition, $D$ 
is a bounded linear map, which also satisfies
\[
D(XY)=D(X)Y+XD(Y), \quad \forall X,Y \in \mathbb A.
\]
The set of all bounded derivations on an algebra $\mathbb A$, denoted
 by $\mathbb D$, is known to be an algebra, as well, where multiplication 
is defined by the Lie bracket, i.e.
\[
[D_1,D_2]:=D_1D_2-D_2D_1,\quad \forall D_1,D_2\in \mathbb D,
\]
and $D_iD_j$ is the composition of $D_i$ with $D_j$.

It then follows immediately (see \cite{sagle}) that $e^{tD}$ 
(given by the power series) is an automorphism of $\mathbb A$, i.e.
\[
e^{tD}(XY)=e^{tD}(X)e^{tD}(Y), \quad \forall X,Y\in \mathbb A, \;
\forall t\in \mathbb R.
\]
This observation is crucial for most of our considerations to follow and we 
shall present here a short proof, based on the existence-uniqueness 
principle for linear differential equations; to this end, let us denote by
\[
Z(t):=e^{tD}(XY),\quad W(t):= e^{tD}(X)e^{tD}(Y).
\]
Then, since the linear map $D$ commutes with its exponential $e^{tD}$, 
and since $D$ is a derivation, we obtain, by differentiation 
(note that the product rule of differentiation prevails!)
\begin{gather*}
\dot Z(t)=De^{tD}(XY)=DZ(t),\\
\dot W(t)= De^{tD}(X)e^{tD}(Y)+e^{tD}(X)De^{tD}(Y), \\
Z(0)=XY=W(0).
\end{gather*}
But
\begin{align*}
&De^{tD}(X)e^{tD}(Y)+e^{tD}(X)De^{tD}(Y)\\
&=e^{tD}( D(X)e^{tD}(Y)+XDe^{tD}(Y))\\
&=e^{tD}D(Xe^{tD}Y)\\
&= D(e^{tD} (X)e^{tD}(Y))\\
&=DW(t).
\end{align*}
Hence, both $Z$ and $W$ satisfy the same (linear) differential equation 
and the same initial conditions, and hence must be equal by the uniqueness theorem.

If we consider the differential equation
\begin{equation} \label{2}
\ddot X +X^2=0
\end{equation}
in the algebra $\mathbb A$, and $D$ is a bounded derivation on $\mathbb A$, 
we have the following proposition.

\begin{proposition} \label{pro}
$X(t):=e^{tD}P$, is the solution of \eqref{2} with
\[
X(0)=P,\quad \dot X(0)=DP
\]
if and only if,
\begin{equation} \label{P}
D^2P+P^2=0.
\end{equation}
\end{proposition}

\begin{proof}
For $X(t)$, as given above,  we compute
\[
\ddot X +X^2=D^2e^{tD}P+e^{tD}Pe^{tD}P=e^{tD}(D^2P+P^2),
\]
since $e^{tD}$ is an automorphism and $D$ and $e^{tD}$ commute. 
Since $e^{tD}$ is nonsingular, the result follows.
\end{proof}

\section{A first special case}

At this point it is instructive to consider examples of derivations on associative,
but noncommutative, algebras $\mathbb A$.
For $\mathbb A$, as given, define
$D:\mathbb A\to \mathbb A$ by
\begin{equation} \label{A}
D(X):=AX-XA,
\end{equation}
where $A\in \mathbb A$ is a given nonzero element. We have the following 
proposition, which easily follows from the definitions and the fact that 
$\mathbb A$ is associative.

\begin{proposition}\label{pro1}
Let $\mathbb A$ be an associative algebra and let $A\in \mathbb A$ be given. 
Then $D$, defined by \eqref{A}, is a bounded derivation on $\mathbb A$.
\end{proposition}

Thus, if $D$ is defined by \eqref{A},  equation \eqref{P} becomes
\[
A(AP)-A(PA)-(AP)A+(PA)A +P^2=0,
\]
and, since multiplication is associative
\begin{equation} \label{B}
A^2P-2APA+PA^2+P^2=0;
\end{equation}
in any case, the above reduces to the study of the equation
\begin{equation} \label{L}
L(P) +P^2=0
\end{equation}
in the algebra $\mathbb A$, where $L$ is a bounded linear map
$L:\mathbb A\to \mathbb A$.

\begin{remark}\rm
While it cannot be asserted that this equation always has a nontrivial solution $P$, 
many particular cases can be constructed. For example,  if $\mathbb A$ is the 
algebra of $n\times n$ matrices,  with respect to the usual multiplication
(also for the so-called circle and bracket multiplication), then all derivations 
are of the form \eqref{A} and hence, if $n>1$, nonzero elements $A$ exist for 
which this equation has nontrivial solutions. It is therefore of interest to 
seek such exponential solutions and study some of their properties. 
We note that equation \eqref{L}, and more generally equation \eqref{P}, 
may be analyzed using the fact that $L$ and $D^2$ are bounded linear maps, 
whose kernels and cokernels may be determined and thus both equations may 
be written as a system of coupled equations for which sufficient conditions 
for the nontrivial solvability may be obtained. Furthermore, if it is the 
case that equation \eqref{L} is given by equation \eqref{A}, we may think 
of $A\in \mathbb A$ as a parameter and, since \eqref{L} has $P=0$ as a solution 
for all such $A$, one may apply the method of Lyapunov-Schmidt, at those points 
$A$, where $L$ is singular and seek nontrivial solution branches $P=P(A)$ 
via bifurcation theory. See, for example \cite{deimling}.
\end{remark}

The interested reader may easily construct examples of equations \eqref{B} 
where nontrivial solutions exist. For example, in the case that the algebra 
$\mathbb A$ consists of the $2\times 2$ matrices with the usual matrix 
multiplication, the matrix
\[
A=\begin{pmatrix}
0&0\\
0&1
\end{pmatrix}
\]
will furnish such.

\section{Periodic motions}

Assuming that again $D$ is a derivation on the algebra $\mathbb A$ and that 
$P\in \mathbb A $ solves the equation \eqref{P} nontrivially we may ask 
whether the motion
\[
X(t) = e^{Dt}P
\]
is a periodic motion. This will be the case, whenever there exists $T>0$  
and $P\in \mathbb A$ such that
\[
e^{D(t+T)}P=e^{Dt}P, \quad t\in \mathbb R,
\]
or equivalently whenever
\[
e^{DT}P=P,
\]
i.e., whenever $T$ is such that the operator $e^{DT}:\mathbb A \to \mathbb A$ 
has $1$ as an eigenvalue with associated  eigenvector $P$. We shall provide here such an example of a three dimensional commutative algebra over the complex field.
Let us assume that the commutative algebra $\mathbb A$ is spanned by the vectors
$E_1$, $E_2$, $E_3$  satisfying the multiplication rule:
\[
E_1E_i=E_i, \; i=1,2,3,\quad  E_2E_3=E_1,\quad E_2^2=E_3^2=0.
\]
 If then
\[
X=x_1E_1+x_2E_2+x_3E_3, \quad Y=y_1E_1+y_2E_2+y_3E_3,
\] 
we obtain
\[
XY=(x_1y_1+x_2y_3+x_3y_2)E_1+(x_2y_1+x_1y_2)E_2+(x_3y_1+x_1y_3)E_3.
\]
We next define the linear mapping, relative to the given basis,
$D:\mathbb A\to \mathbb A$, by the matrix
\[
D:=\begin{pmatrix}
0&0&0\\
0&\lambda &0\\
0&0&-\lambda
\end{pmatrix},
\]
where $\lambda $ is a scalar. An easy computation shows that $D$, 
so defined is a derivation whose eigenvalues are 
$0$, $\lambda$, $-\lambda $ with associated eigenvectors given by the 
basis elements $E_1$, $E_2$, $E_3$.
Equation \eqref{P} becomes (with $P=\sum _{i=1}^3p_iE_i$)
\begin{equation} \label{N}
\begin{gathered}
p_1^2+2p_2p_3=0\\
\lambda^2p_2+2p_1p_2=0\\
\lambda^2p_3+2p_1p_3=0,
\end{gathered}
\end{equation}
the nontrivial solutions of which are 
\[
P=-\frac {\lambda ^2}{2}E_1 + p_2E_2-\frac{\lambda ^4}{8p_2}E_3,
\]
and
\[
P=-\frac { \lambda ^2}{2}E_1  -\frac{\lambda ^4}{8p_3}E_2+p_3E_3.
\]
These considerations imply that if we choose $\lambda =i\omega $ then 
the corresponding exponential solutions will be periodic of period 
$T=\frac {2\pi }{\omega}$, and, in general have the form
\[
X(t) = -\frac {\lambda ^2}{2}E_1 + p_2e^{\lambda t}E_2
-\frac{\lambda ^4}{8p_2}e^{-\lambda t} E_3, \quad p_2\in \mathbb C\setminus \{0\}
\]
and
\[
X(t) = -\frac {\lambda ^2}{2}E_1 -\frac{\lambda ^4}{8p_3}e^{\lambda t} E_2 
+ p_3e^{- \lambda t}E_3, \quad p_3\in \mathbb C\setminus \{0\}.
\]

\begin{remark}\rm
 Note that the above example provides a partial answer to 
\cite[Conjecture 5.1]{sagle2}.
\end{remark}

\section{Energy considerations}

If $X(t)$ is a solution of \eqref{2} we define (motivated by the case of 
$\mathbb A=\mathbb R$) the energy of the solution
\begin{equation} \label{ener}
\mathbb E(X,\dot X):=3 (\dot X)^2 +2X^3.
\end{equation}
We have the following proposition.

\begin{proposition}\label{ener1}
Let $D$ be a derivation in $\mathbb A$ and let $X(t)=e^{tD} P$ be a 
solution of \eqref{2}. Let $\mathbb E$, given by \eqref{ener}, 
be the associated energy.
Then
\[ 
D\mathbb E\equiv 0,\quad\text{i.e. } \mathbb E\in \ker D,
\] 
if and only if
\[
P^2D(P)-2PD(P)P+D(P)P^2=0.
\]
\end{proposition}

To see the above, we compute
\begin{equation} \label{e1}
D\mathbb E (e^{tD}P, De^{tD}P)=-e^{tD}(P^2D(P)-2PD(P)P+D(P)P^2),
\end{equation}
using the fact that $X(t)$ is a solution of \eqref{2}, that $D$ is a
 derivation and that $e^{tD}$ is an automorphism.
In fact, for such solutions,  the above calculations show that
\begin{equation} \label{e2}
D\mathbb E(X,\dot X)=-(X^2D(X)-2XD(X)X+D(X)X^2 ).
\end{equation}

\begin{remark} \rm
If $\mathbb A$ is a commutative algebra, then $D(\mathbb E )=0$ 
for all such exponential solutions.
\end{remark}

\begin{corollary}
Let $D$ be a derivation in the associative algebra $\mathbb A$ and 
let $X(t)=e^{tD} P$ be a solution of \eqref{1}. Let $\mathbb E$ be 
the associated energy, i.e.
\[
\mathbb E:=3\dot X^2+2 X^3.
\]
Then $D\mathbb E\equiv 0$, whenever
\[
PD(P)=D(P)P.
\]
And, if $D$ is given by \eqref{A}, this is the case,  whenever
\[
2APA=P^2A+AP^2,
\]
and in particular, if
\[
D(P)=AP-PA=0.
\]
\end{corollary}

To prove the above corollary, use formula \eqref{e1}.
We may summarize the above in the following theorem.

\begin{theorem} \label{thm5.4}
Let $\mathbb A$ be an associative algebra as above and let $A\in \mathbb A$ 
be given, defining the derivation $D(P)=AP-PA$. Then $X(t)=e^{Dt}P$ 
is a solution of \eqref{2} satisfying $D\mathbb E\equiv 0$, where $\mathbb E$
is the energy given by \eqref{ener}, whenever
\[
P^3A-3P(PA-AP)P=0.
\]
\end{theorem}

We noted above that solutions  $X$ of \eqref{2} which are given by 
$X(t)=e^{Dt}P$ satisfy also the equation
 \[
  \dot X=D(X).
 \]
 We may then compute
\[
\frac{d}{dt}(XD(X)-D(X)X),
\]
where this expression is given by
\[
\dot X  D(X)+X\frac{d}{dt}(D(X))-\frac{d}{dt}(D(X))X -D(X)\dot X;
\]
we also have
\[
\frac{d}{dt}(D(X))=\ddot X,
\]
and
\[ \ddot X=-X^2.
\]
Hence we obtain
\[
\frac{d}{dt}(XD(X)-D(X)X)\equiv 0
\]
and thus
\begin{align*}
 XD(X)-D(X)X&\equiv  \text{constant}\\
 &= X(0)D(X(0))-D(X(0))X(0)\\
 &= PD(P)-D(P)P.
 \end{align*}
Hence, it follows from \eqref{e2} that $D\mathbb E \equiv 0$, 
whenever the initial conditions $P$ and $D(P)$ commute. Of course, 
this may also easily be deduced from the fact that such solutions 
are given as exponentials.

\begin{remark} \rm
We note here also the very general fact that in an associative algebra, 
if $X$ is a solution of equation \eqref{2} with
\[
X(0)\dot X(0) = \dot X(0) X(0),
\]
then
\[
X(t)\dot X(t) = \dot X(t) X(t),
\]
for all $t$ in the interval of existence of the solution.
\end{remark}

\section{More on energies}
If we are given a nondegenerate, symmetric, bilinear form
\[
C:\mathbb A\times \mathbb A\to \mathbb R,
\]
we shall measure the associativity of $\mathbb A$ through the metric 
induced by the form $C$ (the $C$ {\it associator}) as
\[
\gamma (X,Y,Z):=
C(XY,Z)-C(X,YZ),\quad \forall X,Y,Z\in \mathbb A
\]
and use it to measure the system's energy.  
In particular, $C$ is called {\it associative}, whenever $\gamma (X,Y,Z)=0$
for all $X,Y,Z\in \mathbb A$ and nondegenerate, whenever $C(U,V)=0$
for all $V\in \mathbb A$, implies $U=0$. In particular, Jordan algebras 
of symmetric matrices have such associative forms (see, e.g., \cite{sagle}) given by
\[
C(U,V):=\operatorname{trace}L(UV),
\] 
where the {\it left multiplication}
\[
L(Z):\mathbb A\to \mathbb A,~ X\mapsto ZX.
\]
For such $L(X)$ its {\it adjoint} $L(X)^C$ relative to the form $C$ is given by
\[
C(L(X)U,V)=C(U,L(X)^CV),\quad \forall U,V\in \mathbb A.
\]
One calls $L(X)$ symmetric whenever $L(X)=L(X)^C$, and a simple calculation 
shows that if $L(X)$ is symmetric then the form $C$ must be associative. 
Furthermore we may easily show that
\[
\frac{d}{dt}C(X,X^2) =3C(\dot X, X^2).
\]
If now $X$ is a solution of \eqref{2},
then
\begin{align*}
0&= C(\dot X,0)=C(\dot X, \ddot X+X^2)\\
&= \frac{d}{dt} (\frac{1}{2} C(\dot X, \dot X) +\frac{1}{3}C(X,X^2)).
\end{align*}
Thus the energy
\[
E:= \frac{1}{2} C(\dot X, \dot X) +\frac{1}{3}C(X,X^2)
\]
is constant, say $E\equiv E_0$.
These observations together with some simple calculations
 (to follow) yield the following result.

\begin{proposition}
Let $C$ be a nondegenerate bilinear form on $\mathbb A$. Then the energy
\[
E:= \frac{1}{2} C(\dot X, \dot X) +\frac{1}{3}C(X,X^2)
\]
is constant on the solution curves of \eqref{1} whenever $C$ is an 
associative form. Conversely, if the energy $E$ is constant along 
solution curves of \eqref{1} then
\[
\gamma (X,X,\dot X)=C(X^2,\dot X)-C(X,X\dot X)\equiv 0,
\]
i.e. $C$ is left associative along solution curves.
\end{proposition}

The first part of the proposition was established above. 
The second part follows from the following calculations:
\begin{align*}
\frac{dE}{dt}
&= C(\dot X, \ddot X)+\frac{1}{3}(C(\dot X, X^2)+C( X, 2X\dot X))\\
&= C(\dot X, - X^2)+\frac{1}{3}(C(\dot X, X^2)+2C( X, X\dot X))\\
&= -\frac{2}{3}(C(X^2,\dot X)-C(X,X\dot X))\\
&= -\frac{2}{3}\gamma (X,X,\dot X)
\end{align*}
and hence, if $E$ is constant along solution curves, $\gamma (X,X,\dot X)=0$.

\section{More remarks and extensions}

(1) From what has been discussed above, we note that considering equation \eqref{2} 
in the algebra $\mathbb A$ subject to  initial conditions
\[
X(0)=P,\quad \dot X(0)=D(P),
\] 
where $D$ is a bounded derivation on $\mathbb A, $ and where $(P,D)$ 
lives on the manifold
\[
\mathbb M:=\{(P,D) \in \mathbb A\times \mathbb D: D^2(P)+P^2=0\},
\]
is simply equivalent to the study of the initial value problem
\[
\dot X=D(X),\quad X(0)=P,
\]
for $(P,D)$ in this manifold.

This remark lets us immediately extend the above considerations to the more
 general problems
\[
\ddot X + Q(X)=0, ~X(0)=P, ~\dot X(0)=D(P),
\]
where $Q:\mathbb A\to \mathbb A$
is a polynomial with scalar coefficients and  $(P,D)$ is in the manifold
\[
\mathbb M:=\{(P,D) \in \mathbb A\times \mathbb D: D^2(P)+Q(P)=0\},
\]
or even more general equations of higher order and/or containing terms 
of powers of $\dot X$.

(2) Let us consider the case that
\[
X:\mathbb R^n\to \mathbb A,\quad  x:=(x_1,x_2,\dots, x_n)\mapsto X(x)\in \mathbb A
\]
and $L$ is a second order differential operator (for given linear maps 
$l_{i,j}:\mathbb A\to \mathbb A$), given by
\[
L:=\sum _{i,j} l_{i,j}\frac {\partial ^2}{{\partial x_i}{\partial x_j}},\quad
i,j=1,2,\dots, n.
\]
Let us consider the differential equation
\begin{equation} \label{7.1}
LX+X^2=0
\end{equation}
in the algebra $\mathbb A$, and let $D$ be a derivation on $\mathbb A$. 
Then for any $k\in \mathbb R^n$ the mapping
\[
e^{{(k\cdot x)}D}:\mathbb A\to \mathbb A
\]
($k\cdot x$ is the scalar product of $k$ and $x$) is an endomorphism and 
we may use arguments as used before to find special solutions of \eqref{7.1}
 which are of the form
\[
X(x)=e^{{(k\cdot x)}D}P, \quad P\in \mathbb A.
\] 
The calculations  will be straightforward.


\begin{thebibliography}{00}

\bibitem{deimling} K. Deimling;
 \emph{Nonlinear Functional Analysis}, Springer Verlag, Berlin, 1985.

\bibitem{hale} J. Hale, H. Ko\c{c}ak;
 \emph{Dynamics and Bifurcations}, Springer Verlag, New York, 1991.

\bibitem{hartman}  P. Hartman;
 \emph{Ordinary {D}ifferential {E}quations}, Wiley, New York, 1964.

\bibitem{hirsch}  M. Hirsch, S. Smale;
 \emph{Differential {E}quations, {D}ynamical {S}ystems, and {L}inear {A}lgebra}, 
Academic Press, New York, 1974.

\bibitem{kinyon}  M. K. Kinyon, A. A. Sagle;
 \emph{Quadratic dynamical systems}, J. Differential Equations, 117 (1995), 
pp. 67--126.

\bibitem{markus}  L. Markus;
 \emph{Quadratic differential equations and nonassociative algebras}, 
Contributions to the Theory of Nonlinear Oscillations 
(L. Ceasri, J. LaSalle, S. Lefschetz, editors), 5 (1960), 185--213.

\bibitem{sagle1}  A. Sagle, K. Schmitt;
 \emph{Nonassociative algebras and some quadratic differential systems},
 WSSIA,  3 (1994), 523-535.

\bibitem{sagle2}  A. Sagle, K. Schmitt;
 \emph{On second-order quadratic  systems and algebras},
 Differential and Integral Equations,  24 (2011), 877-894.

\bibitem{sagle}  A. A. Sagle, R. Walde;
\emph{Introduction to {L}ie {G}roups and {A}lgebras},
 Academic Press, New York, 1973.

\bibitem{tabor}  M. Tabor;
 \emph{Chaos and {I}ntegrability in {N}onlinear {D}ynamics, {A}n {I}ntroduction},
 J. Wiley, New York, 1989.

\bibitem{walcher}  S. Walcher;
 \emph{Algebras and {D}ifferential {E}quations}, Hadronic Press,
Palm Harbor, 1991.

\end{thebibliography}

\end{document}
