\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2013 (2013), No. 187, pp. 1--14.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu}
\thanks{\copyright 2013 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2013/187\hfil The role of Riemann generalized derivative]
{The role of Riemann generalized derivative in the
study of qualitative properties of functions}

\author[S. R\u{a}dulescu, P. Alexandrescu, D.-O. Alexandrescu \hfil EJDE-2013/187\hfilneg]
{Sorin R\u{a}dulescu, Petru\c{s} Alexandrescu, Diana-Olimpia Alexandrescu}  % in alphabetical order

\address{Sorin R\u{a}dulescu \newline
  Institute of Mathematical Statistics and Applied Mathematics,
  Calea 13 Septembrie, no. 13, Bucharest 5, RO-050711, Romania}
\email{xsradulescu@gmail.com}

\address{Petru\c{s} Alexandrescu \newline
  Institute of sociology, Casa Academiei Rom\^ane,
  Calea 13 Septembrie, no. 13, Bucharest 5, RO-050711, Romania}
\email{alexandrescu\_petrus@yahoo.com}

\address{Diana-Olimpia Alexandrescu \newline
 Department of Mathematics, University of Craiova,
 200585 Craiova, Romania}
\email{alexandrescudiana@yahoo.com}

\thanks{Submitted April 23, 2013. Published August 23, 2013.}
\subjclass[2000]{26A24, 28A15}
\keywords{Riemann generalized derivative; $(\sigma,\tau)$ differentiable functions;
\hfill\break\indent monotonicity; convexity; symmetric derivative; Schwarz derivative;
 Darboux property}

\begin{abstract}
  Marshal Ash \cite{ref3} introduced the concept of $(\sigma,\tau)$ 
  differentiable functions and studied the Riemann generalized derivatives.
  In this article we study the convexity and monotonicity of $(\sigma,\tau)$
  differentiable functions, using results by Hincin,  Humke and Laczkovich,
  and using the Riemann generalized derivative.
  We give conditions such that the classic properties of differentiable
  functions hold also for $(\sigma,\tau)$ differentiable functions.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{remark}[theorem]{Remark}
\allowdisplaybreaks


\section{Introduction} 
\subsection{Riemann differentiable functions}

In this article, we study properties of generalized Riemann differentiable 
functions (see \cite{ref6}-\cite{ref3}, \cite{ref13}) and the derivatives 
of first and second order which are well known as $(\sigma,\tau)$ 
differentiable functions (see \cite{ref9}-\cite{ref12},\cite{ref13}).
In the first part we give generalizations of classical Fermat, Rolle, 
Darboux and Lagrange theorems. 
There exist functions such that the Riemann generalized derivative is 
positive on an interval, but the function is not increasing. 

We establish sufficient conditions on the system of vectors $(\sigma,\tau)$ 
that define the generalized Riemann derivative, and which imply that any 
generalized Riemann differentiable function with positive derivative is 
increasing. Therefore we obtain a generalization of A. Hincin's result 
(see \cite{ref72}). We also give conditions under which an increasing 
function on an interval has a positive derivative.
In general the generalized Riemann derivative does not have the Darboux 
intermediate value property. To emphasize this, we give as example the 
function $f(x)=|x|$ to which we apply the symmetric derivative.

Theorem 2.7 establishes conditions under which the generalized Riemann 
derivative of a continuous increasing function on an interval has 
the Darboux property. 
Further, we  study the convexity of a function with the aid of 
generalized Riemann derivative of second order. We obtain necessary 
and sufficient conditions in which a generalized Riemann function is convex. 
Theorem 4.12 generalizes the result of  Zygmund \cite{ref153}.
Theorem 4.13 relaxes the conditions from Theorem 4.12, and 
Theorem 4.14 establishes conditions under which a function that 
has generalized Riemann derivative of second order, 
of length $n$, null on an open interval, is linear. 

\subsection{$(\sigma,\tau)$ differentiable functions}
In \cite{ref13}, a new class of generalized Riemann differentiable functions
is defined: the $(\sigma,\tau)$ differentiable functions.
The motivation for this definition is to find conditions on the system 
$(\sigma,\tau)$ in $\mathbb{K}^n\times \mathbb{K}^n$ 
under which a function $f$ is generalized Riemann 
differentiable and a series of theorems hold, 
where we denote $\mathbb{K}=\mathbb{R}$ or $\mathbb{C}$. 
Let $G\subset \mathbb{K}$ be an open subset and let 
$f:G\to \mathbb{K}$ be a function. 
For $p, n\in \mathbb{N}^*, p\le n$, define the set:
\begin{align*}
L(p,n,\mathbb{K}):=\Big\{& (\sigma,\tau)\in\mathbb{K}^{*n}\times\mathbb{K}^n:
 \sum_{k=1}^n\sigma_k\tau_k^j=0, \text{for }j\in\{0,1,\dots ,p-1\},\\
&\text{and } \sum_{k=1}^n\sigma_k \tau_k^p=p!,\; \tau_k\text{ are  distinct }\Big\}.
\end{align*}

 \begin{definition} \label{def2.1} \rm
The function $f$ is $(\sigma,\tau)$-differentiable of order $p$
 at $x$ ($x$ in $G$) if $(\sigma,\tau)\in L(p,n,\mathbb{K})$ and if 
the following limit exists and belongs to $\mathbb{K}$:
\[
\lim_{h\to 0}\frac{1}{h^p} \sum_{k=1}^n\sigma_k f(x+\tau_k h).
\]
If this limit exists, we denote it by $D_p(\sigma,\tau)f(x)$.
\end{definition}

\section{Generalizations of classical theorems for 
$(\sigma,\tau)$-differentiable functions }

\begin{theorem}[Generalized Fermat's theorem] \label{th:1.1}
 Let $I$ be an interval, $f:I\to \mathbb{R}$, and let $x_0\in \operatorname{int} I$ 
be a local extremum point of $f$. Suppose that the following conditions hold:
\begin{itemize}
\item[(i)] $(\sigma,\tau)\in L(1,n,\mathbb{R})$,
\item[(ii)] $\sigma_1>0$, $\sigma_j<0$, $j\in \{2,\dots ,n\}$,
\item[(iii)] $\tau_1=0$,
\item[(iv)] $f$ is $(\sigma,\tau)$-differentiable at $x_0$.
\end{itemize}
Then $D_1(\sigma,\tau)f(x_0)=0$.
\end{theorem}

\begin{proof}
 Let $x_0\in \operatorname{int}I$ be a local maximum point. 
The case when $x_0$ is a local minimum point is analogous. 
There exists $\epsilon>0$ such that $f(x)\ge f(x_0)$, for all 
$x\in (x_0-\epsilon, x_0+\epsilon)\subset I$.
Therefore,
\begin{equation}
\label{eq:1.1}
\sum_{j=1}^n\sigma_jf(x_0+\tau_jh)\le\sum_{j=1}^n\sigma_jf(x_0), 
\quad \forall h\in(-\epsilon_1,\epsilon_1),\; 
\epsilon_1=\frac{\epsilon}{\max_{2\le j\le n}|\tau_j|},
\end{equation}
and further we can write:
\begin{gather} \label{eq:1.2}
\frac{1}{h}\sum_{j=1}^n\sigma_j f(x_0+\tau_jh)\le 0, \quad 
\forall h\in(0,\epsilon_1) \\
\frac{1}{h}\sum_{j=1}^n\sigma_j f(x_0+\tau_jh)\ge0, \quad 
h\in (-\epsilon_1,0). \nonumber
\end{gather}
Letting $h\to 0$ in equations \eqref{eq:1.2} we obtain
$D_1(\sigma,\tau)f(x_0)=0$.
\end{proof}

\begin{theorem}[Generalized Rolle's theorem] \label{th:1.2}
Let $I$ be an interval, $a$, $b\in  I$, $a<b$ and 
$(\sigma,\tau)\in L(1,n,\mathbb{R})$ with the following properties: 
\begin{itemize}
\item[(1)] $\sigma_1>0$, $\sigma_j<0$, $j\in \{2,\dots ,n\}$,
\item[(2)] $\tau_1=0$.
\end{itemize}
Let $f:I\to \mathbb{R}$ such that:
\begin{itemize}
\item[(i)]   $f$ continuous on $[a,b]$,
\item[(ii)]  $f$ is $(\sigma,\tau)$-differentiable on $(a,b)$,
\item[(iii)] $f(a)=f(b)$.
\end{itemize}
Then there exists $c\in(a,b)$ such that
$D_1(\sigma,\tau)f(c)=0$.
\end{theorem}

\begin{proof} 
If the function $f$ is constant on $[a,b]$, the theorem holds, because the 
$(\sigma,\tau)$-derivative of a constant function is zero. 
If the function $f$ is not constant on $[a,b]$, then $f$ attains 
its maximum and minimum, as $f$ is continuous and bounded on a compact set. 
Let $M= \sup_{x\in[a,b]}f(x)$ and $ m=\inf_{x\in[a,b]}f(x) $. 
Suppose that $M>f(a)$. Then there exists $c\in (a,b)$ with $f(c)=M$ 
(if $c\in \{a,b\}$ then $M=f(c)=f(a)=f(b)$ which is a contradiction).
We can now conclude that $c$ is a local maximum and applying Theorem 
\ref{th:1.1} implies that $D_1(\sigma,\tau)f(c)=0$.
 The case $M=f(a)$ implies $m<f(a)$ and the proof is similar.
\end{proof}

\begin{theorem}[Generalized Lagrange's theorem] \label{th:1.3}
 Let $I$ be an interval, $a$, $b\in I$, $a<b$ and the function 
$f:I\to\mathbb{R}$ with the following properties:
\begin{itemize}
\item[(i)] $(\sigma,\tau)\in L(1,n,\mathbb{R})$ such that:
 (1) $\sigma_1>0$, $\sigma_j<0$, $j\in \{2,\dots ,n\}$, and
 (2) $\tau_1=0$,
\item[(ii)] $f$ is continuous on $[a,b]$,
\item[(iii)] $f$ is $(\sigma,\tau)$-differentiable on $(a,b)$.
\end{itemize}
Then there exists $c\in (a,b)$ such that
\[
f(b)-f(a)=(b-a)D_1(\sigma,\tau)f(c)
\]
\end{theorem}

\begin{proof}
We define $g:I\to\mathbb{R}$  as
\[
g(x):=f(x)-\frac{f(b)-f(a)}{b-a}\cdot x.
\]
We observe that $g(a)=g(b)$. Using the properties of the 
$(\sigma,\tau)$-generalized derivative we deduce that $g$ is
 $(\sigma,\tau)$-differentiable and
\[
D_1(\sigma,\tau)g(x)=D_1(\sigma,\tau)f(x)-\frac{f(b)-f(a)}{b-a}.
\]
Applying Theorem \ref{th:1.2} to the function $g$ we obtain that there exists 
$c\in (a,b)$ such that
\begin{equation} \label{eq:1.3}
D_1(\sigma,\tau)g(c)=0,
\end{equation}
which is equivalent to
\[
D_1(\sigma,\tau)f(c)=\frac{f(b)-f(a)}{b-a}.
\]
\end{proof}

\section{Monotonicity versus generalized Riemann derivative}

We first state a series of results about the monotonicity of a function 
studied with the aid of the generalized Riemann derivative of first order. 
These results from Theorems 3.1 and 3.2 belong to Humke and Laczkovich 
(see \cite{ref76}). The results from 3.3 and 3.6 belong to the authors of
this article. 
Let $[a,b]$ be an interval, $f:[a,b]\to \mathbb{R}$ continuous. 
Denote by $G_f^1$ the union of all open intervals on which $f$ 
is increasing and set $F_f^1=[a,b]\setminus G_f^1$.

\begin{theorem} \label{th:2.1}
Let $f$ be a continuous function on $[a,b]$ and $D^1$ be
the Riemann generalized derivative of first order with $n$ terms. 
If $D^1_+f(x)\ge 0$ for all $x\in(a,b)$, then $F_f^1$ is nowhere 
dense and $\underline{f_+'}(x)=-\infty$ on a residual set of $F_f^1$.
\end{theorem}

\begin{corollary} \label{th:2.2}
Let $f:[a,b]\to\mathbb{R}$ continuous. If $D_+^1f\ge0$ and 
$\underline{f_+'}>-\infty$ for all  $x\in(a,b)$, then $f$ is 
increasing on $[a,b]$.
\end{corollary}

\begin{theorem} \label{th:2.3}
Let $D^1$ be the generalized Riemann derivative of first order
 with three terms and  $b_1<b_2<b_3$.

Case 1: Suppose that either:
\[
b_1<0<b_2<b_3\text{ and }a_1>0,a_2<0,a_3>0,
\]
or
\[
b_1<b_2<0<b_3\text{ and }a_1<0,a_2>0,a_3<0.
\]
Then there exists a nonconstant decreasing continuous function
 $f:[a,b]\to\mathbb{R}$, such that
$D_+^1f(x)\ge0, \quad\forall x\in(a,b)$.

Case 2: In all other cases, if $f:[a,b]\to\mathbb{R}$ is continuous 
and $D_+^1f(x)\ge 0$ for all $x\in (a,b)$, then $f$ is increasing.
\end{theorem}

On the one hand, the case $(1,-2,1), (-1,1,4)$ belongs to case 1,
and for this we have no monotonicity theorem corresponding to 
$\underline{D_+^1}$. On the other hand, for the cases
 $(1,-2,1)$, $(-2,-1,1)$ we have the following theorem.

\begin{theorem} \label{th:2.4}
Let $I$ be an interval, $(\sigma,\tau)\in L(1,n,\mathbb{R})$ with the 
following properties:
\begin{itemize}
\item[(i)] $\sigma_1>0$, $\sigma_k<0$, $k\in\{ 2,\dots ,n\}$,
\item[(ii)] $\tau_1>0$, $\tau_k<0$, $k\in \{2,\dots ,n\}$.\\
Let $f:I\to\mathbb{R}$ be continuous, such that:
\item[(iii)] $D_1(\sigma,\tau)f(x)>0$ for all $x\in \operatorname{int}I$.
\end{itemize}
Then $f$ is increasing on $I$.
\end{theorem}

\begin{proof} 
Suppose that there exists $x_1<x_2, (x_1,x_2\in I)$, such that
$f(x_1)>f(x_2)$.
Denote by $t:=\frac{1}{2}[f(x_1)+f(x_2)]$ and define the set:
\[
A:=\{x\in[x_1,x_2]:f(x)<t\}.
\]
Note that $x_1\notin A$ and $x_2\in A$. Let $c=\inf A$. Then 
$c\neq x_1$ and $c\in \overline{A}$. There exists a sequence 
$(h_m)_{m\ge 1}$ of strictly positive numbers that converges to 0, 
such that $c+\tau_1h_m\in A$ and $c+\tau_kh_m\notin A$, $k\in\{2,\dots ,n\}$. 
Then $f(c+\tau_1h_m)< t$ and $f(c+\tau_kh_m)\ge t$, for
 $k\in \{2,\dots ,n\}$. It follows that
\[
\sum_{k=1}^n \sigma_k f(c+\tau_kh_m)<\sum_{k=1}^n\sigma_kt=t\sum_{k=1}^n\sigma_k=0
\]
and hence
\[
\lim_{m\to\infty}\frac{1}{h_m}\sum_{k=1}^n\sigma_k f(c+\tau_k h_m)\le 0,
\]
which is a contradiction.
\end{proof}

 
\subsection*{Remarks} 1. For $ \sigma_1=-\sigma_2=\frac{1}{2}$, 
$\tau_1=-\tau_2=1$, $\sigma_k=\tau_k=0$, $k\in \{3,\dots ,n\}$, 
we get the symmetric derivative of $f$.\\
2. Theorem \ref{th:2.4} extends the result of Hincin for the symmetric derivative 
(see \cite{ref72}).

\begin{theorem}\label{th:2.5} 
Let $I$ be an interval, $f:I\to \mathbb{R}$ continuous, 
$(\sigma,\tau)\in L(1,n,\mathbb{R})$ such that:
\begin{itemize}
\item[(i)] $\sigma_1>0$, $\sigma_k<0$, $k\in \{2,\dots ,n\}$,
\item[(ii)] $\tau_1>0$, $\tau_k<0$, $k\in \{2,\dots ,n\}$,
\item[(iii)] $D_1(\sigma,\tau)f(x)\ge0$ for all $x\in \operatorname{int}I$.
\end{itemize}
Then $f$ is increasing on $I$.
\end{theorem}

\begin{proof} 
For $j\in \mathbb{N}^*$ we consider the function defined by:
\begin{equation}\label{eq:2.5}
f_j(x)=f(x)+\alpha_jx, \quad x\in I
\end{equation}
where $(\alpha_j)_{j\ge 1}$ is a sequence of strictly positive numbers 
converging to zero. On the one hand,
if we apply the differential operator $D_1(\sigma,\tau)$ 
to \eqref{eq:2.5} we obtain
\[
D_1(\sigma,\tau)f_j(x)=D_1(\sigma,\tau)f(x)+D_1(\sigma,\tau)\alpha_jx, 
\quad x\in I.
\]
On the other hand, we have
\begin{align*}
D_1(\sigma,\tau)(\alpha_jx)
&=\alpha_j\cdot D_1(\sigma,\tau)x\\
&=\alpha_j \lim_{h\to 0}\frac{1}{h}\sum_{k=1}^n \sigma_k (x+\tau_kh)\\
&=\alpha_j\lim_{h\to0}\Big[ \frac{x}{h}\sum_{k=1}^n\sigma_k
 +\sum_{k=1}^n\sigma_k\tau_k\Big]=\alpha_j,
\end{align*}
which implies
\[
D_1(\sigma,\tau)f_j(x)>0, \quad\forall x\in \operatorname{int}I.
\]
Applying Theorem \ref{th:2.4} it follows that $f_j$ is increasing on
 $I$ ($j\in \mathbb{N}^*$).
Let $x<y$; then from $f_j(x)<f_j(y)$ we have
\[
f(x)= \lim_{j\to \infty}f_j(x)\le\lim_{j\to \infty}f_j(y)=f(y),
\]
hence $f$ increasing on $I$.
\end{proof}

\begin{theorem} \label{th:2.6}
Let $I$ be an interval, $f:I\to\mathbb{R}$ be continuous, 
$(\sigma,\tau)$ be in $L(1,n,\mathbb{R})$ such that:
\begin{itemize}
\item[(i)] $\sigma_1>0$, $\sigma_k<0$, $k\in\{ 2,\dots ,n\}$,
\item[(ii)] $\tau_1>0$, $\tau_k<0$, $k\in\{ 2,\dots ,n\}$,
\item[(iii)] $D_1(\sigma,\tau)f(x)=0$, for all $x\in I$,
\end{itemize}
then $f$ is constant on $I$.
\end{theorem}

\begin{proof} 
According to Theorem \ref{th:2.5}, $D_1(\sigma,\tau)f(x)=0$ on $I$ implies $f$ 
is increasing on $I$ and 
$D_1(\sigma,\tau)(-f(x))=0$ on $I$ implies $(-f)$ increasing on $I$.
This implies further that $f$ is constant on $I$.
\end{proof}

\begin{theorem}\label{th:2.7}
Let $I$ be an interval, $(\sigma,\tau)\in L(1,n,\mathbb{R})$ and 
$f:I\to \mathbb{R}$. If there exists $\alpha\in \mathbb{R}$ such that 
$\sigma_k(\tau_k-\alpha)\ge 0$ for all $k\in \{1,2,\dots ,n\}$ and 
$f$ is increasing on $I$, then
\[
D_1(\sigma,\tau)f(x)\ge0,\quad \forall x\in \operatorname{int}I.
\]
\end{theorem}

\begin{proof} 
The proof is immediate if we notice that for $x\in \operatorname{int}I$,
\begin{align*}
D_1(\sigma,\tau)f(x)
&=\lim_{h\to 0}\frac{1}{h}\sum_{k=1}^n\sigma_kf(x+\tau_kh)\\
&= \lim_{h\to 0}\frac{1}{h}\sum_{k=1}^n \sigma_k \frac{f(x+\tau_kh)
 -f(x+\alpha h)}{\tau_k-\alpha}(\tau_k-\alpha)\ge 0.
\end{align*}
\end{proof}

We remark that not all the functions $f:I\to\mathbb{R}$ ($I$ an interval) 
which are $(\sigma,\tau)$-differentiable on $I$ have the Darboux intermediate
 value property. Indeed, it is sufficient to consider the function 
$f:I\to\mathbb{R}$, $f(x)=|x|$, to which we apply the symmetric 
derivative $R(1)$, which is a particular case of the derivative
 $D(\sigma,\tau)$. Indeed, for $x\neq0$ we have
\[
R(1)f(x)= \lim_{h\to 0}\frac{|x+h|-|x-h|}{2h}=\operatorname{sgn} x,
\]
which does not have the Darboux property.
Under some conditions, $(\sigma,\tau)$ differentiable functions can 
have the Darboux intermediate value property. We shall see this in the 
following theorem.

\begin{theorem} \label{th:2.8}
Let $f:I\to \mathbb{R}$, $I$-open interval and 
$(\sigma,\tau)\in L(1,n,\mathbb{R})$ with the following properties:
\begin{itemize}
\item[(i)] $f$ is continuous and increasing on $I$,
\item[(ii)] $\sigma_1>0$, $\sigma_j<0$, $j\in \{2,\dots ,n\}$,
\item[(iii)] $\tau_1=0$,
\item[(iv)] $f$ is $(\sigma,\tau)$ differentiable on $I$,
\item[(v)] there exists $\alpha \in \mathbb{R}$ such that 
$\sigma_k(\tau_k-\alpha)\ge 0$ for all $k\in \{1,\dots ,n\}$.
\end{itemize}
Then the function $\varphi(x)=D_1(\sigma,\tau)f(x)$, $x\in I$ has 
the Darboux property.
\end{theorem}

\begin{proof} 
Let $a$, $b\in I$, $a<b$ and suppose that 
$D_1(\sigma,\tau)f(a)<D_1(\sigma,\tau)f(b)$. 
Let $\lambda\in \mathbb{R}$ such that 
$D_1(\sigma,\tau)f(a)<\lambda<D_1(\sigma,\tau)f(b)$. 
We show that in these conditions, there exists $c\in (a,b)$ 
such that $D_1(\sigma,\tau)f(c)=\lambda$. 
We define $g(x):=f(x)-\lambda x$, $x\in [a,b]$. 
We notice that $g$ is $(\sigma,\tau)$ differentiable on I. We also have
\begin{gather*}
D_1(\sigma,\tau)g(a)=D_1(\sigma,\tau)f(a)-\lambda<0,\\
D_1(\sigma,\tau)g(b)=D_1(\sigma,\tau)f(b)-\lambda>0.
\end{gather*}
Suppose that there exists $x_0\in [a,b]$ such that $x_0$ is a maximum for $g$. 
If $x_0\in (a,b)$, then the conditions from Theorem \ref{th:1.1} hold. 
Therefore, $D_1(\sigma,\tau)g(x_0)=0$, which implies 
$D_1(\sigma,\tau)f(x_0)-\lambda=0$.

Suppose that $g$ does not attain its extremal values (in this case the maximum) 
on $(a,b)$. It follows that $g$
is strictly monotone on $[a,b]$. According to Theorem \ref{th:2.7}, 
$D_1(\sigma,\tau)g(x)\ge0$ for all $x\in [a,b]$ or 
$D_1(\sigma,\tau)g(x)\le 0$ for all $x\in[a,b]$, which is a contradiction. 
Therefore, we get the existence of $c\in(a,b)$ such that 
$D_1(\sigma,\tau)f(c)=\lambda$, for each $\lambda$ as previously chosen. 
\end{proof}

We complete this section with an example of an increasing function on 
a neighborhood of the origin for which the Riemann generalized derivative
 does not have the Darboux intermediate value property 
(inspired by Patrick O'Connor).
Let $\alpha\in (-1,0)$ and $f:\mathbb{R}\to\mathbb{R}$ given by
\begin{equation}
\quad  f(x)= \begin{cases}
x(|x|^{\alpha}-1), & x\in\mathbb{R}^* \\
0, & x=0.
\end{cases}
 \end{equation}
We also consider the system $(\sigma,\tau)\in L(1,n,\mathbb{R}^*)$ and the 
function
\[
\varphi(x)=\sum_{j=1}^n\sigma_j\tau_j|\tau_j|^x,\quad x\in\mathbb{R} .
\]
If $\varphi(\alpha)=0$, then $f$ is continuous, increasing on a neighborhood 
of the origin and $(\sigma,\tau)$-differentiable. Moreover the derivative 
does not have the Darboux property. 
To prove this we shall first notice that $f$ is continuous on $\mathbb{R}^*$. 
Therefore, for $x\in\mathbb{R}^*$,
\[
D_1(\sigma,\tau)f(x)=f'(x)=(\alpha +1)|x|^{\alpha}-1.
\]
On the other hand, $(\alpha+1)>0$, $|x|^{\alpha}\ge \epsilon ^{\alpha}>0$, 
for $x\in (-\epsilon, 0)\cup (0,\epsilon)$, where 
$\epsilon\in(0,\epsilon_0)$ with 
$\epsilon_0=\Big( \frac{1}{\alpha+1}\Big)^{ \frac{1}{\alpha}}$ and 
$f$ continuous, implies that $f$ is increasing on a neighborhood of the origin.
If we denote $g(x):=x|x|^{\alpha}$, $x\in\mathbb{R}$, then
\[
D_1(\sigma,\tau)g(0)= \lim_{h\to 0}\frac{1}{h}\sum_{j=1}^n 
\sigma_j g(\tau_j h)=0.
\]
This leads us to conclude that $D_1(\sigma,\tau)f(0)=-1$.
In the above example we built a function whose 
$(\sigma,\tau)$ derivative takes a negative value at the origin while being
positive nearby; as a consequence, the derivative does not have the Darboux
intermediate value property.


\section{Convexity versus second order Riemann generalized derivative}
%\subsection{Preliminary}
\begin{definition} \label{def:4.1} \rm
We say that a function $f:I\to \mathbb{R}$ (where $I$ is an interval)
 has a \textbf{strong maximum} at $x_0\in \operatorname{int}I$ if there exists 
a neighborhood $V$ of it, such that $f(x_0)\ge f(x)$ for all $x\in V$ and 
$f$ is non constant on $V$.
\end{definition}

\subsection*{Remark} 
A convex function does not have a strong maximum in the interior of its
 domain of definition. Indeed, if $x_0$ is a strong maximum point, 
then the arc from the graph $y=f(x)$, for which $|x-x_0|\le \delta$ 
is situated above the chord, for $\delta$ small enough, which contradicts 
the convexity.

\begin{theorem} \label{th:4.2} 
A continuous function $\varphi:I\to \mathbb{R}$ is convex if and only 
if for all $\alpha$, $\beta\in\mathbb{R}$ the function 
$\psi(x)=\varphi(x)+\alpha x+\beta$ has no strong maximum in the interval $I$.
\end{theorem}

\begin{proof} 
The sum of two convex functions is convex, therefore the necessity is obvious. 
To prove the sufficiency, suppose that $\varphi$ is not convex. 
Then there exists the arc $P_1P_2$, such that every point from it 
is situated either above the chord $P_1P_2$ or on the chord. 
Let $x_1,x_2$ be the abscissae of the points $P_1$, $P_2$ and let $y=-\alpha x-\beta$ 
be the equation of the chord. Then the equation $\varphi(x)+\alpha x+\beta=0$ has 
solutions $x_1$ and $x_2$ and $\psi(x)=\varphi(x)+\alpha x+\beta>0$ for 
$x\in(x_1,x_2)$. It follows that $\psi$ has a maximum strictly included 
in the interior of $(x_1,x_2)$. As a consequence, the maximum is included 
in the interior of $I$, which is a contradiction.
\end{proof}

In the following theorems we shall mention a number of convexity properties 
of functions that are obtained with the aid of generalized Riemann derivative 
of second order. These results are known and they belong to Humke and 
Laczkovich (see \cite{ref75}).
Let $[a,b]$ be an interval and $f:[a,b]\to\mathbb{R}$ continuous. 
Denote by $G_f^2$ the union of relatively open intervals on which $f$ is convex.
 We denote by $F_f^2:=[a,b]\setminus G_f^2$.

\begin{theorem}  \label{th:4.3}
Suppose that $f:[a,b]\to\mathbb{R}$ is continuous and $D^2$ is the generalized 
Riemann derivative of second order
with $n$ terms. If $\underline{D}_+^2f(x)\ge0$, $\forall x\in(a,b)$, 
then $F_f^2$ is nowhere dense.
\end{theorem}

\begin{theorem} \label{th:4.4}
Let $D^2$ be the Riemann generalized derivative of second order with $n$ 
terms such that $b_i\ge0$, $(i=1,\dots ,n)$. Then for all closed subsets 
which are nowhere dense, $F\subset [a,b]$ for which neither $a$ nor $b$ 
are isolated points of $F$, there exists a continuous function 
$f:[a,b]\to\mathbb{R}$, such that $\underline{D}_+^2f(x)\ge0$, for all 
$x\in (a,b)$ and $F_f^2=F$. As we can remark, the condition 
$\underline{D}_+^2f\ge 0$ does not always imply the convexity of $f$. 
The following two theorems deal with this, in case of three terms.
\end{theorem}

\begin{theorem} \label{th:4.5}
Let $D^2$ be the generalized Riemann derivative of second order with three 
terms for which $b_1<b_2=0<b_3$. If $f:[a,b]\to\mathbb{R}$ is continuous 
and $\underline{D}_+^2f(x)\ge0$, $\forall x\in (a,b)$, then $f$ is convex.
 \end{theorem}

\begin{theorem} \label{th:4.6}
Let $D^2$ the Generalized Riemann derivative of second order with three terms, 
for which $b_1<0<b_2<b_3$. Suppose that $f:[a,b]\to\mathbb{R}$ is continuous,
 $\underline{D}_+^2f(x)\ge 0$, $\forall x\in (a,b)$ and $f$ is not convex. 
Then there exists $d\in(a,b)$ such that $f$ is convex on a left neighborhood 
of $d$ and $f_d'(d)=-\infty$.
\end{theorem}

\begin{corollary} \label{coro4.1}
Let $D^2$ be the generalized Riemann derivative of second order with three terms, 
for which $b_1<0<b_2<b_3$. Let $f:[a,b]\to\mathbb{R}$ continuous and suppose 
that $\underline{D}_+^2 f(x)\ge0$ and $\overline{f'}_+>-\infty$ for all 
$x\in(a,b)$. Then $f$ is convex.
\end{corollary}

\begin{theorem} \label{th:4.7}
Let $D^2$ be the generalized Riemann derivative of second order with three 
terms and $f:[a,b]\to\mathbb{R}$ continuous. If $\underline{D}_+^2f(x)\ge0$ 
for all $x\in (a,b)$ then the set of isolated points of $F_f^2$ is 
dense in $F_f^2$. 
\end{theorem}

\begin{theorem} \label{th:4.8}
Let $D^2$ be the generalized Riemann derivative of second order with $n$ terms,
 for which $b_i\ge0, i=1,\dots ,n$. Let $F$ be a closed subset of $[a,b]$ 
such that neither $a$ nor $b$ are isolated point of $F$ and the set of 
isolated points of $F$ is dense in $F$. Then there exists a continuous 
function $f:[a,b]\to\mathbb{R}$ such that
\[
\underline{D}_+^2f(x)\ge0,\quad \forall x\in (a,b)\quad\text{and}\quad
F_f^2=F.
\]
\end{theorem}

\begin{theorem}  \label{th:4.9} 
Let $D^2$ be the generalized Riemann derivative of second order with 
three terms, with $b_1<0\le b_2<b_3$. Suppose that either $b_2=0$ or
\[
 \frac{\log|\frac{b_2}{b_1}|}{b_2-b_1}\le\frac{\log|\frac{b_3}{b_1}|}{b_3-b_1}.
\]
If $f:[a,b]\to\mathbb{R}$ is continuous and $\underline{D}_+^2f(x)\ge0$ 
for all $x\in (a,b)$, then $f$ is convex.
\end{theorem}

 \begin{theorem}  \label{th:4.10}
Let $D^2$ be the generalized Riemann derivative of second order with three terms, 
such that $b_1<0<b_2<b_3$ and
\[
\frac{\log|\frac{b_2}{b_1}|}{b_2-b_1}>\frac{\log| \frac{b_3}{b_1}|}{b_3-b_1}.
\]
Let $F$ be a closed and nowhere dense subset of $[a,b]$, such that neither 
$a$ nor $b$ are isolated points of $F$ and the set of isolated points 
of $F$ is dense in $F$. Then there exists a continuous function 
$f:[a,b]\to\mathbb{R}$ such that $\underline{D}_+^2f(x)\ge0$ for all $x\in (a,b)$ 
and $F_f^2=F$.
\end{theorem}

The following theorem solves the problem posed by  Ash in \cite{ref3}.
 
\begin{theorem} \label{th:4.11}
Let $D^2$ be the generalized Riemann derivative of second order with three 
terms and $b_1<0<b_3$. If $f:[a,b]\to\mathbb{R}$ is continuous and 
$D_+^2f(x)=0$, for all $x\in (a,b)$, then $f$ is linear.
\end{theorem}

 These results are stated in \cite{ref75}, without proof. 
In the following subsection we shall give our results,
 keeping the notation introduced in Sections 1 and 2.

\subsection{Necessary and sufficient conditions on the $(\sigma,\tau)$ 
differentiable function convexity}

\begin{theorem} \label{4.2.1} 
Let $f:(a,b)\to\mathbb{R}$  with the following properties:
\begin{itemize}
\item[(i)] $(\sigma,\tau)\in L(2,n,\mathbb{R})$,
\item[(ii)] $\sigma_1<0$, $\sigma_j>0$, $j\in \{ 2,\dots ,n\}$,
\item[(iii)] $\tau_1=0$,
\item[(iv)] $f$ is convex.
\end{itemize}
Then $\underline{D}_2(\sigma,\tau)f(x)\ge0$ for all $x\in(a,b)$.
\end{theorem}

\begin{proof} We notice that
\begin{equation} \label{eq:*}
 \sum_{j=1}^n \sigma_j f(x+\tau_jh)\ge 0
\end{equation}
if and only if
\begin{align*} 
\sum_{j=1}^n \sigma_jf(x+\tau_jh)
&= \sigma_1 f(x+\tau_1h)+\sum_{j=2}^n \sigma_jf(x+\tau_jh)\\
&=\sum_{j=2}^n\sigma_jf(x+\tau_jh)+\sigma_1f(x)\ge0
\end{align*}
if and only if
\[
\sum_{j=2}^n \Big( -\frac{\sigma_j}{\sigma_1}\Big)f(x+\tau_jh)\ge f(x).
\]
To prove the inequality \eqref{eq:*}, we note that
 $ -\frac{\sigma_j}{\sigma_1}\in[0,1)$ and 
$ \sum_{j=2}^n \big(-\frac{\sigma_j}{\sigma_1} \big)=1$. 
As $f$ is convex we apply Jensen inequality and we obtain:
\begin{align*}
\sum_{j=2}^n \Big( -\frac{\sigma_j}{\sigma_1}\Big)f(x+\tau_jh)
&\ge f\Big( \sum_{j=2}^n\Big( -\frac{\sigma_j}{\sigma_1}\Big)(x+\tau_jh)\Big)\\
&=f\Big( -\frac{1}{\sigma_1}\sum_{j=2}^n\sigma_jx-\frac{h}{\sigma_1}
\sum_{j=2}^n\sigma_j\tau_j\Big)\\
&=f\Big( x-\frac{h}{\sigma_1}\sum_{j=2}^n\sigma_j\tau_j\Big)=f(x).
\end{align*}
 Let $\epsilon >0$ be arbitrary. For 
$|h|< \frac{-\epsilon \sigma_1 }{| \sum_{j=2}^n \sigma_j\tau_j |}=\epsilon '$ 
it follows that $x- \frac{h}{\sigma_1}\sum_{j=2}^n\sigma_j\tau_j<\epsilon+x$.
As this inequality takes place for all $\epsilon>0$, we deduce the inequality
 \eqref{eq:*}. Therefore
\begin{equation} \label{eq:8}
  \frac{1}{h^2}\sum_{j=1}^n\sigma_jf(x+\tau_jh)\ge0, \quad 
\forall x\in (a,b),\; \forall h\in (-\epsilon',\epsilon').
\end{equation}
From this we obtain
\begin{equation}
  \liminf_{h\to 0}\frac{1}{h^2} \sum_{j=1}^n \sigma_j f(x+\tau_j h)\ge 0,
 \quad \forall x\in (a,b).
\end{equation}
\end{proof}

 \begin{theorem} \label{th:4.2.2}
Let $f:(a,b)\to\mathbb{R}$ be a continuous function such that:
\begin{itemize}
\item[(i)] $(\sigma,\tau)\in L(2,n,\mathbb{R})$,
\item[(ii)] $\sigma_1<0$, $\sigma_j>0$, $j=2,\dots ,n$,
\item[(iii)] $\tau_1=0$,
\item[(iv)] $D_2(\sigma,\tau) f(x)>0$ for all $x\in (a,b)$.
\end{itemize}
Then $f$ is convex on $(a,b)$.
\end{theorem}

\begin{proof} 
Suppose that $f$ is not convex. Then, according to Theorem \ref{th:4.2} 
there exist $\alpha,\beta\in \mathbb{R}$ such that the function 
$g(x):=f(x)+\alpha x+\beta$ has a strong maximum. 
Let $x_0\in (a,b)$ be a strong maximum point for $g$. On the one hand we have
\[
f(x_0)\ge f(x_0+\tau_j h), \quad j\in \{2,\dots ,n\}.
\]
We multiply this relation by $\sigma_j>0, (j=2,\dots ,n)$ and summing we obtain
\[
\sum_{j=2}^n \sigma_j f(x_0)\ge \sum_{j=2}^n \sigma_j f(x_0+\tau_j h).
\]
 From this we obtain
\[
 -\sigma_1f(x_0)=f(x_0)\sum_{j=2}^n\sigma_j
\ge \sum_{j=2}^n\sigma_jf(x_0+\tau_jh).
\]
The relation above is equivalent to
\[
 \sum_{j=2}^n \sigma_j f(x_0+\tau_jh)+\sigma_1f(x_0)\le0,
\]
which implies
\[
\sum_{j=1}^n\sigma_j f(x_0+\tau_j h)\le 0.
\]
On the other hand we obtain
\begin{align*}
\sum_{j=1}^n\sigma_j g(x_0+\tau_jh)
&=\sum_{j=1}^n\sigma_jf(x_0+\tau_j h)+\sum_{j=1}^n\sigma_j(\alpha (x_0+\tau_jh)+\beta)\\
&=\sum_{j=1}^n\sigma_jf(x_0+\tau_j h)\le 0
\end{align*}
and as a consequence,
\begin{align*}
\lim_{\overline{h\to 0}}\frac{1}{h^2}\sum_{j=1}^n\sigma_j g(x+\tau_j h)
&=\lim_{\overline{h\to 0}}\frac{1}{h^2}\sum_{j=1}^n \sigma_j f(x+\tau_j h)\\
&\le 0<\lim_{\overline{h\to 0}}\frac{1}{h^2}\sum_{j=1}^n\sigma_j f(x+\tau_jh)
\end{align*}
which is a contradiction. Therefore $f$ is convex.
\end{proof}

The following theorem establishes weaker conditions for the convexity 
of $f$ by relaxing condition (iv) from Theorem \ref{th:4.2.2}.

\begin{theorem}  \label{th:4.2.3}
Let $f:(a,b)\to\mathbb{R}$ continuous, such that:
\begin{itemize}
\item[(i)] $(\sigma,\tau)\in L(2,n,\mathbb{R})$,
\item[(ii)] $\sigma_1<0$, $\sigma_j>0$, $j\in \{2,\dots ,n\}$,
\item[(iii)] $\tau_1=0$,
\item[(iv)] $D_2(\sigma,\tau)f(x)\ge 0$ for all $x \in (a,b)$.
\end{itemize}
In these conditions $f$ is convex on $(a,b)$.
\end{theorem}

\begin{proof} 
We shall define the functions
$ f_j(x):=f(x)+\frac{x^2}{j}$, $j\in\mathbb{N^*}, x\in (a,b)$
for which we have
\[
\liminf_{h\to 0} \frac{1}{h^2} \sum_{k=1}^n \sigma_k f_j (x+\tau_k h)
= \liminf_{h\to 0}\frac{1}{h^2}\sum_{k=1}^n \sigma_k f(x+\tau_k h)
+\lim_{h\to 0}\frac{1}{h^2}\sum_{k=1}^n \frac{1}{j}\sigma_k (x+\tau_k h)^2.
\]
As
\begin{align*}
\lim_{h\to 0} \frac{1}{h^2}\sum_{k=1}^n \frac{1}{j}\sigma_k (x+\tau_kh)^2
&=\lim_{h\to  0}\frac{1}{h^2}\sum_{k=1}^n\frac{\sigma_k}{j}[(x+\tau_kh)^2-x^2]\\
&= \lim_{h\to 0}\frac{1}{h}\sum_{k=1}^n\frac{\sigma_k \tau_k}{j}(2x+\tau_k h)\\
&=\sum_{k=1}^n\frac{\sigma_k \tau_k^2}{j}=\frac{2}{j}>0,
\end{align*}
we  obtain
\begin{align*}
\liminf_{h\to 0}\frac{1}{h^2}\sum_{k=1}^n\sigma_k f_j(x+\tau_kh)
&=\liminf_{h\to 0}\frac{1}{h^2}\sum_{k=1}^n\sigma_kf(x+\tau_kh)
 +\sum_{k=1}^n\frac{\sigma_k\tau_k^2}{j}\\
&\ge \sum_{k=1}^n\frac{\sigma_k\tau_k^2}{j}=\frac{2}{j}>0.
 \end{align*}
Applying Theorem \ref{th:4.2.2} to $f_j$, $j\in\mathbb{N}^*$, we obtain their
convexity.
As $f(x)= \lim_{j\to \infty}f_j(x)$ for all $x\in (a,b)$, it follows that
 $f$ is convex on $(a,b)$.
\end{proof}
 
\subsection*{Remark} 
For $\sigma=(1,-2,1)$, $\tau=(1,0,-1)$ we get the symmetric derivative of 
second order for $f$. From this we get the following corollary of 
Theorem~\ref{th:4.2.3}, which belongs to Zygmund (see \cite[pp. 44--45]{ref153}).

\begin{corollary}[Zygmund] 
Let $f\colon(a,b)\to \mathbb{R}$ be continuous. Then $f$ is convex on $(a,b)$ 
if and only if the lower symmetric derivative of second order of $f$ is 
nonnegative on $(a,b)$.
\end{corollary}

 \begin{theorem} \label{th:4.2.4}
 Let $I$ be an interval and $f\colon I\to \mathbb{R}$ be continuous. 
If the following conditions hold:
\begin{itemize}
\item[(i)] $\sigma_1<0$, $\sigma_j>0$ for all $j\in \{2,\dots ,n\}$,
\item[(ii)] $\tau_1=0$,
\item[(iii)] $(\sigma,\tau)\in L(2,n,\mathbb{R})$,
\item[(iv)] $D_2(\sigma,\tau)f(x)=0$ for all $x\in \operatorname{Int} I$,
\end{itemize}
then there exist $\alpha, \beta\in\mathbb{R}$ such that
$f(x)=\alpha x+\beta$ for all $x\in I$.
\end{theorem}

\begin{proof} 
Let $a,b\in I$, $a<b$. We define $\varphi\colon[a,b]\to\mathbb{R}$, given by:
\begin{equation} \label{eq:4.2.4.1}
\varphi(x):=f(x)-f(a)- \frac{f(b)-f(a)}{b-a}\cdot (x-a).
\end{equation}
If there exists $c\in (a,b)$ such that $\varphi(c)>0$, then there exists 
$\epsilon>0$ such that $\varphi(c)>\frac{\epsilon}{2}(c-a)(b-c)$, as 
$\varphi$ is continuous.
We define $\psi:I\to \mathbb{R}$, given by:
\begin{equation}
\label{eq:4.2.4.2}
\psi(x):=\varphi(x)-\frac{\epsilon}{2}(x-a)(b-x).
\end{equation}
We notice that $\psi$ is continuous and $\psi (a)=\psi(b)=0$. 
Therefore there exists $x_0\in[a,b]$ such that
\[
\psi(x)\le\psi(x_0),\quad \forall x\in [a,b].
\]
We also have $\psi(c)>0$; since $\psi(a)=\psi(b)=0$, it follows that 
$x_0\notin\{a,b\}$, and as a consequence $x_0\in (a,b)$.
Let $\eta>0$ such that $(x_0-\eta,x_0+\eta)\subset I$. Then
\[
 \psi(x_0+\tau_jh)\le\psi(x_0),\quad\text{for }
 |h|<\eta\cdot \Big(\max_j|\tau_j| \Big)^{-1}=\eta'.
\]
 From this we obtain
\begin{gather*}
\sigma_1\cdot\psi(x_0+\tau_1 h)=\sigma_1\cdot \psi(x_0),\\
\sum_{j=2}^n\sigma_j \psi(x_0+\tau_j h)\le \sum_{j=2}^n \sigma_j \psi(x_0);
\end{gather*}
therefore,
\begin{equation}
\label{eq:11}
 \sum_{j=1}^n\sigma_j \psi (x_0+\tau_j h)\le \sum_{j=1}^n\sigma_j \psi(x_0)=0 ,
\quad |h|<\eta '.
\end{equation}
Further we have
\begin{align*}
&\frac{1}{h^2}\sum_{j=1}^n\sigma_j \psi (x_0+\tau_j h)\\
&=\frac{1}{h^2} \Big( \sum_{j=1}^n \sigma_j \varphi(x_0+\tau_j h)
 -\frac{\epsilon}{2}\sum_{j=1}^n\sigma_j(x_0+\tau_j h-a)(b-x_0-\tau_jh)\Big)\\
&= \frac{1}{h^2}\sum_{j=1}^n \sigma_j \varphi (x_0+\tau_j h)\\
&\quad -\frac{\epsilon}{2h^2}\sum_{j=1}^n\sigma_j[(x_0-a)(b-x_0)
 +\tau_jh(b-x_0)-\tau_jh(x_0-a)-\tau_j^2h^2]\\
&=\frac{1}{h^2}\sum_{j=1}^n\sigma_j \varphi(x_0+\tau_j h)
 +\frac{\epsilon}{2}\sum_{j=1}^n\sigma_j\tau_j^2\\
&=\frac{1}{h^2}\sum_{j=1}^n\sigma_j\varphi (x_0+\tau_j h)+\epsilon,
\quad \text{for }|h|<\eta '.
\end{align*}
Letting $h\to 0$ in relation \eqref{eq:11} and taking into consideration 
the  relation
\begin{equation} \label{eq:12}
 \frac{1}{h^2} \sum_{j=1}^n\sigma_j \psi (x_0+\tau_j h)
=\frac{1}{h^2} \sum_{j=1}^n\sigma_j \varphi (x_0+\tau_j h)+\epsilon
\end{equation}
we obtain
\begin{align*}
0&\ge \lim_{\overline{h\to 0}}\frac{1}{h^2}\sum_{j=1}^n\sigma_j 
 \psi (x_0+\tau_j h)\\
&=\lim_{\overline{h\to 0}}\frac{1}{h^2} \sum_{j=1}^n\sigma_j\varphi (x_0+\tau_j h)
+\epsilon\\
&=\lim_{\overline{h\to 0}}\frac{1}{h^2}\sum_{j=1}^n\sigma_j f(x_0+\tau_j h)
+\epsilon>0
\end{align*}
 which is a contradiction.
 The case $\varphi(c)<0$ can be treated similarly. In conclusion, 
$\varphi(c)=0$ for all $c\in (a,b)$.
Therefore, there exist $\alpha,\beta\in\mathbb{R}$, according to 
relation \eqref{eq:4.2.4.1}, such that
\[
f(x)=\alpha x+\beta,\quad \forall x\in [a,b].
\]
\end{proof}

\begin{thebibliography}{00}

\bibitem{ref6}  Alexandrescu, P.;
\emph{Monotonicity theorems for generalized Riemann derivatives}. 
Mathematical Reports, no. 4, vol.1 (51) (1999), 497-501.

\bibitem{ref7} Ash, J. M.;
\emph{Generalizations of the Riemann derivative}. 
Trans. Amer. Math. Soc., 126, (1967), 181-199.

\bibitem{ref3} Ash, J. M.;
\emph{Very generalized Riemann derivatives, generalized Riemann derivatives 
and associated summability methods}. The ninth summer real analysis 
symposium, Louisville, KY, 1985, Real Anal. Exchange, no. 11 (1985/86), 1, 10--29.

\bibitem{ref72} Hincin, A.;
\emph{On the symmetric derivative}. Fund. Math. IX, (1927), 212.

\bibitem{ref75} Humke, P. D.; Laczkovich, M.;
\emph{The Convexity Theorems for Generalized Riemann Derivatives}, 
Real Analysis Exchange, 15, (1989/90), 2, 652-674.

\bibitem{ref76} Humke, P. D.; Laczkovich, M.;
\emph{Monotonicity theorems for generalized Riemann derivatives},
Rend. Circ. Mat. Palermo (2) 38 (1989) no. 3, 437--454.

\bibitem{ref8} Marcinkiewicz, J.; Zygmund, A.;
\emph{Sur la d\'eriv\'ee seconde g\'en\'eralis\'ee}. 
J\'ozef Marcinkiewicz Collected Papers, 582--587, Pa\'nstwowe 
Wydawnictwo Naukowe, Warsaw, (1964).

\bibitem{ref9} Mukhopadhyay, S. N.;
\emph{Higher order derivatives}. Chapman \& Hall CRC Monographs
and Surveys in Pure and Applied Mathematics 144, (2012).

\bibitem{ref13} R\u{a}dulescu, S.; Alexandrescu, P.; Alexandrescu, D.-O.;
\emph{Generalized Riemann Derivative}, Electronic Journal of Differential 
Equations, 74, (2013), 1-19.

\bibitem{ref14} R\u{a}dulescu, T.-L.; R\u{a}dulescu, V.; Andreescu, T.;
\emph{Problems in Real Analysis: Advanced Calculus on the Real Axis},
 Springer, New York, (2009).

\bibitem{ref9b} Thomson, B. S.;
\emph{Monotonicity theorems}, Proc. Amer. Math. Soc. 83, (1981), 547-552.

\bibitem{ref10} Thomson, B. S.;
\emph{Monotonicity theorems}, Real Anal. Exchange, (1981), 209-234.

\bibitem{ref11} Thomson, B. S.;
\emph{Some properties of generalized derivatives}, 
Real Anal. Exchange, 8, (1982-1983).

\bibitem{ref12} Weil, C. E.;
\emph{Monotonicity, convexity and symmetric derivatives}. 
Trans. Amer. Math. Soc., 231, (1976), 225-237.

\bibitem{ref153} Zygmund, A.;
\emph{Trigonometric Series}, Vol I+II, Cambridge University Press (2002).

\end{thebibliography}

\end{document}
