% LaTeX2e:
%
\documentclass{ajour}
\usepackage[colorlinks=true, linkcolor=webgreen, filecolor=webbrown, citecolor=webgreen]{hyperref}
\usepackage{amssymb,amsmath}
\usepackage[dvips]{graphicx,epsfig}
%%%%% To be entered at Academic Press: =====>>
%
% Uncomment line below only when doing final typesetting,
%\finaltypesetting
%
% \journame{}
% \articlenumber{}
% \yearofpublication{}
% \volume{}
% \cccline{}
% \received{}
% \revised{}
% \accepted{}
% communication line, use: \commline{Communicated by...}
% \commline{Communicated by... }
\authorrunninghead{}
\titlerunninghead{}
%% To set particular page number:
%\setcounter{page}{261}
%%
%% <<== End of commands to be entered at Academic Press
%% Authors, start here ==>>
%%%--->\draft
% Optional, will cause a line at the bottom of each page
%% with the words `Draft' and the time and date that the article
%% was LaTeXed. Will also double space text.
\begin{document}
\begin{center}
\epsfxsize=4in
\leavevmode\epsffile{logo122.eps}
\end{center}
\begin{center}
\vskip 1cm
{\LARGE\bf The gcd-sum function}
\vskip 1.5cm
\large
Kevin A. Broughan \\
\smallskip
University of Waikato, Hamilton, New Zealand \\
\medskip
Email address: \href{mailto:kab@waikato.ac.nz}{kab@waikato.ac.nz}
\vskip2.5cm
{\bf Abstract}
\end{center}
{\em
The gcd-sum is an arithmetic function defined as the sum of the gcd's of the
first $n$ integers with $n$: $g(n)=\sum_{i=1}^n (i,n)$. The function arises in
deriving asymptotic estimates for a lattice point counting problem. The
function is multiplicative and has polynomial growth. Its Dirichlet series has
a compact representation in terms of the Riemann zeta function. Asymptotic
forms for the partial sums of the Dirichlet series at real values are derived,
including estimates for the error terms.
}
\vspace*{+.1in}
\noindent
{\it Keywords}: greatest common divisor, Dirichlet series, lattice points,
multiplicative, Riemann zeta function, gcd-sum.

\noindent
MSC2000 11A05, 11A25, 11M06, 11N37, 11N56.
\def\a{\alpha}
\def\z{\zeta}
\def\sx{\sum_{n\le \sqrt{x}}}
%\begin{article}
\section{Introduction}
This article is a study of the gcd-sum function: $g(n)=\sum_{i=1}^n (i,n)$. The
function arose in the context of a lattice point counting problem for
integer-coordinate points under the square-root curve. The function is
multiplicative and has a derivative-like expression for its values at prime
powers. The growth of the function is $O(n^{1+\epsilon})$, and the
corresponding Dirichlet series $G(s)$ extends to a meromorphic function on the
whole complex plane, analytic except at the zeros of the Riemann zeta function
and at the point $s=2$, where it has a double pole. Asymptotic expressions are
derived for the partial sums of the Dirichlet series at all real values of $s$.
These results may be compared with those of \cite{cohen1, cohen2, cohen3},
where a different class of arithmetic sums of the gcd is studied, namely those
based on the double sum $\sum_{i,j=1}^n (i,j)$ and its generalizations; note
that these functions fail to be multiplicative. The original lattice point
problem which motivated this work is solved using a method based on that of
Vinogradov. The result is then compared with an expression found using the
gcd-sum.
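For concreteness, the first values of $g$ can be computed directly from the
definition. The following short Python sketch (illustrative only; the helper
function name {\tt g} is ours) does this and spot-checks multiplicativity
numerically on a few coprime pairs; the values produced agree with sequence
A018804.
\begin{verbatim}
# Illustrative sketch only: compute the gcd-sum g(n) = sum_{i<=n} gcd(i, n)
# directly from the definition and spot-check that g is multiplicative.
from math import gcd

def g(n):
    """gcd-sum: g(n) = sum of gcd(i, n) for i = 1..n."""
    return sum(gcd(i, n) for i in range(1, n + 1))

print([g(n) for n in range(1, 11)])  # 1, 3, 5, 8, 9, 15, 13, 20, 21, 27

# multiplicativity on coprime arguments: g(m*n) == g(m) * g(n)
for m, n in [(4, 9), (5, 8), (7, 12)]:
    assert g(m * n) == g(m) * g(n)
\end{verbatim}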
%=====================================================================================
\section{GCD-Sum Function}
The gcd-sum is defined to be
\begin{equation}
\specialeqnum{1}
g(n)=\sum_{j=1}^n (j,n)
\end{equation}
The function that is needed in the application to counting lattice points,
described below, is the function $S$ defined by
\begin{equation}
\specialeqnum{2}
S(n)=\sum_{j=1}^n (2j - 1, n)
\end{equation}
%------------------------------------------------------------------------------------
\begin{theorem}
The function $S$ and the gcd-sum $g$ are related by
\begin{equation}
\specialeqnum{3}
S(n) = \left\{ \begin{array}{ll}
g(n) & \mbox{$n$ odd} \\
2g(n)-4g(\frac{n}{2}) & \mbox{$n$ even}
\end{array} \right.
\end{equation}
\end{theorem}
\begin{proof}
For all $n\ge 1$
\begin{equation}\specialeqnum{4}
\sum_{j=1}^n (2j,n) + \sum_{j=1}^n (2j-1,n) = \sum_{j=1}^{2n} (j,n)=2g(n)
\end{equation}
If $n$ is odd,
$$ \sum_{j=1}^n (2j,n)=\sum_{j=1}^n (j,n)=g(n)$$
From this and (4) we obtain the equation $S(n)=g(n)$. If $n$ is even,
$$\sum_{j=1}^n (2j,n)=2\sum_{j=1}^n (j,\frac{n}{2})=4g(\frac{n}{2})$$
and again the result follows by (4).
\end{proof}
%-------------------------------------------------------------------------------------
The following theorem gives the value of $g$ at prime powers. Even though a
direct proof is possible, we give a proof by induction since it reveals more of
the structure of the function.
\begin{theorem}
For every prime number $p$ and positive integer $\alpha \ge 1$:
\begin{equation}
\specialeqnum{5}
g(p^\alpha)=(\alpha+1)p^\alpha-\alpha p ^{\alpha -1}
\end{equation}
\end{theorem}
\begin{proof}
When $\alpha=1$:
$$g(p)=(1,p)+(2,p)+ \dots +(p,p)=(p-1)+p=2p-1$$
Similarly when $\alpha=2$:
\begin{eqnarray*}
g(p^2)&=&(1,p^2)+(2,p^2)+ \dots +(p,p^2)+(p+1,p^2)+ \dots + (2p,p^2)+ \dots +(p^2,p^2)\\
&=&1+1+\dots +p+1+ \dots + p + \dots + p^2\\
&=& (p^2-p)+p(p-1)+p^2\\
&=&3p^2-2p
\end{eqnarray*}
Hence the result is true for $\alpha = 1$ and for $\alpha =2$. Now for any
$\alpha \ge 2$:
\begin{eqnarray*}
g(p^\alpha)
&=& \sum_{j=1}^{p^{\alpha -1}} (j,{p^\alpha })+ \sum_{j=p^{\alpha -1}+1}^{p^\alpha -1} (j,{p^\alpha })+p^\alpha\\
&=& g(p^{\alpha -1})+p^\alpha +\sum_{j=p^{\alpha -1}+1}^{p^\alpha -1} (j,p^{\alpha -1})
\end{eqnarray*}
But
\begin{eqnarray*}
\sum_{j=p^{\alpha -1}+1}^{p^\alpha -1} (j,p^{\alpha -1})
&=& \sum_{j=1}^{p^\alpha - p^{\alpha -1}-1} (j,p^{\alpha -1})\\
&=& \sum_{j=1}^{p^\alpha - p^{\alpha -1}} (j,p^{\alpha -1})-p^{\alpha -1}\\
&=& (p-1)g(p^{\alpha -1})-p^{\alpha -1}
\end{eqnarray*}
Hence
$$g(p^\alpha)=p^\alpha - p^{\alpha -1} + pg(p^{\alpha -1})$$
Thus, if we assume for some $\beta$ that
$$g(p^\beta)=(\beta+1)p^\beta-\beta p ^{\beta -1}~,$$
then
\begin{eqnarray*}
g(p^{\beta +1}) &=& p^{\beta +1} - p^\beta + pg(p^{\beta})\\
&=& p^{\beta +1} - p^\beta + p \left[ (\beta +1)p^\beta - \beta p^{\beta -1} \right]\\
&=& (\beta + 2)p^{\beta +1} - (\beta +1)p^\beta
\end{eqnarray*}
and the result follows by induction.
\end{proof}
%------------------------------------------------------------------------------------
\begin{theorem}
The following expression gives the function $g$ in terms of Euler's totient
function $\phi:$
\begin{equation}
\specialeqnum{6}
g(n)=\sum_{j=1}^n (j,n)=n \sum_{d|n} {\frac{\phi (d)}{d}}
\end{equation}
\end{theorem}
\begin{proof}
The integer $e$ is equal to the greatest common divisor $(j,n)$ if and only if
$e|n$ and $e|j$ and $(\frac{j}{e},\frac{n}{e})=1$ for $1 \le j \le n$.
Therefore the terms with $(j,n)=e$ are $\phi (\frac{n}{e})$ in number.
Grouping terms in the sum for $g(n)$ with value $e$ together, it follows that
$$g(n)=\sum_{e|n} e\phi(\frac{n}{e})=\sum_{d|n} \frac{n}{d}\phi(d)=n\sum_{d|n} \frac{\phi(d)}{d} $$
\end{proof}
\begin{corollary}
The function $g$ is multiplicative, being the divisor sum of a multiplicative
function.
\end{corollary}
Note that $g$ is not completely multiplicative, nor does it satisfy any
modular-style identity of the form
$$ g(n)g(m)=\sum_{d|(m,n)} h(d)g(\frac{mn}{d^2}) $$
%===================================================================================
\section{Bounds}
\begin{theorem}
The function $g$ is bounded above and below by the expressions
$$ \max\Bigl(2-\frac{1}{n},\Bigl({\frac{3}{2}}\Bigr)^{\omega(n)}\Bigr) \le \frac{g(n)}{n} \le
27 {\Bigl( \frac{\log n}{\omega (n)} \Bigr)}^{\omega (n)} $$
where $n$ is any positive integer and $\omega(n)$ is the number of distinct
prime numbers dividing $n$.
\end{theorem}
\begin{proof}
The bound
$$g(n) = \sum_{j=1}^n (j,n) \ge 1\cdot(n-1)+n = 2n-1$$
gives the lower bound
$$2-\frac{1}{n} \le \frac{g(n)}{n}$$
Now consider
\begin{eqnarray*}
\frac{g(n)}{n}&=& \prod_{p|n}\frac{g(p^\a )}{p^\a} \quad\text{(by the Corollary to Theorem 2.3)}\\
&=&\prod_{p|n}\Bigl( (\a +1)-\frac{\a}{p}\Bigr) \quad\text{(Theorem 2.2)}\\
&\ge&\prod_{p|n}(2-\frac{1}{p}) \ge \Bigl(\frac{3}{2}\Bigr)^{\omega(n)}
\quad\text{since $\a \ge 1$ and $p \ge 2$}.
\end{eqnarray*}
This completes the derivation of the second part of the lower bound.

By equation (5),
$$ \frac{g(p^\alpha )}{p^\alpha} = \alpha (1-\frac{1}{p})+1 \le w\alpha \log p $$
where $w=3$ if $p\in\{2,3,5\}$ and $w=1$ if $p\ge 7$. Hence, if $p_i\ge 7$ for
every $i$ and $n=\prod_{i=1}^m p_i^{\alpha_i}$, then
\begin{eqnarray*}
\frac{g(n)}{n}\le \prod_{i=1}^m \alpha_i \log p_i = \prod_{i=1}^m \log(p_i^{\alpha_i})
\end{eqnarray*}
Now
$$\log n =\sum_{i=1}^m \alpha_i \log p_i$$
If $f$ is the monomial function $f(x)=\prod_{i=1}^m x_i$ of real variables
subject to the constraints $x_i \ge 1$ and $\sum x_i = \alpha$, for some fixed
positive real number $\alpha$, then (using Lagrange multipliers) the maximum
value of $f$ is $(\frac{\alpha}{m})^m$ and occurs where each
$x_i = \frac{\alpha}{m}$. Hence
$$ \frac{g(n)}{n} \le \big( \frac{\log n }{m}\big) ^m = \big( {\frac{\log n}{\omega (n)}}\big) ^{\omega (n)} $$
In general, using $\alpha_1=1$ if $2 \nmid n$, etc.,
\begin{eqnarray*}
\frac{g(n)}{n} &\le& 27(\alpha_1 \log p_1)(\alpha_2 \log p_2)(\alpha_3 \log p_3)
\prod_{p_i \ge 7} \alpha_i \log p_i \\
&=& 27 \prod_{i=1}^m \alpha_i \log p_i \\
&\le& 27\big( {\frac{\log n}{\omega (n)}}\big)^{\omega (n)}
\end{eqnarray*}
\end{proof}
%------------------------------------------------------------------------------------
The upper bound in the expression given by the previous theorem is not very
useful, given the extreme variability of $\omega(n)$. A plot of the first 200
values of $g(n)/n$, given in Figure 1, illustrates this variability. The
following estimates are more useful in practice.
\begin{theorem}
The functions $g$ and $S$ satisfy, for all $\epsilon > 0$,
\begin{eqnarray}
\specialeqnum{7}
g(n) = O(n^{1+\epsilon})\\
S(n) = O(n^{1+\epsilon}).
\end{eqnarray}
\end{theorem}
\begin{proof}
This follows immediately from Theorem 2.3, since $\phi(d) \le d$ and the
divisor function $d(n)=O(n^\epsilon)$; the bound for $S$ then follows from
Theorem 2.1.
\end{proof}
\begin{figure}
\unitlength1cm
\begin{center}
\includegraphics*[scale=0.8, bb=2cm 20cm 12cm 27cm]{gcdsum1.eps}
\end{center}
\caption{The functions $g(n)/n$ and $n^{\epsilon}$}
\end{figure}
%====================================================================================
\section{Dirichlet Series}
Define a Dirichlet series based on the function $g$:
\begin{eqnarray*}
\specialeqnum{8}
G(s)=\sum_{n=1}^\infty \frac{g(n)}{n^s}, \quad\mbox{for $\sigma=\Re(s)>2$}
\end{eqnarray*}
\begin{theorem}
The Dirichlet series for $G(s)$ converges absolutely for $\sigma>2$ and has an
analytic continuation to a meromorphic function defined on the whole of the
complex plane with value
\begin{eqnarray*}\specialeqnum{9}
G(s)=\frac{\zeta(s-1)^2}{\zeta(s)}
\end{eqnarray*}
where $\zeta(s)$ is the Riemann zeta function.
\end{theorem}
\begin{proof}
First write $g$ as a Dirichlet product:
$$g(n)=\sum_{d|n} \phi(d)\frac{n}{d}=(\phi*N)(n)$$
where $N$ is the arithmetic function $N(n)=n$. Hence, if $\sigma>2$,
\begin{eqnarray*}
G(s)&=& \bigl( \sum_{n=1}^\infty \frac{\phi(n)}{n^s} \bigr)
\bigl(\sum_{n=1}^\infty \frac{n}{n^s} \bigr)\\
&=& \zeta(s-1)\bigl( \sum_{n=1}^\infty \frac{\phi(n)}{n^s} \bigr)
\end{eqnarray*}
But \cite{apostol1}
$$ \phi(n)=\sum_{d|n} \mu(d)\frac{n}{d} $$
Therefore
\begin{eqnarray*}
G(s)&=& \zeta(s-1)^2 \bigl(\sum_{n=1}^\infty \frac{\mu(n)}{n^s} \bigr)\\
&=& \frac{\zeta(s-1)^2}{\zeta(s)}
\end{eqnarray*}
Since the right-hand side is meromorphic on the whole of the complex plane,
$G(s)$ has the claimed analytic continuation, with a double pole at $s=2$ and
poles at the zeros of $\zeta(s)$.
\end{proof}
%-------------------------------------------------------------------------------------
We now derive asymptotic expressions for the partial sums of this Dirichlet
series by a method which employs known estimates for the corresponding partial
sums based on Euler's function $\phi$, leading to an improvement in the error
terms. If $\alpha \in \mathbb R$, define the partial sum function $G_\alpha$ by
\begin{eqnarray*}\specialeqnum{10}
G_\alpha (x)=\sum_{n \le x} \frac{g(n)}{n^\alpha}
\end{eqnarray*}
%------------------------------------------------------------------------------------
\begin{lemma}
If $f(x)=O(\log x)$ then $\sum_{n\le x} f(\frac{x}{n})=O(x)$.
\end{lemma}
\begin{proof}
This follows easily from the estimate
$$ \log(\lfloor{x}\rfloor!)=x \log x -x + O(\log x)$$
\end{proof}
%------------------------------------------------------------------------------------
In what follows we define the constant function $h_\alpha (x)=\alpha$ for each
real number $\alpha$.
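As a quick numerical illustration (not required for any of the proofs), the
following short Python sketch checks the Dirichlet-product identity
$g=\phi*N$ exactly for small $n$ and compares a truncated partial sum
$\sum_{n\le N} g(n)/n^s$ with the closed form $\zeta(s-1)^2/\zeta(s)$ at a real
point $s>2$. The helper names {\tt phi}, {\tt g} and the crude partial-sum
approximation {\tt zeta} below are ours, not part of the paper.
\begin{verbatim}
# Illustrative sketch only: verify g = phi * N (Dirichlet convolution) and
# compare a truncated Dirichlet series with zeta(s-1)^2 / zeta(s).
from math import gcd

def phi(n):
    """Euler's totient, computed naively from the definition."""
    return sum(1 for k in range(1, n + 1) if gcd(k, n) == 1)

def g(n):
    """gcd-sum: g(n) = sum of gcd(j, n) for j = 1..n."""
    return sum(gcd(j, n) for j in range(1, n + 1))

def zeta(s, terms=100000):
    """Crude partial-sum approximation of the Riemann zeta function, s > 1."""
    return sum(k ** -s for k in range(1, terms + 1))

# exact check of g(n) = sum_{d | n} phi(d) * (n / d)
for n in range(1, 200):
    assert g(n) == sum(phi(d) * (n // d) for d in range(1, n + 1) if n % d == 0)

s = 4.0
partial = sum(g(n) / n ** s for n in range(1, 2000))
print(partial, zeta(s - 1) ** 2 / zeta(s))  # agree to several decimal places
\end{verbatim}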
\begin{theorem}
As $x \rightarrow \infty$
\begin{eqnarray*}
\specialeqnum{11}
G_1(x)=\frac{x \log x}{\zeta(2)}+O(x)
\end{eqnarray*}
\end{theorem}
\begin{proof}
By Theorem 2.3, if $f(n)=\phi(n)/n$,
\begin{eqnarray*}
\frac{g(n)}{n}&=& \sum_{d|n} \frac{\phi(d)}{d}\\
&=& (h_1 * f)(n)
\end{eqnarray*}
If we define $F(x)=\sum_{n\le x} f(n)$ then, by \cite{apostol1},
$$ F(x)=\frac{x}{\zeta(2)}+O(\log x) $$
Therefore (using Lemma 4.1 to derive the error estimate)
\begin{eqnarray*}
G_1 (x)&=&\sum_{n\le x} h_1 (n)F(\frac{x}{n})\\
&=&\sum_{n\le x} F(\frac{x}{n})\\
&=& \frac{x}{\zeta(2)}[1+\frac{1}{2}+\frac{1}{3}+\dots + \frac{1}{\lfloor{x}\rfloor}] +O(x)\\
&=& \frac{x}{\zeta(2)}[\log x + \gamma + O(\frac{1}{x})]+O(x)\\
&=& \frac{x \log x}{\zeta(2)} +O(x)
\end{eqnarray*}
\end{proof}
%------------------------------------------------------------------------------------
\begin{theorem}
As $x \rightarrow \infty$,
$$ G_0(x)=\frac{x^2\log x}{2\zeta(2)}+O(x^2) $$
\end{theorem}
\begin{proof}
By Theorem 2.3 with $f(n)=n$:
\begin{eqnarray*}
g(n)&=& \sum_{d|n} \frac{n}{d}\phi(d)\\
&=& (f * \phi)(n)\\
&=& (\phi * f)(n)
\end{eqnarray*}
If we define $F(x)=\sum_{n\le x} n$ then
$$ F(x)= \frac{\lfloor{x}\rfloor(\lfloor{x}\rfloor +1)}{2}=\frac{x^2}{2}+O(x) $$
Therefore
\begin{eqnarray*}
G_0 (x)&=&\sum_{n\le x} \phi (n)F(\frac{x}{n})\\
&=&\frac{x^2}{2}\sum_{n\le x} \frac{\phi(n)}{n^2} + O(x^2)\\
&=& \frac{x^2\log x}{2\zeta(2)} + O(x^2)
\end{eqnarray*}
\end{proof}
%-------------------------------------------------------------------------------------
\begin{lemma}
For all $\alpha \in \mathbb R$
$$ G_\alpha (x)= \sum_{n\le x} n^{1-\alpha} \Phi_\alpha(\frac{x}{n}) $$
where
$$ \Phi_\alpha(x)=\sum_{n\le x} \frac{\phi(n)}{n^\alpha } $$
\end{lemma}
\begin{proof}
Define the monomial function $m_\beta (x) = x^{-\beta}$ for all real $\beta$
and positive $x$, and write $\phi_\alpha$ for the function
$\phi_\alpha(n)=\phi(n)/n^\alpha$. By Theorem 2.3,
\begin{eqnarray*}
\frac{g(n)}{n^\alpha}&=& \sum_{d|n} \frac{\phi(d)}{d^\alpha}(\frac{n}{d})^{1-\alpha}\\
&=& (\phi_\alpha * m_{\alpha-1})(n)
\end{eqnarray*}
The lemma follows directly from this expression.
\end{proof}
Below we derive an asymptotic expression for $G_\alpha$ for all real values of
$\alpha$. This is interesting because essentially the same expression applies
uniformly. First we set out some standard estimates \cite{apostol1}, collected
together below for easy reference. Let
$$ S_\alpha(x)=\sum_{n\le x} \frac{1}{n^\alpha} $$
for all positive $x$ and real $\alpha$.
Then
\begin{eqnarray*}
(a)\mbox{\space} \Phi_0(x)&=&\frac{x^2}{2\zeta(2)}+O(x \log x)\\
(b)\mbox{\space} \Phi_1(x)&=&\frac{x}{\zeta(2)}+O(\log x)\\
(c)\mbox{\space} \Phi_2(x)&=&\frac{\log x}{\zeta(2)}+\frac{\gamma}{\zeta(2)}-A+
O(\frac{\log x}{x})\\
(d)\mbox{\space} \Phi_\alpha(x)&=&\frac{x^{2-\alpha}}{(2-\alpha)\zeta(2)}+
\frac{\zeta(\alpha-1)}{\zeta(\alpha)}+O(x^{1-\alpha} \log x), \quad\alpha>1,\alpha\ne 2\\
(e)\mbox{\space} \Phi_\alpha(x)&=&\frac{x^{2-\alpha}}{(2-\alpha)\zeta(2)}+
O(x^{1-\alpha} \log x),\quad\alpha\le 1\\
(A)\mbox{\space} S_1(x)&=&\log x + \gamma+O(\frac{1}{x})\\
(B)\mbox{\space} S_\alpha(x)&=&\frac{x^{1-\alpha}}{1-\alpha}+\zeta(\alpha)
+O(\frac{1}{x^\alpha }),\quad\alpha>0,\alpha\ne 1\\
(C)\mbox{\space} S_\alpha(x)&=&\frac{x^{1-\alpha}}{1-\alpha}
+O(\frac{1}{x^\alpha}),\quad\alpha\le 0
\end{eqnarray*}
where in (c)
$$A=\sum_{n=1}^\infty \frac{\mu(n)\log n}{n^2}\approxeq -0.35$$
Note that there are better estimates for the error terms, namely
$ O(x\log^{\frac{2}{3}} x (\log \log x)^{1+\epsilon})$ for (a) \cite{szaltiikov}
and $ O(\log^{\frac{2}{3}} x (\log \log x)^{\frac{4}{3}})$ for (b)
\cite{walfisz}, but, since these are available only for $\a=0$ and $\a=1$, we
do not use them.
%====================================================================================
Even though there is a wide diversity of expressions in this set, a very
similar expression holds for $G_\alpha(x)$ for all real values of $\alpha$,
except $\alpha=2$, which corresponds to the pole of $G(s)$:
\begin{theorem}
If $\a<2$:
$$ G_\alpha(x)=\frac{x^{2-\alpha} \log x}{(2-\alpha)\zeta(2)} + O(x^{2-\alpha}), $$
if $\a=2$:
$$ G_2(x)=\frac{ \log^2 x}{2\zeta(2)} + O(\log x) $$
and if $\a>2$:
$$ G_\alpha(x)=\frac{x^{2-\alpha} \log x}{(2-\alpha)\zeta(2)} +\frac{\z(\a-1)^2}{\z(\a)}+ O(x^{2-\alpha}). $$
\end{theorem}
\begin{proof}
Case 0: Let $\alpha = 0$. The stated result is given by Theorem 4.3 above.

Case 1: Let $\alpha = 1$. The result is given by Theorem 4.2.

Case 2: Let $\alpha = 2$.
\begin{eqnarray*}
G_2 (x)&=&\sum_{n\le x} n^{-1} \Phi_2 (\frac{x}{n}) \\
&=& \sum_{n\le x} \frac{\log(\frac{x}{n})}{n\zeta(2)}+(\frac{\gamma}{\zeta(2)}-A)
\sum_{n\le x}n^{-1}+\sum_{n\le x}O\big( n^{-1}\frac{\log(\frac{x}{n})}{x/n}\big)\\
&=&\frac{\log x}{\zeta(2)}(\sum_{n\le x} \frac{1}{n})-
\frac{1}{\zeta(2)}\sum_{n\le x} \frac{\log n}{n}+
(\frac{\gamma}{\zeta(2)}-A)(\sum_{n\le x} \frac{1}{n})+O(1)\\
&=&[\frac{\log x}{\zeta(2)}+\frac{\gamma}{\zeta(2)}-A][\log x + \gamma+O(\frac{1}{x})]
- \frac{1}{\zeta(2)}[\frac{\log^2 x}{2} + A_1+O(\frac{\log x}{x})]+O(1)\\
&=& \frac{\log^2 x}{2\zeta(2)}+\log x[\frac{2\gamma}{\zeta(2)}-A]+O(1)
\end{eqnarray*}
where $A_1$ is the constant in the expansion
$\sum_{n\le x}\frac{\log n}{n}=\frac{\log^2 x}{2}+A_1+O(\frac{\log x}{x})$.

Case 3: If $\alpha<1$ we have
\begin{eqnarray*}
G_\alpha (x)&=&\sum_{n\le x} \frac{1}{n^{\alpha-1}} \Phi_\alpha (\frac{x}{n}) \\
&=& \sum_{n\le x} \frac{1}{n^{\a -1}}\frac{x^{2-\a}}{n^{2-\a}(2-\a)\zeta(2)}+\sum_{n\le x}
O\bigl( x^{1-\a} \log(\frac{x}{n}) \bigr)\\
&=& \frac{x^{2-\a}}{(2-\a)\zeta(2)}(\sum_{n\le x} \frac{1}{n})+O(x^{2-\a})\\
&=& \frac{x^{2-\alpha}}{(2-\alpha)\zeta(2)}[\log x +\gamma+O(\frac{1}{x})]+O(x^{2-\alpha})\\
&=& \frac{x^{2-\alpha}\log x}{(2-\alpha)\zeta(2)}+ O(x^{2-\alpha})
\end{eqnarray*}

Case 4: Finally, if $\alpha>1$ and $\alpha \ne 2$:
\begin{eqnarray*}
G_\alpha (x)&=&\sum_{n\le x} \frac{1}{n^{\a -1}} \Phi_\a (\frac{x}{n}) \\
&=&\sum_{n\le x} \frac{1}{n^{\a-1}} \bigl[ \frac{x^{2-\a}}{n^{2-\a}(2-\a)\zeta(2)}
+\frac{\zeta(\a -1)}{\zeta(\a)}+ O(\frac{x^{1-\a}}{n^{1-\a}}\log(\frac{x}{n}))\bigr]\\
&=&\frac{x^{2-\a}}{(2-\a)\zeta(2)}(\sum_{n\le x}\frac{1}{n}) +
\frac{\zeta(\a -1)}{\zeta(\a)}(\sum_{n\le x}\frac{1}{n^{\a -1}})+O(x^{2-\a})\\
&=&\frac{x^{2-\a}}{(2-\a)\zeta(2)}[\log x +\gamma+O(\frac{1}{x})] \\
&&\quad + \frac{\zeta(\a -1)}{\zeta(\a)}[\frac{x^{2-\a}}{2-\a}+\zeta(\a -1)+O(x^{1-\a})]+O(x^{2-\a})\\
&=&\frac{x^{2-\alpha}\log x}{(2-\alpha)\zeta(2)}+ \frac{\z(\a-1)^2}{\z(\a)}+ O(x^{2-\alpha})
\end{eqnarray*}
\end{proof}
For $\a \in \{0,1,2\}$ we can improve these asymptotic expressions by deriving
an additional term and a smaller error. This has already been done for $\a=2$.
In both of the remaining cases we use the following useful, and again
elementary, device \cite{apostol1}: if $ab=x$, $F(x)=\sum_{n\le x} f(n)$ and
$H(x)=\sum_{n\le x} h(n)$, then
$$ \sum_{ed\le x} f(e)h(d)= \sum_{n\le a} f(n)H(\frac{x}{n})
+\sum_{n\le b} h(n)F(\frac{x}{n}) -F(a)H(b) $$
We apply this in the special case $a=b=\sqrt{x}$.
%----\a=1----4.5-------------------------------------------------------------------
\begin{theorem}
$$ G_1(x)=\frac{x\log x}{\z(2)}+x[\frac{2\gamma}{\z(2)}-A-\frac{1}{\z(2)}]+
O(\sqrt{x} \log x) $$
\end{theorem}
\begin{proof}
First rewrite $G_1(x)$:
\begin{eqnarray*}
G_1(x)&=&\sum_{n\le x} \Phi_1(\frac{x}{n}) \quad\text{(by Lemma 4.2)}\\
&=& \sum_{n\le x} \sum_{m\le x/n} \frac{\phi(m)}{m}\\
&=&\sum_{ed\le x} \frac{\phi(d)}{d}\cdot 1.
\end{eqnarray*}
Now let $F$ and $H$ be defined by
\begin{eqnarray*}
F(x)&=&\sum_{n\le x} \frac{\phi(n)}{n} = \frac{x}{\z(2)} + O(\log x)\\
H(x)&=&\sum_{n\le x} 1 = \lfloor x \rfloor
\end{eqnarray*}
Using the device described above, rewrite $G_1$ in terms of $F$ and $H$:
\begin{eqnarray*}
G_1(x)&=& \sx \frac{\phi(n)}{n} H(\frac{x}{n}) +\sx F(\frac{x}{n})-F(\sqrt{x})H(\sqrt{x})\\
&=& \sx \frac{\phi(n)}{n} [\frac{x}{n}+O(1)]+ \sx [\frac{x}{n \z(2)}+O(\log(\frac{x}{n}))]\\
&&-(\frac{\sqrt{x}}{\z(2)}+O(\log x))(\sqrt{x}+O(1))\\
&=& x\sx \frac{\phi(n)}{n^2}+O(\sx\frac{\phi(n)}{n}) + \frac{x}{\z(2)}\sx \frac{1}{n}\\
&& +O(\sqrt{x}\log x)+O(\sx \log n) - \frac{x}{\z(2)} +O(\sqrt x\log x)\\
&=& x[\frac{\log x}{2\z(2)} + \frac{\gamma}{\z(2)}-A+O(\frac{\log x}{\sqrt{x}})]+O(\sqrt{x})\\
&& +\frac{x}{\z(2)}[\log{ \lfloor \sqrt{x} \rfloor} + \gamma+O(\frac{1}{\sqrt{x}})]\\
&&+O(\sqrt x\log x) +O(\log \lfloor \sqrt x \rfloor !) - \frac{x}{\z(2)}\\
&=& \frac{x\log x}{\z(2)}+x[\frac{2\gamma}{\z(2)}-A-\frac{1}{\z(2)}]+ O(\sqrt{x} \log x).
\end{eqnarray*}
\end{proof}
%---------------------------------------------------------------------------------
\begin{theorem}
$$ G_2 (x)= \frac{\log^2 x}{2\zeta(2)}+\log x[\frac{2\gamma}{\zeta(2)}-A]+O(1) $$
\end{theorem}
\begin{proof}
See case 2 in the proof of Theorem 4.4 above.
\end{proof}
%-----\a=0---------------------------------------------------------------------------
\begin{theorem}
$$ G_0 (x)= \frac{x^2\log x}{2\zeta(2)}+\frac{x^2\z(2)^2}{2\zeta(3)}+O(x^{3/2}\log x) $$
\end{theorem}
\begin{proof}
First we state four estimates:
\begin{eqnarray*}
(1)&&G_1(x)= \sum_{n\le x} \frac{g(n)}{n}=\frac{x\log x}{\z(2)}+O(x)
\quad\text{(Theorem 4.2)}\\
(2)&&F(x)=\sum_{n\le x} n = \frac{x^2}{2}+O(x)\\
(3)&&\sum_{n\le x} \log n =x\log x + O(x)\\
(4)&&G_3(x)=\frac{\z(2)^2}{\z(3)}+O(\frac{\log x}{x}) \quad\text{(by Theorem 4.4)}
\end{eqnarray*}
Expand $G_0$ using $f(n)=n$ and $h(n)=g(n)/n$ so $H=G_1$:
\begin{eqnarray*}
G_0(x)&=& \sum_{n\le x} g(n)= \sum_{n\le x} \frac{g(n)}{n}\cdot n\\
&=& \sx \frac{g(n)}{n} F(\frac{x}{n})+ \sx n G_1(\frac{x}{n}) - F(\sqrt{x})G_1(\sqrt{x})\\
&=& \sx \frac{g(n)}{n}[\frac{x^2}{2 n^2}+O(\frac{x}{n})] +
\sx n[\frac{\frac{x}{n}\log \frac{x}{n}}{\z(2)}+O(\frac{x}{n})]\\
&&-(\frac{x}{2}+O(\sqrt{x}))(\frac{\sqrt{x}\log x}{2\z(2)}+O(\sqrt{x}))
\quad\text{(by (1) and (2))}\\
&=& \frac{x^2}{2}\sx \frac{g(n)}{n^3} + O\big(\sx \frac{g(n)}{n^2}\big) + \frac{x\log x}{\z(2)}\sx 1\\
&& -\frac{x}{\z(2)}\sx \log n + O(x\sx 1) -\frac{x^{3/2}\log x}{4\z(2)} + O(x^{3/2})\\
&=&\frac{x^2}{2}G_3(\sqrt{x}) + O(\log^2 x) + \frac{x^2\log x}{2\z(2)}+O(x^{3/2}\log x)\\
&& -\frac{x}{\z(2)}[\frac{\sqrt{x}\log x}{2}+O(\sqrt{x})]+O(x^{3/2}) -
\frac{x^{3/2}\log x}{4\z(2)}+O(x^{3/2}) \quad\text{(by (3))}
\end{eqnarray*}
Therefore
\begin{eqnarray*}
G_0(x)&=& \frac{x^2}{2}[\frac{\z(2)^2}{\z(3)}+O(\frac{\log x}{\sqrt{x}})] + \frac{x^2\log x}{2\zeta(2)}\\
&&-\frac{x}{\z(2)}[\frac{\sqrt{x}\log x}{2}+O(\sqrt{x})] \\
&&- \frac{x^{3/2}\log x}{4\z(2)}+O(x^{3/2}\log x)\quad\text{(using (4))}\\
&=& \frac{x^2\log x}{2\zeta(2)}+\frac{x^2\z(2)^2}{2\zeta(3)}+O(x^{3/2}\log x)
\end{eqnarray*}
\end{proof}
%---------------------------------------------------------------------------------
%====================================================================================
\section{Application}
Consider the problem of counting the integer lattice points in the first
quadrant in the square $[0,R] \times [0,R]$ and under the curve $y=\sqrt
{Rx}$ as $R \rightarrow \infty$. Let $R=n^2$ and count lattice points by adding
those in trapezia under the curve. If $T$ is a trapezium with integral
coordinates for each vertex $(0,0)$, $(b,0)$, $(0,\alpha)$ and $(b,\beta)$, then
by Pick's theorem \cite{coxeter} the area is equal to the number of interior
lattice points, plus one half the number of lattice points interior to the
edges, plus one. From this it follows that the total number of interior lattice
points is given by the expression
\begin{eqnarray*}
\specialeqnum{12}
\frac{1}{2}[(b-1)(\alpha+\beta)-b-(b,\beta-\alpha)+2]
\end{eqnarray*}
where $(u,v)$ is the greatest common divisor. We approximate the region under
the curve $y=n\sqrt{x}$ and above the interval $[0,n^2]$ by $n$ trapezia, the
$j$-th having base $[(j-1)^2,j^2]$. Divide the lattice points inside and on the
boundary of these trapezia into five sets:
\begin{eqnarray*}
L_1&=&\#\{\mbox{interior points of trapezia}\}\\
L_2&=&\#\{\mbox{interior points of vertical sides}\}\\
L_3&=&\#\{\mbox{interior points of the top sides}\}\\
L_4&=&\#\{\mbox{interior points of the bottom sides}\}\\
L_5&=&\#\{\mbox{vertices of all trapezia}\}
\end{eqnarray*}
Then
\begin{eqnarray*}
L_1&=&\sum_{j=1}^n \frac{1}{2}[(2j-1)(nj+n(j-1))-(2j-1)-n(j-1)-nj-(2j-1,n) +2]\\
L_2&=&\sum_{j=1}^{n-1} (nj-1)=\frac{n^3}{2}-\frac{n^2}{2}-n+1\\
L_3&=&\sum_{j=1}^n [(n,2j-1)-1]=S(n)-n \quad\text{where $S$ is defined in (2)}\\
L_4&=&\sum_{j=1}^n (2j-2) = n^2-n\\
L_5&=&2n+1
\end{eqnarray*}
Hence if $N_1(R)$ represents the total number of lattice points in the
trapezia,
\begin{eqnarray*}
N_1 (R)&=&L_1+L_2+L_3+L_4+L_5\\
&=& \frac{2}{3}n^4-\frac{1}{6}n^2+\frac{1}{2}S(n)\\
N_1 (n^2)&=&\frac{2}{3}n^4-\frac{1}{6}n^2+O(n^{1+\epsilon})
\end{eqnarray*}
by Theorem 3.2. It is interesting to note that the area of the gap between the
curve and the trapezia is exactly $\frac{1}{6}n^2$.

The total number of points in the trapezia is of course less than the number
under the curve. There are $n$ trapezia, the $j$-th having width $2j-1$. The
maximum distance from the top of the $j$-th trapezium to the curve is
$n/(4(2j-1))$, so the number of additional points is $O(n^2)$. This leads to
the estimate
$$ N_2(n^2)=\frac{2}{3}n^4+O(n^2) $$
for the number $N_2$ of lattice points under the curve.

Now a more accurate estimate for $N_2$ is derived. First the method of
Vinogradov \cite{karatsuba} is used to estimate the sum of the fractional parts
of the inverse function $x={y^2}/R$: let $b-a \ll A$ where $A \gg 1$, and let
$f$ be a function defined on the positive real numbers with $f''$ continuous,
$0 < f'(x) \ll 1$ and $f''(x) \gg \frac{1}{A}$.
Then
\begin{eqnarray*}
\specialeqnum{13}
\sum_{a < u \le b} \{f(u)\}=\frac{b-a}{2}+O(A^\frac{2}{3})
\end{eqnarray*}
If $A=n$, $f(u)=u^2/n$, and $f''(u)=2/n \gg A^{-1}$, then it follows that
\begin{eqnarray*}
\sum_{0 < u \le n} \{f(u)\}=\frac{n}{2}+O(n^\frac{2}{3})
\end{eqnarray*}
Hence the number of lattice points $M(n)$ under or on the inverse function
curve is
\begin{eqnarray*}
M(n) &=& \sum_{j=1}^n \bigl\lfloor{\frac{j^2}{n}}\bigr\rfloor +n+1\\
&=& \sum_{j=1}^n \frac{j^2}{n} - \sum_{j=1}^n \bigl\{ \frac{j^2}{n} \bigr\} + n+1\\
&=& \frac{1}{6}(n+1)(2n+1)+\frac{n}{2}+O(n^\frac{2}{3})
\end{eqnarray*}
So if $N_2 (n)$ represents the number of lattice points strictly under the
curve $y=\sqrt{Rx}$ when $R=n$, then
\begin{eqnarray*}
N_2 (n)&=& (n+1)^2 - \frac{1}{6}(n+1)(2n+1)-\frac{n}{2}+O(n^\frac{2}{3})\\
&=& \frac{2}{3}n^2+n+O(n^\frac{2}{3})
\end{eqnarray*}
The number of lattice points on the curve is $O(n^\frac{1}{2})$, so does not
change this estimate.
%====================================================================================
%% End of article:
%% optional
\begin{acknowledgment}
This work was done in part while the author was on study leave at Columbia
University. The support of the Department of Mathematics at Columbia University
and the valuable discussions held with Patrick Gallagher are warmly
acknowledged.
\end{acknowledgment}
%% Not optional, necessary:
\begin{references}
\bibitem{apostol1} Apostol, T.M. {\it Introduction to Analytic Number Theory}.
New York, Berlin, Heidelberg: Springer-Verlag, 1976.
\bibitem{apostol2} Apostol, T.M. {\it Modular Functions and Dirichlet Series in
Number Theory}, Second Edition. New York, Berlin, Heidelberg: Springer-Verlag,
1990.
\bibitem{cohen1} Cohen, E. {\it Arithmetical functions of greatest common
divisor. I}, Proc. Amer. Math. Soc. {\bf 11} (1960), 164--171.
\bibitem{cohen2} Cohen, E. {\it Arithmetical functions of greatest common
divisor. II. An alternative approach}, Boll. Un. Mat. Ital. (3) {\bf 17}
(1962), 349--356.
\bibitem{cohen3} Cohen, E. {\it Arithmetical functions of greatest common
divisor. III. Ces\'aro's divisor problem}, Proc. Glasgow Math. Assoc. {\bf 5}
(1961), 67--75.
\bibitem{coxeter} Coxeter, H.S.M. {\it Introduction to Geometry}, New York:
Wiley, 1969.
\bibitem{karatsuba} Karatsuba, A.A. {\it Basic Analytic Number Theory}. New
York, Berlin, Heidelberg: Springer-Verlag, 1993.
\bibitem{szaltiikov} Szaltiikov, A.I. {\em On Euler's function}, Mat. Sb.
{\bf 6} (1960), 34--50.
\bibitem{walfisz} Walfisz, A. {\it Weylsche Exponentialsummen in der neueren
Zahlentheorie}, Berlin, 1963.
\end{references}
\vspace*{+.5in}
\centerline{\rule{6.5in}{.01in}}
\vspace*{+.1in}
\noindent
{\small (Concerned with sequence
\htmladdnormallink{A018804}{http://www.research.att.com:80/cgi-bin/access.cgi/as/njas/sequences/eisA.cgi?Anum=018804}.)}
\centerline{\rule{6.5in}{.01in}}
\vspace*{+.1in}
\noindent
Received April 2, 2001. Revised version received July 19, 2001. Published in
Journal of Integer Sequences, Oct 25, 2001.
\centerline{\rule{6.5in}{.01in}}
\vspace*{+.1in}
\noindent
Return to
\htmladdnormallink{Journal of Integer Sequences home page}{http://www.research.att.com/~njas/sequences/JIS/}.
\centerline{\rule{6.5in}{.01in}}
\end{document}