\documentclass[12pt]{article}

\usepackage{e-jc}
\specs{P3.24}{22(3)}{2015}

\usepackage{amssymb,amsfonts,amsmath,amsthm}
\DeclareMathOperator{\dd}{d}
\newcommand{\eR}[0]{\ensuremath{ \mathbb R}}
\newcommand{\eN}[0]{\ensuremath{ \mathbb N}}
\newcommand{\eps}{\varepsilon}
\newenvironment{proofof}[1]{\vspace{1ex}\noindent{\bf Proof of #1:}}{\hspace*{\fill}$\blacksquare$\vspace{1ex}}

%\newtheorem{lemma}[theorem]{Lemma}
\numberwithin{theorem}{section}
\numberwithin{equation}{section}



\begin{document}

% ## Start of authors' contribution

\setcounter{page}{47}

\renewcommand{\thesection}{C}


\section{Corrigendum added 29 Dec 2018}

The proof of Lemma 1.2 in the earlier version of the paper contained a slight oversight.
Here we provide a corrected statement with a full proof.
%whose proof was actually incomplete as we recently noticed.
%Here we provide a full proof.
% The following lemma, which we prove in Appendix~\ref{sec:CouplApp}, shows that we can take $\zeta=1$ without any loss of generality.
% We remind the reader that a {\em coupling} of two random objects $X,Y$ is a common probability space for a pair of objects $(X',Y')$ 
% whose marginal distributions satisfy $X'\isd X, Y'\isd Y$.

\begin{lemma}%[\cite{oops}, Lemma 1.2]
\label{lem:main}
Let $\alpha, \alpha', \nu, \nu' > 0$ be such that $\alpha \geq \alpha'$ and $\nu \leq \nu'$.
Then there exists a coupling such that, with probability tending to one as $N\to\infty$, the graph $G(N;\alpha, \nu)$ is a 
subgraph of $G(N;\alpha',\nu')$.
\end{lemma}

The only change of the statement compared to that of Lemma~1.2 is the
addition of the phrase ``with probability tending to one as
$N\to\infty$''.  This means that for practically all applications of
the old lemma, certainly the ones we have in mind, the new lemma
serves just as well.

%Throughout this note, we use hyperbolic polar coordinates to denote points of the hyperbolic plane. 
We use the same coupling as in the original paper, but for completeness we specify it again. 
We pick $\vartheta_1, \dots, \vartheta_N$ i.i.d.~uniform on $[0,2\pi)$ and we pick
$U_1,\dots, U_N$ i.i.d.~uniform on $[0,1]$.
We now let $\rho_1, \dots, \rho_N$  be defined by the equations:
\begin{equation}\label{eq:cdef}  
F_{\alpha,R}( \rho_i ) %= F_{\alpha',R'}( \rho_i' ) 
= U_i \quad \quad \quad (\text{for $i=1,\dots,N$}),
\end{equation}
where $F_{\alpha, R}(.)$ denotes the cdf of the radius coordinate under the $\alpha$-quasi uniform distribution. That is,
\begin{equation}\label{eq:Fdef} 
F_{\alpha,R}( r ) %= \Pee( \rho \leq r ) 
= \left\{\begin{array}{cl}
0 & \text{ if $r < 0$, } \\
\frac{\cosh( \alpha r) - 1}{\cosh(\alpha R)-1} & \text{ if } 0 \leq r \leq R; \\
1 & \text{ otherwise.}
\end{array}\right.
\end{equation}

%(Here $F_{\alpha,R}$ is as defined in the proof of Lemma~\ref{lem:CCCoupl}, and $R := 2\log(N/\nu), R' := 2\log(N/\nu')$.)
Note that the $\rho_i$s have exactly the distribution with cdf $F_{\alpha, R}$. 
The points used in the construction of  $G(N;\alpha, \nu)$ will be
$(\rho_1,\vartheta_1), \dots, (\rho_N,\vartheta_N)$ where we take $R = 2 \log(N/\nu)$.

(This coupling defines $G(N;\alpha,\nu)$ simultaneously for all $\alpha, \nu > 0$.)


\noindent
Let us also recall that, by the hyperbolic cosine rule, provided $\rho_i + \rho_j > R$, there is an edge between 
vertex $i$ and vertex $j$ if and only if
\begin{equation}\label{eq:edgerule} 
\cos(\vartheta_{ij}) \geq \frac{\cosh \rho_i \cosh \rho_j - \cosh R}{\sinh \rho_i \sinh \rho_j}, 
\end{equation}
where $\vartheta_{ij} = \min(|\vartheta_i - \vartheta_j|,2\pi- |\vartheta_i - \vartheta_j|)$ is the angle between 
the $i$-th and the $j$-th point.
If $\rho_i+\rho_j\leq R$ then by the triangle inequality there is always an edge between vertex $i$ 
and vertex $j$.


We will split the proof of Lemma~\ref{lem:main} into two parts, as follows.

\begin{lemma}\label{lem:alphamain}
 If $\alpha\geq\alpha'$ and $\nu=\nu'$ then, under the coupling described above, we have 
 that $G(N;\alpha, \nu)$ is a subgraph of $G(N;\alpha',\nu')$.
\end{lemma}

\begin{lemma}\label{lem:numain}
 If $\alpha=\alpha'$ and $\nu\leq\nu'$ then under the coupling described above, with probability tending to one as $N\to\infty$, 
 we have that $G(N;\alpha, \nu)$ is a subgraph of $G(N;\alpha',\nu')$.
\end{lemma}

Together these lemmas clearly prove Lemma~\ref{lem:main}.
Lemma~\ref{lem:alphamain} relies on Lemma~B.2 above, but we include a
proof here as well for completeness. The proof of
Lemma~\ref{lem:numain} needs an additional argument that unfortunately
was omitted in the original version of our paper.



%%%%%%%%
\section*{The proof of Lemma~\ref{lem:alphamain}}
%%%%%%%%


% We need the following geometric fact.
% 
% \begin{lemma}\label{lem:geod}
% Suppose that $p = (r,\theta), q = (s,\vartheta)$ are two points in the hyperbolic plane
% satisfying $\dist_{\Haa}(p, O), \dist_{\Haa}(q,O), \dist_{\Haa}(p,q) \leq R$ and let
% $p' = (r',\theta), q' = (s',\vartheta)$ with $r' \leq r, s' \leq s$.
% Then $\dist_{\Haa}(p',q') \leq R$.
% \end{lemma}
% 
% Before giving the proof of this lemma, let us remind the reader that disks are convex, also in the hyperbolic plane. 
% This means that if $D$ is a disk in the hyperbolic plane and $x, y \in D$ then the geodesic between
% $x,y$ is contained in $D$.
% One way to see this is by noting that every disk can be isometrically mapped to a disk with origin $O$, and that in the projective disk 
% model of the hyperbolic plane (a.k.a.~the Beltrami-Klein model) a hyperbolic disk with origin $O$ looks like a Euclidean disk, while 
% geodesics are just line segments in the projective disk model.
% %%(See for instance Section 4.8 of~\cite{bk:Stillwell} for a description of the projective disk model.)
% 
% 
% \begin{proofof}{Lemma~\ref{lem:geod}}
% It is enough to consider the case when $r' < r$ and $s'=s$. (Another application of this case
% will then give the full result.)
% Observe that the geodesic between $O$ and $p$ is just the line segment between them.
% So in particular, $p'$ lies on the geodesic between $O$ and $p$.
% Since $O, p \in B(q;R)$ it follows that also $p' \in B(q;R)$, as required.
% \end{proofof}

We will imagine the situation where $\vartheta_1, \dots, \vartheta_N$ and $U_1, \dots, U_N$ and $R = 2 \log(N/\nu)$ remain fixed
as we increase $\alpha$, and $\rho_1, \dots, \rho_N$ are defined via~\eqref{eq:cdef}.
It is enough to show that, as we increase $\alpha$, no new edges will be introduced.

We need the following observation, which can be rephrased as stating that the radius under the $(\alpha,R)$-quasi uniform distribution
stochastically dominates the radius under the $(\alpha',R)$-quasi uniform distribution if $\alpha > \alpha'$.

\begin{lemma}\label{lem:alphastochdom}
If $\alpha\geq\alpha'$ and $\nu=\nu'$ then $F_{\alpha,R}(r) \leq F_{\alpha',R'}(r)$ for every $r \in \eR$.
\end{lemma}

\begin{proof}
Note that $\nu=\nu'$ implies that also $R=R'$.
Let us thus fix $R > 0$ and $0 < r < R$, and define $\varphi(\alpha) := F_{\alpha,R}(r)$
for every $\alpha > 0$. %%% (with respect to $\alpha$).
Our aim will be to show that $\frac{{\dd}\varphi}{{\dd}\alpha}$ is non-positive for every $\alpha > 0$, which will clearly yield the result.

We obtain:
\[  
\frac{{\dd}\varphi}{{\dd}\alpha} =
\frac{r\sinh(\alpha r) (\cosh(\alpha R)-1) - R\sinh(\alpha R)(\cosh(\alpha r)-1)}{
(\cosh(\alpha R)-1)^2}.
\]
Observe that this is non-positive if and only if
\[ 
 \frac{\alpha R \sinh(\alpha R)}{\cosh(\alpha R)-1} \geq \frac{\alpha r \sinh(\alpha r)}{\cosh(\alpha r) -1}. 
\]
We claim this is the case for all $0 \leq r \leq R$. 
To see this, it suffices to show that 
 $(x\sinh x) / (\cosh x-1)$ is nondecreasing for $x\geq 0$.
Let us thus compute
\begin{align*}
 \left[\frac{x\sinh x}{\cosh x -1}\right]'
 & =  \frac{
(\sinh x+ x\cosh x)(\cosh x - 1) - x\sinh^2 x 
 }{
 (\cosh x -1)^2
 } \\
&  = 
\frac{
\sinh x\cosh x + x \cosh^2 x - \sinh x - x\cosh x - x\sinh^2 x 
 }{
 (\cosh x-1)^2
 } \\
& = 
\frac{
\sinh x\cosh x + x(\cosh^2x-\sinh^2x)-\sinh x - x\cosh x
}{
 (\cosh x-1)^2
} \\
& = 
\frac{
\sinh x\cosh x + x-\sinh x - x\cosh x
}{
 (\cosh x-1)^2
} \\
& =
\frac{
(\sinh x-x)(\cosh x-1)
}{
 (\cosh x-1)^2
} \\
& \geq 
0.
\end{align*}
So our claim holds, and we see that indeed
$\frac{{\dd}\varphi}{{\dd}\alpha} \leq 0$ for all $\alpha > 0$. This
proves the lemma.
\end{proof}

Note that Lemma~\ref{lem:alphastochdom} implies that $\rho_i$ is non-decreasing in $\alpha$ for $i=1,\dots, N$. 
To complete the proof of Lemma~\ref{lem:alphamain}, it therefore suffices to show that the right-hand side of~\eqref{eq:edgerule} is non-decreasing in 
$\rho_i, \rho_j$. To see this we differentiate:
\begin{align*}
\frac{\partial}{\partial\rho_i} \left[ \frac{\cosh \rho_i \cosh \rho_j - \cosh R}{\sinh \rho_i \sinh \rho_j} \right] 
& = 
\frac{\sinh \rho_i \cosh \rho_j}{\sinh \rho_i \sinh \rho_j} - \frac{\cosh \rho_i(\cosh \rho_i \cosh \rho_j - \cosh R)}{\sinh^2 \rho_i \sinh \rho_j} \\
& =  
\frac{\cosh \rho_i \cosh R + (\sinh^2\rho_i -\cosh^2\rho_i)\cosh\rho_j}{\sinh^2\rho_i\sinh \rho_j} 
\\
& =  
\frac{\cosh \rho_i \cosh R - \cosh\rho_j}{\sinh^2\rho_i\sinh \rho_j}
\\
& \geq 0,
\end{align*}
using that $\cosh^2 x - \sinh^2 x = 1$ for the penultimate line and that $\rho_j \leq R$ for the last line.
By symmetry, the same of course holds for the derivative with respect to
$\rho_j$.



%%%%%%%%%
\section*{The proof of Lemma~\ref{lem:numain}}
%%%%%%%%

We begin by noting that $R := 2\log(N/\nu)$ is monotone decreasing in $\nu$.
% We also recall that, by the hyperbolic cosine rule, provided $\rho_i + \rho_j \geq R$, there is an edge between 
% vertex $i$ and vertex $j$ if and only if
% 
% \begin{equation}\label{eq:edgerule} 
% \cos(\vartheta_{ij}) \geq \frac{\cosh \rho_i \cosh \rho_j - \cosh R}{\sinh \rho_i \sinh \rho_j}, 
% \end{equation}
% 
% \noindent
% where $\vartheta_{ij} = |\vartheta_i - \vartheta_j|_{2\pi}$ is the angle between the $i$-th and the $j$-th point.

Similarly to the proof of Lemma~\ref{lem:alphamain}, we will imagine the situation where 
$\vartheta_1, \dots, \vartheta_N$ and $U_1, \dots, U_N$ remain fixed and we 
vary $R$, and $\rho_1 = \rho_1(R), \dots, \rho_N = \rho_N(R)$ are defined via~\eqref{eq:cdef} above.

We first consider the derivatives of $\rho_i(R)$ and $\rho_j(R)$.
Since we keep $U_i$ constant, differentiation of~\eqref{eq:cdef} with respect to~$R$ gives that
\begin{align*}
\alpha \sinh(\alpha \rho_i) \cdot \frac{\dd\rho_i}{{\dd}R} 
& = \frac{\dd}{{\dd}R} {\Big [} \cosh(\alpha \rho_i)-1 {\Big]}  \\
& = U_i \cdot \frac{\dd}{{\dd}R} {\Big[} \cosh(\alpha R) -1 {\Big ]} \\
& = U_i \cdot \alpha \sinh(\alpha R) \\
& \stackrel{\hidewidth\eqref{eq:cdef}\hidewidth}{=} \  
\frac{\cosh(\alpha\rho_i)-1}{\cosh(\alpha R)-1} \cdot \alpha \sinh( \alpha R ). 
\end{align*}
In other words,
\begin{equation}\label{eq:drdR}
 \frac{{\dd}\rho_i}{{\dd}R} = \frac{\varphi( \alpha \rho_i )}{\varphi( \alpha R )},
 %\frac{\cosh(\alpha\rho_i)-1}{\sinh(\alpha\rho_i)} / \frac{\cosh(\alpha R)-1}{\sinh(\alpha R)}.
\end{equation}
where 
$$\varphi(x) := \frac{\cosh x - 1}{\sinh x}. $$
Of course the corresponding statement holds for $\frac{{\dd}\rho_j}{{\dd}R}$.

\medskip

The following observations about $\varphi$ will turn out to be useful:

\begin{lemma}\label{lem:phiconc}
The function $\varphi$ is positive and strictly increasing on $(0,\infty)$, 
and  
\begin{equation}\label{eq:smallx} 
\varphi(x) = \Theta(x), %x/2 + O(x^2),
\end{equation}
as $x\searrow 0$; and, as $x\to\infty$, for every fixed $k \in \eN$ we have
\begin{equation}\label{eq:bigx} 
\varphi(x) = 1 + 2 \sum_{i=1}^k (-1)^i e^{-ix} + O( e^{-(k+1)x} ). 
\end{equation}
\end{lemma}


\begin{proofof}{Lemma~\ref{lem:phiconc}}
That $\varphi$ is positive follows immediately 
from the definition.
The derivative of $\varphi$ is:
\[ \varphi'(x) = 1 - \frac{(\cosh x -1)\cosh x}{\sinh^2 x} = \frac{\sinh^2 x - \cosh^2 x + \cosh x}{\sinh^2 x}
 = \frac{\cosh x - 1}{\sinh^2 x} > 0. 
\]
(Using that $\cosh^2 x - \sinh^2 x = 1$.)


% The second derivative is:
% 
% \[ \begin{array}{rcl} \varphi''(x) 
% & = & \frac{\sinh x}{\sinh^2 x} - 2 \frac{(\cosh x -1)\cosh x}{\sinh^3 x} = \frac{\sinh^2 x - 2 \cosh^2 x + 2 \cosh x}{\sinh^3 x} \\
% & = & \frac{-1 - \cosh^2 x + 2\cosh x}{\sinh^3 x} = - \frac{(\cosh x - 1)^2}{\sinh^3 x} \leq 0. 
% \end{array} \]
To see~\eqref{eq:smallx} we note that
\begin{align*}
  \varphi(x)
    & = 
  \frac{\frac12(e^x+e^{-x})-1}{\frac12(e^x-e^{-x})} =  \frac{e^x + e^{-x} - 2}{e^x - e^{-x}}  =  
\frac{2 + 2 x^2/2 + 2 x^4 / 4! + \cdots - 2}{2x + 2x^3/3! + 2x^5/5! + \cdots} \\
& = 
\frac{x^2/2 + x^4/4! + \cdots }{x + x^3/3! + \cdots }  = 
\frac{x}{2} \cdot \frac{1 + 2x^2/4! + 2x^4/6! + \cdots}{1 + x^2/3! + x^4/5! + \cdots} \\
& =  \Theta(x). %\frac{x}{2} \cdot (1+o(1)),
\end{align*}
To see~\eqref{eq:bigx} we note that
\[ 
\varphi(x) = \frac{e^x + e^{-x} - 2}{e^x - e^{-x}} = 1 + \frac{2e^{-x}-2}{e^{x}-e^{-x}} 
 = 1 - 2 e^{-x} \frac{1-e^{-x}}{1-e^{-2x}}.
\]
Now we note that, for $|z|<1$:
$$
\frac{z(1-z)}{1-z^2} = z (1-z)(1+z^2+z^4+ \cdots) = z - z^2 + z^3 - z^4 + \cdots. 
$$
Hence, taking $z=e^{-x}$, we see
$$ \varphi(x) = 1 + 2 \sum_{i=1}^\infty (-1)^i e^{-ix} = 1 + 2 \sum_{i=1}^k (-1)^i e^{-ix} + O\left( e^{-(k+1)x } \right). $$
\end{proofof}

We need the following straightforward observations:

\begin{lemma}\label{lem:nosmallpts}
Let $\alpha, \nu > 0$ be fixed.
\begin{enumerate}
 \item\label{itm:nosmallptsi} A.a.s., $G(N;\alpha,\nu)$ does not contain any point of radius $\rho_i \leq e^{-R/4}$;
 \item\label{itm:nosmallptsii} If $\alpha > \frac12$ then, a.a.s.,
 $G(N;\alpha,\nu)$ does not contain any point with radius $\rho_i \leq 1$;
\end{enumerate}
\end{lemma}

\begin{proofof}{Lemma~\ref{lem:nosmallpts}}
This is a straightforward application of the first moment method.
The expected number of points with radius at most $1$ is
\[ N \cdot \frac{\cosh(\alpha)-1}{\cosh(\alpha R)-1} =  O(1) \cdot e^{R/2 - \alpha R}   = 
O(1) \cdot e^{R(\frac12 - \alpha)},
\] 
which is $o(1)$, if $\alpha > \frac12$.
Note that $\cosh (x) -1 = O(x^2)$, when $x \to 0$. 
So, a similar calculation now yields the expected number of points of radius at most $e^{-R/4}$:
\[ N \cdot \frac{\cosh(\alpha e^{-R/4})-1}{\cosh(\alpha R)-1} = 
O(1) \cdot e^{R/2 - R/2 - \alpha R}  = 
O\left(1 \right) \cdot  e^{- \alpha R} = o(1). \]
\end{proofof}

Let us remark that, under our coupling, the conclusions of Lemma~\ref{lem:nosmallpts} in fact hold simultaneously
for all $G(N;\alpha,\nu')$ with $\nu' \leq \nu$. (This is because the radii $\rho_1, \dots, \rho_N$ and $R$ are both non-increasing in $\nu$.
I.e., if we decrease $\nu$ then the radii can only get larger while $e^{-R/4}$ will decrease.)

The derivative of the right-hand side of~\eqref{eq:edgerule} with respect to~$R$ is {\small
\begin{align}&
{\bigg(}\frac{-\cosh\rho_j+\cosh R \cosh\rho_i}
{\sinh^2\rho_i\sinh\rho_j} 
{\bigg)} \frac{{\dd}\rho_i}{{\dd}R} 
+ 
{\bigg(} \frac{-\cosh\rho_i%}{\sinh\rho_i\sinh^2\rho_j} 
+ \cosh R \cosh\rho_j}{\sinh\rho_i\sinh^2\rho_j} 
{\bigg)} \frac{{\dd}\rho_j}{{\dd}R} 
- 
\frac{\sinh R}{\sinh\rho_i\sinh\rho_j} \nonumber\\
&= 
\frac{1}{\sinh\rho_i\sinh\rho_j} \left( 
%\left( 
\frac{\cosh R \cosh\rho_i - \cosh\rho_j}{\sinh\rho_i} %\right) 
\frac{{\dd}\rho_i}{{\dd}R}
+ %\left( 
\frac{\cosh R \cosh\rho_j - \cosh\rho_i}{\sinh\rho_j} %\right) 
\frac{{\dd}\rho_j}{{\dd}R}
- \sinh R
\right). \label{eq:derida}
\end{align}}


% %where we've used that $\left[ \cosh x / \sinh x \right]' = 1 - \cosh^2 x / \sinh^2 x = (\sinh^2 x - \cosh^2 x)/\sinh^2 x = 
% %- 1/\sinh^2 x$.


We fix a small $\eps > 0$, to be made precise later, and we first assume that $\rho_i, \rho_j$ are both at least $\eps$.
Since $\rho_i + \rho_j > R$, we can assume without loss of generality that 
$\rho_j \geq R/2$ and $\rho_i \geq \eps$.
We remark that 
\begin{align*}
\eqref{eq:derida}
& \geq 
\frac{1}{\sinh\rho_i\sinh\rho_j} 
\left( 
\left( \frac{\cosh\rho_i-1}{\sinh\rho_i} \cdot \frac{{\dd}\rho_i}{{\dd}R}
+ \frac{\cosh\rho_j-1}{\sinh\rho_j} \cdot \frac{{\dd}\rho_j}{{\dd}R} \right) \cosh R 
- \sinh R \right)  \\
& = 
\frac{1}{\sinh\rho_i\sinh\rho_j} \left( 
\frac{\varphi(\rho_i) \varphi(\alpha\rho_i) + \varphi(\rho_j)\varphi(\alpha\rho_j)}{\varphi(\alpha R)} \cosh R 
- \sinh R \right).
\end{align*}
Hence, under the assumption that $\rho_i \geq \eps, \rho_j \geq R/2$, the factor in front of $\cosh R$ 
in~\eqref{eq:derida} can be lower bounded as follows.
$$ \frac{\varphi(\rho_i) \varphi(\alpha\rho_i) + \varphi(\rho_j)\varphi(\alpha\rho_j)}{\varphi(\alpha R)}
\geq \frac{ \varphi(\eps)\varphi(\alpha\eps) + 1 - o_R(1) }{ 1 - o_R(1) } = 1 + \varphi(\eps)\varphi(\alpha\eps) + o_R(1). $$
For sufficiently large $R$, this expression is larger than 1, which implies the derivative of the right-hand side of~\eqref{eq:edgerule} is
positive. (Using that $\varphi(\eps), \varphi(\alpha\eps) > 0$.)
It thus remains to consider only the case when one of the two radii is less than $\eps$.
Let us also note that, by part~\ref{itm:nosmallptsii} of Lemma~\ref{lem:nosmallpts}, a.a.s., there are no points with radius less than $\eps$ if
$\alpha > 1/2$. Hence we can further assume in the sequel that $\alpha \leq 1/2$.


Let us thus consider a pair $i,j$ with $\rho_i \leq \eps$ and $\rho_i+\rho_j > R$ (or in other words $\rho_j > R-\rho_i$).
By part~\ref{itm:nosmallptsi} of Lemma~\ref{lem:nosmallpts} we can and do further assume that 
$\rho_i \geq e^{-R/4}$.
We now use the more detailed lower bound
{\small
\begin{align*}
  \eqref{eq:derida} & \geq 
   \frac{1}{\sinh\rho_i\sinh\rho_j}  \left( 
\bigg( \frac{\cosh\rho_i-\frac{\cosh(\rho_j)}{\cosh(R)}}{\sinh\rho_i} 
   \frac{{\dd}\rho_i}{{\dd}R} + \frac{\cosh\rho_j-1}{\sinh\rho_j}  \frac{{\dd}\rho_j}{{\dd}R} \bigg)\cosh R   - \sinh R \right)  \\
   & = 
   \frac{1}{\sinh\rho_i\sinh\rho_j}  \left(  \bigg( \frac{\cosh\rho_i-\frac{\cosh(\rho_j)}{\cosh(R)}}{\sinh\rho_i}  
   \frac{\varphi(\alpha \rho_i)}{\varphi(\alpha R)} + \frac{\varphi(\rho_j)\varphi(\alpha\rho_j)}{\varphi(\alpha R)} 
   \bigg)\cosh R   - \sinh R \right) \\
   & \geq  
   \frac{1}{\sinh\rho_i\sinh\rho_j}  \left(  \bigg( \frac{\cosh\rho_i-\frac{\cosh(R-\rho_i)}{\cosh(R)}}{\sinh\rho_i}  
   \frac{\varphi(\alpha\rho_i)}{\varphi(\alpha R)} + \frac{\varphi(R-\rho_i)\varphi(\alpha(R-\rho_i))}{\varphi(\alpha R)} 
   \bigg)\cosh R {-} \sinh R \right) \\
   & =: 
   \frac{1}{\sinh\rho_i\sinh\rho_j}  \big( 
( A + B )\cosh R   - \sinh R \big).
\end{align*}}%
For convenience, we write $r := \rho_i$ from now on. We let $k \in \eN$ be such that $k \alpha > 1$, and note that
\begin{align}
    B 
    & = 
    \frac{ (1-O(e^{-R})) (1 - 2 \sum_{i=1}^k e^{i \alpha (r-R)} + O( e^{-R} ) )}{1 - 2 \sum_{i=1}^k e^{-i \alpha R} + O(e^{-R})} \nonumber\\
     & =  
    (1-O(e^{-R})) \left( 1 + 2\frac{\sum_{i=1}^k e^{-i \alpha R} - \sum_{i=1}^k e^{i \alpha (r-R)} + O(e^{-R}) }{
    1 - 2\sum_{i=1}^k e^{-i \alpha R} + O(e^{-R})}\right) \nonumber\\
    & =  
    (1-O(e^{-R})) \left( 1 - 2 \frac{\sum_{i=1}^k (e^{i\alpha r} - 1) 
e^{-i\alpha R} }{1 - O( e^{-\alpha R} )} \right) 
     =  
   (1-O(e^{-R})) \left( 1 - \frac{ O( r e^{-\alpha R} ) }{1 - O( e^{-\alpha R} )} \right) \nonumber\\
    & =  
    1 - O( r e^{-\alpha R} ) \nonumber\\
    & =  
    1 - o(r).\label{eq:Bcomp} 
\end{align}
(using $\alpha \leq 1/2$ and $r \geq e^{-R/4}$ for the penultimate line.)
Next we consider
\begin{align}
    A 
    & = 
    \frac{\frac12(e^r+e^{-r}) - \frac{e^{R-r}+e^{r-R}}{e^R+e^{-R}} }{\sinh r } \cdot \frac{ \Omega(r) }{1 - O(e^{-\alpha R}) } 
     =  
    \frac{\frac12(e^r+e^{-r}) - e^{-r} - O( e^{-2R} )}{\sinh r } \cdot \frac{ \Omega(r) }{1 - O(e^{-\alpha R}) } \nonumber\\
    & = 
    \frac{\sinh r - O( e^{-2R} )}{\sinh r } \cdot \frac{ \Omega(r) }{1 - O(e^{-\alpha R}) } 
     =  
    \left( 1 - O( e^{-2R} / r ) \right) \cdot \frac{ \Omega(r) }{1 - O(e^{-\alpha R}) } \nonumber\\ 
    & \geq  
    \left( 1 - O( e^{-7R/4} ) \right)  \cdot \Omega( r ) \nonumber\\
    & = \Omega( r ).\label{eq:Acomp} 
\end{align}
Combining~\eqref{eq:Bcomp} and~\eqref{eq:Acomp}, we see that $A+B > 1$ (provided we have chosen the constant $\eps$ appropriately small, and $R$ is 
sufficiently large), which in turn implies the derivative of the right hand side of~\eqref{eq:edgerule} with respect to~$R$ 
is nonnegative as was to be shown. 


\section*{Acknowledgements}

We thank Dieter Mitsche for asking the right questions.

\end{document}
