\documentclass[12pt]{article}
\usepackage{e-jc}
%\usepackage{t1enc}
\usepackage{amsthm,amsmath,amssymb}
\usepackage{graphicx}


\theoremstyle{plain}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem*{problem}{Problem}
\newtheorem*{conjecture}{Conjecture}
\theoremstyle{definition}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{construction}[theorem]{Construction}   
\newtheorem{algorithm}[theorem]{Algorithm}   
\newtheorem{notation}[theorem]{Notation}   



\title{\textbf{The number of rooted trees of given depth}}

% input author, affilliation, address and support information as follows;
% the address should include the country, and does not have to include
% the street address 

\author{P\'{e}ter P\'{a}l Pach\\
\small Department of Computer Science and Information Theory\\[-0.8ex]
\small University of Technology and Economics\\[-0.8ex] 
\small Budapest, Hungary\\
\small\tt ppp24@cs.elte.hu\\
\and
Gabriella Pluh\'{a}r \\
\small Department of Algebra and Number Theory,\\[-0.8ex]
\small E\"{o}tv\"{o}s Lor\'{a}nd University\\[-0.8ex]
\small Budapest, Hungary\\
\small\tt plugab@cs.elte.hu
\and
Andr\'{a}s Pongr\'{a}cz\\
\small Department of Mathematics and its Applications,\\[-0.8ex]
\small Central European University\\[-0.8ex]
\small Budapest, Hungary\\
\small\tt andras.pongracz@lix.polytechnique.fr
\and
Csaba Szab\'{o} \\
\small Department of Algebra and Number Theory,\\[-0.8ex]
\small E\"{o}tv\"{o}s Lor\'{a}nd University\\[-0.8ex]
\small Budapest, Hungary\\
\small\tt csaba@cs.elte.hu
}

% \date{\dateline{submission date}{acceptance date}\\
% \small Mathematics Subject Classifications: comma separated list of
% MSC codes available from http://www.ams.org/mathscinet/freeTools.html}

\date{\dateline{Jan 1, 2011}{Jan 2, 2013}\\
\small Mathematics Subject Classifications: 05C30, 05A15, 05A16}%!!!!!

\begin{document}

\maketitle

\begin{abstract}
  In this paper it is shown that the  logarithm of the number of non-isomorphic rooted 
trees of depth $k\geq 3$ is asymptotically $\frac{\pi^2}{6}\cdot\frac{n}{\log\log\dots\log n}$, where $\log$ is iterated $k-2$ times in the denominator.

  % keywords are optional
  \bigskip\noindent \textbf{Keywords:} tree, depth, counting
\end{abstract}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\section{Introduction}

In 1889 Cayley showed that there are $n^{n-2}$ labelled trees on $n$ vertices. 60 years later, in 1948 asymptotic formulas were given for the number of unlabelled trees and unlabelled rooted trees.  In the seminal paper of Otter \cite{otter} it is shown that the number of unlabelled trees of size $n$ is asymptotically $b n^{-5/2}\alpha^{n}\left(1+O\left(1/n\right)\right)$, and the number of unlabelled rooted trees of size $n$ is asymptotically $c  n^{-3/2}\alpha^{n}\left(1+O\left(1/n\right)\right)$, where $\alpha=2.95576\ldots$, $b= 0.5349\ldots$ and $c=0.4399\ldots$. All results about counting trees are summarized in the book of Drmota \cite{drmota}. Several parameters of trees were analyzed in detail, for example, the average depth, the distribution of the depth in unlabelled rooted trees \cite{DG} and random $d$-ary trees, etc. For the distribution of the depth of binary unlabelled rooted trees see \cite{BF}.

In this paper we count the number of rooted trees of given depth. We show that the logarithm of the number of rooted trees of depth $k\geq 3$ is asymptotically $\frac{\pi^2}{6}\cdot \frac{n}{\log\log\dots\log n}$, where $\log$ is iterated $k-2$ times in the denominator.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\section{Generating functions}

Denote by $f_k\left(n\right)$ the number of $n$-element rooted trees of depth at most $k$. A rooted tree of depth 0 is a single point. A rooted tree of depth 1 has a root and $n-1$ leaves all connected to the root. Hence  $f_1\left(n\right)=1$ for all $n\ge 1$. The 5-element trees of depth at most 2 are shown in Figure~\ref{graf}. Thus $f_2\left(5\right)=5$. It is easy to find a general formula for the number of rooted trees of depth at most 2.

\begin{figure}[ht]
\centering
\includegraphics[width=12cm]{graf.pdf}
\caption{The $5$-element rooted trees of depth at most $2$}\label{graf}
\end{figure}

\begin{lemma}\label{f2} 
$f_2\left(n\right)=p\left(n-1\right)$, where $p\left(m\right)$ denotes the number of  partitions of $m$.
\end{lemma}
\begin{proof} 
Let us  omit the root of an $n$-element tree of depth at most 2. Then we obtain some (rooted) trees of depth at most 1 with altogether $n-1$ vertices. Trees of depth at most $1$ are uniquely determined by the number of their vertices. Hence, we have exactly as many such configurations as many partitions of $n-1$. Thus $f_2\left(n\right)=p\left(n-1\right)$.

\end{proof}

For a fixed $k$ let  $F_k\left(x\right)$ denote the generating function of the sequence $f_k\left(n\right)$.

$$F_k\left(x\right)=\sum\limits_{n=1}^{\infty}f_k\left(n\right)x^n$$

By Lemma~\ref{f2}, $F_2\left(x\right)=\sum\limits_{n=1}^{\infty}p\left(n-1\right)x^n=xP(x)$, where $P(x)$ denotes the generating function of the partitions of $n$. By the Hardy--Ramanujan formula $f_2\left(n\right)\sim\frac{1}{4n\sqrt{3}}e^{\pi\sqrt{\frac{2n}{3}}}$, which shows the asymptotic behaviour of $f_2\left(n\right)$. For more details see \cite{R}. To obtain a recurrence formula for $F_k\left(x\right)$, we use again the  idea of chopping the tree: Omit the root of an $n$-element  tree of depth at most $k$. The remaining part of the graph is a forest consisting of trees of depth at most $ k-1$ with $n-1$ vertices altogether.  Let $\mu_j$ be the number of rooted trees with $j$ vertices after the chopping. There are $\binom{f_{k-1}\left(j\right)+\mu_j-1}{\mu_j}$ ways to choose ${\mu_j}$  trees with $j$ vertices. Thus we have the following recurrence formula 

\begin{equation}\label{recfkn} 
f_k\left(n\right)=\sum\limits_{\sum i\mu_i={n-1}}\left(\prod\limits_{j=1}^{n-1}\binom{f_{k-1}\left(j\right)+\mu_j-1}{\mu_j}\right)
\end{equation}

This technique, and the following formulas can be found in \cite{FS}, but we summarize the proofs for the reader's convenience.

\begin{theorem}\label{genfunc}
Let $k\geq 2$. Then the  generating function of the sequence $f_k\left(n\right)$ is 

$$F_k\left(x\right)=x\prod\limits_{j=1}^{\infty}\left(1-x^j\right)^{-f_{k-1}\left(j\right)}$$

and satisfies the recurrence formulas

\begin{equation}\label{rec2}\tag{F1}
F_k\left(x\right)= x\exp\left(\sum\limits_{m=1}^{\infty}\frac{1}{m}\sum\limits_{j=1}^{\infty}f_{k-1}\left(j\right)x^{jm}\right)
\end{equation} 

$$F_k\left(x\right)=x\exp\left(\sum\limits_{m=1}^{\infty}\frac{1}{m}F_{k-1}\left(x^m\right)\right)$$

\end{theorem}
\begin{proof}
According to the generalized binomial theorem, for every $|x|<1$ we have 

$$\left(1-x^j\right)^{-f_{k-1}\left(j\right)}= \sum\limits_{\mu_j=0}^{\infty}\binom{-f_{k-1}\left(j\right)}{\mu_j}\cdot\left(-x^j\right)^{\mu_j} =\sum\limits_{\mu_j=0}^{\infty}\binom{f_{k-1}\left(j\right)+\mu_j-1}{\mu_j}x^{j\mu_j}$$

Thus the coefficient of $x^{n-1}$ in the expression $\prod\limits_{j=1}^{\infty}\left(1-x^j\right)^{-f_{k-1}\left(j\right)}$ is

$$\sum\limits_{\mu_1+2\mu_2+\cdots+\left(n-1\right)\mu_{n-1}= n-1}\left(\prod\limits_{j=1}^{n-1}\binom{f_{k-1}\left(j\right)+\mu_j-1}{\mu_j}\right)$$

and this is exactly $f_k\left(n\right)$ by~\eqref{recfkn}. By expanding the Taylor-series of $\log(1-x^j)$, for $0<x<1$ we obtain

\begin{multline*}\log F_k\left(x\right)=\log x+\sum\limits_{j=1}^{\infty}\left(-f_{k-1}\left(j\right)\right)\log\left(1-x^j\right)=\\
=\log x+\sum\limits_{j=1}^{\infty}\left(-f_{k-1}\left(j\right)\right)\sum\limits_{m=1}^{\infty}\left(-\frac{1}{m}x^{jm}\right)=\\
=\log x+\sum\limits_{m=1}^{\infty}\frac{1}{m}\sum\limits_{j=1}^{\infty}f_{k-1}\left(j\right)x^{jm}=\log x+\sum\limits_{m=1}^{\infty}\frac{1}{m}F_{k-1}\left(x^m\right)
\end{multline*}

which is equivalent to \eqref{rec2}.
\end{proof}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\section{Preliminary calculations} We give  a list of elementary analytic calculations often used in  the estimations of the generating functions. Those not interested in the technical details of these easy calculations can skip the whole section.

\begin{definition}
For $m\geq 1$ we denote by $L_m(x)$ the $m$-th iterated logarithm function $\log\log\dots\log x$. Similarly, $E_m(x)$ denotes the $m$-th iterated exponential function $\exp\exp\dots\exp x$.
\end{definition}

\begin{lemma}\label{elem}
The following rules apply for the functions $L_m$ and $E_m$.
\begin{enumerate}
\item[(i)] For $x,y\geq 2$ we have $\log x\leq \log(x+y)\leq \log x +\log y $.
\item[{(ii)}] For every $m\geq 2$  and every  large enough $x,y$ we have $L_m(x)\leq L_m(xy)\leq L_m(x)+L_m(y).$
\item[(iii)] $E_m(x/2) \le E_m(x)^{1/2}$ and $E_m(x/3) \le E_m(x)^{1/3}$ for large enough $x$. 
\item[(iv)] For all $C_1,C_2>0$ there exists a constant $C_3> 0$ with $C_1 E_m(x+C_2) \le E_m(x + C_3)$ for large enough $x$.
\end{enumerate}
\end{lemma}

\begin{proof}
As $\log x$ is increasing we have $\log x\leq \log(x+y)$ for $x,y\geq 2$. For the other inequality without loss of generality assume that $x\leq y$. Then $\log\left(x+y\right)=\log\left(y\left(1+\frac{x}{y}\right)\right)=\log y+\log\left(1+\frac{x}{y}\right)\leq \log y +\log 2\leq \log y +\log x$.

Let $x,y$ be large enough such that $L_m(x)\geq 2$ and $L_m(y)\geq 2$. Then according to the monotonicity of $L_m(x)$ we have $L_m(x)\leq L_m(xy)$. Applying  $(i)$ repeatedly, we obtain $L_m(xy)\leq L_{m-1}(L_1(x)+L_1(y))\leq  L_{m-2}(L_2(x)+L_2(y))\leq \cdots \leq L_m(x)+L_m(y)$.

Item (iii) is shown by induction. It is clear for $m=1$. For $m>1$ we have $E_m(u/2)=\exp(E_{m-1}(u/2))\leq \exp(E_{m-1}(u)^{1/2})\leq E_m(u)^{1/2}$ by the induction hypothesis, if $E_{m-1}(u)\geq 4$.

Item (iv) follows from the formula $E_m(x+y)\geq E_m(x)E_m(y)$ for large enough $x,y$.
\end{proof}

Throughout the paper we estimate certain power series coefficientwise. That is, $\leq_{coeff}$ is a partial order on the set of real power series, and $\sum\limits_{n=0}^{\infty}a_nx^{n}\leq_{coeff} \sum\limits_{n=0}^{\infty}b_nx^{n}$ if and only if $a_n\leq b_n$ for all $n\geq 0$. The following rules are going to be used several times.

\begin{lemma}\label{lead}
Let $\sum\limits_{n=0}^{\infty}a_nx^{n}$ and $\sum\limits_{n=0}^{\infty}b_nx^{n}$ be two (formal) power series. Then
\begin{enumerate}
\item[(i)] $\exp\left(\sum\limits_{n=0}^{\infty}a_nx^{n}\right)=\sum\limits_{n=0}^{\infty}\left(\sum\limits_{i=0}^{\infty}\frac{1}{i!}\sum\limits_{k_1+\cdots+k_i=n}a_{k_1}\cdots a_{k_i}\right)x^n$,
\item[(ii)] if $0\leq_{coeff} \sum\limits_{n=0}^{\infty}a_nx^{n}\leq_{coeff} \sum\limits_{n=0}^{\infty}b_nx^{n}$, then $$\exp\left(\sum\limits_{n=0}^{\infty}a_nx^{n}\right)\leq_{coeff} \exp\left(\sum\limits_{n=0}^{\infty}b_nx^{n}\right)$$
\end{enumerate}
\end{lemma}

\begin{proof}
The first item follows from $\exp(y)=\sum\limits_{i=0}^{\infty}\frac{1}{i!}y^{i}$, and item $(ii)$ is a direct consequence of $(i)$.
\end{proof}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\section{Asymptotic formulas}

In this section, we prove the main theorem of the paper.

\begin{theorem}\label{main}
The sequences $f_k\left(n\right)$ satisfy the following asymptotic formulas

\begin{enumerate}
\item[(1)] $f_2\left(n\right)=p\left(n-1\right)\sim\frac{1}{4n\sqrt{3}}e^{\pi\sqrt{\frac{2n}{3}}}$,
\item[(2)] $\log f_k\left(n\right)=\frac{\pi^2}{6}\cdot \frac{n}{L_{k-2}\left(n\right)} \cdot\left( 1+O_k \left(\frac{L_{k-1}\left(n\right)}{L_{k-2}\left(n\right)}\right) \right)$ for $k>2$,
\end{enumerate}

where $L_m\left(x\right)$ denotes the $m$-th iterated logarithm function $\log\log\cdots\log x$.
\end{theorem}

The first statement of this theorem is a direct consequence of Lemma~\ref{f2}. For the second item a series of lemmas is needed.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\subsection{The lower estimation}
First we give a lower bound for $k=3$.

\begin{lemma}
$\log f_3\left(n\right)\geq \frac{\pi^2}{6}\cdot\frac{n} {\log n}+O\left(n\frac{\log\log n}{\log^2 n }\right)$
\end{lemma}

\begin{proof}
According to formula (\ref{rec2}) we have the trivial lower bound

$$F_3\left(x\right)\geq_{coeff} x\exp\left(\sum\limits_{n=1}^{\infty}f_2\left(n\right)x^n\right)$$

By expanding the exponential function for  $f_3\left(n\right)$ we obtain

$$f_3\left(n\right)\geq \sum\limits_{i=1}^{n}\frac{1}{i!}\sum\limits_{a_1+\cdots+a_i=n-1}f_2\left(a_1\right)\cdots f_2\left(a_{i}\right)$$

Consider a term where $i=\frac{\pi^2}{6}\cdot \frac{n}{\log^2 n}\cdot \left(1+O\left(\frac{\log\log n}{\log n}\right)\right)$ and $a_1=\cdots=a_i=\frac{n-1}{i}$. Then $a_1=\cdots=a_i=\frac{6}{\pi^2}\cdot\log^2 n\cdot\left(1+O\left(\frac{\log\log n}{\log n}\right)\right)\cdot\left(1-\frac{1}{n}\right)= \frac{6}{\pi^2}\cdot\log^2 n\cdot\left(1+O\left(\frac{\log\log n}{\log n}\right)\right)$.

By estimating $\log i!$ with Stirling's formula and by using that for large enough $m$ the inequality $f_2\left(m-1\right)\ge \exp\left(\pi\sqrt{\frac{2m}{3}}-2\log m\right)$ holds, we obtain

\begin{multline*}
\log f_3(n) \geq -i\log i +i\log f_2(a_1)\geq\\
\geq -\frac{\pi^2}{6}\cdot\frac{n}{\log^2 n}\cdot\left(1+O\left(\frac{\log\log n}{\log n}\right)\right)\cdot\log \left(\frac{\pi^2}{6}\cdot\frac{n}{\log^2 n}\cdot\left(1+O\left(\frac{\log\log n}{\log n}\right)\right)\right)+\\
+\frac{\pi^2}{6}\cdot\frac{n}{\log^2 n}\cdot\left(1+O\left(\frac{\log\log n}{\log n}\right)\right)
\cdot\\
\cdot \left(\pi\sqrt{\frac{2\frac{6}{\pi^2}\cdot\log^2 n\cdot\left(1+O\left(\frac{\log\log n}{\log n}\right)\right)}{3}}-
2\log\left(\frac{6}{\pi^2}\cdot\log^2 n\cdot\left(1+O\left(\frac{\log\log n}{\log n}\right)\right)\right)\right)  
\end{multline*}

After rearranging the terms we arrive at

\begin{multline*}
\log f_3(n) \geq -\frac{\pi^2}{6}\cdot\frac{n}{\log^2 n}\cdot\left(1+O\left(\frac{\log\log n}{\log n}\right)\right)\cdot\left(\log n +O\left(\log\log n \right)\right)+\\ 
+\frac{\pi^2}{3}\cdot\frac{n}{\log^2 n}\cdot\left(1+O\left(\frac{\log\log n}{\log n}\right)\right)\cdot\log n\cdot\left(1+O\left(\frac{\log\log n}{\log n}\right)\right)+\\
+\frac{\pi^2}{6}\cdot\frac{n}{\log^2 n}\cdot O(\log\log n) =\frac{\pi^2}{6}\cdot\frac{n}{\log n}\cdot\left(1+O\left(\frac{\log\log n}{\log n}\right)\right)
\end{multline*}
\end{proof}

We proceed by induction to show the lower bound for $f_{k}\left(n\right)$ for $k>3$. Hence, assume that the estimation $\log f_k\left(n\right)\geq\frac{\pi^2}{6}\cdot \frac{n}{L_{k-2}\left(n\right)} \cdot\left( 1+O_k \left(\frac{L_{k-1}\left(n\right)}{L_{k-2}\left(n\right)}\right) \right)$ holds for some $k>3$. To obtain a similar lower bound for $f_{k+1}\left(n\right)$ we use the recurrence formula (\ref{rec2}), that is, $F_{k+1}\left(x\right)=x\exp\left(\sum\limits_{m=1}^{\infty}\frac{1}{m}\sum\limits_{j=1}^{\infty}f_{k}\left(j\right)x^{jm}\right)$, which yields the estimation

$$F_{k+1}\left(x\right)\geq_{coeff} x\exp\left(\sum\limits_{n=1}^{\infty}f_k\left(n\right)x^n\right)$$

According to the induction hypothesis there exist $n_k\in \mathbb{N}$ and $R_k\in \mathbb{R}$ such that $f_k\left(n\right)\geq \exp\left(\frac{\pi^2}{6}\cdot\frac{n}{L_{k-2}\left(n\right)}\cdot\left(1+R_k\frac{L_{k-1}\left(n\right)}{L_{k-2}\left(n\right)}\right)\right)$ for $n\geq n_k$. As $f_k\left(n\right)\geq 0$ we may omit the first few terms of the sum.

%\begin{multline*}
$$F_{k+1}\left(x\right)\geq_{coeff} x\exp\left(\sum\limits_{n=n_k}^{\infty}\exp\left(\frac{\pi^2}{6}\cdot\frac{n}{L_{k-2}\left(n\right)}\cdot\left(1+ R_k\frac{L_{k-1}\left(n\right)}{L_{k-2}\left(n\right)}\right)\right)x^n\right)
$$%\end{multline*}

By expanding the power series of $\exp$ we obtain that for $n\geq 1$

\begin{multline*}
f_{k+1}\left(n+1\right)\geq \sum\limits_{i=1}^{n}\frac{1}{i!}\sum_{n_k\leq a_1,\ldots ,a_i;\ a_1+\cdots+a_i=n}\prod_{j=1}^{i}\exp\left(\frac{\pi^2}{6}\cdot\frac{a_j}{L_{k-2}\left(a_j\right)}\cdot\left(1+R_k\frac{L_{k-1}\left(a_j\right)}{L_{k-2}\left(a_j\right)}\right)\right)
\end{multline*}

For large enough $n$ and $x_0=\frac{\pi^2}{6}\cdot\frac{n}{L_1\left(n\right)\cdots L_{k-2}\left(n\right)L^2_{k-1}\left(n\right)}\left(1+O\left(\frac{L_k\left(n\right)}{L_{k-1}\left(n\right)}\right)\right)$ we have $x_0\ge n_k$. Using the estimation $\log i!\leq i\log i$, with $i=x_0$ and $a_1=\cdots =a_i=\frac{n}{x_0}$ we obtain

\begin{multline*}
\log f_{k+1}\left(n+1\right)\geq -x_0\log x_0+x_0\frac{\pi^2}{6}\cdot\frac{\frac{n}{x_0}}{L_{k-2}\left(\frac{n}{x_0}\right)}\cdot\left(1+ R_k\frac{L_{k-1}\left(\frac{n}{x_0}\right)}{L_{k-2}\left(\frac{n}{x_0}\right)}\right)=\\
=-x_0\log x_0+\frac{\pi^2}{6}\cdot\frac{n}{L_{k-2}\left(\frac{n}{x_0}\right)}\cdot\left(1+ R_k\frac{L_{k-1}\left(\frac{n}{x_0}\right)}{L_{k-2}\left(\frac{n}{x_0}\right)}\right)\\
\end{multline*}

From the definition of $x_0$ we have

$$\frac{n}{x_0}=\frac{6}{\pi^2}L_1(n)\cdots L_{k-2}(n)L_{k-1}^2(n)\cdot\left(1+O\left(\frac{L_k(n)}{L_{k-1}(n)}\right)\right)$$ 

By Lemma~\ref{elem} it follows that $L_m\left(\frac{n}{x_0}\right)=L_{m+1}\left(n\right)+O\left(L_{m+2}\left(n\right)\right)$. Finally, the estimation $x_0\log x_0=O\left(\frac{n}{L_{k-1}^2(n)}\right)$ yields

\begin{multline*}
\log f_{k+1}\left(n+1\right)\geq\\
\geq O\left(\frac{n}{L^2_{k-1}\left(n\right)}\right)+ \frac{\pi^2}{6}\cdot\frac{n}{L_{k-1}\left(n\right)+O\left(L_{k}\left(n\right)\right)}\cdot\left(1+R_k\frac{L_{k}\left(n\right)+O\left(L_{k+1}\left(n\right)\right)}{L_{k-1}\left(n\right)+O\left(L_{k}\left(n\right)\right)}\right)=\\
=O\left(\frac{n}{L^2_{k-1}\left(n\right)}\right)+ \frac{\pi^2}{6}\cdot\frac{n}{L_{k-1}\left(n\right)}\cdot\left(1+O\left(\frac{L_{k}\left(n\right)}{L_{k-1}\left(n\right)}\right)\right)\cdot\left(1+R_k\frac{L_{k}\left(n\right)}{L_{k-1}\left(n\right)}O\left(1\right)\right)=\\
=\frac{\pi^2}{6}\cdot\frac{n}{L_{k-1}\left(n\right)}\cdot\left(1+O\left(\frac{L_{k}\left(n\right)}{L_{k-1}\left(n\right)}\right)\right)
\end{multline*}

Thus we arrive at the lower bound

\begin{multline*}
\log f_{k+1}\left(n\right)\geq\\
\geq \frac{\pi^2}{6}\cdot\frac{n-1}{L_{k-1}\left(n-1\right)}\cdot\left(1+O\left(\frac{L_{k}\left(n-1\right)}{L_{k-1}\left(n-1\right)}\right)\right)\geq\\
\geq \frac{\pi^2}{6}\cdot\frac{n}{L_{k-1}\left(n\right)}\cdot\left(1-\frac{1}{n}\right)\cdot\left(1+O\left(\frac{L_{k}\left(n\right)}{L_{k-1}\left(n\right)}\right)\right)=\\
=\frac{\pi^2}{6}\cdot\frac{n}{L_{k-1}\left(n\right)}\cdot\left(1+O\left(\frac{L_{k}\left(n\right)}{L_{k-1}\left(n\right)}\right)\right)
\end{multline*}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\subsection{The upper estimation}

\begin{lemma}\label{lem1}

We have for real $x\to 1-$

\[
\log F_2(x) = \frac{\pi^2}{6(1-x)} +\frac 12 \log(1-x) -\frac{\pi^2}{12} - \log\sqrt{2\pi} + O(1-x)
\]
\end{lemma}
\begin{proof}
This is a reformulation of formula (68) on p.\ 576 from \cite{FS}.  We just note that $F_2(x) = x P(x)$ and that the factor $x$ leads to an (additional) error term of the form $\log x = O(1-x)$.
\end{proof}

The next step is to extend Lemma~\ref{lem1} in a proper way for $F_k(x)$, $k>2$.

\begin{lemma}\label{lem2}
For every $k\ge 2$ there exists $C_k> 0$ and  $x_0(k)<1$ such that

\begin{equation}\label{eqlem2}
L_{k-1}(F_k(x)) \le \frac{\pi^2}{6(1-x)} +\frac 12 \log(1-x) + C_k
\end{equation}

for $x_0(k)\le x < 1$.
\end{lemma}
\begin{proof}
The statement is shown by induction. However, we first observe that the sum $\sum_{m\ge 1} F_k(x^m)/m$ can be replaced by a much simpler upper bound. For $0<x <1$ we set $m_0 = m_0(x) = \lceil 1/\log(1/x) \rceil$.  If  $x_1<1$ is sufficiently close to 1, then we can apply the estimation $F_k(x) = O(x)$ to obtain

\begin{align*}
\sum_{m> m_0} \frac 1m F_k(x^m) &= O\left(\sum\limits_{m> m_0} \frac 1m x^m\right)= O\left(\sum\limits_{m=1}^{\infty} \frac 1m x^m-\sum\limits_{m=1}^{m_0} \frac 1m x^m\right)\\
&= O(-\log(1-x)-\log(m_0)+O(1))\\ 
&= O\left(-\log\frac{1-x}{\log (1/x)}+O(1)\right)=O(1)
\end{align*}

 which is negligible, as there will be a larger error term. Furthermore, we have
 
\[
\sum_{m=3}^{m_0} \frac 1m F_k(x^m) = O\left( \frac 1{\log (1/x)} F_k(x^3) \right)
=  O\left( \frac 1{1-x} F_k(x^3) \right)
\]

which leads to the upper bound

\[
\sum_{m\ge 1}\frac 1m F_{k}(x^m) = F_k(x) + \frac 12 F_k(x^2) + O\left( \frac 1{1-x} F_k(x^3) \right)
\]

Finally, we prove (\ref{eqlem2}) by induction.  By Lemma~\ref{lem1} it is certainly true for $k=2$. So we assume now that it is true for some $k\ge 2$. For notational convenience we set

\[
G(x) = \frac{\pi^2}{6(1-x)} +\frac 12 \log(1-x).
\]

It is immediate that 

\[
G(x^2) = \frac{\pi^2}{6(1-x^2)} +\frac 12 \log(1-x^2) = \frac{\pi^2}{12(1-x)} +\frac 12 \log(1-x) + O(1)
\]

as $x\to 1-$; and a similar estimation follows if we replace $x^2$ by $x^3$:

\[
G(x^3) = \frac{\pi^2}{6(1-x^3)} +\frac 12 \log(1-x^3) = \frac{\pi^2}{18(1-x)} +\frac 12 \log(1-x) + O(1).
\]

Since $\log(1-x)\to -\infty$ (as $x\to1-$) we have that for every $C> 0$ there exists $x_2 = x_2(C)<1$ such that

\[
G(x^2)+C \leq \frac {1}{2} G(x) \quad \mbox{and}\quad G(x^3)+C \leq \frac {1}{3} G(x) 
\]

for $x_2\le x < 1$.  According to the induction hypothesis we have $F_k(x) \le E_{k-1}(G(x)+C_k)$. Thus Lemma~\ref{elem} items (iii) and (iv) imply

\[
F_k(x^2) \le  E_{k-1}(G(x^2)+C_k) \le E_{k-1}(G(x)/2) \le E_{k-1}(G(x))^{1/2}
\]

and similarly

\[
\frac 1{1-x} F_k(x^3) \le \frac 1{1-x} E_{k-1}(G(x))^{1/3} \le E_{k-1}(G(x))^{1/2}
\]

provided that $x<1$ is sufficiently close to $1$. Hence, we obtain

\begin{align*}
\log F_{k+1}(x) &\le \sum_{m\ge 1}\frac 1m F_{k}(x^m) \\
&= F_k(x) + \frac 12 F_k(x^2) +  O\left( \frac 1{1-x} F_k(x^3) \right) \\
&\le E_{k-1}(G(x)+C_k) + O\left( E_{k-1}(G(x))^{1/2} \right) \\
&\le  E_{k-1}(G(x)+C_k)\left( 1 + O\left( E_{k-1}(G(x)+C_k)^{-1/2}\right) \right) \\
&\le E_{k-1}(G(x) + C_{k+1}).
\end{align*}

which is equivalent to (\ref{eqlem2}) for $k+1$.
\end{proof}

\begin{corollary}
For every $k\ge 2$ there exists $x_1(k) < 1$ such that 

\begin{equation}\label{eqcor1}
L_{k-1}(F_k(x)) \le \frac{\pi^2}{6\log(1/x)}
\end{equation}

for $x_1(k) \le x < 1$.
\end{corollary}
\begin{proof}

Since 

\[
\frac{\pi^2}{6(1-x)} =  \frac{\pi^2}{6\log(1/x)} + O(1)
\]

and $\log(1-x) \to -\infty$ (as $x\to 1-$), it immediately follows that (\ref{eqcor1}) holds for $x_1(k)\le x < 1$, if $x_1(k) < 1$ is large enough.
\end{proof}

We finish the proof of the main result by verifying the upper bound.

\begin{theorem}

For every $k\ge 3$ we have for $n\to\infty$

\[
[x^n]\, F_k(x) \le \frac{\pi^2}6 \cdot \frac{n}{L_{k-2}(n)}\left( 1 + 
O\left( \frac{L_2(n)}{\log n\, L_2(n)\cdots L_{k-3}(n)L_{k-2}(n)} \right)\right).
\]

\end{theorem}
\begin{proof}
We use the trivial inequality $f_k(n) x^n \le F_k(x)$ (for $0\le x < 1$) to obtain an upper bound for $f_k(n) = [x^n]\, F_k(x)$. To this end, $x$ has to be chosen in a proper way, namely by the relation

\[
\log(1/x) =   \frac{\pi^2}{6 L_{k-2}(n/(\log n)^2)} 
\]

With this value we have by (\ref{eqcor1}) the inequality $L_{k-1}(F_k(x)) \le L_{k-2}(n/(\log n)^2)$, and consequently $\log F_k(x) \le n/(\log n)^2$. Furthermore, since

\[
\frac{\pi^2}{6\, L_{k-2}(n/(\log n)^2)} = \frac{\pi^2}{6 L_{k-2}(n)}
\left( 1 + O\left( \frac{L_2(n)}{\log n\, L_2(n)\cdots L_{k-2}(n)} \right) \right)
\]

we obtain the estimation

\begin{align*}
\log f_k(n) &\le \log F_k(x) + n\log(1/x) \\
&\le \frac n{(\log n)^2} +  \frac{\pi^2}6 \cdot \frac{n}{L_{k-2}(n/(\log n)^2)} \\
&= \frac{\pi^2}6 \cdot \frac{n}{L_{k-2}(n)}\left(1 + 
O\left( \frac{L_2(n)}{\log n\, L_2(n)\cdots L_{k-3}(n)L_{k-2}(n)} \right)\right)
\end{align*}

which completes the proof.
\end{proof}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection*{Acknowledgements}

The authors are grateful to the referee, who has significantly shortened the paper by showing the techniques applied for the upper estimation.
The research was supported by OTKA Grant 109085 and  P.P. Pach was partially supported by the grant TAMOP-4.2.2.B-10/1--2010-0009.

\begin{thebibliography}{6}
\bibitem{BF} N.~Broutin and P.~Flajolet.  \newblock The height of random binary unlabelled
trees.  \newblock {\em Discrete Math. Theor. Comput. Sci. Proc., AI}, 121--134, 2008.

\bibitem{drmota} M.~Drmota.  \newblock {\em Random trees. An Interplay between Combinatorics and Probability}.  \newblock SpringerWienNewYork, 2009.

\bibitem{DG} M.~Drmota and B.~Gittenberger.  \newblock The shape of unlabeled rooted random
trees.  \newblock {\em Europ. J. Combinat.}, 31:2028--2063, 2010.

\bibitem{FS} P.~Flajolet and R.~Sedgewick.  \newblock {\em Analytic Combinatorics}.  \newblock Cambridge University Press, Cambridge, 2009.

\bibitem{otter} R.~Otter.  \newblock The number of trees.  \newblock {\em Ann. of Math.}, 49:583--599, 1948.

\bibitem{R} H.~Rademacher.  \newblock On the expansion of the partition function in a series.  \newblock {\em Ann. of Math.}, 44:416--422, 1943.
\end{thebibliography}

\end{document}
