\documentclass[12pt]{article}
\usepackage{e-jc}

\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amsfonts}
\usepackage{thmtools}
\declaretheorem{theorem}
\usepackage{thm-restate}
\usepackage[colorlinks=true,citecolor=black,linkcolor=black,urlcolor=blue]{hyperref}
\usepackage[]{ytableau}
\usepackage[all, arc, curve, frame]{xy}

\DeclareMathOperator{\Tr}{Tr}

\title{An Arithmetic Property of Moments of the $\beta$-Hermite Ensemble and Certain Map Enumerators}


\author{Amol Aggarwal\thanks{Funded by the NSF Graduate Research Fellowship under grant number DGE1144152.}\\
	\small Department of Mathematics \\ [-0.8ex]
	\small Harvard University \\ [-0.8ex]
	\small Massachusetts, U.S.A \\
	\small \url{agg_a@math.harvard.edu}} 

\date{\dateline{Dec 3, 2016}{Jan 22, 2018}\\
	\small Mathematics Subject Classifications: 05C30; 58C35; 05E05}


\numberwithin{equation}{section}

\begin{document}

\maketitle

\begin{abstract}
Moments of the $\beta$-Hermite ensemble are known to be related to the enumerative theory of topological maps. When $\beta \in \{ 1, 2 \}$, asymptotic information about these moments has been used to deduce asymptotics on the number of maps of given genus, and arithmetic information about these moments can sometimes be explained by underlying group actions on the set of maps. In this paper we establish a new arithmetic property about the $2q$-th moment of the $\beta$-Hermite ensemble, for any prime $q \ge 3$ and real number $\beta > 0$, that has a combinatorial interpretation in terms of maps but no known combinatorial explanation. In the process, we derive several additional results that might be of independent interest, including a general integrality statement and an efficient algorithm for evaluating expectations of multi-part elementary symmetric polynomials of bounded length.

\bigskip \noindent \textbf{Keywords:} Topological maps; $\beta$-Hermite ensemble; elementary symmetric functions 
\end{abstract}


\newtheorem{thm}{Theorem}[section]
\newtheorem{prop}[thm]{Proposition}
\newtheorem{lem}[thm]{Lemma}
\newtheorem{cor}[thm]{Corollary}
\newtheorem{conj}[thm]{Conjecture}
\newtheorem{que}[thm]{Question}
\newtheorem{exa}[thm]{Example}
\theoremstyle{remark}
\newtheorem{rem}[thm]{Remark}

\section{Introduction}

\label{Introduction}

Since the work of Tutte \cite{ACPM, OEPM} over fifty years ago, the study of map enumeration has been a popular area of research among combinatorialists. One basic question in this field asks for the number of maps with $V$ vertices and $F$ faces that exist on a given surface of genus $g$, for fixed integers $V$, $F$, and $g$. In most cases, closed form expressions for these numbers are unavailable, so researchers have instead asked for properties that these numbers satisfy. 

For instance, questions asking for asymptotic properties of these numbers date back at least to 1986 with the work of Bender and Canfield \cite{TANRMS}. Since then, there has been significant effort in trying to find more asymptotic results in map enumeration; see, for instance, \cite{TMAC, TNOMAC, APANRMS, UAGC, RMRP, MIME}. Earlier work in this area, such as \cite{APANRMS}, established asymptotics in entirely combinatorial ways. However, many more recent works, such as \cite{TNOMAC, UAGC, RMRP, MIME}, take advantage of a connection between random matrices and map enumeration; by studying the asymptotics of random matrix integrals, one can sometimes obtain asymptotics on map enumerators. 

Instead of trying to find asymptotic properties of matrix integrals, our goal in this paper is to find their arithmetic properties; these can then be interpreted as arithmetic properties of map enumerators. As we will see later (in \hyperref[Maps]{Section \ref*{Maps}}), results of this type can sometimes be combinatorially understood through an underlying group action on the set of maps on a given surface. 

Before discussing this further, let us begin by introducing some of the notions of random matrix theory. 

\subsection{Matrix Integrals}

\label{RandomMatrix}

We first need some notation about partitions and symmetric functions; we will follow the conventions used by Macdonald in \cite{SFHP}. A {\itshape partition} $\lambda = (\lambda_1, \lambda_2, \ldots , \lambda_r)$ is a finite, nonincreasing sequence of positive integers. The {\itshape size} of $\lambda$ is equal to the sum $\sum_{i = 1}^r \lambda_i$ and will be denoted by $|\lambda|$. The {\itshape length} of $\lambda$ is $r$ and will be denoted by $\ell (\lambda)$. For any positive integer $i$, let $m_i (\lambda)$ denote the number of indices $j$ for which $\lambda_j = i$; observe that $\ell(\lambda) = \sum_{i = 1}^{|\lambda|} m_i (\lambda)$. For any positive integer $i \le r$, let $\lambda \setminus \lambda_i$ denote the partition $(\lambda_1, \lambda_2, \ldots , \lambda_{i - 1}, \lambda_{i + 1}, \ldots , \lambda_r)$ obtained from removing one occurrence of $\lambda_i$ from $\lambda$. For any positive integer $j$, let $(j, \lambda)$ denote the partition $\lambda \cup \{ j \}$ obtained from adjoining $j$ to $\lambda$. 

Now, let $N$ be a positive integer and $X = \{ x_1, x_2, \ldots , x_N \}$ be a set of $N$ real variables. For each nonnegative integer $k$, let $p_k (X) = \sum_{i = 1}^N x_i^k$ denote the $k$th {\itshape power sum}. Moreover, let 
\begin{flalign*}
e_k (X) = \displaystyle\sum_{\substack{S \subseteq X \\ |S| = k}}\displaystyle\prod_{s \in S} s, 
\end{flalign*}

\noindent denote the $k$th {\itshape elementary symmetric polynomial}, where the sum ranges over all unordered $k$-element subsets $S \subseteq X$. For any partition $\lambda$, let $p_{\lambda} (X) = \prod_{i = 1}^{\ell (\lambda)} p_{\lambda_i} (X)$ denote the {\itshape multi-part power sum} and let $e_{\lambda} (X) = \prod_{i = 1}^{\ell (\lambda)} e_{\lambda_i} (X)$ denote the {\itshape multi-part elementary symmetric polynomial}. 	

In this paper we will be interested in expectations of multi-part power sums and multi-part elementary symmetric polynomials against the $\beta$-Hermite ensemble (also called the Gaussian $\beta$ Ensemble or $\beta$-Ensemble), which is a probability distribution that generalizes the joint eigenvalue distributions of particular Gaussian ensembles of random matrices. Let us define these random matrices, following the notation used by Mehta in \cite{RM}. 

A {\itshape random matrix} is a matrix whose entries are random variables. Examples of such matrices are given by the {\itshape Gaussian Orthogonal Ensemble} (GOE), which is a collection of symmetric random matrices $\textbf{GOE}_n = \{ g_{i, j} \}_{1\le i, j \le n}$ whose upper triangular entries $\{ g_{i, j} \}_{1\le i < j \le n}$ are independently distributed, normalized, real Gaussian random variables; whose lower triangular entries $\{ g_{i, j} \}_{1\le j < i \le n}$ are symmetrically determined by the upper triangular entries through the equality $g_{i, j} = g_{j, i}$; and whose diagonal entries are independently distributed real Gaussian variables with variance $2$. The {\itshape Gaussian Unitary Ensemble} (GUE) and {\itshape Gaussian Symplectic Ensemble} (GSE) are complex and quaternionic analogs of the GOE, respectively. 

The joint eigenvalue distributions of the GOE, GUE, and GSE are part of a one-parameter family of probability measures $\mathbb{P}_{\beta} = \mathbb{P}_{\beta, N}$, which are defined by
\begin{flalign}	
\label{betadistribution}
\mathrm{d} \mathbb{P}_{\beta} (X) = c(\beta) \exp\left( \displaystyle\frac{-\beta}{4} \displaystyle\sum_{i = 1}^N x_i^2 \right) \displaystyle\prod_{1\le i < j \le N} |x_i - x_j|^{\beta} \displaystyle\prod_{i = 1}^N \mathrm{d}x_i, 
\end{flalign}

\noindent for $\beta = 1, 2, 4$, respectively, where 
\begin{flalign*}
c(\beta) = 2^{N (\beta - \beta N - 4) / 4} \pi^{- N / 2} \beta^{N (\beta N - \beta + 2) / 4} \displaystyle\prod_{i = 1}^N \Gamma \left( 1 + \frac{\beta}{2} \right) \Gamma \left( 1 + \frac{i \beta}{2} \right)^{-1}
\end{flalign*}

\noindent is a normalizing function in $\beta$. 

Although $\mathbb{P}_{\beta}$ is defined above for $\beta \in \{ 1, 2, 4 \}$, it is a probability distribution for all $\beta > 0$. This distribution is called the {\itshape $\beta$-Hermite ensemble}. Dense matrix models (such as the GOE, GUE, and GSE) are not known to exist for the $\beta$-Hermite ensemble if $\beta \notin \{ 1, 2, 4 \}$. However, Dumitriu and Edelman \cite{MMBE} showed that $\mathbb{P}_{\beta}$ has a tridiagonal matrix model for all positive $\beta$; we will discuss this further in \hyperref[Tridiagonal]{Section \ref*{Tridiagonal}}. 

For any function $f(X)$ of $N$ variables $X = \{ x_1, x_2, \ldots , x_N \}$, let 
\begin{flalign*}
\big\langle f (X) \big\rangle_{\beta} = \int_{\mathbb{R}^N} f(X) \mathrm{d} \mathbb{P}_{\beta} (X)
\end{flalign*}

\noindent denote the {\itshape $\beta$-expectation} of $f$. For instance, if $f (X) = p_k (X)$ for some positive integer $k$, then $\langle p_k \rangle_2 = \mathbb{E}[\Tr \textbf{GUE}_N^k]$, where $\mathbb{E}$ denotes expectation. By symmetry, this is equal to $0$ if $k$ is odd. 

An intriguing result of Harer and Zagier \cite{TECMSC} states that $\big\langle p_{\lambda} (X) \big\rangle_2$ is a nonnegative integer polynomial in $N$, for all partitions $\lambda$. When $\lambda = (2s)$ consists of one part, Harer and Zagier expressed $\big\langle p_{2s} (X) \big\rangle_2$ explicitly through the identity
\begin{flalign}
\label{sumorientable}
\big\langle p_{2s} (X) \big\rangle_2 = \displaystyle\frac{(2s)!}{2^s s!} \displaystyle\sum_{j = 0}^s 2^j \binom{s}{j} \binom{N}{j + 1}, 
\end{flalign}

\noindent which holds for all integers $s \ge 1$ (see also \cite{ADB} and \cite{DC} for combinatorial proofs of this fact). 

This identity implies the following arithmetic property about the integer polynomial $\big\langle p_{2q} (X) \big\rangle_2$ when $q \ge 3$ is a prime. 

\begin{prop}
\label{arithmeticorientable2}

We have that $\big\langle p_{2q} (X) \big\rangle_2 \equiv 2 N^{q + 1} - 2 N^2 \mod q$ for any prime $q \ge 3$. 
\end{prop}

\begin{proof}
The equality \eqref{sumorientable} may be rewritten as 
\begin{flalign}
\label{sumorientable2}
2^q \left( \big\langle p_{2q} (X) \big\rangle_2 - \displaystyle\frac{1}{q + 1} \binom{2q}{q} \displaystyle\prod_{i = 0}^q (N - i) \right) = \displaystyle\frac{1}{q + 1} \binom{2q}{q} \displaystyle\sum_{j = 0}^{q - 1} 2^j \binom{q}{j} \displaystyle\frac{(q + 1)!}{(j + 1)!} \displaystyle\prod_{i = 0}^j (N - i). 
\end{flalign} 

\noindent Now, $\binom{2q}{q} / (q + 1)$ is the $q$-th Catalan number and thus is integral. Furthermore, $q$ divides $\binom{q}{j}$ for all nonnegative integers $j < q$, unless $j = 0$, in which case $q$ divides $(q + 1)! / (j + 1)!$. Therefore, the right side of \eqref{sumorientable2} is divisible by $q$. Since $q \ne 2$ and $\big\langle p_{2q} (X) \big\rangle_2$ is an integer polynomial in $N$, we have that 
\begin{flalign*}
\big\langle p_{2q} (X) \big\rangle_2 - \displaystyle\frac{1}{q + 1} \binom{2q}{q} \displaystyle\prod_{i = 0}^q (N - i) \equiv 0 \mod q. 
\end{flalign*}

\noindent The proposition now follows from the facts that 
\begin{flalign}
\label{binomial}
\displaystyle\frac{1}{q + 1} \binom{2q}{q} \equiv 2 \mod q; \qquad \displaystyle\prod_{i = 0}^q (N - i) \equiv N^{q + 1} - N^2 \mod q. 
\end{flalign}  
\end{proof}

\begin{rem}
From \eqref{sumorientable}, we find that $\big\langle p_4 (X) \big\rangle_2 = 2N^3 + N$. Thus, \hyperref[arithmeticorientable2]{Proposition \ref*{arithmeticorientable2}} does not hold for $q = 2$. 
\end{rem}

Similar results for $\big\langle p_{\lambda} (X) \big\rangle_1$ were found by Goulden and Jackson in \cite{MLOSIORSM}. They showed that $\big\langle p_{\lambda} (X) \big\rangle_1$ is a nonnegative integer polynomial in $N$, for all partitions $\lambda$. When $\lambda = (2s)$ consists of one part, Goulden and Jackson established the identity 
\begin{flalign}
\label{sumnonorientable}
\big\langle p_{2s} (X) \big\rangle_1 = s! \displaystyle\sum_{j = 0}^s 2^{2s - j} \displaystyle\sum_{i = 0}^s \binom{s - \frac{1}{2}}{s - i} \binom{i + j - 1}{i} \binom{(N - 1) / 2}{i} + \displaystyle\frac{(2s)!}{2^s s!} \displaystyle\sum_{j = 0}^s 2^j \binom{s}{j} \binom{N}{j + 1}, 
\end{flalign}

\noindent for all positive integers $s$. The following result is analogous to \hyperref[arithmeticorientable2]{Proposition \ref*{arithmeticorientable2}} and can be verified similarly; we omit its proof. 

\begin{prop}
\label{arithmeticnonorientable2}

We have that $\big\langle p_{2q} (X) \big\rangle_1 \equiv 2 N^{q + 1} + N^q - 2N^2 - N \mod q$ for any prime $q$. 
\end{prop}

\hyperref[arithmeticorientable2]{Proposition \ref*{arithmeticorientable2}} and \hyperref[arithmeticnonorientable2]{Proposition \ref*{arithmeticnonorientable2}} might initially seem arbitrary in this matrix-theoretic context, but in the next section we will discuss their combinatorial significance; we will show how these two results can be deduced in a different way, using combinatorial interpretations of the coefficients of the polynomials $\big\langle p_{2q} (X) \big\rangle_2$ and $\big\langle p_{2q} (X) \big\rangle_1$ as enumerators of topological maps. 

\subsection{Maps}

\label{Maps}

In this section, we will describe how to establish \hyperref[arithmeticorientable2]{Proposition \ref*{arithmeticorientable2}} and \hyperref[arithmeticnonorientable2]{Proposition \ref*{arithmeticnonorientable2}} in a more combinatorial way, using the enumerative theory of topological maps. Let us begin by introducing some of the terminology in this field, following the notation given in Chapter 2.2 of \cite{TCJPGSTM}. 

Let $G$ be a topological connected graph (possibly with multiple edges and loops), and let $\mathcal{M}$ be a surface. A {\itshape map} $m : G \rightarrow \mathcal{M}$ is a two-cell embedding of $G$ into $\mathcal{M}$. The {\itshape edges} and {\itshape vertices} of $m$ are the edges and vertices of $G$, respectively; the {\itshape faces} of $m$ are the connected components of $\mathcal{M} \setminus m(G)$. The map $m$ is {\itshape orientable} if the surface $\mathcal{M}$ is orientable and is otherwise {\itshape non-orientable}; following Goulden and Jackson \cite{MLOSIORSM}, we call all maps {\itshape locally orientable}. We call $m$ {\itshape planar} if $\mathcal{M}$ has genus $0$. 

A map $m' : G' \rightarrow \mathcal{M}'$ is {\itshape equivalent} to $m$ if there is an orientation-preserving homeomorphism from $\mathcal{M}$ to $\mathcal{M'}$ that induces a graph isomorphism from $G$ to $G'$. A map is {\itshape rooted} by distinguishing an edge $e \in G$ and a {\itshape flag} $f$, which is a side-end position of $e$; each edge has two sides and two ends, so four flags. In what follows, all maps will be rooted. 

In his original paper \cite{ACPM}, Tutte was interested in the number $n (F, E)$ of planar rooted maps, up to equivalence, with a specified number of faces $F$ and edges $E$. The number of vertices $V$ is determined by the equality $V + F = E + 2$ because the map is planar. More generally, we would have that 
\begin{flalign}
\label{genus}
V + F = E + 2 - g,
\end{flalign}

\noindent where $g$ is the Eulerian genus of $\mathcal{M}$, which we will also refer to as the {\itshape genus} of the map $m$. In \cite{OEPM}, Tutte combinatorially deduced recursive relations for $n (F, E)$ and used them to solve for a generating function of the form $\sum_{F = 0}^{\infty} \sum_{E = 0}^{\infty} n(F, E) x^F y^E$. 

\begin{rem}
Euler's equality describing the standard genus $g'$ of an orientable map states that $V + F = E + 2 - 2 g'$. We use the Eulerian genus, which is twice the standard genus, in order to discuss the genera of orientable and non-orientable surfaces simultaneously. 
\end{rem}

For the remainder of this section, we will restrict ourselves to maps with one face ($F = 1$), although analogs of the following results are known for maps with multiple faces. For any nonnegative integers $E$ and $g$, let $\widetilde{k_g} (E)$ be the number of genus-$g$ orientable rooted maps with one face and $E$ edges, and let $k_v (E)$ denote the number of orientable rooted maps with one face, $v$ vertices, and $E$ edges. Observe that $n(1, E) = \widetilde{k_0} (E)$; furthermore, \eqref{genus} implies that $k_{E + 1 - g} (E) = \widetilde{k_g} (E)$. Similarly, let $\widetilde{K_g} (E)$ denote the number of genus-$g$ locally orientable rooted maps with one face and $E$ edges, and let $K_v (E)$ denote the number of locally orientable rooted maps with one face, $v$ vertices, and $E$ edges. 

Extending on work of 't Hooft \cite{APDTFSI} and Br\'{e}zin-Itzykson-Parisi-Zuber \cite{PD}, Harer and Zagier showed that a generating function for the orientable map enumerators $k_v (E)$ could be found from moments of $\mathbb{P}_2$ \cite{TECMSC}. Specifically, they established that 
\begin{flalign}
\label{orientableseries}
\big\langle p_{2E} (X) \big\rangle_2 = \displaystyle\sum_{v = 1}^{E + 1} k_v (E) N^v, 
\end{flalign}

\noindent for any positive integer $E$; we refer to \cite{TECMSC} or Section 5 of the survey \cite{MIME} for a proof. Now, \hyperref[arithmeticorientable2]{Proposition \ref*{arithmeticorientable2}} may be restated as follows. 

\begin{prop}
\label{arithmeticorientable}

Let $q \ge 3$ be any prime number. For any nonnegative integer $v \notin \{ 2, q + 1 \}$, we have that $k_v (2q) \equiv 0 \mod q$. Furthermore, $k_{q + 1} (2q) \equiv 2 \mod q$ and $k_{2} (2q) \equiv -2 \mod q$. 
\end{prop}

In the previous section, we deduced this fact from \eqref{sumorientable}, which follows from matrix theoretic methods. However, as stated,  \hyperref[arithmeticorientable]{Proposition \ref*{arithmeticorientable}} also has a direct combinatorial proof. 

\begin{proof}[Proof of Proposition \ref{arithmeticorientable}]
For each nonnegative integer $g$, $\widetilde{k_g} (2q)$ counts the number of genus-$g$ rooted maps with one face of degree $2q$. This is equal to the number of {\itshape gluings} (that is, the number of ways to identify pairs of edges; see Section 5.1 of \cite{MIME}) of a labelled regular $2q$-gon $\mathcal{P}$ to form a surface of Eulerian genus $g$. Label the edges of $\mathcal{P}$ with the integers $1, 2, \ldots , 2q$ in clockwise order; any gluing may be represented by a collection of $q$ pairs $\{ (a_i, b_i) \}$ of integers between $1$ and $2q$, specifying which edges to identify. For instance, when $q =3$, the collection $\{ (1, 4), (2, 5), (3, 6) \}$ glues opposite sides of the hexagon $\mathcal{P}$ to form a surface of Eulerian genus $2$. 

The cyclic group $C_q$ of order $q$ acts on the set of genus-$g$ gluings; a generator of this group acts on a gluing by rotating $\mathcal{P}$ clockwise by an angle of $2 \pi / q$ around its center. Specifically, this generator sends any gluing identifying the pairs $\{ (a_i, b_i) \}_{i \in [1, q]}$ to the gluing identifying the pairs $\{ (a_i + 2, b_i + 2) \}_{i \in [1, q]}$ (here, the $a_i + 2$ and $b_i + 2$ are taken modulo $2q$). Any gluing not fixed by the action of this group has an orbit of size $q$; therefore, the orbits of such gluings do not contribute to the residue of $\widetilde{k_g} (2q)$ modulo $q$. 

The cyclic group $C_q$ fixes $q$ gluings, which may be represented by collections of the form $\{ (1, k), (3, k + 2), \ldots , (2q - 1, k + 2q - 2) \}$, where $k$ is a positive even integer less than $2q + 1$. The two gluings $\{ (1, 2), (3, 4), \ldots , (2q - 1, 2q) \}$ and $\{ (1, 2q), (3, 2), \ldots , (2q - 1, 2q - 2) \}$ have $q + 1$ vertices; the other $q - 2$ gluings fixed by $C_q$ have $2$ vertices. Therefore, $k_{q + 1} (2q) \equiv 2 \mod q$; $k_2 (2q) \equiv -2 \mod q$; and $k_v (2q) \equiv 0 \mod q$ if $v \notin \{ 2, q + 1 \}$. 
\end{proof}

\begin{rem}
\label{rooting}
The proof of \hyperref[arithmeticorientable]{Proposition \ref*{arithmeticorientable}} uses the fact that the cyclic group $C_q$ acts on the set of genus-$g$ gluings by rotations. Rotating a genus-$g$ gluing is equivalent to re-rooting its corresponding genus-$g$ map. Therefore, the above proof implicitly uses the fact that re-rooting a map does not change its genus or orientability. As we will mention in \hyperref[BetaEnsemble]{Section \ref*{BetaEnsemble}}, this is not true in the general $\beta$ setting.
\end{rem}

The analog of \eqref{orientableseries} for locally orientable maps was established by Goulden and Jackson in \cite{MLOSIORSM}. Specifically, they showed that
\begin{flalign}
\label{nonorientableseries}
\big\langle p_{2E} (X) \big\rangle_1 = \displaystyle\sum_{v = 1}^{E + 1} K_v (E) N^v, 
\end{flalign}

\noindent for any positive integer $E$. Now, \hyperref[arithmeticnonorientable2]{Proposition \ref*{arithmeticnonorientable2}} can be rewritten as follows.

\begin{prop}
\label{arithmeticnonorientable}

Let $q$ be any prime number. For any nonnegative integer $v \notin \{ 1, 2, q, q + 1 \}$, we have that $K_v (2q) \equiv 0 \mod q$. Furthermore, $K_{q + 1} (2q) \equiv 2 \mod q$; $K_q (2q) \equiv 1 \mod q$; $K_2 (2q) \equiv - 2 \mod q$; and $K_1 (2q) \equiv -1 \mod q$. 
\end{prop}

 Similar to \hyperref[arithmeticorientable]{Proposition \ref*{arithmeticorientable}}, \hyperref[arithmeticnonorientable]{Proposition \ref*{arithmeticnonorientable}} might be shown using the action of $C_q$ on the set of oriented gluings; we omit this proof. 

Thus, while both \hyperref[arithmeticorientable2]{Proposition \ref*{arithmeticorientable2}} and \hyperref[arithmeticnonorientable2]{Proposition \ref*{arithmeticnonorientable2}} may initially appear as statements about matrix integrals, they can also be interpreted as arithmetic properties of the $k_v (2q)$ and $K_v (2q)$ that can be combinatorially explained through an underlying group action on the set of orientable or locally orientable genus-$g$ topological maps; the nonzero residues in these statements result from the fixed points of this group action. In \hyperref[BetaEnsemble]{Section \ref*{BetaEnsemble}}, we will present our main result, which is a generalization of both \hyperref[arithmeticorientable2]{Proposition \ref*{arithmeticorientable2}} and \hyperref[arithmeticnonorientable2]{Proposition \ref*{arithmeticnonorientable2}} that also has a combinatorial interpretation but no known combinatorial explanation. 

\subsection{Results}

\label{BetaEnsemble}

The extension of \eqref{orientableseries} and \eqref{nonorientableseries} to the general $\beta$-Hermite ensemble was established by La Croix \cite{TCJPGSTM}. He showed as Lemma 4.13 of Chapter 4 of \cite{TCJPGSTM} that moments of the $\beta$-Hermite ensemble satisfy the recursive relations
\begin{flalign}
\label{recursionpower} 
\big\langle p_{j + 2, \lambda} (X) \big\rangle_{\beta} = b (j + 1) \big\langle p_{j, \lambda} (X) \big\rangle_{\beta} + (b + 1) \displaystyle\sum_{i = 1}^{\ell (\lambda)} \lambda_i \big\langle p_{\lambda_i + j, \lambda \setminus \lambda_i} (X) \big\rangle_{\beta} + \displaystyle\sum_{i = 0}^j \big\langle p_{i, j - i, \lambda} (X) \big\rangle_{\beta}, 
\end{flalign}

\noindent for all integers $j \ge -1$ (when $j = -1$, only the second term on the right side of \eqref{recursionpower} should be viewed as nonzero), positive reals $\beta > 0$, and partitions $\lambda$. In \eqref{recursionpower}, $b = 2 / \beta - 1$ denotes the {\itshape shifted Jack parameter}. Later, we will also use the notation $\alpha = 2 / \beta = b + 1$ to denote the {\itshape Jack parameter}. 

Since $\big\langle p_0 (X) \big\rangle_{\beta} = N$, \eqref{recursionpower} and induction on $|\lambda|$ imply that $\big\langle p_{\lambda} (X) \big\rangle_{\beta}$ is a polynomial in $N$ and $b$ with nonnegative integer coefficients, for any partition $\lambda$. Restricting to the case when $\lambda = (2E)$ consists of one even part (again, analogs exist for $\ell (\lambda) > 1$, but we will not discuss them here), this implies that there exist nonnegative integers $K_{v, \eta} (E)$ such that 
\begin{flalign}
\label{partiallyorientableseries}
\big\langle p_{2 E} (X) \big\rangle_{\beta} = \displaystyle\sum_{v = 1}^{E + 1} N^v \displaystyle\sum_{\eta = 0}^{E} K_{v, \eta} (E) b^{\eta}. 
\end{flalign}

\noindent Inserting $b = 0$ into \eqref{partiallyorientableseries} and applying \eqref{orientableseries} yields $k_v (E) = K_{v, 0} (E)$; in this case, only orientable maps are counted in \eqref{partiallyorientableseries}. Similarly, inserting $b = 1$ into \eqref{partiallyorientableseries} and applying \eqref{nonorientableseries} yields $K_v (E) = \sum_{\eta = 0}^E K_{v, \eta} (E)$; in this case, all maps are counted in \eqref{partiallyorientableseries}. Therefore, it might be reasonable to guess that $K_{v, \eta} (E)$ enumerates the number of rooted maps with $v$ vertices, one face, $E$ edges, and some ``degree of non-orientability'' that increases with the integer $\eta$. This was conjectured implicitly by Goulden and Jackson as Conjecture 3.5 of \cite{CCMMCCJSF}, and later in its above form in Section 5 of \cite{AGPVECMSRCAC}. 

Through a suitable combinatorial interpretation of the recurrence \eqref{recursionpower}, La Croix showed this conjecture to be true; the parameter $\eta$ is known as a {\itshape $b$-invariant}. One may refer to Definition 4.1 of \cite{TCJPGSTM} for a precise definition of the $b$-invariant $\eta$. 

Since the $K_{v, \eta} (\lambda)$ have a combinatorial interpretation as ``partially orientable'' map enumerators, one might expect the $K_{v, \eta} (2q)$ to have arithmetic properties similar to $k_v (2q)$ and $K_v (2q)$ for all odd primes $q$. The following result, which appears to be a new, simultaneous generalization of \hyperref[arithmeticorientable]{Proposition \ref*{arithmeticorientable}} ($b = 0$) and \hyperref[arithmeticnonorientable]{Proposition \ref*{arithmeticnonorientable}} ($b = 1$), shows this to be true. We will establish this theorem (in fact its equivalent \hyperref[arithmeticpartial2]{Theorem \ref*{arithmeticpartial2}}) in \hyperref[ProofTheorem]{Section \ref*{ProofTheorem}}. 

\begin{thm}
\label{arithmeticpartial} 
Let $q \ge 3$ be any prime number. We have that $K_{q + 1, 0} (2q) \equiv 2 \mod q$; $K_{q, 1} (2q) \equiv 1 \mod q$; $K_{2, 0} (2q) \equiv -2 \mod q$; $K_{2, 1} (2q) \equiv 1 \mod q$; and $K_{1, 1} (2q) \equiv -2 \mod q$. For all integers $\eta \in [2 , q - 1]$, we have that $K_{2, \eta} (2q) \equiv (-1)^{\eta + 1} \mod q$ and $K_{1, \eta} (2q) \equiv (-1)^{\eta} \mod q$. For all other pairs of nonnegative integers $(v, \eta)$, we have that $K_{v, \eta} (2q) \equiv 0 \mod q$. 
\end{thm} 

Similar to \hyperref[arithmeticorientable]{Proposition \ref*{arithmeticorientable}} and \hyperref[arithmeticnonorientable]{Proposition \ref*{arithmeticnonorientable}}, \hyperref[arithmeticpartial]{Theorem \ref*{arithmeticpartial}} suggests the existence of an underlying group action on the set of maps with one face of degree $2q$, a fixed number of vertices, and a fixed $b$-invariant. As before, the nonzero residues appearing in \hyperref[arithmeticpartial]{Theorem \ref*{arithmeticpartial}} might then correspond to fixed points of this group action. However, Goulden and Jackson \cite{CCMMCCJSF} (see also Theorem 3.30 of \cite{TCJPGSTM}) showed that the $b$-invariant $\eta$ of a map depends on the map's rooting. Therefore, due to \hyperref[rooting]{Remark \ref*{rooting}}, the cyclic group $C_q$ does not necessarily act on the set of maps with a fixed $b$-invariant. 

This leads us to ask whether there is a combinatorial proof of \hyperref[arithmeticpartial]{Theorem \ref*{arithmeticpartial}}, perhaps through a different group action or through a different combinatorial interpretation of the integers $K_{v, \eta} (\lambda)$. We remark that a different arithmetic property with no known combinatorial explanation (unless $b \in \{ 0, 1 \}$, in which case it follows from re-rooting) was given by La Croix as Corollary 4.17 of \cite{TCJPGSTM}. 

Our proof of \hyperref[arithmeticpartial]{Theorem \ref*{arithmeticpartial}} is matrix theoretic. Due to \eqref{partiallyorientableseries}, we can rewrite \hyperref[arithmeticpartial]{Theorem \ref*{arithmeticpartial}} as follows. 

\begin{restatable}{thm}{arithmeticpartiallyorientable}
\label{arithmeticpartial2}

For each prime number $q \ge 3$, we have that 
\begin{flalign*}
\big\langle p_{2q} \big\rangle_{\beta} \equiv 2 N^{q + 1} + b N^q + \left( - \displaystyle\sum_{i = 1}^{q - 1} (-b)^i - 2 \right) N^2 + \left( \displaystyle\sum_{i = 2}^{q - 1} (-b)^i - 2b \right) N \mod q. 
\end{flalign*}

\end{restatable}

Unlike the case $\beta \in \{ 1, 2 \}$, there is no known analog of \eqref{sumorientable} and \eqref{sumnonorientable} that explicitly evaluates $\big\langle p_{2s} (X) \big\rangle_{\beta}$ for general $\beta > 0$. Instead, we know of five ways to find exact expressions for $\big\langle p_{2s} (X) \big\rangle_{\beta}$. The first is by evaluating traces of powers of tridiagonal matrix models, given in Chapter 5.4 and Chapter 6.2 of \cite{ESBE} by Dumitriu; the second is through a change of basis from the Jack polynomials to power sums, given in Section 4 of \cite{MVOP} by Dumitriu, Edelman, and Shuman; the third is through the recursive equation \eqref{recursionpower}, given in Chapter 4.4 of \cite{TCJPGSTM} by La Croix; and the fourth uses loop equations for the resolvent, given in Sections 2 and 3 of \cite{MGBELED} by Forrester and Witte (see also the references therein). We do not know of a way to deduce arithmetic properties about $\big\langle p_{2q} (X) \big\rangle_{\beta}$ through any of these four existing methods. 

Therefore, we introduce the fifth method by expressing $\big\langle p_{2q} (X) \big\rangle_{\beta}$ in terms of expectations $\big\langle e_{\lambda} \big\rangle_{\beta}$ of multi-part elementary symmetric polynomials; through an integrality result about these expectations (see \hyperref[integralelementary]{Proposition \ref*{integralelementary}}), we will be able to establish \hyperref[arithmeticpartial2]{Theorem \ref*{arithmeticpartial2}} in \hyperref[PartialArithmetic]{Section \ref*{PartialArithmetic}}. 

In the process, we will derive several results about expectations of symmetric polynomials that might be of independent interest. The first is an integrality result (\hyperref[integralsymmetric]{Corollary \ref*{integralsymmetric}}) about $\big\langle f(X) \big\rangle_{\beta}$ that holds for arbitrary symmetric polynomials $f$ with integer coefficients; as we will discuss in \hyperref[integral]{Remark \ref*{integral}}, special cases of this result have appeared implicitly before, but not in full generality until now. The second is a recursion (\hyperref[recursion]{Theorem \ref*{recursion}}) that explicitly evaluates expectations of multi-part elementary symmetric functions. This gives rise to an algorithm (\hyperref[algorithm]{Corollary \ref*{algorithm}}) that finds $\big\langle e_{\lambda} (X) \big\rangle_{\beta}$ efficiently for partitions $\lambda$ of small length; as we will discuss in \hyperref[expectationselementary]{Section \ref*{expectationselementary}}, this complements the algorithm given by Dumitriu, Edelman, and Shuman in \cite{MVOP}.


\subsection*{Acknowledgements}

This project was completed under the Undergraduate Research Opportunities Program (UROP). The author heartily thanks Michael La Croix for his valuable advice, insightful conversations, and suggestions to this paper. The author also thanks Alan Edelman for supervising this project and Peter Forrester and Nicholas Witte for valuable comments. 




\section{Proof of the Arithmetic Property} 

\label{PartialArithmetic}

In this section, we will prove \hyperref[arithmeticpartial2]{Theorem \ref*{arithmeticpartial2}}. Let us begin by outlining the proof. 

\subsection{Outline of Proof of Theorem \ref{arithmeticpartial2}}

\label{Reduction}

Instead of attempting to evaluate the power-sum expectation $\big\langle p_{2q} (X) \big\rangle_{\beta}$ directly, we will first express the $2q$-th power sum as a rational linear combination of multi-part elementary symmetric functions through the Newton-Girard identity. This yields 
\begin{flalign}
\label{rationalsum}
\big\langle p_{2q} (X) \big\rangle = 2q \displaystyle\sum_{|\lambda| = 2q} c_{\lambda} \big\langle e_{\lambda} (X) \big\rangle,
\end{flalign}

\noindent where the $c_{\lambda}$ are explicit rational numbers, and we simplify notation by using $\big\langle f(X) \big\rangle$ in place of $\big\langle f(X) \big\rangle_{\beta}$ for any function $f$ of $N$ variables. The prefactor of $2q$ in \eqref{rationalsum} might initially appear useful for reduction modulo $q$, but there seem to be two issues. 

First, the coefficients $c_{\lambda}$ are rational, and their denominators might be divisible by $q$. Thus, $2q c_{\lambda}$ might not be divisible by $q$, so there might be many summands on the right side of \eqref{rationalsum} that are nonzero modulo $q$. For instance, if $\lambda = 2^q = (2, 2, \ldots , 2)$ (with $q$ twos), then $c_{\lambda} = - q^{-1}$. 

Second, as seen from the following proposition (originally due to Aomoto \cite{JPAWSI}), $\big\langle e_{\lambda} (X) \big\rangle$ is not always an integer polynomial in $N$ and $b$. 

\begin{restatable}[\cite{JPAWSI}]{prop}{singlepart}
\label{singlepart}

If $k$ is a nonnegative even integer, then 
\begin{flalign*} 
\big\langle e_k (X) \big\rangle = (-1)^{k / 2} \binom{N}{k} \prod_{j = 1}^{k / 2 - 1} (2j + 1).
\end{flalign*} 
\end{restatable}

The term $\big\langle e_{2q} (X) \big\rangle$ appears as a summand in \eqref{rationalsum} with coefficient $c_{(2q)} = -1$, and the denominator of $\big\langle e_{2q} (X) \big\rangle$ is divisible by $q$ due to \hyperref[singlepart]{Proposition \ref*{singlepart}}. Therefore, the contribution from the single-part partition $\lambda = (2q)$ to \eqref{rationalsum} is nonzero modulo $q$, even though $2q c_{\lambda} = - 2q$ is a multiple of $q$. Many other partitions also contribute nonzero residues to \eqref{rationalsum}. 

\begin{rem}
\hyperref[singlepart]{Proposition \ref*{singlepart}} implies that $\big\langle e_k (X) \big\rangle$ is independent of $\beta$. It can be viewed as a restatement of the fact that the expectation of the characteristic polynomial of a random matrix from the Gaussian $\beta$ Ensemble is the Hermite polynomial, for each $\beta \in \mathbb{R}_{> 0}$. 
\end{rem}

\begin{rem}
\label{analytic}

In fact, Aomoto established a more general result than \hyperref[singlepart]{Proposition \ref*{singlepart}}; he found the expectations of single-part elementary symmetric polynomials against the $\beta$-Jacobi ensemble, which exhibits a limit degeneration to the $\beta$-Hermite ensemble. 
\end{rem}

The first issue can be quickly resolved through the following lemma, which we will establish in \hyperref[ProofTheorem]{Section \ref*{ProofTheorem}}. In what follows, $1^{2q}$ denotes the partition $(1, 1, \ldots , 1)$ (with $2q$ ones).

\begin{restatable}{lem}{divisible}

\label{divisible}

For any partition $\lambda \notin \{ 1^{2q}, 2^q \}$ of size $2q$, there exists an integer $k_{\lambda}$, not divisible by $q$, such that $k_{\lambda} c_{\lambda} \in \mathbb{Z}$; equivalently, the denominator of $c_{\lambda}$ is not divisible by $q$ unless $\lambda = 1^{2q}$ or $\lambda = 2^q$. Furthermore, if $\lambda = 1^{2q}$, then $2q c_{\lambda} = 1$; if $\lambda = 2^q$, then $2q c_{\lambda} = -2$. 
\end{restatable} 

\hyperref[divisible]{Lemma \ref*{divisible}} shows that all but two of the coefficients $c_{\lambda}$ are well-behaved with respect to $q$; it also evaluates the other two coefficients explicitly. Using \hyperref[divisible]{Lemma \ref*{divisible}}, we may rewrite \eqref{rationalsum} as 
\begin{flalign}
\label{rationalsum2}
\big\langle p_{2q} (X) \big\rangle = \big\langle e_1 (X)^{2q} \big\rangle - 2 \big\langle e_2 (X)^q \big\rangle + \displaystyle\sum_{\substack{ |\lambda| = 2q \\ \lambda \notin \{ 1^{2q}, 2^q \} } } \widetilde{c_{\lambda}} \big\langle e_{\lambda} (X) \big\rangle, 
\end{flalign}

\noindent where the $\widetilde{c_{\lambda}}$ are rational numbers whose numerators are multiples of $q$. The polynomials $\big\langle e_1 (X)^{2q} \big\rangle$ and $\big\langle e_2 (X)^q \big\rangle$ will be addressed separately in \hyperref[Coefficient]{Section \ref*{Coefficient}}; the coefficients of all other polynomials on the right side of \eqref{rationalsum2} are divisible by $q$. 

Now, let us turn to the second issue. Although $\big\langle e_{\lambda} (X) \big\rangle$ is not always an integer polynomial, \hyperref[singlepart]{Proposition \ref*{singlepart}} indicates that $\big\langle e_k (X) \big\rangle$ is in the $\mathbb{Z}[b]$-span of the binomial coefficients $\binom{N}{0}, \binom{N}{1}, \ldots $. The following lemma shows that this is also true for $\beta$-expectations of arbitrary multi-part elementary symmetric polynomials. 

\begin{restatable}{lem}{integralelementary}
\label{integralelementary}

\noindent For each partition $\lambda$, there exist integer polynomials $a_{\lambda, 0}, a_{\lambda, 1}, \ldots , a_{\lambda, |\lambda|} \in \mathbb{Z}[b]$ such that $\big\langle e_{\lambda} (X) \big\rangle = \sum_{i = 0}^{|\lambda|} a_{\lambda, i} \binom{N}{i}$. 
\end{restatable}

\begin{rem}
\hyperref[integralelementary]{Lemma \ref*{integralelementary}} does not seem to be immediate from integrating $e_{\lambda} (X)$ against \eqref{betadistribution}. In \hyperref[Determinants]{Section \ref*{Determinants}}, we will present a combinatorial proof of this result using a tridiagonal matrix model.  
\end{rem}

In view of \hyperref[integralelementary]{Lemma \ref*{integralelementary}}, we can rewrite \eqref{rationalsum2} as 
\begin{flalign}
\label{rationalsum3}
\big\langle p_{2q} (X) \big\rangle = \displaystyle\sum_{i = 0}^{2q} a_{1^{2q}, i} \binom{N}{i} - 2 \displaystyle\sum_{i = 0}^{2q} a_{2^q, i} \binom{N}{i} + \displaystyle\sum_{\substack{ |\lambda| = 2q \\ \lambda \notin \{ 1^{2q}, 2^q \} } } \displaystyle\sum_{i = 0}^{2q} \widetilde{c_{\lambda}} a_{\lambda, i} \binom{N}{i}. 
\end{flalign}

\noindent Since the index $i$ in \eqref{rationalsum3} runs from $0$ to $2q$, the issue remains that $q$ appears in the denominators of many summands on the right side of \eqref{rationalsum3}. Moreover, the fact that $i$ can equal $2q$ in \eqref{rationalsum3} seems to suggest that $\big\langle p_{2q} (X) \big\rangle$ is of degree $2q$ in $N$. However, we will see in \hyperref[ProofTheorem]{Section \ref*{ProofTheorem}} that it is in fact a polynomial of degree $q + 1$. Thus, there is cancellation among the high-degree terms in \eqref{rationalsum3}. Specifically, we may rewrite \eqref{rationalsum3} as 
\begin{flalign}
\label{rationalsum4}
\big\langle p_{2q} (X) \big\rangle = \displaystyle\sum_{i = 0}^{q + 1} a_{1^{2q}, i} \binom{N}{i} - 2 \displaystyle\sum_{i = 0}^{q + 1} a_{2^q, i} \binom{N}{i} + \displaystyle\sum_{\substack{ |\lambda| = 2q \\ \lambda \notin \{ 1^{2q}, 2^q \} } } \displaystyle\sum_{i = 0}^{q + 1} \widetilde{c_{\lambda}} a_{\lambda, i} \binom{N}{i}, 
\end{flalign}

\noindent where now the index $i$ does not exceed $q + 1$. 

Furthermore, we will see from a result of Forrester and Witte (which will be more precisely stated in \hyperref[ProofTheorem]{Section \ref*{ProofTheorem}}; see \hyperref[degree]{Proposition \ref*{degree}}) that the degree-$(q + 1)$ and degree-$q$ terms on the right side of \eqref{rationalsum4} are explicit. Therefore, \eqref{rationalsum4} may be rewritten as 
\begin{flalign}
\label{rationalsum5}
\big\langle p_{2q} (X) \big\rangle &= s_q (q + 1)! \binom{N}{q + 1} + t_q q! \binom{N}{q} + \displaystyle\sum_{\substack{ |\lambda| = 2q \\ \lambda \notin \{ 1^{2q}, 2^q \} } } \displaystyle\sum_{i = 0}^{q - 1} \widetilde{c_{\lambda}} a_{\lambda, i} \binom{N}{i} \nonumber \\
& \quad + \displaystyle\sum_{i = 0}^{q - 1} a_{1^{2q}, i} \binom{N}{i} - 2 \displaystyle\sum_{i = 0}^{q - 1} a_{2^q, i} \binom{N}{i},  
\end{flalign}

\noindent for explicit constants $s_q$ and $t_q$. 

Now, the first two terms on the right side of \eqref{rationalsum5} are integer polynomials that can be evaluated modulo $q$ directly, since $s_q$ and $t_q$ are explicit. The third term (which contributes the most summands to \eqref{rationalsum5}) is zero modulo $q$ since $q$ divides $\widetilde{c_{\lambda}}$ and does not divide the denominator of $\binom{N}{i}$ for any integer $i < q$. The remaining two sums in \eqref{rationalsum5} will be evaluated modulo $q$ through an examination of the polynomials $\big\langle e_1 (X)^{2q} \big\rangle$ and $\big\langle e_2 (X)^q \big\rangle$ in \hyperref[Coefficient]{Section \ref*{Coefficient}}. Thus, the right side of \eqref{rationalsum5} can be evaluated modulo $q$ termwise, from which we will be able to deduce \hyperref[arithmeticpartial2]{Theorem \ref*{arithmeticpartial2}}. 

The organization for the remainder of this section is as follows. In \hyperref[Tridiagonal]{Section \ref*{Tridiagonal}}, we will introduce the tridiagonal matrix model for the $\beta$-Hermite ensemble. In \hyperref[Determinants]{Section \ref*{Determinants}}, we will use this matrix model to establish \hyperref[integralelementary]{Lemma \ref*{integralelementary}}. In \hyperref[Coefficient]{Section \ref*{Coefficient}}, we will understand the modulo $q$ properties of $\big\langle e_1 (X)^{2q} \big\rangle$ and $\big\langle e_2 (X)^q \big\rangle$. In \hyperref[ProofTheorem]{Section \ref*{ProofTheorem}}, we will prove \hyperref[divisible]{Lemma \ref*{divisible}} and \hyperref[arithmeticpartial2]{Theorem \ref*{arithmeticpartial2}}. 


\subsection{The Tridiagonal Matrix Model}

\label{Tridiagonal}

Our proof of \hyperref[integralelementary]{Lemma \ref*{integralelementary}} will use the {\itshape tridiagonal matrix model} for the $\beta$-Hermite ensemble, introduced by Dumitriu and Edelman in \cite{MMBE}. In this section, we will describe this matrix model. 

For any real number $\beta > 0$ and positive integer $N$, let $\textbf{T}_N = \textbf{T}_{N, \beta}$ denote the $N \times N$ tridiagonal symmetric random matrix whose $(i, j)$ entry $t_{i, j}$ is distributed as follows. If $|i - j| \ge 2$, then $t_{i, j} = 0$; if $i = j$, then $t_{i, j} = G_i \sqrt{2 / \beta}$, where $G_i$ denotes a normalized, real Gaussian random variable; if $j = i + 1$, then $t_{i, j} = X_{i \beta} / \sqrt{\beta}$, where $X_{i \beta}$ denotes a $\chi_{i \beta}$ distributed random variable; and if $i = j + 1$, then $t_{i, j}$ is determined through the symmetric equality $t_{i, j} = t_{j, i}$. Here, the upper diagonal entries are mutually independent. Examples of $\textbf{T}_N$ for $N \in \{ 3, 4 \}$ are shown in Figure 2. 

The following result of Dumitriu and Edelman appears as Theorem 2.12 in \cite{MMBE}. 

\begin{prop}[{\cite[Theorem 2.12]{MMBE}}] 
\label{tridiagonaldistribution}

For any positive integer $N$, the joint eigenvalue distribution of $\textbf{T}_N$ is $\mathbb{P}_{\beta, N}$. 
\end{prop}

\begin{figure}
\begin{center}
\begin{flalign*}
\textbf{T}_3 = \displaystyle\frac{1}{\sqrt{\beta}} \left[ \begin{array}{ccccccc} G_1 \sqrt{2} & X_{\beta} \\ X_{\beta} & G_2 \sqrt{2} & X_{2 \beta} \\ & X_{2 \beta} & G_3 \sqrt{2} \end{array} \right]; \qquad \textbf{T}_4 = \displaystyle\frac{1}{\sqrt{\beta}} \left[ \begin{array}{ccccccc} G_1 \sqrt{2} & X_{\beta} \\ X_{\beta} & G_2 \sqrt{2} & X_{2 \beta} \\ & X_{2 \beta} & G_3 \sqrt{2} & X_{3 \beta} \\ & & X_{3 \beta} & G_4 \sqrt{2} \end{array} \right]
\end{flalign*}
\caption{The tridiagonal matrix models $\textbf{T}_3$ and $\textbf{T}_4$ are depicted above.}
\end{center}
\end{figure}

\begin{rem}
The tridiagonal model introduced above is slightly different from the tridiagonal model given by Dumitriu and Edelman. Specifically, they use the tridiagonal model $\textbf{T}_N \sqrt{\beta / 2}$ (meaning that all entries in $\textbf{T}_N$ are multiplied by $\sqrt{\beta / 2}$). This is due to a difference in normalization; they define a $\beta$-Hermite ensemble $\mathbb{P}_{\beta}'$ through 
\begin{flalign}
\label{betadistribution2}
\mathrm{d} \mathbb{P}_{\beta}' (X) = c' (\beta) \exp\left( \displaystyle\frac{-1}{2} \displaystyle\sum_{i = 1}^N x_i^2 \right) \displaystyle\prod_{1\le i < j \le N} |x_i - x_j|^{\beta} \displaystyle\prod_{i = 1}^N \mathrm{d}x_i, 
\end{flalign}

\noindent where $c' (\beta) = (2 / \beta)^{N (\beta N - \beta + 2) / 4} c (\beta)$. Their normalization is useful for understanding how the $\beta$-Hermite distribution behaves as $\beta$ tends to $\infty$; however, our normalization is useful for understanding the connection between the $\beta$-Hermite ensemble and map enumeration (see \hyperref[Maps]{Section \ref*{Maps}} and \hyperref[BetaEnsemble]{Section \ref*{BetaEnsemble}}). Observe that our normalization \eqref{betadistribution} is related to the normalization \eqref{betadistribution2} through the identity $\mathbb{P}_{\beta}' (X) = \mathbb{P}_{\beta} (X \sqrt{2 / \beta})$, where $Xc = \{ c x_1, c x_2, \ldots , c x_N \}$ for any $c \in \mathbb{R}$. 
\end{rem} 

For any nonnegative integer $k$ and $N \times N$ matrix $\textbf{M}$, let $P_k (\textbf{M})$ denote the sum of the $\binom{N}{k}$ principal minors of $\textbf{M}$. The following result shows how to obtain $\beta$-expectations of multi-part elementary symmetric polynomials from $\textbf{T}_N$. 

\begin{cor}
\label{elementarytridiagonal}

Let $N$ be a positive integer, and let $\lambda = (\lambda_1, \lambda_2, \ldots , \lambda_r)$ be a partition. Then, $\big\langle e_{\lambda} (X) \big\rangle = \mathbb{E} \big[ \prod_{i = 1}^r P_{\lambda_i} (\textbf{T}_N) \big]$. 
\end{cor}

\begin{proof}
For any nonnegative integer $k$ and $N \times N$ matrix $\textbf{M}$ with eigenvalues $\mu_1, \mu_2, \ldots , \mu_N$, we have that $e_k (\mu_1, \mu_2, \ldots , \mu_N) = P_k (\textbf{M})$. Therefore, $e_{\lambda} (\mu_1, \mu_2, \ldots , \mu_N) = \prod_{i = 1}^r P_{\lambda_i} (\textbf{M})$. Letting $\textbf{M} = \textbf{T}_N$, taking expectations, and applying \hyperref[tridiagonaldistribution]{Proposition \ref*{tridiagonaldistribution}} yields the corollary. 
\end{proof}

When $\textbf{M}$ is a tridiagonal matrix, we can evaluate $P_k (\textbf{M})$ explicitly in terms of the entries of $\textbf{M}$. We begin with the following recursion. 

\begin{prop}
\label{minorsrecursion}
Let $u_1, u_2, \ldots , u_{N - 1}$; $v_1, v_2, \ldots , v_N$; and $w_1, w_2, \ldots , w_{N - 1}$ be real numbers. Suppose that $\textbf{M}$ is an $N \times N$ tridiagonal matrix whose $(i, j)$ entry is $v_i$ if $i = j$; is $u_j$ if $i = j + 1$; is $w_i$ if $j = i + 1$; and is $0$ otherwise. Let $\textbf{M}'$ denote the $(N - 1) \times (N - 1)$ matrix obtained from removing the bottommost row and rightmost column of $\textbf{M}$, and let $\textbf{M}''$ denote the $(N - 2) \times (N - 2)$ matrix obtained from removing the bottommost row and rightmost column of $\textbf{M}'$. For each integer $k$, we have that 
\begin{flalign*}
P_k (\textbf{M}) = P_k (\textbf{M}') + v_N P_{k - 1} (\textbf{M}') - u_{N - 1} w_{N - 1} P_{k - 2} (\textbf{M}'').
\end{flalign*}
\end{prop}

\begin{proof}
Any principal $k \times k$ minor of $\textbf{M}$ either contains the entry $v_N$ (in which case we call it a {\itshape type 1 minor}) or does not contain the entry $v_N$. The sum of all type 1 minors is $v_N P_{k - 1} (\textbf{M}')$. Now, any $k \times k$ principal minor that is not type 1 is either contained in $\textbf{M}'$ (in which case we say that it is a {\itshape type 2 minor}) or contains the entry $u_{N - 1}$ (in which case we say it is a {\itshape type 3 minor}). The sum of all type 2 minors is $P_k (\textbf{M}')$. Any nonzero type 3 minor also contains the entry $w_{N - 1}$, so the sum of all type 3 minors is $-u_{N - 1} w_{N - 1} P_{k - 2} (\textbf{M}'')$. Summing over all types of minors yields the proposition. 
\end{proof}

The following corollary now expresses $P_k (\textbf{M})$ using the entries of $\textbf{M}$. 

\begin{cor}
\label{minors}

Suppose that the conditions of \hyperref[minorsrecursion]{Proposition \ref*{minorsrecursion}} hold. For any nonnegative integer $k$, we have that 
\begin{flalign}
\label{minorssum}
P_k (\textbf{M}) = \displaystyle\sum_{|S_1| + 2|S_2| = k} (-1)^{|S_2|} \displaystyle\prod_{i \in S_1} v_i \displaystyle\prod_{i \in S_2} u_i w_i,
\end{flalign}

\noindent where the sum is over all subsets $S_1 \subset [1, N]$ and $S_2 \subset [1, N - 1]$ such that $|S_1| + 2|S_2| = k$ and such that the following holds. For any $i \in S_2$, we have that $i, i + 1 \notin S_1$ and $i + 1 \notin S_2$; for any $i \in S_1$, we have that $i - 1, i \notin S_2$. (Equivalently, the pairs $\{ i, i + 1 \}$ indexed by $i \in S_2$ are pairwise disjoint and disjoint from $S_1$.)
\end{cor}

\begin{proof}
One can verify the cases $k \in \{ 0, 1 \}$ and $N \in \{ 1, 2 \}$ directly. Now, let the right side of \eqref{minorssum} be $\widetilde{P_k} (\textbf{M})$. Then, the $\widetilde{P}_k$ satisfy the recursion 
\begin{flalign*}
\widetilde{P}_k (\textbf{M}) = \widetilde{P}_k (\textbf{M}') + v_N \widetilde{P}_{k - 1} (\textbf{M}') - u_{N - 1} w_{N - 1} \widetilde{P}_{k - 2} (\textbf{M}'').
\end{flalign*}

\noindent The corollary now follows by induction on $k$ and \hyperref[minorsrecursion]{Proposition \ref*{minorsrecursion}}. 
\end{proof}

\subsection{Proof of Lemma \ref{integralelementary}} 

\label{Determinants}

\noindent In this section, we will use \hyperref[elementarytridiagonal]{Corollary \ref*{elementarytridiagonal}} and \hyperref[minors]{Corollary \ref*{minors}} to prove \hyperref[integralelementary]{Lemma \ref*{integralelementary}}. However, it might be useful to first give an alternate proof of \hyperref[singlepart]{Proposition \ref*{singlepart}}, in order to indicate the types of methods that we will use. As mentioned in \hyperref[analytic]{Remark \ref*{analytic}}, our proof of \hyperref[singlepart]{Proposition \ref*{singlepart}} will complement the one given by Aomoto in \cite{JPAWSI}. Instead of being analytic, it will be combinatorial, along the lines of the one given by Ullah \cite{EAANPDEUGI} in the case $\beta = 1$. 

\singlepart* 

\begin{proof}
For any positive integer $N$ and nonnegative integer $k$, let $\mathbb{E} \big[ P_k (\textbf{T}_N) \big] = P_k (N)$; observe that $P_k (N) = \big\langle e_k (X) \big\rangle$ by \hyperref[elementarytridiagonal]{Corollary \ref*{elementarytridiagonal}}. Due to \hyperref[minorsrecursion]{Proposition \ref*{minorsrecursion}}, we have that 
\begin{flalign*}
P_k (\textbf{T}_N) = P_k (\textbf{T}_{N - 1}) + G_N \sqrt{\frac{2}{\beta}} P_{k - 1} (\textbf{T}_{N - 1}) - \beta^{-1} X_{(N - 1) \beta}^2 P_{k - 2} (\textbf{T}_{N - 2}), 
\end{flalign*} 

\noindent where $G_N$ is a normalized, real Gaussian random variable that is independent from all entries in $\textbf{T}_{N - 1}$, and $X_{(N - 1) \beta}$ is a $\chi_{(N - 1) \beta}$ random variable that is independent from all entries in $\textbf{T}_{N - 2}$. Taking expectations, we obtain that 
\begin{flalign*}
\mathbb{E} \big[ P_k (\textbf{T}_N) \big] & = \mathbb{E} \big[ P_k (\textbf{T}_{N - 1}) \big] + \mathbb{E} \left[ G_N \sqrt{\frac{2}{\beta}} \right] \mathbb{E} \big[ P_{k - 1} (\textbf{T}_{N - 1}) \big] \\
& \qquad - \mathbb{E} \big[ \beta^{-1} X_{(N - 1) \beta}^2 \big] \mathbb{E} \big[ P_{k - 2} (\textbf{T}_{N - 2}) \big]. 
\end{flalign*} 

\noindent Using the facts that $\mathbb{E}[G_N \sqrt{2 / \beta}] = 0$ and $\mathbb{E}[\beta^{-1} X_{(N - 1) \beta}^2] = N - 1$, we deduce that 
\begin{flalign*}
P_k (N) = P_k (N - 1) - (N - 1) P_{k - 2} (N - 2). 
\end{flalign*}  

\noindent Since $P_0 (N) = 1$, induction on $k$ yields that $P_k (N) = (-1)^{k / 2} \binom{N}{k} \prod_{j = 1}^{k / 2 - 1} (2j + 1)$; this implies the proposition. 
\end{proof}

Now, let us turn to \hyperref[integralelementary]{Lemma \ref*{integralelementary}}. In order for this lemma to be true, we should have that $\big\langle e_{\lambda} (X) \big\rangle \in \mathbb{Z}[b]$ for any fixed positive integer $N$ and partition $\lambda$. The following proposition establishes this fact. 

\begin{prop}
\label{integralb}
For any fixed positive integer $N$ and partition $\lambda = (\lambda_1, \lambda_2, \ldots , \lambda_r)$, we have that $\big\langle e_{\lambda} (X) \big\rangle \in \mathbb{Z}[b]$. 
\end{prop}

\begin{proof}
By \hyperref[minors]{Corollary \ref*{minors}}, we have that 
\begin{flalign*}
\displaystyle\prod_{i = 1}^r P_{\lambda_i} (\textbf{T}_N) = \displaystyle\prod_{i = 1}^r \displaystyle\sum_{|S_{1, i}| + 2|S_{2, i}| = \lambda_i} \left( \displaystyle\frac{2}{\beta} \right)^{|S_{1, i}| / 2} (-1)^{|S_{2, i}|} \displaystyle\prod_{j \in S_{1, i}} G_j \displaystyle\prod_{j \in S_{2, i}} \displaystyle\frac{X_{j \beta}^2}{\beta},
\end{flalign*}

\noindent where the sum is over all subsets $S_{1, i} \subset [1, N]$ and $S_{2, i} \subset [1, N - 1]$ such that $|S_{1, i}| + 2|S_{2, i}| = \lambda_i$ and such that the following holds. For any $j \in S_{2, i}$, we have that $j, j + 1 \notin S_{1, i}$ and $j + 1 \notin S_{2, i}$; for any $j \in S_{1, i}$, we have that $j - 1, j \notin S_{2, i}$. Here, the $G_j$ are normalized, real Gaussian random variables, and the $X_{j \beta}$ are $\chi_{j \beta}$ distributed random variables. 

Taking expectations, applying \hyperref[elementarytridiagonal]{Corollary \ref*{elementarytridiagonal}}, and recalling that $\alpha = 2 / \beta$, we deduce that 
\begin{flalign*}
\big\langle e_{\lambda} (X) \big\rangle = \mathbb{E} \left[ \displaystyle\prod_{i = 1}^r \displaystyle\sum_{|S_{1, i}| + 2|S_{2, i}| = \lambda_i} \alpha^{|S_{1, i}| / 2} (-1)^{|S_{2, i}|} \displaystyle\prod_{j \in S_{1, i}} G_j \displaystyle\prod_{j \in S_{2, i}} \displaystyle\frac{X_{j \beta}^2}{\beta} \right], 
\end{flalign*}

\noindent where the sums are as above. Expanding the product yields that $\big\langle e_{\lambda} (X) \big\rangle$ is equal to
\begin{flalign}
\label{sum}
\displaystyle\sum_{|S_{1, 1}| + 2 |S_{2, 1}| = \lambda_1} \cdots \displaystyle\sum_{|S_{1, r}| + 2 |S_{2, r}| = \lambda_r} \alpha^{\sum_{i = 1}^r |S_{1, i}| / 2} (-1)^{\sum_{i = 1}^r |S_{2, i}|} \displaystyle\prod_{j = 1}^N \mathbb{E}[G_j^{n_{1, j}}] \displaystyle\prod_{j = 1}^{N - 1} \mathbb{E} \left[ \displaystyle\frac{X_{j \beta}^{2 n_{2, j}}}{\beta^{n_{2, j}}} \right], 
\end{flalign} 

\noindent where the $S_{1, i}$ and $S_{2, i}$ are summed as above; $n_{1, j}$ denotes the number of integers $i \in [1, r]$ for which $j \in S_{1, i}$; and $n_{2, j}$ denotes the number of integers $i \in [1, r]$ for which $j \in S_{2, i}$. 

If $|\lambda|$ is odd, then $\big\langle e_{\lambda} (X) \big\rangle = 0$ due to symmetry of \eqref{betadistribution}. If $|\lambda|$ is even, then $\sum_{i = 1}^r |S_{1, i}|$ is even because $\sum_{i = 1}^r |S_{1, i}| + 2 \sum_{i = 1}^r |S_{2, i}| = |\lambda|$. Therefore, the exponent of $\alpha$ in \eqref{sum} is integral. Furthermore, for all nonnegative integers $n_{1, j}$ and $n_{2, j}$, we have that 
\begin{flalign*}
\mathbb{E}[G_j^{n_{1, j}}] \in \mathbb{Z}; \qquad \mathbb{E} \left[ \displaystyle\frac{X_{j \beta}^{2 n_{2, j}}}{\beta^{n_{2, j}}} \right] = \displaystyle\prod_{l = 0}^{n_{2, j} - 1} (j + l \alpha) \in \mathbb{Z}[\alpha]. 
\end{flalign*}

\noindent Thus, \eqref{sum} is the sum of polynomials in $\alpha$ with integral coefficients, so $\big\langle e_{\lambda} (X) \big\rangle \in \mathbb{Z}[\alpha]$. Since $b = \alpha - 1$, we conclude that $\big\langle e_{\lambda} (X) \big\rangle \in \mathbb{Z}[b]$. 
\end{proof}

Using \hyperref[integralb]{Proposition \ref*{integralb}}, we may now establish \hyperref[integralelementary]{Lemma \ref*{integralelementary}}. 

\integralelementary* 

\begin{proof}
By \eqref{recursionpower} and induction on $|\lambda|$, we find that $\big\langle p_{\lambda} (X) \big\rangle \in \mathbb{Q}[N, b]$ is a rational polynomial whose degree in $N$ is at most equal to $|\lambda|$. Since the multi-part elementary symmetric polynomials are in the $\mathbb{Q}$-span of the multi-part power sums, we deduce that $\big\langle e_{\lambda} (X) \big\rangle \in \mathbb{Q}[N, b]$ is a rational polynomial whose degree in $N$ is at most equal to $|\lambda|$. Thus, there are rational polynomials $R_{\lambda, i} (N) \in \mathbb{Q}[N]$ such that $\big\langle e_{\lambda} (X) \big\rangle = \sum_{i = 0}^{d_{\lambda}} b^i R_{\lambda, i} (N)$, for some nonnegative integer $d_{\lambda}$. 

From \hyperref[integralb]{Proposition \ref*{integralb}}, $R_{\lambda, i} (N) \in \mathbb{Z}$ for each fixed positive integer $N$, partition $\lambda$, and integer $i \in [0, d_{\lambda}]$. Therefore, the $R_{\lambda, i}$ are integer linear combinations of the binomials $\binom{N}{0}, \binom{N}{1}, \ldots $. Applying this to the equality $\big\langle e_{\lambda} (X) \big\rangle = \sum_{i = 0}^{d_{\lambda}} b^i R_{\lambda, i} (N)$ and using the fact that $\big\langle e_{\lambda} (X) \big\rangle$ is of degree at most $|\lambda|$ in $N$, we deduce the lemma. 
\end{proof} 

\begin{rem}
In the proof of \hyperref[integralelementary]{Lemma \ref*{integralelementary}}, we used the fact that $\big\langle p_{\lambda} (X) \big\rangle \in \mathbb{Q}[N, b]$, which was shown by La Croix through analytic methods (in fact, he shows the stronger statement that $\big\langle p_{\lambda} (X) \big\rangle \in \mathbb{Z}[N, b]$). It is also possible to give a combinatorial proof of this fact using the tridiagonal matrix model reviewed in \hyperref[Tridiagonal]{Section \ref*{Tridiagonal}}, but we will not pursue this here. 
\end{rem}

\begin{rem}
\label{elementaryevaluation}

\hyperref[integralelementary]{Lemma \ref*{integralelementary}} does not explicitly evaluate $\big\langle e_{\lambda} (X) \big\rangle$. We will investigate the question of how to explicitly evaluate this expectation in \hyperref[expectationselementary]{Section \ref*{expectationselementary}}. 
\end{rem}

The following corollary of  \hyperref[integralelementary]{Lemma \ref*{integralelementary}} will not be used but may be of independent interest. 

\begin{cor}
\label{integralsymmetric}

For any symmetric polynomial $f$ with integer coefficients, there are integers $d_f$ and $h_{i, j}^{(f)}$ such that 
\begin{flalign*}
\big\langle f(X) \big\rangle = \displaystyle\sum_{i = 0}^{d_f} \displaystyle\sum_{j = 0}^{\deg f} h_{i, j}^{(f)} b^i \binom{N}{j}. 
\end{flalign*}
\end{cor}

\begin{proof}
This follows from \hyperref[integralelementary]{Lemma \ref*{integralelementary}} and the fact that $f$ is in the $\mathbb{Z}$-span of the multi-part elementary symmetric polynomials $e_{\lambda}$. 
\end{proof}

\begin{rem}
One can show that $d_f \le \deg f / 2$ using the fact that $\big\langle p_{\lambda} (X) \big\rangle$ is of degree at most $\deg f / 2$ in $b$ (which follows by \eqref{recursionpower} and induction on $|\lambda|$). 
\end{rem}

\begin{rem}
\label{integral}
\hyperref[integralsymmetric]{Corollary \ref*{integralsymmetric}} may be of independent interest, since it has implicitly appeared previously in multiple articles in random matrix theory but has not been stated or proven in full generality until now. For instance, \eqref{partiallyorientableseries} (and thus its special cases \eqref{orientableseries} and \eqref{nonorientableseries}) exhibit the phenomenon stated in \hyperref[integralsymmetric]{Corollary \ref*{integralsymmetric}}. Furthermore, in response to Conjecture 3.4 of Goulden and Jackson in \cite{MLOSIORSM}, Okounkov \cite{PCGJ} (and later Dumitriu in Theorem 8.5.1 in \cite{ESBE}) explicitly evaluated the $\beta$-expectation of the Jack polynomial $J^{(2 / \beta)} (X)$; one may also verify that this expectation satisfies the statement of \hyperref[integralsymmetric]{Corollary \ref*{integralsymmetric}}.
\end{rem}

\begin{rem}
The proofs of the facts mentioned in \hyperref[integral]{Remark \ref*{integral}} are analytic \cite{ESBE, TCJPGSTM, PCGJ}. However, the proof of \hyperref[integralsymmetric]{Corollary \ref*{integralsymmetric}} is mainly elementary and combinatorial, as it uses the tridiagonal matrix model for the $\beta$-Hermite ensemble instead of explicit integration against the distribution \eqref{betadistribution}. 
\end{rem}


\subsection{The Polynomials \texorpdfstring{$\big\langle e_1 (X)^{2q} \big\rangle$}{} and \texorpdfstring{$\big\langle e_2 (X)^q \big\rangle$}{}}

\label{Coefficient}

In this section, we will discuss the modulo $q$ properties of the polynomials $\big\langle e_1 (X)^{2q} \big\rangle$ and $\big\langle e_2 (X)^q \big\rangle$. Let us begin with the following result that evaluates $\big\langle p_1 (X)^{2s} \big\rangle$ and $\big\langle p_2 (X)^s \big\rangle$ explicitly. 

\begin{prop}
\label{powersums}

For any positive integer $s$, we have that 
\begin{flalign}
\label{power1}
\big\langle p_1 (X)^{2s} \big\rangle = N^s (b + 1)^s \displaystyle\prod_{i = 1}^s (2i - 1) 
\end{flalign}

\noindent and 
\begin{flalign}
\label{power2}
\big\langle p_2 (X)^s \big\rangle = \displaystyle\prod_{i = 0}^{s - 1} \big( N^2 + bN + 2i (b + 1) \big).
\end{flalign}
\end{prop}

\begin{proof}
Inserting $j = -1$ and $\lambda = 1^{2s - 1}$ into \eqref{recursionpower} yields 
\begin{flalign*}
\big\langle p_1 (X)^{2s} \big\rangle = (b + 1) (2s - 1) \big\langle p_0 (X) p_1 (X)^{2s - 2} \big\rangle. 
\end{flalign*}


\noindent Since $p_0 (X) = N$, \eqref{power1} follows by induction on $s$. 

Inserting $j = 0$ and $\lambda = 2^{s - 1}$ into \eqref{recursionpower} yields 
\begin{flalign*}
\big\langle p_2 (X)^s \big\rangle = b \big\langle p_0 (X) p_2 (X)^{s - 1} \big\rangle + 2 (s - 1) (b + 1) \big\langle p_2 (X)^{s - 1} \big\rangle + \big\langle p_0 (X)^2 p_2 (X)^{s - 1} \big\rangle. 
\end{flalign*}

\noindent Again using the fact that $p_0 (X) = N$, we deduce \eqref{power2} by induction on $s$.
\end{proof}

\begin{rem}

An alternative proof of \eqref{power1} can be obtained by changing variables from $x_i$ to $x_i + t$ in \eqref{betadistribution}, integrating over all $x_i$, and differentiating with respect to $t$. Similarly, an alternative proof of \eqref{power2} can be deduced from changing variables from $x_i$ to $t x_i$ in \eqref{betadistribution}, and then integrating and differentiating. The author is grateful to Peter Forrester for mentioning this. 

\end{rem}

\noindent Next, we transfer from power sums to elementary symmetric polynomials to understand the polynomials $\big\langle e_1 (X)^{2q} \big\rangle$ and $\big\langle e_2 (X)^q \big\rangle$ modulo $q$. 

\begin{cor}
\label{powersumsarithmetic}

For any prime $q \ge 3$, we have that $\big\langle e_1 (X)^{2q} \big\rangle$ and $2^q \big\langle e_2 (X)^q \big\rangle $ are integer polynomials in $N$ and $b$. Furthermore, $\big\langle e_1 (X)^{2q} \big\rangle \equiv 0 \mod q$ and 
\begin{flalign*}
2^q \big\langle e_2 (X)^q \big\rangle & \equiv  \big( (b + 1)^{q - 1} - 1 \big) N^2 + \big( b (b + 1)^{q - 1} - b^q \big) N \\ 
& \quad - (2q)! \binom{N}{2q} - 2 (q + 1)! \binom{N}{q + 1} - b^q q! \binom{N}{q} \mod q. 
\end{flalign*}
\end{cor}

\begin{proof}
The first statement follows from the facts that $e_1 (X) = p_1 (X)$, $2 e_2 (X) = p_1 (X)^2 - p_2 (X)$, and $\big\langle p_{\lambda} (X) \big\rangle \in \mathbb{Z}[N, b]$ for all partitions $\lambda$ (the last fact holds by \eqref{recursionpower} and induction on $|\lambda|$). The second statement follows from \eqref{power1} since $q$ divides $\prod_{i = 1}^q (2i - 1)$. 

For the third statement, observe that 
\begin{flalign*}
2^q \big\langle e_2 (X)^q \big\rangle & \equiv \big\langle \left( p_1 (X)^2 - p_2 (X) \right)^q \big\rangle \mod q \\
& \equiv \big\langle p_1 (X)^{2q} \big\rangle - \big\langle p_2 (X)^q \big\rangle \mod q, 

\noindent since $\big\langle p_{\lambda} (X) \big\rangle \in \mathbb{Z}[N, b]$ for each partition $\lambda$. Using the fact that $\big\langle p_1 (X)^{2q} \big\rangle \equiv 0 \mod q$ and applying \eqref{power2}, we deduce that 
\begin{flalign*}
2^q \big\langle e_2 (X)^q \big\rangle \equiv - \displaystyle\prod_{i = 0}^{q - 1} \big( N^2 + bN + 2i (b + 1) \big) \mod q. 
\end{flalign*} 

Using the known fact that $\prod_{i = 0}^{q - 1} (Y + 2i Z) \equiv Y^q - Y Z^{q - 1} \mod q$ for any integer polynomials $Y, Z \in \mathbb{Z}[N, b]$, we deduce (by letting $Y = N^2 + bN$ and $Z = b + 1$) that 
\begin{flalign}
\label{elementary2multiple}
2^q \big\langle e_2 (X)^q \big\rangle & \equiv (b + 1)^{q - 1} (N^2 + b N) - (N^2 + b N)^q \nonumber \\
& \equiv (b + 1)^{q - 1} (N^2 + bN) - N^{2q} - b^q N^q \mod q. 
\end{flalign}

\noindent Now, applying \eqref{elementary2multiple} and using the facts that 
\begin{flalign*}
(2q)! \binom{N}{2q} \equiv \displaystyle\prod_{i = 0}^{2q - 1} (N - i) \equiv N^2 (N^{q - 1} - 1)^2 \equiv N^{2q} - 2 N^{q + 1} + N^2 \mod q; \\
(q + 1)! \binom{N}{q + 1} \equiv N^{q + 1} - N^2 \mod q; \qquad q! \binom{N}{q} \equiv N^q - N \mod q, 
\end{flalign*}

\noindent we deduce the corollary. 
\end{proof}

\begin{rem}
\label{elementaryevaluation2}

In the proof of \hyperref[powersumsarithmetic]{Corollary \ref*{powersumsarithmetic}}, we explicitly evaluated $\big\langle e_1 (X)^{2q} \big\rangle$, but did not explicitly evaluate $\big\langle e_2 (X)^q \big\rangle$. Instead, we found the residue of this term modulo $q$ through the expectation $\big\langle p_2 (X)^q \big\rangle$ of a multi-part power sum. This leads to the question of whether we can find $\big\langle e_2 (X)^q \big\rangle$ explicitly; \hyperref[elementary12]{Proposition \ref*{elementary12}}  in \hyperref[expectationselementary]{Section \ref*{expectationselementary}} will lead to a partial answer. 
\end{rem}

We can now establish the following lemma. 

\begin{lem} 

\label{coefficients}

For any prime number $q \ge 3$, we have that
\begin{flalign*}
 \displaystyle\sum_{i = 0}^{q - 1} a_{1^{2q}, i} \binom{N}{i} - 2^q \displaystyle\sum_{i = 0}^{q - 1} a_{2^q, i} \binom{N}{i} \equiv (N - N^2) \displaystyle\sum_{i = 1}^{q - 1} (-b)^i \mod q, 
\end{flalign*}

\noindent where the $a_{\lambda, i}$ are the polynomials defined by \hyperref[integralelementary]{Lemma \ref*{integralelementary}}. 
\end{lem}

\begin{proof}
From \hyperref[powersumsarithmetic]{Corollary \ref*{powersumsarithmetic}}, we obtain 
\begin{flalign*}
 \displaystyle\sum_{i = 0}^{2q} a_{1^{2q}, i} \binom{N}{i} - 2^q \displaystyle\sum_{i = 0}^{2q} a_{2^q, i} \binom{N}{i} & \equiv \big\langle e_1 (X)^{2q} \big\rangle - 2^q \big\langle e_2 (X)^q \big\rangle \\
& \equiv (2q)! \binom{N}{2q} + 2 (q + 1)! \binom{N}{q + 1} + b^q q! \binom{N}{q} \\
& \quad - \big( (b + 1)^{q - 1} - 1 \big) N^2 - \big( b (b + 1)^{q - 1} - b^q \big) N \mod q. 
\end{flalign*}

\noindent Therefore, we deduce that 
\begin{flalign*}
 \displaystyle\sum_{i = 0}^{q - 1} a_{1^{2q}, i} \binom{N}{i} - 2 \displaystyle\sum_{i = 0}^{q - 1} a_{2^q, i} \binom{N}{i} \equiv \big( 1 - (b + 1)^{q - 1} \big) N^2 - \big( b (b + 1)^{q - 1} - b^q \big) N \mod q. 
\end{flalign*}

\noindent Now the lemma follows from the fact that 
\begin{flalign*}
1 - (b + 1)^{q - 1} \equiv b (b + 1)^{q - 1} - b^q \equiv - \displaystyle\sum_{i = 1}^{q - 1} (-b)^i \mod q. 
\end{flalign*} 
\end{proof} 


\subsection{Proof of Theorem \ref{arithmeticpartial2}}

\label{ProofTheorem}

In this section, we will establish \hyperref[arithmeticpartial2]{Theorem \ref*{arithmeticpartial2}}. However, we first require a proof of \hyperref[divisible]{Lemma \ref*{divisible}}. Throughout, we will follow the notation used in \hyperref[Reduction]{Section \ref*{Reduction}}. 

\divisible*

\begin{proof}
The relationship between power sums and multi-part elementary symmetric polynomials is given by the Newton-Girard identity, which states that 
\begin{flalign*}
p_{2q} (X) = 2q \displaystyle\sum_{|\lambda| = 2q} \left( \displaystyle\frac{ \big( \ell (\lambda) - 1\big)!}{\prod_{i = 1}^{2q} \big( m_i (\lambda) \big)!} \displaystyle\prod_{i = 1}^{\lfloor |\lambda| / 2 \rfloor} (-1)^{m_{2i} (\lambda)} \right) e_{\lambda} (X). 
\end{flalign*}

\noindent Therefore, the coefficients $c_{\lambda}$ are defined by 
\begin{flalign}
\label{clambda}
c_{\lambda} = \displaystyle\frac{ \big( \ell (\lambda) - 1\big)!}{\prod_{i = 1}^{2q} \big( m_i (\lambda) \big) !} \displaystyle\prod_{i = 1}^{\lfloor |\lambda| / 2 \rfloor} (-1)^{m_{2i} (\lambda)}, 
\end{flalign} 

\noindent from which we deduce that 
\begin{flalign}
\label{coefficient}
c_{\lambda} \ell (\lambda) = \pm \binom{ \ell (\lambda)}{m_1 (\lambda), m_2 (\lambda), \ldots , m_{2q} (\lambda)} 
\end{flalign}

\noindent is a (possibly negative) multinomial coefficient and thus integral. Hence the denominator of $c_{\lambda}$ can be divisible by $q$ only if $q$ divides $\ell (\lambda)$. 

Since $\ell (\lambda) \le |\lambda| = 2q$, this implies that $\ell (\lambda) \in \{ q, 2q \}$. If $\ell (\lambda) = 2q$, then $\lambda = 1^{2q} = (1, 1, \ldots , 1)$ (with $2q$ ones). If $\ell (\lambda) = q$, then the right side of \eqref{coefficient} is divisible by $q$ unless $m_i (\lambda) = q = \ell (\lambda)$ for some integer $i \in [1, 2q]$. This implies that $i = 2$ and $\lambda = 2^q$. 

From \eqref{clambda}, we deduce that $c_{\lambda} = (2q)^{-1}$ when $\lambda = 1^{2q}$ and $c_{\lambda} = - q^{-1}$ when $\lambda = 2^q$. 
\end{proof}

Next, we will need the following proposition of Forrester and Witte \cite{MGBELED}, which states that $\big\langle p_{2q} (X) \big\rangle$ is a polynomial of degree $q + 1$ whose high-degree coefficients are explicit. 

\begin{prop}[{\cite[Theorem 3]{MGBELED}}]
\label{degree}

For each integer $s \ge 1$, we have that 
\begin{flalign*}
\big\langle p_{2s} (X) \big\rangle = \displaystyle\frac{1}{s + 1} \binom{2s}{s} N^{s + 1} + \Bigg( 2^{2s - 1} - \binom{2s - 1}{s} \Bigg) b N^s + r(N, b),
\end{flalign*}

\noindent where $r$ is a polynomial of degree $s - 1$ in $N$. 
\end{prop}

\begin{rem}
Forrester and Witte prove \hyperref[degree]{Proposition \ref*{degree}} as Theorem 3 in \cite{MGBELED} by finding loop equations for the resolvent of the $\beta$-Hermite ensemble; in fact, they find the first six leading order terms of $\big\langle p_{2s} (X) \big\rangle$. Though interesting, their methods seem unrelated to ours; therefore, we refer the reader to the original paper \cite{MGBELED} for the proof of this proposition. 
\end{rem}

Now, we may establish \hyperref[arithmeticpartial2]{Theorem \ref*{arithmeticpartial2}}. 

\arithmeticpartiallyorientable* 

\begin{proof}

Due to the fact that 
\begin{flalign*}
\displaystyle\frac{1}{s + 1} \binom{2s}{s} N^{s + 1} & + \Bigg( 2^{2s - 1} - \binom{2s - 1}{s} \Bigg) b N^s - \displaystyle\frac{(2s)!}{s!} \binom{N}{s + 1} \\
& - b \Bigg( 2^{2s - 1} + (s - 1) \binom{2s - 1}{s} \Bigg) s! \binom{N}{s} 
\end{flalign*}

\noindent is a polynomial of degree at most $s - 1$ in $N$, \hyperref[degree]{Proposition \ref*{degree}} states that there exists a polynomial $\tilde{r}(N, b)$ of degree $q - 1$ in $N$ such that 
\begin{flalign*}
\big\langle p_{2q} (X) \big\rangle = \displaystyle\frac{(2q)!}{q!} \binom{N}{q + 1} + b \Bigg( 2^{2q - 1} + (q - 1) \binom{2q - 1}{q} \Bigg) q! \binom{N}{q}+ \tilde{r} (N, b). 
\end{flalign*} 

\noindent Thus, \eqref{rationalsum3} may be rewritten as 
\begin{flalign}
\label{powerelementary4}
\big\langle p_{2q} (X) \big\rangle &= \displaystyle\frac{(2q)!}{q!} \binom{N}{q + 1} + b \Bigg( 2^{2q - 1} + (q - 1) \binom{2q - 1}{q} \Bigg) q! \binom{N}{q} \nonumber \\
& \quad + \displaystyle\sum_{\substack{ |\lambda| = 2q \\ \lambda \notin \{ 1^{2q}, 2^q \} } } \displaystyle\sum_{i = 0}^{q - 1} \widetilde{c_{\lambda}} a_{\lambda, i} \binom{N}{i} + \displaystyle\sum_{i = 0}^{q - 1} a_{1^{2q}, i} \binom{N}{i} - 2 \displaystyle\sum_{i = 0}^{q - 1} a_{2^q, i} \binom{N}{i}. 
\end{flalign}

\noindent Now let us clear denominators in \eqref{powerelementary4}. Let $K = (q - 1)! \prod_{\lambda} k_{\lambda}$, where $\lambda$ ranges over all partitions, not equal to $1^{2q}$ or $2^q$, of size $2q$, and the $k_{\lambda}$ are defined by \hyperref[divisible]{Lemma \ref*{divisible}}. Let $\widetilde{K} = 2^{q - 1} K$; then, $q$ does not divide $\widetilde{K}$. 

Multiplying \eqref{powerelementary4} by $\widetilde{K}$, we obtain that 
\begin{flalign}
\label{powerelementary5}
\widetilde{K} \big\langle p_{2q} (X) \big\rangle &= \displaystyle\frac{\widetilde{K} (2q)!}{q!} \binom{N}{q + 1} + \widetilde{K} b \Bigg( 2^{2q - 1} + (q - 1) \binom{2q - 1}{q} \Bigg) q! \binom{N}{q} \nonumber \\
& \quad + K \left( 2^{q - 1} \displaystyle\sum_{i = 0}^{q - 1} a_{1^{2q}, i} \binom{N}{i} - 2^q \displaystyle\sum_{i = 0}^{q - 1} a_{2^q, i} \binom{N}{i} \right) \nonumber \\ 
& \quad + \widetilde{K} \displaystyle\sum_{\substack{ |\lambda| = 2q \\ \lambda \notin \{ 1^{2q}, 2^q \} } } \displaystyle\sum_{i = 0}^{q - 1} \widetilde{c_{\lambda}} a_{\lambda, i} \binom{N}{i}. 
\end{flalign}

\noindent The first and second terms in \eqref{powerelementary5} can be evaluated directly in terms of $\widetilde{K}$. Indeed, from \eqref{binomial}, we deduce that
\begin{flalign}
\label{product2}
\displaystyle\frac{(2q)!}{q!} \binom{N}{q + 1} \equiv \displaystyle\frac{1}{q + 1} \binom{2q}{q} \displaystyle\prod_{i = 0}^q (N - i) \equiv 2 N^{q + 1} - 2 N^2 \mod q. 
\end{flalign}

\noindent Moreover, since 
\begin{flalign*}
2^{2q - 1} \equiv 2 \mod q; \quad (q - 1) \binom{2q - 1}{q} \equiv - 1 \mod q; \quad \displaystyle\prod_{i = 0}^{q - 1} (N - i) \equiv N^q - N \mod q, 
\end{flalign*}

\noindent we have that 
\begin{flalign}
\label{product1}
b \Bigg( 2^{2q - 1} + (q - 1) \binom{2q - 1}{q} \Bigg) q! \binom{N}{q} \equiv b N^q - b N \mod q. 
\end{flalign}

\noindent The third term on the right side of \eqref{powerelementary5} can be evaluated in terms of $K$ using \hyperref[coefficients]{Lemma \ref*{coefficients}} since $2^{q - 1} \equiv 1 \mod q$, and the fourth term is zero modulo $q$ due to \hyperref[divisible]{Lemma \ref*{divisible}}. Thus, we obtain that 
\begin{flalign}
\label{powerelementary6}
\widetilde{K} \big\langle p_{2q} (X) \big\rangle \equiv  \widetilde{K}  \big( 2 N^{q + 1} + b N^q - 2 N^2 - b N \big) + K \left( \displaystyle\sum_{i = 1}^{q-  1} (-b)^i \right) (N - N^2) \mod q.  
\end{flalign}


\noindent Since $2^{q - 1} \equiv 1 \mod q$, we have that $\widetilde{K} \equiv K \mod q$. Thus, the theorem follows after multiplying \eqref{powerelementary6} by $\widetilde{K}^{-1} \mod q$. 
\end{proof}

\section{Expectations of Elementary Symmetric Polynomials}

\label{expectationselementary}

Several times in the proof of \hyperref[arithmeticpartial2]{Theorem \ref*{arithmeticpartial2}}, we asked the question of how to explicitly evaluate the $\beta$-expectation $\big\langle e_{\lambda} (X) \big\rangle$ of a multi-part elementary symmetric polynomial (see, for instance, \hyperref[singlepart]{Proposition \ref*{singlepart}}, \hyperref[elementaryevaluation]{Remark \ref*{elementaryevaluation}}, and \hyperref[elementaryevaluation2]{Remark \ref*{elementaryevaluation2}}). In this section, we will show one way of doing this through a recurrence that parallels the power-sum recursion \eqref{recursionpower}. 

\subsection{Historical Context}

In general, the question of how to explicitly evaluate the expectations of symmetric polynomials against the $\beta$-Hermite ensemble has been one of both probabilistic and combinatorial interest since the mid 1980s. Let us review some of the known results in this direction. 

In 1986, Harer and Zagier \cite{TECMSC} found an explicit form for the $2$-expectation $\big\langle p_{2k} (X) \big\rangle_2$ of a single-part power sum and gave a combinatorial interpretation to the $2$-expectation $\big\langle p_{\lambda} (X) \big\rangle_2$ of arbitrary multi-part power sums. The analogs of these results for $\beta = 1$ were found by Goulden and Jackson \cite{MLOSIORSM} in 1997. In connection with map enumeration, Goulden and Jackson conjectured an explicit form for the $\beta$-expectation of the Jack polynomial $J^{(2 / \beta)} (X)$ in 1997 \cite{MLOSIORSM}. This conjecture was first proven in \cite{PCGJ} by Okounkov and then in a different way in Chapter 8.5 of \cite{ESBE} by Dumitriu.  The extension of Harer and Zagier's interpretation to the general $\beta$-Hermite ensemble was established by La Croix \cite{TCJPGSTM} in 2009. Moments of the $\beta$-Hermite ensemble have also been understood through loop equations for the resolvent, studied recently by Forrester and Witte in \cite{MGBELED} (see also references therein and \cite{LEACE} for related work). 

Also in 1986, Ullah \cite{EAANPDEUGI} found a closed form for the $1$-expectation $\big\langle e_k (X) \big\rangle_1$ of a single-part elementary symmetric polynomial (this now follows as a special case of \hyperref[singlepart]{Proposition \ref*{singlepart}}). Aomoto later found a different, more analytic, proof of \hyperref[singlepart]{Proposition \ref*{singlepart}} that generalizes to yield expectations of one-part elementary symmetric polynomials against the $\beta$-Jacobi ensemble \cite{JPAWSI}. In Chapter 17.8 of his book \cite{RM}, Mehta uses Aomoto's method to explicitly find the $\beta$-expectations $\big\langle m_{\lambda} (X) \big\rangle_{\beta}$ of monomial symmetric polynomials $m_{\lambda} (X)$, in which all parts of the partition $\lambda$ are less than or equal to $3$. These statements may be extended to more general partitions $\lambda$ but with more complex results as the parts of $\lambda$ increase. 

Relatedly, Andrews, Goulden, and Jackson explicitly evaluated the expectations of moments of determinants for the GOE and GUE in 2003 \cite{DRMJPRS}. Although an explicit form for the expectation $\big\langle e_N (X)^k \big\rangle_{\beta}$ of the $k$-th moment of the determinant of the general $\beta$-Hermite ensemble is not currently known, Dumitriu found the second moment explicitly and found recursive equations for the third and fourth moments (see Chapter 8.5 of \cite{ESBE}). Recently, Edelman and La Croix \cite{TSV} used a singular value decomposition to find the law of the absolute value of the determinant of the GUE; the analog for the GOE was later found by Bornemann and La Croix in \cite{TSVO}. 

Thus, there has been a significant effort over the past 30 years in explicitly evaluating $\big\langle f(X) \big\rangle_{\beta}$, for various symmetric functions $f$. We will be interested in the case when $f = e_{\lambda}$ is a multi-part elementary symmetric polynomial. In \hyperref[RecursionElementary]{Section \ref*{RecursionElementary}}, we will present a recurrence relation that evaluates $\big\langle e_{\lambda} (X) \big\rangle_{\beta}$ explicitly; we will also show how it can be applied in several cases. As a related application, we will show in \hyperref[Algorithm]{Section \ref*{Algorithm}} how this recursion can be used to give an algorithm that finds $\big\langle e_{\lambda} (X) \big\rangle_{\beta}$ efficiently for all partitions $\lambda$ of small length. 

\subsection{The Recursion} 

\label{RecursionElementary}

The recursion for expectations of elementary symmetric polynomials may be stated as follows. 

\begin{restatable}{thm}{recursion}
\label{recursion}

For any partition $\lambda = (\lambda_1, \ldots, \lambda_r)$ and integer $k \ge \max_{i \in [1, r]} \lambda_i$, we have that
\begin{flalign*}
k \big\langle e_{k, \lambda} (X) \big\rangle_{\beta} & = (N - k + 1) \alpha \displaystyle\sum_{i = 1}^r \big\langle e_{k - 1, \lambda_i - 1, \lambda \setminus \lambda_i} (X) \big\rangle_{\beta} \\
& \qquad - (N - k + 1)(N - k + 2) \big\langle e_{k - 2, \lambda} (X) \big\rangle_{\beta} \nonumber \\
& \qquad - \alpha \displaystyle\sum_{i = 1}^r \displaystyle\sum_{j = 1}^{\lambda_i - 1} (k - \lambda_i + 2j) \big\langle e_{k + j - 1, \lambda_i - j - 1, \lambda \setminus \lambda_i} (X) \big\rangle_{\beta}.
\end{flalign*}
\end{restatable} 

\begin{rem} 
\label{approximatecontainment}
From \hyperref[recursion]{Theorem \ref*{recursion}}, we deduce that $\big\langle e_{k, \lambda} (X) \big\rangle_{\beta}$ is a linear combination of $\big\langle e_{\mu} (X) \big\rangle_{\beta}$, where $\mu$ is either of the form $(k, \lambda_i - 1, \lambda \setminus \lambda_i)$ for some $i \in [1, r]$; of the form $(k + j - 1, \lambda_i - j - 1, \lambda \setminus \lambda_i)$ for some $i \in [1, r]$ and $j \in [1, \lambda_i - 1]$; or equal to $(k - 2, \lambda)$. In each of these cases, $\ell (\mu) \le \ell (\lambda)$ and $\mu_i \le \lambda_i$ for each $i \in [2, r]$. This property, which is not shared by the recurrence \eqref{recursionpower} given by La Croix, will later allow for an efficient algorithm that finds $\big\langle e_{\lambda} (X) \big\rangle_{\beta}$ for partitions $\lambda$ of small length (see \hyperref[algorithm]{Corollary \ref*{algorithm}}).  
\end{rem}

We will give the proof of \hyperref[recursion]{Theorem \ref*{recursion}} in \hyperref[RecursionProof]{Section \ref*{RecursionProof}}, but let us first show how it can be used to explicitly evaluate $\big\langle e_{\lambda} (X) \big\rangle_{\beta}$ for several classes of partitions $\lambda$; some of our results are summarized in the table in Figure 3. For instance, we can obtain another proof of \hyperref[singlepart]{Proposition \ref*{singlepart}}. 

\singlepart* 

\begin{proof}
Observe that $\big\langle e_0 (X) \big\rangle_{\beta} = 1$ since $\mathbb{P}_{\beta}$ is a probability measure. Applying \hyperref[recursion]{Theorem \ref*{recursion}} with empty $\lambda$ yields $k \big\langle e_k (X) \big\rangle_{\beta} = (N - k + 1)(k - 2 - N ) \big\langle e_{k - 2} (X) \big\rangle_{\beta}$ for each integer $k \ge 2$. The proposition then follows by induction on $k$. 
\end{proof}

\noindent Using \hyperref[recursion]{Theorem \ref*{recursion}}, we can also establish the following results.  

\begin{cor}
If $k$ is odd and positive, then $\big\langle e_{k, 1} (X) \big\rangle_{\beta} = (-1)^{(k - 1) / 2} \binom{N}{k} \alpha \prod_{j = 1}^{(k - 1) / 2} (2j + 1)$. 
\end{cor}

\begin{proof}
Applying \hyperref[recursion]{Theorem \ref*{recursion}} with the single-part partition $\lambda = (1)$, we obtain that 
\begin{flalign}
\label{recursion11}
\big\langle e_{k, 1} (X) \big\rangle_{\beta} = (N - k + 1) \alpha \big\langle e_{k - 1} (X) \big\rangle_{\beta} - (N - k + 1)(N - k + 2) \big\langle e_{k - 2, 1} (X) \big\rangle_{\beta}.
\end{flalign}

\noindent If $k = 1$, then \eqref{recursion11} implies that $\big\langle e_{1, 1} (X) \big\rangle_{\beta} = N \alpha$. If $k \ge 3$, then the result follows from \hyperref[singlepart]{Proposition \ref*{singlepart}}, \eqref{recursion11}, and induction on $k$. 
\end{proof}

\begin{prop}
If $k$ is an even nonnegative integer, then 
\begin{flalign*}
\big\langle e_{k, 2} (X) \big\rangle_{\beta} = (-1)^{k / 2 - 1} \displaystyle\binom{N}{k} \left( k \displaystyle\binom{\alpha + 1}{2} + \displaystyle\binom{N}{2} \right) \displaystyle\prod_{j = 1}^{k / 2 - 1} (2j + 1).
\end{flalign*}
\end{prop}

\begin{proof}
If $k = 0$, then the result follows from \hyperref[singlepart]{Proposition \ref*{singlepart}}, so suppose that $k \ge 2$. Inserting the single-part partition $\lambda = (2)$ into \hyperref[recursion]{Theorem \ref*{recursion}} yields 
\begin{flalign*}
k \big\langle e_{k, 2} (X) \big\rangle_{\beta} &= (N - k + 1) \alpha \big\langle e_{k - 1, 1} (X) \big\rangle_{\beta} - (N - k + 1)(N - k + 2) \big\langle e_{k - 2, 2} (X) \big\rangle_{\beta} \\
& \qquad - \alpha k \big\langle e_k (X) \big\rangle_{\beta} \\
&= (-1)^{k / 2 - 1} k (\alpha + 1) \alpha \binom{N}{k} \displaystyle\prod_{j = 1}^{k / 2 - 1} (2j + 1) \\
& \qquad - (N - k + 1) (N - k + 2) \big\langle e_{k - 2, 2} (X) \big\rangle_{\beta}. 
\end{flalign*}

\noindent The proposition now follows by induction on $k$. 
\end{proof}

\begin{prop}
\label{elementary12} 

If $s$ is a nonnegative integer, then $\big\langle e_1 (X)^{2s} \big\rangle_{\beta} = (N \alpha)^s \prod_{j = 1}^{s - 1} (2j + 1)$. Moreover, for any positive integers $s$ and $r$, we have that 
\begin{flalign*}
2 \big\langle e_1 (X)^{2s} e_2 (X)^r \big\rangle_{\beta} &= 2 s (N - 1) \alpha \big\langle e_1 (X)^{2s} e_2 (X)^{r - 1} \big\rangle_{\beta} \\
& \quad + (N - 1) (r - 1) \alpha \big\langle e_1 (X)^{2s + 2} e_2 (X)^{r - 2} \big\rangle_{\beta} \\
& \quad - N (N - 1) \big\langle e_1 (X)^{2s} e_2 (X)^{r - 1} \big\rangle_{\beta} - 2 \alpha (r - 1) \big\langle e_1 (X)^{2s} e_2 (X)^{r - 1} \big\rangle_{\beta}. 
\end{flalign*}
\end{prop}

\begin{proof}
Inserting $k = 1$ and $\lambda = 1^{2s - 1}$ into \hyperref[recursion]{Theorem \ref*{recursion}} yields $\big\langle e_1 (X)^{2s} \big\rangle_{\beta} = N \alpha (2s - 1) \big\langle e_1 (X)^{2s - 2} \big\rangle_{\beta}$. We therefore deduce the first part of the proposition by induction on $s$. The second statement of the proposition follows from inserting $k = 2$ and $\lambda = 1^s 2^{r - 1}$ (with $s$ ones and $r - 1$ twos) into \hyperref[recursion]{Theorem \ref*{recursion}}. 
\end{proof}

\begin{rem}
The first statement of \hyperref[elementary12]{Proposition \ref*{elementary12}} coincides with \eqref{power1} since $\alpha = b + 1$. The second statement of \hyperref[elementary12]{Proposition \ref*{elementary12}} allows one to evaluate $\big\langle e_2 (X)^s \big\rangle_{\beta}$ through an efficient recurrence, thereby partially answering the question asked in \hyperref[elementaryevaluation2]{Remark \ref*{elementaryevaluation2}}. We do not know of a compact, explicit expression for $\big\langle e_2 (X)^s \big\rangle_{\beta}$, but \hyperref[elementary12]{Proposition \ref*{elementary12}} might be sufficient to provide an alternative derivation of the arithmetic statement \hyperref[powersumsarithmetic]{Corollary \ref*{powersumsarithmetic}}. 
\end{rem}

\begin{prop}
\label{rationalpolynomial} 

For any partition $\lambda$, $\big\langle e_{\lambda} (X) \big\rangle_{\beta} \in \mathbb{Q}[N, \alpha]$ is a polynomial whose degree in $N$ is at most $|\lambda|$. Furthermore, $|\lambda|! \big\langle e_{\lambda} (X) \big\rangle_{\beta} \in \mathbb{Z}[N, \alpha]$. 
\end{prop}

\begin{proof}
We have that $\big\langle e_0 (X) \big\rangle = 1$ and $\big\langle e_1 (X) \big\rangle = 0$; therefore, the statement holds when $|\lambda| \in \{ 0, 1 \}$. Now the proposition follows from \hyperref[recursion]{Theorem \ref*{recursion}} and induction on $|\lambda|$. 
\end{proof}

\begin{rem}
Observe that \hyperref[rationalpolynomial]{Proposition \ref*{rationalpolynomial}} also follows from \hyperref[integralelementary]{Lemma \ref*{integralelementary}}. We do not know how to establish the latter result directly from \hyperref[recursion]{Theorem \ref*{recursion}}. 
\end{rem} 

\begin{figure}
\begin{center}
\begin{tabular}{|c|c|}
\hline 
\multicolumn{2}{|c|}{Expectations of Multi-part Elementary Symmetric Polynomials} \\ 
\hline 
$F(X)$ & $\big\langle F(X) \big\rangle_{\beta}$ \\
\hline 
$ e_{2k} (X)$ & $ (-1)^k \displaystyle\binom{N}{2k} \displaystyle\prod_{j = 1}^{k - 1} (2j + 1) $  \\
\hline 
$ e_{2k + 1, 1} (X)$ & $ (-1)^k \alpha \displaystyle\binom{N}{2k + 1} \displaystyle\prod_{j = 1}^k (2j + 1) $ \\
\hline 
$e_{2k + 2, 2} (X)$ & $ (-1)^k \displaystyle\binom{N}{2k + 2} \left( k \displaystyle\binom{\alpha + 1}{2} + \displaystyle\binom{N}{2} \right) \displaystyle\prod_{j = 1}^k (2j + 1) $ \\
\hline 
$e_1 (X)^{2s}$ & $(N \alpha)^s \displaystyle\prod_{j = 1}^{s - 1} (2j + 1)$ \\
\hline 
$ e_1 (X)^{2s} e_2 (X)$ & $\displaystyle\frac{1}{2} (2 s \alpha - N) (N - 1) (N \alpha)^s \displaystyle\prod_{j = 1}^{s - 1} (2j + 1)$ \\ 
\hline 
\end{tabular}
\end{center}
\caption{Some applications of \hyperref[recursion]{Theorem \ref*{recursion}} are listed in the above table.} 
\end{figure}


\subsection{Algorithm}

\label{Algorithm} 

Due to the interest and applications of the $\beta$-Hermite ensemble, Dumitriu, Edelman, and Shuman introduced an algorithm that evaluates $\big\langle f(X) \big\rangle_{\beta}$ explicitly in terms of $N$ and $\alpha = 2 / \beta$, for any symmetric polynomial $f$ of $N$ variables \cite{MVOP}. Though effective, their algorithm involves a change-of-basis to the Jack symmetric polynomials, which is a high-complexity process. For instance, in order to evaluate $\big\langle e_{2k} (X) \big\rangle_{\beta}$, this algorithm requires complexity $\exp \big( \Theta (\sqrt{k}) \big)$  (see Section 3 of \cite{MVOP}), even though $\big\langle e_{2k} (X) \big\rangle_{\beta}$ is explicit by \hyperref[singlepart]{Proposition \ref*{singlepart}}. 

One may observe that La Croix's recursive relation \eqref{recursionpower} also yields an algorithm that finds $\beta$-expectations of multi-part power sums $\big\langle p_{\lambda} (X) \big\rangle_{\beta}$ explicitly in terms of $N$ and $b$, for any partition $\lambda$. A change-of-basis (which, depending on $f$, might be a high-complexity process) would then let one evaluate $\big\langle f(X) \big\rangle_{\beta}$, for any symmetric polynomial $f$, as above. However, one may verify that this algorithm also requires complexity $\exp \big( \Theta (\sqrt{k}) \big)$ in order to evaluate $\big\langle e_{2k} (X) \big\rangle_{\beta}$. 

In general, we do not know if there is an algorithm that evaluates $\beta$-expectations of general multi-part elementary symmetric polynomials, power sums, or monomials efficiently. More specifically, it is unknown whether there exists an integer $d$ and an algorithm that finds $\big\langle e_{\lambda} (X) \big\rangle_{\beta}$ (or $\big\langle p_{\lambda} (X) \big\rangle_{\beta}$ or $\big\langle m_{\lambda} (X) \big\rangle_{\beta}$) in $O \big( |\lambda|^d \big)$ space and time, for any partition $\lambda$. However, in this section, we will provide a partial result in this direction by presenting an algorithm $\mathcal{A}_{\lambda}$ that finds $\big\langle e_{\lambda} (X) \big\rangle_{\beta}$, as a polynomial in $N$ and $\alpha$, with complexity $O \big( |\lambda|^d (d^2 + |\lambda|) \big)$ for all partitions $\lambda$ satisfying $\ell (\lambda) \le d$. 

Let $\lambda = (\lambda_1, \lambda_2, \ldots , \lambda_r)$ be a partition, and let $S_{\lambda}$ denote the set of partitions $\mu = (\mu_1, \mu_2, \ldots , \mu_s)$ such that either $|\mu| \le |\lambda| - 2$, $s \le r$, and $\mu_i \le \lambda_i$ for each integer $i \in [2, s]$ or such that $\mu = \lambda$. Our algorithm $\mathcal{A}_{\lambda}$ will evaluate $\big\langle e_{\mu} (X) \big\rangle_{\beta}$ for all $\mu \in S_{\lambda}$ in $O \big( |\lambda| |S_{\lambda}| \big)$ time and $O \big( \ell (\lambda)^2 |S_{\lambda}| \big)$ space. We will have $\mathcal{A}_{\lambda}$ perform the following steps. 

\begin{enumerate}
\item{Select an ordering $\mu^{(1)}, \mu^{(2)}, \ldots , \mu^{(|S_{\lambda}|)}$ of the partitions in $S_{\lambda}$ such that $|\mu^{(i)}| \le |\mu^{(i + 1)}|$ for each integer $i \in \big[1, |S_{\lambda}| - 1 \big]$.} 

\item{Create a list of variables $\{ e_{\mu} \}_{\mu \in S_{\lambda}}$ indexed by $S_{\lambda}$, and initialize $e_0 = 1$ and $e_1 = 0$.} 

\item{Let $i$ be the minimum integer such that $e_{\mu^{(i)}}$ has not been defined. Let $\mu^{(i)} = (k, \mu)$, where $k$ is the largest part of $\mu^{(i)}$.} 

\item{Define $e_{k, \mu}$ through the equality 
\begin{flalign}
\label{recursionalgorithm}
k e_{k, \mu} & = (N - k + 1) \alpha \displaystyle\sum_{i = 1}^r e_{k - 1, \mu_i - 1, \mu \setminus \mu_i} - (N - k + 1)(N - k + 2) e_{k - 2, \mu} \nonumber \\
& \quad - \alpha \displaystyle\sum_{i = 1}^r \displaystyle\sum_{j = 1}^{\mu_i - 1} (k - \mu_i + 2j) e_{k + j - 1, \mu_i - j - 1, \mu \setminus \mu_i}. 
\end{flalign}} 

\item{Repeat Steps 3 and 4 until $e_{\lambda}$ is defined; then output $e_{\lambda}$, and stop.} 
\end{enumerate}

By \hyperref[approximatecontainment]{Remark \ref*{approximatecontainment}}, the recursion given by \hyperref[recursion]{Theorem \ref*{recursion}} expresses $\big\langle e_{\lambda} (X) \big\rangle_{\beta}$ as a linear combination of $|\lambda| + 1$ terms of the form $\big\langle e_{\mu} (X) \big\rangle_{\beta}$ for $\mu \in S_{\lambda}$. Furthermore, since $S_{\mu} \subseteq S_{\lambda}$ for any partition $\mu \in S_{\lambda}$, we deduce that each $e_{\nu}$ on the right side of \eqref{recursionalgorithm} has been previously defined by $\mathcal{A}_{\lambda}$ (due to the minimality of $|(k, \mu)|$). Therefore, \hyperref[recursion]{Theorem \ref*{recursion}} ensures that $\mathcal{A}_{\lambda}$ outputs $\big\langle e_{\lambda} (X) \big\rangle_{\beta}$, since the $e_{\lambda}$ and the $\big\langle e_{\lambda} (X) \big\rangle_{\beta}$ satisfy the same recurrence relation with the same initial conditions. 

It remains to analyze the complexity of $\mathcal{A}$. 

\begin{prop}
\label{algorithm1}

The algorithm $\mathcal{A}_{\lambda}$ runs in time $O \big( |\lambda| |S_{\lambda}| \big)$ and space $O \big( \ell (\lambda)^2 |S_{\lambda}| \big)$. 
\end{prop}

\begin{proof}
The fact that $\mathcal{A}_{\lambda}$ requires space $O \big( \ell (\lambda)^2 |S_{\lambda}| \big)$ is due to the fact that the data stored by $\mathcal{A}_{\lambda}$ consists of the variable set $\{ e_{\mu} \}_{\mu \in S_{\lambda}}$, and each variable is defined by a polynomial in $N$ and $\alpha$ of degree at most $\ell (\lambda)$ in each variable. Now let us turn to the run time of $\mathcal{A}$. Since $|\mu| \in \big[ 0, |\lambda| \big]$, for each $\mu \in S_{\lambda}$, the run time of Step 1 is $O \big( |\lambda| |S_{\lambda}| \big)$. The run time of Step 2 is $O \big( |S_{\lambda}| \big)$. The run time of Step 3 is $O(1)$, and the run time of Step 4 is $O \big( |\lambda| \big)$, since there are $|\lambda| + 1$ terms on the right side of \eqref{recursionalgorithm}. Step 5 repeats Steps 3 and 4 $O \big( |S_{\lambda}| \big)$ times, so the run time of Step 5 of $\mathcal{A}$ is $O \big( |\lambda| |S_{\lambda}| \big)$. Summing over the steps of $\mathcal{A}$, we deduce that the run time of $\mathcal{A}$ is $O \big( |\lambda| |S_{\lambda}| \big)$. 
\end{proof}

\begin{cor} 
\label{algorithm} 

Suppose that $\lambda$ is a partition satisfying $\ell (\lambda) \le d$. Then, $\mathcal{A}$ evaluates $\big\langle e_{\lambda} (X) \big\rangle_{\beta}$ in space $O \big( d^2 |\lambda|^d \big)$ and time $O \big( |\lambda|^{d + 1} \big)$. 
\end{cor}

\begin{proof}
This follows from \hyperref[algorithm1]{Proposition \ref*{algorithm1}} and the fact that $|S_{\lambda}| \le |\lambda|^{\ell (\lambda)}$. 
\end{proof} 

\begin{rem}
Generally, $\ell (\lambda)$ is not bounded, which means that $\mathcal{A}$ does not provide a polynomial-time and polynomial-space algorithm that evaluates $\big\langle e_{\lambda} (X) \big\rangle_{\beta}$. However, $\ell (\lambda)$ is bounded for some classes of partitions $\lambda$; in such cases, $\mathcal{A}$ is efficient. For instance, $\mathcal{A}$ is efficient when evaluating small moments of determinants of the $\beta$-Hermite ensemble. 
\end{rem}

\subsection{Proof of Theorem \ref{recursion}} 

\label{RecursionProof} 

Our proof of \hyperref[recursion]{Theorem \ref*{recursion}} will be similar to Aomoto's proof \cite{JPAWSI} of \hyperref[singlepart]{Proposition \ref*{singlepart}}. In particular, we will use his ``de-symmetrizing'' idea, which involves transferring between expressions of the form $\big\langle e_k (X) \big\rangle$ and expressions of the form $\big\langle e_j (X \setminus \{ x_1 \} ) \big\rangle$. As in \hyperref[PartialArithmetic]{Section \ref*{PartialArithmetic}}, we write $\big\langle f(X) \big\rangle$ in place of $\big\langle f(X) \big\rangle_{\beta}$ for any symmetric polynomial $f$. 

The following lemma, whose proof will be deferred to \hyperref[TransferalProof]{Section \ref*{TransferalProof}}, will facilitate this transferal. 

\begin{restatable}{lem}{transferal} 

\label{transferal}

Let $a \ge b \ge 0$ be integers, $\lambda$ be a partition, and $X = (x_1, x_2, \ldots , x_N)$ be a set of real variables. Then, 
\begin{flalign*}
N \big\langle e_{a, b} (X \setminus \{ x_1 \}) e_{\lambda} (X) \big\rangle = (N - a) \big\langle e_{a, b} (X) e_{\lambda} (X) \big\rangle - \displaystyle\sum_{i = 1}^b (a - b + 2i) \big\langle e_{a + i, b - i} (X) e_{\lambda} (X) \big\rangle. 
\end{flalign*}
\end{restatable}

\noindent Using this lemma, we can prove \hyperref[recursion]{Theorem \ref*{recursion}}. 

\recursion*

\begin{proof}
Let
\begin{flalign*}
\Omega_{\beta} (X) = c(\beta) \displaystyle\exp \left( \frac{-\beta}{4} \displaystyle\sum_{i = 1}^N x_i^2 \right) \displaystyle\prod_{1\le i < j \le N} |x_i - x_j|^{\beta}
\end{flalign*}

\noindent denote the density function of the $\beta$-Hermite ensemble \eqref{betadistribution}. Observe that
\begin{flalign}
\label{integraldensity}
\displaystyle\int_{-\infty}^{\infty} \displaystyle\frac{\partial}{\partial x_1} \big( & e_{k - 1} (X \setminus \{ x_1 \} ) e_{\lambda} (X) \Omega_{\beta} (X) \big) \mathrm{d} x_1 \nonumber \\
& = \displaystyle\lim_{z \rightarrow \infty} e_{k - 1} (X \setminus \{ x_1 \} ) e_{\lambda} (z, X \setminus \{ x_1 \} ) \Omega_{\beta} (z, X \setminus \{ x_1 \}) \nonumber \\
& \quad - \displaystyle\lim_{z \rightarrow -\infty} e_{k - 1} (X \setminus \{ x_1 \}) e_{\lambda} (z, X \setminus \{ x_1 \}) \Omega_{\beta} (z, X \setminus \{ x_1 \}) = 0, 
\end{flalign}
\noindent where the second equality holds because both limits are equal to $0$ (due to the exponential term in $\Omega_{\beta} (X)$). The derivative on the left side of \eqref{integraldensity} is equal to
\begin{flalign*}
& \displaystyle\frac{\partial}{\partial x_1} \big( e_{k - 1} (X \setminus \{ x_1 \} ) e_{\lambda} (X) \Omega_{\beta} (X) \big) \\
& \quad = \Omega_{\beta} (X) \Bigg( \displaystyle\sum_{i = 1}^{\ell(\lambda)} e_{k - 1} (X \setminus \{ x_1 \} ) e_{\lambda_i - 1} (X \setminus \{ x_1 \} ) e_{\lambda \setminus \lambda_i} (X) \\
& \qquad + \beta e_{\lambda} (X) \displaystyle\sum_{i = 1}^N \displaystyle\frac{e_{k - 1} (X \setminus \{ x_1 \})}{x_1 - x_i} - \displaystyle\frac{\beta x_1 e_{k - 1} (X \setminus \{ x_1 \} ) e_{\lambda} (X)}{2} \Bigg). 
\end{flalign*}
\noindent Integrating first over $x_1$, then integrating over $X \setminus \{ x_1 \}$, and applying \eqref{integraldensity} yields 
\begin{flalign*}
\displaystyle\sum_{i = 1}^{\ell(\lambda)} \big\langle e_{k - 1} (X \setminus \{ x_1 \} ) e_{\lambda_i - 1} (X \setminus \{ x_1 \} ) e_{\lambda \setminus \lambda_i} (X)  \big\rangle & + \beta \left\langle e_{\lambda} (X) \displaystyle\sum_{i = 1}^N \displaystyle\frac{e_{k - 1} (X \setminus \{ x_1 \})}{x_1 - x_i} \right\rangle \\ 
& - \displaystyle\frac{\beta \big\langle x_1 e_{k - 1} (X \setminus \{ x_1 \} ) e_{\lambda} (X) \big\rangle }{2} = 0. 
\end{flalign*} 

\noindent Multiplying by $2 N / \beta = N \alpha$, and rearranging terms yields
\begin{flalign}
\label{expectationvariable}
N \big\langle x_1 e_{k - 1} (X \setminus \{ x_1 \}) e_{\lambda} (X) \big\rangle & = 2N \displaystyle\sum_{i = 1}^N \left\langle \displaystyle\frac{e_{k - 1} (X \setminus \{ x_1 \} ) e_{\lambda} (X)}{x_1 - x_i} \right\rangle \nonumber \\
& \quad + N \alpha \displaystyle\sum_{i = 1}^{\ell(\lambda)} \big\langle e_{k - 1} (X \setminus \{ x_1 \} ) e_{\lambda_i - 1} (X \setminus \{ x_1 \} ) e_{\lambda \setminus \lambda_i} (X) \big\rangle.
\end{flalign}

By symmetry, $N \big\langle x_1 e_{k - 1} (X \setminus \{ x_1 \} ) e_{\lambda} (X) \big\rangle = k \big\langle e_k (X) e_{\lambda} (X) \big\rangle$. Using this fact and applying \hyperref[transferal]{Lemma \ref*{transferal}} to the second summand on the right side of \eqref{expectationvariable} yields 
\begin{flalign}
\label{expectationvariable2}
\begin{aligned}
k \big\langle e_k (X) e_{\lambda} (X) \big\rangle &= 2N \displaystyle\sum_{i = 1}^N \left\langle \displaystyle\frac{e_{k - 1} (X \setminus \{ x_1 \} ) e_{\lambda} (X)}{x_1 - x_i} \right\rangle \\ 
& \qquad + \alpha (N - k + 1) \displaystyle\sum_{i = 1}^{\ell(\lambda)} \big\langle e_{k - 1, \lambda_i - 1} (X) e_{\lambda \setminus \lambda_i} (X) \big\rangle \\
& \qquad - \alpha \displaystyle\sum_{i = 1}^{\ell(\lambda)} \displaystyle\sum_{j = 1}^{\lambda_i - 1} (k - \lambda_i + 2j) \big\langle e_{k + j - 1, \lambda_i - j - 1} (X) e_{\lambda^{(i)}} (X) \big\rangle. 
\end{aligned} 
\end{flalign}

\noindent By symmetry, the first term on the right side of \eqref{expectationvariable2} is equal to 
\begin{flalign*}
2N \displaystyle\sum_{i = 1}^N \left\langle \displaystyle\frac{e_{k - 1} (X \setminus \{ x_1 \} ) e_{\lambda} (X)}{x_1 - x_i} \right\rangle &= N \displaystyle\sum_{i = 1}^N \left\langle \displaystyle\frac{e_{k - 1} (X \setminus \{ x_1 \}) e_{\lambda} (X)}{x_1 - x_i} - \displaystyle\frac{e_{k - 1} (X \setminus \{ x_i \} ) e_{\lambda} (X)}{x_1 - x_i} \right\rangle \\
&= - N \displaystyle\sum_{i = 1}^N \big\langle e_{k - 2} (X \setminus \{ x_1, x_i \}) e_{\lambda} (X) \big\rangle \\
&= -N (N - 1) \big\langle e_{k - 2} (X \setminus \{ x_1, x_2 \}) e_{\lambda} (X) \big\rangle \\
&= (k - N - 1)(N - k + 2) \big\langle e_{k - 2} (X) e_{\lambda} (X) \big\rangle. 
\end{flalign*}

\noindent Inserting this into \eqref{expectationvariable2} yields the theorem. 
\end{proof} 

\subsection{Proof of Lemma \ref{transferal}}

\label{TransferalProof}

Here, we will give a proof of \hyperref[transferal]{Lemma \ref*{transferal}}. 

\transferal* 

\begin{proof}
For any nonnegative integers $t$ and $s$, let 
\begin{flalign*}
d_{t, s} (X) = \sum_{\substack{|I| = t \\ |J| = s}} \prod_{\substack{ i \in I \\ j \in J}} x_i x_j^2,
\end{flalign*}

\noindent where the sum ranges over all pairs $(I, J)$ of disjoint subsets $I, J \subseteq \{ 1, 2, \ldots , N \}$ such that $|I| = t$ and $|J| = s$. Observe that
\begin{flalign}
\label{expressionsum}
e_{a, b} (X) = e_a (X) e_b (X) = \displaystyle\sum_{i = 0}^b d_{a - b + 2i, b - i} (X) \binom{a - b + 2i}{i}. 
\end{flalign}

\noindent Furthermore, by symmetry, we have that 
\begin{flalign}
\label{nvariables}
N \big\langle g (X \setminus \{ x_1 \}) f(X) \big\rangle = \displaystyle\sum_{i = 1}^N \big\langle g (X \setminus \{ x_i \}) f(X) \big\rangle 
\end{flalign}

\noindent and 
\begin{flalign}
\label{tsvariables}
\quad N \big\langle d_{t, s} (X \setminus \{ x_i \}) f(X) \big\rangle = (N - t - s) \big\langle d_{t, s} (X) f(X) \big\rangle, 
\end{flalign}

\noindent for any symmetric polynomials $f$ and $g$ in $N$ variables. Applying \eqref{expressionsum} and \eqref{tsvariables} to \eqref{nvariables} (with $f(X) = e_{\lambda} (X)$ and $g(X) = e_{a, b} (X)$) yields 
\begin{flalign}
\label{absymmetric}
N \big\langle e_{a, b} (X \setminus \{ x_1 \} ) e_{\lambda} (X) \big\rangle &= \left\langle e_{\lambda} (X) \displaystyle\sum_{j = 1}^N e_{a, b} (X \setminus \{ x_j \} )\right\rangle \nonumber \\
&= \left\langle e_{\lambda} (X) \displaystyle\sum_{j = 1}^N \displaystyle\sum_{i = 0}^b \binom{a - b + 2i}{i} d_{a - b + 2i, b - i} (X \setminus \{ x_j \} ) \right\rangle \nonumber \\
&= \left\langle e_{\lambda} (X) \displaystyle\sum_{i = 0}^b \binom{a - b + 2i}{i} (N - a - i) d_{a - b + 2i, b - i} (X) \right\rangle. 
\end{flalign}

\noindent Multiplying \eqref{expressionsum} by $N - a$ and subtracting \eqref{absymmetric} yields 
\begin{flalign}
\label{sumdelementary}
(N - a) \big\langle e_{a, b} (X) e_{\lambda} (X) \big\rangle - N & \big\langle e_{a, b} (X \setminus \{ x_1 \}) e_{\lambda} (X) \big\rangle \nonumber \\
& = \left\langle e_{\lambda} (X) \displaystyle\sum_{i = 1}^b i \binom{a - b + 2i}{i} d_{a - b + 2i, b - i} (X) \right\rangle \nonumber \\
& = \left\langle e_{\lambda} (X) \displaystyle\sum_{i = 0}^{b - 1} (i + 1) \binom{a - b + 2i + 2}{i + 1} d_{a - b + 2i + 2, b - i - 1} (X) \right\rangle. 
\end{flalign}

\noindent Now, observe that 
\begin{flalign}
\label{binomial2}
(i + 1) \binom{a - b + 2i + 2}{i + 1} = (a - b + 2) \binom{a - b + 2i + 2}{i} + i \binom{a - b + 2i + 2}{i},
\end{flalign}

\noindent for each integer $i \in [0, b - 1]$. Multiplying \eqref{binomial2} by $d_{a - b + 2i + 2, b - i - 1}$, summing $i$ between $0$ and $b - 1$, and applying \eqref{expressionsum} (with the pair $(a, b)$ replaced by $(a + 1, b - 1)$), we deduce that 
\begin{flalign}
\label{sumd}
 \displaystyle\sum_{i = 0}^{b - 1} (i + 1) & \binom{a - b + 2i + 2}{i + 1} d_{a - b + 2i + 2, b - i - 1} (X) \nonumber \\
& = (a - b + 2) e_{a + 1, b - 1} (X) + \displaystyle\sum_{i = 1}^{b - 1} i \binom{a - b + 2i + 2}{i} d_{a - b + 2i + 2, b - i - 1} (X) \nonumber \\
& = (a - b + 2) e_{a + 1, b - 1} (X) + \displaystyle\sum_{i = 0}^{b - 2} (i  + 1) \binom{a - b + 2i + 4}{i + 1} d_{a - b + 2i + 4, b - i - 2} (X). 
\end{flalign}

\noindent Applying \eqref{sumd} with $(a, b)$ replaced with $(a + 1, b - 1)$ yields that 
\begin{flalign}
\label{sumd2} 
\displaystyle\sum_{i = 0}^{b - 2} (i  + 1) & \binom{a - b + 2i + 4}{i + 1} d_{a - b + 2i + 4, b - i - 2} (X) \nonumber \\
& = (a - b + 4) e_{a + 2, b - 2} (X) + \displaystyle\sum_{i = 0}^{b - 3} (i  + 1) \binom{a - b + 2i + 6}{i + 1} d_{a - b + 2i + 6, b - i - 3} (X). 
\end{flalign}

\noindent Inserting \eqref{sumd2} into the right side of \eqref{sumd}, we deduce that
\begin{flalign*}
 \displaystyle\sum_{i = 0}^{b - 1} (i + 1) & \binom{a - b + 2i + 2}{i + 1} d_{a - b + 2i + 2, b - i - 1} (X) \nonumber \\
& = \displaystyle\sum_{i = 1}^2 (a - b + 2i) e_{a + i, b - i} (X) +  \displaystyle\sum_{i = 0}^{b - 3} (i  + 1) \binom{a - b + 2i + 6}{i + 1} d_{a - b + 2i + 6, b - i - 3} (X).
\end{flalign*}

\noindent Repeating this procedure $b - 2$ more times yields 
\begin{flalign}
\label{sumd3}
 \displaystyle\sum_{i = 0}^{b - 1} (i + 1) & \binom{a - b + 2i + 2}{i + 1} d_{a - b + 2i + 2, b - i - 1} (X) = \displaystyle\sum_{i = 1}^b (a - b + 2i) e_{a + i, b - i} (X). 
\end{flalign}

\noindent Multiplying \eqref{sumd3} by $e_{\lambda} (X)$, taking expectations, and inserting the resulting equation into \eqref{sumdelementary} yields 
\begin{flalign*}
(N - a) \big\langle e_{a, b} (X) e_{\lambda} (X) \big\rangle - N \big\langle e_{a, b} (X \setminus \{ x_1 \} ) e_{\lambda} (X) \big\rangle = \displaystyle\sum_{i = 1}^b (a - b + 2i) \big\langle e_{a + i, b - i} (X) e_{\lambda} (X) \big\rangle, 
\end{flalign*}

\noindent from which we deduce the lemma. 
\end{proof}


\section{Further Directions} 

In this paper we established an arithmetic property of moments of the $\beta$-Hermite ensemble, and interpreted this result combinatorially. However, our methods were matrix theoretic rather than combinatorial, which led us to come across several intermediate results that might be of independent interest to combinatorialists and random matrix theorists alike. 

Still, our study was not exhaustive. We would like to conclude this discussion by listing several related directions that might be interesting to pursue.

\begin{enumerate}

\item{Is there a combinatorial proof of \hyperref[arithmeticpartial]{Theorem \ref*{arithmeticpartial}}?} 

\item{Our methods seem specific to evaluating $\big\langle p_{2q} (X) \big\rangle_{\beta}$ modulo $q$. What modulo $q$ properties do $\big\langle p_{\lambda} (X) \big\rangle_{\beta}$ satisfy for other partitions $\lambda$? For instance, what happens when $\lambda = (4q)$?} 

\item{The result \eqref{partiallyorientableseries} shows that the coefficients $h_{i, j}^{(f)}$ from \hyperref[integralsymmetric]{Corollary \ref*{integralsymmetric}} are nonnegative and have combinatorial interpretations when $f$ is a power sum. For what other symmetric polynomials $f$ are these coefficients nonnegative, and when do they have a combinatorial interpretation?} 

\item{In \hyperref[Algorithm]{Section \ref*{Algorithm}}, we presented an algorithm that finds $\big\langle e_{\lambda} (X) \big\rangle_{\beta}$ in $O(|\lambda|^{\ell (\lambda) + 2})$ complexity; this is of polynomial time and space when $\lambda$ has small length. Does there exist an algorithm that evaluates $\big\langle e_{\lambda} (X) \big\rangle_{\beta}$ in polynomial time and space, for all partitions $\lambda$?}

\end{enumerate}






\begin{thebibliography}{}

\bibitem{DRMJPRS} G. E. Andrews, I. P. Goulden, and D. M. Jackson, Determinants of Random Matrices and Jack Polynomials of Rectangular Shape, {\itshape Studies in Applied Math.} \textbf{110}, 377--390 (2003). 

\bibitem{JPAWSI} K. Aomoto, Jacobi Polynomials Associated With Selberg Integrals, {\itshape SIAM J. Math. Anal.} {\bfseries 18} 545--549 (1987). 

\bibitem{TANRMS} E. A. Bender and E. R. Canfield, The Asymptotic Number of Rooted Maps on a Surface, {\itshape J. Combin. Ser. A} \textbf{43}, 244--257 (1986). 

\bibitem{TMAC} E. A. Bender, Z. Gao, and L. B. Richmond, The Map Asymptotics Constant $t_g$, {\itshape Electron. J. Combin.} \textbf{15}, R51 (2008).  

\bibitem{TSVO} F. Bornemann and M. A. La Croix, The Singular Values of the GOE, preprint, \url{http://arxiv.org/pdf/1502.05946v2.pdf}. 

\bibitem{PD} E. Br\'{e}zin, C. Itzykson, G. Parisi, and J. B. Zuber, Planar Diagrams, {\itshape Commun. Math. Phys.} \textbf{59}, 35--51 (1978). 

\bibitem{TNOMAC} S. R. Carrell, The Non-Orientable Map Asymptotic Constant $p_g$, preprint, \url{http://arxiv.org/pdf/1406.1760v2.pdf}. 

\bibitem{ESBE} I. Dumitriu, {\itshape Eigenvalue Statistics for Beta-Ensembles}. Ph. D. Thesis, Massachusetts Institute of Technology (1999). 

\bibitem{MMBE} I. Dumitriu and A. Edelman, Matrix Models for Beta Ensembles, {\itshape J. Math. Phys.}, {\bfseries 43} 5830--5847 (2002). 

\bibitem{MVOP} I. Dumitriu, A. Edelman, and G. Shuman, MOPS: Multivariate Orthogonal Polynomials (Symbolically), {\itshape J. Symb. Comp.} \textbf{42}, 587--620 (2007).

\bibitem{TSV} A. Edelman and M. A. La Croix, The Singular Values of the GUE (Less is More), \emph{Random Matrices: Theory Appl.}, \textbf{4}, 1550021, 2015. 

\bibitem{MGBELED} P. J. Forrester and N. S. Witte, Moments of the Gaussian $\beta$ Ensembles and the Large-$N$ Expansion of the Densities, {\itshape J. Stat. Phys.} \textbf{55}, 083302 (2014). 

\bibitem{LEACE} P. J. Forrester and N. S. Witte, Loop Equation Analysis of the Circular $\beta$ Ensembles, {\itshape J. High Energy Phys.} \textbf{173}, (2015). 

\bibitem{APANRMS} Z. Gao, A Pattern for the Asymptotic Number of Rooted Maps on Surfaces, {\itshape J. Combin. Ser. A} \textbf{64}, 246--264 (1993). 

\bibitem{UAGC} S. Garoufalidis and M. Mari\~{n}o, Universality and Asymptotics of Graph Counting Problems in Non-Orientable Surfaces, {\itshape J. Combin. Ser. A} \textbf{117}, 715--740, 2010. 

\bibitem{AGPVECMSRCAC} I. P. Goulden, J. L. Harer, and D. M. Jackson, A Geometric Parameterization for the Virtual Euler Characteristic of the Moduli Spaces of Real and Complex Algebraic Curves, {\itshape Trans. Amer. Math. Soc.} \textbf{353}, 4405--4427 (2001). 

\bibitem{CCMMCCJSF} I. P. Goulden and D. M. Jackson, Connection Coefficients, Matchings, Maps and Combinatorial Conjectures for Jack Symmetric Functions, {\itshape Trans. Amer. Math. Soc.} \textbf{348}, 873--892 (1996). 

\bibitem{MLOSIORSM} I. P. Goulden and D. M. Jackson, Maps in Locally Orientable Surfaces and Integrals Over Real Symmetric Surfaces, {\itshape Canad. J. Math.} {\bfseries 49} 865--882 (1997). 

\bibitem{ADB} I. P. Goulden and A. Nica, A Direct Bijection for the Harer-Zagier Formula, {\itshape J. Combin. Theory Ser. A} \textbf{111}, 224--238 (2005). 

\bibitem{TECMSC} J. L. Harer and D. Zagier, The Euler Characteristic of the Moduli Space of Curves, {\itshape Invent. Math.} {\bfseries 85} 457--485 (1986).

\bibitem{APDTFSI} G. 't Hooft, A Planar Diagram Theory for Strong Interactions, {\itshape Nucl. Phys.} \textbf{72}, 461--473 (1974). 

\bibitem{TCJPGSTM} M. A. La Croix, {\itshape The Combinatorics of the Jack Parameter and the Genus Series for Topological Maps}. Ph. D. Thesis, University of Waterloo (2009). 

\bibitem{DC} B. Lass, D\'{e}monstration Combinatoire de la Formule de Harer-Zagier, {\itshape C. R. Acad. Sci. Paris Ser. I Math.} \textbf{333}, 155--160 (2001). 

\bibitem{SFHP} I. G. Macdonald, {\itshape Symmetric Functions and Hall Polynomials.} 2nd Edition. Oxford University Press, New York (1999). 

\bibitem{RM} M. L. Mehta, {\itshape Random Matrices}. San Diego, Academic Press, second edition, (1991).

\bibitem{PCGJ} A. Okounkov, Proof of a Conjecture of Goulden and Jackson, {\itshape Canad. J. Math.}, \textbf{49} 883--886 (1997). 

\bibitem{RMRP} A. Okounkov, Random Matrices and Random Permutations, {\itshape Intern. Math. Res. Not.} \textbf{20}, 1043--1095 (2000). 
 
\bibitem{ACPM} W. T. Tutte, A Census of Planar Maps, {\itshape Canad. J. Math.} \textbf{15}, 249--271 (1963). 

\bibitem{OEPM} W. T. Tutte, On the Enumeration of Planar Maps, {\itshape Bull. Amer. Math. Soc.} \textbf{74}, 64--74 (1968). 

\bibitem{EAANPDEUGI} N. Ullah, Ensemble Average of an Arbitrary Number of Pairs of Different Eigenvalues Using Grassmanian Integration, {\itshape Commun. Math. Phys.} {\bfseries 104} 693--695 (1986). 

\bibitem{MIME} A. Zvonkin, Matrix Integrals and Map Enumeration: An Accessible Introduction, {\itshape Math. Comput. Modelling} \textbf{26}, 281--304 (1997). 
\end{thebibliography}
\end{document}