\documentclass[12pt]{article}
\usepackage{e-jc}

\usepackage{amsmath,amsfonts,amsthm}
\usepackage{tikz}
\usepackage[utf8]{inputenc}
\usetikzlibrary{arrows,positioning,automata}
%
% use this command to create hyperlinks (optional and recommended)
\usepackage[colorlinks=true,citecolor=black,linkcolor=black,urlcolor=blue]{hyperref}

% use these commands for typesetting doi and arXiv references in the bibliography
\newcommand{\doi}[1]{\href{http://dx.doi.org/#1}{\texttt{doi:#1}}}
\newcommand{\arxiv}[1]{\href{http://arxiv.org/abs/#1}{\texttt{arXiv:#1}}}

\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{corollary}[theorem]{Corollary}
\theoremstyle{remark}
\newtheorem*{examp}{Example}
\newcommand{\Nn}{\mathbb N}
\newcommand{\kk}{\boldsymbol k}

\theoremstyle{definition}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{example}[theorem]{Example}
\newtheorem{conjecture}[theorem]{Conjecture}
\newtheorem{open}[theorem]{Open Problem}
\newtheorem{problem}[theorem]{Problem}
\newtheorem{question}[theorem]{Question}

\theoremstyle{remark}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{note}[theorem]{Note}

\title{On a matrix representation for polynomially recursive sequences}

\author{Christophe Reutenauer\\
\small Laboratoire de combinatoire et d'informatique math\'ematique,\\[-0.8ex]
\small Universit\'e du Qu\'ebec \`a Montr\'eal\\[-0.8ex]
\small Case postale 8888, succursale Centre-ville\\[-0.8ex]
\small Montr\'eal (Qu\'ebec) H3C 3P8, Canada\\[-0.8ex]
\small\tt reutenauer.christophe@uqam.ca
}
\bibliographystyle{plain}

\date{\dateline{Sep 17, 2012}{Sep 17, 2012}\\
\small Mathematics Subject Classifications: 05A10, 05A15, 05A19, 12H10, 33D15, 39A10, 39A70, 68R05}

\begin{document}
\maketitle 
\begin{abstract}
In this article we derive several consequences of a matricial characterization of P-recursive sequences. This characterization leads to canonical representations of these sequences. We show their uniqueness for a given sequence, up to similarity. We study their properties: operations, closed forms, d'Alembertian sequences, field extensions, positivity, extension of the sequence to $\mathbb Z$, difference Galois group. 
\end{abstract}

\section{Introduction}
Polynomial recursivity (Stanley \cite{stanley1}), or equivalently holonomy (Zeilberger \cite{zeilberger}), is a basic notion in the theory of integer sequences. It is known that P-recursive sequences are exactly the coefficient sequences of D-finite series. These sequences are interesting in particular because many sequences appearing in mathematics are P-recursive, and because the class of D-finite series contains the rational and the algebraic series; moreover, these sequences may be effectively computed and manipulated, see the book by Petkov\v{s}ek, Wilf and Zeilberger \cite{a=b}.

Following ideas in algebraic automata theory, we define {\em representations} of P-recursive sequences. Such a matrix representation may be seen as an analogue of the representation $r(1)r(2)\cdots r(n)$ of a hypergeometric sequence, by replacing the rational function $r(t)$ by a matrix of such functions. The basic characterization is Th.\ref{recursion=matrix}. This result motivates the notion of representation: the form we give is close to the Fliess representation $(\lambda, \mu, \gamma)$ of noncommutative rational series \cite{fliess}, following the work of Sch\"utzenberger (see \cite{berstel} and the references therein). 

These representations behave nicely with respect to the usual operations on P-recursive sequences, notably the multiplication. Minimal representations are studied: for a fixed P-recursive {\em germ} (that is, a sequence viewed at infinity), they are all similar, in the sense of pseudo-linear algebra. It is shown how general representations of a fixed germ are related to minimal ones. From this it follows that closed forms and d'Alembertian germs (introduced by Abramov and Petkov\v{s}ek \cite{abramov}) are nicely characterized by minimal representations: they have respectively diagonal and triangular form. 

We study several questions of arithmetic nature: extension of the ground field, positivity, extension of sequences to $\mathbb Z$ (in a way different from Stanley in \cite{stanley1}), Galois group in the sense of difference fields. We conclude by some comments on effectiveness and integer arithmetic.

The author thanks Marko Petkov\v{s}ek for useful e-mails in 1998. He also thanks Fran\c{c}ois Bergeron, S\'ebastien Labb\'e and Claudine Mitschi for useful discussions and mails, and the anonymous referee, who gave a long list of stylistic, grammatical and mathematical suggestions which helped to improve the article.

\section{Matrix representation}\label{repr}
Let $K$ be a field of characteristic 0. 
Recall that a sequence $f(n), n\in \mathbb N$, is said to be {\em polynomially recursive} (or {\em P-recursive}) if there exist $d\in \mathbb N^*$, $n_0\in \mathbb N$ and $d$ rational functions $r_1(t),\ldots,r_d(t)\in K(t)$ such that the $r_i(n)$ are defined for $n\geq n_0$ and that one has the recursion
\begin{equation}\label{recursion}
\forall n\geq n_0, f(n+d)=r_1(n)f(n+d-1)+\cdots +r_d(n)f(n).
\end{equation}    
We call {\em length} of the recursion the integer $d$ (whatever the value of $n_0$).

The basic theorem, on which the whole paper rests, is the following. 
This result
is implicitly contained in Proposition 4.1 of the book of van der Put and Singer \cite{put}. The ``if'' part is Theorem 1 of \cite{larcombe}. We formulate it in a more direct manner. Although this result can be derived from these two references, we give here a direct and simple proof\footnote{This was originally obtained independently by the author in 1997.}.

Before stating the result, recall that a {\em hypergeometric} sequence is a sequence $f$ such that for some $r(t)\in K(t)$, one has $f(n+1)=f(n)r(n+1)$ if $n$ is large enough. Then for some $n_0$, $f(n)=f(n_0)r(n_0+1)r(n_0+2)\cdots r(n)$ for any $n\geq n_0$. Thus, in view of the theorem below, P-recursive sequences appear as a matrix version of hypergeometric sequences.

\begin{theorem}\label{recursion=matrix}
A sequence $f(n)$ is P-recursive if and only if there exist $d\in \mathbb N^*$, $n_0\in \mathbb N$, a matrix $M(t)\in K(t)^{d\times d}$, a row matrix $\lambda\in K^{1\times d}$ and a column matrix $\gamma(t) \in K(t)^{d\times 1}$ such that $M(n)$ is defined for each integer $n\geq n_0+1$, that $\gamma(n)$ is defined for $n\geq n_0$ and that
\begin{equation}\label{matrix formula}
\forall n\geq n_0, f(n)=\lambda M(n_0+1) M(n_0+2)\cdots M(n)\gamma(n).
\end{equation}
\end{theorem}

We give two examples. 
The number $D_n$ of derangements (permutations without fixed points) in the symmetric group $S_n$ satisfies $D_0=1,D_1=0$ and $D_{n+2}=(n+1)D_{n+1}+(n+1)D_n$. Hence, we see by the proof below that
$$D_n=(1,0)
\left(
\begin{array}{cc}
0&1\\
1&1
\end{array}
\right)
\left(
\begin{array}{cc}
0&2\\
1&2
\end{array}
\right)
\cdots
\left(
\begin{array}{cc}
0&n\\
1&n
\end{array}
\right)
\left(
\begin{array}{cc}
1\\
0
\end{array}
\right).
$$
The number $I_n$ of involutions in $S_n$ satisfies $I_0=I_1=1$ and $I_{n+2}=I_{n+1}+(n+1)I_n$. Thus
$$I_n=(1,1)
\left(
\begin{array}{cc}
0&1\\
1&1
\end{array}
\right)
\left(
\begin{array}{cc}
0&2\\
1&1
\end{array}
\right)
\cdots
\left(
\begin{array}{cc}
0&n\\
1&1
\end{array}
\right)  
\left(
\begin{array}{cc}
1\\
0
\end{array}
\right).
$$
In these two examples, we have $n_0=0$, with the notations of the theorem.
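The two formulas above are easy to check mechanically. The following short Python script is a mere illustration, not part of the mathematical development (all names in it are ours); it verifies both formulas against the defining recursions for small $n$, using exact integer arithmetic since the matrices are only evaluated at integer points.
\begin{verbatim}
def mat_mul(A, B):
    """Product of two matrices given as lists of rows."""
    return [[sum(A[i][k] * B[k][j] for k in range(len(B)))
             for j in range(len(B[0]))] for i in range(len(A))]

def rep_value(lam, M_at, gamma, n):
    """Compute lambda * M(1) * M(2) * ... * M(n) * gamma (here n_0 = 0)."""
    row = [list(lam)]
    for k in range(1, n + 1):
        row = mat_mul(row, M_at(k))
    return mat_mul(row, [[g] for g in gamma])[0][0]

# Derangements: D_0 = 1, D_1 = 0, D_{n+2} = (n+1)(D_{n+1} + D_n).
D = [1, 0]
for n in range(2, 11):
    D.append((n - 1) * (D[-1] + D[-2]))
for n in range(11):
    assert rep_value((1, 0), lambda k: [[0, k], [1, k]], (1, 0), n) == D[n]

# Involutions: I_0 = I_1 = 1, I_{n+2} = I_{n+1} + (n+1) I_n.
I = [1, 1]
for n in range(2, 11):
    I.append(I[-1] + (n - 1) * I[-2])
for n in range(11):
    assert rep_value((1, 1), lambda k: [[0, k], [1, 1]], (1, 0), n) == I[n]

print("both matrix product formulas agree with the recursions for n <= 10")
\end{verbatim}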
\begin{proof}
Suppose that $f(n)$ satisfies Eq.(\ref{recursion}). Define $M(t)\in K(t)^{d\times d}$ to be the companion matrix
$$
\left(
\begin{array}{ccccccc}
0&\cdots&\cdots&0&r_d(t-1) \\
1&\ddots&&\vdots&\vdots\\
0&\ddots&\ddots&\vdots&\vdots \\
\vdots&\ddots&\ddots&0 &r_2(t-1)\\
0&\cdots&0&1&r_1(t-1)
\end{array}
\right)
$$
Note that $M(n)$ is defined for $n\geq n_0+1$. Let also $R(n)$ be the row matrix $(f(n),\ldots,f(n+d-1))\in K^{1\times d}$. Then
$$
R(n+1)=
(f(n+1),\ldots,
f(n+d))
$$
$$=(f(n),\ldots ,f(n+d-1))
\left(
\begin{array}{ccccccc}
0&\cdots&\cdots&0&r_d(n) \\
1&\ddots&&\vdots&\vdots\\
0&\ddots&\ddots&\vdots&\vdots \\
\vdots&\ddots&\ddots&0 &r_2(n)\\
0&\cdots&0&1&r_1(n)
\end{array}
\right)
=R(n)M(n+1)
$$
for any $n\geq n_0$. We deduce that for any such $n$,
$$R(n)=R(n_0)M(n_0+1)M(n_0+2)\cdots M(n).$$
It suffices now to take $\lambda=R(n_0)$ and $\gamma(t)=(1,0,\ldots,0)^T$.

Conversely, suppose that Eq.(\ref{matrix formula}) holds. Consider the column vectors $\gamma(t),M(t+1)\gamma(t+1),M(t+1)M(t+2)\gamma(t+2),\ldots$, in the $d$-dimensional vector space $K(t)^{d\times 1}$ over $K(t)$. There exist $l\leq d$ and $r_1(t),\ldots,r_l(t)\in K(t)$ such that
$$
M(t+1)\cdots M(t+l)\gamma(t+l)=r_1(t)M(t+1)\cdots M(t+l-1)\gamma(t+l-1)
$$
$$+\cdots+r_{l-1}(t)M(t+1)\gamma(t+1)
+r_l(t)\gamma(t).$$
Let $n_1\geq n_0$ be such that the $r_i(n)$ are defined for $n\geq n_1$. By letting $t=n$, multiplying on the left by $\lambda M(n_0+1)M(n_0+2)\cdots M(n)$ and using Eq.(\ref{matrix formula}), we obtain that for any $n\geq n_1$
$$
f(n+l)=\lambda M(n_0+1) M(n_0+2)\cdots M(n+l)\gamma(n+l)
$$
$$
=\lambda M(n_0+1)M(n_0+2)\cdots M(n) M(n+1)\cdots M(n+l)\gamma(n+l)
$$
$$
=r_1(n)\lambda M(n_0+1)M(n_0+2)\cdots M(n) M(n+1)\cdots M(n+l-1)\gamma(n+l-1)
$$
$$
+\cdots +r_{l-1}(n)\lambda M(n_0+1)M(n_0+2) \cdots M(n) M(n+1)\gamma(n+1)
$$
$$
+r_l(n)\lambda M(n_0+1)M(n_0+2)\cdots M(n) \gamma(n)
$$
$$
=r_1(n)f(n+l-1)+\cdots +r_{l-1}(n)f(n+1)+r_l(n)f(n).
$$
This proves that $f$ is P-recursive.
\end{proof}

The right-hand side of Eq.(\ref{matrix formula}) may be interpreted as a sum of labels of paths. We give this automata-like interpretation in the case $n_0=0$ for simplicity. Consider the directed graph with vertices $1,\ldots,d$ and an edge $i\rightarrow j$ whenever the entry $M(t)_{ij}$ is nonzero. To each path $\pi=i_0i_1\cdots i_n$ of length $n$ in this graph, we assign the label $$l(\pi)=M(1)_{i_0i_1}M(2)_{i_1i_2}\cdots M(n)_{i_{n-1}i_n}.$$ Then 
\begin{equation}\label{path}
f(n)=\sum_{ij} \lambda_i \left(\sum l(\pi)\right) \gamma_j(n),
\end{equation}
where the second sum is over all paths $\pi$ from $i$ to $j$ of length $n$.

We illustrate this in Figure \ref{automate}: the incoming (resp. outgoing) arrows indicate the row vector $\lambda$ (resp. column vector $\gamma(n)$); they are labelled with the corresponding element of the vector, with the following conventions: no label indicates that the label is 1; no arrow indicates that the label is 0. Thus, the number of involutions in $S_n$ is equal to the sum of the labels of the paths of length $n$ which end at 1; for example, for $n=3$, we have the paths $1\rightarrow 2\rightarrow 2\rightarrow 1$ with label $1\times 1\times 1$, $2 \rightarrow 2\rightarrow 2\rightarrow 1$ with label $1\times 1 \times 1$ and $2\rightarrow 1\rightarrow 2\rightarrow 1$ with label $1\times 2\times 1$, so that the number of involutions is 4, as it must be.

\begin{figure}[h]
\begin{center}
\begin{tikzpicture}[auto, node distance=2cm, >=stealth', 
initial text=, initial where=left, accepting where=right, 
accepting/.style=accepting by arrow] 
% les noeuds
\node[state,accepting,initial] (1) {$1$};
\node[state,initial] (2) [right=of 1] {$2$};
% les transitions
\draw[->,bend left=20] (1) to node {$n$} (2);
\draw[->,bend left=20] (2) to node {$1$} (1);
\draw[->] (2) to [loop above] node {$1$} ();
\end{tikzpicture}
\caption{Involutions}
\label{automate}
\end{center}
\end{figure}
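The following small Python sketch is an illustration only (the helper names are ours): it recomputes $I_n$ from Eq.(\ref{path}) by enumerating all paths of length $n$ in the graph of Figure \ref{automate} and summing their labels.
\begin{verbatim}
from itertools import product

def path_label(path, M_at):
    """Label M(1)_{i0 i1} M(2)_{i1 i2} ... M(n)_{i_{n-1} i_n} of a path."""
    out = 1
    for step, (i, j) in enumerate(zip(path, path[1:]), start=1):
        out *= M_at(step)[i][j]
    return out

def path_sum(lam, M_at, gamma, n, d=2):
    """Right-hand side of Eq. (path): sum over all paths of length n."""
    return sum(lam[p[0]] * path_label(p, M_at) * gamma[p[-1]]
               for p in product(range(d), repeat=n + 1))

M = lambda k: [[0, k], [1, 1]]          # matrix of the involution example
I = [1, 1]
for n in range(2, 9):
    I.append(I[-1] + (n - 1) * I[-2])
for n in range(9):
    assert path_sum((1, 1), M, (1, 0), n) == I[n]
print("the path sums reproduce I_n for n <= 8")
\end{verbatim}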


\section{Germs}\label{germs}

Following \cite{stanley1} and \cite{stanley2}, Section 6.4, we define an equivalence on the set of sequences over $K$ by $f\sim g$ if for some $n_0$ one has: $\forall n\geq n_0, f(n)=g(n)$. The equivalence class of $f$ is denoted by $[f]$ and called a {\em germ}. The set $\cal G$ of germs over $K$ is a vector space over $K(t)$. The action of $r(t) \in K(t)$ on $\phi \in \cal G$ is defined as follows: if the sequence $f$ is a representative of the germ $\phi$, then the sequence $g$ defined by $g(n)=r(n)f(n)$ for $n$ large enough is a representative of $r(t)\phi$; this is well-defined ({\em loc.\ cit.}). 

Let $\phi =[f]$. The {\em shift mapping} $E$ is defined on germs by $E(\phi)=\gamma$, where $\gamma =[g]$, with $g$ the sequence $g(n)=f(n+1)$. It is a $K$-linear automorphism of the $K$-vector space $\cal G$. It is not $K(t)$-linear, but satisfies the equation
\begin{equation}\label{E pseudo-linear}
E(r\phi)=\theta(r)E(\phi),
\end{equation}
where $\theta$ is the $K$-automorphism of $K(t)$ which sends $t$ onto $t+1$.

A sequence is P-recursive if and only if all the sequences in its equivalence class are P-recursive, \cite{stanley1}, Th.1.4. In this case, we say that the class is a {\em P-recursive germ}. 

\section{Representations}\label{Repr}

Let $\phi$ be a P-recursive germ. We call {\em representation} of  $\phi$ a quadruple $(\lambda,M(t),\gamma(t),n_0)$, satisfying the conditions stated in Th.\ref{recursion=matrix}, and such that the sequence $f$ defined by Eq.(\ref{matrix formula}) is a representative of $\phi$; its {\em dimension} is $d$. 
We also say that $(\lambda,M(t),\gamma(t),n_0)$ {\em represents} $\phi$ and $f$.

For $k\in \mathbb N$, the {\em $k$-translate} of representation $(\lambda, M(t), \gamma(t), n_0)$ is the representation $(\lambda',M(t),\gamma(t), n'_0)$, with $\lambda'=\lambda M(n_0+1)M(n_0+2)\cdots M(n_0+k)$ and $n'_0=n_0+k$. It represents the same germ: indeed, for any $n\geq n'_0$,
$$\lambda' M(n'_0+1)M(n'_0+2)\cdots M(n)\gamma(n)=$$
$$\lambda M(n_0+1)M(n_0+2)\cdots M(n_0+k) M(n_0+k+1)M(n_0+k+2) \cdots M(n)\gamma(n)$$
$$=\lambda M(n_0+1)M(n_0+2)\cdots M(n)\gamma(n).$$

A {\em translate} of a representation is a $k$-translate for some natural number $k$. Note that translating a representation $(\lambda,M(t),\gamma(t),n_0)$ changes neither $M(t)$ nor $\gamma(t)$. 

The next result is then evident.

\begin{lemma}\label{translate}
A P-recursive germ has representations $(\lambda,M(t),\gamma(t),n_0)$, with arbitrarily large $n_0$.
\end{lemma}
For later use, we state the following lemmas.
\begin{lemma}\label{shift}
If $(\lambda,M(t),\gamma(t),n_0)$ represents $\phi$, then for any natural integer $i$, $E^i(\phi)$ is represented by $(\lambda,M(t),M(t+1)\cdots M(t+i)\gamma(t+i),n_0)$.
\end{lemma}

\begin{proof}
Indeed, we have for any $n\geq n_0$, $f(n+i)=\lambda M(n_0+1)M(n_0+2)\cdots M(n+i)\gamma(n+i)=\lambda M(n_0+1)M(n_0+2)\cdots M(n) (M(n+1)\cdots M(n+i)\gamma(n+i))$.
\end{proof}

\begin{lemma} \label{construction}
Let $\phi,\phi_i$, $i=1,\ldots,d$, be germs, and let $M(t)\in K(t)^{d\times d}$ and $\gamma(t)\in K(t)^{d\times 1}$ be such that
$E(\phi_j)=\sum_{1\leq i\leq d}M(t+1)_{ij}\phi_i$ for $j=1,\ldots,d$, and $\phi=\sum_{1\leq i\leq d} \gamma(t)_i \phi_i$. Then there exist $\lambda\in K^{1\times d}$ and $n_0\in \mathbb N$ such that $(\lambda,M(t),\gamma(t),n_0)$ is a representation of $\phi$.
\end{lemma}

\begin{proof}
Let $f, f_i$ be representatives of $\phi,\phi_i$ and let $n_0$ be such that $$f_j(n+1)= \sum_{1\leq i\leq d}M(n+1)_{ij}f_i(n)$$ and $f(n)=\sum_{1\leq i\leq d} \gamma(n)_i f_i(n)$ for any $n\geq n_0$. Let $F(n)=(f_1(n),\ldots,f_d(n))$ and $\lambda=(f_1(n_0),\ldots,f_d(n_0))$. Then $F(n+1)=F(n)M(n+1)$ and therefore $F(n)=F(n_0)M(n_0+1)M(n_0+2)\ldots M(n)$. Since $f(n)=F(n)\gamma(n)$, the lemma follows.
\end{proof}

\section{Product and other operations}\label{operations}
We consider here several operations which preserve P-recursiveness. The results are of course not new. However, matrix representations are particularly well-adapted for the proofs.

Recall that the {\em product} of two germs $[f(n)]$ and $[g(n)]$ is the germ $[f(n)g(n)]$. This is well-defined.

In order to understand the generality of the next result (since there is an implicit assumption on the constant $n_0$), recall Lemma \ref{translate}.
\begin{theorem} \label{oper}
Consider the representations 
$(\lambda_i,M_i(t),\gamma_i(t),n_0)$ of two germs $\phi_i$, $i=1,2$.
 
(i) $\phi_1+\phi_2$ has the representation 
$$
((\lambda_1,\lambda_2),\left( \begin{array}{cc}M_1(t)&0\\0&M_2(t)\end{array}\right),\left( \begin{array}{c}\gamma_1(t)\\ \gamma_2(t)\end{array}\right),n_0).
$$

(ii) $\phi_1\phi_2$ has the representation $(\lambda_1\otimes \lambda_2,M_1(t)\otimes M_2(t),\gamma_1(t)\otimes\gamma_2(t),n_0)$.

Consider now a single germ $\phi=[f]$ with representation $(\lambda,M(t),\gamma(t),n_0)$. Let $r(t)\in K(t)$. Let $i,p$ be natural integers.

(iii) $r(t)\phi$ has the representation $(\lambda,M(t),r(t)\gamma(t),n_0)$, provided that $r(n)$ is defined for $n\geq n_0$.

(iv) The germ $[f(i+pn)_{n\in \mathbb N}]$ has the representation $(\lambda',M'(t),\gamma'(t),n'_0),$ where the natural integers $k,n'_0$ have been chosen in such a way that $n_0+k=i+pn'_0$, with $\lambda'=\lambda M(n_0+1)\cdots M(n_0+k)$, $M'(t)=M(i+p(t-1)+1)M(i+p(t-1)+2)\cdots M(i+pt)$ and $\gamma'(t)=\gamma(i+pt)$.
\end{theorem}

\begin{proof}
(i) and (iii) are straightforward. (ii) is a consequence of the multiplicativity of tensoring matrices: $(A_1\otimes A_2)(B_1\otimes B_2)=(A_1B_1)\otimes(A_2B_2)$. We verify (iv): let $n\geq n'_0$; then
$$
\lambda' M'(n'_0+1)\cdots M'(n) \gamma'(n)=$$
$$\lambda M(n_0+1)\cdots M(n_0+k)  M(i+p(n'_0+1-1)+1)\cdots M(i+p(n'_0+1)) \cdots $$
$$\cdots M(i+p(n-1)+1)\cdots M(i+pn) \gamma(i+pn)=f(i+pn).
$$
\end{proof}
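As an illustration of part (ii), the following Python sketch (not part of the proofs; the names are ours) checks that the Kronecker product of the two representations of Section \ref{repr} represents the product sequence $D_nI_n$.
\begin{verbatim}
def mat_mul(A, B):
    return [[sum(A[i][k] * B[k][j] for k in range(len(B)))
             for j in range(len(B[0]))] for i in range(len(A))]

def kron(A, B):
    """Kronecker (tensor) product of two matrices given as lists of rows."""
    return [[a * b for a in ra for b in rb] for ra in A for rb in B]

def rep_value(lam, M_at, gamma, n):
    row = [list(lam)]
    for k in range(1, n + 1):
        row = mat_mul(row, M_at(k))
    return mat_mul(row, [[g] for g in gamma])[0][0]

lamD, MD, gamD = (1, 0), lambda k: [[0, k], [1, k]], (1, 0)   # derangements
lamI, MI, gamI = (1, 1), lambda k: [[0, k], [1, 1]], (1, 0)   # involutions

lamP = kron([list(lamD)], [list(lamI)])[0]                    # lambda_1 (x) lambda_2
MP = lambda k: kron(MD(k), MI(k))                             # M_1(t) (x) M_2(t)
gamP = [r[0] for r in kron([[g] for g in gamD], [[g] for g in gamI])]

for n in range(11):
    assert rep_value(lamP, MP, gamP, n) == \
           rep_value(lamD, MD, gamD, n) * rep_value(lamI, MI, gamI, n)
print("the tensor representation reproduces D_n * I_n for n <= 10")
\end{verbatim}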

\begin{corollary}
Let $p$ be a positive integer and $f$ be a sequence. Then $f$ is P-recursive if and only if for any $i=0,\ldots,p-1$, the sequence $f(i+pn)_{n\in\mathbb N}$ is P-recursive.
\end{corollary}

\begin{proof}
The "only if" part follows from Th.\ref{oper} (iv). Let us prove the converse. Fix $p$ and $i, 0\leq i\leq p-1$. Since P-recursiveness is preserved by addition, it is enough to show that if a sequence $g(n)$ is P-recursive, then the following sequence $f(n)$ is also P-recursive: $f(i+pn)=g(n)$, and $f(n)=0$ if $n$ is not congruent to $i$ modulo $p$. 

Suppose that $g$ satisfies 
\begin{equation}\label{recursion1}
\forall n\geq n_0, g(n+d)=r_1(n)g(n+d-1)+\cdots +r_d(n)g(n).
\end{equation}   
Then $f$ satisfies the equation
$ f(n+dp)=r_1((n-i)/p)f(n+(d-1)p)+\cdots +r_d((n-i)/p)f(n)$ for $n$ large enough. Indeed, if $n$ is not congruent to $i$ modulo $p$, then neither are $n+dp$, $n+(d-1)p,\ldots,n$, hence the corresponding values of $f$ vanish and the equation is satisfied. Now, in the other case, replace in this equation $n$ by $i+pn$, obtaining: $f(i+pn+dp)=r_1(n)f(i+pn+(d-1)p)+\cdots +r_d(n)f(i+pn)$, that is $g(n+d)=r_1(n)g(n+d-1)+\cdots+r_d(n)g(n)$, which is Eq.(\ref{recursion1}).
\end{proof}
We shall also need the next result.
\begin{lemma}\label{LinearCombGamma}
Let $\phi_i$ have the representation $(\lambda, M(t), \gamma_i(t), n_0)$, for $i=1,\ldots,l$. If $\sum_ir_i(t)\gamma_i(t)=0$, then $\sum_ir_i(t)\phi_i=0$.
\end{lemma}
\begin{proof}
Without harm, we may assume that the $r_i(t)$ are polynomials. In this case, $r_i(n)$ is defined for $n\geq n_0$ and it follows from Th.\ref{oper} (iii) that $r_i(t)\phi_i$ has the representation $(\lambda, M(t),r_i(t)\gamma_i(t), n_0)$. It is easily deduced that $\sum_ir_i(t)\phi_i$ has the representation $(\lambda, M(t),\sum_ir_i(t)\gamma_i(t), n_0)$. Hence $\sum_ir_i(t)\phi_i=0$.
\end{proof}

%%%
\section{Pseudo-linear algebra}\label{pseudo-linear}

In this section, we follow \cite{jacobson}, see also \cite{bronstein}, with the addition of some apparently well-known results on diagonalizability and triangularizability, with their proofs, for which we could not find references. We restrict the presentation to the aspects that we need here\footnote{In full generality, one considers a field $F$ with an automorphism $\theta$ and a $\theta$-derivation; $K$ is then the {\em subfield of constants}, that is the subset of elements fixed by $\theta$ and which are in the kernel of the derivation; here we take $F=K(t)$ with $\theta(t)=t+1$ and the zero derivation.}. Note that pseudo-linear algebra has been used previously in the study of P-recursive sequences; see \cite{bronstein}.

Let $V,W$ be vector spaces over $F=K(t)$. An additive homomorphism $E$ from $V$ into $W$ is called {\em pseudo-linear} if for any $r$ in $K(t)$ and $v$ in $V$, one has $E(rv)=\theta(r)E(v)$, where $\theta$ is as in Section \ref{germs}. Thus the mapping $E$ in Section \ref{germs} is pseudo-linear, by Eq.(\ref{E pseudo-linear}).

Given bases $(v_j)$ and $(w_i)$ of $V,W$, the matrix of $E$ is, as usual, the matrix $M(E)=(m_{ij})$ with $E(v_j)=\sum_i m_{ij}w_i$. If the column vector $\gamma\in F^{\dim V\times 1}$ represents some vector $v\in V$ in the basis $(v_j)$, then $E(v)$ is represented in the basis $(w_i)$ by the column vector $M(E)\gamma^\theta$. If $u$ is a {\em linear} mapping, left composable with $E$, then $u\circ E$ is a pseudo-linear mapping and $M(u\circ E)=M(u)M(E)$; if $u$ is right composable with $E$, then $E\circ u$ is pseudo-linear and $M(E\circ u)=M(E)M(u)^\theta$.
In particular, if $v$ is a $K(t)$-linear isomorphism, and if $v^{-1}\circ E\circ v$ is defined, then 
\begin{equation}\label{conj}
M(v^{-1}\circ E\circ v)=M(v)^{-1}M(E)M(v)^\theta.
\end{equation}

Viewed otherwise, if $E$ is a pseudo-linear endomorphism of $V$ and has the matrices $M_e$ and $M_v$ in the bases $(e_i)$ and $(v_i)$ respectively, then these matrices are related by $M_v=P^{-1}M_eP^\theta$, where $P$ is the matrix whose $j$-th column represents the coefficients of $(v_j)$ expressed in the basis $(e_i)$ (see \cite{jacobson} p. 486). We say that two matrices $M,M'$ are {\em pseudo-similar} if for some square invertible matrix $P$ one has  $M'=P^{-1}MP^\theta$.

Let $E$ be a pseudo-linear endomorphism of $V$. A subspace $W$ of $V$ is called {\em stable} under $E$ if $E(W)\subset W$. In that case $E$ induces a pseudo-linear endomorphism of $W$ and of $V/W$.

As in the classical case, if $E(v)=rv$ for some $v\in V\setminus\{0\}$ and $r\in F$, we call $v$ an {\em eigenvector} of $E$, and $r$ the corresponding {\em eigenvalue}. Note that in this case, for any $a\in F^*$, $av$ is also an eigenvector; however, its eigenvalue is not $r$ in general, but $r\frac{a^\theta}{a}$; indeed, $E(av)=a^\theta E(v)=a^\theta rv=r\frac{a^\theta}{a}(av)$. One therefore cannot define eigenspaces as in the classical case. Observe that however the set of eigenvectors with a given eigenvalue, together with the zero vector, is a $K$-subspace.

We say that the pseudo-linear endomorphism $E$ of $V$ is {\em diagonalizable} if there is a basis of $V$ consisting of eigenvectors. Note that if $V$ has a spanning set of eigenvectors of $E$, then it is diagonalizable. Equivalently, $E$ is diagonalizable if and only if in some basis, its matrix is a diagonal matrix. Correspondingly, we say that a square matrix $M$ over $F$ is {\em pseudo-diagonalizable} if it is the matrix of some diagonalizable pseudo-linear endomorphism; in other words, there exists a square invertible matrix $P$ such that $P^{-1}MP^\theta$ is a diagonal matrix.

If $W$ is a subspace of $V$ stable under a diagonalizable pseudo-linear endomorphism $E$ of $V$, then the endomorphisms induced by $E$ on $W$ and $V/W$ are diagonalizable. This is easy to see for $V/W$, since the basis of eigenvectors of $E$ gives a spanning set of eigenvectors in $V/W$. For $W$, we argue as follows: consider the ring of skew polynomials $R=F[x]$, where the product is subject to the rule $xa=a^\theta x$ for any $a\in F$. Then $V$ is a left $R$-module, extending its structure of vector space over $F$, by the rule $xv=E(v)$. Stable subspaces in the previous sense coincide with $R$-submodules. If $E$ is diagonalizable, then $V$ is a semisimple $R$-module: for any eigenvector $v$, the line $Fv$ is stable, hence a simple submodule, and $V$ is a sum of such lines. Every simple submodule of $V$ is then isomorphic, as an $R$-module and hence as an $F$-space, to one of these lines, so it is itself a line spanned by an eigenvector. Now each submodule of a semisimple module is semisimple, so $W$ is a sum of simple submodules, that is, of lines spanned by eigenvectors, and the restriction of $E$ to $W$ is diagonalizable.


%let $w\in W$; then $w=v_1+\cdots+v_n$, where $v_i$ are eigenvectors of $E$, attached to eigenvalues $r_i$, respectively; since a sum of eigenvectors with the same eigenvalue is still an eigenvector with the same eigenvalue, we may assume that the $r_i$ are distinct. We prove by induction on $n$ that all the $v_i$ are in $W$. The case $n=1$ is immediate. Applying $E$, we obtain $r_1 v_1+\cdots+r_n v_n=E(w)\in W$. Let $n\geq 2$. Then we may assume that $r_n$ is nonzero. Then $(1-r_1/r_n)v_1+\cdots+(1-r_{n-1}/r_n)v_{n-1}\in W$. The vectors $(1-r_i/r_n)v_i$ are eigenvectors of $E$, hence by induction, they are in $W$; consequently, all the $v_i$, $i=1,\ldots,n-1$, are in $W$ and finally $v_n$ is in $W$, too. We conclude that $W$ has a spanning set of eigenvectors.

We say that the endomorphism $E$ of $V$ is {\em triangularizable} if there exists a maximal chain of subspaces of $V$, each of them being moreover stable; in other words, there exists a basis in which the matrix of $E$ is an upper (or equivalently lower) triangular matrix. Correspondingly, we say that a square matrix $M$ over $F$ is {\em pseudo-triangularizable} if it is the matrix of some triangularizable pseudo-linear endomorphism; in other words, there exists a square invertible matrix $P$ such that $P^{-1}MP^\theta$ is a triangular matrix.

If $W$ is a subspace of $V$ stable under a triangularizable pseudo-linear endomorphism $E$ of $V$, then the endomorphisms induced by $E$ on $W$ and $V/W$ are triangularizable. Indeed, the maximal chain of stable subspaces above induces such chains in $W$ and in $V/W$.

\section{Minimal representations}
A representation of the germ $\phi$ is called {\em minimal} if it has the minimal dimension among all representations of $\phi$. We call this dimension the {\em rank} of $\phi$.
The lemma below follows from the proof of Th.\ref{recursion=matrix}. 

\begin{lemma}\label{rank=length}
Let $f$ be a sequence over $K$. The rank of $[f]$ is equal to the smallest length of a recursion for $f$. 
\end{lemma}

The following lemma is a consequence of the previous one and of \cite{stanley1}, Th. 1.6 and its proof.

\begin{lemma}\label{rank=dimension}
The rank of $[f]$ is equal to the dimension over $K(t)$ of the vector subspace spanned by the germs $E^i[f]$, $i\in \mathbb N$. More precisely, a basis of this subspace is formed by these elements for $i=0,\ldots,d-1$, where $d$ is the rank.
\end{lemma}

Similarly, we have, using the proof of Th.\ref{recursion=matrix},

\begin{lemma}\label{shortest}
Let $\phi$ be a P-recursive germ. The following conditions are equivalent, for elements $r_1(t),\ldots,r_d(t)$ of $K(t)$:

(i) the shortest recursion satisfied by a representative $f$ of $\phi$ is Eq.(\ref{recursion});

(ii) $\phi$ has a representation $(\lambda, M(t), \gamma(t), n_0)$ with $M(t+1)\cdots M(t+d)\gamma(t+d)=r_1(t)M(t+1)\cdots M(t+d-1)\gamma(t+d-1)
+\cdots+r_{d-1}(t)M(t+1)\gamma(t+1)
+r_d(t)\gamma(t)$ and $d$ is the smallest possible.
\end{lemma}

We can give an intrinsic characterization of minimal representations.

\begin{proposition}\label{characterization minimality}
A representation $(\lambda, M(t), \gamma(t), n_0)$ of dimension $d$ is minimal if and only if the following two conditions hold:

(i) the $d$ column vectors $\gamma(t)$, $M(t+1)\gamma(t+1)$,\ldots,$M(t+1)\cdots M(t+d-1)\gamma(t+d-1)$ form a basis of the $K(t)$-vector space $K(t)^{d \times 1}$;

(ii) let $e_i$ be the canonical basis of $K(t)^{d \times 1}$. Then the $d$ germs represented by the representations $(\lambda, M(t), e_i, n_0)$ are linearly independent over $K(t)$.
\end{proposition}

Note that condition (i) means equivalently that the vectors $M(t+1)M(t+2)\ldots M(t+i)\gamma(t+i)$, $i\in \mathbb N$, span the $K(t)$-vector space $K(t)^{d \times 1}$; this follows from the calculations in the second part of the proof of Theorem \ref{recursion=matrix}. Moreover, condition (ii) means equivalently that for no nonzero vector $v(t)\in K[t]^{d\times 1}$, the germ represented by $(\lambda, M(t), v(t), n_0)$ is the zero germ.

\begin{proof}
Suppose that the given representation is minimal. Then $d$ is the rank of the germ $\phi$ represented by it.
Condition (i) follows from the proof of Th. \ref{recursion=matrix}: indeed, if these vectors do not form a basis, they are not linearly independent; then that proof shows that the germ satisfies a linear recursion of length smaller than $d$, contradicting Lemma \ref{rank=length}.

Suppose that condition (ii) does not hold. Then these $d$ germs $\phi_i$ are linearly dependent and the subspace they span is therefore of dimension less than $d$ over $K(t)$. The germs $E^j(\phi)$ are represented by $(\lambda,M(t),M(t+1)\cdots M(t+j)\gamma(t+j),n_0)$ (Lemma \ref{shift}). We have, for some elements $r_i(t)$ of $K(t)$: $M(t+1)\cdots M(t+j)\gamma(t+j)=\sum_{i=1}^d r_i(t)e_i$; by Lemma \ref{LinearCombGamma}, we deduce that $E^j(\phi)=\sum_{i=1}^d r_i(t)\phi_i$; thus $E^j(\phi)$ belongs to a subspace of dimension $<d$, contradicting Lemma \ref{rank=dimension}.

Conversely, suppose that these two conditions hold. By condition (i), the $e_i$ are linear combinations over $K(t)$ of the vectors in (i). 
Now, by Lemma \ref{LinearCombGamma}, the $d$ germs of (ii) are linear combinations of the $d$ germs represented by the representations $(\lambda, M(t), M(t+1)\cdots M(t+i)\gamma(t+i), n_0)$, $i=0,\ldots,d-1$. Thus, by (ii), the $d$ latter germs are linearly independent over $K(t)$. But these germs are the $E^i(\phi)$, $i=0,\ldots,d-1$, which shows by Lemma \ref{rank=dimension} that the rank of $\phi$ is at least $d$. Since $\phi$ has a representation of dimension $d$, this rank is $d$ and the representation is therefore minimal.
\end{proof}

We give now two examples which show that the conditions (i) and (ii) in the proposition are independent. Consider the representation 
$$((1,0),\left(\begin{array}{cc}1&0\\1&2\end{array}\right),\left(\begin{array}{c}1\\0\end{array}\right),0).$$
Since $\left(\begin{array}{cc}1&0\\1&2\end{array}\right)\left(\begin{array}{c}1\\0\end{array}\right)=\left(\begin{array}{c}1\\1\end{array}\right)$, condition (i) is satisfied. Now the representation 
$$((1,0),\left(\begin{array}{cc}1&0\\1&2\end{array}\right),\left(\begin{array}{c}0\\1\end{array}\right),0)$$
represents the zero sequence, so that condition (ii) is not satisfied.

For the converse counter-example, we choose the transpose representation
$$((1,0),\left(\begin{array}{cc}1&1\\0&2\end{array}\right),\left(\begin{array}{c}1\\0\end{array}\right),0).$$
Since $\left(\begin{array}{cc}1&1\\0&2\end{array}\right)\left(\begin{array}{c}1\\0\end{array}\right)=\left(\begin{array}{c}1\\0\end{array}\right)$, condition (i) is not satisfied; moreover, this representation represents the constant sequence $(1)$. Now the representation 
$$((1,0),\left(\begin{array}{cc}1&1\\0&2\end{array}\right),\left(\begin{array}{c}0\\1\end{array}\right),0)$$
represents the sequence $(2^n-1)$, since $\left(\begin{array}{cc}1&1\\0&2\end{array}\right)^n=\left(\begin{array}{cc}1&2^n-1\\0&2^n\end{array}\right)$. Since the germs represented by the sequences $(1)$ and $(2^n-1)$ are linearly independent over $K(t)$ (by Petkov\v{s}ek's algorithm), (ii) is satisfied.
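These computations can also be checked mechanically; the following Python sketch (an illustration only, with constant matrices, so that plain integer arithmetic suffices) verifies the three sequences involved.
\begin{verbatim}
def rep_value(lam, M, gamma, n):
    row = list(lam)
    for _ in range(n):                  # constant matrix: multiply n times
        row = [sum(row[k] * M[k][j] for k in range(2)) for j in range(2)]
    return sum(row[j] * gamma[j] for j in range(2))

A = [[1, 0], [1, 2]]                    # first example
B = [[1, 1], [0, 2]]                    # transpose example
for n in range(10):
    assert rep_value((1, 0), A, (0, 1), n) == 0          # zero sequence
    assert rep_value((1, 0), B, (1, 0), n) == 1          # constant sequence (1)
    assert rep_value((1, 0), B, (0, 1), n) == 2**n - 1   # sequence (2^n - 1)
print("the three sequences are as claimed for n <= 9")
\end{verbatim}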

\section{Similarity}
The aim of this section is to show how the different minimal representations of $\phi$ are related to each other.

Two representations $(\lambda, M(t), \gamma(t), n_0)$ and $(\lambda', M'(t), \gamma'(t), n'_0)$ are {\em strongly} {\em similar} if they have the same dimension $d$, if $n_0=n'_0$ and if there exists a matrix $P(t)\in GL_d(K(t))$ such that $\lambda'=\lambda P(n_0)$, $M'(t)=P(t-1)^{-1}M(t)P(t)$, $\gamma'(t)=P(t)^{-1}\gamma(t)$ and that $P(n), P(n)^{-1}$ are defined for any $n\geq n_0$.
Then they represent the same germ: indeed, for any $n\geq n_0$, one has
$$ 
\lambda' M'(n_0+1) M'(n_0+2)\cdots M'(n)\gamma'(n)$$
$$
=\lambda P(n_0) P(n_0)^{-1}M(n_0+1)P(n_0+1)P(n_0+1)^{-1}M(n_0+2)P(n_0+2)\cdots $$
$$\cdots P(n-1)^{-1}M(n)P(n)P(n)^{-1}\gamma(n) =
\lambda M(n_0+1) M(n_0+2)\cdots M(n)\gamma(n).$$
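For a concrete check, the following Python sketch (an illustration only; the change of basis $P(t)$ below is a hypothetical choice of ours) applies a strong similarity to the representation of the involution numbers from Section \ref{repr} and verifies that the represented sequence is unchanged.
\begin{verbatim}
def mat_mul(A, B):
    return [[sum(A[i][k] * B[k][j] for k in range(len(B)))
             for j in range(len(B[0]))] for i in range(len(A))]

def rep_value(lam, M_at, gamma_at, n):
    row = [list(lam)]
    for k in range(1, n + 1):
        row = mat_mul(row, M_at(k))
    return mat_mul(row, [[g] for g in gamma_at(n)])[0][0]

M    = lambda t: [[0, t], [1, 1]]       # involution matrix, lambda = (1, 1)
P    = lambda t: [[1, t], [0, 1]]       # invertible for every t
Pinv = lambda t: [[1, -t], [0, 1]]      # its inverse

lam2 = mat_mul([[1, 1]], P(0))[0]                              # lambda P(n_0)
M2   = lambda t: mat_mul(mat_mul(Pinv(t - 1), M(t)), P(t))     # P(t-1)^{-1} M(t) P(t)
gam2 = lambda n: [r[0] for r in mat_mul(Pinv(n), [[1], [0]])]  # P(t)^{-1} gamma(t)

for n in range(12):
    assert rep_value(lam2, M2, gam2, n) == rep_value((1, 1), M, lambda m: (1, 0), n)
print("the strongly similar representation gives the same values for n <= 11")
\end{verbatim}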

Two representations are called {\em similar} if they have translates which are strongly similar. They therefore represent the same germ. The following result is the converse, under the hypothesis of minimality.

\begin{theorem}\label{similar}
Any two minimal representations of the same P-recursive germ are similar.
\end{theorem}
In particular, we obtain
\begin{corollary}\label{Msimilar}
If $(\lambda, M(t), \gamma(t), n_0)$ and $(\lambda', M'(t), \gamma'(t), n'_0)$ are two minimal representations of the same germ, then the matrices $M(t)$ and $M'(t)$ are pseudo-similar.
\end{corollary}
We begin by some lemmas.
\begin{lemma}\label{spec}
Let $(\lambda, M(t), \gamma(t), n_0)$ be a minimal representation of dimension $d$, and let $(\lambda', M(t), \gamma(t), n'_0)$, where $n'_0=n_0+k$, be its $k$-translate. Then for all sufficiently large $k$ the column vectors $\gamma(n'_0)$, $M(n'_0+1)\gamma(n'_0+1)$,\ldots, $M(n'_0+1)M(n'_0+2)\ldots M(n'_0+d-1)\gamma(n'_0+d-1)$ are $K$-linearly independent.
\end{lemma}

\begin{proof}
We know by Proposition \ref{characterization minimality} that the $d$ column vectors $\gamma(t)$, $M(t+1)\gamma(t+1)$,\ldots,$M(t+1)\cdots M(t+d-1)\gamma(t+d-1)$ form a basis of the $K(t)$-vector space $K(t)^{d \times 1}$. Hence the $d\times d$ determinant over $K(t)$ formed by these columns is nonzero. Thus, if $k$ is large enough, this determinant is nonzero when we put $t=n_0+k$. Let $n'_0=n_0+k$. Then the $k$-translate of $(\lambda, M(t), \gamma(t), n_0)$ is $(\lambda', M(t), \gamma(t), n'_0)$ with $n'_0=n_0+k$. Thus the lemma follows.
\end{proof}

\begin{lemma}\label{M'=PMP}
Let $(\lambda, M(t), \gamma(t), n_0)$ and $(\lambda', M'(t), \gamma'(t), n'_0)$ be two minimal representations of dimension $d$ of the same germ.  If there exists a matrix $P(t)\in GL_d(K(t))$ such that $M'(t)=P(t-1)^{-1}M(t)P(t)$ and $\gamma'(t)=P(t)^{-1}\gamma(t)$, then these representations are similar.
\end{lemma}

\begin{proof}
By taking translates of these representations, we may assume that $n_0=n'_0$, that the sequences that they represent coincide for $n\geq n_0$, that $P(n), P(n)^{-1}$ are defined for $n\geq n_0$, and that
the column vectors $\gamma(n_0)$, $M(n_0+1)\gamma(n_0+1)$,\ldots, $M(n_0+1)M(n_0+2)\ldots M(n_0+d-1)\gamma(n_0+d-1)$ are $K$-linearly independent (Lemma \ref{spec}).

Consider the representation $(\lambda_1, M'(t),\gamma'(t), n_0)$ with $\lambda_1=\lambda P(n_0)$. It is strongly similar to $(\lambda, M(t), \gamma(t), n_0)$. Moreover, since strongly similar representations represent the same sequence for $n \geq n_0$, we have $\lambda_1M'(n_0+1)M'(n_0+2)\cdots M'(n_0+i)\gamma'(n_0+i)=\lambda P(n_0)P(n_0)^{-1}M(n_0+1)P(n_0+1)\cdots P(n_0+i-1)^{-1}M(n_0+i)P(n_0+i)P(n_0+i)^{-1}\gamma(n_0+i)=\lambda M(n_0+1)\cdots M(n_0+i)\gamma(n_0+i)=\lambda'M'(n_0+1)M'(n_0+2)\cdots M'(n_0+i)\gamma'(n_0+i)$ for any $i\in \mathbb N$. Thus, by the linear independence above, we have $\lambda_1=\lambda'$. This proves the lemma.
\end{proof}

We now prove the theorem above.
\begin{proof}
Let $(\lambda,M(t),\gamma(t),n_0)$ be a minimal representation of a germ $\phi$. Then $$(\lambda,M(t),M(t+1)\cdots M(t+i)\gamma(t+i),n_0)$$ is a representation of the germ $E^i(\phi)$ for any $i\in \mathbb N$ (Lemma \ref{shift}). By Lemma \ref{rank=dimension} and Proposition \ref{characterization minimality}, we obtain therefore a $K(t)$-linear isomorphism $u$ from the vector subspace of $\cal G$ spanned by the $E^i(\phi)$ into the vector space $K(t)^{d\times 1}$, which sends $E^i(\phi)$ onto $M(t+1)\cdots M(t+i)\gamma(t+i)$, for $i=0,\ldots,d-1$. Let $G$ be the mapping sending any $\gamma'(t)\in K(t)^{d\times 1}$ onto $M(t+1)\gamma'(t+1)$. The mapping $G$ is pseudo-linear and its matrix in the canonical basis is $M(t+1)$. One has $u\circ E=G \circ u$, by Lemma \ref{shortest}.

Taking another minimal representation $(\lambda', M'(t), \gamma'(t), n'_0)$ and doing the same thing, we have $u'\circ E=G' \circ u'$. Therefore we obtain by composition a $K(t)$-linear automorphism $v=u\circ u'^{-1}$ of $K(t)^{d\times 1}$ such that $G'=v^{-1}\circ G\circ v$ and $v^{-1}(\gamma)=\gamma'$. Therefore, the matrix of $v$ being $P(t)$, we have by Eq.(\ref{conj}), $M'(t+1)=P(t)^{-1}M(t+1)P(t+1)$, hence $M'(t)=P(t-1)^{-1}M(t)P(t)$, and $P(t)^{-1}\gamma(t)=\gamma'(t)$. This implies the theorem, by Lemma \ref{M'=PMP}.
\end{proof}

Similarity of minimal representations has the following two consequences.

\begin{corollary}\label{invertible}
If $(\lambda,M(t),\gamma(t),n_0)$ is a minimal representation of a P-recursive germ, then $M(t)$ is an invertible matrix.
\end{corollary}

\begin{proof}
This follows from the proof of Theorem \ref{similar}, noting that $E$ is bijective; in the proof, $G$ is therefore bijective, and its matrix is invertible; one concludes using Corollary \ref{Msimilar}.
\end{proof}

Let $(\lambda,M(t),\gamma(t),n_0)$ be a minimal representation of $\phi$. By taking a suitable $k$-translate of this representation, we may by Corollary \ref{invertible} assume that the matrices $M(n)$ are invertible over $K$ for $n\geq n_0$. Let $V$ denote the set of germs represented by  $(v,M(t),\gamma(t),n_0)$, $v\in K^{1\times d}$; $V$ is clearly a $K$-subspace of $\cal G$.

\begin{corollary}\label{depends}
Suppose that $(\lambda,M(t),\gamma(t),n_0)$ is a minimal representation of $\phi$ with $M(n)$ invertible for $n\geq n_0$. Then the subspace $V$ defined above depends only on $\phi$.
\end{corollary}

\begin{proof}
If two representations $(\lambda, M(t), \gamma(t), n_0)$ and $(\lambda', M'(t), \gamma'(t), n'_0)$ are strongly similar, then the associated subspaces are equal, because $n_0=n'_0$ and
$$ 
M'(n_0+1) M'(n_0+2)\cdots M'(n)\gamma'(n)$$
$$
=P(n_0)^{-1}M(n_0+1)P(n_0+1)P(n_0+1)^{-1}M(n_0+2)P(n_0+2)\cdots $$
$$\cdots P(n-1)^{-1}M(n)P(n)P(n)^{-1}\gamma(n) =
P(n_0)^{-1}M(n_0+1) M(n_0+2)\cdots M(n)\gamma(n)$$
and because
$P(n_0)$ is an invertible matrix over $K$.

Moreover, if $(\lambda', M'(t), \gamma'(t), n'_0)$ is a $k$-translate of $(\lambda, M(t), \gamma(t), n_0)$, then we have $M'=M, \gamma'=\gamma$ and $n'_0=n_0+k$. 
Thus for $n\geq n_0+k$
$$M(n_0+1)M(n_0+2)\cdots M(n)\gamma(n)=$$
$$M(n_0+1)M(n_0+2)\cdots M(n_0+k) M(n_0+k+1) M(n_0+k+2)\cdots M(n)\gamma(n)$$
$$=M(n_0+1)M(n_0+2)\cdots M(n_0+k) M(n'_0+1)M(n'_0+2)\cdots M(n)\gamma'(n).
$$
Since the matrix $M(n_0+1)M(n_0+2)\cdots M(n_0+k)$ is invertible over $K$, the associated subspaces are equal.

Thus the corollary follows from Th. \ref{similar}.
\end{proof}

%%%%%%

\section{Minimal and nonminimal representations}
\label{nonminimalsec}

We show how general representations of $\phi$ are related to minimal representations: they have a block decomposition, with three diagonal blocks, such that the middle block is a minimal representation.

\begin{theorem}\label{nonminimal}
Let $(\lambda, M(t), \gamma(t), n_0)$ be a representation of dimension $d$ of a P-recursive germ $\phi$. Then it is similar to a representation of the form $(\bar\lambda, \bar M(t), \bar\gamma(t), \bar n_0)$ with the compatible block decompositions $\bar \lambda=(0,\lambda',\times)$,
$$\bar M=
\left(
\begin{array}{ccccc}
M_1&\times&\times\\
0&M'&\times\\
0&0&M_2
\end{array}
\right)
$$
and 
$$
\bar \gamma= 
\left(\begin{array}{c}
\times\\
\gamma'
\\0
\end{array}
\right),
$$
where $(\lambda',M'(t),\gamma'(t),\bar n_0)$ is a minimal representation of $\phi$.
\end{theorem}

\begin{proof}
1. Consider the $K(t)$-vector space $J=K(t)^{d\times 1}$ and the pseudo-linear endomorphism $G$ of $J$ having matrix $M(t+1)$ in the canonical basis. It sends each vector $v(t)$ onto $M(t+1)v(t+1)$.

2. Consider the subset $H$ of $J$ of vectors $v(t)$ such that for any large enough $n\geq n_0$, $v(n)$ is defined and satisfies
%the representation $(\lambda, M(t), v(t), n_0)$ represents the zero germ;
%, for $N_0$ large enough; 
%in other words, $v(t)\in H$ if and only if 
%for $N_0$ large enough, 
%one has, for each $n\geq n_0$ such that $v(n)$ is defined:
\begin{equation}\label{space H}
\lambda M(n_0+1)M(n_0+2)\ldots M(n)v(n)=0.
\end{equation}
%where each factor is defined. 
The subset $H$ is a $K(t)$-subspace of $J$, as is seen by multiplying the latter equation by $r(n)$, for any $r(t)\in K(t)$. It is stable under $G$: indeed, if $v(t)\in H$, let $w(t)=G(v(t))=M(t+1)v(t+1)$. Then Eq.(\ref{space H}) holds for $v$; hence, for $n$ large enough, $w(n)$ is defined and $\lambda M(n_0+1)M(n_0+2)\ldots M(n)w(n)=\lambda M(n_0+1)M(n_0+2)\ldots M(n)M(n+1)v(n+1)=0$. Thus $w(t)\in H$.

3. Now consider the $K(t)$-subspace $I$ of $J$ spanned by the vectors $M(t+1)M(t+2)\ldots M(t+i)\gamma(t+i)$ for $i\in \mathbb N$. It is also stable under $G$. Note that $\gamma(t)\in I$.

4. Note that if we translate the representation $(\lambda, M(t), \gamma(t), n_0)$, then the subspaces $H,I$ are unchanged. Indeed, $M(t)$ and $\gamma(t)$ are unchanged, by Section \ref{Repr}, which implies that $I$ is unchanged; for $H$, this follows from Eq.(\ref{space H}).

5. The subspace $H\cap I$ is also stable under $G$. Let $v_1(t),\ldots,v_d(t)$ be a basis of $J$ such that $v_1,\ldots,v_{dim(H\cap I)}$ is a basis of $H\cap I$  and $v_1,\ldots,v_{dim(I)}$ is a basis of $I$. 
Consider the $K(t)$-linear automorphism $u$ of $J$ that sends $e_j$ onto $v_j(t)$ ($(e_j)$ is the canonical basis). Let $P(t)$ denote its matrix in the canonical basis; in other words, the $j$-th column of $P(t)$ is the vector $v_j(t)$. Choose $\bar n_0\geq n_0$ large enough so that $P(n), P(n)^{-1}$ and the $v_i(n)$ are defined for $n\geq \bar n_0$, and that the vectors $v_i(\bar n_0)$, $i=1,\ldots,dim(H\cap I)$, are linearly independent over $K$; moreover, we may assume that Eq.(\ref{space H}) holds for $n\geq \bar n_0$.

6. Now, we translate the representation $(\lambda, M(t), \gamma(t), n_0)$ by $k=\bar n_0-n_0$: so we are reduced to the case $n_0=\bar n_0$.

7. The matrix of the pseudo-linear mapping $v=u^{-1} \circ G\circ u$ in the canonical basis is by Eq.(\ref{conj}) equal to
$P(t)^{-1}M(t+1)P(t+1)$. Let $\bar M(t)=P(t-1)^{-1}M(t)P(t)$ and $\bar \lambda=\lambda P(\bar n_0)$. The first $dim(H\cap I)$ coordinates of $\bar \lambda$ vanish, since by Eq.(\ref{space H}), one has $\lambda v_i(\bar n_0)=0$ for $i=1,\ldots ,dim(H\cap I)$ (recall that $n_0=\bar n_0$).
%and that the vectors $ v_i(\bar n_0)=0$ for $i=1,\ldots ,dim(H\cap I)$, are linearly independent over $K$. 
Since $H\cap I$ and $I$ are stable under $G$, the matrix of $v$, hence also the matrix $\bar M(t)$, has the block form indicated in the statement. The last $d-dim(I)$ coordinates of the vector $u^{-1}(\gamma(t))$ vanish since $\gamma(t)$ is in $I$; it is represented by the column vector $\bar \gamma(t)=P(t)^{-1} \gamma(t)$. Thus $\bar \lambda$ and $\bar \gamma$ have the block form of the statement.

8. It follows that $(\lambda',M'(t), \gamma'(t),\bar n_0)$ is a representation of $\phi$, of dimension $e=dim(I)-dim(H\cap I)=dim(I/(H\cap I))$. We may identify $K(t)^{e\times 1}$ and $I/(H\cap I)$. We deduce from the construction and from Proposition \ref{characterization minimality} that it is minimal. 

9. Indeed, if condition (ii) were not satisfied, there would exist a nonvanishing $v(t)\in K(t)^{e\times 1}$ such that the germ represented by $(\lambda',M'(t), v(t),\bar n_0)$ is the zero germ; thus we could find a nonvanishing element in $H$ which is a linear combination of the vectors $v_i(t)$, $i=dim(H\cap I)+1,\ldots,dim(I)$, contradicting the construction of the $v_i(t)$. 

10. Moreover, condition (i) is satisfied, since the space $I$ is spanned by the elements $M(t+1)M(t+2)\ldots M(t+i)\gamma(t+i)$ for $i\in \mathbb N$; hence the space $I/(H\cap I)$ is spanned by the vectors $M'(t+1)M'(t+2)\ldots M'(t+i)\gamma'(t+i)$ for $i\in \mathbb N$; so that these vectors, for $i=0,\ldots,e-1$, form a basis of $K(t)^{e\times 1}$ and (i) holds (see the remark after Prop. \ref{characterization minimality}).
\end{proof}
%%%%%%
\section{Closed forms}\label{closedforms}
We call a germ {\em hypergeometric} if it is of the form $[f]$ where $f$ is a hypergeometric sequence. Clearly, hypergeometric germs are the P-recursive germs of rank 1; this follows since a $K(t)$-multiple of a hypergeometric germ is hypergeometric.

Following \cite{petkovsek} p. 259, or \cite{a=b} Definition 8.1.1, we say that a germ $\phi$ is a {\em closed form} if it is a sum of hypergeometric germs.

\begin{theorem} \label{closed} The following conditions are equivalent for a P-recursive germ:

(i) it is a closed form;

(ii) it has a representation $(\lambda, M(t), \gamma(t), n_0)$ such that $M(t)$ is a diagonal matrix;

(iii) it has a minimal representation $(\lambda, M(t), \gamma(t), n_0)$ such that $M(t)$ is a diagonal matrix.
\end{theorem}
 

\begin{proof}
(i) implies (ii): suppose that the germ $\phi$ is a sum of hypergeometric germs. Each hypergeometric germ has a representation of dimension 1, and by Th.\ref{oper} (i), the direct sum of these representations is a representation of $\phi$, not necessarily minimal, whose matrix $M(t)$ is diagonal. 

(ii) implies (iii): By Section \ref{pseudo-linear}, we know that diagonalizable pseudo-linear endomorphisms induce, on stable subspaces and quotients, pseudo-linear diagonalizable endomorphisms. Thus, by Th.\ref{nonminimal}, $\phi$ has a minimal representation $(\lambda', M'(t), \gamma'(t), n'_0)$ such that $M'(t)$ is pseudo-diagonalizable. 

Then we can find an invertible square matrix $Q(t)$ over $K(t)$ such that $Q(t)^{-1}M'(t)Q(t+1)$ is diagonal. Writing $P(t)=Q(t+1)$, we see that $P(t)$ is square and invertible and that $M(t)=P(t-1)^{-1}M'(t)P(t)$ is diagonal. Let $\gamma(t)=P(t)^{-1}\gamma'(t)$. Translating the representation $(\lambda', M'(t), \gamma'(t), n'_0)$ to some $n_0$ large enough that $P(n)$ and $P(n)^{-1}$ are defined for $n\geq n_0$, and putting $\lambda=\lambda' M'(n'_0+1)\cdots M'(n_0)P(n_0)$, we obtain a representation $(\lambda, M(t), \gamma(t), n_0)$ which is strongly similar to this translate; it therefore represents $\phi$, it is minimal (its dimension is the rank of $\phi$), and $M(t)$ is diagonal. 

(iii) implies (i): Clearly, $\phi$ is a linear combination over $K(t)$ of hypergeometric germs. Since a $K(t)$-multiple of a hypergeometric germ is hypergeometric, we see that actually $\phi$ is a sum of hypergeometric germs.
\end{proof}

The proof also shows the following two corollaries.

\begin{corollary}\label{sum}
If a germ is a closed form, then it is a sum of exactly $d$ hypergeometric germs, where $d$ is its rank, and it is not a sum of fewer. 
\end{corollary}

According to \cite{petkovsek} Proposition 5.4, this sum is even unique.

\begin{corollary}
A germ is a closed form if and only if for each minimal representation $(\lambda, M(t), \gamma(t), n_0)$, $M(t)$ is pseudo-diagonalizable.
\end{corollary}
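As a toy illustration of condition (ii) (a Python sketch with an example of ours, not taken from the references): the closed form $n!+2^n$, a sum of two hypergeometric sequences, is represented by the diagonal matrix $M(t)=\mathrm{diag}(t,2)$ with $\lambda=(1,1)$, $\gamma=(1,1)$ and $n_0=0$.
\begin{verbatim}
from math import factorial

def rep_value(lam, M_at, gamma, n):
    row = list(lam)
    for k in range(1, n + 1):
        M = M_at(k)
        row = [sum(row[i] * M[i][j] for i in range(2)) for j in range(2)]
    return sum(row[j] * gamma[j] for j in range(2))

for n in range(10):
    assert rep_value((1, 1), lambda k: [[k, 0], [0, 2]], (1, 1), n) \
           == factorial(n) + 2**n
print("the diagonal representation reproduces n! + 2^n for n <= 9")
\end{verbatim}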
 
\section{D'Alembertian germs}\label{d'Al}

D'Alembertian germs have been introduced in \cite{abramov}. We follow Definition 1 of \cite{barkatou}: a germ $\phi$ is {\em d'Alembertian} if there exist rational functions $r_1(t),\ldots,r_d(t)$ such that 
\begin{equation}\label{Alemb}
(E-r_1(t))\circ \cdots \circ (E-r_d(t)) (\phi)=0.
\end{equation}
%In other words, the set of d'Alembertian germs is the smallest subset $\cal A$ of $\cal G$ containing the hypergeometric germs and such that: for any $\phi \in \cal A$, $\psi\in\cal G$ and $r(t)\in K(t)$, $(E-r(t))(\psi)=\phi$ implies $\psi\in \cal A$. 
%
%This follows, since for any $s(t)\in K(t)$, one has $s(t+1)(E-r(t))(\phi)=(E-r(t)\frac{s(t+1)}{s(t)})(s(t)\phi)$.

\begin{theorem} \label{d'Alemb} The following conditions are equivalent for a P-recursive germ:

(i) it is a d'Alembertian germ;

(ii) it has a representation $(\lambda, M(t), \gamma(t), n_0)$ with $M(t)$ triangular. 

(iii) it has a minimal representation $(\lambda, M(t), \gamma(t), n_0)$ with $M(t)$ triangular. 
\end{theorem}

\begin{proof}
(i) implies (ii): let $\phi$ be a d'Alembertian germ. Then it satisfies Eq. (\ref{Alemb}). We prove (ii) by induction on $d$. If $d=1$, then $\phi$ is hypergeometric and it has a representation of dimension 1. Suppose that $d>1$ and let $\phi'=(E-r_d(t)) (\phi)$. Then $\phi'$ is d'Alembertian, satisfying an equation of the form (\ref{Alemb}) with $d-1$ factors. We conclude by induction that $\phi'$ has a representation $(\lambda', M'(t), \gamma'(t), n'_0)$ of dimension $d'$, where $M'(t)$ is an upper triangular matrix. For $n\geq n'_0$, consider the row vector $F(n)=\lambda' M'(n'_0+1)M'(n'_0+2)\cdots M'(n)$. Then $F(n+1)=F(n)M'(n+1)$ and, by definition of representations, the sequence $f'$ defined by $f'(n)=F(n)\gamma'(n)$, $n\geq n'_0$, is a representative of the germ $\phi'$. Let $f$ be a representative of the germ $\phi$. Since $\phi'=(E-r_d(t)) (\phi)$, we have for $n$ large enough, $f(n+1)=f'(n)+r_d(n)f(n)=F(n)\gamma'(n)+r_d(n)f(n)$. Let $n_0\geq n'_0$ be such that this equality holds for $n\geq n_0$. Then we have the matrix equality
$$
(F(n+1),f(n+1))=(F(n),f(n)) 
\left( \begin{array}{cc} 
M'(n+1)&\gamma'(n)\\0&r_d(n)
\end{array} \right).
$$
Let $M(n+1)$ denote the latter matrix, $\lambda =(F(n_0), f(n_0))$ and $\gamma =(0,\ldots,0,1)^T$. Then $(\lambda, M(t), \gamma, n_0)$ is a representation of $\phi$. Moreover, $M(t)$ is clearly upper triangular.

(ii) implies (iii): similar to the proof of the similar implication in Theorem \ref{closed}.

(iii) implies (i): we show by induction on $d$ that if $\phi$ has a triangular representation of dimension $d$, then Eq.(\ref{Alemb}) holds. Let $$((\lambda,a),\left(\begin{array}{cc}M(t)&\delta(t)\\0&b(t)\end{array}\right),\left(\begin{array}{c}\gamma(t)\\c(t)\end{array}\right),n_0)$$ be a representation of $f(n)$ in the class of $\phi$, of dimension $d+1$, with a block decomposition such that $a, b(t), c(t)$ are one-by-one matrices. By translating the representation if necessary, we may assume that $c(n)$ is nonzero for $n\geq n_0$; indeed, if $c(t)=0$, then we may directly apply induction. For $n\geq n_0$, let $A(n)=M(n_0+1)M(n_0+2)\cdots M(n)$ and $B(n)=b(n_0+1)b(n_0+2)\cdots b(n)$.

We see that $f(n)$ is equal to
$$\lambda A(n)\gamma(n)+\lambda\sum_{i=n_0+1}^{n} M(n_0+1)\cdots M(i-1)\delta(i) b(i+1)\cdots b(n)c(n)+aB(n)c(n).
$$
Applying the operator $E-\frac{c(t+1)}{c(t)}b(t+1)$ to $\phi$, we obtain a germ $\phi'$ which is represented by the sequence $g(n)=f(n+1)-\frac{c(n+1)}{c(n)}b(n+1)f(n)$. We have
$$g(n)=\lambda A(n+1)\gamma(n+1)-\lambda A(n)\gamma(n) \frac{c(n+1)}{c(n)}b(n+1)
$$
$$+\lambda \sum_{i=n_0+1}^{n+1} M(n_0+1)\cdots M(i-1)\delta(i) b(i+1)\cdots b(n+1)c(n+1)
$$
$$-\lambda\sum_{i=n_0+1}^{n} M(n_0+1)\cdots M(i-1)\delta(i) b(i+1)\cdots b(n)c(n)\frac{c(n+1)}{c(n)}b(n+1)
$$
$$+aB(n+1)c(n+1)-aB(n)c(n)\frac{c(n+1)}{c(n)}b(n+1).
$$
After simplifications, we have
$g(n)=\lambda A(n)\gamma'(n)$, where
$$\gamma'(n)=M(n+1)\gamma(n+1)-\gamma(n)\frac{c(n+1)}{c(n)}b(n+1)+\delta(n+1)c(n+1).$$
Thus the germ $\phi'=[g]$ has the representation $(\lambda, M(t), \gamma'(t),n_0)$, which shows by induction that we have $(E-r_1(t))\circ \cdots \circ (E-r_d(t)) (\phi')=0$ for some $r_i(t)\in K(t)$. Thus we obtain
$(E-r_1(t))\circ \cdots \circ (E-r_d(t)) \circ(E-\frac{c(t+1)}{c(t)}b(t+1)) (\phi)=0$ and $\phi$ is d'Alembertian.
\end{proof}
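As a toy illustration (a Python sketch with an example of ours): the partial sums $f(n)=\sum_{k=0}^n k!$ have the lower triangular representation $\lambda=(1,1)$, $M(t)=\left(\begin{smallmatrix}1&0\\1&t+1\end{smallmatrix}\right)$, $\gamma=(1,0)^T$, $n_0=0$, and the operator $(E-(t+2))\circ(E-1)$ annihilates the germ $[f]$.
\begin{verbatim}
from math import factorial

def rep_value(lam, M_at, gamma, n):
    row = list(lam)
    for k in range(1, n + 1):
        M = M_at(k)
        row = [sum(row[i] * M[i][j] for i in range(2)) for j in range(2)]
    return sum(row[j] * gamma[j] for j in range(2))

f = lambda n: sum(factorial(k) for k in range(n + 1))

for n in range(10):
    # the triangular representation reproduces f(n)
    assert rep_value((1, 1), lambda k: [[1, 0], [1, k + 1]], (1, 0), n) == f(n)
    # (E - 1) f is the hypergeometric sequence (n+1)!
    assert f(n + 1) - f(n) == factorial(n + 1)
    # and (E - (t+2)) annihilates it:  g(n+1) - (n+2) g(n) = 0
    assert (f(n + 2) - f(n + 1)) - (n + 2) * (f(n + 1) - f(n)) == 0
print("triangular representation and annihilator check out for n <= 9")
\end{verbatim}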
For $r,r'\in K(t)$, define an equivalence relation: $r\sim r'$ if $r=r's^\theta/s$ for some nonzero $s\in K(t)$.
\begin{corollary}
If a germ $\phi$ is d'Alembertian, then its rank is equal to the smallest $d$ such that an equation of the form (\ref{Alemb}) holds. Moreover, if one has also $(E-r'_1(t))\circ \cdots \circ (E-r'_d(t)) (\phi)=0$, then for some permutation $\sigma\in S_d$, one has $r_{\sigma(i)}\sim r'_i$ for $i=1,\ldots, d$. 
\end{corollary}

\begin{proof}
This follows from the proof of the previous theorem, knowing that the diagonal elements of two pseudo-similar triangular matrices are related as in the statement. This latter fact follows from the Jordan-H\"older theorem.
\end{proof}

We obtain also the following two corollaries.

\begin{corollary}
A germ is d'Alembertian if and only if for each minimal representation $(\lambda, M(t), \gamma(t), n_0)$, $M(t)$ is pseudo-triangularizable.
\end{corollary}

\begin{corollary}
The set of d'Alembertian germs is a $K(t)$-subalgebra of $\cal G$.
\end{corollary}

This follows since the tensor product of triangular matrices is triangular. This result was already known (see \cite{a=b} page 157).

The germ $[I_n]$, where $I_n$ is the number of involutions in $S_n$, is not d'Alembertian. Indeed, the associated recursion $f(n+2)=f(n+1)+(n+1)f(n)$ has no nonzero hypergeometric solution, as follows from Petkov\v{s}ek's algorithm {\em Hyper}, see \cite{petkovsek} Example 4.4, or \cite{a=b} Example 8.4.3. Now, if a minimal representation of a germ $\phi$ is triangular, then by modifying $\lambda$, one obtains a representation of a nonzero hypergeometric germ, which satisfies the shortest linear recursion of $\phi$; it follows then from Cor. \ref{depends} that $[I_n]$ has no minimal triangular representation.

\section{Field extension}\label{extension}
The next result was known to Petkov\v{s}ek (personal communication) and its proof is completely similar to the proof of Theorem 5.2 of \cite{petkovsek}, which gives the analogous result for hypergeometric germs.

\begin{proposition}
Let $K\subset L$ be a field extension. If a germ $\phi=[f]$ is P-recursive over $L$ and if $f(n)$ is in $K$ for $n$ large enough, then $\phi$ is P-recursive over $K$. Moreover, its ranks over $K$ and over $L$ are equal.
\end{proposition}

\begin{proof}
We use the following well-known result: if an infinite system of linear equations in finitely many variables over $K$ has a nonzero solution in $L$, then it has a nonzero solution in $K$.

By hypothesis, we know that there exist polynomials $p_0(t),\ldots, p_d(t)$ over $L$, with $p_0(t)$ nonzero, such that for $n\geq n_0$, $p_0(n)f(n+d)+p_1(n)f(n+d-1)+\cdots+p_d(n)f(n)=0$ and that $f(n)\in K$. We may assume that $f(n)\in K$ for $n\geq n_0$. Consider this as an infinite system of linear equations in the coefficients of the $p_i$; this system has coefficients in $K$ (since $f(n)$ and $n^i$ are in $K$), and a nonzero solution in $L$ (since $p_i(t)\in L[t]$ and $p_0(t)\neq 0$). Hence it has a nonzero solution in $K$. Thus, $\phi$ is P-recursive over $K$. If we choose $d$ minimal, then $d$ is the rank over $L$; thus the rank over $K$ is not larger; hence the ranks are equal, since a representation over $K$ is also a representation over $L$.
\end{proof}

Observe that, unlike the rank, the property for a germ to be a closed form, or d'Alembertian, depends on the ground field. This is already seen by the simple example of the Fibonacci sequence: over any field containing the golden ratio, it is a closed form, by the Binet formula; however, over the rationals, it is not a closed form, and not even d'Alembertian. Indeed, by Petkov\v{s}ek's algorithm {\em Hyper} (see \cite{petkovsek} Example 5.1), the Fibonacci recurrence has no hypergeometric solution over $\mathbb Q$.

The {\em rank} of a sequence satisfying a {\em linear recursion with constant coefficients} is classically defined as the length of the shortest such recursion; equivalently, as the rank of its {\em Hankel matrix}, see \cite{fliess} and \cite{berstel}, Chapter 6. Such a sequence is clearly P-recursive. It is however not true that both ranks coincide. Indeed, for such a sequence $f$, $f(n)$ may be expressed by an {\em exponential polynomial}; the number of exponentials appearing in it is equal to the number of nonzero roots of the companion polynomial of the shortest recursion with constant coefficients ({\em loc.\ cit.}). This number is equal to the rank of $f(n)$ as a P-recursive sequence, as follows from Corollary 5.1 in \cite{petkovsek} or Theorem 8.7.1 in \cite{a=b} (see also Cor. \ref{sum}). For example, $(n2^n)$ has rank 1 as a P-recursive sequence, but rank 2 in the other sense. 
\section{Positivity}

Let $R$ denote a subsemiring of $K(t)$.
We say that $f$ or $\phi$ is {\em P-recursive on} $R$ if it has a representation $(\lambda,M(t),\gamma (t),n_0)$ whose matrices are over $R$.

\begin{proposition} If a germ $\phi$ is P-recursive on $\mathbb Z[t]$, then it is the difference of two germs which are P-recursive on $\mathbb N[t]$.
\end{proposition}

This result is similar to a result of Sch\"{u}tzenberger on noncommutative rational series over $\mathbb Z$ and $\mathbb N$ (see \cite{berstel} Exercise 7.2.3). For P-recursive sequences, a similar result, due to Kotek and Makowsky \cite{kotek}, is known; I do not know how the result above relates to theirs.

\begin{proof} It will be useful to adopt the following notation: if $r(t)$ is a polynomial over $\mathbb Z$, choose two polynomials $r_+(t)$ and $r_-(t)$ over $\mathbb N$ such that $r=r_+-r_-$.

Instead of representations, we use the graph model given at the end of Section \ref{repr}. We assume that $n_0=0$ for simplicity. Starting with a representation over $\mathbb Z[t]$ and the corresponding graph, we double the number of vertices: a vertex $i$ gives two vertices $i_+$ and $i_-$. For each arrow $i\rightarrow j$ in the original graph, we know that its label is $r(t)\in \mathbb Z[t]$; this arrow gives four arrows in the new graph: $i_s\rightarrow j_u$, with $s,u\in\{+,-\}$, labelled $r_+(t)$ if $s,u$ are equal, and labelled $r_-(t)$ otherwise. This new graph corresponds to a matrix $N(t)$.

Now define a row vector $\lambda'$ indexed by the $i_s$: $\lambda'_{i_+}=(\lambda_i)_+$ and $\lambda'_{i_-}=(\lambda_i)_-$. Likewise, define two column vectors $\gamma_+(t)$ and $\gamma_-(t)$ indexed by the $i_s$: $(\gamma_s(t))_{i_u}=(\gamma_i)_+(t)$ if $s=u$, and $=(\gamma_i)_-(t)$ otherwise.

We then obtain two representations $(\lambda_s,N(t),\gamma_+(t),0)$, $s\in\{+,-\}$, which represent two germs that are P-recursive on $\mathbb N[t]$ and whose difference is $\phi$. Indeed, this follows from Eq.(\ref{path}), which may be rewritten as
$$
\sum_{i,j}\bigl((\lambda_i)_+-(\lambda_i)_-\bigr)\Bigl(\sum_{\pi}\bigl((s_1)_+(1)-(s_1)_-(1)\bigr)\cdots \bigl((s_n)_+(n)-(s_n)_-(n)\bigr)\Bigr)
$$
$$
\times \bigl((\gamma_j)_+(n)-(\gamma_j)_-(n)\bigr),
$$
where the second sum is over all paths $\pi=i_0i_1\ldots i_n$ of length $n$ from $i=i_0$ to $j=i_n$, and where $s_k(t)=M(t)_{i_{k-1}i_k}$. The expansion of this formula implies the result. 
\end{proof}
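
To illustrate the doubling construction used in this proof, here is a small sketch in Python, using sympy (the helper names \texttt{split} and \texttt{double} are ours and purely illustrative): it writes each polynomial entry of $M(t)$ as a difference of two polynomials with nonnegative coefficients and assembles the doubled matrix $N(t)$.
\begin{verbatim}
import sympy as sp

def split(p, t):
    # write p in Z[t] as p_plus - p_minus, both with coefficients in N
    p = sp.Poly(p, t)
    plus  = sum( max(c, 0) * t**k for (k,), c in p.terms())
    minus = sum(-min(c, 0) * t**k for (k,), c in p.terms())
    return sp.expand(plus), sp.expand(minus)

def double(M, t):
    # entry ((i,s),(j,u)) of N is (M_ij)_+ if s == u, and (M_ij)_- otherwise;
    # the vertex (i,s) is encoded as index 2*i + s, with s in {0, 1}
    d = M.shape[0]
    N = sp.zeros(2 * d, 2 * d)
    for i in range(d):
        for j in range(d):
            p_plus, p_minus = split(M[i, j], t)
            for s in (0, 1):
                for u in (0, 1):
                    N[2 * i + s, 2 * j + u] = p_plus if s == u else p_minus
    return N

t = sp.symbols('t')
M = sp.Matrix([[t - 1, 2], [-t, t**2 - 3*t]])   # an arbitrary matrix over Z[t]
print(double(M, t))
\end{verbatim}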

\section{Extension of a sequence to $\mathbb Z$}

In Th.3.4 of \cite{stanley1}, Stanley considers a recursion of the form Eq.(\ref{recursion}) where the $r_i(n)$ are defined for each $n\in \mathbb Z$ (the formulation is different but equivalent). He then considers extensions of $f(n)$ to all $n\in \mathbb Z$, and he obtains for P-recursive sequences a generalization of Popoviciu's theorem on rational series. Our aim here is to replace Stanley's condition by a matricial condition, which allows one to compute the values $f(n)$ for all integers $n$.

We say that a representation $(\lambda, M(t), \gamma(t), n_0)$ is {\em strict} if $M(n)$ and $\gamma(n)$ are defined for all $n$ in $\mathbb Z$, and if moreover $M(n)$ is invertible for any $n\in \mathbb Z$ satisfying $n\leq n_0$ (equivalently, for this latter condition, $\det(M(n))\neq 0$ for each $n\leq n_0$). 

Let $R$ be the subring of $K(t)$ of fractions which are defined for any $n\in \mathbb Z$. This ring is principal, since it is a localization of the principal ring $K[t]$. Note that the condition for $M(t)$ and $\gamma(t)$ to be defined for any integer $n$ is equivalent to the fact that their entries are in $R$.

In order to have the next result, we need to extend slightly the notion of representation: we allow here the integer $n_0$ to be negative. 

\begin{proposition}
The following conditions are equivalent, for a P-recursive germ $\phi$.
 
(i) $\phi$ has a strict representation;

(ii) $\phi$ has a strict minimal representation;

(iii) the $R$-submodule of $\cal G$ spanned by the shifts $E^i(\phi)$, $i\in \mathbb N$ of $\phi$ is a finitely generated $R$-module.
\end{proposition}

\begin{proof}
(i) implies (iii): let $(\lambda, M(t), \gamma(t), n_0)$ be a strict representation of $\phi$. Let $\phi_i$ have the representation $(\lambda, M(t), e_i, n_0)$, where $e_i$ is the $i$-th canonical column vector. Let $\cal M$ be the $R$-submodule of $\cal G$ spanned by the $\phi_i$; $\cal M$ is a finitely generated $R$-module. Since $E(\phi_i)$ is represented by $(\lambda, M(t),M(t+1)e_i, n_0)$, we see by Lemma \ref{LinearCombGamma} that $\cal M$ is closed under $E$, because by hypothesis the entries of $M(t+1)$ are in $R$. Moreover, the germ $\phi$ is in $\cal M$, because the entries of $\gamma(t)$ are in $R$. Hence the $R$-submodule of $\cal G$ spanned by the shifts $E^i(\phi)$, $i\in \mathbb N$, of $\phi$ is contained in $\cal M$; since $R$ is principal, hence Noetherian, this submodule is a finitely generated $R$-module.

(iii) implies (ii): note that the field of fractions of $R$ is $K(t)$. Since the $R$-submodule ${\cal M}_0$ of $\cal G$ spanned by the shifts $E^i(\phi)$, $i\in \mathbb N$, is a finitely generated $R$-module, and since it is contained in the $K(t)$-vector space $\cal G$, it is a free $R$-module, of rank equal to the dimension $d$ of the $K(t)$-vector space spanned by the shifts $E^i(\phi)$, $i\in \mathbb N$; thus $d$ is the rank of $\phi$, by Lemma \ref{rank=dimension}. Let $\phi_1,\ldots,\phi_d$ be a basis of the $R$-module ${\cal M}_0$. Then we can find a matrix $M(t)$ over $R$ such that $E(\phi_j)=\sum_{1\leq i\leq d}M(t+1)_{ij}\phi_i$, and a column vector $\gamma(t)$ over $R$ such that $\phi=\sum_{1\leq i\leq d} \gamma(t)_i \phi_i$. Hence, by Lemma \ref{construction}, we may find a representation of $\phi$ with matrix $M(t)$; $M(t)$ is invertible by Corollary \ref{invertible}, and there exists $n_0\in \mathbb Z$ such that $\det(M(n))\neq 0$ for any $n\leq n_0$. It follows that $(\lambda, M(t), \gamma(t), n_0)$ is a strict representation of $\phi$, since the entries of $M(t)$ and $\gamma(t)$ are in $R$. It is minimal since its dimension is the rank of $\phi$.

(ii) implies (i) is clear.
\end{proof}

A natural way to extend a P-recursive sequence $f(n)$, satisfying Eq.(\ref{recursion}), to all of $\mathbb Z$ is to assume that the $r_i(n)$ are defined for all $n$ in $\mathbb Z$ (Stanley's condition considered above) and moreover to assume that $r_d(n)$ is nonzero for all $n\in \mathbb Z$. Note that in this case $f$ has a (minimal) strict representation $(\lambda, M(t), \gamma(t), 0)$; indeed, it suffices to follow the first part of the proof of Theorem \ref{recursion=matrix}, noting that $\gamma(t)$ is constant, that $M(t)$ is the companion matrix of the recursion and that $\pm r_d(t)$ is its determinant.

Let $f$ have the minimal strict representation $(\lambda, M(t), \gamma(t), n_0)$ of dimension $d$. Then we extend $f$ to all of $\mathbb Z$ by the formula 
\begin{equation}\label{negative}
f(n_0-n)=\lambda M(n_0)^{-1}M(n_0-1)^{-1}\cdots M(n_0-n+1)^{-1} \gamma(n_0-n),
\end{equation}
for any $n\geq 0$. Note that this is well defined by the ``strict'' hypothesis.
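
Purely as an illustration of Eq.(\ref{negative}), here is a short numerical sketch in Python (the function \texttt{extend\_backwards} and its interface are ours: $\lambda$ is given as a numeric row vector, and $M$, $\gamma$ as callables returning a matrix, respectively a vector, with $M(n)$ assumed invertible for $n\leq n_0$, as in the ``strict'' hypothesis).
\begin{verbatim}
import numpy as np

def extend_backwards(lam, M, gamma, n0, n):
    # f(n0 - n) = lam M(n0)^{-1} M(n0-1)^{-1} ... M(n0-n+1)^{-1} gamma(n0-n)
    v = np.asarray(lam, dtype=float)
    for k in range(n):                       # k = 0, 1, ..., n-1
        v = v @ np.linalg.inv(M(n0 - k))     # M(n0-k) is invertible by strictness
    return float(v @ gamma(n0 - n))
\end{verbatim}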

\begin{proposition}\label{true}
Let $f$ be as above and suppose that for $n$ large enough, $f(n)$ satisfies the recursion 
\begin{equation}\label{polynomial}
p_0(n)f(n+e)+p_1(n)f(n+e-1)+\cdots+p_e(n)f(n)=0,
\end{equation}
where the $p_i(t)$ are in $K[t]$.
Then this recursion is true for any $n\in \mathbb Z$.
\end{proposition}

\begin{proof}
Let $(\lambda, M(t), \gamma(t), n_0)$ be the minimal strict representation of $f(n)$ considered above. Let $v(t)=p_0(t)M(t+1)\cdots M(t+e)\gamma(t+e)+p_1(t)M(t+1)\cdots M(t+e-1)\gamma(t+e-1)+\cdots+p_e(t)\gamma(t)$. Then by hypothesis $(\lambda, M(t), v(t), n_0)$ represents 0. Thus by Proposition \ref{characterization minimality} (ii), $v(t)$ vanishes. 

We thus have
\begin{equation}\label{rec}
\begin{array}{c}
p_0(t)M(t+1)\cdots M(t+e)\gamma(t+e)+p_1(t)M(t+1)\cdots \\
\times M(t+e-1)\gamma(t+e-1)
+\cdots+p_{e-1}(t)M(t+1)\gamma(t+1)
+p_e(t)\gamma(t)=0.
\end{array}
\end{equation}

In Eq.(\ref{rec}), put $t=n$ with $n\geq n_0$. Thus
$$
\begin{array}{c}
p_0(n)M(n+1)\cdots M(n+e)\gamma(n+e)+p_1(n)M(n+1)\cdots
M(n+e-1)\\
\times\gamma(n+e-1)
+\cdots+p_{e-1}(n)M(n+1)\gamma(n+1)
+p_e(n)\gamma(n)=0.
\end{array}
$$
Multiply on the left by $\lambda M(n_0+1)\cdots M(n)$. We obtain that $p_0(n)f(n+e)+p_1(n)f(n+e-1)+\cdots+p_e(n)f(n)=0$.

Now, in Eq.(\ref{rec}), put $t=n_0-i$ with $1\leq i\leq e$. We obtain
$$
\begin{array}{c}
p_0(n_0-i)M(n_0-i+1)\cdots M(n_0-i+e)\gamma(n_0-i+e)\\+p_1(n_0-i)M(n_0-i+1)\cdots M(n_0-i+e-1)\gamma(n_0-i+e-1)\\
+\cdots +p_{e-i}(n_0-i)M(n_0-i+1)\cdots M(n_0)\gamma(n_0) +\cdots
\\
+p_{e-1}(n_0-i)M(n_0-i+1)\gamma(n_0-i+1)
+p_e(n_0-i)\gamma(n_0-i)=0.
\end{array}
$$
Multiply by $\lambda M(n_0)^{-1}\cdots M(n_0-i+1)^{-1}$ on the left, obtaining
$$
\begin{array}{c}
p_0(n_0-i)\lambda M(n_0+1)\cdots M(n_0-i+e)\gamma(n_0-i+e)\\+p_1(n_0-i)\lambda M(n_0+1)\cdots M(n_0-i+e-1)\gamma(n_0-i+e-1)\\
+\cdots +p_{e-i}(n_0-i)\lambda\gamma(n_0) +\cdots
\\ +p_{e-1}(n_0-i)\lambda M(n_0)^{-1}\cdots M(n_0-i+2)^{-1}\gamma(n_0-i+1)
\\+p_e(n_0-i)\lambda M(n_0)^{-1}\cdots M(n_0-i+1)^{-1}\gamma(n_0-i)=0.
\end{array}
$$
Thus, taking into account Eq.(\ref{negative}), we obtain $p_0(n_0-i)f(n_0-i+e)+p_1(n_0-i)f(n_0-i+e-1)+\cdots
+p_{e-i}(n_0-i)f(n_0) +\cdots 
+p_{e-1}(n_0-i)f(n_0-i+1)+p_e(n_0-i)f(n_0-i)=0$.

Now put $t=n_0-n$ in Eq.(\ref{rec}) with $n>e$. We obtain
$$
p_0(n_0-n)M(n_0-n+1)\cdots M(n_0-n+e)\gamma(n_0-n+e)
$$
$$
+p_1(n_0-n)M(n_0-n+1)\cdots M(n_0-n+e-1)\gamma(n_0-n+e-1)
$$
$$
+\cdots+p_{e-1}(n_0-n)M(n_0-n+1)\gamma(n_0-n+1)+p_e(n_0-n)\gamma(n_0-n)=0.
$$
Multiply on the left by $\lambda M(n_0)^{-1}\cdots M(n_0-n+1)^{-1}$. We obtain
$$
p_0(n_0-n)\lambda M(n_0)^{-1}\cdots M(n_0-n+e+1)^{-1}\gamma(n_0-n+e)
$$
$$
+p_1(n_0-n)\lambda M(n_0)^{-1}\cdots M(n_0-n+e)^{-1}\gamma(n_0-n+e-1)+\cdots
$$
$$
+p_{e-1}(n_0-n)\lambda M(n_0)^{-1}\cdots M(n_0-n+2)^{-1}\gamma(n_0-n+1)
$$
$$
+p_e(n_0-n)\lambda M(n_0)^{-1}\cdots M(n_0-n+1)^{-1}\gamma(n_0-n)=0.
$$
Thus we obtain $p_0(n_0-n)f(n_0-n+e)+p_1(n_0-n)f(n_0-n+e-1)+\cdots+p_{e-1}(n_0-n)f(n_0-n+1)+p_e(n_0-n)f(n_0-n)=0$.

Thus Eq.(\ref{polynomial}) holds for any $n\in \mathbb Z$.
\end{proof}

\begin{corollary}\label{indep}
The extension to $\mathbb Z$ given by Eq.(\ref{negative}) is independent of the chosen strict minimal representation.
\end{corollary}

%Proposition \ref{true} and Corollary \ref{indep} are certainly true if the strict representation is not minimal, but we do not give the proof of this generalization here.

As mentioned before, Stanley considers in \cite{stanley1} recursions like Eq.(\ref{polynomial}), with the assumption that $p_0$ has no integer roots. We show by an example that a sequence may have a strict representation without satisfying any such recursion. Indeed, take the representation $(\lambda, M(t)=
\left(\begin{array}{cc}1&1\\t&t+1
\end{array}\right),\gamma(t)=\left(\begin{array}{c}1\\0\end{array}\right),0)$ with any nonvanishing $\lambda$. Then one has
$$M(t+1)M(t+2)\gamma(t)=\frac{t^2+5t+5}{t+1}M(t+1)\gamma(t+1)-\frac{t+2}{t+1}\gamma(t).$$
This implies that the associated sequence satisfies the recursion $f(n+2)=\frac{n^2+5n+5}{n+1}f(n+1)-\frac{n+2}{n+1}f(n)$. It may be shown, using Petkov\v{s}ek's algorithm {\em Hyper}, that no hypergeometric sequence satisfies this recursion. Hence $f$ is not hypergeometric, so that the above recursion is its shortest one; and it does not satisfy Stanley's condition, since it is not defined at $n=-1$.
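
The displayed identity may be checked mechanically; the following short sympy snippet (our own verification script, not part of the construction) does so.
\begin{verbatim}
import sympy as sp

t = sp.symbols('t')
M = lambda s: sp.Matrix([[1, 1], [s, s + 1]])
gamma = sp.Matrix([1, 0])

lhs = M(t + 1) * M(t + 2) * gamma
rhs = (t**2 + 5*t + 5) / (t + 1) * M(t + 1) * gamma - (t + 2) / (t + 1) * gamma
assert all(sp.simplify(e) == 0 for e in (lhs - rhs))
\end{verbatim}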

\section{Galois group}

The field $K(t)$ is a {\em difference field}, that is, a field with an automorphism, which here is defined by $\theta(t)=t+1$. Likewise, the ring $\cal G$ is a {\em difference ring}, because $E$ is a ring automorphism. Since $\cal G$ contains $K(t)$ (the embedding is $r(t)\mapsto [f]$, with $f(n)=r(n)$, $n$ large), and since $E$ restricts to $\theta$, $\cal G$ is a difference ring extension of $K(t)$. The field of constants (that is, the fixpoints of the automorphism), in both cases, is $K$. We assume in this section that $K$ is algebraically closed.

Let $\phi$ be a P-recursive germ. Let $(\lambda, M(t), \gamma(t), n_0)$ be a minimal representation of $\phi$. We may by Corollary \ref{invertible} assume that $M(n)\in GL_d(K)$ for $n\geq n_0$. Define, for $n\geq n_0$, $Z(n)=M(n_0+1)M(n_0+2)\cdots M(n)$. This defines a $d\times d$ matrix $Z$ over $\cal G$, which by construction is in $GL_d({\cal G})$; indeed, its determinant is the invertible hypergeometric germ associated to the sequence $d(n_0+1)d(n_0+2)\cdots d(n)$, where $d(t)$ is the determinant of $M(t)$. Moreover, $E(Z)=ZM(t+1)$: indeed, $E(Z)(n)=Z(n+1)=Z(n)M(n+1)$ for $n\geq n_0$, and $M(t+1)$ is viewed in $\cal G$ through the embedding of $K(t)$ into $\cal G$. 

We follow now \cite{put} and \cite{hendriks}. Define the subring $R$ of $\cal G$ generated by $K(t)$, by the $d^2$ entries of $Z$ and by the inverse of the determinant of $Z$. By \cite{put} Proposition 4.1 and \cite{hendriks} Proposition 3.1, $R$ is the {\em Picard-Vessiot ring associated to the difference equation} 
\begin{equation}\label{diff}EY=YM(t+1).
\end{equation} 
It follows from Corollary \ref{depends} that $R$ does not depend on the chosen minimal representation.
This ring is closed under $E$ (by Eq.(\ref{diff}) and since the inverse of a hypergeometric germ is hypergeometric), and is therefore a difference subring of $\cal G$. 

The {\em Galois group} of Eq.(\ref{diff}) is by definition the group of ring automorphisms of $R$ that fix $K(t)$ pointwise and that commute with $E$. 
We call this group the {\em Galois group} of the P-recursive germ $\phi$.

We use the following result of van der Put and Singer (see Proposition 1.21 in \cite{put} or Theorems 2.1 and 2.2 in \cite{hendriks}): let $G$ be an algebraic subgroup of $GL_d$, defined over $K$; recall that $G(K(t))$ denotes the set of matrices in $GL_d(K(t))$ which satisfy the polynomial equations over $K$ defining the algebraic group $G$. Then: (a) if $M(t+1)$ lies in $G(K(t))$, then the Galois group of Eq.(\ref{diff}) is a subgroup of $G$; (b) if $G$ is the Galois group of the equation, then there exists a matrix $P(t)\in GL_d(K(t))$ such that $P(t)^{-1}M(t+1)P(t+1)$ is in $G$.

\begin{theorem} Let $\phi$ be a P-recursive germ and $G(\phi)$ its Galois group.
Then $\phi$ is d'Alembertian if and only if $G(\phi)$ is triangularizable. 
\end{theorem}

\begin{proof}
If $\phi$ is d'Alembertian, then we know by Th.\ref{d'Alemb} that $\phi$ has a minimal representation with $M(t)$ upper triangular. It then follows from (a) above, applied to the group of upper triangular matrices, that $G(\phi)$ is contained in this group. Conversely, if $G(\phi)$ is triangularizable, we may assume that it consists of upper triangular matrices; it then follows from (b) above that $M(t)$ is pseudo-similar to a triangular matrix; hence $\phi$ has a minimal representation with a triangular matrix, and we deduce from Th.\ref{d'Alemb} that $\phi$ is d'Alembertian.
\end{proof}

A germ is hypergeometric if and only if its Galois group is contained in $K^*$\footnote{An example: the Picard-Vessiot extension of $K(t)$ corresponding to the hypergeometric sequence $f(n)=n!$ is isomorphic to the Laurent polynomial ring $K(t)[f,f^{-1}]$, and the action of $a\in K^*$ is induced by the mapping $f\mapsto af$.}; likewise, a germ is a closed form if and only if its Galois group is contained in a torus (that is, a product of copies of $K^*$). This follows from \cite{put}, Chapter 2, where the reader may find all the details of the action of the Galois group as a group of automorphisms of the ring $R$ (see also \cite{hendriks}). 

In \cite{hendriks}, Hendriks and Singer define a class ${\cal L}$ of P-recursive germs, called {\em Liouvillian}. It is by definition the smallest subring of $\cal G$ such that
\begin{enumerate}
\item $K(t)\subset {\cal L}$;
\item ${\cal L}$ is closed under $E$ and $E^{-1}$;
\item ${\cal L}$ contains all the hypergeometric germs;
\item if $\phi$ is in ${\cal L}$ and $E(\psi)-\psi=\phi$, then $\psi\in {\cal L}$;
\item the interlacing of elements in ${\cal L}$ is in ${\cal L}$.
\end{enumerate}
Here, the {\em interlacing} of $m$ sequences $f_0,\ldots,f_{m-1}$ is the sequence $g$ defined by $g(n)=f_r(q)$ if $n=qm+r$ with $0\leq r<m$ (Euclidean division of $n$ by $m$). Thus the interlacing of germs is well-defined. Note that ``interlacing'' is called ``merge'' in \cite{berstel}.
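
In terms of sequences given as functions, interlacing reads as follows (a small illustrative helper, with names of our choosing):
\begin{verbatim}
def interlace(seqs):
    # seqs is a list of m sequences, each given as a callable q -> f_r(q);
    # returns the sequence g with g(n) = f_r(q), where n = q*m + r, 0 <= r < m
    m = len(seqs)
    def g(n):
        q, r = divmod(n, m)
        return seqs[r](q)
    return g
\end{verbatim}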

The main result of \cite{hendriks} is that a germ is Liouvillian if and only if its Galois group is solvable. Now, the celebrated Lie-Kolchin theorem asserts that a {\em connected} algebraic group is solvable if and only if it is triangularizable. However, the Galois groups of difference equations are in general not connected; if $G^0$ is the connected component of $1$ in the algebraic group $G$, then it is a normal subgroup of finite index with cyclic quotient, see \cite{put} Proposition 1.20 or \cite{hendriks} Theorem 2.1.

\begin{corollary}
A germ $\phi$ is Liouvillian if and only if it is an interlacing of d'Alembertian germs. 
\end{corollary}

\begin{proof}
It follows from \cite{hendriks} that ${\cal L}$ contains the d'Alembertian germs, hence also their interlacings. Conversely, the proof of Theorem 3.4 in \cite{hendriks} shows that if the Galois group of a germ is solvable, then this germ is an interlacing of germs belonging to the class defined by items 1 to 4 above. This class is exactly the class of d'Alembertian germs, by Section 8.6 in \cite{a=b}. Thus the corollary follows.
\end{proof}

\begin{figure}[h]
\begin{center}
\begin{tikzpicture}[auto, node distance=2cm, >=stealth', 
initial text=, initial where=left, accepting where=right, 
accepting/.style=accepting by arrow] 
% les noeuds
\node[state,accepting,initial] (1) {$1$};
\node[state] (2) [right=of 1] {$2$};
% les transitions
\draw[->,bend left=20] (1) to node {$n/2$} (2);
\draw[->,bend left=20] (2) to node {$1$} (1);
\end{tikzpicture}
\caption{A Liouvillian germ which is not d'Alembertian}
\label{automate2}
\end{center}
\end{figure}
In Figure \ref{automate2} is shown the representation (in graph form, with $n_0=0$) of a Liouvillian germ which is not d'Alembertian. Its Galois group is the set of matrices $$\left\{\left(\begin{array}{cc}a&0\\0&b\end{array}\right),\,a,b\in K^*\right\} \cup \left\{\left(\begin{array}{cc}0&a\\b&0\end{array}\right),\,a,b\in K^*\right\},$$ which is a non-connected solvable algebraic group. The germ is the class of $f$, with $f(2n)=\frac{1\cdot 3\cdots(2n-1)}{2^n}$ and $f(2n+1)=0$. The recursion for $f$ is $f(n+2)=\frac{n+1}{2}f(n)$, with $f(0)=1$, $f(1)=0$.
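
Purely as a sanity check of the stated values, in exact rational arithmetic:
\begin{verbatim}
from fractions import Fraction

f = [Fraction(1), Fraction(0)]                 # f(0) = 1, f(1) = 0
for n in range(20):
    f.append(Fraction(n + 1, 2) * f[n])        # f(n+2) = (n+1)/2 * f(n)

for n in range(1, 10):
    odd = Fraction(1)
    for k in range(1, 2 * n, 2):               # 1 * 3 * ... * (2n-1)
        odd *= k
    assert f[2 * n] == odd / 2**n and f[2 * n + 1] == 0
\end{verbatim}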

\section{Effectiveness}

A major result, which is not proved here, is the effectiveness of computing a minimal representation of a given P-recursive germ. This is equivalent to computing a shortest P-recursion Eq.(\ref{recursion}). Petkov\v{s}ek has shown that, if $K=\mathbb Q$, a shortest recursion is effectively computable (personal communication \cite{petkovsek2}); his algorithm uses the resolution of algebraic equations over the field $\bar {\mathbb Q}$ of algebraic numbers and the fact that the shortest recursions over both fields coincide, for a germ over $\mathbb Q$, see Section \ref{extension}. 
%Note that this solves in particular that the question of whether a given germ is hypergeometric is decidable. 

The difficulty in computing a minimal representation lies apparently in condition (ii) of Prop.\ref{characterization minimality}, since it is easy to find a representation satisfying condition (i), by following for example the proof of Th.\ref{recursion=matrix}. 

%Another effectiveness problem is to decide if a germ is diagonalizable, and to compute its diagonal form. It is equivalent to ask if it is a closed form, and to compute it. 

%This problem appears as a generalization of Gosper's algorithm, see \cite{a=b}, Chapter 5. Indeed, consider a hypergeometric sequence $f(n)$ defined by $f(n+1)=r(n+1)f(n)$, where $r(n)$ is a rational function defined for all natural integers (for simplicity). Then consider a sequence $g(n)$ satisfying the recursion $g(n+1)=f(n+1)+g(n)$. Gosper's algorithm answers the question: is there some hypergeometric solution $g(n)$? Equivalently,  is $f(1)+f(2)+\cdots +f(n)$ equal to a hypergeometric sequence + some constant? Now, it is easy to see that $g(n)$ has the linear representation 
%$$((f(0),g(0)),\left(\begin{array}{cc}r(t)&r(t)\\0&1\end{array}\right),\left(\begin{array}{c}0\\1\end{array}\right),0).$$
%This representation satisfies condition (i) of Prop.\ref{characterization minimality}. Indeed, $\gamma(t)=\left(\begin{array}{c}0\\1\end{array}\right)$ and $M(t+1)\gamma(t+1)=\left(\begin{array}{c}r(t+1)\\1\end{array}\right)$ span the space $K(t)^{2\times 1}$, since we assume that $r(t)\neq 1$. Since $g(n)$ 
%is hypergeometric if and only if its rank is 1, we see that $g(n)$ is hypergeometric if and only if the latter representation is not minimal, that is, does not satisfy condition (ii); in this case, computing a minimal representation will give the exact hypergeometric form of $g(n)$.

A more general algorithmic question is the following: given several P-recursive sequences, decide whether they are linearly dependent over $K(t)$. An algorithm for this problem would imply an algorithm for the computation of a minimal representation.
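
Such a dependence can at least be guessed from initial terms. The following sketch (a heuristic, not a decision procedure; the name \texttt{guess\_dependence} and its interface are ours) looks, over $K=\mathbb Q$, for polynomials $p_1,\ldots,p_k$ of degree at most $D$ with $p_1(n)f_1(n)+\cdots+p_k(n)f_k(n)=0$ on a window of sample points, via a nullspace computation; a nonzero answer is only a candidate dependence and must still be proved.
\begin{verbatim}
import sympy as sp

def guess_dependence(seqs, D, n_min, n_max):
    # seqs: list of k callables returning exact integer or rational values f_i(n);
    # unknowns: the coefficients of p_1, ..., p_k, each of degree <= D;
    # use many more sample points than the k*(D+1) unknowns
    rows = [[sp.sympify(f(n)) * n**j for f in seqs for j in range(D + 1)]
            for n in range(n_min, n_max + 1)]
    null = sp.Matrix(rows).nullspace()
    return null[0] if null else None   # candidate coefficient vector, or None
\end{verbatim}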

\section{Arithmetic}

A very natural question is the following: when does a given P-recursive germ have coefficients in $\mathbb Z$ (that is, when is it the class $[f]$ of a sequence over $\mathbb Z$)? This question, even for the subclass of hypergeometric germs, seems very difficult. 

A particular case is the study of ratios of products of factorials, a question studied by Landau \cite{landau}, who showed that their integrality is equivalent to the positivity of a certain step function; the latter problem is related to the Nyman–Beurling formulation of the Riemann hypothesis, see \cite{bober, borisov} for details and references. Picon \cite{picon2} has given new insight into Landau's problem, and also sufficient conditions for the integrality of hypergeometric sequences \cite{picon1, picon3}. 
\begin{thebibliography}{99}

\bibitem[AB]{barkatou}
S.A. Abramov and M.A. Barkatou:
\newblock \textsl{D'Alembertian series solutions at ordinary points of LODE
with polynomial coefficients,}
\newblock Journal of Symbolic Computation,
\textbf{44}, 48-59 (2009).

\bibitem[AP]{abramov}
S.A. Abramov and M. Petkov\v{s}ek:
\newblock \textsl{D'Alembertian Solutions of Linear Differential and Difference Equations,}
\newblock Proceedings of ISSAC 1994,
\newblock ACM Press, 169-174 (1994).

\bibitem[BR]{berstel}
J. Berstel and C. Reutenauer:
\newblock Noncommutative Rational Series with Applications,
\newblock Cambridge University Press (2011).

\bibitem[B]{bober}
J.W. Bober:
\newblock \textsl{Factorial ratios, hypergeometric series, and a family
of step functions,}
\newblock Journal of the London Mathematical Society
\textbf{79}, 422-444 (2009).

\bibitem[Bo]{borisov}
A.A. Borisov:
\newblock \textsl{Quotient singularities, integer ratios of factorials, and the Riemann hypothesis,}
\newblock International Mathematics Research Notices 
\textbf{15}, 19 pp. (2008).

\bibitem[BP]{bronstein}
M. Bronstein and M. Petkov\v{s}ek:
\newblock \textsl{An introduction to pseudo-linear algebra,}
\newblock Theoretical Computer Science
\textbf{157}, 3-33 (1996).

\bibitem[F]{fliess}
M. Fliess:
\newblock \textsl{Matrices de Hankel,}
\newblock Journal de Math\'ematiques Pures et Appliqu\'ees
\textbf{53}, 197-222 (1974).

\bibitem[HS]{hendriks}
P.A. Hendriks and M.F. Singer:
\newblock \textsl{Solving difference equations in finite terms,}
\newblock Journal of Symbolic Computation
\textbf{27}, 239-259 (1999).

\bibitem[J]{jacobson}
N. Jacobson:
\newblock \textsl{Pseudo-linear transformations,}
\newblock Annals of Mathematics
\textbf{38}, 484-507 (1937).

\bibitem[KM]{kotek}
T. Kotek and J.A. Makowsky:
\newblock \textsl{A representation theorem for holonomic sequences
based on counting lattice paths,}
\newblock Fundamenta Informaticae
\textbf{117}, 199-213 (2012).

\bibitem[L]{landau}
E. Landau:
\newblock \textsl{Sur les conditions de divisibilit\'e d'un produit de factorielles par un autre,}
\newblock Nouvelles Annales de Math\'ematiques
\textbf{3}, 344-362 (1900).

\bibitem[LRZ]{larcombe}
P.J. Larcombe, A. Riese and B. Zimmermann:
\newblock \textsl{Computer proofs of matrix product identities,}
\newblock Journal of Algebra and its Applications
\textbf{3}, 105-109 (2004).

\bibitem[P1]{petkovsek}
M. Petkov\v{s}ek:
\newblock \textsl{Hypergeometric solutions of linear recurrences with polynomial coefficients,}
\newblock Journal of Symbolic Computation
\textbf{14}, 243-264 (1992).

\bibitem[P2]{petkovsek2}
M. Petkov\v{s}ek:
\newblock Mail to C. Reutenauer,
16 March 1998.

\bibitem[PWZ]{a=b}
M. Petkov\v{s}ek, H.S. Wilf and D. Zeilberger:
\newblock A=B,
\newblock A K Peters (1996).


\bibitem[Pi1]{picon1}
P.A. Picon:
\newblock \textsl{Un r\'esultat d'int\'egrit\'e,}
\newblock Discrete Mathematics
\textbf{38}, 227-241 (1982).

\bibitem[Pi2]{picon2}
P.A. Picon:
\newblock \textsl{A more precise formulation of a theorem of Landau in the linear case and some applications,}
\newblock European Journal of Combinatorics
\textbf{15}, 561-577 (1994).

\bibitem[Pi3]{picon3}
P.A. Picon:
\newblock \textsl{Sum-translation and symmetry operators and integrality of hypergeometric linear coefficients,}
\newblock International Journal of Algebra and Computation
\textbf{5}, 19-45 (1995).

\bibitem[PS]{put}
M. van der Put and M.F. Singer:
\newblock Galois Theory of Difference Equations,
\newblock Lecture Notes in Mathematics, Springer (1997).

\bibitem[S1]{stanley1}
R.P. Stanley:
\newblock \textsl{Differentially finite power series,}
\newblock European Journal of Combinatorics
\textbf{1}, 175-188 (1980).

\bibitem[S2]{stanley2}
R.P. Stanley:
\newblock Enumerative Combinatorics Vol. 2,
\newblock Cambridge University Press (1999).

\bibitem[Z]{zeilberger}
D. Zeilberger:
\newblock \textsl{A holonomic systems approach to special functions identities,}
\newblock Journal of Computational and Applied Mathematics
\textbf{32}, 321-368 (1990).



\end{thebibliography}

\end{document}
